Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- .gitattributes +1 -0
- chatunivi/lib/libpython3.10.so.1.0 +3 -0
- infer_4_30_0/lib/python3.10/site-packages/fifty-1.0.0.dist-info/RECORD +40 -0
- infer_4_30_0/lib/python3.10/site-packages/fifty-1.0.0.dist-info/REQUESTED +0 -0
- infer_4_30_0/lib/python3.10/site-packages/fifty-1.0.0.dist-info/WHEEL +6 -0
- infer_4_30_0/lib/python3.10/site-packages/fifty-1.0.0.dist-info/entry_points.txt +3 -0
- infer_4_30_0/lib/python3.10/site-packages/fifty-1.0.0.dist-info/top_level.txt +1 -0
- infer_4_30_0/lib/python3.10/site-packages/flash_attn/__init__.py +11 -0
- infer_4_30_0/lib/python3.10/site-packages/flash_attn/bert_padding.py +218 -0
- infer_4_30_0/lib/python3.10/site-packages/flash_attn/flash_attn_interface.py +1574 -0
- infer_4_30_0/lib/python3.10/site-packages/flash_attn/flash_attn_triton.py +1160 -0
- infer_4_30_0/lib/python3.10/site-packages/flash_attn/flash_attn_triton_amd/__pycache__/__init__.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/flash_attn/flash_attn_triton_amd/__pycache__/bwd_prefill.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/flash_attn/flash_attn_triton_amd/__pycache__/utils.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/flash_attn/flash_attn_triton_amd/bench.py +290 -0
- infer_4_30_0/lib/python3.10/site-packages/flash_attn/flash_attn_triton_amd/test.py +724 -0
- infer_4_30_0/lib/python3.10/site-packages/flash_attn/flash_attn_triton_og.py +365 -0
- infer_4_30_0/lib/python3.10/site-packages/flash_attn/flash_blocksparse_attention.py +197 -0
- infer_4_30_0/lib/python3.10/site-packages/flash_attn/flash_blocksparse_attn_interface.py +200 -0
- infer_4_30_0/lib/python3.10/site-packages/flash_attn/fused_softmax.py +201 -0
- infer_4_30_0/lib/python3.10/site-packages/flash_attn/layers/__init__.py +0 -0
- infer_4_30_0/lib/python3.10/site-packages/flash_attn/layers/__pycache__/__init__.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/flash_attn/layers/__pycache__/patch_embed.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/flash_attn/layers/__pycache__/rotary.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/flash_attn/modules/__init__.py +0 -0
- infer_4_30_0/lib/python3.10/site-packages/flash_attn/modules/__pycache__/block.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/flash_attn/modules/__pycache__/embedding.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/flash_attn/modules/__pycache__/mlp.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/flash_attn/modules/block.py +397 -0
- infer_4_30_0/lib/python3.10/site-packages/flash_attn/modules/embedding.py +216 -0
- infer_4_30_0/lib/python3.10/site-packages/flash_attn/modules/mha.py +1020 -0
- infer_4_30_0/lib/python3.10/site-packages/flash_attn/modules/mlp.py +191 -0
- infer_4_30_0/lib/python3.10/site-packages/flash_attn/ops/__init__.py +0 -0
- infer_4_30_0/lib/python3.10/site-packages/flash_attn/ops/activations.py +135 -0
- infer_4_30_0/lib/python3.10/site-packages/flash_attn/ops/layer_norm.py +800 -0
- infer_4_30_0/lib/python3.10/site-packages/gguf-0.10.0.dist-info/LICENSE +21 -0
- infer_4_30_0/lib/python3.10/site-packages/gguf-0.10.0.dist-info/METADATA +114 -0
- infer_4_30_0/lib/python3.10/site-packages/lazy_loader/__init__.py +349 -0
- infer_4_30_0/lib/python3.10/site-packages/lazy_loader/__pycache__/__init__.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/lazy_loader/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/lazy_loader/tests/__pycache__/import_np_parallel.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/lazy_loader/tests/__pycache__/test_lazy_loader.cpython-310.pyc +0 -0
- infer_4_30_0/lib/python3.10/site-packages/lazy_loader/tests/fake_pkg/some_func.py +3 -0
- infer_4_30_0/lib/python3.10/site-packages/lazy_loader/tests/import_np_parallel.py +13 -0
- infer_4_30_0/lib/python3.10/site-packages/lazy_loader/tests/test_lazy_loader.py +192 -0
- infer_4_30_0/lib/python3.10/site-packages/nvidia_nvtx_cu11-11.7.91.dist-info/INSTALLER +1 -0
- infer_4_30_0/lib/python3.10/site-packages/nvidia_nvtx_cu11-11.7.91.dist-info/License.txt +1568 -0
- infer_4_30_0/lib/python3.10/site-packages/nvidia_nvtx_cu11-11.7.91.dist-info/METADATA +37 -0
- infer_4_30_0/lib/python3.10/site-packages/nvidia_nvtx_cu11-11.7.91.dist-info/RECORD +29 -0
- infer_4_30_0/lib/python3.10/site-packages/nvidia_nvtx_cu11-11.7.91.dist-info/WHEEL +5 -0
.gitattributes
CHANGED
@@ -2083,3 +2083,4 @@ infer_4_30_0/lib/python3.10/site-packages/pyrender/fonts/OpenSans-Regular.ttf fi
 infer_4_30_0/lib/python3.10/site-packages/cupy_backends/cuda/libs/nvrtc.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 infer_4_30_0/lib/python3.10/site-packages/cupy_backends/cuda/libs/nccl.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 infer_4_30_0/lib/python3.10/site-packages/cupy_backends/cuda/libs/cutensor.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+chatunivi/lib/libpython3.10.so.1.0 filter=lfs diff=lfs merge=lfs -text
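The hunk appends one entry to the existing list of LFS-tracked paths: the filter=lfs/diff=lfs/merge=lfs attributes route the shared library through Git LFS and -text marks it as binary. As a small illustrative sketch (not part of the commit), listing which paths a .gitattributes file routes through LFS can be done with plain file parsing:

# Illustrative sketch: list paths routed through the Git LFS filter.
def lfs_tracked(path=".gitattributes"):
    tracked = []
    with open(path, "r", encoding="utf-8") as fh:
        for line in fh:
            parts = line.split()
            # a tracked entry looks like: <pattern> filter=lfs diff=lfs merge=lfs -text
            if parts and "filter=lfs" in parts[1:]:
                tracked.append(parts[0])
    return tracked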
chatunivi/lib/libpython3.10.so.1.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f9d6aa485ec5e7170b1b69272dfa7baf70d2999ae90ba01a1ec771f11705f123
+size 17434008
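The committed file is a Git LFS pointer rather than the binary itself: the three lines record the pointer spec version, the SHA-256 object id, and the object size in bytes (about 17 MB). A minimal sketch of parsing such a pointer, assuming the standard "key value" line format shown above (the path argument is illustrative):

# Minimal sketch: read a Git LFS pointer file into a dict of its fields.
def parse_lfs_pointer(path):
    fields = {}
    with open(path, "r", encoding="utf-8") as fh:
        for line in fh:
            line = line.strip()
            if not line:
                continue
            key, _, value = line.partition(" ")
            fields[key] = value
    return fields

# e.g. parse_lfs_pointer("chatunivi/lib/libpython3.10.so.1.0")["size"] -> "17434008"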
infer_4_30_0/lib/python3.10/site-packages/fifty-1.0.0.dist-info/RECORD
ADDED
@@ -0,0 +1,40 @@
+../../../bin/fifty,sha256=Q2HL_2jLhXemjzfVnrhnfLGc_fgQtNIIgUGqhcHBc04,226
+fifty-1.0.0.dist-info/DESCRIPTION.rst,sha256=OCTuuN6LcWulhHS3d5rfjdsQtW22n7HENFRh6jC6ego,10
+fifty-1.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+fifty-1.0.0.dist-info/METADATA,sha256=xilZAM-x6UalrPXjNa4LznFbxU8eJoZ3DvwJmlwc67o,759
+fifty-1.0.0.dist-info/RECORD,,
+fifty-1.0.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+fifty-1.0.0.dist-info/WHEEL,sha256=kdsN-5OJAZIiHN-iO4Rhl82KyS0bDWf4uBwMbkNafr8,110
+fifty-1.0.0.dist-info/entry_points.txt,sha256=aHLUEDsx9-8zJEU_jckdia-NzpvPtBWQyx0Y-nsGyCA,42
+fifty-1.0.0.dist-info/metadata.json,sha256=1Bc0EON1AoZYVmx8xvJu1s6uA5myNDqiiD4RvFwVh-4,972
+fifty-1.0.0.dist-info/top_level.txt,sha256=OBcuZK-ZI8PpMf5vTeCFMd_ok7F5XL1cQ-ciTTTkTSI,6
+fifty/__init__.py,sha256=eBuy6W29ni2Vrns0RBhwZ0H9ggcPflT2MEYeKm7ByCE,81
+fifty/__pycache__/__init__.cpython-310.pyc,,
+fifty/__pycache__/cli.cpython-310.pyc,,
+fifty/cli.py,sha256=b_ts2m9pNm8ppoJsxLb-3kmsydW55kjeAG8noOS2TVM,2956
+fifty/commands/__init__.py,sha256=idgE7jI307AXaHiZAe2KhqoTPTSHH9MD7-Qpdmp-LF4,85
+fifty/commands/__pycache__/__init__.cpython-310.pyc,,
+fifty/commands/__pycache__/train.cpython-310.pyc,,
+fifty/commands/__pycache__/whatis.cpython-310.pyc,,
+fifty/commands/train.py,sha256=FVDswfRaOvrGVx7nMy_ylaLLdoS-VuFunXvM1T6CklA,11864
+fifty/commands/whatis.py,sha256=69wM7Vje9JAMc1Zk2g9ib7ltbE_CXLshGBXZgw0OCsI,9245
+fifty/utilities/__init__.py,sha256=5YdTwwQoP6maFB-_aV12Jf5cZJMLoUeunz-pb1XWKV0,14
+fifty/utilities/__pycache__/__init__.cpython-310.pyc,,
+fifty/utilities/__pycache__/framework.cpython-310.pyc,,
+fifty/utilities/framework.py,sha256=x8UZ1IOsdrNh_qDaxQRa6HfwgGnWsP0zfv8YAcTxjrA,2799
+fifty/utilities/labels.json,sha256=oI0kmIQbJe-rJdcdTgf0tt_3uTJykqHrnpDViYQLTcg,1720
+fifty/utilities/models/4096_1.h5,sha256=x4XUZcUtiTstpi9vGGZLmHBgNcK4nf9Tkrg4TA1gk14,3635424
+fifty/utilities/models/4096_1_lighter.h5,sha256=t1gyDhtYiI99TkkjS5bcTZA6492_3lzj2NJOSO0JnN0,1487064
+fifty/utilities/models/4096_2.h5,sha256=5_muAJtA-9IYH3eI-WOmk9pcZNRD2HnnUu5IvYL11I4,4815072
+fifty/utilities/models/4096_3.h5,sha256=mtPKFnCSstW-GCS_nyjE5TfIIjZKj4g5kubgzDaAOYg,3672784
+fifty/utilities/models/4096_4.h5,sha256=b79zLoRVEufxPyQfZpxC_GW3F0hVHpLZR3ErNC-AXwA,5511008
+fifty/utilities/models/4096_5.h5,sha256=LIPRxb_4VuY3cVlD2StytAkuGahKJZ-SaAA3Xxpug58,1151680
+fifty/utilities/models/4096_6.h5,sha256=57diY_It9tkkQxWvw0-SNXKINg4V7t9-qClu7eqOpnM,5364808
+fifty/utilities/models/512_1.h5,sha256=_5No2B7ifhNtKbYPOX0S3azxSuwvUcbS87GG4knL5h0,2348428
+fifty/utilities/models/512_2.h5,sha256=qe-mRdaZtnl4jPWgzdtNHNGd8jZsKPp_h_1Ia61amio,2189792
+fifty/utilities/models/512_3.h5,sha256=uQaTreDWNITmN_ZfCHpr_sEJlJXVQgAexALgbfeiIwQ,5555680
+fifty/utilities/models/512_4.h5,sha256=6jLV_PTLAi7H13oedYw-C-JITjNRfvrRGLC8Vdw_vk0,3836128
+fifty/utilities/models/512_5.h5,sha256=Q3ym6inGoRvipYDq2Kf5SqV10kh2MY3KVYdIO_2VMxg,2722912
+fifty/utilities/models/512_6.h5,sha256=p7jDJP-ra4-4oiZ3VHHNBuz-4niaeVX6Fsh3DtmDoNw,1972192
+fifty/utilities/models/__init__.py,sha256=7mJLvkFrxv0cGSmznV-EqiX22qy8e2F0h172FfmN9fw,59
+fifty/utilities/models/__pycache__/__init__.cpython-310.pyc,,
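Each RECORD row has the form "path,sha256=<digest>,<size>", where the digest is the urlsafe base64 encoding of the file's SHA-256 hash with trailing "=" padding stripped; entries such as RECORD itself and the *.pyc files legitimately leave the hash and size empty. A rough sketch (not part of the commit) of checking one row against the installed file; the site-packages root used here is an assumption:

import base64
import hashlib
import os

# Assumed install root for the fifty package shown in this diff.
SITE = "infer_4_30_0/lib/python3.10/site-packages"

def check_record_row(row, root=SITE):
    path, hash_spec, size = row.rsplit(",", 2)
    if not hash_spec:  # RECORD itself and .pyc entries carry no hash
        return True
    algo, _, expected = hash_spec.partition("=")
    data = open(os.path.join(root, path), "rb").read()
    digest = base64.urlsafe_b64encode(hashlib.new(algo, data).digest())
    actual = digest.rstrip(b"=").decode("ascii")
    return actual == expected and len(data) == int(size)

# e.g. check_record_row("fifty/cli.py,sha256=b_ts2m9pNm8ppoJsxLb-3kmsydW55kjeAG8noOS2TVM,2956")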
infer_4_30_0/lib/python3.10/site-packages/fifty-1.0.0.dist-info/REQUESTED
ADDED
File without changes
infer_4_30_0/lib/python3.10/site-packages/fifty-1.0.0.dist-info/WHEEL
ADDED
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.30.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
infer_4_30_0/lib/python3.10/site-packages/fifty-1.0.0.dist-info/entry_points.txt
ADDED
@@ -0,0 +1,3 @@
+[console_scripts]
+fifty = fifty.cli:main
+
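This console_scripts entry is what produces the ../../../bin/fifty wrapper listed at the top of the RECORD file: running the wrapper imports fifty.cli and calls its main(). A minimal sketch (illustrative, assuming the fifty package is installed in the active environment) of resolving the same entry point from Python:

# Resolve the "fifty" console script declared in entry_points.txt.
from importlib.metadata import entry_points

eps = entry_points(group="console_scripts")
fifty_ep = next(ep for ep in eps if ep.name == "fifty")
main = fifty_ep.load()  # imports fifty.cli and returns its main callable
# Calling main() behaves like invoking the generated bin/fifty script.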
infer_4_30_0/lib/python3.10/site-packages/fifty-1.0.0.dist-info/top_level.txt
ADDED
@@ -0,0 +1 @@
+fifty
infer_4_30_0/lib/python3.10/site-packages/flash_attn/__init__.py
ADDED
@@ -0,0 +1,11 @@
+__version__ = "2.7.2.post1"
+
+from flash_attn.flash_attn_interface import (
+    flash_attn_func,
+    flash_attn_kvpacked_func,
+    flash_attn_qkvpacked_func,
+    flash_attn_varlen_func,
+    flash_attn_varlen_kvpacked_func,
+    flash_attn_varlen_qkvpacked_func,
+    flash_attn_with_kvcache,
+)
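The package __init__ pins the version and re-exports the public attention functions from flash_attn_interface. As a rough usage sketch (illustrative, not part of the commit; assumes a working CUDA build and fp16/bf16 tensors of shape (batch, seqlen, nheads, headdim)):

import torch
from flash_attn import flash_attn_func

# Two sequences of length 1024, 8 heads, head dimension 64.
q = torch.randn(2, 1024, 8, 64, device="cuda", dtype=torch.float16)
k = torch.randn(2, 1024, 8, 64, device="cuda", dtype=torch.float16)
v = torch.randn(2, 1024, 8, 64, device="cuda", dtype=torch.float16)

out = flash_attn_func(q, k, v, dropout_p=0.0, causal=True)  # same shape as q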
infer_4_30_0/lib/python3.10/site-packages/flash_attn/bert_padding.py
ADDED
@@ -0,0 +1,218 @@
+# Adapted from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/padding.py
+
+import torch
+import torch.nn.functional as F
+from einops import rearrange, repeat
+
+
+class IndexFirstAxis(torch.autograd.Function):
+    @staticmethod
+    def forward(ctx, input, indices):
+        ctx.save_for_backward(indices)
+        assert input.ndim >= 2
+        ctx.first_axis_dim, other_shape = input.shape[0], input.shape[1:]
+        second_dim = other_shape.numel()
+        # TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
+        # return input[indices]
+        return torch.gather(
+            rearrange(input, "b ... -> b (...)"), 0, repeat(indices, "z -> z d", d=second_dim)
+        ).reshape(-1, *other_shape)
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        (indices,) = ctx.saved_tensors
+        assert grad_output.ndim >= 2
+        other_shape = grad_output.shape[1:]
+        grad_output = rearrange(grad_output, "b ... -> b (...)")
+        grad_input = torch.zeros(
+            [ctx.first_axis_dim, grad_output.shape[1]],
+            device=grad_output.device,
+            dtype=grad_output.dtype,
+        )
+        # TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
+        # grad_input[indices] = grad_output
+        grad_input.scatter_(0, repeat(indices, "z -> z d", d=grad_output.shape[1]), grad_output)
+        return grad_input.reshape(ctx.first_axis_dim, *other_shape), None
+
+
+index_first_axis = IndexFirstAxis.apply
+
+
+class IndexPutFirstAxis(torch.autograd.Function):
+    @staticmethod
+    def forward(ctx, values, indices, first_axis_dim):
+        ctx.save_for_backward(indices)
+        assert indices.ndim == 1
+        assert values.ndim >= 2
+        output = torch.zeros(
+            first_axis_dim, *values.shape[1:], device=values.device, dtype=values.dtype
+        )
+        # TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
+        output[indices] = values
+        # output.scatter_(0, repeat(indices, 'z -> z d', d=values.shape[1]), values)
+        return output
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        (indices,) = ctx.saved_tensors
+        # TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
+        grad_values = grad_output[indices]
+        # grad_values = torch.gather(grad_output, 0, repeat(indices, 'z -> z d', d=grad_output.shape[1]))
+        return grad_values, None, None
+
+
+index_put_first_axis = IndexPutFirstAxis.apply
+
+
+class IndexFirstAxisResidual(torch.autograd.Function):
+    @staticmethod
+    def forward(ctx, input, indices):
+        ctx.save_for_backward(indices)
+        assert input.ndim >= 2
+        ctx.first_axis_dim, other_shape = input.shape[0], input.shape[1:]
+        second_dim = other_shape.numel()
+        # TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
+        output = input[indices]
+        # We don't want to reshape input (b ... -> b (...)) since it could change the channel_last
+        # memory format to channel_first. In other words, input might not be contiguous.
+        # If we don't detach, Pytorch complains about output being a view and is being modified inplace
+        return output, input.detach()
+
+    @staticmethod
+    def backward(ctx, grad_output, grad_residual):
+        (indices,) = ctx.saved_tensors
+        assert grad_output.ndim >= 2
+        other_shape = grad_output.shape[1:]
+        assert grad_residual.shape[1:] == other_shape
+        grad_input = grad_residual
+        # grad_input[indices] += grad_output
+        indices = indices.reshape(indices.shape[0], *((1,) * (grad_output.ndim - 1)))
+        indices = indices.expand_as(grad_output)
+        grad_input.scatter_add_(0, indices, grad_output)
+        return grad_input.reshape(ctx.first_axis_dim, *other_shape), None
+
+
+index_first_axis_residual = IndexFirstAxisResidual.apply
+
+
+def unpad_input(hidden_states, attention_mask, unused_mask=None):
+    """
+    Arguments:
+        hidden_states: (batch, seqlen, ...)
+        attention_mask: (batch, seqlen), bool / int, 1 means valid and 0 means not valid.
+        unused_mask: (batch, seqlen), bool / int, 1 means the element is allocated but unused.
+    Return:
+        hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask + unused_mask.
+        indices: (total_nnz), the indices of masked tokens from the flattened input sequence.
+        cu_seqlens: (batch + 1), the cumulative sequence lengths, used to index into hidden_states.
+        max_seqlen_in_batch: int
+        seqused: (batch), returns the number of tokens selected in attention_mask + unused_mask.
+    """
+    all_masks = (attention_mask + unused_mask) if unused_mask is not None else attention_mask
+    seqlens_in_batch = all_masks.sum(dim=-1, dtype=torch.int32)
+    used_seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
+    indices = torch.nonzero(all_masks.flatten(), as_tuple=False).flatten()
+    max_seqlen_in_batch = seqlens_in_batch.max().item()
+    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
+    # TD [2022-03-04] We don't want to index with a bool mask, because Pytorch will expand the
+    # bool mask, then call nonzero to get the indices, then index with those. The indices is @dim
+    # times larger than it needs to be, wasting memory. It's faster and more memory-efficient to
+    # index with integer indices. Moreover, torch's index is a bit slower than it needs to be,
+    # so we write custom forward and backward to make it a bit faster.
+    return (
+        index_first_axis(rearrange(hidden_states, "b s ... -> (b s) ..."), indices),
+        indices,
+        cu_seqlens,
+        max_seqlen_in_batch,
+        used_seqlens_in_batch,
+    )
+
+
+def unpad_input_for_concatenated_sequences(hidden_states, attention_mask_in_length):
+    """
+    Supports concatenating short samples in one sequence. The attention_mask_in_length is utilized to mask other short samples. It helps efficient training of variant lengths-based samples (e.g., the supervised fine-tuning task in large language model).
+    The motivation for this function is explained [here](https://github.com/Dao-AILab/flash-attention/issues/432#issuecomment-1668822286).
+
+    For example, if batch = 3 and seqlen = 6, the attention_mask_in_length is:
+        ```
+        [
+          [2, 3, 0, 0, 0, 0],
+          [3, 2, 0, 0, 0, 0],
+          [6, 0, 0, 0, 0, 0]
+        ]
+        ```
+    , which refers to the 3D-attention mask:
+        ```
+        [
+          [
+            [1, 0, 0, 0, 0, 0],
+            [1, 1, 0, 0, 0, 0],
+            [0, 0, 1, 0, 0, 0],
+            [0, 0, 1, 1, 0, 0],
+            [0, 0, 1, 1, 1, 0],
+            [0, 0, 0, 0, 0, 1]
+          ],
+          [
+            [1, 0, 0, 0, 0, 0],
+            [1, 1, 0, 0, 0, 0],
+            [1, 1, 1, 0, 0, 0],
+            [0, 0, 0, 1, 0, 0],
+            [0, 0, 0, 1, 1, 0],
+            [0, 0, 0, 0, 0, 1]
+          ],
+          [
+            [1, 0, 0, 0, 0, 0],
+            [1, 1, 0, 0, 0, 0],
+            [1, 1, 1, 0, 0, 0],
+            [1, 1, 1, 1, 0, 0],
+            [1, 1, 1, 1, 1, 0],
+            [1, 1, 1, 1, 1, 1]
+          ]
+        ]
+        ```.
+
+    Arguments:
+        hidden_states: (batch, seqlen, ...)
+        attention_mask_in_length: (batch, seqlen), int, a nonzero number (e.g., 1, 2, 3, etc.) means length of concatenated sequence in b-th batch, and 0 means none.
+    Return:
+        hidden_states: (total_nnz, ...), where total_nnz = number of tokens in selected in attention_mask.
+        indices: (total_nnz), the indices of non-masked tokens from the flattened input sequence.
+        cu_seqlens: (batch + 1), the cumulative sequence lengths, used to index into hidden_states.
+        max_seqlen_in_batch: int
+    """
+    length = attention_mask_in_length.sum(dim=-1)
+    seqlen = attention_mask_in_length.size(-1)
+    attention_mask_2d = torch.arange(seqlen, device=length.device, dtype=length.dtype).expand(len(length), seqlen) < length.unsqueeze(1)
+    real_indices_idx = torch.nonzero(attention_mask_in_length.flatten(), as_tuple=False).flatten()
+    seqlens_in_batch = attention_mask_in_length.flatten()[real_indices_idx]
+    indices = torch.nonzero(attention_mask_2d.flatten(), as_tuple=False).flatten()
+    max_seqlen_in_batch = seqlens_in_batch.max().item()
+    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
+    # TD [2022-03-04] We don't want to index with a bool mask, because Pytorch will expand the
+    # bool mask, then call nonzero to get the indices, then index with those. The indices is @dim
+    # times larger than it needs to be, wasting memory. It's faster and more memory-efficient to
+    # index with integer indices. Moreover, torch's index is a bit slower than it needs to be,
+    # so we write custom forward and backward to make it a bit faster.
+    return (
+        index_first_axis(rearrange(hidden_states, "b s ... -> (b s) ..."), indices),
+        indices,
+        cu_seqlens,
+        max_seqlen_in_batch,
+    )
+
+
+def pad_input(hidden_states, indices, batch, seqlen):
+    """
+    Arguments:
+        hidden_states: (total_nnz, ...), where total_nnz = number of tokens in selected in attention_mask.
+        indices: (total_nnz), the indices that represent the non-masked tokens of the original padded input sequence.
+        batch: int, batch size for the padded sequence.
+        seqlen: int, maximum sequence length for the padded sequence.
+    Return:
+        hidden_states: (batch, seqlen, ...)
+    """
+    dim = hidden_states.shape[-1]
+    # output = torch.zeros((batch * seqlen), dim, device=hidden_states.device, dtype=hidden_states.dtype)
+    # output[indices] = hidden_states
+    output = index_put_first_axis(hidden_states, indices, batch * seqlen)
+    return rearrange(output, "(b s) ... -> b s ...", b=batch)
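bert_padding.py packs variable-length sequences by dropping padded positions before attention and restoring them afterwards; note that this version of unpad_input returns five values (the last one is the per-batch count of used tokens). A short round-trip sketch of the helpers added above (illustrative, not part of the commit; CPU tensors with toy sizes):

import torch
from flash_attn.bert_padding import unpad_input, pad_input

batch, seqlen, dim = 2, 4, 8
hidden = torch.randn(batch, seqlen, dim)
mask = torch.tensor([[1, 1, 1, 0],
                     [1, 1, 0, 0]])  # 1 = real token, 0 = padding

unpadded, indices, cu_seqlens, max_len, seqused = unpad_input(hidden, mask)
# unpadded: (5, dim) -- only the 5 real tokens; cu_seqlens == tensor([0, 3, 5])

repadded = pad_input(unpadded, indices, batch, seqlen)
# repadded matches `hidden` at real-token positions and is zero at padded ones.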
infer_4_30_0/lib/python3.10/site-packages/flash_attn/flash_attn_interface.py
ADDED
@@ -0,0 +1,1574 @@
| 1 |
+
# Copyright (c) 2023, Tri Dao.
|
| 2 |
+
|
| 3 |
+
from typing import Optional, Sequence, Tuple, Union
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn as nn
|
| 7 |
+
import os
|
| 8 |
+
|
| 9 |
+
# isort: off
|
| 10 |
+
# We need to import the CUDA kernels after importing torch
|
| 11 |
+
USE_TRITON_ROCM = os.getenv("FLASH_ATTENTION_TRITON_AMD_ENABLE", "FALSE") == "TRUE"
|
| 12 |
+
if USE_TRITON_ROCM:
|
| 13 |
+
from .flash_attn_triton_amd import interface_fa as flash_attn_gpu
|
| 14 |
+
else:
|
| 15 |
+
import flash_attn_2_cuda as flash_attn_gpu
|
| 16 |
+
|
| 17 |
+
# isort: on
|
| 18 |
+
|
| 19 |
+
def maybe_contiguous(x):
|
| 20 |
+
return x.contiguous() if x is not None and x.stride(-1) != 1 else x
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def _get_block_size_n(device, head_dim, is_dropout, is_causal):
|
| 24 |
+
# This should match the block sizes in the CUDA kernel
|
| 25 |
+
assert head_dim <= 256
|
| 26 |
+
major, minor = torch.cuda.get_device_capability(device)
|
| 27 |
+
is_sm8x = major == 8 and minor > 0 # Only include sm86 and sm89, exclude sm80 (A100)
|
| 28 |
+
is_sm80 = major == 8 and minor == 0
|
| 29 |
+
is_sm90 = major == 9 and minor == 0
|
| 30 |
+
if head_dim <= 32:
|
| 31 |
+
return 128
|
| 32 |
+
if head_dim <= 64:
|
| 33 |
+
return 128 if not is_dropout else 64
|
| 34 |
+
elif head_dim <= 96:
|
| 35 |
+
return 64
|
| 36 |
+
elif head_dim <= 128:
|
| 37 |
+
if is_sm8x:
|
| 38 |
+
return 64 if (not is_dropout and is_causal) else 32
|
| 39 |
+
else:
|
| 40 |
+
return 64 if not is_dropout else 32
|
| 41 |
+
elif head_dim <= 160:
|
| 42 |
+
if is_sm8x:
|
| 43 |
+
return 64
|
| 44 |
+
else:
|
| 45 |
+
return 32
|
| 46 |
+
elif head_dim <= 192:
|
| 47 |
+
return 64
|
| 48 |
+
elif head_dim <= 224:
|
| 49 |
+
return 64
|
| 50 |
+
elif head_dim <= 256:
|
| 51 |
+
return 64
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def round_multiple(x, m):
|
| 55 |
+
return (x + m - 1) // m * m
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
# torch.compile() support is only enabled for pytorch >= 2.4
|
| 59 |
+
# The reason for this is that we are using the new custom_op and register_fake
|
| 60 |
+
# APIs, which support inplace modification of inputs in the function itself
|
| 61 |
+
if torch.__version__ >= "2.4.0":
|
| 62 |
+
_torch_custom_op_wrapper = torch.library.custom_op
|
| 63 |
+
_torch_register_fake_wrapper = torch.library.register_fake
|
| 64 |
+
else:
|
| 65 |
+
def noop_custom_op_wrapper(name, fn=None, /, *, mutates_args, device_types=None, schema=None):
|
| 66 |
+
def wrap(func):
|
| 67 |
+
return func
|
| 68 |
+
if fn is None:
|
| 69 |
+
return wrap
|
| 70 |
+
return fn
|
| 71 |
+
def noop_register_fake_wrapper(op, fn=None, /, *, lib=None, _stacklevel=1):
|
| 72 |
+
def wrap(func):
|
| 73 |
+
return func
|
| 74 |
+
if fn is None:
|
| 75 |
+
return wrap
|
| 76 |
+
return fn
|
| 77 |
+
_torch_custom_op_wrapper = noop_custom_op_wrapper
|
| 78 |
+
_torch_register_fake_wrapper = noop_register_fake_wrapper
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
@_torch_custom_op_wrapper("flash_attn::_flash_attn_forward", mutates_args=(), device_types="cuda")
|
| 82 |
+
def _flash_attn_forward(
|
| 83 |
+
q: torch.Tensor,
|
| 84 |
+
k: torch.Tensor,
|
| 85 |
+
v: torch.Tensor,
|
| 86 |
+
dropout_p: float,
|
| 87 |
+
softmax_scale: float,
|
| 88 |
+
causal: bool,
|
| 89 |
+
window_size_left: int,
|
| 90 |
+
window_size_right: int,
|
| 91 |
+
softcap: float,
|
| 92 |
+
alibi_slopes: Optional[torch.Tensor],
|
| 93 |
+
return_softmax: bool
|
| 94 |
+
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
|
| 95 |
+
q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
|
| 96 |
+
out, softmax_lse, S_dmask, rng_state = flash_attn_gpu.fwd(
|
| 97 |
+
q,
|
| 98 |
+
k,
|
| 99 |
+
v,
|
| 100 |
+
None,
|
| 101 |
+
alibi_slopes,
|
| 102 |
+
dropout_p,
|
| 103 |
+
softmax_scale,
|
| 104 |
+
causal,
|
| 105 |
+
window_size_left,
|
| 106 |
+
window_size_right,
|
| 107 |
+
softcap,
|
| 108 |
+
return_softmax,
|
| 109 |
+
None,
|
| 110 |
+
)
|
| 111 |
+
return out, softmax_lse, S_dmask, rng_state
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
@_torch_register_fake_wrapper("flash_attn::_flash_attn_forward")
|
| 115 |
+
def _flash_attn_forward_fake(
|
| 116 |
+
q: torch.Tensor,
|
| 117 |
+
k: torch.Tensor,
|
| 118 |
+
v: torch.Tensor,
|
| 119 |
+
dropout_p: float,
|
| 120 |
+
softmax_scale: float,
|
| 121 |
+
causal: bool,
|
| 122 |
+
window_size_left: int,
|
| 123 |
+
window_size_right: int,
|
| 124 |
+
softcap: float,
|
| 125 |
+
alibi_slopes: Optional[torch.Tensor],
|
| 126 |
+
return_softmax: bool
|
| 127 |
+
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
|
| 128 |
+
q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
|
| 129 |
+
batch_size, seqlen_q, num_heads, head_size = q.shape
|
| 130 |
+
seqlen_k = k.shape[1]
|
| 131 |
+
out = torch.empty_like(q)
|
| 132 |
+
softmax_lse = torch.empty((batch_size, num_heads, seqlen_q), dtype=torch.float32, device=q.device, layout=q.layout)
|
| 133 |
+
p = torch.empty((0,), dtype=q.dtype, device=q.device, layout=q.layout)
|
| 134 |
+
if return_softmax:
|
| 135 |
+
p = torch.empty((batch_size, num_heads, round_multiple(seqlen_q, 128), round_multiple(seqlen_k, 128)), dtype=q.dtype, device=q.device, layout=q.layout)
|
| 136 |
+
rng_state = torch.empty((2,), dtype=torch.int64, device=q.device)
|
| 137 |
+
|
| 138 |
+
return out, softmax_lse, p, rng_state
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
if torch.__version__ >= "2.4.0":
|
| 142 |
+
_wrapped_flash_attn_forward = torch.ops.flash_attn._flash_attn_forward
|
| 143 |
+
else:
|
| 144 |
+
_wrapped_flash_attn_forward = _flash_attn_forward
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
@_torch_custom_op_wrapper("flash_attn::_flash_attn_varlen_forward", mutates_args=(), device_types="cuda")
|
| 148 |
+
def _flash_attn_varlen_forward(
|
| 149 |
+
q: torch.Tensor,
|
| 150 |
+
k: torch.Tensor,
|
| 151 |
+
v: torch.Tensor,
|
| 152 |
+
cu_seqlens_q: torch.Tensor,
|
| 153 |
+
cu_seqlens_k: torch.Tensor,
|
| 154 |
+
max_seqlen_q: int,
|
| 155 |
+
max_seqlen_k: int,
|
| 156 |
+
dropout_p: float,
|
| 157 |
+
softmax_scale: float,
|
| 158 |
+
causal: bool,
|
| 159 |
+
window_size_left: int = -1,
|
| 160 |
+
window_size_right: int = -1,
|
| 161 |
+
softcap: float = 0.0,
|
| 162 |
+
alibi_slopes: Optional[torch.Tensor] = None,
|
| 163 |
+
return_softmax: bool = False,
|
| 164 |
+
block_table: Optional[torch.Tensor] = None,
|
| 165 |
+
leftpad_k: Optional[torch.Tensor] = None,
|
| 166 |
+
seqused_k: Optional[torch.Tensor] = None,
|
| 167 |
+
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
|
| 168 |
+
q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
|
| 169 |
+
out, softmax_lse, S_dmask, rng_state = flash_attn_gpu.varlen_fwd(
|
| 170 |
+
q,
|
| 171 |
+
k,
|
| 172 |
+
v,
|
| 173 |
+
None,
|
| 174 |
+
cu_seqlens_q,
|
| 175 |
+
cu_seqlens_k,
|
| 176 |
+
seqused_k,
|
| 177 |
+
leftpad_k,
|
| 178 |
+
block_table,
|
| 179 |
+
alibi_slopes,
|
| 180 |
+
max_seqlen_q,
|
| 181 |
+
max_seqlen_k,
|
| 182 |
+
dropout_p,
|
| 183 |
+
softmax_scale,
|
| 184 |
+
False,
|
| 185 |
+
causal,
|
| 186 |
+
window_size_left,
|
| 187 |
+
window_size_right,
|
| 188 |
+
softcap,
|
| 189 |
+
return_softmax,
|
| 190 |
+
None,
|
| 191 |
+
)
|
| 192 |
+
# if out.isnan().any() or softmax_lse.isnan().any():
|
| 193 |
+
# breakpoint()
|
| 194 |
+
return out, softmax_lse, S_dmask, rng_state
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
@_torch_register_fake_wrapper("flash_attn::_flash_attn_varlen_forward")
|
| 198 |
+
def _flash_attn_varlen_forward_fake(
|
| 199 |
+
q: torch.Tensor,
|
| 200 |
+
k: torch.Tensor,
|
| 201 |
+
v: torch.Tensor,
|
| 202 |
+
cu_seqlens_q: torch.Tensor,
|
| 203 |
+
cu_seqlens_k: torch.Tensor,
|
| 204 |
+
max_seqlen_q: int,
|
| 205 |
+
max_seqlen_k: int,
|
| 206 |
+
dropout_p: float,
|
| 207 |
+
softmax_scale: float,
|
| 208 |
+
causal: bool,
|
| 209 |
+
window_size_left: int = -1,
|
| 210 |
+
window_size_right: int = -1,
|
| 211 |
+
softcap: float = 0.0,
|
| 212 |
+
alibi_slopes: Optional[torch.Tensor] = None,
|
| 213 |
+
return_softmax: bool = False,
|
| 214 |
+
block_table: Optional[torch.Tensor] = None,
|
| 215 |
+
leftpad_k: Optional[torch.Tensor] = None,
|
| 216 |
+
seqused_k: Optional[torch.Tensor] = None,
|
| 217 |
+
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
|
| 218 |
+
q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
|
| 219 |
+
paged_kv = block_table is not None
|
| 220 |
+
batch_size = cu_seqlens_q.numel() - 1
|
| 221 |
+
total_q, num_heads, _ = q.shape
|
| 222 |
+
|
| 223 |
+
out = torch.empty_like(q)
|
| 224 |
+
softmax_lse = torch.empty((num_heads, total_q), dtype=torch.float32, device=q.device, layout=q.layout)
|
| 225 |
+
p = torch.empty((0,), dtype=q.dtype, device=q.device, layout=q.layout)
|
| 226 |
+
seqlen_q_rounded = round_multiple(max_seqlen_q, 128)
|
| 227 |
+
seqlen_k_rounded = round_multiple(max_seqlen_k, 128)
|
| 228 |
+
if return_softmax:
|
| 229 |
+
p = torch.empty((batch_size, num_heads, seqlen_q_rounded, seqlen_k_rounded), dtype=q.dtype, device=q.device, layout=q.layout)
|
| 230 |
+
rng_state = torch.empty((2,), dtype=torch.int64, device=q.device)
|
| 231 |
+
return out, softmax_lse, p, rng_state
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
if torch.__version__ >= "2.4.0":
|
| 235 |
+
_wrapped_flash_attn_varlen_forward = torch.ops.flash_attn._flash_attn_varlen_forward
|
| 236 |
+
else:
|
| 237 |
+
_wrapped_flash_attn_varlen_forward = _flash_attn_varlen_forward
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
@_torch_custom_op_wrapper("flash_attn::_flash_attn_backward", mutates_args=("dq", "dk", "dv"), device_types="cuda")
|
| 241 |
+
def _flash_attn_backward(
|
| 242 |
+
dout: torch.Tensor,
|
| 243 |
+
q: torch.Tensor,
|
| 244 |
+
k: torch.Tensor,
|
| 245 |
+
v: torch.Tensor,
|
| 246 |
+
out: torch.Tensor,
|
| 247 |
+
softmax_lse: torch.Tensor,
|
| 248 |
+
dq: Optional[torch.Tensor],
|
| 249 |
+
dk: Optional[torch.Tensor],
|
| 250 |
+
dv: Optional[torch.Tensor],
|
| 251 |
+
dropout_p: float,
|
| 252 |
+
softmax_scale: float,
|
| 253 |
+
causal: bool,
|
| 254 |
+
window_size_left: int,
|
| 255 |
+
window_size_right: int,
|
| 256 |
+
softcap: float,
|
| 257 |
+
alibi_slopes: Optional[torch.Tensor],
|
| 258 |
+
deterministic: bool,
|
| 259 |
+
rng_state: Optional[torch.Tensor] = None,
|
| 260 |
+
) -> torch.Tensor:
|
| 261 |
+
# dq, dk, dv are allocated by us so they should already be contiguous
|
| 262 |
+
dout, q, k, v, out = [maybe_contiguous(x) for x in (dout, q, k, v, out)]
|
| 263 |
+
(
|
| 264 |
+
dq,
|
| 265 |
+
dk,
|
| 266 |
+
dv,
|
| 267 |
+
softmax_d,
|
| 268 |
+
) = flash_attn_gpu.bwd(
|
| 269 |
+
dout,
|
| 270 |
+
q,
|
| 271 |
+
k,
|
| 272 |
+
v,
|
| 273 |
+
out,
|
| 274 |
+
softmax_lse,
|
| 275 |
+
dq,
|
| 276 |
+
dk,
|
| 277 |
+
dv,
|
| 278 |
+
alibi_slopes,
|
| 279 |
+
dropout_p,
|
| 280 |
+
softmax_scale,
|
| 281 |
+
causal,
|
| 282 |
+
window_size_left,
|
| 283 |
+
window_size_right,
|
| 284 |
+
softcap,
|
| 285 |
+
deterministic,
|
| 286 |
+
None,
|
| 287 |
+
rng_state,
|
| 288 |
+
)
|
| 289 |
+
return softmax_d
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
@_torch_register_fake_wrapper("flash_attn::_flash_attn_backward")
|
| 293 |
+
def _flash_attn_backward_fake(
|
| 294 |
+
dout: torch.Tensor,
|
| 295 |
+
q: torch.Tensor,
|
| 296 |
+
k: torch.Tensor,
|
| 297 |
+
v: torch.Tensor,
|
| 298 |
+
out: torch.Tensor,
|
| 299 |
+
softmax_lse: torch.Tensor,
|
| 300 |
+
dq: Optional[torch.Tensor],
|
| 301 |
+
dk: Optional[torch.Tensor],
|
| 302 |
+
dv: Optional[torch.Tensor],
|
| 303 |
+
dropout_p: float,
|
| 304 |
+
softmax_scale: float,
|
| 305 |
+
causal: bool,
|
| 306 |
+
window_size_left: int,
|
| 307 |
+
window_size_right: int,
|
| 308 |
+
softcap: float,
|
| 309 |
+
alibi_slopes: Optional[torch.Tensor],
|
| 310 |
+
deterministic: bool,
|
| 311 |
+
rng_state: Optional[torch.Tensor] = None,
|
| 312 |
+
) -> torch.Tensor:
|
| 313 |
+
dout, q, k, v, out = [maybe_contiguous(x) for x in (dout, q, k, v, out)]
|
| 314 |
+
if dq is None:
|
| 315 |
+
dq = torch.empty_like(q)
|
| 316 |
+
if dk is None:
|
| 317 |
+
dk = torch.empty_like(k)
|
| 318 |
+
if dv is None:
|
| 319 |
+
dv = torch.empty_like(v)
|
| 320 |
+
batch_size, seqlen_q, num_heads, _ = q.shape
|
| 321 |
+
softmax_d = torch.empty((batch_size, num_heads, round_multiple(seqlen_q, 128)), device=q.device, dtype=torch.float32)
|
| 322 |
+
|
| 323 |
+
return softmax_d
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
if torch.__version__ >= "2.4.0":
|
| 327 |
+
_wrapped_flash_attn_backward = torch.ops.flash_attn._flash_attn_backward
|
| 328 |
+
else:
|
| 329 |
+
_wrapped_flash_attn_backward = _flash_attn_backward
|
| 330 |
+
|
| 331 |
+
|
| 332 |
+
@_torch_custom_op_wrapper("flash_attn::_flash_attn_varlen_backward", mutates_args=("dq", "dk", "dv"), device_types="cuda")
|
| 333 |
+
def _flash_attn_varlen_backward(
|
| 334 |
+
dout: torch.Tensor,
|
| 335 |
+
q: torch.Tensor,
|
| 336 |
+
k: torch.Tensor,
|
| 337 |
+
v: torch.Tensor,
|
| 338 |
+
out: torch.Tensor,
|
| 339 |
+
softmax_lse: torch.Tensor,
|
| 340 |
+
dq: Optional[torch.Tensor],
|
| 341 |
+
dk: Optional[torch.Tensor],
|
| 342 |
+
dv: Optional[torch.Tensor],
|
| 343 |
+
cu_seqlens_q: torch.Tensor,
|
| 344 |
+
cu_seqlens_k: torch.Tensor,
|
| 345 |
+
max_seqlen_q: int,
|
| 346 |
+
max_seqlen_k: int,
|
| 347 |
+
dropout_p: float,
|
| 348 |
+
softmax_scale: float,
|
| 349 |
+
causal: bool,
|
| 350 |
+
window_size_left: int,
|
| 351 |
+
window_size_right: int,
|
| 352 |
+
softcap: float,
|
| 353 |
+
alibi_slopes: Optional[torch.Tensor],
|
| 354 |
+
deterministic: bool,
|
| 355 |
+
rng_state: Optional[torch.Tensor] = None,
|
| 356 |
+
) -> torch.Tensor:
|
| 357 |
+
# dq, dk, dv are allocated by us so they should already be contiguous
|
| 358 |
+
dout, q, k, v, out = [maybe_contiguous(x) for x in (dout, q, k, v, out)]
|
| 359 |
+
(
|
| 360 |
+
dq,
|
| 361 |
+
dk,
|
| 362 |
+
dv,
|
| 363 |
+
softmax_d,
|
| 364 |
+
) = flash_attn_gpu.varlen_bwd(
|
| 365 |
+
dout,
|
| 366 |
+
q,
|
| 367 |
+
k,
|
| 368 |
+
v,
|
| 369 |
+
out,
|
| 370 |
+
softmax_lse,
|
| 371 |
+
dq,
|
| 372 |
+
dk,
|
| 373 |
+
dv,
|
| 374 |
+
cu_seqlens_q,
|
| 375 |
+
cu_seqlens_k,
|
| 376 |
+
alibi_slopes,
|
| 377 |
+
max_seqlen_q,
|
| 378 |
+
max_seqlen_k,
|
| 379 |
+
dropout_p,
|
| 380 |
+
softmax_scale,
|
| 381 |
+
False,
|
| 382 |
+
causal,
|
| 383 |
+
window_size_left,
|
| 384 |
+
window_size_right,
|
| 385 |
+
softcap,
|
| 386 |
+
deterministic,
|
| 387 |
+
None,
|
| 388 |
+
rng_state,
|
| 389 |
+
)
|
| 390 |
+
# if dk.isnan().any() or dk.isnan().any() or dv.isnan().any() or softmax_d.isnan().any():
|
| 391 |
+
# breakpoint()
|
| 392 |
+
return softmax_d
|
| 393 |
+
|
| 394 |
+
|
| 395 |
+
@_torch_register_fake_wrapper("flash_attn::_flash_attn_varlen_backward")
|
| 396 |
+
def _flash_attn_varlen_backward_fake(
|
| 397 |
+
dout: torch.Tensor,
|
| 398 |
+
q: torch.Tensor,
|
| 399 |
+
k: torch.Tensor,
|
| 400 |
+
v: torch.Tensor,
|
| 401 |
+
out: torch.Tensor,
|
| 402 |
+
softmax_lse: torch.Tensor,
|
| 403 |
+
dq: Optional[torch.Tensor],
|
| 404 |
+
dk: Optional[torch.Tensor],
|
| 405 |
+
dv: Optional[torch.Tensor],
|
| 406 |
+
cu_seqlens_q: torch.Tensor,
|
| 407 |
+
cu_seqlens_k: torch.Tensor,
|
| 408 |
+
max_seqlen_q: int,
|
| 409 |
+
max_seqlen_k: int,
|
| 410 |
+
dropout_p: float,
|
| 411 |
+
softmax_scale: float,
|
| 412 |
+
causal: bool,
|
| 413 |
+
window_size_left: int,
|
| 414 |
+
window_size_right: int,
|
| 415 |
+
softcap: float,
|
| 416 |
+
alibi_slopes: Optional[torch.Tensor],
|
| 417 |
+
deterministic: bool,
|
| 418 |
+
rng_state: Optional[torch.Tensor] = None,
|
| 419 |
+
) -> torch.Tensor:
|
| 420 |
+
dout, q, k, v, out = [maybe_contiguous(x) for x in (dout, q, k, v, out)]
|
| 421 |
+
batch_size = cu_seqlens_q.numel() - 1
|
| 422 |
+
total_q, num_heads, _ = q.shape
|
| 423 |
+
|
| 424 |
+
if dq is None:
|
| 425 |
+
dq = torch.empty_like(q)
|
| 426 |
+
if dk is None:
|
| 427 |
+
dk = torch.empty_like(k)
|
| 428 |
+
if dv is None:
|
| 429 |
+
dv = torch.empty_like(v)
|
| 430 |
+
softmax_d = torch.empty((num_heads, total_q + 128 * batch_size), device=q.device, dtype=torch.float32)
|
| 431 |
+
|
| 432 |
+
return softmax_d
|
| 433 |
+
|
| 434 |
+
|
| 435 |
+
if torch.__version__ >= "2.4.0":
|
| 436 |
+
_wrapped_flash_attn_varlen_backward = torch.ops.flash_attn._flash_attn_varlen_backward
|
| 437 |
+
else:
|
| 438 |
+
_wrapped_flash_attn_varlen_backward = _flash_attn_varlen_backward
|
| 439 |
+
|
| 440 |
+
|
| 441 |
+
class FlashAttnQKVPackedFunc(torch.autograd.Function):
|
| 442 |
+
@staticmethod
|
| 443 |
+
def forward(
|
| 444 |
+
ctx,
|
| 445 |
+
qkv,
|
| 446 |
+
dropout_p,
|
| 447 |
+
softmax_scale,
|
| 448 |
+
causal,
|
| 449 |
+
window_size,
|
| 450 |
+
softcap,
|
| 451 |
+
alibi_slopes,
|
| 452 |
+
deterministic,
|
| 453 |
+
return_softmax,
|
| 454 |
+
):
|
| 455 |
+
if softmax_scale is None:
|
| 456 |
+
softmax_scale = qkv.shape[-1] ** (-0.5)
|
| 457 |
+
q, k, v = qkv[:, :, 0].detach(), qkv[:, :, 1].detach(), qkv[:, :, 2].detach()
|
| 458 |
+
head_size_og = q.size(3)
|
| 459 |
+
if head_size_og % 8 != 0:
|
| 460 |
+
q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
|
| 461 |
+
k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
|
| 462 |
+
v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
|
| 463 |
+
out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_forward(
|
| 464 |
+
q,
|
| 465 |
+
k,
|
| 466 |
+
v,
|
| 467 |
+
dropout_p,
|
| 468 |
+
softmax_scale,
|
| 469 |
+
causal=causal,
|
| 470 |
+
window_size_left=window_size[0],
|
| 471 |
+
window_size_right=window_size[1],
|
| 472 |
+
softcap=softcap,
|
| 473 |
+
alibi_slopes=alibi_slopes,
|
| 474 |
+
return_softmax=return_softmax and dropout_p > 0,
|
| 475 |
+
)
|
| 476 |
+
ctx.save_for_backward(q, k, v, out_padded, softmax_lse, rng_state)
|
| 477 |
+
ctx.dropout_p = dropout_p
|
| 478 |
+
ctx.softmax_scale = softmax_scale
|
| 479 |
+
ctx.causal = causal
|
| 480 |
+
ctx.window_size = window_size
|
| 481 |
+
ctx.softcap = softcap
|
| 482 |
+
ctx.alibi_slopes = alibi_slopes
|
| 483 |
+
ctx.deterministic = deterministic
|
| 484 |
+
out = out_padded[..., :head_size_og]
|
| 485 |
+
return out if not return_softmax else (out, softmax_lse, S_dmask)
|
| 486 |
+
|
| 487 |
+
@staticmethod
|
| 488 |
+
def backward(ctx, dout, *args):
|
| 489 |
+
q, k, v, out, softmax_lse, rng_state = ctx.saved_tensors
|
| 490 |
+
qkv_shape = q.shape[:-2] + (3, *q.shape[-2:])
|
| 491 |
+
dqkv = torch.empty(qkv_shape, dtype=q.dtype, device=q.device)
|
| 492 |
+
head_size_og = dout.size(3)
|
| 493 |
+
dout_padded = dout
|
| 494 |
+
if head_size_og % 8 != 0:
|
| 495 |
+
dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
|
| 496 |
+
_wrapped_flash_attn_backward(
|
| 497 |
+
dout_padded,
|
| 498 |
+
q,
|
| 499 |
+
k,
|
| 500 |
+
v,
|
| 501 |
+
out,
|
| 502 |
+
softmax_lse,
|
| 503 |
+
dqkv[:, :, 0],
|
| 504 |
+
dqkv[:, :, 1],
|
| 505 |
+
dqkv[:, :, 2],
|
| 506 |
+
ctx.dropout_p,
|
| 507 |
+
ctx.softmax_scale,
|
| 508 |
+
ctx.causal,
|
| 509 |
+
ctx.window_size[0],
|
| 510 |
+
ctx.window_size[1],
|
| 511 |
+
ctx.softcap,
|
| 512 |
+
ctx.alibi_slopes,
|
| 513 |
+
ctx.deterministic,
|
| 514 |
+
rng_state=rng_state,
|
| 515 |
+
)
|
| 516 |
+
dqkv = dqkv[..., : dout.shape[-1]] # We could have padded the head dimension
|
| 517 |
+
return dqkv, None, None, None, None, None, None, None, None
|
| 518 |
+
|
| 519 |
+
|
| 520 |
+
class FlashAttnVarlenQKVPackedFunc(torch.autograd.Function):
|
| 521 |
+
@staticmethod
|
| 522 |
+
def forward(
|
| 523 |
+
ctx,
|
| 524 |
+
qkv,
|
| 525 |
+
cu_seqlens,
|
| 526 |
+
max_seqlen,
|
| 527 |
+
dropout_p,
|
| 528 |
+
softmax_scale,
|
| 529 |
+
causal,
|
| 530 |
+
window_size,
|
| 531 |
+
softcap,
|
| 532 |
+
alibi_slopes,
|
| 533 |
+
deterministic,
|
| 534 |
+
return_softmax,
|
| 535 |
+
):
|
| 536 |
+
if softmax_scale is None:
|
| 537 |
+
softmax_scale = qkv.shape[-1] ** (-0.5)
|
| 538 |
+
q, k, v = qkv[:, 0].detach(), qkv[:, 1].detach(), qkv[:, 2].detach()
|
| 539 |
+
head_size_og = q.size(2)
|
| 540 |
+
if head_size_og % 8 != 0:
|
| 541 |
+
q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
|
| 542 |
+
k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
|
| 543 |
+
v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
|
| 544 |
+
out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_varlen_forward(
|
| 545 |
+
q,
|
| 546 |
+
k,
|
| 547 |
+
v,
|
| 548 |
+
cu_seqlens,
|
| 549 |
+
cu_seqlens,
|
| 550 |
+
max_seqlen,
|
| 551 |
+
max_seqlen,
|
| 552 |
+
dropout_p,
|
| 553 |
+
softmax_scale,
|
| 554 |
+
causal=causal,
|
| 555 |
+
window_size_left=window_size[0],
|
| 556 |
+
window_size_right=window_size[1],
|
| 557 |
+
softcap=softcap,
|
| 558 |
+
alibi_slopes=alibi_slopes,
|
| 559 |
+
return_softmax=return_softmax and dropout_p > 0,
|
| 560 |
+
block_table=None,
|
| 561 |
+
)
|
| 562 |
+
ctx.save_for_backward(q, k, v, out_padded, softmax_lse, cu_seqlens, rng_state)
|
| 563 |
+
ctx.dropout_p = dropout_p
|
| 564 |
+
ctx.max_seqlen = max_seqlen
|
| 565 |
+
ctx.softmax_scale = softmax_scale
|
| 566 |
+
ctx.causal = causal
|
| 567 |
+
ctx.window_size = window_size
|
| 568 |
+
ctx.softcap = softcap
|
| 569 |
+
ctx.alibi_slopes = alibi_slopes
|
| 570 |
+
ctx.deterministic = deterministic
|
| 571 |
+
out = out_padded[..., :head_size_og]
|
| 572 |
+
return out if not return_softmax else (out, softmax_lse, S_dmask)
|
| 573 |
+
|
| 574 |
+
@staticmethod
|
| 575 |
+
def backward(ctx, dout, *args):
|
| 576 |
+
q, k, v, out, softmax_lse, cu_seqlens, rng_state = ctx.saved_tensors
|
| 577 |
+
qkv_shape = q.shape[:-2] + (3, *q.shape[-2:])
|
| 578 |
+
dqkv = torch.empty(qkv_shape, dtype=q.dtype, device=q.device)
|
| 579 |
+
head_size_og = dout.size(2)
|
| 580 |
+
dout_padded = dout
|
| 581 |
+
if head_size_og % 8 != 0:
|
| 582 |
+
dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
|
| 583 |
+
_wrapped_flash_attn_varlen_backward(
|
| 584 |
+
dout_padded,
|
| 585 |
+
q,
|
| 586 |
+
k,
|
| 587 |
+
v,
|
| 588 |
+
out,
|
| 589 |
+
softmax_lse,
|
| 590 |
+
dqkv[:, 0],
|
| 591 |
+
dqkv[:, 1],
|
| 592 |
+
dqkv[:, 2],
|
| 593 |
+
cu_seqlens,
|
| 594 |
+
cu_seqlens,
|
| 595 |
+
ctx.max_seqlen,
|
| 596 |
+
ctx.max_seqlen,
|
| 597 |
+
ctx.dropout_p,
|
| 598 |
+
ctx.softmax_scale,
|
| 599 |
+
ctx.causal,
|
| 600 |
+
ctx.window_size[0],
|
| 601 |
+
ctx.window_size[1],
|
| 602 |
+
ctx.softcap,
|
| 603 |
+
ctx.alibi_slopes,
|
| 604 |
+
ctx.deterministic,
|
| 605 |
+
rng_state=rng_state,
|
| 606 |
+
)
|
| 607 |
+
dqkv = dqkv[..., : dout.shape[-1]] # We could have padded the head dimension
|
| 608 |
+
return dqkv, None, None, None, None, None, None, None, None, None, None
|
| 609 |
+
|
| 610 |
+
|
| 611 |
+
class FlashAttnKVPackedFunc(torch.autograd.Function):
|
| 612 |
+
@staticmethod
|
| 613 |
+
def forward(
|
| 614 |
+
ctx,
|
| 615 |
+
q,
|
| 616 |
+
kv,
|
| 617 |
+
dropout_p,
|
| 618 |
+
softmax_scale,
|
| 619 |
+
causal,
|
| 620 |
+
window_size,
|
| 621 |
+
softcap,
|
| 622 |
+
alibi_slopes,
|
| 623 |
+
deterministic,
|
| 624 |
+
return_softmax,
|
| 625 |
+
):
|
| 626 |
+
if softmax_scale is None:
|
| 627 |
+
softmax_scale = q.shape[-1] ** (-0.5)
|
| 628 |
+
k, v = kv[:, :, 0].detach(), kv[:, :, 1].detach()
|
| 629 |
+
head_size_og = q.size(3)
|
| 630 |
+
if head_size_og % 8 != 0:
|
| 631 |
+
q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
|
| 632 |
+
k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
|
| 633 |
+
v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
|
| 634 |
+
out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_forward(
|
| 635 |
+
q,
|
| 636 |
+
k,
|
| 637 |
+
v,
|
| 638 |
+
dropout_p,
|
| 639 |
+
softmax_scale,
|
| 640 |
+
causal=causal,
|
| 641 |
+
window_size_left=window_size[0],
|
| 642 |
+
window_size_right=window_size[1],
|
| 643 |
+
softcap=softcap,
|
| 644 |
+
alibi_slopes=alibi_slopes,
|
| 645 |
+
return_softmax=return_softmax and dropout_p > 0,
|
| 646 |
+
)
|
| 647 |
+
ctx.save_for_backward(q, k, v, out_padded, softmax_lse, rng_state)
|
| 648 |
+
ctx.dropout_p = dropout_p
|
| 649 |
+
ctx.softmax_scale = softmax_scale
|
| 650 |
+
ctx.causal = causal
|
| 651 |
+
ctx.window_size = window_size
|
| 652 |
+
ctx.softcap = softcap
|
| 653 |
+
ctx.alibi_slopes = alibi_slopes
|
| 654 |
+
ctx.deterministic = deterministic
|
| 655 |
+
out = out_padded[..., :head_size_og]
|
| 656 |
+
return out if not return_softmax else (out, softmax_lse, S_dmask)
|
| 657 |
+
|
| 658 |
+
@staticmethod
|
| 659 |
+
def backward(ctx, dout, *args):
|
| 660 |
+
q, k, v, out, softmax_lse, rng_state = ctx.saved_tensors
|
| 661 |
+
dq = torch.empty_like(q)
|
| 662 |
+
kv_shape = k.shape[:-2] + (2, *k.shape[-2:])
|
| 663 |
+
dkv = torch.empty(kv_shape, dtype=k.dtype, device=k.device)
|
| 664 |
+
head_size_og = dout.size(3)
|
| 665 |
+
dout_padded = dout
|
| 666 |
+
if head_size_og % 8 != 0:
|
| 667 |
+
dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
|
| 668 |
+
_wrapped_flash_attn_backward(
|
| 669 |
+
dout_padded,
|
| 670 |
+
q,
|
| 671 |
+
k,
|
| 672 |
+
v,
|
| 673 |
+
out,
|
| 674 |
+
softmax_lse,
|
| 675 |
+
dq,
|
| 676 |
+
dkv[:, :, 0],
|
| 677 |
+
dkv[:, :, 1],
|
| 678 |
+
ctx.dropout_p,
|
| 679 |
+
ctx.softmax_scale,
|
| 680 |
+
ctx.causal,
|
| 681 |
+
ctx.window_size[0],
|
| 682 |
+
ctx.window_size[1],
|
| 683 |
+
ctx.softcap,
|
| 684 |
+
ctx.alibi_slopes,
|
| 685 |
+
ctx.deterministic,
|
| 686 |
+
rng_state=rng_state,
|
| 687 |
+
)
|
| 688 |
+
dq = dq[..., : dout.shape[-1]] # We could have padded the head dimension
|
| 689 |
+
dkv = dkv[..., : dout.shape[-1]]
|
| 690 |
+
return dq, dkv, None, None, None, None, None, None, None, None
|
| 691 |
+
|
| 692 |
+
|
| 693 |
+
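For reference, a hedged sketch (not library code) of the ALiBi bias that these wrappers pass through via alibi_slopes; the docstrings later in this file define it as -alibi_slope * |i + seqlen_k - seqlen_q - j|, added to the score of query i and key j before softmax.

import torch

def alibi_bias(alibi_slopes, seqlen_q, seqlen_k):
    # alibi_slopes: (nheads,) fp32
    i = torch.arange(seqlen_q).view(1, -1, 1)        # query positions
    j = torch.arange(seqlen_k).view(1, 1, -1)        # key positions
    dist = (i + seqlen_k - seqlen_q - j).abs()       # bottom-right aligned distance
    return -alibi_slopes.view(-1, 1, 1) * dist       # (nheads, seqlen_q, seqlen_k)
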
class FlashAttnVarlenKVPackedFunc(torch.autograd.Function):
|
| 694 |
+
@staticmethod
|
| 695 |
+
def forward(
|
| 696 |
+
ctx,
|
| 697 |
+
q,
|
| 698 |
+
kv,
|
| 699 |
+
cu_seqlens_q,
|
| 700 |
+
cu_seqlens_k,
|
| 701 |
+
max_seqlen_q,
|
| 702 |
+
max_seqlen_k,
|
| 703 |
+
dropout_p,
|
| 704 |
+
softmax_scale,
|
| 705 |
+
causal,
|
| 706 |
+
window_size,
|
| 707 |
+
softcap,
|
| 708 |
+
alibi_slopes,
|
| 709 |
+
deterministic,
|
| 710 |
+
return_softmax,
|
| 711 |
+
):
|
| 712 |
+
if softmax_scale is None:
|
| 713 |
+
softmax_scale = q.shape[-1] ** (-0.5)
|
| 714 |
+
k, v = kv[:, 0].detach(), kv[:, 1].detach()
|
| 715 |
+
head_size_og = q.size(2)
|
| 716 |
+
if head_size_og % 8 != 0:
|
| 717 |
+
q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
|
| 718 |
+
k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
|
| 719 |
+
v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
|
| 720 |
+
out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_varlen_forward(
|
| 721 |
+
q,
|
| 722 |
+
k,
|
| 723 |
+
v,
|
| 724 |
+
cu_seqlens_q,
|
| 725 |
+
cu_seqlens_k,
|
| 726 |
+
max_seqlen_q,
|
| 727 |
+
max_seqlen_k,
|
| 728 |
+
dropout_p,
|
| 729 |
+
softmax_scale,
|
| 730 |
+
causal=causal,
|
| 731 |
+
window_size_left=window_size[0],
|
| 732 |
+
window_size_right=window_size[1],
|
| 733 |
+
softcap=softcap,
|
| 734 |
+
alibi_slopes=alibi_slopes,
|
| 735 |
+
return_softmax=return_softmax and dropout_p > 0,
|
| 736 |
+
block_table=None,
|
| 737 |
+
)
|
| 738 |
+
ctx.save_for_backward(
|
| 739 |
+
q, k, v, out_padded, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state
|
| 740 |
+
)
|
| 741 |
+
ctx.dropout_p = dropout_p
|
| 742 |
+
ctx.max_seqlen_q = max_seqlen_q
|
| 743 |
+
ctx.max_seqlen_k = max_seqlen_k
|
| 744 |
+
ctx.softmax_scale = softmax_scale
|
| 745 |
+
ctx.causal = causal
|
| 746 |
+
ctx.window_size = window_size
|
| 747 |
+
ctx.softcap = softcap
|
| 748 |
+
ctx.alibi_slopes = alibi_slopes
|
| 749 |
+
ctx.deterministic = deterministic
|
| 750 |
+
out = out_padded[..., :head_size_og]
|
| 751 |
+
return out if not return_softmax else (out, softmax_lse, S_dmask)
|
| 752 |
+
|
| 753 |
+
@staticmethod
|
| 754 |
+
def backward(ctx, dout, *args):
|
| 755 |
+
q, k, v, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state = ctx.saved_tensors
|
| 756 |
+
dq = torch.empty_like(q)
|
| 757 |
+
kv_shape = k.shape[:-2] + (2, *k.shape[-2:])
|
| 758 |
+
dkv = torch.empty(kv_shape, dtype=k.dtype, device=k.device)
|
| 759 |
+
head_size_og = dout.size(2)
|
| 760 |
+
dout_padded = dout
|
| 761 |
+
if head_size_og % 8 != 0:
|
| 762 |
+
dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
|
| 763 |
+
_wrapped_flash_attn_varlen_backward(
|
| 764 |
+
dout_padded,
|
| 765 |
+
q,
|
| 766 |
+
k,
|
| 767 |
+
v,
|
| 768 |
+
out,
|
| 769 |
+
softmax_lse,
|
| 770 |
+
dq,
|
| 771 |
+
dkv[:, 0],
|
| 772 |
+
dkv[:, 1],
|
| 773 |
+
cu_seqlens_q,
|
| 774 |
+
cu_seqlens_k,
|
| 775 |
+
ctx.max_seqlen_q,
|
| 776 |
+
ctx.max_seqlen_k,
|
| 777 |
+
ctx.dropout_p,
|
| 778 |
+
ctx.softmax_scale,
|
| 779 |
+
ctx.causal,
|
| 780 |
+
ctx.window_size[0],
|
| 781 |
+
ctx.window_size[1],
|
| 782 |
+
ctx.softcap,
|
| 783 |
+
ctx.alibi_slopes,
|
| 784 |
+
ctx.deterministic,
|
| 785 |
+
rng_state=rng_state,
|
| 786 |
+
)
|
| 787 |
+
dq = dq[..., : dout.shape[-1]] # We could have padded the head dimension
|
| 788 |
+
dkv = dkv[..., : dout.shape[-1]]
|
| 789 |
+
return dq, dkv, None, None, None, None, None, None, None, None, None, None, None, None
|
| 790 |
+
|
| 791 |
+
|
| 792 |
+
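The softcap argument threaded through these wrappers is only documented below as "anything > 0 activates softcapping attention". As an assumption (not spelled out in this file), softcapping is commonly described as squashing the pre-softmax scores through a tanh, roughly:

import torch

def softcap_scores(scores, softcap):
    # scores: QK^T already multiplied by softmax_scale; softcap <= 0 leaves them untouched.
    if softcap <= 0.0:
        return scores
    return softcap * torch.tanh(scores / softcap)
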
class FlashAttnFunc(torch.autograd.Function):
|
| 793 |
+
@staticmethod
|
| 794 |
+
def forward(
|
| 795 |
+
ctx,
|
| 796 |
+
q,
|
| 797 |
+
k,
|
| 798 |
+
v,
|
| 799 |
+
dropout_p,
|
| 800 |
+
softmax_scale,
|
| 801 |
+
causal,
|
| 802 |
+
window_size,
|
| 803 |
+
softcap,
|
| 804 |
+
alibi_slopes,
|
| 805 |
+
deterministic,
|
| 806 |
+
return_softmax,
|
| 807 |
+
):
|
| 808 |
+
if softmax_scale is None:
|
| 809 |
+
softmax_scale = q.shape[-1] ** (-0.5)
|
| 810 |
+
head_size_og = q.size(3)
|
| 811 |
+
if head_size_og % 8 != 0:
|
| 812 |
+
q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
|
| 813 |
+
k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
|
| 814 |
+
v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
|
| 815 |
+
out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_forward(
|
| 816 |
+
q,
|
| 817 |
+
k,
|
| 818 |
+
v,
|
| 819 |
+
dropout_p,
|
| 820 |
+
softmax_scale,
|
| 821 |
+
causal=causal,
|
| 822 |
+
window_size_left=window_size[0],
|
| 823 |
+
window_size_right=window_size[1],
|
| 824 |
+
softcap=softcap,
|
| 825 |
+
alibi_slopes=alibi_slopes,
|
| 826 |
+
return_softmax=return_softmax and dropout_p > 0,
|
| 827 |
+
)
|
| 828 |
+
ctx.save_for_backward(q, k, v, out_padded, softmax_lse, rng_state)
|
| 829 |
+
ctx.dropout_p = dropout_p
|
| 830 |
+
ctx.softmax_scale = softmax_scale
|
| 831 |
+
ctx.causal = causal
|
| 832 |
+
ctx.window_size = window_size
|
| 833 |
+
ctx.softcap = softcap
|
| 834 |
+
ctx.alibi_slopes = alibi_slopes
|
| 835 |
+
ctx.deterministic = deterministic
|
| 836 |
+
out = out_padded[..., :head_size_og]
|
| 837 |
+
return out if not return_softmax else (out, softmax_lse, S_dmask)
|
| 838 |
+
|
| 839 |
+
@staticmethod
|
| 840 |
+
def backward(ctx, dout, *args):
|
| 841 |
+
q, k, v, out, softmax_lse, rng_state = ctx.saved_tensors
|
| 842 |
+
dq, dk, dv = torch.empty_like(q), torch.empty_like(k), torch.empty_like(v)
|
| 843 |
+
head_size_og = dout.size(3)
|
| 844 |
+
dout_padded = dout
|
| 845 |
+
if head_size_og % 8 != 0:
|
| 846 |
+
dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
|
| 847 |
+
_wrapped_flash_attn_backward(
|
| 848 |
+
dout_padded,
|
| 849 |
+
q,
|
| 850 |
+
k,
|
| 851 |
+
v,
|
| 852 |
+
out,
|
| 853 |
+
softmax_lse,
|
| 854 |
+
dq,
|
| 855 |
+
dk,
|
| 856 |
+
dv,
|
| 857 |
+
ctx.dropout_p,
|
| 858 |
+
ctx.softmax_scale,
|
| 859 |
+
ctx.causal,
|
| 860 |
+
ctx.window_size[0],
|
| 861 |
+
ctx.window_size[1],
|
| 862 |
+
ctx.softcap,
|
| 863 |
+
ctx.alibi_slopes,
|
| 864 |
+
ctx.deterministic,
|
| 865 |
+
rng_state=rng_state,
|
| 866 |
+
)
|
| 867 |
+
dq = dq[..., : dout.shape[-1]] # We could have padded the head dimension
|
| 868 |
+
dk = dk[..., : dout.shape[-1]]
|
| 869 |
+
dv = dv[..., : dout.shape[-1]]
|
| 870 |
+
return dq, dk, dv, None, None, None, None, None, None, None, None
|
| 871 |
+
|
| 872 |
+
|
| 873 |
+
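All of the autograd classes above repeat the same padding trick: the kernels want the head dimension to be a multiple of 8, so inputs are right-padded and outputs/gradients are sliced back to head_size_og. A standalone sketch of that pattern (hypothetical helper, not part of the library API):

import torch

def pad_headdim_to_multiple_of_8(x):
    head_size_og = x.size(-1)
    rem = head_size_og % 8
    if rem != 0:
        # Pad only the last (head) dimension on the right.
        x = torch.nn.functional.pad(x, [0, 8 - rem])
    return x, head_size_og

# ...run the kernel on the padded tensors, then slice the result back:
# out = out_padded[..., :head_size_og]
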
class FlashAttnVarlenFunc(torch.autograd.Function):
|
| 874 |
+
@staticmethod
|
| 875 |
+
def forward(
|
| 876 |
+
ctx,
|
| 877 |
+
q,
|
| 878 |
+
k,
|
| 879 |
+
v,
|
| 880 |
+
cu_seqlens_q,
|
| 881 |
+
cu_seqlens_k,
|
| 882 |
+
max_seqlen_q,
|
| 883 |
+
max_seqlen_k,
|
| 884 |
+
dropout_p,
|
| 885 |
+
softmax_scale,
|
| 886 |
+
causal,
|
| 887 |
+
window_size,
|
| 888 |
+
softcap,
|
| 889 |
+
alibi_slopes,
|
| 890 |
+
deterministic,
|
| 891 |
+
return_softmax,
|
| 892 |
+
block_table,
|
| 893 |
+
):
|
| 894 |
+
if softmax_scale is None:
|
| 895 |
+
softmax_scale = q.shape[-1] ** (-0.5)
|
| 896 |
+
head_size_og = q.size(2)
|
| 897 |
+
if head_size_og % 8 != 0:
|
| 898 |
+
q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
|
| 899 |
+
k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
|
| 900 |
+
v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
|
| 901 |
+
out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_varlen_forward(
|
| 902 |
+
q,
|
| 903 |
+
k,
|
| 904 |
+
v,
|
| 905 |
+
cu_seqlens_q,
|
| 906 |
+
cu_seqlens_k,
|
| 907 |
+
max_seqlen_q,
|
| 908 |
+
max_seqlen_k,
|
| 909 |
+
dropout_p,
|
| 910 |
+
softmax_scale,
|
| 911 |
+
causal=causal,
|
| 912 |
+
window_size_left=window_size[0],
|
| 913 |
+
window_size_right=window_size[1],
|
| 914 |
+
softcap=softcap,
|
| 915 |
+
alibi_slopes=alibi_slopes,
|
| 916 |
+
return_softmax=return_softmax and dropout_p > 0,
|
| 917 |
+
block_table=block_table,
|
| 918 |
+
)
|
| 919 |
+
ctx.save_for_backward(
|
| 920 |
+
q, k, v, out_padded, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state
|
| 921 |
+
)
|
| 922 |
+
ctx.dropout_p = dropout_p
|
| 923 |
+
ctx.max_seqlen_q = max_seqlen_q
|
| 924 |
+
ctx.max_seqlen_k = max_seqlen_k
|
| 925 |
+
ctx.softmax_scale = softmax_scale
|
| 926 |
+
ctx.causal = causal
|
| 927 |
+
ctx.window_size = window_size
|
| 928 |
+
ctx.softcap = softcap
|
| 929 |
+
ctx.alibi_slopes = alibi_slopes
|
| 930 |
+
ctx.deterministic = deterministic
|
| 931 |
+
out = out_padded[..., :head_size_og]
|
| 932 |
+
return out if not return_softmax else (out, softmax_lse, S_dmask)
|
| 933 |
+
|
| 934 |
+
@staticmethod
|
| 935 |
+
def backward(ctx, dout, *args):
|
| 936 |
+
q, k, v, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state = ctx.saved_tensors
|
| 937 |
+
dq, dk, dv = torch.empty_like(q), torch.empty_like(k), torch.empty_like(v)
|
| 938 |
+
head_size_og = dout.size(2)
|
| 939 |
+
dout_padded = dout
|
| 940 |
+
if head_size_og % 8 != 0:
|
| 941 |
+
dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
|
| 942 |
+
_wrapped_flash_attn_varlen_backward(
|
| 943 |
+
dout_padded,
|
| 944 |
+
q,
|
| 945 |
+
k,
|
| 946 |
+
v,
|
| 947 |
+
out,
|
| 948 |
+
softmax_lse,
|
| 949 |
+
dq,
|
| 950 |
+
dk,
|
| 951 |
+
dv,
|
| 952 |
+
cu_seqlens_q,
|
| 953 |
+
cu_seqlens_k,
|
| 954 |
+
ctx.max_seqlen_q,
|
| 955 |
+
ctx.max_seqlen_k,
|
| 956 |
+
ctx.dropout_p,
|
| 957 |
+
ctx.softmax_scale,
|
| 958 |
+
ctx.causal,
|
| 959 |
+
ctx.window_size[0],
|
| 960 |
+
ctx.window_size[1],
|
| 961 |
+
ctx.softcap,
|
| 962 |
+
ctx.alibi_slopes,
|
| 963 |
+
ctx.deterministic,
|
| 964 |
+
rng_state=rng_state,
|
| 965 |
+
)
|
| 966 |
+
dq = dq[..., : dout.shape[-1]] # We could have padded the head dimension
|
| 967 |
+
dk = dk[..., : dout.shape[-1]]
|
| 968 |
+
dv = dv[..., : dout.shape[-1]]
|
| 969 |
+
return dq, dk, dv, None, None, None, None, None, None, None, None, None, None, None, None, None
|
| 970 |
+
|
| 971 |
+
|
| 972 |
+
def flash_attn_qkvpacked_func(
|
| 973 |
+
qkv,
|
| 974 |
+
dropout_p=0.0,
|
| 975 |
+
softmax_scale=None,
|
| 976 |
+
causal=False,
|
| 977 |
+
window_size=(-1, -1), # -1 means infinite context window
|
| 978 |
+
softcap=0.0, # <=0.0 means deactivate
|
| 979 |
+
alibi_slopes=None,
|
| 980 |
+
deterministic=False,
|
| 981 |
+
return_attn_probs=False,
|
| 982 |
+
):
|
| 983 |
+
"""dropout_p should be set to 0.0 during evaluation
|
| 984 |
+
If Q, K, V are already stacked into 1 tensor, this function will be faster than
|
| 985 |
+
calling flash_attn_func on Q, K, V since the backward pass avoids explicit concatenation
|
| 986 |
+
of the gradients of Q, K, V.
|
| 987 |
+
For multi-query and grouped-query attention (MQA/GQA), please see
|
| 988 |
+
flash_attn_kvpacked_func and flash_attn_func.
|
| 989 |
+
|
| 990 |
+
If window_size != (-1, -1), implements sliding window local attention. Query at position i
|
| 991 |
+
will only attend to keys between [i - window_size[0], i + window_size[1]] inclusive.
|
| 992 |
+
|
| 993 |
+
Arguments:
|
| 994 |
+
qkv: (batch_size, seqlen, 3, nheads, headdim)
|
| 995 |
+
dropout_p: float. Dropout probability.
|
| 996 |
+
softmax_scale: float. The scaling of QK^T before applying softmax.
|
| 997 |
+
Default to 1 / sqrt(headdim).
|
| 998 |
+
causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
|
| 999 |
+
window_size: (left, right). If not (-1, -1), implements sliding window local attention.
|
| 1000 |
+
softcap: float. Anything > 0 activates softcapping attention.
|
| 1001 |
+
alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of (-alibi_slope * |i - j|) is added to
|
| 1002 |
+
the attention score of query i and key j.
|
| 1003 |
+
deterministic: bool. Whether to use the deterministic implementation of the backward pass,
|
| 1004 |
+
which is slightly slower and uses more memory. The forward pass is always deterministic.
|
| 1005 |
+
return_attn_probs: bool. Whether to return the attention probabilities. This option is for
|
| 1006 |
+
testing only. The returned probabilities are not guaranteed to be correct
|
| 1007 |
+
(they might not have the right scaling).
|
| 1008 |
+
Return:
|
| 1009 |
+
out: (batch_size, seqlen, nheads, headdim).
|
| 1010 |
+
softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
|
| 1011 |
+
logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
|
| 1012 |
+
normalization factor).
|
| 1013 |
+
S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
|
| 1014 |
+
The output of softmax (possibly with different scaling). It also encodes the dropout
|
| 1015 |
+
pattern (negative means that location was dropped, nonnegative means it was kept).
|
| 1016 |
+
"""
|
| 1017 |
+
return FlashAttnQKVPackedFunc.apply(
|
| 1018 |
+
qkv,
|
| 1019 |
+
dropout_p,
|
| 1020 |
+
softmax_scale,
|
| 1021 |
+
causal,
|
| 1022 |
+
window_size,
|
| 1023 |
+
softcap,
|
| 1024 |
+
alibi_slopes,
|
| 1025 |
+
deterministic,
|
| 1026 |
+
return_attn_probs,
|
| 1027 |
+
)
|
| 1028 |
+
|
| 1029 |
+
|
| 1030 |
+
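A minimal usage sketch for flash_attn_qkvpacked_func (not part of this file; assumes the usual top-level flash_attn exports and a CUDA device):

import torch
from flash_attn import flash_attn_qkvpacked_func

batch, seqlen, nheads, headdim = 2, 128, 8, 64
qkv = torch.randn(batch, seqlen, 3, nheads, headdim,
                  dtype=torch.bfloat16, device="cuda")
out = flash_attn_qkvpacked_func(qkv, dropout_p=0.1, causal=True)
# out: (batch, seqlen, nheads, headdim); set dropout_p=0.0 for evaluation.
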
def flash_attn_kvpacked_func(
|
| 1031 |
+
q,
|
| 1032 |
+
kv,
|
| 1033 |
+
dropout_p=0.0,
|
| 1034 |
+
softmax_scale=None,
|
| 1035 |
+
causal=False,
|
| 1036 |
+
window_size=(-1, -1), # -1 means infinite context window
|
| 1037 |
+
softcap=0.0, # 0.0 means deactivated
|
| 1038 |
+
alibi_slopes=None,
|
| 1039 |
+
deterministic=False,
|
| 1040 |
+
return_attn_probs=False,
|
| 1041 |
+
):
|
| 1042 |
+
"""dropout_p should be set to 0.0 during evaluation
|
| 1043 |
+
If K, V are already stacked into 1 tensor, this function will be faster than
|
| 1044 |
+
calling flash_attn_func on Q, K, V since the backward pass avoids explicit concatenation
|
| 1045 |
+
of the gradients of K, V.
|
| 1046 |
+
Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
|
| 1047 |
+
than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
|
| 1048 |
+
For example, if Q has 6 heads and K, V have 2 heads, head 0, 1, 2 of Q will attend to head
|
| 1049 |
+
0 of K, V, and head 3, 4, 5 of Q will attend to head 1 of K, V.
|
| 1050 |
+
|
| 1051 |
+
If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
|
| 1052 |
+
For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
|
| 1053 |
+
1 1 1 1 0
|
| 1054 |
+
1 1 1 1 1
|
| 1055 |
+
If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
|
| 1056 |
+
0 0
|
| 1057 |
+
0 0
|
| 1058 |
+
0 0
|
| 1059 |
+
1 0
|
| 1060 |
+
1 1
|
| 1061 |
+
If the row of the mask is all zero, the output will be zero.
|
| 1062 |
+
|
| 1063 |
+
If window_size != (-1, -1), implements sliding window local attention. Query at position i
|
| 1064 |
+
will only attend to keys between
|
| 1065 |
+
[i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
|
| 1066 |
+
|
| 1067 |
+
Arguments:
|
| 1068 |
+
q: (batch_size, seqlen, nheads, headdim)
|
| 1069 |
+
kv: (batch_size, seqlen, 2, nheads_k, headdim)
|
| 1070 |
+
dropout_p: float. Dropout probability.
|
| 1071 |
+
softmax_scale: float. The scaling of QK^T before applying softmax.
|
| 1072 |
+
Default to 1 / sqrt(headdim).
|
| 1073 |
+
causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
|
| 1074 |
+
window_size: (left, right). If not (-1, -1), implements sliding window local attention.
|
| 1075 |
+
softcap: float. Anything > 0 activates softcapping attention.
|
| 1076 |
+
alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
|
| 1077 |
+
(-alibi_slope * |i + seqlen_k - seqlen_q - j|)
|
| 1078 |
+
is added to the attention score of query i and key j.
|
| 1079 |
+
deterministic: bool. Whether to use the deterministic implementation of the backward pass,
|
| 1080 |
+
which is slightly slower and uses more memory. The forward pass is always deterministic.
|
| 1081 |
+
return_attn_probs: bool. Whether to return the attention probabilities. This option is for
|
| 1082 |
+
testing only. The returned probabilities are not guaranteed to be correct
|
| 1083 |
+
(they might not have the right scaling).
|
| 1084 |
+
Return:
|
| 1085 |
+
out: (batch_size, seqlen, nheads, headdim).
|
| 1086 |
+
softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
|
| 1087 |
+
logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
|
| 1088 |
+
normalization factor).
|
| 1089 |
+
S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
|
| 1090 |
+
The output of softmax (possibly with different scaling). It also encodes the dropout
|
| 1091 |
+
pattern (negative means that location was dropped, nonnegative means it was kept).
|
| 1092 |
+
"""
|
| 1093 |
+
return FlashAttnKVPackedFunc.apply(
|
| 1094 |
+
q,
|
| 1095 |
+
kv,
|
| 1096 |
+
dropout_p,
|
| 1097 |
+
softmax_scale,
|
| 1098 |
+
causal,
|
| 1099 |
+
window_size,
|
| 1100 |
+
softcap,
|
| 1101 |
+
alibi_slopes,
|
| 1102 |
+
deterministic,
|
| 1103 |
+
return_attn_probs,
|
| 1104 |
+
)
|
| 1105 |
+
|
| 1106 |
+
|
| 1107 |
+
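A sketch of the MQA/GQA head mapping described in the docstring above (assumed usage, not library code): with 6 query heads and 2 KV heads, query head h reads KV head h // (nheads // nheads_k).

import torch
from flash_attn import flash_attn_kvpacked_func

batch, seqlen, nheads, nheads_k, headdim = 1, 64, 6, 2, 64
q = torch.randn(batch, seqlen, nheads, headdim, dtype=torch.float16, device="cuda")
kv = torch.randn(batch, seqlen, 2, nheads_k, headdim, dtype=torch.float16, device="cuda")

out = flash_attn_kvpacked_func(q, kv, causal=True)  # (batch, seqlen, nheads, headdim)

group = nheads // nheads_k                              # 3 query heads share each KV head
kv_head_for_q_head = [h // group for h in range(nheads)]  # [0, 0, 0, 1, 1, 1]
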
def flash_attn_func(
|
| 1108 |
+
q,
|
| 1109 |
+
k,
|
| 1110 |
+
v,
|
| 1111 |
+
dropout_p=0.0,
|
| 1112 |
+
softmax_scale=None,
|
| 1113 |
+
causal=False,
|
| 1114 |
+
window_size=(-1, -1), # -1 means infinite context window
|
| 1115 |
+
softcap=0.0, # 0.0 means deactivated
|
| 1116 |
+
alibi_slopes=None,
|
| 1117 |
+
deterministic=False,
|
| 1118 |
+
return_attn_probs=False,
|
| 1119 |
+
):
|
| 1120 |
+
"""dropout_p should be set to 0.0 during evaluation
|
| 1121 |
+
Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
|
| 1122 |
+
than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
|
| 1123 |
+
For example, if Q has 6 heads and K, V have 2 heads, head 0, 1, 2 of Q will attend to head
|
| 1124 |
+
0 of K, V, and head 3, 4, 5 of Q will attend to head 1 of K, V.
|
| 1125 |
+
|
| 1126 |
+
If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
|
| 1127 |
+
For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
|
| 1128 |
+
1 1 1 1 0
|
| 1129 |
+
1 1 1 1 1
|
| 1130 |
+
If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
|
| 1131 |
+
0 0
|
| 1132 |
+
0 0
|
| 1133 |
+
0 0
|
| 1134 |
+
1 0
|
| 1135 |
+
1 1
|
| 1136 |
+
If the row of the mask is all zero, the output will be zero.
|
| 1137 |
+
|
| 1138 |
+
If window_size != (-1, -1), implements sliding window local attention. Query at position i
|
| 1139 |
+
will only attend to keys between
|
| 1140 |
+
[i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
|
| 1141 |
+
|
| 1142 |
+
Arguments:
|
| 1143 |
+
q: (batch_size, seqlen, nheads, headdim)
|
| 1144 |
+
k: (batch_size, seqlen, nheads_k, headdim)
|
| 1145 |
+
v: (batch_size, seqlen, nheads_k, headdim)
|
| 1146 |
+
dropout_p: float. Dropout probability.
|
| 1147 |
+
softmax_scale: float. The scaling of QK^T before applying softmax.
|
| 1148 |
+
Default to 1 / sqrt(headdim).
|
| 1149 |
+
causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
|
| 1150 |
+
window_size: (left, right). If not (-1, -1), implements sliding window local attention.
|
| 1151 |
+
alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
|
| 1152 |
+
(-alibi_slope * |i + seqlen_k - seqlen_q - j|)
|
| 1153 |
+
is added to the attention score of query i and key j.
|
| 1154 |
+
deterministic: bool. Whether to use the deterministic implementation of the backward pass,
|
| 1155 |
+
which is slightly slower and uses more memory. The forward pass is always deterministic.
|
| 1156 |
+
return_attn_probs: bool. Whether to return the attention probabilities. This option is for
|
| 1157 |
+
testing only. The returned probabilities are not guaranteed to be correct
|
| 1158 |
+
(they might not have the right scaling).
|
| 1159 |
+
Return:
|
| 1160 |
+
out: (batch_size, seqlen, nheads, headdim).
|
| 1161 |
+
softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
|
| 1162 |
+
logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
|
| 1163 |
+
normalization factor).
|
| 1164 |
+
S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
|
| 1165 |
+
The output of softmax (possibly with different scaling). It also encodes the dropout
|
| 1166 |
+
pattern (negative means that location was dropped, nonnegative means it was kept).
|
| 1167 |
+
"""
|
| 1168 |
+
return FlashAttnFunc.apply(
|
| 1169 |
+
q,
|
| 1170 |
+
k,
|
| 1171 |
+
v,
|
| 1172 |
+
dropout_p,
|
| 1173 |
+
softmax_scale,
|
| 1174 |
+
causal,
|
| 1175 |
+
window_size,
|
| 1176 |
+
softcap,
|
| 1177 |
+
alibi_slopes,
|
| 1178 |
+
deterministic,
|
| 1179 |
+
return_attn_probs,
|
| 1180 |
+
)
|
| 1181 |
+
|
| 1182 |
+
|
| 1183 |
+
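A sketch of bottom-right-aligned causal attention combined with a sliding window, as described in the flash_attn_func docstring above (assumed usage, not part of this file):

import torch
from flash_attn import flash_attn_func

batch, seqlen, nheads, nheads_k, headdim = 2, 4096, 32, 8, 128
q = torch.randn(batch, seqlen, nheads, headdim, dtype=torch.bfloat16, device="cuda")
k = torch.randn(batch, seqlen, nheads_k, headdim, dtype=torch.bfloat16, device="cuda")
v = torch.randn(batch, seqlen, nheads_k, headdim, dtype=torch.bfloat16, device="cuda")

out = flash_attn_func(q, k, v, causal=True, window_size=(1024, 0))
# Each query attends to itself and at most the 1024 keys before it.
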
def flash_attn_varlen_qkvpacked_func(
|
| 1184 |
+
qkv,
|
| 1185 |
+
cu_seqlens,
|
| 1186 |
+
max_seqlen,
|
| 1187 |
+
dropout_p=0.0,
|
| 1188 |
+
softmax_scale=None,
|
| 1189 |
+
causal=False,
|
| 1190 |
+
window_size=(-1, -1), # -1 means infinite context window
|
| 1191 |
+
softcap=0.0, # 0.0 means deactivated
|
| 1192 |
+
alibi_slopes=None,
|
| 1193 |
+
deterministic=False,
|
| 1194 |
+
return_attn_probs=False,
|
| 1195 |
+
):
|
| 1196 |
+
"""dropout_p should be set to 0.0 during evaluation
|
| 1197 |
+
If Q, K, V are already stacked into 1 tensor, this function will be faster than
|
| 1198 |
+
calling flash_attn_varlen_func on Q, K, V since the backward pass avoids explicit concatenation
|
| 1199 |
+
of the gradients of Q, K, V.
|
| 1200 |
+
For multi-query and grouped-query attention (MQA/GQA), please see
|
| 1201 |
+
flash_attn_varlen_kvpacked_func and flash_attn_varlen_func.
|
| 1202 |
+
|
| 1203 |
+
If window_size != (-1, -1), implements sliding window local attention. Query at position i
|
| 1204 |
+
will only attend to keys between [i - window_size[0], i + window_size[1]] inclusive.
|
| 1205 |
+
|
| 1206 |
+
Arguments:
|
| 1207 |
+
qkv: (total, 3, nheads, headdim), where total = total number of tokens in the batch.
|
| 1208 |
+
cu_seqlens: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
|
| 1209 |
+
of the sequences in the batch, used to index into qkv.
|
| 1210 |
+
max_seqlen: int. Maximum sequence length in the batch.
|
| 1211 |
+
dropout_p: float. Dropout probability.
|
| 1212 |
+
softmax_scale: float. The scaling of QK^T before applying softmax.
|
| 1213 |
+
Default to 1 / sqrt(headdim).
|
| 1214 |
+
causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
|
| 1215 |
+
window_size: (left, right). If not (-1, -1), implements sliding window local attention.
|
| 1216 |
+
softcap: float. Anything > 0 activates softcapping attention.
|
| 1217 |
+
alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of (-alibi_slope * |i - j|)
|
| 1218 |
+
is added to the attention score of query i and key j.
|
| 1219 |
+
deterministic: bool. Whether to use the deterministic implementation of the backward pass,
|
| 1220 |
+
which is slightly slower and uses more memory. The forward pass is always deterministic.
|
| 1221 |
+
return_attn_probs: bool. Whether to return the attention probabilities. This option is for
|
| 1222 |
+
testing only. The returned probabilities are not guaranteed to be correct
|
| 1223 |
+
(they might not have the right scaling).
|
| 1224 |
+
Return:
|
| 1225 |
+
out: (total, nheads, headdim).
|
| 1226 |
+
softmax_lse [optional, if return_attn_probs=True]: (nheads, total_q_seqlen). The
|
| 1227 |
+
logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
|
| 1228 |
+
normalization factor).
|
| 1229 |
+
S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
|
| 1230 |
+
The output of softmax (possibly with different scaling). It also encodes the dropout
|
| 1231 |
+
pattern (negative means that location was dropped, nonnegative means it was kept).
|
| 1232 |
+
"""
|
| 1233 |
+
return FlashAttnVarlenQKVPackedFunc.apply(
|
| 1234 |
+
qkv,
|
| 1235 |
+
cu_seqlens,
|
| 1236 |
+
max_seqlen,
|
| 1237 |
+
dropout_p,
|
| 1238 |
+
softmax_scale,
|
| 1239 |
+
causal,
|
| 1240 |
+
window_size,
|
| 1241 |
+
softcap,
|
| 1242 |
+
alibi_slopes,
|
| 1243 |
+
deterministic,
|
| 1244 |
+
return_attn_probs,
|
| 1245 |
+
)
|
| 1246 |
+
|
| 1247 |
+
|
| 1248 |
+
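The varlen functions expect tokens packed into a (total, ...) layout. A hedged sketch of flattening a right-padded batch with an attention mask into that layout (names are illustrative, not library API):

import torch

def unpad(x, attention_mask):
    # x: (batch, seqlen, nheads, headdim); attention_mask: (batch, seqlen) bool
    seqlens = attention_mask.sum(dim=1)                                   # (batch,)
    cu_seqlens = torch.nn.functional.pad(seqlens.cumsum(0), (1, 0)).to(torch.int32)
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    x_packed = x.reshape(-1, *x.shape[2:])[indices]                       # (total, nheads, headdim)
    return x_packed, cu_seqlens, int(seqlens.max())
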
def flash_attn_varlen_kvpacked_func(
|
| 1249 |
+
q,
|
| 1250 |
+
kv,
|
| 1251 |
+
cu_seqlens_q,
|
| 1252 |
+
cu_seqlens_k,
|
| 1253 |
+
max_seqlen_q,
|
| 1254 |
+
max_seqlen_k,
|
| 1255 |
+
dropout_p=0.0,
|
| 1256 |
+
softmax_scale=None,
|
| 1257 |
+
causal=False,
|
| 1258 |
+
window_size=(-1, -1), # -1 means infinite context window
|
| 1259 |
+
softcap=0.0, # 0.0 means deactivated
|
| 1260 |
+
alibi_slopes=None,
|
| 1261 |
+
deterministic=False,
|
| 1262 |
+
return_attn_probs=False,
|
| 1263 |
+
):
|
| 1264 |
+
"""dropout_p should be set to 0.0 during evaluation
|
| 1265 |
+
If K, V are already stacked into 1 tensor, this function will be faster than
|
| 1266 |
+
calling flash_attn_func on Q, K, V since the backward pass avoids explicit concatenation
|
| 1267 |
+
of the gradients of K, V.
|
| 1268 |
+
Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
|
| 1269 |
+
than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
|
| 1270 |
+
For example, if Q has 6 heads and K, V have 2 heads, head 0, 1, 2 of Q will attend to head
|
| 1271 |
+
0 of K, V, and head 3, 4, 5 of Q will attend to head 1 of K, V.
|
| 1272 |
+
|
| 1273 |
+
If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
|
| 1274 |
+
For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
|
| 1275 |
+
1 1 1 1 0
|
| 1276 |
+
1 1 1 1 1
|
| 1277 |
+
If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
|
| 1278 |
+
0 0
|
| 1279 |
+
0 0
|
| 1280 |
+
0 0
|
| 1281 |
+
1 0
|
| 1282 |
+
1 1
|
| 1283 |
+
If the row of the mask is all zero, the output will be zero.
|
| 1284 |
+
|
| 1285 |
+
If window_size != (-1, -1), implements sliding window local attention. Query at position i
|
| 1286 |
+
will only attend to keys between
|
| 1287 |
+
[i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
|
| 1288 |
+
|
| 1289 |
+
Arguments:
|
| 1290 |
+
q: (total_q, nheads, headdim), where total_q = total number of query tokens in the batch.
|
| 1291 |
+
kv: (total_k, 2, nheads_k, headdim), where total_k = total number of key tokens in the batch.
|
| 1292 |
+
cu_seqlens_q: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
|
| 1293 |
+
of the sequences in the batch, used to index into q.
|
| 1294 |
+
cu_seqlens_k: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
|
| 1295 |
+
of the sequences in the batch, used to index into kv.
|
| 1296 |
+
max_seqlen_q: int. Maximum query sequence length in the batch.
|
| 1297 |
+
max_seqlen_k: int. Maximum key sequence length in the batch.
|
| 1298 |
+
dropout_p: float. Dropout probability.
|
| 1299 |
+
softmax_scale: float. The scaling of QK^T before applying softmax.
|
| 1300 |
+
Default to 1 / sqrt(headdim).
|
| 1301 |
+
causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
|
| 1302 |
+
window_size: (left, right). If not (-1, -1), implements sliding window local attention.
|
| 1303 |
+
softcap: float. Anything > 0 activates softcapping attention.
|
| 1304 |
+
alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
|
| 1305 |
+
(-alibi_slope * |i + seqlen_k - seqlen_q - j|)
|
| 1306 |
+
is added to the attention score of query i and key j.
|
| 1307 |
+
deterministic: bool. Whether to use the deterministic implementation of the backward pass,
|
| 1308 |
+
which is slightly slower and uses more memory. The forward pass is always deterministic.
|
| 1309 |
+
return_attn_probs: bool. Whether to return the attention probabilities. This option is for
|
| 1310 |
+
testing only. The returned probabilities are not guaranteed to be correct
|
| 1311 |
+
(they might not have the right scaling).
|
| 1312 |
+
Return:
|
| 1313 |
+
out: (total, nheads, headdim).
|
| 1314 |
+
softmax_lse [optional, if return_attn_probs=True]: (nheads, total_q_seqlen). The
|
| 1315 |
+
logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
|
| 1316 |
+
normalization factor).
|
| 1317 |
+
S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
|
| 1318 |
+
The output of softmax (possibly with different scaling). It also encodes the dropout
|
| 1319 |
+
pattern (negative means that location was dropped, nonnegative means it was kept).
|
| 1320 |
+
"""
|
| 1321 |
+
return FlashAttnVarlenKVPackedFunc.apply(
|
| 1322 |
+
q,
|
| 1323 |
+
kv,
|
| 1324 |
+
cu_seqlens_q,
|
| 1325 |
+
cu_seqlens_k,
|
| 1326 |
+
max_seqlen_q,
|
| 1327 |
+
max_seqlen_k,
|
| 1328 |
+
dropout_p,
|
| 1329 |
+
softmax_scale,
|
| 1330 |
+
causal,
|
| 1331 |
+
window_size,
|
| 1332 |
+
softcap,
|
| 1333 |
+
alibi_slopes,
|
| 1334 |
+
deterministic,
|
| 1335 |
+
return_attn_probs,
|
| 1336 |
+
)
|
| 1337 |
+
|
| 1338 |
+
|
| 1339 |
+
def flash_attn_varlen_func(
|
| 1340 |
+
q,
|
| 1341 |
+
k,
|
| 1342 |
+
v,
|
| 1343 |
+
cu_seqlens_q,
|
| 1344 |
+
cu_seqlens_k,
|
| 1345 |
+
max_seqlen_q,
|
| 1346 |
+
max_seqlen_k,
|
| 1347 |
+
dropout_p=0.0,
|
| 1348 |
+
softmax_scale=None,
|
| 1349 |
+
causal=False,
|
| 1350 |
+
window_size=(-1, -1), # -1 means infinite context window
|
| 1351 |
+
softcap=0.0, # 0.0 means deactivated
|
| 1352 |
+
alibi_slopes=None,
|
| 1353 |
+
deterministic=False,
|
| 1354 |
+
return_attn_probs=False,
|
| 1355 |
+
block_table=None,
|
| 1356 |
+
):
|
| 1357 |
+
"""dropout_p should be set to 0.0 during evaluation
|
| 1358 |
+
Supports multi-query and grouped-query attention (MQA/GQA) by passing in K, V with fewer heads
|
| 1359 |
+
than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
|
| 1360 |
+
For example, if Q has 6 heads and K, V have 2 heads, head 0, 1, 2 of Q will attend to head
|
| 1361 |
+
0 of K, V, and head 3, 4, 5 of Q will attend to head 1 of K, V.
|
| 1362 |
+
|
| 1363 |
+
If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
|
| 1364 |
+
For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
|
| 1365 |
+
1 1 1 1 0
|
| 1366 |
+
1 1 1 1 1
|
| 1367 |
+
If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
|
| 1368 |
+
0 0
|
| 1369 |
+
0 0
|
| 1370 |
+
0 0
|
| 1371 |
+
1 0
|
| 1372 |
+
1 1
|
| 1373 |
+
If the row of the mask is all zero, the output will be zero.
|
| 1374 |
+
|
| 1375 |
+
If window_size != (-1, -1), implements sliding window local attention. Query at position i
|
| 1376 |
+
will only attend to keys between
|
| 1377 |
+
[i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
|
| 1378 |
+
|
| 1379 |
+
Arguments:
|
| 1380 |
+
q: (total_q, nheads, headdim), where total_q = total number of query tokens in the batch.
|
| 1381 |
+
k: (total_k, nheads_k, headdim), where total_k = total number of key tokens in the batch.
|
| 1382 |
+
v: (total_k, nheads_k, headdim), where total_k = total number of key tokens in the batch.
|
| 1383 |
+
cu_seqlens_q: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
|
| 1384 |
+
of the sequences in the batch, used to index into q.
|
| 1385 |
+
cu_seqlens_k: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
|
| 1386 |
+
of the sequences in the batch, used to index into kv.
|
| 1387 |
+
max_seqlen_q: int. Maximum query sequence length in the batch.
|
| 1388 |
+
max_seqlen_k: int. Maximum key sequence length in the batch.
|
| 1389 |
+
dropout_p: float. Dropout probability.
|
| 1390 |
+
softmax_scale: float. The scaling of QK^T before applying softmax.
|
| 1391 |
+
Default to 1 / sqrt(headdim).
|
| 1392 |
+
causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
|
| 1393 |
+
window_size: (left, right). If not (-1, -1), implements sliding window local attention.
|
| 1394 |
+
softcap: float. Anything > 0 activates softcapping attention.
|
| 1395 |
+
alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
|
| 1396 |
+
(-alibi_slope * |i + seqlen_k - seqlen_q - j|)
|
| 1397 |
+
is added to the attention score of query i and key j.
|
| 1398 |
+
deterministic: bool. Whether to use the deterministic implementation of the backward pass,
|
| 1399 |
+
which is slightly slower and uses more memory. The forward pass is always deterministic.
|
| 1400 |
+
return_attn_probs: bool. Whether to return the attention probabilities. This option is for
|
| 1401 |
+
testing only. The returned probabilities are not guaranteed to be correct
|
| 1402 |
+
(they might not have the right scaling).
|
| 1403 |
+
Return:
|
| 1404 |
+
out: (total, nheads, headdim).
|
| 1405 |
+
softmax_lse [optional, if return_attn_probs=True]: (nheads, total_q_seqlen). The
|
| 1406 |
+
logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
|
| 1407 |
+
normalization factor).
|
| 1408 |
+
S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
|
| 1409 |
+
The output of softmax (possibly with different scaling). It also encodes the dropout
|
| 1410 |
+
pattern (negative means that location was dropped, nonnegative means it was kept).
|
| 1411 |
+
"""
|
| 1412 |
+
return FlashAttnVarlenFunc.apply(
|
| 1413 |
+
q,
|
| 1414 |
+
k,
|
| 1415 |
+
v,
|
| 1416 |
+
cu_seqlens_q,
|
| 1417 |
+
cu_seqlens_k,
|
| 1418 |
+
max_seqlen_q,
|
| 1419 |
+
max_seqlen_k,
|
| 1420 |
+
dropout_p,
|
| 1421 |
+
softmax_scale,
|
| 1422 |
+
causal,
|
| 1423 |
+
window_size,
|
| 1424 |
+
softcap,
|
| 1425 |
+
alibi_slopes,
|
| 1426 |
+
deterministic,
|
| 1427 |
+
return_attn_probs,
|
| 1428 |
+
block_table,
|
| 1429 |
+
)
|
| 1430 |
+
|
| 1431 |
+
|
| 1432 |
+
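A sketch of calling flash_attn_varlen_func with different query and key lengths per sequence (assumed usage, not part of this file):

import torch
from flash_attn import flash_attn_varlen_func

q_lens, k_lens = [4, 7], [10, 12]
nheads, nheads_k, headdim = 8, 8, 64
total_q, total_k = sum(q_lens), sum(k_lens)

def cu(lens):
    # Cumulative lengths, e.g. [4, 7] -> [0, 4, 11]
    return torch.cumsum(torch.tensor([0] + lens, device="cuda"), 0).to(torch.int32)

cu_seqlens_q, cu_seqlens_k = cu(q_lens), cu(k_lens)

q = torch.randn(total_q, nheads, headdim, dtype=torch.float16, device="cuda")
k = torch.randn(total_k, nheads_k, headdim, dtype=torch.float16, device="cuda")
v = torch.randn(total_k, nheads_k, headdim, dtype=torch.float16, device="cuda")

out = flash_attn_varlen_func(q, k, v, cu_seqlens_q, cu_seqlens_k,
                             max_seqlen_q=max(q_lens), max_seqlen_k=max(k_lens))
# out: (total_q, nheads, headdim)
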
def flash_attn_with_kvcache(
|
| 1433 |
+
q,
|
| 1434 |
+
k_cache,
|
| 1435 |
+
v_cache,
|
| 1436 |
+
k=None,
|
| 1437 |
+
v=None,
|
| 1438 |
+
rotary_cos=None,
|
| 1439 |
+
rotary_sin=None,
|
| 1440 |
+
cache_seqlens: Optional[Union[(int, torch.Tensor)]] = None,
|
| 1441 |
+
cache_batch_idx: Optional[torch.Tensor] = None,
|
| 1442 |
+
cache_leftpad: Optional[torch.Tensor] = None,
|
| 1443 |
+
block_table: Optional[torch.Tensor] = None,
|
| 1444 |
+
softmax_scale=None,
|
| 1445 |
+
causal=False,
|
| 1446 |
+
window_size=(-1, -1), # -1 means infinite context window
|
| 1447 |
+
softcap=0.0, # 0.0 means deactivated
|
| 1448 |
+
rotary_interleaved=True,
|
| 1449 |
+
alibi_slopes=None,
|
| 1450 |
+
num_splits=0,
|
| 1451 |
+
return_softmax_lse=False,
|
| 1452 |
+
):
|
| 1453 |
+
"""
|
| 1454 |
+
If k and v are not None, k_cache and v_cache will be updated *inplace* with the new values from
|
| 1455 |
+
k and v. This is useful for incremental decoding: you can pass in the cached keys/values from
|
| 1456 |
+
the previous step, and update them with the new keys/values from the current step, and do
|
| 1457 |
+
attention with the updated cache, all in 1 kernel.
|
| 1458 |
+
|
| 1459 |
+
If you pass in k / v, you must make sure that the cache is large enough to hold the new values.
|
| 1460 |
+
For example, the KV cache could be pre-allocated with the max sequence length, and you can use
|
| 1461 |
+
cache_seqlens to keep track of the current sequence lengths of each sequence in the batch.
|
| 1462 |
+
|
| 1463 |
+
Also apply rotary embedding if rotary_cos and rotary_sin are passed in. The key @k will be
|
| 1464 |
+
rotated by rotary_cos and rotary_sin at indices cache_seqlens, cache_seqlens + 1, etc.
|
| 1465 |
+
If causal or local (i.e., window_size != (-1, -1)), the query @q will be rotated by rotary_cos
|
| 1466 |
+
and rotary_sin at indices cache_seqlens, cache_seqlens + 1, etc.
|
| 1467 |
+
If not causal and not local, the query @q will be rotated by rotary_cos and rotary_sin at
|
| 1468 |
+
indices cache_seqlens only (i.e. we consider all tokens in @q to be at position cache_seqlens).
|
| 1469 |
+
|
| 1470 |
+
See tests/test_flash_attn.py::test_flash_attn_kvcache for examples of how to use this function.
|
| 1471 |
+
|
| 1472 |
+
Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
|
| 1473 |
+
than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
|
| 1474 |
+
For example, if Q has 6 heads and K, V have 2 heads, head 0, 1, 2 of Q will attend to head
|
| 1475 |
+
0 of K, V, and head 3, 4, 5 of Q will attend to head 1 of K, V.
|
| 1476 |
+
|
| 1477 |
+
If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
|
| 1478 |
+
For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
|
| 1479 |
+
1 1 1 1 0
|
| 1480 |
+
1 1 1 1 1
|
| 1481 |
+
If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
|
| 1482 |
+
0 0
|
| 1483 |
+
0 0
|
| 1484 |
+
0 0
|
| 1485 |
+
1 0
|
| 1486 |
+
1 1
|
| 1487 |
+
If the row of the mask is all zero, the output will be zero.
|
| 1488 |
+
|
| 1489 |
+
If window_size != (-1, -1), implements sliding window local attention. Query at position i
|
| 1490 |
+
will only attend to keys between
|
| 1491 |
+
[i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
|
| 1492 |
+
|
| 1493 |
+
Note: Does not support backward pass.
|
| 1494 |
+
|
| 1495 |
+
Arguments:
|
| 1496 |
+
q: (batch_size, seqlen, nheads, headdim)
|
| 1497 |
+
k_cache: (batch_size_cache, seqlen_cache, nheads_k, headdim) if there's no block_table,
|
| 1498 |
+
or (num_blocks, page_block_size, nheads_k, headdim) if there's a block_table (i.e. paged KV cache)
|
| 1499 |
+
page_block_size must be a multiple of 256.
|
| 1500 |
+
v_cache: (batch_size_cache, seqlen_cache, nheads_k, headdim) if there's no block_table,
|
| 1501 |
+
or (num_blocks, page_block_size, nheads_k, headdim) if there's a block_table (i.e. paged KV cache)
|
| 1502 |
+
k [optional]: (batch_size, seqlen_new, nheads_k, headdim). If not None, we concatenate
|
| 1503 |
+
k with k_cache, starting at the indices specified by cache_seqlens.
|
| 1504 |
+
v [optional]: (batch_size, seqlen_new, nheads_k, headdim). Similar to k.
|
| 1505 |
+
rotary_cos [optional]: (seqlen_ro, rotary_dim / 2). If not None, we apply rotary embedding
|
| 1506 |
+
to k and q. Only applicable if k and v are passed in. rotary_dim must be divisible by 16.
|
| 1507 |
+
rotary_sin [optional]: (seqlen_ro, rotary_dim / 2). Similar to rotary_cos.
|
| 1508 |
+
cache_seqlens: int, or (batch_size,), dtype torch.int32. The sequence lengths of the
|
| 1509 |
+
KV cache.
|
| 1510 |
+
cache_batch_idx: (batch_size,), dtype torch.int32. The indices used to index into the KV cache.
|
| 1511 |
+
If None, we assume that the batch indices are [0, 1, 2, ..., batch_size - 1].
|
| 1512 |
+
If the indices are not distinct, and k and v are provided, the values updated in the cache
|
| 1513 |
+
might come from any of the duplicate indices.
|
| 1514 |
+
cache_leftpad: (batch_size,), dtype torch.int32. The index at which the KV cache starts. If None, assume 0.
|
| 1515 |
+
block_table [optional]: (batch_size, max_num_blocks_per_seq), dtype torch.int32.
|
| 1516 |
+
softmax_scale: float. The scaling of QK^T before applying softmax.
|
| 1517 |
+
Default to 1 / sqrt(headdim).
|
| 1518 |
+
causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
|
| 1519 |
+
window_size: (left, right). If not (-1, -1), implements sliding window local attention.
|
| 1520 |
+
softcap: float. Anything > 0 activates softcapping attention.
|
| 1521 |
+
rotary_interleaved: bool. Only applicable if rotary_cos and rotary_sin are passed in.
|
| 1522 |
+
If True, rotary embedding will combine dimensions 0 & 1, 2 & 3, etc. If False,
|
| 1523 |
+
rotary embedding will combine dimensions 0 & rotary_dim / 2, 1 & rotary_dim / 2 + 1
|
| 1524 |
+
(i.e. GPT-NeoX style).
|
| 1525 |
+
alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
|
| 1526 |
+
(-alibi_slope * |i + seqlen_k - seqlen_q - j|)
|
| 1527 |
+
is added to the attention score of query i and key j.
|
| 1528 |
+
num_splits: int. If > 1, split the key/value into this many chunks along the sequence.
|
| 1529 |
+
If num_splits == 1, we don't split the key/value. If num_splits == 0, we use a heuristic
|
| 1530 |
+
to automatically determine the number of splits.
|
| 1531 |
+
Don't change this unless you know what you are doing.
|
| 1532 |
+
return_softmax_lse: bool. Whether to return the logsumexp of the attention scores.
|
| 1533 |
+
|
| 1534 |
+
Return:
|
| 1535 |
+
out: (batch_size, seqlen, nheads, headdim).
|
| 1536 |
+
softmax_lse [optional, if return_softmax_lse=True]: (batch_size, nheads, seqlen). The
|
| 1537 |
+
logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
|
| 1538 |
+
normalization factor).
|
| 1539 |
+
"""
|
| 1540 |
+
assert k_cache.stride(-1) == 1, "k_cache must have contiguous last dimension"
|
| 1541 |
+
assert v_cache.stride(-1) == 1, "v_cache must have contiguous last dimension"
|
| 1542 |
+
q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
|
| 1543 |
+
if softmax_scale is None:
|
| 1544 |
+
softmax_scale = q.shape[-1] ** (-0.5)
|
| 1545 |
+
if cache_seqlens is not None and isinstance(cache_seqlens, int):
|
| 1546 |
+
cache_seqlens = torch.full(
|
| 1547 |
+
(k_cache.shape[0],), cache_seqlens, dtype=torch.int32, device=k_cache.device
|
| 1548 |
+
)
|
| 1549 |
+
cache_seqlens = maybe_contiguous(cache_seqlens)
|
| 1550 |
+
cache_batch_idx = maybe_contiguous(cache_batch_idx)
|
| 1551 |
+
block_table = maybe_contiguous(block_table)
|
| 1552 |
+
out, softmax_lse = flash_attn_gpu.fwd_kvcache(
|
| 1553 |
+
q,
|
| 1554 |
+
k_cache,
|
| 1555 |
+
v_cache,
|
| 1556 |
+
k,
|
| 1557 |
+
v,
|
| 1558 |
+
cache_seqlens,
|
| 1559 |
+
rotary_cos,
|
| 1560 |
+
rotary_sin,
|
| 1561 |
+
cache_batch_idx,
|
| 1562 |
+
cache_leftpad,
|
| 1563 |
+
block_table,
|
| 1564 |
+
alibi_slopes,
|
| 1565 |
+
None,
|
| 1566 |
+
softmax_scale,
|
| 1567 |
+
causal,
|
| 1568 |
+
window_size[0],
|
| 1569 |
+
window_size[1],
|
| 1570 |
+
softcap,
|
| 1571 |
+
rotary_interleaved,
|
| 1572 |
+
num_splits,
|
| 1573 |
+
)
|
| 1574 |
+
return (out, softmax_lse) if return_softmax_lse else out
|
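An incremental-decoding sketch for flash_attn_with_kvcache (assumed usage, not part of this file): one new token per step, with the kernel writing the new keys/values into the caches in place.

import torch
from flash_attn import flash_attn_with_kvcache

batch, max_seqlen, nheads, nheads_k, headdim = 2, 2048, 16, 16, 128
k_cache = torch.zeros(batch, max_seqlen, nheads_k, headdim, dtype=torch.bfloat16, device="cuda")
v_cache = torch.zeros_like(k_cache)
cache_seqlens = torch.zeros(batch, dtype=torch.int32, device="cuda")

for step in range(16):
    q = torch.randn(batch, 1, nheads, headdim, dtype=torch.bfloat16, device="cuda")
    k_new = torch.randn(batch, 1, nheads_k, headdim, dtype=torch.bfloat16, device="cuda")
    v_new = torch.randn(batch, 1, nheads_k, headdim, dtype=torch.bfloat16, device="cuda")
    # Writes k_new/v_new into the caches at position cache_seqlens, then attends
    # over the updated cache; no backward pass is supported.
    out = flash_attn_with_kvcache(q, k_cache, v_cache, k=k_new, v=v_new,
                                  cache_seqlens=cache_seqlens, causal=True)
    cache_seqlens += 1
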
infer_4_30_0/lib/python3.10/site-packages/flash_attn/flash_attn_triton.py
ADDED
@@ -0,0 +1,1160 @@
"""
*Experimental* implementation of FlashAttention in Triton.
Tested with triton==2.0.0.dev20221202.
Triton 2.0 has a new backend (MLIR) but seems like it doesn't yet work for head dimensions
other than 64:
https://github.com/openai/triton/blob/d376020f90002757eea3ea9475d4f7cfc2ec5ead/python/triton/ops/flash_attention.py#L207
We'll update this implementation with the new Triton backend once this is fixed.

We use the FlashAttention implementation from Phil Tillet as a starting point.
https://github.com/openai/triton/blob/master/python/tutorials/06-fused-attention.py

Changes:
- Implement both causal and non-causal attention.
- Implement both self-attention and cross-attention.
- Support arbitrary seqlens (not just multiples of 128), for both forward and backward.
- Support all head dimensions up to 128 (not just 16, 32, 64, 128), for both forward and backward.
- Support attention bias.
- Speed up the forward pass a bit, and only store the LSE instead of m and l.
- Make the backward for d=128 much faster by reducing register spilling.
- Optionally parallelize the backward pass across seqlen_k, to deal with the case of
  small batch size * nheads.

Caution:
- This is an *experimental* implementation. The forward pass should be quite robust but
  I'm not 100% sure that the backward pass doesn't have race conditions (due to the Triton compiler).
- This implementation has only been tested on A100.
- If you plan to use headdim other than 64 and 128, you should test for race conditions
  (due to the Triton compiler), as done in tests/test_flash_attn.py
  "test_flash_attn_triton_race_condition". I've tested and fixed many race conditions
  for different head dimensions (40, 48, 64, 128, 80, 88, 96), but I'm still not 100% confident
  that there are none left for other head dimensions.

Differences between this Triton version and the CUDA version:
- Triton version doesn't support dropout.
- Triton forward is generally faster than CUDA forward, while Triton backward is
  generally slower than CUDA backward. Overall Triton forward + backward is slightly slower
  than CUDA forward + backward.
- Triton version doesn't support different sequence lengths in a batch (i.e., RaggedTensor/NestedTensor).
- Triton version supports attention bias, while CUDA version doesn't.
"""

import math

import torch
import triton
import triton.language as tl

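For orientation, a plain-PyTorch reference (a sketch under assumptions, not part of this file) of the semantics the Triton kernel below implements: scaled QK^T, an optional additive bias (per-key "vector" or full "matrix"), an optional causal mask, softmax, then the PV product.

import torch

def attention_reference(q, k, v, bias=None, causal=False, softmax_scale=None):
    # q, k, v: (batch, seqlen, nheads, headdim); bias broadcastable to
    # (batch, nheads, seqlen_q, seqlen_k)
    softmax_scale = softmax_scale or q.shape[-1] ** (-0.5)
    scores = torch.einsum("bqhd,bkhd->bhqk", q, k) * softmax_scale
    if bias is not None:
        scores = scores + bias
    if causal:
        seqlen_q, seqlen_k = q.shape[1], k.shape[1]
        mask = torch.ones(seqlen_q, seqlen_k, dtype=torch.bool, device=q.device).tril()
        scores = scores.masked_fill(~mask, float("-inf"))
    attn = torch.softmax(scores, dim=-1)
    return torch.einsum("bhqk,bkhd->bqhd", attn, v)
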
# Disabling autotune for now, set num_warps=4 if headdim=64 and num_warps=8 if headdim=128
|
| 50 |
+
# @triton.autotune(
|
| 51 |
+
# configs=[
|
| 52 |
+
# triton.Config({"BLOCK_M": 128, "BLOCK_N": 128}, num_warps=4, num_stages=1),
|
| 53 |
+
# # This config has a race condition when EVEN_M == False, disabling it for now.
|
| 54 |
+
# # triton.Config({"BLOCK_M": 64, "BLOCK_N": 64}, num_warps=4, num_stages=1),
|
| 55 |
+
# ],
|
| 56 |
+
# key=['CACHE_KEY_SEQLEN_Q', 'CACHE_KEY_SEQLEN_K', 'BIAS_TYPE', 'IS_CAUSAL', 'BLOCK_HEADDIM']
|
| 57 |
+
# )
|
| 58 |
+
@triton.heuristics(
|
| 59 |
+
{
|
| 60 |
+
"EVEN_M": lambda args: args["seqlen_q"] % args["BLOCK_M"] == 0,
|
| 61 |
+
"EVEN_N": lambda args: args["seqlen_k"] % args["BLOCK_N"] == 0,
|
| 62 |
+
"EVEN_HEADDIM": lambda args: args["headdim"] == args["BLOCK_HEADDIM"],
|
| 63 |
+
}
|
| 64 |
+
)
|
| 65 |
+
@triton.jit
|
| 66 |
+
def _fwd_kernel(
|
| 67 |
+
Q,
|
| 68 |
+
K,
|
| 69 |
+
V,
|
| 70 |
+
Bias,
|
| 71 |
+
Out,
|
| 72 |
+
Lse,
|
| 73 |
+
TMP, # NOTE: TMP is a scratchpad buffer to workaround a compiler bug
|
| 74 |
+
softmax_scale,
|
| 75 |
+
stride_qb,
|
| 76 |
+
stride_qh,
|
| 77 |
+
stride_qm,
|
| 78 |
+
stride_kb,
|
| 79 |
+
stride_kh,
|
| 80 |
+
stride_kn,
|
| 81 |
+
stride_vb,
|
| 82 |
+
stride_vh,
|
| 83 |
+
stride_vn,
|
| 84 |
+
stride_bb,
|
| 85 |
+
stride_bh,
|
| 86 |
+
stride_bm,
|
| 87 |
+
stride_ob,
|
| 88 |
+
stride_oh,
|
| 89 |
+
stride_om,
|
| 90 |
+
nheads,
|
| 91 |
+
seqlen_q,
|
| 92 |
+
seqlen_k,
|
| 93 |
+
seqlen_q_rounded,
|
| 94 |
+
headdim,
|
| 95 |
+
CACHE_KEY_SEQLEN_Q,
|
| 96 |
+
CACHE_KEY_SEQLEN_K,
|
| 97 |
+
BIAS_TYPE: tl.constexpr,
|
| 98 |
+
IS_CAUSAL: tl.constexpr,
|
| 99 |
+
BLOCK_HEADDIM: tl.constexpr,
|
| 100 |
+
EVEN_M: tl.constexpr,
|
| 101 |
+
EVEN_N: tl.constexpr,
|
| 102 |
+
EVEN_HEADDIM: tl.constexpr,
|
| 103 |
+
BLOCK_M: tl.constexpr,
|
| 104 |
+
BLOCK_N: tl.constexpr,
|
| 105 |
+
):
|
| 106 |
+
start_m = tl.program_id(0)
|
| 107 |
+
off_hb = tl.program_id(1)
|
| 108 |
+
off_b = off_hb // nheads
|
| 109 |
+
off_h = off_hb % nheads
|
| 110 |
+
# off_b = tl.program_id(1)
|
| 111 |
+
# off_h = tl.program_id(2)
|
| 112 |
+
# off_hb = off_b * nheads + off_h
|
| 113 |
+
# initialize offsets
|
| 114 |
+
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
|
| 115 |
+
offs_n = tl.arange(0, BLOCK_N)
|
| 116 |
+
offs_d = tl.arange(0, BLOCK_HEADDIM)
|
| 117 |
+
# Initialize pointers to Q, K, V
|
| 118 |
+
# Adding parenthesis around indexing might use int32 math instead of int64 math?
|
| 119 |
+
# https://github.com/openai/triton/issues/741
|
| 120 |
+
# I'm seeing a tiny bit of difference (5-7us)
|
| 121 |
+
q_ptrs = (
|
| 122 |
+
Q + off_b * stride_qb + off_h * stride_qh + (offs_m[:, None] * stride_qm + offs_d[None, :])
|
| 123 |
+
)
|
| 124 |
+
k_ptrs = (
|
| 125 |
+
K + off_b * stride_kb + off_h * stride_kh + (offs_n[:, None] * stride_kn + offs_d[None, :])
|
| 126 |
+
)
|
| 127 |
+
v_ptrs = (
|
| 128 |
+
V + off_b * stride_vb + off_h * stride_vh + (offs_n[:, None] * stride_vn + offs_d[None, :])
|
| 129 |
+
)
|
| 130 |
+
if BIAS_TYPE == "vector":
|
| 131 |
+
b_ptrs = Bias + off_b * stride_bb + off_h * stride_bh + offs_n
|
| 132 |
+
elif BIAS_TYPE == "matrix":
|
| 133 |
+
b_ptrs = (
|
| 134 |
+
Bias
|
| 135 |
+
+ off_b * stride_bb
|
| 136 |
+
+ off_h * stride_bh
|
| 137 |
+
+ (offs_m[:, None] * stride_bm + offs_n[None, :])
|
| 138 |
+
)
|
| 139 |
+
# initialize pointer to m and l
|
| 140 |
+
t_ptrs = TMP + off_hb * seqlen_q_rounded + offs_m
|
| 141 |
+
lse_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
|
| 142 |
+
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
|
| 143 |
+
acc_o = tl.zeros([BLOCK_M, BLOCK_HEADDIM], dtype=tl.float32)
|
| 144 |
+
# load q: it will stay in SRAM throughout
|
| 145 |
+
# [2022-10-30] TD: Triton bug - in the case of EVEN_M=True and EVEN_N=False, if we just call
|
| 146 |
+
# tl.load(q_ptrs), we get the wrong output!
|
| 147 |
+
if EVEN_M & EVEN_N:
|
| 148 |
+
if EVEN_HEADDIM:
|
| 149 |
+
q = tl.load(q_ptrs)
|
| 150 |
+
else:
|
| 151 |
+
q = tl.load(q_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
|
| 152 |
+
else:
|
| 153 |
+
if EVEN_HEADDIM:
|
| 154 |
+
q = tl.load(q_ptrs, mask=offs_m[:, None] < seqlen_q, other=0.0)
|
| 155 |
+
else:
|
| 156 |
+
q = tl.load(
|
| 157 |
+
q_ptrs, mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0
|
| 158 |
+
)
|
| 159 |
+
# loop over k, v and update accumulator
|
| 160 |
+
end_n = seqlen_k if not IS_CAUSAL else tl.minimum((start_m + 1) * BLOCK_M, seqlen_k)
|
| 161 |
+
for start_n in range(0, end_n, BLOCK_N):
|
| 162 |
+
start_n = tl.multiple_of(start_n, BLOCK_N)
|
| 163 |
+
# -- compute qk ----
|
| 164 |
+
if EVEN_N & EVEN_M: # If we just do "if EVEN_N", there seems to be some race condition
|
| 165 |
+
if EVEN_HEADDIM:
|
| 166 |
+
k = tl.load(k_ptrs + start_n * stride_kn)
|
| 167 |
+
else:
|
| 168 |
+
k = tl.load(k_ptrs + start_n * stride_kn, mask=offs_d[None, :] < headdim, other=0.0)
|
| 169 |
+
else:
|
| 170 |
+
if EVEN_HEADDIM:
|
| 171 |
+
k = tl.load(
|
| 172 |
+
k_ptrs + start_n * stride_kn,
|
| 173 |
+
mask=(start_n + offs_n)[:, None] < seqlen_k,
|
| 174 |
+
other=0.0,
|
| 175 |
+
)
|
| 176 |
+
else:
|
| 177 |
+
k = tl.load(
|
| 178 |
+
k_ptrs + start_n * stride_kn,
|
| 179 |
+
mask=((start_n + offs_n)[:, None] < seqlen_k) & (offs_d[None, :] < headdim),
|
| 180 |
+
other=0.0,
|
| 181 |
+
)
|
| 182 |
+
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
|
| 183 |
+
qk += tl.dot(q, k, trans_b=True)
|
| 184 |
+
# Trying to combine the two masks seems to make the result wrong
|
| 185 |
+
if not EVEN_N: # Need to mask out otherwise the softmax is wrong
|
| 186 |
+
qk += tl.where((start_n + offs_n)[None, :] < seqlen_k, 0, float("-inf"))
|
| 187 |
+
if IS_CAUSAL:
|
| 188 |
+
qk += tl.where(offs_m[:, None] >= (start_n + offs_n)[None, :], 0, float("-inf"))
|
| 189 |
+
if BIAS_TYPE != "none":
|
| 190 |
+
if BIAS_TYPE == "vector":
|
| 191 |
+
if EVEN_N:
|
| 192 |
+
bias = tl.load(b_ptrs + start_n).to(tl.float32)
|
| 193 |
+
else:
|
| 194 |
+
bias = tl.load(
|
| 195 |
+
b_ptrs + start_n, mask=(start_n + offs_n) < seqlen_k, other=0.0
|
| 196 |
+
).to(tl.float32)
|
| 197 |
+
bias = bias[None, :]
|
| 198 |
+
elif BIAS_TYPE == "matrix":
|
| 199 |
+
if EVEN_M & EVEN_N:
|
| 200 |
+
bias = tl.load(b_ptrs + start_n).to(tl.float32)
|
| 201 |
+
else:
|
| 202 |
+
bias = tl.load(
|
| 203 |
+
b_ptrs + start_n,
|
| 204 |
+
mask=(offs_m[:, None] < seqlen_q)
|
| 205 |
+
& ((start_n + offs_n)[None, :] < seqlen_k),
|
| 206 |
+
other=0.0,
|
| 207 |
+
).to(tl.float32)
|
| 208 |
+
# Slightly faster to multiply the softmax_scale in the tl.exp below since the compiler
|
| 209 |
+
# can then fuse the mult and add into an fma instruction. But if we have bias we need to
|
| 210 |
+
# multiply with softmax_scale here.
|
| 211 |
+
qk = qk * softmax_scale + bias
|
| 212 |
+
m_ij = tl.maximum(tl.max(qk, 1), lse_i)
|
| 213 |
+
p = tl.exp(qk - m_ij[:, None])
|
| 214 |
+
else:
|
| 215 |
+
m_ij = tl.maximum(tl.max(qk, 1) * softmax_scale, lse_i)
|
| 216 |
+
p = tl.exp(qk * softmax_scale - m_ij[:, None])
|
| 217 |
+
l_ij = tl.sum(p, 1)
|
| 218 |
+
|
| 219 |
+
# scale acc_o
|
| 220 |
+
acc_o_scale = tl.exp(m_i - m_ij)
|
| 221 |
+
|
| 222 |
+
# # -- update output accumulator --
|
| 223 |
+
# BUG: have to store and immediately load
|
| 224 |
+
tl.store(t_ptrs, acc_o_scale)
|
| 225 |
+
acc_o_scale = tl.load(t_ptrs)
|
| 226 |
+
acc_o = acc_o * acc_o_scale[:, None]
|
| 227 |
+
# update acc_o
|
| 228 |
+
if EVEN_N & EVEN_M: # If we just do "if EVEN_N", there seems to be some race condition
|
| 229 |
+
if EVEN_HEADDIM:
|
| 230 |
+
v = tl.load(v_ptrs + start_n * stride_vn)
|
| 231 |
+
else:
|
| 232 |
+
v = tl.load(v_ptrs + start_n * stride_vn, mask=offs_d[None, :] < headdim, other=0.0)
|
| 233 |
+
else:
|
| 234 |
+
if EVEN_HEADDIM:
|
| 235 |
+
v = tl.load(
|
| 236 |
+
v_ptrs + start_n * stride_vn,
|
| 237 |
+
mask=(start_n + offs_n)[:, None] < seqlen_k,
|
| 238 |
+
other=0.0,
|
| 239 |
+
)
|
| 240 |
+
else:
|
| 241 |
+
v = tl.load(
|
| 242 |
+
v_ptrs + start_n * stride_vn,
|
| 243 |
+
mask=((start_n + offs_n)[:, None] < seqlen_k) & (offs_d[None, :] < headdim),
|
| 244 |
+
other=0.0,
|
| 245 |
+
)
|
| 246 |
+
p = p.to(v.dtype)
|
| 247 |
+
acc_o += tl.dot(p, v)
|
| 248 |
+
|
| 249 |
+
# -- update statistics
|
| 250 |
+
m_i = m_ij
|
| 251 |
+
l_i_new = tl.exp(lse_i - m_ij) + l_ij
|
| 252 |
+
lse_i = m_ij + tl.log(l_i_new)
|
| 253 |
+
|
| 254 |
+
o_scale = tl.exp(m_i - lse_i)
|
| 255 |
+
# BUG: have to store and immediately load
|
| 256 |
+
tl.store(t_ptrs, o_scale)
|
| 257 |
+
o_scale = tl.load(t_ptrs)
|
| 258 |
+
acc_o = acc_o * o_scale[:, None]
|
| 259 |
+
# rematerialize offsets to save registers
|
| 260 |
+
start_m = tl.program_id(0)
|
| 261 |
+
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
|
| 262 |
+
# write back l and m
|
| 263 |
+
lse_ptrs = Lse + off_hb * seqlen_q_rounded + offs_m
|
| 264 |
+
tl.store(lse_ptrs, lse_i)
|
| 265 |
+
# initialize pointers to output
|
| 266 |
+
offs_d = tl.arange(0, BLOCK_HEADDIM)
|
| 267 |
+
out_ptrs = (
|
| 268 |
+
Out
|
| 269 |
+
+ off_b * stride_ob
|
| 270 |
+
+ off_h * stride_oh
|
| 271 |
+
+ (offs_m[:, None] * stride_om + offs_d[None, :])
|
| 272 |
+
)
|
| 273 |
+
if EVEN_M:
|
| 274 |
+
if EVEN_HEADDIM:
|
| 275 |
+
tl.store(out_ptrs, acc_o)
|
| 276 |
+
else:
|
| 277 |
+
tl.store(out_ptrs, acc_o, mask=offs_d[None, :] < headdim)
|
| 278 |
+
else:
|
| 279 |
+
if EVEN_HEADDIM:
|
| 280 |
+
tl.store(out_ptrs, acc_o, mask=offs_m[:, None] < seqlen_q)
|
| 281 |
+
else:
|
| 282 |
+
tl.store(
|
| 283 |
+
out_ptrs, acc_o, mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim)
|
| 284 |
+
)
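# --- Reference sketch (not part of the original file) -------------------------------
# The m_i / lse_i / acc_o bookkeeping in _fwd_kernel above is the standard online-softmax
# recurrence. The plain-PyTorch helper below (hypothetical name, one query row, a list of
# key/value blocks) mirrors the same update, including taking the running max against lse_i:
#
#     def online_softmax_row_reference(q, k_blocks, v_blocks, scale):
#         m_prev = torch.tensor(float("-inf"))   # running max, like m_i
#         lse = torch.tensor(float("-inf"))      # running log-sum-exp, like lse_i
#         acc = torch.zeros(v_blocks[0].shape[-1])
#         for k_blk, v_blk in zip(k_blocks, v_blocks):
#             s = (k_blk @ q) * scale                            # scores for this key block
#             m_new = torch.maximum(s.max(), lse)
#             p = torch.exp(s - m_new)
#             acc = acc * torch.exp(m_prev - m_new) + p @ v_blk  # rescale old accumulator
#             lse = m_new + torch.log(torch.exp(lse - m_new) + p.sum())
#             m_prev = m_new
#         return acc * torch.exp(m_prev - lse)                   # softmax(all scores) @ V
# -------------------------------------------------------------------------------------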
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
@triton.jit
|
| 288 |
+
def _bwd_preprocess_do_o_dot(
|
| 289 |
+
Out,
|
| 290 |
+
DO,
|
| 291 |
+
Delta,
|
| 292 |
+
stride_ob,
|
| 293 |
+
stride_oh,
|
| 294 |
+
stride_om,
|
| 295 |
+
stride_dob,
|
| 296 |
+
stride_doh,
|
| 297 |
+
stride_dom,
|
| 298 |
+
nheads,
|
| 299 |
+
seqlen_q,
|
| 300 |
+
seqlen_q_rounded,
|
| 301 |
+
headdim,
|
| 302 |
+
BLOCK_M: tl.constexpr,
|
| 303 |
+
BLOCK_HEADDIM: tl.constexpr,
|
| 304 |
+
):
|
| 305 |
+
start_m = tl.program_id(0)
|
| 306 |
+
off_hb = tl.program_id(1)
|
| 307 |
+
off_b = off_hb // nheads
|
| 308 |
+
off_h = off_hb % nheads
|
| 309 |
+
# initialize offsets
|
| 310 |
+
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
|
| 311 |
+
offs_d = tl.arange(0, BLOCK_HEADDIM)
|
| 312 |
+
# load
|
| 313 |
+
o = tl.load(
|
| 314 |
+
Out + off_b * stride_ob + off_h * stride_oh + offs_m[:, None] * stride_om + offs_d[None, :],
|
| 315 |
+
mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
|
| 316 |
+
other=0.0,
|
| 317 |
+
).to(tl.float32)
|
| 318 |
+
do = tl.load(
|
| 319 |
+
DO
|
| 320 |
+
+ off_b * stride_dob
|
| 321 |
+
+ off_h * stride_doh
|
| 322 |
+
+ offs_m[:, None] * stride_dom
|
| 323 |
+
+ offs_d[None, :],
|
| 324 |
+
mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
|
| 325 |
+
other=0.0,
|
| 326 |
+
).to(tl.float32)
|
| 327 |
+
delta = tl.sum(o * do, axis=1)
|
| 328 |
+
# write-back
|
| 329 |
+
tl.store(Delta + off_hb * seqlen_q_rounded + offs_m, delta)
|
| 330 |
+
|
| 331 |
+
|
| 332 |
+
@triton.jit
|
| 333 |
+
def _bwd_store_dk_dv(
|
| 334 |
+
dk_ptrs,
|
| 335 |
+
dv_ptrs,
|
| 336 |
+
dk,
|
| 337 |
+
dv,
|
| 338 |
+
offs_n,
|
| 339 |
+
offs_d,
|
| 340 |
+
seqlen_k,
|
| 341 |
+
headdim,
|
| 342 |
+
EVEN_M: tl.constexpr,
|
| 343 |
+
EVEN_N: tl.constexpr,
|
| 344 |
+
EVEN_HEADDIM: tl.constexpr,
|
| 345 |
+
):
|
| 346 |
+
# [2022-11-01] TD: Same bug. In the case of EVEN_N=True and EVEN_M=False,
|
| 347 |
+
# if we just call tl.store(dv_ptrs), there's a race condition
|
| 348 |
+
if EVEN_N & EVEN_M:
|
| 349 |
+
if EVEN_HEADDIM:
|
| 350 |
+
tl.store(dv_ptrs, dv)
|
| 351 |
+
tl.store(dk_ptrs, dk)
|
| 352 |
+
else:
|
| 353 |
+
tl.store(dv_ptrs, dv, mask=offs_d[None, :] < headdim)
|
| 354 |
+
tl.store(dk_ptrs, dk, mask=offs_d[None, :] < headdim)
|
| 355 |
+
else:
|
| 356 |
+
if EVEN_HEADDIM:
|
| 357 |
+
tl.store(dv_ptrs, dv, mask=offs_n[:, None] < seqlen_k)
|
| 358 |
+
tl.store(dk_ptrs, dk, mask=offs_n[:, None] < seqlen_k)
|
| 359 |
+
else:
|
| 360 |
+
tl.store(dv_ptrs, dv, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim))
|
| 361 |
+
tl.store(dk_ptrs, dk, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim))
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
@triton.jit
|
| 365 |
+
def _bwd_kernel_one_col_block(
|
| 366 |
+
start_n,
|
| 367 |
+
Q,
|
| 368 |
+
K,
|
| 369 |
+
V,
|
| 370 |
+
Bias,
|
| 371 |
+
DO,
|
| 372 |
+
DQ,
|
| 373 |
+
DK,
|
| 374 |
+
DV,
|
| 375 |
+
LSE,
|
| 376 |
+
D,
|
| 377 |
+
softmax_scale,
|
| 378 |
+
stride_qm,
|
| 379 |
+
stride_kn,
|
| 380 |
+
stride_vn,
|
| 381 |
+
stride_bm,
|
| 382 |
+
stride_dom,
|
| 383 |
+
stride_dqm,
|
| 384 |
+
stride_dkn,
|
| 385 |
+
stride_dvn,
|
| 386 |
+
seqlen_q,
|
| 387 |
+
seqlen_k,
|
| 388 |
+
headdim,
|
| 389 |
+
ATOMIC_ADD: tl.constexpr,
|
| 390 |
+
BIAS_TYPE: tl.constexpr,
|
| 391 |
+
IS_CAUSAL: tl.constexpr,
|
| 392 |
+
BLOCK_HEADDIM: tl.constexpr,
|
| 393 |
+
EVEN_M: tl.constexpr,
|
| 394 |
+
EVEN_N: tl.constexpr,
|
| 395 |
+
EVEN_HEADDIM: tl.constexpr,
|
| 396 |
+
BLOCK_M: tl.constexpr,
|
| 397 |
+
BLOCK_N: tl.constexpr,
|
| 398 |
+
):
|
| 399 |
+
# We need to make sure begin_m is a multiple of BLOCK_M (not BLOCK_N)
|
| 400 |
+
begin_m = 0 if not IS_CAUSAL else ((start_n * BLOCK_N) // BLOCK_M) * BLOCK_M
|
| 401 |
+
# initialize row/col offsets
|
| 402 |
+
offs_qm = begin_m + tl.arange(0, BLOCK_M)
|
| 403 |
+
offs_n = start_n * BLOCK_N + tl.arange(0, BLOCK_N)
|
| 404 |
+
offs_m = tl.arange(0, BLOCK_M)
|
| 405 |
+
offs_d = tl.arange(0, BLOCK_HEADDIM)
|
| 406 |
+
# initialize pointers to value-like data
|
| 407 |
+
q_ptrs = Q + (offs_qm[:, None] * stride_qm + offs_d[None, :])
|
| 408 |
+
k_ptrs = K + (offs_n[:, None] * stride_kn + offs_d[None, :])
|
| 409 |
+
v_ptrs = V + (offs_n[:, None] * stride_vn + offs_d[None, :])
|
| 410 |
+
do_ptrs = DO + (offs_qm[:, None] * stride_dom + offs_d[None, :])
|
| 411 |
+
dq_ptrs = DQ + (offs_qm[:, None] * stride_dqm + offs_d[None, :])
|
| 412 |
+
if BIAS_TYPE == "vector":
|
| 413 |
+
b_ptrs = Bias + offs_n
|
| 414 |
+
elif BIAS_TYPE == "matrix":
|
| 415 |
+
b_ptrs = Bias + (offs_qm[:, None] * stride_bm + offs_n[None, :])
|
| 416 |
+
# initialize dv and dk
|
| 417 |
+
dv = tl.zeros([BLOCK_N, BLOCK_HEADDIM], dtype=tl.float32)
|
| 418 |
+
dk = tl.zeros([BLOCK_N, BLOCK_HEADDIM], dtype=tl.float32)
|
| 419 |
+
# There seems to be some problem with Triton pipelining that makes results wrong for
|
| 420 |
+
# headdim=64, seqlen=(113, 255), bias_type='matrix'. In this case the for loop
|
| 421 |
+
# may have zero step, and pipelining with the bias matrix could screw it up.
|
| 422 |
+
# So we just exit early.
|
| 423 |
+
if begin_m >= seqlen_q:
|
| 424 |
+
dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_d[None, :])
|
| 425 |
+
dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_d[None, :])
|
| 426 |
+
_bwd_store_dk_dv(
|
| 427 |
+
dk_ptrs,
|
| 428 |
+
dv_ptrs,
|
| 429 |
+
dk,
|
| 430 |
+
dv,
|
| 431 |
+
offs_n,
|
| 432 |
+
offs_d,
|
| 433 |
+
seqlen_k,
|
| 434 |
+
headdim,
|
| 435 |
+
EVEN_M=EVEN_M,
|
| 436 |
+
EVEN_N=EVEN_N,
|
| 437 |
+
EVEN_HEADDIM=EVEN_HEADDIM,
|
| 438 |
+
)
|
| 439 |
+
return
|
| 440 |
+
# k and v stay in SRAM throughout
|
| 441 |
+
# [2022-10-30] TD: Same bug as the fwd. In the case of EVEN_N=True and EVEN_M=False,
|
| 442 |
+
# if we just call tl.load(k_ptrs), we get the wrong output!
|
| 443 |
+
if EVEN_N & EVEN_M:
|
| 444 |
+
if EVEN_HEADDIM:
|
| 445 |
+
k = tl.load(k_ptrs)
|
| 446 |
+
v = tl.load(v_ptrs)
|
| 447 |
+
else:
|
| 448 |
+
k = tl.load(k_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
|
| 449 |
+
v = tl.load(v_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
|
| 450 |
+
else:
|
| 451 |
+
if EVEN_HEADDIM:
|
| 452 |
+
k = tl.load(k_ptrs, mask=offs_n[:, None] < seqlen_k, other=0.0)
|
| 453 |
+
v = tl.load(v_ptrs, mask=offs_n[:, None] < seqlen_k, other=0.0)
|
| 454 |
+
else:
|
| 455 |
+
k = tl.load(
|
| 456 |
+
k_ptrs, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim), other=0.0
|
| 457 |
+
)
|
| 458 |
+
v = tl.load(
|
| 459 |
+
v_ptrs, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim), other=0.0
|
| 460 |
+
)
|
| 461 |
+
# loop over rows
|
| 462 |
+
num_block_m = tl.cdiv(seqlen_q, BLOCK_M)
|
| 463 |
+
for start_m in range(begin_m, num_block_m * BLOCK_M, BLOCK_M):
|
| 464 |
+
start_m = tl.multiple_of(start_m, BLOCK_M)
|
| 465 |
+
offs_m_curr = start_m + offs_m
|
| 466 |
+
# load q, k, v, do on-chip
|
| 467 |
+
# Same bug as below. Otherwise gives wrong result for headdim=40, seqlen=(128, 117)
|
| 468 |
+
if EVEN_M & EVEN_HEADDIM:
|
| 469 |
+
q = tl.load(q_ptrs)
|
| 470 |
+
else:
|
| 471 |
+
if EVEN_HEADDIM:
|
| 472 |
+
q = tl.load(q_ptrs, mask=offs_m_curr[:, None] < seqlen_q, other=0.0)
|
| 473 |
+
else:
|
| 474 |
+
q = tl.load(
|
| 475 |
+
q_ptrs,
|
| 476 |
+
mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
|
| 477 |
+
other=0.0,
|
| 478 |
+
)
|
| 479 |
+
# recompute p = softmax(qk, dim=-1).T
|
| 480 |
+
qk = tl.dot(q, k, trans_b=True)
|
| 481 |
+
# Trying to combine the two masks seems to make the result wrong
|
| 482 |
+
if not EVEN_N: # Need to mask out otherwise the softmax is wrong
|
| 483 |
+
qk = tl.where(offs_n[None, :] < seqlen_k, qk, float("-inf"))
|
| 484 |
+
if IS_CAUSAL:
|
| 485 |
+
qk = tl.where(offs_m_curr[:, None] >= (offs_n[None, :]), qk, float("-inf"))
|
| 486 |
+
if BIAS_TYPE != "none":
|
| 487 |
+
tl.debug_barrier() # Race condition otherwise
|
| 488 |
+
if BIAS_TYPE == "vector":
|
| 489 |
+
if EVEN_N:
|
| 490 |
+
bias = tl.load(b_ptrs).to(tl.float32)
|
| 491 |
+
else:
|
| 492 |
+
bias = tl.load(b_ptrs, mask=offs_n < seqlen_k, other=0.0).to(tl.float32)
|
| 493 |
+
bias = bias[None, :]
|
| 494 |
+
elif BIAS_TYPE == "matrix":
|
| 495 |
+
if EVEN_M & EVEN_N:
|
| 496 |
+
bias = tl.load(b_ptrs).to(tl.float32)
|
| 497 |
+
else:
|
| 498 |
+
bias = tl.load(
|
| 499 |
+
b_ptrs,
|
| 500 |
+
mask=(offs_m_curr[:, None] < seqlen_q) & (offs_n[None, :] < seqlen_k),
|
| 501 |
+
other=0.0,
|
| 502 |
+
).to(tl.float32)
|
| 503 |
+
qk = qk * softmax_scale + bias
|
| 504 |
+
# There seems to be a race condition when headdim=48/96, and dq, dk, dv are wrong.
|
| 505 |
+
# Also wrong for headdim=64.
|
| 506 |
+
if not (EVEN_M & EVEN_HEADDIM):
|
| 507 |
+
tl.debug_barrier()
|
| 508 |
+
lse_i = tl.load(LSE + offs_m_curr)
|
| 509 |
+
if BIAS_TYPE == "none":
|
| 510 |
+
p = tl.exp(qk * softmax_scale - lse_i[:, None])
|
| 511 |
+
else:
|
| 512 |
+
p = tl.exp(qk - lse_i[:, None])
|
| 513 |
+
# compute dv
|
| 514 |
+
# [2022-10-30] TD: A Triton bug: if EVEN_M=True and EVEN_HEADDIM=False, if we call
|
| 515 |
+
# do = tl.load(do_ptrs, mask=offs_d[None, :] < headdim, other=0.0), we get wrong outputs
|
| 516 |
+
# in the case of headdim=48/96, seqlen_q & seqlen_k >= 512. If headdim=40 or seqlen < 512,
|
| 517 |
+
# the output is correct.
|
| 518 |
+
if EVEN_M & EVEN_HEADDIM:
|
| 519 |
+
do = tl.load(do_ptrs)
|
| 520 |
+
else:
|
| 521 |
+
# [2022-11-01] TD: Triton bug, there's a race condition if we just use m_mask and not d_mask.
|
| 522 |
+
do = tl.load(
|
| 523 |
+
do_ptrs,
|
| 524 |
+
mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
|
| 525 |
+
other=0.0,
|
| 526 |
+
)
|
| 527 |
+
# if EVEN_M:
|
| 528 |
+
# if EVEN_HEADDIM:
|
| 529 |
+
# do = tl.load(do_ptrs)
|
| 530 |
+
# else:
|
| 531 |
+
# do = tl.load(do_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
|
| 532 |
+
# else:
|
| 533 |
+
# if EVEN_HEADDIM:
|
| 534 |
+
# do = tl.load(do_ptrs, mask=offs_m_curr[:, None] < seqlen_q, other=0.0)
|
| 535 |
+
# else:
|
| 536 |
+
# do = tl.load(do_ptrs, mask=(offs_m_curr[:, None] < seqlen_q)
|
| 537 |
+
# & (offs_d[None, :] < headdim), other=0.0)
|
| 538 |
+
dv += tl.dot(p.to(do.dtype), do, trans_a=True)
|
| 539 |
+
# compute dp = dot(v, do)
|
| 540 |
+
# There seems to be a race condition when headdim=48/96, and dq, dk are wrong.
|
| 541 |
+
# Also wrong for headdim=128, seqlen=(108, 256), and ATOMIC_ADD=True
|
| 542 |
+
# Also wrong for headdim=64, seqlen=(1023, 1024), and ATOMIC_ADD=False
|
| 543 |
+
if not (EVEN_M & EVEN_HEADDIM):
|
| 544 |
+
tl.debug_barrier()
|
| 545 |
+
dp = tl.dot(do, v, trans_b=True)
|
| 546 |
+
# There's a race condition for headdim=48
|
| 547 |
+
if not EVEN_HEADDIM:
|
| 548 |
+
tl.debug_barrier()
|
| 549 |
+
# compute ds = p * (dp - delta[:, None])
|
| 550 |
+
# Putting the subtraction after the dp matmul (instead of before) is slightly faster
|
| 551 |
+
Di = tl.load(D + offs_m_curr)
|
| 552 |
+
# Converting ds to q.dtype here reduces register pressure and makes it much faster
|
| 553 |
+
# for BLOCK_HEADDIM=128
|
| 554 |
+
ds = (p * (dp - Di[:, None]) * softmax_scale).to(q.dtype)
|
| 555 |
+
# compute dk = dot(ds.T, q)
|
| 556 |
+
dk += tl.dot(ds, q, trans_a=True)
|
| 557 |
+
# compute dq
|
| 558 |
+
if not (
|
| 559 |
+
EVEN_M & EVEN_HEADDIM
|
| 560 |
+
): # Otherwise there's a race condition when BIAS_TYPE='matrix'
|
| 561 |
+
tl.debug_barrier()
|
| 562 |
+
if not ATOMIC_ADD:
|
| 563 |
+
if EVEN_M & EVEN_HEADDIM: # Race condition if we just do EVEN_M
|
| 564 |
+
dq = tl.load(dq_ptrs, eviction_policy="evict_last")
|
| 565 |
+
dq += tl.dot(ds, k)
|
| 566 |
+
tl.store(dq_ptrs, dq, eviction_policy="evict_last")
|
| 567 |
+
else:
|
| 568 |
+
if EVEN_HEADDIM:
|
| 569 |
+
dq = tl.load(
|
| 570 |
+
dq_ptrs,
|
| 571 |
+
mask=offs_m_curr[:, None] < seqlen_q,
|
| 572 |
+
other=0.0,
|
| 573 |
+
eviction_policy="evict_last",
|
| 574 |
+
)
|
| 575 |
+
dq += tl.dot(ds, k)
|
| 576 |
+
tl.store(
|
| 577 |
+
dq_ptrs,
|
| 578 |
+
dq,
|
| 579 |
+
mask=offs_m_curr[:, None] < seqlen_q,
|
| 580 |
+
eviction_policy="evict_last",
|
| 581 |
+
)
|
| 582 |
+
else:
|
| 583 |
+
dq = tl.load(
|
| 584 |
+
dq_ptrs,
|
| 585 |
+
mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
|
| 586 |
+
other=0.0,
|
| 587 |
+
eviction_policy="evict_last",
|
| 588 |
+
)
|
| 589 |
+
dq += tl.dot(ds, k)
|
| 590 |
+
tl.store(
|
| 591 |
+
dq_ptrs,
|
| 592 |
+
dq,
|
| 593 |
+
mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
|
| 594 |
+
eviction_policy="evict_last",
|
| 595 |
+
)
|
| 596 |
+
else: # If we're parallelizing across the seqlen_k dimension
|
| 597 |
+
dq = tl.dot(ds, k)
|
| 598 |
+
if EVEN_M & EVEN_HEADDIM: # Race condition if we just do EVEN_M
|
| 599 |
+
tl.atomic_add(dq_ptrs, dq)
|
| 600 |
+
else:
|
| 601 |
+
if EVEN_HEADDIM:
|
| 602 |
+
tl.atomic_add(dq_ptrs, dq, mask=offs_m_curr[:, None] < seqlen_q)
|
| 603 |
+
else:
|
| 604 |
+
tl.atomic_add(
|
| 605 |
+
dq_ptrs,
|
| 606 |
+
dq,
|
| 607 |
+
mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
|
| 608 |
+
)
|
| 609 |
+
# increment pointers
|
| 610 |
+
dq_ptrs += BLOCK_M * stride_dqm
|
| 611 |
+
q_ptrs += BLOCK_M * stride_qm
|
| 612 |
+
do_ptrs += BLOCK_M * stride_dom
|
| 613 |
+
if BIAS_TYPE == "matrix":
|
| 614 |
+
b_ptrs += BLOCK_M * stride_bm
|
| 615 |
+
# write-back
|
| 616 |
+
dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_d[None, :])
|
| 617 |
+
dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_d[None, :])
|
| 618 |
+
_bwd_store_dk_dv(
|
| 619 |
+
dk_ptrs,
|
| 620 |
+
dv_ptrs,
|
| 621 |
+
dk,
|
| 622 |
+
dv,
|
| 623 |
+
offs_n,
|
| 624 |
+
offs_d,
|
| 625 |
+
seqlen_k,
|
| 626 |
+
headdim,
|
| 627 |
+
EVEN_M=EVEN_M,
|
| 628 |
+
EVEN_N=EVEN_N,
|
| 629 |
+
EVEN_HEADDIM=EVEN_HEADDIM,
|
| 630 |
+
)
|
| 631 |
+
|
| 632 |
+
|
| 633 |
+
def init_to_zero(name):
|
| 634 |
+
return lambda nargs: nargs[name].zero_()
|
| 635 |
+
|
| 636 |
+
|
| 637 |
+
@triton.autotune(
|
| 638 |
+
configs=[
|
| 639 |
+
triton.Config(
|
| 640 |
+
{"BLOCK_M": 128, "BLOCK_N": 128, "SEQUENCE_PARALLEL": False},
|
| 641 |
+
num_warps=8,
|
| 642 |
+
num_stages=1,
|
| 643 |
+
pre_hook=init_to_zero("DQ"),
|
| 644 |
+
),
|
| 645 |
+
triton.Config(
|
| 646 |
+
{"BLOCK_M": 128, "BLOCK_N": 128, "SEQUENCE_PARALLEL": True},
|
| 647 |
+
num_warps=8,
|
| 648 |
+
num_stages=1,
|
| 649 |
+
pre_hook=init_to_zero("DQ"),
|
| 650 |
+
),
|
| 651 |
+
# Other configs seem to give wrong results when seqlen_q % 128 != 0, disabling them for now
|
| 652 |
+
# # Kernel is buggy (give wrong result) if we set BLOCK_m=128, BLOCK_n=64, num_warps=*4*
|
| 653 |
+
# triton.Config({"BLOCK_M": 128, "BLOCK_N": 64, "SEQUENCE_PARALLEL": False}, num_warps=8, num_stages=1, pre_hook=init_to_zero('DQ')),
|
| 654 |
+
# triton.Config({"BLOCK_M": 128, "BLOCK_N": 64, "SEQUENCE_PARALLEL": True}, num_warps=8, num_stages=1, pre_hook=init_to_zero('DQ')),
|
| 655 |
+
# triton.Config({"BLOCK_M": 64, "BLOCK_N": 64, "SEQUENCE_PARALLEL": False}, num_warps=4, num_stages=1, pre_hook=init_to_zero('DQ')),
|
| 656 |
+
# triton.Config({"BLOCK_M": 64, "BLOCK_N": 64, "SEQUENCE_PARALLEL": True}, num_warps=4, num_stages=1, pre_hook=init_to_zero('DQ')),
|
| 657 |
+
],
|
| 658 |
+
key=["CACHE_KEY_SEQLEN_Q", "CACHE_KEY_SEQLEN_K", "BIAS_TYPE", "IS_CAUSAL", "BLOCK_HEADDIM"],
|
| 659 |
+
)
|
| 660 |
+
@triton.heuristics(
|
| 661 |
+
{
|
| 662 |
+
"EVEN_M": lambda args: args["seqlen_q"] % args["BLOCK_M"] == 0,
|
| 663 |
+
"EVEN_N": lambda args: args["seqlen_k"] % args["BLOCK_N"] == 0,
|
| 664 |
+
"EVEN_HEADDIM": lambda args: args["headdim"] == args["BLOCK_HEADDIM"],
|
| 665 |
+
}
|
| 666 |
+
)
|
| 667 |
+
@triton.jit
|
| 668 |
+
def _bwd_kernel(
|
| 669 |
+
Q,
|
| 670 |
+
K,
|
| 671 |
+
V,
|
| 672 |
+
Bias,
|
| 673 |
+
DO,
|
| 674 |
+
DQ,
|
| 675 |
+
DK,
|
| 676 |
+
DV,
|
| 677 |
+
LSE,
|
| 678 |
+
D,
|
| 679 |
+
softmax_scale,
|
| 680 |
+
stride_qb,
|
| 681 |
+
stride_qh,
|
| 682 |
+
stride_qm,
|
| 683 |
+
stride_kb,
|
| 684 |
+
stride_kh,
|
| 685 |
+
stride_kn,
|
| 686 |
+
stride_vb,
|
| 687 |
+
stride_vh,
|
| 688 |
+
stride_vn,
|
| 689 |
+
stride_bb,
|
| 690 |
+
stride_bh,
|
| 691 |
+
stride_bm,
|
| 692 |
+
stride_dob,
|
| 693 |
+
stride_doh,
|
| 694 |
+
stride_dom,
|
| 695 |
+
stride_dqb,
|
| 696 |
+
stride_dqh,
|
| 697 |
+
stride_dqm,
|
| 698 |
+
stride_dkb,
|
| 699 |
+
stride_dkh,
|
| 700 |
+
stride_dkn,
|
| 701 |
+
stride_dvb,
|
| 702 |
+
stride_dvh,
|
| 703 |
+
stride_dvn,
|
| 704 |
+
nheads,
|
| 705 |
+
seqlen_q,
|
| 706 |
+
seqlen_k,
|
| 707 |
+
seqlen_q_rounded,
|
| 708 |
+
headdim,
|
| 709 |
+
CACHE_KEY_SEQLEN_Q,
|
| 710 |
+
CACHE_KEY_SEQLEN_K,
|
| 711 |
+
BIAS_TYPE: tl.constexpr,
|
| 712 |
+
IS_CAUSAL: tl.constexpr,
|
| 713 |
+
BLOCK_HEADDIM: tl.constexpr,
|
| 714 |
+
SEQUENCE_PARALLEL: tl.constexpr,
|
| 715 |
+
EVEN_M: tl.constexpr,
|
| 716 |
+
EVEN_N: tl.constexpr,
|
| 717 |
+
EVEN_HEADDIM: tl.constexpr,
|
| 718 |
+
BLOCK_M: tl.constexpr,
|
| 719 |
+
BLOCK_N: tl.constexpr,
|
| 720 |
+
):
|
| 721 |
+
off_hb = tl.program_id(1)
|
| 722 |
+
off_b = off_hb // nheads
|
| 723 |
+
off_h = off_hb % nheads
|
| 724 |
+
# offset pointers for batch/head
|
| 725 |
+
Q += off_b * stride_qb + off_h * stride_qh
|
| 726 |
+
K += off_b * stride_kb + off_h * stride_kh
|
| 727 |
+
V += off_b * stride_vb + off_h * stride_vh
|
| 728 |
+
DO += off_b * stride_dob + off_h * stride_doh
|
| 729 |
+
DQ += off_b * stride_dqb + off_h * stride_dqh
|
| 730 |
+
DK += off_b * stride_dkb + off_h * stride_dkh
|
| 731 |
+
DV += off_b * stride_dvb + off_h * stride_dvh
|
| 732 |
+
if BIAS_TYPE != "none":
|
| 733 |
+
Bias += off_b * stride_bb + off_h * stride_bh
|
| 734 |
+
# pointer to row-wise quantities in value-like data
|
| 735 |
+
D += off_hb * seqlen_q_rounded
|
| 736 |
+
LSE += off_hb * seqlen_q_rounded
|
| 737 |
+
if not SEQUENCE_PARALLEL:
|
| 738 |
+
num_block_n = tl.cdiv(seqlen_k, BLOCK_N)
|
| 739 |
+
for start_n in range(0, num_block_n):
|
| 740 |
+
_bwd_kernel_one_col_block(
|
| 741 |
+
start_n,
|
| 742 |
+
Q,
|
| 743 |
+
K,
|
| 744 |
+
V,
|
| 745 |
+
Bias,
|
| 746 |
+
DO,
|
| 747 |
+
DQ,
|
| 748 |
+
DK,
|
| 749 |
+
DV,
|
| 750 |
+
LSE,
|
| 751 |
+
D,
|
| 752 |
+
softmax_scale,
|
| 753 |
+
stride_qm,
|
| 754 |
+
stride_kn,
|
| 755 |
+
stride_vn,
|
| 756 |
+
stride_bm,
|
| 757 |
+
stride_dom,
|
| 758 |
+
stride_dqm,
|
| 759 |
+
stride_dkn,
|
| 760 |
+
stride_dvn,
|
| 761 |
+
seqlen_q,
|
| 762 |
+
seqlen_k,
|
| 763 |
+
headdim,
|
| 764 |
+
ATOMIC_ADD=False,
|
| 765 |
+
BIAS_TYPE=BIAS_TYPE,
|
| 766 |
+
IS_CAUSAL=IS_CAUSAL,
|
| 767 |
+
BLOCK_HEADDIM=BLOCK_HEADDIM,
|
| 768 |
+
EVEN_M=EVEN_M,
|
| 769 |
+
EVEN_N=EVEN_N,
|
| 770 |
+
EVEN_HEADDIM=EVEN_HEADDIM,
|
| 771 |
+
BLOCK_M=BLOCK_M,
|
| 772 |
+
BLOCK_N=BLOCK_N,
|
| 773 |
+
)
|
| 774 |
+
else:
|
| 775 |
+
start_n = tl.program_id(0)
|
| 776 |
+
_bwd_kernel_one_col_block(
|
| 777 |
+
start_n,
|
| 778 |
+
Q,
|
| 779 |
+
K,
|
| 780 |
+
V,
|
| 781 |
+
Bias,
|
| 782 |
+
DO,
|
| 783 |
+
DQ,
|
| 784 |
+
DK,
|
| 785 |
+
DV,
|
| 786 |
+
LSE,
|
| 787 |
+
D,
|
| 788 |
+
softmax_scale,
|
| 789 |
+
stride_qm,
|
| 790 |
+
stride_kn,
|
| 791 |
+
stride_vn,
|
| 792 |
+
stride_bm,
|
| 793 |
+
stride_dom,
|
| 794 |
+
stride_dqm,
|
| 795 |
+
stride_dkn,
|
| 796 |
+
stride_dvn,
|
| 797 |
+
seqlen_q,
|
| 798 |
+
seqlen_k,
|
| 799 |
+
headdim,
|
| 800 |
+
ATOMIC_ADD=True,
|
| 801 |
+
BIAS_TYPE=BIAS_TYPE,
|
| 802 |
+
IS_CAUSAL=IS_CAUSAL,
|
| 803 |
+
BLOCK_HEADDIM=BLOCK_HEADDIM,
|
| 804 |
+
EVEN_M=EVEN_M,
|
| 805 |
+
EVEN_N=EVEN_N,
|
| 806 |
+
EVEN_HEADDIM=EVEN_HEADDIM,
|
| 807 |
+
BLOCK_M=BLOCK_M,
|
| 808 |
+
BLOCK_N=BLOCK_N,
|
| 809 |
+
)
|
| 810 |
+
|
| 811 |
+
|
| 812 |
+
def _flash_attn_forward(q, k, v, bias=None, causal=False, softmax_scale=None):
|
| 813 |
+
# shape constraints
|
| 814 |
+
batch, seqlen_q, nheads, d = q.shape
|
| 815 |
+
_, seqlen_k, _, _ = k.shape
|
| 816 |
+
assert k.shape == (batch, seqlen_k, nheads, d)
|
| 817 |
+
assert v.shape == (batch, seqlen_k, nheads, d)
|
| 818 |
+
assert d <= 128, "FlashAttention only support head dimensions up to 128"
|
| 819 |
+
assert q.dtype == k.dtype == v.dtype, "All tensors must have the same type"
|
| 820 |
+
assert q.dtype in [torch.float16, torch.bfloat16], "Only support fp16 and bf16"
|
| 821 |
+
assert q.is_cuda and k.is_cuda and v.is_cuda
|
| 822 |
+
softmax_scale = softmax_scale or 1.0 / math.sqrt(d)
|
| 823 |
+
|
| 824 |
+
has_bias = bias is not None
|
| 825 |
+
bias_type = "none"
|
| 826 |
+
if has_bias:
|
| 827 |
+
assert bias.dtype in [q.dtype, torch.float]
|
| 828 |
+
assert bias.is_cuda
|
| 829 |
+
assert bias.dim() == 4
|
| 830 |
+
if bias.stride(-1) != 1:
|
| 831 |
+
bias = bias.contiguous()
|
| 832 |
+
if bias.shape[2:] == (1, seqlen_k):
|
| 833 |
+
bias_type = "vector"
|
| 834 |
+
elif bias.shape[2:] == (seqlen_q, seqlen_k):
|
| 835 |
+
bias_type = "matrix"
|
| 836 |
+
else:
|
| 837 |
+
raise RuntimeError(
|
| 838 |
+
"Last 2 dimensions of bias must be (1, seqlen_k)" " or (seqlen_q, seqlen_k)"
|
| 839 |
+
)
|
| 840 |
+
bias = bias.expand(batch, nheads, seqlen_q, seqlen_k)
|
| 841 |
+
bias_strides = (bias.stride(0), bias.stride(1), bias.stride(2)) if has_bias else (0, 0, 0)
|
| 842 |
+
|
| 843 |
+
seqlen_q_rounded = math.ceil(seqlen_q / 128) * 128
|
| 844 |
+
lse = torch.empty((batch, nheads, seqlen_q_rounded), device=q.device, dtype=torch.float32)
|
| 845 |
+
tmp = torch.empty((batch, nheads, seqlen_q_rounded), device=q.device, dtype=torch.float32)
|
| 846 |
+
o = torch.empty_like(q)
|
| 847 |
+
|
| 848 |
+
BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)
|
| 849 |
+
BLOCK = 128
|
| 850 |
+
num_warps = 4 if d <= 64 else 8
|
| 851 |
+
grid = lambda META: (triton.cdiv(seqlen_q, META["BLOCK_M"]), batch * nheads)
|
| 852 |
+
_fwd_kernel[grid](
|
| 853 |
+
q,
|
| 854 |
+
k,
|
| 855 |
+
v,
|
| 856 |
+
bias,
|
| 857 |
+
o,
|
| 858 |
+
lse,
|
| 859 |
+
tmp,
|
| 860 |
+
softmax_scale,
|
| 861 |
+
q.stride(0),
|
| 862 |
+
q.stride(2),
|
| 863 |
+
q.stride(1),
|
| 864 |
+
k.stride(0),
|
| 865 |
+
k.stride(2),
|
| 866 |
+
k.stride(1),
|
| 867 |
+
v.stride(0),
|
| 868 |
+
v.stride(2),
|
| 869 |
+
v.stride(1),
|
| 870 |
+
*bias_strides,
|
| 871 |
+
o.stride(0),
|
| 872 |
+
o.stride(2),
|
| 873 |
+
o.stride(1),
|
| 874 |
+
nheads,
|
| 875 |
+
seqlen_q,
|
| 876 |
+
seqlen_k,
|
| 877 |
+
seqlen_q_rounded,
|
| 878 |
+
d,
|
| 879 |
+
seqlen_q // 32,
|
| 880 |
+
seqlen_k // 32, # key for triton cache (limit number of compilations)
|
| 881 |
+
# Can't use kwargs here because triton autotune expects key to be args, not kwargs
|
| 882 |
+
# IS_CAUSAL=causal, BLOCK_HEADDIM=d,
|
| 883 |
+
bias_type,
|
| 884 |
+
causal,
|
| 885 |
+
BLOCK_HEADDIM,
|
| 886 |
+
BLOCK_M=BLOCK,
|
| 887 |
+
BLOCK_N=BLOCK,
|
| 888 |
+
num_warps=num_warps,
|
| 889 |
+
num_stages=1,
|
| 890 |
+
)
|
| 891 |
+
return o, lse, softmax_scale # softmax_scale could have been updated
|
| 892 |
+
|
| 893 |
+
|
| 894 |
+
def _flash_attn_backward(
|
| 895 |
+
do, q, k, v, o, lse, dq, dk, dv, bias=None, causal=False, softmax_scale=None
|
| 896 |
+
):
|
| 897 |
+
# Make sure that the last dimension is contiguous
|
| 898 |
+
if do.stride(-1) != 1:
|
| 899 |
+
do = do.contiguous()
|
| 900 |
+
batch, seqlen_q, nheads, d = q.shape
|
| 901 |
+
_, seqlen_k, _, _ = k.shape
|
| 902 |
+
# assert d in {16, 32, 64, 128}
|
| 903 |
+
assert d <= 128
|
| 904 |
+
seqlen_q_rounded = math.ceil(seqlen_q / 128) * 128
|
| 905 |
+
assert lse.shape == (batch, nheads, seqlen_q_rounded)
|
| 906 |
+
assert q.stride(-1) == k.stride(-1) == v.stride(-1) == o.stride(-1) == 1
|
| 907 |
+
assert dq.stride(-1) == dk.stride(-1) == dv.stride(-1) == 1
|
| 908 |
+
softmax_scale = softmax_scale or 1.0 / math.sqrt(d)
|
| 909 |
+
# dq_accum = torch.zeros_like(q, dtype=torch.float32)
|
| 910 |
+
dq_accum = torch.empty_like(q, dtype=torch.float32)
|
| 911 |
+
delta = torch.empty_like(lse)
|
| 912 |
+
# delta = torch.zeros_like(lse)
|
| 913 |
+
|
| 914 |
+
BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)
|
| 915 |
+
grid = lambda META: (triton.cdiv(seqlen_q, META["BLOCK_M"]), batch * nheads)
|
| 916 |
+
_bwd_preprocess_do_o_dot[grid](
|
| 917 |
+
o,
|
| 918 |
+
do,
|
| 919 |
+
delta,
|
| 920 |
+
o.stride(0),
|
| 921 |
+
o.stride(2),
|
| 922 |
+
o.stride(1),
|
| 923 |
+
do.stride(0),
|
| 924 |
+
do.stride(2),
|
| 925 |
+
do.stride(1),
|
| 926 |
+
nheads,
|
| 927 |
+
seqlen_q,
|
| 928 |
+
seqlen_q_rounded,
|
| 929 |
+
d,
|
| 930 |
+
BLOCK_M=128,
|
| 931 |
+
BLOCK_HEADDIM=BLOCK_HEADDIM,
|
| 932 |
+
)
|
| 933 |
+
|
| 934 |
+
has_bias = bias is not None
|
| 935 |
+
bias_type = "none"
|
| 936 |
+
if has_bias:
|
| 937 |
+
assert bias.dtype in [q.dtype, torch.float]
|
| 938 |
+
assert bias.is_cuda
|
| 939 |
+
assert bias.dim() == 4
|
| 940 |
+
assert bias.stride(-1) == 1
|
| 941 |
+
if bias.shape[2:] == (1, seqlen_k):
|
| 942 |
+
bias_type = "vector"
|
| 943 |
+
elif bias.shape[2:] == (seqlen_q, seqlen_k):
|
| 944 |
+
bias_type = "matrix"
|
| 945 |
+
else:
|
| 946 |
+
raise RuntimeError(
|
| 947 |
+
"Last 2 dimensions of bias must be (1, seqlen_k)" " or (seqlen_q, seqlen_k)"
|
| 948 |
+
)
|
| 949 |
+
bias = bias.expand(batch, nheads, seqlen_q, seqlen_k)
|
| 950 |
+
bias_strides = (bias.stride(0), bias.stride(1), bias.stride(2)) if has_bias else (0, 0, 0)
|
| 951 |
+
|
| 952 |
+
# BLOCK_M = 128
|
| 953 |
+
# BLOCK_N = 64
|
| 954 |
+
# num_warps = 4
|
| 955 |
+
grid = lambda META: (
|
| 956 |
+
triton.cdiv(seqlen_k, META["BLOCK_N"]) if META["SEQUENCE_PARALLEL"] else 1,
|
| 957 |
+
batch * nheads,
|
| 958 |
+
)
|
| 959 |
+
_bwd_kernel[grid](
|
| 960 |
+
q,
|
| 961 |
+
k,
|
| 962 |
+
v,
|
| 963 |
+
bias,
|
| 964 |
+
do,
|
| 965 |
+
dq_accum,
|
| 966 |
+
dk,
|
| 967 |
+
dv,
|
| 968 |
+
lse,
|
| 969 |
+
delta,
|
| 970 |
+
softmax_scale,
|
| 971 |
+
q.stride(0),
|
| 972 |
+
q.stride(2),
|
| 973 |
+
q.stride(1),
|
| 974 |
+
k.stride(0),
|
| 975 |
+
k.stride(2),
|
| 976 |
+
k.stride(1),
|
| 977 |
+
v.stride(0),
|
| 978 |
+
v.stride(2),
|
| 979 |
+
v.stride(1),
|
| 980 |
+
*bias_strides,
|
| 981 |
+
do.stride(0),
|
| 982 |
+
do.stride(2),
|
| 983 |
+
do.stride(1),
|
| 984 |
+
dq_accum.stride(0),
|
| 985 |
+
dq_accum.stride(2),
|
| 986 |
+
dq_accum.stride(1),
|
| 987 |
+
dk.stride(0),
|
| 988 |
+
dk.stride(2),
|
| 989 |
+
dk.stride(1),
|
| 990 |
+
dv.stride(0),
|
| 991 |
+
dv.stride(2),
|
| 992 |
+
dv.stride(1),
|
| 993 |
+
nheads,
|
| 994 |
+
seqlen_q,
|
| 995 |
+
seqlen_k,
|
| 996 |
+
seqlen_q_rounded,
|
| 997 |
+
d,
|
| 998 |
+
seqlen_q // 32,
|
| 999 |
+
seqlen_k // 32, # key for triton cache (limit number of compilations)
|
| 1000 |
+
# Can't use kwargs here because triton autotune expects key to be args, not kwargs
|
| 1001 |
+
# IS_CAUSAL=causal, BLOCK_HEADDIM=d,
|
| 1002 |
+
bias_type,
|
| 1003 |
+
causal,
|
| 1004 |
+
BLOCK_HEADDIM,
|
| 1005 |
+
# SEQUENCE_PARALLEL=False,
|
| 1006 |
+
# BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N,
|
| 1007 |
+
# num_warps=num_warps,
|
| 1008 |
+
# num_stages=1,
|
| 1009 |
+
)
|
| 1010 |
+
dq.copy_(dq_accum)
|
| 1011 |
+
|
| 1012 |
+
|
| 1013 |
+
class FlashAttnQKVPackedFunc(torch.autograd.Function):
|
| 1014 |
+
@staticmethod
|
| 1015 |
+
def forward(ctx, qkv, bias=None, causal=False, softmax_scale=None):
|
| 1016 |
+
"""
|
| 1017 |
+
qkv: (batch, seqlen, 3, nheads, headdim)
|
| 1018 |
+
bias: optional, shape broadcastable to (batch, nheads, seqlen, seqlen).
|
| 1019 |
+
For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen).
|
| 1020 |
+
ALiBi mask for non-causal would have shape (1, nheads, seqlen, seqlen)
|
| 1021 |
+
"""
|
| 1022 |
+
# Make sure that the last dimension is contiguous
|
| 1023 |
+
if qkv.stride(-1) != 1:
|
| 1024 |
+
qkv = qkv.contiguous()
|
| 1025 |
+
o, lse, ctx.softmax_scale = _flash_attn_forward(
|
| 1026 |
+
qkv[:, :, 0],
|
| 1027 |
+
qkv[:, :, 1],
|
| 1028 |
+
qkv[:, :, 2],
|
| 1029 |
+
bias=bias,
|
| 1030 |
+
causal=causal,
|
| 1031 |
+
softmax_scale=softmax_scale,
|
| 1032 |
+
)
|
| 1033 |
+
ctx.save_for_backward(qkv, o, lse, bias)
|
| 1034 |
+
ctx.causal = causal
|
| 1035 |
+
return o
|
| 1036 |
+
|
| 1037 |
+
@staticmethod
|
| 1038 |
+
def backward(ctx, do):
|
| 1039 |
+
qkv, o, lse, bias = ctx.saved_tensors
|
| 1040 |
+
assert not ctx.needs_input_grad[1], "FlashAttention does not support bias gradient yet"
|
| 1041 |
+
# Triton's autotune causes the Tensor._version to change, and so Pytorch autograd
|
| 1042 |
+
# does a memcpy. To avoid this we run in inference_mode, which doesn't track the version.
|
| 1043 |
+
with torch.inference_mode():
|
| 1044 |
+
dqkv = torch.empty_like(qkv)
|
| 1045 |
+
_flash_attn_backward(
|
| 1046 |
+
do,
|
| 1047 |
+
qkv[:, :, 0],
|
| 1048 |
+
qkv[:, :, 1],
|
| 1049 |
+
qkv[:, :, 2],
|
| 1050 |
+
o,
|
| 1051 |
+
lse,
|
| 1052 |
+
dqkv[:, :, 0],
|
| 1053 |
+
dqkv[:, :, 1],
|
| 1054 |
+
dqkv[:, :, 2],
|
| 1055 |
+
bias=bias,
|
| 1056 |
+
causal=ctx.causal,
|
| 1057 |
+
softmax_scale=ctx.softmax_scale,
|
| 1058 |
+
)
|
| 1059 |
+
return dqkv, None, None, None
|
| 1060 |
+
|
| 1061 |
+
|
| 1062 |
+
flash_attn_qkvpacked_func = FlashAttnQKVPackedFunc.apply
|
| 1063 |
+
|
| 1064 |
+
|
| 1065 |
+
class FlashAttnKVPackedFunc(torch.autograd.Function):
|
| 1066 |
+
@staticmethod
|
| 1067 |
+
def forward(ctx, q, kv, bias=None, causal=False, softmax_scale=None):
|
| 1068 |
+
"""
|
| 1069 |
+
q: (batch, seqlen_q, nheads, headdim)
|
| 1070 |
+
kv: (batch, seqlen_k, 2, nheads, headdim)
|
| 1071 |
+
bias: optional, shape broadcastable to (batch, nheads, seqlen_q, seqlen_k).
|
| 1072 |
+
For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen_k).
|
| 1073 |
+
ALiBi mask for non-causal would have shape (1, nheads, seqlen_q, seqlen_k)
|
| 1074 |
+
"""
|
| 1075 |
+
# Make sure that the last dimension is contiguous
|
| 1076 |
+
q, kv = [x if x.stride(-1) == 1 else x.contiguous() for x in [q, kv]]
|
| 1077 |
+
o, lse, ctx.softmax_scale = _flash_attn_forward(
|
| 1078 |
+
q, kv[:, :, 0], kv[:, :, 1], bias=bias, causal=causal, softmax_scale=softmax_scale
|
| 1079 |
+
)
|
| 1080 |
+
ctx.save_for_backward(q, kv, o, lse, bias)
|
| 1081 |
+
ctx.causal = causal
|
| 1082 |
+
return o
|
| 1083 |
+
|
| 1084 |
+
@staticmethod
|
| 1085 |
+
def backward(ctx, do):
|
| 1086 |
+
q, kv, o, lse, bias = ctx.saved_tensors
|
| 1087 |
+
if len(ctx.needs_input_grad) >= 3:
|
| 1088 |
+
assert not ctx.needs_input_grad[2], "FlashAttention does not support bias gradient yet"
|
| 1089 |
+
# Triton's autotune causes the Tensor._version to change, and so Pytorch autograd
|
| 1090 |
+
# does a memcpy. To avoid this we run in inference_mode, which doesn't track the version.
|
| 1091 |
+
with torch.inference_mode():
|
| 1092 |
+
dq = torch.empty_like(q)
|
| 1093 |
+
dkv = torch.empty_like(kv)
|
| 1094 |
+
_flash_attn_backward(
|
| 1095 |
+
do,
|
| 1096 |
+
q,
|
| 1097 |
+
kv[:, :, 0],
|
| 1098 |
+
kv[:, :, 1],
|
| 1099 |
+
o,
|
| 1100 |
+
lse,
|
| 1101 |
+
dq,
|
| 1102 |
+
dkv[:, :, 0],
|
| 1103 |
+
dkv[:, :, 1],
|
| 1104 |
+
bias=bias,
|
| 1105 |
+
causal=ctx.causal,
|
| 1106 |
+
softmax_scale=ctx.softmax_scale,
|
| 1107 |
+
)
|
| 1108 |
+
return dq, dkv, None, None, None
|
| 1109 |
+
|
| 1110 |
+
|
| 1111 |
+
flash_attn_kvpacked_func = FlashAttnKVPackedFunc.apply
|
| 1112 |
+
|
| 1113 |
+
|
| 1114 |
+
class FlashAttnFunc(torch.autograd.Function):
|
| 1115 |
+
@staticmethod
|
| 1116 |
+
def forward(ctx, q, k, v, bias=None, causal=False, softmax_scale=None):
|
| 1117 |
+
"""
|
| 1118 |
+
q: (batch_size, seqlen_q, nheads, headdim)
|
| 1119 |
+
k, v: (batch_size, seqlen_k, nheads, headdim)
|
| 1120 |
+
bias: optional, shape broadcastable to (batch, nheads, seqlen_q, seqlen_k).
|
| 1121 |
+
For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen_k).
|
| 1122 |
+
ALiBi mask for non-causal would have shape (1, nheads, seqlen_q, seqlen_k)
|
| 1123 |
+
"""
|
| 1124 |
+
# Make sure that the last dimension is contiguous
|
| 1125 |
+
q, k, v = [x if x.stride(-1) == 1 else x.contiguous() for x in [q, k, v]]
|
| 1126 |
+
o, lse, ctx.softmax_scale = _flash_attn_forward(
|
| 1127 |
+
q, k, v, bias=bias, causal=causal, softmax_scale=softmax_scale
|
| 1128 |
+
)
|
| 1129 |
+
ctx.save_for_backward(q, k, v, o, lse, bias)
|
| 1130 |
+
ctx.causal = causal
|
| 1131 |
+
return o
|
| 1132 |
+
|
| 1133 |
+
@staticmethod
|
| 1134 |
+
def backward(ctx, do):
|
| 1135 |
+
q, k, v, o, lse, bias = ctx.saved_tensors
|
| 1136 |
+
assert not ctx.needs_input_grad[3], "FlashAttention does not support bias gradient yet"
|
| 1137 |
+
# Triton's autotune causes the Tensor._version to change, and so Pytorch autograd
|
| 1138 |
+
# does a memcpy. To avoid this we run in inference_mode, which doesn't track the version.
|
| 1139 |
+
with torch.inference_mode():
|
| 1140 |
+
dq = torch.empty_like(q)
|
| 1141 |
+
dk = torch.empty_like(k)
|
| 1142 |
+
dv = torch.empty_like(v)
|
| 1143 |
+
_flash_attn_backward(
|
| 1144 |
+
do,
|
| 1145 |
+
q,
|
| 1146 |
+
k,
|
| 1147 |
+
v,
|
| 1148 |
+
o,
|
| 1149 |
+
lse,
|
| 1150 |
+
dq,
|
| 1151 |
+
dk,
|
| 1152 |
+
dv,
|
| 1153 |
+
bias=bias,
|
| 1154 |
+
causal=ctx.causal,
|
| 1155 |
+
softmax_scale=ctx.softmax_scale,
|
| 1156 |
+
)
|
| 1157 |
+
return dq, dk, dv, None, None, None
|
| 1158 |
+
|
| 1159 |
+
|
| 1160 |
+
flash_attn_func = FlashAttnFunc.apply
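# --- Usage sketch (not part of the original file) ------------------------------------
# A minimal example of the bias support advertised in the module docstring, using an
# ALiBi-style per-head "vector" bias. It assumes a CUDA device and that this file is
# importable as flash_attn.flash_attn_triton; shapes follow the docstrings above.
#
#     import math
#     import torch
#     from flash_attn.flash_attn_triton import flash_attn_func
#
#     batch, seqlen, nheads, headdim = 2, 1024, 8, 64
#     q, k, v = [
#         torch.randn(batch, seqlen, nheads, headdim, device="cuda",
#                     dtype=torch.float16, requires_grad=True)
#         for _ in range(3)
#     ]
#     # Per-head ALiBi slopes: geometric sequence starting at 2^(-8/nheads).
#     slopes = torch.tensor([2.0 ** (-8.0 * (i + 1) / nheads) for i in range(nheads)],
#                           device="cuda", dtype=torch.float16)
#     pos = torch.arange(seqlen, device="cuda", dtype=torch.float16)
#     bias = (-slopes[:, None] * pos[None, :]).view(1, nheads, 1, seqlen)  # (1, h, 1, seqlen_k)
#
#     out = flash_attn_func(q, k, v, bias, True, 1.0 / math.sqrt(headdim))  # causal=True
#     out.sum().backward()  # dq/dk/dv come from the Triton backward kernel
# --------------------------------------------------------------------------------------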
|
infer_4_30_0/lib/python3.10/site-packages/flash_attn/flash_attn_triton_amd/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (190 Bytes)

infer_4_30_0/lib/python3.10/site-packages/flash_attn/flash_attn_triton_amd/__pycache__/bwd_prefill.cpython-310.pyc
ADDED
Binary file (10 kB)

infer_4_30_0/lib/python3.10/site-packages/flash_attn/flash_attn_triton_amd/__pycache__/utils.cpython-310.pyc
ADDED
Binary file (8.95 kB)

infer_4_30_0/lib/python3.10/site-packages/flash_attn/flash_attn_triton_amd/bench.py
ADDED
@@ -0,0 +1,290 @@
| 1 |
+
import argparse
|
| 2 |
+
import torch
|
| 3 |
+
import triton
|
| 4 |
+
from flash_attn.flash_attn_triton_amd.utils import (
|
| 5 |
+
MetaData,
|
| 6 |
+
input_helper,
|
| 7 |
+
varlen_input_helper,
|
| 8 |
+
)
|
| 9 |
+
from flash_attn.flash_attn_triton_amd.interface_torch import attention_prefill, attention_decode
|
| 10 |
+
|
| 11 |
+
ARGS_TO_TORCH_DTYPE = {
|
| 12 |
+
"fp16": torch.float16,
|
| 13 |
+
"bf16": torch.bfloat16,
|
| 14 |
+
"fp32": torch.float32,
|
| 15 |
+
}
|
| 16 |
+
|
| 17 |
+
FUNCTIONS = {
|
| 18 |
+
"prefill": attention_prefill,
|
| 19 |
+
"decode": attention_decode
|
| 20 |
+
}
|
| 21 |
+
|
| 22 |
+
def get_benchmark_configs(args, varlen=False):
|
| 23 |
+
"""
|
| 24 |
+
Returns benchmark configurations based on whether variable-length sequences are used.
|
| 25 |
+
"""
|
| 26 |
+
if args.custom_config:
|
| 27 |
+
hk = args.hq if not args.hk else args.hk
|
| 28 |
+
sk = args.sq if not args.sk else args.sk
|
| 29 |
+
return [(args.b, args.hq, hk, args.sq, sk)]
|
| 30 |
+
elif varlen:
|
| 31 |
+
return [
|
| 32 |
+
(2, 16, 4, 1024, 1024),
|
| 33 |
+
(8, 16, 2, 2048, 2048),
|
| 34 |
+
(4, 16, 8, 4096, 4096),
|
| 35 |
+
(2, 16, 4, 8192, 8192),
|
| 36 |
+
(2, 16, 8, 16384, 16384),
|
| 37 |
+
(2, 48, 12, 1024, 1024),
|
| 38 |
+
(2, 48, 24, 2048, 2048),
|
| 39 |
+
(2, 48, 8, 4096, 4096),
|
| 40 |
+
(2, 48, 4, 8192, 8192),
|
| 41 |
+
(2, 48, 2, 16384, 16384),
|
| 42 |
+
(2, 64, 32, 1024, 1024),
|
| 43 |
+
(4, 64, 16, 2048, 2048),
|
| 44 |
+
(4, 64, 8, 4096, 4096),
|
| 45 |
+
(4, 64, 32, 8192, 8192),
|
| 46 |
+
(4, 128, 16, 16384, 16384),
|
| 47 |
+
]
|
| 48 |
+
else:
|
| 49 |
+
return [
|
| 50 |
+
(16, 16, 16, 1024, 1024),
|
| 51 |
+
(8, 16, 16, 2048, 2048),
|
| 52 |
+
(4, 16, 16, 4096, 4096),
|
| 53 |
+
(1, 8, 8, 8192, 8192),
|
| 54 |
+
(1, 2, 2, 16384, 16384),
|
| 55 |
+
(2, 48, 48, 1024, 1024),
|
| 56 |
+
(2, 48, 48, 2048, 1024),
|
| 57 |
+
(1, 8, 8, 4096, 8192),
|
| 58 |
+
(1, 8, 8, 8192, 4096),
|
| 59 |
+
(2, 4, 4, 16384, 8192),
|
| 60 |
+
(2, 8, 8, 1989, 15344),
|
| 61 |
+
(4, 16, 16, 4097, 163),
|
| 62 |
+
(2, 16, 16, 8122, 2159),
|
| 63 |
+
(1, 16, 16, 16281, 7),
|
| 64 |
+
(2, 48, 48, 1021, 1020),
|
| 65 |
+
(2, 48, 48, 2001, 2048),
|
| 66 |
+
(2, 8, 8, 3996, 9639),
|
| 67 |
+
(2, 8, 8, 8181, 1021),
|
| 68 |
+
]
|
| 69 |
+
|
| 70 |
+
def gen_fn_inputs(fn_name, BATCH, HQ, HK, N_CTX_Q, N_CTX_K, D_HEAD, dtype, device, layout, causal):
|
| 71 |
+
flops_per_matmul = 0
|
| 72 |
+
|
| 73 |
+
if fn_name.startswith("prefill"):
|
| 74 |
+
if layout == "thd":
|
| 75 |
+
q, k, v, input_metadata = varlen_input_helper(
|
| 76 |
+
BATCH, HQ, HK, N_CTX_Q, N_CTX_K, D_HEAD, dtype, device=device)
|
| 77 |
+
for i in range(input_metadata.num_contexts):
|
| 78 |
+
seqlen_q = input_metadata.cu_seqlens_q[i + 1] - input_metadata.cu_seqlens_q[i]
|
| 79 |
+
seqlen_k = input_metadata.cu_seqlens_k[i + 1] - input_metadata.cu_seqlens_k[i]
|
| 80 |
+
flops_per_matmul += seqlen_q.item() * seqlen_k.item() * HQ * D_HEAD * 2
|
| 81 |
+
else:
|
| 82 |
+
q, k, v, input_metadata = input_helper(
|
| 83 |
+
BATCH, HQ, HK, N_CTX_Q, N_CTX_K, D_HEAD, dtype, layout, device=device
|
| 84 |
+
)
|
| 85 |
+
flops_per_matmul = 2.0 * BATCH * HQ * N_CTX_Q * N_CTX_K * D_HEAD
|
| 86 |
+
|
| 87 |
+
if causal:
|
| 88 |
+
input_metadata.need_causal()
|
| 89 |
+
|
| 90 |
+
o = torch.empty_like(q)
|
| 91 |
+
input_data = (q, k, v, o, input_metadata)
|
| 92 |
+
elif fn_name.startswith("decode"):
|
| 93 |
+
q = torch.randn(
|
| 94 |
+
[BATCH, N_CTX_Q, HK, HQ // HK, D_HEAD],
|
| 95 |
+
device=device,
|
| 96 |
+
dtype=dtype,
|
| 97 |
+
requires_grad=False,
|
| 98 |
+
)
|
| 99 |
+
k = torch.randn(
|
| 100 |
+
[BATCH, N_CTX_K, HK, 1, D_HEAD],
|
| 101 |
+
device=device,
|
| 102 |
+
dtype=dtype,
|
| 103 |
+
requires_grad=False,
|
| 104 |
+
).expand(-1, -1, -1, HQ // HK, -1)
|
| 105 |
+
v = torch.randn(
|
| 106 |
+
[BATCH, N_CTX_K, HK, 1, D_HEAD],
|
| 107 |
+
device=device,
|
| 108 |
+
dtype=dtype,
|
| 109 |
+
requires_grad=False,
|
| 110 |
+
).expand(-1, -1, -1, HQ // HK, -1)
|
| 111 |
+
input_metadata = MetaData(sm_scale=1.3)
|
| 112 |
+
input_metadata.layout = "bsghd"
|
| 113 |
+
|
| 114 |
+
# Adjust flops calculation if needed
|
| 115 |
+
flops_per_matmul = 2.0 * BATCH * HQ * N_CTX_Q * N_CTX_K * D_HEAD
|
| 116 |
+
|
| 117 |
+
input_data = (q, k, v, input_metadata)
|
| 118 |
+
else:
|
| 119 |
+
raise ValueError("Unsupported benchmark function")
|
| 120 |
+
return input_data, flops_per_matmul
|
| 121 |
+
|
| 122 |
+
def run_benchmark(args, fn_name, fn, mode):
|
| 123 |
+
"""
|
| 124 |
+
Runs the benchmark for the provided function based on the provided arguments.
|
| 125 |
+
"""
|
| 126 |
+
print(f"Benchmarking {fn_name} in {mode} mode...")
|
| 127 |
+
|
| 128 |
+
dtype = ARGS_TO_TORCH_DTYPE[args.dtype]
|
| 129 |
+
head_size = args.d if args.d else 128
|
| 130 |
+
causal = args.causal
|
| 131 |
+
varlen = args.layout == "thd"
|
| 132 |
+
return_tflops = args.return_tflops
|
| 133 |
+
line_names = "TFLOPS" if return_tflops else "Time (ms)"
|
| 134 |
+
|
| 135 |
+
# Determine configurations
|
| 136 |
+
x_vals_list = get_benchmark_configs(args, varlen=varlen)
|
| 137 |
+
|
| 138 |
+
# Setup benchmark configurations
|
| 139 |
+
configs = [
|
| 140 |
+
triton.testing.Benchmark(
|
| 141 |
+
x_names=["BATCH", "HQ", "HK", "N_CTX_Q", "N_CTX_K"],
|
| 142 |
+
x_vals=x_vals_list,
|
| 143 |
+
line_arg="provider",
|
| 144 |
+
line_vals=["triton"],
|
| 145 |
+
line_names=[line_names],
|
| 146 |
+
styles=[("red", "-")],
|
| 147 |
+
ylabel="ms",
|
| 148 |
+
plot_name=f"benchmark-{fn_name}-d{head_size}-layout{args.layout}-mode{mode}",
|
| 149 |
+
args={
|
| 150 |
+
"D_HEAD": head_size,
|
| 151 |
+
"dtype": dtype,
|
| 152 |
+
"causal": causal,
|
| 153 |
+
"mode": mode,
|
| 154 |
+
},
|
| 155 |
+
)
|
| 156 |
+
]
|
| 157 |
+
|
| 158 |
+
@triton.testing.perf_report(configs)
|
| 159 |
+
def bench_function(
|
| 160 |
+
BATCH, HQ, HK, N_CTX_Q, N_CTX_K, D_HEAD, dtype, causal, mode, provider, device="cuda"
|
| 161 |
+
):
|
| 162 |
+
warmup = 25
|
| 163 |
+
rep = 100
|
| 164 |
+
flops_per_matmul = 0
|
| 165 |
+
|
| 166 |
+
# generate function inputs
|
| 167 |
+
fn_inputs, flops_per_matmul = gen_fn_inputs(
|
| 168 |
+
fn_name, BATCH, HQ, HK, N_CTX_Q, N_CTX_K, D_HEAD, dtype, device, args.layout, causal
|
| 169 |
+
)
|
| 170 |
+
|
| 171 |
+
# define the function to benchmark
|
| 172 |
+
if mode == "fwd":
|
| 173 |
+
benchmark_fn = lambda: fn(*fn_inputs)
|
| 174 |
+
total_flops = 2 * flops_per_matmul
|
| 175 |
+
elif mode == "bwd":
|
| 176 |
+
outputs = fn(*fn_inputs)
|
| 177 |
+
output = outputs[0]
|
| 178 |
+
grad_output = torch.randn_like(output)
|
| 179 |
+
benchmark_fn = lambda: output.backward(grad_output, retain_graph=True)
|
| 180 |
+
total_flops = 2 * flops_per_matmul * 2.5
|
| 181 |
+
else:
|
| 182 |
+
raise ValueError("Unsupported mode. Choose 'fwd' or 'bwd'.")
|
| 183 |
+
|
| 184 |
+
if causal:
|
| 185 |
+
total_flops *= 0.5
|
| 186 |
+
|
| 187 |
+
# Run the benchmark
|
| 188 |
+
ms = triton.testing.do_bench(benchmark_fn, warmup=warmup, rep=rep)
|
| 189 |
+
|
| 190 |
+
if return_tflops:
|
| 191 |
+
return total_flops / ms * 1e-9
|
| 192 |
+
else:
|
| 193 |
+
return ms
|
| 194 |
+
|
| 195 |
+
bench_function.run(save_path=".", print_data=True)
|
| 196 |
+
|
| 197 |
+
def supported_layouts():
|
| 198 |
+
"""
|
| 199 |
+
Returns a string describing the supported layouts.
|
| 200 |
+
"""
|
| 201 |
+
return (
|
| 202 |
+
"bhsd: Q, K, V are individual tensors of [batch, num_heads, seqlen_q/k, head_size]\n"
|
| 203 |
+
"bshd: Q, K, V are individual tensors of [batch, seqlen_q/k, num_heads, head_size]\n"
|
| 204 |
+
"thd: Q, K, V are individual tensors of [total_q/k, num_heads, head_size]\n"
|
| 205 |
+
'This layout is sometimes called "varlen" or "grouped" layout.'
|
| 206 |
+
)
|
| 207 |
+
|
| 208 |
+
def parse_args():
|
| 209 |
+
"""
|
| 210 |
+
Parses command-line arguments.
|
| 211 |
+
"""
|
| 212 |
+
parser = argparse.ArgumentParser(
|
| 213 |
+
prog="Benchmark FlashAttention",
|
| 214 |
+
allow_abbrev=False,
|
| 215 |
+
)
|
| 216 |
+
parser.add_argument("-b", type=int, default=0)
|
| 217 |
+
parser.add_argument("-hq", type=int, default=0)
|
| 218 |
+
parser.add_argument("-hk", type=int, default=0)
|
| 219 |
+
parser.add_argument("-sq", type=int, default=0)
|
| 220 |
+
parser.add_argument("-sk", type=int, default=0)
|
| 221 |
+
parser.add_argument(
|
| 222 |
+
"-equal_seqlens",
|
| 223 |
+
action="store_true",
|
| 224 |
+
default=False,
|
| 225 |
+
help="If specified, each context within the thd layout has same seqlen as sq and sk",
|
| 226 |
+
)
|
| 227 |
+
parser.add_argument("-d", type=int, default=0)
|
| 228 |
+
parser.add_argument("-causal", action="store_true", default=False)
|
| 229 |
+
parser.add_argument("-dtype", default="fp16")
|
| 230 |
+
parser.add_argument("-return_tflops", action="store_true", default=False)
|
| 231 |
+
parser.add_argument(
|
| 232 |
+
"-layout",
|
| 233 |
+
type=str,
|
| 234 |
+
default="bhsd",
|
| 235 |
+
help=supported_layouts(),
|
| 236 |
+
)
|
| 237 |
+
parser.add_argument(
|
| 238 |
+
"-benchmark_fn",
|
| 239 |
+
type=str,
|
| 240 |
+
nargs="*",
|
| 241 |
+
choices=FUNCTIONS.keys(),
|
| 242 |
+
help="Function(s) to benchmark: prefill, decode, or both",
|
| 243 |
+
)
|
| 244 |
+
parser.add_argument(
|
| 245 |
+
"-mode",
|
| 246 |
+
type=str,
|
| 247 |
+
nargs='*',
|
| 248 |
+
default=["fwd", "bwd"],
|
| 249 |
+
choices=["fwd", "bwd"],
|
| 250 |
+
help="Mode(s) to run: 'fwd' for forward pass, 'bwd' for backward pass",
|
| 251 |
+
)
|
| 252 |
+
return parser.parse_args()
|
| 253 |
+
|
| 254 |
+
def main():
|
| 255 |
+
"""
|
| 256 |
+
Main function to run benchmarks.
|
| 257 |
+
"""
|
| 258 |
+
args = parse_args()
|
| 259 |
+
|
| 260 |
+
# Validate arguments
|
| 261 |
+
assert (
|
| 262 |
+
args.layout == "thd" or not args.equal_seqlens
|
| 263 |
+
), "Equal sequence lengths arg must be used with the thd layout."
|
| 264 |
+
args.custom_config = False
|
| 265 |
+
if args.b or args.hq or args.hk or args.sq or args.sk or args.d:
|
| 266 |
+
args.custom_config = True
|
| 267 |
+
assert args.b and args.hq and args.sq and args.d, (
|
| 268 |
+
"If custom config is specified, please provide all of batch, "
|
| 269 |
+
"number of Q heads, Q sequence length, and head size."
|
| 270 |
+
)
|
| 271 |
+
assert args.dtype in ARGS_TO_TORCH_DTYPE, "Only fp16, bf16 and fp32 types currently supported."
|
| 272 |
+
|
| 273 |
+
# determine the functions to benchmark
|
| 274 |
+
if args.benchmark_fn is None or len(args.benchmark_fn) == 0:
|
| 275 |
+
bench_fn_list = FUNCTIONS.keys()
|
| 276 |
+
else:
|
| 277 |
+
bench_fn_list = args.benchmark_fn
|
| 278 |
+
|
| 279 |
+
# benchmark functions
|
| 280 |
+
for fn_name in bench_fn_list:
|
| 281 |
+
if fn_name not in FUNCTIONS:
|
| 282 |
+
raise ValueError(f"Invalid benchmark function specified: {fn_name}")
|
| 283 |
+
for mode in args.mode:
|
| 284 |
+
if fn_name == "decode" and mode == "bwd":
|
| 285 |
+
print(f"Decode kernel doesnot have a backward pass")
|
| 286 |
+
continue
|
| 287 |
+
run_benchmark(args, fn_name, FUNCTIONS[fn_name], mode)
|
| 288 |
+
|
| 289 |
+
if __name__ == "__main__":
|
| 290 |
+
main()
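# --- Usage note (not part of the original file) ---
# Assuming this script is run directly as bench.py, the flags defined in parse_args() above
# can be combined, for example:
#     python bench.py -benchmark_fn prefill -mode fwd -dtype fp16 -layout bhsd -causal
#     python bench.py -b 2 -hq 16 -sq 4096 -d 128 -benchmark_fn prefill
# The second form takes the custom-config path, which requires -b, -hq, -sq and -d together.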
|
infer_4_30_0/lib/python3.10/site-packages/flash_attn/flash_attn_triton_amd/test.py
ADDED
|
@@ -0,0 +1,724 @@
| 1 |
+
import torch
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
from .utils import MetaData, get_input_shapes, input_helper, varlen_input_helper, DEBUG
|
| 5 |
+
from .interface_torch import attention_prefill, attention_decode
|
| 6 |
+
from .fwd_ref import attention_forward_pytorch_ref_impl, compute_alibi_tensor_ref
|
| 7 |
+
from .fwd_prefill import attention_prefill_forward_triton_impl
|
| 8 |
+
from .bwd_prefill import attention_prefill_backward_triton_impl
|
| 9 |
+
from .bwd_ref import attention_backward_pytorch_ref_impl
|
| 10 |
+
from .fwd_decode import dequantize_kv_fp16, quantize_kv_int4
|
| 11 |
+
|
| 12 |
+
# default fp16 tolerance is ATOL, RTOL = 1e-5, 1e-3. See table https://pytorch.org/docs/stable/testing.html
|
| 13 |
+
ATOL, RTOL = 1e-2, 1e-2 # old standard. maybe too loose.
|
| 14 |
+
# ATOL, RTOL = 1e-3, 1e-3 # catches fa mismatch issues
|
| 15 |
+
# ATOL, RTOL = 1e-4, 1e-3 # too strict. there will be small diffs
|
| 16 |
+
# ATOL, RTOL = 1e-5, 1e-3 # default fp16. there will be small diffs
|
| 17 |
+
EQUAL_NAN = True
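As a quick illustration of what these tolerance choices mean in practice, here is a minimal sketch; the shapes and the 5e-3 perturbation are made up for illustration and only assume the torch.testing.assert_close semantics the tests already rely on.

import torch

a = torch.randn(8, 8, dtype=torch.float16)
b = a + 5e-3                                              # perturbation well inside atol=1e-2
torch.testing.assert_close(a, b, atol=1e-2, rtol=1e-2)    # passes with the "old standard" tolerance
# with the default fp16 tolerance (atol=1e-5, rtol=1e-3) the same comparison would typically raise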
|
| 18 |
+
|
| 19 |
+
@pytest.mark.parametrize('Z, HQ, HK, N_CTX_Q, N_CTX_K, D_HEAD', [
|
| 20 |
+
(4, 48, 24, 1024, 1024, 64),
|
| 21 |
+
(1, 24, 6, 8192, 8192, 64),
|
| 22 |
+
(1, 4, 2, 16384, 16384, 128),
|
| 23 |
+
(2, 16, 4, 1020, 987, 128),
|
| 24 |
+
(2, 16, 4, 15498, 2, 128),
|
| 25 |
+
(2, 16, 2, 7, 16219, 64),
|
| 26 |
+
(4, 48, 12, 1, 1, 64),
|
| 27 |
+
(4, 48, 48, 1, 1, 128),
|
| 28 |
+
(4, 48, 24, 3, 3, 128),
|
| 29 |
+
(4, 48, 48, 1001, 990, 64),
|
| 30 |
+
(1, 8, 8, 8081, 7099, 64),
|
| 31 |
+
(1, 4, 4, 16330, 15989, 128),
|
| 32 |
+
(4, 4, 1, 1024, 1024, 33),
|
| 33 |
+
(4, 4, 2, 65, 1018, 65),
|
| 34 |
+
(4, 4, 4, 128, 128, 65),
|
| 35 |
+
(4, 4, 4, 113, 123, 1),
|
| 36 |
+
])
|
| 37 |
+
@pytest.mark.parametrize('causal', [True, False])
|
| 38 |
+
@pytest.mark.parametrize('use_alibi', [True, False])
|
| 39 |
+
@pytest.mark.parametrize('layout', ['bshd', 'bhsd'])
|
| 40 |
+
def test_op_fwd_prefill(Z, HQ, HK, N_CTX_Q, N_CTX_K, D_HEAD, causal, use_alibi, layout, dtype=torch.float16):
|
| 41 |
+
torch.manual_seed(20)
|
| 42 |
+
q, k, v, input_metadata = input_helper(Z, HQ, HK, N_CTX_Q, N_CTX_K, D_HEAD, dtype, layout)
|
| 43 |
+
if causal:
|
| 44 |
+
input_metadata.need_causal()
|
| 45 |
+
|
| 46 |
+
if use_alibi:
|
| 47 |
+
# for n heads the set of slopes is the geometric sequence that starts at 2^(-8/n)
|
| 48 |
+
alibi_slopes = torch.tensor([2**(-8 / HQ * i) for i in range(1, HQ + 1)], dtype=torch.float32,
|
| 49 |
+
device="cuda").repeat(Z, 1)
|
| 50 |
+
input_metadata.need_alibi(alibi_slopes, Z, HQ)
|
| 51 |
+
else:
|
| 52 |
+
alibi_slopes = None
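For reference, a tiny standalone sketch of the slope construction used above; the head count here is illustrative only.

import torch

HQ = 4                                                               # illustrative number of heads
slopes = torch.tensor([2 ** (-8 / HQ * i) for i in range(1, HQ + 1)])
# tensor([0.2500, 0.0625, 0.0156, 0.0039]): a geometric sequence whose first term and ratio are both 2^(-8/HQ)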
|
| 53 |
+
|
| 54 |
+
o = torch.empty_like(q)
|
| 55 |
+
|
| 56 |
+
# triton implementation
|
| 57 |
+
tri_out, _, _ = attention_prefill(q, k, v, o, input_metadata)
|
| 58 |
+
|
| 59 |
+
# Transpose here if layout is bshd so we have the same reference code for all layouts
|
| 60 |
+
if layout == 'bshd':
|
| 61 |
+
q = q.transpose(1, 2).clone()
|
| 62 |
+
k = k.transpose(1, 2).clone()
|
| 63 |
+
v = v.transpose(1, 2).clone()
|
| 64 |
+
# Replicate K and V if using MQA/GQA
|
| 65 |
+
if HQ != HK:
|
| 66 |
+
k = k.view(k.shape[0], k.shape[1], -1, k.shape[2],
|
| 67 |
+
k.shape[3]).expand(-1, -1, HQ // HK, -1, -1).reshape(k.shape[0], -1, k.shape[2], k.shape[3])
|
| 68 |
+
v = v.view(v.shape[0], v.shape[1], -1, v.shape[2],
|
| 69 |
+
v.shape[3]).expand(-1, -1, HQ // HK, -1, -1).reshape(v.shape[0], -1, v.shape[2], v.shape[3])
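The same view/expand/reshape replication in isolation, with tiny made-up shapes, showing that each KV head simply gets repeated HQ // HK times.

import torch

HQ, HK = 4, 2
k = torch.randn(1, HK, 6, 8)                                   # (batch, kv_heads, seqlen, head_dim)
k_rep = (k.view(1, HK, 1, 6, 8)
          .expand(-1, -1, HQ // HK, -1, -1)
          .reshape(1, HQ, 6, 8))
assert torch.equal(k_rep[:, 0], k_rep[:, 1])                   # heads 0 and 1 are copies of KV head 0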
|
| 70 |
+
|
| 71 |
+
scores = torch.einsum('bhqd,bhkd->bhqk', q, k).float() * input_metadata.sm_scale
|
| 72 |
+
if causal:
|
| 73 |
+
mask = torch.tril(torch.ones(N_CTX_Q, N_CTX_K, device="cuda"), diagonal=N_CTX_K - N_CTX_Q)
|
| 74 |
+
scores[:, :, mask == 0] = float("-inf")
|
| 75 |
+
if use_alibi:
|
| 76 |
+
scores += compute_alibi_tensor_ref(alibi_slopes, N_CTX_Q, N_CTX_K)
|
| 77 |
+
|
| 78 |
+
p = torch.softmax(scores, dim=-1)
|
| 79 |
+
if causal:
|
| 80 |
+
# If N_CTX_Q > N_CTX_K, there is at least one row of all -infs going into
|
| 81 |
+
# the softmax. This produces a row of NaNs as -inf - -inf == NaN. So we fix
|
| 82 |
+
# this by converting the NaNs to 0s, which is what they should be out of the softmax.
|
| 83 |
+
nan_mask = torch.isnan(p)
|
| 84 |
+
p[nan_mask == 1] = 0
|
| 85 |
+
ref_out = torch.einsum('bhqk,bhkd->bhqd', p.half(), v)
|
| 86 |
+
# compare
|
| 87 |
+
if layout == 'bshd':
|
| 88 |
+
ref_out = ref_out.transpose(1, 2).clone()
|
| 89 |
+
torch.testing.assert_close(ref_out, tri_out, atol=2e-2, rtol=2e-2)
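The NaN handling above matters whenever the causal mask leaves a query row fully masked; a minimal sketch of that failure mode with illustrative shapes:

import torch

scores = torch.full((3, 2), float("-inf"))        # rows that attend to nothing
scores[0, 0] = 0.0
p = torch.softmax(scores, dim=-1)                 # rows 1 and 2 come out as NaN
p[torch.isnan(p)] = 0.0                           # the same fix the reference code applies
assert not torch.isnan(p).any()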
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
@pytest.mark.parametrize('Z, H, N_CTX_Q, N_CTX_K, D_HEAD', [
|
| 93 |
+
(4, 48, 1024, 1024, 64),
|
| 94 |
+
(4, 12, 8192, 8192, 64),
|
| 95 |
+
(2, 4, 16384, 16384, 128),
|
| 96 |
+
(2, 16, 15498, 2, 128),
|
| 97 |
+
(2, 4, 7, 16219, 64),
|
| 98 |
+
(4, 48, 1, 1, 64),
|
| 99 |
+
(4, 48, 1, 1, 128),
|
| 100 |
+
(4, 48, 3, 3, 128),
|
| 101 |
+
(4, 48, 1001, 990, 64),
|
| 102 |
+
(1, 8, 8081, 7099, 64),
|
| 103 |
+
(1, 8, 16330, 15989, 128),
|
| 104 |
+
(4, 4, 1024, 1024, 33),
|
| 105 |
+
(4, 4, 65, 1019, 65),
|
| 106 |
+
(4, 4, 128, 128, 65),
|
| 107 |
+
# TODO: This config fails. Disabled until triaged and fixed.
|
| 108 |
+
# (2, 16, 1020, 987, 128),
|
| 109 |
+
# (4, 4, 113, 123, 1),
|
| 110 |
+
])
|
| 111 |
+
@pytest.mark.parametrize('causal', [True, False])
|
| 112 |
+
@pytest.mark.parametrize('use_bias', [True])
|
| 113 |
+
def test_op_fwd_prefill_bias(Z, H, N_CTX_Q, N_CTX_K, D_HEAD, causal, use_bias, dtype=torch.float16):
|
| 114 |
+
torch.manual_seed(20)
|
| 115 |
+
sm_scale = D_HEAD**-0.5
|
| 116 |
+
input_metadata = MetaData(sm_scale=sm_scale)
|
| 117 |
+
q, k, v, input_metadata = input_helper(Z, H, H, N_CTX_Q, N_CTX_K, D_HEAD, dtype, layout='bhsd')
|
| 118 |
+
if causal:
|
| 119 |
+
input_metadata.need_causal()
|
| 120 |
+
if use_bias:
|
| 121 |
+
bias = torch.randn((1, H, N_CTX_Q, N_CTX_K), dtype=torch.float32, device="cuda")
|
| 122 |
+
input_metadata.need_bias(bias, Z, H, N_CTX_Q, N_CTX_K)
|
| 123 |
+
else:
|
| 124 |
+
bias = None
|
| 125 |
+
o = torch.empty_like(q)
|
| 126 |
+
|
| 127 |
+
# triton implementation
|
| 128 |
+
tri_out, _, _ = attention_prefill(q, k, v, o, input_metadata)
|
| 129 |
+
# reference implementation
|
| 130 |
+
|
| 131 |
+
scores = torch.einsum('bhqd,bhkd->bhqk', q, k).float() * sm_scale
|
| 132 |
+
if causal:
|
| 133 |
+
mask = torch.tril(torch.ones(N_CTX_Q, N_CTX_K, device="cuda"), diagonal=N_CTX_K - N_CTX_Q)
|
| 134 |
+
scores[:, :, mask == 0] = float("-inf")
|
| 135 |
+
if use_bias:
|
| 136 |
+
scores += input_metadata.bias
|
| 137 |
+
p = torch.softmax(scores, dim=-1)
|
| 138 |
+
if causal:
|
| 139 |
+
# If N_CTX_Q > N_CTX_K, there is at least one row of all -infs going into
|
| 140 |
+
# the softmax. This produces a row of NaNs as -inf - -inf == NaN. So we fix
|
| 141 |
+
# this by converting the NaNs to 0s, which is what they should be out of the softmax.
|
| 142 |
+
nan_mask = torch.isnan(p)
|
| 143 |
+
p[nan_mask == 1] = 0
|
| 144 |
+
ref_out = torch.einsum('bhqk,bhkd->bhqd', p.half(), v)
|
| 145 |
+
# compare
|
| 146 |
+
torch.testing.assert_close(ref_out, tri_out, atol=2e-2, rtol=2e-2)
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
@pytest.mark.parametrize('Z, H, N_CTX, D_HEAD', [
|
| 150 |
+
(4, 48, 8192, 64),
|
| 151 |
+
(4, 48, 256, 64),
|
| 152 |
+
(4, 48, 512, 64),
|
| 153 |
+
(4, 48, 1024, 64),
|
| 154 |
+
(8, 48, 4096, 64),
|
| 155 |
+
(4, 48, 8192, 64),
|
| 156 |
+
(4, 48, 128, 128),
|
| 157 |
+
(4, 48, 4096, 128),
|
| 158 |
+
(4, 48, 16384, 128),
|
| 159 |
+
(4, 16, 1024, 128),
|
| 160 |
+
(4, 16, 8192, 128),
|
| 161 |
+
(32, 48, 8192, 128)
|
| 162 |
+
]
|
| 163 |
+
)
|
| 164 |
+
@pytest.mark.parametrize('causal', [True, False])
|
| 165 |
+
def test_op_varlen_fwd(Z, H, N_CTX, D_HEAD, causal, dtype=torch.float16):
|
| 166 |
+
|
| 167 |
+
q, k, v, input_metadata = varlen_input_helper(Z, H, H, N_CTX, N_CTX, D_HEAD, dtype)
|
| 168 |
+
|
| 169 |
+
tri_out = torch.empty_like(q)
|
| 170 |
+
ref_out = torch.empty_like(q)
|
| 171 |
+
|
| 172 |
+
for i in range(0, input_metadata.num_contexts):
|
| 173 |
+
start_q, start_k = input_metadata.cu_seqlens_q[i], input_metadata.cu_seqlens_k[i]
|
| 174 |
+
end_q, end_k = input_metadata.cu_seqlens_q[i + 1], input_metadata.cu_seqlens_k[i + 1]
|
| 175 |
+
scores = torch.einsum('qhd,khd->qhk', q[start_q:end_q], k[start_k:end_k]).float()
|
| 176 |
+
p = torch.softmax(scores * input_metadata.sm_scale, dim=-1).half()
|
| 177 |
+
ref_out[start_q:end_q] = torch.einsum('qhk,khd->qhd', p, v[start_k:end_k])
|
| 178 |
+
attention_prefill(q, k, v, tri_out, input_metadata)
|
| 179 |
+
torch.testing.assert_close(ref_out, tri_out, atol=ATOL, rtol=RTOL)
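A small standalone sketch of the cu_seqlens convention the varlen helpers rely on; the sequence lengths here are made up. The prefix sums delimit each packed sequence.

import torch

seqlens = torch.tensor([3, 5, 2])
cu_seqlens = torch.nn.functional.pad(torch.cumsum(seqlens, dim=0), (1, 0))   # tensor([0, 3, 8, 10])
packed = torch.randn(int(cu_seqlens[-1]), 4, 8)                              # (total_tokens, heads, head_dim)
for i in range(len(seqlens)):
    start, end = cu_seqlens[i], cu_seqlens[i + 1]
    assert packed[start:end].shape[0] == seqlens[i]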
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
@pytest.mark.parametrize('Z, HQ, HK, N_CTX, D_HEAD', [(2, 48, 24, 128, 64), (4, 48, 12, 256, 64), (4, 48, 4, 512, 64),
|
| 183 |
+
(4, 48, 2, 1024, 64), (8, 48, 6, 4096, 64), (4, 48, 8, 16384, 64),
|
| 184 |
+
(4, 64, 16, 128, 128), (4, 64, 4, 4096, 128),
|
| 185 |
+
(4, 64, 8, 16384, 128), (4, 16, 4, 1024, 128),
|
| 186 |
+
(4, 16, 2, 8192, 128), (32, 128, 32, 8192, 128)])
|
| 187 |
+
@pytest.mark.parametrize('causal', [False])
|
| 188 |
+
def test_op_varlen_mqa_fwd(Z, HQ, HK, N_CTX, D_HEAD, causal, dtype=torch.float16):
|
| 189 |
+
q, k, v, input_metadata = varlen_input_helper(Z, HQ, HK, N_CTX, N_CTX, D_HEAD, dtype)
|
| 190 |
+
ref_out = torch.empty_like(q)
|
| 191 |
+
tri_out = torch.empty_like(q)
|
| 192 |
+
# Give each of the HK KV heads HQ // HK copies ("groups"); the reshape below
|
| 193 |
+
# then aligns the head count with Q.
|
| 194 |
+
k_ref = k.view(k.shape[0], k.shape[1], 1, k.shape[2]).expand(-1, -1, HQ // HK, -1)
|
| 195 |
+
v_ref = v.view(v.shape[0], v.shape[1], 1, v.shape[2]).expand(-1, -1, HQ // HK, -1)
|
| 196 |
+
for i in range(0, input_metadata.num_contexts):
|
| 197 |
+
start_q, start_k = input_metadata.cu_seqlens_q[i], input_metadata.cu_seqlens_k[i]
|
| 198 |
+
end_q, end_k = input_metadata.cu_seqlens_q[i + 1], input_metadata.cu_seqlens_k[i + 1]
|
| 199 |
+
k_curr = k_ref[start_k:end_k]
|
| 200 |
+
k_curr = k_curr.reshape(k_curr.shape[0], -1, k_curr.shape[3])
|
| 201 |
+
v_curr = v_ref[start_k:end_k]
|
| 202 |
+
v_curr = v_curr.reshape(v_curr.shape[0], -1, v_curr.shape[3])
|
| 203 |
+
scores = torch.einsum('qhd,khd->qhk', q[start_q:end_q], k_curr).float()
|
| 204 |
+
p = torch.softmax(scores * input_metadata.sm_scale, dim=-1).half()
|
| 205 |
+
ref_out[start_q:end_q] = torch.einsum('qhk,khd->qhd', p, v_curr)
|
| 206 |
+
attention_prefill(q, k, v, tri_out, input_metadata)
|
| 207 |
+
torch.testing.assert_close(ref_out, tri_out, atol=ATOL, rtol=RTOL)
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
@pytest.mark.parametrize('Z, H, N_CTX_Q, N_CTX_K, D_HEAD', [
|
| 211 |
+
# smallest config test
|
| 212 |
+
(1, 1, 16, 16, 64), # pass on new # fail on old
|
| 213 |
+
(1, 1, 32, 32, 64), # pass on new # fail on old
|
| 214 |
+
(1, 1, 64, 64, 16), # pass # smallest head_size = 16
|
| 215 |
+
(1, 1, 64, 64, 64), # pass # smallest seq len seems to be 64
|
| 216 |
+
(1, 1, 128, 128, 64), # pass
|
| 217 |
+
(1, 1, 256, 256, 64), # pass
|
| 218 |
+
(1, 1, 512, 512, 64), # pass
|
| 219 |
+
# failing FA
|
| 220 |
+
(1, 1, 256, 512, 16),
|
| 221 |
+
# old tests that work
|
| 222 |
+
(4, 48, 1024, 1024, 64), # pass
|
| 223 |
+
(4, 48, 2048, 2048, 64), # pass
|
| 224 |
+
(2, 48, 4096, 4096, 64), # pass
|
| 225 |
+
(1, 16, 1024, 1024, 64), # pass
|
| 226 |
+
(1, 16, 1024, 1024, 128), # pass
|
| 227 |
+
# old tests that were commented out
|
| 228 |
+
# (1, 16, 8192, 8192, 63),
|
| 229 |
+
# (1, 16, 1022, 1022, 64),
|
| 230 |
+
])
|
| 231 |
+
# @pytest.mark.parametrize('torch_sdpa_test', [False, True])
|
| 232 |
+
@pytest.mark.parametrize('torch_sdpa_test', [False])
|
| 233 |
+
# @pytest.mark.parametrize('causal', [True, False])
|
| 234 |
+
@pytest.mark.parametrize('causal', [False])
|
| 235 |
+
# @pytest.mark.parametrize('use_alibi', [False, True])
|
| 236 |
+
@pytest.mark.parametrize('use_alibi', [False])
|
| 237 |
+
def test_op_bwd(Z, H, N_CTX_Q, N_CTX_K, D_HEAD, causal, torch_sdpa_test, use_alibi, dtype=torch.float16):
|
| 238 |
+
torch.manual_seed(20)
|
| 239 |
+
|
| 240 |
+
DEBUG_INPUT = False
|
| 241 |
+
|
| 242 |
+
# seqlens
|
| 243 |
+
seqlen_q = N_CTX_Q
|
| 244 |
+
seqlen_k = N_CTX_K
|
| 245 |
+
|
| 246 |
+
# set up metadata
|
| 247 |
+
if DEBUG_INPUT:
|
| 248 |
+
sm_scale = 1
|
| 249 |
+
else:
|
| 250 |
+
sm_scale = D_HEAD**-0.5
|
| 251 |
+
input_metadata = MetaData(sm_scale=sm_scale)
|
| 252 |
+
input_metadata.max_seqlens_q = seqlen_q
|
| 253 |
+
input_metadata.max_seqlens_k = seqlen_k
|
| 254 |
+
input_metadata.layout = "bhsd"
|
| 255 |
+
|
| 256 |
+
dropout_p = 0
|
| 257 |
+
if DEBUG_INPUT:
|
| 258 |
+
q = torch.arange(seqlen_q, dtype=dtype, device="cuda").view(1, 1, seqlen_q, 1).expand(Z, H, seqlen_q, D_HEAD).requires_grad_()
|
| 259 |
+
k = torch.arange(seqlen_k, dtype=dtype, device="cuda").view(1, 1, seqlen_k, 1).expand(Z, H, seqlen_k, D_HEAD).requires_grad_()
|
| 260 |
+
v = torch.arange(seqlen_k, dtype=dtype, device="cuda").view(1, 1, seqlen_k, 1).expand(Z, H, seqlen_k, D_HEAD).requires_grad_()
|
| 261 |
+
o = torch.zeros_like(q)
|
| 262 |
+
else:
|
| 263 |
+
# Generate random inputs
|
| 264 |
+
q = torch.randn(Z, H, N_CTX_Q, D_HEAD, device='cuda', dtype=dtype, requires_grad=True)
|
| 265 |
+
k = torch.randn(Z, H, N_CTX_K, D_HEAD, device='cuda', dtype=dtype, requires_grad=True)
|
| 266 |
+
v = torch.randn(Z, H, N_CTX_K, D_HEAD, device='cuda', dtype=dtype, requires_grad=True)
|
| 267 |
+
o = torch.empty_like(q)
|
| 268 |
+
|
| 269 |
+
if causal:
|
| 270 |
+
input_metadata.need_causal()
|
| 271 |
+
|
| 272 |
+
if use_alibi and not torch_sdpa_test:
|
| 273 |
+
# for n heads the set of slopes is the geometric sequence that starts at 2^(-8/n)
|
| 274 |
+
alibi_slopes = torch.tensor([2**(-8 / H * i) for i in range(1, H + 1)], dtype=torch.float32,
|
| 275 |
+
device="cuda").repeat(Z, 1)
|
| 276 |
+
input_metadata.need_alibi(alibi_slopes, Z, H)
|
| 277 |
+
|
| 278 |
+
if DEBUG_INPUT:
|
| 279 |
+
dout = torch.ones_like(q)
|
| 280 |
+
else:
|
| 281 |
+
dout = torch.randn_like(q)
|
| 282 |
+
|
| 283 |
+
# reference implementation
|
| 284 |
+
if torch_sdpa_test:
|
| 285 |
+
ref_out, ref_softmax = torch.ops.aten._scaled_dot_product_attention_math(q, k, v, dropout_p=dropout_p,
|
| 286 |
+
is_causal=causal, scale=sm_scale,
|
| 287 |
+
dropout_mask=None)
|
| 288 |
+
ref_out.backward(dout.to(device=ref_out.device, dtype=ref_out.dtype))
|
| 289 |
+
ref_dv, v.grad = v.grad.clone(), None
|
| 290 |
+
ref_dk, k.grad = k.grad.clone(), None
|
| 291 |
+
ref_dq, q.grad = q.grad.clone(), None
|
| 292 |
+
else:
|
| 293 |
+
M = torch.tril(torch.ones((seqlen_q, seqlen_k), device="cuda"))
|
| 294 |
+
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale
|
| 295 |
+
if use_alibi:
|
| 296 |
+
p += compute_alibi_tensor_ref(alibi_slopes, N_CTX_Q, N_CTX_K)
|
| 297 |
+
if causal:
|
| 298 |
+
p[:, :, M == 0] = float("-inf")
|
| 299 |
+
|
| 300 |
+
p = torch.softmax(p.float(), dim=-1).type(dtype=p.dtype)
|
| 301 |
+
ref_out = torch.matmul(p, v)
|
| 302 |
+
ref_out.backward(dout)
|
| 303 |
+
ref_dv, v.grad = v.grad.clone(), None
|
| 304 |
+
ref_dk, k.grad = k.grad.clone(), None
|
| 305 |
+
ref_dq, q.grad = q.grad.clone(), None
|
| 306 |
+
|
| 307 |
+
# # triton implementation
|
| 308 |
+
tri_out, _, _ = attention_prefill(q, k, v, o, input_metadata)
|
| 309 |
+
tri_out.backward(dout)
|
| 310 |
+
tri_dv, v.grad = v.grad.clone(), None
|
| 311 |
+
tri_dk, k.grad = k.grad.clone(), None
|
| 312 |
+
tri_dq, q.grad = q.grad.clone(), None
|
| 313 |
+
# compare
|
| 314 |
+
if DEBUG:
|
| 315 |
+
print("tri_out:", tri_out)
|
| 316 |
+
print("ref_out:",ref_out )
|
| 317 |
+
torch.testing.assert_close(ref_out, tri_out, atol=1e-2, rtol=0)
|
| 318 |
+
|
| 319 |
+
# The current block size for MI200 series is 64x64. This results in
|
| 320 |
+
# larger differences in float results due to rounding.
|
| 321 |
+
if dtype == torch.bfloat16:
|
| 322 |
+
ATOL = 1e-1 * max(1.0, (seqlen_q + D_HEAD) / 64.0)
|
| 323 |
+
if dtype == torch.float32:
|
| 324 |
+
ATOL = 1e-3 * max(1.0, (seqlen_q + D_HEAD) / 64.0)
|
| 325 |
+
else:
|
| 326 |
+
ATOL = 1e-1 * max(1.0, (seqlen_q + D_HEAD) / 64.0)
|
| 327 |
+
|
| 328 |
+
RTOL = 0
|
| 329 |
+
|
| 330 |
+
if DEBUG:
|
| 331 |
+
print("ref_dv:", ref_dv)
|
| 332 |
+
print("tri_dv:", tri_dv)
|
| 333 |
+
print("ref_dk:", ref_dk)
|
| 334 |
+
print("tri_dk:", tri_dk)
|
| 335 |
+
print("ref_dq:", ref_dq)
|
| 336 |
+
print("tri_dq:", tri_dq)
|
| 337 |
+
|
| 338 |
+
torch.testing.assert_close(ref_dv, tri_dv, atol=ATOL, rtol=RTOL)
|
| 339 |
+
torch.testing.assert_close(ref_dk, tri_dk, atol=ATOL, rtol=RTOL)
|
| 340 |
+
torch.testing.assert_close(ref_dq, tri_dq, atol=ATOL, rtol=RTOL)
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
@pytest.mark.parametrize('Z, H, N_CTX_Q, N_CTX_K, D_HEAD', [
|
| 344 |
+
(1, 1, 1, 1, 1),
|
| 345 |
+
(1, 1, 2, 4, 16),
|
| 346 |
+
(1, 1, 4, 2, 16),
|
| 347 |
+
(1, 1, 4, 4, 16),
|
| 348 |
+
(1, 2, 4, 4, 16),
|
| 349 |
+
(2, 1, 4, 4, 16),
|
| 350 |
+
(2, 2, 4, 4, 16),
|
| 351 |
+
(1, 1, 128, 64, 16),
|
| 352 |
+
(2, 2, 2, 128, 1),
|
| 353 |
+
(2, 3, 2, 128, 16),
|
| 354 |
+
(3, 2, 256, 512, 16),
|
| 355 |
+
(3, 3, 128, 128, 64),
|
| 356 |
+
(2, 4, 1024, 1024, 64),
|
| 357 |
+
(4, 6, 108, 256, 224),
|
| 358 |
+
(4, 8, 2048, 2048, 128),
|
| 359 |
+
(4, 16, 4096, 4096, 64),
|
| 360 |
+
(2, 4, 8192, 8192, 32),
|
| 361 |
+
# # fa configs
|
| 362 |
+
(4, 6, 113, 203, 256),
|
| 363 |
+
(4, 6, 128, 217, 256),
|
| 364 |
+
(4, 6, 113, 211, 128),
|
| 365 |
+
(4, 6, 108, 256, 128),
|
| 366 |
+
(4, 6, 256, 512, 64),
|
| 367 |
+
(4, 6, 512, 256, 64),
|
| 368 |
+
(4, 6, 1024, 1024, 32),
|
| 369 |
+
(4, 6, 1023, 1024, 32),
|
| 370 |
+
(4, 6, 1024, 1023, 32),
|
| 371 |
+
(4, 6, 2048, 2048, 32),
|
| 372 |
+
])
|
| 373 |
+
@pytest.mark.parametrize('causal', [True, False])
|
| 374 |
+
@pytest.mark.parametrize('return_scores', [False])
|
| 375 |
+
@pytest.mark.parametrize('layout', ["bhsd", "bshd", "thd"])
|
| 376 |
+
@pytest.mark.parametrize('use_exp2', [True, False]) # works when use_exp2 is false
|
| 377 |
+
@pytest.mark.parametrize('DEBUG_INPUT', [False]) # NOTE: debug input can overflow when the tensors are large. Use it only to track down issues
|
| 378 |
+
def test_op_prefill_fwd_impl(Z, H, N_CTX_Q, N_CTX_K, D_HEAD, causal, return_scores, layout, use_exp2, DEBUG_INPUT):
|
| 379 |
+
dtype = torch.float16
|
| 380 |
+
torch.manual_seed(0)
|
| 381 |
+
alibi_slopes = None
|
| 382 |
+
dropout_p = 0.0
|
| 383 |
+
device = "cuda"
|
| 384 |
+
|
| 385 |
+
if layout == "thd":
|
| 386 |
+
q, k, v, metadata = varlen_input_helper(Z, H, H, N_CTX_Q, N_CTX_K, D_HEAD, dtype, device=device, DEBUG_INPUT=DEBUG_INPUT)
|
| 387 |
+
else:
|
| 388 |
+
q, k, v, metadata = input_helper(Z, H, H, N_CTX_Q, N_CTX_K, D_HEAD, dtype, layout, device=device, DEBUG_INPUT=DEBUG_INPUT)
|
| 389 |
+
if DEBUG_INPUT:
|
| 390 |
+
output_triton = torch.zeros_like(q).contiguous()
|
| 391 |
+
else:
|
| 392 |
+
output_triton = torch.empty_like(q)
|
| 393 |
+
|
| 394 |
+
# update metadata
|
| 395 |
+
metadata.use_exp2 = use_exp2
|
| 396 |
+
if causal:
|
| 397 |
+
metadata.need_causal()
|
| 398 |
+
|
| 399 |
+
# NOTE: the returned scores are not the same as the reference because they would need to be rescaled as new per-block maxes are found, which we do not do
|
| 400 |
+
if return_scores:
|
| 401 |
+
metadata.return_scores = True
|
| 402 |
+
|
| 403 |
+
# call Triton's forward implementation directly
|
| 404 |
+
( output_triton,
|
| 405 |
+
softmax_lse_triton,
|
| 406 |
+
exp_scores_triton,
|
| 407 |
+
_,
|
| 408 |
+
_,
|
| 409 |
+
_,
|
| 410 |
+
_,
|
| 411 |
+
_,
|
| 412 |
+
_) = attention_prefill_forward_triton_impl(
|
| 413 |
+
q,
|
| 414 |
+
k,
|
| 415 |
+
v,
|
| 416 |
+
output_triton,
|
| 417 |
+
metadata.sm_scale,
|
| 418 |
+
metadata.alibi_slopes,
|
| 419 |
+
metadata.causal,
|
| 420 |
+
metadata.bias,
|
| 421 |
+
metadata.dropout_p,
|
| 422 |
+
metadata.layout,
|
| 423 |
+
metadata.cu_seqlens_q,
|
| 424 |
+
metadata.cu_seqlens_k,
|
| 425 |
+
metadata.max_seqlens_q,
|
| 426 |
+
metadata.max_seqlens_k,
|
| 427 |
+
metadata.return_scores,
|
| 428 |
+
metadata.use_exp2)
|
| 429 |
+
|
| 430 |
+
(
|
| 431 |
+
output_ref,
|
| 432 |
+
softmax_lse_ref,
|
| 433 |
+
exp_scores_ref,
|
| 434 |
+
softmax_ref,
|
| 435 |
+
attention_shifted_scaled_scores_ref,
|
| 436 |
+
attention_scaled_scores_ref,
|
| 437 |
+
attention_scores_ref,
|
| 438 |
+
) = attention_forward_pytorch_ref_impl(
|
| 439 |
+
q.clone(),
|
| 440 |
+
k.clone(),
|
| 441 |
+
v.clone(),
|
| 442 |
+
metadata.sm_scale,
|
| 443 |
+
causal,
|
| 444 |
+
layout,
|
| 445 |
+
metadata.cu_seqlens_q,
|
| 446 |
+
metadata.cu_seqlens_k,
|
| 447 |
+
metadata.max_seqlens_q,
|
| 448 |
+
metadata.max_seqlens_k,
|
| 449 |
+
use_exp2
|
| 450 |
+
)
|
| 451 |
+
|
| 452 |
+
if DEBUG:
|
| 453 |
+
print("softmax_lse_triton:", softmax_lse_triton, softmax_lse_triton.shape)
|
| 454 |
+
print("softmax_lse_ref:", softmax_lse_ref, softmax_lse_ref.shape)
|
| 455 |
+
torch.testing.assert_close(softmax_lse_triton, softmax_lse_ref, atol=ATOL, rtol=RTOL)
|
| 456 |
+
|
| 457 |
+
if layout != "thd":
|
| 458 |
+
# use the lse trick to recover the softmax from the scaled scores: softmax = exp(scaled_scores - lse)
|
| 459 |
+
softmax_triton = torch.exp(attention_scaled_scores_ref - softmax_lse_triton.unsqueeze(-1))
|
| 460 |
+
if DEBUG:
|
| 461 |
+
print("attention_scaled_scores_ref:", attention_scaled_scores_ref, attention_scaled_scores_ref.shape)
|
| 462 |
+
print("softmax_lse_triton:", softmax_lse_triton, softmax_lse_triton.shape)
|
| 463 |
+
print("softmax_triton:", softmax_triton, softmax_triton.shape)
|
| 464 |
+
print("softmax_ref:", softmax_ref, softmax_ref.shape)
|
| 465 |
+
torch.testing.assert_close(softmax_triton, softmax_ref, atol=ATOL, rtol=RTOL)
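The identity behind the lse trick used above, in isolation: softmax(x) equals exp(x - logsumexp(x)). A minimal check on random values, with no attention semantics attached:

import torch

x = torch.randn(2, 5, dtype=torch.float64)
lse = torch.logsumexp(x, dim=-1, keepdim=True)
torch.testing.assert_close(torch.exp(x - lse), torch.softmax(x, dim=-1))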
|
| 466 |
+
|
| 467 |
+
if DEBUG:
|
| 468 |
+
print("output_triton:", output_triton, output_triton.shape)
|
| 469 |
+
print("output_ref:", output_ref, output_ref.shape)
|
| 470 |
+
torch.testing.assert_close(output_triton, output_ref, atol=ATOL, rtol=RTOL)
|
| 471 |
+
|
| 472 |
+
|
| 473 |
+
# compare with pytorch, except for the thd layout and causal cases, whose impls differ
|
| 474 |
+
if False and layout in ["bhsd", "bshd"] and not causal:
|
| 475 |
+
out_pytorch, softmax_pytorch = torch.ops.aten._scaled_dot_product_attention_math(
|
| 476 |
+
q.transpose(1, 2) if layout == "bshd" else q ,
|
| 477 |
+
k.transpose(1, 2) if layout == "bshd" else k,
|
| 478 |
+
v.transpose(1, 2) if layout == "bshd" else v,
|
| 479 |
+
dropout_p=dropout_p,
|
| 480 |
+
is_causal=causal, scale=metadata.sm_scale,
|
| 481 |
+
dropout_mask=None)
|
| 482 |
+
out_pytorch = out_pytorch.transpose(1, 2) if layout == "bshd" else out_pytorch
|
| 483 |
+
|
| 484 |
+
if DEBUG:
|
| 485 |
+
print("o:", output_triton, output_triton.shape)
|
| 486 |
+
print("out_pytorch:", out_pytorch, out_pytorch.shape)
|
| 487 |
+
torch.testing.assert_close(output_triton, out_pytorch, atol=ATOL, rtol=RTOL)
|
| 488 |
+
|
| 489 |
+
# compare with pytorch output
|
| 490 |
+
if DEBUG:
|
| 491 |
+
print("softmax_triton:", softmax_triton, softmax_triton.shape)
|
| 492 |
+
print("softmax_pytorch:", softmax_pytorch, softmax_pytorch.shape)
|
| 493 |
+
torch.testing.assert_close(softmax_triton, softmax_pytorch.to(torch.float32), atol=ATOL, rtol=RTOL)
|
| 494 |
+
|
| 495 |
+
|
| 496 |
+
@pytest.mark.parametrize('Z, H, N_CTX_Q, N_CTX_K, D_HEAD', [
|
| 497 |
+
(1, 1, 1, 1, 1),
|
| 498 |
+
(1, 1, 4, 4, 4),
|
| 499 |
+
(2, 1, 4, 4, 16),
|
| 500 |
+
(1, 2, 4, 4, 16),
|
| 501 |
+
(2, 2, 4, 4, 16),
|
| 502 |
+
(1, 1, 4, 4, 16),
|
| 503 |
+
(2, 1, 4, 4, 16),
|
| 504 |
+
(4, 6, 8, 8, 16),
|
| 505 |
+
(1, 1, 4, 4, 32),
|
| 506 |
+
(1, 1, 16, 16, 16),
|
| 507 |
+
(1, 1, 32, 32, 16),
|
| 508 |
+
(1, 1, 64, 64, 16),
|
| 509 |
+
(1, 1, 64, 64, 64),
|
| 510 |
+
(1, 1, 64, 128, 32),
|
| 511 |
+
(1, 1, 128, 128, 64),
|
| 512 |
+
(1, 1, 128, 256, 45),
|
| 513 |
+
(1, 1, 113, 203, 192),
|
| 514 |
+
(1, 1, 256, 256, 64),
|
| 515 |
+
(1, 1, 256, 512, 16),
|
| 516 |
+
(1, 1, 512, 512, 64),
|
| 517 |
+
(1, 1, 1024, 1024, 64),
|
| 518 |
+
# fa configs
|
| 519 |
+
(2, 2, 128, 128, 65),
|
| 520 |
+
(2, 2, 128, 128, 224),
|
| 521 |
+
(4, 6, 108, 256, 224),
|
| 522 |
+
(1, 1, 256, 512, 16),
|
| 523 |
+
# old tests that work
|
| 524 |
+
(4, 48, 1024, 1024, 73),
|
| 525 |
+
(4, 48, 1024, 1024, 64),
|
| 526 |
+
(4, 48, 2048, 2048, 64),
|
| 527 |
+
(1, 24, 4096, 4096, 64),
|
| 528 |
+
(1, 16, 1024, 1024, 64),
|
| 529 |
+
(1, 16, 1024, 1024, 128),
|
| 530 |
+
])
|
| 531 |
+
@pytest.mark.parametrize('causal', [True, False])
|
| 532 |
+
@pytest.mark.parametrize('use_exp2', [False]) # FIXME: using exp2 causes issues when used with causal
|
| 533 |
+
@pytest.mark.parametrize('layout', ["bhsd", "bshd", "thd"])
|
| 534 |
+
@pytest.mark.parametrize('sequence_parallel', [True, False])
|
| 535 |
+
@pytest.mark.parametrize('DEBUG_INPUT', [False]) # debug input causes nans in both the new and old backends
|
| 536 |
+
def test_op_prefill_bwd_impl(Z, H, N_CTX_Q, N_CTX_K, D_HEAD, causal, use_exp2, layout, sequence_parallel, DEBUG_INPUT):
|
| 537 |
+
dtype = torch.float16
|
| 538 |
+
torch.manual_seed(20) # seed from test_op_bwd
|
| 539 |
+
|
| 540 |
+
alibi_slopes = None
|
| 541 |
+
if layout == "thd":
|
| 542 |
+
q, k, v, metadata = varlen_input_helper(Z, H, H, N_CTX_Q, N_CTX_K, D_HEAD, dtype, DEBUG_INPUT=DEBUG_INPUT)
|
| 543 |
+
else:
|
| 544 |
+
q, k, v, metadata = input_helper(Z, H, H, N_CTX_Q, N_CTX_K, D_HEAD, dtype, layout, DEBUG_INPUT=DEBUG_INPUT)
|
| 545 |
+
if DEBUG_INPUT:
|
| 546 |
+
do = torch.ones_like(q).contiguous()
|
| 547 |
+
else:
|
| 548 |
+
do = torch.randn_like(q)
|
| 549 |
+
|
| 550 |
+
# =============================================== Reference ==============================================================
|
| 551 |
+
q_ref = q.clone()
|
| 552 |
+
k_ref = k.clone()
|
| 553 |
+
v_ref = v.clone()
|
| 554 |
+
(
|
| 555 |
+
o_ref,
|
| 556 |
+
softmax_lse_ref,
|
| 557 |
+
_,
|
| 558 |
+
_,
|
| 559 |
+
_,
|
| 560 |
+
_,
|
| 561 |
+
_,
|
| 562 |
+
) = attention_forward_pytorch_ref_impl(
|
| 563 |
+
q_ref,
|
| 564 |
+
k_ref,
|
| 565 |
+
v_ref,
|
| 566 |
+
metadata.sm_scale,
|
| 567 |
+
causal,
|
| 568 |
+
layout,
|
| 569 |
+
metadata.cu_seqlens_q,
|
| 570 |
+
metadata.cu_seqlens_k,
|
| 571 |
+
metadata.max_seqlens_q,
|
| 572 |
+
metadata.max_seqlens_k,
|
| 573 |
+
use_exp2
|
| 574 |
+
)
|
| 575 |
+
|
| 576 |
+
dq = torch.zeros_like(q, dtype=q.dtype) # NOTE: the kernel accumulates into dq in place, so dq has to start as zeros
|
| 577 |
+
if DEBUG_INPUT:
|
| 578 |
+
dk = torch.zeros_like(k, dtype=k.dtype)
|
| 579 |
+
dv = torch.zeros_like(v, dtype=v.dtype)
|
| 580 |
+
else:
|
| 581 |
+
dk = torch.empty_like(k, dtype=k.dtype)
|
| 582 |
+
dv = torch.empty_like(v, dtype=v.dtype)
|
| 583 |
+
|
| 584 |
+
do_ref = do.clone()
|
| 585 |
+
dq_ref, dk_ref, dv_ref, delta_ref = attention_backward_pytorch_ref_impl(
|
| 586 |
+
do_ref,
|
| 587 |
+
q_ref,
|
| 588 |
+
k_ref,
|
| 589 |
+
v_ref,
|
| 590 |
+
o_ref,
|
| 591 |
+
softmax_lse_ref,
|
| 592 |
+
metadata.sm_scale,
|
| 593 |
+
causal,
|
| 594 |
+
layout,
|
| 595 |
+
metadata.cu_seqlens_q,
|
| 596 |
+
metadata.cu_seqlens_k,
|
| 597 |
+
metadata.max_seqlens_q,
|
| 598 |
+
metadata.max_seqlens_k,
|
| 599 |
+
use_exp2
|
| 600 |
+
)
|
| 601 |
+
|
| 602 |
+
# =============================================== Triton ==============================================================
|
| 603 |
+
o = o_ref.clone().contiguous()
|
| 604 |
+
softmax_lse = softmax_lse_ref.clone().contiguous()
|
| 605 |
+
dq_triton, dk_triton, dv_triton, delta_triton, _, _ = attention_prefill_backward_triton_impl(
|
| 606 |
+
do,
|
| 607 |
+
q,
|
| 608 |
+
k,
|
| 609 |
+
v,
|
| 610 |
+
o,
|
| 611 |
+
softmax_lse,
|
| 612 |
+
dq,
|
| 613 |
+
dk,
|
| 614 |
+
dv,
|
| 615 |
+
metadata.sm_scale,
|
| 616 |
+
alibi_slopes,
|
| 617 |
+
causal,
|
| 618 |
+
layout,
|
| 619 |
+
metadata.cu_seqlens_q,
|
| 620 |
+
metadata.cu_seqlens_k,
|
| 621 |
+
metadata.max_seqlens_q,
|
| 622 |
+
metadata.max_seqlens_k,
|
| 623 |
+
use_exp2,
|
| 624 |
+
sequence_parallel=sequence_parallel
|
| 625 |
+
)
|
| 626 |
+
|
| 627 |
+
# =============================================== Check ==============================================================
|
| 628 |
+
if DEBUG:
|
| 629 |
+
print()
|
| 630 |
+
if DEBUG:
|
| 631 |
+
print("delta_triton:", delta_triton, delta_triton.shape)
|
| 632 |
+
print("delta_ref:", delta_ref, delta_ref.shape)
|
| 633 |
+
torch.testing.assert_close(delta_triton, delta_ref, atol=ATOL, rtol=RTOL, equal_nan=EQUAL_NAN)
|
| 634 |
+
|
| 635 |
+
if DEBUG:
|
| 636 |
+
print("dv_triton:", dv_triton, dv_triton.shape)
|
| 637 |
+
print("dv_ref:", dv_ref, dv_ref.shape)
|
| 638 |
+
torch.testing.assert_close(dv_triton, dv_ref, atol=ATOL, rtol=RTOL, equal_nan=EQUAL_NAN)
|
| 639 |
+
|
| 640 |
+
if DEBUG:
|
| 641 |
+
print("dk_triton:", dk_triton, dk_triton.shape)
|
| 642 |
+
print("dk_ref:", dk_ref, dk_ref.shape)
|
| 643 |
+
torch.testing.assert_close(dk_triton, dk_ref, atol=ATOL, rtol=RTOL, equal_nan=EQUAL_NAN)
|
| 644 |
+
|
| 645 |
+
if DEBUG:
|
| 646 |
+
print("dq_triton:", dq_triton, dq_triton.shape)
|
| 647 |
+
print("dq_ref:", dq_ref, dq_ref.shape)
|
| 648 |
+
torch.testing.assert_close(dq_triton, dq_ref, atol=ATOL, rtol=RTOL, equal_nan=EQUAL_NAN)
|
| 649 |
+
|
| 650 |
+
|
| 651 |
+
@pytest.mark.parametrize('batch_size, seqlen_q, seqlen_k, group_q, group_k, dim', get_input_shapes())
|
| 652 |
+
def test_op_fwd_decode(batch_size, seqlen_q, seqlen_k, group_q, group_k, dim, dtype=torch.bfloat16):
|
| 653 |
+
if DEBUG:
|
| 654 |
+
print()
|
| 655 |
+
print(f"batch_size = {batch_size}, seqlen_q = {seqlen_q}, seqlen_k = {seqlen_k}, group_q = {group_q}, group_k = {group_k}, dim = {dim}")
|
| 656 |
+
torch.manual_seed(20)
|
| 657 |
+
query_group_head_size = (group_q + group_k - 1) // group_k
|
| 658 |
+
q = (torch.empty((batch_size, seqlen_q, group_k, query_group_head_size, dim), dtype=dtype,
|
| 659 |
+
device="cuda").normal_(mean=0., std=0.5).requires_grad_())
|
| 660 |
+
k = (torch.empty((batch_size, seqlen_k, group_k, 1, dim), dtype=dtype,
|
| 661 |
+
device="cuda").normal_(mean=0.,
|
| 662 |
+
std=0.5).requires_grad_()).expand(-1, -1, -1, query_group_head_size, -1)
|
| 663 |
+
v = (torch.empty((batch_size, seqlen_k, group_k, 1, dim), dtype=dtype,
|
| 664 |
+
device="cuda").normal_(mean=0.,
|
| 665 |
+
std=0.5).requires_grad_()).expand(-1, -1, -1, query_group_head_size, -1)
|
| 666 |
+
scale = 1 / dim**0.5
|
| 667 |
+
input_metadata = MetaData(sm_scale=scale)
|
| 668 |
+
input_metadata.layout = "bsghd"
|
| 669 |
+
tri_out, _ = attention_decode(q, k, v, input_metadata)
|
| 670 |
+
|
| 671 |
+
q = q.reshape([batch_size, seqlen_q, -1, dim]).permute(0, 2, 1, 3)
|
| 672 |
+
k = k.reshape([batch_size, seqlen_k, -1, dim]).permute(0, 2, 1, 3)
|
| 673 |
+
v = v.reshape([batch_size, seqlen_k, -1, dim]).permute(0, 2, 1, 3)
|
| 674 |
+
attn = (q @ k.transpose(-1, -2) * scale).softmax(-1)
|
| 675 |
+
ref_out = attn @ v
|
| 676 |
+
|
| 677 |
+
# compare
|
| 678 |
+
torch.testing.assert_close(ref_out, tri_out, atol=1e-3, rtol=0)
|
| 679 |
+
|
| 680 |
+
def test_quantization():
|
| 681 |
+
a = torch.randn((2, 4, 32), dtype=torch.float16, device='cuda')
|
| 682 |
+
qa = quantize_kv_int4(a, num_groups=4)
|
| 683 |
+
dqa = dequantize_kv_fp16(qa, num_groups=4)
|
| 684 |
+
torch.testing.assert_close(a, dqa, atol=1.5e-1, rtol=1e-1)
|
| 685 |
+
|
| 686 |
+
@pytest.mark.parametrize('B, Mq, Mkv, Hq, Hkv, K', get_input_shapes())
|
| 687 |
+
def test_op_fwd_decode_int4_kv(B, Mq, Mkv, Hq, Hkv, K, dtype=torch.float16):
|
| 688 |
+
pytest.skip("Decode kernel doesnot support quantization yet")
|
| 689 |
+
torch.manual_seed(2)
|
| 690 |
+
q = (torch.empty((B, Mq, Hkv, (Hq + Hkv - 1) // Hkv, K), dtype=dtype,
|
| 691 |
+
device="cuda").normal_(mean=1.0, std=0.5).requires_grad_())
|
| 692 |
+
k = (torch.empty((B, Mkv, Hkv, 1, K), dtype=dtype,
|
| 693 |
+
device="cuda").normal_(mean=1.0,
|
| 694 |
+
std=0.5).requires_grad_()).expand(-1, -1, -1, (Hq + Hkv - 1) // Hkv, -1)
|
| 695 |
+
v = (torch.empty((B, Mkv, Hkv, 1, K), dtype=dtype,
|
| 696 |
+
device="cuda").normal_(mean=1.0,
|
| 697 |
+
std=0.5).requires_grad_()).expand(-1, -1, -1, (Hq + Hkv - 1) // Hkv, -1)
|
| 698 |
+
|
| 699 |
+
num_groups = 1
|
| 700 |
+
quant_k = (quantize_kv_int4(k, num_groups=num_groups).contiguous().view(torch.int32))
|
| 701 |
+
quant_v = (quantize_kv_int4(v, num_groups=num_groups).contiguous().view(torch.int32))
|
| 702 |
+
scale = 1 / K**0.5
|
| 703 |
+
input_metadata = MetaData(sm_scale=scale)
|
| 704 |
+
input_metadata.layout = "bsghd"
|
| 705 |
+
tri_out, _ = attention_decode(q, quant_k, quant_v, input_metadata)
|
| 706 |
+
|
| 707 |
+
q = q.reshape([B, Mq, -1, K]).permute(0, 2, 1, 3)
|
| 708 |
+
k = k.reshape([B, Mkv, -1, K]).permute(0, 2, 1, 3)
|
| 709 |
+
v = v.reshape([B, Mkv, -1, K]).permute(0, 2, 1, 3)
|
| 710 |
+
attn = (q @ k.transpose(-1, -2) * scale).softmax(-1)
|
| 711 |
+
ref_out = attn @ v
|
| 712 |
+
# compare
|
| 713 |
+
torch.testing.assert_close(ref_out, tri_out, atol=2.1e-2, rtol=0)
|
| 714 |
+
|
| 715 |
+
# since quantization introduces rounding error, use the
|
| 716 |
+
# dequantized kv as inputs to the ref implementation to reduce
|
| 717 |
+
# the tolerance to 1e-3
|
| 718 |
+
dqk = dequantize_kv_fp16(quant_k, num_groups=num_groups)
|
| 719 |
+
dqv = dequantize_kv_fp16(quant_v, num_groups=num_groups)
|
| 720 |
+
dqk = dqk.reshape([B, Mkv, -1, K]).permute(0, 2, 1, 3)
|
| 721 |
+
dqv = dqv.reshape([B, Mkv, -1, K]).permute(0, 2, 1, 3)
|
| 722 |
+
dq_attn = (q @ dqk.transpose(-1, -2) * scale).softmax(-1)
|
| 723 |
+
dq_ref_out = dq_attn @ dqv
|
| 724 |
+
torch.testing.assert_close(dq_ref_out, tri_out, atol=1e-3, rtol=0)
|
infer_4_30_0/lib/python3.10/site-packages/flash_attn/flash_attn_triton_og.py
ADDED
|
@@ -0,0 +1,365 @@
| 1 |
+
# [2022-10-23] Downloaded from https://github.com/openai/triton/blob/master/python/tutorials/06-fused-attention.py
|
| 2 |
+
# for benchmarking.
|
| 3 |
+
# We fixed a few dtype casts to make it work for bf16
|
| 4 |
+
|
| 5 |
+
"""
|
| 6 |
+
Fused Attention
|
| 7 |
+
===============
|
| 8 |
+
This is a Triton implementation of the Flash Attention algorithm
|
| 9 |
+
(see: Dao et al., https://arxiv.org/pdf/2205.14135v2.pdf; Rabe and Staats https://arxiv.org/pdf/2112.05682v2.pdf)
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import pytest
|
| 13 |
+
import torch
|
| 14 |
+
import triton
|
| 15 |
+
import triton.language as tl
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
@triton.jit
|
| 19 |
+
def _fwd_kernel(
|
| 20 |
+
Q,
|
| 21 |
+
K,
|
| 22 |
+
V,
|
| 23 |
+
sm_scale,
|
| 24 |
+
TMP,
|
| 25 |
+
L,
|
| 26 |
+
M, # NOTE: TMP is a scratchpad buffer to work around a compiler bug
|
| 27 |
+
Out,
|
| 28 |
+
stride_qz,
|
| 29 |
+
stride_qh,
|
| 30 |
+
stride_qm,
|
| 31 |
+
stride_qk,
|
| 32 |
+
stride_kz,
|
| 33 |
+
stride_kh,
|
| 34 |
+
stride_kn,
|
| 35 |
+
stride_kk,
|
| 36 |
+
stride_vz,
|
| 37 |
+
stride_vh,
|
| 38 |
+
stride_vk,
|
| 39 |
+
stride_vn,
|
| 40 |
+
stride_oz,
|
| 41 |
+
stride_oh,
|
| 42 |
+
stride_om,
|
| 43 |
+
stride_on,
|
| 44 |
+
Z,
|
| 45 |
+
H,
|
| 46 |
+
N_CTX,
|
| 47 |
+
BLOCK_M: tl.constexpr,
|
| 48 |
+
BLOCK_DMODEL: tl.constexpr,
|
| 49 |
+
BLOCK_N: tl.constexpr,
|
| 50 |
+
):
|
| 51 |
+
start_m = tl.program_id(0)
|
| 52 |
+
off_hz = tl.program_id(1)
|
| 53 |
+
# initialize offsets
|
| 54 |
+
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
|
| 55 |
+
offs_n = tl.arange(0, BLOCK_N)
|
| 56 |
+
offs_d = tl.arange(0, BLOCK_DMODEL)
|
| 57 |
+
off_q = off_hz * stride_qh + offs_m[:, None] * stride_qm + offs_d[None, :] * stride_qk
|
| 58 |
+
off_k = off_hz * stride_qh + offs_n[:, None] * stride_kn + offs_d[None, :] * stride_kk
|
| 59 |
+
off_v = off_hz * stride_qh + offs_n[:, None] * stride_qm + offs_d[None, :] * stride_qk
|
| 60 |
+
# Initialize pointers to Q, K, V
|
| 61 |
+
q_ptrs = Q + off_q
|
| 62 |
+
k_ptrs = K + off_k
|
| 63 |
+
v_ptrs = V + off_v
|
| 64 |
+
# initialize pointer to m and l
|
| 65 |
+
t_ptrs = TMP + off_hz * N_CTX + offs_m
|
| 66 |
+
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
|
| 67 |
+
l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
|
| 68 |
+
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
|
| 69 |
+
# load q: it will stay in SRAM throughout
|
| 70 |
+
q = tl.load(q_ptrs)
|
| 71 |
+
# loop over k, v and update accumulator
|
| 72 |
+
for start_n in range(0, (start_m + 1) * BLOCK_M, BLOCK_N):
|
| 73 |
+
start_n = tl.multiple_of(start_n, BLOCK_N)
|
| 74 |
+
# -- compute qk ----
|
| 75 |
+
k = tl.load(k_ptrs + start_n * stride_kn)
|
| 76 |
+
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
|
| 77 |
+
qk += tl.dot(q, k, trans_b=True)
|
| 78 |
+
qk *= sm_scale
|
| 79 |
+
qk += tl.where(offs_m[:, None] >= (start_n + offs_n[None, :]), 0, float("-inf"))
|
| 80 |
+
# -- compute m_ij, p, l_ij
|
| 81 |
+
m_ij = tl.max(qk, 1)
|
| 82 |
+
p = tl.exp(qk - m_ij[:, None])
|
| 83 |
+
l_ij = tl.sum(p, 1)
|
| 84 |
+
# -- update m_i and l_i
|
| 85 |
+
m_i_new = tl.maximum(m_i, m_ij)
|
| 86 |
+
alpha = tl.exp(m_i - m_i_new)
|
| 87 |
+
beta = tl.exp(m_ij - m_i_new)
|
| 88 |
+
l_i_new = alpha * l_i + beta * l_ij
|
| 89 |
+
# -- update output accumulator --
|
| 90 |
+
# scale p
|
| 91 |
+
p_scale = beta / l_i_new
|
| 92 |
+
p = p * p_scale[:, None]
|
| 93 |
+
# scale acc
|
| 94 |
+
acc_scale = l_i / l_i_new * alpha
|
| 95 |
+
tl.store(t_ptrs, acc_scale)
|
| 96 |
+
acc_scale = tl.load(t_ptrs) # BUG: have to store and immediately load
|
| 97 |
+
acc = acc * acc_scale[:, None]
|
| 98 |
+
# update acc
|
| 99 |
+
v = tl.load(v_ptrs + start_n * stride_vk)
|
| 100 |
+
p = p.to(v.dtype)
|
| 101 |
+
acc += tl.dot(p, v)
|
| 102 |
+
# update m_i and l_i
|
| 103 |
+
l_i = l_i_new
|
| 104 |
+
m_i = m_i_new
|
| 105 |
+
# rematerialize offsets to save registers
|
| 106 |
+
start_m = tl.program_id(0)
|
| 107 |
+
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
|
| 108 |
+
# write back l and m
|
| 109 |
+
l_ptrs = L + off_hz * N_CTX + offs_m
|
| 110 |
+
m_ptrs = M + off_hz * N_CTX + offs_m
|
| 111 |
+
tl.store(l_ptrs, l_i)
|
| 112 |
+
tl.store(m_ptrs, m_i)
|
| 113 |
+
# initialize pointers to output
|
| 114 |
+
offs_n = tl.arange(0, BLOCK_DMODEL)
|
| 115 |
+
off_o = off_hz * stride_oh + offs_m[:, None] * stride_om + offs_n[None, :] * stride_on
|
| 116 |
+
out_ptrs = Out + off_o
|
| 117 |
+
tl.store(out_ptrs, acc)
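For intuition, here is a host-side PyTorch sketch of the online-softmax recurrence the kernel streams over K/V blocks. It is a simplified variant that defers the final normalization instead of rescaling by beta / l_i_new at every step, but the updates to the running max, running sum and accumulator follow the same recurrence; the shapes and block size are illustrative.

import torch

def online_softmax_matmul(q, k, v, block=2):
    # running max (m), running normalizer (l) and unnormalized accumulator, updated block by block
    m = torch.full((q.shape[0],), float("-inf"), dtype=q.dtype)
    l = torch.zeros(q.shape[0], dtype=q.dtype)
    acc = torch.zeros(q.shape[0], v.shape[1], dtype=q.dtype)
    for s in range(0, k.shape[0], block):
        qk = q @ k[s:s + block].T
        m_new = torch.maximum(m, qk.max(dim=1).values)
        alpha = torch.exp(m - m_new)                      # rescales the previously accumulated blocks
        p = torch.exp(qk - m_new[:, None])
        l = alpha * l + p.sum(dim=1)
        acc = acc * alpha[:, None] + p @ v[s:s + block]
        m = m_new
    return acc / l[:, None]

q, k, v = (torch.randn(4, 8, dtype=torch.float64) for _ in range(3))
torch.testing.assert_close(online_softmax_matmul(q, k, v), torch.softmax(q @ k.T, dim=-1) @ v)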
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
@triton.jit
|
| 121 |
+
def _bwd_preprocess(
|
| 122 |
+
Out,
|
| 123 |
+
DO,
|
| 124 |
+
L,
|
| 125 |
+
NewDO,
|
| 126 |
+
Delta,
|
| 127 |
+
BLOCK_M: tl.constexpr,
|
| 128 |
+
D_HEAD: tl.constexpr,
|
| 129 |
+
):
|
| 130 |
+
off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
|
| 131 |
+
off_n = tl.arange(0, D_HEAD)
|
| 132 |
+
# load
|
| 133 |
+
o = tl.load(Out + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32)
|
| 134 |
+
do = tl.load(DO + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32)
|
| 135 |
+
denom = tl.load(L + off_m).to(tl.float32)
|
| 136 |
+
# compute
|
| 137 |
+
do = do / denom[:, None]
|
| 138 |
+
delta = tl.sum(o * do, axis=1)
|
| 139 |
+
# write-back
|
| 140 |
+
tl.store(NewDO + off_m[:, None] * D_HEAD + off_n[None, :], do)
|
| 141 |
+
tl.store(Delta + off_m, delta)
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
@triton.jit
|
| 145 |
+
def _bwd_kernel(
|
| 146 |
+
Q,
|
| 147 |
+
K,
|
| 148 |
+
V,
|
| 149 |
+
sm_scale,
|
| 150 |
+
Out,
|
| 151 |
+
DO,
|
| 152 |
+
DQ,
|
| 153 |
+
DK,
|
| 154 |
+
DV,
|
| 155 |
+
L,
|
| 156 |
+
M,
|
| 157 |
+
D,
|
| 158 |
+
stride_qz,
|
| 159 |
+
stride_qh,
|
| 160 |
+
stride_qm,
|
| 161 |
+
stride_qk,
|
| 162 |
+
stride_kz,
|
| 163 |
+
stride_kh,
|
| 164 |
+
stride_kn,
|
| 165 |
+
stride_kk,
|
| 166 |
+
stride_vz,
|
| 167 |
+
stride_vh,
|
| 168 |
+
stride_vk,
|
| 169 |
+
stride_vn,
|
| 170 |
+
Z,
|
| 171 |
+
H,
|
| 172 |
+
N_CTX,
|
| 173 |
+
num_block,
|
| 174 |
+
BLOCK_M: tl.constexpr,
|
| 175 |
+
BLOCK_DMODEL: tl.constexpr,
|
| 176 |
+
BLOCK_N: tl.constexpr,
|
| 177 |
+
):
|
| 178 |
+
off_hz = tl.program_id(0)
|
| 179 |
+
off_z = off_hz // H
|
| 180 |
+
off_h = off_hz % H
|
| 181 |
+
# offset pointers for batch/head
|
| 182 |
+
Q += off_z * stride_qz + off_h * stride_qh
|
| 183 |
+
K += off_z * stride_qz + off_h * stride_qh
|
| 184 |
+
V += off_z * stride_qz + off_h * stride_qh
|
| 185 |
+
DO += off_z * stride_qz + off_h * stride_qh
|
| 186 |
+
DQ += off_z * stride_qz + off_h * stride_qh
|
| 187 |
+
DK += off_z * stride_qz + off_h * stride_qh
|
| 188 |
+
DV += off_z * stride_qz + off_h * stride_qh
|
| 189 |
+
for start_n in range(0, num_block):
|
| 190 |
+
lo = start_n * BLOCK_M
|
| 191 |
+
# initialize row/col offsets
|
| 192 |
+
offs_qm = lo + tl.arange(0, BLOCK_M)
|
| 193 |
+
offs_n = start_n * BLOCK_M + tl.arange(0, BLOCK_M)
|
| 194 |
+
offs_m = tl.arange(0, BLOCK_N)
|
| 195 |
+
offs_k = tl.arange(0, BLOCK_DMODEL)
|
| 196 |
+
# initialize pointers to value-like data
|
| 197 |
+
q_ptrs = Q + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk)
|
| 198 |
+
k_ptrs = K + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk)
|
| 199 |
+
v_ptrs = V + (offs_n[:, None] * stride_qm + offs_k[None, :] * stride_qk)
|
| 200 |
+
do_ptrs = DO + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk)
|
| 201 |
+
dq_ptrs = DQ + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk)
|
| 202 |
+
# pointer to row-wise quantities in value-like data
|
| 203 |
+
D_ptrs = D + off_hz * N_CTX
|
| 204 |
+
m_ptrs = M + off_hz * N_CTX
|
| 205 |
+
# initialize dv and dk
|
| 206 |
+
dv = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
|
| 207 |
+
dk = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
|
| 208 |
+
# k and v stay in SRAM throughout
|
| 209 |
+
k = tl.load(k_ptrs)
|
| 210 |
+
v = tl.load(v_ptrs)
|
| 211 |
+
# loop over rows
|
| 212 |
+
for start_m in range(lo, num_block * BLOCK_M, BLOCK_M):
|
| 213 |
+
offs_m_curr = start_m + offs_m
|
| 214 |
+
# load q, k, v, do on-chip
|
| 215 |
+
q = tl.load(q_ptrs)
|
| 216 |
+
# recompute p = softmax(qk, dim=-1).T
|
| 217 |
+
# NOTE: `do` is pre-divided by `l`; no normalization here
|
| 218 |
+
qk = tl.dot(q, k, trans_b=True)
|
| 219 |
+
qk = tl.where(offs_m_curr[:, None] >= (offs_n[None, :]), qk, float("-inf"))
|
| 220 |
+
m = tl.load(m_ptrs + offs_m_curr)
|
| 221 |
+
p = tl.exp(qk * sm_scale - m[:, None])
|
| 222 |
+
# compute dv
|
| 223 |
+
do = tl.load(do_ptrs)
|
| 224 |
+
dv += tl.dot(p.to(do.dtype), do, trans_a=True)
|
| 225 |
+
# compute dp = dot(v, do)
|
| 226 |
+
Di = tl.load(D_ptrs + offs_m_curr)
|
| 227 |
+
dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) - Di[:, None]
|
| 228 |
+
dp += tl.dot(do, v, trans_b=True)
|
| 229 |
+
# compute ds = p * (dp - delta[:, None])
|
| 230 |
+
ds = p * dp * sm_scale
|
| 231 |
+
# compute dk = dot(ds.T, q)
|
| 232 |
+
dk += tl.dot(ds.to(q.dtype), q, trans_a=True)
|
| 233 |
+
# # compute dq
|
| 234 |
+
dq = tl.load(dq_ptrs, eviction_policy="evict_last")
|
| 235 |
+
dq += tl.dot(ds.to(k.dtype), k)
|
| 236 |
+
tl.store(dq_ptrs, dq, eviction_policy="evict_last")
|
| 237 |
+
# # increment pointers
|
| 238 |
+
dq_ptrs += BLOCK_M * stride_qm
|
| 239 |
+
q_ptrs += BLOCK_M * stride_qm
|
| 240 |
+
do_ptrs += BLOCK_M * stride_qm
|
| 241 |
+
# write-back
|
| 242 |
+
dv_ptrs = DV + (offs_n[:, None] * stride_qm + offs_k[None, :] * stride_qk)
|
| 243 |
+
dk_ptrs = DK + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk)
|
| 244 |
+
tl.store(dv_ptrs, dv)
|
| 245 |
+
tl.store(dk_ptrs, dk)
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
class _attention(torch.autograd.Function):
|
| 249 |
+
@staticmethod
|
| 250 |
+
def forward(ctx, q, k, v, sm_scale):
|
| 251 |
+
BLOCK = 128
|
| 252 |
+
# shape constraints
|
| 253 |
+
Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1]
|
| 254 |
+
assert Lq == Lk and Lk == Lv
|
| 255 |
+
assert Lk in {16, 32, 64, 128}
|
| 256 |
+
o = torch.empty_like(q)
|
| 257 |
+
grid = (triton.cdiv(q.shape[2], BLOCK), q.shape[0] * q.shape[1])
|
| 258 |
+
tmp = torch.empty(
|
| 259 |
+
(q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32
|
| 260 |
+
)
|
| 261 |
+
L = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
|
| 262 |
+
m = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
|
| 263 |
+
num_warps = 4 if Lk <= 64 else 8
|
| 264 |
+
|
| 265 |
+
_fwd_kernel[grid](
|
| 266 |
+
q,
|
| 267 |
+
k,
|
| 268 |
+
v,
|
| 269 |
+
sm_scale,
|
| 270 |
+
tmp,
|
| 271 |
+
L,
|
| 272 |
+
m,
|
| 273 |
+
o,
|
| 274 |
+
q.stride(0),
|
| 275 |
+
q.stride(1),
|
| 276 |
+
q.stride(2),
|
| 277 |
+
q.stride(3),
|
| 278 |
+
k.stride(0),
|
| 279 |
+
k.stride(1),
|
| 280 |
+
k.stride(2),
|
| 281 |
+
k.stride(3),
|
| 282 |
+
v.stride(0),
|
| 283 |
+
v.stride(1),
|
| 284 |
+
v.stride(2),
|
| 285 |
+
v.stride(3),
|
| 286 |
+
o.stride(0),
|
| 287 |
+
o.stride(1),
|
| 288 |
+
o.stride(2),
|
| 289 |
+
o.stride(3),
|
| 290 |
+
q.shape[0],
|
| 291 |
+
q.shape[1],
|
| 292 |
+
q.shape[2],
|
| 293 |
+
BLOCK_M=BLOCK,
|
| 294 |
+
BLOCK_N=BLOCK,
|
| 295 |
+
BLOCK_DMODEL=Lk,
|
| 296 |
+
num_warps=num_warps,
|
| 297 |
+
num_stages=1,
|
| 298 |
+
)
|
| 299 |
+
ctx.save_for_backward(q, k, v, o, L, m)
|
| 300 |
+
ctx.BLOCK = BLOCK
|
| 301 |
+
ctx.grid = grid
|
| 302 |
+
ctx.sm_scale = sm_scale
|
| 303 |
+
ctx.BLOCK_DMODEL = Lk
|
| 304 |
+
return o
|
| 305 |
+
|
| 306 |
+
@staticmethod
|
| 307 |
+
def backward(ctx, do):
|
| 308 |
+
q, k, v, o, l, m = ctx.saved_tensors
|
| 309 |
+
do = do.contiguous()
|
| 310 |
+
dq = torch.zeros_like(q, dtype=torch.float32)
|
| 311 |
+
dk = torch.empty_like(k)
|
| 312 |
+
dv = torch.empty_like(v)
|
| 313 |
+
do_scaled = torch.empty_like(do)
|
| 314 |
+
delta = torch.empty_like(l)
|
| 315 |
+
_bwd_preprocess[(ctx.grid[0] * ctx.grid[1],)](
|
| 316 |
+
o,
|
| 317 |
+
do,
|
| 318 |
+
l,
|
| 319 |
+
do_scaled,
|
| 320 |
+
delta,
|
| 321 |
+
BLOCK_M=ctx.BLOCK,
|
| 322 |
+
D_HEAD=ctx.BLOCK_DMODEL,
|
| 323 |
+
)
|
| 324 |
+
|
| 325 |
+
# NOTE: kernel currently buggy for other values of `num_warps`
|
| 326 |
+
num_warps = 8
|
| 327 |
+
_bwd_kernel[(ctx.grid[1],)](
|
| 328 |
+
q,
|
| 329 |
+
k,
|
| 330 |
+
v,
|
| 331 |
+
ctx.sm_scale,
|
| 332 |
+
o,
|
| 333 |
+
do_scaled,
|
| 334 |
+
dq,
|
| 335 |
+
dk,
|
| 336 |
+
dv,
|
| 337 |
+
l,
|
| 338 |
+
m,
|
| 339 |
+
delta,
|
| 340 |
+
q.stride(0),
|
| 341 |
+
q.stride(1),
|
| 342 |
+
q.stride(2),
|
| 343 |
+
q.stride(3),
|
| 344 |
+
k.stride(0),
|
| 345 |
+
k.stride(1),
|
| 346 |
+
k.stride(2),
|
| 347 |
+
k.stride(3),
|
| 348 |
+
v.stride(0),
|
| 349 |
+
v.stride(1),
|
| 350 |
+
v.stride(2),
|
| 351 |
+
v.stride(3),
|
| 352 |
+
q.shape[0],
|
| 353 |
+
q.shape[1],
|
| 354 |
+
q.shape[2],
|
| 355 |
+
ctx.grid[0],
|
| 356 |
+
BLOCK_M=ctx.BLOCK,
|
| 357 |
+
BLOCK_N=ctx.BLOCK,
|
| 358 |
+
BLOCK_DMODEL=ctx.BLOCK_DMODEL,
|
| 359 |
+
num_warps=num_warps,
|
| 360 |
+
num_stages=1,
|
| 361 |
+
)
|
| 362 |
+
return dq.to(q.dtype), dk, dv, None
|
| 363 |
+
|
| 364 |
+
|
| 365 |
+
attention = _attention.apply
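A hypothetical usage sketch of the exported autograd function defined above; the shapes, dtype and softmax scale are illustrative, and it assumes a CUDA device plus a Triton version that still accepts this kernel.

import torch

B, H, S, D = 2, 4, 1024, 64                        # D must be one of {16, 32, 64, 128}
q, k, v = (torch.randn(B, H, S, D, device="cuda", dtype=torch.float16, requires_grad=True)
           for _ in range(3))
out = attention(q, k, v, D ** -0.5)                # forward launches _fwd_kernel
out.sum().backward()                               # backward runs _bwd_preprocess and _bwd_kernel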
|
infer_4_30_0/lib/python3.10/site-packages/flash_attn/flash_blocksparse_attention.py
ADDED
|
@@ -0,0 +1,197 @@
| 1 |
+
import math
|
| 2 |
+
|
| 3 |
+
import hydra
|
| 4 |
+
import torch
|
| 5 |
+
import torch.nn as nn
|
| 6 |
+
from einops import rearrange
|
| 7 |
+
|
| 8 |
+
from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input
|
| 9 |
+
from flash_attn.flash_blocksparse_attn_interface import (
|
| 10 |
+
convert_blockmask,
|
| 11 |
+
flash_blocksparse_attn_func,
|
| 12 |
+
)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class FlashBlocksparseAttention(nn.Module):
|
| 16 |
+
"""Implement the scaled dot product attention with softmax.
|
| 17 |
+
Arguments
|
| 18 |
+
---------
|
| 19 |
+
softmax_temp: The temperature to use for the softmax attention.
|
| 20 |
+
(default: 1/sqrt(d_keys) where d_keys is computed at
|
| 21 |
+
runtime)
|
| 22 |
+
attention_dropout: The dropout rate to apply to the attention
|
| 23 |
+
(default: 0.1)
|
| 24 |
+
"""
|
| 25 |
+
|
| 26 |
+
def __init__(
|
| 27 |
+
self,
|
| 28 |
+
sparsity_config,
|
| 29 |
+
softmax_temp=None,
|
| 30 |
+
attention_dropout=0.0,
|
| 31 |
+
max_seq_length=2048,
|
| 32 |
+
device=None,
|
| 33 |
+
dtype=None,
|
| 34 |
+
):
|
| 35 |
+
super().__init__()
|
| 36 |
+
self.sparsity_config = hydra.utils.instantiate(sparsity_config)
|
| 37 |
+
self.softmax_temp = softmax_temp
|
| 38 |
+
self.dropout_p = attention_dropout
|
| 39 |
+
|
| 40 |
+
# initialize sparse layout and register as buffer
|
| 41 |
+
max_seq_length = ((max_seq_length + 256 - 1) // 256) * 256
|
| 42 |
+
layout = self.sparsity_config.make_layout(max_seq_length)
|
| 43 |
+
self.register_buffer("layout", layout)
|
| 44 |
+
blockmask_converted = convert_blockmask(self.layout, causal=False)
|
| 45 |
+
self.register_buffer("blockmask_converted", blockmask_converted)
|
| 46 |
+
# logger.info(f'Attention class {self.__class__}: saving={self.layout.float().mean()}')
|
| 47 |
+
|
| 48 |
+
def forward(
|
| 49 |
+
self,
|
| 50 |
+
qkv,
|
| 51 |
+
attn_mask=None,
|
| 52 |
+
key_padding_mask=None,
|
| 53 |
+
causal=False,
|
| 54 |
+
cu_seqlens=None,
|
| 55 |
+
max_s=None,
|
| 56 |
+
need_weights=False,
|
| 57 |
+
convert_mask=True,
|
| 58 |
+
):
|
| 59 |
+
"""Implements the multihead softmax attention.
|
| 60 |
+
Arguments
|
| 61 |
+
---------
|
| 62 |
+
qkv: The tensor containing the query, key, and value. (B, S, 3, H, D) if key_padding_mask is None
|
| 63 |
+
attn_mask: An implementation of BaseMask that encodes where each
|
| 64 |
+
query can attend to
|
| 65 |
+
key_padding_mask: An implementation of BaseMask that encodes how
|
| 66 |
+
many queries each sequence in the batch consists of
|
| 67 |
+
"""
|
| 68 |
+
assert not need_weights
|
| 69 |
+
assert attn_mask is None
|
| 70 |
+
assert qkv.dtype == torch.float16
|
| 71 |
+
assert qkv.is_cuda
|
| 72 |
+
|
| 73 |
+
if cu_seqlens is None:
|
| 74 |
+
batch_size = qkv.shape[0]
|
| 75 |
+
seqlen = qkv.shape[1]
|
| 76 |
+
# Convert mask to take a subset
|
| 77 |
+
seqlen_rounded = ((seqlen + 256 - 1) // 256) * 256
|
| 78 |
+
assert seqlen_rounded // 16 <= self.layout.shape[0], (
|
| 79 |
+
seqlen_rounded // 256 <= self.layout.shape[1]
|
| 80 |
+
)
|
| 81 |
+
blockmask = self.layout[: seqlen_rounded // 16, : seqlen_rounded // 256]
|
| 82 |
+
if key_padding_mask is None:
|
| 83 |
+
qkv = rearrange(qkv, "b s ... -> (b s) ...")
|
| 84 |
+
max_s = seqlen
|
| 85 |
+
cu_seqlens = torch.arange(
|
| 86 |
+
0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32, device=qkv.device
|
| 87 |
+
)
|
| 88 |
+
output = flash_blocksparse_attn_func(
|
| 89 |
+
qkv,
|
| 90 |
+
cu_seqlens,
|
| 91 |
+
blockmask,
|
| 92 |
+
self.dropout_p if self.training else 0.0,
|
| 93 |
+
max_s,
|
| 94 |
+
softmax_scale=self.softmax_temp,
|
| 95 |
+
causal=causal,
|
| 96 |
+
)
|
| 97 |
+
output = rearrange(output, "(b s) ... -> b s ...", b=batch_size)
|
| 98 |
+
else:
|
| 99 |
+
key_padding_mask_bool = key_padding_mask.bool_matrix
|
| 100 |
+
nheads = qkv.shape[-2]
|
| 101 |
+
x = rearrange(qkv, "b s three h d -> b s (three h d)")
|
| 102 |
+
x_unpad, indices, cu_seqlens, max_s, _ = unpad_input(x, key_padding_mask_bool)
|
| 103 |
+
x_unpad = rearrange(x_unpad, "nnz (three h d) -> nnz three h d", three=3, h=nheads)
|
| 104 |
+
output_unpad = flash_blocksparse_attn_func(
|
| 105 |
+
x_unpad,
|
| 106 |
+
cu_seqlens,
|
| 107 |
+
blockmask,
|
| 108 |
+
self.dropout_p if self.training else 0.0,
|
| 109 |
+
max_s,
|
| 110 |
+
softmax_scale=self.softmax_temp,
|
| 111 |
+
causal=causal,
|
| 112 |
+
)
|
| 113 |
+
output = rearrange(
|
| 114 |
+
pad_input(
|
| 115 |
+
rearrange(output_unpad, "nnz h d -> nnz (h d)"), indices, batch_size, seqlen
|
| 116 |
+
),
|
| 117 |
+
"b s (h d) -> b s h d",
|
| 118 |
+
h=nheads,
|
| 119 |
+
)
|
| 120 |
+
else:
|
| 121 |
+
assert max_s is not None
|
| 122 |
+
seqlen = max_s
|
| 123 |
+
# Convert mask to take a subset
|
| 124 |
+
seqlen_rounded = ((seqlen + 256 - 1) // 256) * 256
|
| 125 |
+
assert seqlen_rounded // 16 <= self.layout.shape[0], (
|
| 126 |
+
seqlen_rounded // 256 <= self.layout.shape[1]
|
| 127 |
+
)
|
| 128 |
+
blockmask = self.layout[: seqlen_rounded // 16, : seqlen_rounded // 256]
|
| 129 |
+
if convert_mask:
|
| 130 |
+
output = flash_blocksparse_attn_func(
|
| 131 |
+
qkv,
|
| 132 |
+
cu_seqlens,
|
| 133 |
+
blockmask,
|
| 134 |
+
self.dropout_p if self.training else 0.0,
|
| 135 |
+
max_s,
|
| 136 |
+
softmax_scale=self.softmax_temp,
|
| 137 |
+
causal=causal,
|
| 138 |
+
)
|
| 139 |
+
else:
|
| 140 |
+
output = flash_blocksparse_attn_func(
|
| 141 |
+
qkv,
|
| 142 |
+
cu_seqlens,
|
| 143 |
+
self.blockmask_converted,
|
| 144 |
+
self.dropout_p if self.training else 0.0,
|
| 145 |
+
max_s,
|
| 146 |
+
softmax_scale=self.softmax_temp,
|
| 147 |
+
causal=causal,
|
| 148 |
+
convert_mask=False,
|
| 149 |
+
)
|
| 150 |
+
|
| 151 |
+
return output, None
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
class FlashBlocksparseMHA(nn.Module):
|
| 155 |
+
def __init__(
|
| 156 |
+
self,
|
| 157 |
+
embed_dim,
|
| 158 |
+
num_heads,
|
| 159 |
+
sparsity_config,
|
| 160 |
+
bias=True,
|
| 161 |
+
batch_first=True,
|
| 162 |
+
attention_dropout=0.0,
|
| 163 |
+
causal=False,
|
| 164 |
+
max_seq_length=2048,
|
| 165 |
+
device=None,
|
| 166 |
+
dtype=None,
|
| 167 |
+
**kwargs,
|
| 168 |
+
) -> None:
|
| 169 |
+
assert batch_first
|
| 170 |
+
factory_kwargs = {"device": device, "dtype": dtype}
|
| 171 |
+
super().__init__()
|
| 172 |
+
self.embed_dim = embed_dim
|
| 173 |
+
self.causal = causal
|
| 174 |
+
|
| 175 |
+
self.num_heads = num_heads
|
| 176 |
+
assert self.embed_dim % num_heads == 0, "self.kdim must be divisible by num_heads"
|
| 177 |
+
self.head_dim = self.embed_dim // num_heads
|
| 178 |
+
assert self.head_dim in [16, 32, 64], "Only support head_dim == 16, 32, or 64"
|
| 179 |
+
|
| 180 |
+
self.Wqkv = nn.Linear(embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs)
|
| 181 |
+
self.inner_attn = FlashBlocksparseAttention(
|
| 182 |
+
sparsity_config,
|
| 183 |
+
attention_dropout=attention_dropout,
|
| 184 |
+
max_seq_length=max_seq_length,
|
| 185 |
+
**factory_kwargs,
|
| 186 |
+
)
|
| 187 |
+
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias, **factory_kwargs)
|
| 188 |
+
|
| 189 |
+
def forward(
|
| 190 |
+
self, x, x_ignored_, x_ignored_1_, attn_mask=None, key_padding_mask=None, need_weights=False
|
| 191 |
+
):
|
| 192 |
+
qkv = self.Wqkv(x)
|
| 193 |
+
qkv = rearrange(qkv, "b s (three h d) -> b s three h d", three=3, h=self.num_heads)
|
| 194 |
+
context, attn_weights = self.inner_attn(
|
| 195 |
+
qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=self.causal
|
| 196 |
+
)
|
| 197 |
+
return self.out_proj(rearrange(context, "b s h d -> b s (h d)")), attn_weights
|
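Usage note (not part of the diff): the forward pass above asserts fp16 CUDA inputs and packs q, k, v internally from x, so a minimal call through the FlashBlocksparseMHA wrapper looks roughly like the sketch below. The module construction (embed_dim, num_heads, the sparsity config) is assumed and illustrative.

import torch

# Hypothetical helper: `mha` stands for an already-constructed FlashBlocksparseMHA
# (e.g. embed_dim=1024, num_heads=16 plus some block-sparsity config), moved to GPU
# and cast to fp16, since the inner attention asserts qkv.dtype == torch.float16
# and qkv.is_cuda.
def run_flash_blocksparse_mha(mha, batch_size=2, seqlen=1024, embed_dim=1024):
    x = torch.randn(batch_size, seqlen, embed_dim, device="cuda", dtype=torch.float16)
    # The 2nd and 3rd positional arguments (x_ignored_, x_ignored_1_) are unused;
    # they exist only for interface compatibility.
    out, attn_weights = mha(x, None, None, key_padding_mask=None, need_weights=False)
    return out  # (batch_size, seqlen, embed_dim); attn_weights is None here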
infer_4_30_0/lib/python3.10/site-packages/flash_attn/flash_blocksparse_attn_interface.py
ADDED
@@ -0,0 +1,200 @@
# Adapted from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/fmha.py
import flash_attn_cuda
import torch
import torch.nn as nn


def convert_blockmask(blockmask, causal):
    """Convert from the 0-1 format to the format used by the CUDA code.
    0 means the block is skipped.
    nonzero means the block is not skipped.
    Argument:
        blockmask: (row, col): a 0-1 tensor
    Return:
        blockmask_converted: (col, row), dtype torch.int32: for each column, it contains the row
        indices of the nonzero blocks, padded with -1 to reach length @row.
        The indices are multiplied by 4, with the smallest bit used to encode whether
        it is the first nonzero in its row, and the 2nd smallest bit to encode whether it is
        the last nonzero in its row..
    """
    assert not causal
    # TD [2022-05-13]: The indexing and sorting is very tricky
    nrow, ncol = blockmask.shape
    # Sort does not support bool on CUDA
    blockmask = blockmask.to(dtype=torch.uint8)
    nonzero_val, nonzero_sorted_rowidx = blockmask.sort(dim=0, stable=True, descending=True)
    nonzero_unsorted_rowidx = nonzero_sorted_rowidx.argsort(dim=0)
    last_nonzero_col_per_row = blockmask.sort(dim=-1, stable=True).indices[:, -1]
    last_nonzero_col_per_row_after_sort = nonzero_unsorted_rowidx[
        torch.arange(nrow, device=blockmask.device), last_nonzero_col_per_row
    ]
    first_nonzero_col_per_row = blockmask.sort(dim=-1, stable=True, descending=True).indices[:, 0]
    first_nonzero_col_per_row_after_sort = nonzero_unsorted_rowidx[
        torch.arange(nrow, device=blockmask.device), first_nonzero_col_per_row
    ]
    nonzero_idx = nonzero_sorted_rowidx * 4
    nonzero_idx[last_nonzero_col_per_row_after_sort, last_nonzero_col_per_row] += 2
    nonzero_idx[first_nonzero_col_per_row_after_sort, first_nonzero_col_per_row] += 1
    nonzero_idx[nonzero_val == 0] = -1
    return nonzero_idx.T.contiguous().to(dtype=torch.int32)


def _flash_blocksparse_attn_forward(
    qkv, cu_seqlens, blockmask, dropout_p, max_s, softmax_scale, causal, return_softmax
):
    context, softmax_lse, *rest = flash_attn_cuda.fwd_block(
        qkv, cu_seqlens, blockmask, dropout_p, max_s, softmax_scale, causal, return_softmax, None
    )
    # if context.isnan().any() or softmax_lse.isnan().any():
    #     breakpoint()
    S_dmask = rest[0] if return_softmax else None
    return context, softmax_lse, S_dmask


def _flash_blocksparse_attn_backward(
    dout,
    qkv,
    out,
    S_dmask,
    softmax_lse,
    cu_seqlens,
    blockmask,
    dropout_p,
    max_s,
    softmax_scale,
    causal,
):
    dqkv, dp, softmax_d = flash_attn_cuda.bwd_block(
        dout,
        qkv,
        out,
        S_dmask,
        softmax_lse,
        cu_seqlens,
        blockmask,
        dropout_p,
        softmax_scale,
        max_s,
        causal,
        None,
    )
    # if dqkv.isnan().any() or softmax_d.isnan().any():
    #     breakpoint()
    return dqkv


class FlashBlocksparseAttnFun(torch.autograd.Function):
    @staticmethod
    def forward(ctx, qkv, cu_seqlens, blockmask, dropout_p, max_s, softmax_scale, causal):
        # Save rng_state because the backward pass will regenerate the dropout mask
        rng_state = torch.cuda.get_rng_state() if dropout_p > 0 else None
        if softmax_scale is None:
            softmax_scale = qkv.shape[-1] ** (-0.5)
        context, softmax_lse, S_dmask = _flash_blocksparse_attn_forward(
            qkv,
            cu_seqlens,
            blockmask,
            dropout_p,
            max_s,
            softmax_scale,
            causal=causal,
            return_softmax=False,
        )
        ctx.save_for_backward(qkv, context, S_dmask, softmax_lse, cu_seqlens, blockmask, rng_state)
        ctx.dropout_p = dropout_p
        ctx.max_s = max_s
        ctx.softmax_scale = softmax_scale
        ctx.causal = causal
        return context

    @staticmethod
    def backward(ctx, dout):
        qkv, context, S_dmask, softmax_lse, cu_seqlens, blockmask, rng_state = ctx.saved_tensors
        if rng_state is not None:
            cur_rng_state = torch.cuda.get_rng_state()
            torch.cuda.set_rng_state(rng_state)
        # S_dmask is None, temporarily use another tensor just to get it running
        dqkv = _flash_blocksparse_attn_backward(
            dout,
            qkv,
            context,
            context,
            softmax_lse,
            cu_seqlens,
            blockmask,
            ctx.dropout_p,
            ctx.max_s,
            ctx.softmax_scale,
            ctx.causal,
        )
        if rng_state is not None:
            torch.cuda.set_rng_state(cur_rng_state)
        return dqkv, None, None, None, None, None, None, None


# We duplicate code to return both the output and the softmax for testing
# Returning both makes backward a bit slower, so we want to keep using the other version for speed.
class FlashBlocksparseAttnFunWithS(torch.autograd.Function):
    @staticmethod
    def forward(ctx, qkv, cu_seqlens, blockmask, dropout_p, max_s, softmax_scale, causal):
        # Save rng_state because the backward pass is gonna regenerate the dropout mask
        rng_state = torch.cuda.get_rng_state() if dropout_p > 0 else None
        if softmax_scale is None:
            softmax_scale = qkv.shape[-1] ** (-0.5)
        context, softmax_lse, S_dmask = _flash_blocksparse_attn_forward(
            qkv,
            cu_seqlens,
            blockmask,
            dropout_p,
            max_s,
            softmax_scale,
            causal=causal,
            return_softmax=True,
        )
        ctx.save_for_backward(qkv, context, S_dmask, softmax_lse, cu_seqlens, blockmask, rng_state)
        ctx.dropout_p = dropout_p
        ctx.max_s = max_s
        ctx.softmax_scale = softmax_scale
        ctx.causal = causal
        return context, S_dmask, softmax_lse

    @staticmethod
    def backward(ctx, dout, _dS_dmask_ignored, _dsoftmax_sum_ignored):
        qkv, context, S_dmask, softmax_lse, cu_seqlens, blockmask, rng_state = ctx.saved_tensors
        if rng_state is not None:
            cur_rng_state = torch.cuda.get_rng_state()
            torch.cuda.set_rng_state(rng_state)
        dqkv = _flash_blocksparse_attn_backward(
            dout,
            qkv,
            context,
            S_dmask,
            softmax_lse,
            cu_seqlens,
            blockmask,
            ctx.dropout_p,
            ctx.max_s,
            ctx.softmax_scale,
            ctx.causal,
        )
        if rng_state is not None:
            torch.cuda.set_rng_state(cur_rng_state)
        return dqkv, None, None, None, None, None, None


def flash_blocksparse_attn_func(
    qkv,
    cu_seqlens,
    blockmask,
    dropout_p,
    max_s,
    softmax_scale=None,
    causal=False,
    return_attn_probs=False,
    convert_mask=True,
):
    """dropout_p should be set to 0.0 during evaluation"""
    func = FlashBlocksparseAttnFun if not return_attn_probs else FlashBlocksparseAttnFunWithS
    if convert_mask:
        blockmask = convert_blockmask(blockmask, causal=causal)
    return func.apply(qkv, cu_seqlens, blockmask, dropout_p, max_s, softmax_scale, causal)
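For orientation (not part of the diff), a small sketch of how convert_blockmask behaves on a toy 0-1 block mask. It only exercises the conversion helper above; since the module imports flash_attn_cuda at load time, it assumes the compiled extension is available.

import torch

from flash_attn.flash_blocksparse_attn_interface import convert_blockmask

# 3 block-rows x 2 block-columns; 1 means the block is kept, 0 means it is skipped.
blockmask = torch.tensor(
    [[1, 0],
     [1, 1],
     [0, 1]],
    device="cuda",
)
converted = convert_blockmask(blockmask, causal=False)
# Per the docstring: shape (ncol, nrow) = (2, 3), dtype torch.int32, each column holding
# the row indices of kept blocks (times 4, with the two low bits flagging the first/last
# kept block of a row), padded with -1 for skipped blocks.
print(converted.shape, converted.dtype)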
infer_4_30_0/lib/python3.10/site-packages/flash_attn/fused_softmax.py
ADDED
@@ -0,0 +1,201 @@
# [2022-10-23] Copied from https://github.com/NVIDIA/apex/blob/master/apex/transformer/functional/fused_softmax.py
# for benchmarking.
# We added support for seqlen=2k and seqlen=4k

# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from apex._autocast_utils import _cast_if_autocast_enabled
from apex.transformer.enums import AttnMaskType
from fused_softmax_lib import (
    scaled_masked_softmax_backward,
    scaled_masked_softmax_forward,
    scaled_masked_softmax_get_batch_per_block,
    scaled_upper_triang_masked_softmax_backward,
    scaled_upper_triang_masked_softmax_forward,
)


class ScaledUpperTriangMaskedSoftmax(torch.autograd.Function):
    """
    Fused operation which performs following three operations in sequence
    1. Scale the tensor.
    2. Apply upper triangular mask (typically used in gpt models).
    3. Perform softmax.
    """

    @staticmethod
    def forward(ctx, inputs, scale):
        scale_t = torch.tensor([scale])
        softmax_results = scaled_upper_triang_masked_softmax_forward(inputs, scale_t[0])
        ctx.save_for_backward(softmax_results, scale_t)
        return softmax_results

    @staticmethod
    def backward(ctx, output_grads):
        softmax_results, scale_t = ctx.saved_tensors
        input_grads = scaled_upper_triang_masked_softmax_backward(
            output_grads, softmax_results, scale_t[0]
        )
        return input_grads, None


def scaled_upper_triang_masked_softmax(inputs, _, scale):
    b, np, sq, sk = inputs.size()
    assert sq == sk, "causal mask is only for self attention"
    # Reshaping input to 3D tensor (attn_batches, sq, sk)
    inputs = inputs.view(-1, sq, sk)
    args = _cast_if_autocast_enabled(inputs, scale)
    with torch.cuda.amp.autocast(enabled=False):
        probs = ScaledUpperTriangMaskedSoftmax.apply(*args)
    return probs.view(b, np, sq, sk)


# NOTE (mkozuki): `ScaledMaskedSoftmax` somehow doesn't work well with `torch.cuda.amp.custom_fwd`.
# Without `cast_inputs` kwarg, somehow inputs are not cast to dtype used in the autocast context.
# So I needed to manually write two `torch.autograd.Function` inheritances.
# Fused operation which performs following three operations in sequence
# 1. Scale the tensor.
# 2. Apply the mask.
# 3. Perform softmax.
class ScaledMaskedSoftmax(torch.autograd.Function):
    @staticmethod
    def forward(ctx, inputs, mask, scale):
        scale_t = torch.tensor([scale])
        softmax_results = scaled_masked_softmax_forward(inputs, mask, scale_t[0])
        ctx.save_for_backward(softmax_results, scale_t)
        return softmax_results

    @staticmethod
    def backward(ctx, output_grads):
        softmax_results, scale_t = ctx.saved_tensors
        input_grads = scaled_masked_softmax_backward(output_grads, softmax_results, scale_t[0])
        return input_grads, None, None


def scaled_masked_softmax(inputs, mask, scale):
    # input is 4D tensor (b, np, sq, sk)
    args = _cast_if_autocast_enabled(inputs, mask, scale)
    with torch.cuda.amp.autocast(enabled=False):
        return ScaledMaskedSoftmax.apply(*args)


class FusedScaleMaskSoftmax(torch.nn.Module):
    """
    fused operation: scaling + mask + softmax

    Arguments:
        input_in_fp16: flag to indicate if input in fp16 data format.
        input_in_bf16: flag to indicate if input in bf16 data format.
        attn_mask_type: attention mask type (pad or causal)
        scaled_masked_softmax_fusion: flag to indicate user want to use softmax fusion
        mask_func: mask function to be applied.
        softmax_in_fp32: if true, softmax in performed at fp32 precision.
        scale: scaling factor used in input tensor scaling.
    """

    def __init__(
        self,
        input_in_fp16,
        input_in_bf16,
        attn_mask_type,
        scaled_masked_softmax_fusion,
        mask_func,
        softmax_in_fp32,
        scale,
    ):
        super().__init__()
        self.input_in_fp16 = input_in_fp16
        self.input_in_bf16 = input_in_bf16
        if self.input_in_fp16 and self.input_in_bf16:
            raise RuntimeError("both fp16 and bf16 flags cannot be active at the same time.")
        self.input_in_float16 = self.input_in_fp16 or self.input_in_bf16
        self.attn_mask_type = attn_mask_type
        self.scaled_masked_softmax_fusion = scaled_masked_softmax_fusion
        self.mask_func = mask_func
        self.softmax_in_fp32 = softmax_in_fp32
        self.scale = scale

        if not (self.scale is None or softmax_in_fp32):
            raise RuntimeError("softmax should be in fp32 when scaled")

        if self.scaled_masked_softmax_fusion:
            if self.attn_mask_type == AttnMaskType.causal:
                self.fused_softmax_func = scaled_upper_triang_masked_softmax
            elif self.attn_mask_type == AttnMaskType.padding:
                self.fused_softmax_func = scaled_masked_softmax
            else:
                raise ValueError("Invalid attn_mask_type.")

    def forward(self, input, mask):
        # [b, np, sq, sk]
        assert input.dim() == 4

        if self.is_kernel_available(mask, *input.size()):
            return self.forward_fused_softmax(input, mask)
        else:
            return self.forward_torch_softmax(input, mask)

    def is_kernel_available(self, mask, b, np, sq, sk):
        attn_batches = b * np

        if (
            self.scaled_masked_softmax_fusion  # user want to fuse
            and self.input_in_float16  # input must be fp16
            and (
                self.attn_mask_type == AttnMaskType.causal
                or (self.attn_mask_type == AttnMaskType.padding and mask is not None)
            )
            and 16 < sk <= 8192  # sk must be 16 ~ 8192
            and sq % 4 == 0  # sq must be divisor of 4
            and sk % 4 == 0  # sk must be divisor of 4
            and attn_batches % 4 == 0  # np * b must be divisor of 4
        ):
            if 0 <= sk <= 8192:
                batch_per_block = self.get_batch_per_block(sq, sk, b, np)

                if self.attn_mask_type == AttnMaskType.causal:
                    if attn_batches % batch_per_block == 0:
                        return True
                else:
                    if sq % batch_per_block == 0:
                        return True
        return False

    def forward_fused_softmax(self, input, mask):
        # input.shape = [b, np, sq, sk]
        scale = self.scale if self.scale is not None else 1.0
        return self.fused_softmax_func(input, mask, scale)

    def forward_torch_softmax(self, input, mask):
        if self.input_in_float16 and self.softmax_in_fp32:
            input = input.float()

        if self.scale is not None:
            input = input * self.scale
        mask_output = self.mask_func(input, mask) if mask is not None else input
        probs = torch.nn.Softmax(dim=-1)(mask_output)

        if self.input_in_float16 and self.softmax_in_fp32:
            if self.input_in_fp16:
                probs = probs.half()
            else:
                probs = probs.bfloat16()

        return probs

    @staticmethod
    def get_batch_per_block(sq, sk, b, np):
        return scaled_masked_softmax_get_batch_per_block(sq, sk, b, np)
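As a reading aid (not part of the diff), the unfused fallback path forward_torch_softmax is equivalent to the plain-PyTorch sketch below. The additive mask value and the mask convention (True = masked) are assumptions here, since the real mask_func is supplied by the caller.

import torch

def reference_scale_mask_softmax(scores, mask=None, scale=None, mask_value=-10000.0):
    # Mirrors the fallback: optional scaling, then masking, then softmax over the key dim.
    if scale is not None:
        scores = scores * scale
    if mask is not None:
        scores = scores.masked_fill(mask.to(torch.bool), mask_value)
    return torch.softmax(scores, dim=-1)

# scores: (b, np, sq, sk); mask broadcastable to it, True where attention is disallowed.
scores = torch.randn(2, 4, 8, 8)
mask = torch.zeros(2, 1, 8, 8, dtype=torch.bool)
probs = reference_scale_mask_softmax(scores, mask, scale=0.125)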
infer_4_30_0/lib/python3.10/site-packages/flash_attn/layers/__init__.py
ADDED
File without changes
infer_4_30_0/lib/python3.10/site-packages/flash_attn/layers/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (175 Bytes)
infer_4_30_0/lib/python3.10/site-packages/flash_attn/layers/__pycache__/patch_embed.cpython-310.pyc
ADDED
Binary file (1.91 kB)
infer_4_30_0/lib/python3.10/site-packages/flash_attn/layers/__pycache__/rotary.cpython-310.pyc
ADDED
Binary file (13.9 kB)
infer_4_30_0/lib/python3.10/site-packages/flash_attn/modules/__init__.py
ADDED
File without changes
infer_4_30_0/lib/python3.10/site-packages/flash_attn/modules/__pycache__/block.cpython-310.pyc
ADDED
Binary file (8.61 kB)
infer_4_30_0/lib/python3.10/site-packages/flash_attn/modules/__pycache__/embedding.cpython-310.pyc
ADDED
Binary file (6 kB)
infer_4_30_0/lib/python3.10/site-packages/flash_attn/modules/__pycache__/mlp.cpython-310.pyc
ADDED
Binary file (4.4 kB)
infer_4_30_0/lib/python3.10/site-packages/flash_attn/modules/block.py
ADDED
@@ -0,0 +1,397 @@
# Copyright (c) 2024, Tri Dao.

from functools import partial
from typing import Optional

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torchvision.ops import StochasticDepth

from flash_attn.modules.mha import MHA
from flash_attn.modules.mlp import Mlp

try:
    from flash_attn.ops.triton.layer_norm import layer_norm_fn, RMSNorm
except ImportError:
    layer_norm_fn, RMSNorm = None, None


class Block(nn.Module):
    def __init__(
        self,
        dim,
        mixer_cls=None,
        mlp_cls=None,
        norm_cls=nn.LayerNorm,
        dropout_cls=nn.Dropout,
        prenorm=True,
        resid_dropout1=0.0,
        resid_dropout2=0.0,
        drop_path1=0.0,
        drop_path2=0.0,
        fused_dropout_add_ln=False,
        return_residual=False,
        residual_in_fp32=False,
        sequence_parallel=False,
        mark_shared_params=False,
    ):
        """
        For prenorm=True, this Block has a slightly different structure compared to a regular
        prenorm Transformer block.
        The standard block is: LN -> MHA -> Dropout -> Add -> LN -> MLP -> Dropout -> Add.
        [Ref: https://arxiv.org/abs/2002.04745]
        Here we have: Dropout -> Add -> LN -> MHA -> Dropout -> Add -> LN -> MLP, returning both
        the hidden_states (output of the MLP) and the residual.
        This is for performance reasons, as we can fuse the dropout, add and LayerNorm.
        The residual needs to be provided (except for the very first block).

        For prenorm=False, this Block has the same structure as a regular postnorm Transformer
        block: MHA -> Dropout -> Add -> LN -> MLP -> Dropout -> Add -> LN.

        return_residual: whether each of the sub-layers (mixer and mlp) will return the residual.
        This is for performance reason: for post-norm architecture, returning the input allows us
        to fuse the backward of nn.Linear with the residual connection.
        """
        super().__init__()
        self.prenorm = prenorm
        self.fused_dropout_add_ln = fused_dropout_add_ln
        self.return_residual = return_residual
        self.residual_in_fp32 = residual_in_fp32
        if self.residual_in_fp32:
            assert self.prenorm, "residual_in_fp32 is only compatible with prenorm=True"
        if mixer_cls is None:
            mixer_cls = partial(MHA, num_heads=dim // 64)
        if mlp_cls is None:
            mlp_cls = partial(Mlp, hidden_features=4 * dim)
        self.mixer = mixer_cls(dim)
        self.dropout1 = dropout_cls(resid_dropout1)
        self.drop_path1 = StochasticDepth(drop_path1, mode="row")
        self.norm1 = norm_cls(dim)
        self.mlp = mlp_cls(dim)
        if not isinstance(self.mlp, nn.Identity):
            self.dropout2 = dropout_cls(resid_dropout2)
            self.drop_path2 = StochasticDepth(drop_path2, mode="row")
            self.norm2 = norm_cls(dim)

        if self.fused_dropout_add_ln:
            assert layer_norm_fn is not None, "Triton is not installed"
            assert isinstance(self.norm1, (nn.LayerNorm, RMSNorm)) and isinstance(
                self.dropout1, nn.Dropout
            )

        # TD [2023-01-07]: TODO: During training, if sequence_parallel is False and dropout != 0.0,
        # then the input to each worker in the tensor parallel group will be different.
        # This would produce wrong outputs? Somehow we'd need to sync the RNG state across workers.
        # For now this is not an issue because we always use sequence_parallel=True during training
        # and only use sequence_parallel=False during inference.

        # Mark the norm parameters as "sequence_parallel" so that we run all-reduce on their grads.
        if sequence_parallel:
            for p in self.norm1.parameters():
                p._sequence_parallel = True
            if hasattr(self, "norm2"):
                for p in self.norm2.parameters():
                    p._sequence_parallel = True
        # Mark the norm parameters as "shared_params" so that we sync their values at init.
        if mark_shared_params:
            for p in self.norm1.parameters():
                p._shared_params = True
            if hasattr(self, "norm2"):
                for p in self.norm2.parameters():
                    p._shared_params = True

    def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs):
        return self.mixer.allocate_inference_cache(batch_size, max_seqlen, dtype=dtype, **kwargs)

    def forward(
        self,
        hidden_states: Tensor,
        residual: Optional[Tensor] = None,
        mixer_subset=None,
        mixer_kwargs=None,
    ):
        r"""Pass the input through the encoder layer.

        Args:
            hidden_states: the sequence to the encoder layer (required).
            residual: if postnorm, residual=None, If prenorm, hidden_states = Attn/MLP(LN(residual))
            mixer_subset: for cross-attention only. If not None, will take a subset of x
                before applying the query projection. Useful for e.g., ViT where we only care
                about the CLS token in the last layer.
        """
        if self.prenorm:
            if not self.fused_dropout_add_ln:
                dropped = self.drop_path1(self.dropout1(hidden_states))
                residual = (dropped + residual) if residual is not None else dropped
                hidden_states = self.norm1(residual.to(dtype=self.norm1.weight.dtype))
                if self.residual_in_fp32:
                    residual = residual.to(torch.float32)
            else:
                if self.drop_path1.p == 0 or not self.training:
                    rowscale1 = None
                else:
                    rowscale1 = self.drop_path1(
                        torch.ones(
                            hidden_states.shape[:-1],
                            device=hidden_states.device,
                            dtype=hidden_states.dtype,
                        )
                    )
                hidden_states, residual = layer_norm_fn(
                    hidden_states,
                    self.norm1.weight,
                    self.norm1.bias,
                    residual=residual,
                    eps=self.norm1.eps,
                    dropout_p=self.dropout1.p if self.training else 0.0,
                    rowscale=rowscale1,
                    prenorm=True,
                    residual_in_fp32=self.residual_in_fp32,
                    is_rms_norm=isinstance(self.norm1, RMSNorm)
                )
            if mixer_kwargs is None:
                mixer_kwargs = {}
            if mixer_subset is not None:
                mixer_kwargs["mixer_subset"] = mixer_subset
            hidden_states = self.mixer(hidden_states, **mixer_kwargs)
            if mixer_subset is not None:
                residual = residual[:, mixer_subset]
            if not isinstance(self.mlp, nn.Identity):
                if not self.fused_dropout_add_ln:
                    dropped = self.drop_path2(self.dropout2(hidden_states))
                    residual = (dropped + residual) if residual is not None else dropped
                    hidden_states = self.norm2(residual.to(dtype=self.norm2.weight.dtype))
                    if self.residual_in_fp32:
                        residual = residual.to(torch.float32)
                else:
                    if self.drop_path2.p == 0 or not self.training:
                        rowscale2 = None
                    else:
                        rowscale2 = self.drop_path2(
                            torch.ones(
                                hidden_states.shape[:-1],
                                device=hidden_states.device,
                                dtype=hidden_states.dtype,
                            )
                        )
                    hidden_states, residual = layer_norm_fn(
                        hidden_states,
                        self.norm2.weight,
                        self.norm2.bias,
                        residual=residual,
                        eps=self.norm2.eps,
                        dropout_p=self.dropout2.p if self.training else 0.0,
                        rowscale=rowscale2,
                        prenorm=True,
                        residual_in_fp32=self.residual_in_fp32,
                        is_rms_norm=isinstance(self.norm2, RMSNorm)
                    )
                hidden_states = self.mlp(hidden_states)
            return hidden_states, residual
        else:
            assert residual is None
            mixer_out = self.mixer(
                hidden_states, **(mixer_kwargs if mixer_kwargs is not None else {})
            )
            if self.return_residual:  # mixer out is actually a pair here
                mixer_out, hidden_states = mixer_out
            if not self.fused_dropout_add_ln:
                hidden_states = self.norm1(
                    (self.drop_path1(self.dropout1(mixer_out)) + hidden_states).to(
                        dtype=self.norm1.weight.dtype
                    )
                )
            else:
                if self.drop_path1.p == 0 or not self.training:
                    rowscale1 = None
                else:
                    rowscale1 = self.drop_path1(
                        torch.ones(
                            mixer_out.shape[:-1], device=mixer_out.device, dtype=mixer_out.dtype
                        )
                    )
                hidden_states = layer_norm_fn(
                    mixer_out,
                    self.norm1.weight,
                    self.norm1.bias,
                    residual=hidden_states,
                    eps=self.norm1.eps,
                    dropout_p=self.dropout1.p if self.training else 0.0,
                    rowscale=rowscale1,
                    prenorm=False,
                    is_rms_norm=isinstance(self.norm1, RMSNorm)
                )
            if not isinstance(self.mlp, nn.Identity):
                mlp_out = self.mlp(hidden_states)
                if self.return_residual:  # mlp out is actually a pair here
                    mlp_out, hidden_states = mlp_out
                if not self.fused_dropout_add_ln:
                    hidden_states = self.norm2(
                        (self.drop_path2(self.dropout2(mlp_out)) + hidden_states).to(
                            dtype=self.norm2.weight.dtype
                        )
                    )
                else:
                    if self.drop_path2.p == 0 or not self.training:
                        rowscale2 = None
                    else:
                        rowscale2 = self.drop_path2(
                            torch.ones(
                                mlp_out.shape[:-1], device=mlp_out.device, dtype=mlp_out.dtype
                            )
                        )
                    hidden_states = layer_norm_fn(
                        mlp_out,
                        self.norm2.weight,
                        self.norm2.bias,
                        residual=hidden_states,
                        eps=self.norm2.eps,
                        dropout_p=self.dropout2.p if self.training else 0.0,
                        rowscale=rowscale2,
                        prenorm=False,
                        is_rms_norm=isinstance(self.norm2, RMSNorm)
                    )
            return hidden_states


class ParallelBlock(nn.Module):
    """The attention (mixer) and MLP blocks are done in parallel, similar to GPT-J, GPT-NeoX,
    and PaLM.
    """

    def __init__(
        self,
        dim,
        mixer_cls=None,
        mlp_cls=None,
        norm_cls=nn.LayerNorm,
        dropout_cls=nn.Dropout,
        resid_dropout1=0.0,
        resid_dropout2=0.0,
        tied_norm=False,
        fused_dropout_add_ln=False,
        residual_in_fp32=False,
        sequence_parallel=False,
        mark_shared_params=False,
    ):
        """
        This Block has a slightly different structure compared to a regular
        prenorm Transformer block.
        The standard block is: LN -> MHA / MLP -> Dropout -> Add.
        [Ref: https://arxiv.org/abs/2002.04745]
        Here we have: Dropout -> Add -> LN -> MHA / MLP, returning both
        the hidden_states (output1 of the MHA / MLP) and the residual.
        This is for performance reasons, as we can fuse the dropout, add and LayerNorm.
        The residual needs to be provided (except for the very first block).
        """
        super().__init__()
        self.tied_norm = tied_norm
        self.fused_dropout_add_ln = fused_dropout_add_ln
        self.residual_in_fp32 = residual_in_fp32
        if mixer_cls is None:
            mixer_cls = partial(MHA, num_heads=dim // 64)
        if mlp_cls is None:
            mlp_cls = partial(Mlp, hidden_features=4 * dim)
        self.mixer = mixer_cls(dim)
        self.dropout1 = dropout_cls(resid_dropout1)
        self.norm1 = norm_cls(dim)
        self.mlp = mlp_cls(dim)
        self.dropout2 = dropout_cls(resid_dropout2)
        if not self.tied_norm:
            self.norm2 = norm_cls(dim)

        if self.fused_dropout_add_ln:
            assert layer_norm_fn is not None, "Triton is not installed"
            assert isinstance(self.norm1, (nn.LayerNorm, RMSNorm)) and isinstance(
                self.dropout1, nn.Dropout
            )

        # TD [2023-01-07]: TODO: During training, if sequence_parallel is False and dropout != 0.0,
        # then the input to each worker in the tensor parallel group will be different.
        # This would produce wrong outputs? Somehow we'd need to sync the RNG state across workers.
        # For now this is not an issue because we always use sequence_parallel=True during training
        # and only use sequence_parallel=False during inference.

        # Mark the norm parameters as "sequence_parallel" so that we run all-reduce on their grads.
        if sequence_parallel:
            for p in self.norm1.parameters():
                p._sequence_parallel = True
            if hasattr(self, "norm2"):
                for p in self.norm2.parameters():
                    p._sequence_parallel = True
        # Mark the norm parameters as "shared_params" so that we sync their values at init.
        if mark_shared_params:
            for p in self.norm1.parameters():
                p._shared_params = True
            if hasattr(self, "norm2"):
                for p in self.norm2.parameters():
                    p._shared_params = True

    def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs):
        return self.mixer.allocate_inference_cache(batch_size, max_seqlen, dtype=dtype, **kwargs)

    def forward(
        self,
        hidden_states1: Tensor,
        hidden_states2: Optional[Tensor] = None,
        residual: Optional[Tensor] = None,
        mixer_kwargs=None,
    ):
        r"""Pass the input through the encoder layer.

        Args:
            hidden_states1: the output of the previous attention (mixer) or embedding layer.
            hidden_states2: the output of the previous MLP layer (if None, will use hidden_states1).
            residual.
        """
        # TODO: Ideally we should only do the allgather / allreduce once for
        # the Linear to MLP & Attention
        if not self.fused_dropout_add_ln:
            dropped1 = self.dropout1(hidden_states1)
            # For the very 1st block, we only want 1 dropout, not two different dropouts
            if hidden_states2 is not None:
                dropped2 = self.dropout2(hidden_states2)
                residual = (
                    (residual + dropped1 + dropped2)
                    if residual is not None
                    else dropped1 + dropped2
                )
            else:
                residual = (residual + dropped1) if residual is not None else dropped1
            hidden_states1 = self.norm1(residual.to(dtype=self.norm1.weight.dtype))
            hidden_states2 = (
                self.norm2(residual.to(dtype=self.norm2.weight.dtype))
                if not self.tied_norm
                else hidden_states1
            )
            if self.residual_in_fp32:
                residual = residual.to(torch.float32)
        else:
            weight2, bias2 = (
                (self.norm2.weight, self.norm2.bias) if not self.tied_norm else (None, None)
            )
            hidden_states1, *rest, residual = layer_norm_fn(
                hidden_states1,
                self.norm1.weight,
                self.norm1.bias,
                residual=residual,
                x1=hidden_states2,
                weight1=weight2,
                bias1=bias2,
                eps=self.norm1.eps,
                dropout_p=self.dropout1.p if self.training else 0.0,
                prenorm=True,
                residual_in_fp32=self.residual_in_fp32,
                is_rms_norm=isinstance(self.norm1, RMSNorm)
            )
            if self.tied_norm:
                hidden_states2 = hidden_states1
            else:
                hidden_states2, = rest
        if mixer_kwargs is None:
            mixer_kwargs = {}
        hidden_states1 = self.mixer(hidden_states1, **mixer_kwargs)
        hidden_states2 = self.mlp(hidden_states2)
        return hidden_states1, hidden_states2, residual
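As a reading aid (not part of the diff), the prenorm Block above is meant to be chained so that each block consumes and returns the pair (hidden_states, residual); keeping the residual stream separate is what allows the dropout + add + LayerNorm fusion. A rough sketch, with dimensions and the trailing norm chosen purely for illustration:

import torch
import torch.nn as nn

from flash_attn.modules.block import Block

dim, depth = 256, 4
blocks = nn.ModuleList([Block(dim, prenorm=True) for _ in range(depth)])
final_norm = nn.LayerNorm(dim)

x = torch.randn(2, 128, dim)  # (batch, seqlen, dim)
hidden_states, residual = x, None
for block in blocks:
    # Each block applies dropout -> add to residual -> LN -> mixer, then the same for the MLP.
    hidden_states, residual = block(hidden_states, residual)
# The caller still owes the final add + norm on the residual stream.
out = final_norm((hidden_states + residual) if residual is not None else hidden_states)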
infer_4_30_0/lib/python3.10/site-packages/flash_attn/modules/embedding.py
ADDED
@@ -0,0 +1,216 @@
# Copyright (c) 2022, Tri Dao.

import torch
import torch.nn as nn
from einops import rearrange
from torch import Tensor

from flash_attn.utils.distributed import all_reduce, reduce_scatter


class GPT2Embeddings(nn.Module):
    def __init__(
        self,
        embed_dim,
        vocab_size,
        max_position_embeddings,
        padding_idx=None,
        word_embed_proj_dim=None,
        device=None,
        dtype=None,
    ):
        """
        If max_position_embeddings <= 0, there's no position embeddings
        If word_embe_proj_dim is not None (e.g., OPT-350m), we embed to that dimension
        the project up to embed_dim
        """
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        if word_embed_proj_dim is None:
            self.word_embeddings = nn.Embedding(
                vocab_size, embed_dim, padding_idx=padding_idx, **factory_kwargs
            )
            self.project_in = None
        else:
            self.word_embeddings = nn.Embedding(
                vocab_size, word_embed_proj_dim, padding_idx=padding_idx, **factory_kwargs
            )
            self.project_in = nn.Linear(
                word_embed_proj_dim, embed_dim, bias=False, **factory_kwargs
            )
        self.max_position_embeddings = max_position_embeddings
        if self.max_position_embeddings > 0:
            self.position_embeddings = nn.Embedding(
                max_position_embeddings, embed_dim, **factory_kwargs
            )

    def forward(self, input_ids, position_ids=None):
        """
        input_ids: (batch, seqlen)
        position_ids: (batch, seqlen)
        """
        batch_size, seqlen = input_ids.shape
        embeddings = self.word_embeddings(input_ids)
        if self.project_in is not None:
            embeddings = self.project_in(embeddings)
        if self.max_position_embeddings > 0:
            if position_ids is None:
                position_ids = torch.arange(seqlen, dtype=torch.long, device=input_ids.device)
            position_embeddings = self.position_embeddings(position_ids)
            embeddings = embeddings + position_embeddings
        return embeddings


class BertEmbeddings(nn.Module):
    def __init__(
        self,
        embed_dim,
        vocab_size,
        max_position_embeddings,
        type_vocab_size,
        padding_idx=None,
        device=None,
        dtype=None,
    ):
        """
        If max_position_embeddings <= 0, there's no position embeddings
        If type_vocab_size <= 0, there's no token type embeddings
        """
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        self.word_embeddings = nn.Embedding(
            vocab_size, embed_dim, padding_idx=padding_idx, **factory_kwargs
        )
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        if self.max_position_embeddings > 0:
            self.position_embeddings = nn.Embedding(
                max_position_embeddings, embed_dim, **factory_kwargs
            )
        if self.type_vocab_size > 0:
            self.token_type_embeddings = nn.Embedding(type_vocab_size, embed_dim, **factory_kwargs)

    def forward(self, input_ids, position_ids=None, token_type_ids=None):
        """
        input_ids: (batch, seqlen)
        position_ids: (batch, seqlen)
        token_type_ids: (batch, seqlen)
        """
        batch_size, seqlen = input_ids.shape
        embeddings = self.word_embeddings(input_ids)
        if self.max_position_embeddings > 0:
            if position_ids is None:
                position_ids = torch.arange(seqlen, dtype=torch.long, device=input_ids.device)
            position_embeddings = self.position_embeddings(position_ids)
            embeddings = embeddings + position_embeddings
        if self.type_vocab_size > 0:
            if token_type_ids is None:
                token_type_ids = torch.zeros(seqlen, dtype=torch.long, device=input_ids.device)
            token_type_embeddings = self.token_type_embeddings(token_type_ids)
            embeddings = embeddings + token_type_embeddings
        return embeddings


class VocabParallelEmbedding(nn.Embedding):
    def __init__(self, num_embeddings, *args, process_group=None, padding_idx=None, **kwargs):
        self.process_group = process_group
        if process_group is not None:
            world_size = torch.distributed.get_world_size(process_group)
            if num_embeddings % world_size != 0:
                raise ValueError(
                    f"num_embeddings ({num_embeddings}) must be divisible by "
                    f"world_size ({world_size})"
                )
            if world_size > 1 and padding_idx is not None:
                raise RuntimeError("ParallelEmbedding does not support padding_idx")
        else:
            world_size = 1
        super().__init__(num_embeddings // world_size, *args, padding_idx=padding_idx, **kwargs)

    def forward(self, input: Tensor) -> Tensor:
        if self.process_group is None:
            return super().forward(input)
        else:
            rank = torch.distributed.get_rank(self.process_group)
            vocab_size = self.num_embeddings
            vocab_start_index, vocab_end_index = rank * vocab_size, (rank + 1) * vocab_size
            # Create a mask of valid vocab ids (1 means it needs to be masked).
            input_ids_mask = (input < vocab_start_index) | (input >= vocab_end_index)
            input = input - vocab_start_index
            input[input_ids_mask] = 0
            embeddings = super().forward(input)
            embeddings[input_ids_mask] = 0.0
            return embeddings


class ColumnParallelEmbedding(nn.Embedding):
    def __init__(self, num_embeddings, embedding_dim, *args, process_group=None, **kwargs):
        self.process_group = process_group
        if process_group is not None:
            world_size = torch.distributed.get_world_size(process_group)
            if embedding_dim % world_size != 0:
                raise ValueError(
                    f"embedding_dim ({embedding_dim}) must be divisible by "
                    f"world_size ({world_size})"
                )
        else:
            world_size = 1
        super().__init__(num_embeddings, embedding_dim // world_size, *args, **kwargs)


class ParallelGPT2Embeddings(nn.Module):
    def __init__(
        self,
        embed_dim,
        vocab_size,
        max_position_embeddings,
        process_group,
        padding_idx=None,
        sequence_parallel=True,
        device=None,
        dtype=None,
    ):
        """
        If max_position_embeddings <= 0, there's no position embeddings
        """
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        self.process_group = process_group
        self.sequence_parallel = sequence_parallel
        self.word_embeddings = VocabParallelEmbedding(
            vocab_size,
            embed_dim,
            padding_idx=padding_idx,
            process_group=process_group,
            **factory_kwargs,
        )
        self.max_position_embeddings = max_position_embeddings
        if self.max_position_embeddings > 0:
            self.position_embeddings = ColumnParallelEmbedding(
                max_position_embeddings, embed_dim, process_group=process_group, **factory_kwargs
            )

    def forward(self, input_ids, position_ids=None, combine_batch_seqlen_dim=False):
        """
        input_ids: (batch, seqlen)
        position_ids: (batch, seqlen)
        """
        batch_size, seqlen = input_ids.shape
        world_size = torch.distributed.get_world_size(self.process_group)
        embeddings = self.word_embeddings(input_ids)
        if self.max_position_embeddings > 0:
            if position_ids is None:
                position_ids = torch.arange(seqlen, dtype=torch.long, device=input_ids.device)
            position_embeddings = self.position_embeddings(position_ids)
            if world_size <= 1:
                embeddings = embeddings + position_embeddings
            else:
                partition_dim = self.position_embeddings.embedding_dim
                rank = torch.distributed.get_rank(self.process_group)
                embeddings[
                    ..., rank * partition_dim : (rank + 1) * partition_dim
                ] += position_embeddings
        if combine_batch_seqlen_dim:
            embeddings = rearrange(embeddings, "b s d -> (b s) d")
        reduce_fn = reduce_scatter if self.sequence_parallel else all_reduce
        return embeddings if world_size <= 1 else reduce_fn(embeddings, self.process_group)
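A small illustration (not part of the diff) of the masking trick that VocabParallelEmbedding.forward relies on: each rank embeds only the vocabulary slice it owns, zeroes out tokens owned by other ranks, and a later all-reduce (simulated here by summing over shards) recovers the full embedding. Names and sizes are illustrative.

import torch

def embed_local_shard(input_ids, weight_shard, rank):
    # weight_shard: (vocab_per_rank, dim), rows [rank * vocab_per_rank, (rank + 1) * vocab_per_rank)
    vocab_per_rank = weight_shard.shape[0]
    start, end = rank * vocab_per_rank, (rank + 1) * vocab_per_rank
    mask = (input_ids < start) | (input_ids >= end)  # tokens owned by other ranks
    local_ids = (input_ids - start).clamp(min=0)
    local_ids[mask] = 0          # any in-range index; the result is zeroed just below
    out = weight_shard[local_ids]
    out[mask] = 0.0              # foreign tokens contribute nothing from this shard
    return out

full_table = torch.randn(8, 4)               # full vocab of 8, embedding dim 4
shards = full_table.chunk(2, dim=0)          # two simulated "ranks"
ids = torch.randint(0, 8, (3, 5))
combined = sum(embed_local_shard(ids, shard, r) for r, shard in enumerate(shards))
assert torch.allclose(combined, full_table[ids])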
infer_4_30_0/lib/python3.10/site-packages/flash_attn/modules/mha.py
ADDED
@@ -0,0 +1,1020 @@
# Copyright (c) 2023, Tri Dao.

import math
from functools import partial

import torch
import torch.nn as nn
from einops import rearrange, repeat

from flash_attn.utils.distributed import get_dim_for_local_rank

try:
    from flash_attn import (
        flash_attn_kvpacked_func,
        flash_attn_qkvpacked_func,
        flash_attn_varlen_kvpacked_func,
        flash_attn_varlen_qkvpacked_func,
        flash_attn_with_kvcache,
    )
except ImportError:
    flash_attn_varlen_qkvpacked_func, flash_attn_varlen_kvpacked_func = None, None
    flash_attn_qkvpacked_func, flash_attn_kvpacked_func = None, None
    flash_attn_with_kvcache = None

try:
    from flash_attn.ops.fused_dense import ColumnParallelLinear, FusedDense, RowParallelLinear
except ImportError:
    FusedDense, ColumnParallelLinear, RowParallelLinear = None, None, None

try:
| 31 |
+
from flash_attn.layers.rotary import RotaryEmbedding
|
| 32 |
+
except ImportError:
|
| 33 |
+
RotaryEmbedding = None
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
# From https://github.com/ofirpress/attention_with_linear_biases/blob/4b92f28a005ead2567abe2359f633e73e08f3833/fairseq/models/transformer.py#L742
|
| 37 |
+
def get_alibi_slopes(nheads):
|
| 38 |
+
def get_slopes_power_of_2(nheads):
|
| 39 |
+
start = 2 ** (-(2 ** -(math.log2(nheads) - 3)))
|
| 40 |
+
ratio = start
|
| 41 |
+
return [start * ratio**i for i in range(nheads)]
|
| 42 |
+
|
| 43 |
+
if math.log2(nheads).is_integer():
|
| 44 |
+
return get_slopes_power_of_2(nheads)
|
| 45 |
+
else:
|
| 46 |
+
closest_power_of_2 = 2 ** math.floor(math.log2(nheads))
|
| 47 |
+
return (
|
| 48 |
+
get_slopes_power_of_2(closest_power_of_2)
|
| 49 |
+
+ get_alibi_slopes(2 * closest_power_of_2)[0::2][: nheads - closest_power_of_2]
|
| 50 |
+
)
|
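The closed form above gives n heads a geometric sequence of slopes with ratio 2^(-8/n) (so for a power-of-two head count, head h gets slope (2^(-8/n))^h); non-power-of-two counts interleave slopes from the next power of two. A minimal standalone check of that property (the helper name here is illustrative, not the library function):

import math

def alibi_slopes_power_of_2(nheads):
    # start = 2^(-8/n); the slopes are the geometric sequence start, start^2, ...
    start = 2 ** (-(2 ** -(math.log2(nheads) - 3)))
    return [start * start**i for i in range(nheads)]

slopes = alibi_slopes_power_of_2(8)
# For 8 heads the slopes are 2^-1, 2^-2, ..., 2^-8.
assert all(abs(s - 2 ** -(i + 1)) < 1e-12 for i, s in enumerate(slopes))
print(slopes)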
| 51 |
+
|
| 52 |
+
|
| 53 |
+
class FlashSelfAttention(nn.Module):
|
| 54 |
+
"""Implement the scaled dot product attention with softmax.
|
| 55 |
+
Arguments
|
| 56 |
+
---------
|
| 57 |
+
softmax_scale: The temperature to use for the softmax attention.
|
| 58 |
+
(default: 1/sqrt(d_keys) where d_keys is computed at
|
| 59 |
+
runtime)
|
| 60 |
+
attention_dropout: The dropout rate to apply to the attention
|
| 61 |
+
(default: 0.0)
|
| 62 |
+
"""
|
| 63 |
+
|
| 64 |
+
def __init__(
|
| 65 |
+
self,
|
| 66 |
+
causal=False,
|
| 67 |
+
softmax_scale=None,
|
| 68 |
+
attention_dropout=0.0,
|
| 69 |
+
window_size=(-1, -1),
|
| 70 |
+
alibi_slopes=None,
|
| 71 |
+
deterministic=False,
|
| 72 |
+
):
|
| 73 |
+
super().__init__()
|
| 74 |
+
assert flash_attn_varlen_qkvpacked_func is not None, "FlashAttention is not installed"
|
| 75 |
+
assert flash_attn_qkvpacked_func is not None, "FlashAttention is not installed"
|
| 76 |
+
self.causal = causal
|
| 77 |
+
self.softmax_scale = softmax_scale
|
| 78 |
+
self.drop = nn.Dropout(attention_dropout)
|
| 79 |
+
self.register_buffer("alibi_slopes", alibi_slopes, persistent=False)
|
| 80 |
+
self.window_size = window_size
|
| 81 |
+
self.deterministic = deterministic
|
| 82 |
+
|
| 83 |
+
def forward(self, qkv, causal=None, cu_seqlens=None, max_seqlen=None):
|
| 84 |
+
"""Implements the multihead softmax attention.
|
| 85 |
+
Arguments
|
| 86 |
+
---------
|
| 87 |
+
qkv: The tensor containing the query, key, and value.
|
| 88 |
+
If cu_seqlens is None and max_seqlen is None, then qkv has shape (B, S, 3, H, D).
|
| 89 |
+
If cu_seqlens is not None and max_seqlen is not None, then qkv has shape
|
| 90 |
+
(total, 3, H, D), where total is the sum of the sequence lengths in the batch.
|
| 91 |
+
causal: if passed, will override self.causal
|
| 92 |
+
cu_seqlens: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
|
| 93 |
+
of the sequences in the batch, used to index into qkv.
|
| 94 |
+
max_seqlen: int. Maximum sequence length in the batch.
|
| 95 |
+
Returns:
|
| 96 |
+
--------
|
| 97 |
+
out: (total, H, D) if cu_seqlens is not None and max_seqlen is not None,
|
| 98 |
+
else (B, S, H, D).
|
| 99 |
+
"""
|
| 100 |
+
assert qkv.dtype in [torch.float16, torch.bfloat16]
|
| 101 |
+
assert qkv.is_cuda
|
| 102 |
+
causal = self.causal if causal is None else causal
|
| 103 |
+
unpadded = cu_seqlens is not None
|
| 104 |
+
if self.alibi_slopes is not None:
|
| 105 |
+
self.alibi_slopes = self.alibi_slopes.to(torch.float32)
|
| 106 |
+
if unpadded:
|
| 107 |
+
assert cu_seqlens.dtype == torch.int32
|
| 108 |
+
assert max_seqlen is not None
|
| 109 |
+
assert isinstance(max_seqlen, int)
|
| 110 |
+
return flash_attn_varlen_qkvpacked_func(
|
| 111 |
+
qkv,
|
| 112 |
+
cu_seqlens,
|
| 113 |
+
max_seqlen,
|
| 114 |
+
self.drop.p if self.training else 0.0,
|
| 115 |
+
softmax_scale=self.softmax_scale,
|
| 116 |
+
causal=causal,
|
| 117 |
+
alibi_slopes=self.alibi_slopes,
|
| 118 |
+
window_size=self.window_size,
|
| 119 |
+
deterministic=self.deterministic,
|
| 120 |
+
)
|
| 121 |
+
else:
|
| 122 |
+
return flash_attn_qkvpacked_func(
|
| 123 |
+
qkv,
|
| 124 |
+
self.drop.p if self.training else 0.0,
|
| 125 |
+
softmax_scale=self.softmax_scale,
|
| 126 |
+
causal=causal,
|
| 127 |
+
alibi_slopes=self.alibi_slopes,
|
| 128 |
+
window_size=self.window_size,
|
| 129 |
+
deterministic=self.deterministic,
|
| 130 |
+
)
|
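The unpadded ("varlen") branch above expects every sequence concatenated along a single "total" dimension plus int32 cumulative lengths. A minimal sketch of building that layout from a padded batch, following the shapes in the docstring; no FlashAttention kernel is called, so this runs on CPU and the tensor names are illustrative:

import torch

B, S, H, D = 2, 6, 4, 16
lengths = torch.tensor([6, 3])                 # actual length of each sequence
qkv_padded = torch.randn(B, S, 3, H, D)

# Cumulative sequence lengths: [0, 6, 9], dtype int32 as the kernel requires.
cu_seqlens = torch.zeros(B + 1, dtype=torch.int32)
cu_seqlens[1:] = torch.cumsum(lengths, dim=0)
max_seqlen = int(lengths.max())

# Pack: keep only the valid tokens of each sequence -> (total, 3, H, D).
qkv_unpadded = torch.cat([qkv_padded[b, : lengths[b]] for b in range(B)], dim=0)
assert qkv_unpadded.shape == (int(lengths.sum()), 3, H, D)
print(cu_seqlens, max_seqlen, qkv_unpadded.shape)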
| 131 |
+
|
| 132 |
+
|
| 133 |
+
class FlashCrossAttention(nn.Module):
|
| 134 |
+
"""Implement the scaled dot product attention with softmax.
|
| 135 |
+
Arguments
|
| 136 |
+
---------
|
| 137 |
+
softmax_scale: The temperature to use for the softmax attention.
|
| 138 |
+
(default: 1/sqrt(d_keys) where d_keys is computed at
|
| 139 |
+
runtime)
|
| 140 |
+
attention_dropout: The dropout rate to apply to the attention
|
| 141 |
+
(default: 0.0)
|
| 142 |
+
"""
|
| 143 |
+
|
| 144 |
+
def __init__(
|
| 145 |
+
self,
|
| 146 |
+
causal=False,
|
| 147 |
+
softmax_scale=None,
|
| 148 |
+
attention_dropout=0.0,
|
| 149 |
+
alibi_slopes=None,
|
| 150 |
+
window_size=(-1, -1),
|
| 151 |
+
deterministic=False,
|
| 152 |
+
):
|
| 153 |
+
super().__init__()
|
| 154 |
+
assert flash_attn_varlen_kvpacked_func is not None, "FlashAttention is not installed"
|
| 155 |
+
assert flash_attn_kvpacked_func is not None, "FlashAttention is not installed"
|
| 156 |
+
self.causal = causal
|
| 157 |
+
self.softmax_scale = softmax_scale
|
| 158 |
+
self.drop = nn.Dropout(attention_dropout)
|
| 159 |
+
self.register_buffer("alibi_slopes", alibi_slopes, persistent=False)
|
| 160 |
+
self.window_size = window_size
|
| 161 |
+
self.deterministic = deterministic
|
| 162 |
+
|
| 163 |
+
def forward(
|
| 164 |
+
self,
|
| 165 |
+
q,
|
| 166 |
+
kv,
|
| 167 |
+
causal=None,
|
| 168 |
+
cu_seqlens=None,
|
| 169 |
+
max_seqlen=None,
|
| 170 |
+
cu_seqlens_k=None,
|
| 171 |
+
max_seqlen_k=None,
|
| 172 |
+
):
|
| 173 |
+
"""Implements the multihead softmax attention.
|
| 174 |
+
Arguments
|
| 175 |
+
---------
|
| 176 |
+
q: The tensor containing the query. (B, Sq, H, D)
|
| 177 |
+
kv: The tensor containing the key and value. (B, Sk, 2, H_k, D)
|
| 178 |
+
causal: if passed, will override self.causal
|
| 179 |
+
cu_seqlens: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
|
| 180 |
+
of the sequences in the batch, used to index into q.
|
| 181 |
+
max_seqlen: int. Maximum sequence length in the batch of q.
|
| 182 |
+
cu_seqlens_k: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
|
| 183 |
+
of the sequences in the batch, used to index into kv.
|
| 184 |
+
max_seqlen_k: int. Maximum sequence length in the batch of k and v.
|
| 185 |
+
"""
|
| 186 |
+
assert q.dtype in [torch.float16, torch.bfloat16]
|
| 187 |
+
assert q.is_cuda and kv.is_cuda
|
| 188 |
+
causal = self.causal if causal is None else causal
|
| 189 |
+
unpadded = cu_seqlens is not None
|
| 190 |
+
if self.alibi_slopes is not None:
|
| 191 |
+
self.alibi_slopes = self.alibi_slopes.to(torch.float32)
|
| 192 |
+
if unpadded:
|
| 193 |
+
assert cu_seqlens.dtype == torch.int32
|
| 194 |
+
assert max_seqlen is not None
|
| 195 |
+
assert isinstance(max_seqlen, int)
|
| 196 |
+
assert cu_seqlens_k is not None
|
| 197 |
+
assert cu_seqlens_k.dtype == torch.int32
|
| 198 |
+
assert max_seqlen_k is not None
|
| 199 |
+
assert isinstance(max_seqlen_k, int)
|
| 200 |
+
return flash_attn_varlen_kvpacked_func(
|
| 201 |
+
q,
|
| 202 |
+
kv,
|
| 203 |
+
cu_seqlens,
|
| 204 |
+
cu_seqlens_k,
|
| 205 |
+
max_seqlen,
|
| 206 |
+
max_seqlen_k,
|
| 207 |
+
self.drop.p if self.training else 0.0,
|
| 208 |
+
softmax_scale=self.softmax_scale,
|
| 209 |
+
causal=causal,
|
| 210 |
+
alibi_slopes=self.alibi_slopes,
|
| 211 |
+
window_size=self.window_size,
|
| 212 |
+
deterministic=self.deterministic,
|
| 213 |
+
)
|
| 214 |
+
else:
|
| 215 |
+
batch_size, seqlen_q = q.shape[0], q.shape[1]
|
| 216 |
+
seqlen_k = kv.shape[1]
|
| 217 |
+
assert kv.shape[0] == batch_size and kv.shape[4] == q.shape[3]
|
| 218 |
+
return flash_attn_kvpacked_func(
|
| 219 |
+
q,
|
| 220 |
+
kv,
|
| 221 |
+
self.drop.p if self.training else 0.0,
|
| 222 |
+
causal=causal,
|
| 223 |
+
softmax_scale=self.softmax_scale,
|
| 224 |
+
alibi_slopes=self.alibi_slopes,
|
| 225 |
+
window_size=self.window_size,
|
| 226 |
+
deterministic=self.deterministic,
|
| 227 |
+
)
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
class SelfAttention(nn.Module):
|
| 231 |
+
"""Implement the scaled dot product attention with softmax.
|
| 232 |
+
Arguments
|
| 233 |
+
---------
|
| 234 |
+
softmax_scale: The temperature to use for the softmax attention.
|
| 235 |
+
(default: 1/sqrt(d_keys) where d_keys is computed at
|
| 236 |
+
runtime)
|
| 237 |
+
attention_dropout: The dropout rate to apply to the attention
|
| 238 |
+
(default: 0.0)
|
| 239 |
+
"""
|
| 240 |
+
|
| 241 |
+
def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0):
|
| 242 |
+
super().__init__()
|
| 243 |
+
self.causal = causal
|
| 244 |
+
self.softmax_scale = softmax_scale
|
| 245 |
+
self.drop = nn.Dropout(attention_dropout)
|
| 246 |
+
|
| 247 |
+
def forward(self, qkv, causal=None, key_padding_mask=None):
|
| 248 |
+
"""Implements the multihead softmax attention.
|
| 249 |
+
Arguments
|
| 250 |
+
---------
|
| 251 |
+
qkv: The tensor containing the query, key, and value. (B, S, 3, H, D)
|
| 252 |
+
causal: if passed, will override self.causal
|
| 253 |
+
key_padding_mask: boolean mask to apply to the attention weights. True means to keep,
|
| 254 |
+
False means to mask out. (B, S)
|
| 255 |
+
"""
|
| 256 |
+
batch_size, seqlen = qkv.shape[0], qkv.shape[1]
|
| 257 |
+
causal = self.causal if causal is None else causal
|
| 258 |
+
q, k, v = qkv.unbind(dim=2)
|
| 259 |
+
softmax_scale = self.softmax_scale or 1.0 / math.sqrt(q.shape[-1])
|
| 260 |
+
scores = torch.einsum("bthd,bshd->bhts", q, k * softmax_scale)
|
| 261 |
+
if key_padding_mask is not None:
|
| 262 |
+
padding_mask = torch.full(
|
| 263 |
+
(batch_size, seqlen), -10000.0, dtype=scores.dtype, device=scores.device
|
| 264 |
+
)
|
| 265 |
+
padding_mask.masked_fill_(key_padding_mask, 0.0)
|
| 266 |
+
# TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess)
|
| 267 |
+
scores = scores + rearrange(padding_mask, "b s -> b 1 1 s")
|
| 268 |
+
if causal:
|
| 269 |
+
# "triu_tril_cuda_template" not implemented for 'BFloat16'
|
| 270 |
+
# So we have to construct the mask in float
|
| 271 |
+
causal_mask = torch.triu(
|
| 272 |
+
torch.full((seqlen, seqlen), -10000.0, device=scores.device), 1
|
| 273 |
+
)
|
| 274 |
+
# TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess)
|
| 275 |
+
scores = scores + causal_mask.to(dtype=scores.dtype)
|
| 276 |
+
attention = torch.softmax(scores, dim=-1, dtype=v.dtype)
|
| 277 |
+
attention_drop = self.drop(attention)
|
| 278 |
+
output = torch.einsum("bhts,bshd->bthd", attention_drop, v)
|
| 279 |
+
return output
|
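The einsum-based reference above is ordinary scaled dot-product attention. A quick standalone cross-check of that math against torch.nn.functional.scaled_dot_product_attention (torch >= 2.0), reimplemented here rather than importing the class, using -inf instead of the finite -10000.0 so the comparison is exact; CPU, no dropout:

import math
import torch
import torch.nn.functional as F

B, S, H, D = 2, 5, 3, 8
qkv = torch.randn(B, S, 3, H, D)
q, k, v = qkv.unbind(dim=2)

scale = 1.0 / math.sqrt(D)
scores = torch.einsum("bthd,bshd->bhts", q, k * scale)
causal_mask = torch.triu(torch.full((S, S), float("-inf")), 1)
attn = torch.softmax(scores + causal_mask, dim=-1)
out = torch.einsum("bhts,bshd->bthd", attn, v)

# SDPA wants (B, H, S, D); transpose in, compare, transpose back.
ref = F.scaled_dot_product_attention(
    q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), is_causal=True
).transpose(1, 2)
assert torch.allclose(out, ref, atol=1e-4)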
| 280 |
+
|
| 281 |
+
|
| 282 |
+
class CrossAttention(nn.Module):
|
| 283 |
+
"""Implement the scaled dot product attention with softmax.
|
| 284 |
+
Arguments
|
| 285 |
+
---------
|
| 286 |
+
softmax_scale: The temperature to use for the softmax attention.
|
| 287 |
+
(default: 1/sqrt(d_keys) where d_keys is computed at
|
| 288 |
+
runtime)
|
| 289 |
+
attention_dropout: The dropout rate to apply to the attention
|
| 290 |
+
(default: 0.0)
|
| 291 |
+
"""
|
| 292 |
+
|
| 293 |
+
def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0):
|
| 294 |
+
super().__init__()
|
| 295 |
+
self.causal = causal
|
| 296 |
+
self.softmax_scale = softmax_scale
|
| 297 |
+
self.drop = nn.Dropout(attention_dropout)
|
| 298 |
+
|
| 299 |
+
def forward(self, q, kv, causal=None, key_padding_mask=None):
|
| 300 |
+
"""Implements the multihead softmax attention.
|
| 301 |
+
Arguments
|
| 302 |
+
---------
|
| 303 |
+
q: The tensor containing the query. (B, Sq, H, D)
|
| 304 |
+
kv: The tensor containing the key and value. (B, Sk, 2, H_k, D)
|
| 305 |
+
causal: if passed, will override self.causal
|
| 306 |
+
key_padding_mask: boolean mask to apply to the attention weights. True means to keep,
|
| 307 |
+
False means to mask out. (B, Sk)
|
| 308 |
+
"""
|
| 309 |
+
batch_size, seqlen_q = q.shape[0], q.shape[1]
|
| 310 |
+
causal = self.causal if causal is None else causal
|
| 311 |
+
seqlen_k = kv.shape[1]
|
| 312 |
+
assert kv.shape[0] == batch_size and kv.shape[4] == q.shape[3]
|
| 313 |
+
if kv.shape[3] != q.shape[2]: # MQA/GQA
|
| 314 |
+
kv = repeat(kv, "... hkv d -> ... (hkv g) d", g=q.shape[2] // kv.shape[3])
|
| 315 |
+
k, v = kv.unbind(dim=2)
|
| 316 |
+
softmax_scale = self.softmax_scale or 1.0 / math.sqrt(q.shape[-1])
|
| 317 |
+
scores = torch.einsum("bthd,bshd->bhts", q, k * softmax_scale)
|
| 318 |
+
if key_padding_mask is not None:
|
| 319 |
+
padding_mask = torch.full(
|
| 320 |
+
(batch_size, seqlen_k), -10000.0, dtype=scores.dtype, device=scores.device
|
| 321 |
+
)
|
| 322 |
+
padding_mask.masked_fill_(key_padding_mask, 0.0)
|
| 323 |
+
# TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess)
|
| 324 |
+
scores = scores + rearrange(padding_mask, "b s -> b 1 1 s")
|
| 325 |
+
if causal:
|
| 326 |
+
# causal mask needs to take into account the difference between seqlen_q and seqlen_k
|
| 327 |
+
row_idx = rearrange(
|
| 328 |
+
torch.arange(seqlen_q, device=q.device, dtype=torch.long), "s -> s 1"
|
| 329 |
+
)
|
| 330 |
+
col_idx = torch.arange(seqlen_k, device=kv.device, dtype=torch.long)
|
| 331 |
+
sk = (
|
| 332 |
+
seqlen_k
|
| 333 |
+
if key_padding_mask is None
|
| 334 |
+
else rearrange(key_padding_mask.sum(-1), "b -> b 1 1 1")
|
| 335 |
+
)
|
| 336 |
+
causal_mask = col_idx > row_idx + sk - seqlen_q
|
| 337 |
+
scores = scores.masked_fill(causal_mask, -10000.0)
|
| 338 |
+
attention = torch.softmax(scores, dim=-1, dtype=v.dtype)
|
| 339 |
+
attention_drop = self.drop(attention)
|
| 340 |
+
output = torch.einsum("bhts,bshd->bthd", attention_drop, v)
|
| 341 |
+
return output
|
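With seqlen_q != seqlen_k, the causal mask above is aligned to the bottom-right corner: query i may attend to keys up to position i + (sk - seqlen_q). A tiny illustration of the mask produced by col_idx > row_idx + sk - seqlen_q (True = masked), with no key padding:

import torch
from einops import rearrange

seqlen_q, seqlen_k = 3, 5
row_idx = rearrange(torch.arange(seqlen_q), "s -> s 1")
col_idx = torch.arange(seqlen_k)
sk = seqlen_k  # no key_padding_mask
causal_mask = col_idx > row_idx + sk - seqlen_q
print(causal_mask.int())
# tensor([[0, 0, 0, 1, 1],
#         [0, 0, 0, 0, 1],
#         [0, 0, 0, 0, 0]])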
| 342 |
+
|
| 343 |
+
|
| 344 |
+
class LinearResidual(nn.Linear):
|
| 345 |
+
"""Wrap nn.Linear to return the residual as well. For compatibility with FusedDense."""
|
| 346 |
+
|
| 347 |
+
def forward(self, input: torch.Tensor) -> torch.Tensor:
|
| 348 |
+
return super().forward(input), input
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
def _update_kv_cache(kv, inference_params, layer_idx):
|
| 352 |
+
"""kv: (batch_size, seqlen, 2, nheads, head_dim) or (batch_size, 1, 2, nheads, head_dim)"""
|
| 353 |
+
# Pre-allocate memory for key-values for inference.
|
| 354 |
+
num_heads, head_dim = kv.shape[-2:]
|
| 355 |
+
if layer_idx not in inference_params.key_value_memory_dict:
|
| 356 |
+
kv_cache = torch.empty(
|
| 357 |
+
inference_params.max_batch_size,
|
| 358 |
+
inference_params.max_seqlen,
|
| 359 |
+
2,
|
| 360 |
+
num_heads,
|
| 361 |
+
head_dim,
|
| 362 |
+
dtype=kv.dtype,
|
| 363 |
+
device=kv.device,
|
| 364 |
+
)
|
| 365 |
+
inference_params.key_value_memory_dict[layer_idx] = kv_cache
|
| 366 |
+
else:
|
| 367 |
+
kv_cache = inference_params.key_value_memory_dict[layer_idx]
|
| 368 |
+
# Adjust key and value for inference
|
| 369 |
+
batch_start = inference_params.batch_size_offset
|
| 370 |
+
batch_end = batch_start + kv.shape[0]
|
| 371 |
+
sequence_start = inference_params.seqlen_offset
|
| 372 |
+
sequence_end = sequence_start + kv.shape[1]
|
| 373 |
+
assert batch_end <= kv_cache.shape[0]
|
| 374 |
+
assert sequence_end <= kv_cache.shape[1]
|
| 375 |
+
assert kv_cache is not None
|
| 376 |
+
kv_cache[batch_start:batch_end, sequence_start:sequence_end, ...] = kv
|
| 377 |
+
return kv_cache[batch_start:batch_end, :sequence_end, ...]
|
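A minimal, self-contained sketch of the cache bookkeeping above, using a stand-in object with the same fields the code reads (max_batch_size, max_seqlen, key_value_memory_dict, batch_size_offset, seqlen_offset). It mirrors the slicing logic for illustration only and is not the library's InferenceParams class:

import torch
from types import SimpleNamespace

def update_kv_cache(kv, params, layer_idx):
    # Same logic as _update_kv_cache above, reimplemented for illustration.
    nheads, head_dim = kv.shape[-2:]
    if layer_idx not in params.key_value_memory_dict:
        params.key_value_memory_dict[layer_idx] = torch.zeros(
            params.max_batch_size, params.max_seqlen, 2, nheads, head_dim, dtype=kv.dtype
        )
    cache = params.key_value_memory_dict[layer_idx]
    b0, s0 = params.batch_size_offset, params.seqlen_offset
    cache[b0 : b0 + kv.shape[0], s0 : s0 + kv.shape[1]] = kv
    return cache[b0 : b0 + kv.shape[0], : s0 + kv.shape[1]]

params = SimpleNamespace(
    max_batch_size=2, max_seqlen=16, key_value_memory_dict={},
    batch_size_offset=0, seqlen_offset=0,
)
# Prefill with 4 tokens, then decode one token.
out = update_kv_cache(torch.randn(2, 4, 2, 3, 8), params, layer_idx=0)
params.seqlen_offset = 4
out = update_kv_cache(torch.randn(2, 1, 2, 3, 8), params, layer_idx=0)
print(out.shape)  # torch.Size([2, 5, 2, 3, 8])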
| 378 |
+
|
| 379 |
+
|
| 380 |
+
class MHA(nn.Module):
|
| 381 |
+
"""Multi-head self-attention and cross-attention"""
|
| 382 |
+
|
| 383 |
+
def __init__(
|
| 384 |
+
self,
|
| 385 |
+
embed_dim,
|
| 386 |
+
num_heads,
|
| 387 |
+
num_heads_kv=None,
|
| 388 |
+
cross_attn=False,
|
| 389 |
+
qkv_proj_bias=True,
|
| 390 |
+
out_proj_bias=True,
|
| 391 |
+
dropout=0.0,
|
| 392 |
+
softmax_scale=None,
|
| 393 |
+
causal=False,
|
| 394 |
+
layer_idx=None,
|
| 395 |
+
dwconv=False,
|
| 396 |
+
rotary_emb_dim=0,
|
| 397 |
+
rotary_emb_base=10000.0,
|
| 398 |
+
rotary_emb_scale_base=None,
|
| 399 |
+
rotary_emb_interleaved=False,
|
| 400 |
+
use_alibi=False,
|
| 401 |
+
window_size=(-1, -1),
|
| 402 |
+
fused_bias_fc=False,
|
| 403 |
+
use_flash_attn=False,
|
| 404 |
+
return_residual=False,
|
| 405 |
+
checkpointing=False,
|
| 406 |
+
device=None,
|
| 407 |
+
dtype=None,
|
| 408 |
+
) -> None:
|
| 409 |
+
"""
|
| 410 |
+
num_heads_kv: can be used to toggle MQA / GQA. If None, use num_heads.
|
| 411 |
+
return_residual: whether to return the input x along with the output. This is for
|
| 412 |
+
performance reasons: for post-norm architectures, returning the input allows us
|
| 413 |
+
to fuse the backward of nn.Linear with the residual connection.
|
| 414 |
+
"""
|
| 415 |
+
factory_kwargs = {"device": device, "dtype": dtype}
|
| 416 |
+
super().__init__()
|
| 417 |
+
self.embed_dim = embed_dim
|
| 418 |
+
self.cross_attn = cross_attn
|
| 419 |
+
self.causal = causal
|
| 420 |
+
self.layer_idx = layer_idx
|
| 421 |
+
self.dwconv = dwconv
|
| 422 |
+
self.rotary_emb_dim = rotary_emb_dim
|
| 423 |
+
self.use_flash_attn = use_flash_attn
|
| 424 |
+
self.return_residual = return_residual
|
| 425 |
+
self.checkpointing = checkpointing
|
| 426 |
+
if use_alibi:
|
| 427 |
+
assert use_flash_attn, "ALiBi code path requires flash_attn"
|
| 428 |
+
alibi_slopes = torch.tensor(get_alibi_slopes(num_heads), device=device)
|
| 429 |
+
else:
|
| 430 |
+
alibi_slopes = None
|
| 431 |
+
if window_size != (-1, -1):
|
| 432 |
+
assert use_flash_attn, "Local (sliding window) attention code path requires flash_attn"
|
| 433 |
+
|
| 434 |
+
self.num_heads = num_heads
|
| 435 |
+
self.num_heads_kv = num_heads_kv if num_heads_kv is not None else num_heads
|
| 436 |
+
assert (
|
| 437 |
+
self.num_heads % self.num_heads_kv == 0
|
| 438 |
+
), "num_heads must be divisible by num_heads_kv"
|
| 439 |
+
assert self.embed_dim % num_heads == 0, "embed_dim must be divisible by num_heads"
|
| 440 |
+
self.head_dim = self.embed_dim // num_heads
|
| 441 |
+
qkv_dim = self.head_dim * (self.num_heads + 2 * self.num_heads_kv)
|
| 442 |
+
kv_dim = 2 * self.head_dim * self.num_heads_kv
|
| 443 |
+
|
| 444 |
+
if self.rotary_emb_dim > 0:
|
| 445 |
+
assert not cross_attn, "MHA with rotary embedding does not support cross-attention yet"
|
| 446 |
+
assert RotaryEmbedding is not None, "rotary_emb is not installed"
|
| 447 |
+
self.rotary_emb = RotaryEmbedding(
|
| 448 |
+
self.rotary_emb_dim,
|
| 449 |
+
base=rotary_emb_base,
|
| 450 |
+
scale_base=rotary_emb_scale_base,
|
| 451 |
+
interleaved=rotary_emb_interleaved,
|
| 452 |
+
device=device,
|
| 453 |
+
)
|
| 454 |
+
|
| 455 |
+
if fused_bias_fc and FusedDense is None:
|
| 456 |
+
raise ImportError("fused_dense is not installed")
|
| 457 |
+
linear_cls = nn.Linear if not fused_bias_fc else FusedDense
|
| 458 |
+
linear_resid_cls = (
|
| 459 |
+
LinearResidual if not fused_bias_fc else partial(FusedDense, return_residual=True)
|
| 460 |
+
)
|
| 461 |
+
wqkv_cls = linear_cls if not self.return_residual else linear_resid_cls
|
| 462 |
+
inner_attn_cls = (
|
| 463 |
+
partial(FlashSelfAttention, alibi_slopes=alibi_slopes, window_size=window_size)
|
| 464 |
+
if use_flash_attn
|
| 465 |
+
else SelfAttention
|
| 466 |
+
)
|
| 467 |
+
inner_cross_attn_cls = (
|
| 468 |
+
partial(FlashCrossAttention, alibi_slopes=alibi_slopes, window_size=window_size)
|
| 469 |
+
if use_flash_attn
|
| 470 |
+
else CrossAttention
|
| 471 |
+
)
|
| 472 |
+
if not self.cross_attn:
|
| 473 |
+
self.Wqkv = wqkv_cls(embed_dim, qkv_dim, bias=qkv_proj_bias, **factory_kwargs)
|
| 474 |
+
else:
|
| 475 |
+
self.Wq = linear_cls(embed_dim, embed_dim, bias=qkv_proj_bias, **factory_kwargs)
|
| 476 |
+
self.Wkv = wqkv_cls(embed_dim, kv_dim, bias=qkv_proj_bias, **factory_kwargs)
|
| 477 |
+
if self.dwconv:
|
| 478 |
+
if self.num_heads_kv == self.num_heads:
|
| 479 |
+
self.dwconv_qkv = nn.Conv1d(
|
| 480 |
+
qkv_dim, qkv_dim, kernel_size=3, padding=2, groups=qkv_dim
|
| 481 |
+
)
|
| 482 |
+
else:
|
| 483 |
+
self.dwconv_q = nn.Conv1d(
|
| 484 |
+
embed_dim, embed_dim, kernel_size=3, padding=2, groups=embed_dim
|
| 485 |
+
)
|
| 486 |
+
self.dwconv_kv = nn.Conv1d(kv_dim, kv_dim, kernel_size=3, padding=2, groups=kv_dim)
|
| 487 |
+
self.inner_attn = inner_attn_cls(
|
| 488 |
+
causal=causal,
|
| 489 |
+
softmax_scale=softmax_scale,
|
| 490 |
+
attention_dropout=dropout,
|
| 491 |
+
)
|
| 492 |
+
self.inner_cross_attn = inner_cross_attn_cls(
|
| 493 |
+
causal=causal, softmax_scale=softmax_scale, attention_dropout=dropout
|
| 494 |
+
)
|
| 495 |
+
self.out_proj = linear_cls(embed_dim, embed_dim, bias=out_proj_bias, **factory_kwargs)
|
| 496 |
+
|
| 497 |
+
def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None):
|
| 498 |
+
dtype = self.out_proj.weight.dtype if dtype is None else dtype
|
| 499 |
+
device = self.out_proj.weight.device
|
| 500 |
+
return torch.empty(
|
| 501 |
+
batch_size,
|
| 502 |
+
max_seqlen,
|
| 503 |
+
2,
|
| 504 |
+
self.num_heads_kv,
|
| 505 |
+
self.head_dim,
|
| 506 |
+
dtype=dtype,
|
| 507 |
+
device=device,
|
| 508 |
+
)
|
| 509 |
+
|
| 510 |
+
def _update_kv_cache(self, kv, inference_params):
|
| 511 |
+
"""kv: (batch_size, seqlen, 2, nheads, head_dim) or (batch_size, 1, 2, nheads, head_dim)"""
|
| 512 |
+
assert not self.dwconv, "Generation does not support dwconv yet"
|
| 513 |
+
assert self.layer_idx is not None, "Generation requires layer_idx in the constructor"
|
| 514 |
+
return _update_kv_cache(kv, inference_params, self.layer_idx)
|
| 515 |
+
|
| 516 |
+
def _apply_rotary_update_kvcache_attention(self, q, kv, inference_params):
|
| 517 |
+
"""
|
| 518 |
+
Fast path that combines 3 steps: apply rotary to Q and K, update the kv cache, and apply attention.
|
| 519 |
+
q: (batch_size, seqlen_q, nheads, head_dim)
|
| 520 |
+
kv: (batch_size, seqlen_k, 2, nheads_kv, head_dim)
|
| 521 |
+
"""
|
| 522 |
+
assert inference_params is not None and inference_params.seqlen_offset > 0
|
| 523 |
+
assert self.use_flash_attn
|
| 524 |
+
if self.rotary_emb_dim > 0:
|
| 525 |
+
assert self.rotary_emb.scale is None, "This code path does not support xPos"
|
| 526 |
+
self.rotary_emb._update_cos_sin_cache(
|
| 527 |
+
inference_params.max_seqlen, device=q.device, dtype=q.dtype
|
| 528 |
+
)
|
| 529 |
+
rotary_cos, rotary_sin = self.rotary_emb._cos_cached, self.rotary_emb._sin_cached
|
| 530 |
+
else:
|
| 531 |
+
rotary_cos, rotary_sin = None, None
|
| 532 |
+
batch = q.shape[0]
|
| 533 |
+
kv_cache = inference_params.key_value_memory_dict[self.layer_idx][:batch]
|
| 534 |
+
cache_seqlens = (
|
| 535 |
+
inference_params.lengths_per_sample[:batch]
|
| 536 |
+
if inference_params.lengths_per_sample is not None
|
| 537 |
+
else inference_params.seqlen_offset
|
| 538 |
+
)
|
| 539 |
+
alibi_slopes = getattr(self.inner_cross_attn, "alibi_slopes", None)
|
| 540 |
+
context = flash_attn_with_kvcache(
|
| 541 |
+
q,
|
| 542 |
+
kv_cache[:, :, 0],
|
| 543 |
+
kv_cache[:, :, 1],
|
| 544 |
+
kv[:, :, 0],
|
| 545 |
+
kv[:, :, 1],
|
| 546 |
+
rotary_cos=rotary_cos,
|
| 547 |
+
rotary_sin=rotary_sin,
|
| 548 |
+
cache_seqlens=cache_seqlens,
|
| 549 |
+
softmax_scale=self.inner_cross_attn.softmax_scale,
|
| 550 |
+
causal=self.inner_cross_attn.causal,
|
| 551 |
+
rotary_interleaved=self.rotary_emb.interleaved if self.rotary_emb_dim > 0 else False,
|
| 552 |
+
alibi_slopes=alibi_slopes,
|
| 553 |
+
)
|
| 554 |
+
return context
|
| 555 |
+
|
| 556 |
+
def _update_kvcache_attention(self, q, kv, inference_params):
|
| 557 |
+
"""Write kv to inference_params, then do attention"""
|
| 558 |
+
if (
|
| 559 |
+
inference_params.seqlen_offset == 0
|
| 560 |
+
or flash_attn_with_kvcache is None
|
| 561 |
+
or not self.use_flash_attn
|
| 562 |
+
):
|
| 563 |
+
# TODO: this only uses seqlen_offset and not lengths_per_sample.
|
| 564 |
+
kv = self._update_kv_cache(kv, inference_params)
|
| 565 |
+
return self.inner_cross_attn(q, kv)
|
| 566 |
+
else:
|
| 567 |
+
batch = q.shape[0]
|
| 568 |
+
kv_cache = inference_params.key_value_memory_dict[self.layer_idx][:batch]
|
| 569 |
+
cache_seqlens = (
|
| 570 |
+
inference_params.lengths_per_sample[:batch]
|
| 571 |
+
if inference_params.lengths_per_sample is not None
|
| 572 |
+
else inference_params.seqlen_offset
|
| 573 |
+
)
|
| 574 |
+
alibi_slopes = getattr(self.inner_cross_attn, "alibi_slopes", None)
|
| 575 |
+
return flash_attn_with_kvcache(
|
| 576 |
+
q,
|
| 577 |
+
kv_cache[:, :, 0],
|
| 578 |
+
kv_cache[:, :, 1],
|
| 579 |
+
kv[:, :, 0],
|
| 580 |
+
kv[:, :, 1],
|
| 581 |
+
cache_seqlens=cache_seqlens,
|
| 582 |
+
softmax_scale=self.inner_cross_attn.softmax_scale,
|
| 583 |
+
causal=self.inner_cross_attn.causal,
|
| 584 |
+
alibi_slopes=alibi_slopes,
|
| 585 |
+
)
|
| 586 |
+
|
| 587 |
+
def forward(
|
| 588 |
+
self,
|
| 589 |
+
x,
|
| 590 |
+
x_kv=None,
|
| 591 |
+
key_padding_mask=None,
|
| 592 |
+
cu_seqlens=None,
|
| 593 |
+
max_seqlen=None,
|
| 594 |
+
mixer_subset=None,
|
| 595 |
+
inference_params=None,
|
| 596 |
+
**kwargs,
|
| 597 |
+
):
|
| 598 |
+
"""
|
| 599 |
+
Arguments:
|
| 600 |
+
x: (batch, seqlen, hidden_dim) (where hidden_dim = num heads * head dim) if
|
| 601 |
+
cu_seqlens is None and max_seqlen is None, else (total, hidden_dim) where total
|
| 602 |
+
is the sum of the sequence lengths in the batch.
|
| 603 |
+
x_kv: (batch, seqlen, hidden_dim), only applicable for cross-attention. If None, use x.
|
| 604 |
+
cu_seqlens: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
|
| 605 |
+
of the sequences in the batch, used to index into x. Only applicable when using
|
| 606 |
+
FlashAttention.
|
| 607 |
+
max_seqlen: int. Maximum sequence length in the batch.
|
| 608 |
+
key_padding_mask: boolean mask, True means to keep, False means to mask out.
|
| 609 |
+
(batch, seqlen). Only applicable when not using FlashAttention.
|
| 610 |
+
mixer_subset: for cross-attention only. If not None, will take a subset of x
|
| 611 |
+
before applying the query projection. Useful for e.g., ViT where we only care
|
| 612 |
+
about the CLS token in the last layer.
|
| 613 |
+
inference_params: for generation. Adapted from Megatron-LM (and Apex)
|
| 614 |
+
https://github.com/NVIDIA/apex/blob/3ff1a10f72ec07067c4e44759442329804ac5162/apex/transformer/testing/standalone_transformer_lm.py#L470
|
| 615 |
+
"""
|
| 616 |
+
if cu_seqlens is not None:
|
| 617 |
+
assert max_seqlen is not None
|
| 618 |
+
assert key_padding_mask is None
|
| 619 |
+
assert self.use_flash_attn
|
| 620 |
+
assert not self.dwconv
|
| 621 |
+
assert self.rotary_emb_dim == 0
|
| 622 |
+
if key_padding_mask is not None:
|
| 623 |
+
assert cu_seqlens is None
|
| 624 |
+
assert max_seqlen is None
|
| 625 |
+
assert not self.use_flash_attn
|
| 626 |
+
if inference_params is not None:
|
| 627 |
+
assert key_padding_mask is None
|
| 628 |
+
assert cu_seqlens is None and max_seqlen is None
|
| 629 |
+
assert not self.dwconv
|
| 630 |
+
|
| 631 |
+
kwargs = (
|
| 632 |
+
{"cu_seqlens": cu_seqlens, "max_seqlen": max_seqlen, **kwargs}
|
| 633 |
+
if self.use_flash_attn
|
| 634 |
+
else {"key_padding_mask": key_padding_mask, **kwargs}
|
| 635 |
+
)
|
| 636 |
+
seqlen_offset = (
|
| 637 |
+
0
|
| 638 |
+
if inference_params is None
|
| 639 |
+
else (
|
| 640 |
+
inference_params.lengths_per_sample
|
| 641 |
+
if inference_params.lengths_per_sample is not None
|
| 642 |
+
else inference_params.seqlen_offset
|
| 643 |
+
)
|
| 644 |
+
)
|
| 645 |
+
rotary_max_seqlen = inference_params.max_seqlen if inference_params is not None else None
|
| 646 |
+
batch, seqlen = x.shape[:2]
|
| 647 |
+
if not self.cross_attn and self.num_heads_kv == self.num_heads:
|
| 648 |
+
assert x_kv is None and mixer_subset is None
|
| 649 |
+
if not self.return_residual:
|
| 650 |
+
qkv = self.Wqkv(x)
|
| 651 |
+
else:
|
| 652 |
+
qkv, x = self.Wqkv(x)
|
| 653 |
+
if self.dwconv:
|
| 654 |
+
qkv = rearrange(
|
| 655 |
+
self.dwconv_qkv(rearrange(qkv, "b s d -> b d s"))[..., :-2], "b d s -> b s d"
|
| 656 |
+
).contiguous()
|
| 657 |
+
qkv = rearrange(qkv, "... (three h d) -> ... three h d", three=3, d=self.head_dim)
|
| 658 |
+
if (
|
| 659 |
+
inference_params is None
|
| 660 |
+
or inference_params.seqlen_offset == 0
|
| 661 |
+
or (self.rotary_emb_dim == 0 or self.rotary_emb_dim % 16 != 0)
|
| 662 |
+
or not self.use_flash_attn
|
| 663 |
+
):
|
| 664 |
+
if self.rotary_emb_dim > 0:
|
| 665 |
+
qkv = self.rotary_emb(
|
| 666 |
+
qkv, seqlen_offset=seqlen_offset, max_seqlen=rotary_max_seqlen
|
| 667 |
+
)
|
| 668 |
+
if inference_params is None:
|
| 669 |
+
if not self.checkpointing:
|
| 670 |
+
context = self.inner_attn(qkv, **kwargs)
|
| 671 |
+
else:
|
| 672 |
+
context = torch.utils.checkpoint.checkpoint(self.inner_attn, qkv, **kwargs)
|
| 673 |
+
else:
|
| 674 |
+
context = self._update_kvcache_attention(
|
| 675 |
+
qkv[:, :, 0], qkv[:, :, 1:], inference_params
|
| 676 |
+
)
|
| 677 |
+
else:
|
| 678 |
+
context = self._apply_rotary_update_kvcache_attention(
|
| 679 |
+
qkv[:, :, 0], qkv[:, :, 1:], inference_params
|
| 680 |
+
)
|
| 681 |
+
else:
|
| 682 |
+
if self.cross_attn:
|
| 683 |
+
if not self.return_residual:
|
| 684 |
+
q = self.Wq(x if mixer_subset is None else x[:, mixer_subset])
|
| 685 |
+
kv = self.Wkv(x_kv if x_kv is not None else x)
|
| 686 |
+
else:
|
| 687 |
+
if x_kv is not None:
|
| 688 |
+
kv, x_kv = self.Wkv(x_kv)
|
| 689 |
+
else:
|
| 690 |
+
kv, x = self.Wkv(x)
|
| 691 |
+
q = self.Wq(x if mixer_subset is None else x[:, mixer_subset])
|
| 692 |
+
else:
|
| 693 |
+
assert self.num_heads_kv != self.num_heads
|
| 694 |
+
if not self.return_residual:
|
| 695 |
+
qkv = self.Wqkv(x)
|
| 696 |
+
else:
|
| 697 |
+
qkv, x = self.Wqkv(x)
|
| 698 |
+
q = qkv[..., : self.num_heads * self.head_dim]
|
| 699 |
+
kv = qkv[..., self.num_heads * self.head_dim :]
|
| 700 |
+
q = rearrange(q, "... (h d) -> ... h d", d=self.head_dim)
|
| 701 |
+
kv = rearrange(kv, "... (two hkv d) -> ... two hkv d", two=2, d=self.head_dim)
|
| 702 |
+
if self.dwconv:
|
| 703 |
+
q = rearrange(
|
| 704 |
+
self.dwconv_q(rearrange(q, "b s d -> b d s"))[..., :-2], "b d s -> b s d"
|
| 705 |
+
).contiguous()
|
| 706 |
+
kv = rearrange(
|
| 707 |
+
self.dwconv_kv(rearrange(kv, "b s d -> b d s"))[..., :-2], "b d s -> b s d"
|
| 708 |
+
).contiguous()
|
| 709 |
+
if (
|
| 710 |
+
inference_params is None
|
| 711 |
+
or inference_params.seqlen_offset == 0
|
| 712 |
+
or (self.rotary_emb_dim == 0 or self.rotary_emb_dim % 16 != 0)
|
| 713 |
+
or not self.use_flash_attn
|
| 714 |
+
):
|
| 715 |
+
if self.rotary_emb_dim > 0:
|
| 716 |
+
q, kv = self.rotary_emb(
|
| 717 |
+
q, kv, seqlen_offset=seqlen_offset, max_seqlen=rotary_max_seqlen
|
| 718 |
+
)
|
| 719 |
+
if inference_params is None:
|
| 720 |
+
if not self.checkpointing:
|
| 721 |
+
context = self.inner_cross_attn(q, kv, **kwargs)
|
| 722 |
+
else:
|
| 723 |
+
context = torch.utils.checkpoint.checkpoint(
|
| 724 |
+
self.inner_cross_attn, q, kv, **kwargs
|
| 725 |
+
)
|
| 726 |
+
else:
|
| 727 |
+
context = self._update_kvcache_attention(q, kv, inference_params)
|
| 728 |
+
else:
|
| 729 |
+
context = self._apply_rotary_update_kvcache_attention(q, kv, inference_params)
|
| 730 |
+
out = self.out_proj(rearrange(context, "... h d -> ... (h d)"))
|
| 731 |
+
return out if not self.return_residual else (out, x)
|
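Assuming the flash_attn package is installed and importable, the MHA module above can be exercised on CPU through the non-flash reference path (use_flash_attn=False, default rotary_emb_dim=0). This is only a shape-level usage sketch; the argument values are illustrative:

import torch
from flash_attn.modules.mha import MHA

mha = MHA(embed_dim=64, num_heads=8, num_heads_kv=2, causal=True, use_flash_attn=False)
x = torch.randn(2, 16, 64)   # (batch, seqlen, hidden_dim)
out = mha(x)
print(out.shape)             # torch.Size([2, 16, 64])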
| 732 |
+
|
| 733 |
+
|
| 734 |
+
class ParallelMHA(nn.Module):
|
| 735 |
+
"""Multi-head self-attention and cross-attention"""
|
| 736 |
+
|
| 737 |
+
def __init__(
|
| 738 |
+
self,
|
| 739 |
+
embed_dim,
|
| 740 |
+
num_heads,
|
| 741 |
+
process_group,
|
| 742 |
+
num_heads_kv=None,
|
| 743 |
+
qkv_proj_bias=True,
|
| 744 |
+
out_proj_bias=True,
|
| 745 |
+
dropout=0.0,
|
| 746 |
+
softmax_scale=None,
|
| 747 |
+
causal=False,
|
| 748 |
+
layer_idx=None,
|
| 749 |
+
rotary_emb_dim=0,
|
| 750 |
+
rotary_emb_base=10000.0,
|
| 751 |
+
rotary_emb_scale_base=None,
|
| 752 |
+
rotary_emb_interleaved=False,
|
| 753 |
+
use_alibi=False,
|
| 754 |
+
window_size=(-1, -1),
|
| 755 |
+
use_flash_attn=False,
|
| 756 |
+
checkpointing=False,
|
| 757 |
+
sequence_parallel=True,
|
| 758 |
+
device=None,
|
| 759 |
+
dtype=None,
|
| 760 |
+
) -> None:
|
| 761 |
+
factory_kwargs = {"device": device, "dtype": dtype}
|
| 762 |
+
super().__init__()
|
| 763 |
+
self.embed_dim = embed_dim
|
| 764 |
+
self.causal = causal
|
| 765 |
+
self.layer_idx = layer_idx
|
| 766 |
+
self.rotary_emb_dim = rotary_emb_dim
|
| 767 |
+
self.use_flash_attn = use_flash_attn
|
| 768 |
+
self.checkpointing = checkpointing
|
| 769 |
+
self.process_group = process_group
|
| 770 |
+
self.world_size = process_group.size()
|
| 771 |
+
self.local_rank = torch.distributed.get_rank(process_group)
|
| 772 |
+
|
| 773 |
+
self.num_heads = num_heads
|
| 774 |
+
assert self.embed_dim % self.num_heads == 0, "embed_dim must be divisible by num_heads"
|
| 775 |
+
|
| 776 |
+
self.num_heads_kv = num_heads_kv if num_heads_kv is not None else num_heads
|
| 777 |
+
assert (
|
| 778 |
+
self.num_heads % self.num_heads_kv == 0
|
| 779 |
+
), "num_heads must be divisible by num_heads_kv"
|
| 780 |
+
|
| 781 |
+
self.num_heads_per_rank = get_dim_for_local_rank(
|
| 782 |
+
self.num_heads, self.world_size, self.local_rank
|
| 783 |
+
)
|
| 784 |
+
self.num_heads_kv_per_rank = get_dim_for_local_rank(
|
| 785 |
+
self.num_heads_kv, self.world_size, self.local_rank
|
| 786 |
+
)
|
| 787 |
+
self.head_dim = self.embed_dim // num_heads
|
| 788 |
+
qkv_dim = self.head_dim * (self.num_heads + 2 * self.num_heads_kv)
|
| 789 |
+
|
| 790 |
+
if use_alibi:
|
| 791 |
+
assert use_flash_attn, "ALiBi code path requires flash_attn"
|
| 792 |
+
num_heads_local = math.ceil(self.num_heads / self.world_size)
|
| 793 |
+
alibi_slopes = torch.tensor(
|
| 794 |
+
get_alibi_slopes(num_heads)[
|
| 795 |
+
self.local_rank * num_heads_local : (self.local_rank + 1) * num_heads_local
|
| 796 |
+
],
|
| 797 |
+
device=device,
|
| 798 |
+
)
|
| 799 |
+
else:
|
| 800 |
+
alibi_slopes = None
|
| 801 |
+
if window_size != (-1, -1):
|
| 802 |
+
assert use_flash_attn, "Local (sliding window) attention code path requires flash_attn"
|
| 803 |
+
|
| 804 |
+
if self.rotary_emb_dim > 0:
|
| 805 |
+
assert RotaryEmbedding is not None, "rotary_emb is not installed"
|
| 806 |
+
self.rotary_emb = RotaryEmbedding(
|
| 807 |
+
self.rotary_emb_dim,
|
| 808 |
+
base=rotary_emb_base,
|
| 809 |
+
scale_base=rotary_emb_scale_base,
|
| 810 |
+
interleaved=rotary_emb_interleaved,
|
| 811 |
+
device=device,
|
| 812 |
+
)
|
| 813 |
+
|
| 814 |
+
if ColumnParallelLinear is None or RowParallelLinear is None:
|
| 815 |
+
raise ImportError("fused_dense is not installed")
|
| 816 |
+
self.Wqkv = ColumnParallelLinear(
|
| 817 |
+
embed_dim,
|
| 818 |
+
qkv_dim,
|
| 819 |
+
process_group,
|
| 820 |
+
bias=qkv_proj_bias,
|
| 821 |
+
sequence_parallel=sequence_parallel,
|
| 822 |
+
multiple_of=self.head_dim * (self.num_heads // self.num_heads_kv + 2),
|
| 823 |
+
**factory_kwargs,
|
| 824 |
+
)
|
| 825 |
+
inner_attn_cls = (
|
| 826 |
+
partial(FlashSelfAttention, alibi_slopes=alibi_slopes, window_size=window_size)
|
| 827 |
+
if use_flash_attn
|
| 828 |
+
else SelfAttention
|
| 829 |
+
)
|
| 830 |
+
inner_cross_attn_cls = (
|
| 831 |
+
partial(FlashCrossAttention, alibi_slopes=alibi_slopes, window_size=window_size)
|
| 832 |
+
if use_flash_attn
|
| 833 |
+
else CrossAttention
|
| 834 |
+
)
|
| 835 |
+
self.inner_attn = inner_attn_cls(
|
| 836 |
+
causal=causal, softmax_scale=softmax_scale, attention_dropout=dropout
|
| 837 |
+
)
|
| 838 |
+
self.inner_cross_attn = inner_cross_attn_cls(
|
| 839 |
+
causal=causal, softmax_scale=softmax_scale, attention_dropout=dropout
|
| 840 |
+
)
|
| 841 |
+
self.out_proj = RowParallelLinear(
|
| 842 |
+
embed_dim,
|
| 843 |
+
embed_dim,
|
| 844 |
+
process_group,
|
| 845 |
+
bias=out_proj_bias,
|
| 846 |
+
sequence_parallel=sequence_parallel,
|
| 847 |
+
multiple_of=self.head_dim,
|
| 848 |
+
**factory_kwargs,
|
| 849 |
+
)
|
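get_dim_for_local_rank splits the head count across tensor-parallel ranks. The sketch below assumes the common convention of a near-even split with any remainder assigned to the lower ranks; the helper here is a local illustration of that assumption, not the library function:

def local_heads(num_heads, world_size, local_rank):
    # Assumed convention: near-even split, earlier ranks take the remainder.
    base, rem = divmod(num_heads, world_size)
    return base + (1 if local_rank < rem else 0)

# 10 query heads over 4 ranks -> [3, 3, 2, 2]; 4 kv heads -> [1, 1, 1, 1].
print([local_heads(10, 4, r) for r in range(4)])
print([local_heads(4, 4, r) for r in range(4)])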
| 850 |
+
|
| 851 |
+
def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None):
|
| 852 |
+
dtype = self.out_proj.weight.dtype if dtype is None else dtype
|
| 853 |
+
device = self.out_proj.weight.device
|
| 854 |
+
return torch.empty(
|
| 855 |
+
batch_size,
|
| 856 |
+
max_seqlen,
|
| 857 |
+
2,
|
| 858 |
+
self.num_heads_kv_per_rank,
|
| 859 |
+
self.head_dim,
|
| 860 |
+
dtype=dtype,
|
| 861 |
+
device=device,
|
| 862 |
+
)
|
| 863 |
+
|
| 864 |
+
def _update_kv_cache(self, kv, inference_params):
|
| 865 |
+
"""kv: (batch_size, seqlen, 2, nheads, head_dim) or (batch_size, 1, 2, nheads, head_dim)"""
|
| 866 |
+
assert self.layer_idx is not None, "Generation requires layer_idx in the constructor"
|
| 867 |
+
return _update_kv_cache(kv, inference_params, self.layer_idx)
|
| 868 |
+
|
| 869 |
+
def _apply_rotary_update_kvcache_attention(self, q, kv, inference_params):
|
| 870 |
+
"""
|
| 871 |
+
Fast path that combines 3 steps: apply rotary to Q and K, update the kv cache, and apply attention.
|
| 872 |
+
q: (batch_size, seqlen_q, nheads, head_dim)
|
| 873 |
+
kv: (batch_size, seqlen_k, 2, nheads_kv, head_dim)
|
| 874 |
+
"""
|
| 875 |
+
assert inference_params is not None and inference_params.seqlen_offset > 0
|
| 876 |
+
assert self.use_flash_attn
|
| 877 |
+
if self.rotary_emb_dim > 0:
|
| 878 |
+
assert self.rotary_emb.scale is None, "This code path does not support xPos"
|
| 879 |
+
self.rotary_emb._update_cos_sin_cache(
|
| 880 |
+
inference_params.max_seqlen, device=q.device, dtype=q.dtype
|
| 881 |
+
)
|
| 882 |
+
rotary_cos, rotary_sin = self.rotary_emb._cos_cached, self.rotary_emb._sin_cached
|
| 883 |
+
else:
|
| 884 |
+
rotary_cos, rotary_sin = None, None
|
| 885 |
+
batch = q.shape[0]
|
| 886 |
+
kv_cache = inference_params.key_value_memory_dict[self.layer_idx][:batch]
|
| 887 |
+
cache_seqlens = (
|
| 888 |
+
inference_params.lengths_per_sample[:batch]
|
| 889 |
+
if inference_params.lengths_per_sample is not None
|
| 890 |
+
else inference_params.seqlen_offset
|
| 891 |
+
)
|
| 892 |
+
alibi_slopes = getattr(self.inner_cross_attn, "alibi_slopes", None)
|
| 893 |
+
context = flash_attn_with_kvcache(
|
| 894 |
+
q,
|
| 895 |
+
kv_cache[:, :, 0],
|
| 896 |
+
kv_cache[:, :, 1],
|
| 897 |
+
kv[:, :, 0],
|
| 898 |
+
kv[:, :, 1],
|
| 899 |
+
rotary_cos=rotary_cos,
|
| 900 |
+
rotary_sin=rotary_sin,
|
| 901 |
+
cache_seqlens=cache_seqlens,
|
| 902 |
+
softmax_scale=self.inner_cross_attn.softmax_scale,
|
| 903 |
+
causal=self.inner_cross_attn.causal,
|
| 904 |
+
rotary_interleaved=self.rotary_emb.interleaved if self.rotary_emb_dim > 0 else False,
|
| 905 |
+
alibi_slopes=alibi_slopes,
|
| 906 |
+
)
|
| 907 |
+
return context
|
| 908 |
+
|
| 909 |
+
def _update_kvcache_attention(self, q, kv, inference_params):
|
| 910 |
+
"""Write kv to inference_params, then do attention"""
|
| 911 |
+
if inference_params.seqlen_offset == 0 or not self.use_flash_attn:
|
| 912 |
+
# TODO: this only uses seqlen_offset and not lengths_per_sample.
|
| 913 |
+
kv = self._update_kv_cache(kv, inference_params)
|
| 914 |
+
return self.inner_cross_attn(q, kv)
|
| 915 |
+
else:
|
| 916 |
+
batch = q.shape[0]
|
| 917 |
+
kv_cache = inference_params.key_value_memory_dict[self.layer_idx][:batch]
|
| 918 |
+
cache_seqlens = (
|
| 919 |
+
inference_params.lengths_per_sample[:batch]
|
| 920 |
+
if inference_params.lengths_per_sample is not None
|
| 921 |
+
else inference_params.seqlen_offset
|
| 922 |
+
)
|
| 923 |
+
alibi_slopes = getattr(self.inner_cross_attn, "alibi_slopes", None)
|
| 924 |
+
context = flash_attn_with_kvcache(
|
| 925 |
+
q,
|
| 926 |
+
kv_cache[:, :, 0],
|
| 927 |
+
kv_cache[:, :, 1],
|
| 928 |
+
kv[:, :, 0],
|
| 929 |
+
kv[:, :, 1],
|
| 930 |
+
cache_seqlens=cache_seqlens,
|
| 931 |
+
softmax_scale=self.inner_cross_attn.softmax_scale,
|
| 932 |
+
causal=self.inner_cross_attn.causal,
|
| 933 |
+
alibi_slopes=alibi_slopes,
|
| 934 |
+
)
|
| 935 |
+
return context
|
| 936 |
+
|
| 937 |
+
def forward(self, x, seqlen=None, inference_params=None, **kwargs):
|
| 938 |
+
"""
|
| 939 |
+
Arguments:
|
| 940 |
+
x: (batch, seqlen, hidden_dim) (where hidden_dim = num heads * head dim) if seqlen=None.
|
| 941 |
+
If seqlen is not None, x is (batch * seqlen, hidden_dim). This is so that when we
|
| 942 |
+
split x during sequence parallel, we split the batch * seqlen dimension
|
| 943 |
+
(in case batch is small).
|
| 944 |
+
"""
|
| 945 |
+
qkv = self.Wqkv(x)
|
| 946 |
+
if seqlen is not None:
|
| 947 |
+
qkv = rearrange(qkv, "(b s) ... -> b s ...", s=seqlen)
|
| 948 |
+
seqlen_offset = (
|
| 949 |
+
0
|
| 950 |
+
if inference_params is None
|
| 951 |
+
else (
|
| 952 |
+
inference_params.lengths_per_sample
|
| 953 |
+
if inference_params.lengths_per_sample is not None
|
| 954 |
+
else inference_params.seqlen_offset
|
| 955 |
+
)
|
| 956 |
+
)
|
| 957 |
+
rotary_max_seqlen = inference_params.max_seqlen if inference_params is not None else None
|
| 958 |
+
if self.num_heads_kv == self.num_heads:
|
| 959 |
+
qkv = rearrange(qkv, "b s (three h d) -> b s three h d", three=3, d=self.head_dim)
|
| 960 |
+
if (
|
| 961 |
+
inference_params is None
|
| 962 |
+
or inference_params.seqlen_offset == 0
|
| 963 |
+
or (self.rotary_emb_dim == 0 or self.rotary_emb_dim % 16 != 0)
|
| 964 |
+
or not self.use_flash_attn
|
| 965 |
+
):
|
| 966 |
+
if self.rotary_emb_dim > 0:
|
| 967 |
+
qkv = self.rotary_emb(
|
| 968 |
+
qkv, seqlen_offset=seqlen_offset, max_seqlen=rotary_max_seqlen
|
| 969 |
+
)
|
| 970 |
+
if inference_params is None:
|
| 971 |
+
if not self.checkpointing:
|
| 972 |
+
context = self.inner_attn(qkv, **kwargs)
|
| 973 |
+
else:
|
| 974 |
+
context = torch.utils.checkpoint.checkpoint(self.inner_attn, qkv, **kwargs)
|
| 975 |
+
else:
|
| 976 |
+
context = self._update_kvcache_attention(
|
| 977 |
+
qkv[:, :, 0], qkv[:, :, 1:], inference_params
|
| 978 |
+
)
|
| 979 |
+
else:
|
| 980 |
+
context = self._apply_rotary_update_kvcache_attention(
|
| 981 |
+
qkv[:, :, 0], qkv[:, :, 1:], inference_params
|
| 982 |
+
)
|
| 983 |
+
else:
|
| 984 |
+
q = rearrange(
|
| 985 |
+
qkv[..., : self.num_heads_per_rank * self.head_dim],
|
| 986 |
+
"... (h d) -> ... h d",
|
| 987 |
+
d=self.head_dim,
|
| 988 |
+
)
|
| 989 |
+
kv = rearrange(
|
| 990 |
+
qkv[..., self.num_heads_per_rank * self.head_dim :],
|
| 991 |
+
"... (two hkv d) -> ... two hkv d",
|
| 992 |
+
two=2,
|
| 993 |
+
d=self.head_dim,
|
| 994 |
+
)
|
| 995 |
+
if (
|
| 996 |
+
inference_params is None
|
| 997 |
+
or inference_params.seqlen_offset == 0
|
| 998 |
+
or (self.rotary_emb_dim == 0 or self.rotary_emb_dim % 16 != 0)
|
| 999 |
+
or not self.use_flash_attn
|
| 1000 |
+
):
|
| 1001 |
+
if self.rotary_emb_dim > 0:
|
| 1002 |
+
q, kv = self.rotary_emb(
|
| 1003 |
+
q, kv, seqlen_offset=seqlen_offset, max_seqlen=rotary_max_seqlen
|
| 1004 |
+
)
|
| 1005 |
+
if inference_params is None:
|
| 1006 |
+
if not self.checkpointing:
|
| 1007 |
+
context = self.inner_cross_attn(q, kv, **kwargs)
|
| 1008 |
+
else:
|
| 1009 |
+
context = torch.utils.checkpoint.checkpoint(
|
| 1010 |
+
self.inner_cross_attn, q, kv, **kwargs
|
| 1011 |
+
)
|
| 1012 |
+
else:
|
| 1013 |
+
context = self._update_kvcache_attention(q, kv, inference_params)
|
| 1014 |
+
else:
|
| 1015 |
+
context = self._apply_rotary_update_kvcache_attention(q, kv, inference_params)
|
| 1016 |
+
context = rearrange(context, "b s h d -> b s (h d)")
|
| 1017 |
+
if seqlen is not None:
|
| 1018 |
+
context = rearrange(context, "b s d -> (b s) d")
|
| 1019 |
+
out = self.out_proj(context)
|
| 1020 |
+
return out
|
infer_4_30_0/lib/python3.10/site-packages/flash_attn/modules/mlp.py
ADDED
|
@@ -0,0 +1,191 @@
|
| 1 |
+
# Copyright (c) 2023, Tri Dao.
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
import torch.nn.functional as F
|
| 6 |
+
from torch.distributed import ProcessGroup
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
try:
|
| 10 |
+
from flash_attn.ops.activations import swiglu
|
| 11 |
+
except ImportError:
|
| 12 |
+
swiglu = None
|
| 13 |
+
|
| 14 |
+
try:
|
| 15 |
+
from flash_attn.ops.fused_dense import ColumnParallelLinear, RowParallelLinear
|
| 16 |
+
except ImportError:
|
| 17 |
+
ColumnParallelLinear, RowParallelLinear = None, None
|
| 18 |
+
|
| 19 |
+
try:
|
| 20 |
+
from flash_attn.ops.fused_dense import FusedMLP, ParallelFusedMLP
|
| 21 |
+
except ImportError:
|
| 22 |
+
FusedMLP, ParallelFusedMLP = None, None
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class Mlp(nn.Module):
|
| 26 |
+
def __init__(
|
| 27 |
+
self,
|
| 28 |
+
in_features,
|
| 29 |
+
hidden_features=None,
|
| 30 |
+
out_features=None,
|
| 31 |
+
activation=F.gelu,
|
| 32 |
+
bias1=True,
|
| 33 |
+
bias2=True,
|
| 34 |
+
return_residual=False,
|
| 35 |
+
device=None,
|
| 36 |
+
dtype=None,
|
| 37 |
+
):
|
| 38 |
+
factory_kwargs = {"device": device, "dtype": dtype}
|
| 39 |
+
super().__init__()
|
| 40 |
+
out_features = out_features if out_features is not None else in_features
|
| 41 |
+
hidden_features = hidden_features if hidden_features is not None else in_features * 4
|
| 42 |
+
self.return_residual = return_residual
|
| 43 |
+
self.fc1 = nn.Linear(in_features, hidden_features, bias=bias1, **factory_kwargs)
|
| 44 |
+
self.activation = activation
|
| 45 |
+
self.fc2 = nn.Linear(hidden_features, out_features, bias=bias2, **factory_kwargs)
|
| 46 |
+
|
| 47 |
+
def forward(self, x):
|
| 48 |
+
y = self.fc1(x)
|
| 49 |
+
y = self.activation(y)
|
| 50 |
+
y = self.fc2(y)
|
| 51 |
+
return y if not self.return_residual else (y, x)
|
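A shape-level sketch of what the plain Mlp above computes (pure torch, so it runs anywhere); with the default hidden_features = 4 * in_features, Mlp(64) is equivalent to the two-layer network written out here:

import torch
import torch.nn as nn
import torch.nn.functional as F

# Equivalent of Mlp(in_features=64): Linear(64, 256) -> gelu -> Linear(256, 64).
fc1, fc2 = nn.Linear(64, 256), nn.Linear(256, 64)
x = torch.randn(2, 16, 64)
y = fc2(F.gelu(fc1(x)))
print(y.shape)  # torch.Size([2, 16, 64])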
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class ParallelMLP(nn.Module):
|
| 55 |
+
def __init__(
|
| 56 |
+
self,
|
| 57 |
+
in_features,
|
| 58 |
+
hidden_features=None,
|
| 59 |
+
out_features=None,
|
| 60 |
+
activation=F.gelu,
|
| 61 |
+
process_group: ProcessGroup = None,
|
| 62 |
+
sequence_parallel=True,
|
| 63 |
+
bias1=True,
|
| 64 |
+
bias2=True,
|
| 65 |
+
device=None,
|
| 66 |
+
dtype=None,
|
| 67 |
+
):
|
| 68 |
+
factory_kwargs = {"device": device, "dtype": dtype}
|
| 69 |
+
super().__init__()
|
| 70 |
+
assert ColumnParallelLinear is not None, "Need to install fused_dense"
|
| 71 |
+
assert RowParallelLinear is not None, "Need to install fused_dense"
|
| 72 |
+
out_features = out_features if out_features is not None else in_features
|
| 73 |
+
hidden_features = hidden_features if hidden_features is not None else in_features * 4
|
| 74 |
+
self.fc1 = ColumnParallelLinear(
|
| 75 |
+
in_features,
|
| 76 |
+
hidden_features,
|
| 77 |
+
process_group,
|
| 78 |
+
bias=bias1,
|
| 79 |
+
sequence_parallel=sequence_parallel,
|
| 80 |
+
**factory_kwargs,
|
| 81 |
+
)
|
| 82 |
+
self.activation = activation
|
| 83 |
+
self.fc2 = RowParallelLinear(
|
| 84 |
+
hidden_features,
|
| 85 |
+
out_features,
|
| 86 |
+
process_group,
|
| 87 |
+
bias=bias2,
|
| 88 |
+
sequence_parallel=sequence_parallel,
|
| 89 |
+
**factory_kwargs,
|
| 90 |
+
)
|
| 91 |
+
|
| 92 |
+
def forward(self, x):
|
| 93 |
+
y = self.fc1(x)
|
| 94 |
+
y = self.activation(y)
|
| 95 |
+
y = self.fc2(y)
|
| 96 |
+
return y
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
class GatedMlp(nn.Module):
|
| 100 |
+
def __init__(
|
| 101 |
+
self,
|
| 102 |
+
in_features,
|
| 103 |
+
hidden_features=None,
|
| 104 |
+
out_features=None,
|
| 105 |
+
activation=F.sigmoid,
|
| 106 |
+
bias1=True,
|
| 107 |
+
bias2=True,
|
| 108 |
+
multiple_of=128,
|
| 109 |
+
return_residual=False,
|
| 110 |
+
device=None,
|
| 111 |
+
dtype=None,
|
| 112 |
+
):
|
| 113 |
+
factory_kwargs = {"device": device, "dtype": dtype}
|
| 114 |
+
super().__init__()
|
| 115 |
+
out_features = out_features if out_features is not None else in_features
|
| 116 |
+
hidden_features = (
|
| 117 |
+
hidden_features if hidden_features is not None else int(8 * in_features / 3)
|
| 118 |
+
)
|
| 119 |
+
hidden_features = (hidden_features + multiple_of - 1) // multiple_of * multiple_of
|
| 120 |
+
self.return_residual = return_residual
|
| 121 |
+
self.fc1 = nn.Linear(in_features, 2 * hidden_features, bias=bias1, **factory_kwargs)
|
| 122 |
+
self.activation = activation
|
| 123 |
+
self.fc2 = nn.Linear(hidden_features, out_features, bias=bias2, **factory_kwargs)
|
| 124 |
+
|
| 125 |
+
def forward(self, x):
|
| 126 |
+
y = self.fc1(x)
|
| 127 |
+
if self.activation == F.sigmoid: # Special case for GLU
|
| 128 |
+
y = F.glu(y, dim=-1)
|
| 129 |
+
elif self.activation == F.silu and swiglu is not None: # Special case for SwiGLU
|
| 130 |
+
y, gate = y.chunk(2, dim=-1)
|
| 131 |
+
y = swiglu(gate, y)
|
| 132 |
+
else:
|
| 133 |
+
y, gate = y.chunk(2, dim=-1)
|
| 134 |
+
y = y * self.activation(gate)
|
| 135 |
+
y = self.fc2(y)
|
| 136 |
+
return y if not self.return_residual else (y, x)
|
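When the fused swiglu kernel is unavailable, the fallback above is simply y * silu(gate) on the two halves of fc1's output. A standalone check that the chunk-and-gate math matches an explicit formulation (sizes are illustrative):

import torch
import torch.nn.functional as F

hidden = 32
y2 = torch.randn(4, 2 * hidden)        # what fc1 would produce
y, gate = y2.chunk(2, dim=-1)
out = y * F.silu(gate)                  # SwiGLU fallback path
ref = y * (gate * torch.sigmoid(gate))  # silu(x) = x * sigmoid(x)
assert torch.allclose(out, ref, atol=1e-6)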
| 137 |
+
|
| 138 |
+
|
| 139 |
+
class ParallelGatedMlp(nn.Module):
|
| 140 |
+
"""Parallel GatedMlp"""
|
| 141 |
+
|
| 142 |
+
def __init__(
|
| 143 |
+
self,
|
| 144 |
+
in_features,
|
| 145 |
+
process_group,
|
| 146 |
+
hidden_features=None,
|
| 147 |
+
out_features=None,
|
| 148 |
+
activation=F.sigmoid,
|
| 149 |
+
bias1=True,
|
| 150 |
+
bias2=True,
|
| 151 |
+
multiple_of=128,
|
| 152 |
+
sequence_parallel=True,
|
| 153 |
+
device=None,
|
| 154 |
+
dtype=None,
|
| 155 |
+
):
|
| 156 |
+
factory_kwargs = {"device": device, "dtype": dtype}
|
| 157 |
+
super().__init__()
|
| 158 |
+
out_features = out_features if out_features is not None else in_features
|
| 159 |
+
hidden_features = (
|
| 160 |
+
hidden_features if hidden_features is not None else int(8 * in_features / 3)
|
| 161 |
+
)
|
| 162 |
+
hidden_features = (hidden_features + multiple_of - 1) // multiple_of * multiple_of
|
| 163 |
+
if ColumnParallelLinear is None or RowParallelLinear is None:
|
| 164 |
+
raise ImportError("fused_dense is not installed")
|
| 165 |
+
self.fc1 = ColumnParallelLinear(
|
| 166 |
+
in_features,
|
| 167 |
+
2 * hidden_features,
|
| 168 |
+
process_group,
|
| 169 |
+
bias=bias1,
|
| 170 |
+
sequence_parallel=sequence_parallel,
|
| 171 |
+
**factory_kwargs,
|
| 172 |
+
)
|
| 173 |
+
self.activation = activation
|
| 174 |
+
self.fc2 = RowParallelLinear(
|
| 175 |
+
hidden_features,
|
| 176 |
+
out_features,
|
| 177 |
+
process_group,
|
| 178 |
+
bias=bias2,
|
| 179 |
+
sequence_parallel=sequence_parallel,
|
| 180 |
+
**factory_kwargs,
|
| 181 |
+
)
|
| 182 |
+
|
| 183 |
+
def forward(self, x):
|
| 184 |
+
y = self.fc1(x)
|
| 185 |
+
if self.activation == F.sigmoid: # Special case for GLU
|
| 186 |
+
y = F.glu(y, dim=-1)
|
| 187 |
+
else:
|
| 188 |
+
y, gate = y.chunk(2, dim=-1)
|
| 189 |
+
y = y * self.activation(gate)
|
| 190 |
+
y = self.fc2(y)
|
| 191 |
+
return y
|
infer_4_30_0/lib/python3.10/site-packages/flash_attn/ops/__init__.py
ADDED
|
File without changes
|
infer_4_30_0/lib/python3.10/site-packages/flash_attn/ops/activations.py
ADDED
|
@@ -0,0 +1,135 @@
| 1 |
+
# Copied from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/model/layers/activations.py
|
| 2 |
+
import math
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import torch.nn as nn
|
| 6 |
+
import torch.nn.functional as F
|
| 7 |
+
|
| 8 |
+
# 1/sqrt(2*pi)-> 0.3989423
|
| 9 |
+
# 1/sqrt(2) -> 0.70710678
|
| 10 |
+
# sqrt(2/pi) -> 0.79788456
|
| 11 |
+
|
| 12 |
+
# this function is tanh approximation of gelu
|
| 13 |
+
# actual gelu is:
|
| 14 |
+
# x * 0.5 * (1.0 + torch.erf(x * 0.70710678))
|
| 15 |
+
@torch.jit.script
|
| 16 |
+
def bias_gelu(y, bias):
|
| 17 |
+
x = bias + y
|
| 18 |
+
return (x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))).to(dtype=y.dtype)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
# gradient of tanh approximation of gelu
|
| 22 |
+
# gradient of actual gelu is:
|
| 23 |
+
# 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x)
|
| 24 |
+
@torch.jit.script
|
| 25 |
+
def bias_gelu_back(g, y, bias):
|
| 26 |
+
"""Assume that y has shape (B, D) and bias has shape (D)"""
|
| 27 |
+
x = bias + y
|
| 28 |
+
tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
|
| 29 |
+
# sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243
|
| 30 |
+
ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (
|
| 31 |
+
1 + tanh_out
|
| 32 |
+
)
|
| 33 |
+
grad_y = ff * g
|
| 34 |
+
return grad_y.to(dtype=y.dtype), grad_y.sum(dim=(0), dtype=bias.dtype)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class GeLUFunction(torch.autograd.Function):
|
| 38 |
+
@staticmethod
|
| 39 |
+
# bias is an optional argument
|
| 40 |
+
def forward(ctx, input, bias):
|
| 41 |
+
ctx.save_for_backward(input, bias)
|
| 42 |
+
return bias_gelu(input, bias)
|
| 43 |
+
|
| 44 |
+
@staticmethod
|
| 45 |
+
def backward(ctx, grad_output):
|
| 46 |
+
input, bias = ctx.saved_tensors
|
| 47 |
+
tmp = bias_gelu_back(grad_output, input, bias)
|
| 48 |
+
return tmp, tmp
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
bias_gelu_impl = GeLUFunction.apply
|
| 52 |
+
|
| 53 |
+
# this function is tanh approximation of gelu
|
| 54 |
+
# actual gelu is:
|
| 55 |
+
# x * 0.5 * (1.0 + torch.erf(x * 0.70710678))
|
| 56 |
+
@torch.jit.script
|
| 57 |
+
def gelu_fwd(x):
|
| 58 |
+
return (x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))).to(dtype=x.dtype)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
# gradient of tanh approximation of gelu
|
| 62 |
+
# gradient of actual gelu is:
|
| 63 |
+
# 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x)
|
| 64 |
+
@torch.jit.script
|
| 65 |
+
def gelu_bwd(g, x):
|
| 66 |
+
tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
|
| 67 |
+
# sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243
|
| 68 |
+
ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (
|
| 69 |
+
1 + tanh_out
|
| 70 |
+
)
|
| 71 |
+
return (ff * g).to(dtype=x.dtype)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
class FastGeLUFunction(torch.autograd.Function):
|
| 75 |
+
@staticmethod
|
| 76 |
+
# bias is an optional argument
|
| 77 |
+
def forward(ctx, input):
|
| 78 |
+
ctx.save_for_backward(input)
|
| 79 |
+
return gelu_fwd(input)
|
| 80 |
+
|
| 81 |
+
@staticmethod
|
| 82 |
+
def backward(ctx, grad_output):
|
| 83 |
+
(input,) = ctx.saved_tensors
|
| 84 |
+
tmp = gelu_bwd(grad_output, input)
|
| 85 |
+
return tmp
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
fast_gelu_impl = FastGeLUFunction.apply
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
@torch.jit.script
|
| 92 |
+
def relu_bwd(g, x):
|
| 93 |
+
return torch.where(x >= 0, g, 0.0).to(dtype=x.dtype)
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
@torch.jit.script
|
| 97 |
+
def sqrelu_fwd(x):
|
| 98 |
+
r = F.relu(x)
|
| 99 |
+
return (r * r).to(dtype=x.dtype)
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
@torch.jit.script
|
| 103 |
+
def sqrelu_bwd(g, x):
|
| 104 |
+
return (2.0 * g * F.relu(x)).to(dtype=x.dtype)
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
swiglu_fwd_codestring = """
|
| 108 |
+
template <typename T> T swiglu_fwd(T x, T y) {
|
| 109 |
+
return float(x) * float(y) / (1.0f + ::exp(-float(x)));
|
| 110 |
+
}
|
| 111 |
+
"""
|
| 112 |
+
swiglu_bwd_codestring = """
|
| 113 |
+
template <typename T> void swiglu_bwd(T x, T y, T g, T& dx, T& dy) {
|
| 114 |
+
float x_sigmoid = 1.0f / (1.0f + ::exp(-float(x)));
|
| 115 |
+
dx = x_sigmoid * (1 + float(x) * (1.0f - x_sigmoid)) * float(g) * float(y);
|
| 116 |
+
dy = float(x) * x_sigmoid * float(g);
|
| 117 |
+
}
|
| 118 |
+
"""
|
| 119 |
+
swiglu_fwd = torch.cuda.jiterator._create_jit_fn(swiglu_fwd_codestring)
|
| 120 |
+
swiglu_bwd = torch.cuda.jiterator._create_multi_output_jit_fn(swiglu_bwd_codestring, num_outputs=2)
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
class SwiGLUFunction(torch.autograd.Function):
|
| 124 |
+
|
| 125 |
+
@staticmethod
|
| 126 |
+
def forward(ctx, x, y):
|
| 127 |
+
ctx.save_for_backward(x, y)
|
| 128 |
+
return swiglu_fwd(x, y)
|
| 129 |
+
|
| 130 |
+
@staticmethod
|
| 131 |
+
def backward(ctx, dout):
|
| 132 |
+
x, y = ctx.saved_tensors
|
| 133 |
+
return swiglu_bwd(x, y, dout)
|
| 134 |
+
|
| 135 |
+
swiglu = SwiGLUFunction.apply
|
infer_4_30_0/lib/python3.10/site-packages/flash_attn/ops/layer_norm.py
ADDED
|
@@ -0,0 +1,800 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2022, Tri Dao.
|
| 2 |
+
# Adapted from https://github.com/NVIDIA/apex/blob/master/apex/contrib/layer_norm/layer_norm.py
|
| 3 |
+
|
| 4 |
+
import dropout_layer_norm
|
| 5 |
+
import torch
|
| 6 |
+
from torch.nn import init
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def maybe_align(x, alignment_in_bytes=16):
|
| 10 |
+
"""Assume that x already has last dim divisible by alignment_in_bytes"""
|
| 11 |
+
# TD [2023-07-04] I'm not 100% sure that clone will align the memory
|
| 12 |
+
# https://discuss.pytorch.org/t/how-to-ensure-that-tensor-data-ptr-is-aligned-to-16-bytes/183440
|
| 13 |
+
return x if x.data_ptr() % alignment_in_bytes == 0 else x.clone()
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def _dropout_add_layer_norm_forward(
|
| 17 |
+
x0,
|
| 18 |
+
residual,
|
| 19 |
+
gamma,
|
| 20 |
+
beta,
|
| 21 |
+
rowscale,
|
| 22 |
+
colscale,
|
| 23 |
+
dropout_p,
|
| 24 |
+
epsilon,
|
| 25 |
+
residual_in_fp32=False,
|
| 26 |
+
is_rms_norm=False,
|
| 27 |
+
):
|
| 28 |
+
"""Assume that arguments are contiguous and aligned to 16 bytes"""
|
| 29 |
+
hidden_size = gamma.numel()
|
| 30 |
+
x0mat = x0.view((-1, hidden_size))
|
| 31 |
+
residualmat = residual.view((-1, hidden_size)) if residual is not None else None
|
| 32 |
+
rowscale = rowscale.view(-1) if rowscale is not None else None
|
| 33 |
+
zmat, xmat, dmask, mu, rsigma = dropout_layer_norm.dropout_add_ln_fwd(
|
| 34 |
+
x0mat,
|
| 35 |
+
residualmat,
|
| 36 |
+
gamma,
|
| 37 |
+
beta,
|
| 38 |
+
rowscale,
|
| 39 |
+
colscale,
|
| 40 |
+
None,
|
| 41 |
+
None,
|
| 42 |
+
dropout_p,
|
| 43 |
+
epsilon,
|
| 44 |
+
1.0,
|
| 45 |
+
0,
|
| 46 |
+
None,
|
| 47 |
+
residual_in_fp32,
|
| 48 |
+
is_rms_norm,
|
| 49 |
+
)
|
| 50 |
+
# dmask is None if dropout_p == 0.0
|
| 51 |
+
# xmat is None if dropout_p == 0.0 and residual is None and residual_dtype != input_dtype
|
| 52 |
+
return zmat, xmat if xmat is not None else x0mat, dmask, mu, rsigma
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def _dropout_add_layer_norm_backward(
|
| 56 |
+
dz,
|
| 57 |
+
dx,
|
| 58 |
+
x,
|
| 59 |
+
x0,
|
| 60 |
+
dmask,
|
| 61 |
+
mu,
|
| 62 |
+
rsigma,
|
| 63 |
+
gamma,
|
| 64 |
+
rowscale,
|
| 65 |
+
colscale,
|
| 66 |
+
dropout_p,
|
| 67 |
+
has_residual,
|
| 68 |
+
is_rms_norm=False,
|
| 69 |
+
):
|
| 70 |
+
"""Assume that arguments are contiguous and aligned to 16 bytes
|
| 71 |
+
dx == None means that it was a post-norm architecture
|
| 72 |
+
(x = drop(x0) + residual was not returned in the fwd).
|
| 73 |
+
x0 must not be None if we have colscale.
|
| 74 |
+
"""
|
| 75 |
+
hidden_size = gamma.numel()
|
| 76 |
+
xmat = x.view((-1, hidden_size))
|
| 77 |
+
dzmat = dz.view(xmat.shape)
|
| 78 |
+
dxmat = dx.view(xmat.shape) if dx is not None else None
|
| 79 |
+
x0mat = x0.view((-1, hidden_size)) if x0 is not None else None
|
| 80 |
+
rowscale = rowscale.view(-1) if rowscale is not None else None
|
| 81 |
+
if colscale is not None:
|
| 82 |
+
assert x0 is not None, "x0 is required to compute the gradient of colscale"
|
| 83 |
+
dx0mat, dresidualmat, dgamma, dbeta, _, _, *rest = dropout_layer_norm.dropout_add_ln_bwd(
|
| 84 |
+
dzmat,
|
| 85 |
+
dxmat,
|
| 86 |
+
xmat,
|
| 87 |
+
x0mat,
|
| 88 |
+
dmask,
|
| 89 |
+
mu,
|
| 90 |
+
rsigma,
|
| 91 |
+
gamma,
|
| 92 |
+
rowscale,
|
| 93 |
+
colscale,
|
| 94 |
+
None,
|
| 95 |
+
None,
|
| 96 |
+
dropout_p,
|
| 97 |
+
1.0,
|
| 98 |
+
0,
|
| 99 |
+
has_residual,
|
| 100 |
+
is_rms_norm,
|
| 101 |
+
)
|
| 102 |
+
# dresidualmat is None if not has_residual
|
| 103 |
+
if colscale is None:
|
| 104 |
+
return dx0mat, dresidualmat, dgamma, dbeta
|
| 105 |
+
else:
|
| 106 |
+
dcolscale = rest[0]
|
| 107 |
+
return dx0mat, dresidualmat, dgamma, dbeta, dcolscale
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def _dropout_add_layer_norm_subset_forward(
|
| 111 |
+
x0,
|
| 112 |
+
residual,
|
| 113 |
+
gamma,
|
| 114 |
+
beta,
|
| 115 |
+
colscale,
|
| 116 |
+
x0_subset,
|
| 117 |
+
out_subset,
|
| 118 |
+
dropout_p,
|
| 119 |
+
epsilon,
|
| 120 |
+
rowscale_const,
|
| 121 |
+
out_numrows,
|
| 122 |
+
residual_in_fp32=False,
|
| 123 |
+
is_rms_norm=False,
|
| 124 |
+
):
|
| 125 |
+
"""Assume that arguments are contiguous and aligned to 16 bytes"""
|
| 126 |
+
hidden_size = gamma.numel()
|
| 127 |
+
x0mat = x0.view((-1, hidden_size))
|
| 128 |
+
residualmat = residual.view((-1, hidden_size)) if residual is not None else None
|
| 129 |
+
x0_subset = x0_subset.view(-1) if x0_subset is not None else None
|
| 130 |
+
out_subset = out_subset.view(-1) if out_subset is not None else None
|
| 131 |
+
zmat, xmat, dmask, mu, rsigma = dropout_layer_norm.dropout_add_ln_fwd(
|
| 132 |
+
x0mat,
|
| 133 |
+
residualmat,
|
| 134 |
+
gamma,
|
| 135 |
+
beta,
|
| 136 |
+
None,
|
| 137 |
+
colscale,
|
| 138 |
+
x0_subset,
|
| 139 |
+
out_subset,
|
| 140 |
+
dropout_p,
|
| 141 |
+
epsilon,
|
| 142 |
+
rowscale_const,
|
| 143 |
+
out_numrows,
|
| 144 |
+
None,
|
| 145 |
+
residual_in_fp32,
|
| 146 |
+
is_rms_norm,
|
| 147 |
+
)
|
| 148 |
+
# dmask is None if dropout_p == 0.0
|
| 149 |
+
# xmat is None if dropout_p == 0.0 and residual is None and residual_dtype != input_dtype
|
| 150 |
+
return zmat, xmat if xmat is not None else x0mat, dmask, mu, rsigma
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def _dropout_add_layer_norm_subset_backward(
|
| 154 |
+
dz,
|
| 155 |
+
dx,
|
| 156 |
+
x,
|
| 157 |
+
x0,
|
| 158 |
+
dmask,
|
| 159 |
+
mu,
|
| 160 |
+
rsigma,
|
| 161 |
+
gamma,
|
| 162 |
+
colscale,
|
| 163 |
+
x0_subset,
|
| 164 |
+
out_subset,
|
| 165 |
+
dropout_p,
|
| 166 |
+
rowscale_const,
|
| 167 |
+
x0_numrows,
|
| 168 |
+
has_residual,
|
| 169 |
+
is_rms_norm=False,
|
| 170 |
+
):
|
| 171 |
+
"""Assume that arguments are contiguous and aligned to 16 bytes
|
| 172 |
+
dx == None means that it was a post-norm architecture
|
| 173 |
+
(x = drop(x0) + residual was not returned in the fwd).
|
| 174 |
+
x0 must not be None if we have colscale.
|
| 175 |
+
"""
|
| 176 |
+
hidden_size = gamma.numel()
|
| 177 |
+
xmat = x.view((-1, hidden_size))
|
| 178 |
+
dzmat = dz.view(-1, hidden_size)
|
| 179 |
+
dxmat = dx.view(xmat.shape) if dx is not None else None
|
| 180 |
+
x0mat = x0.view((-1, hidden_size)) if x0 is not None else None
|
| 181 |
+
x0_subset = x0_subset.view(-1) if x0_subset is not None else None
|
| 182 |
+
out_subset = out_subset.view(-1) if out_subset is not None else None
|
| 183 |
+
if colscale is not None:
|
| 184 |
+
assert x0 is not None, "x0 is required to compute the gradient of colscale"
|
| 185 |
+
dx0mat, dresidualmat, dgamma, dbeta, _, _, *rest = dropout_layer_norm.dropout_add_ln_bwd(
|
| 186 |
+
dzmat,
|
| 187 |
+
dxmat,
|
| 188 |
+
xmat,
|
| 189 |
+
x0mat,
|
| 190 |
+
dmask,
|
| 191 |
+
mu,
|
| 192 |
+
rsigma,
|
| 193 |
+
gamma,
|
| 194 |
+
None,
|
| 195 |
+
colscale,
|
| 196 |
+
x0_subset,
|
| 197 |
+
out_subset,
|
| 198 |
+
dropout_p,
|
| 199 |
+
rowscale_const,
|
| 200 |
+
x0_numrows,
|
| 201 |
+
has_residual,
|
| 202 |
+
is_rms_norm,
|
| 203 |
+
)
|
| 204 |
+
# dresidualmat is None if not has_residual
|
| 205 |
+
if colscale is None:
|
| 206 |
+
return dx0mat, dresidualmat, dgamma, dbeta
|
| 207 |
+
else:
|
| 208 |
+
dcolscale = rest[0]
|
| 209 |
+
return dx0mat, dresidualmat, dgamma, dbeta, dcolscale
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
def _dropout_add_layer_norm_parallel_residual_forward(
|
| 213 |
+
x0,
|
| 214 |
+
x1,
|
| 215 |
+
residual,
|
| 216 |
+
gamma0,
|
| 217 |
+
beta0,
|
| 218 |
+
gamma1,
|
| 219 |
+
beta1,
|
| 220 |
+
dropout_p,
|
| 221 |
+
epsilon,
|
| 222 |
+
residual_in_fp32=False,
|
| 223 |
+
is_rms_norm=False,
|
| 224 |
+
):
|
| 225 |
+
"""Assume that arguments are contiguous and aligned to 16 bytes"""
|
| 226 |
+
hidden_size = gamma0.numel()
|
| 227 |
+
x0mat = x0.view((-1, hidden_size))
|
| 228 |
+
x1mat = x1.view((-1, hidden_size)) if x1 is not None else None
|
| 229 |
+
residualmat = residual.view((-1, hidden_size)) if residual is not None else None
|
| 230 |
+
(
|
| 231 |
+
z0mat,
|
| 232 |
+
z1mat,
|
| 233 |
+
xmat,
|
| 234 |
+
dmask0,
|
| 235 |
+
dmask1,
|
| 236 |
+
mu,
|
| 237 |
+
rsigma,
|
| 238 |
+
) = dropout_layer_norm.dropout_add_ln_parallel_residual_fwd(
|
| 239 |
+
x0mat,
|
| 240 |
+
x1mat,
|
| 241 |
+
residualmat,
|
| 242 |
+
gamma0,
|
| 243 |
+
beta0,
|
| 244 |
+
gamma1,
|
| 245 |
+
beta1,
|
| 246 |
+
dropout_p,
|
| 247 |
+
epsilon,
|
| 248 |
+
None,
|
| 249 |
+
residual_in_fp32,
|
| 250 |
+
is_rms_norm,
|
| 251 |
+
)
|
| 252 |
+
# dmask0 and dmask1 are None if dropout_p == 0.0
|
| 253 |
+
# xmat is None if dropout_p == 0.0 and residual is None and residual_dtype != input_dtype
|
| 254 |
+
return z0mat, z1mat, xmat if xmat is not None else x0mat, dmask0, dmask1, mu, rsigma
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
def _dropout_add_layer_norm_parallel_residual_backward(
|
| 258 |
+
dz0,
|
| 259 |
+
dz1,
|
| 260 |
+
dx,
|
| 261 |
+
x,
|
| 262 |
+
dmask0,
|
| 263 |
+
dmask1,
|
| 264 |
+
mu,
|
| 265 |
+
rsigma,
|
| 266 |
+
gamma0,
|
| 267 |
+
gamma1,
|
| 268 |
+
dropout_p,
|
| 269 |
+
has_x1,
|
| 270 |
+
has_residual,
|
| 271 |
+
is_rms_norm=False,
|
| 272 |
+
):
|
| 273 |
+
"""Assume that arguments are contiguous and aligned to 16 bytes
|
| 274 |
+
dx == None means that it was a post-norm architecture
|
| 275 |
+
(x = drop(x0) + residual was not returned in the fwd).
|
| 276 |
+
"""
|
| 277 |
+
hidden_size = gamma0.numel()
|
| 278 |
+
xmat = x.view((-1, hidden_size))
|
| 279 |
+
dz0mat = dz0.view(xmat.shape)
|
| 280 |
+
dz1mat = dz1.view(xmat.shape) if dz1 is not None else None
|
| 281 |
+
dxmat = dx.view(xmat.shape) if dx is not None else None
|
| 282 |
+
(
|
| 283 |
+
dx0mat,
|
| 284 |
+
dx1mat,
|
| 285 |
+
dresidualmat,
|
| 286 |
+
dgamma0,
|
| 287 |
+
dbeta0,
|
| 288 |
+
dgamma1,
|
| 289 |
+
dbeta1,
|
| 290 |
+
*rest,
|
| 291 |
+
) = dropout_layer_norm.dropout_add_ln_parallel_residual_bwd(
|
| 292 |
+
dz0mat,
|
| 293 |
+
dz1mat,
|
| 294 |
+
dxmat,
|
| 295 |
+
xmat,
|
| 296 |
+
dmask0,
|
| 297 |
+
dmask1,
|
| 298 |
+
mu,
|
| 299 |
+
rsigma,
|
| 300 |
+
gamma0,
|
| 301 |
+
gamma1,
|
| 302 |
+
dropout_p,
|
| 303 |
+
has_x1,
|
| 304 |
+
has_residual,
|
| 305 |
+
is_rms_norm,
|
| 306 |
+
)
|
| 307 |
+
# dresidualmat is None if not has_residual
|
| 308 |
+
return dx0mat, dx1mat, dresidualmat, dgamma0, dbeta0, dgamma1, dbeta1
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
class DropoutAddLayerNormFn(torch.autograd.Function):
|
| 312 |
+
@staticmethod
|
| 313 |
+
def forward(
|
| 314 |
+
ctx,
|
| 315 |
+
x0,
|
| 316 |
+
residual,
|
| 317 |
+
gamma,
|
| 318 |
+
beta,
|
| 319 |
+
rowscale,
|
| 320 |
+
colscale,
|
| 321 |
+
dropout_p,
|
| 322 |
+
epsilon,
|
| 323 |
+
residual_in_fp32=False,
|
| 324 |
+
prenorm=False,
|
| 325 |
+
is_rms_norm=False,
|
| 326 |
+
return_dmask=False,
|
| 327 |
+
):
|
| 328 |
+
x0 = maybe_align(x0.contiguous(), 16)
|
| 329 |
+
residual = maybe_align(residual.contiguous(), 16) if residual is not None else None
|
| 330 |
+
gamma = maybe_align(gamma.contiguous(), 16)
|
| 331 |
+
beta = maybe_align(beta.contiguous(), 16) if beta is not None else None
|
| 332 |
+
rowscale = maybe_align(rowscale.contiguous(), 16) if rowscale is not None else None
|
| 333 |
+
colscale = maybe_align(colscale.contiguous(), 16) if colscale is not None else None
|
| 334 |
+
zmat, xmat, dmask, mu, rsigma = _dropout_add_layer_norm_forward(
|
| 335 |
+
x0,
|
| 336 |
+
residual,
|
| 337 |
+
gamma,
|
| 338 |
+
beta,
|
| 339 |
+
rowscale,
|
| 340 |
+
colscale,
|
| 341 |
+
dropout_p,
|
| 342 |
+
epsilon,
|
| 343 |
+
residual_in_fp32,
|
| 344 |
+
is_rms_norm,
|
| 345 |
+
)
|
| 346 |
+
# Only need to save x0 if we need to compute gradient wrt colscale
|
| 347 |
+
x0_saved = x0 if colscale is not None else None
|
| 348 |
+
ctx.save_for_backward(
|
| 349 |
+
xmat.view(x0.shape), x0_saved, dmask, gamma, mu, rsigma, rowscale, colscale
|
| 350 |
+
)
|
| 351 |
+
ctx.prenorm = prenorm
|
| 352 |
+
ctx.dropout_p = dropout_p
|
| 353 |
+
ctx.has_residual = residual is not None
|
| 354 |
+
ctx.is_rms_norm = is_rms_norm
|
| 355 |
+
ctx.has_beta = beta is not None
|
| 356 |
+
if not return_dmask:
|
| 357 |
+
return (
|
| 358 |
+
zmat.view(x0.shape) if not prenorm else (zmat.view(x0.shape), xmat.view(x0.shape))
|
| 359 |
+
)
|
| 360 |
+
else:
|
| 361 |
+
dmask = (
|
| 362 |
+
dmask.view(x0.shape)
|
| 363 |
+
if dropout_p > 0.0
|
| 364 |
+
else torch.ones(x0.shape, dtype=torch.uint8, device=x0.device)
|
| 365 |
+
)
|
| 366 |
+
ctx.mark_non_differentiable(dmask)
|
| 367 |
+
return (
|
| 368 |
+
(zmat.view(x0.shape), dmask)
|
| 369 |
+
if not prenorm
|
| 370 |
+
else (zmat.view(x0.shape), xmat.view(x0.shape), dmask)
|
| 371 |
+
)
|
| 372 |
+
|
| 373 |
+
@staticmethod
|
| 374 |
+
def backward(ctx, dz, *args):
|
| 375 |
+
# assert dz.is_contiguous()
|
| 376 |
+
dz = maybe_align(dz.contiguous(), 16) # this happens!
|
| 377 |
+
dx = maybe_align(args[0].contiguous(), 16) if ctx.prenorm else None
|
| 378 |
+
x, x0, dmask, gamma, mu, rsigma, rowscale, colscale = ctx.saved_tensors
|
| 379 |
+
# x0 is None if colscale is None
|
| 380 |
+
dropout_p = ctx.dropout_p
|
| 381 |
+
has_residual = ctx.has_residual
|
| 382 |
+
dx0mat, dresidualmat, dgamma, dbeta, *rest = _dropout_add_layer_norm_backward(
|
| 383 |
+
dz,
|
| 384 |
+
dx,
|
| 385 |
+
x,
|
| 386 |
+
x0,
|
| 387 |
+
dmask,
|
| 388 |
+
mu,
|
| 389 |
+
rsigma,
|
| 390 |
+
gamma,
|
| 391 |
+
rowscale,
|
| 392 |
+
colscale,
|
| 393 |
+
dropout_p,
|
| 394 |
+
has_residual,
|
| 395 |
+
ctx.is_rms_norm,
|
| 396 |
+
)
|
| 397 |
+
dx0 = dx0mat.view(x.shape)
|
| 398 |
+
dresidual = dresidualmat.view(x.shape) if dresidualmat is not None else None
|
| 399 |
+
dcolscale = rest[0] if colscale is not None else None
|
| 400 |
+
return (
|
| 401 |
+
dx0,
|
| 402 |
+
dresidual,
|
| 403 |
+
dgamma,
|
| 404 |
+
dbeta if ctx.has_beta else None,
|
| 405 |
+
None,
|
| 406 |
+
dcolscale,
|
| 407 |
+
None,
|
| 408 |
+
None,
|
| 409 |
+
None,
|
| 410 |
+
None,
|
| 411 |
+
None,
|
| 412 |
+
None,
|
| 413 |
+
)
|
| 414 |
+
|
| 415 |
+
|
| 416 |
+
class DropoutAddLayerNormSubsetFn(torch.autograd.Function):
|
| 417 |
+
@staticmethod
|
| 418 |
+
def forward(
|
| 419 |
+
ctx,
|
| 420 |
+
x0,
|
| 421 |
+
residual,
|
| 422 |
+
gamma,
|
| 423 |
+
beta,
|
| 424 |
+
colscale,
|
| 425 |
+
x0_subset,
|
| 426 |
+
out_subset,
|
| 427 |
+
dropout_p,
|
| 428 |
+
epsilon,
|
| 429 |
+
rowscale_const,
|
| 430 |
+
out_numrows,
|
| 431 |
+
residual_in_fp32=False,
|
| 432 |
+
prenorm=False,
|
| 433 |
+
is_rms_norm=False,
|
| 434 |
+
return_dmask=False,
|
| 435 |
+
):
|
| 436 |
+
x0 = maybe_align(x0.contiguous(), 16)
|
| 437 |
+
residual = maybe_align(residual.contiguous(), 16) if residual is not None else None
|
| 438 |
+
gamma = maybe_align(gamma.contiguous(), 16)
|
| 439 |
+
beta = maybe_align(beta.contiguous(), 16) if beta is not None else None
|
| 440 |
+
colscale = maybe_align(colscale.contiguous(), 16) if colscale is not None else None
|
| 441 |
+
zmat, xmat, dmask, mu, rsigma = _dropout_add_layer_norm_subset_forward(
|
| 442 |
+
x0,
|
| 443 |
+
residual,
|
| 444 |
+
gamma,
|
| 445 |
+
beta,
|
| 446 |
+
colscale,
|
| 447 |
+
x0_subset,
|
| 448 |
+
out_subset,
|
| 449 |
+
dropout_p,
|
| 450 |
+
epsilon,
|
| 451 |
+
rowscale_const,
|
| 452 |
+
out_numrows,
|
| 453 |
+
residual_in_fp32,
|
| 454 |
+
is_rms_norm,
|
| 455 |
+
)
|
| 456 |
+
# Only need to save x0 if we need to compute gradient wrt colscale
|
| 457 |
+
x0_saved = x0 if colscale is not None else None
|
| 458 |
+
x_shape = (-1, *x0.shape[1:])
|
| 459 |
+
ctx.save_for_backward(
|
| 460 |
+
xmat.view(x_shape), x0_saved, dmask, gamma, mu, rsigma, colscale, x0_subset, out_subset
|
| 461 |
+
)
|
| 462 |
+
ctx.prenorm = prenorm
|
| 463 |
+
ctx.dropout_p = dropout_p
|
| 464 |
+
ctx.rowscale_const = rowscale_const
|
| 465 |
+
ctx.x0_numrows = x0.shape[:-1].numel()
|
| 466 |
+
ctx.has_residual = residual is not None
|
| 467 |
+
ctx.is_rms_norm = is_rms_norm
|
| 468 |
+
ctx.has_beta = beta is not None
|
| 469 |
+
z_shape = (-1, *x0.shape[1:])
|
| 470 |
+
if not return_dmask:
|
| 471 |
+
return zmat.view(z_shape) if not prenorm else (zmat.view(z_shape), xmat.view(x0.shape))
|
| 472 |
+
else:
|
| 473 |
+
z = zmat.view(z_shape)
|
| 474 |
+
dmask = (
|
| 475 |
+
dmask.view(x0.shape)
|
| 476 |
+
if dropout_p > 0.0
|
| 477 |
+
else torch.ones(x0.shape, dtype=torch.uint8, device=x0.device)
|
| 478 |
+
)
|
| 479 |
+
ctx.mark_non_differentiable(dmask)
|
| 480 |
+
return (z, dmask) if not prenorm else (z, xmat.view(x_shape), dmask)
|
| 481 |
+
|
| 482 |
+
@staticmethod
|
| 483 |
+
def backward(ctx, dz, *args):
|
| 484 |
+
# assert dz.is_contiguous()
|
| 485 |
+
dz = maybe_align(dz.contiguous(), 16) # this happens!
|
| 486 |
+
dx = maybe_align(args[0].contiguous(), 16) if ctx.prenorm else None
|
| 487 |
+
x, x0, dmask, gamma, mu, rsigma, colscale, x0_subset, out_subset = ctx.saved_tensors
|
| 488 |
+
# x0 is None if colscale is None
|
| 489 |
+
dropout_p = ctx.dropout_p
|
| 490 |
+
has_residual = ctx.has_residual
|
| 491 |
+
dx0mat, dresidualmat, dgamma, dbeta, *rest = _dropout_add_layer_norm_subset_backward(
|
| 492 |
+
dz,
|
| 493 |
+
dx,
|
| 494 |
+
x,
|
| 495 |
+
x0,
|
| 496 |
+
dmask,
|
| 497 |
+
mu,
|
| 498 |
+
rsigma,
|
| 499 |
+
gamma,
|
| 500 |
+
colscale,
|
| 501 |
+
x0_subset,
|
| 502 |
+
out_subset,
|
| 503 |
+
dropout_p,
|
| 504 |
+
ctx.rowscale_const,
|
| 505 |
+
ctx.x0_numrows,
|
| 506 |
+
has_residual,
|
| 507 |
+
ctx.is_rms_norm,
|
| 508 |
+
)
|
| 509 |
+
dx0 = dx0mat.view(-1, *x.shape[1:])
|
| 510 |
+
dresidual = dresidualmat.view(x.shape) if dresidualmat is not None else None
|
| 511 |
+
dcolscale = rest[0] if colscale is not None else None
|
| 512 |
+
return (
|
| 513 |
+
dx0,
|
| 514 |
+
dresidual,
|
| 515 |
+
dgamma,
|
| 516 |
+
dbeta if ctx.has_beta else None,
|
| 517 |
+
dcolscale,
|
| 518 |
+
None,
|
| 519 |
+
None,
|
| 520 |
+
None,
|
| 521 |
+
None,
|
| 522 |
+
None,
|
| 523 |
+
None,
|
| 524 |
+
None,
|
| 525 |
+
None,
|
| 526 |
+
None,
|
| 527 |
+
None,
|
| 528 |
+
)
|
| 529 |
+
|
| 530 |
+
|
| 531 |
+
class DropoutAddLayerNormParallelResidualFn(torch.autograd.Function):
|
| 532 |
+
@staticmethod
|
| 533 |
+
def forward(
|
| 534 |
+
ctx,
|
| 535 |
+
x0,
|
| 536 |
+
x1,
|
| 537 |
+
residual,
|
| 538 |
+
gamma0,
|
| 539 |
+
beta0,
|
| 540 |
+
gamma1,
|
| 541 |
+
beta1,
|
| 542 |
+
dropout_p,
|
| 543 |
+
epsilon,
|
| 544 |
+
residual_in_fp32=False,
|
| 545 |
+
prenorm=False,
|
| 546 |
+
is_rms_norm=False,
|
| 547 |
+
return_dmask=False,
|
| 548 |
+
):
|
| 549 |
+
x0 = maybe_align(x0.contiguous(), 16)
|
| 550 |
+
x1 = maybe_align(x1.contiguous(), 16) if x1 is not None else None
|
| 551 |
+
residual = maybe_align(residual.contiguous(), 16) if residual is not None else None
|
| 552 |
+
gamma0 = maybe_align(gamma0.contiguous(), 16)
|
| 553 |
+
beta0 = maybe_align(beta0.contiguous(), 16) if beta0 is not None else None
|
| 554 |
+
gamma1 = maybe_align(gamma1.contiguous(), 16) if gamma1 is not None else None
|
| 555 |
+
beta1 = maybe_align(beta1.contiguous(), 16) if beta1 is not None else None
|
| 556 |
+
(
|
| 557 |
+
z0mat,
|
| 558 |
+
z1mat,
|
| 559 |
+
xmat,
|
| 560 |
+
dmask0,
|
| 561 |
+
dmask1,
|
| 562 |
+
mu,
|
| 563 |
+
rsigma,
|
| 564 |
+
) = _dropout_add_layer_norm_parallel_residual_forward(
|
| 565 |
+
x0,
|
| 566 |
+
x1,
|
| 567 |
+
residual,
|
| 568 |
+
gamma0,
|
| 569 |
+
beta0,
|
| 570 |
+
gamma1,
|
| 571 |
+
beta1,
|
| 572 |
+
dropout_p,
|
| 573 |
+
epsilon,
|
| 574 |
+
residual_in_fp32,
|
| 575 |
+
is_rms_norm,
|
| 576 |
+
)
|
| 577 |
+
ctx.save_for_backward(xmat.view(x0.shape), dmask0, dmask1, gamma0, gamma1, mu, rsigma)
|
| 578 |
+
ctx.prenorm = prenorm
|
| 579 |
+
ctx.dropout_p = dropout_p
|
| 580 |
+
ctx.has_x1 = x1 is not None
|
| 581 |
+
ctx.has_residual = residual is not None
|
| 582 |
+
ctx.is_rms_norm = is_rms_norm
|
| 583 |
+
ctx.has_beta = beta0 is not None
|
| 584 |
+
z = (z0mat.view(x0.shape), z1mat.view(x0.shape) if z1mat is not None else None)
|
| 585 |
+
if not return_dmask:
|
| 586 |
+
return z if not prenorm else (*z, xmat.view(x0.shape))
|
| 587 |
+
else:
|
| 588 |
+
dmask0 = (
|
| 589 |
+
dmask0.view(x0.shape)
|
| 590 |
+
if dropout_p > 0.0
|
| 591 |
+
else torch.ones(x0.shape, dtype=torch.uint8, device=x0.device)
|
| 592 |
+
)
|
| 593 |
+
dmask1 = (
|
| 594 |
+
dmask1.view(x0.shape)
|
| 595 |
+
if dropout_p > 0.0 and x1 is not None
|
| 596 |
+
else torch.ones(x0.shape, dtype=torch.uint8, device=x0.device)
|
| 597 |
+
)
|
| 598 |
+
ctx.mark_non_differentiable(dmask0)
|
| 599 |
+
ctx.mark_non_differentiable(dmask1)
|
| 600 |
+
return (
|
| 601 |
+
(*z, dmask0, dmask1) if not prenorm else (*z, xmat.view(x0.shape), dmask0, dmask1)
|
| 602 |
+
)
|
| 603 |
+
|
| 604 |
+
@staticmethod
|
| 605 |
+
def backward(ctx, dz0, dz1, *args):
|
| 606 |
+
dz0 = maybe_align(dz0.contiguous(), 16) # this happens!
|
| 607 |
+
dz1 = maybe_align(dz1.contiguous(), 16) if dz1 is not None else None
|
| 608 |
+
dx = maybe_align(args[0].contiguous(), 16) if ctx.prenorm else None
|
| 609 |
+
x, dmask0, dmask1, gamma0, gamma1, mu, rsigma = ctx.saved_tensors
|
| 610 |
+
dropout_p = ctx.dropout_p
|
| 611 |
+
has_x1 = ctx.has_x1
|
| 612 |
+
has_residual = ctx.has_residual
|
| 613 |
+
(
|
| 614 |
+
dx0mat,
|
| 615 |
+
dx1mat,
|
| 616 |
+
dresidualmat,
|
| 617 |
+
dgamma0,
|
| 618 |
+
dbeta0,
|
| 619 |
+
dgamma1,
|
| 620 |
+
dbeta1,
|
| 621 |
+
) = _dropout_add_layer_norm_parallel_residual_backward(
|
| 622 |
+
dz0,
|
| 623 |
+
dz1,
|
| 624 |
+
dx,
|
| 625 |
+
x,
|
| 626 |
+
dmask0,
|
| 627 |
+
dmask1,
|
| 628 |
+
mu,
|
| 629 |
+
rsigma,
|
| 630 |
+
gamma0,
|
| 631 |
+
gamma1,
|
| 632 |
+
dropout_p,
|
| 633 |
+
has_x1,
|
| 634 |
+
has_residual,
|
| 635 |
+
ctx.is_rms_norm,
|
| 636 |
+
)
|
| 637 |
+
dx0 = dx0mat.view(x.shape)
|
| 638 |
+
dx1 = dx1mat.view(x.shape) if dx1mat is not None else None
|
| 639 |
+
dresidual = dresidualmat.view(x.shape) if dresidualmat is not None else None
|
| 640 |
+
return (
|
| 641 |
+
dx0,
|
| 642 |
+
dx1,
|
| 643 |
+
dresidual,
|
| 644 |
+
dgamma0,
|
| 645 |
+
dbeta0 if ctx.has_beta else None,
|
| 646 |
+
dgamma1,
|
| 647 |
+
dbeta1 if ctx.has_beta else None,
|
| 648 |
+
None,
|
| 649 |
+
None,
|
| 650 |
+
None,
|
| 651 |
+
None,
|
| 652 |
+
None,
|
| 653 |
+
None,
|
| 654 |
+
)
|
| 655 |
+
|
| 656 |
+
|
| 657 |
+
def layer_norm(x, weight, bias, epsilon):
|
| 658 |
+
return DropoutAddLayerNormFn.apply(x, None, weight, bias, None, None, 0.0, epsilon, False)
|
| 659 |
+
|
| 660 |
+
|
| 661 |
+
def dropout_add_layer_norm(
|
| 662 |
+
x0,
|
| 663 |
+
residual,
|
| 664 |
+
weight,
|
| 665 |
+
bias,
|
| 666 |
+
dropout_p,
|
| 667 |
+
epsilon,
|
| 668 |
+
rowscale=None,
|
| 669 |
+
layerscale=None,
|
| 670 |
+
prenorm=False,
|
| 671 |
+
residual_in_fp32=False,
|
| 672 |
+
return_dropout_mask=False,
|
| 673 |
+
):
|
| 674 |
+
"""residual_in_fp32 only has an effect if residual is None.
|
| 675 |
+
Otherwise residual dtype is residual.dtype.
|
| 676 |
+
"""
|
| 677 |
+
return DropoutAddLayerNormFn.apply(
|
| 678 |
+
x0,
|
| 679 |
+
residual,
|
| 680 |
+
weight,
|
| 681 |
+
bias,
|
| 682 |
+
rowscale,
|
| 683 |
+
layerscale,
|
| 684 |
+
dropout_p,
|
| 685 |
+
epsilon,
|
| 686 |
+
residual_in_fp32,
|
| 687 |
+
prenorm,
|
| 688 |
+
False,
|
| 689 |
+
return_dropout_mask,
|
| 690 |
+
)
|
| 691 |
+
|
| 692 |
+
|
| 693 |
+
def dropout_add_layer_norm_subset(
|
| 694 |
+
x0,
|
| 695 |
+
residual,
|
| 696 |
+
weight,
|
| 697 |
+
bias,
|
| 698 |
+
dropout_p,
|
| 699 |
+
epsilon,
|
| 700 |
+
layerscale=None,
|
| 701 |
+
x0_subset=None,
|
| 702 |
+
out_subset=None,
|
| 703 |
+
rowscale_const=1.0,
|
| 704 |
+
out_numrows=0,
|
| 705 |
+
prenorm=False,
|
| 706 |
+
residual_in_fp32=False,
|
| 707 |
+
return_dropout_mask=False,
|
| 708 |
+
):
|
| 709 |
+
"""residual_in_fp32 only has an effect if residual is None.
|
| 710 |
+
Otherwise residual dtype is residual.dtype.
|
| 711 |
+
"""
|
| 712 |
+
return DropoutAddLayerNormSubsetFn.apply(
|
| 713 |
+
x0,
|
| 714 |
+
residual,
|
| 715 |
+
weight,
|
| 716 |
+
bias,
|
| 717 |
+
layerscale,
|
| 718 |
+
x0_subset,
|
| 719 |
+
out_subset,
|
| 720 |
+
dropout_p,
|
| 721 |
+
epsilon,
|
| 722 |
+
rowscale_const,
|
| 723 |
+
out_numrows,
|
| 724 |
+
residual_in_fp32,
|
| 725 |
+
prenorm,
|
| 726 |
+
False,
|
| 727 |
+
return_dropout_mask,
|
| 728 |
+
)
|
| 729 |
+
|
| 730 |
+
|
| 731 |
+
def dropout_add_layer_norm_parallel_residual(
|
| 732 |
+
x0,
|
| 733 |
+
x1,
|
| 734 |
+
residual,
|
| 735 |
+
weight0,
|
| 736 |
+
bias0,
|
| 737 |
+
weight1,
|
| 738 |
+
bias1,
|
| 739 |
+
dropout_p,
|
| 740 |
+
epsilon,
|
| 741 |
+
prenorm=False,
|
| 742 |
+
residual_in_fp32=False,
|
| 743 |
+
return_dropout_mask=False,
|
| 744 |
+
):
|
| 745 |
+
"""residual_in_fp32 only has an effect if residual is None.
|
| 746 |
+
Otherwise residual dtype is residual.dtype.
|
| 747 |
+
"""
|
| 748 |
+
return DropoutAddLayerNormParallelResidualFn.apply(
|
| 749 |
+
x0,
|
| 750 |
+
x1,
|
| 751 |
+
residual,
|
| 752 |
+
weight0,
|
| 753 |
+
bias0,
|
| 754 |
+
weight1,
|
| 755 |
+
bias1,
|
| 756 |
+
dropout_p,
|
| 757 |
+
epsilon,
|
| 758 |
+
residual_in_fp32,
|
| 759 |
+
prenorm,
|
| 760 |
+
False,
|
| 761 |
+
return_dropout_mask,
|
| 762 |
+
)
|
| 763 |
+
|
| 764 |
+
|
| 765 |
+
class DropoutAddLayerNorm(torch.nn.Module):
|
| 766 |
+
def __init__(
|
| 767 |
+
self,
|
| 768 |
+
hidden_size,
|
| 769 |
+
prenorm=False,
|
| 770 |
+
p=0.0,
|
| 771 |
+
eps=1e-5,
|
| 772 |
+
residual_in_fp32=False,
|
| 773 |
+
device=None,
|
| 774 |
+
dtype=None,
|
| 775 |
+
):
|
| 776 |
+
factory_kwargs = {"device": device, "dtype": dtype}
|
| 777 |
+
super().__init__()
|
| 778 |
+
self.prenorm = prenorm
|
| 779 |
+
self.p = p
|
| 780 |
+
self.eps = eps
|
| 781 |
+
self.residual_in_fp32 = residual_in_fp32
|
| 782 |
+
self.weight = torch.nn.Parameter(torch.empty(hidden_size, **factory_kwargs))
|
| 783 |
+
self.bias = torch.nn.Parameter(torch.empty(hidden_size, **factory_kwargs))
|
| 784 |
+
self.reset_parameters()
|
| 785 |
+
|
| 786 |
+
def reset_parameters(self):
|
| 787 |
+
init.ones_(self.weight)
|
| 788 |
+
init.zeros_(self.bias)
|
| 789 |
+
|
| 790 |
+
def forward(self, x0, residual=None):
|
| 791 |
+
return dropout_add_layer_norm(
|
| 792 |
+
x0,
|
| 793 |
+
residual,
|
| 794 |
+
self.weight,
|
| 795 |
+
self.bias,
|
| 796 |
+
self.p if self.training else 0.0,
|
| 797 |
+
self.eps,
|
| 798 |
+
prenorm=self.prenorm,
|
| 799 |
+
residual_in_fp32=self.residual_in_fp32,
|
| 800 |
+
)
|
infer_4_30_0/lib/python3.10/site-packages/gguf-0.10.0.dist-info/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2023 Georgi Gerganov
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
infer_4_30_0/lib/python3.10/site-packages/gguf-0.10.0.dist-info/METADATA
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.1
|
| 2 |
+
Name: gguf
|
| 3 |
+
Version: 0.10.0
|
| 4 |
+
Summary: Read and write ML models in GGUF for GGML
|
| 5 |
+
Home-page: https://ggml.ai
|
| 6 |
+
Keywords: ggml,gguf,llama.cpp
|
| 7 |
+
Author: GGML
|
| 8 |
+
Author-email: [email protected]
|
| 9 |
+
Requires-Python: >=3.8
|
| 10 |
+
Classifier: License :: OSI Approved :: MIT License
|
| 11 |
+
Classifier: Operating System :: OS Independent
|
| 12 |
+
Classifier: Programming Language :: Python :: 3
|
| 13 |
+
Classifier: Programming Language :: Python :: 3.8
|
| 14 |
+
Classifier: Programming Language :: Python :: 3.9
|
| 15 |
+
Classifier: Programming Language :: Python :: 3.10
|
| 16 |
+
Classifier: Programming Language :: Python :: 3.11
|
| 17 |
+
Classifier: Programming Language :: Python :: 3.12
|
| 18 |
+
Requires-Dist: numpy (>=1.17)
|
| 19 |
+
Requires-Dist: pyyaml (>=5.1)
|
| 20 |
+
Requires-Dist: tqdm (>=4.27)
|
| 21 |
+
Project-URL: Repository, https://github.com/ggerganov/llama.cpp
|
| 22 |
+
Description-Content-Type: text/markdown
|
| 23 |
+
|
| 24 |
+
## gguf
|
| 25 |
+
|
| 26 |
+
This is a Python package for writing binary files in the [GGUF](https://github.com/ggerganov/ggml/pull/302)
|
| 27 |
+
(GGML Universal File) format.
|
| 28 |
+
|
| 29 |
+
See [convert_hf_to_gguf.py](https://github.com/ggerganov/llama.cpp/blob/master/convert_hf_to_gguf.py)
|
| 30 |
+
as an example for its usage.
|
| 31 |
+
|
| 32 |
+
## Installation
|
| 33 |
+
```sh
|
| 34 |
+
pip install gguf
|
| 35 |
+
```
|
| 36 |
+
|
| 37 |
+
## API Examples/Simple Tools
|
| 38 |
+
|
| 39 |
+
[examples/writer.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/examples/writer.py) — Generates `example.gguf` in the current directory to demonstrate generating a GGUF file. Note that this file cannot be used as a model.
|
| 40 |
+
|
| 41 |
+
[scripts/gguf_dump.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/scripts/gguf_dump.py) — Dumps a GGUF file's metadata to the console.
|
| 42 |
+
|
| 43 |
+
[scripts/gguf_set_metadata.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/scripts/gguf_set_metadata.py) — Allows changing simple metadata values in a GGUF file by key.
|
| 44 |
+
|
| 45 |
+
[scripts/gguf_convert_endian.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/scripts/gguf_convert_endian.py) — Allows converting the endianness of GGUF files.
|
| 46 |
+
|
| 47 |
+
[scripts/gguf_new_metadata.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/scripts/gguf_new_metadata.py) — Copies a GGUF file with added/modified/removed metadata values.
|
| 48 |
+
|
| 49 |
+
## Development
|
| 50 |
+
Maintainers who participate in development of this package are advised to install it in editable mode:
|
| 51 |
+
|
| 52 |
+
```sh
|
| 53 |
+
cd /path/to/llama.cpp/gguf-py
|
| 54 |
+
|
| 55 |
+
pip install --editable .
|
| 56 |
+
```
|
| 57 |
+
|
| 58 |
+
**Note**: This may require to upgrade your Pip installation, with a message saying that editable installation currently requires `setup.py`.
|
| 59 |
+
In this case, upgrade Pip to the latest:
|
| 60 |
+
|
| 61 |
+
```sh
|
| 62 |
+
pip install --upgrade pip
|
| 63 |
+
```
|
| 64 |
+
|
| 65 |
+
## Automatic publishing with CI
|
| 66 |
+
|
| 67 |
+
There's a GitHub workflow to make a release automatically upon creation of tags in a specified format.
|
| 68 |
+
|
| 69 |
+
1. Bump the version in `pyproject.toml`.
|
| 70 |
+
2. Create a tag named `gguf-vx.x.x` where `x.x.x` is the semantic version number.
|
| 71 |
+
|
| 72 |
+
```sh
|
| 73 |
+
git tag -a gguf-v1.0.0 -m "Version 1.0 release"
|
| 74 |
+
```
|
| 75 |
+
|
| 76 |
+
3. Push the tags.
|
| 77 |
+
|
| 78 |
+
```sh
|
| 79 |
+
git push origin --tags
|
| 80 |
+
```
|
| 81 |
+
|
| 82 |
+
## Manual publishing
|
| 83 |
+
If you want to publish the package manually for any reason, you need to have `twine` and `build` installed:
|
| 84 |
+
|
| 85 |
+
```sh
|
| 86 |
+
pip install build twine
|
| 87 |
+
```
|
| 88 |
+
|
| 89 |
+
Then, follow these steps to release a new version:
|
| 90 |
+
|
| 91 |
+
1. Bump the version in `pyproject.toml`.
|
| 92 |
+
2. Build the package:
|
| 93 |
+
|
| 94 |
+
```sh
|
| 95 |
+
python -m build
|
| 96 |
+
```
|
| 97 |
+
|
| 98 |
+
3. Upload the generated distribution archives:
|
| 99 |
+
|
| 100 |
+
```sh
|
| 101 |
+
python -m twine upload dist/*
|
| 102 |
+
```
|
| 103 |
+
|
| 104 |
+
## Run Unit Tests
|
| 105 |
+
|
| 106 |
+
From root of this repository you can run this command to run all the unit tests
|
| 107 |
+
|
| 108 |
+
```bash
|
| 109 |
+
python -m unittest discover ./gguf-py -v
|
| 110 |
+
```
|
| 111 |
+
|
| 112 |
+
## TODO
|
| 113 |
+
- [ ] Include conversion scripts as command line entry points in this package.
|
| 114 |
+
|
infer_4_30_0/lib/python3.10/site-packages/lazy_loader/__init__.py
ADDED
|
@@ -0,0 +1,349 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
lazy_loader
|
| 3 |
+
===========
|
| 4 |
+
|
| 5 |
+
Makes it easy to load subpackages and functions on demand.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import ast
|
| 9 |
+
import importlib
|
| 10 |
+
import importlib.util
|
| 11 |
+
import os
|
| 12 |
+
import sys
|
| 13 |
+
import threading
|
| 14 |
+
import types
|
| 15 |
+
import warnings
|
| 16 |
+
|
| 17 |
+
__version__ = "0.4"
|
| 18 |
+
__all__ = ["attach", "load", "attach_stub"]
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
threadlock = threading.Lock()
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def attach(package_name, submodules=None, submod_attrs=None):
|
| 25 |
+
"""Attach lazily loaded submodules, functions, or other attributes.
|
| 26 |
+
|
| 27 |
+
Typically, modules import submodules and attributes as follows::
|
| 28 |
+
|
| 29 |
+
import mysubmodule
|
| 30 |
+
import anothersubmodule
|
| 31 |
+
|
| 32 |
+
from .foo import someattr
|
| 33 |
+
|
| 34 |
+
The idea is to replace a package's `__getattr__`, `__dir__`, and
|
| 35 |
+
`__all__`, such that all imports work exactly the way they would
|
| 36 |
+
with normal imports, except that the import occurs upon first use.
|
| 37 |
+
|
| 38 |
+
The typical way to call this function, replacing the above imports, is::
|
| 39 |
+
|
| 40 |
+
__getattr__, __dir__, __all__ = lazy.attach(
|
| 41 |
+
__name__,
|
| 42 |
+
['mysubmodule', 'anothersubmodule'],
|
| 43 |
+
{'foo': ['someattr']}
|
| 44 |
+
)
|
| 45 |
+
|
| 46 |
+
This functionality requires Python 3.7 or higher.
|
| 47 |
+
|
| 48 |
+
Parameters
|
| 49 |
+
----------
|
| 50 |
+
package_name : str
|
| 51 |
+
Typically use ``__name__``.
|
| 52 |
+
submodules : set
|
| 53 |
+
List of submodules to attach.
|
| 54 |
+
submod_attrs : dict
|
| 55 |
+
Dictionary of submodule -> list of attributes / functions.
|
| 56 |
+
These attributes are imported as they are used.
|
| 57 |
+
|
| 58 |
+
Returns
|
| 59 |
+
-------
|
| 60 |
+
__getattr__, __dir__, __all__
|
| 61 |
+
|
| 62 |
+
"""
|
| 63 |
+
if submod_attrs is None:
|
| 64 |
+
submod_attrs = {}
|
| 65 |
+
|
| 66 |
+
if submodules is None:
|
| 67 |
+
submodules = set()
|
| 68 |
+
else:
|
| 69 |
+
submodules = set(submodules)
|
| 70 |
+
|
| 71 |
+
attr_to_modules = {
|
| 72 |
+
attr: mod for mod, attrs in submod_attrs.items() for attr in attrs
|
| 73 |
+
}
|
| 74 |
+
|
| 75 |
+
__all__ = sorted(submodules | attr_to_modules.keys())
|
| 76 |
+
|
| 77 |
+
def __getattr__(name):
|
| 78 |
+
if name in submodules:
|
| 79 |
+
return importlib.import_module(f"{package_name}.{name}")
|
| 80 |
+
elif name in attr_to_modules:
|
| 81 |
+
submod_path = f"{package_name}.{attr_to_modules[name]}"
|
| 82 |
+
submod = importlib.import_module(submod_path)
|
| 83 |
+
attr = getattr(submod, name)
|
| 84 |
+
|
| 85 |
+
# If the attribute lives in a file (module) with the same
|
| 86 |
+
# name as the attribute, ensure that the attribute and *not*
|
| 87 |
+
# the module is accessible on the package.
|
| 88 |
+
if name == attr_to_modules[name]:
|
| 89 |
+
pkg = sys.modules[package_name]
|
| 90 |
+
pkg.__dict__[name] = attr
|
| 91 |
+
|
| 92 |
+
return attr
|
| 93 |
+
else:
|
| 94 |
+
raise AttributeError(f"No {package_name} attribute {name}")
|
| 95 |
+
|
| 96 |
+
def __dir__():
|
| 97 |
+
return __all__
|
| 98 |
+
|
| 99 |
+
if os.environ.get("EAGER_IMPORT", ""):
|
| 100 |
+
for attr in set(attr_to_modules.keys()) | submodules:
|
| 101 |
+
__getattr__(attr)
|
| 102 |
+
|
| 103 |
+
return __getattr__, __dir__, list(__all__)
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
class DelayedImportErrorModule(types.ModuleType):
|
| 107 |
+
def __init__(self, frame_data, *args, message, **kwargs):
|
| 108 |
+
self.__frame_data = frame_data
|
| 109 |
+
self.__message = message
|
| 110 |
+
super().__init__(*args, **kwargs)
|
| 111 |
+
|
| 112 |
+
def __getattr__(self, x):
|
| 113 |
+
if x in ("__class__", "__file__", "__frame_data", "__message"):
|
| 114 |
+
super().__getattr__(x)
|
| 115 |
+
else:
|
| 116 |
+
fd = self.__frame_data
|
| 117 |
+
raise ModuleNotFoundError(
|
| 118 |
+
f"{self.__message}\n\n"
|
| 119 |
+
"This error is lazily reported, having originally occured in\n"
|
| 120 |
+
f' File {fd["filename"]}, line {fd["lineno"]}, in {fd["function"]}\n\n'
|
| 121 |
+
f'----> {"".join(fd["code_context"] or "").strip()}'
|
| 122 |
+
)
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def load(fullname, *, require=None, error_on_import=False):
|
| 126 |
+
"""Return a lazily imported proxy for a module.
|
| 127 |
+
|
| 128 |
+
We often see the following pattern::
|
| 129 |
+
|
| 130 |
+
def myfunc():
|
| 131 |
+
import numpy as np
|
| 132 |
+
np.norm(...)
|
| 133 |
+
....
|
| 134 |
+
|
| 135 |
+
Putting the import inside the function prevents, in this case,
|
| 136 |
+
`numpy`, from being imported at function definition time.
|
| 137 |
+
That saves time if `myfunc` ends up not being called.
|
| 138 |
+
|
| 139 |
+
This `load` function returns a proxy module that, upon access, imports
|
| 140 |
+
the actual module. So the idiom equivalent to the above example is::
|
| 141 |
+
|
| 142 |
+
np = lazy.load("numpy")
|
| 143 |
+
|
| 144 |
+
def myfunc():
|
| 145 |
+
np.norm(...)
|
| 146 |
+
....
|
| 147 |
+
|
| 148 |
+
The initial import time is fast because the actual import is delayed
|
| 149 |
+
until the first attribute is requested. The overall import time may
|
| 150 |
+
decrease as well for users that don't make use of large portions
|
| 151 |
+
of your library.
|
| 152 |
+
|
| 153 |
+
Warning
|
| 154 |
+
-------
|
| 155 |
+
While lazily loading *sub*packages technically works, it causes the
|
| 156 |
+
package (that contains the subpackage) to be eagerly loaded even
|
| 157 |
+
if the package is already lazily loaded.
|
| 158 |
+
So, you probably shouldn't use subpackages with this `load` feature.
|
| 159 |
+
Instead you should encourage the package maintainers to use the
|
| 160 |
+
`lazy_loader.attach` to make their subpackages load lazily.
|
| 161 |
+
|
| 162 |
+
Parameters
|
| 163 |
+
----------
|
| 164 |
+
fullname : str
|
| 165 |
+
The full name of the module or submodule to import. For example::
|
| 166 |
+
|
| 167 |
+
sp = lazy.load('scipy') # import scipy as sp
|
| 168 |
+
|
| 169 |
+
require : str
|
| 170 |
+
A dependency requirement as defined in PEP-508. For example::
|
| 171 |
+
|
| 172 |
+
"numpy >=1.24"
|
| 173 |
+
|
| 174 |
+
If defined, the proxy module will raise an error if the installed
|
| 175 |
+
version does not satisfy the requirement.
|
| 176 |
+
|
| 177 |
+
error_on_import : bool
|
| 178 |
+
Whether to postpone raising import errors until the module is accessed.
|
| 179 |
+
If set to `True`, import errors are raised as soon as `load` is called.
|
| 180 |
+
|
| 181 |
+
Returns
|
| 182 |
+
-------
|
| 183 |
+
pm : importlib.util._LazyModule
|
| 184 |
+
Proxy module. Can be used like any regularly imported module.
|
| 185 |
+
Actual loading of the module occurs upon first attribute request.
|
| 186 |
+
|
| 187 |
+
"""
|
| 188 |
+
with threadlock:
|
| 189 |
+
module = sys.modules.get(fullname)
|
| 190 |
+
have_module = module is not None
|
| 191 |
+
|
| 192 |
+
# Most common, short-circuit
|
| 193 |
+
if have_module and require is None:
|
| 194 |
+
return module
|
| 195 |
+
|
| 196 |
+
if "." in fullname:
|
| 197 |
+
msg = (
|
| 198 |
+
"subpackages can technically be lazily loaded, but it causes the "
|
| 199 |
+
"package to be eagerly loaded even if it is already lazily loaded."
|
| 200 |
+
"So, you probably shouldn't use subpackages with this lazy feature."
|
| 201 |
+
)
|
| 202 |
+
warnings.warn(msg, RuntimeWarning)
|
| 203 |
+
|
| 204 |
+
spec = None
|
| 205 |
+
|
| 206 |
+
if not have_module:
|
| 207 |
+
spec = importlib.util.find_spec(fullname)
|
| 208 |
+
have_module = spec is not None
|
| 209 |
+
|
| 210 |
+
if not have_module:
|
| 211 |
+
not_found_message = f"No module named '{fullname}'"
|
| 212 |
+
elif require is not None:
|
| 213 |
+
try:
|
| 214 |
+
have_module = _check_requirement(require)
|
| 215 |
+
except ModuleNotFoundError as e:
|
| 216 |
+
raise ValueError(
|
| 217 |
+
f"Found module '{fullname}' but cannot test "
|
| 218 |
+
"requirement '{require}'. "
|
| 219 |
+
"Requirements must match distribution name, not module name."
|
| 220 |
+
) from e
|
| 221 |
+
|
| 222 |
+
not_found_message = f"No distribution can be found matching '{require}'"
|
| 223 |
+
|
| 224 |
+
if not have_module:
|
| 225 |
+
if error_on_import:
|
| 226 |
+
raise ModuleNotFoundError(not_found_message)
|
| 227 |
        import inspect

        try:
            parent = inspect.stack()[1]
            frame_data = {
                "filename": parent.filename,
                "lineno": parent.lineno,
                "function": parent.function,
                "code_context": parent.code_context,
            }
            return DelayedImportErrorModule(
                frame_data,
                "DelayedImportErrorModule",
                message=not_found_message,
            )
        finally:
            del parent

    if spec is not None:
        module = importlib.util.module_from_spec(spec)
        sys.modules[fullname] = module

        loader = importlib.util.LazyLoader(spec.loader)
        loader.exec_module(module)

    return module


def _check_requirement(require: str) -> bool:
    """Verify that a package requirement is satisfied

    If the package is required, a ``ModuleNotFoundError`` is raised
    by ``importlib.metadata``.

    Parameters
    ----------
    require : str
        A dependency requirement as defined in PEP-508

    Returns
    -------
    satisfied : bool
        True if the installed version of the dependency matches
        the specified version, False otherwise.
    """
    import packaging.requirements

    try:
        import importlib.metadata as importlib_metadata
    except ImportError:  # PY37
        import importlib_metadata

    req = packaging.requirements.Requirement(require)
    return req.specifier.contains(
        importlib_metadata.version(req.name),
        prereleases=True,
    )


class _StubVisitor(ast.NodeVisitor):
    """AST visitor to parse a stub file for submodules and submod_attrs."""

    def __init__(self):
        self._submodules = set()
        self._submod_attrs = {}

    def visit_ImportFrom(self, node: ast.ImportFrom):
        if node.level != 1:
            raise ValueError(
                "Only within-module imports are supported (`from .* import`)"
            )
        if node.module:
            attrs: list = self._submod_attrs.setdefault(node.module, [])
            aliases = [alias.name for alias in node.names]
            if "*" in aliases:
                raise ValueError(
                    "lazy stub loader does not support star import "
                    f"`from {node.module} import *`"
                )
            attrs.extend(aliases)
        else:
            self._submodules.update(alias.name for alias in node.names)


def attach_stub(package_name: str, filename: str):
    """Attach lazily loaded submodules, functions from a type stub.

    This is a variant on ``attach`` that will parse a `.pyi` stub file to
    infer ``submodules`` and ``submod_attrs``. This allows static type checkers
    to find imports, while still providing lazy loading at runtime.

    Parameters
    ----------
    package_name : str
        Typically use ``__name__``.
    filename : str
        Path to `.py` file which has an adjacent `.pyi` file.
        Typically use ``__file__``.

    Returns
    -------
    __getattr__, __dir__, __all__
        The same output as ``attach``.

    Raises
    ------
    ValueError
        If a stub file is not found for `filename`, or if the stub file is
        formatted incorrectly (e.g. if it contains a relative import from
        outside of the module)
    """
    stubfile = (
        filename if filename.endswith("i") else f"{os.path.splitext(filename)[0]}.pyi"
    )

    if not os.path.exists(stubfile):
        raise ValueError(f"Cannot load imports from non-existent stub {stubfile!r}")

    with open(stubfile) as f:
        stub_node = ast.parse(f.read())

    visitor = _StubVisitor()
    visitor.visit(stub_node)
    return attach(package_name, visitor._submodules, visitor._submod_attrs)
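For orientation, here is a minimal sketch of how a package would typically consume the two public entry points added above; the package name mypkg and the version string are hypothetical placeholders, not taken from this diff:

# mypkg/__init__.py -- hypothetical package using the stub-based variant.
# lazy.attach_stub parses the adjacent mypkg/__init__.pyi and defers the
# real submodule imports until an attribute is first accessed.
import lazy_loader as lazy

__getattr__, __dir__, __all__ = lazy.attach_stub(__name__, __file__)

# Elsewhere in application code, an optional dependency can be proxied and
# gated on a PEP 508 requirement string (version shown is made up).
np = lazy.load("numpy", require="numpy >= 1.21")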
infer_4_30_0/lib/python3.10/site-packages/lazy_loader/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (10.5 kB).

infer_4_30_0/lib/python3.10/site-packages/lazy_loader/tests/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (175 Bytes).

infer_4_30_0/lib/python3.10/site-packages/lazy_loader/tests/__pycache__/import_np_parallel.cpython-310.pyc
ADDED
Binary file (489 Bytes).

infer_4_30_0/lib/python3.10/site-packages/lazy_loader/tests/__pycache__/test_lazy_loader.cpython-310.pyc
ADDED
Binary file (5.17 kB).
infer_4_30_0/lib/python3.10/site-packages/lazy_loader/tests/fake_pkg/some_func.py
ADDED
@@ -0,0 +1,3 @@
def some_func():
    """Function with same name as submodule."""
    pass
infer_4_30_0/lib/python3.10/site-packages/lazy_loader/tests/import_np_parallel.py
ADDED
@@ -0,0 +1,13 @@
import threading
import time

import lazy_loader as lazy


def import_np():
    time.sleep(0.5)
    lazy.load("numpy")


for _ in range(10):
    threading.Thread(target=import_np).start()
infer_4_30_0/lib/python3.10/site-packages/lazy_loader/tests/test_lazy_loader.py
ADDED
@@ -0,0 +1,192 @@
import importlib
import os
import subprocess
import sys
import types
from unittest import mock

import pytest

import lazy_loader as lazy


def test_lazy_import_basics():
    math = lazy.load("math")
    anything_not_real = lazy.load("anything_not_real")

    # Now test that accessing attributes does what it should
    assert math.sin(math.pi) == pytest.approx(0, 1e-6)
    # poor-mans pytest.raises for testing errors on attribute access
    try:
        anything_not_real.pi
        raise AssertionError()  # Should not get here
    except ModuleNotFoundError:
        pass
    assert isinstance(anything_not_real, lazy.DelayedImportErrorModule)
    # see if it changes for second access
    try:
        anything_not_real.pi
        raise AssertionError()  # Should not get here
    except ModuleNotFoundError:
        pass


def test_lazy_import_subpackages():
    with pytest.warns(RuntimeWarning):
        hp = lazy.load("html.parser")
    assert "html" in sys.modules
    assert type(sys.modules["html"]) == type(pytest)
    assert isinstance(hp, importlib.util._LazyModule)
    assert "html.parser" in sys.modules
    assert sys.modules["html.parser"] == hp


def test_lazy_import_impact_on_sys_modules():
    math = lazy.load("math")
    anything_not_real = lazy.load("anything_not_real")

    assert isinstance(math, types.ModuleType)
    assert "math" in sys.modules
    assert isinstance(anything_not_real, lazy.DelayedImportErrorModule)
    assert "anything_not_real" not in sys.modules

    # only do this if numpy is installed
    pytest.importorskip("numpy")
    np = lazy.load("numpy")
    assert isinstance(np, types.ModuleType)
    assert "numpy" in sys.modules

    np.pi  # trigger load of numpy

    assert isinstance(np, types.ModuleType)
    assert "numpy" in sys.modules


def test_lazy_import_nonbuiltins():
    np = lazy.load("numpy")
    sp = lazy.load("scipy")
    if not isinstance(np, lazy.DelayedImportErrorModule):
        assert np.sin(np.pi) == pytest.approx(0, 1e-6)
    if isinstance(sp, lazy.DelayedImportErrorModule):
        try:
            sp.pi
            raise AssertionError()
        except ModuleNotFoundError:
            pass


def test_lazy_attach():
    name = "mymod"
    submods = ["mysubmodule", "anothersubmodule"]
    myall = {"not_real_submod": ["some_var_or_func"]}

    locls = {
        "attach": lazy.attach,
        "name": name,
        "submods": submods,
        "myall": myall,
    }
    s = "__getattr__, __lazy_dir__, __all__ = attach(name, submods, myall)"

    exec(s, {}, locls)
    expected = {
        "attach": lazy.attach,
        "name": name,
        "submods": submods,
        "myall": myall,
        "__getattr__": None,
        "__lazy_dir__": None,
        "__all__": None,
    }
    assert locls.keys() == expected.keys()
    for k, v in expected.items():
        if v is not None:
            assert locls[k] == v


def test_attach_same_module_and_attr_name():
    from lazy_loader.tests import fake_pkg

    # Grab attribute twice, to ensure that importing it does not
    # override function by module
    assert isinstance(fake_pkg.some_func, types.FunctionType)
    assert isinstance(fake_pkg.some_func, types.FunctionType)

    # Ensure imports from submodule still work
    from lazy_loader.tests.fake_pkg.some_func import some_func

    assert isinstance(some_func, types.FunctionType)


FAKE_STUB = """
from . import rank
from ._gaussian import gaussian
from .edges import sobel, scharr, prewitt, roberts
"""


def test_stub_loading(tmp_path):
    stub = tmp_path / "stub.pyi"
    stub.write_text(FAKE_STUB)
    _get, _dir, _all = lazy.attach_stub("my_module", str(stub))
    expect = {"gaussian", "sobel", "scharr", "prewitt", "roberts", "rank"}
    assert set(_dir()) == set(_all) == expect


def test_stub_loading_parity():
    from lazy_loader.tests import fake_pkg

    from_stub = lazy.attach_stub(fake_pkg.__name__, fake_pkg.__file__)
    stub_getter, stub_dir, stub_all = from_stub
    assert stub_all == fake_pkg.__all__
    assert stub_dir() == fake_pkg.__lazy_dir__()
    assert stub_getter("some_func") == fake_pkg.some_func


def test_stub_loading_errors(tmp_path):
    stub = tmp_path / "stub.pyi"
    stub.write_text("from ..mod import func\n")

    with pytest.raises(ValueError, match="Only within-module imports are supported"):
        lazy.attach_stub("name", str(stub))

    with pytest.raises(ValueError, match="Cannot load imports from non-existent stub"):
        lazy.attach_stub("name", "not a file")

    stub2 = tmp_path / "stub2.pyi"
    stub2.write_text("from .mod import *\n")
    with pytest.raises(ValueError, match=".*does not support star import"):
        lazy.attach_stub("name", str(stub2))


def test_require_kwarg():
    have_importlib_metadata = importlib.util.find_spec("importlib.metadata") is not None
    dot = "." if have_importlib_metadata else "_"
    # Test with a module that definitely exists, behavior hinges on requirement
    with mock.patch(f"importlib{dot}metadata.version") as version:
        version.return_value = "1.0.0"
        math = lazy.load("math", require="somepkg >= 2.0")
        assert isinstance(math, lazy.DelayedImportErrorModule)

        math = lazy.load("math", require="somepkg >= 1.0")
        assert math.sin(math.pi) == pytest.approx(0, 1e-6)

        # We can fail even after a successful import
        math = lazy.load("math", require="somepkg >= 2.0")
        assert isinstance(math, lazy.DelayedImportErrorModule)

    # When a module can be loaded but the version can't be checked,
    # raise a ValueError
    with pytest.raises(ValueError):
        lazy.load("math", require="somepkg >= 1.0")


def test_parallel_load():
    pytest.importorskip("numpy")

    subprocess.run(
        [
            sys.executable,
            os.path.join(os.path.dirname(__file__), "import_np_parallel.py"),
        ]
    )
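For readers skimming the tests: test_lazy_attach drives lazy.attach only indirectly through exec, so as a plain sketch the same call in an ordinary package __init__.py would look roughly like the following (the package and submodule names are invented for illustration, not taken from this repository):

# hypothetical mypkg/__init__.py using the explicitly declared form of attach
import lazy_loader as lazy

__getattr__, __dir__, __all__ = lazy.attach(
    __name__,
    submodules=["mysubmodule"],                          # imported on first attribute access
    submod_attrs={"anothersubmodule": ["some_func"]},    # attribute re-exported lazily
)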
infer_4_30_0/lib/python3.10/site-packages/nvidia_nvtx_cu11-11.7.91.dist-info/INSTALLER
ADDED
@@ -0,0 +1 @@
pip
infer_4_30_0/lib/python3.10/site-packages/nvidia_nvtx_cu11-11.7.91.dist-info/License.txt
ADDED
@@ -0,0 +1,1568 @@
| 1 |
+
End User License Agreement
|
| 2 |
+
--------------------------
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
Preface
|
| 6 |
+
-------
|
| 7 |
+
|
| 8 |
+
The Software License Agreement in Chapter 1 and the Supplement
|
| 9 |
+
in Chapter 2 contain license terms and conditions that govern
|
| 10 |
+
the use of NVIDIA software. By accepting this agreement, you
|
| 11 |
+
agree to comply with all the terms and conditions applicable
|
| 12 |
+
to the product(s) included herein.
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
NVIDIA Driver
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
Description
|
| 19 |
+
|
| 20 |
+
This package contains the operating system driver and
|
| 21 |
+
fundamental system software components for NVIDIA GPUs.
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
NVIDIA CUDA Toolkit
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
Description
|
| 28 |
+
|
| 29 |
+
The NVIDIA CUDA Toolkit provides command-line and graphical
|
| 30 |
+
tools for building, debugging and optimizing the performance
|
| 31 |
+
of applications accelerated by NVIDIA GPUs, runtime and math
|
| 32 |
+
libraries, and documentation including programming guides,
|
| 33 |
+
user manuals, and API references.
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
Default Install Location of CUDA Toolkit
|
| 37 |
+
|
| 38 |
+
Windows platform:
|
| 39 |
+
|
| 40 |
+
%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v#.#
|
| 41 |
+
|
| 42 |
+
Linux platform:
|
| 43 |
+
|
| 44 |
+
/usr/local/cuda-#.#
|
| 45 |
+
|
| 46 |
+
Mac platform:
|
| 47 |
+
|
| 48 |
+
/Developer/NVIDIA/CUDA-#.#
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
NVIDIA CUDA Samples
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
Description
|
| 55 |
+
|
| 56 |
+
This package includes over 100+ CUDA examples that demonstrate
|
| 57 |
+
various CUDA programming principles, and efficient CUDA
|
| 58 |
+
implementation of algorithms in specific application domains.
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
Default Install Location of CUDA Samples
|
| 62 |
+
|
| 63 |
+
Windows platform:
|
| 64 |
+
|
| 65 |
+
%ProgramData%\NVIDIA Corporation\CUDA Samples\v#.#
|
| 66 |
+
|
| 67 |
+
Linux platform:
|
| 68 |
+
|
| 69 |
+
/usr/local/cuda-#.#/samples
|
| 70 |
+
|
| 71 |
+
and
|
| 72 |
+
|
| 73 |
+
$HOME/NVIDIA_CUDA-#.#_Samples
|
| 74 |
+
|
| 75 |
+
Mac platform:
|
| 76 |
+
|
| 77 |
+
/Developer/NVIDIA/CUDA-#.#/samples
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
NVIDIA Nsight Visual Studio Edition (Windows only)
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
Description
|
| 84 |
+
|
| 85 |
+
NVIDIA Nsight Development Platform, Visual Studio Edition is a
|
| 86 |
+
development environment integrated into Microsoft Visual
|
| 87 |
+
Studio that provides tools for debugging, profiling, analyzing
|
| 88 |
+
and optimizing your GPU computing and graphics applications.
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
Default Install Location of Nsight Visual Studio Edition
|
| 92 |
+
|
| 93 |
+
Windows platform:
|
| 94 |
+
|
| 95 |
+
%ProgramFiles(x86)%\NVIDIA Corporation\Nsight Visual Studio Edition #.#
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
1. License Agreement for NVIDIA Software Development Kits
|
| 99 |
+
---------------------------------------------------------
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
Release Date: July 26, 2018
|
| 103 |
+
---------------------------
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
Important Notice: Read before downloading,
|
| 107 |
+
copying or using the licensed software:
|
| 108 |
+
-------------------------------------------------------
|
| 109 |
+
|
| 110 |
+
This license agreement, including exhibits attached
|
| 111 |
+
("Agreement”) is a legal agreement between you and NVIDIA
|
| 112 |
+
Corporation ("NVIDIA") and governs your use of a NVIDIA
|
| 113 |
+
software development kit (“SDK”).
|
| 114 |
+
|
| 115 |
+
Each SDK has its own set of software and materials, but here
|
| 116 |
+
is a description of the types of items that may be included in
|
| 117 |
+
a SDK: source code, header files, APIs, data sets and assets
|
| 118 |
+
(examples include images, textures, models, scenes, videos,
|
| 119 |
+
native API input/output files), binary software, sample code,
|
| 120 |
+
libraries, utility programs, programming code and
|
| 121 |
+
documentation.
|
| 122 |
+
|
| 123 |
+
This Agreement can be accepted only by an adult of legal age
|
| 124 |
+
of majority in the country in which the SDK is used.
|
| 125 |
+
|
| 126 |
+
If you are entering into this Agreement on behalf of a company
|
| 127 |
+
or other legal entity, you represent that you have the legal
|
| 128 |
+
authority to bind the entity to this Agreement, in which case
|
| 129 |
+
“you” will mean the entity you represent.
|
| 130 |
+
|
| 131 |
+
If you don’t have the required age or authority to accept
|
| 132 |
+
this Agreement, or if you don’t accept all the terms and
|
| 133 |
+
conditions of this Agreement, do not download, install or use
|
| 134 |
+
the SDK.
|
| 135 |
+
|
| 136 |
+
You agree to use the SDK only for purposes that are permitted
|
| 137 |
+
by (a) this Agreement, and (b) any applicable law, regulation
|
| 138 |
+
or generally accepted practices or guidelines in the relevant
|
| 139 |
+
jurisdictions.
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
1.1. License
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
1.1.1. License Grant
|
| 146 |
+
|
| 147 |
+
Subject to the terms of this Agreement, NVIDIA hereby grants
|
| 148 |
+
you a non-exclusive, non-transferable license, without the
|
| 149 |
+
right to sublicense (except as expressly provided in this
|
| 150 |
+
Agreement) to:
|
| 151 |
+
|
| 152 |
+
1. Install and use the SDK,
|
| 153 |
+
|
| 154 |
+
2. Modify and create derivative works of sample source code
|
| 155 |
+
delivered in the SDK, and
|
| 156 |
+
|
| 157 |
+
3. Distribute those portions of the SDK that are identified
|
| 158 |
+
in this Agreement as distributable, as incorporated in
|
| 159 |
+
object code format into a software application that meets
|
| 160 |
+
the distribution requirements indicated in this Agreement.
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
1.1.2. Distribution Requirements
|
| 164 |
+
|
| 165 |
+
These are the distribution requirements for you to exercise
|
| 166 |
+
the distribution grant:
|
| 167 |
+
|
| 168 |
+
1. Your application must have material additional
|
| 169 |
+
functionality, beyond the included portions of the SDK.
|
| 170 |
+
|
| 171 |
+
2. The distributable portions of the SDK shall only be
|
| 172 |
+
accessed by your application.
|
| 173 |
+
|
| 174 |
+
3. The following notice shall be included in modifications
|
| 175 |
+
and derivative works of sample source code distributed:
|
| 176 |
+
“This software contains source code provided by NVIDIA
|
| 177 |
+
Corporation.”
|
| 178 |
+
|
| 179 |
+
4. Unless a developer tool is identified in this Agreement
|
| 180 |
+
as distributable, it is delivered for your internal use
|
| 181 |
+
only.
|
| 182 |
+
|
| 183 |
+
5. The terms under which you distribute your application
|
| 184 |
+
must be consistent with the terms of this Agreement,
|
| 185 |
+
including (without limitation) terms relating to the
|
| 186 |
+
license grant and license restrictions and protection of
|
| 187 |
+
NVIDIA’s intellectual property rights. Additionally, you
|
| 188 |
+
agree that you will protect the privacy, security and
|
| 189 |
+
legal rights of your application users.
|
| 190 |
+
|
| 191 |
+
6. You agree to notify NVIDIA in writing of any known or
|
| 192 |
+
suspected distribution or use of the SDK not in compliance
|
| 193 |
+
with the requirements of this Agreement, and to enforce
|
| 194 |
+
the terms of your agreements with respect to distributed
|
| 195 |
+
SDK.
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
1.1.3. Authorized Users
|
| 199 |
+
|
| 200 |
+
You may allow employees and contractors of your entity or of
|
| 201 |
+
your subsidiary(ies) to access and use the SDK from your
|
| 202 |
+
secure network to perform work on your behalf.
|
| 203 |
+
|
| 204 |
+
If you are an academic institution you may allow users
|
| 205 |
+
enrolled or employed by the academic institution to access and
|
| 206 |
+
use the SDK from your secure network.
|
| 207 |
+
|
| 208 |
+
You are responsible for the compliance with the terms of this
|
| 209 |
+
Agreement by your authorized users. If you become aware that
|
| 210 |
+
your authorized users didn’t follow the terms of this
|
| 211 |
+
Agreement, you agree to take reasonable steps to resolve the
|
| 212 |
+
non-compliance and prevent new occurrences.
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
1.1.4. Pre-Release SDK
|
| 216 |
+
|
| 217 |
+
The SDK versions identified as alpha, beta, preview or
|
| 218 |
+
otherwise as pre-release, may not be fully functional, may
|
| 219 |
+
contain errors or design flaws, and may have reduced or
|
| 220 |
+
different security, privacy, accessibility, availability, and
|
| 221 |
+
reliability standards relative to commercial versions of
|
| 222 |
+
NVIDIA software and materials. Use of a pre-release SDK may
|
| 223 |
+
result in unexpected results, loss of data, project delays or
|
| 224 |
+
other unpredictable damage or loss.
|
| 225 |
+
|
| 226 |
+
You may use a pre-release SDK at your own risk, understanding
|
| 227 |
+
that pre-release SDKs are not intended for use in production
|
| 228 |
+
or business-critical systems.
|
| 229 |
+
|
| 230 |
+
NVIDIA may choose not to make available a commercial version
|
| 231 |
+
of any pre-release SDK. NVIDIA may also choose to abandon
|
| 232 |
+
development and terminate the availability of a pre-release
|
| 233 |
+
SDK at any time without liability.
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
1.1.5. Updates
|
| 237 |
+
|
| 238 |
+
NVIDIA may, at its option, make available patches, workarounds
|
| 239 |
+
or other updates to this SDK. Unless the updates are provided
|
| 240 |
+
with their separate governing terms, they are deemed part of
|
| 241 |
+
the SDK licensed to you as provided in this Agreement. You
|
| 242 |
+
agree that the form and content of the SDK that NVIDIA
|
| 243 |
+
provides may change without prior notice to you. While NVIDIA
|
| 244 |
+
generally maintains compatibility between versions, NVIDIA may
|
| 245 |
+
in some cases make changes that introduce incompatibilities in
|
| 246 |
+
future versions of the SDK.
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
1.1.6. Third Party Licenses
|
| 250 |
+
|
| 251 |
+
The SDK may come bundled with, or otherwise include or be
|
| 252 |
+
distributed with, third party software licensed by a NVIDIA
|
| 253 |
+
supplier and/or open source software provided under an open
|
| 254 |
+
source license. Use of third party software is subject to the
|
| 255 |
+
third-party license terms, or in the absence of third party
|
| 256 |
+
terms, the terms of this Agreement. Copyright to third party
|
| 257 |
+
software is held by the copyright holders indicated in the
|
| 258 |
+
third-party software or license.
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
1.1.7. Reservation of Rights
|
| 262 |
+
|
| 263 |
+
NVIDIA reserves all rights, title, and interest in and to the
|
| 264 |
+
SDK, not expressly granted to you under this Agreement.
|
| 265 |
+
|
| 266 |
+
|
| 267 |
+
1.2. Limitations
|
| 268 |
+
|
| 269 |
+
The following license limitations apply to your use of the
|
| 270 |
+
SDK:
|
| 271 |
+
|
| 272 |
+
1. You may not reverse engineer, decompile or disassemble,
|
| 273 |
+
or remove copyright or other proprietary notices from any
|
| 274 |
+
portion of the SDK or copies of the SDK.
|
| 275 |
+
|
| 276 |
+
2. Except as expressly provided in this Agreement, you may
|
| 277 |
+
not copy, sell, rent, sublicense, transfer, distribute,
|
| 278 |
+
modify, or create derivative works of any portion of the
|
| 279 |
+
SDK. For clarity, you may not distribute or sublicense the
|
| 280 |
+
SDK as a stand-alone product.
|
| 281 |
+
|
| 282 |
+
3. Unless you have an agreement with NVIDIA for this
|
| 283 |
+
purpose, you may not indicate that an application created
|
| 284 |
+
with the SDK is sponsored or endorsed by NVIDIA.
|
| 285 |
+
|
| 286 |
+
4. You may not bypass, disable, or circumvent any
|
| 287 |
+
encryption, security, digital rights management or
|
| 288 |
+
authentication mechanism in the SDK.
|
| 289 |
+
|
| 290 |
+
5. You may not use the SDK in any manner that would cause it
|
| 291 |
+
to become subject to an open source software license. As
|
| 292 |
+
examples, licenses that require as a condition of use,
|
| 293 |
+
modification, and/or distribution that the SDK be:
|
| 294 |
+
|
| 295 |
+
a. Disclosed or distributed in source code form;
|
| 296 |
+
|
| 297 |
+
b. Licensed for the purpose of making derivative works;
|
| 298 |
+
or
|
| 299 |
+
|
| 300 |
+
c. Redistributable at no charge.
|
| 301 |
+
|
| 302 |
+
6. Unless you have an agreement with NVIDIA for this
|
| 303 |
+
purpose, you may not use the SDK with any system or
|
| 304 |
+
application where the use or failure of the system or
|
| 305 |
+
application can reasonably be expected to threaten or
|
| 306 |
+
result in personal injury, death, or catastrophic loss.
|
| 307 |
+
Examples include use in avionics, navigation, military,
|
| 308 |
+
medical, life support or other life critical applications.
|
| 309 |
+
NVIDIA does not design, test or manufacture the SDK for
|
| 310 |
+
these critical uses and NVIDIA shall not be liable to you
|
| 311 |
+
or any third party, in whole or in part, for any claims or
|
| 312 |
+
damages arising from such uses.
|
| 313 |
+
|
| 314 |
+
7. You agree to defend, indemnify and hold harmless NVIDIA
|
| 315 |
+
and its affiliates, and their respective employees,
|
| 316 |
+
contractors, agents, officers and directors, from and
|
| 317 |
+
against any and all claims, damages, obligations, losses,
|
| 318 |
+
liabilities, costs or debt, fines, restitutions and
|
| 319 |
+
expenses (including but not limited to attorney’s fees
|
| 320 |
+
and costs incident to establishing the right of
|
| 321 |
+
indemnification) arising out of or related to your use of
|
| 322 |
+
the SDK outside of the scope of this Agreement, or not in
|
| 323 |
+
compliance with its terms.
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
1.3. Ownership
|
| 327 |
+
|
| 328 |
+
1. NVIDIA or its licensors hold all rights, title and
|
| 329 |
+
interest in and to the SDK and its modifications and
|
| 330 |
+
derivative works, including their respective intellectual
|
| 331 |
+
property rights, subject to your rights described in this
|
| 332 |
+
section. This SDK may include software and materials from
|
| 333 |
+
NVIDIA’s licensors, and these licensors are intended
|
| 334 |
+
third party beneficiaries that may enforce this Agreement
|
| 335 |
+
with respect to their intellectual property rights.
|
| 336 |
+
|
| 337 |
+
2. You hold all rights, title and interest in and to your
|
| 338 |
+
applications and your derivative works of the sample
|
| 339 |
+
source code delivered in the SDK, including their
|
| 340 |
+
respective intellectual property rights, subject to
|
| 341 |
+
NVIDIA’s rights described in this section.
|
| 342 |
+
|
| 343 |
+
3. You may, but don’t have to, provide to NVIDIA
|
| 344 |
+
suggestions, feature requests or other feedback regarding
|
| 345 |
+
the SDK, including possible enhancements or modifications
|
| 346 |
+
to the SDK. For any feedback that you voluntarily provide,
|
| 347 |
+
you hereby grant NVIDIA and its affiliates a perpetual,
|
| 348 |
+
non-exclusive, worldwide, irrevocable license to use,
|
| 349 |
+
reproduce, modify, license, sublicense (through multiple
|
| 350 |
+
tiers of sublicensees), and distribute (through multiple
|
| 351 |
+
tiers of distributors) it without the payment of any
|
| 352 |
+
royalties or fees to you. NVIDIA will use feedback at its
|
| 353 |
+
choice. NVIDIA is constantly looking for ways to improve
|
| 354 |
+
its products, so you may send feedback to NVIDIA through
|
| 355 |
+
the developer portal at https://developer.nvidia.com.
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
1.4. No Warranties
|
| 359 |
+
|
| 360 |
+
THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL
|
| 361 |
+
FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND
|
| 362 |
+
ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND
|
| 363 |
+
OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING,
|
| 364 |
+
BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS
|
| 365 |
+
FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE
|
| 366 |
+
ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO
|
| 367 |
+
WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF
|
| 368 |
+
DEALING OR COURSE OF TRADE.
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
1.5. Limitation of Liability
|
| 372 |
+
|
| 373 |
+
TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS
|
| 374 |
+
AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL,
|
| 375 |
+
PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS
|
| 376 |
+
OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF
|
| 377 |
+
PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION
|
| 378 |
+
WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK,
|
| 379 |
+
WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH
|
| 380 |
+
OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE),
|
| 381 |
+
PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF
|
| 382 |
+
LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES
|
| 383 |
+
TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS
|
| 384 |
+
AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE
|
| 385 |
+
NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS
|
| 386 |
+
LIMIT.
|
| 387 |
+
|
| 388 |
+
These exclusions and limitations of liability shall apply
|
| 389 |
+
regardless if NVIDIA or its affiliates have been advised of
|
| 390 |
+
the possibility of such damages, and regardless of whether a
|
| 391 |
+
remedy fails its essential purpose. These exclusions and
|
| 392 |
+
limitations of liability form an essential basis of the
|
| 393 |
+
bargain between the parties, and, absent any of these
|
| 394 |
+
exclusions or limitations of liability, the provisions of this
|
| 395 |
+
Agreement, including, without limitation, the economic terms,
|
| 396 |
+
would be substantially different.
|
| 397 |
+
|
| 398 |
+
|
| 399 |
+
1.6. Termination
|
| 400 |
+
|
| 401 |
+
1. This Agreement will continue to apply until terminated by
|
| 402 |
+
either you or NVIDIA as described below.
|
| 403 |
+
|
| 404 |
+
2. If you want to terminate this Agreement, you may do so by
|
| 405 |
+
stopping to use the SDK.
|
| 406 |
+
|
| 407 |
+
3. NVIDIA may, at any time, terminate this Agreement if:
|
| 408 |
+
|
| 409 |
+
a. (i) you fail to comply with any term of this
|
| 410 |
+
Agreement and the non-compliance is not fixed within
|
| 411 |
+
thirty (30) days following notice from NVIDIA (or
|
| 412 |
+
immediately if you violate NVIDIA’s intellectual
|
| 413 |
+
property rights);
|
| 414 |
+
|
| 415 |
+
b. (ii) you commence or participate in any legal
|
| 416 |
+
proceeding against NVIDIA with respect to the SDK; or
|
| 417 |
+
|
| 418 |
+
c. (iii) NVIDIA decides to no longer provide the SDK in
|
| 419 |
+
a country or, in NVIDIA’s sole discretion, the
|
| 420 |
+
continued use of it is no longer commercially viable.
|
| 421 |
+
|
| 422 |
+
4. Upon any termination of this Agreement, you agree to
|
| 423 |
+
promptly discontinue use of the SDK and destroy all copies
|
| 424 |
+
in your possession or control. Your prior distributions in
|
| 425 |
+
accordance with this Agreement are not affected by the
|
| 426 |
+
termination of this Agreement. Upon written request, you
|
| 427 |
+
will certify in writing that you have complied with your
|
| 428 |
+
commitments under this section. Upon any termination of
|
| 429 |
+
this Agreement all provisions survive except for the
|
| 430 |
+
license grant provisions.
|
| 431 |
+
|
| 432 |
+
|
| 433 |
+
1.7. General
|
| 434 |
+
|
| 435 |
+
If you wish to assign this Agreement or your rights and
|
| 436 |
+
obligations, including by merger, consolidation, dissolution
|
| 437 |
+
or operation of law, contact NVIDIA to ask for permission. Any
|
| 438 |
+
attempted assignment not approved by NVIDIA in writing shall
|
| 439 |
+
be void and of no effect. NVIDIA may assign, delegate or
|
| 440 |
+
transfer this Agreement and its rights and obligations, and if
|
| 441 |
+
to a non-affiliate you will be notified.
|
| 442 |
+
|
| 443 |
+
You agree to cooperate with NVIDIA and provide reasonably
|
| 444 |
+
requested information to verify your compliance with this
|
| 445 |
+
Agreement.
|
| 446 |
+
|
| 447 |
+
This Agreement will be governed in all respects by the laws of
|
| 448 |
+
the United States and of the State of Delaware as those laws
|
| 449 |
+
are applied to contracts entered into and performed entirely
|
| 450 |
+
within Delaware by Delaware residents, without regard to the
|
| 451 |
+
conflicts of laws principles. The United Nations Convention on
|
| 452 |
+
Contracts for the International Sale of Goods is specifically
|
| 453 |
+
disclaimed. You agree to all terms of this Agreement in the
|
| 454 |
+
English language.
|
| 455 |
+
|
| 456 |
+
The state or federal courts residing in Santa Clara County,
|
| 457 |
+
California shall have exclusive jurisdiction over any dispute
|
| 458 |
+
or claim arising out of this Agreement. Notwithstanding this,
|
| 459 |
+
you agree that NVIDIA shall still be allowed to apply for
|
| 460 |
+
injunctive remedies or an equivalent type of urgent legal
|
| 461 |
+
relief in any jurisdiction.
|
| 462 |
+
|
| 463 |
+
If any court of competent jurisdiction determines that any
|
| 464 |
+
provision of this Agreement is illegal, invalid or
|
| 465 |
+
unenforceable, such provision will be construed as limited to
|
| 466 |
+
the extent necessary to be consistent with and fully
|
| 467 |
+
enforceable under the law and the remaining provisions will
|
| 468 |
+
remain in full force and effect. Unless otherwise specified,
|
| 469 |
+
remedies are cumulative.
|
| 470 |
+
|
| 471 |
+
Each party acknowledges and agrees that the other is an
|
| 472 |
+
independent contractor in the performance of this Agreement.
|
| 473 |
+
|
| 474 |
+
The SDK has been developed entirely at private expense and is
|
| 475 |
+
“commercial items” consisting of “commercial computer
|
| 476 |
+
software” and “commercial computer software
|
| 477 |
+
documentation” provided with RESTRICTED RIGHTS. Use,
|
| 478 |
+
duplication or disclosure by the U.S. Government or a U.S.
|
| 479 |
+
Government subcontractor is subject to the restrictions in
|
| 480 |
+
this Agreement pursuant to DFARS 227.7202-3(a) or as set forth
|
| 481 |
+
in subparagraphs (c)(1) and (2) of the Commercial Computer
|
| 482 |
+
Software - Restricted Rights clause at FAR 52.227-19, as
|
| 483 |
+
applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas
|
| 484 |
+
Expressway, Santa Clara, CA 95051.
|
| 485 |
+
|
| 486 |
+
The SDK is subject to United States export laws and
|
| 487 |
+
regulations. You agree that you will not ship, transfer or
|
| 488 |
+
export the SDK into any country, or use the SDK in any manner,
|
| 489 |
+
prohibited by the United States Bureau of Industry and
|
| 490 |
+
Security or economic sanctions regulations administered by the
|
| 491 |
+
U.S. Department of Treasury’s Office of Foreign Assets
|
| 492 |
+
Control (OFAC), or any applicable export laws, restrictions or
|
| 493 |
+
regulations. These laws include restrictions on destinations,
|
| 494 |
+
end users and end use. By accepting this Agreement, you
|
| 495 |
+
confirm that you are not a resident or citizen of any country
|
| 496 |
+
currently embargoed by the U.S. and that you are not otherwise
|
| 497 |
+
prohibited from receiving the SDK.
|
| 498 |
+
|
| 499 |
+
Any notice delivered by NVIDIA to you under this Agreement
|
| 500 |
+
will be delivered via mail, email or fax. You agree that any
|
| 501 |
+
notices that NVIDIA sends you electronically will satisfy any
|
| 502 |
+
legal communication requirements. Please direct your legal
|
| 503 |
+
notices or other correspondence to NVIDIA Corporation, 2788
|
| 504 |
+
San Tomas Expressway, Santa Clara, California 95051, United
|
| 505 |
+
States of America, Attention: Legal Department.
|
| 506 |
+
|
| 507 |
+
This Agreement and any exhibits incorporated into this
|
| 508 |
+
Agreement constitute the entire agreement of the parties with
|
| 509 |
+
respect to the subject matter of this Agreement and supersede
|
| 510 |
+
all prior negotiations or documentation exchanged between the
|
| 511 |
+
parties relating to this SDK license. Any additional and/or
|
| 512 |
+
conflicting terms on documents issued by you are null, void,
|
| 513 |
+
and invalid. Any amendment or waiver under this Agreement
|
| 514 |
+
shall be in writing and signed by representatives of both
|
| 515 |
+
parties.
|
| 516 |
+
|
| 517 |
+
|
| 518 |
+
2. CUDA Toolkit Supplement to Software License Agreement for
|
| 519 |
+
NVIDIA Software Development Kits
|
| 520 |
+
------------------------------------------------------------
|
| 521 |
+
|
| 522 |
+
|
| 523 |
+
Release date: August 16, 2018
|
| 524 |
+
-----------------------------
|
| 525 |
+
|
| 526 |
+
The terms in this supplement govern your use of the NVIDIA
|
| 527 |
+
CUDA Toolkit SDK under the terms of your license agreement
|
| 528 |
+
(“Agreement”) as modified by this supplement. Capitalized
|
| 529 |
+
terms used but not defined below have the meaning assigned to
|
| 530 |
+
them in the Agreement.
|
| 531 |
+
|
| 532 |
+
This supplement is an exhibit to the Agreement and is
|
| 533 |
+
incorporated as an integral part of the Agreement. In the
|
| 534 |
+
event of conflict between the terms in this supplement and the
|
| 535 |
+
terms in the Agreement, the terms in this supplement govern.
|
| 536 |
+
|
| 537 |
+
|
| 538 |
+
2.1. License Scope
|
| 539 |
+
|
| 540 |
+
The SDK is licensed for you to develop applications only for
|
| 541 |
+
use in systems with NVIDIA GPUs.
|
| 542 |
+
|
| 543 |
+
|
| 544 |
+
2.2. Distribution
|
| 545 |
+
|
| 546 |
+
The portions of the SDK that are distributable under the
|
| 547 |
+
Agreement are listed in Attachment A.
|
| 548 |
+
|
| 549 |
+
|
| 550 |
+
2.3. Operating Systems
|
| 551 |
+
|
| 552 |
+
Those portions of the SDK designed exclusively for use on the
|
| 553 |
+
Linux or FreeBSD operating systems, or other operating systems
|
| 554 |
+
derived from the source code to these operating systems, may
|
| 555 |
+
be copied and redistributed for use in accordance with this
|
| 556 |
+
Agreement, provided that the object code files are not
|
| 557 |
+
modified in any way (except for unzipping of compressed
|
| 558 |
+
files).
|
| 559 |
+
|
| 560 |
+
|
| 561 |
+
2.4. Audio and Video Encoders and Decoders
|
| 562 |
+
|
| 563 |
+
You acknowledge and agree that it is your sole responsibility
|
| 564 |
+
to obtain any additional third-party licenses required to
|
| 565 |
+
make, have made, use, have used, sell, import, and offer for
|
| 566 |
+
sale your products or services that include or incorporate any
|
| 567 |
+
third-party software and content relating to audio and/or
|
| 568 |
+
video encoders and decoders from, including but not limited
|
| 569 |
+
to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A.,
|
| 570 |
+
MPEG-LA, and Coding Technologies. NVIDIA does not grant to you
|
| 571 |
+
under this Agreement any necessary patent or other rights with
|
| 572 |
+
respect to any audio and/or video encoders and decoders.
|
| 573 |
+
|
| 574 |
+
|
| 575 |
+
2.5. Licensing
|
| 576 |
+
|
| 577 |
+
If the distribution terms in this Agreement are not suitable
|
| 578 |
+
for your organization, or for any questions regarding this
|
| 579 |
+
Agreement, please contact NVIDIA at
|
| 580 | |
| 581 |
+
|
| 582 |
+
|
| 583 |
+
2.6. Attachment A
|
| 584 |
+
|
| 585 |
+
The following portions of the SDK are distributable under the
|
| 586 |
+
Agreement:
|
| 587 |
+
|
| 588 |
+
Component
|
| 589 |
+
|
| 590 |
+
CUDA Runtime
|
| 591 |
+
|
| 592 |
+
Windows
|
| 593 |
+
|
| 594 |
+
cudart.dll, cudart_static.lib, cudadevrt.lib
|
| 595 |
+
|
| 596 |
+
Mac OSX
|
| 597 |
+
|
| 598 |
+
libcudart.dylib, libcudart_static.a, libcudadevrt.a
|
| 599 |
+
|
| 600 |
+
Linux
|
| 601 |
+
|
| 602 |
+
libcudart.so, libcudart_static.a, libcudadevrt.a
|
| 603 |
+
|
| 604 |
+
Android
|
| 605 |
+
|
| 606 |
+
libcudart.so, libcudart_static.a, libcudadevrt.a
|
| 607 |
+
|
| 608 |
+
Component
|
| 609 |
+
|
| 610 |
+
CUDA FFT Library
|
| 611 |
+
|
| 612 |
+
Windows
|
| 613 |
+
|
| 614 |
+
cufft.dll, cufftw.dll, cufft.lib, cufftw.lib
|
| 615 |
+
|
| 616 |
+
Mac OSX
|
| 617 |
+
|
| 618 |
+
libcufft.dylib, libcufft_static.a, libcufftw.dylib,
|
| 619 |
+
libcufftw_static.a
|
| 620 |
+
|
| 621 |
+
Linux
|
| 622 |
+
|
| 623 |
+
libcufft.so, libcufft_static.a, libcufftw.so,
|
| 624 |
+
libcufftw_static.a
|
| 625 |
+
|
| 626 |
+
Android
|
| 627 |
+
|
| 628 |
+
libcufft.so, libcufft_static.a, libcufftw.so,
|
| 629 |
+
libcufftw_static.a
|
| 630 |
+
|
| 631 |
+
Component
|
| 632 |
+
|
| 633 |
+
CUDA BLAS Library
|
| 634 |
+
|
| 635 |
+
Windows
|
| 636 |
+
|
| 637 |
+
cublas.dll, cublasLt.dll
|
| 638 |
+
|
| 639 |
+
Mac OSX
|
| 640 |
+
|
| 641 |
+
libcublas.dylib, libcublasLt.dylib, libcublas_static.a,
|
| 642 |
+
libcublasLt_static.a
|
| 643 |
+
|
| 644 |
+
Linux
|
| 645 |
+
|
| 646 |
+
libcublas.so, libcublasLt.so, libcublas_static.a,
|
| 647 |
+
libcublasLt_static.a
|
| 648 |
+
|
| 649 |
+
Android
|
| 650 |
+
|
| 651 |
+
libcublas.so, libcublasLt.so, libcublas_static.a,
|
| 652 |
+
libcublasLt_static.a
|
| 653 |
+
|
| 654 |
+
Component
|
| 655 |
+
|
| 656 |
+
NVIDIA "Drop-in" BLAS Library
|
| 657 |
+
|
| 658 |
+
Windows
|
| 659 |
+
|
| 660 |
+
nvblas.dll
|
| 661 |
+
|
| 662 |
+
Mac OSX
|
| 663 |
+
|
| 664 |
+
libnvblas.dylib
|
| 665 |
+
|
| 666 |
+
Linux
|
| 667 |
+
|
| 668 |
+
libnvblas.so
|
| 669 |
+
|
| 670 |
+
Component
|
| 671 |
+
|
| 672 |
+
CUDA Sparse Matrix Library
|
| 673 |
+
|
| 674 |
+
Windows
|
| 675 |
+
|
| 676 |
+
cusparse.dll, cusparse.lib
|
| 677 |
+
|
| 678 |
+
Mac OSX
|
| 679 |
+
|
| 680 |
+
libcusparse.dylib, libcusparse_static.a
|
| 681 |
+
|
| 682 |
+
Linux
|
| 683 |
+
|
| 684 |
+
libcusparse.so, libcusparse_static.a
|
| 685 |
+
|
| 686 |
+
Android
|
| 687 |
+
|
| 688 |
+
libcusparse.so, libcusparse_static.a
|
| 689 |
+
|
| 690 |
+
Component
|
| 691 |
+
|
| 692 |
+
CUDA Linear Solver Library
|
| 693 |
+
|
| 694 |
+
Windows
|
| 695 |
+
|
| 696 |
+
cusolver.dll, cusolver.lib
|
| 697 |
+
|
| 698 |
+
Mac OSX
|
| 699 |
+
|
| 700 |
+
libcusolver.dylib, libcusolver_static.a
|
| 701 |
+
|
| 702 |
+
Linux
|
| 703 |
+
|
| 704 |
+
libcusolver.so, libcusolver_static.a
|
| 705 |
+
|
| 706 |
+
Android
|
| 707 |
+
|
| 708 |
+
libcusolver.so, libcusolver_static.a
|
| 709 |
+
|
| 710 |
+
Component
|
| 711 |
+
|
| 712 |
+
CUDA Random Number Generation Library
|
| 713 |
+
|
| 714 |
+
Windows
|
| 715 |
+
|
| 716 |
+
curand.dll, curand.lib
|
| 717 |
+
|
| 718 |
+
Mac OSX
|
| 719 |
+
|
| 720 |
+
libcurand.dylib, libcurand_static.a
|
| 721 |
+
|
| 722 |
+
Linux
|
| 723 |
+
|
| 724 |
+
libcurand.so, libcurand_static.a
|
| 725 |
+
|
| 726 |
+
Android
|
| 727 |
+
|
| 728 |
+
libcurand.so, libcurand_static.a
|
| 729 |
+
|
| 730 |
+
Component
|
| 731 |
+
|
| 732 |
+
CUDA Accelerated Graph Library
|
| 733 |
+
|
| 734 |
+
Component
|
| 735 |
+
|
| 736 |
+
NVIDIA Performance Primitives Library
|
| 737 |
+
|
| 738 |
+
Windows
|
| 739 |
+
|
| 740 |
+
nppc.dll, nppc.lib, nppial.dll, nppial.lib, nppicc.dll,
|
| 741 |
+
nppicc.lib, nppicom.dll, nppicom.lib, nppidei.dll,
|
| 742 |
+
nppidei.lib, nppif.dll, nppif.lib, nppig.dll, nppig.lib,
|
| 743 |
+
nppim.dll, nppim.lib, nppist.dll, nppist.lib, nppisu.dll,
|
| 744 |
+
nppisu.lib, nppitc.dll, nppitc.lib, npps.dll, npps.lib
|
| 745 |
+
|
| 746 |
+
Mac OSX
|
| 747 |
+
|
| 748 |
+
libnppc.dylib, libnppc_static.a, libnppial.dylib,
|
| 749 |
+
libnppial_static.a, libnppicc.dylib, libnppicc_static.a,
|
| 750 |
+
libnppicom.dylib, libnppicom_static.a, libnppidei.dylib,
|
| 751 |
+
libnppidei_static.a, libnppif.dylib, libnppif_static.a,
|
| 752 |
+
libnppig.dylib, libnppig_static.a, libnppim.dylib,
|
| 753 |
+
libnppisu_static.a, libnppitc.dylib, libnppitc_static.a,
|
| 754 |
+
libnpps.dylib, libnpps_static.a
|
| 755 |
+
|
| 756 |
+
Linux
|
| 757 |
+
|
| 758 |
+
libnppc.so, libnppc_static.a, libnppial.so,
|
| 759 |
+
libnppial_static.a, libnppicc.so, libnppicc_static.a,
|
| 760 |
+
libnppicom.so, libnppicom_static.a, libnppidei.so,
|
| 761 |
+
libnppidei_static.a, libnppif.so, libnppif_static.a
|
| 762 |
+
libnppig.so, libnppig_static.a, libnppim.so,
|
| 763 |
+
libnppim_static.a, libnppist.so, libnppist_static.a,
|
| 764 |
+
libnppisu.so, libnppisu_static.a, libnppitc.so
|
| 765 |
+
libnppitc_static.a, libnpps.so, libnpps_static.a
|
| 766 |
+
|
| 767 |
+
Android
|
| 768 |
+
|
| 769 |
+
libnppc.so, libnppc_static.a, libnppial.so,
|
| 770 |
+
libnppial_static.a, libnppicc.so, libnppicc_static.a,
libnppicom.so, libnppicom_static.a, libnppidei.so,
libnppidei_static.a, libnppif.so, libnppif_static.a
libnppig.so, libnppig_static.a, libnppim.so,
libnppim_static.a, libnppist.so, libnppist_static.a,
libnppisu.so, libnppisu_static.a, libnppitc.so
libnppitc_static.a, libnpps.so, libnpps_static.a

Component: NVIDIA JPEG Library
    Linux: libnvjpeg.so, libnvjpeg_static.a

Component: Internal common library required for statically linking to
           cuBLAS, cuSPARSE, cuFFT, cuRAND, nvJPEG and NPP
    Mac OSX: libculibos.a
    Linux: libculibos.a

Component: NVIDIA Runtime Compilation Library and Header
    All: nvrtc.h
    Windows: nvrtc.dll, nvrtc-builtins.dll
    Mac OSX: libnvrtc.dylib, libnvrtc-builtins.dylib
    Linux: libnvrtc.so, libnvrtc-builtins.so

Component: NVIDIA Optimizing Compiler Library
    Windows: nvvm.dll
    Mac OSX: libnvvm.dylib
    Linux: libnvvm.so

Component: NVIDIA Common Device Math Functions Library
    Windows: libdevice.10.bc
    Mac OSX: libdevice.10.bc
    Linux: libdevice.10.bc

Component: CUDA Occupancy Calculation Header Library
    All: cuda_occupancy.h

Component: CUDA Half Precision Headers
    All: cuda_fp16.h, cuda_fp16.hpp

Component: CUDA Profiling Tools Interface (CUPTI) Library
    Windows: cupti.dll
    Mac OSX: libcupti.dylib
    Linux: libcupti.so

Component: NVIDIA Tools Extension Library
    Windows: nvToolsExt.dll, nvToolsExt.lib
    Mac OSX: libnvToolsExt.dylib
    Linux: libnvToolsExt.so

Component: NVIDIA CUDA Driver Libraries
    Linux: libcuda.so, libnvidia-fatbinaryloader.so, libnvidia-ptxjitcompiler.so

The NVIDIA CUDA Driver Libraries are only distributable in
applications that meet this criteria:

1. The application was developed starting from a NVIDIA CUDA
   container obtained from Docker Hub or the NVIDIA GPU
   Cloud, and

2. The resulting application is packaged as a Docker
   container and distributed to users on Docker Hub or the
   NVIDIA GPU Cloud only.

2.7. Attachment B

Additional Licensing Obligations

The following third party components included in the SOFTWARE
are licensed to Licensee pursuant to the following terms and
conditions:

1. Licensee's use of the GDB third party component is
subject to the terms and conditions of GNU GPL v3:

This product includes copyrighted third-party software licensed
under the terms of the GNU General Public License v3 ("GPL v3").
All third-party software packages are copyright by their respective
authors. GPL v3 terms and conditions are hereby incorporated into
the Agreement by this reference: http://www.gnu.org/licenses/gpl.txt

Consistent with these licensing requirements, the software
listed below is provided under the terms of the specified
open source software licenses. To obtain source code for
software provided under licenses that require
redistribution of source code, including the GNU General
Public License (GPL) and GNU Lesser General Public License
(LGPL), contact [email protected]. This offer is
valid for a period of three (3) years from the date of the
distribution of this product by NVIDIA CORPORATION.

Component     License
CUDA-GDB      GPL v3

2. Licensee represents and warrants that any and all third
party licensing and/or royalty payment obligations in
connection with Licensee's use of the H.264 video codecs
are solely the responsibility of Licensee.

3. Licensee's use of the Thrust library is subject to the
terms and conditions of the Apache License Version 2.0.
All third-party software packages are copyright by their
respective authors. Apache License Version 2.0 terms and
conditions are hereby incorporated into the Agreement by
this reference.
http://www.apache.org/licenses/LICENSE-2.0.html

In addition, Licensee acknowledges the following notice:
Thrust includes source code from the Boost Iterator,
Tuple, System, and Random Number libraries.

Boost Software License - Version 1.0 - August 17th, 2003
. . . .

Permission is hereby granted, free of charge, to any person or
organization obtaining a copy of the software and accompanying
documentation covered by this license (the "Software") to use,
reproduce, display, distribute, execute, and transmit the Software,
and to prepare derivative works of the Software, and to permit
third-parties to whom the Software is furnished to do so, all
subject to the following:

The copyright notices in the Software and this entire statement,
including the above license grant, this restriction and the following
disclaimer, must be included in all copies of the Software, in whole
or in part, and all derivative works of the Software, unless such
copies or derivative works are solely in the form of machine-executable
object code generated by a source language processor.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.

4. Licensee's use of the LLVM third party component is
subject to the following terms and conditions:

======================================================
LLVM Release License
======================================================
University of Illinois/NCSA
Open Source License

Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign.
All rights reserved.

Developed by:

LLVM Team

University of Illinois at Urbana-Champaign

http://llvm.org

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal with the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimers.

* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimers in the
documentation and/or other materials provided with the distribution.

* Neither the names of the LLVM Team, University of Illinois at Urbana-
Champaign, nor the names of its contributors may be used to endorse or
promote products derived from this Software without specific prior
written permission.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS WITH THE SOFTWARE.

5. Licensee's use (e.g. nvprof) of the PCRE third party
component is subject to the following terms and
conditions:

------------
PCRE LICENCE
------------
PCRE is a library of functions to support regular expressions whose syntax
and semantics are as close as possible to those of the Perl 5 language.
Release 8 of PCRE is distributed under the terms of the "BSD" licence, as
specified below. The documentation for PCRE, supplied in the "doc"
directory, is distributed under the same terms as the software itself. The
basic library functions are written in C and are freestanding. Also
included in the distribution is a set of C++ wrapper functions, and a just-
in-time compiler that can be used to optimize pattern matching. These are
both optional features that can be omitted when the library is built.

THE BASIC LIBRARY FUNCTIONS
---------------------------
Written by: Philip Hazel
Email local part: ph10
Email domain: cam.ac.uk
University of Cambridge Computing Service,
Cambridge, England.
Copyright (c) 1997-2012 University of Cambridge
All rights reserved.

PCRE JUST-IN-TIME COMPILATION SUPPORT
-------------------------------------
Written by: Zoltan Herczeg
Email local part: hzmester
Emain domain: freemail.hu
Copyright(c) 2010-2012 Zoltan Herczeg
All rights reserved.

STACK-LESS JUST-IN-TIME COMPILER
--------------------------------
Written by: Zoltan Herczeg
Email local part: hzmester
Emain domain: freemail.hu
Copyright(c) 2009-2012 Zoltan Herczeg
All rights reserved.

THE C++ WRAPPER FUNCTIONS
-------------------------
Contributed by: Google Inc.
Copyright (c) 2007-2012, Google Inc.
All rights reserved.

THE "BSD" LICENCE
-----------------
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.

* Neither the name of the University of Cambridge nor the name of Google
Inc. nor the names of their contributors may be used to endorse or
promote products derived from this software without specific prior
written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

6. Some of the cuBLAS library routines were written by or
derived from code written by Vasily Volkov and are subject
to the Modified Berkeley Software Distribution License as
follows:

Copyright (c) 2007-2009, Regents of the University of California

All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the University of California, Berkeley nor
the names of its contributors may be used to endorse or promote
products derived from this software without specific prior
written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

7. Some of the cuBLAS library routines were written by or
derived from code written by Davide Barbieri and are
subject to the Modified Berkeley Software Distribution
License as follows:

Copyright (c) 2008-2009 Davide Barbieri @ University of Rome Tor Vergata.

All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* The name of the author may not be used to endorse or promote
products derived from this software without specific prior
written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

8. Some of the cuBLAS library routines were derived from
code developed by the University of Tennessee and are
subject to the Modified Berkeley Software Distribution
License as follows:

Copyright (c) 2010 The University of Tennessee.

All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer listed in this license in the documentation and/or
other materials provided with the distribution.
* Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

9. Some of the cuBLAS library routines were written by or
derived from code written by Jonathan Hogg and are subject
to the Modified Berkeley Software Distribution License as
follows:

Copyright (c) 2012, The Science and Technology Facilities Council (STFC).

All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the STFC nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE STFC BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

10. Some of the cuBLAS library routines were written by or
derived from code written by Ahmad M. Abdelfattah, David
Keyes, and Hatem Ltaief, and are subject to the Apache
License, Version 2.0, as follows:

-- (C) Copyright 2013 King Abdullah University of Science and Technology
Authors:
Ahmad Abdelfattah ([email protected])
David Keyes ([email protected])
Hatem Ltaief ([email protected])

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the King Abdullah University of Science and
Technology nor the names of its contributors may be used to endorse
or promote products derived from this software without specific prior
written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE

11. Some of the cuSPARSE library routines were written by or
derived from code written by Li-Wen Chang and are subject
to the NCSA Open Source License as follows:

Copyright (c) 2012, University of Illinois.

All rights reserved.

Developed by: IMPACT Group, University of Illinois, http://impact.crhc.illinois.edu

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal with the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimers in the documentation and/or other materials provided
with the distribution.
* Neither the names of IMPACT Group, University of Illinois, nor
the names of its contributors may be used to endorse or promote
products derived from this Software without specific prior
written permission.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
SOFTWARE.

12. Some of the cuRAND library routines were written by or
derived from code written by Mutsuo Saito and Makoto
Matsumoto and are subject to the following license:

Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima
University. All rights reserved.

Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima
University and University of Tokyo. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the Hiroshima University nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

13. Some of the cuRAND library routines were derived from
code developed by D. E. Shaw Research and are subject to
the following license:

Copyright 2010-2011, D. E. Shaw Research.

All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions, and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions, and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of D. E. Shaw Research nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

14. Some of the Math library routines were written by or
derived from code developed by Norbert Juffa and are
subject to the following license:

Copyright (c) 2015-2017, Norbert Juffa
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

15. Licensee's use of the lz4 third party component is
subject to the following terms and conditions:

Copyright (C) 2011-2013, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

16. The NPP library uses code from the Boost Math Toolkit,
and is subject to the following license:

Boost Software License - Version 1.0 - August 17th, 2003
. . . .

Permission is hereby granted, free of charge, to any person or
organization obtaining a copy of the software and accompanying
documentation covered by this license (the "Software") to use,
reproduce, display, distribute, execute, and transmit the Software,
and to prepare derivative works of the Software, and to permit
third-parties to whom the Software is furnished to do so, all
subject to the following:

The copyright notices in the Software and this entire statement,
including the above license grant, this restriction and the following
disclaimer, must be included in all copies of the Software, in whole
or in part, and all derivative works of the Software, unless such
copies or derivative works are solely in the form of machine-executable
object code generated by a source language processor.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.

17. Portions of the Nsight Eclipse Edition is subject to the
following license:

The Eclipse Foundation makes available all content in this plug-in
("Content"). Unless otherwise indicated below, the Content is provided
to you under the terms and conditions of the Eclipse Public License
Version 1.0 ("EPL"). A copy of the EPL is available at http://
www.eclipse.org/legal/epl-v10.html. For purposes of the EPL, "Program"
will mean the Content.

If you did not receive this Content directly from the Eclipse
Foundation, the Content is being redistributed by another party
("Redistributor") and different terms and conditions may apply to your
use of any object code in the Content. Check the Redistributor's
license that was provided with the Content. If no such license exists,
contact the Redistributor. Unless otherwise indicated below, the terms
and conditions of the EPL still apply to any source code in the
Content and such source code may be obtained at http://www.eclipse.org.

18. Some of the cuBLAS library routines uses code from
OpenAI, which is subject to the following license:

License URL
https://github.com/openai/openai-gemm/blob/master/LICENSE

License Text
The MIT License

Copyright (c) 2016 OpenAI (http://openai.com), 2016 Google Inc.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

19. Licensee's use of the Visual Studio Setup Configuration
Samples is subject to the following license:

The MIT License (MIT)
Copyright (C) Microsoft Corporation. All rights reserved.

Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

20. Licensee's use of linmath.h header for CPU functions for
GL vector/matrix operations from lunarG is subject to the
Apache License Version 2.0.

21. The DX12-CUDA sample uses the d3dx12.h header, which is
subject to the MIT license.

-----------------
infer_4_30_0/lib/python3.10/site-packages/nvidia_nvtx_cu11-11.7.91.dist-info/METADATA
ADDED
@@ -0,0 +1,37 @@
Metadata-Version: 2.1
Name: nvidia-nvtx-cu11
Version: 11.7.91
Summary: NVIDIA Tools Extension
Home-page: https://developer.nvidia.com/cuda-zone
Author: Nvidia CUDA Installer Team
Author-email: [email protected]
License: NVIDIA Proprietary Software
Keywords: cuda,nvidia,runtime,machine learning,deep learning
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Education
Classifier: Intended Audience :: Science/Research
Classifier: License :: Other/Proprietary License
Classifier: Natural Language :: English
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Topic :: Scientific/Engineering
Classifier: Topic :: Scientific/Engineering :: Mathematics
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Classifier: Topic :: Software Development
Classifier: Topic :: Software Development :: Libraries
Classifier: Operating System :: POSIX :: Linux
Classifier: Operating System :: Microsoft :: Windows
Requires-Python: >=3
License-File: License.txt
Requires-Dist: setuptools
Requires-Dist: wheel

A C-based API for annotating events, code ranges, and resources in your applications. Applications which integrate NVTX can use the Visual Profiler to capture and visualize these events and ranges.
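The METADATA summary above describes NVTX as a C-based API for annotating events and code ranges. As a rough sketch only (this code is not part of the packaged files; the marker text, the dummy work loop, and the build line below are invented for illustration), a program using the nvToolsExt.h header listed in the RECORD below could look like this:

    #include <nvToolsExt.h>  /* header shipped under nvidia/nvtx/include/ in this wheel */
    #include <stdio.h>

    static void do_work(void)
    {
        /* stand-in for the CPU/GPU work to be shown on the profiler timeline */
        for (volatile int i = 0; i < 1000000; ++i) { }
    }

    int main(void)
    {
        nvtxMarkA("program start");   /* instantaneous marker event */
        nvtxRangePushA("do_work");    /* open a named range */
        do_work();
        nvtxRangePop();               /* close the most recently opened range */
        puts("done");
        return 0;
    }

Built roughly as cc app.c -I<path to nvidia/nvtx/include> -L<path to nvidia/nvtx/lib> -lnvToolsExt (the exact paths depend on where the wheel is installed), the marker and range then appear on the Visual Profiler or Nsight Systems timeline.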
infer_4_30_0/lib/python3.10/site-packages/nvidia_nvtx_cu11-11.7.91.dist-info/RECORD
ADDED
@@ -0,0 +1,29 @@
nvidia/nvtx/include/nvToolsExt.h,sha256=OiT6v1G2-vlkYnpDQZjiGT1O-THDyk1gw2021qMRvQM,53680
nvidia/nvtx/include/nvToolsExtCuda.h,sha256=UDA1pbmvoRFmlJ11Et9tIMEztOtOVw-10mO27Q6K8jg,6009
nvidia/nvtx/include/nvToolsExtCudaRt.h,sha256=6IbgdRGObly53jzRqvsZ4FQoTrXJOJwSyCOLuXr9ncA,5192
nvidia/nvtx/include/nvToolsExtOpenCL.h,sha256=gETZH9ch_o6MYE_BYQ2pj9SSuxyAo1H4ptmRK-DMWSo,8360
nvidia/nvtx/include/nvToolsExtSync.h,sha256=wqONIiycUPaUUCzQBmCippilgKt8sOL9tpzG773u0nY,14562
nvidia/nvtx/include/nvtx3/nvToolsExt.h,sha256=TFEF3fx1043EwMdbS7FqvvavwK0koZeGrIOAsCrB12s,52247
nvidia/nvtx/include/nvtx3/nvToolsExtCuda.h,sha256=4ZbZHUMcmHRf4SdKB7nH0E3uHd_9ZhZBuwuWPItK-Vs,6204
nvidia/nvtx/include/nvtx3/nvToolsExtCudaRt.h,sha256=boW0zdYobNFFE9wwxCyzBGBLcSGtdbQ5osKjQGNC2E8,5393
nvidia/nvtx/include/nvtx3/nvToolsExtOpenCL.h,sha256=RPfsZl3lHAPIOCzTipmz07-vaiIO4cxelcx12EjB2L0,8563
nvidia/nvtx/include/nvtx3/nvToolsExtSync.h,sha256=C-HIVBaupxYom3BqMggQ_ePq1bxFhw8kXsOfYJKBWrI,14756
nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxImpl.h,sha256=jEnYF3MyLsD72euw2It3Bz0X0GK4Xv_htEd8BeIrPjY,23333
nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxImplCore.h,sha256=sYpWqZfYrjsMddxtezPX3qSTIbAOn4dlEoLiYQ9M2nM,9756
nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxImplCudaRt_v3.h,sha256=SoaiprvsI80yLmEAnlFX0iFufv6RtKjjMMrVwQZjjQI,4775
nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxImplCuda_v3.h,sha256=IEor-ISqComCRGVDdIzKBLU3eWCuDI0Igqz-eRKKcvg,5550
nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxImplOpenCL_v3.h,sha256=iPR2x74bJE3plFQBT9FWGBaTm4sC-Pll6WAjpKRnz7g,8275
nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxImplSync_v3.h,sha256=TqwQfEUVbwc58bpHioE13NMweFhOuHXNql65BnLzhvc,5022
nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxInit.h,sha256=foajOFacvLGx3BN5ntw5v8o4J3OY4hqkVZE5ZC0x3e4,14716
nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxInitDecls.h,sha256=-Qyxcy9CDXOBhEtYZ8L7iYd6daJ9aCeyQM48X0BafMM,9361
nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxInitDefs.h,sha256=dLhOV4knhNrmT2DnUNzXreOt_Qc6GAa3yIlmqJFCeVI,35432
nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxLinkOnce.h,sha256=Jp-z6LTz_p8fKRulcFfdcskIxzcZ6ybbHkGB9mpJa2M,3863
nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxTypes.h,sha256=jkbCwyvIP1G-Ef8SwYp4kDi69hjZbzaxKSk7ScgrNI8,17352
nvidia/nvtx/lib/libnvToolsExt.so.1,sha256=hH148nXIzJdEKieAcyBL3BoACf_CVZv3JIxw2SEF39w,40136
nvidia_nvtx_cu11-11.7.91.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
nvidia_nvtx_cu11-11.7.91.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262
nvidia_nvtx_cu11-11.7.91.dist-info/METADATA,sha256=URcFR24LaHjMEr5oHAmIM5SKaWQZ1rZoxfT9dL_PoT4,1706
nvidia_nvtx_cu11-11.7.91.dist-info/RECORD,,
nvidia_nvtx_cu11-11.7.91.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
nvidia_nvtx_cu11-11.7.91.dist-info/WHEEL,sha256=-kQi_VMfvRQozZJT7HUPMfY-5vLo0LVTmAylNJ3Ft98,106
nvidia_nvtx_cu11-11.7.91.dist-info/top_level.txt,sha256=Og84g1rQEkxMA2-QhXVH9uFoXABqy0ZEDYG38XjOTCk,12
infer_4_30_0/lib/python3.10/site-packages/nvidia_nvtx_cu11-11.7.91.dist-info/WHEEL
ADDED
@@ -0,0 +1,5 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.37.1)
Root-Is-Purelib: true
Tag: py3-none-manylinux1_x86_64