# Copyright (c) 2023, Tri Dao.
from typing import Optional, Union
import torch
import torch.nn as nn
from ._ops import ops as flash_attn_3_cuda
def maybe_contiguous(x):
return x.contiguous() if x is not None and x.stride(-1) != 1 else x
def _flash_attn_forward(
q,
k,
v,
k_new,
v_new,
qv,
out,
cu_seqlens_q,
cu_seqlens_k,
cu_seqlens_k_new,
seqused_q,
seqused_k,
max_seqlen_q,
max_seqlen_k,
page_table,
kv_batch_idx,
leftpad_k,
rotary_cos,
rotary_sin,
seqlens_rotary,
q_descale,
k_descale,
v_descale,
softmax_scale,
causal,
window_size=(-1, -1),
attention_chunk=0,
softcap=0.0,
rotary_interleaved=True,
scheduler_metadata=None,
num_splits=1,
pack_gqa=None,
sm_margin=0):
q, k, k_new, v_new = [maybe_contiguous(x) for x in (q, k, k_new, v_new)]
v = v.contiguous() if v.stride(-1) != 1 and v.stride(-3) != 1 else v
cu_seqlens_q, cu_seqlens_k, cu_seqlens_k_new = [
maybe_contiguous(x) for x in (cu_seqlens_q, cu_seqlens_k, cu_seqlens_k_new)
]
seqused_q, seqused_k = [maybe_contiguous(x) for x in (seqused_q, seqused_k)]
page_table, kv_batch_idx, leftpad_k = [
maybe_contiguous(x) for x in (page_table, kv_batch_idx, leftpad_k)
]
rotary_cos, rotary_sin = [maybe_contiguous(x) for x in (rotary_cos, rotary_sin)]
seqlens_rotary = maybe_contiguous(seqlens_rotary)
out, softmax_lse, *rest = flash_attn_3_cuda.fwd(
q,
k,
v,
k_new,
v_new,
qv,
out,
cu_seqlens_q,
cu_seqlens_k,
cu_seqlens_k_new,
seqused_q,
seqused_k,
max_seqlen_q,
max_seqlen_k,
page_table,
kv_batch_idx,
leftpad_k,
rotary_cos,
rotary_sin,
seqlens_rotary,
q_descale,
k_descale,
v_descale,
softmax_scale,
causal,
window_size[0],
window_size[1],
attention_chunk,
softcap,
rotary_interleaved,
scheduler_metadata,
num_splits,
pack_gqa,
sm_margin,
)
return out, softmax_lse, *rest
def _flash_attn_backward(
dout,
q,
k,
v,
out,
softmax_lse,
cu_seqlens_q,
cu_seqlens_k,
    seqused_q,
    seqused_k,
max_seqlen_q,
max_seqlen_k,
dq,
dk,
dv,
softmax_scale,
causal,
window_size=(-1, -1),
softcap=0.0,
deterministic=False,
sm_margin=0,
):
# dq, dk, dv are allocated by us so they should already be contiguous
dout, q, k, v, out = [maybe_contiguous(x) for x in (dout, q, k, v, out)]
dq, dk, dv, softmax_d, *rest = flash_attn_3_cuda.bwd(
dout,
q,
k,
v,
out,
softmax_lse,
dq,
dk,
dv,
cu_seqlens_q,
cu_seqlens_k,
        seqused_q,
        seqused_k,
max_seqlen_q,
max_seqlen_k,
softmax_scale,
causal,
window_size[0],
window_size[1],
softcap,
deterministic,
sm_margin,
)
return dq, dk, dv, softmax_d
class FlashAttnQKVPackedFunc(torch.autograd.Function):
@staticmethod
def forward(
ctx,
qkv,
softmax_scale,
causal,
q_descale=None, k_descale=None, v_descale=None,
window_size=(-1, -1),
attention_chunk=0,
softcap=0.0,
deterministic=False,
num_heads_q=None,
sm_margin=0,
):
if softmax_scale is None:
softmax_scale = qkv.shape[-1] ** (-0.5)
if qkv.dim() == 5:
assert qkv.shape[-3] == 3
q, k, v = qkv.unbind(dim=-3)
else:
assert qkv.dim() == 4
assert num_heads_q is not None
num_heads_k = (qkv.shape[2] - num_heads_q) // 2
assert num_heads_k * 2 + num_heads_q == qkv.shape[2]
q, k, v = qkv.split([num_heads_q, num_heads_k, num_heads_k], dim=-2)
out, softmax_lse, *rest = _flash_attn_forward(
q,
k,
v,
None, None, # k_new, v_new
None, # qv
None, # out
None, None, None, # cu_seqlens_q/k/k_new
None, None, # seqused_q/k
None, None, # max_seqlen_q/k
None, None, None, # page_table, kv_batch_idx, leftpad_k,
None, None, None, # rotary_cos/sin, seqlens_rotary
q_descale, k_descale, v_descale,
softmax_scale,
causal=causal,
window_size=window_size,
attention_chunk=attention_chunk,
softcap=softcap,
sm_margin=sm_margin,
)
# ctx.save_for_backward(q, k, v, out_padded, softmax_lse)
ctx.save_for_backward(q, k, v, out, softmax_lse)
ctx.softmax_scale = softmax_scale
ctx.causal = causal
ctx.window_size = window_size
ctx.attention_chunk = attention_chunk
ctx.softcap = softcap
ctx.deterministic = deterministic
ctx.ndim = qkv.dim()
ctx.sm_margin = sm_margin
# return out, softmax_lse
return out
@staticmethod
def backward(ctx, dout, *args):
q, k, v, out, softmax_lse = ctx.saved_tensors
assert ctx.attention_chunk == 0, "FA3 backward does not support attention_chunk"
if ctx.ndim == 5:
qkv_shape = q.shape[:-2] + (3, *q.shape[-2:])
dqkv = torch.empty(qkv_shape, dtype=q.dtype, device=q.device)
dq, dk, dv = dqkv.unbind(dim=-3)
else:
num_heads_q = q.shape[2]
num_heads_k = k.shape[2]
qkv_shape = q.shape[:-2] + (num_heads_q + num_heads_k * 2, *q.shape[-1:])
dqkv = torch.empty(qkv_shape, dtype=q.dtype, device=q.device)
dq, dk, dv = dqkv.split([num_heads_q, num_heads_k, num_heads_k], dim=-2)
_flash_attn_backward(
dout,
q,
k,
v,
out,
softmax_lse,
None, None, # cu_seqlens_q, cu_seqlens_k,
            None, None,  # seqused_q, seqused_k,
None, None, # max_seqlen_q, max_seqlen_k,
dq,
dk,
dv,
ctx.softmax_scale,
ctx.causal,
ctx.window_size,
ctx.softcap,
ctx.deterministic,
ctx.sm_margin,
)
dqkv = dqkv[..., : dout.shape[-1]] # We could have padded the head dimension
return dqkv, None, None, None, None, None, None, None, None, None, None, None
class FlashAttnFunc(torch.autograd.Function):
@staticmethod
def forward(
ctx,
q,
k,
v,
softmax_scale,
causal,
qv=None,
q_descale=None, k_descale=None, v_descale=None,
window_size=(-1, -1),
attention_chunk=0,
softcap=0.0,
num_splits=1,
pack_gqa=None,
deterministic=False,
sm_margin=0,
):
if softmax_scale is None:
softmax_scale = (q.shape[-1] + (qv.shape[-1] if qv is not None else 0)) ** (-0.5)
# out, q, k, v, out_padded, softmax_lse = _flash_attn_forward(
out, softmax_lse, *rest = _flash_attn_forward(
q,
k,
v,
None, None, # k_new, v_new
qv, # qv
None, # out
None, None, None, # cu_seqlens_q/k/k_new
None, None, # seqused_q/k
None, None, # max_seqlen_q/k
None, None, None, # page_table, kv_batch_idx, leftpad_k,
None, None, None, # rotary_cos/sin, seqlens_rotary
q_descale, k_descale, v_descale,
softmax_scale,
causal=causal,
window_size=window_size,
attention_chunk=attention_chunk,
softcap=softcap,
num_splits=num_splits,
pack_gqa=pack_gqa,
sm_margin=sm_margin,
)
# ctx.save_for_backward(q, k, v, out_padded, softmax_lse)
ctx.save_for_backward(q, k, v, out, softmax_lse)
ctx.softmax_scale = softmax_scale
ctx.causal = causal
ctx.window_size = window_size
ctx.attention_chunk = attention_chunk
ctx.softcap = softcap
ctx.deterministic = deterministic
ctx.sm_margin = sm_margin
return out, softmax_lse
@staticmethod
def backward(ctx, dout, *args):
q, k, v, out, softmax_lse = ctx.saved_tensors
assert ctx.attention_chunk == 0, "FA3 backward does not support attention_chunk"
dq, dk, dv = torch.empty_like(q), torch.empty_like(k), torch.empty_like(v)
_flash_attn_backward(
dout,
q,
k,
v,
out,
softmax_lse,
None, None, # cu_seqlens_q, cu_seqlens_k,
            None, None,  # seqused_q, seqused_k,
None, None, # max_seqlen_q, max_seqlen_k,
dq,
dk,
dv,
ctx.softmax_scale,
ctx.causal,
ctx.window_size,
ctx.softcap,
ctx.deterministic,
ctx.sm_margin,
)
dq = dq[..., : q.shape[-1]] # We could have padded the head dimension
dk = dk[..., : k.shape[-1]]
dv = dv[..., : v.shape[-1]]
return dq, dk, dv, None, None, None, None, None, None, None, None, None, None, None, None, None, None
class FlashAttnVarlenFunc(torch.autograd.Function):
@staticmethod
def forward(
ctx,
q,
k,
v,
cu_seqlens_q,
cu_seqlens_k,
seqused_q,
seqused_k,
max_seqlen_q,
max_seqlen_k,
softmax_scale,
causal,
qv=None,
q_descale=None, k_descale=None, v_descale=None,
window_size=(-1, -1),
attention_chunk=0,
softcap=0.0,
num_splits=1,
pack_gqa=None,
deterministic=False,
sm_margin=0,
):
if softmax_scale is None:
softmax_scale = (q.shape[-1] + (qv.shape[-1] if qv is not None else 0)) ** (-0.5)
# out, q, k, v, out_padded, softmax_lse = _flash_attn_varlen_forward(
out, softmax_lse, *rest = _flash_attn_forward(
q,
k,
v,
None, None, # k_new, v_new
qv, # qv
None, # out
cu_seqlens_q,
cu_seqlens_k,
None, # cu_seqlens_k_new
seqused_q,
seqused_k,
max_seqlen_q,
max_seqlen_k,
None, None, None, # page_table, kv_batch_idx, leftpad_k,
None, None, None, # rotary_cos/sin, seqlens_rotary
q_descale, k_descale, v_descale,
softmax_scale,
causal=causal,
window_size=window_size,
attention_chunk=attention_chunk,
softcap=softcap,
num_splits=num_splits,
pack_gqa=pack_gqa,
sm_margin=sm_margin,
)
# ctx.save_for_backward(q, k, v, out_padded, softmax_lse, cu_seqlens_q, cu_seqlens_k, seqused_q, seqused_k)
ctx.save_for_backward(q, k, v, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, seqused_q, seqused_k)
ctx.max_seqlen_q = max_seqlen_q
ctx.max_seqlen_k = max_seqlen_k
ctx.softmax_scale = softmax_scale
ctx.causal = causal
ctx.window_size = window_size
ctx.attention_chunk = attention_chunk
ctx.softcap = softcap
ctx.deterministic = deterministic
ctx.sm_margin = sm_margin
return out, softmax_lse
@staticmethod
def backward(ctx, dout, *args):
q, k, v, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, seqused_q, seqused_k = ctx.saved_tensors
assert ctx.attention_chunk == 0, "FA3 backward does not support attention_chunk"
dq, dk, dv = torch.empty_like(q), torch.empty_like(k), torch.empty_like(v)
_flash_attn_backward(
dout,
q,
k,
v,
out,
softmax_lse,
cu_seqlens_q,
cu_seqlens_k,
seqused_q,
seqused_k,
ctx.max_seqlen_q,
ctx.max_seqlen_k,
dq,
dk,
dv,
ctx.softmax_scale,
ctx.causal,
ctx.window_size,
ctx.softcap,
ctx.deterministic,
ctx.sm_margin,
)
dq = dq[..., : q.shape[-1]] # We could have padded the head dimension
dk = dk[..., : k.shape[-1]]
dv = dv[..., : v.shape[-1]]
return dq, dk, dv, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None
def flash_attn_qkvpacked_func(
qkv,
softmax_scale=None,
causal=False,
q_descale=None, k_descale=None, v_descale=None,
window_size=(-1, -1),
attention_chunk=0,
softcap=0.0,
deterministic=False,
num_heads_q=None,
sm_margin=0,
):
"""dropout_p should be set to 0.0 during evaluation
If Q, K, V are already stacked into 1 tensor, this function will be faster than
calling flash_attn_func on Q, K, V since the backward pass avoids explicit concatenation
of the gradients of Q, K, V.
    For multi-query and grouped-query attention (MQA/GQA), please use flash_attn_func.
If window_size != (-1, -1), implements sliding window local attention. Query at position i
will only attend to keys between [i - window_size[0], i + window_size[1]] inclusive.
Arguments:
qkv: (batch_size, seqlen, 3, nheads, headdim)
softmax_scale: float. The scaling of QK^T before applying softmax.
Default to 1 / sqrt(headdim).
causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
window_size: (left, right). If not (-1, -1), implements sliding window local attention.
softcap: float. Anything > 0 activates softcapping attention.
deterministic: bool. Whether to use the deterministic implementation of the backward pass,
which is slightly slower and uses more memory. The forward pass is always deterministic.
Return:
out: (batch_size, seqlen, nheads, headdim).
"""
return FlashAttnQKVPackedFunc.apply(
qkv,
softmax_scale,
causal,
q_descale, k_descale, v_descale,
window_size,
attention_chunk,
softcap,
deterministic,
num_heads_q,
sm_margin,
)
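
# Example usage for flash_attn_qkvpacked_func (a minimal sketch; the shapes, dtype, and
# "cuda" device below are illustrative assumptions, and an FA3-capable GPU is assumed):
#
#     qkv = torch.randn(2, 1024, 3, 8, 128, dtype=torch.bfloat16, device="cuda")
#     out = flash_attn_qkvpacked_func(qkv, causal=True)
#     # out has shape (batch_size, seqlen, nheads, headdim) == (2, 1024, 8, 128)
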
def flash_attn_func(
q,
k,
v,
softmax_scale=None,
causal=False,
qv=None,
q_descale=None, k_descale=None, v_descale=None,
window_size=(-1, -1),
attention_chunk=0,
softcap=0.0,
num_splits=1,
pack_gqa=None,
deterministic=False,
sm_margin=0,
):
"""dropout_p should be set to 0.0 during evaluation
Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
    For example, if Q has 6 heads and K, V have 2 heads, head 0, 1, 2 of Q will attend to head
    0 of K, V, and head 3, 4, 5 of Q will attend to head 1 of K, V.
If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
1 1 1 1 0
1 1 1 1 1
If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
0 0
0 0
0 0
1 0
1 1
If the row of the mask is all zero, the output will be zero.
If window_size != (-1, -1), implements sliding window local attention. Query at position i
will only attend to keys between
[i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
Arguments:
q: (batch_size, seqlen, nheads, headdim)
k: (batch_size, seqlen, nheads_k, headdim)
v: (batch_size, seqlen, nheads_k, headdim)
softmax_scale: float. The scaling of QK^T before applying softmax.
Default to 1 / sqrt(headdim).
causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
window_size: (left, right). If not (-1, -1), implements sliding window local attention.
deterministic: bool. Whether to use the deterministic implementation of the backward pass,
which is slightly slower and uses more memory. The forward pass is always deterministic.
Return:
out: (batch_size, seqlen, nheads, headdim).
        softmax_lse: (batch_size, nheads, seqlen). The logsumexp of each row of the matrix
            QK^T * scaling (e.g., log of the softmax normalization factor).
"""
return FlashAttnFunc.apply(
q,
k,
v,
softmax_scale,
causal,
qv,
q_descale, k_descale, v_descale,
window_size,
attention_chunk,
softcap,
num_splits,
pack_gqa,
deterministic,
sm_margin,
)
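
# Example usage for flash_attn_func with GQA (a minimal sketch; the head counts, shapes,
# dtype, and "cuda" device below are illustrative assumptions):
#
#     q = torch.randn(2, 1024, 32, 128, dtype=torch.bfloat16, device="cuda")
#     k = torch.randn(2, 1024, 8, 128, dtype=torch.bfloat16, device="cuda")  # nheads_k divides nheads
#     v = torch.randn(2, 1024, 8, 128, dtype=torch.bfloat16, device="cuda")
#     out, softmax_lse = flash_attn_func(q, k, v, causal=True)
#     # out: (2, 1024, 32, 128); softmax_lse: (2, 32, 1024)
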
def flash_attn_varlen_func(
q,
k,
v,
cu_seqlens_q,
cu_seqlens_k,
max_seqlen_q,
max_seqlen_k,
seqused_q=None,
seqused_k=None,
softmax_scale=None,
causal=False,
qv=None,
q_descale=None, k_descale=None, v_descale=None,
window_size=(-1, -1),
attention_chunk=0,
softcap=0.0,
num_splits=1,
pack_gqa=None,
deterministic=False,
sm_margin=0,
):
return FlashAttnVarlenFunc.apply(
q,
k,
v,
cu_seqlens_q,
cu_seqlens_k,
seqused_q,
seqused_k,
max_seqlen_q,
max_seqlen_k,
softmax_scale,
causal,
qv,
q_descale, k_descale, v_descale,
window_size,
attention_chunk,
softcap,
num_splits,
pack_gqa,
deterministic,
sm_margin,
)
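
# Example usage for flash_attn_varlen_func (a minimal sketch; the two packed sequences of
# lengths 3 and 5 below, the shapes, and the "cuda" device are illustrative assumptions):
#
#     # Two sequences packed along one "total tokens" dimension: lengths 3 and 5.
#     q = torch.randn(8, 16, 64, dtype=torch.bfloat16, device="cuda")
#     k = torch.randn(8, 16, 64, dtype=torch.bfloat16, device="cuda")
#     v = torch.randn(8, 16, 64, dtype=torch.bfloat16, device="cuda")
#     cu_seqlens = torch.tensor([0, 3, 8], dtype=torch.int32, device="cuda")
#     out, softmax_lse = flash_attn_varlen_func(
#         q, k, v, cu_seqlens, cu_seqlens, max_seqlen_q=5, max_seqlen_k=5, causal=True
#     )
#     # out: (total_tokens, nheads, headdim) == (8, 16, 64)
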
def flash_attn_combine(out_partial, lse_partial, out=None, out_dtype=None):
    # Combine partial attention outputs and their log-sum-exp values (e.g. computed over
    # different splits of the KV sequence) into a single attention output.
    return flash_attn_3_cuda.fwd_combine(out_partial, lse_partial, out, out_dtype)
def flash_attn_with_kvcache(
q,
k_cache,
v_cache,
k=None,
v=None,
qv=None,
rotary_cos=None,
rotary_sin=None,
    cache_seqlens: Optional[Union[int, torch.Tensor]] = None,
cache_batch_idx: Optional[torch.Tensor] = None,
cache_leftpad: Optional[torch.Tensor] = None,
page_table: Optional[torch.Tensor] = None,
cu_seqlens_q: Optional[torch.Tensor] = None,
cu_seqlens_k_new: Optional[torch.Tensor] = None,
max_seqlen_q: Optional[int] = None,
rotary_seqlens: Optional[torch.Tensor] = None,
q_descale: Optional[torch.Tensor] = None,
k_descale: Optional[torch.Tensor] = None,
v_descale: Optional[torch.Tensor] = None,
softmax_scale=None,
causal=False,
window_size=(-1, -1), # -1 means infinite context window
attention_chunk=0,
softcap=0.0, # 0.0 means deactivated
rotary_interleaved=True,
scheduler_metadata=None,
num_splits=0, # Can be tuned for speed
pack_gqa=None, # Can be tuned for speed
sm_margin=0, # Can be tuned if some SMs are used for communication
return_softmax_lse=False,
):
"""
If k and v are not None, k_cache and v_cache will be updated *inplace* with the new values from
k and v. This is useful for incremental decoding: you can pass in the cached keys/values from
the previous step, and update them with the new keys/values from the current step, and do
attention with the updated cache, all in 1 kernel.
If you pass in k / v, you must make sure that the cache is large enough to hold the new values.
For example, the KV cache could be pre-allocated with the max sequence length, and you can use
cache_seqlens to keep track of the current sequence lengths of each sequence in the batch.
Also apply rotary embedding if rotary_cos and rotary_sin are passed in. The key @k will be
rotated by rotary_cos and rotary_sin at indices cache_seqlens, cache_seqlens + 1, etc.
If causal or local (i.e., window_size != (-1, -1)), the query @q will be rotated by rotary_cos
and rotary_sin at indices cache_seqlens, cache_seqlens + 1, etc.
If not causal and not local, the query @q will be rotated by rotary_cos and rotary_sin at
indices cache_seqlens only (i.e. we consider all tokens in @q to be at position cache_seqlens).
See tests/test_flash_attn.py::test_flash_attn_kvcache for examples of how to use this function.
Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
    For example, if Q has 6 heads and K, V have 2 heads, head 0, 1, 2 of Q will attend to head
    0 of K, V, and head 3, 4, 5 of Q will attend to head 1 of K, V.
If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
1 1 1 1 0
1 1 1 1 1
If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
0 0
0 0
0 0
1 0
1 1
If the row of the mask is all zero, the output will be zero.
If window_size != (-1, -1), implements sliding window local attention. Query at position i
will only attend to keys between
[i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
Note: Does not support backward pass.
Arguments:
q: (batch_size, seqlen, nheads, headdim)
k_cache: (batch_size_cache, seqlen_cache, nheads_k, headdim) if there's no page_table,
or (num_blocks, page_block_size, nheads_k, headdim) if there's a page_table (i.e. paged KV cache)
page_block_size must be a multiple of 256.
v_cache: (batch_size_cache, seqlen_cache, nheads_k, headdim_v) if there's no page_table,
or (num_blocks, page_block_size, nheads_k, headdim_v) if there's a page_table (i.e. paged KV cache)
k [optional]: (batch_size, seqlen_new, nheads_k, headdim). If not None, we concatenate
k with k_cache, starting at the indices specified by cache_seqlens.
v [optional]: (batch_size, seqlen_new, nheads_k, headdim_v). Similar to k.
qv [optional]: (batch_size, seqlen, nheads, headdim_v)
rotary_cos [optional]: (seqlen_ro, rotary_dim / 2). If not None, we apply rotary embedding
to k and q. Only applicable if k and v are passed in. rotary_dim must be divisible by 16.
rotary_sin [optional]: (seqlen_ro, rotary_dim / 2). Similar to rotary_cos.
cache_seqlens: int, or (batch_size,), dtype torch.int32. The sequence lengths of the
KV cache.
cache_batch_idx: (batch_size,), dtype torch.int32. The indices used to index into the KV cache.
If None, we assume that the batch indices are [0, 1, 2, ..., batch_size - 1].
If the indices are not distinct, and k and v are provided, the values updated in the cache
might come from any of the duplicate indices.
cache_leftpad: (batch_size,), dtype torch.int32. The index that the KV cache starts. If None, assume 0.
page_table [optional]: (batch_size, max_num_blocks_per_seq), dtype torch.int32.
softmax_scale: float. The scaling of QK^T before applying softmax.
Default to 1 / sqrt(headdim).
causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
window_size: (left, right). If not (-1, -1), implements sliding window local attention.
softcap: float. Anything > 0 activates softcapping attention.
rotary_interleaved: bool. Only applicable if rotary_cos and rotary_sin are passed in.
If True, rotary embedding will combine dimensions 0 & 1, 2 & 3, etc. If False,
rotary embedding will combine dimensions 0 & rotary_dim / 2, 1 & rotary_dim / 2 + 1
(i.e. GPT-NeoX style).
num_splits: int. If > 1, split the key/value into this many chunks along the sequence.
If num_splits == 1, we don't split the key/value. If num_splits == 0, we use a heuristic
to automatically determine the number of splits.
Don't change this unless you know what you are doing.
return_softmax_lse: bool. Whether to return the logsumexp of the attention scores.
Return:
out: (batch_size, seqlen, nheads, headdim).
softmax_lse [optional, if return_softmax_lse=True]: (batch_size, nheads, seqlen). The
logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
normalization factor).
"""
assert k_cache.stride(-1) == 1, "k_cache must have contiguous last dimension"
assert v_cache.stride(-1) == 1, "v_cache must have contiguous last dimension"
if softmax_scale is None:
softmax_scale = (q.shape[-1] + (qv.shape[-1] if qv is not None else 0)) ** (-0.5)
if cache_seqlens is not None and isinstance(cache_seqlens, int):
cache_seqlens = torch.full(
(k_cache.shape[0],), cache_seqlens, dtype=torch.int32, device=k_cache.device
)
cache_seqlens = maybe_contiguous(cache_seqlens)
out, softmax_lse, *rest = _flash_attn_forward(
q,
k_cache,
v_cache,
k,
v,
qv,
None, # out
cu_seqlens_q,
None, # cu_seqlens_k
cu_seqlens_k_new,
None, # seqused_q
cache_seqlens,
max_seqlen_q,
None, # max_seqlen_k
page_table,
cache_batch_idx,
cache_leftpad,
rotary_cos,
rotary_sin,
rotary_seqlens,
q_descale, k_descale, v_descale,
softmax_scale,
causal=causal,
window_size=window_size,
attention_chunk=attention_chunk,
softcap=softcap,
rotary_interleaved=rotary_interleaved,
scheduler_metadata=scheduler_metadata,
num_splits=num_splits,
pack_gqa=pack_gqa,
sm_margin=sm_margin,
)
# return (out, softmax_lse) if return_softmax_lse else out
return (out, softmax_lse, *rest) if return_softmax_lse else out
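
# Example usage for flash_attn_with_kvcache during incremental decoding (a minimal sketch;
# the batch size, cache length, head counts, shapes, and "cuda" device are illustrative assumptions):
#
#     batch, nheads, nheads_k, headdim = 2, 32, 8, 128
#     k_cache = torch.zeros(batch, 4096, nheads_k, headdim, dtype=torch.bfloat16, device="cuda")
#     v_cache = torch.zeros_like(k_cache)
#     cache_seqlens = torch.full((batch,), 512, dtype=torch.int32, device="cuda")  # tokens already cached
#     q = torch.randn(batch, 1, nheads, headdim, dtype=torch.bfloat16, device="cuda")        # new query token
#     k_new = torch.randn(batch, 1, nheads_k, headdim, dtype=torch.bfloat16, device="cuda")  # new key token
#     v_new = torch.randn(batch, 1, nheads_k, headdim, dtype=torch.bfloat16, device="cuda")  # new value token
#     # Appends k_new/v_new into the caches at the positions given by cache_seqlens (in place)
#     # and attends over the updated cache.
#     out = flash_attn_with_kvcache(q, k_cache, v_cache, k=k_new, v=v_new,
#                                   cache_seqlens=cache_seqlens, causal=True)
#     # out: (batch, 1, nheads, headdim)
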
def get_scheduler_metadata(
batch_size, max_seqlen_q, max_seqlen_k, num_heads_q, num_heads_kv, headdim,
cache_seqlens: torch.Tensor,
qkv_dtype=torch.bfloat16,
headdim_v=None,
cu_seqlens_q: Optional[torch.Tensor] = None,
cu_seqlens_k_new: Optional[torch.Tensor] = None,
cache_leftpad: Optional[torch.Tensor] = None,
page_size: Optional[int] = None,
max_seqlen_k_new=0,
causal=False,
window_size=(-1, -1), # -1 means infinite context window
attention_chunk=0,
has_softcap=False,
num_splits=0, # Can be tuned for speed
pack_gqa=None, # Can be tuned for speed
sm_margin=0, # Can be tuned if some SMs are used for communication
):
cache_seqlens = maybe_contiguous(cache_seqlens)
if headdim_v is None:
headdim_v = headdim
scheduler_metadata = flash_attn_3_cuda.get_scheduler_metadata(
batch_size, max_seqlen_q, max_seqlen_k, num_heads_q, num_heads_kv, headdim, headdim_v,
qkv_dtype,
cache_seqlens,
cu_seqlens_q,
None, # cu_seqlens_k
cu_seqlens_k_new,
None, # seqused_q
cache_leftpad,
page_size,
max_seqlen_k_new,
causal,
window_size[0], window_size[1],
attention_chunk,
has_softcap,
num_splits,
pack_gqa,
sm_margin,
)
return scheduler_metadata
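
# Example usage for get_scheduler_metadata (a minimal sketch; the decode-style shapes below
# are illustrative assumptions). Precomputing the metadata lets repeated calls with the same
# shapes reuse the kernel's scheduling decisions:
#
#     cache_seqlens = torch.full((4,), 512, dtype=torch.int32, device="cuda")
#     metadata = get_scheduler_metadata(
#         batch_size=4, max_seqlen_q=1, max_seqlen_k=4096,
#         num_heads_q=32, num_heads_kv=8, headdim=128,
#         cache_seqlens=cache_seqlens, qkv_dtype=torch.bfloat16, causal=True,
#     )
#     # metadata can then be passed to flash_attn_with_kvcache(..., scheduler_metadata=metadata).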