Dataset schema (column: type, observed range)

uuid: string, length 36
file_name: string, length 5 to 50
repo_name: string, 110 distinct values
file_path: string, length 7 to 112
commit_hash: string, 110 distinct values
starcount: int64, 0 to 0
input: string, length 39 to 33.8k
category: dict
licenses: sequence, length 1 to 2
github_url: string, length 94 to 193
3fed2f1b-c00b-47df-86e8-85fac9efc3b4
triton_sll.py
pytorch/FBGEMM
fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
fe980ab54a6e28818d81c8694b6564e7f804418b
0
@triton.jit def jagged_flash_attention_basic_kernel(q_ptr, k_ptr, v_ptr, offset_ptr, o_ptr, lse_i_ptr, stride_qm, stride_qd, stride_kd, stride_kn, stride_vn, stride_vd, stride_om, stride_od, max_seq_len, D: tl.constexpr, NEXT_D: tl.constexpr, use_mask: tl.constexpr, allow_tf32: tl.constexpr, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_D: tl.constexpr): pid_m = tl.program_id(axis=0) pid_batch = tl.program_id(axis=1) begin = tl.load(offset_ptr + pid_batch) end = tl.load(offset_ptr + pid_batch + 1) seqlen = end - begin seqlen = tl.minimum(seqlen, max_seq_len) if pid_m * BLOCK_SIZE_M >= seqlen: return offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) offs_d = tl.arange(0, BLOCK_SIZE_D) offs_nextd = tl.arange(0, NEXT_D) acc = tl.zeros([BLOCK_SIZE_M, NEXT_D], dtype=tl.float32) m_i = tl.zeros([BLOCK_SIZE_M], dtype=tl.float32) - float('inf') l_i = tl.zeros([BLOCK_SIZE_M], dtype=tl.float32) for j in range(0, seqlen, BLOCK_SIZE_N): offs_n = tl.arange(0, BLOCK_SIZE_N) + j q_ptrs = q_ptr + (offs_m[:, None] * stride_qm + offs_d[None, :] * stride_qd) + begin * stride_qm k_ptrs = k_ptr + (offs_d[:, None] * stride_kd + offs_n[None, :] * stride_kn) + begin * stride_kn qk = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) for d in range(0, D, BLOCK_SIZE_D): updated_offset = d + offs_d q = tl.load(q_ptrs, mask=(updated_offset[None, :] < D) & ( offs_m[:, None] < seqlen), other=0.0) k = tl.load(k_ptrs, mask=(updated_offset[:, None] < D) & ( offs_n[None, :] < seqlen), other=0.0) qk += tl.dot(q, k, allow_tf32=allow_tf32) q_ptrs += BLOCK_SIZE_D * stride_qd k_ptrs += BLOCK_SIZE_D * stride_kd m_ij = tl.maximum(tl.max(qk, axis=1), m_i) mn_mask = (offs_m[:, None] < seqlen) & (offs_n[None, :] < seqlen) p = tl.exp(qk - m_ij[:, None]) p = tl.where(mn_mask, p, 0.0) l_ij = tl.sum(p, axis=1) alpha = tl.exp(m_i - m_ij) l_i = l_i * alpha + l_ij acc = acc * alpha[:, None] v_ptrs = v_ptr + (offs_nextd[None, :] * stride_vd + offs_n[:, None] * stride_vn) + begin * stride_vn v = tl.load(v_ptrs, mask=(offs_nextd[None, :] < D) & (offs_n[:, None] < seqlen), other=0.0) p /= max_seq_len if use_mask: attn_mask = offs_m[:, None] - offs_n[None, :] attn_mask = tl.where(mn_mask, attn_mask, 0.0) attn_mask = tl.where(attn_mask > 0, 0.0, 1.0) p = tl.where(attn_mask > 0, p, 0.0) p = p.to(v_ptr.dtype.element_ty) acc_j = tl.dot(p, v, allow_tf32=allow_tf32) acc += acc_j m_i = m_ij lse_i = m_i + tl.math.log(l_i) lse_i_offsets = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) lse_i_ptrs = lse_i_ptr + lse_i_offsets + begin tl.store(lse_i_ptrs, lse_i, mask=lse_i_offsets < seqlen) acc = acc / l_i[:, None] o_ptrs = o_ptr + (offs_m[:, None] * stride_om + offs_nextd[None, :] * stride_od + begin * stride_om) o_mask = (offs_m[:, None] < seqlen) & (offs_nextd[None, :] < D) tl.store(o_ptrs, acc, mask=o_mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Tiled", "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "BSD", "MIT" ]
https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
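Illustrative host-side launch for jagged_flash_attention_basic_kernel above. The wrapper below is not FBGEMM's API; the tensor layouts ((total_len, D) q/k/v with a (B + 1,) offsets tensor), the tile sizes, and the assumption D >= 16 are all illustrative.

import torch
import triton

def jagged_flash_attention_basic(q, k, v, offsets, max_seq_len,
                                 use_mask=False, allow_tf32=True):
    # q, k, v: (total_len, D) CUDA tensors; offsets: (B + 1,) integer prefix sums.
    total_len, D = q.shape
    B = offsets.numel() - 1
    o = torch.zeros_like(q)
    lse = torch.empty(total_len, device=q.device, dtype=torch.float32)
    BLOCK_M = BLOCK_N = BLOCK_D = 32                # illustrative tile sizes
    grid = (triton.cdiv(max_seq_len, BLOCK_M), B)   # (query tiles, batch)
    jagged_flash_attention_basic_kernel[grid](
        q, k, v, offsets, o, lse,
        q.stride(0), q.stride(1),                   # stride_qm, stride_qd
        k.stride(1), k.stride(0),                   # stride_kd, stride_kn
        v.stride(0), v.stride(1),                   # stride_vn, stride_vd
        o.stride(0), o.stride(1),                   # stride_om, stride_od
        max_seq_len,
        D=D, NEXT_D=triton.next_power_of_2(D),      # assumes D >= 16 so tl.dot tiles are valid
        use_mask=use_mask, allow_tf32=allow_tf32,
        BLOCK_SIZE_M=BLOCK_M, BLOCK_SIZE_N=BLOCK_N, BLOCK_SIZE_D=BLOCK_D,
    )
    return o, lse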
b69d2cf0-95a2-423a-a626-19d6cb20f407
bnrelu.py
neuro-ml/kerops
kerops/kernels/bnrelu.py
735336775e825d5cb06b8850d25423661b12d1ac
0
@triton.jit
def _ApplyBNReLU_cl3d_backward_impl(Input_ptr, Weight_ptr, Bias_ptr, Grad_ptr, Outgrad_ptr,
                                    Weight_outgrad_ptr, Bias_outgrad_ptr, numel_no_channels,
                                    BLOCK_SIZE: tl.constexpr, num_channels: tl.constexpr,
                                    block_other: tl.constexpr):
    pid = tl.program_id(0)
    Input_ptr += pid * BLOCK_SIZE
    Grad_ptr += pid * BLOCK_SIZE
    Outgrad_ptr += pid * BLOCK_SIZE
    channels_offset = tl.arange(0, num_channels)
    other_offset = tl.arange(0, block_other)
    offset = channels_offset[None, :] + other_offset[:, None] * num_channels
    mask = (other_offset < numel_no_channels - pid * block_other)[:, None]
    weight = tl.load(Weight_ptr + channels_offset[None, :])
    bias = tl.load(Bias_ptr + channels_offset[None, :])
    input = tl.load(Input_ptr + offset, mask=mask, other=0).to(tl.float32)
    grad = tl.load(Grad_ptr + offset, mask=mask, other=0).to(tl.float32)
    grad = grad * (input * weight > -bias)
    b_grad = tl.sum(grad, axis=0)
    w_grad = tl.sum(input * grad, axis=0)
    x_grad = weight * grad
    tl.store(Outgrad_ptr + offset, x_grad, mask=mask)
    tl.atomic_add(Bias_outgrad_ptr + channels_offset, b_grad)
    tl.atomic_add(Weight_outgrad_ptr + channels_offset, w_grad)
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Normalization" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound", "Memory-Bound" ] }
[ "MIT" ]
https://github.com/neuro-ml/kerops/blob/735336775e825d5cb06b8850d25423661b12d1ac/kerops/kernels/bnrelu.py
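A minimal launch sketch for _ApplyBNReLU_cl3d_backward_impl above, not the kerops wrapper: it assumes channels-last inputs viewed as (numel_no_channels, num_channels) with num_channels a power of two, and zero-initialized fp32 weight/bias gradient buffers because the kernel accumulates into them with tl.atomic_add.

import torch
import triton

def apply_bn_relu_backward(x, weight, bias, grad):
    # x, grad: channels-last activations viewed as (numel_no_channels, num_channels).
    numel_no_channels, num_channels = x.shape   # num_channels must be a power of two here
    block_other = 64                            # illustrative; the repo derives/tunes this
    x_grad = torch.empty_like(x)
    # Zero-init: the kernel accumulates per-channel sums via tl.atomic_add.
    w_grad = torch.zeros(num_channels, device=x.device, dtype=torch.float32)
    b_grad = torch.zeros(num_channels, device=x.device, dtype=torch.float32)
    grid = (triton.cdiv(numel_no_channels, block_other),)
    _ApplyBNReLU_cl3d_backward_impl[grid](
        x, weight, bias, grad, x_grad, w_grad, b_grad,
        numel_no_channels,
        BLOCK_SIZE=block_other * num_channels,  # elements covered by one program
        num_channels=num_channels,
        block_other=block_other,
    )
    return x_grad, w_grad, b_grad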
5322126e-9117-4fd6-bd2b-3de14befa10d
argmax.py
daemyung/practice-triton
argmax.py
27f727726f1507c8380a1c11751d851c7c4a07ce
0
@triton.jit
def argmax_kernel(output_ptr, input_ptr, num_batches, size, block_size: tl.constexpr):
    batch = tl.program_id(0)
    output_block_ptr = tl.make_block_ptr(output_ptr, shape=(num_batches,), strides=(1,),
                                         offsets=(batch,), block_shape=(1,), order=(0,))
    input_block_ptr = tl.make_block_ptr(input_ptr, shape=(num_batches, size), strides=(size, 1),
                                        offsets=(batch, 0), block_shape=(1, block_size), order=(1, 0))
    input = tl.load(input_block_ptr, boundary_check=(1,))
    condition = tl.arange(0, block_size) < size
    input = tl.where(condition, input, float('-inf'))
    output = tl.argmax(input, 1)
    tl.store(output_block_ptr, output.to(tl.int64))
{ "Data Type": [ "fp32" ], "Functionality": [ "Top-K Selection" ], "Memory Access Pattern": [ "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "MIT" ]
https://github.com/daemyung/practice-triton/blob/27f727726f1507c8380a1c11751d851c7c4a07ce/argmax.py
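A minimal launch sketch for argmax_kernel above (hypothetical wrapper, not from the repository): one program per row, with block_size rounded up to the next power of two so a single block covers the whole row.

import torch
import triton

def argmax_rows(x):
    # x: (num_batches, size) contiguous CUDA tensor; returns per-row argmax as int64.
    num_batches, size = x.shape
    out = torch.empty(num_batches, device=x.device, dtype=torch.int64)
    block_size = triton.next_power_of_2(size)   # one block spans a full row
    argmax_kernel[(num_batches,)](out, x, num_batches, size, block_size=block_size)
    return out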
fd103ed2-89cc-476f-a89e-f223e86b5d3b
GELUglu.py
huyz2023/2by4-pretrain
sparse/GELUglu.py
9e330125dea71e5a3dee235f4efb8869f9e4cdd0
0
@triton.jit def _gelu_glu_bwd_kernel(grad_output_ptr, grad_input_ptr, input_ptr, grad_output_row_stride, grad_input_row_stride, input_row_stride, grad_output_col_stride, grad_input_col_stride, input_col_stride, grad_output_page_stride, grad_input_page_stride, input_page_stride, n_pages, BLOCK_SIZE: tl.constexpr): row_idx = tl.program_id(0) col_idx = tl.program_id(1) grad_output = tl.load(grad_output_ptr + row_idx * grad_output_row_stride + col_idx * grad_output_col_stride + tl. arange(0, BLOCK_SIZE // 2) * grad_output_page_stride, mask=tl. arange(0, BLOCK_SIZE // 2) < n_pages // 2, other=-float('inf')) x = tl.load(input_ptr + row_idx * input_row_stride + col_idx * input_col_stride + tl.arange(0, BLOCK_SIZE // 2) * input_page_stride, mask=tl.arange(0, BLOCK_SIZE // 2) < n_pages // 2, other=-float('inf')) gate = tl.load(input_ptr + row_idx * input_row_stride + col_idx * input_col_stride + (tl.arange(0, BLOCK_SIZE // 2) + n_pages // 2) * input_page_stride, mask=tl.arange(0, BLOCK_SIZE // 2) < n_pages // 2, other=-float('inf')) gate_cube = gate * gate * gate beta = 0.7978845608028654 kappa = 0.044715 inner = beta * (gate + kappa * gate_cube) inner_tanh = tanh(inner) gate_gelu = 0.5 * gate * (inner_tanh + 1) grad_x = grad_output * gate_gelu grad_gelu = grad_output * x grad_gate = grad_gelu * (0.5 * (1 + inner_tanh) + 0.5 * gate * (1 - inner_tanh * inner_tanh) * beta * (1 + kappa * 3 * gate * gate)) tl.store(grad_input_ptr + row_idx * grad_input_row_stride + col_idx * grad_input_col_stride + tl.arange(0, BLOCK_SIZE // 2) * grad_input_page_stride, grad_x, mask=tl.arange(0, BLOCK_SIZE // 2) < n_pages // 2) tl.store(grad_input_ptr + row_idx * grad_input_row_stride + col_idx * grad_input_col_stride + (tl.arange(0, BLOCK_SIZE // 2) + n_pages // 2) * grad_input_page_stride, grad_gate, mask=tl.arange(0, BLOCK_SIZE // 2) < n_pages // 2)
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Activation Functions" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "BSD" ]
https://github.com/huyz2023/2by4-pretrain/blob/9e330125dea71e5a3dee235f4efb8869f9e4cdd0/sparse/GELUglu.py
b78a037f-d294-4568-94ea-bab6c450fa01
associative_rnn_scan.py
TushaarGVS/linear-rnn
linear_rnn/triton/associative_rnn_scan.py
48320589b73154484be7d09a144923a2b9e56b85
0
@triton.jit
def _associative_rnn_scan_bwd_kernel():
    pass
{ "Data Type": [], "Functionality": [], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "Apache" ]
https://github.com/TushaarGVS/linear-rnn/blob/48320589b73154484be7d09a144923a2b9e56b85/linear_rnn/triton/associative_rnn_scan.py
fd49ac89-d287-4343-b1e7-e546da6abbf4
triton_jagged_tensor_ops.py
pytorch/FBGEMM
fbgemm_gpu/fbgemm_gpu/triton/jagged/triton_jagged_tensor_ops.py
fe980ab54a6e28818d81c8694b6564e7f804418b
0
@triton.jit
def tensor_elementwise_mul(x, y):
    return x * y
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "BSD", "MIT" ]
https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/triton/jagged/triton_jagged_tensor_ops.py
b2943844-2b11-4fe4-b90a-1c6e1ebcd741
triton_kernels.py
IntelLabs/EquiTriton
src/equitriton/sph_harm/triton_kernels.py
1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c
0
@triton.jit
def _triton_first_order_fwd(x_ptr: tl.tensor, y_ptr: tl.tensor, z_ptr: tl.tensor,
                            sph_1_0_ptr: tl.tensor, sph_1_1_ptr: tl.tensor, sph_1_2_ptr: tl.tensor,
                            BLOCK_SIZE: tl.constexpr, vector_length: tl.constexpr):
    """
    First order spherical harmonics in Triton.

    Computationally not that intensive, as we're just applying a sqrt 3 to the
    coordinates, but also good for validating that the kernel performs as intended.

    Parameters
    ----------
    x_ptr, y_ptr, z_ptr : tl.tensor
        Pointers to the coordinate tensors.
    sph_1_0_ptr, sph_1_1_ptr, sph_1_2_ptr : tl.tensor
        Pointers to the tensors to write outputs to. Assumed to be the same
        length as the input tensors.
    BLOCK_SIZE : tl.constexpr
        Vector length of contiguous elements to load into memory within a
        given block.
    vector_length : tl.constexpr
        The maximum/total length of the vectors, assumed to be the same for
        every one. This is used to calculate the mask to keep operations
        within bounds.
    """
    sqrt_3 = 3 ** 0.5
    block_id = tl.program_id(0)
    offset = tl.arange(0, BLOCK_SIZE) + BLOCK_SIZE * block_id
    x_row_start = x_ptr + offset
    y_row_start = y_ptr + offset
    z_row_start = z_ptr + offset
    x = tl.load(x_row_start, mask=offset < vector_length)
    y = tl.load(y_row_start, mask=offset < vector_length)
    z = tl.load(z_row_start, mask=offset < vector_length)
    sph_1_0 = sqrt_3 * x
    sph_1_1 = sqrt_3 * y
    sph_1_2 = sqrt_3 * z
    sph_1_0_start = sph_1_0_ptr + offset
    sph_1_1_start = sph_1_1_ptr + offset
    sph_1_2_start = sph_1_2_ptr + offset
    tl.store(sph_1_0_start, sph_1_0, mask=offset < vector_length)
    tl.store(sph_1_1_start, sph_1_1, mask=offset < vector_length)
    tl.store(sph_1_2_start, sph_1_2, mask=offset < vector_length)
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "Apache" ]
https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/triton_kernels.py
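A minimal launch sketch for _triton_first_order_fwd above; the wrapper name and block size are illustrative, not EquiTriton's public API.

import torch
import triton

def first_order_spherical_harmonics(x, y, z, block_size=256):
    # x, y, z: same-length 1-D CUDA tensors of coordinates.
    n = x.numel()
    sph_0 = torch.empty_like(x)
    sph_1 = torch.empty_like(y)
    sph_2 = torch.empty_like(z)
    grid = (triton.cdiv(n, block_size),)
    _triton_first_order_fwd[grid](x, y, z, sph_0, sph_1, sph_2,
                                  BLOCK_SIZE=block_size, vector_length=n)
    return sph_0, sph_1, sph_2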
6b382133-1769-4334-a6ac-ea7373510dc5
GELUglu.py
huyz2023/2by4-pretrain
sparse/GELUglu.py
9e330125dea71e5a3dee235f4efb8869f9e4cdd0
0
@triton.jit
def tanh(x):
    tanh_neg = (tl.math.exp(x * 2) - 1) / (tl.math.exp(x * 2) + 1)
    tanh_pos = (1 - tl.math.exp(-2 * x)) / (1 + tl.math.exp(-2 * x))
    tanh = tl.where(x > 0, tanh_pos, tanh_neg)
    return tanh
{ "Data Type": [ "fp32" ], "Functionality": [ "Activation Functions", "Elementwise Operations" ], "Memory Access Pattern": [ "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "BSD" ]
https://github.com/huyz2023/2by4-pretrain/blob/9e330125dea71e5a3dee235f4efb8869f9e4cdd0/sparse/GELUglu.py
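The two-branch formulation is for numerical stability: exp(2x) overflows for large positive x, so the exp(-2x) variant is selected there, and vice versa for negative x. A small sketch of how the helper can be exercised, assuming it is defined in the same module as the calling kernel:

import torch
import triton
import triton.language as tl

@triton.jit
def _tanh_fwd_kernel(x_ptr, y_ptr, n, BLOCK_SIZE: tl.constexpr):
    pid = tl.program_id(0)
    offs = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = offs < n
    x = tl.load(x_ptr + offs, mask=mask)
    tl.store(y_ptr + offs, tanh(x), mask=mask)  # calls the jitted helper above

x = torch.randn(4096, device='cuda')
y = torch.empty_like(x)
_tanh_fwd_kernel[(triton.cdiv(x.numel(), 1024),)](x, y, x.numel(), BLOCK_SIZE=1024)
assert torch.allclose(y, torch.tanh(x), atol=1e-5)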
fe78e351-fd17-4f43-a401-eb4f64797791
triton_fused_attn_ad.py
LouChao98/vqtree
ops/triton_fused_attn_ad.py
27a53274df7a804bce27dffcce5f5be73f64b6f3
0
@triton.heuristics({'EVEN_M': lambda args: args['seqlen_q'] % args[ 'BLOCK_M'] == 0, 'TOTAL_SLOTS': lambda args: sum(args['CODEBOOK_SIZE'] ** i for i in range(1, args['CODEBOOK_NUM'] + 1))}) @triton.jit def _fwd_kernel(Q, CODEBOOK_K, CODEBOOK_V, KCNT, VCNT, Out, softmax_scale, stride_qb, stride_qh, stride_qm, stride_kb, stride_kh, stride_kn, stride_vb, stride_vh, stride_vn, stride_ob, stride_oh, stride_om, nheads, seqlen_q, CACHE_KEY_SEQLEN_Q, CACHE_KEY_SEQLEN_K, CODEBOOK_SIZE: tl.constexpr, CODEBOOK_NUM: tl.constexpr, TOTAL_SLOTS: tl.constexpr, IS_CAUSAL: tl.constexpr, BLOCK_HEADDIM: tl.constexpr, EVEN_M: tl. constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr): start_m = tl.program_id(0) off_hb = tl.program_id(1) off_b = off_hb // nheads off_h = off_hb % nheads offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) offs_n = tl.arange(0, BLOCK_N) offs_d = tl.arange(0, BLOCK_HEADDIM) Q_block_ptr = tl.make_block_ptr(base=Q + (off_b * stride_qb + off_h * stride_qh), shape=(seqlen_q, BLOCK_HEADDIM), strides=(stride_qm, 1), offsets=(start_m * BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_HEADDIM ), order=(1, 0)) K_block_ptr = tl.make_block_ptr(base=K + (off_b * stride_kb + off_h * stride_kh), shape=(TOTAL_SLOTS, BLOCK_HEADDIM), strides=(stride_kn, 1), offsets=(0, 0), block_shape=(BLOCK_N, BLOCK_HEADDIM), order=(1, 0)) V_block_ptr = tl.make_block_ptr(base=V + (off_b * stride_vb + off_h * stride_vh), shape=(TOTAL_SLOTS, BLOCK_HEADDIM), strides=(stride_vn, 1), offsets=(0, 0), block_shape=(BLOCK_N, BLOCK_HEADDIM), order=(1, 0)) lse_i = tl.zeros([BLOCK_M], dtype=tl.float32) + NEGINF m_i = tl.zeros([BLOCK_M], dtype=tl.float32) + NEGINF acc_o = tl.zeros([BLOCK_M, BLOCK_HEADDIM], dtype=tl.float32) if EVEN_M: q = tl.load(Q_block_ptr) else: q = tl.load(Q_block_ptr, boundary_check=(0,), padding_option='zero') offset = 0 for level_idx in range(1, CODEBOOK_NUM + 1): K_inner_block_ptr = K_block_ptr V_inner_block_ptr = V_block_ptr z_value = tl.zeros([BLOCK_M], dtype=tl.float32) + NEGINF z_idx = tl.zeros([BLOCK_M], dtype=tl.int32) for start_n in range(0, CODEBOOK_SIZE, BLOCK_N): k = tl.load(K_inner_block_ptr) qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) qk += tl.dot(q, tl.trans(k)) qk_max = tl.max(qk, 1) m_ij = tl.maximum(qk_max * softmax_scale, lse_i) p = tl.exp(qk * softmax_scale - m_ij[:, None]) l_ij = tl.sum(p, 1) qk_max_indices = tl.argmax(qk, 1) + start_n update_codebook = z_value < qk_max z_idx = tl.where(update_codebook, qk_max_indices, z_idx) z_value = tl.where() acc_o_scale = tl.exp(m_i - m_ij) acc_o = acc_o * acc_o_scale[:, None] v = tl.load(V_inner_block_ptr) p = p.to(v.dtype) acc_o += tl.dot(p, v) m_i = m_ij l_i_new = tl.exp(lse_i - m_ij) + l_ij lse_i = m_ij + tl.log(l_i_new) K_inner_block_ptr = tl.advance(K_inner_block_ptr, (BLOCK_N, 0)) V_inner_block_ptr = tl.advance(V_inner_block_ptr, (BLOCK_N, 0)) end_n = seqlen_k if not IS_CAUSAL else tl.minimum((start_m + 1) * BLOCK_M, seqlen_k) for start_n in range(0, end_n, BLOCK_N): start_n = tl.multiple_of(start_n, BLOCK_N) if EVEN_N & EVEN_M: k = tl.load(K_block_ptr) else: k = tl.load(K_block_ptr, boundary_check=(0,), padding_option='zero' ) qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) qk += tl.dot(q, tl.trans(k)) if not EVEN_N: qk += tl.where((start_n + offs_n)[None, :] < seqlen_k, 0, NEGINF) if IS_CAUSAL: qk += tl.where(offs_m[:, None] >= (start_n + offs_n)[None, :], 0, NEGINF) m_ij = tl.maximum(tl.max(qk, 1) * softmax_scale, lse_i) p = tl.exp(qk * softmax_scale - m_ij[:, None]) l_ij = tl.sum(p, 1) acc_o_scale = tl.exp(m_i - m_ij) acc_o = 
acc_o * acc_o_scale[:, None] if EVEN_N & EVEN_M: v = tl.load(V_block_ptr) else: v = tl.load(V_block_ptr, boundary_check=(0,), padding_option='zero' ) p = p.to(v.dtype) acc_o += tl.dot(p, v) m_i = m_ij l_i_new = tl.exp(lse_i - m_ij) + l_ij lse_i = m_ij + tl.log(l_i_new) K_block_ptr = tl.advance(K_block_ptr, (BLOCK_N, 0)) V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0)) o_scale = tl.exp(m_i - lse_i) acc_o = acc_o * o_scale[:, None] start_m = tl.program_id(0) offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) offs_d = tl.arange(0, BLOCK_HEADDIM) out_ptrs = Out + off_b * stride_ob + off_h * stride_oh + (offs_m[:, None] * stride_om + offs_d[None, :]) if EVEN_M: tl.store(out_ptrs, acc_o) else: tl.store(out_ptrs, acc_o, mask=offs_m[:, None] < seqlen_q)
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Softmax", "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "Apache" ]
https://github.com/LouChao98/vqtree/blob/27a53274df7a804bce27dffcce5f5be73f64b6f3/ops/triton_fused_attn_ad.py
5dee5ab7-230c-4c73-b3fd-4aa6aef7426c
wy_fast.py
sustcsonglin/flash-linear-attention
fla/ops/delta_rule/wy_fast.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for num_warps in [1, 2, 4, 8, 16]], key=['BT', 'BK', 'BV']) @triton.jit def bwd_prepare_wy_repr_kernel(k, v, beta, A, dw, du, dk, dv, dbeta, offsets, indices, T: tl.constexpr, H: tl.constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr): i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) i_b, i_h = i_bh // H, i_bh % H if USE_OFFSETS: i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices + i_t * 2 + 1).to(tl.int32) bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets + i_n + 1).to(tl.int32) T = eos - bos else: bos, eos = i_b * T, i_b * T + T if HEAD_FIRST: p_beta = tl.make_block_ptr(beta + i_bh * T, (T,), (1,), (i_t * BT,), (BT,), (0,)) p_A = tl.make_block_ptr(A + i_bh * T * BT, (BT, T), (1, BT), (0, i_t * BT), (BT, BT), (0, 1)) else: p_beta = tl.make_block_ptr(beta + bos * H + i_h, (T,), (H,), (i_t * BT,), (BT,), (0,)) p_A = tl.make_block_ptr(A + (bos * H + i_h) * BT, (BT, T), (1, H * BT), (0, i_t * BT), (BT, BT), (0, 1)) b_beta = tl.load(p_beta, boundary_check=(0,)) b_A = tl.load(p_A, boundary_check=(0, 1)) b_dbeta = tl.zeros([BT], dtype=tl.float32) b_dA = tl.zeros([BT, BT], dtype=tl.float32) for i_v in range(tl.cdiv(V, BV)): if HEAD_FIRST: p_v = tl.make_block_ptr(v + i_bh * T * V, (T, V), (V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_dv = tl.make_block_ptr(dv + i_bh * T * V, (T, V), (V, 1), ( i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_du = tl.make_block_ptr(du + i_bh * T * V, (T, V), (V, 1), ( i_t * BT, i_v * BV), (BT, BV), (1, 0)) else: p_v = tl.make_block_ptr(v + (bos * H + i_h) * V, (T, V), (H * V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_dv = tl.make_block_ptr(dv + (bos * H + i_h) * V, (T, V), (H * V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_du = tl.make_block_ptr(du + (bos * H + i_h) * V, (T, V), (H * V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) b_v = tl.load(p_v, boundary_check=(0, 1)) b_v_beta = (b_v * b_beta[:, None]).to(b_v.dtype) b_du = tl.load(p_du, boundary_check=(0, 1)) b_dA += tl.dot(b_du, tl.trans(b_v_beta), allow_tf32=False) b_dv_beta = tl.dot(b_A, b_du, allow_tf32=False) b_dv = b_dv_beta * b_beta[:, None] b_dbeta += tl.sum(b_dv_beta * b_v, 1) tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), boundary_check=(0, 1)) for i_k in range(tl.cdiv(K, BK)): if HEAD_FIRST: p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_dk = tl.make_block_ptr(dk + i_bh * T * K, (T, K), (K, 1), ( i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_dw = tl.make_block_ptr(dw + i_bh * T * K, (T, K), (K, 1), ( i_t * BT, i_k * BK), (BT, BK), (1, 0)) else: p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_dk = tl.make_block_ptr(dk + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_dw = tl.make_block_ptr(dw + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) b_k = tl.load(p_k, boundary_check=(0, 1)) b_k_beta = (b_k * b_beta[:, None]).to(b_k.dtype) b_dw = tl.load(p_dw, boundary_check=(0, 1)) b_dA += tl.dot(b_dw, tl.trans(b_k_beta), allow_tf32=False) b_dk_beta = tl.dot(b_A, b_dw, allow_tf32=False) b_dk = b_dk_beta * b_beta[:, None] b_dbeta += tl.sum(b_dk_beta * b_k, 1) tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1)) b_dA = 
tl.where(tl.arange(0, BT)[:, None] > tl.arange(0, BT)[None, :], b_dA, 0) b_dA = tl.dot(b_dA.to(b_A.dtype), b_A) b_dA = tl.dot(b_A, b_dA.to(b_A.dtype)) b_dA = tl.where(tl.arange(0, BT)[:, None] > tl.arange(0, BT)[None, :], -b_dA, 0).to(k.dtype.element_ty) for i_k in range(tl.cdiv(K, BK)): if HEAD_FIRST: p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_dk = tl.make_block_ptr(dk + i_bh * T * K, (T, K), (K, 1), ( i_t * BT, i_k * BK), (BT, BK), (1, 0)) else: p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_dk = tl.make_block_ptr(dk + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) b_k = tl.load(p_k, boundary_check=(0, 1)) b_dk = tl.load(p_dk, boundary_check=(0, 1)) b_k_beta = (b_k * b_beta[:, None]).to(b_k.dtype) b_dk_beta = tl.dot(b_dA, b_k, allow_tf32=False) b_dbeta += tl.sum(b_dk_beta * b_k, 1) b_dk += tl.dot(tl.trans(b_dA), b_k_beta, allow_tf32=False) b_dk += b_dk_beta * b_beta[:, None] tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1)) if HEAD_FIRST: p_dbeta = tl.make_block_ptr(dbeta + i_bh * T, (T,), (1,), (i_t * BT ,), (BT,), (0,)) else: p_dbeta = tl.make_block_ptr(dbeta + bos * H + i_h, (T,), (H,), (i_t * BT,), (BT,), (0,)) tl.store(p_dbeta, b_dbeta.to(p_dbeta.dtype.element_ty), boundary_check=(0,) )
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Strided Access", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Memory-Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/delta_rule/wy_fast.py
a64c01e5-8feb-4826-8caa-64280d7be379
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/gla/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({}, num_warps=2), triton.Config({}, num_warps=4), triton.Config({}, num_warps=8)], key=['BK', 'NC', 'BT']) @triton.jit def chunk_gla_bwd_kernel_intra(q, k, g, dA, dq, dk, offsets, indices, T: tl .constexpr, H: tl.constexpr, K: tl.constexpr, BT: tl.constexpr, BC: tl. constexpr, BK: tl.constexpr, NC: tl.constexpr, USE_OFFSETS: tl. constexpr, HEAD_FIRST: tl.constexpr): i_k, i_c, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) i_b, i_h = i_bh // H, i_bh % H i_t, i_i = i_c // NC, i_c % NC if USE_OFFSETS: i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices + i_t * 2 + 1).to(tl.int32) bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets + i_n + 1).to(tl.int32) else: bos, eos = i_b * T, i_b * T + T T = eos - bos if i_t * BT + i_i * BC >= T: return o_k = i_k * BK + tl.arange(0, BK) m_k = o_k < K if HEAD_FIRST: p_g = tl.make_block_ptr(g + i_bh * T * K, (T, K), (K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) else: p_g = tl.make_block_ptr(g + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) b_g = tl.load(p_g, boundary_check=(0, 1)) b_dq = tl.zeros([BC, BK], dtype=tl.float32) if i_i > 0: if HEAD_FIRST: p_gn = tl.max_contiguous(tl.multiple_of(g + (i_bh * T + i_t * BT + i_i * BC) * K + o_k, BK), BK) else: p_gn = tl.max_contiguous(tl.multiple_of(g + (bos + i_t * BT + i_i * BC) * H * K + i_h * K + o_k, BK), BK) b_gn = tl.load(p_gn, mask=m_k, other=0) for i_j in range(0, i_i): if HEAD_FIRST: p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), ( i_t * BT + i_j * BC, i_k * BK), (BC, BK), (1, 0)) p_gk = tl.make_block_ptr(g + i_bh * T * K, (T, K), (K, 1), (i_t * BT + i_j * BC, i_k * BK), (BC, BK), (1, 0)) p_dA = tl.make_block_ptr(dA + i_bh * T * BT, (T, BT), (BT, 1), (i_t * BT + i_i * BC, i_j * BC), (BC, BC), (1, 0)) else: p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT + i_j * BC, i_k * BK), (BC, BK), (1, 0)) p_gk = tl.make_block_ptr(g + (bos * H + i_h) * K, (T, K), ( H * K, 1), (i_t * BT + i_j * BC, i_k * BK), (BC, BK), ( 1, 0)) p_dA = tl.make_block_ptr(dA + (bos * H + i_h) * BT, (T, BT), (H * BT, 1), (i_t * BT + i_i * BC, i_j * BC), (BC, BC), (1, 0)) b_k = tl.load(p_k, boundary_check=(0, 1)) b_gk = tl.load(p_gk, boundary_check=(0, 1)) b_kg = b_k * tl.exp(b_gn[None, :] - b_gk) b_dA = tl.load(p_dA, boundary_check=(0, 1)) b_dq += tl.dot(b_dA, b_kg) b_dq *= tl.exp(b_g - b_gn[None, :]) o_i = tl.arange(0, BC) m_dA = i_t * BT + i_i * BC + tl.arange(0, BC) < T if HEAD_FIRST: o_dA = i_bh * T * BT + (i_t * BT + i_i * BC + tl.arange(0, BC) ) * BT + i_i * BC p_kj = tl.max_contiguous(tl.multiple_of(k + (i_bh * T + i_t * BT + i_i * BC) * K + o_k, BK), BK) p_gkj = tl.max_contiguous(tl.multiple_of(g + (i_bh * T + i_t * BT + i_i * BC) * K + o_k, BK), BK) p_dq = tl.make_block_ptr(dq + i_bh * T * K, (T, K), (K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) else: o_dA = bos * H * BT + (i_t * BT + i_i * BC + tl.arange(0, BC) ) * H * BT + i_h * BT + i_i * BC p_kj = tl.max_contiguous(tl.multiple_of(k + (bos + i_t * BT + i_i * BC) * H * K + i_h * K + o_k, BK), BK) p_gkj = tl.max_contiguous(tl.multiple_of(g + (bos + i_t * BT + i_i * BC) * H * K + i_h * K + o_k, BK), BK) p_dq = tl.make_block_ptr(dq + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) for j in range(0, min(BC, T - i_t * BT - i_i * BC)): b_dA = 
tl.load(dA + o_dA + j, mask=m_dA, other=0) b_kj = tl.load(p_kj, mask=m_k, other=0).to(tl.float32) b_gkj = tl.load(p_gkj, mask=m_k, other=0).to(tl.float32) m_i = o_i[:, None] >= j b_dq += tl.where(m_i, b_dA[:, None] * b_kj[None, :] * tl.exp(b_g - b_gkj[None, :]), 0.0) p_kj += K if HEAD_FIRST else H * K p_gkj += K if HEAD_FIRST else H * K tl.store(p_dq, b_dq.to(p_dq.dtype.element_ty), boundary_check=(0, 1)) tl.debug_barrier() if HEAD_FIRST: p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) p_gk = tl.make_block_ptr(g + i_bh * T * K, (T, K), (K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) else: p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) p_gk = tl.make_block_ptr(g + (bos * H + i_h) * K, (T, K), (H * K, 1 ), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) b_k = tl.load(p_k, boundary_check=(0, 1)) b_gk = tl.load(p_gk, boundary_check=(0, 1)) b_dk = tl.zeros([BC, BK], dtype=tl.float32) NC = min(NC, tl.cdiv(T - i_t * BT, BC)) if i_i < NC - 1: if HEAD_FIRST: p_gn = tl.max_contiguous(tl.multiple_of(g + i_bh * T * K + (i_t * BT + i_i * BC + BC - 1) * K + o_k, BK), BK) else: p_gn = tl.max_contiguous(tl.multiple_of(g + bos * H * K + (i_t * BT + i_i * BC + BC - 1) * H * K + i_h * K + o_k, BK), BK) b_gn = tl.load(p_gn, mask=m_k, other=0) for i_j in range(i_i + 1, NC): if HEAD_FIRST: p_q = tl.make_block_ptr(q + i_bh * T * K, (T, K), (K, 1), ( i_t * BT + i_j * BC, i_k * BK), (BC, BK), (1, 0)) p_g = tl.make_block_ptr(g + i_bh * T * K, (T, K), (K, 1), ( i_t * BT + i_j * BC, i_k * BK), (BC, BK), (1, 0)) p_dA = tl.make_block_ptr(dA + i_bh * T * BT, (BT, T), (1, BT), (i_i * BC, i_t * BT + i_j * BC), (BC, BC), (0, 1)) else: p_q = tl.make_block_ptr(q + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT + i_j * BC, i_k * BK), (BC, BK), (1, 0)) p_g = tl.make_block_ptr(g + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT + i_j * BC, i_k * BK), (BC, BK), (1, 0)) p_dA = tl.make_block_ptr(dA + (bos * H + i_h) * BT, (BT, T), (1, H * BT), (i_i * BC, i_t * BT + i_j * BC), (BC, BC), (0, 1)) b_q = tl.load(p_q, boundary_check=(0, 1)) b_g = tl.load(p_g, boundary_check=(0, 1)) b_qg = b_q * tl.exp(b_g - b_gn[None, :]) b_dA = tl.load(p_dA, boundary_check=(0, 1)) b_dk += tl.dot(b_dA, b_qg) b_dk *= tl.exp(b_gn[None, :] - b_gk) if HEAD_FIRST: o_dA = i_bh * T * BT + (i_t * BT + i_i * BC ) * BT + i_i * BC + tl.arange(0, BC) p_qj = tl.max_contiguous(tl.multiple_of(q + (i_bh * T + i_t * BT + i_i * BC) * K + o_k, BK), BK) p_gqj = tl.max_contiguous(tl.multiple_of(g + (i_bh * T + i_t * BT + i_i * BC) * K + o_k, BK), BK) p_dk = tl.make_block_ptr(dk + i_bh * T * K, (T, K), (K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) else: o_dA = bos * H * BT + (i_t * BT + i_i * BC ) * H * BT + i_h * BT + i_i * BC + tl.arange(0, BC) p_qj = tl.max_contiguous(tl.multiple_of(q + (bos + i_t * BT + i_i * BC) * H * K + i_h * K + o_k, BK), BK) p_gqj = tl.max_contiguous(tl.multiple_of(g + (bos + i_t * BT + i_i * BC) * H * K + i_h * K + o_k, BK), BK) p_dk = tl.make_block_ptr(dk + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) for j in range(0, min(BC, T - i_t * BT - i_i * BC)): b_dA = tl.load(dA + o_dA + j * (1 if HEAD_FIRST else H) * BT) b_qj = tl.load(p_qj, mask=m_k, other=0).to(tl.float32) b_gqj = tl.load(p_gqj, mask=m_k, other=0).to(tl.float32) m_i = o_i[:, None] <= j b_dk += tl.where(m_i, b_dA[:, None] * b_qj[None, :] * tl.exp(b_gqj[ None, :] - b_gk), 0.0) 
p_qj += K if HEAD_FIRST else H * K p_gqj += K if HEAD_FIRST else H * K tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Backpropagation", "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access", "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Memory-Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gla/chunk.py
be7559f4-befc-4c5a-86c2-f17eaa7ed922
block_offsets.py
Forkxz/TritonDeepLearningKernel
kernel/block_offsets.py
add54b6318e8fa5fdbf8c7b47659de9fceaa5691
0
@triton.jit
def block_offsets_2d(shape_x, shape_y, stride_x, stride_y, offset_x, offset_y,
                     block_shape_x, block_shape_y, require_mask=False):
    offs_x = tl.arange(0, block_shape_x) + offset_x
    offs_y = tl.arange(0, block_shape_y) + offset_y
    ptrs = offs_x[:, None] * stride_x + offs_y[None, :] * stride_y
    if require_mask:
        mask = (offs_x[:, None] < shape_x) & (offs_y[None, :] < shape_y)
        return ptrs, mask
    else:
        return ptrs
{ "Data Type": [], "Functionality": [], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/Forkxz/TritonDeepLearningKernel/blob/add54b6318e8fa5fdbf8c7b47659de9fceaa5691/kernel/block_offsets.py
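A sketch of how block_offsets_2d can be used from another jitted kernel, here a tiled 2-D copy. The kernel and the assumption that the require_mask=True literal is resolved at compile time are illustrative, not part of the repository.

import torch
import triton
import triton.language as tl

@triton.jit
def copy_2d_kernel(x_ptr, y_ptr, M, N, stride_m, stride_n,
                   BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr):
    pid_m = tl.program_id(0)
    pid_n = tl.program_id(1)
    # block_offsets_2d returns element offsets plus an in-bounds mask for this tile.
    offs, mask = block_offsets_2d(M, N, stride_m, stride_n,
                                  pid_m * BLOCK_M, pid_n * BLOCK_N,
                                  BLOCK_M, BLOCK_N, True)
    tl.store(y_ptr + offs, tl.load(x_ptr + offs, mask=mask), mask=mask)

x = torch.randn(500, 300, device='cuda')
y = torch.empty_like(x)
grid = (triton.cdiv(500, 64), triton.cdiv(300, 64))
copy_2d_kernel[grid](x, y, 500, 300, x.stride(0), x.stride(1), BLOCK_M=64, BLOCK_N=64)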
49a57e59-6b3a-4a5b-a54d-579bb92b5c93
triton_implicit_gemm_1x1_0x0_1x1.py
l1351868270/implicit_gemm.triton
triton_implicit_gemm_1x1_0x0_1x1.py
64eb8548ccf4576883c928f6315be8b24680a455
0
@triton.autotune(configs=get_autotune_config(), key=['GEMM_M', 'GEMM_N', 'GEMM_K']) @triton.jit def conv2d_kernel_1x1_1x1_0x0_1x1(x_ptr, w_ptr, y_ptr, N, C, H, W, K, P, Q, R, S, U, V, pad_h, pad_w, dila_h, dila_w, GEMM_M, GEMM_N, GEMM_K, stride_am, stride_ak, stride_bk, stride_bn, stride_cm, stride_cn, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr): pid = tl.program_id(axis=0) num_pid_m = tl.cdiv(GEMM_M, BLOCK_SIZE_M) num_pid_n = tl.cdiv(GEMM_N, BLOCK_SIZE_N) num_pid_in_group = GROUP_SIZE_M * num_pid_n group_id = pid // num_pid_in_group first_pid_m = group_id * GROUP_SIZE_M group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M) pid_m = first_pid_m + pid % num_pid_in_group % group_size_m pid_n = pid % num_pid_in_group // group_size_m gemm_i = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % GEMM_M gemm_j = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % GEMM_N n = gemm_i // (P * Q) npq_residual = gemm_i % (P * Q) p = npq_residual // Q q = npq_residual % Q k = gemm_j offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % GEMM_M offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % GEMM_N offs_k = tl.arange(0, BLOCK_SIZE_K) a_ptrs = x_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak) b_ptrs = w_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn) accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) for idx_k in range(0, tl.cdiv(GEMM_K, BLOCK_SIZE_K)): gemm_k = idx_k * BLOCK_SIZE_K + tl.arange(0, BLOCK_SIZE_K) r = gemm_k // (S * C) rsc_residual = gemm_k % (S * C) s = rsc_residual // C c = rsc_residual % C h = p[:, None] * U + r[None, :] * dila_h - pad_h w = q[:, None] * V + s[None, :] * dila_w - pad_w mask_x = (h >= 0) & (h < H) & (w >= 0) & (w < W) mask_w = (r < R) & (s < S) & (c < C) offs_x = n[:, None] * H * W * C + h * W * C + w * C + c offs_w = k[None, :] * R * S * C + r[:, None] * S * C + s[:, None ] * C + c[:, None] x_ptrs = x_ptr + offs_x w_ptrs = w_ptr + offs_w a = tl.load(a_ptrs, mask=offs_k[None, :] < GEMM_K - idx_k * BLOCK_SIZE_K, other=0.0) b = tl.load(b_ptrs, mask=offs_k[:, None] < GEMM_K - idx_k * BLOCK_SIZE_K, other=0.0) accumulator = tl.dot(a, b, accumulator, out_dtype=tl.float32) a_ptrs += BLOCK_SIZE_K * stride_ak b_ptrs += BLOCK_SIZE_K * stride_bk c = accumulator.to(tl.float16) offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) c_ptrs = y_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, : ] c_mask = (offs_cm[:, None] < GEMM_M) & (offs_cn[None, :] < GEMM_N) tl.store(c_ptrs, c, mask=c_mask)
{ "Data Type": [ "fp16", "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access", "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/l1351868270/implicit_gemm.triton/blob/64eb8548ccf4576883c928f6315be8b24680a455/triton_implicit_gemm_1x1_0x0_1x1.py
66ca0a7e-3044-4054-9572-e48a84e580fd
triton_fused_attention.py
pytorch-labs/tritonbench
tritonbench/kernels/triton_fused_attention.py
3a5dccb159834968567a2e45e561dc1aeaa8f8a8
0
@triton.autotune(list(filter(keep, configsOrig)), key=['N_CTX']) @triton.jit def _attn_fwd(Q, K, V, sm_scale, M, Out, desc_q, desc_k, desc_v, desc_o, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, stride_oz, stride_oh, stride_om, stride_on, Z, H, N_CTX, BLOCK_M: tl. constexpr, BLOCK_N: tl.constexpr, HEAD_DIM: tl.constexpr, STAGE: tl. constexpr, ENABLE_TMA: tl.constexpr, LOOP_SCHEDULE: tl.constexpr, ENABLE_WS: tl.constexpr): tl.static_assert(BLOCK_N <= HEAD_DIM) pid = tl.program_id(0) off_hz = tl.program_id(1) _attn_fwd_compute(Q, K, V, sm_scale, M, Out, desc_q, desc_k, desc_v, desc_o, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, stride_oz, stride_oh, stride_om, stride_on, off_hz, pid, Z, H, N_CTX, BLOCK_M, BLOCK_N, HEAD_DIM, STAGE, ENABLE_TMA, LOOP_SCHEDULE)
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "BSD" ]
https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/kernels/triton_fused_attention.py
d159d9b3-bf80-48b6-adf0-f8ae054b043a
mlstm_matmul.py
LukasBluebaum/xLSTM-Triton-CUDA-Implementation
mlstm_matmul.py
6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b
0
@triton.jit def mlstm_matmul_kernel_backward(dH, dB, Q, K, V, dQ, dK, dV, F, dF, I, dI, M, B, NH: tl.constexpr, S: tl.constexpr, D: tl.constexpr, SB: tl.constexpr ): bh_id = tl.program_id(0) sb_id = tl.program_id(1) batch_id = bh_id // NH head_id = bh_id % NH batch_offset_dh = batch_id * NH * S * D + head_id * S * D batch_offset_f = batch_id * NH * S + head_id * S offset_dh = tl.arange(0, SB) + sb_id * SB offset_vk = tl.arange(0, SB) + sb_id * SB d_range = tl.arange(0, D) dh_range = batch_offset_dh + offset_dh[:, None] * D + d_range[None, :] dh_mask = (offset_dh[:, None] < S) & (d_range[None, :] < D) dh = tl.load(dH + dh_range, dh_mask) m = tl.load(M + batch_offset_f + offset_dh, offset_dh < S) b = tl.load(B + batch_offset_f + offset_dh, offset_dh < S) f = tl.load(F + batch_offset_f + offset_dh, offset_dh < S) db = tl.load(dB + batch_offset_f + offset_dh, offset_dh < S) q = tl.load(Q + dh_range, dh_mask) scale = tl.sqrt(tl.full((1,), D, dtype=tl.float32)) n = tl.maximum(tl.abs(b), tl.exp(-m)) + 1e-06 f = tl.cumsum(tl.log(tl.sigmoid(f))) f_low = f df_acc = tl.zeros((SB,), dtype=tl.float32) dq_acc = tl.zeros((SB, D), dtype=tl.float32) for j in range(sb_id, -1, -1): vk_range = batch_offset_dh + offset_vk[:, None] * D + d_range[None, :] vk_mask = (offset_vk[:, None] < S) & (d_range[None, :] < D) f_next = tl.load(F + batch_offset_f + offset_vk, offset_vk < S) i = tl.load(I + batch_offset_f + offset_vk, offset_vk < S) f_next = tl.log(tl.sigmoid(f_next)) if j == sb_id: f_next = tl.cumsum(f_next) d = f[:, None] - f_next[None, :] + i[None, :] mask = offset_dh[:, None] >= offset_vk[None, :] d = tl.where(mask, d, -float('inf')) else: f += tl.sum(f_next) f_next = tl.cumsum(f_next) d = f[:, None] - f_next[None, :] + i[None, :] d = tl.exp(d - m[:, None]) v = tl.load(V + vk_range, vk_mask) dc_tilde = matrix_mult(dh, tl.trans(v), SB) * (1 / n)[:, None] + db[ :, None] k = tl.load(K + vk_range, vk_mask) / scale dq_acc += matrix_mult(dc_tilde * d, k, SB) c_tilde = matrix_mult(q, tl.trans(k), SB) * d df_acc += tl.sum(c_tilde * dc_tilde, 1) offset_vk -= SB tl.store(dQ + dh_range, dq_acc, dh_mask) offset_q = tl.arange(0, SB) + sb_id * SB f = tl.zeros((1,), dtype=tl.float32) v = tl.load(V + dh_range, dh_mask) k = tl.load(K + dh_range, dh_mask) i = tl.load(I + batch_offset_f + offset_dh, offset_dh < S) dk_acc = tl.zeros((SB, D), dtype=tl.float32) dv_acc = tl.zeros((SB, D), dtype=tl.float32) di_acc = tl.zeros((SB,), dtype=tl.float32) for j in range(sb_id, tl.cdiv(S, SB)): q_range = batch_offset_dh + offset_q[:, None] * D + d_range[None, :] q_mask = (offset_q[:, None] < S) & (d_range[None, :] < D) f_next = tl.load(F + batch_offset_f + offset_q, offset_q < S) f_next = tl.log(tl.sigmoid(f_next)) f_next_sum = tl.sum(f_next) f_next = f + tl.cumsum(f_next) d = f_next[None, :] - f_low[:, None] + i[:, None] f += f_next_sum if j == sb_id: mask = offset_dh[:, None] <= offset_q[None, :] d = tl.where(mask, d, -float('inf')) dh = tl.load(dH + q_range, q_mask) m = tl.load(M + batch_offset_f + offset_q, offset_q < S) b = tl.load(B + batch_offset_f + offset_q, offset_q < S) db = tl.load(dB + batch_offset_f + offset_q, offset_q < S) d = tl.exp(d - m[None, :]) n = tl.maximum(tl.abs(b), tl.exp(-m)) + 1e-06 dc_tilde_T = matrix_mult(v, tl.trans(dh), SB) * (1 / n)[None, :] + db[ None, :] q = tl.load(Q + q_range, q_mask) / scale dk_acc += matrix_mult(dc_tilde_T * d, q, SB) c_tilde_T = matrix_mult(k, tl.trans(q), SB) * d dv_acc += matrix_mult(c_tilde_T / n[None, :], dh, SB) di_acc += tl.sum(c_tilde_T * dc_tilde_T, 1) offset_q += SB 
tl.store(dK + dh_range, dk_acc, dh_mask) tl.store(dV + dh_range, dv_acc, dh_mask) tl.store(dI + batch_offset_f + offset_dh, di_acc, offset_dh < S) tl.store(dF + batch_offset_f + offset_dh + 1, di_acc - df_acc, offset_dh + 1 < S)
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/LukasBluebaum/xLSTM-Triton-CUDA-Implementation/blob/6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b/mlstm_matmul.py
aaf9342a-2fc0-47a4-a123-58f4a92788de
layernorm_gated.py
sustcsonglin/flash-linear-attention
fla/modules/layernorm_gated.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'HAS_BIAS': lambda args: args['B'] is not None, 'HAS_Z': lambda args: args['Z'] is not None})
@triton.jit
def layer_norm_fwd_kernel(X, Y, W, B, Z, Mean, Rstd, stride_x_row, stride_y_row, stride_z_row,
                          M, N, eps, BLOCK_N: tl.constexpr, HAS_BIAS: tl.constexpr,
                          HAS_Z: tl.constexpr, NORM_BEFORE_GATE: tl.constexpr, IS_RMS_NORM: tl.constexpr):
    row = tl.program_id(0)
    group = tl.program_id(1)
    X += row * stride_x_row + group * N
    Y += row * stride_y_row + group * N
    if HAS_Z:
        Z += row * stride_z_row + group * N
    if not IS_RMS_NORM:
        Mean += group * M
    Rstd += group * M
    W += group * N
    if HAS_BIAS:
        B += group * N
    cols = tl.arange(0, BLOCK_N)
    x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32)
    if HAS_Z and not NORM_BEFORE_GATE:
        z = tl.load(Z + cols, mask=cols < N).to(tl.float32)
        x *= z * tl.sigmoid(z)
    if not IS_RMS_NORM:
        mean = tl.sum(x, axis=0) / N
        tl.store(Mean + row, mean)
        xbar = tl.where(cols < N, x - mean, 0.0)
        var = tl.sum(xbar * xbar, axis=0) / N
    else:
        xbar = tl.where(cols < N, x, 0.0)
        var = tl.sum(xbar * xbar, axis=0) / N
    rstd = 1 / tl.sqrt(var + eps)
    tl.store(Rstd + row, rstd)
    mask = cols < N
    w = tl.load(W + cols, mask=mask).to(tl.float32)
    if HAS_BIAS:
        b = tl.load(B + cols, mask=mask).to(tl.float32)
    x_hat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd
    y = x_hat * w + b if HAS_BIAS else x_hat * w
    if HAS_Z and NORM_BEFORE_GATE:
        z = tl.load(Z + cols, mask=mask).to(tl.float32)
        y *= z * tl.sigmoid(z)
    tl.store(Y + cols, y, mask=mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/modules/layernorm_gated.py
e45d6b24-d5c8-45cc-b1bf-50ded8c8c077
bgmv_expand.py
IBM/vllm
vllm/lora/ops/bgmv_expand.py
99523dd62be2ecf6c6db15e8133aaaf7855e7e86
0
@triton.jit
def _bgmv_expand_kernel(input_ptr, lora_ptr, out_ptr, N, K, lora_indices,
                        xm_stride, xk_stride, l0_stride, lora_k_stride, lora_n_stride,
                        cm_stride, cn_stride, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr,
                        SPLIT_N: tl.constexpr, EVEN_K: tl.constexpr, ADD_INPUTS: tl.constexpr,
                        CAST_TYPE: tl.constexpr):
    """
    GroupGEMV; introducing SPLIT_N can improve performance for large hidden sizes.
    """
    pid_sn = tl.program_id(axis=0)
    cur_batch = tl.program_id(axis=1)
    lora_index = tl.load(lora_indices + cur_batch)
    if lora_index == -1:
        return
    offset_k = tl.arange(0, BLOCK_K)
    offset_n = tl.arange(0, BLOCK_N)
    if EVEN_K:
        tiled_a = tl.load(input_ptr + cur_batch * xm_stride + offset_k * xk_stride)
    else:
        tiled_a = tl.load(input_ptr + cur_batch * xm_stride + offset_k * xk_stride,
                          mask=offset_k < K, other=0)
    split_n_length = tl.cdiv(N, SPLIT_N)
    if CAST_TYPE:
        tiled_a = tiled_a.to(lora_ptr.dtype.element_ty)
    b_ptr = (lora_ptr + l0_stride * lora_index +
             pid_sn * split_n_length * lora_k_stride)
    c_ptr = out_ptr + cur_batch * cm_stride + pid_sn * split_n_length
    for n in range(0, split_n_length, BLOCK_N):
        current_n = n + offset_n
        current_n_c = tl.max_contiguous(current_n, BLOCK_N)
        b_ptr_mask = (current_n[:, None] < split_n_length) & (offset_k[None, :] < K)
        c_mask = current_n < split_n_length
        tiled_b = tl.load(b_ptr + current_n_c[:, None] * lora_k_stride +
                          offset_k[None, :] * lora_n_stride, mask=b_ptr_mask, other=0.0)
        if ADD_INPUTS:
            tiled_out = tl.load(c_ptr + current_n * cn_stride, mask=c_mask, other=0.0)
            accumulator = tl.sum(tiled_a * tiled_b, 1) + tiled_out
        else:
            accumulator = tl.sum(tiled_a * tiled_b, 1)
        tl.store(c_ptr + current_n * cn_stride, accumulator, mask=c_mask)
{ "Data Type": [ "fp32", "bf16" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "Apache" ]
https://github.com/IBM/vllm/blob/99523dd62be2ecf6c6db15e8133aaaf7855e7e86/vllm/lora/ops/bgmv_expand.py
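An illustrative launch for _bgmv_expand_kernel above (not vLLM's wrapper): lora_b is assumed to have shape (num_loras, N, K), and the SPLIT_N/BLOCK_N choices and the EVEN_K/CAST_TYPE expressions are stand-ins for what the real wrapper derives.

import torch
import triton

def bgmv_expand(x, lora_b, out, lora_indices, add_inputs=False):
    # x: (num_tokens, K) shrink output; lora_b: (num_loras, N, K); out: (num_tokens, N);
    # lora_indices: (num_tokens,) with -1 marking tokens without a LoRA.
    N, K = lora_b.shape[-2:]
    BLOCK_K = triton.next_power_of_2(K)   # the whole K dimension is loaded at once
    BLOCK_N, SPLIT_N = 256, 8             # illustrative tiling of the output dimension
    grid = (SPLIT_N, x.size(0))
    _bgmv_expand_kernel[grid](
        x, lora_b, out, N, K, lora_indices,
        x.stride(0), x.stride(1),
        lora_b.stride(0), lora_b.stride(1), lora_b.stride(2),
        out.stride(0), out.stride(1),
        BLOCK_N=BLOCK_N, BLOCK_K=BLOCK_K, SPLIT_N=SPLIT_N,
        EVEN_K=(K % BLOCK_K == 0), ADD_INPUTS=add_inputs,
        CAST_TYPE=(x.dtype != lora_b.dtype),
    )
    return out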
1f2dca18-a3cd-441e-8a5f-75bcbedf659d
mlstm_matmul.py
LukasBluebaum/xLSTM-Triton-CUDA-Implementation
mlstm_matmul.py
6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b
0
@triton.jit
def scan_add_op(x1, x2):
    return x1 + x2
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Low Latency" ] }
[ "MIT" ]
https://github.com/LukasBluebaum/xLSTM-Triton-CUDA-Implementation/blob/6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b/mlstm_matmul.py
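scan_add_op is a combine function for tl.associative_scan; a minimal sketch using it for an inclusive prefix sum over one block (the kernel and sizes are illustrative):

import torch
import triton
import triton.language as tl

@triton.jit
def cumsum_kernel(x_ptr, y_ptr, n, BLOCK_SIZE: tl.constexpr):
    offs = tl.arange(0, BLOCK_SIZE)
    x = tl.load(x_ptr + offs, mask=offs < n, other=0.0)
    y = tl.associative_scan(x, 0, scan_add_op)   # inclusive prefix sum via the combine fn above
    tl.store(y_ptr + offs, y, mask=offs < n)

x = torch.randn(1000, device='cuda')
y = torch.empty_like(x)
cumsum_kernel[(1,)](x, y, x.numel(), BLOCK_SIZE=1024)
assert torch.allclose(y, torch.cumsum(x, 0), atol=1e-3)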
43a7b7ff-eadb-490b-9acd-84f7d2a3ee0f
test_fused_chunk.py
sustcsonglin/flash-linear-attention
tests/test_fused_chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit
def attention_fwd_kernel(q, k, v, h, o, s_qh, s_qt, s_qd, s_hh, s_ht, T, scale,
                         BT: tl.constexpr, BD: tl.constexpr, NT: tl.constexpr,
                         STORE: tl.constexpr, IFCOND: tl.constexpr):
    i_bh = tl.program_id(0)
    b_h = tl.zeros([BD, BD], dtype=tl.float32)
    for i in range(0, tl.cdiv(T, BT)):
        p_q = tl.make_block_ptr(q + i_bh * s_qh, (T, BD), (s_qt, s_qd), (i * BT, 0), (BT, BD), (1, 0))
        p_k = tl.make_block_ptr(k + i_bh * s_qh, (BD, T), (s_qd, s_qt), (0, i * BT), (BD, BT), (0, 1))
        p_v = tl.make_block_ptr(v + i_bh * s_qh, (T, BD), (s_qt, s_qd), (i * BT, 0), (BT, BD), (1, 0))
        p_h = tl.make_block_ptr(h + i_bh * s_hh, (NT * BD, BD), (s_ht, s_qd), (i * BD, 0), (BD, BD), (1, 0))
        p_o = tl.make_block_ptr(o + i_bh * s_qh, (T, BD), (s_qt, s_qd), (i * BT, 0), (BT, BD), (1, 0))
        if STORE:
            tl.store(p_h, b_h.to(p_h.dtype.element_ty))
        b_q = tl.load(p_q)
        b_q = (b_q * scale).to(b_q.dtype)
        b_k = tl.load(p_k)
        b_v = tl.load(p_v)
        b_s = tl.dot(b_q, b_k, allow_tf32=False)
        b_o = tl.dot(b_s.to(b_q.dtype), b_v, allow_tf32=False)
        if IFCOND:
            if i == 0:
                b_h = tl.dot(b_k, b_v, allow_tf32=False)
            else:
                b_o += tl.dot(b_q, b_h.to(b_q.dtype), allow_tf32=False)
                b_h += tl.dot(b_k, b_v, allow_tf32=False)
        else:
            b_o += tl.dot(b_q, b_h.to(b_q.dtype), allow_tf32=False)
            b_h += tl.dot(b_k, b_v, allow_tf32=False)
        tl.store(p_o, b_o.to(p_o.dtype.element_ty))
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/tests/test_fused_chunk.py
242c3dab-c302-4bf0-96fc-8162444aedf6
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/gla/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({'BK': BK, 'BV': BV}, num_warps= num_warps) for BK in [32, 64] for BV in [64, 128] for num_warps in [2, 4, 8]], key=['BT']) @triton.jit def chunk_gla_bwd_kernel_dv(k, g, A, do, dh, dv, offsets, indices, T: tl. constexpr, H: tl.constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl. constexpr, BK: tl.constexpr, BV: tl.constexpr, USE_OFFSETS: tl. constexpr, HEAD_FIRST: tl.constexpr): i_v, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) i_b, i_h = i_bh // H, i_bh % H if USE_OFFSETS: i_tg = i_t i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices + i_t * 2 + 1).to(tl.int32) bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets + i_n + 1).to(tl.int32) T = eos - bos NT = tl.cdiv(T, BT) else: NT = tl.cdiv(T, BT) i_tg = i_b * NT + i_t bos, eos = i_b * T, i_b * T + T if HEAD_FIRST: p_A = tl.make_block_ptr(A + i_bh * T * BT, (BT, T), (1, BT), (0, i_t * BT), (BT, BT), (0, 1)) p_do = tl.make_block_ptr(do + i_bh * T * V, (T, V), (V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_dv = tl.make_block_ptr(dv + i_bh * T * V, (T, V), (V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) else: p_A = tl.make_block_ptr(A + (bos * H + i_h) * BT, (BT, T), (1, H * BT), (0, i_t * BT), (BT, BT), (0, 1)) p_do = tl.make_block_ptr(do + (bos * H + i_h) * V, (T, V), (H * V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_dv = tl.make_block_ptr(dv + (bos * H + i_h) * V, (T, V), (H * V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) b_A = tl.load(p_A, boundary_check=(0, 1)) b_A = tl.where(tl.arange(0, BT)[:, None] <= tl.arange(0, BT)[None, :], b_A, 0.0) b_do = tl.load(p_do, boundary_check=(0, 1)) b_dv = tl.dot(b_A, b_do.to(b_A.dtype), allow_tf32=False) for i_k in range(tl.cdiv(K, BK)): o_k = i_k * BK + tl.arange(0, BK) m_k = o_k < K if HEAD_FIRST: p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_gk = tl.make_block_ptr(g + i_bh * T * K, (T, K), (K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_gn = tl.max_contiguous(tl.multiple_of(g + i_bh * T * K + min( i_t * BT + BT, T) * K - K + o_k, BK), BK) p_dh = tl.make_block_ptr(dh + (i_bh * NT + i_t) * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) else: p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_gk = tl.make_block_ptr(g + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_gn = tl.max_contiguous(tl.multiple_of(g + (bos + min(i_t * BT + BT, T) - 1) * H * K + i_h * K + o_k, BK), BK) p_dh = tl.make_block_ptr(dh + (i_tg * H + i_h) * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) b_k = tl.load(p_k, boundary_check=(0, 1)) b_gk = tl.load(p_gk, boundary_check=(0, 1)) b_gn = tl.exp(tl.load(p_gn, mask=m_k, other=0)[None, :] - b_gk) b_k = (b_k * b_gn).to(b_k.dtype) b_dh = tl.load(p_dh, boundary_check=(0, 1)) b_dv += tl.dot(b_k, b_dh.to(b_k.dtype)) tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "fp32", "bf16" ], "Functionality": [ "Backpropagation", "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gla/chunk.py
80930cb4-c6ca-4869-ac5f-29e8b9906e6b
paged_attn.py
AlibabaPAI/FLASHNN
flashnn/triton_kernels/paged_attn.py
528a9301587f5fb135b25d973a87ba0a40a703a7
0
@triton.jit def _inner_paged_attn_unroll_4_kernel(q, k_cache, v_cache, stride_km, block_base_ptrs, base_offs_kv, alibi_slope, block_offs, seq_len, qkv, qk_max, exp_sum, BLOCK_SIZE: tl.constexpr, LO: tl.constexpr, HI: tl. constexpr): for block_idx in range(LO, HI, 4): offs_kv_0 = tl.load(block_base_ptrs + block_idx + 0 ) * stride_km + base_offs_kv offs_kv_1 = tl.load(block_base_ptrs + block_idx + 1 ) * stride_km + base_offs_kv offs_kv_2 = tl.load(block_base_ptrs + block_idx + 2 ) * stride_km + base_offs_kv offs_kv_3 = tl.load(block_base_ptrs + block_idx + 3 ) * stride_km + base_offs_kv k_0 = tl.load(k_cache + offs_kv_0) k_1 = tl.load(k_cache + offs_kv_1) k_2 = tl.load(k_cache + offs_kv_2) k_3 = tl.load(k_cache + offs_kv_3) v_0 = tl.load(v_cache + offs_kv_0) v_1 = tl.load(v_cache + offs_kv_1) v_2 = tl.load(v_cache + offs_kv_2) v_3 = tl.load(v_cache + offs_kv_3) _qk_0 = tl.sum((q[None, :] * k_0).to(tl.float32), axis=1) _qk_1 = tl.sum((q[None, :] * k_1).to(tl.float32), axis=1) _qk_2 = tl.sum((q[None, :] * k_2).to(tl.float32), axis=1) _qk_3 = tl.sum((q[None, :] * k_3).to(tl.float32), axis=1) if alibi_slope is not None: _qk_0 += alibi_slope * ((block_idx + 0) * BLOCK_SIZE + block_offs - seq_len + 1) _qk_1 += alibi_slope * ((block_idx + 1) * BLOCK_SIZE + block_offs - seq_len + 1) _qk_2 += alibi_slope * ((block_idx + 2) * BLOCK_SIZE + block_offs - seq_len + 1) _qk_3 += alibi_slope * ((block_idx + 3) * BLOCK_SIZE + block_offs - seq_len + 1) _qk_max = tl.maximum(tl.max(_qk_0, axis=0), qk_max) _qk_max = tl.maximum(tl.max(_qk_1, axis=0), _qk_max) _qk_max = tl.maximum(tl.max(_qk_2, axis=0), _qk_max) _qk_max = tl.maximum(tl.max(_qk_3, axis=0), _qk_max) exp_tmp = tl.exp(_qk_0 - _qk_max) + tl.exp(_qk_1 - _qk_max) + tl.exp( _qk_2 - _qk_max) + tl.exp(_qk_3 - _qk_max) _exp_sum = exp_sum * tl.exp(qk_max - _qk_max) + tl.sum(exp_tmp, axis=0) qkv_sum_tmp = tl.exp(_qk_0[:, None] - _qk_max).to(v_cache.dtype. element_ty) * v_0 + tl.exp(_qk_1[:, None] - _qk_max).to(v_cache .dtype.element_ty) * v_1 + tl.exp(_qk_2[:, None] - _qk_max).to( v_cache.dtype.element_ty) * v_2 + tl.exp(_qk_3[:, None] - _qk_max ).to(v_cache.dtype.element_ty) * v_3 qkv = (qkv * (exp_sum * tl.exp(qk_max - _qk_max)) + qkv_sum_tmp ) / _exp_sum qk_max = _qk_max exp_sum = _exp_sum return qkv, qk_max, exp_sum
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/paged_attn.py
57901932-eb13-48bb-80fb-5ac1397e137c
swiglu.py
dame-cell/Triformer
triformer/swiglu.py
0712537d576166b93fa09aa9509b2661b9ed8a68
0
@triton.jit
def swiglu_backward(grad_output_ptr, grad_e_ptr, grad_g_ptr, e_ptr, g_ptr, n_cols,
                    sigmoid_ptr, f_ptr, grad_output_stride, grad_e_stride, grad_g_stride,
                    e_stride, g_stride, sigmoid_stride, f_stride, BLOCK_SIZE: tl.constexpr):
    pid = tl.program_id(axis=0)
    col_offset = tl.arange(0, BLOCK_SIZE)
    mask = col_offset < n_cols
    grad_output_row = tl.load(grad_output_ptr + pid * grad_output_stride + col_offset, mask=mask)
    e_row = tl.load(e_ptr + pid * e_stride + col_offset, mask=mask)
    g_row = tl.load(g_ptr + pid * g_stride + col_offset, mask=mask)
    sigmoid_row = tl.load(sigmoid_ptr + pid * sigmoid_stride + col_offset, mask=mask)
    f_row = tl.load(f_ptr + pid * f_stride + col_offset, mask=mask)
    grad_g_row = grad_output_row * f_row
    grad_e_row = grad_output_row * g_row * sigmoid_row * (1.0 + e_row * (1.0 - sigmoid_row))
    tl.store(grad_e_ptr + pid * grad_e_stride + col_offset, grad_e_row, mask=mask)
    tl.store(grad_g_ptr + pid * grad_g_stride + col_offset, grad_g_row, mask=mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Activation Functions", "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/dame-cell/Triformer/blob/0712537d576166b93fa09aa9509b2661b9ed8a68/triformer/swiglu.py
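A minimal launch sketch for swiglu_backward above (hypothetical wrapper): it assumes (n_rows, n_cols) inputs and that sigmoid_e = sigmoid(e) and f = e * sigmoid(e) were saved by the forward pass.

import torch
import triton

def swiglu_bwd(grad_output, e, g, sigmoid_e, f):
    # All inputs: (n_rows, n_cols) CUDA tensors saved from the forward pass.
    n_rows, n_cols = e.shape
    grad_e = torch.empty_like(e)
    grad_g = torch.empty_like(g)
    swiglu_backward[(n_rows,)](
        grad_output, grad_e, grad_g, e, g, n_cols, sigmoid_e, f,
        grad_output.stride(0), grad_e.stride(0), grad_g.stride(0),
        e.stride(0), g.stride(0), sigmoid_e.stride(0), f.stride(0),
        BLOCK_SIZE=triton.next_power_of_2(n_cols),
    )
    return grad_e, grad_g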
ca5df2aa-d6b2-436b-ad6e-334a25e38f83
lightning_attn2_no_decay.py
OpenNLPLab/lightning-attention
lightning_attn/ops/triton/lightning_attn2_no_decay.py
d7439519541e966084eeaaf3ffd63eecc216f414
0
@triton.jit def _bwd_inter_kernel(Q, K, V, DO, DQ, DK, DV, b: tl.constexpr, h: tl. constexpr, n: tl.constexpr, d: tl.constexpr, e: tl.constexpr, BLOCK: tl .constexpr, NUM_BLOCK: tl.constexpr, CBLOCK: tl.constexpr, NUM_CBLOCK: tl.constexpr): off_bh = tl.program_id(0) off_bh % h qk_offset = off_bh * n * d v_offset = off_bh * n * e o_offset = off_bh * n * e DQ_block_ptr = DQ + qk_offset + tl.arange(0, CBLOCK)[:, None ] * d + tl.arange(0, d)[None, :] K_block_ptr = K + qk_offset + tl.arange(0, CBLOCK)[:, None ] * d + tl.arange(0, d)[None, :] V_trans_block_ptr = V + v_offset + tl.arange(0, CBLOCK)[None, : ] * e + tl.arange(0, e)[:, None] DO_block_ptr = DO + o_offset + tl.arange(0, CBLOCK)[:, None ] * e + tl.arange(0, e)[None, :] off_block1 = tl.arange(0, CBLOCK) off_block2 = tl.arange(0, CBLOCK) kv_trans = tl.zeros([e, d], dtype=tl.float32) for i in range(NUM_BLOCK): for j in range(NUM_CBLOCK): if i > 0: do = tl.load(DO_block_ptr, mask=off_block1[:, None] < n, other=0.0).to(tl.float32) dq_inter = tl.dot(do, kv_trans) dq = dq_inter + tl.load(DQ_block_ptr, mask=off_block1[:, None] < n, other=0.0) tl.store(DQ_block_ptr, dq.to(DQ_block_ptr.dtype.element_ty), mask=off_block1[:, None] < n) DQ_block_ptr += CBLOCK * d DO_block_ptr += CBLOCK * e off_block1 += CBLOCK kv_trans_current = tl.zeros([e, d], dtype=tl.float32) for j in range(NUM_CBLOCK): v_trans = tl.load(V_trans_block_ptr, mask=off_block2[None, :] < n, other=0.0).to(tl.float32) k = tl.load(K_block_ptr, mask=off_block2[:, None] < n, other=0.0 ).to(tl.float32) kv_trans_current += tl.dot(v_trans, k) K_block_ptr += CBLOCK * d V_trans_block_ptr += CBLOCK * e off_block2 += CBLOCK kv_trans += kv_trans_current m = NUM_BLOCK * BLOCK off_block1 = m + tl.arange(0, CBLOCK) off_block2 = m + tl.arange(0, CBLOCK) Q_trans_block_ptr = Q + qk_offset + m * d + tl.arange(0, CBLOCK)[None, : ] * d + tl.arange(0, d)[:, None] K_block_ptr = K + qk_offset + m * d + tl.arange(0, CBLOCK)[:, None ] * d + tl.arange(0, d)[None, :] V_trans_block_ptr = V + v_offset + m * e + tl.arange(0, CBLOCK)[None, : ] * e + tl.arange(0, e)[:, None] DK_trans_block_ptr = DK + qk_offset + m * d + tl.arange(0, CBLOCK)[None, : ] * d + tl.arange(0, d)[:, None] DV_block_ptr = DV + v_offset + m * e + tl.arange(0, CBLOCK)[:, None ] * e + tl.arange(0, e)[None, :] DO_block_ptr = DO + o_offset + m * e + tl.arange(0, CBLOCK)[:, None ] * e + tl.arange(0, e)[None, :] dkv = tl.zeros([d, e], dtype=tl.float32) for i in range(NUM_BLOCK - 1, -1, -1): for j in range(NUM_CBLOCK - 1, -1, -1): K_block_ptr -= CBLOCK * d V_trans_block_ptr -= CBLOCK * e DK_trans_block_ptr -= CBLOCK * d DV_block_ptr -= CBLOCK * e off_block1 -= CBLOCK if i < NUM_BLOCK - 1: k = tl.load(K_block_ptr, mask=off_block1[:, None] < n, other=0.0).to(tl.float32) v_trans = tl.load(V_trans_block_ptr, mask=off_block1[None, :] < n, other=0.0).to(tl.float32) dk_inter_trans = tl.dot(dkv, v_trans) dv_inter = tl.dot(k, dkv) dk_trans = dk_inter_trans + tl.load(DK_trans_block_ptr, mask=off_block1[None, :] < n, other=0.0) dv = dv_inter + tl.load(DV_block_ptr, mask=off_block1[:, None] < n, other=0.0) tl.store(DK_trans_block_ptr, dk_trans.to(DK_trans_block_ptr .dtype.element_ty), mask=off_block1[None, :] < n) tl.store(DV_block_ptr, dv.to(DV_block_ptr.dtype.element_ty), mask=off_block1[:, None] < n) dkv_current = tl.zeros([d, e], dtype=tl.float32) for j in range(NUM_CBLOCK - 1, -1, -1): DO_block_ptr -= CBLOCK * e Q_trans_block_ptr -= CBLOCK * d off_block2 -= CBLOCK do = tl.load(DO_block_ptr, mask=off_block2[:, None] < n, other=0.0 ).to(tl.float32) q_trans = tl.load(Q_trans_block_ptr, mask=off_block2[None, :] < n, other=0.0).to(tl.float32) dkv_current += tl.dot(q_trans, do) dkv += dkv_current
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/OpenNLPLab/lightning-attention/blob/d7439519541e966084eeaaf3ffd63eecc216f414/lightning_attn/ops/triton/lightning_attn2_no_decay.py
c521d63c-9555-41f2-9185-de188587390f
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/linear_attn/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit def chunk_linear_attn_bwd_kernel_dh(q, do, dh, s_k_h, s_k_t, s_k_d, s_v_h, s_v_t, s_v_d, s_h_h, s_h_t, scale, T: tl.constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr, NT: tl.constexpr): i_k, i_v, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) b_dh = tl.zeros([BK, BV], dtype=tl.float32) for i_t in range(NT - 1, -1, -1): p_q = tl.make_block_ptr(q + i_bh * s_k_h, (K, T), (s_k_d, s_k_t), ( i_k * BK, i_t * BT), (BK, BT), (0, 1)) p_do = tl.make_block_ptr(do + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_dh = tl.make_block_ptr(dh + i_bh * s_h_h + i_t * K * V, (K, V), ( s_h_t, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) tl.store(p_dh, b_dh.to(p_dh.dtype.element_ty), boundary_check=(0, 1)) b_q = tl.load(p_q, boundary_check=(0, 1)) b_q = (b_q * scale).to(b_q.dtype) b_do = tl.load(p_do, boundary_check=(0, 1)) b_dh += tl.dot(b_q, b_do.to(b_q.dtype), allow_tf32=False)
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Attention Mechanisms" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/linear_attn/chunk.py
89f28703-00f8-4324-8d3c-5d383c132016
normalization.py
ai-compiler-study/triton-kernels
triton_kernels/kernels/normalization.py
2308e5e9d965059fe2d19b4d535debac4970b69e
0
@triton.jit def _layer_norm_modulation_fwd(X, Y, W, B, Mean, Rstd, stride, seq_len, N, eps, BLOCK_SIZE: tl.constexpr): row = tl.program_id(0) batch_idx = row // seq_len Y += row * stride X += row * stride W += batch_idx * stride B += batch_idx * stride cols = tl.arange(0, BLOCK_SIZE) mask = cols < N x = tl.load(X + cols, mask=mask, other=0.0) w = tl.load(W + cols, mask=mask, other=0.0) b = tl.load(B + cols, mask=mask, other=0.0) mean = tl.sum(x, axis=0) / N var = tl.sum(x * x, axis=0) / N - mean * mean rstd = tl.rsqrt(var + eps) tl.store(Mean + row, mean) tl.store(Rstd + row, rstd) y = (x - mean) * rstd * (1 + w) + b tl.store(Y + cols, y, mask=mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization", "Elementwise Operations" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/ai-compiler-study/triton-kernels/blob/2308e5e9d965059fe2d19b4d535debac4970b69e/triton_kernels/kernels/normalization.py
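The _layer_norm_modulation_fwd kernel above assigns one program per (batch, position) row and applies a per-batch scale and shift after normalization. A minimal host-side launch sketch follows; the wrapper name, the tensor shapes (x of shape (batch, seq_len, N), w and b of shape (batch, N), all contiguous) and the eps default are my assumptions, not part of the repository.

import torch
import triton

def layer_norm_modulation_fwd(x, w, b, eps=1e-6):
    # x: (batch, seq_len, N); w, b: (batch, N); all assumed contiguous
    batch, seq_len, N = x.shape
    y = torch.empty_like(x)
    mean = torch.empty(batch * seq_len, device=x.device, dtype=torch.float32)
    rstd = torch.empty(batch * seq_len, device=x.device, dtype=torch.float32)
    BLOCK_SIZE = triton.next_power_of_2(N)  # one block spans a whole row
    grid = (batch * seq_len,)               # one program per row
    # stride == N works here because both the rows of x/y and the per-batch
    # rows of w/b are N elements apart when everything is contiguous
    _layer_norm_modulation_fwd[grid](x, y, w, b, mean, rstd,
                                     N, seq_len, N, eps, BLOCK_SIZE=BLOCK_SIZE)
    return y, mean, rstd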
50f5f29f-f284-422f-95ef-6638cd047f96
avgpool.py
neuro-ml/kerops
kerops/kernels/avgpool.py
735336775e825d5cb06b8850d25423661b12d1ac
0
@triton.jit def _AvgPoolCeilStats_cl3d_impl(X_ptr, Out_ptr, Mean_ptr, Sqmean_ptr, h_input, w_input, d_input, d_output, batch_stride_input, H_stride_input, W_stride_input, batch_stride_output, H_stride_output, W_stride_output, numel_no_channels_output, num_channels: tl.constexpr, almost_half_d: tl .constexpr): batch = tl.program_id(0) H = tl.program_id(1) W = tl.program_id(2) Out_ptr += (batch * batch_stride_output + H * H_stride_output + W * W_stride_output) output = tl.zeros([almost_half_d, num_channels], dtype=tl.float32) pair_offset = tl.arange(0, 2) channels_offset = tl.arange(0, num_channels) d_offset = tl.arange(0, almost_half_d) offset = d_offset[:, None, None] * (2 * num_channels) + channels_offset[ None, :, None] + pair_offset[None, None, :] * num_channels output_offset = d_offset[:, None] * num_channels + channels_offset[None, :] mask_input = offset < d_input * num_channels output_mask = output_offset < d_output * num_channels norm_step = tl.sum(mask_input.to(tl.float32), axis=2).to(tl.float32) norm_step = tl.where(norm_step != 0, norm_step, 1.0) num_norm = 1 Temp_ptr = (X_ptr + batch * batch_stride_input + 2 * H * H_stride_input + 2 * W * W_stride_input) x = tl.load(Temp_ptr + offset, mask=mask_input, other=0.0).to(tl.float32) x = tl.sum(x, axis=2) output += x W_skip = False if 2 * (W + 1) > w_input: W_skip = True else: Temp_ptr = (X_ptr + batch * batch_stride_input + 2 * H * H_stride_input + (2 * W + 1) * W_stride_input) x = tl.load(Temp_ptr + offset, mask=mask_input, other=0.0).to(tl. float32) x = tl.sum(x, axis=2) output += x num_norm += 1 H_skip = False if 2 * (H + 1) > h_input: H_skip = True else: Temp_ptr = X_ptr + batch * batch_stride_input + (2 * H + 1 ) * H_stride_input + 2 * W * W_stride_input x = tl.load(Temp_ptr + offset, mask=mask_input, other=0.0).to(tl. float32) x = tl.sum(x, axis=2) output += x num_norm += 1 if not H_skip and not W_skip: Temp_ptr = X_ptr + batch * batch_stride_input + (2 * H + 1 ) * H_stride_input + (2 * W + 1) * W_stride_input x = tl.load(Temp_ptr + offset, mask=mask_input, other=0.0).to(tl. float32) x = tl.sum(x, axis=2) output += x num_norm += 1 output = output / (norm_step * num_norm) tl.store(Out_ptr + output_offset, output, mask=output_mask) output = tl.trans(output) mean = tl.sum(output, axis=1) / numel_no_channels_output sqmean = tl.sum(output * output, axis=1) / numel_no_channels_output tl.atomic_add(Mean_ptr + channels_offset, mean) tl.atomic_add(Sqmean_ptr + channels_offset, sqmean)
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization", "Elementwise Operations" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/neuro-ml/kerops/blob/735336775e825d5cb06b8850d25423661b12d1ac/kerops/kernels/avgpool.py
44cf831b-a88e-40b6-914b-3c52caca46e3
softmax.py
shaRk-033/learn
learn_triton/softmax.py
3108e580bf00448a10fd41e3885fa952b46439ab
0
@triton.jit def softmax_kernel(inp_ptr, out_ptr, b, t, c, BLOCK_SIZE: tl.constexpr): bid = tl.program_id(0) tid = tl.program_id(1) if bid >= b or tid >= t: return cols = tl.arange(0, BLOCK_SIZE) offset = bid * t * c + tid * c + cols mask = cols < c x = tl.load(inp_ptr + offset, mask=mask, other=float('-inf')) maxx = tl.max(x, axis=0) x = x - maxx expx = tl.exp(x) sumx = tl.sum(expx, axis=0) outx = expx / sumx tl.store(out_ptr + offset, outx, mask=mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Softmax", "Elementwise Operations" ], "Memory Access Pattern": [ "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "CC0" ]
https://github.com/shaRk-033/learn/blob/3108e580bf00448a10fd41e3885fa952b46439ab/learn_triton/softmax.py
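softmax_kernel above reduces over the channel dimension of a (b, t, c) tensor with one program per (batch, time) pair. A possible wrapper is sketched below; the function name and the assumption of a contiguous input are mine, not the repository's.

import torch
import triton

def softmax(x):
    # x: (b, t, c), assumed contiguous; softmax over the last dimension
    b, t, c = x.shape
    out = torch.empty_like(x)
    BLOCK_SIZE = triton.next_power_of_2(c)  # the kernel masks cols >= c
    grid = (b, t)  # matches tl.program_id(0) == batch, tl.program_id(1) == time
    softmax_kernel[grid](x, out, b, t, c, BLOCK_SIZE=BLOCK_SIZE)
    return out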
f4086042-eb57-44c2-8406-648d389752ae
matmul.py
sustcsonglin/flash-linear-attention
fla/ops/utils/matmul.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit def leaky_relu(x): return tl.where(x >= 0, x, 0.01 * x)
{ "Data Type": [ "fp32" ], "Functionality": [ "Activation Functions", "Elementwise Operations" ], "Memory Access Pattern": [ "Coalesced" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "Low Latency" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/utils/matmul.py
9d977a77-0a9d-44fe-8a4a-f1679ef7d33d
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/abc/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit def chunk_abc_bwd_kernel_V(k, v, z, h, A, do, dh, dq, dk, dv, dA, s_k_h, s_k_t, s_k_d, s_v_h, s_v_t, s_v_d, s_h_h, s_h_t, s_h_d, scale, T: tl. constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl. constexpr, BV: tl.constexpr): i_k, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) i_p = tl.maximum(i_t * BT - 1, 0) n_bh = tl.num_programs(2) p_k = tl.make_block_ptr(k + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_zc = tl.make_block_ptr(z + i_bh * s_k_h, (T * K,), (s_k_d,), ((i_t * BT + BT - 1) * K + i_k * BK,), (BK,), (0,)) p_A = tl.make_block_ptr(A + i_bh * T * BT, (BT, T), (1, BT), (0, i_t * BT), (BT, BT), (0, 1)) b_zc = tl.load(p_zc, boundary_check=(0,)) b_k = tl.load(p_k, boundary_check=(0, 1)) b_k = tl.exp(b_k - b_zc[None, :]).to(b_k.dtype) b_A = tl.load(p_A, boundary_check=(0, 1)) b_dq = tl.zeros([BT, BK], dtype=tl.float32) b_dk = tl.zeros([BT, BK], dtype=tl.float32) b_dA = tl.zeros([BT, BT], dtype=tl.float32) for i_v in range(tl.cdiv(V, BV)): p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), ( i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_h = tl.make_block_ptr(h + i_bh * s_h_h + i_t * V * K, (V, K), ( s_h_d, s_h_t), (i_v * BV, i_k * BK), (BV, BK), (0, 1)) p_do = tl.make_block_ptr(do + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_dh = tl.make_block_ptr(dh + i_bh * s_h_h + i_t * K * V, (K, V), ( s_h_t, s_h_d), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) p_dv = tl.make_block_ptr(dv + (i_k * n_bh + i_bh) * s_v_h, (T, V), (s_v_t, s_v_d), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) b_v = tl.load(p_v, boundary_check=(0, 1)) b_h = tl.load(p_h, boundary_check=(0, 1)) b_do = tl.load(p_do, boundary_check=(0, 1)) b_dh = tl.load(p_dh, boundary_check=(0, 1)) b_dv = tl.dot(b_k, b_dh, allow_tf32=False) if i_k == 0: b_dv += tl.dot(b_A, b_do, allow_tf32=False) b_do = (b_do * scale).to(b_do.dtype) tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), boundary_check=(0, 1)) b_dA += tl.dot(b_do, tl.trans(b_v), allow_tf32=False) b_dq += tl.dot(b_do, b_h, allow_tf32=False) b_dk += tl.dot(b_v, tl.trans(b_dh), allow_tf32=False) p_z = tl.make_block_ptr(z + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_zp = tl.make_block_ptr(z + i_bh * s_k_h, (T * K,), (s_k_d,), (i_p * K + i_k * BK,), (BK,), (0,)) b_zp = tl.load(p_zp, boundary_check=(0,)) b_z = tl.load(p_z, boundary_check=(0, 1)) b_z = tl.exp(b_zp[None, :] - b_z) b_dq = b_dq * b_z b_dk = b_dk * b_k p_dq = tl.make_block_ptr(dq + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), ( i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_dk = tl.make_block_ptr(dk + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), ( i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_dA = tl.make_block_ptr(dA + i_bh * T * BT, (T, BT), (BT, 1), (i_t * BT, 0), (BT, BT), (1, 0)) tl.store(p_dq, b_dq.to(p_dq.dtype.element_ty), boundary_check=(0, 1)) tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1)) o_i = tl.arange(0, BT) m_s = o_i[:, None] >= o_i[None, :] b_dA = tl.where(m_s, b_dA, 0.0).to(b_k.dtype) if i_k == 0: tl.store(p_dA, b_dA.to(p_dA.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Attention Mechanisms", "Matrix Multiplication" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/abc/chunk.py
dd0ac36c-0d7f-4fec-8eaf-5ab99edcd368
quant_triton.py
CompendiumLabs/ziggy
ziggy/backends/quant_triton.py
bd12fe50ca3475743f62ae26d4c184108e441e03
0
@triton.jit def matmul_float_kernel(A, B, C, N, M, K, stride_an, stride_ak, stride_bk, stride_bm, stride_cn, stride_cm, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_K: tl.constexpr): dtype = C.dtype.element_ty pid_n = tl.program_id(0) pid_m = tl.program_id(1) rn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) rm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) rk = tl.arange(0, BLOCK_SIZE_K) mask_a = rn[:, None] < N mask_b = rm[None, :] < M mask_c = mask_a & mask_b A1 = A + (rn[:, None] * stride_an + rk[None, :] * stride_ak) B1 = B + (rk[:, None] * stride_bk + rm[None, :] * stride_bm) C1 = C + (rn[:, None] * stride_cn + rm[None, :] * stride_cm) acc = tl.zeros((BLOCK_SIZE_N, BLOCK_SIZE_M), dtype=dtype) for k in range(0, K, BLOCK_SIZE_K): a = tl.load(A1, mask=mask_a) b = tl.load(B1, mask=mask_b) acc += tl.dot(a, b, out_dtype=dtype) A1 += BLOCK_SIZE_K * stride_ak B1 += BLOCK_SIZE_K * stride_bk tl.store(C1, acc, mask=mask_c)
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Tiled", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "MIT" ]
https://github.com/CompendiumLabs/ziggy/blob/bd12fe50ca3475743f62ae26d4c184108e441e03/ziggy/backends/quant_triton.py
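A hypothetical launch wrapper for matmul_float_kernel is sketched below; the block sizes are illustrative, not tuned. Note that the kernel masks only the N and M dimensions, so this sketch assumes K is a multiple of BLOCK_SIZE_K.

import torch
import triton

def matmul_float(a, b, block_n=32, block_m=32, block_k=32):
    # a: (N, K), b: (K, M) -> c: (N, M), following the kernel's N/M/K naming
    N, K = a.shape
    _, M = b.shape
    assert K % block_k == 0, "the kernel does not mask the K dimension"
    c = torch.empty((N, M), device=a.device, dtype=a.dtype)
    grid = (triton.cdiv(N, block_n), triton.cdiv(M, block_m))
    matmul_float_kernel[grid](
        a, b, c, N, M, K,
        a.stride(0), a.stride(1), b.stride(0), b.stride(1), c.stride(0), c.stride(1),
        BLOCK_SIZE_N=block_n, BLOCK_SIZE_M=block_m, BLOCK_SIZE_K=block_k,
    )
    return c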
f2af12dc-f012-439b-bcf8-16486d668412
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/abc/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit def chunk_abc_fwd_kernel_intra_V(q, k, z, A, s_k_h, s_k_t, s_k_d, scale, T: tl.constexpr, K: tl.constexpr, BT: tl.constexpr, BC: tl.constexpr, BK: tl.constexpr, NC: tl.constexpr): i_k, i_c, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) i_t, i_i, i_j = i_c // (NC * NC), i_c % (NC * NC) // NC, i_c % (NC * NC ) % NC n_bh = tl.num_programs(2) if i_i > i_j: p_q = tl.make_block_ptr(q + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), ( i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) p_k = tl.make_block_ptr(k + i_bh * s_k_h, (K, T), (s_k_d, s_k_t), ( i_k * BK, i_t * BT + i_j * BC), (BK, BC), (0, 1)) p_z = tl.make_block_ptr(z + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), ( i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) p_A = tl.make_block_ptr(A + (i_k * n_bh + i_bh) * T * BT, (T, BT), (BT, 1), (i_t * BT + i_i * BC, i_j * BC), (BC, BC), (1, 0)) p_zn = tl.make_block_ptr(z + i_bh * s_k_h, (T * K,), (s_k_d,), (( i_t * BT + i_i * BC) * K + i_k * BK,), (BK,), (0,)) b_zn = tl.load(p_zn, boundary_check=(0,)) b_q = tl.load(p_q, boundary_check=(0, 1)) b_z = tl.load(p_z, boundary_check=(0, 1)) b_q = (b_q * tl.exp(b_zn[None, :] - b_z) * scale).to(b_q.dtype) b_k = tl.load(p_k, boundary_check=(0, 1)) b_k = tl.exp(b_k - b_zn[:, None]).to(b_k.dtype) b_A = tl.dot(b_q, b_k, allow_tf32=False) tl.store(p_A, b_A.to(A.dtype.element_ty), boundary_check=(0, 1)) elif i_i == i_j: p_q = tl.make_block_ptr(q + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), ( i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) p_k = tl.make_block_ptr(k + i_bh * s_k_h, (T * K,), (s_k_d,), ((i_t * BT + i_j * BC) * K + i_k * BK,), (BK,), (0,)) p_z = tl.make_block_ptr(z + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), ( i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) b_q = tl.load(p_q, boundary_check=(0, 1)) b_z = tl.load(p_z, boundary_check=(0, 1)) o_i = tl.arange(0, BC) o_A = (i_bh + i_k * n_bh) * T * BT + (i_t * BT + i_i * BC + tl. arange(0, BC)) * BT + i_j * BC m_A = i_t * BT + i_i * BC + tl.arange(0, BC) < T for j in range(0, BC): b_k = tl.load(p_k, boundary_check=(0,)).to(tl.float32) b_A = tl.sum(b_q * tl.exp(b_k[None, :] - b_z) * scale, 1) b_A = tl.where(o_i >= j, b_A, 0.0) tl.store(A + o_A + j, b_A.to(b_q.dtype), mask=m_A) p_k = tl.advance(p_k, (K,))
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Matrix Multiplication" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/abc/chunk.py
e8622bcb-db14-44d9-adcb-257ca07eab7d
scaled_quant.py
drisspg/transformer_nuggets
transformer_nuggets/fp8/scaled_quant.py
a4c66bbeebaa479ad8b6ed82d7efbafa41b17260
0
@triton.jit def dynamic_scaled_cast(inpt_ptr: torch.Tensor, output_ptr: torch.Tensor, abs_max_ptr: torch.Tensor, spin_lock: torch.Tensor, numel: int, XBLOCK: tl.constexpr, float8_dtype: tl.constexpr, max_val: tl.constexpr): """Quantize tensor to fp8 using current global absmax""" n_blocks = tl.num_programs(0) offset = tl.program_id(0) * XBLOCK index = offset + tl.arange(0, XBLOCK)[:] index = tl.max_contiguous(tl.multiple_of(index, XBLOCK), XBLOCK) mask = index < numel inpt = tl.load(inpt_ptr + index, mask=mask) block_max = tl.max(tl.abs(inpt)) tl.atomic_max(abs_max_ptr, block_max) tl.atomic_add(spin_lock, 1, sem='release') while tl.load(spin_lock, volatile=True) < n_blocks: pass scale = max_val / tl.clamp(tl.load(abs_max_ptr), -1000000000000.0, float('inf')) scaled_inpt = inpt * scale scaled_inpt = tl.clamp(scaled_inpt, -1 * max_val, max_val) tl.store(output_ptr + index, scaled_inpt.to(float8_dtype), mask=mask)
{ "Data Type": [ "int8" ], "Functionality": [ "Quantization" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "Memory-Bound" ] }
[ "BSD" ]
https://github.com/drisspg/transformer_nuggets/blob/a4c66bbeebaa479ad8b6ed82d7efbafa41b17260/transformer_nuggets/fp8/scaled_quant.py
45e590dd-7a3c-444a-bf55-6af49405bf85
k_fused_matmul_fw.py
cpuhrsch/torchfused
torchfused/triton/k_fused_matmul_fw.py
6c40ed160dcecbe7825f268f7c86bccd359e0ebf
0
@triton.autotune(configs=[triton.Config({'BLOCK_ROW': 16, 'BLOCK_COL': 16}, num_stages=5, num_warps=1), triton.Config({'BLOCK_ROW': 32, 'BLOCK_COL': 32}, num_stages=5, num_warps=1), triton.Config({'BLOCK_ROW': 64, 'BLOCK_COL': 32}, num_stages=5, num_warps=2), triton.Config({ 'BLOCK_ROW': 32, 'BLOCK_COL': 64}, num_stages=5, num_warps=2), triton. Config({'BLOCK_ROW': 128, 'BLOCK_COL': 64}, num_stages=4, num_warps=4), triton.Config({'BLOCK_ROW': 64, 'BLOCK_COL': 128}, num_stages=4, num_warps=4), triton.Config({'BLOCK_ROW': 128, 'BLOCK_COL': 128}, num_stages=4, num_warps=4)], key=['M', 'N', 'K']) @triton.jit def kernel_fma(OUT, ACT_INPUTS, INPUT, WEIGHT, BIAS, M, N, K, stride_om, stride_im, stride_wn, stride_wk, **META): """ Kernel for computing Out = activation(A x W + C) - Input has shape (M, K) - Weight has shape (K, N) - Bias has shape (N,) - Output has shape (M, N) - ActInputs (optional) has shape (M, N) 'ActInputs' optionally saves the A x W + C intermediate for backward computations This kernel will consolidate over K """ BLOCK_M, GROUP_M = META['BLOCK_ROW'], META['GROUP_ROW'] BLOCK_N, BLOCK_K = META['BLOCK_COL'], META['BLOCK_K'] pid = tl.program_id(axis=0) num_pid_m = tl.cdiv(M, BLOCK_M) num_pid_n = tl.cdiv(N, BLOCK_N) num_pid_in_group = GROUP_M * num_pid_n group_id = pid // num_pid_in_group first_pid_m = group_id * GROUP_M GROUP_M = min(num_pid_m - first_pid_m, GROUP_M) pid_m = first_pid_m + pid % GROUP_M pid_n = pid % num_pid_in_group // GROUP_M rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N) rk = tl.arange(0, BLOCK_K) input_ptrs = INPUT + rm[:, None] * stride_im + rk[None, :] weight_ptrs = WEIGHT + rk[:, None] * stride_wk + rn[None, :] * stride_wn acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32) if META['BIAS']: bias = tl.load(BIAS + rn, mask=rn < N, other=0.0).to(tl.float32) acc += bias[None, :] for _ in range(K, 0, -BLOCK_K): a = tl.load(input_ptrs, mask=(rk[None, :] < K) & (rm[:, None] < M), other=0.0) w = tl.load(weight_ptrs, mask=(rk[:, None] < K) & (rn[None, :] < N), other=0.0) acc += tl.dot(a, w).to(tl.float32) input_ptrs += BLOCK_K weight_ptrs += BLOCK_K * stride_wk if META['SAVE_ACT_INPUTS']: act_in_ptrs = ACT_INPUTS + rm[:, None] * stride_om + rn[None, :] tl.store(act_in_ptrs, acc, mask=(rm[:, None] < M) & (rn[None, :] < N)) if META['ACTIVATION']: acc = META['ACTIVATION'](acc) out_ptrs = OUT + rm[:, None] * stride_om + rn[None, :] tl.store(out_ptrs, acc, mask=(rm[:, None] < M) & (rn[None, :] < N))
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Blocked Access", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "BSD" ]
https://github.com/cpuhrsch/torchfused/blob/6c40ed160dcecbe7825f268f7c86bccd359e0ebf/torchfused/triton/k_fused_matmul_fw.py
f7ae9f93-489e-4fd5-b45e-db9d45f58144
quant_triton.py
CompendiumLabs/ziggy
ziggy/backends/quant_triton.py
bd12fe50ca3475743f62ae26d4c184108e441e03
0
@triton.jit def matmul_quant_kernel(A, B, C, N, M, K, K1, stride_an, stride_ak, stride_bk, stride_bm, stride_cn, stride_cm, scale, zero_point, BITS: tl .constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, BLOCK_SIZE_K1: tl.constexpr): dtype = C.dtype.element_ty zero_point_ty = tl.full((), zero_point, dtype=dtype) scale_ty = tl.full((), scale, dtype=dtype) QFACT = 8 // BITS QMASK = (1 << BITS) - 1 QMASK_INT = tl.full((), QMASK, dtype=tl.uint8) pid_n = tl.program_id(0) pid_m = tl.program_id(1) rn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) rm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) rk = tl.arange(0, BLOCK_SIZE_K) rk1, rq1 = rk // QFACT, rk % QFACT a_shift = BITS * rq1 mask_a = rn[:, None] < N mask_b = rm[None, :] < M mask_c = mask_a & mask_b A1 = A + (rn[:, None] * stride_an + rk1[None, :] * stride_ak) B1 = B + (rk[:, None] * stride_bk + rm[None, :] * stride_bm) C1 = C + (rn[:, None] * stride_cn + rm[None, :] * stride_cm) acc = tl.zeros((BLOCK_SIZE_N, BLOCK_SIZE_M), dtype=dtype) for k in range(0, K, BLOCK_SIZE_K): aq = tl.load(A1, mask=mask_a) b = tl.load(B1, mask=mask_b) ai = aq >> a_shift & QMASK_INT a = ai.to(dtype) - zero_point_ty b1 = b.to(dtype) acc += tl.dot(a, b1, out_dtype=dtype) A1 += BLOCK_SIZE_K1 * stride_ak B1 += BLOCK_SIZE_K * stride_bk acc *= scale_ty tl.store(C1, acc, mask=mask_c)
{ "Data Type": [ "fp32", "int8", "uint8" ], "Functionality": [ "Matrix Multiplication", "Quantization" ], "Memory Access Pattern": [ "Blocked Access", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "Memory-Bound" ] }
[ "MIT" ]
https://github.com/CompendiumLabs/ziggy/blob/bd12fe50ca3475743f62ae26d4c184108e441e03/ziggy/backends/quant_triton.py
4f055ba2-8e8d-4b65-989b-b19e4874a19b
06-fused-attention.py
triton-lang/triton
python/tutorials/06-fused-attention.py
a2b398e0bb1b120f31cf386d6ae3261c3ab84207
0
@triton.jit def _attn_bwd_preprocess(O, DO, Delta, Z, H, N_CTX, BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr): off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M) off_hz = tl.program_id(1) off_n = tl.arange(0, HEAD_DIM) o = tl.load(O + off_hz * HEAD_DIM * N_CTX + off_m[:, None] * HEAD_DIM + off_n[None, :]) do = tl.load(DO + off_hz * HEAD_DIM * N_CTX + off_m[:, None] * HEAD_DIM + off_n[None, :]).to(tl.float32) delta = tl.sum(o * do, axis=1) tl.store(Delta + off_hz * N_CTX + off_m, delta)
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Blocked Access", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/triton-lang/triton/blob/a2b398e0bb1b120f31cf386d6ae3261c3ab84207/python/tutorials/06-fused-attention.py
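The preprocessing kernel above only computes the row-wise dot product Delta[m] = sum_d O[m, d] * dO[m, d], which the attention backward pass reuses. The launch below is my own sketch, not the tutorial's driver; it assumes O and dO are laid out as (Z * H, N_CTX, HEAD_DIM) contiguous, that N_CTX is a multiple of BLOCK_M (the kernel loads without a sequence mask), and that HEAD_DIM is a power of two (tl.arange requirement).

import torch
import triton

def attn_bwd_preprocess(o, do, BLOCK_M=64):
    # o, do: (Z * H, N_CTX, HEAD_DIM), contiguous; returns Delta of shape (Z * H, N_CTX)
    ZH, N_CTX, HEAD_DIM = o.shape
    assert N_CTX % BLOCK_M == 0
    delta = torch.empty((ZH, N_CTX), device=o.device, dtype=torch.float32)
    grid = (N_CTX // BLOCK_M, ZH)  # program_id(0): row block, program_id(1): (batch, head)
    # Z and H are not referenced inside the kernel body shown above, so any
    # consistent pair of values can be passed here
    _attn_bwd_preprocess[grid](o, do, delta, ZH, 1, N_CTX,
                               BLOCK_M=BLOCK_M, HEAD_DIM=HEAD_DIM)
    # sanity check against the equivalent PyTorch reduction
    assert torch.allclose(delta, (o.float() * do.float()).sum(-1), atol=1e-3)
    return delta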
b076a099-4288-404c-911a-b081727d520c
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/rwkv6/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({}, num_warps=2), triton.Config({}, num_warps=4), triton.Config({}, num_warps=8)], key=['BC', 'BK']) @triton.jit def chunk_rwkv6_fwd_A_kernel_intra_sub_intra_split(q, k, gi, ge, u, A, offsets, indices, scale, B: tl.constexpr, T: tl.constexpr, H: tl. constexpr, K: tl.constexpr, BT: tl.constexpr, BC: tl.constexpr, BK: tl. constexpr, NC: tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl. constexpr): i_k, i_tc, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) i_b, i_h = i_bh // H, i_bh % H i_t, i_i = i_tc // NC, i_tc % NC i_j = i_i if USE_OFFSETS: i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices + i_t * 2 + 1).to(tl.int32) bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets + i_n + 1).to(tl.int32) all = T T = eos - bos else: bos, eos = i_b * T, i_b * T + T all = B * T if i_t * BT + i_i * BC >= T: return o_i = tl.arange(0, BC) o_k = i_k * BK + tl.arange(0, BK) m_k = o_k < K m_A = i_t * BT + i_i * BC + tl.arange(0, BC) < T if HEAD_FIRST: o_A = (i_k * B * H + i_bh) * T * BC + (i_t * BT + i_i * BC + tl. arange(0, BC)) * BC p_q = tl.make_block_ptr(q + i_bh * T * K, (T, K), (K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) p_g = tl.make_block_ptr(ge + i_bh * T * K, (T, K), (K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) p_qj = tl.max_contiguous(tl.multiple_of(q + (i_bh * T + i_t * BT + i_j * BC) * K + o_k, BK), BK) p_kj = tl.max_contiguous(tl.multiple_of(k + (i_bh * T + i_t * BT + i_j * BC) * K + o_k, BK), BK) p_gk = tl.max_contiguous(tl.multiple_of(gi + (i_bh * T + i_t * BT + i_j * BC) * K + o_k, BK), BK) else: o_A = (i_k * all + bos + i_t * BT + i_i * BC + tl.arange(0, BC) ) * H * BC + i_h * BC p_q = tl.make_block_ptr(q + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) p_g = tl.make_block_ptr(ge + (bos * H + i_h) * K, (T, K), (H * K, 1 ), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) p_qj = tl.max_contiguous(tl.multiple_of(q + (bos + i_t * BT + i_j * BC) * H * K + i_h * K + o_k, BK), BK) p_kj = tl.max_contiguous(tl.multiple_of(k + (bos + i_t * BT + i_j * BC) * H * K + i_h * K + o_k, BK), BK) p_gk = tl.max_contiguous(tl.multiple_of(gi + (bos + i_t * BT + i_j * BC) * H * K + i_h * K + o_k, BK), BK) b_q = tl.load(p_q, boundary_check=(0, 1)) b_g = tl.load(p_g, boundary_check=(0, 1)) p_u = tl.make_block_ptr(u + i_h * K, (K,), (1,), i_k * BK, (BK,), (0,)) b_u = tl.load(p_u, boundary_check=(0,)) for j in range(0, min(BC, T - i_t * BT - i_i * BC)): b_qj = tl.load(p_qj, mask=m_k, other=0).to(tl.float32) b_kj = tl.load(p_kj, mask=m_k, other=0).to(tl.float32) b_gk = tl.load(p_gk, mask=m_k, other=0).to(tl.float32) b_A = tl.sum(b_q * b_kj[None, :] * tl.exp(b_g - b_gk[None, :]), 1) b_A = tl.where(o_i > j, b_A * scale, 0.0) b_A = tl.where(o_i != j, b_A, tl.sum(b_qj * b_kj * b_u * scale)) tl.store(A + o_A + j, b_A, mask=m_A) p_qj += K if HEAD_FIRST else H * K p_kj += K if HEAD_FIRST else H * K p_gk += K if HEAD_FIRST else H * K
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Coalesced", "Blocked Access" ], "Parallelization Strategy": [ "Cooperative Groups" ], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/rwkv6/chunk.py
a0025e28-0746-4d9e-bb24-18053ce379b0
z_order.py
Kitsunetic/space-filling-pytorch
space_filling_pytorch/functional/z_order.py
0de955ad1036973ee7506c5a0124c208acec722d
0
@triton.jit def _calculate_zorder(fx, fy, fz, space_size): x = ((fx + 1) / 2 * space_size).to(tl.int64) y = ((fy + 1) / 2 * space_size).to(tl.int64) z = ((fz + 1) / 2 * space_size).to(tl.int64) x = tl.minimum(tl.maximum(x, 0), space_size - 1) y = tl.minimum(tl.maximum(y, 0), space_size - 1) z = tl.minimum(tl.maximum(z, 0), space_size - 1) ret = 0 for i in tl.static_range(0, 16): q = 1 << i ret |= (x & q) << 2 * i + 2 ret |= (y & q) << 2 * i + 1 ret |= (z & q) << 2 * i + 0 return ret
{ "Data Type": [ "fp32" ], "Functionality": [], "Memory Access Pattern": [ "Transposed Access" ], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/Kitsunetic/space-filling-pytorch/blob/0de955ad1036973ee7506c5a0124c208acec722d/space_filling_pytorch/functional/z_order.py
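_calculate_zorder quantizes coordinates from [-1, 1] onto an integer grid and interleaves the bits of x, y and z into a Morton (Z-order) key, using 16 bits per axis. The pure-Python reference below restates the same bit interleaving for clarity; it is my own sketch, not code from the repository.

def zorder_reference(fx, fy, fz, space_size):
    # quantize from [-1, 1] to [0, space_size - 1], mirroring the kernel
    def quantize(f):
        v = int((f + 1) / 2 * space_size)
        return min(max(v, 0), space_size - 1)

    x, y, z = quantize(fx), quantize(fy), quantize(fz)
    key = 0
    for i in range(16):                    # 16 bits per axis -> up to a 48-bit key
        q = 1 << i
        key |= (x & q) << (2 * i + 2)      # bit i of x lands at position 3*i + 2
        key |= (y & q) << (2 * i + 1)      # bit i of y lands at position 3*i + 1
        key |= (z & q) << (2 * i)          # bit i of z lands at position 3*i
    return key

# example: the corner (-1, -1, -1) quantizes to cell (0, 0, 0) and key 0
# zorder_reference(-1.0, -1.0, -1.0, 1024) == 0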
834e54f1-1ce6-424e-a37a-b8bc89efdeec
cross_entropy.py
ardywibowo/triton-mode
kernels/cross_entropy.py
5cd773ec95e25e23c6b75e312c7a9a1c6eb650b1
0
@triton.jit def triton_cross_entropy_forward(input_ptr, input_stride, target_ptr, target_stride, loss_output_ptr, loss_output_stride, num_classes, num_valid_targets, ignore_label, smoothing_factor: tl.constexpr, reduction_mode: tl.constexpr, BLOCK_SIZE: tl.constexpr): row_id = tl.program_id(0).to(tl.int64) target_ptr += row_id * target_stride target_label = tl.load(target_ptr) input_ptr += row_id * input_stride if target_label == ignore_label: for i in range(0, num_classes, BLOCK_SIZE): input_offsets = i + tl.arange(0, BLOCK_SIZE) tl.store(input_ptr + input_offsets, 0.0, mask=input_offsets < num_classes) return loss_output_ptr += row_id * loss_output_stride max_val = float('-inf') normalization_factor = 0.0 target_input_val = tl.load(input_ptr + target_label) smoothing_sum = 0.0 epsilon = smoothing_factor / num_classes for i in range(0, num_classes, BLOCK_SIZE): input_offsets = i + tl.arange(0, BLOCK_SIZE) input_block = tl.load(input_ptr + input_offsets, mask=input_offsets < num_classes, other=float('-inf')) block_max = tl.max(input_block) if smoothing_factor > 0: smoothing_sum += tl.sum(tl.where(input_offsets < num_classes, - epsilon * input_block, 0.0)) new_max = tl.maximum(max_val, block_max) normalization_factor = normalization_factor * tl.exp(max_val - new_max ) + tl.sum(tl.exp(input_block - new_max)) max_val = new_max for i in range(0, num_classes, BLOCK_SIZE): input_offsets = i + tl.arange(0, BLOCK_SIZE) input_block = tl.load(input_ptr + input_offsets, mask=input_offsets < num_classes, other=float('-inf')) if reduction_mode == 'mean': input_block = (tl.exp(input_block - max_val) / normalization_factor - epsilon) / num_valid_targets else: input_block = tl.exp(input_block - max_val ) / normalization_factor - epsilon tl.store(input_ptr + input_offsets, input_block, mask=input_offsets < num_classes) tl.debug_barrier() row_loss = -(target_input_val - max_val - tl.log(normalization_factor)) if smoothing_factor > 0: smooth_loss = smoothing_sum + smoothing_factor * (max_val + tl.log( normalization_factor)) row_loss = row_loss * (1 - smoothing_factor) + smooth_loss if reduction_mode == 'mean': row_loss /= num_valid_targets updated_target_val = tl.load(input_ptr + target_label) if reduction_mode == 'mean': updated_target_val += -(1 - smoothing_factor) / num_valid_targets else: updated_target_val += -(1 - smoothing_factor) tl.store(loss_output_ptr, row_loss) tl.store(input_ptr + target_label, updated_target_val)
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Softmax" ], "Memory Access Pattern": [ "Coalesced" ], "Parallelization Strategy": [], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/ardywibowo/triton-mode/blob/5cd773ec95e25e23c6b75e312c7a9a1c6eb650b1/kernels/cross_entropy.py
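triton_cross_entropy_forward fuses the forward loss with the logit gradient: after the pass, input_ptr holds softmax(x) minus the label-smoothed one-hot target (scaled by 1/num_valid_targets under 'mean' reduction) and loss_output_ptr holds the per-row loss. As a point of comparison only (my reading of the kernel, ignoring the ignore_label path), the same two quantities can be produced in plain PyTorch:

import torch
import torch.nn.functional as F

logits = torch.randn(8, 1000, requires_grad=True)
targets = torch.randint(0, 1000, (8,))
loss = F.cross_entropy(logits, targets, label_smoothing=0.1, reduction="mean")
loss.backward()
# loss corresponds to the sum of the per-row values the kernel writes to
# loss_output_ptr (each row value is already divided by the number of valid
# targets), and logits.grad corresponds to the gradient the kernel stores
# back into input_ptr.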
514b2699-68b0-4166-9e48-359401f2a1ee
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/linear_attn/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit def chunk_linear_attn_bwd_kernel_dqkv(q, k, v, h, do, dh, dq, dk, dv, s_k_h, s_k_t, s_k_d, s_v_h, s_v_t, s_v_d, s_h_h, s_h_t, scale, T: tl.constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr, NT: tl.constexpr): i_k, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) n_bh = tl.num_programs(2) o_i = tl.arange(0, BT) p_q = tl.make_block_ptr(q + i_bh * s_k_h, (K, T), (s_k_d, s_k_t), (i_k * BK, i_t * BT), (BK, BT), (0, 1)) p_k = tl.make_block_ptr(k + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) b_q = tl.load(p_q, boundary_check=(0, 1)) b_k = tl.load(p_k, boundary_check=(0, 1)) b_s = tl.dot(b_k, b_q, allow_tf32=False) * scale b_s = tl.where(o_i[:, None] <= o_i[None, :], b_s, 0) b_dq = tl.zeros([BT, BK], dtype=tl.float32) b_dk = tl.zeros([BT, BK], dtype=tl.float32) b_ds = tl.zeros([BT, BT], dtype=tl.float32) for i_v in range(tl.cdiv(V, BV)): p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), ( i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_h = tl.make_block_ptr(h + i_bh * s_h_h, (V, NT * K), (1, s_h_t), (i_v * BV, i_t * K + i_k * BK), (BV, BK), (0, 1)) p_do = tl.make_block_ptr(do + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_dh = tl.make_block_ptr(dh + i_bh * s_h_h, (NT * K, V), (s_h_t, 1), (i_t * K + i_k * BK, i_v * BV), (BK, BV), (1, 0)) p_dv = tl.make_block_ptr(dv + (i_k * n_bh + i_bh) * s_v_h, (T, V), (s_v_t, s_v_d), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) b_v = tl.load(p_v, boundary_check=(0, 1)) b_do = tl.load(p_do, boundary_check=(0, 1)) b_h = tl.load(p_h, boundary_check=(0, 1)) b_dh = tl.load(p_dh, boundary_check=(0, 1)) b_ds += tl.dot(b_do, tl.trans(b_v), allow_tf32=False) b_dq += tl.dot(b_do, b_h, allow_tf32=False) * scale b_dk += tl.dot(b_v, tl.trans(b_dh), allow_tf32=False) b_dv = tl.dot(b_k, b_dh, allow_tf32=False) + tl.dot(b_s.to(b_q. dtype), b_do, allow_tf32=False) tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), boundary_check=(0, 1)) b_ds = tl.where(o_i[:, None] >= o_i[None, :], b_ds * scale, 0).to(b_q.dtype ) b_dq += tl.dot(b_ds, b_k, allow_tf32=False) b_dk += tl.trans(tl.dot(b_q, b_ds, allow_tf32=False)) p_dq = tl.make_block_ptr(dq + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), ( i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_dk = tl.make_block_ptr(dk + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), ( i_t * BT, i_k * BK), (BT, BK), (1, 0)) tl.store(p_dq, b_dq.to(p_dq.dtype.element_ty), boundary_check=(0, 1)) tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Backpropagation" ], "Memory Access Pattern": [ "Coalesced" ], "Parallelization Strategy": [ "Cooperative Groups" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/linear_attn/chunk.py
f35c6d9e-1bb6-42a6-b3bf-22b95cb2e626
mhmoe_bwd.py
dtadpole/triton-playground
mhmoe_bwd.py
2d317976722d63080133b1bf88b1f0cdec98f831
0
@triton.jit def _mlp_wide_kernel_bwd_dw1w2(pid_h, pid_e, x_ptr, w1_ptr, w2_ptr, o_ptr, dx_ptr, dw1_ptr, dw2_ptr, do_ptr, H, B, D: tl.constexpr, E, stride_xb, stride_xd, stride_w1d, stride_w1e, stride_w2e, stride_w2d, stride_ob, stride_od, stride_dxb, stride_dxd, stride_dw1d, stride_dw1e, stride_dw2e, stride_dw2d, stride_dob, stride_dod, BLOCK_SIZE_B: tl. constexpr, BLOCK_SIZE_E: tl.constexpr, ACTIVATION: tl.constexpr): """Kernel for computing the mlp_bwd_dw1w2 Z = X @ W1, H = f(Z), O = H @ W2 - X has shape (B, D) - W1 has shape (D, E) - W2 has shape (E, D) - O has shape (B, D) - dX has shape (B, D) - dW1 has shape (D, E) - dW2 has shape (E, D) - dO has shape (B, D) """ TARGET_TYPE = x_ptr.type.element_ty offs_b = tl.arange(0, BLOCK_SIZE_B) offs_d = tl.arange(0, D) offs_e = tl.arange(0, BLOCK_SIZE_E) x_ptrs = x_ptr + ((pid_h * B + offs_b[:, None]) * stride_xb + offs_d[ None, :] * stride_xd) do_ptrs = do_ptr + ((pid_h * B + offs_b[:, None]) * stride_dob + offs_d [None, :] * stride_dod) do_mask = (offs_b[:, None] < B) & (offs_d[None, :] < D) w1_ptrs = w1_ptr + ((pid_h * D + offs_d[:, None]) * stride_w1d + (pid_e * BLOCK_SIZE_E + offs_e[None, :]) * stride_w1e) w1_mask = (offs_d[:, None] < D) & (offs_e[None, :] < E - pid_e * BLOCK_SIZE_E) w2_ptrs = w2_ptr + ((pid_h * E + pid_e * BLOCK_SIZE_E + offs_e[:, None] ) * stride_w2e + offs_d[None, :] * stride_w2d) w2_mask = (offs_e[:, None] < E - pid_e * BLOCK_SIZE_E) & (offs_d[None, :] < D) w1 = tl.load(w1_ptrs, mask=w1_mask, other=0.0) w2 = tl.load(w2_ptrs, mask=w2_mask, other=0.0) do = tl.load(do_ptrs, mask=do_mask, other=0.0) dw1 = tl.zeros((D, BLOCK_SIZE_E), dtype=tl.float32) dw2 = tl.zeros((BLOCK_SIZE_E, D), dtype=tl.float32) for b in range(0, tl.cdiv(B, BLOCK_SIZE_B)): x_mask = (offs_b[:, None] < B - b * BLOCK_SIZE_B) & (offs_d[None, : ] < D) do_mask = (offs_b[:, None] < B - b * BLOCK_SIZE_B) & (offs_d[None, :] < D) x = tl.load(x_ptrs, mask=x_mask, other=0.0) do = tl.load(do_ptrs, mask=do_mask, other=0.0) z = tl.dot(x, w1, out_dtype=tl.float32) if ACTIVATION == 'leaky_relu': h = leaky_relu(z).to(TARGET_TYPE) else: h = z.to(TARGET_TYPE) dh = tl.dot(do, tl.trans(w2), out_dtype=tl.float32) dw2 += tl.dot(tl.trans(h), do, out_dtype=tl.float32) if ACTIVATION == 'leaky_relu': dz = (dh * d_leacky_relu_inv_backward(z)).to(TARGET_TYPE) else: dz = dh.to(TARGET_TYPE) dw1 += tl.dot(tl.trans(x), dz, out_dtype=tl.float32) x_ptrs += BLOCK_SIZE_B * stride_xb do_ptrs += BLOCK_SIZE_B * stride_dob return dw1, dw2
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/dtadpole/triton-playground/blob/2d317976722d63080133b1bf88b1f0cdec98f831/mhmoe_bwd.py
d8147557-5e28-4b54-9d2b-db80fd84ae11
utils.py
huyz2023/2by4-pretrain
sparse/utils.py
9e330125dea71e5a3dee235f4efb8869f9e4cdd0
0
@triton.jit def _soft_threshold(a0, a1, a2, a3): x1, x2, x3, x4, x5, x6 = tl.abs(a0) > tl.abs(a1), tl.abs(a0) > tl.abs(a2 ), tl.abs(a0) > tl.abs(a3), tl.abs(a1) > tl.abs(a2), tl.abs(a1 ) > tl.abs(a3), tl.abs(a2) > tl.abs(a3) m0, m1, m2, m3 = (x2 & x3 | x1 & x2 | x1 & x3, ~x1 & x5 | x4 & x5 | ~x1 & x4, ~x2 & ~x4 | ~x2 & x6 | ~x4 & x6, ~x3 & ~x5 | ~x3 & ~x6 | ~x5 & ~x6) threshold = tl.minimum(tl.maximum(tl.minimum(tl.abs(a0), tl.abs(a1)), tl.minimum(tl.abs(a2), tl.abs(a3))), tl.minimum(tl.maximum(tl.abs( a0), tl.abs(a1)), tl.maximum(tl.abs(a2), tl.abs(a3)))) s0 = tl.where(a0 > 0, a0 - threshold, a0 + threshold) s1 = tl.where(a1 > 0, a1 - threshold, a1 + threshold) s2 = tl.where(a2 > 0, a2 - threshold, a2 + threshold) s3 = tl.where(a3 > 0, a3 - threshold, a3 + threshold) return s0, s1, s2, s3, m0, m1, m2, m3
{ "Data Type": [ "fp32" ], "Functionality": [ "Activation Functions" ], "Memory Access Pattern": [ "Coalesced" ], "Parallelization Strategy": [], "Performance Objective": [ "Compute Bound" ] }
[ "BSD" ]
https://github.com/huyz2023/2by4-pretrain/blob/9e330125dea71e5a3dee235f4efb8869f9e4cdd0/sparse/utils.py
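_soft_threshold works on groups of four values: the masks m0..m3 flag the two largest-magnitude entries of each group (the 2:4 sparsity pattern), and threshold is the second-smallest magnitude in the group, which is subtracted from every entry towards zero. A compact PyTorch reference of the same selection logic (my own sketch, assuming the last dimension holds the groups of four and ignoring ties) is:

import torch

def soft_threshold_2to4_reference(x):
    # x: (..., 4); returns the soft-thresholded values and the keep-top-2 mask
    mag = x.abs()
    top2 = mag.topk(2, dim=-1).indices
    mask = torch.zeros_like(x, dtype=torch.bool)
    mask.scatter_(-1, top2, True)
    # second-smallest magnitude per group, kept as a trailing dim for broadcasting
    threshold = mag.sort(dim=-1).values[..., 1:2]
    soft = torch.where(x > 0, x - threshold, x + threshold)
    return soft, mask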
3e563cf1-4a15-483f-8db0-7b40621112a6
RzLinearForward.py
apd10/RzLinear
python/rz_linear/impl/RzLinearForward.py
eb56657b2de0a97f398f88af421b0fbcbc5469c9
0
@triton.autotune(configs=[triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=8), triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=8), triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4), triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4), triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=2), triton.Config({'BLOCK_SIZE_M': 16, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4), triton.Config({'BLOCK_SIZE_M': 16, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4), triton.Config({'BLOCK_SIZE_M': 16, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 16, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 16, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4), triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 16, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4)], key=['M', 'N', 'K']) @triton.jit def rz_linear_forward_kernel_fp32(a_ptr, b_ptr, c_ptr, init_factor, M, N, K, H, stride_am, stride_ak, stride_cm, stride_cn, R7: int, R6: int, R5: int, R4: int, R3: int, R2: int, R1: int, R0: int, BLOCK_SIZE_M: tl. constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE: tl.constexpr): rz_linear_forward_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=c_ptr, init_factor=init_factor, M=M, N=N, K=K, H=H, stride_am=stride_am, stride_ak=stride_ak, stride_cm=stride_cm, stride_cn=stride_cn, allow_tf32=False, R7=R7, R6=R6, R5=R5, R4=R4, R3=R3, R2=R2, R1=R1, R0=R0, BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K, GROUP_SIZE=GROUP_SIZE)
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Coalesced", "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/apd10/RzLinear/blob/eb56657b2de0a97f398f88af421b0fbcbc5469c9/python/rz_linear/impl/RzLinearForward.py
5e4fffd9-e14a-4bf7-a094-9d0f4377b78b
y_4.py
IntelLabs/EquiTriton
src/equitriton/sph_harm/direct/y_4.py
1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c
0
@triton.jit def fourth_order_bwd(coord_ptr: tl.tensor, coord_grad_ptr: tl.tensor, sph_grad_ptr: tl.tensor, block_size: tl.constexpr, coord_numel: tl. constexpr, output_numel: tl.constexpr, col_offset: tl.constexpr, output_stride: tl.constexpr): block_id = tl.program_id(0) coord_stride = 3 coord_striding = tl.arange(0, block_size) * coord_stride coord_row_offset = coord_striding + block_size * coord_stride * block_id x = tl.load(coord_ptr + coord_row_offset, mask=coord_row_offset < coord_numel) y = tl.load(coord_ptr + coord_row_offset + 1, mask=coord_row_offset + 1 < coord_numel) z = tl.load(coord_ptr + coord_row_offset + 2, mask=coord_row_offset + 2 < coord_numel) output_striding = tl.arange(0, block_size) * output_stride output_row_offset = (output_striding + block_size * output_stride * block_id + col_offset) g_0 = tl.load(sph_grad_ptr + output_row_offset, mask=output_row_offset < output_numel) g_1 = tl.load(sph_grad_ptr + output_row_offset + 1, mask= output_row_offset + 1 < output_numel) g_2 = tl.load(sph_grad_ptr + output_row_offset + 2, mask= output_row_offset + 2 < output_numel) g_3 = tl.load(sph_grad_ptr + output_row_offset + 3, mask= output_row_offset + 3 < output_numel) g_4 = tl.load(sph_grad_ptr + output_row_offset + 4, mask= output_row_offset + 4 < output_numel) g_5 = tl.load(sph_grad_ptr + output_row_offset + 5, mask= output_row_offset + 5 < output_numel) g_6 = tl.load(sph_grad_ptr + output_row_offset + 6, mask= output_row_offset + 6 < output_numel) g_7 = tl.load(sph_grad_ptr + output_row_offset + 7, mask= output_row_offset + 7 < output_numel) g_8 = tl.load(sph_grad_ptr + output_row_offset + 8, mask= output_row_offset + 8 < output_numel) CONST000 = 2.0 CONST001 = 4.5 CONST002 = 2.25 CONST006 = 9.48683298050514 CONST008 = 12.0 CONST012 = 28.4604989415154 CONST014 = 40.2492235949962 CONST015 = -37.6497011940334 CONST016 = -6.70820393249937 CONST017 = -26.6223590239483 CONST018 = -21.3453742061366 CONST019 = -20.1246117974981 CONST020 = -18.8248505970167 CONST021 = -18.0 CONST022 = -14.2302494707577 CONST023 = -10.0623058987491 CONST024 = -9.0 CONST025 = -8.87411967464942 CONST026 = -7.11512473537885 CONST027 = -6.27495019900557 CONST028 = -3.35410196624968 VAR07 = x * x * x VAR08 = x * x VAR16 = y * y * y VAR17 = y * y VAR25 = z * z * z VAR26 = z * z g_x = tl.load(coord_grad_ptr + coord_row_offset, mask=coord_row_offset < coord_numel) g_y = tl.load(coord_grad_ptr + coord_row_offset + 1, mask= coord_row_offset + 1 < coord_numel) g_z = tl.load(coord_grad_ptr + coord_row_offset + 2, mask= coord_row_offset + 2 < coord_numel) g_x += CONST015 * g_7 * x * y * z + CONST022 * g_5 * x * y * z + g_0 * ( CONST017 * VAR08 * z - CONST025 * VAR25) + g_1 * y * (CONST020 * VAR08 - CONST020 * VAR26) + g_2 * (-CONST019 * VAR17 * z + CONST023 * VAR08 * z + CONST028 * VAR25) + g_3 * (CONST006 * VAR16 + CONST018 * VAR08 * y + CONST026 * VAR26 * y) + g_4 * (CONST000 * x * (CONST002 * VAR26 + CONST024 * VAR17) + CONST001 * VAR07) + g_6 * (-CONST016 * VAR07 + CONST019 * VAR17 * x) + g_8 * (CONST017 * VAR26 * x - CONST025 * VAR07) g_y += CONST000 * g_6 * y * (CONST023 * VAR08 - CONST023 * VAR26 ) + CONST014 * g_2 * x * y * z + g_1 * (-CONST020 * VAR26 * x + CONST027 * VAR07) + g_3 * (CONST026 * VAR07 + x * (CONST012 * VAR17 + CONST026 * VAR26)) + g_4 * (CONST008 * VAR16 + CONST021 * VAR08 * y + CONST021 * VAR26 * y) + g_5 * (CONST026 * VAR25 + z * (CONST012 * VAR17 + CONST026 * VAR08)) + g_7 * (CONST020 * VAR08 * z - CONST027 * VAR25) g_z += -CONST015 * g_1 * x * y * z + CONST022 * g_3 * x * y * z + g_0 * ( -CONST017 * VAR26 * x + CONST025 * VAR07) + g_2 * (CONST028 * VAR07 + x * (-CONST019 * VAR17 + CONST023 * VAR26)) + g_4 * (CONST001 * VAR08 * z + CONST001 * VAR25 + CONST021 * VAR17 * z) + g_5 * ( CONST006 * VAR16 + CONST018 * VAR26 * y + CONST026 * VAR08 * y ) + g_6 * (CONST016 * VAR25 - CONST019 * VAR17 * z) + g_7 * y * ( CONST020 * VAR08 - CONST020 * VAR26) + g_8 * (CONST017 * VAR08 * z - CONST025 * VAR25) tl.store(coord_grad_ptr + coord_row_offset, g_x, mask=coord_row_offset < coord_numel) tl.store(coord_grad_ptr + coord_row_offset + 1, g_y, mask= coord_row_offset + 1 < coord_numel) tl.store(coord_grad_ptr + coord_row_offset + 2, g_z, mask= coord_row_offset + 2 < coord_numel)
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Memory-Bound" ] }
[ "Apache" ]
https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_4.py
13dbadc6-77e5-4822-8ad4-db75cbf0f44b
flash_triton.py
MayDomine/Burst-Attention
burst_attn/flash_triton.py
b088c554072935074ea9c643de5ee363be5ab1f6
0
@triton.jit def _bwd_kernel_one_col_block(start_n, Q, K, V, Bias, DO, DQ, DK, DV, LSE, D, softmax_scale, stride_qm, stride_kn, stride_vn, stride_bm, stride_dom, stride_dqm, stride_dkn, stride_dvn, seqlen_q, seqlen_k, headdim, ATOMIC_ADD: tl.constexpr, BIAS_TYPE: tl.constexpr, IS_CAUSAL: tl.constexpr, BLOCK_HEADDIM: tl.constexpr, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr): begin_m = 0 if not IS_CAUSAL else start_n * BLOCK_N // BLOCK_M * BLOCK_M offs_qm = begin_m + tl.arange(0, BLOCK_M) offs_n = start_n * BLOCK_N + tl.arange(0, BLOCK_N) offs_m = tl.arange(0, BLOCK_M) offs_d = tl.arange(0, BLOCK_HEADDIM) q_ptrs = Q + (offs_qm[:, None] * stride_qm + offs_d[None, :]) k_ptrs = K + (offs_n[:, None] * stride_kn + offs_d[None, :]) v_ptrs = V + (offs_n[:, None] * stride_vn + offs_d[None, :]) do_ptrs = DO + (offs_qm[:, None] * stride_dom + offs_d[None, :]) dq_ptrs = DQ + (offs_qm[:, None] * stride_dqm + offs_d[None, :]) if BIAS_TYPE == 'vector': b_ptrs = Bias + offs_n elif BIAS_TYPE == 'matrix': b_ptrs = Bias + (offs_qm[:, None] * stride_bm + offs_n[None, :]) dv = tl.zeros([BLOCK_N, BLOCK_HEADDIM], dtype=tl.float32) dk = tl.zeros([BLOCK_N, BLOCK_HEADDIM], dtype=tl.float32) if begin_m >= seqlen_q: dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_d[None, :]) dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_d[None, :]) _bwd_store_dk_dv(dk_ptrs, dv_ptrs, dk, dv, offs_n, offs_d, seqlen_k, headdim, EVEN_M=EVEN_M, EVEN_N=EVEN_N, EVEN_HEADDIM=EVEN_HEADDIM) return if EVEN_N & EVEN_M: if EVEN_HEADDIM: k = tl.load(k_ptrs) v = tl.load(v_ptrs) else: k = tl.load(k_ptrs, mask=offs_d[None, :] < headdim, other=0.0) v = tl.load(v_ptrs, mask=offs_d[None, :] < headdim, other=0.0) elif EVEN_HEADDIM: k = tl.load(k_ptrs, mask=offs_n[:, None] < seqlen_k, other=0.0) v = tl.load(v_ptrs, mask=offs_n[:, None] < seqlen_k, other=0.0) else: k = tl.load(k_ptrs, mask=(offs_n[:, None] < seqlen_k) & (offs_d[ None, :] < headdim), other=0.0) v = tl.load(v_ptrs, mask=(offs_n[:, None] < seqlen_k) & (offs_d[ None, :] < headdim), other=0.0) num_block_m = tl.cdiv(seqlen_q, BLOCK_M) for start_m in range(begin_m, num_block_m * BLOCK_M, BLOCK_M): start_m = tl.multiple_of(start_m, BLOCK_M) offs_m_curr = start_m + offs_m if EVEN_M & EVEN_HEADDIM: q = tl.load(q_ptrs) elif EVEN_HEADDIM: q = tl.load(q_ptrs, mask=offs_m_curr[:, None] < seqlen_q, other=0.0 ) else: q = tl.load(q_ptrs, mask=(offs_m_curr[:, None] < seqlen_q) & ( offs_d[None, :] < headdim), other=0.0) qk = tl.dot(q, k, trans_b=True) if not EVEN_N: qk = tl.where(offs_n[None, :] < seqlen_k, qk, float('-inf')) if IS_CAUSAL: qk = tl.where(offs_m_curr[:, None] >= offs_n[None, :], qk, float('-inf')) if BIAS_TYPE != 'none': tl.debug_barrier() if BIAS_TYPE == 'vector': if EVEN_N: bias = tl.load(b_ptrs).to(tl.float32) else: bias = tl.load(b_ptrs, mask=offs_n < seqlen_k, other=0.0 ).to(tl.float32) bias = bias[None, :] elif BIAS_TYPE == 'matrix': if EVEN_M & EVEN_N: bias = tl.load(b_ptrs).to(tl.float32) else: bias = tl.load(b_ptrs, mask=(offs_m_curr[:, None] < seqlen_q) & (offs_n[None, :] < seqlen_k), other=0.0 ).to(tl.float32) qk = qk * softmax_scale + bias if not EVEN_M & EVEN_HEADDIM: tl.debug_barrier() lse_i = tl.load(LSE + offs_m_curr) if BIAS_TYPE == 'none': p = tl.exp(qk * softmax_scale - lse_i[:, None]) else: p = tl.exp(qk - lse_i[:, None]) if EVEN_M & EVEN_HEADDIM: do = tl.load(do_ptrs) else: do = tl.load(do_ptrs, mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0) dv += tl.dot(p.to(do.dtype), do, trans_a=True) if not EVEN_M & EVEN_HEADDIM: tl.debug_barrier() dp = tl.dot(do, v, trans_b=True) if not EVEN_HEADDIM: tl.debug_barrier() Di = tl.load(D + offs_m_curr) ds = (p * (dp - Di[:, None]) * softmax_scale).to(q.dtype) dk += tl.dot(ds, q, trans_a=True) if not EVEN_M & EVEN_HEADDIM: tl.debug_barrier() if not ATOMIC_ADD: if EVEN_M & EVEN_HEADDIM: dq = tl.load(dq_ptrs, eviction_policy='evict_last') dq += tl.dot(ds, k) tl.store(dq_ptrs, dq, eviction_policy='evict_last') elif EVEN_HEADDIM: dq = tl.load(dq_ptrs, mask=offs_m_curr[:, None] < seqlen_q, other=0.0, eviction_policy='evict_last') dq += tl.dot(ds, k) tl.store(dq_ptrs, dq, mask=offs_m_curr[:, None] < seqlen_q, eviction_policy='evict_last') else: dq = tl.load(dq_ptrs, mask=(offs_m_curr[:, None] < seqlen_q ) & (offs_d[None, :] < headdim), other=0.0, eviction_policy='evict_last') dq += tl.dot(ds, k) tl.store(dq_ptrs, dq, mask=(offs_m_curr[:, None] < seqlen_q ) & (offs_d[None, :] < headdim), eviction_policy= 'evict_last') else: dq = tl.dot(ds, k) if EVEN_M & EVEN_HEADDIM: tl.atomic_add(dq_ptrs, dq) elif EVEN_HEADDIM: tl.atomic_add(dq_ptrs, dq, mask=offs_m_curr[:, None] < seqlen_q ) else: tl.atomic_add(dq_ptrs, dq, mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim)) dq_ptrs += BLOCK_M * stride_dqm q_ptrs += BLOCK_M * stride_qm do_ptrs += BLOCK_M * stride_dom if BIAS_TYPE == 'matrix': b_ptrs += BLOCK_M * stride_bm dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_d[None, :]) dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_d[None, :]) _bwd_store_dk_dv(dk_ptrs, dv_ptrs, dk, dv, offs_n, offs_d, seqlen_k, headdim, EVEN_M=EVEN_M, EVEN_N=EVEN_N, EVEN_HEADDIM=EVEN_HEADDIM)
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Attention Mechanisms", "Matrix Multiplication", "Softmax" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/MayDomine/Burst-Attention/blob/b088c554072935074ea9c643de5ee363be5ab1f6/burst_attn/flash_triton.py
5b0222d0-40c9-44d6-9305-fa0136e4416c
parallel.py
sustcsonglin/flash-linear-attention
fla/ops/rebased/parallel.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit def _parallel_rebased_bwd_dkv(i_bh, i_c, i_k, i_v, i_h, q, k, v, do, dz, dk, dv, s_k_h, s_k_t, s_k_d, s_v_h, s_v_t, s_v_d, scale, B: tl.constexpr, H: tl.constexpr, T: tl.constexpr, K: tl.constexpr, V: tl.constexpr, BTL: tl.constexpr, BTS: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr): p_k = tl.make_block_ptr(k + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (i_c * BTL, i_k * BK), (BTL, BK), (1, 0)) p_v = tl.make_block_ptr(v + i_bh * s_v_h, (T, V), (s_v_t, s_v_d), (i_c * BTL, i_v * BV), (BTL, BV), (1, 0)) b_k, b_v = tl.load(p_k, boundary_check=(0, 1)), tl.load(p_v, boundary_check=(0, 1)) b_dk, b_dv = tl.zeros([BTL, BK], dtype=tl.float32), tl.zeros([BTL, BV], dtype=tl.float32) for i in range(tl.cdiv(T, BTS) * BTS - BTS, (i_c + 1) * BTL - BTS, -BTS): p_q = tl.make_block_ptr(q + i_bh * s_k_h, (K, T), (s_k_d, s_k_t), ( i_k * BK, i), (BK, BTS), (0, 1)) p_do = tl.make_block_ptr(do + i_bh * s_v_h, (V, T), (s_v_d, s_v_t), (i_v * BV, i), (BV, BTS), (0, 1)) p_dz = dz + i_bh * T + i + tl.arange(0, BTS) b_q = tl.load(p_q, boundary_check=(0, 1)) b_do = tl.load(p_do, boundary_check=(0, 1)).to(b_q.dtype) b_dz = tl.load(p_dz, mask=i + tl.arange(0, BTS) < T) b_s = tl.dot(b_k.to(b_q.dtype), b_q, allow_tf32=False) * scale b_s2 = b_s * b_s b_dv += tl.dot(b_s2.to(b_q.dtype), tl.trans(b_do), allow_tf32=False) b_ds = tl.dot(b_v, b_do, allow_tf32=False) * scale if i_v == 0: b_ds += b_dz[None, :] * scale else: b_ds = b_ds b_dk += tl.dot((2 * b_ds * b_s).to(b_q.dtype), tl.trans(b_q), allow_tf32=False) tl.debug_barrier() o_q, o_k = tl.arange(0, BTS), tl.arange(0, BTL) for i in range(i_c * BTL, (i_c + 1) * BTL, BTS): p_q = tl.make_block_ptr(q + i_bh * s_k_h, (K, T), (s_k_d, s_k_t), ( i_k * BK, i), (BK, BTS), (0, 1)) p_do = tl.make_block_ptr(do + i_bh * s_v_h, (V, T), (s_v_d, s_v_t), (i_v * BV, i), (BV, BTS), (0, 1)) p_dz = dz + i_bh * T + i + tl.arange(0, BTS) b_q = tl.load(p_q, boundary_check=(0, 1)) b_do = tl.load(p_do, boundary_check=(0, 1)).to(b_q.dtype) b_dz = tl.load(p_dz, mask=i + tl.arange(0, BTS) < T) m_s = o_k[:, None] <= o_q[None, :] b_s = tl.dot(b_k, b_q, allow_tf32=False) * scale b_s2 = b_s * b_s b_s = tl.where(m_s, b_s, 0) b_s2 = tl.where(m_s, b_s2, 0) b_ds = tl.dot(b_v, b_do, allow_tf32=False) if i_v == 0: b_ds += b_dz[None, :] else: b_ds = b_ds b_ds = tl.where(m_s, b_ds, 0) * scale b_dv += tl.dot(b_s2.to(b_q.dtype), tl.trans(b_do), allow_tf32=False) b_dk += tl.dot((2 * b_ds * b_s).to(b_q.dtype), tl.trans(b_q), allow_tf32=False) o_q += BTS p_dk = tl.make_block_ptr(dk + (i_bh + B * H * i_v) * s_k_h, (T, K), ( s_k_t, s_k_d), (i_c * BTL, i_k * BK), (BTL, BK), (1, 0)) p_dv = tl.make_block_ptr(dv + (i_bh + B * H * i_k) * s_v_h, (T, V), ( s_v_t, s_v_d), (i_c * BTL, i_v * BV), (BTL, BV), (1, 0)) tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1)) tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), boundary_check=(0, 1)) return
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Matrix Multiplication", "Attention Mechanisms" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/rebased/parallel.py
6e8a194d-0aed-429d-8a29-942bafe62aad
tl_evaluate.py
2986002971/TSP_GA
algorithm/tl_evaluate.py
930dd889a3b99e18cd9e07c344fc9cbc3ce6d9c8
0
@triton.autotune(configs=[triton.Config({'BLOCK_SIZE': 32}, num_warps=4), triton.Config({'BLOCK_SIZE': 64}, num_warps=8), triton.Config({ 'BLOCK_SIZE': 128}, num_warps=8), triton.Config({'BLOCK_SIZE': 256}, num_warps=16)], key=['n_paths', 'n_cities']) @triton.jit def evaluate_paths_kernel(dist_matrix_ptr, paths_ptr, n_cities: tl. constexpr, next_power_of_2: tl.constexpr, n_paths: tl.constexpr, output_ptr, BLOCK_SIZE: tl.constexpr): """ Evaluate the total distance of every path and find the shortest one. """ pid = tl.program_id(axis=0) block_start = pid * BLOCK_SIZE offsets = block_start + tl.arange(0, BLOCK_SIZE) mask = offsets < n_paths path_start = offsets * n_cities city_offsets = tl.arange(0, next_power_of_2) city_mask = city_offsets < n_cities """ Use broadcasting to load all city indices of a block of paths at once. Suppose BLOCK_SIZE = 2 and n_cities = 3. Then path_start = [0, 3] and city_offsets = [0, 1, 2], so path_start[:, None] = [[0], [3]] and city_offsets[None, :] = [[0, 1, 2]]. Loading the current city indices with broadcasting gives: [[0, 1, 2], [3, 4, 5]] Loading the next city indices (modulo n_cities) gives: [[1, 2, 0], [4, 5, 3]] """ curr_cities = tl.load(paths_ptr + path_start[:, None] + city_offsets[ None, :], mask=mask[:, None] & city_mask[None, :]) next_cities = tl.load(paths_ptr + path_start[:, None] + ((city_offsets + 1) % n_cities)[None, :], mask=mask[:, None] & city_mask[None, :]) dists = tl.load(dist_matrix_ptr + curr_cities * n_cities + next_cities, mask=mask[:, None]) distances = tl.zeros([BLOCK_SIZE], dtype=tl.float32) distances = tl.sum(dists, axis=1) tl.store(output_ptr + offsets, distances, mask=mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/2986002971/TSP_GA/blob/930dd889a3b99e18cd9e07c344fc9cbc3ce6d9c8/algorithm/tl_evaluate.py
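A minimal host-side launch sketch for the path-evaluation kernel above. The wrapper name evaluate_paths, the import path, and the tensor dtypes are assumptions rather than code from the repository; BLOCK_SIZE is chosen by the autotuner, so only the grid depends on it.

import torch
import triton
from algorithm.tl_evaluate import evaluate_paths_kernel  # hypothetical import path

def evaluate_paths(dist_matrix: torch.Tensor, paths: torch.Tensor) -> torch.Tensor:
    # dist_matrix: (n_cities, n_cities) float32; paths: (n_paths, n_cities) contiguous int32 city ids
    n_paths, n_cities = paths.shape
    out = torch.empty(n_paths, dtype=torch.float32, device=paths.device)
    grid = lambda meta: (triton.cdiv(n_paths, meta['BLOCK_SIZE']),)  # BLOCK_SIZE comes from autotune
    evaluate_paths_kernel[grid](
        dist_matrix, paths, n_cities,
        triton.next_power_of_2(n_cities), n_paths, out,
    )
    return out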
705819f8-d177-4232-b148-dca7e6e633ff
paged_attn_v2.py
AlibabaPAI/FLASHNN
flashnn/triton_kernels/paged_attn_v2.py
528a9301587f5fb135b25d973a87ba0a40a703a7
0
@triton.jit def _single_query_cached_kv_attention_v2_unroll4(exp_sums, max_logits, out, q, k_cache, v_cache, head_mapping, scale, block_tables, seq_lens, partiton_size, max_num_blocks_per_seq, alibi_slopes, stride_qm, stride_qn, stride_om, stride_on, stride_ok, stride_km, stride_kn, stride_kk, stride_exp_m, stride_exp_n, BLOCK_SIZE: tl.constexpr, HEAD_SIZE: tl.constexpr): seq_idx = tl.program_id(axis=1) par_idx = tl.program_id(axis=2) seq_len = tl.load(seq_lens + seq_idx) if par_idx * partiton_size >= seq_len: return num_context_blocks = tl.cdiv(seq_len, BLOCK_SIZE) num_blocks_per_par = partiton_size // BLOCK_SIZE start_block_idx = par_idx * num_blocks_per_par end_block_idx = tl.minimum(start_block_idx + num_blocks_per_par, num_context_blocks) head_idx = tl.program_id(axis=0) kv_head_idx = tl.load(head_mapping + head_idx) if alibi_slopes is None: alibi_slope = 0.0 else: alibi_slope = tl.load(alibi_slopes + head_idx) block_offs = tl.arange(0, BLOCK_SIZE) head_size_offs = tl.arange(0, HEAD_SIZE) q = tl.load(q + seq_idx * stride_qm + head_idx * stride_qn + head_size_offs ) q = (q * scale).to(tl.float16) qkv = tl.zeros([BLOCK_SIZE, HEAD_SIZE], dtype=tl.float32) qk_max = float('-inf') exp_sum = 0.0 fp16_0 = tl.zeros([1, 1], dtype=k_cache.dtype.element_ty) base_offs_kv = kv_head_idx * stride_kn + block_offs[:, None ] * stride_kk + head_size_offs[None, :] block_base_ptrs = block_tables + seq_idx * max_num_blocks_per_seq for block_idx in range(start_block_idx, end_block_idx, 4): mask_0 = block_offs[:, None] < seq_len - (block_idx + 0) * BLOCK_SIZE mask_1 = block_offs[:, None] < seq_len - (block_idx + 1) * BLOCK_SIZE mask_2 = block_offs[:, None] < seq_len - (block_idx + 2) * BLOCK_SIZE mask_3 = block_offs[:, None] < seq_len - (block_idx + 3) * BLOCK_SIZE offs_kv_0 = tl.load(block_base_ptrs + block_idx + 0 ) * stride_km + base_offs_kv offs_kv_1 = tl.load(block_base_ptrs + block_idx + 1 ) * stride_km + base_offs_kv offs_kv_2 = tl.load(block_base_ptrs + block_idx + 2 ) * stride_km + base_offs_kv offs_kv_3 = tl.load(block_base_ptrs + block_idx + 3 ) * stride_km + base_offs_kv k_0 = tl.load(k_cache + offs_kv_0, mask=mask_0, other=fp16_0) k_1 = tl.load(k_cache + offs_kv_1, mask=mask_1, other=fp16_0) k_2 = tl.load(k_cache + offs_kv_2, mask=mask_2, other=fp16_0) k_3 = tl.load(k_cache + offs_kv_3, mask=mask_3, other=fp16_0) v_0 = tl.load(v_cache + offs_kv_0, mask=mask_0, other=fp16_0) v_1 = tl.load(v_cache + offs_kv_1, mask=mask_1, other=fp16_0) v_2 = tl.load(v_cache + offs_kv_2, mask=mask_2, other=fp16_0) v_3 = tl.load(v_cache + offs_kv_3, mask=mask_3, other=fp16_0) _qk_0 = tl.sum((q[None, :] * k_0).to(tl.float32), axis=1) _qk_1 = tl.sum((q[None, :] * k_1).to(tl.float32), axis=1) _qk_2 = tl.sum((q[None, :] * k_2).to(tl.float32), axis=1) _qk_3 = tl.sum((q[None, :] * k_3).to(tl.float32), axis=1) _qk_0 += alibi_slope * ((block_idx + 0) * BLOCK_SIZE + block_offs - seq_len + 1) _qk_1 += alibi_slope * ((block_idx + 1) * BLOCK_SIZE + block_offs - seq_len + 1) _qk_2 += alibi_slope * ((block_idx + 2) * BLOCK_SIZE + block_offs - seq_len + 1) _qk_3 += alibi_slope * ((block_idx + 3) * BLOCK_SIZE + block_offs - seq_len + 1) _qk_max = tl.maximum(tl.max(_qk_0, axis=0), qk_max) _qk_max = tl.maximum(tl.max(_qk_1, axis=0), _qk_max) _qk_max = tl.maximum(tl.max(_qk_2, axis=0), _qk_max) _qk_max = tl.maximum(tl.max(_qk_3, axis=0), _qk_max) qk_0 = tl.where(mask_0, _qk_0[:, None], float('-inf')) qk_1 = tl.where(mask_1, _qk_1[:, None], float('-inf')) qk_2 = tl.where(mask_2, _qk_2[:, None], float('-inf')) qk_3 = tl.where(mask_3, 
_qk_3[:, None], float('-inf')) _exp_sum = exp_sum * tl.exp(qk_max - _qk_max) + tl.sum(tl.exp(_qk_0 - _qk_max), axis=0) + tl.sum(tl.exp(_qk_1 - _qk_max), axis=0 ) + tl.sum(tl.exp(_qk_2 - _qk_max), axis=0) + tl.sum(tl.exp( _qk_3 - _qk_max), axis=0) qkv = qkv * (exp_sum * tl.exp(qk_max - _qk_max) / _exp_sum) + tl.exp( qk_0 - _qk_max) / _exp_sum * v_0 + tl.exp(qk_1 - _qk_max ) / _exp_sum * v_1 + tl.exp(qk_2 - _qk_max ) / _exp_sum * v_2 + tl.exp(qk_3 - _qk_max) / _exp_sum * v_3 qk_max = _qk_max exp_sum = _exp_sum offs_exp = seq_idx * stride_exp_m + head_idx * stride_exp_n + par_idx tl.store(exp_sums + offs_exp, exp_sum) tl.store(max_logits + offs_exp, qk_max) offs_out = (seq_idx * stride_om + head_idx * stride_on + par_idx * stride_ok + head_size_offs) tl.store(out + offs_out, tl.sum(qkv, axis=0))
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Matrix Multiplication", "Elementwise Operations" ], "Memory Access Pattern": [ "Tiled", "Coalesced" ], "Parallelization Strategy": [ "Cooperative Groups" ], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/paged_attn_v2.py
60a6b4c9-ca74-46f7-b620-ed6b512cc8aa
chunk_h_split.py
sustcsonglin/flash-linear-attention
fla/ops/common/chunk_h_split.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_INITIAL_STATE': lambda args: args['h0'] is not None, 'STORE_FINAL_STATE': lambda args: args['ht'] is not None, 'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({'BK': BK, 'BV': BV}, num_warps= num_warps, num_stages=num_stages) for BK in [32, 64] for BV in [32, 64] for num_warps in [2, 4, 8] for num_stages in [2, 3]], key=['BT', 'USE_G', 'USE_GK', 'USE_GV']) @triton.jit def chunk_fwd_kernel_h_split(k, v, g, gk, gv, hs, hr, h0, ht, offsets, split_indices, T: tl.constexpr, S: tl.constexpr, H: tl.constexpr, K: tl .constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl .constexpr, USE_G: tl.constexpr, USE_GK: tl.constexpr, USE_GV: tl. constexpr, USE_INITIAL_STATE: tl.constexpr, STORE_FINAL_STATE: tl. constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr): i_k, i_v, i_sh = tl.program_id(0), tl.program_id(1), tl.program_id(2) i_ss, i_h = i_sh // H, i_sh % H if USE_OFFSETS: i_n, i_s = tl.load(split_indices + i_ss * 2).to(tl.int32), tl.load( split_indices + i_ss * 2 + 1).to(tl.int32) bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets + i_n + 1).to(tl.int32) T = eos - bos NS = tl.cdiv(T, S) else: NS = tl.cdiv(T, S) i_n, i_s = i_ss // NS, i_ss % NS bos, eos = i_n * T, i_n * T + T i_nh = i_n * H + i_h b_h = tl.zeros([BK, BV], dtype=tl.float32) if i_s == 0: if USE_INITIAL_STATE: p_h0 = tl.make_block_ptr(h0 + i_nh * K * V, (K, V), (V, 1), ( i_k * BK, i_v * BV), (BK, BV), (1, 0)) b_h += tl.load(p_h0, boundary_check=(0, 1)).to(tl.float32) p_hr = tl.make_block_ptr(hr + i_sh * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) tl.store(p_hr, b_h.to(p_hr.dtype.element_ty), boundary_check=(0, 1)) for i_t in range(tl.cdiv(i_s * S, BT), tl.cdiv(min(i_s * S + S, T), BT)): if HEAD_FIRST: p_k = tl.make_block_ptr(k + i_nh * T * K, (K, T), (1, K), (i_k * BK, i_t * BT), (BK, BT), (0, 1)) p_v = tl.make_block_ptr(v + i_nh * T * V, (T, V), (V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) else: p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (K, T), (1, H * K), (i_k * BK, i_t * BT), (BK, BT), (0, 1)) p_v = tl.make_block_ptr(v + (bos * H + i_h) * V, (T, V), (H * V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) b_k = tl.load(p_k, boundary_check=(0, 1)) b_v = tl.load(p_v, boundary_check=(0, 1)) last_idx = min(i_t * BT + BT, T) - 1 if USE_G: if HEAD_FIRST: b_g_last = tl.load(g + i_nh * T + last_idx) p_g = g + i_nh * T + i_t * BT + tl.arange(0, BT) p_g = tl.max_contiguous(tl.multiple_of(p_g, BT), BT) else: b_g_last = tl.load(g + bos * H + last_idx * H + i_h) p_g = g + bos * H + (i_t * BT + tl.arange(0, BT)) * H + i_h b_h *= tl.exp(b_g_last) b_g = tl.load(p_g, mask=i_t * BT + tl.arange(0, BT) < T, other=0.0) b_v = (b_v * tl.exp(b_g_last - b_g)[:, None]).to(b_v.dtype) if USE_GK: if HEAD_FIRST: p_gk = tl.make_block_ptr(gk + i_nh * T * K, (K, T), (1, K), (i_k * BK, i_t * BT), (BK, BT), (0, 1)) p_gk_last = (gk + i_nh * T * K + last_idx * K + i_k * BK + tl.arange(0, BK)) else: p_gk = tl.make_block_ptr(gk + (bos * H + i_h) * K, (K, T), (1, H * K), (i_k * BK, i_t * BT), (BK, BT), (0, 1)) p_gk_last = gk + (bos + last_idx ) * H * K + i_h * K + i_k * BK + tl.arange(0, BK) p_gk_last = tl.max_contiguous(tl.multiple_of(p_gk_last, BK), BK) b_gk_last = tl.load(p_gk_last, mask=i_k * BK + tl.arange(0, BK) < K, other=0.0) b_h *= tl.exp(b_gk_last)[:, None] b_gk = tl.load(p_gk, boundary_check=(0, 1)) b_k = (b_k * tl.exp(b_gk_last[:, None] - b_gk)).to(b_k.dtype) if USE_GV: if HEAD_FIRST: p_gv = tl.make_block_ptr(gv + i_nh * T * 
V, (T, V), (V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_gv_last = (gv + i_nh * T * V + last_idx * V + i_v * BV + tl.arange(0, BV)) else: p_gv = tl.make_block_ptr(gv + (bos * H + i_h) * V, (T, V), (H * V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_gv_last = gv + (bos + last_idx ) * H * V + i_h * V + i_v * BV + tl.arange(0, BV) p_gv_last = tl.max_contiguous(tl.multiple_of(p_gv_last, BV), BV) b_gv_last = tl.load(p_gv_last, mask=i_v * BV + tl.arange(0, BV) < V, other=0.0) b_h *= tl.exp(b_gv_last)[None, :] b_gv = tl.load(p_gv, boundary_check=(0, 1)) b_v = (b_v * tl.exp(b_gv_last[None, :] - b_gv)).to(b_v.dtype) b_h += tl.dot(b_k, b_v) if NS > 1: p_hs = tl.make_block_ptr(hs + i_sh * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) tl.store(p_hs, b_h.to(p_hs.dtype.element_ty), boundary_check=(0, 1)) elif STORE_FINAL_STATE: p_ht = tl.make_block_ptr(ht + i_nh * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) tl.store(p_ht, b_h.to(p_ht.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Matrix Multiplication" ], "Memory Access Pattern": [ "Blocked Access" ], "Parallelization Strategy": [ "Persistent Kernels" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/common/chunk_h_split.py
8a0c1d45-0ee4-4b70-a724-b28e532e0284
lightningAttention2.py
Computational-Machine-Intelligence/LeetDecoding
leetDecoding/methods/lightningAttention2.py
1b545c2f5bacc155255250d1f70ac9484744559a
0
@triton.jit def _fwd_kernel(Q, K, V, Out, S, b: tl.constexpr, h: tl.constexpr, n: tl. constexpr, d: tl.constexpr, e: tl.constexpr, BLOCK: tl.constexpr, NUM_BLOCK: tl.constexpr, BLOCK_MODEL: tl.constexpr): off_bh = tl.program_id(0) off_h = off_bh % h off_e = tl.program_id(1) qk_offset = off_bh * n * d v_offset = off_bh * n * e o_offset = off_bh * n * e e_offset = off_e * BLOCK_MODEL Q_block_ptr = Q + qk_offset + tl.arange(0, d)[None, :] K_trans_block_ptr = K + qk_offset + tl.arange(0, d)[:, None] V_block_ptr = V + v_offset + e_offset + tl.arange(0, BLOCK_MODEL)[None, :] O_block_ptr = Out + o_offset + e_offset + tl.arange(0, BLOCK_MODEL)[None, : ] S_block_ptr = S + off_h s = tl.load(S_block_ptr) off_block = tl.arange(0, BLOCK) q_decay = tl.exp(-s.to(tl.float32) * off_block[:, None]) k_trans_decay = tl.exp(-s.to(tl.float32) * (BLOCK - off_block[None, :])) block_decay = tl.exp(-s.to(tl.float32) * BLOCK) index = off_block[:, None] - off_block[None, :] s_index = s * index s_index = tl.where(index >= 0, -s_index, float('-inf')) diag_decay = tl.exp(s_index) kv = tl.zeros([d, BLOCK_MODEL], dtype=tl.float32) for i in range(NUM_BLOCK): q = tl.load(Q_block_ptr + off_block[:, None] * d, mask=off_block[:, None] < n, other=0.0).to(tl.float32) k_trans = tl.load(K_trans_block_ptr + off_block[None, :] * d, mask= off_block[None, :] < n, other=0.0).to(tl.float32) v = tl.load(V_block_ptr + off_block[:, None] * e, mask=off_block[:, None] < n, other=0.0).to(tl.float32) qk = tl.dot(q, k_trans) * diag_decay o_intra = tl.dot(qk, v) o_inter = tl.dot(q, kv) * q_decay o = o_intra + o_inter tl.store(O_block_ptr + off_block[:, None] * e, o.to(O_block_ptr. dtype.element_ty), mask=off_block[:, None] < n) kv = block_decay * kv + tl.dot(k_trans * k_trans_decay, v) off_block += BLOCK
{ "Data Type": [ "fp32", "fp16" ], "Functionality": [ "Attention Mechanisms", "Matrix Multiplication", "Softmax" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/Computational-Machine-Intelligence/LeetDecoding/blob/1b545c2f5bacc155255250d1f70ac9484744559a/leetDecoding/methods/lightningAttention2.py
006edbee-aca4-431a-a039-c2ae16158575
swiglu.py
dame-cell/Triformer
triformer/swiglu.py
0712537d576166b93fa09aa9509b2661b9ed8a68
0
@triton.jit def swiglu_forward_optimized(e_ptr, g_ptr, output_ptr, sigmoid_ptr, f_ptr, e_stride, g_stride, output_stride, sigmoid_stride, f_stride, BLOCK_SIZE: tl.constexpr, n_cols): row_idx = tl.program_id(axis=0) col_offset = tl.arange(0, BLOCK_SIZE) mask = col_offset < n_cols e_ptr += row_idx * e_stride g_ptr += row_idx * g_stride output_ptr += row_idx * output_stride sigmoid_ptr += row_idx * sigmoid_stride f_ptr += row_idx * f_stride e_row = tl.load(e_ptr + col_offset, mask=mask).to(tl.float32) g_row = tl.load(g_ptr + col_offset, mask=mask).to(tl.float32) sigmoid_e_row = tl.sigmoid(e_row) f_row = e_row * sigmoid_e_row tl.store(sigmoid_ptr + col_offset, sigmoid_e_row, mask=mask) tl.store(f_ptr + col_offset, f_row, mask=mask) output_row = f_row * g_row tl.store(output_ptr + col_offset, output_row, mask=mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Activation Functions", "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Low Latency" ] }
[ "MIT" ]
https://github.com/dame-cell/Triformer/blob/0712537d576166b93fa09aa9509b2661b9ed8a68/triformer/swiglu.py
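A hedged host-side wrapper sketch for the SwiGLU forward kernel above, assuming 2-D row-major inputs; one program handles one row, so BLOCK_SIZE must cover n_cols. The wrapper name and import path are illustrative, not taken from the repository.

import torch
import triton
from triformer.swiglu import swiglu_forward_optimized  # hypothetical import path

def swiglu_forward(e: torch.Tensor, g: torch.Tensor):
    # returns out = f * g, plus sigmoid(e) and f = e * sigmoid(e), which a backward pass can reuse
    n_rows, n_cols = e.shape
    out, sig, f = torch.empty_like(e), torch.empty_like(e), torch.empty_like(e)
    BLOCK_SIZE = triton.next_power_of_2(n_cols)  # a single block covers the whole row
    swiglu_forward_optimized[(n_rows,)](
        e, g, out, sig, f,
        e.stride(0), g.stride(0), out.stride(0), sig.stride(0), f.stride(0),
        BLOCK_SIZE=BLOCK_SIZE, n_cols=n_cols,
    )
    return out, sig, f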
8edb5183-1d49-41a4-b064-f713cd7c7a3d
test_triton.py
pytorch/xla
test/test_triton.py
40efdb7b6571ce92797b5ba42619b79c1b147b3e
0
@triton.jit def _attn_fwd(Q, K, V, sm_scale, M, Out, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, stride_oz, stride_oh, stride_om, stride_on, Z, H, N_CTX, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, HEAD_DIM: tl.constexpr, STAGE: tl.constexpr): tl.static_assert(BLOCK_N <= HEAD_DIM) start_m = tl.program_id(0) off_hz = tl.program_id(1) off_z = off_hz // H off_h = off_hz % H qvk_offset = off_z.to(tl.int64) * stride_qz + off_h.to(tl.int64 ) * stride_qh Q_block_ptr = tl.make_block_ptr(base=Q + qvk_offset, shape=(N_CTX, HEAD_DIM), strides=(stride_qm, stride_qk), offsets=(start_m * BLOCK_M, 0), block_shape=(BLOCK_M, HEAD_DIM), order=(1, 0)) V_block_ptr = tl.make_block_ptr(base=V + qvk_offset, shape=(N_CTX, HEAD_DIM), strides=(stride_vk, stride_vn), offsets=(0, 0), block_shape=(BLOCK_N, HEAD_DIM), order=(1, 0)) K_block_ptr = tl.make_block_ptr(base=K + qvk_offset, shape=(HEAD_DIM, N_CTX), strides=(stride_kk, stride_kn), offsets=(0, 0), block_shape =(HEAD_DIM, BLOCK_N), order=(0, 1)) O_block_ptr = tl.make_block_ptr(base=Out + qvk_offset, shape=(N_CTX, HEAD_DIM), strides=(stride_om, stride_on), offsets=(start_m * BLOCK_M, 0), block_shape=(BLOCK_M, HEAD_DIM), order=(1, 0)) offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) offs_n = tl.arange(0, BLOCK_N) m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf') l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0 acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32) qk_scale = sm_scale qk_scale *= 1.44269504 q = tl.load(Q_block_ptr) if STAGE & 1: acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, start_m, qk_scale, BLOCK_M, HEAD_DIM, BLOCK_N, 4 - STAGE, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5) if STAGE & 2: acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, start_m, qk_scale, BLOCK_M, HEAD_DIM, BLOCK_N, 2, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5) m_i += tl.math.log2(l_i) acc = acc / l_i[:, None] m_ptrs = M + off_hz * N_CTX + offs_m tl.store(m_ptrs, m_i) tl.store(O_block_ptr, acc.to(Out.type.element_ty))
{ "Data Type": [ "fp32", "fp16" ], "Functionality": [ "Attention Mechanisms", "Matrix Multiplication", "Softmax" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "BSD" ]
https://github.com/pytorch/xla/blob/40efdb7b6571ce92797b5ba42619b79c1b147b3e/test/test_triton.py
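The constant 1.44269504 in the kernel above is log2(e): folding it into sm_scale lets the kernel use the hardware-friendly exp2/log2 pair instead of exp/log, and the stored row statistic M is then a base-2 log-sum-exp. A small sanity check of the identity (plain Python, not from the test file):

import math
LOG2_E = 1.44269504
x = 0.73
# exp(x) == 2 ** (x * log2(e)), so scaling the logits by LOG2_E before exp2 reproduces softmax
assert math.isclose(math.exp(x), 2.0 ** (x * LOG2_E), rel_tol=1e-6)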
8c5238cc-c037-4b47-b27f-b517a0dadced
cross_entropy_loss_kernels.py
BobMcDear/attorch
attorch/cross_entropy_loss_kernels.py
da06cb6236bb47195e33fe3986ed21c675ed94cc
0
@triton.autotune(configs=warps_kernel_configs(), key=['batch_dim', 'feat_dim']) @triton.heuristics({'BLOCK_SIZE_BATCH': BLOCK_SIZE_BATCH_heuristic, 'BLOCK_SIZE_FEAT': lambda args: next_power_of_2(args['feat_dim'])}) @triton.jit def cross_entropy_loss_forward_kernel(input_pointer, target_pointer, weight_pointer, sum_weights_pointer, output_pointer, batch_dim, feat_dim, input_batch_stride, input_feat_stride, weighted: tl.constexpr, BLOCK_SIZE_BATCH: tl.constexpr, BLOCK_SIZE_FEAT: tl.constexpr): """ Measures the mean cross entropy loss between the input and target, with optional reweighing of each class. Args: input_pointer: Pointer to the input. The input must be of shape [batch_dim, feat_dim]. target_pointer: Pointer to the target. The target must be of shape [batch_dim]. weight_pointer: Pointer to an optional class weight vector. The class weight vector, if provided, must be of shape [feat_dim]. sum_weights_pointer: Pointer to a container the sum of the class weights is written to. The container must be of shape [batch_dim/BLOCK_SIZE_BATCH]. output_pointer: Pointer to a container the loss is written to. The container must be of shape [batch_dim/BLOCK_SIZE_BATCH]. batch_dim: Batch dimension. feat_dim: Dimensionality of the features. input_batch_stride: Stride necessary to jump one element along the input's batch dimension. input_feat_stride: Stride necessary to jump one element along the input's feature dimension. weighted: Flag for weighing each class. BLOCK_SIZE_BATCH: Block size across the batch dimension. BLOCK_SIZE_FEAT: Block size across the feature dimension. """ batch_pid = tl.program_id(axis=0) batch_offset = batch_pid * BLOCK_SIZE_BATCH + tl.arange(0, BLOCK_SIZE_BATCH ) feat_offset = tl.arange(0, BLOCK_SIZE_FEAT) batch_mask = batch_offset < batch_dim feat_mask = feat_offset < feat_dim target = tl.load(target_pointer + batch_offset, mask=batch_mask) pred_pointer = (input_pointer + input_feat_stride * target + input_batch_stride * batch_offset) input_pointer += input_batch_stride * batch_offset[:, None ] + input_feat_stride * feat_offset[None, :] input = tl.load(input_pointer, mask=batch_mask[:, None] & feat_mask[ None, :], other=-float('inf')).to(tl.float32) pred = tl.load(pred_pointer, mask=batch_mask).to(tl.float32) mx = tl.max(input, axis=1) input -= mx[:, None] loss = tl.log(tl.sum(tl.exp(input), axis=1)) - pred + mx if weighted: weight = tl.load(weight_pointer + target, mask=batch_mask).to(tl. float32) loss *= weight tl.store(sum_weights_pointer + batch_pid, tl.sum(weight)) else: loss /= batch_dim tl.store(output_pointer + batch_pid, tl.sum(loss))
{ "Data Type": [ "fp32" ], "Functionality": [ "Softmax", "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/cross_entropy_loss_kernels.py
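Each program of the forward kernel above writes one partial loss (and, when weighted, one partial sum of class weights), so the scalar loss needs a small host-side finish. A sketch of that reduction, under the assumption that the two partial buffers come back as 1-D tensors:

import torch

def finish_cross_entropy(partial_losses: torch.Tensor, partial_weight_sums: torch.Tensor = None):
    if partial_weight_sums is not None:
        # weighted case: the kernel multiplied each loss by its class weight, so normalize here
        return partial_losses.sum() / partial_weight_sums.sum()
    # unweighted case: the kernel already divided by batch_dim, so the partials just add up
    return partial_losses.sum()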
a74682cf-3a90-4677-b098-f7898cae3980
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/hgrn/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.autotune(configs=[triton.Config({'BD': 32}, num_warps=1), triton. Config({'BD': 32}, num_warps=2), triton.Config({'BD': 32}, num_warps=4), triton.Config({'BD': 32}, num_warps=8), triton.Config({'BD': 64}, num_warps=1), triton.Config({'BD': 64}, num_warps=2), triton.Config({ 'BD': 64}, num_warps=4), triton.Config({'BD': 64}, num_warps=8), triton .Config({'BD': 128}, num_warps=1), triton.Config({'BD': 128}, num_warps =2), triton.Config({'BD': 128}, num_warps=4), triton.Config({'BD': 128}, num_warps=8)], key=['D']) @triton.jit def chunk_hgrn_bwd_kernel_h(g, gc, dx, do, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr): i_d, i_t, i_b = tl.program_id(0), tl.program_id(1), tl.program_id(2) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D BC = min(BT, T - i_t * BT) NT = tl.num_programs(1) p_g = g + (i_b * T + i_t * BT + BC - 1) * D + o_d p_gc = gc + (i_b * T + i_t * BT + BC - 1) * D + o_d p_dx = dx + (i_b * T + i_t * BT + BC - 1) * D + o_d p_do = do + (i_b * T + i_t * BT + BC - 1) * D + o_d if i_t == NT - 1: b_gc = tl.zeros([BD], dtype=tl.float32) else: b_gc = tl.load(g + (i_b * T + i_t * BT + BT) * D + o_d, mask=mask, other=0).to(tl.float32) b_dh = tl.zeros([BD], dtype=tl.float32) for _ in range(BC - 1, -1, -1): tl.store(p_gc, b_gc.to(p_gc.dtype.element_ty), mask=mask) b_g = tl.load(p_g, mask=mask, other=0).to(tl.float32) b_do = tl.load(p_do, mask=mask, other=0).to(tl.float32) b_gc = b_gc + b_g b_dh = b_dh + b_do b_dx = b_dh b_dh = b_dh * tl.exp(b_g) tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), mask=mask) p_g -= D p_gc -= D p_dx -= D p_do -= D
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Blocked Access", "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/hgrn/chunk.py
09e3a5f7-b511-46a3-93e4-ca2098887d89
triton_chunk.py
NX-AI/xlstm-jax
xlstm_jax/models/xlstm_pytorch/blocks/mlstm/backend/triton_chunk.py
6615e620ba4ecdbe4fd9cc4e9a5a313b133e84a7
0
@triton.jit def chunk_mlstm_bwd_kernel_dqkvif(q, k, v, C, m, m_total, norm, i, f, dh, dC, dq, dk, dv, s_qk_h, s_qk_t, s_qk_d, s_vh_h, s_vh_t, s_vh_d, s_C_h, s_C_t, scale, B: tl.constexpr, H: tl.constexpr, T: tl.constexpr, K: tl. constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl. constexpr, NT: tl.constexpr): i_k, i_t, i_bC = tl.program_id(0), tl.program_id(1), tl.program_id(2) n_bh = tl.num_programs(2) o_i = tl.arange(0, BT) p_q = tl.make_block_ptr(q + i_bC * s_qk_h, (K, T), (s_qk_d, s_qk_t), ( i_k * BK, i_t * BT), (BK, BT), (0, 1)) p_k = tl.make_block_ptr(k + i_bC * s_qk_h, (T, K), (s_qk_t, s_qk_d), ( i_t * BT, i_k * BK), (BT, BK), (1, 0)) b_q = tl.load(p_q, boundary_check=(0, 1)) b_k = tl.load(p_k, boundary_check=(0, 1)) b_s = tl.dot(b_k, b_q, allow_tf32=False) p_f = f + i_bC * T + i_t * BT + tl.arange(0, BT) p_i = i + i_bC * T + i_t * BT + tl.arange(0, BT) b_f = tl.load(p_f) b_f_last = tl.load(f + i_bC * T + i_t * BT + BT - 1) b_m = tl.load(m + i_bC * (NT + 1) + i_t) b_m_total = tl.load(m_total + i_bC * T + i_t * BT + tl.arange(0, BT)) b_norm = tl.load(norm + i_bC * T + i_t * BT + tl.arange(0, BT)) b_i = tl.load(p_i) mask = tl.math.exp2(b_i[:, None] + b_f[None, :] - b_f[:, None] - b_m_total[None, :]) mask = tl.where(o_i[:, None] <= o_i[None, :], mask * scale, 0) b_s = b_s * mask b_m_next = tl.load(m + i_bC * (NT + 1) + i_t + 1) b_dq = tl.zeros([BT, BK], dtype=tl.float32) b_dk = tl.zeros([BT, BK], dtype=tl.float32) b_ds = tl.zeros([BT, BT], dtype=tl.float32) for i_v in range(tl.cdiv(V, BV)): p_v = tl.make_block_ptr(v + i_bC * s_vh_h, (T, V), (s_vh_t, s_vh_d), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_C = tl.make_block_ptr(C + i_bC * s_C_h, (V, NT * K), (1, s_C_t), (i_v * BV, i_t * K + i_k * BK), (BV, BK), (0, 1)) p_dh = tl.make_block_ptr(dh + i_bC * s_vh_h, (T, V), (s_vh_t, s_vh_d), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_dC = tl.make_block_ptr(dC + i_bC * s_C_h, (NT * K, V), (s_C_t, 1), (i_t * K + i_k * BK, i_v * BV), (BK, BV), (1, 0)) p_dv = tl.make_block_ptr(dv + (i_k * n_bh + i_bC) * s_vh_h, (T, V), (s_vh_t, s_vh_d), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) b_v = tl.load(p_v, boundary_check=(0, 1)) b_dh = tl.load(p_dh, boundary_check=(0, 1)) b_C = tl.load(p_C, boundary_check=(0, 1)) b_dC = tl.load(p_dC, boundary_check=(0, 1)) b_ds += tl.dot(b_dh, tl.trans(b_v), allow_tf32=False) b_dq += tl.dot(b_dh, b_C, allow_tf32=False) * scale b_dk += tl.dot(b_v, tl.trans(b_dC), allow_tf32=False) b_dv = tl.dot(b_k, b_dC, allow_tf32=False) * tl.math.exp2(b_i - b_f + b_f_last - b_m_next)[:, None] + tl.dot(b_s.to(b_q.dtype) / b_norm[None, :], b_dh, allow_tf32=False) tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), boundary_check=(0, 1)) b_dq = b_dq * tl.math.exp2(b_f + b_m - b_m_total)[:, None] / b_norm[:, None ] b_dk = b_dk * tl.math.exp2(b_i - b_f + b_f_last - b_m_next)[:, None] b_ds = b_ds * tl.trans(mask) b_ds = b_ds.to(b_k.dtype) b_dq += tl.dot(b_ds, b_k, allow_tf32=False) / b_norm[:, None] b_dk += tl.trans(tl.dot(b_q / b_norm[None, :], b_ds, allow_tf32=False)) p_dq = tl.make_block_ptr(dq + i_bC * s_qk_h, (T, K), (s_qk_t, s_qk_d), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_dk = tl.make_block_ptr(dk + i_bC * s_qk_h, (T, K), (s_qk_t, s_qk_d), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) tl.store(p_dq, b_dq.to(p_dq.dtype.element_ty), boundary_check=(0, 1)) tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Recurrent Neural Networks" ], "Memory Access Pattern": [ "Blocked Access", "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "Apache", "BSD" ]
https://github.com/NX-AI/xlstm-jax/blob/6615e620ba4ecdbe4fd9cc4e9a5a313b133e84a7/xlstm_jax/models/xlstm_pytorch/blocks/mlstm/backend/triton_chunk.py
b533c264-f2a6-46f8-ac1f-f1b856309aba
wy_fast.py
sustcsonglin/flash-linear-attention
fla/ops/gated_delta_rule/wy_fast.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for num_warps in [2, 4, 8]], key=['BK']) @triton.jit def fwd_prepare_wy_repr_kernel_chunk32(k, g, beta, Aw, Au, offsets, indices, T: tl.constexpr, K: tl.constexpr, H: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, BC: tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr): i_t, i_bh = tl.program_id(0), tl.program_id(1) i_b, i_h = i_bh // H, i_bh % H if USE_OFFSETS: i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices + i_t * 2 + 1).to(tl.int32) bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets + i_n + 1).to(tl.int32) T = eos - bos else: bos, eos = i_b * T, i_b * T + T b_Aw = tl.zeros([BC, BC], dtype=tl.float32) if HEAD_FIRST: p_beta = tl.make_block_ptr(beta + i_bh * T, (T,), (1,), (i_t * BT,), (BT,), (0,)) else: p_beta = tl.make_block_ptr(beta + bos * H + i_h, (T,), (H,), (i_t * BT,), (BT,), (0,)) b_beta = tl.load(p_beta, boundary_check=(0,)) for i_k in range(tl.cdiv(K, BK)): if HEAD_FIRST: p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) else: p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) b_k = tl.load(p_k, boundary_check=(0, 1)) b_kb = (b_k * b_beta[:, None]).to(b_k.dtype) b_Aw += tl.dot(b_kb, tl.trans(b_k)) b_Aw = -tl.where(tl.arange(0, BC)[:, None] > tl.arange(0, BC)[None, :], b_Aw, 0) if HEAD_FIRST: p_g = tl.make_block_ptr(g + i_bh * T, (T,), (1,), (i_t * BT,), (BT, ), (0,)) else: p_g = tl.make_block_ptr(g + bos * H + i_h, (T,), (H,), (i_t * BT,), (BT,), (0,)) b_g = tl.load(p_g, boundary_check=(0,)) b_Au = b_Aw * tl.exp(b_g[:, None] - b_g[None, :]) for i in range(1, BC): mask = tl.arange(0, BC) == i b_aw = tl.sum(tl.where(mask[:, None], b_Aw, 0), 0) b_au = tl.sum(tl.where(mask[:, None], b_Au, 0), 0) b_aw = b_aw + tl.sum(b_aw[:, None] * b_Aw, 0) * (tl.arange(0, BC) < i) b_au = b_au + tl.sum(b_au[:, None] * b_Au, 0) * (tl.arange(0, BC) < i) b_Aw = tl.where(mask[:, None], b_aw, b_Aw) b_Au = tl.where(mask[:, None], b_au, b_Au) b_Aw += tl.arange(0, BC)[:, None] == tl.arange(0, BC)[None, :] b_Au += tl.arange(0, BC)[:, None] == tl.arange(0, BC)[None, :] if HEAD_FIRST: p_Aw = tl.make_block_ptr(Aw + i_bh * T * BT, (T, BT), (BT, 1), (i_t * BT, 0), (BC, BC), (1, 0)) p_Au = tl.make_block_ptr(Au + i_bh * T * BT, (T, BT), (BT, 1), (i_t * BT, 0), (BC, BC), (1, 0)) else: p_Aw = tl.make_block_ptr(Aw + (bos * H + i_h) * BT, (T, BT), (H * BT, 1), (i_t * BT, 0), (BC, BC), (1, 0)) p_Au = tl.make_block_ptr(Au + (bos * H + i_h) * BT, (T, BT), (H * BT, 1), (i_t * BT, 0), (BC, BC), (1, 0)) tl.store(p_Aw, b_Aw.to(p_Aw.dtype.element_ty), boundary_check=(0, 1)) tl.store(p_Au, b_Au.to(p_Au.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Matrix Multiplication" ], "Memory Access Pattern": [ "Blocked Access", "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings", "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gated_delta_rule/wy_fast.py
d7b0c935-63df-4ee8-a4db-e04227fcfa37
shape.py
2niuhe/triton_utils
src/triton_utils/shape.py
6184906ac3b86dac3ccbfac128ec393ccecde5df
0
@triton.jit def load_1d(ptr, sz: tl.constexpr, n, max, stride=1): """Chunk 1d vector (defined by ptr) into 1d grid, where each chunk has size sz. Load the nth chunk. I.e., load [n*sz,...,(n+1)*sz-1].""" offs = get_1d_offest(sz, n) mask = get_1d_mask(offs, max) return tl.load(ptr + offs, mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Low Latency" ] }
[ "Apache" ]
https://github.com/2niuhe/triton_utils/blob/6184906ac3b86dac3ccbfac128ec393ccecde5df/src/triton_utils/shape.py
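load_1d relies on two helpers defined elsewhere in the same module. A plausible minimal version of them is sketched below (an assumption, with the original names kept, including the 'offest' spelling):

import triton
import triton.language as tl

@triton.jit
def get_1d_offest(sz: tl.constexpr, n):
    return n * sz + tl.arange(0, sz)   # element offsets of the n-th chunk of size sz

@triton.jit
def get_1d_mask(offs, max):
    return offs < max                  # guard against reading past the end of the vector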
abd8d704-a5d9-4edd-8154-cd775adf20b5
fused_chunk.py
sustcsonglin/flash-linear-attention
fla/ops/gla/fused_chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit def bwd_inner_chunk(q, k, g, dA, dq, dk, s_k_h, s_k_t, s_k_d, T: tl. constexpr, K: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr): i_k, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) p_k = tl.make_block_ptr(k + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) b_k = tl.load(p_k, boundary_check=(0, 1)) p_g = tl.make_block_ptr(g + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) b_g = tl.load(p_g, boundary_check=(0, 1)).to(tl.float32) mask = i_k * BK + tl.arange(0, BK) < K o_i = tl.arange(0, BT) p_q = q + i_bh * s_k_h + i_k * BK + i_t * BT * K + tl.arange(0, BK) p_dq = dq + i_bh * s_k_h + i_k * BK + i_t * BT * K + tl.arange(0, BK) p_gq = g + i_bh * s_k_h + i_k * BK + i_t * BT * K + tl.arange(0, BK) p_dA = dA + i_bh * (tl.cdiv(T, BT) * BT * BT) + i_t * BT * BT + tl.arange( 0, BT) b_dk = tl.zeros([BT, BK], dtype=tl.float32) for i in range(BT): _q = tl.load(p_q, mask=mask, other=0) gq = tl.load(p_gq, mask=mask, other=0).to(tl.float32) score = tl.exp(gq[None, :] - b_g) score = tl.where(o_i[:, None] <= i, score, 0) _dA = tl.load(p_dA) _dA = tl.where(o_i <= i, _dA, 0) b_dk += _dA[:, None] * score * _q[None, :] b_dq = tl.sum(_dA[:, None] * score * b_k, axis=0) tl.store(p_dq, b_dq, mask=mask) p_q += K p_dq += K p_gq += K p_dA += BT p_dk = tl.make_block_ptr(dk + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), ( i_t * BT, i_k * BK), (BT, BK), (1, 0)) tl.store(p_dk, b_dk.to(dk.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Attention Mechanisms" ], "Memory Access Pattern": [ "Blocked Access", "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gla/fused_chunk.py
dfffff6c-5718-4d9e-8554-1123df93f9ca
ln_linear_triton_2.py
ethansmith2000/fused-layer-norm
ln_linear_triton_2.py
84fe243a829364acdcfd7cd70b699db04838af0f
0
@triton.jit def _layer_norm_bwd_dx_fused(DX, DY, DSc, DSh, Y, Sc, Sh, Mean, Rstd, Lock, stride, N, GROUP_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr): row = tl.program_id(0) cols = tl.arange(0, BLOCK_SIZE_N) mask = cols < N Y += row * stride DY += row * stride DX += row * stride lock_id = row % GROUP_SIZE_M Lock += lock_id Count = Lock + GROUP_SIZE_M DSc = DSc + lock_id * N + cols DSh = DSh + lock_id * N + cols y = tl.load(Y + cols, mask=mask, other=0).to(tl.float32) dy = tl.load(DY + cols, mask=mask, other=0).to(tl.float32) sc = tl.load(Sc + cols, mask=mask).to(tl.float32) sh = tl.load(Sh + cols, mask=mask).to(tl.float32) mean = tl.load(Mean + row) rstd = tl.load(Rstd + row) xhat = (y - sh) / sc scdy = sc * dy xhat = tl.where(mask, xhat, 0.0) scdy = tl.where(mask, scdy, 0.0) c1 = tl.sum(xhat * scdy, axis=0) / N c2 = tl.sum(scdy, axis=0) / N dx = (scdy - (xhat * c1 + c2)) * rstd tl.store(DX + cols, dx, mask=mask) partial_dsc = (dy * xhat).to(sc.dtype) partial_dsh = dy.to(sc.dtype) while tl.atomic_cas(Lock, 0, 1) == 1: pass count = tl.load(Count) if count == 0: tl.atomic_xchg(Count, 1) else: partial_dsc += tl.load(DSc, mask=mask) partial_dsh += tl.load(DSh, mask=mask) tl.store(DSc, partial_dsc, mask=mask) tl.store(DSh, partial_dsh, mask=mask) tl.atomic_xchg(Lock, 0)
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization", "Backpropagation" ], "Memory Access Pattern": [ "Blocked Access", "Shared Memory Intensive" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "MIT" ]
https://github.com/ethansmith2000/fused-layer-norm/blob/84fe243a829364acdcfd7cd70b699db04838af0f/ln_linear_triton_2.py
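The backward kernel above accumulates per-group partials for dScale/dShift behind a spin lock, which implies a particular host-side buffer layout. A hypothetical allocation sketch (sizes and dtypes are illustrative only):

import torch
GROUP_SIZE_M, N = 64, 4096
locks = torch.zeros(2 * GROUP_SIZE_M, dtype=torch.int32, device='cuda')       # first half: locks, second half: "seen" flags
dsc_partial = torch.zeros((GROUP_SIZE_M, N), dtype=torch.float32, device='cuda')
dsh_partial = torch.zeros((GROUP_SIZE_M, N), dtype=torch.float32, device='cuda')
# after the kernel, dSc = dsc_partial.sum(0) and dSh = dsh_partial.sum(0), or a second reduction kernel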
0556ec3b-dfff-4fcf-bfe4-7f2f435207f6
test_sampler.py
Coco58323/vllm_blend
tests/kernels/test_sampler.py
1fe36887b3c8402d71d119f6a2ff545c2fffff4d
0
@triton.jit def _uniform_to_exponential_kernel(input, output, n: tl.constexpr): idx = tl.arange(0, n) x = tl.load(input + idx) y = _uniform_to_exponential(x) tl.store(output + idx, y)
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Low Latency" ] }
[ "Apache" ]
https://github.com/Coco58323/vllm_blend/blob/1fe36887b3c8402d71d119f6a2ff545c2fffff4d/tests/kernels/test_sampler.py
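The test kernel above only wraps _uniform_to_exponential, which is defined in the module under test. The standard inverse-CDF transform it presumably implements (an assumption, not copied from the repository) is:

import triton
import triton.language as tl

@triton.jit
def _uniform_to_exponential(x):
    # map u ~ Uniform(0, 1] to Exponential(1) samples via the inverse CDF, y = -log(u)
    return -tl.log(x)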
88444d28-55e3-434c-8f2a-ca5d9b2c5a02
triton_conv3d.py
l1351868270/implicit_gemm.triton
triton_conv3d.py
64eb8548ccf4576883c928f6315be8b24680a455
0
@triton.autotune(configs=get_autotune_config(), key=['N', 'C', 'D', 'H', 'W', 'K', 'D_out', 'H_out', 'W_out', 'T', 'R', 'S', 'stride_d', 'stride_h', 'stride_w', 'pad_d', 'pad_h', 'pad_w', 'dila_d', 'dila_h', 'dila_w']) @triton.jit def conv3d_kernel(x_ptr, w_ptr, y_ptr, N, C, D, H, W, K, D_out, H_out, W_out, T, R, S, stride_d, stride_h, stride_w, pad_d, pad_h, pad_w, dila_d, dila_h, dila_w, GEMM_M, GEMM_N, GEMM_K, BLOCK_SIZE_M: tl. constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr): pid = tl.program_id(axis=0) num_pid_m = tl.cdiv(GEMM_M, BLOCK_SIZE_M) num_pid_n = tl.cdiv(GEMM_N, BLOCK_SIZE_N) num_pid_in_group = GROUP_SIZE_M * num_pid_n group_id = pid // num_pid_in_group first_pid_m = group_id * GROUP_SIZE_M group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M) pid_m = first_pid_m + pid % num_pid_in_group % group_size_m pid_n = pid % num_pid_in_group // group_size_m gemm_i = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % GEMM_M gemm_j = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % GEMM_N n = gemm_i // (D_out * H_out * W_out) ndhw_residual = gemm_i % (D_out * H_out * W_out) d_out = ndhw_residual // (H_out * W_out) dhw_residual = ndhw_residual % (H_out * W_out) h_out = dhw_residual // W_out w_out = dhw_residual % W_out k = gemm_j accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) for idx_k in range(0, tl.cdiv(GEMM_K, BLOCK_SIZE_K)): gemm_k = idx_k * BLOCK_SIZE_K + tl.arange(0, BLOCK_SIZE_K) t = gemm_k // (R * S * C) trsc_residual = gemm_k % (R * S * C) r = trsc_residual // (S * C) rsc_residual = gemm_k % (S * C) s = rsc_residual // C c = rsc_residual % C d = d_out[:, None] * stride_d + t[None, :] * dila_d - pad_d h = h_out[:, None] * stride_h + r[None, :] * dila_h - pad_h w = w_out[:, None] * stride_w + s[None, :] * dila_w - pad_w mask_x = (d >= 0) & (d < D) & (h >= 0) & (h < H) & (w >= 0) & (w < W) mask_w = (t < T) & (r < R) & (s < S) & (c < C) offs_x = n[:, None ] * D * H * W * C + d * H * W * C + h * W * C + w * C + c offs_w = k[None, :] * T * R * S * C + t[:, None] * R * S * C + r[:, None] * S * C + s[:, None] * C + c[:, None] x_ptrs = x_ptr + offs_x w_ptrs = w_ptr + offs_w x_data = tl.load(x_ptrs, mask=mask_x, other=0.0) w_data = tl.load(w_ptrs, mask=mask_w[:, None], other=0.0) accumulator = tl.dot(x_data, w_data, accumulator) c_data = accumulator.to(tl.float16) offs_y = gemm_i[:, None] * GEMM_N + gemm_j[None, :] mask_y = (gemm_i[:, None] < GEMM_M) & (gemm_j[None, :] < GEMM_N) y_ptrs = y_ptr + offs_y tl.store(y_ptrs, c_data, mask=mask_y)
{ "Data Type": [ "fp16" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access", "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound", "Memory-Bound" ] }
[ "MIT" ]
https://github.com/l1351868270/implicit_gemm.triton/blob/64eb8548ccf4576883c928f6315be8b24680a455/triton_conv3d.py
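The kernel above treats the convolution as an implicit GEMM over NDHWC activations and KTRSC filters. A host-side sketch of how the GEMM dimensions and output sizes would be derived (the helper name and the standard output-size formula are assumptions consistent with the indexing above):

def conv3d_gemm_dims(N, C, D, H, W, K, T, R, S,
                     stride_d, stride_h, stride_w,
                     pad_d, pad_h, pad_w,
                     dila_d, dila_h, dila_w):
    D_out = (D + 2 * pad_d - dila_d * (T - 1) - 1) // stride_d + 1
    H_out = (H + 2 * pad_h - dila_h * (R - 1) - 1) // stride_h + 1
    W_out = (W + 2 * pad_w - dila_w * (S - 1) - 1) // stride_w + 1
    GEMM_M = N * D_out * H_out * W_out   # one GEMM row per output position (n, d_out, h_out, w_out)
    GEMM_N = K                           # one GEMM column per output channel
    GEMM_K = T * R * S * C               # reduction over the filter volume and input channels
    return D_out, H_out, W_out, GEMM_M, GEMM_N, GEMM_K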
0cdc9206-f8ac-4983-a3c9-9a7b7091b772
chunk_fuse.py
elephantmipt/rebased_minimal
flash_linear_attention/fla/ops/triton/abc/chunk_fuse.py
e7b945509972fab9f9c1c7be431abf7d6bf62c95
0
@triton.jit def chunk_abc_fwd_kernel_o(p, v, o, rv, cv, pv, s_qk_h, s_qk_t, s_qk_d, s_sk_h, s_sk_t, s_sk_m, T, BT: tl.constexpr, BM: tl.constexpr, BV: tl. constexpr, DM: tl.constexpr, DV: tl.constexpr, NT: tl.constexpr): i_v, i_m, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) n_bh = tl.num_programs(2) p_p = tl.make_block_ptr(p + i_bh * s_sk_h, (T, DM), (s_sk_t, s_sk_m), ( 0, i_m * BM), (BT, BM), (1, 0)) p_v = tl.make_block_ptr(v + i_bh * s_qk_h, (T, DV), (s_qk_t, s_qk_d), ( 0, i_v * BV), (BT, BV), (1, 0)) p_o = tl.make_block_ptr(o + (i_m * n_bh + i_bh) * s_qk_h, (T, DV), ( s_qk_t, s_qk_d), (0, i_v * BV), (BT, BV), (1, 0)) p_rv = tl.make_block_ptr(rv + i_bh * s_sk_t * NT, (NT * DM,), (s_sk_m,), (i_m * BM,), (BM,), (0,)) p_cv = tl.make_block_ptr(cv + i_bh * s_sk_h, (DM, T), (s_sk_m, s_sk_t), (i_m * BM, 0), (BM, BT), (0, 1)) p_pv = tl.make_block_ptr(pv + i_bh * s_sk_h, (T, DM), (s_sk_t, s_sk_m), (0, i_m * BM), (BT, BM), (1, 0)) o_i = tl.arange(0, BT) m_s = o_i[:, None] >= o_i[None, :] b_hv = tl.zeros([BM, BV], dtype=tl.float32) for _ in range(NT): b_p = tl.load(p_p, boundary_check=(0, 1)) b_v = tl.load(p_v, boundary_check=(0, 1)) b_rv = tl.load(p_rv, boundary_check=(0,)) b_cv = tl.load(p_cv, boundary_check=(0, 1)) b_pv = tl.load(p_pv, boundary_check=(0, 1)) b_p = b_p * b_pv b_inter = tl.dot((b_p * b_rv[None, :]).to(b_v.dtype), b_hv.to(b_v. dtype), allow_tf32=False) b_intra = tl.where(m_s, tl.dot(b_p.to(b_v.dtype), b_cv, allow_tf32= False), 0) b_intra = tl.dot(b_intra.to(b_v.dtype), b_v, allow_tf32=False) b_o = b_inter + b_intra b_hv = b_hv * b_rv[:, None] + tl.dot(b_cv, b_v, allow_tf32=False) tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1)) p_p = tl.advance(p_p, (BT, 0)) p_v = tl.advance(p_v, (BT, 0)) p_o = tl.advance(p_o, (BT, 0)) p_rv = tl.advance(p_rv, (DM,)) p_cv = tl.advance(p_cv, (0, BT)) p_pv = tl.advance(p_pv, (BT, 0))
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication", "Attention Mechanisms" ], "Memory Access Pattern": [ "Blocked Access", "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "Apache" ]
https://github.com/elephantmipt/rebased_minimal/blob/e7b945509972fab9f9c1c7be431abf7d6bf62c95/flash_linear_attention/fla/ops/triton/abc/chunk_fuse.py
aa3dc7ca-31b5-4d4d-864c-523094a3cabe
blocksparse_logsumexp.py
kimiasa/Experiments
src/models/attention/blocksparse_logsumexp.py
c4e73bfefd8290695ec52b6386b6b81838ca94a1
0
@triton.heuristics({'num_warps': lambda *args, **meta: num_warps(args[3] * meta['BLOCK'])}) @triton.heuristics({'TN': lambda *args, **meta: next_power_of_2(args[3] * meta['BLOCK'])}) @triton.jit def _forward(X, OUT, LUT, sizemax, stride_zx, stride_zout, stride_hout, **meta ): TN = meta['TN'] BLOCK = meta['BLOCK'] pidhm = tl.program_id(0) pidz = tl.program_id(1) rxm = pidhm % BLOCK rbm = pidhm // BLOCK rxn = tl.arange(0, TN) % BLOCK rbn = tl.arange(0, TN) // BLOCK header = LUT + rbm * 2 size = tl.load(header + 0) offset = tl.load(header + 1) check = rbn < size rbmn = tl.where(check, rbn, size - 1) blockid = tl.load(LUT + offset + rbmn * 4 + 0) rowid = tl.load(LUT + offset + rbmn * 4 + 2) headid = tl.load(LUT + offset + rbmn * 4 + 3) px = X + pidz * stride_zx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn x = tl.load(px, mask=check, other=-float('inf')) x = x.to(tl.float32) c = tl.max(x, axis=0) out = tl.log(tl.sum(tl.exp(x - c), axis=0)) + c pout = (OUT + pidz * stride_zout + headid * stride_hout + rowid * BLOCK + rxm) tl.store(pout, out)
{ "Data Type": [ "fp32" ], "Functionality": [ "Softmax" ], "Memory Access Pattern": [ "Blocked Access", "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "Apache" ]
https://github.com/kimiasa/Experiments/blob/c4e73bfefd8290695ec52b6386b6b81838ca94a1/src/models/attention/blocksparse_logsumexp.py
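Each (block-row, batch) program above computes a max-shifted log-sum-exp over one block-sparse row. A dense PyTorch reference for the same quantity, useful as a correctness check (not part of the repository):

import torch

def logsumexp_rows(x: torch.Tensor) -> torch.Tensor:
    c = x.max(dim=-1, keepdim=True).values          # subtract the row max for numerical stability
    return (x - c).exp().sum(dim=-1).log() + c.squeeze(-1)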
090c7a0b-2d5b-41b2-ae7d-fd6bf3dd7f24
06-fused-attention.py
2lambda123/triton
python/tutorials/06-fused-attention.py
09e27725b89043a07f49c440db6a9aedcfba8432
0
@triton.jit def _fwd_kernel(Q, K, V, sm_scale, L, Out, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, stride_oz, stride_oh, stride_om, stride_on, Z, H, N_CTX, BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl. constexpr, BLOCK_N: tl.constexpr, IS_CAUSAL: tl.constexpr): start_m = tl.program_id(0) off_hz = tl.program_id(1) qvk_offset = off_hz * stride_qh Q_block_ptr = tl.make_block_ptr(base=Q + qvk_offset, shape=(N_CTX, BLOCK_DMODEL), strides=(stride_qm, stride_qk), offsets=(start_m * BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0)) K_block_ptr = tl.make_block_ptr(base=K + qvk_offset, shape=( BLOCK_DMODEL, N_CTX), strides=(stride_kk, stride_kn), offsets=(0, 0 ), block_shape=(BLOCK_DMODEL, BLOCK_N), order=(0, 1)) V_block_ptr = tl.make_block_ptr(base=V + qvk_offset, shape=(N_CTX, BLOCK_DMODEL), strides=(stride_vk, stride_vn), offsets=(0, 0), block_shape=(BLOCK_N, BLOCK_DMODEL), order=(1, 0)) offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) offs_n = tl.arange(0, BLOCK_N) m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float('inf') l_i = tl.zeros([BLOCK_M], dtype=tl.float32) acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32) qk_scale = sm_scale * 1.44269504 q = tl.load(Q_block_ptr) q = (q * qk_scale).to(tl.float16) lo = 0 hi = (start_m + 1) * BLOCK_M if IS_CAUSAL else N_CTX for start_n in range(lo, hi, BLOCK_N): k = tl.load(K_block_ptr) v = tl.load(V_block_ptr) qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) if IS_CAUSAL: qk = tl.where(offs_m[:, None] >= start_n + offs_n[None, :], qk, float('-inf')) qk += tl.dot(q, k) m_i_new = tl.maximum(m_i, tl.max(qk, 1)) alpha = tl.math.exp2(m_i - m_i_new) p = tl.math.exp2(qk - m_i_new[:, None]) acc_scale = l_i * 0 + alpha acc *= acc_scale[:, None] acc += tl.dot(p.to(tl.float16), v) l_i = l_i * alpha + tl.sum(p, 1) m_i = m_i_new K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N)) V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0)) acc = acc / l_i[:, None] l_ptrs = L + off_hz * N_CTX + offs_m tl.store(l_ptrs, m_i + tl.math.log2(l_i)) O_block_ptr = tl.make_block_ptr(base=Out + qvk_offset, shape=(N_CTX, BLOCK_DMODEL), strides=(stride_om, stride_on), offsets=(start_m * BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0)) tl.store(O_block_ptr, acc.to(tl.float16))
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Softmax" ], "Memory Access Pattern": [ "Blocked Access", "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "MIT" ]
https://github.com/2lambda123/triton/blob/09e27725b89043a07f49c440db6a9aedcfba8432/python/tutorials/06-fused-attention.py
3756e1f0-c01b-4b42-bb9d-de07cfbb77e8
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/simple_gla/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({}, num_warps=4)], key=['BT', 'BK', 'BV']) @triton.jit def chunk_simple_gla_fwd_kernel_o(q, k, v, h, g, o, offsets, indices, scale, T: tl.constexpr, H: tl.constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr, NT: tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr): i_v, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) i_b, i_h = i_bh // H, i_bh % H if USE_OFFSETS: i_tg = i_t i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices + i_t * 2 + 1).to(tl.int32) bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets + i_n + 1).to(tl.int32) T = eos - bos NT = tl.cdiv(T, BT) else: NT = tl.cdiv(T, BT) i_tg = i_b * NT + i_t bos, eos = i_b * T, i_b * T + T o_i = tl.arange(0, BT) m_s = o_i[:, None] >= o_i[None, :] b_o = tl.zeros([BT, BV], dtype=tl.float32) b_s = tl.zeros([BT, BT], dtype=tl.float32) for i_k in range(tl.cdiv(K, BK)): if HEAD_FIRST: p_q = tl.make_block_ptr(q + i_bh * T * K, (T, K), (K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_k = tl.make_block_ptr(k + i_bh * T * K, (K, T), (1, K), (i_k * BK, i_t * BT), (BK, BT), (0, 1)) p_h = tl.make_block_ptr(h + (i_bh * NT + i_t) * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) else: p_q = tl.make_block_ptr(q + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0)) p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (K, T), (1, H * K), (i_k * BK, i_t * BT), (BK, BT), (0, 1)) p_h = tl.make_block_ptr(h + (i_tg * H + i_h) * K * V, (K, V), ( V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) b_q = tl.load(p_q, boundary_check=(0, 1)) b_k = tl.load(p_k, boundary_check=(0, 1)) b_h = tl.load(p_h, boundary_check=(0, 1)) b_o += tl.dot(b_q, b_h, allow_tf32=False) b_s += tl.dot(b_q, b_k, allow_tf32=False) if HEAD_FIRST: p_g = tl.make_block_ptr(g + i_bh * T, (T,), (1,), (i_t * BT,), (BT, ), (0,)) p_v = tl.make_block_ptr(v + i_bh * T * V, (T, V), (V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_o = tl.make_block_ptr(o + i_bh * T * V, (T, V), (V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) else: p_g = tl.make_block_ptr(g + bos * H + i_h, (T,), (H,), (i_t * BT,), (BT,), (0,)) p_v = tl.make_block_ptr(v + (bos * H + i_h) * V, (T, V), (H * V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_o = tl.make_block_ptr(o + (bos * H + i_h) * V, (T, V), (H * V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) b_g = tl.load(p_g, boundary_check=(0,)) b_o = b_o * tl.exp(b_g)[:, None] b_s = b_s * tl.exp(b_g[:, None] - b_g[None, :]) b_s = tl.where(m_s, b_s, 0) b_v = tl.load(p_v, boundary_check=(0, 1)) b_o = (b_o + tl.dot(b_s.to(b_v.dtype), b_v, allow_tf32=False)) * scale tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "bf16", "fp32" ], "Functionality": [ "Attention Mechanisms", "Quantization" ], "Memory Access Pattern": [ "Strided Access", "Blocked Access" ], "Parallelization Strategy": [ "Grid-Stride Loops", "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/simple_gla/chunk.py
4fceec9b-8d2a-47cc-a208-fb9e821e4377
fused_moe_a16w4.py
AlibabaPAI/FLASHNN
flashnn/triton_kernels/fused_moe_a16w4.py
528a9301587f5fb135b25d973a87ba0a40a703a7
0
@triton.jit def _fused_moe_kernel_a16w4_perchannel(A, B, C, scale_b_ptr, zero_points_ptr, topk_weights_ptr, sorted_token_ids_ptr, expert_ids_ptr, num_tokens_post_padded_ptr, N, K, EM, num_valid_tokens, stride_am, stride_ak, stride_be, stride_bn, stride_bk, stride_cm, stride_cn, stride_scale_be, stride_scale_bn, stride_scale_bk, stride_zero_points_e, stride_zero_points_n, stride_zero_points_k, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr, MUL_ROUTED_WEIGHT: tl.constexpr, top_k: tl.constexpr, add_zero_points: tl.constexpr): pid = tl.program_id(axis=0) num_pid_m = tl.cdiv(EM, BLOCK_SIZE_M) num_pid_n = tl.cdiv(N, BLOCK_SIZE_N) num_pid_in_group = GROUP_SIZE_M * num_pid_n group_id = pid // num_pid_in_group first_pid_m = group_id * GROUP_SIZE_M group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M) pid_m = first_pid_m + pid % num_pid_in_group % group_size_m pid_n = pid % num_pid_in_group // group_size_m num_tokens_post_padded = tl.load(num_tokens_post_padded_ptr) if pid_m * BLOCK_SIZE_M >= num_tokens_post_padded: return offs_token_id = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) offs_token = tl.load(sorted_token_ids_ptr + offs_token_id) token_mask = offs_token < num_valid_tokens offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N * 2) // 2) % N offs_k = tl.arange(0, BLOCK_SIZE_K) a_ptrs = A + (offs_token[:, None] // top_k * stride_am + offs_k[None, : ] * stride_ak) off_experts = tl.load(expert_ids_ptr + pid_m) b_ptrs = B + off_experts * stride_be + (offs_k[None, :] * stride_bk + offs_bn[:, None] * stride_bn) if add_zero_points: offs_zero_points = pid_n * BLOCK_SIZE_N * 2 + tl.arange(0, 2 * BLOCK_SIZE_N) zero_points_ptrs = (zero_points_ptr + off_experts * stride_zero_points_e + offs_zero_points) _ZERO_POINT0 = tl.zeros([1], dtype=zero_points_ptr.dtype.element_ty) zero_points_vals = tl.load(zero_points_ptrs, mask=offs_zero_points < 2 * N, other=_ZERO_POINT0) _A0 = tl.zeros([1, 1], dtype=A.dtype.element_ty) _B0 = tl.zeros([1, 1], dtype=B.dtype.element_ty) accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N * 2), dtype=tl.float32) l_shifter = (1 - tl.arange(0, BLOCK_SIZE_N * 2) % 2) * 4 for k in range(tl.cdiv(K, BLOCK_SIZE_K)): a = tl.load(a_ptrs, mask=token_mask[:, None] & (offs_k[None, :] < K - k * BLOCK_SIZE_K), other=_A0) b = tl.load(b_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=_B0) b = (b << l_shifter[:, None]).to(tl.int8).__rshift__(4) if add_zero_points: b -= zero_points_vals[:, None] b = tl.trans(b) b = b.to(a_ptrs.dtype.element_ty) accumulator += tl.dot(a, b, out_dtype=tl.float32) a_ptrs += BLOCK_SIZE_K * stride_ak b_ptrs += BLOCK_SIZE_K * stride_bk offs_scale = pid_n * BLOCK_SIZE_N * 2 + tl.arange(0, BLOCK_SIZE_N * 2) scale_ptrs = (scale_b_ptr + off_experts * stride_scale_be + offs_scale * stride_scale_bn) _SCALE0 = tl.zeros([1], dtype=scale_b_ptr.dtype.element_ty) scales = tl.load(scale_ptrs, mask=offs_scale < 2 * N, other=_SCALE0) accumulator *= scales[None, :] if MUL_ROUTED_WEIGHT: moe_weight = tl.load(topk_weights_ptr + offs_token, mask=token_mask, other=0.0) accumulator = accumulator * moe_weight[:, None] accumulator = accumulator.to(A.dtype.element_ty) offs_cn = pid_n * BLOCK_SIZE_N * 2 + tl.arange(0, BLOCK_SIZE_N * 2) c_ptrs = C + stride_cm * offs_token[:, None] + stride_cn * offs_cn[None, :] c_mask = token_mask[:, None] & (offs_cn[None, :] < N * 2) tl.store(c_ptrs, accumulator, mask=c_mask)
{ "Data Type": [ "int8", "bf16", "fp32" ], "Functionality": [ "Quantization", "Top-K Selection", "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access", "Transposed Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "Apache" ]
https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/fused_moe_a16w4.py
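The a16w4 kernel above unpacks two signed 4-bit weights from each int8 byte with a shift trick: even output columns shift left by 4 and then arithmetic-shift right by 4 (sign-extending the low nibble), while odd columns simply shift right by 4 (the high nibble). A plain-Python illustration of that decode, assuming this packing layout:

def unpack_signed_int4(byte_val: int):
    low = (byte_val << 4) & 0xFF
    low = (low - 256 if low >= 128 else low) >> 4   # reinterpret as int8; arithmetic shift recovers the low nibble
    high = byte_val >> 4                            # Python's >> on ints is arithmetic, so the sign is kept
    return low, high

assert unpack_signed_int4(0x7A) == (-6, 7)          # low nibble 0xA -> -6, high nibble 0x7 -> 7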
a8ad76d1-6af5-4b88-892c-7131230d4b1c
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/retention/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_INITIAL_STATE': lambda args: args['h0'] is not None, 'STORE_FINAL_STATE': lambda args: args['ht'] is not None, 'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({'BK': BK, 'BV': BV}, num_warps= num_warps, num_stages=num_stages) for BK in [32, 64, 128] for BV in [32, 64, 128] for num_warps in [2, 4, 8] for num_stages in [2, 3, 4]], key=[ 'BT']) @triton.jit def chunk_retention_fwd_kernel_h(k, v, h, h0, ht, offsets, chunk_offsets, T: tl.constexpr, H: tl.constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl .constexpr, BK: tl.constexpr, BV: tl.constexpr, USE_INITIAL_STATE: tl. constexpr, STORE_FINAL_STATE: tl.constexpr, USE_OFFSETS: tl.constexpr, HEAD_FIRST: tl.constexpr): i_k, i_v, i_nh = tl.program_id(0), tl.program_id(1), tl.program_id(2) i_n, i_h = i_nh // H, i_nh % H if USE_OFFSETS: bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets + i_n + 1).to(tl.int32) T = eos - bos NT = tl.cdiv(T, BT) boh = tl.load(chunk_offsets + i_n).to(tl.int32) else: bos, eos = i_n * T, i_n * T + T NT = tl.cdiv(T, BT) boh = i_n * NT b_b = tl.math.log2(1 - tl.math.exp2(-5 - i_h * 1.0)) o_i = tl.arange(0, BT) d_b, d_i = tl.math.exp2(BT * b_b), tl.math.exp2((BT - o_i - 1) * b_b) b_h = tl.zeros([BK, BV], dtype=tl.float32) if USE_INITIAL_STATE: p_h0 = tl.make_block_ptr(h0 + i_nh * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) b_h = tl.load(p_h0, boundary_check=(0, 1)).to(tl.float32) for i_t in range(NT): if HEAD_FIRST: p_k = tl.make_block_ptr(k + i_nh * T * K, (K, T), (1, K), (i_k * BK, i_t * BT), (BK, BT), (0, 1)) p_v = tl.make_block_ptr(v + i_nh * T * V, (T, V), (V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_h = tl.make_block_ptr(h + (i_nh * NT + i_t) * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) else: p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (K, T), (1, H * K), (i_k * BK, i_t * BT), (BK, BT), (0, 1)) p_v = tl.make_block_ptr(v + (bos * H + i_h) * V, (T, V), (H * V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0)) p_h = tl.make_block_ptr(h + ((boh + i_t) * H + i_h) * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) tl.store(p_h, b_h.to(p_h.dtype.element_ty), boundary_check=(0, 1)) b_k = tl.load(p_k, boundary_check=(0, 1)) b_v = tl.load(p_v, boundary_check=(0, 1)) if i_t == NT - 1 and T % BT != 0: d_b = tl.math.exp2(T % BT * b_b) d_i = tl.math.exp2((T % BT - o_i - 1) * b_b) b_h = d_b * b_h + tl.dot(b_k, (b_v * d_i[:, None]).to(b_k.dtype), allow_tf32=False) if STORE_FINAL_STATE: p_ht = tl.make_block_ptr(ht + i_nh * K * V, (K, V), (V, 1), (i_k * BK, i_v * BV), (BK, BV), (1, 0)) tl.store(p_ht, b_h.to(p_ht.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Recurrent Neural Networks" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Cooperative Groups" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/retention/chunk.py
49950adc-5589-4676-af7f-0b95a107d8e9
dot_triton.py
markdewing/AI_kernels
dot/triton/dot_triton.py
32b2fe4b1e81cf60a16ef188e37f2d47428ce23d
0
@triton.jit def dot_product_kernel(x_ptr, y_ptr, output_ptr, n_elements, BLOCK_SIZE: tl .constexpr): pid = tl.program_id(axis=0) block_start = pid * BLOCK_SIZE offsets = block_start + tl.arange(0, BLOCK_SIZE) mask = offsets < n_elements x = tl.load(x_ptr + offsets, mask=mask, other=0.0) y = tl.load(y_ptr + offsets, mask=mask, other=0.0) z = x * y c = tl.sum(z) tl.atomic_add(output_ptr, c)
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/markdewing/AI_kernels/blob/32b2fe4b1e81cf60a16ef188e37f2d47428ce23d/dot/triton/dot_triton.py
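A host-side launch sketch for the dot-product kernel above. Because every program atomically adds its partial sum into output_ptr, the output must be zero-initialized; the wrapper name, import path, and block size are assumptions.

import torch
import triton
from dot.triton.dot_triton import dot_product_kernel  # hypothetical import path

def dot(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    out = torch.zeros(1, dtype=x.dtype, device=x.device)   # accumulator for tl.atomic_add
    n = x.numel()
    BLOCK_SIZE = 1024
    grid = (triton.cdiv(n, BLOCK_SIZE),)
    dot_product_kernel[grid](x, y, out, n, BLOCK_SIZE=BLOCK_SIZE)
    return out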
23cd9919-1bc6-4aea-9f69-aad2aa2f2839
sb_varlen_bwd.py
shawntan/stickbreaking-attention
stickbreaking_attention/sb_varlen/sb_varlen_bwd.py
8dd32ad5e58f0ee0232fd4782dc53d354ff8d283
0
@triton.jit def locked_add(Lock_ptr, Count_ptr, A_ptrs, a, B_ptrs, b, N_mask, NO_N_MASK, D_mask, NO_D_MASK: tl.constexpr): while tl.atomic_cas(Lock_ptr, 0, 1) == 1: pass count = tl.load(Count_ptr, eviction_policy='evict_last') if NO_D_MASK: if NO_N_MASK: if count == 0: tl.store(Count_ptr, True, eviction_policy='evict_last') else: a += tl.load(A_ptrs, eviction_policy='evict_last') b += tl.load(B_ptrs, eviction_policy='evict_last') tl.store(A_ptrs, a, eviction_policy='evict_last') tl.store(B_ptrs, b, eviction_policy='evict_last') else: if count == 0: tl.store(Count_ptr, True, eviction_policy='evict_last') else: a += tl.load(A_ptrs, mask=N_mask[:, None], eviction_policy= 'evict_last') b += tl.load(B_ptrs, mask=N_mask[:, None], eviction_policy= 'evict_last') tl.store(A_ptrs, a, mask=N_mask[:, None], eviction_policy= 'evict_last') tl.store(B_ptrs, b, mask=N_mask[:, None], eviction_policy= 'evict_last') else: mask = N_mask[:, None] & D_mask[None, :] if count == 0: tl.store(Count_ptr, True, eviction_policy='evict_last') else: a += tl.load(A_ptrs, mask=mask, eviction_policy='evict_last') b += tl.load(B_ptrs, mask=mask, eviction_policy='evict_last') tl.store(A_ptrs, a, mask=mask, eviction_policy='evict_last') tl.store(B_ptrs, b, mask=mask, eviction_policy='evict_last') tl.atomic_xchg(Lock_ptr, 0)
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Cooperative Groups" ], "Performance Objective": [ "Low Latency" ] }
[ "Apache" ]
https://github.com/shawntan/stickbreaking-attention/blob/8dd32ad5e58f0ee0232fd4782dc53d354ff8d283/stickbreaking_attention/sb_varlen/sb_varlen_bwd.py
7f7cdfd8-dd2a-4c5f-825b-860f5d4fc16a
y_1.py
IntelLabs/EquiTriton
src/equitriton/sph_harm/direct/y_1.py
1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c
0
@triton.jit def first_order_bwd(coord_ptr: tl.tensor, coord_grad_ptr: tl.tensor, sph_grad_ptr: tl.tensor, block_size: tl.constexpr, coord_numel: tl. constexpr, output_numel: tl.constexpr, col_offset: tl.constexpr, output_stride: tl.constexpr): block_id = tl.program_id(0) coord_stride = 3 coord_striding = tl.arange(0, block_size) * coord_stride coord_row_offset = coord_striding + block_size * coord_stride * block_id output_striding = tl.arange(0, block_size) * output_stride output_row_offset = (output_striding + block_size * output_stride * block_id + col_offset) g_Y10 = tl.load(sph_grad_ptr + output_row_offset, mask= output_row_offset < output_numel) g_Y11 = tl.load(sph_grad_ptr + output_row_offset + 1, mask= output_row_offset + 1 < output_numel) g_Y12 = tl.load(sph_grad_ptr + output_row_offset + 2, mask= output_row_offset + 2 < output_numel) g_x = tl.load(coord_grad_ptr + coord_row_offset, mask=coord_row_offset < coord_numel) g_y = tl.load(coord_grad_ptr + coord_row_offset + 1, mask= coord_row_offset + 1 < coord_numel) g_z = tl.load(coord_grad_ptr + coord_row_offset + 2, mask= coord_row_offset + 2 < coord_numel) CONST_00 = tl.sqrt(3.0) g_x += CONST_00 * g_Y10 g_y += CONST_00 * g_Y11 g_z += CONST_00 * g_Y12 tl.store(coord_grad_ptr + coord_row_offset, g_x, mask=coord_row_offset < coord_numel) tl.store(coord_grad_ptr + coord_row_offset + 1, g_y, mask= coord_row_offset + 1 < coord_numel) tl.store(coord_grad_ptr + coord_row_offset + 2, g_z, mask= coord_row_offset + 2 < coord_numel)
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_1.py
2a143289-7f7a-4301-8cd6-b7852faa3ceb
triton_flash_attention.py
IBM/vllm
vllm/attention/ops/triton_flash_attention.py
99523dd62be2ecf6c6db15e8133aaaf7855e7e86
0
@triton.autotune(configs=[triton.Config({'BLOCK_M': 256, 'BLOCK_N': 64, 'waves_per_eu': 2, 'PRE_LOAD_V': False}, num_stages=1, num_warps=8), triton.Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'waves_per_eu': 2, 'PRE_LOAD_V': False}, num_stages=1, num_warps=4), triton.Config({ 'BLOCK_M': 256, 'BLOCK_N': 128, 'waves_per_eu': 2, 'PRE_LOAD_V': False}, num_stages=1, num_warps=8), triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'waves_per_eu': 1, 'PRE_LOAD_V': False}, num_stages=1, num_warps=4), triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'waves_per_eu': 3, 'PRE_LOAD_V': True}, num_stages=1, num_warps=4), triton.Config({ 'BLOCK_M': 128, 'BLOCK_N': 64, 'waves_per_eu': 3, 'PRE_LOAD_V': False}, num_stages=1, num_warps=4), triton.Config({'BLOCK_M': 64, 'BLOCK_N': 64, 'waves_per_eu': 4, 'PRE_LOAD_V': False}, num_stages=1, num_warps=8), triton.Config({'BLOCK_M': 32, 'BLOCK_N': 32, 'waves_per_eu': 4, 'PRE_LOAD_V': False}, num_stages=1, num_warps=8), triton.Config({ 'BLOCK_M': 16, 'BLOCK_N': 16, 'waves_per_eu': 1, 'PRE_LOAD_V': False}, num_stages=1, num_warps=4)], key=['IS_CAUSAL', 'dropout_p', 'BLOCK_DMODEL'] ) @triton.jit def attn_fwd(Q, K, V, bias, sm_scale, L, Out, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, stride_oz, stride_oh, stride_om, stride_on, stride_bz, stride_bh, stride_bm, stride_bn, cu_seqlens_q, cu_seqlens_k, dropout_p, philox_seed, philox_offset_base, encoded_softmax, HQ: tl.constexpr, HK: tl.constexpr, ACTUAL_BLOCK_DMODEL: tl.constexpr, MAX_SEQLENS_Q: tl.constexpr, MAX_SEQLENS_K: tl.constexpr, VARLEN: tl.constexpr, IS_CAUSAL: tl. constexpr, BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr, PRE_LOAD_V: tl.constexpr, BIAS_TYPE: tl.constexpr, ENABLE_DROPOUT: tl.constexpr, RETURN_ENCODED_SOFTMAX: tl.constexpr): start_m = tl.program_id(0) off_h_q = tl.program_id(1) off_z = tl.program_id(2) offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) offs_n = tl.arange(0, BLOCK_N) if VARLEN: cu_seqlens_q_start = tl.load(cu_seqlens_q + off_z) cu_seqlens_q_end = tl.load(cu_seqlens_q + off_z + 1) seqlen_q = cu_seqlens_q_end - cu_seqlens_q_start if start_m * BLOCK_M > seqlen_q: return cu_seqlens_k_start = tl.load(cu_seqlens_k + off_z) cu_seqlens_k_end = tl.load(cu_seqlens_k + off_z + 1) seqlen_k = cu_seqlens_k_end - cu_seqlens_k_start else: cu_seqlens_q_start = 0 cu_seqlens_k_start = 0 seqlen_q = MAX_SEQLENS_Q seqlen_k = MAX_SEQLENS_K n_blocks = cdiv_fn(seqlen_k, BLOCK_N) if IS_CAUSAL: n_blocks_seqlen = cdiv_fn((start_m + 1) * BLOCK_M + seqlen_k - seqlen_q, BLOCK_N) n_blocks = min(n_blocks, n_blocks_seqlen) if n_blocks <= 0: o_offset = (off_z * stride_oz + cu_seqlens_q_start * stride_om + off_h_q * stride_oh) O_block_ptr = tl.make_block_ptr(base=Out + o_offset, shape=( seqlen_q, BLOCK_DMODEL), strides=(stride_om, stride_on), offsets=(start_m * BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=(1, 0)) acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=Out.type.element_ty) return GROUP_SIZE: tl.constexpr = HQ // HK off_h_k = off_h_q // GROUP_SIZE if GROUP_SIZE != 1 else off_h_q n_extra_tokens = 0 if seqlen_k < BLOCK_N: n_extra_tokens = BLOCK_N - seqlen_k elif seqlen_k % BLOCK_N: n_extra_tokens = seqlen_k % BLOCK_N padded_head = ACTUAL_BLOCK_DMODEL != BLOCK_DMODEL q_offset = (off_z * stride_qz + off_h_q * stride_qh + cu_seqlens_q_start * stride_qm) Q_block_ptr = tl.make_block_ptr(base=Q + q_offset, shape=(seqlen_q, ACTUAL_BLOCK_DMODEL), strides=(stride_qm, stride_qk), offsets=( start_m * BLOCK_M, 0), 
block_shape=(BLOCK_M, BLOCK_DMODEL), order=( 1, 0)) k_offset = (off_z * stride_kz + off_h_k * stride_kh + cu_seqlens_k_start * stride_kn) K_block_ptr = tl.make_block_ptr(base=K + k_offset, shape=( ACTUAL_BLOCK_DMODEL, seqlen_k), strides=(stride_kk, stride_kn), offsets=(0, 0), block_shape=(BLOCK_DMODEL, BLOCK_N), order=(0, 1)) v_offset = (off_z * stride_vz + off_h_k * stride_vh + cu_seqlens_k_start * stride_vk) V_block_ptr = tl.make_block_ptr(base=V + v_offset, shape=(seqlen_k, ACTUAL_BLOCK_DMODEL), strides=(stride_vk, stride_vn), offsets=(0, 0 ), block_shape=(BLOCK_N, BLOCK_DMODEL), order=(1, 0)) if BIAS_TYPE != 0: bias_ptr = tl.make_block_ptr(base=bias + off_h_q * stride_bh, shape =(seqlen_q, seqlen_k), strides=(stride_bm, stride_bn), offsets= (start_m * BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_N), order=( 1, 0)) else: bias_ptr = None if ENABLE_DROPOUT: batch_philox_offset = philox_offset_base + (off_z * HQ + off_h_q ) * seqlen_q * seqlen_k else: batch_philox_offset = 0 if RETURN_ENCODED_SOFTMAX: encoded_softmax_block_ptr = tl.make_block_ptr(base=encoded_softmax + off_h_q * seqlen_q * seqlen_k, shape=(seqlen_q, seqlen_k), strides=(seqlen_k, 1), offsets=(start_m * BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_N), order=(1, 0)) else: encoded_softmax_block_ptr = 0 m_i = tl.full([BLOCK_M], float('-inf'), dtype=tl.float32) l_i = tl.full([BLOCK_M], 1.0, dtype=tl.float32) acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32) qk_scale = sm_scale * 1.44269504089 q = load_fn(Q_block_ptr, True, padded_head, 'zero') q = (q * qk_scale).to(Q_block_ptr.type.element_ty) padded_block_k = n_extra_tokens != 0 is_modulo_mn = not padded_block_k and seqlen_q % BLOCK_M == 0 if IS_CAUSAL: masked_blocks = BLOCK_M // BLOCK_N + (not is_modulo_mn) else: masked_blocks = padded_block_k masked_blocks = min(masked_blocks, n_blocks) n_full_blocks = n_blocks - masked_blocks block_min = 0 block_max = n_blocks * BLOCK_N if n_full_blocks > 0: block_max = (n_blocks - masked_blocks) * BLOCK_N acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, start_m, seqlen_k, dropout_p, philox_seed, batch_philox_offset, encoded_softmax_block_ptr, block_min, block_max, 0, 0, 0, bias_ptr, False, BLOCK_M, BLOCK_DMODEL, BLOCK_N, offs_m, offs_n, PRE_LOAD_V, False, ENABLE_DROPOUT, RETURN_ENCODED_SOFTMAX, padded_head) block_min = block_max block_max = n_blocks * BLOCK_N tl.debug_barrier() if masked_blocks > 0: offs_n_causal = offs_n + (seqlen_q - seqlen_k) if IS_CAUSAL else 0 K_block_ptr = tl.advance(K_block_ptr, (0, n_full_blocks * BLOCK_N)) V_block_ptr = tl.advance(V_block_ptr, (n_full_blocks * BLOCK_N, 0)) if bias_ptr is not None: bias_ptr = tl.advance(bias_ptr, (0, n_full_blocks * BLOCK_N)) if RETURN_ENCODED_SOFTMAX: encoded_softmax_block_ptr = tl.advance(encoded_softmax_block_ptr, (0, n_full_blocks)) acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, start_m, seqlen_k, dropout_p, philox_seed, batch_philox_offset, encoded_softmax_block_ptr, block_min, block_max, offs_n_causal, masked_blocks, n_extra_tokens, bias_ptr, IS_CAUSAL, BLOCK_M, BLOCK_DMODEL, BLOCK_N, offs_m, offs_n, PRE_LOAD_V, True, ENABLE_DROPOUT, RETURN_ENCODED_SOFTMAX, padded_head) acc = acc / l_i[:, None] if ENABLE_DROPOUT: acc = acc / (1 - dropout_p) end_m_idx = (start_m + 1) * BLOCK_M start_m_idx = start_m * BLOCK_M causal_start_idx = seqlen_q - seqlen_k acc = acc.to(Out.type.element_ty) if IS_CAUSAL: if causal_start_idx > start_m_idx and causal_start_idx < end_m_idx: out_mask_boundary = tl.full((BLOCK_DMODEL,), causal_start_idx, 
dtype=tl.int32) mask_m_offsets = start_m_idx + tl.arange(0, BLOCK_M) out_ptrs_mask = mask_m_offsets[:, None] >= out_mask_boundary[ None, :] z = 0.0 acc = tl.where(out_ptrs_mask, acc, z.to(acc.type.element_ty)) o_offset = (off_z * stride_oz + cu_seqlens_q_start * stride_om + off_h_q * stride_oh) O_block_ptr = tl.make_block_ptr(base=Out + o_offset, shape=(seqlen_q, ACTUAL_BLOCK_DMODEL), strides=(stride_om, stride_on), offsets=( start_m * BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_DMODEL), order=( 1, 0)) tl.store(O_block_ptr, acc, boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Matrix Multiplication" ], "Memory Access Pattern": [ "Tiled", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "Apache" ]
https://github.com/IBM/vllm/blob/99523dd62be2ecf6c6db15e8133aaaf7855e7e86/vllm/attention/ops/triton_flash_attention.py
06dc48e5-e255-4b73-9331-51d5c246c0ca
dropout_rng.py
ROCm/aotriton
tritonsrc/dropout_rng.py
016f733e8ff746450e066f78bed68709ccd93e60
0
@triton.jit def debug_fill_dropout_rng(R, stride_rz, stride_rh, stride_rm, stride_rn, seqlen_q, seqlen_k, philox_seed, philox_offset_base, BLOCK_M: tl. constexpr, BLOCK_N: tl.constexpr): start_m = tl.program_id(0) off_h = tl.program_id(1) off_z = tl.program_id(2) d_offset = off_h * stride_rh + off_z * stride_rz num_h = tl.num_programs(1) off_zh = off_z * num_h + off_h * 1 batch_philox_offset = philox_offset_base + off_zh * seqlen_q * seqlen_k R_block_ptr = tl.make_block_ptr(base=R + d_offset, shape=(seqlen_q, seqlen_k), strides=(stride_rm, stride_rn), offsets=(start_m * BLOCK_M, 0), block_shape=(BLOCK_M, BLOCK_N), order=(1, 0)) for start_n in range(0, seqlen_k, BLOCK_N): philox_offset = (batch_philox_offset + start_m * BLOCK_M * seqlen_k + start_n) rng = dropout_rng(philox_seed, philox_offset, BLOCK_M, BLOCK_N, seqlen_k) tl.store(R_block_ptr, rng.to(R_block_ptr.type.element_ty), boundary_check=(0, 1)) R_block_ptr = tl.advance(R_block_ptr, (0, BLOCK_N))
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Memory-Bound" ] }
[ "MIT" ]
https://github.com/ROCm/aotriton/blob/016f733e8ff746450e066f78bed68709ccd93e60/tritonsrc/dropout_rng.py
2fcb6b4b-d342-4810-8e57-787ada488273
mhmoe_bwd.py
dtadpole/triton-playground
mhmoe_bwd.py
2d317976722d63080133b1bf88b1f0cdec98f831
0
@triton.jit def _mlp_wide_kernel_bwd_dx(pid_h, pid_b, x_ptr, w1_ptr, w2_ptr, o_ptr, dx_ptr, dw1_ptr, dw2_ptr, do_ptr, H, B, D: tl.constexpr, E, stride_xb, stride_xd, stride_w1d, stride_w1e, stride_w2e, stride_w2d, stride_ob, stride_od, stride_dxb, stride_dxd, stride_dw1d, stride_dw1e, stride_dw2e, stride_dw2d, stride_dob, stride_dod, BLOCK_SIZE_B: tl. constexpr, BLOCK_SIZE_E: tl.constexpr, ACTIVATION: tl.constexpr): """Kernel for computing the mlp_bwd_dx Z = X @ W1, H = f(Z), O = H @ W2 - X has shape (B, D) - W1 has shape (D, E) - W2 has shape (E, D) - O has shape (B, D) - dX has shape (B, D) - dW1 has shape (D, E) - dW2 has shape (E, D) - dO has shape (B, D) """ TARGET_TYPE = x_ptr.type.element_ty offs_b = tl.arange(0, BLOCK_SIZE_B) offs_d = tl.arange(0, D) offs_e = tl.arange(0, BLOCK_SIZE_E) x_ptrs = x_ptr + ((pid_h * B + pid_b * BLOCK_SIZE_B + offs_b[:, None]) * stride_xb + offs_d[None, :] * stride_xd) x_mask = (offs_b[:, None] < B - pid_b * BLOCK_SIZE_B) & (offs_d[None, : ] < D) do_ptrs = do_ptr + ((pid_h * B + pid_b * BLOCK_SIZE_B + offs_b[:, None] ) * stride_dob + offs_d[None, :] * stride_dod) do_mask = (offs_b[:, None] < B - pid_b * BLOCK_SIZE_B) & (offs_d[None, :] < D) w1_ptrs = w1_ptr + ((pid_h * D + offs_d[:, None]) * stride_w1d + offs_e [None, :] * stride_w1e) w2_ptrs = w2_ptr + ((pid_h * E + offs_e[:, None]) * stride_w2e + offs_d [None, :] * stride_w2d) dw1_ptrs = dw1_ptr + ((pid_h * D + offs_d[:, None]) * stride_dw1d + offs_e[None, :] * stride_dw1e) dw2_ptrs = dw2_ptr + ((pid_h * E + offs_e[:, None]) * stride_dw2e + offs_d[None, :] * stride_dw2d) x = tl.load(x_ptrs, mask=x_mask, other=0.0) do = tl.load(do_ptrs, mask=do_mask, other=0.0) dx = tl.zeros((BLOCK_SIZE_B, D), dtype=tl.float32) for e in range(0, tl.cdiv(E, BLOCK_SIZE_E)): w1_mask = (offs_d[:, None] < D) & (offs_e[None, :] < E - e * BLOCK_SIZE_E) w2_mask = (offs_e[:, None] < E - e * BLOCK_SIZE_E) & (offs_d[None, :] < D) w1 = tl.load(w1_ptrs, mask=w1_mask, other=0.0) w2 = tl.load(w2_ptrs, mask=w2_mask, other=0.0) z = tl.dot(x, w1, out_dtype=tl.float32) if ACTIVATION == 'leaky_relu': h = leaky_relu(z).to(TARGET_TYPE) else: h = z.to(TARGET_TYPE) dh = tl.dot(do, tl.trans(w2), out_dtype=tl.float32) if ACTIVATION == 'leaky_relu': dz = (dh * d_leacky_relu_inv_backward(z)).to(TARGET_TYPE) else: dz = dh.to(TARGET_TYPE) dx += tl.dot(dz, tl.trans(w1), out_dtype=tl.float32) w1_ptrs += BLOCK_SIZE_E * stride_w1e w2_ptrs += BLOCK_SIZE_E * stride_w2e dw1_ptrs += BLOCK_SIZE_E * stride_dw1e dw2_ptrs += BLOCK_SIZE_E * stride_dw2e return dx
{ "Data Type": [ "fp32", "fp16" ], "Functionality": [ "Backpropagation", "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/dtadpole/triton-playground/blob/2d317976722d63080133b1bf88b1f0cdec98f831/mhmoe_bwd.py
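A plain PyTorch sketch of the gradient that _mlp_wide_kernel_bwd_dx above accumulates; the 0.01 leaky_relu slope and the exact behaviour of d_leacky_relu_inv_backward are assumptions made only for illustration.

import torch

def mlp_bwd_dx_reference(x, w1, w2, do, negative_slope=0.01):
    # Z = X @ W1, H = f(Z), O = H @ W2  =>  dX = ((dO @ W2^T) * f'(Z)) @ W1^T
    z = x @ w1
    dh = do @ w2.t()
    dz = dh * torch.where(z > 0, torch.ones_like(z), torch.full_like(z, negative_slope))
    return dz @ w1.t()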
c290ae95-140d-49ca-bf58-a327ab667240
smem_triton_matmul.py
WesKwong/gemm-example-cuda2py
triton_mm/smem_triton_matmul.py
901c4488a79b6d71f7a4dc15dcdfc9546b879a23
0
@triton.jit def smem_triton_matmul(c_ptr, a_ptr, b_ptr, M, N, K, stride_am, stride_ak, stride_bk, stride_bn, stride_cm, stride_cn, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr): raw_pid_m = tl.program_id(0) raw_pid_n = tl.program_id(1) num_program_m = tl.num_programs(0) num_program_n = tl.num_programs(1) group_id = raw_pid_m // GROUP_SIZE_M first_pid_m = group_id * GROUP_SIZE_M group_size_m = min(num_program_m - first_pid_m, GROUP_SIZE_M) linear_pid_in_group = (raw_pid_m - first_pid_m) * num_program_n + raw_pid_n pid_m = first_pid_m + linear_pid_in_group % group_size_m pid_n = linear_pid_in_group // group_size_m offset_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) offset_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) offset_k = tl.arange(0, BLOCK_SIZE_K) a_ptrs = a_ptr + (offset_am[:, None] * stride_am + offset_k[None, :] * stride_ak) b_ptrs = b_ptr + (offset_k[:, None] * stride_bk + offset_bn[None, :] * stride_bn) res = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) for k in range(tl.cdiv(K, BLOCK_SIZE_K)): a = tl.load(a_ptrs, mask=offset_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) b = tl.load(b_ptrs, mask=offset_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) res += tl.dot(a, b, allow_tf32=False) a_ptrs += BLOCK_SIZE_K * stride_ak b_ptrs += BLOCK_SIZE_K * stride_bk c_ptrs = c_ptr + (offset_am[:, None] * stride_cm + offset_bn[None, :] * stride_cn) c_mask = (offset_am[:, None] < M) & (offset_bn[None, :] < N) tl.store(c_ptrs, res, mask=c_mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Tiled", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings", "Persistent Kernels" ], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/WesKwong/gemm-example-cuda2py/blob/901c4488a79b6d71f7a4dc15dcdfc9546b879a23/triton_mm/smem_triton_matmul.py
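A hypothetical launcher for smem_triton_matmul above; the block sizes are illustrative, and M and N are assumed to be multiples of BLOCK_SIZE_M and BLOCK_SIZE_N since the kernel only masks along K.

import torch
import triton

def matmul(a, b, BLOCK_SIZE_M=64, BLOCK_SIZE_N=64, BLOCK_SIZE_K=32, GROUP_SIZE_M=8):
    M, K = a.shape
    _, N = b.shape
    c = torch.empty((M, N), device=a.device, dtype=torch.float32)
    # axis 0 covers the M tiles, axis 1 the N tiles; grouping is handled inside the kernel.
    grid = (triton.cdiv(M, BLOCK_SIZE_M), triton.cdiv(N, BLOCK_SIZE_N))
    smem_triton_matmul[grid](
        c, a, b, M, N, K,
        a.stride(0), a.stride(1), b.stride(0), b.stride(1), c.stride(0), c.stride(1),
        BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N,
        BLOCK_SIZE_K=BLOCK_SIZE_K, GROUP_SIZE_M=GROUP_SIZE_M,
    )
    return c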
60cd7074-f5da-4801-bf48-534ee82c78c0
copy.py
chengzeyi/stable-fast
src/sfast/triton/ops/copy.py
3a6f35c7045f8f6812515957ca62ef37260ff080
0
@eval( """triton.heuristics({ 'BLOCK_M': lambda kwargs: min(4096, triton.next_power_of_2(kwargs['size_inp_0'])), 'BATCH_STRIDE_INP_IS_1': lambda kwargs: kwargs['batch_stride_inp'] == 1, 'STRIDE_INP_0_IS_1': lambda kwargs: kwargs['stride_inp_0'] == 1, 'BATCH_STRIDE_OUT_IS_1': lambda kwargs: kwargs['batch_stride_out'] == 1, 'STRIDE_OUT_0_IS_1': lambda kwargs: kwargs['stride_out_0'] == 1, })""" ) @eval( """triton.heuristics({ 'num_warps': lambda kwargs: max(1, min(16, kwargs['BLOCK_M'] // 32)), })""" ) @triton.jit def copy_2d_kernel(output_ptr, input_ptr, bs, size_inp_0, batch_stride_inp, stride_inp_0, batch_stride_out, stride_out_0, BATCH_STRIDE_INP_IS_1: tl .constexpr, STRIDE_INP_0_IS_1: tl.constexpr, BATCH_STRIDE_OUT_IS_1: tl. constexpr, STRIDE_OUT_0_IS_1: tl.constexpr, BLOCK_M: tl.constexpr): pid = tl.program_id(0) pid_batch = tl.program_id(1) grid_m = tl.cdiv(size_inp_0, BLOCK_M) pid_m = pid rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) A = input_ptr + (1 if BATCH_STRIDE_INP_IS_1 else batch_stride_inp ) * pid_batch + rm * (1 if STRIDE_INP_0_IS_1 else stride_inp_0) B = output_ptr + (1 if BATCH_STRIDE_OUT_IS_1 else batch_stride_out ) * pid_batch + rm * (1 if STRIDE_OUT_0_IS_1 else stride_out_0) mask = rm < size_inp_0 a = tl.load(A, mask=mask) tl.store(B, a, mask=mask)
{ "Data Type": [], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Memory-Bound" ] }
[ "MIT" ]
https://github.com/chengzeyi/stable-fast/blob/3a6f35c7045f8f6812515957ca62ef37260ff080/src/sfast/triton/ops/copy.py
52b19649-738b-4b5d-b8bd-609bca2dcadc
preprocess_cumsum_gk.py
berlino/seq_icl
src/models/sequence/rnn/gla_triton/inter_chunk_contribution/preprocess_cumsum_gk.py
9b9223d15348b5a415fb453ed988ed5f7ab9fbdc
0
@triton.jit def stable_log_sigmoid(x): max_value = tl.where(x < 0, x, 0) abs_value = tl.where(x > 0, x, -x) return max_value - tl.log(1 + tl.exp(-abs_value))
{ "Data Type": [ "fp32" ], "Functionality": [ "Activation Functions" ], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "Apache" ]
https://github.com/berlino/seq_icl/blob/9b9223d15348b5a415fb453ed988ed5f7ab9fbdc/src/models/sequence/rnn/gla_triton/inter_chunk_contribution/preprocess_cumsum_gk.py
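What stable_log_sigmoid above evaluates, written as a plain PyTorch reference (a sketch; the function name is illustrative): log(sigmoid(x)) = min(x, 0) - log(1 + exp(-|x|)), a form that avoids overflow for large |x|.

import torch

def log_sigmoid_reference(x: torch.Tensor) -> torch.Tensor:
    # equivalent to torch.nn.functional.logsigmoid(x)
    return torch.minimum(x, torch.zeros_like(x)) - torch.log1p(torch.exp(-x.abs()))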
1da3bedd-0ea9-4a1e-b787-49c4893f39be
fused_moe_a16w4.py
AlibabaPAI/FLASHNN
flashnn/triton_kernels/fused_moe_a16w4.py
528a9301587f5fb135b25d973a87ba0a40a703a7
0
@triton.jit def _fused_moe_kernel_a16w4_subchannel(A, B, C, scale_b_ptr, zero_points_ptr, topk_weights_ptr, sorted_token_ids_ptr, expert_ids_ptr, num_tokens_post_padded_ptr, N, K, EM, num_valid_tokens, stride_am, stride_ak, stride_be, stride_bn, stride_bk, stride_cm, stride_cn, stride_scale_be, stride_scale_bn, stride_scale_bk, stride_zero_points_e, stride_zero_points_n, stride_zero_points_k, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr, MUL_ROUTED_WEIGHT: tl.constexpr, top_k: tl.constexpr, add_zero_points: tl.constexpr): pid = tl.program_id(axis=0) num_pid_m = tl.cdiv(EM, BLOCK_SIZE_M) num_pid_n = tl.cdiv(N, BLOCK_SIZE_N) num_pid_in_group = GROUP_SIZE_M * num_pid_n group_id = pid // num_pid_in_group first_pid_m = group_id * GROUP_SIZE_M group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M) pid_m = first_pid_m + pid % num_pid_in_group % group_size_m pid_n = pid % num_pid_in_group // group_size_m num_tokens_post_padded = tl.load(num_tokens_post_padded_ptr) if pid_m * BLOCK_SIZE_M >= num_tokens_post_padded: return offs_token_id = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) offs_token = tl.load(sorted_token_ids_ptr + offs_token_id) token_mask = offs_token < num_valid_tokens offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N * 2) // 2) % N offs_k = tl.arange(0, BLOCK_SIZE_K) a_ptrs = A + (offs_token[:, None] // top_k * stride_am + offs_k[None, : ] * stride_ak) off_experts = tl.load(expert_ids_ptr + pid_m) b_ptrs = B + off_experts * stride_be + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn) if add_zero_points: offs_zp_n = (pid_n * BLOCK_SIZE_N * 2 + tl.arange(0, 2 * BLOCK_SIZE_N) ) % (2 * N) _ZERO_POINT0 = tl.zeros([1], dtype=zero_points_ptr.dtype.element_ty) _A0 = tl.zeros([1, 1], dtype=A.dtype.element_ty) _B0 = tl.zeros([1, 1], dtype=B.dtype.element_ty) _SCALE0 = tl.zeros([1], dtype=scale_b_ptr.dtype.element_ty) accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N * 2), dtype=tl.float32) l_shifter = (1 - tl.arange(0, BLOCK_SIZE_N * 2) % 2) * 4 for k in range(tl.cdiv(K, BLOCK_SIZE_K)): a = tl.load(a_ptrs, mask=token_mask[:, None] & (offs_k[None, :] < K - k * BLOCK_SIZE_K), other=_A0) b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=_B0) b = (b << l_shifter[None, :]).to(tl.int8).__rshift__(4) if add_zero_points: zp_ptrs = (zero_points_ptr + off_experts * stride_zero_points_e + offs_zp_n * stride_zero_points_n + k) zero_points_vals = tl.load(zp_ptrs) b = b - zero_points_vals[None, :] offs_scale_n = pid_n * BLOCK_SIZE_N * 2 + tl.arange(0, 2 * BLOCK_SIZE_N ) scale_b_ptrs = (scale_b_ptr + off_experts * stride_scale_be + offs_scale_n * stride_scale_bn + k) scales_val = tl.load(scale_b_ptrs, mask=offs_scale_n < 2 * N, other =_SCALE0) b = b * scales_val[None, :] accumulator += tl.dot(a, b, out_dtype=tl.float32) a_ptrs += BLOCK_SIZE_K * stride_ak b_ptrs += BLOCK_SIZE_K * stride_bk if MUL_ROUTED_WEIGHT: moe_weight = tl.load(topk_weights_ptr + offs_token, mask=token_mask, other=0.0) accumulator = accumulator * moe_weight[:, None] accumulator = accumulator.to(A.dtype.element_ty) offs_cn = pid_n * BLOCK_SIZE_N * 2 + tl.arange(0, BLOCK_SIZE_N * 2) c_ptrs = C + stride_cm * offs_token[:, None] + stride_cn * offs_cn[None, :] c_mask = token_mask[:, None] & (offs_cn[None, :] < N * 2) tl.store(c_ptrs, accumulator, mask=c_mask)
{ "Data Type": [ "int8" ], "Functionality": [ "Matrix Multiplication", "Attention Mechanisms" ], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [ "High Throughput" ] }
[ "Apache" ]
https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/fused_moe_a16w4.py
72381e7b-efbe-42ec-bcc7-02dd2d8675ed
y_0.py
IntelLabs/EquiTriton
src/equitriton/sph_harm/direct/y_0.py
1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c
0
@triton.jit def zeroth_order_bwd(coord_ptr: tl.tensor, coord_grad_ptr: tl.tensor, sph_grad_ptr: tl.tensor, block_size: tl.constexpr, coord_numel: tl. constexpr, output_numel: tl.constexpr, col_offset: tl.constexpr, output_stride: tl.constexpr): block_id = tl.program_id(0)
{ "Data Type": [], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "Apache" ]
https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_0.py
96d3e5c8-a2b2-4caf-a2d2-f0b0671b6574
quantize.py
pytorch/FBGEMM
fbgemm_gpu/fbgemm_gpu/triton/quantize.py
fe980ab54a6e28818d81c8694b6564e7f804418b
0
@triton.jit def _compute_exp(group_max, rounding_mode, rand_bits, MBITS: tl.constexpr): """Compute shared exponent of group using specified rounding mode. Args: group_max (Tensor): Group of values to compute exponent of. rounding_mode (int or RoundingMode): Which rounding mode to use. rand_bits (int): Random integer values used for stochastic rounding. mbits (int): Number of mantissa bits in target mx4 format. Returns: Tensor: Shared exponent of group. """ MBITS_FP32: tl.constexpr = 23 M_ROUND: tl.constexpr = (1 << MBITS_FP32 - MBITS - 1) - 1 RAND_MASK: tl.constexpr = (1 << MBITS_FP32 - MBITS) - 1 if rounding_mode == 0: return tl.floor(tl.log2(group_max) + 0.5) if rounding_mode == 1: return _floor_log2(group_max) elif rounding_mode == 2: group_max = group_max.to(tl.int32, bitcast=True) + M_ROUND return _floor_log2(group_max) elif rounding_mode == 3: group_max = group_max.to(tl.int32, bitcast=True) + (RAND_MASK & rand_bits) return _floor_log2(group_max) else: return tl.ceil(tl.log2(group_max))
{ "Data Type": [ "fp32" ], "Functionality": [ "Activation Functions" ], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "BSD", "MIT" ]
https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/triton/quantize.py
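Worked values of the bit-manipulation constants in _compute_exp above for a single mantissa bit in the target format; MBITS = 1 is an assumption chosen only to make the arithmetic concrete.

MBITS_FP32 = 23
MBITS = 1
M_ROUND = (1 << (MBITS_FP32 - MBITS - 1)) - 1   # 2**21 - 1 = 2097151 (0x1FFFFF), added to the fp32 bits before the floor-log2
RAND_MASK = (1 << (MBITS_FP32 - MBITS)) - 1     # 2**22 - 1 = 4194303 (0x3FFFFF), masks the bits used for stochastic rounding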
30aea82c-19d0-417f-9cf2-abd252b59a7d
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/abc/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit def chunk_abc_bwd_kernel_intra_V(q, k, z, dA, dq, dk, s_k_h, s_k_t, s_k_d, T: tl.constexpr, K: tl.constexpr, BT: tl.constexpr, BC: tl.constexpr, BK: tl.constexpr, NC: tl.constexpr): i_k, i_c, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) i_t, i_i = i_c // NC, i_c % NC p_z = tl.make_block_ptr(z + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) p_zn = tl.make_block_ptr(z + i_bh * s_k_h, (T * K,), (s_k_d,), ((i_t * BT + i_i * BC) * K + i_k * BK,), (BK,), (0,)) b_zn = tl.load(p_zn, boundary_check=(0,)) b_z = tl.load(p_z, boundary_check=(0, 1)) b_zq = tl.exp(b_zn[None, :] - b_z) b_dq = tl.zeros([BC, BK], dtype=tl.float32) for i_j in range(0, i_i): p_k = tl.make_block_ptr(k + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), ( i_t * BT + i_j * BC, i_k * BK), (BC, BK), (1, 0)) p_dA = tl.make_block_ptr(dA + i_bh * T * BT, (T, BT), (BT, 1), (i_t * BT + i_i * BC, i_j * BC), (BC, BC), (1, 0)) b_k = tl.load(p_k, boundary_check=(0, 1)) b_kz = tl.exp(b_k - b_zn[None, :]).to(b_k.dtype) b_dA = tl.load(p_dA, boundary_check=(0, 1)) b_dq += tl.dot(b_dA, b_kz, allow_tf32=False) b_dq *= b_zq o_i = tl.arange(0, BC) o_dA = i_bh * T * BT + (i_t * BT + i_i * BC + tl.arange(0, BC) ) * BT + i_i * BC m_dA = i_t * BT + i_i * BC + tl.arange(0, BC) < T for j in range(0, BC): p_kj = tl.make_block_ptr(k + i_bh * s_k_h, (T * K,), (1,), ((i_t * BT + i_i * BC + j) * K + i_k * BK,), (BK,), (0,)) b_dA = tl.load(dA + o_dA + j, mask=m_dA, other=0) b_kj = tl.load(p_kj, boundary_check=(0,)).to(tl.float32) m_i = o_i[:, None] >= j b_dq += tl.where(m_i, b_dA[:, None] * tl.exp(b_kj[None, :] - b_z), 0.0) p_dq = tl.make_block_ptr(dq + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), ( i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) tl.store(p_dq, b_dq.to(p_dq.dtype.element_ty), boundary_check=(0, 1)) tl.debug_barrier() p_k = tl.make_block_ptr(k + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) p_zn = tl.make_block_ptr(z + i_bh * s_k_h, (T * K,), (s_k_d,), ((i_t * BT + i_i * BC + BC - 1) * K + i_k * BK,), (BK,), (0,)) b_zn = tl.load(p_zn, boundary_check=(0,)) b_k = tl.load(p_k, boundary_check=(0, 1)) b_kz = tl.exp(b_k - b_zn[None, :]) b_dk = tl.zeros([BC, BK], dtype=tl.float32) for i_j in range(i_i + 1, NC): p_q = tl.make_block_ptr(q + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), ( i_t * BT + i_j * BC, i_k * BK), (BC, BK), (1, 0)) p_z = tl.make_block_ptr(z + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), ( i_t * BT + i_j * BC, i_k * BK), (BC, BK), (1, 0)) p_dA = tl.make_block_ptr(dA + i_bh * T * BT, (T, BT), (BT, 1), (i_t * BT + i_j * BC, i_i * BC), (BC, BC), (1, 0)) b_q = tl.load(p_q, boundary_check=(0, 1)) b_z = tl.load(p_z, boundary_check=(0, 1)) b_qz = (b_q * tl.exp(b_zn[None, :] - b_z)).to(b_q.dtype) b_dA = tl.load(p_dA, boundary_check=(0, 1)) b_dk += tl.dot(tl.trans(b_dA), b_qz, allow_tf32=False) b_dk *= b_kz o_dA = i_bh * T * BT + (i_t * BT + i_i * BC) * BT + i_i * BC + tl.arange( 0, BC) for j in range(0, BC): p_qj = tl.make_block_ptr(q + i_bh * s_k_h, (T * K,), (1,), ((i_t * BT + i_i * BC + j) * K + i_k * BK,), (BK,), (0,)) p_zj = tl.make_block_ptr(z + i_bh * s_k_h, (T * K,), (1,), ((i_t * BT + i_i * BC + j) * K + i_k * BK,), (BK,), (0,)) b_dA = tl.load(dA + o_dA + j * BT, mask=i_t * BT + i_i * BC + j < T, other=0) b_qj = tl.load(p_qj, boundary_check=(0,)).to(tl.float32) b_zj = tl.load(p_zj, boundary_check=(0,)).to(tl.float32) m_i = o_i[:, None] <= j b_dk += tl.where(m_i, b_dA[:, None] * b_qj[None, :] * tl.exp(b_k - b_zj[None, :]), 0.0) p_dk = 
tl.make_block_ptr(dk + i_bh * s_k_h, (T, K), (s_k_t, s_k_d), ( i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)) tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/abc/chunk.py
0e7cd672-1e72-48ba-b85f-b3d9b3529a63
rtn_kernel.py
ArthurinRUC/libquant
libquant/triton/rtn_kernel.py
f2a42a78a96e867862d24d931b70500332ece5cb
0
@triton.jit def quant_rtn_triton(mat: tl.tensor, scale: tl.tensor, zero_point: tl. tensor, quant_dim: int, nbits: int, per_channel: bool, per_tensor: bool, use_zero_point: bool, group_size: int, scale_dtype: tl.dtype, zero_dtype: tl.dtype, quant_dtype: tl.dtype, device: tl.dtype) ->tl.Tuple[ tl.tensor, tl.tensor]: origin_shape = mat.shape if group_size is None: group_size = origin_shape[-1] mat = mat.reshape(-1, group_size) if use_zero_point: qmin, qmax = 0, 2 ** nbits - 1 xmin, xmax = tl.min(mat, axis=quant_dim), tl.max(mat, axis=quant_dim) scale = (xmax - xmin).div(qmax).clamp(min=1e-05).to(scale_dtype).to( device) zero_point = (-xmin / scale).to(zero_dtype).to(device) if not per_channel and not per_tensor: scale = scale.unsqueeze(1) zero_point = zero_point.unsqueeze(1) x = mat.div(scale).add(zero_point).round().clamp(qmin, qmax).to( quant_dtype).to(device) else: qmin, qmax = -2 ** (nbits - 1), 2 ** (nbits - 1) - 1 xabsmax = tl.max(tl.abs(mat), axis=quant_dim) scale = xabsmax.div(qmax).clamp(min=1e-05).to(scale_dtype).to(device) if not per_channel and not per_tensor: scale = scale.unsqueeze(1) x = mat.div(scale).round().clamp(qmin, qmax).to(quant_dtype).to(device) return x.reshape(origin_shape), scale
{ "Data Type": [ "fp32", "int8" ], "Functionality": [ "Quantization" ], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/ArthurinRUC/libquant/blob/f2a42a78a96e867862d24d931b70500332ece5cb/libquant/triton/rtn_kernel.py
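A plain PyTorch sketch of the same round-to-nearest scheme as quant_rtn_triton above, showing only the symmetric (no zero-point) branch; the group size, bit width, and int8 storage dtype are illustrative assumptions.

import torch

def quant_rtn_symmetric(mat: torch.Tensor, nbits: int = 4, group_size: int = 128):
    # assumes the last dimension is divisible by group_size
    orig_shape = mat.shape
    g = mat.reshape(-1, group_size)
    qmax = 2 ** (nbits - 1) - 1
    qmin = -2 ** (nbits - 1)
    scale = g.abs().amax(dim=1, keepdim=True).div(qmax).clamp(min=1e-05)
    q = g.div(scale).round().clamp(qmin, qmax).to(torch.int8)
    return q.reshape(orig_shape), scale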
d19a6587-227e-47d3-82af-4397af3e33c3
glu_kernels.py
BobMcDear/attorch
attorch/glu_kernels.py
da06cb6236bb47195e33fe3986ed21c675ed94cc
0
@triton.autotune(configs=element_wise_kernel_configs(), key=['size']) @triton.jit def glu_backward_kernel(output_grad_pointer, input1_pointer, input2_pointer, input1_grad_pointer, input2_grad_pointer, size, param, act_func: tl. constexpr, BLOCK_SIZE: tl.constexpr): """ Calculates the input gradient of the gated linear unit. Args: output_grad_pointer: Pointer to the unit's output gradients. The output gradients must be contiguous and contain size elements. input1_pointer: Pointer to the first half of the input that was gated. The first half must be contiguous and contain size elements. input2_pointer: Pointer to the second half of the input that was gated. The second half must be contiguous and contain size elements. input1_grad_pointer: Pointer to a container the first half's gradients are written to. The container must be contiguous and contain size elements. input2_grad_pointer: Pointer to a container the second half's gradients are written to. The container must be contiguous and contain size elements. size: Number of elements in each half of the input. param: Parameter in the case of parameterized activation functions. act_func: Name of activation function to apply. Options are 'sigmoid', 'tanh', 'relu', 'gelu', 'silu', 'relu6', 'hardsigmoid', 'hardswish', 'selu', 'mish', and 'leaky_relu'. BLOCK_SIZE: Block size. """ pid = tl.program_id(axis=0) offset = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) mask = offset < size output_grad = tl.load(output_grad_pointer + offset, mask=mask) input1 = tl.load(input1_pointer + offset, mask=mask) input2 = tl.load(input2_pointer + offset, mask=mask) input1_grad = output_grad * apply_act_func(input2, None, None, None, param, act_func, False) input2_grad = output_grad * input1 * apply_act_func_grad(1, input2, None, None, None, param, act_func, False) tl.store(input1_grad_pointer + offset, input1_grad, mask=mask) tl.store(input2_grad_pointer + offset, input2_grad, mask=mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Activation Functions" ], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/glu_kernels.py
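A PyTorch reference for the GLU input gradients computed above, using sigmoid as the gate; the activation choice is an assumption, since the kernel dispatches on act_func.

import torch

def glu_backward_reference(output_grad, input1, input2):
    gate = torch.sigmoid(input2)
    input1_grad = output_grad * gate
    input2_grad = output_grad * input1 * gate * (1 - gate)  # d/dx sigmoid(x) = s * (1 - s)
    return input1_grad, input2_grad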
14d6b032-07e3-479f-afba-018a5bb6e96f
modulation.py
ai-compiler-study/triton-kernels
triton_kernels/ops/modulation.py
2308e5e9d965059fe2d19b4d535debac4970b69e
0
@triton.jit def triton_modulation_scale_shift(x_ptr, modulation_ptr, output_ptr, batch_size, head_size, modulation_size, is_mod1, XBLOCK: tl.constexpr): pid = tl.program_id(0) xoffset = pid * XBLOCK + tl.arange(0, XBLOCK)[:] batch_idx = xoffset // batch_size head_dim_idx = xoffset % head_size modulation_offset = head_dim_idx + modulation_size * batch_idx x = tl.load(x_ptr + xoffset, None) if is_mod1: shift = tl.load(modulation_ptr + (modulation_offset + head_size * 0 ), None, eviction_policy='evict_last') scale = tl.load(modulation_ptr + (modulation_offset + head_size * 1 ), None, eviction_policy='evict_last') else: shift = tl.load(modulation_ptr + (modulation_offset + head_size * 3 ), None, eviction_policy='evict_last') scale = tl.load(modulation_ptr + (modulation_offset + head_size * 4 ), None, eviction_policy='evict_last') output = (scale + 1.0) * x + shift tl.store(output_ptr + xoffset, output, None)
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations", "Normalization" ], "Memory Access Pattern": [ "Coalesced", "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "Low Latency" ] }
[ "MIT" ]
https://github.com/ai-compiler-study/triton-kernels/blob/2308e5e9d965059fe2d19b4d535debac4970b69e/triton_kernels/ops/modulation.py
f5fa9512-df93-43c8-be9e-b332055b0317
sampling.py
falkaer/multi-scale-music
seq/sampling.py
a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d
0
@triton.jit def _logsumexp(X, OUT, xm_stride, xn_stride, out_stride, N, BLOCK_N: tl. constexpr): rm = tl.program_id(0) alpha = tl.zeros((1,), tl.float32) + -float('inf') res = tl.zeros((1,), tl.float32) for bn in range(0, N, BLOCK_N): rn = bn + tl.arange(0, BLOCK_N) Xmn = X + rm * xm_stride + rn * xn_stride x = tl.load(Xmn, mask=rn < N, other=-float('inf')) c = tl.max(x, axis=0) res = tl.where(c > alpha, res * tl.exp(alpha - c), res) alpha = tl.where(c > alpha, c, alpha) res += tl.sum(tl.exp(x - alpha), axis=0) out = tl.log(res) + alpha rm = tl.program_id(0) + tl.arange(0, 1) OUT = OUT + rm * out_stride tl.store(OUT, out)
{ "Data Type": [ "fp32" ], "Functionality": [ "Softmax" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/falkaer/multi-scale-music/blob/a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d/seq/sampling.py
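Per row, the _logsumexp kernel above maintains a running maximum (alpha) and a rescaled running sum (res) while streaming over N in BLOCK_N chunks; the end result matches the usual stable formula, shown here as a PyTorch sketch.

import torch

def logsumexp_rows_reference(X: torch.Tensor) -> torch.Tensor:
    # m + log(sum(exp(x - m))) with m = row max; the kernel computes the same quantity
    # incrementally, rescaling the partial sum whenever a larger block max appears.
    return torch.logsumexp(X, dim=1)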
c95d09f9-2474-4dce-8b77-528236596854
softmax.py
dame-cell/Triformer
triformer/softmax.py
0712537d576166b93fa09aa9509b2661b9ed8a68
0
@triton.jit def softmax_kernel_backward(grad_out_ptr, probs_ptr, grad_in_ptr, grad_stride, probs_stride, out_stride, seq_len, BLOCK_SIZE: tl. constexpr, num_warps: tl.constexpr): batch_idx = tl.program_id(0) probs_start_ptr = probs_ptr + batch_idx * probs_stride grad_start_ptr = grad_in_ptr + batch_idx * grad_stride pos_offsets = tl.arange(0, BLOCK_SIZE) probs_ptrs = probs_start_ptr + pos_offsets grad_ptrs = grad_start_ptr + pos_offsets valid_mask = pos_offsets < seq_len probs_vals = tl.load(probs_ptrs, mask=valid_mask, other=0.0) grad_vals = tl.load(grad_ptrs, mask=valid_mask, other=0.0) grad_times_probs = probs_vals * grad_vals final_grad = grad_times_probs - probs_vals * tl.sum(grad_times_probs, axis=0) out_start_ptr = grad_out_ptr + batch_idx * out_stride out_ptrs = out_start_ptr + pos_offsets tl.store(out_ptrs, final_grad, mask=valid_mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Softmax", "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/dame-cell/Triformer/blob/0712537d576166b93fa09aa9509b2661b9ed8a68/triformer/softmax.py
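A row-wise reference for softmax_kernel_backward above (sketch, helper name illustrative): with probabilities p and upstream gradient g, the input gradient is p * (g - sum(p * g)).

import torch

def softmax_backward_reference(probs: torch.Tensor, grad_out: torch.Tensor) -> torch.Tensor:
    inner = (probs * grad_out).sum(dim=-1, keepdim=True)
    return probs * (grad_out - inner)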
3f008594-2d5f-43ff-9467-54adf244ea10
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/hgrn/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit def chunk_hgrn_bwd_kernel_o(g, gc, o, dx, dg, s_b, s_t, s_d, T: tl. constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr): i_d, i_b = tl.program_id(0), tl.program_id(1) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D for i_t in range(tl.cdiv(T, BT) - 1, -1, -1): p_g = tl.make_block_ptr(g + i_b * s_b, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_gc = tl.make_block_ptr(gc + i_b * s_b, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_o = tl.make_block_ptr(o + i_b * s_b, (T, D), (s_t, s_d), (i_t * BT - 1, i_d * BD), (BT, BD), (1, 0)) p_dx = tl.make_block_ptr(dx + i_b * s_b, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_dg = tl.make_block_ptr(dg + i_b * s_b, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) mask_t = mask & ((i_t + 1) * BT < T) b_ht = tl.load(dx + i_b * T * D + (i_t + 1) * BT * D + o_d, mask= mask_t, other=0).to(tl.float32) b_g = tl.load(p_g, boundary_check=(0, 1)).to(tl.float32) b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32) b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32) b_dx = tl.load(p_dx, boundary_check=(0, 1)).to(tl.float32) b_dx = b_dx + tl.exp(b_gc) * b_ht[None, :] b_dg = b_o * b_dx * tl.exp(b_g) tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), boundary_check=(0, 1)) tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Recurrent Neural Networks" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/hgrn/chunk.py
943d5d1b-75b2-49d6-8775-f5cd28ee9e60
cumsum.py
sustcsonglin/flash-linear-attention
fla/ops/utils/cumsum.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({'BT': 16}, num_warps=2), triton. Config({'BT': 32}, num_warps=4), triton.Config({'BT': 32}, num_warps=2), triton.Config({'BT': 64}, num_warps=8), triton.Config({'BT': 64}, num_warps=4)], key=[]) @triton.jit def chunk_global_cumsum_scalar_kernel(s, o, offsets, T: tl.constexpr, H: tl .constexpr, BT: tl.constexpr, HEAD_FIRST: tl.constexpr, USE_OFFSETS: tl .constexpr): i_bh = tl.program_id(0) i_b, i_h = i_bh // H, i_bh % H if USE_OFFSETS: bos, eos = tl.load(offsets + i_b).to(tl.int32), tl.load(offsets + i_b + 1).to(tl.int32) else: bos, eos = i_b * T, i_b * T + T T = eos - bos b_z = tl.zeros([], dtype=tl.float32) for i_t in range(tl.cdiv(T, BT)): if HEAD_FIRST: p_s = tl.make_block_ptr(s + i_bh * T, (T,), (1,), (i_t * BT,), (BT,), (0,)) p_o = tl.make_block_ptr(o + i_bh * T, (T,), (1,), (i_t * BT,), (BT,), (0,)) else: p_s = tl.make_block_ptr(s + bos * H + i_h, (T,), (H,), (i_t * BT,), (BT,), (0,)) p_o = tl.make_block_ptr(o + bos * H + i_h, (T,), (H,), (i_t * BT,), (BT,), (0,)) b_s = tl.load(p_s, boundary_check=(0,)).to(tl.float32) b_o = tl.cumsum(b_s, axis=0) + b_z[None] b_z += tl.sum(b_s, axis=0) tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0,))
{ "Data Type": [ "fp32" ], "Functionality": [], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/utils/cumsum.py
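A PyTorch sketch of what chunk_global_cumsum_scalar_kernel above produces for the fixed-length, HEAD_FIRST layout (s of shape [B*H, T]); the BT value is illustrative. Each chunk's local cumsum is offset by the running carry, exactly as the kernel does with b_z.

import torch

def chunked_cumsum_reference(s: torch.Tensor, BT: int = 32) -> torch.Tensor:
    o = torch.empty_like(s)
    carry = torch.zeros(s.shape[:-1], device=s.device, dtype=s.dtype)
    for t0 in range(0, s.shape[-1], BT):
        blk = s[..., t0:t0 + BT]
        o[..., t0:t0 + BT] = torch.cumsum(blk, dim=-1) + carry[..., None]
        carry = carry + blk.sum(dim=-1)
    return o  # equals torch.cumsum(s, dim=-1)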
d4e4674a-0ac2-49d8-bf16-e5324188a47b
triton_ops.py
huyz2023/2by4-pretrain
sparse/triton_ops.py
9e330125dea71e5a3dee235f4efb8869f9e4cdd0
0
@triton.jit def _soft_threshold24_triton(dense_ptr, sparse_ptr, mask_ptr, dense_row_stride, sparse_row_stride, mask_row_stride, dense_col_stride, sparse_col_stride, mask_col_stride, m, k, BLOCK_SIZE: tl.constexpr, ARRAY_LAYOUT: tl.constexpr): if ARRAY_LAYOUT == 'row': row_idx = tl.program_id(0) col_idx = tl.program_id(1) * 4 * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE ) * 4 mask = col_idx < k elif ARRAY_LAYOUT == 'col': row_idx = tl.arange(0, BLOCK_SIZE) + tl.program_id(0) * BLOCK_SIZE col_idx = tl.program_id(1) * 4 mask = row_idx < m dense_40 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 0) * dense_col_stride, mask=mask) dense_41 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 1) * dense_col_stride, mask=mask) dense_42 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 2) * dense_col_stride, mask=mask) dense_43 = tl.load(dense_ptr + row_idx * dense_row_stride + (col_idx + 3) * dense_col_stride, mask=mask) dense_40, dense_41, dense_42, dense_43, m0, m1, m2, m3 = _soft_threshold( dense_40, dense_41, dense_42, dense_43) tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 0) * sparse_col_stride, dense_40, mask=mask & m0) tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 1) * sparse_col_stride, dense_41, mask=mask & m1) tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 2) * sparse_col_stride, dense_42, mask=mask & m2) tl.store(sparse_ptr + row_idx * sparse_row_stride + (col_idx + 3) * sparse_col_stride, dense_43, mask=mask & m3) tl.store(mask_ptr + row_idx * mask_row_stride + (col_idx + 0) * mask_col_stride, m0, mask=mask & m0) tl.store(mask_ptr + row_idx * mask_row_stride + (col_idx + 1) * mask_col_stride, m1, mask=mask & m1) tl.store(mask_ptr + row_idx * mask_row_stride + (col_idx + 2) * mask_col_stride, m2, mask=mask & m2) tl.store(mask_ptr + row_idx * mask_row_stride + (col_idx + 3) * mask_col_stride, m3, mask=mask & m3)
{ "Data Type": [ "fp32" ], "Functionality": [ "Activation Functions", "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "BSD" ]
https://github.com/huyz2023/2by4-pretrain/blob/9e330125dea71e5a3dee235f4efb8869f9e4cdd0/sparse/triton_ops.py
05304651-6ea3-4b33-a724-8242f59e5ec0
mamba_ssm.py
Charlie-XIAO/sparse-vllm
vllm/model_executor/layers/mamba/ops/mamba_ssm.py
d228909a30b0c245c35417fb7d2acdf9a3690042
0
@triton.jit def softplus(dt): dt = tl.where(dt <= 20.0, tl.math.log1p(tl.exp(dt)), dt) return dt
{ "Data Type": [ "fp32" ], "Functionality": [ "Activation Functions" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "Apache" ]
https://github.com/Charlie-XIAO/sparse-vllm/blob/d228909a30b0c245c35417fb7d2acdf9a3690042/vllm/model_executor/layers/mamba/ops/mamba_ssm.py
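The same thresholded softplus in plain PyTorch (sketch; function name illustrative): 20.0 is the cutoff used above, beyond which softplus(x) is numerically indistinguishable from x.

import torch

def softplus_reference(dt: torch.Tensor) -> torch.Tensor:
    return torch.where(dt <= 20.0, torch.log1p(torch.exp(dt)), dt)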
a24a6db0-d013-472f-aa2f-4b7f7c770497
causal_product_bwd.py
calclavia/Triton-Transformer
ttx/attention/causal_product_bwd.py
d1d1e5b5651cf7959866b0198d90a665e1f45354
0
@triton.jit def causal_product_bwd_kernel(q_ptr, k_ptr, v_ptr, grad_out, grad_Q_ptr, grad_K_ptr, grad_V_ptr, batch, length, dim, vdim, **meta): BLOCK_SIZE = meta['BLOCK_SIZE'] pid = tl.program_id(axis=0) state = tl.zeros((BLOCK_SIZE, BLOCK_SIZE), dtype=tl.float32) cur_qk_pos = pid * matrix_size * dim cur_v_pos = pid * matrix_size * vdim dim_ptrs = tl.arange(0, BLOCK_SIZE) qkmask = dim_ptrs < dim vmask = dim_ptrs < vdim for _ in range(0, length, 1): qk_row_offsets = cur_qk_pos + dim_ptrs v_row_offsets = cur_v_pos + dim_ptrs k = tl.load(k_ptr + qk_row_offsets, mask=qkmask, other=0) v = tl.load(v_ptr + v_row_offsets, mask=vmask, other=0) context = tl.dot(k[:, None], v[None, :]) state += context g = tl.load(grad_out + v_row_offsets, mask=vmask, other=0) grad_q = tl.dot(state, g[:, None]) tl.store(grad_Q_ptr + qk_row_offsets[:, None], grad_q, mask=qkmask[ :, None]) cur_qk_pos += dim cur_v_pos += vdim """ state *= 0 for _ in range(0, length, 1): # Move back one row cur_pos -= dim # Offset for a single row in Q, K, V row_offsets = cur_pos + dim_ptrs # Load the current row of Q, K, V vectors. All are vectors of shape [dim] q = tl.load(q_ptr + row_offsets, mask=mask, other=0) k = tl.load(k_ptr + row_offsets, mask=mask, other=0) v = tl.load(v_ptr + row_offsets, mask=vmask, other=0) # Load gradient g = tl.load(grad_out + row_offsets, mask=vmask, other=0) # Compute context [D, M] matrix from [D, 1] x [1, M] context = tl.dot(q[:, None], g[None, :]) # state += context # Compute gradients [1, D] x [D, M] => [1, M] grad_v = tl.dot(k[None, :], context) grad_v = tl.reshape(grad_v, (meta['BLOCK_SIZE'],)) # grad_v = tl.dot(k[None, :], state) # Enabling the follownig leads to a hang # grad_k = tl.dot(state, v[:, None]) # print(grad_v.shape) # print(grad_k.shape) # Store the result of this row # tl.store(grad_V_ptr + row_offsets[None, # :], grad_v, mask=vmask[None, :]) tl.store(grad_V_ptr + row_offsets, grad_v, mask=vmask) # tl.store(grad_K_ptr + row_offsets[:, None], grad_k, mask=mask[:, None]) """
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Backpropagation" ], "Memory Access Pattern": [ "Tiled", "Strided Access" ], "Parallelization Strategy": [ "Cooperative Groups" ], "Performance Objective": [ "Compute Bound", "Memory-Bound" ] }
[ "MIT" ]
https://github.com/calclavia/Triton-Transformer/blob/d1d1e5b5651cf7959866b0198d90a665e1f45354/ttx/attention/causal_product_bwd.py
e1a91636-50d2-4089-a5a6-18102bcab37e
k_layer_norm.py
cpuhrsch/torchfused
torchfused/triton/k_layer_norm.py
6c40ed160dcecbe7825f268f7c86bccd359e0ebf
0
@triton.jit def _layer_norm_fw(X, Y, W, B, M, V, stride, N, eps, **META): """ Fused layernorm kernel over a 3d tensor. The layer norm is applied over the last dimension. Compute y = (x - E(x))/(sqrt(var(x) + epsilon)) * gamma + beta """ y = _layer_norm_non_affine(X, M, V, stride, N, eps, META) y = _affine(W, B, N, y, META) _store(y, Y, stride, N, META)
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "BSD" ]
https://github.com/cpuhrsch/torchfused/blob/6c40ed160dcecbe7825f268f7c86bccd359e0ebf/torchfused/triton/k_layer_norm.py
9bdd2ff5-9f28-4614-bdb9-b519f0ff99ea
paged_attn_v1.py
AlibabaPAI/FLASHNN
flashnn/triton_kernels/paged_attn_v1.py
528a9301587f5fb135b25d973a87ba0a40a703a7
0
@triton.jit def _single_query_cached_kv_attention_v1(out, q, k_cache, v_cache, head_mapping, scale, block_tables, seq_lens, max_num_blocks_per_seq, stride_qm, stride_qn, stride_om, stride_on, stride_km, stride_kn, stride_kk, SLOT_SIZE: tl.constexpr, HEAD_SIZE: tl.constexpr): head_idx = tl.program_id(axis=0) token_idx = tl.program_id(axis=1) kv_head_idx = tl.load(head_mapping + head_idx) offs_q = token_idx * stride_qm + head_idx * stride_qn + tl.arange(0, HEAD_SIZE) q = tl.load(q + offs_q) q = (q * scale).to(tl.float16) seq_len = tl.load(seq_lens + token_idx) qkv = tl.zeros([SLOT_SIZE, HEAD_SIZE], dtype=tl.float32) m_prev = tl.zeros([1, 1], tl.float32) - float('inf') d_prev = tl.zeros([1, 1], tl.float32) slot_offs = tl.arange(0, SLOT_SIZE) head_size_offs = tl.arange(0, HEAD_SIZE) block_base_ptrs = block_tables + token_idx * max_num_blocks_per_seq kv_base_offs = kv_head_idx * stride_kn + slot_offs[:, None ] * stride_kk + head_size_offs[None, :] for i in range(0, tl.cdiv(seq_len, SLOT_SIZE)): block_idx = tl.load(block_base_ptrs + i) mask = (slot_offs[:, None] < seq_len - i * SLOT_SIZE) & (head_size_offs [None, :] < HEAD_SIZE) kv_offs = block_idx * stride_km + kv_base_offs k = tl.load(k_cache + kv_offs, mask=mask, other=0.0) v = tl.load(v_cache + kv_offs, mask=mask, other=0.0) x_i = tl.sum(q[None, :] * k, axis=1)[:, None] x_i = tl.where(slot_offs[:, None] < seq_len - i * SLOT_SIZE, x_i, float('-inf')) m_i = tl.maximum(m_prev, tl.max(x_i, axis=0)) d_i = d_prev * tl.exp(m_prev - m_i) + tl.sum(tl.exp(x_i - m_i), axis=0) qkv = qkv * (d_prev * tl.exp(m_prev - m_i) / d_i) + tl.exp(x_i - m_i ) / d_i * v m_prev = m_i d_prev = d_i offs_q = token_idx * stride_om + head_idx * stride_on + tl.arange(0, HEAD_SIZE) tl.store(out + offs_q, tl.sum(qkv, axis=0))
{ "Data Type": [ "fp16" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Tiled", "Strided Access" ], "Parallelization Strategy": [ "Cooperative Groups" ], "Performance Objective": [ "Compute Bound", "Memory-Bound" ] }
[ "Apache" ]
https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/paged_attn_v1.py
2bb44c20-7403-4987-8920-de61d4b20097
triton_kernels.py
IntelLabs/EquiTriton
src/equitriton/sph_harm/triton_kernels.py
1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c
0
@triton.jit def _triton_second_order_bwd(x_ptr: tl.tensor, y_ptr: tl.tensor, z_ptr: tl. tensor, g_x_ptr: tl.tensor, g_y_ptr: tl.tensor, g_z_ptr: tl.tensor, g_1_0_ptr: tl.tensor, g_1_1_ptr: tl.tensor, g_1_2_ptr: tl.tensor, g_2_0_ptr: tl.tensor, g_2_1_ptr: tl.tensor, g_2_2_ptr: tl.tensor, g_2_3_ptr: tl.tensor, g_2_4_ptr: tl.tensor, BLOCK_SIZE: tl.constexpr, vector_length: tl.constexpr): sqrt_3 = 3 ** 0.5 sqrt_5 = 5 ** 0.5 sqrt_15 = 15 ** 0.5 block_id = tl.program_id(0) offset = tl.arange(0, BLOCK_SIZE) + BLOCK_SIZE * block_id x_row_start = x_ptr + offset y_row_start = y_ptr + offset z_row_start = z_ptr + offset x = tl.load(x_row_start, mask=offset < vector_length) y = tl.load(y_row_start, mask=offset < vector_length) z = tl.load(z_row_start, mask=offset < vector_length) g_1_0 = tl.load(g_1_0_ptr + offset, mask=offset < vector_length) g_1_1 = tl.load(g_1_1_ptr + offset, mask=offset < vector_length) g_1_2 = tl.load(g_1_2_ptr + offset, mask=offset < vector_length) g_x = sqrt_3 * g_1_0 g_y = sqrt_3 * g_1_1 g_z = sqrt_3 * g_1_2 g_2_0 = tl.load(g_2_0_ptr + offset, mask=offset < vector_length) g_2_1 = tl.load(g_2_1_ptr + offset, mask=offset < vector_length) g_2_2 = tl.load(g_2_2_ptr + offset, mask=offset < vector_length) g_2_3 = tl.load(g_2_3_ptr + offset, mask=offset < vector_length) g_2_4 = tl.load(g_2_4_ptr + offset, mask=offset < vector_length) g_x += sqrt_15 * z * g_2_0 g_z += sqrt_15 * x * g_2_0 g_x += sqrt_15 * y * g_2_1 g_y += sqrt_15 * x * g_2_1 g_y += sqrt_15 * z * g_2_2 g_z += sqrt_15 * y * g_2_2 g_x += -1.0 * sqrt_5 * x * g_2_3 g_y += 2.0 * sqrt_5 * y * g_2_3 g_z += -1.0 * sqrt_5 * z * g_2_3 g_x += -1.0 * sqrt_15 * x * g_2_4 g_z += sqrt_15 * z * g_2_4 tl.store(g_x_ptr + offset, g_x, mask=offset < vector_length) tl.store(g_y_ptr + offset, g_y, mask=offset < vector_length) tl.store(g_z_ptr + offset, g_z, mask=offset < vector_length)
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/triton_kernels.py
cc1b6b08-4b84-41f0-8801-561eb1ccdb1d
kernels.py
pytorch-labs/tritonbench
tritonbench/operators/jagged_mean/kernels.py
3a5dccb159834968567a2e45e561dc1aeaa8f8a8
0
@triton.autotune(configs=[triton.Config({'BLOCK_SIZE_RAGGED': b_r, 'BLOCK_SIZE_M': b_m}, num_warps=w, num_stages=s) for b_r, b_m, w, s in itertools.product(BLOCK_SIZES_RAGGED, BLOCK_SIZES_M, NUM_WARPS, NUM_STAGES)], key=['M']) @triton.jit def triton_jagged_mean_kernel_variable_length_loop_buffer_then_sum( input_ptr_values, input_ptr_offsets, output_ptr, M, BLOCK_SIZE_RAGGED: tl.constexpr, BLOCK_SIZE_M: tl.constexpr): pid = tl.program_id(axis=0) pid_ragged = pid // tl.cdiv(M, BLOCK_SIZE_M) pid_m = pid % tl.cdiv(M, BLOCK_SIZE_M) buffer = tl.zeros((BLOCK_SIZE_RAGGED, BLOCK_SIZE_M), dtype=tl.float32) block_start_m = pid_m * BLOCK_SIZE_M offsets_m = block_start_m + tl.arange(0, BLOCK_SIZE_M) mask_m = offsets_m < M ragged_start, ragged_end = tl.load(input_ptr_offsets + pid_ragged ), tl.load(input_ptr_offsets + (pid_ragged + 1)) ragged_len = ragged_end - ragged_start for block_start_ragged in range(ragged_start, ragged_end, BLOCK_SIZE_RAGGED ): offsets_ragged = block_start_ragged + tl.arange(0, BLOCK_SIZE_RAGGED) mask_ragged = offsets_ragged < ragged_end idxs = offsets_ragged[:, None] * M + offsets_m mask = mask_ragged[:, None] & mask_m buffer += tl.load(input_ptr_values + idxs, mask=mask, other=0) buffer_sum = tl.sum(buffer, axis=0) buffer_view = buffer_sum.reshape((BLOCK_SIZE_M,)) buffer_view_mean = buffer_view * (1 / ragged_len) output_offsets = offsets_m + pid_ragged * M output_mask = output_offsets < M * (pid_ragged + 1) tl.store(output_ptr + output_offsets, buffer_view_mean, mask=output_mask)
{ "Data Type": [ "fp32", "fp16" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Blocked Access", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "BSD" ]
https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/jagged_mean/kernels.py
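A loop-based PyTorch reference for the jagged mean kernel above (sketch): values is a (total_rows, M) tensor and offsets delimits the ragged segments, so output row i is the column-wise mean of rows offsets[i]:offsets[i+1]. The helper name and the eager loop are illustrative only.

import torch

def jagged_mean_reference(values: torch.Tensor, offsets: torch.Tensor) -> torch.Tensor:
    out = []
    for i in range(offsets.numel() - 1):
        lo, hi = int(offsets[i]), int(offsets[i + 1])
        out.append(values[lo:hi].mean(dim=0))
    return torch.stack(out)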
c2c30b99-5b11-48aa-a966-7d545ba2a465
cvmm.py
dtadpole/nanoGPT_lightning
cvmm.py
5db66f7714a9a40191f4f208ecbb650ad8c93cc6
0
@triton.autotune(
    configs=[
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 64}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 4}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 64}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 8}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 8}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 8}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 16}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 16}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 64}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 64}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 32}, num_stages=4, num_warps=4),
    ],
    key=['M', 'N', 'K', 'float32_out', 'allow_tf32', 'op_float16'],
    reset_to_zero=['c_ptr'],
)
@triton.jit
def cvmm_backward_kernel3(a_ptr, b_ptr, c_ptr, index_ptr, sel_ptr,
                          out_index_ptr, M, N, K, stride_am, stride_ak,
                          stride_bk, stride_bn, stride_co, stride_cm,
                          stride_cn, stride_index, stride_sel,
                          stride_out_index, out_index_is_none: tl.constexpr,
                          float32_out: tl.constexpr, allow_tf32: tl.constexpr,
                          op_float16: tl.constexpr, BLOCK_SIZE_M: tl.constexpr,
                          BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
                          GROUP_SIZE_M: tl.constexpr, K_BLOCKS: tl.constexpr):
    """Kernel for computing the matmul C = A x B.
    A has shape (M, K), B has shape (K, N) and C has shape (M, N)
    """
    pid = tl.program_id(axis=0)
    k_block_id = tl.program_id(axis=1)
    num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
    num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
    num_pid_in_group = GROUP_SIZE_M * num_pid_n
    group_id = pid // num_pid_in_group
    first_pid_m = group_id * GROUP_SIZE_M
    group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
    pid_m = first_pid_m + pid % group_size_m
    pid_n = pid % num_pid_in_group // group_size_m
    offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
    offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
    a_ptrs_this = a_ptr + offs_am[:, None] * stride_am
    b_ptrs_this = b_ptr + offs_bn[None, :] * stride_bn
    block_start_index = k_block_id * BLOCK_SIZE_K * K_BLOCKS
    block_end_index = min(block_start_index + BLOCK_SIZE_K * K_BLOCKS, K) - 1
    first_mat = tl.load(sel_ptr + stride_sel * block_start_index)
    last_mat = tl.load(sel_ptr + stride_sel * block_end_index)
    for matrix_index in range(first_mat, last_mat + 1):
        accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
        start_i = block_start_index
        end_i = block_end_index + 1
        while start_i < end_i:
            middle = (start_i + end_i) // 2
            middle_matrix = tl.load(sel_ptr + middle * stride_sel)
            if middle_matrix < matrix_index:
                start_i = middle + 1
            else:
                end_i = middle
        start_i2 = start_i
        end_i = block_end_index + 1
        while start_i2 < end_i:
            middle = (start_i2 + end_i) // 2
            middle_matrix = tl.load(sel_ptr + middle * stride_sel)
            if middle_matrix <= matrix_index:
                start_i2 = middle + 1
            else:
                end_i = middle
        end_i = start_i2
        count = end_i - start_i
        block_mem_indices_f_base = start_i + tl.arange(0, BLOCK_SIZE_K)
        if count > 0:
            for k in range((count + BLOCK_SIZE_K - 1) // BLOCK_SIZE_K):
                block_mem_indices_f = block_mem_indices_f_base + k * BLOCK_SIZE_K
                block_mem_indices = block_mem_indices_f % K
                a_index = tl.load(index_ptr + stride_index * block_mem_indices)
                if out_index_is_none:
                    b_index = a_index
                else:
                    b_index = tl.load(out_index_ptr + stride_out_index * block_mem_indices)
                sel_ok = block_mem_indices_f < end_i
                a_ptrs = a_ptrs_this + a_index[None, :] * stride_ak
                b_ptrs = b_ptrs_this + b_index[:, None] * stride_bk
                a = tl.load(a_ptrs, mask=sel_ok[None, :], other=0.0)
                b = tl.load(b_ptrs, mask=sel_ok[:, None], other=0.0)
                if op_float16:
                    a = a.to(tl.float16)
                    b = b.to(tl.float16)
                accumulator += tl.dot(a, b, allow_tf32=allow_tf32)
            if float32_out:
                c = accumulator
            else:
                c = accumulator.to(tl.float16)
            offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
            offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
            c_ptrs = c_ptr + stride_co * matrix_index + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]
            c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
            tl.atomic_add(c_ptrs, c, mask=c_mask)
{ "Data Type": [ "fp16" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Blocked Access", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/dtadpole/nanoGPT_lightning/blob/5db66f7714a9a40191f4f208ecbb650ad8c93cc6/cvmm.py
fbb2a0d9-7c7c-42af-8046-40b931ce9c09
flash_triton.py
MayDomine/Burst-Attention
burst_attn/flash_triton.py
b088c554072935074ea9c643de5ee363be5ab1f6
0
@triton.autotune(
    configs=[
        triton.Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'SEQUENCE_PARALLEL': False},
                      num_warps=8, num_stages=1, pre_hook=init_to_zero('DQ')),
        triton.Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'SEQUENCE_PARALLEL': True},
                      num_warps=8, num_stages=1, pre_hook=init_to_zero('DQ')),
    ],
    key=['CACHE_KEY_SEQLEN_Q', 'CACHE_KEY_SEQLEN_K', 'BIAS_TYPE', 'IS_CAUSAL', 'BLOCK_HEADDIM'],
)
@triton.heuristics({
    'EVEN_M': lambda args: args['seqlen_q'] % args['BLOCK_M'] == 0,
    'EVEN_N': lambda args: args['seqlen_k'] % args['BLOCK_N'] == 0,
    'EVEN_HEADDIM': lambda args: args['headdim'] == args['BLOCK_HEADDIM'],
})
@triton.jit
def _bwd_kernel(Q, K, V, Bias, DO, DQ, DK, DV, LSE, D, softmax_scale,
                stride_qb, stride_qh, stride_qm, stride_kb, stride_kh, stride_kn,
                stride_vb, stride_vh, stride_vn, stride_bb, stride_bh, stride_bm,
                stride_dob, stride_doh, stride_dom, stride_dqb, stride_dqh, stride_dqm,
                stride_dkb, stride_dkh, stride_dkn, stride_dvb, stride_dvh, stride_dvn,
                nheads, seqlen_q, seqlen_k, seqlen_q_rounded, headdim,
                CACHE_KEY_SEQLEN_Q, CACHE_KEY_SEQLEN_K, BIAS_TYPE: tl.constexpr,
                IS_CAUSAL: tl.constexpr, BLOCK_HEADDIM: tl.constexpr,
                SEQUENCE_PARALLEL: tl.constexpr, EVEN_M: tl.constexpr,
                EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr,
                BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr):
    off_hb = tl.program_id(1)
    off_b = off_hb // nheads
    off_h = off_hb % nheads
    Q += off_b * stride_qb + off_h * stride_qh
    K += off_b * stride_kb + off_h * stride_kh
    V += off_b * stride_vb + off_h * stride_vh
    DO += off_b * stride_dob + off_h * stride_doh
    DQ += off_b * stride_dqb + off_h * stride_dqh
    DK += off_b * stride_dkb + off_h * stride_dkh
    DV += off_b * stride_dvb + off_h * stride_dvh
    if BIAS_TYPE != 'none':
        Bias += off_b * stride_bb + off_h * stride_bh
    D += off_hb * seqlen_q_rounded
    LSE += off_hb * seqlen_q_rounded
    if not SEQUENCE_PARALLEL:
        num_block_n = tl.cdiv(seqlen_k, BLOCK_N)
        for start_n in range(0, num_block_n):
            _bwd_kernel_one_col_block(start_n, Q, K, V, Bias, DO, DQ, DK, DV,
                                      LSE, D, softmax_scale, stride_qm, stride_kn,
                                      stride_vn, stride_bm, stride_dom, stride_dqm,
                                      stride_dkn, stride_dvn, seqlen_q, seqlen_k,
                                      headdim, ATOMIC_ADD=False, BIAS_TYPE=BIAS_TYPE,
                                      IS_CAUSAL=IS_CAUSAL, BLOCK_HEADDIM=BLOCK_HEADDIM,
                                      EVEN_M=EVEN_M, EVEN_N=EVEN_N,
                                      EVEN_HEADDIM=EVEN_HEADDIM,
                                      BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N)
    else:
        start_n = tl.program_id(0)
        _bwd_kernel_one_col_block(start_n, Q, K, V, Bias, DO, DQ, DK, DV,
                                  LSE, D, softmax_scale, stride_qm, stride_kn,
                                  stride_vn, stride_bm, stride_dom, stride_dqm,
                                  stride_dkn, stride_dvn, seqlen_q, seqlen_k,
                                  headdim, ATOMIC_ADD=True, BIAS_TYPE=BIAS_TYPE,
                                  IS_CAUSAL=IS_CAUSAL, BLOCK_HEADDIM=BLOCK_HEADDIM,
                                  EVEN_M=EVEN_M, EVEN_N=EVEN_N,
                                  EVEN_HEADDIM=EVEN_HEADDIM,
                                  BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N)
{ "Data Type": [ "fp16" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Tiled", "Strided Access" ], "Parallelization Strategy": [ "Cooperative Groups" ], "Performance Objective": [ "Compute Bound", "Memory-Bound" ] }
[ "Apache" ]
https://github.com/MayDomine/Burst-Attention/blob/b088c554072935074ea9c643de5ee363be5ab1f6/burst_attn/flash_triton.py
cbbfcf20-c932-4670-846d-fb6982e7b184
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/delta_rule/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None})
@triton.autotune(
    configs=[
        triton.Config({'BK': BK, 'BV': BV}, num_warps=num_warps, num_stages=num_stages)
        for BK, BV in [(32, 64), (64, 32), (64, 64), (64, 128), (128, 64)]
        for num_warps in [1, 2, 4]
        for num_stages in [2, 3, 4]
    ],
    key=['BT'],
)
@triton.jit
def chunk_delta_rule_fwd_kernel_prepare_dv(q, k, do, dv, offsets, indices,
                                           scale, T: tl.constexpr, H: tl.constexpr,
                                           K: tl.constexpr, V: tl.constexpr,
                                           BT: tl.constexpr, BK: tl.constexpr,
                                           BV: tl.constexpr,
                                           USE_OFFSETS: tl.constexpr,
                                           HEAD_FIRST: tl.constexpr):
    i_t, i_bh = tl.program_id(0), tl.program_id(1)
    i_b, i_h = i_bh // H, i_bh % H
    if USE_OFFSETS:
        i_n, i_t = tl.load(indices + i_t * 2).to(tl.int32), tl.load(indices + i_t * 2 + 1).to(tl.int32)
        bos, eos = tl.load(offsets + i_n).to(tl.int32), tl.load(offsets + i_n + 1).to(tl.int32)
        T = eos - bos
    else:
        bos, eos = i_b * T, i_b * T + T
    b_A = tl.zeros([BT, BT], dtype=tl.float32)
    for i_k in range(tl.cdiv(K, BK)):
        if HEAD_FIRST:
            p_q = tl.make_block_ptr(q + i_bh * T * K, (K, T), (1, K), (i_k * BK, i_t * BT), (BK, BT), (0, 1))
            p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K), (K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
        else:
            p_q = tl.make_block_ptr(q + (bos * H + i_h) * K, (K, T), (1, H * K), (i_k * BK, i_t * BT), (BK, BT), (0, 1))
            p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H * K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
        b_k = tl.load(p_k, boundary_check=(0, 1))
        b_q = tl.load(p_q, boundary_check=(0, 1))
        b_q = (b_q * scale).to(b_k.dtype)
        b_A += tl.dot(b_k, b_q, allow_tf32=False)
    b_A = tl.where(tl.arange(0, BT)[:, None] <= tl.arange(0, BT)[None, :], b_A, 0).to(do.dtype.element_ty)
    for i_v in range(tl.cdiv(V, BV)):
        if HEAD_FIRST:
            p_do = tl.make_block_ptr(do + i_bh * T * V, (T, V), (V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
            p_dv = tl.make_block_ptr(dv + i_bh * T * V, (T, V), (V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
        else:
            p_do = tl.make_block_ptr(do + (bos * H + i_h) * V, (T, V), (H * V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
            p_dv = tl.make_block_ptr(dv + (bos * H + i_h) * V, (T, V), (H * V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
        b_do = tl.load(p_do, boundary_check=(0, 1))
        b_dv = tl.dot(b_A, b_do, allow_tf32=False)
        tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), boundary_check=(0, 1))
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication", "Attention Mechanisms" ], "Memory Access Pattern": [ "Strided Access", "Coalesced" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/delta_rule/chunk.py
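A compact PyTorch reference for what one (head, chunk) instance of the kernel above computes, ignoring the BK/BV tiling and the offsets/indices indirection; the function name and shapes are illustrative only.
import torch

def prepare_dv_reference(q, k, do, scale):
    # q, k: (BT, K); do: (BT, V) for a single head and a single chunk
    A = k @ (q * scale).t()   # A[i, j] = k_i . (scale * q_j)
    A = torch.triu(A)         # keep positions with i <= j, mirroring the tl.where mask
    return A @ do             # dv[i] = sum_{j >= i} A[i, j] * do[j]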
a3bb1f0d-12d4-4f1f-af81-35201c7a5bc1
gemm_streamk_benchmark.py
intel/intel-xpu-backend-for-triton
benchmarks/triton_kernels_benchmark/gemm_streamk_benchmark.py
6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2
0
@triton.jit
def linear_tile(tile_id, M: tl.constexpr, N: tl.constexpr, K: tl.constexpr,
                BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr,
                BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr):
    pid_m = tile_id // tl.cdiv(N, BLOCK_SIZE_N)
    pid_n = tile_id % tl.cdiv(N, BLOCK_SIZE_N)
    return pid_m, pid_n
{ "Data Type": [], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "Memory-Bound" ] }
[ "MIT" ]
https://github.com/intel/intel-xpu-backend-for-triton/blob/6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2/benchmarks/triton_kernels_benchmark/gemm_streamk_benchmark.py
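The same mapping in plain Python, with a hypothetical worked example of how a flat tile id becomes a (pid_m, pid_n) pair.
def linear_tile_reference(tile_id, N, BLOCK_SIZE_N):
    num_pid_n = -(-N // BLOCK_SIZE_N)   # ceiling division, mirroring tl.cdiv
    return tile_id // num_pid_n, tile_id % num_pid_n

# With N = 1024 and BLOCK_SIZE_N = 64 there are 16 tile columns,
# so tile_id = 35 maps to (pid_m, pid_n) = (2, 3).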
f1703945-6176-40cb-8eaf-73d5f402dbbb
sb_varlen_fwd.py
shawntan/stickbreaking-attention
stickbreaking_attention/sb_varlen/sb_varlen_fwd.py
8dd32ad5e58f0ee0232fd4782dc53d354ff8d283
0
@triton.jit
def _forward_one_row(seq_block_id, seq_length, qk_scale, M_range, N_range,
                     D_range, D_mask, cm, Q_head_seq_ptr, stride_qm,
                     stride_qd: tl.constexpr, K_head_seq_ptr, stride_kn,
                     stride_kd: tl.constexpr, V_head_seq_ptr, stride_vn,
                     stride_vd: tl.constexpr, O_head_seq_ptr, stride_om,
                     stride_od: tl.constexpr, R_head_seq_ptr, stride_rm,
                     A_head_seq_ptr, stride_am, W_head_seq_ptr, stride_wm,
                     stride_wn, BLOCK_D: tl.constexpr, NO_D_MASK: tl.constexpr,
                     NO_M_MASK: tl.constexpr, NO_N_MASK: tl.constexpr,
                     ALLOW_TF32: tl.constexpr, BLOCK_M: tl.constexpr,
                     BLOCK_N: tl.constexpr, no_grad: tl.constexpr=False,
                     acc_dtype: tl.constexpr=tl.float32,
                     return_attention: tl.constexpr=False,
                     is_compiling: tl.constexpr=False,
                     use_cumsum: tl.constexpr=False,
                     attend_current: tl.constexpr=False):
    block_start_offset = BLOCK_M * seq_block_id
    M_blk_idxs = block_start_offset + M_range
    M_mask = M_blk_idxs < seq_length
    NO_M_MASK = block_start_offset + BLOCK_M - 1 < seq_length
    N_blk_idxs_start = block_start_offset + BLOCK_M
    N_blk_idxs = N_blk_idxs_start + N_range
    Q_blk_ptrs = Q_head_seq_ptr + (stride_qm * M_blk_idxs[:, None] + stride_qd * D_range[None, :])
    K_blk_ptrs = K_head_seq_ptr + (stride_kn * N_blk_idxs[:, None] + stride_kd * D_range[None, :])
    V_blk_ptrs = V_head_seq_ptr + (stride_vn * N_blk_idxs[:, None] + stride_vd * D_range[None, :])
    O_blk_ptrs = O_head_seq_ptr + (stride_om * M_blk_idxs[:, None] + stride_od * D_range[None, :])
    R_blk_ptrs = R_head_seq_ptr + stride_rm * M_blk_idxs
    A_blk_ptrs = A_head_seq_ptr + stride_am * M_blk_idxs
    if NO_D_MASK:
        if NO_M_MASK:
            q = tl.load(Q_blk_ptrs)
        else:
            q = tl.load(Q_blk_ptrs, mask=M_mask[:, None], other=0.0)
    else:
        q = tl.load(Q_blk_ptrs, mask=M_mask[:, None] & D_mask[None, :], other=0.0)
    iters = N_blk_idxs_start // BLOCK_N
    neg_log_acc = tl.zeros([BLOCK_M], dtype=acc_dtype)
    acc = tl.zeros([BLOCK_M, BLOCK_D], dtype=acc_dtype)
    for i in range(iters):
        N_blk_idxs -= BLOCK_N
        N_blk_idxs_start -= BLOCK_N
        K_blk_ptrs -= BLOCK_N * stride_kn
        V_blk_ptrs -= BLOCK_N * stride_vn
        N_mask = N_blk_idxs < seq_length
        k, v = load_kv(K_blk_ptrs, V_blk_ptrs, N_mask=N_mask,
                       NO_N_MASK=N_blk_idxs_start + BLOCK_N - 1 < seq_length,
                       D_mask=D_mask, NO_D_MASK=NO_D_MASK)
        on_band = i < BLOCK_M // BLOCK_N
        p, _, neg_log_acc = compute_block(q, k, qk_scale, neg_log_acc,
                                          M_blk_idxs, N_blk_idxs, cm, on_band,
                                          ALLOW_TF32,
                                          attend_current=attend_current,
                                          backward=False,
                                          is_compiling=is_compiling,
                                          use_cumsum=use_cumsum)
        acc = tl.dot(p.to(v.dtype), v, acc, allow_tf32=ALLOW_TF32)
        if return_attention:
            tl.store(W_head_seq_ptr + stride_wm * M_blk_idxs[:, None] + stride_wn * N_blk_idxs[None, :],
                     p,
                     mask=(M_blk_idxs < seq_length)[:, None] & (N_blk_idxs < seq_length)[None, :])
    if NO_M_MASK:
        tl.store(R_blk_ptrs, tl.math.exp2(neg_log_acc))
        tl.store(A_blk_ptrs, neg_log_acc.to(A_head_seq_ptr.type.element_ty))
    else:
        tl.store(R_blk_ptrs, tl.math.exp2(neg_log_acc), mask=M_mask)
        tl.store(A_blk_ptrs, neg_log_acc.to(A_head_seq_ptr.type.element_ty), mask=M_mask)
    if NO_D_MASK:
        tl.store(O_blk_ptrs, acc.to(O_head_seq_ptr.type.element_ty), mask=M_mask[:, None])
    else:
        tl.store(O_blk_ptrs, acc.to(O_head_seq_ptr.type.element_ty), mask=M_mask[:, None] & D_mask[None, :])
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Strided Access", "Coalesced" ], "Parallelization Strategy": [ "Grid-Stride Loops", "Cooperative Groups" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "Apache" ]
https://github.com/shawntan/stickbreaking-attention/blob/8dd32ad5e58f0ee0232fd4782dc53d354ff8d283/stickbreaking_attention/sb_varlen/sb_varlen_fwd.py
4655e66d-50dd-4e01-87a7-dbc096fc693b
fused_recurrent.py
sustcsonglin/flash-linear-attention
fla/ops/generalized_delta_rule/iplr/fused_recurrent.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit
def fused_recurrent_fwd_kernel(q, k, v, alpha, beta, o, ha, h0, ht, s_k_h,
                               s_v_h, scale, B, H, T, K: tl.constexpr,
                               V: tl.constexpr, BK: tl.constexpr,
                               BV: tl.constexpr,
                               USE_INITIAL_STATE: tl.constexpr,
                               STORE_FINAL_STATE: tl.constexpr):
    i_v, i_k, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    p_q = q + i_bh * s_k_h + i_k * BK + tl.arange(0, BK)
    p_k = k + i_bh * s_k_h + i_k * BK + tl.arange(0, BK)
    p_v = v + i_bh * s_v_h + i_v * BV + tl.arange(0, BV)
    p_alpha = alpha + i_bh * s_k_h + i_k * BK + tl.arange(0, BK)
    p_beta = beta + i_bh * s_k_h + i_k * BK + tl.arange(0, BK)
    p_o = o + (i_bh + i_k * B * H) * s_v_h + i_v * BV + tl.arange(0, BV)
    p_ha = ha + (i_bh + i_k * B * H) * s_v_h + i_v * BV + tl.arange(0, BV)
    mask_bk = i_k * BK + tl.arange(0, BK) < K
    mask_bv = i_v * BV + tl.arange(0, BV) < V
    mask_kv = mask_bk[None, :] & mask_bv[:, None]
    h = tl.zeros([BV, BK], dtype=tl.float32)
    if USE_INITIAL_STATE:
        p_h0 = h0 + i_bh * K * V + (i_k * BK + tl.arange(0, BK)[None, :]) * V + (i_v * BV + tl.arange(0, BV)[:, None])
        h += tl.load(p_h0, mask=mask_kv, other=0).to(tl.float32)
    for _ in range(0, T):
        b_k = tl.load(p_k, mask=mask_bk, other=0).to(tl.float32)
        b_v = tl.load(p_v, mask=mask_bv, other=0).to(tl.float32)
        b_q = tl.load(p_q, mask=mask_bk, other=0).to(tl.float32) * scale
        b_alpha = tl.load(p_alpha, mask=mask_bk, other=0).to(tl.float32)
        b_beta = tl.load(p_beta, mask=mask_bk, other=0).to(tl.float32)
        tmp = tl.sum(h * b_alpha[None, :], axis=1)
        h += tmp[:, None] * b_beta[None, :] + b_k[None, :] * b_v[:, None]
        _o = h * b_q[None, :]
        _o = tl.sum(_o, axis=1)
        tl.store(p_o, _o.to(p_o.dtype.element_ty), mask=mask_bv)
        tl.store(p_ha, tmp.to(p_ha.dtype.element_ty), mask=mask_bv)
        p_q += K
        p_k += K
        p_o += V
        p_v += V
        p_ha += V
        p_alpha += K
        p_beta += K
    if STORE_FINAL_STATE:
        p_ht = ht + i_bh * K * V + (i_k * BK + tl.arange(0, BK)[None, :]) * V + (i_v * BV + tl.arange(0, BV)[:, None])
        tl.store(p_ht, h.to(p_ht.dtype.element_ty), mask=mask_kv)
{ "Data Type": [ "fp32" ], "Functionality": [ "Recurrent Neural Networks" ], "Memory Access Pattern": [ "Strided Access", "Register Intensive" ], "Parallelization Strategy": [ "Cooperative Groups" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/generalized_delta_rule/iplr/fused_recurrent.py
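A step-by-step PyTorch sketch of the recurrence the kernel above evaluates for a single batch-head, assuming fp32 and ignoring the BK/BV tiling; the function name and the (V, K) layout of the optional initial state are illustrative assumptions.
import torch

def fused_recurrent_reference(q, k, v, alpha, beta, scale, h0=None):
    # q, k, alpha, beta: (T, K); v: (T, V); running state h: (V, K)
    T, K = q.shape
    V = v.shape[1]
    h = torch.zeros(V, K) if h0 is None else h0.clone()
    o = torch.empty(T, V)
    ha = torch.empty(T, V)
    for t in range(T):
        tmp = h @ alpha[t]                                            # stored to `ha`, taken from the pre-update state
        h = h + torch.outer(tmp, beta[t]) + torch.outer(v[t], k[t])   # rank-2 state update
        o[t] = h @ (q[t] * scale)                                     # readout with the scaled query
        ha[t] = tmp
    return o, ha, h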
5400e181-7552-4332-b506-bdb00f476d7a
layer_norm_kernels.py
BobMcDear/attorch
attorch/layer_norm_kernels.py
da06cb6236bb47195e33fe3986ed21c675ed94cc
0
@triton.autotune(configs=warps_kernel_configs(), key=['batch_dim', 'feat_dim'])
@triton.heuristics({'BLOCK_SIZE_BATCH': BLOCK_SIZE_BATCH_heuristic,
                    'BLOCK_SIZE_FEAT': lambda args: next_power_of_2(args['feat_dim'])})
@triton.jit
def layer_norm_backward_kernel(output_grad_pointer, input_pointer,
                               mean_pointer, inv_std_pointer, weight_pointer,
                               input_grad_pointer, weight_grad_pointer,
                               bias_grad_pointer, batch_dim, feat_dim,
                               output_grad_batch_stride, output_grad_feat_stride,
                               input_batch_stride, input_feat_stride,
                               input_grad_batch_stride, input_grad_feat_stride,
                               weight_grad_batch_stride, weight_grad_feat_stride,
                               bias_grad_batch_stride, bias_grad_feat_stride,
                               scale_by_weight: tl.constexpr,
                               add_bias: tl.constexpr,
                               BLOCK_SIZE_BATCH: tl.constexpr,
                               BLOCK_SIZE_FEAT: tl.constexpr):
    """
    Calculates the input gradient of layer normalization.

    Args:
        output_grad_pointer: Pointer to layer normalization's output gradients.
            The output gradients must be of shape [batch_dim, feat_dim].
        input_pointer: Pointer to the input.
            The input must be of shape [batch_dim, feat_dim].
        mean_pointer: Pointer to the input's mean.
            The mean should be of shape [batch_dim].
        inv_std_pointer: Pointer to the input's inverse standard deviation.
            The inverse standard deviation should be of shape [batch_dim].
        weight_pointer: Pointer to optional weights if affine transform occurred.
            The weights, if provided, must be of shape [feat_dim].
        input_grad_pointer: Pointer to a container the input's gradients are written to.
            The container must be of shape [batch_dim, feat_dim].
        weight_grad_pointer: Pointer to an optional container the weights' row-wise
            gradients are written to if scale_by_weight is True, which should later
            be summed. The container, if provided, must be of shape
            [batch_dim/BLOCK_SIZE_BATCH, feat_dim].
        bias_grad_pointer: Pointer to an optional container the bias vector's row-wise
            gradients are written to if scale_by_weight and add_bias are True, which
            should later be summed. The container, if provided, must be of shape
            [batch_dim/BLOCK_SIZE_BATCH, feat_dim].
        batch_dim: Batch dimension.
        feat_dim: Dimensionality of the features.
        output_grad_batch_stride: Stride necessary to jump one element along the
            output gradients' batch dimension.
        output_grad_feat_stride: Stride necessary to jump one element along the
            output gradients' feature dimension.
        input_batch_stride: Stride necessary to jump one element along the
            input's batch dimension.
        input_feat_stride: Stride necessary to jump one element along the
            input's feature dimension.
        input_grad_batch_stride: Stride necessary to jump one element along the
            input gradient container's batch dimension.
        input_grad_feat_stride: Stride necessary to jump one element along the
            input gradient container's feature dimension.
        weight_grad_batch_stride: Stride necessary to jump one element along the
            weight gradient container's batch dimension.
        weight_grad_feat_stride: Stride necessary to jump one element along the
            weight gradient container's feature dimension.
        bias_grad_batch_stride: Stride necessary to jump one element along the
            bias gradient container's batch dimension.
        bias_grad_feat_stride: Stride necessary to jump one element along the
            bias gradient container's feature dimension.
        scale_by_weight: Flag for scaling the normalized output by weights.
        add_bias: Flag for adding a bias vector to the normalized output
            if scale_by_weight is True.
        BLOCK_SIZE_BATCH: Block size across the batch dimension.
        BLOCK_SIZE_FEAT: Block size across the feature dimension.
    """
    batch_pid = tl.program_id(axis=0)
    batch_offset = batch_pid * BLOCK_SIZE_BATCH + tl.arange(0, BLOCK_SIZE_BATCH)
    feat_offset = tl.arange(0, BLOCK_SIZE_FEAT)
    batch_mask = batch_offset < batch_dim
    feat_mask = feat_offset < feat_dim
    output_grad_pointer += output_grad_batch_stride * batch_offset[:, None] + output_grad_feat_stride * feat_offset[None, :]
    input_pointer += input_batch_stride * batch_offset[:, None] + input_feat_stride * feat_offset[None, :]
    input_grad_pointer += input_grad_batch_stride * batch_offset[:, None] + input_grad_feat_stride * feat_offset[None, :]
    output_grad = tl.load(output_grad_pointer, mask=batch_mask[:, None] & feat_mask[None, :]).to(tl.float32)
    input = tl.load(input_pointer, mask=batch_mask[:, None] & feat_mask[None, :]).to(tl.float32)
    mean = tl.load(mean_pointer + batch_offset, mask=batch_mask)
    inv_std = tl.load(inv_std_pointer + batch_offset, mask=batch_mask)
    pre_lin = (input - mean[:, None]) * inv_std[:, None]
    if scale_by_weight:
        weight = tl.load(weight_pointer + feat_offset, mask=feat_mask)
        weight_output_grad_prod = weight * output_grad
    else:
        weight_output_grad_prod = output_grad
    term1 = tl.sum(pre_lin * weight_output_grad_prod, axis=1) / feat_dim
    term1 = pre_lin * term1[:, None]
    term2 = tl.sum(weight_output_grad_prod, axis=1) / feat_dim
    input_grad = inv_std[:, None] * (weight_output_grad_prod - (term1 + term2[:, None]))
    tl.store(input_grad_pointer, input_grad, mask=batch_mask[:, None] & feat_mask[None, :])
    if scale_by_weight:
        weight_grad_pointer += (weight_grad_batch_stride * batch_pid +
                                weight_grad_feat_stride * feat_offset)
        tl.store(weight_grad_pointer, tl.sum(output_grad * pre_lin, axis=0), mask=feat_mask)
        if add_bias:
            bias_grad_pointer += (bias_grad_batch_stride * batch_pid +
                                  bias_grad_feat_stride * feat_offset)
            tl.store(bias_grad_pointer, tl.sum(output_grad, axis=0), mask=feat_mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization", "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Cooperative Groups", "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/layer_norm_kernels.py
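The input-gradient arithmetic in the kernel above corresponds to the following PyTorch reference (a sketch with illustrative names, covering only the input gradient and not the row-wise weight/bias gradient buffers).
import torch

def layer_norm_input_grad_reference(dy, x, mean, inv_std, weight=None):
    # dy, x: (batch, feat); mean, inv_std: (batch,); weight: (feat,) or None
    pre_lin = (x - mean[:, None]) * inv_std[:, None]
    wdy = dy * weight if weight is not None else dy
    term1 = pre_lin * (pre_lin * wdy).mean(dim=1, keepdim=True)   # matches term1 in the kernel
    term2 = wdy.mean(dim=1, keepdim=True)                         # matches term2 in the kernel
    return inv_std[:, None] * (wdy - (term1 + term2))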