Coverage for src/flag_gems/fused/FLA/wy_fast.py: 22%
49 statements
Generated by coverage.py v7.6.9, created at 2026-03-23 02:03 +0800
1# This file contains code copied from the flash-linear-attention project.
2# The original source code was licensed under the MIT license and included
3# the following copyright notice:
4# Copyright (c) 2023-2025, Songlin Yang, Yu Zhang
6# ruff: noqa: E501
8import torch
9import triton
10import triton.language as tl
12from flag_gems.fused.FLA.index import prepare_chunk_indices
13from flag_gems.utils import libentry, libtuner
@libentry()
@triton.heuristics({"IS_VARLEN": lambda args: args["cu_seqlens"] is not None})
@libtuner(
    # Autotune only over launch meta-parameters (warps/stages); the block
    # shapes themselves are fixed by the caller and act as the tuning key.
    configs=[
        triton.Config({}, num_warps=num_warps, num_stages=num_stages)
        for num_warps in [2, 4, 8]
        for num_stages in [2, 3, 4]
    ],
    key=["H", "K", "V", "BT", "BK", "BV", "IS_VARLEN"],
)
@triton.jit(do_not_specialize=["T"])
def recompute_w_u_fwd_kernel(
    k,            # keys; indexed as [*, T, Hg, K] with Hg grouped KV heads
    v,            # values; indexed as [*, T, H, V]
    beta,         # per-token scaling; indexed as [*, T, H]
    w,            # output: w = A @ (k * beta * exp(g)); indexed as [*, T, H, K]
    u,            # output: u = A @ (v * beta); indexed as [*, T, H, V]
    A,            # per-chunk [BT, BT] mixing matrices; indexed as [*, T, H, BT]
    g,            # cumulative log-gates; exponentiated below before use
    cu_seqlens,   # varlen mode: cumulative sequence lengths, or None
    chunk_indices,  # varlen mode: (seq_idx, chunk_idx) pairs, flattened int pairs
    T,            # sequence length (per-batch in fixed mode; overwritten per-seq in varlen)
    H: tl.constexpr,   # number of heads for v/w/u/beta/g/A
    Hg: tl.constexpr,  # number of key-head groups (H is a multiple of Hg)
    K: tl.constexpr,   # key head dimension
    V: tl.constexpr,   # value head dimension
    BT: tl.constexpr,  # time-chunk tile size
    BK: tl.constexpr,  # key-dim tile size
    BV: tl.constexpr,  # value-dim tile size
    IS_VARLEN: tl.constexpr,  # derived by the heuristic from cu_seqlens
):
    # Recompute the intermediate tensors w and u of the WY representation for
    # one (time-chunk, batch*head) tile:
    #   u[t, :] = A @ (v * beta)      (per BV-wide column slab)
    #   w[t, :] = A @ (k * beta * exp(g))   (per BK-wide column slab)
    # Grid: axis 0 = time chunk, axis 1 = flattened batch*head.
    i_t, i_bh = tl.program_id(0), tl.program_id(1)
    i_b, i_h = i_bh // H, i_bh % H
    if IS_VARLEN:
        # Varlen packing: remap the flat chunk id to (sequence, local chunk),
        # then derive this sequence's [bos, eos) token range and its length T.
        i_n, i_t = (
            tl.load(chunk_indices + i_t * 2).to(tl.int32),
            tl.load(chunk_indices + i_t * 2 + 1).to(tl.int32),
        )
        bos, eos = (
            tl.load(cu_seqlens + i_n).to(tl.int32),
            tl.load(cu_seqlens + i_n + 1).to(tl.int32),
        )
        T = eos - bos
    else:
        # Fixed-length mode: batch i_b owns tokens [i_b*T, i_b*T + T).
        bos, eos = i_b * T, i_b * T + T
    # beta/g are laid out [tokens, H]: stride H between consecutive tokens.
    p_beta = tl.make_block_ptr(
        beta + bos * H + i_h, (T,), (H,), (i_t * BT,), (BT,), (0,)
    )
    p_g = tl.make_block_ptr(g + (bos * H + i_h), (T,), (H,), (i_t * BT,), (BT,), (0,))
    # A is laid out [tokens, H, BT]; select this head's [T, BT] slice.
    p_A = tl.make_block_ptr(
        A + (bos * H + i_h) * BT, (T, BT), (H * BT, 1), (i_t * BT, 0), (BT, BT), (1, 0)
    )
    b_beta = tl.load(p_beta, boundary_check=(0,))
    b_A = tl.load(p_A, boundary_check=(0, 1))
    # g holds log-space gates; exp() turns them into multiplicative factors.
    b_g = tl.exp(tl.load(p_g, boundary_check=(0,)))
    # ---- u = A @ (v * beta), tiled over the value dimension ----
    for i_v in range(tl.cdiv(V, BV)):
        p_v = tl.make_block_ptr(
            v + (bos * H + i_h) * V,
            (T, V),
            (H * V, 1),
            (i_t * BT, i_v * BV),
            (BT, BV),
            (1, 0),
        )
        p_u = tl.make_block_ptr(
            u + (bos * H + i_h) * V,
            (T, V),
            (H * V, 1),
            (i_t * BT, i_v * BV),
            (BT, BV),
            (1, 0),
        )
        b_v = tl.load(p_v, boundary_check=(0, 1))
        # Scale rows by beta before the matmul; cast back to v's dtype so the
        # dot runs in the input precision.
        b_vb = (b_v * b_beta[:, None]).to(b_v.dtype)
        b_u = tl.dot(b_A, b_vb, allow_tf32=False)
        tl.store(p_u, b_u.to(p_u.dtype.element_ty), boundary_check=(0, 1))
    # ---- w = A @ (k * beta * exp(g)), tiled over the key dimension ----
    for i_k in range(tl.cdiv(K, BK)):
        # k has Hg (grouped) heads: map head i_h onto its group i_h // (H // Hg).
        p_k = tl.make_block_ptr(
            k + (bos * Hg + i_h // (H // Hg)) * K,
            (T, K),
            (Hg * K, 1),
            (i_t * BT, i_k * BK),
            (BT, BK),
            (1, 0),
        )
        p_w = tl.make_block_ptr(
            w + (bos * H + i_h) * K,
            (T, K),
            (H * K, 1),
            (i_t * BT, i_k * BK),
            (BT, BK),
            (1, 0),
        )
        b_k = tl.load(p_k, boundary_check=(0, 1))
        # Keys additionally pick up the gate factor exp(g).
        b_kb = (b_k * b_beta[:, None] * b_g[:, None]).to(b_k.dtype)
        b_w = tl.dot(b_A, b_kb)
        tl.store(p_w, b_w.to(p_w.dtype.element_ty), boundary_check=(0, 1))
def recompute_w_u_fwd(
    k: torch.Tensor,
    v: torch.Tensor,
    beta: torch.Tensor,
    g_cumsum: torch.Tensor,
    A: torch.Tensor,
    cu_seqlens: torch.LongTensor | None,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Launch the WY-representation recompute kernel and return (w, u).

    Args:
        k: keys of shape [B, T, Hg, K] (Hg grouped key heads).
        v: values of shape [B, T, H, V].
        beta: per-token scaling of shape [B, T, H].
        g_cumsum: cumulative log-gates, passed to the kernel as ``g``.
        A: per-chunk mixing matrices; its last dim defines the chunk size BT.
        cu_seqlens: cumulative sequence lengths for varlen packing, or None
            for fixed-length batches.

    Returns:
        w: tensor of shape [B, T, H, K], w = A @ (k * beta * exp(g)).
        u: tensor shaped like v, u = A @ (v * beta).
    """
    B, T, Hg, K = k.shape
    V = v.shape[-1]
    H = v.shape[-2]
    BT = A.shape[-1]

    # Grid axis 0 covers time chunks; in varlen mode the chunk table also
    # encodes which packed sequence each chunk belongs to.
    if cu_seqlens is None:
        chunk_indices = None
        num_chunks = triton.cdiv(T, BT)
    else:
        chunk_indices = prepare_chunk_indices(cu_seqlens, BT)
        num_chunks = len(chunk_indices)

    u = torch.empty_like(v)
    w = k.new_empty(B, T, H, K)

    grid = (num_chunks, B * H)
    recompute_w_u_fwd_kernel[grid](
        k=k,
        v=v,
        beta=beta,
        w=w,
        u=u,
        A=A,
        g=g_cumsum,
        cu_seqlens=cu_seqlens,
        chunk_indices=chunk_indices,
        T=T,
        H=H,
        Hg=Hg,
        K=K,
        V=V,
        BT=BT,
        BK=64,  # key-dim tile size
        BV=64,  # value-dim tile size
    )
    return w, u