Coverage for src/flag_gems/runtime/backend/_ascend/ops/var_mean.py: 0%
155 statements
« prev ^ index » next coverage.py v7.6.9, created at 2026-03-21 14:31 +0800
« prev ^ index » next coverage.py v7.6.9, created at 2026-03-21 14:31 +0800
1import logging
3import torch
4import triton
5import triton.language as tl
7from flag_gems import runtime
8from flag_gems.runtime import torch_device_fn
9from flag_gems.utils import dim_compress, libentry
10from flag_gems.utils import triton_lang_extension as tle
# Module-level logger named after this op file (e.g. "flag_gems.runtime._ascend.ops.var_mean").
logger = logging.getLogger(f'flag_gems.runtime._ascend.ops.{__name__.split(".")[-1]}')
@triton.jit
def welford_func(mean_x, count_x, M_x, mean_y, count_y, M_y):
    """Combine two Welford partial statistics (mean, count, M2) into one.

    Each side carries a running mean, an element count, and an M2 sum of
    squared deviations; the result is the statistic of the union of both
    sample sets (Chan et al. parallel-variance combine).
    """
    total = count_x + count_y
    # Guard against dividing by zero when both partials are empty.
    safe_total = tl.maximum(total, 1)
    sum_x = mean_x * count_x
    sum_y = mean_y * count_y
    combined_mean = (sum_x + sum_y) / safe_total
    # M2 of the union: sum of squares of both sides minus the new mean term.
    combined_M = (
        M_x + sum_x * mean_x + M_y + sum_y * mean_y - total * combined_mean * combined_mean
    )
    return combined_mean, total, combined_M
@libentry()
@triton.autotune(configs=runtime.get_tuned_config("var_mean"), key=["M", "N"])
@triton.jit(do_not_specialize=["correction"])
def var_mean_welford_kernel(
    X,
    Var,
    Mean,
    M,
    N,
    correction,
    BLOCK_M: tl.constexpr,
    BLOCK_N: tl.constexpr,
):
    """Per-row variance and mean of an (M, N) row-major tensor via Welford.

    Each program handles BLOCK_M rows. BLOCK_N lanes per row accumulate
    streaming (mean, count, M2) partials over column chunks, which are then
    combined across lanes with the parallel Welford formula. Writes one
    mean and one variance scalar per row into Mean / Var.
    """
    # Map the program id to the row of X it should compute.
    pid = tle.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)[:, None]
    X = X + pid * N
    Var = Var + pid
    Mean = Mean + pid
    row_mask = pid < M

    # Per-lane streaming partials: running mean, M2 accumulator, element count.
    _mean = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
    _acc = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
    _count = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)

    for off in range(0, N, BLOCK_N):
        cols = off + tl.arange(0, BLOCK_N)[None, :]
        col_mask = cols < N
        # NOTE(review): `and` on Triton tensors presumably lowers to an
        # elementwise logical-and under triton.jit — confirm for this backend.
        mask = row_mask and col_mask
        x = tl.load(X + cols, mask, other=0.0).to(tl.float32)

        # Boolean mask is added/multiplied as 0/1 so padded lanes contribute nothing.
        count = _count + mask
        cnt = tl.maximum(count, 1)
        cur_mean = (_mean * _count + x) / cnt
        _acc += (x - cur_mean) * (x - _mean) * mask
        _mean = cur_mean
        _count = count

    # Manual replacement for tl.reduce along axis=1: combine the per-lane
    # Welford partials into one statistic per row.
    # Equivalent to reducing with welford_func for this layout.

    # Total element count per row.
    total_count = tl.sum(_count, axis=1)  # shape: (BLOCK_M,)

    # Count-weighted mean across lanes (guard against empty rows).
    weighted_sum = tl.sum(_mean * _count, axis=1)  # shape: (BLOCK_M,)
    mean = weighted_sum / tl.maximum(total_count, 1)  # shape: (BLOCK_M,)

    # Accumulate each lane's contribution to the row variance.
    mean_expanded = mean[:, None]  # shape: (BLOCK_M, 1)

    # Per-lane M2 plus the between-lane mean-shift term
    # (parallelized version of the Welford combine).
    local_var_contrib = _acc + _count * (_mean - mean_expanded) * (
        _mean - mean_expanded
    )
    acc = tl.sum(local_var_contrib, axis=1)  # shape: (BLOCK_M,)

    # Bessel-style correction; N == correction yields inf/nan, matching torch.
    var = acc / (N - correction)
    mean = mean[:, None]
    var = var[:, None]

    # Write mean / var
    tl.store(Mean, mean, row_mask)
    tl.store(Var, var, row_mask)
@libentry()
@triton.autotune(configs=runtime.get_tuned_config("var_mean"), key=["M", "N"])
@triton.jit(do_not_specialize=["correction"])
def var_mean_welford_kernel_simple(
    X,
    Var,
    Mean,
    M,
    N,
    correction,
    BLOCK_M: tl.constexpr,
    BLOCK_N: tl.constexpr,
):
    """Scalar, one-element-at-a-time Welford variant of the row var/mean kernel.

    NOTE(review): not called by var_mean() in this file — appears to be an
    alternative/debug implementation. The per-element indexing `x_vals[i]`
    and Python-float accumulators inside @triton.jit may not be supported
    on all Triton versions — verify before relying on this kernel.
    """
    # Map the program id to the rows this program covers.
    pid = tle.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)[:, None]
    X = X + pid * N
    Var = Var + pid
    Mean = Mean + pid
    row_mask = pid < M

    # Process each row individually.
    for row in range(BLOCK_M):
        if row < BLOCK_M:
            # One-hot mask selecting this row, masked by row validity.
            current_row_mask = (tl.arange(0, BLOCK_M) == row)[:, None] & row_mask

            if tl.sum(current_row_mask.to(tl.int32)) > 0:
                # Initialize running statistics for this row.
                running_mean = 0.0
                running_M = 0.0
                count = 0

                # Walk the row in BLOCK_N-sized chunks.
                for off in range(0, N, BLOCK_N):
                    cols = off + tl.arange(0, BLOCK_N)
                    col_mask = cols < N

                    # Load one chunk of the row.
                    x_vals = tl.load(X + row * N + cols, col_mask, other=0.0).to(
                        tl.float32
                    )

                    # Textbook sequential Welford update per valid element.
                    for i in range(BLOCK_N):
                        if i < BLOCK_N and (off + i) < N:
                            count += 1
                            x = x_vals[i]

                            delta = x - running_mean
                            running_mean += delta / count
                            delta2 = x - running_mean
                            running_M += delta * delta2

                # Apply the correction; returns 0.0 instead of inf when
                # N <= correction (differs from the main kernel / torch).
                variance = running_M / (N - correction) if N > correction else 0.0

                # Store this row's results.
                tl.store(Mean + row, running_mean, current_row_mask[:, 0])
                tl.store(Var + row, variance, current_row_mask[:, 0])
@libentry()
@triton.jit
def var_mean_kernel_1(
    X,
    Acc,
    Average,
    Count,
    N,
    BLOCK_N: tl.constexpr,
):
    """Stage 1 of the full (flattened) reduction: per-block partial stats.

    Each program reduces one BLOCK_N chunk of the flattened input and writes
    its element count, mean, and sum of squared deviations to Count[pid],
    Average[pid], Acc[pid] for stage-2 combination by var_mean_kernel_2.
    """
    # Map the program id to the chunk of X it should compute.
    pid = tle.program_id(0)
    offset = pid * BLOCK_N + tl.arange(0, BLOCK_N)

    X = X + offset
    Acc = Acc + pid
    Average = Average + pid
    Count = Count + pid
    mask = offset < N

    x = tl.load(X, mask, other=0.0).to(tl.float32)

    # Number of valid elements in this chunk (last chunk may be partial).
    count = tl.sum(mask.to(tl.float32))
    average = tl.sum(x) / count
    # M2 via the sum-of-squares identity: sum(x^2) - n*mean^2. Less
    # numerically stable than streaming Welford, but one pass and cheap.
    acc = tl.sum(x * x) - count * average * average

    tl.store(Average, average)
    tl.store(Acc, acc)
    tl.store(Count, count)
@libentry()
@triton.heuristics(runtime.get_heuristic_config("var_mean"))
@triton.jit(do_not_specialize=["correction"])
def var_mean_kernel_2(
    Acc,
    Average,
    Count,
    Var,
    Mean,
    N,
    correction,
    BLOCK_NUM,
    BLOCK_N: tl.constexpr,
):
    """Stage 2 of the full reduction: combine per-block partial statistics.

    Runs as a single program. Loads the BLOCK_NUM partial (M2, mean, count)
    triples produced by var_mean_kernel_1 and merges them with the parallel
    Welford formula, storing the scalar mean and variance.
    """
    offset = tl.arange(0, BLOCK_N)
    mask = offset < BLOCK_NUM
    Acc = Acc + offset
    Average = Average + offset
    Count = Count + offset
    acc = tl.load(Acc, mask, other=0.0).to(tl.float32)
    average = tl.load(Average, mask, other=0.0).to(tl.float32)
    count = tl.load(Count, mask, other=0.0).to(tl.float32)

    # mean, _, nvar = tl.reduce((average, count, acc), axis=0, combine_fn=welford_func)
    # Manual replacement for tl.reduce along axis=0.
    # Total element count across all blocks.
    total_count = tl.sum(count)

    # Count-weighted global mean (guard against a zero total count).
    weighted_sum = tl.sum(average * count)
    mean = weighted_sum / tl.maximum(total_count, 1)

    # Each block's contribution to the global variance:
    # its own M2 plus the mean-shift term
    # (parallelized version of the Welford combine).
    local_var_contrib = acc + count * (average - mean) * (average - mean)
    nvar = tl.sum(local_var_contrib)

    # Bessel-style correction; N == correction yields inf/nan, matching torch.
    var = nvar / (N - correction)
    tl.store(Mean, mean)
    tl.store(Var, var)
def var_mean(x, dim=None, *, correction=None, keepdim=False):
    """Compute the variance and mean of ``x``, mirroring ``torch.var_mean``.

    Args:
        x: Input tensor.
        dim: Dimension(s) to reduce over — ``None`` (all dims), a single
            int, or a list/tuple of ints (negative values allowed).
        correction: Difference between sample size and divisor (Bessel's
            correction); defaults to 1, matching torch.
        keepdim: If True, retain reduced dimensions with size 1.

    Returns:
        Tuple ``(var, mean)`` with the same dtype and device as ``x``.
    """
    logger.debug("GEMS_ASCEND VAR MEAN")
    if correction is None:
        correction = 1.0

    # Accept a bare int for `dim`, as torch.var_mean does; the original code
    # crashed on `len(dim)` / iteration when given one.
    if isinstance(dim, int):
        dim = [dim]

    if dim is None or len(dim) == x.ndim:
        # Full reduction over the flattened tensor: two-stage kernel —
        # per-block partial stats, then a single-program Welford combine.
        dim = list(range(x.ndim))
        shape = [1] * x.ndim
        N = x.numel()
        var = torch.empty(shape, dtype=x.dtype, device=x.device)
        mean = torch.empty(shape, dtype=x.dtype, device=x.device)
        BLOCK_N = 1024
        BLOCK_NUM = triton.cdiv(N, BLOCK_N)
        # Intermediate per-block statistics consumed by var_mean_kernel_2.
        acc = torch.empty([BLOCK_NUM], dtype=x.dtype, device=x.device)
        average = torch.empty([BLOCK_NUM], dtype=x.dtype, device=x.device)
        count = torch.empty([BLOCK_NUM], dtype=x.dtype, device=x.device)

        with torch_device_fn.device(x.device):
            var_mean_kernel_1[(BLOCK_NUM,)](x, acc, average, count, N, BLOCK_N=BLOCK_N)
            var_mean_kernel_2[(1,)](
                acc, average, count, var, mean, N, correction, BLOCK_NUM
            )
    else:
        # Partial reduction: move the reduced dims to the end so each output
        # element corresponds to one contiguous row of length N.
        shape = list(x.shape)
        dim = [d % x.ndim for d in dim]  # normalize negative dims
        x = dim_compress(x, dim)
        N = 1
        for i in dim:
            N *= shape[i]
            shape[i] = 1
        M = x.numel() // N
        var = torch.empty(shape, dtype=x.dtype, device=x.device)
        mean = torch.empty(shape, dtype=x.dtype, device=x.device)

        grid = lambda META: (triton.cdiv(M, META["BLOCK_M"]),)
        with torch_device_fn.device(x.device):
            var_mean_welford_kernel[grid](x, var, mean, M, N, correction)

    if not keepdim:
        var = var.squeeze(dim=dim)
        mean = mean.squeeze(dim=dim)
    return var, mean