Coverage for src/flag_gems/fused/fused_add_rms_norm.py: 58%
40 statements
« prev ^ index » next coverage.py v7.6.9, created at 2026-03-13 10:08 +0800
1import logging
2import math
4import triton
5import triton.language as tl
7from flag_gems.runtime import torch_device_fn
8from flag_gems.utils import libentry
9from flag_gems.utils import triton_lang_extension as tle
11logger = logging.getLogger(__name__)
@libentry()
@triton.jit(do_not_specialize=["eps"])
def fused_add_rms_norm_kernel(
    # One program instance handles one row of length N:
    #   residual <- input + residual          (stored back to residual_ptr)
    #   input    <- rmsnorm(input + residual) * w   (stored back to input_ptr)
    # Both buffers are overwritten in place.
    input_ptr,  # pointer to the input
    residual_ptr,  # pointer to the residual
    w_ptr,  # pointer to the weights
    in_stride_r,  # how much to increase the pointer when moving by 1 row
    in_stride_c,  # how much to increase the pointer when moving by 1 col
    r_stride_r,  # how much to increase the pointer when moving by 1 row
    r_stride_c,  # how much to increase the pointer when moving by 1 col
    N,  # number of columns in in_ptr
    eps,  # epsilon to avoid division by zero
    BLOCK_SIZE: tl.constexpr,  # next power of two >= N; one block covers a full row
):
    # Accumulate in float32 for half-precision inputs to avoid precision loss
    # in the sum-of-squares reduction; otherwise compute in the native dtype.
    if tl.constexpr(input_ptr.dtype.element_ty == tl.float16) or tl.constexpr(
        input_ptr.dtype.element_ty == tl.bfloat16
    ):
        cdtype = tl.float32
    else:
        cdtype = input_ptr.dtype.element_ty

    # Each program id selects one row; advance both base pointers to that row.
    pid = tle.program_id(0)
    input_ptr += pid * in_stride_r
    residual_ptr += pid * r_stride_r

    # mask disables the tail lanes when N is not a power of two;
    # masked lanes load 0.0 so they do not perturb the reduction below.
    mask = tl.arange(0, BLOCK_SIZE) < N
    cols = tl.arange(0, BLOCK_SIZE)
    x = tl.load(input_ptr + cols * in_stride_c, mask, other=0.0).to(cdtype)
    r = tl.load(residual_ptr + cols * r_stride_c, mask, other=0.0).to(cdtype)

    x += r
    # write back to residual
    tl.store(residual_ptr + cols * r_stride_c, x, mask=mask)

    # Mean of squares over the N valid columns (masked lanes contribute 0),
    # then the reciprocal root-mean-square with eps for numerical safety.
    var = tl.sum(x * x / N, axis=0)
    rrms = 1 / tl.sqrt(var + eps)

    # Weight is loaded in its storage dtype; the product promotes as needed.
    w = tl.load(w_ptr + tl.arange(0, BLOCK_SIZE), mask=mask, other=0.0)
    # NOTE(review): the .to(cdtype) keeps the product in the compute dtype;
    # tl.store then casts to the pointer's element type on write — presumably
    # the intended round-trip for fp16/bf16 inputs, confirm against Triton docs.
    y = (x * rrms * w).to(cdtype)
    # write back to input
    tl.store(input_ptr + cols * in_stride_c, y, mask=mask)
def fused_add_rms_norm(x, residual, normalized_shape, weight, eps=1e-5):
    """
    This function performs fused residual addition and RMS normalization **in-place**.
    Both `x` and `residual` tensors will be modified. Use with caution if these tensors
    are reused elsewhere or require gradients.

    Args:
        x: input tensor; overwritten with ``rmsnorm(x + residual) * weight``.
        residual: residual tensor, same shape as ``x``; overwritten with ``x + residual``.
        normalized_shape: trailing dims over which the RMS statistic is computed.
        weight: scale tensor of shape ``normalized_shape``.
        eps: small constant added to the variance for numerical stability.

    Returns:
        The (mutated) ``x`` and ``residual`` tensors.
    """
    logger.debug(
        "GEMS FUSED_ADD_RMS_NORM FORWARD, [input shape]: %s, [residual shape]: %s, [weight shape]: %s",
        x.size(),
        residual.size(),
        weight.size(),
    )
    # Collapse leading dims into M rows of N normalized elements each.
    dim = x.ndim - len(normalized_shape)
    M = math.prod(x.shape[:dim])
    N = math.prod(normalized_shape)

    # One block spans a full row; must be a power of two for tl.arange.
    BLOCK_SIZE = triton.next_power_of_2(N)

    # BUGFIX: .contiguous() returns a *copy* for non-contiguous tensors, which
    # previously made the kernel write into throwaway buffers and broke the
    # documented in-place contract. Run the kernel on contiguous buffers, then
    # copy the results back into the caller's tensors if copies were made.
    x_c = x.contiguous()
    residual_c = residual.contiguous()
    weight_c = weight.contiguous()

    with torch_device_fn.device(x.device):
        # Strides are (N, 1) for both buffers since they are contiguous here.
        fused_add_rms_norm_kernel[M,](
            x_c, residual_c, weight_c, N, 1, N, 1, N, eps, BLOCK_SIZE
        )

    if x_c is not x:
        x.copy_(x_c)
    if residual_c is not residual:
        residual.copy_(residual_c)
    return x, residual