Coverage for src/flag_gems/experimental_ops/exp2_.py: 0%
30 statements
import torch
import triton
import triton.language as tl


@triton.jit
def exp2_(x_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
    # Each program instance handles one contiguous block of elements.
    pid = tl.program_id(axis=0)
    block_start = pid * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements
    x = tl.load(x_ptr + offsets, mask=mask)
    # Compute 2**x in float32 via the identity 2**x == exp(x * ln 2).
    x_f32 = x.to(tl.float32)
    ln2 = 0.693147180559945309417232121458176568
    y_f32 = tl.exp(x_f32 * ln2)
    # Cast back to the input dtype and store in place.
    y = y_f32.to(x.dtype)
    tl.store(x_ptr + offsets, y, mask=mask)
# Preserve a reference to the Triton kernel before defining the Python
# wrapper with the same name below.
exp2__kernel = exp2_
def exp2_(*args, **kwargs):
    # Accept the tensor positionally or via the "input"/"x" keyword.
    if args:
        x = args[0]
    else:
        x = kwargs.get("input", kwargs.get("x"))
    assert isinstance(
        x, torch.Tensor
    ), "exp2_ expects a torch.Tensor as its first argument"
    assert x.is_cuda, "exp2_ Triton kernel requires a CUDA tensor"
    # The flat pointer arithmetic in the kernel assumes a contiguous layout.
    assert x.is_contiguous(), "exp2_ requires a contiguous tensor"
    n_elements = x.numel()
    BLOCK_SIZE = 1024
    grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
    exp2__kernel[grid](x, n_elements, BLOCK_SIZE=BLOCK_SIZE)
    return x
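
Below is a minimal usage sketch, not part of the covered file, that exercises the in-place wrapper against torch.exp2 as a reference; it assumes a CUDA device is available.

if __name__ == "__main__":
    # Hypothetical smoke test (assumption: a CUDA device is present).
    x = torch.randn(5000, device="cuda", dtype=torch.float32)
    expected = torch.exp2(x)
    exp2_(x)  # in place: x now holds 2**x
    torch.testing.assert_close(x, expected)
    print("exp2_ matches torch.exp2")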