Coverage for src/flag_gems/experimental_ops/exp_.py: 0%
32 statements
coverage.py v7.6.9, created at 2026-03-21 14:31 +0800
import torch
import triton
import triton.language as tl

@triton.jit
def exp_(x_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
    # Elementwise in-place exp over a flattened, contiguous tensor.
    pid = tl.program_id(axis=0)
    block_start = pid * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements
    x = tl.load(x_ptr + offsets, mask=mask)
    # Compute in float32 for accuracy, then cast back to the input dtype.
    x_fp32 = x.to(tl.float32)
    y = tl.exp(x_fp32)
    y = y.to(x.dtype)
    tl.store(x_ptr + offsets, y, mask=mask)

# Preserve a reference to the Triton kernel before defining the Python wrapper
# with the same name.
exp__kernel = exp_

def exp_(*args, **kwargs):
    # Extract the input tensor
    x = None
    if len(args) >= 1:
        x = args[0]
    elif "input" in kwargs:
        x = kwargs["input"]
    elif "self" in kwargs:
        x = kwargs["self"]
    else:
        raise ValueError(
            "exp_ expects a tensor as the first positional argument "
            "or 'input'/'self' keyword."
        )

    # Handle empty tensors quickly
    if x.numel() == 0:
        return x

    # Fallbacks for unsupported cases:
    # - Non-CUDA tensors
    # - Non-floating or complex dtypes
    # - float64 (fp64) dtype
    # - Non-contiguous tensors
    if (
        (not x.is_cuda)
        or x.is_complex()
        or (not x.is_floating_point())
        or (x.dtype == torch.float64)
        or (not x.is_contiguous())
    ):
        # Use PyTorch's in-place operation as a safe fallback
        return torch.ops.aten.exp_(x)

    # Launch one program per BLOCK_SIZE chunk of the flattened tensor.
    n_elements = x.numel()
    grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)  # noqa: E731
    exp__kernel[grid](x, n_elements, BLOCK_SIZE=1024)
    return x
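
Usage sketch (assumptions: the module is importable as flag_gems.experimental_ops.exp_, inferred from the path in the header, and a CUDA device is available for the Triton path; CPU or fp64 tensors take the aten fallback):

import torch
from flag_gems.experimental_ops.exp_ import exp_

x = torch.randn(8, device="cuda", dtype=torch.float16)
out = exp_(x)   # Triton path: mutates x in place and returns it
assert out is x

z = torch.randn(8, dtype=torch.float64)
exp_(z)         # fp64 (and CPU) tensors fall back to torch.ops.aten.exp_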