Coverage for src/flag_gems/experimental_ops/selu_.py: 0% (39 statements)

import torch
import triton
import triton.language as tl


@triton.jit
def selu_(x_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
    # Each program instance handles one contiguous block of elements.
    pid = tl.program_id(axis=0)
    block_start = pid * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements

    x = tl.load(x_ptr + offsets, mask=mask)

    # Compute in float32 for accuracy, then cast back to the input dtype.
    # SELU: scale * (x if x > 0 else alpha * (exp(x) - 1))
    x_f32 = x.to(tl.float32)
    alpha = 1.6732632423543772
    scale = 1.0507009873554805
    y_f32 = scale * tl.where(x_f32 > 0, x_f32, alpha * (tl.exp(x_f32) - 1.0))
    y = y_f32.to(x.dtype)

    # In-place update: write the result back to the input buffer.
    tl.store(x_ptr + offsets, y, mask=mask)


# Keep a handle to the Triton kernel before defining the Python wrapper
# with the same name.
selu__kernel = selu_


def selu_(*args, **kwargs):
    """In-place SELU. Dispatches to the Triton kernel when possible,
    otherwise falls back to ATen."""
    # Extract the input tensor from positional or keyword arguments.
    x = None
    if len(args) > 0 and torch.is_tensor(args[0]):
        x = args[0]
    elif "input" in kwargs and torch.is_tensor(kwargs["input"]):
        x = kwargs["input"]
    elif "self" in kwargs and torch.is_tensor(kwargs["self"]):
        x = kwargs["self"]
    elif "x" in kwargs and torch.is_tensor(kwargs["x"]):
        x = kwargs["x"]
    else:
        raise ValueError(
            "selu_ expects a Tensor as the first argument or under the "
            "'input'/'self'/'x' keyword."
        )

    # Fall back to ATen for CPU tensors, non-contiguous layouts, or dtypes
    # the kernel does not support.
    supported_dtypes = {torch.float16, torch.bfloat16, torch.float32}
    if (not x.is_cuda) or (not x.is_contiguous()) or (x.dtype not in supported_dtypes):
        torch.ops.aten.selu_(x)
        return x

    n_elements = x.numel()
    if n_elements == 0:
        return x

    # One program per BLOCK_SIZE-element tile of the flattened tensor.
    BLOCK_SIZE = 1024
    grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
    selu__kernel[grid](x, n_elements, BLOCK_SIZE=BLOCK_SIZE)
    return x
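

# A minimal usage sketch (not part of the original module): exercises the
# in-place wrapper on a CUDA tensor and checks it against torch.selu. The
# tensor length 4097 is an arbitrary choice that spans multiple blocks.
if __name__ == "__main__":
    if torch.cuda.is_available():
        t = torch.randn(4097, device="cuda", dtype=torch.float16)
        # Reference computed out-of-place in float32, then cast back,
        # mirroring the kernel's internal precision.
        ref = torch.selu(t.float()).to(t.dtype)
        selu_(t)  # mutates t in place and also returns it
        torch.testing.assert_close(t, ref, rtol=1e-3, atol=1e-3)
        print("Triton selu_ matches torch.selu")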