Coverage for src/flag_gems/experimental_ops/relu6.py: 0% (26 statements)
coverage.py v7.6.9, created at 2026-03-21 14:31 +0800
import torch
import triton
import triton.language as tl


@triton.jit
def relu6_kernel(x_ptr, out_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
    # Each program instance handles one contiguous block of BLOCK_SIZE elements.
    pid = tl.program_id(axis=0)
    block_start = pid * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)
    # Mask off out-of-range offsets so the last block never reads or writes past the end.
    mask = offsets < n_elements

    x = tl.load(x_ptr + offsets, mask=mask)
    # ReLU6 clamps to [0, 6]: min(max(x, 0), 6).
    y = tl.maximum(x, 0)
    y = tl.minimum(y, 6)
    tl.store(out_ptr + offsets, y, mask=mask)

def relu6(*args, **kwargs):
    # Accept the input tensor positionally or under the keyword names
    # commonly seen at torch call sites: 'input', 'self', or 'x'.
    x = (
        args[0]
        if len(args) > 0
        else kwargs.get("input", kwargs.get("self", kwargs.get("x")))
    )
    if x is None:
        raise TypeError(
            "relu6 expects a tensor as the first positional argument or keyword 'input'/'self'/'x'."
        )
    # Triton pointer arithmetic assumes a flat, contiguous layout.
    x_contig = x.contiguous()

    # Fall back to plain torch.clamp on non-CUDA tensors; the Triton kernel only targets GPU.
    if not x_contig.is_cuda:
        return torch.clamp(x_contig, min=0, max=6)

    out = torch.empty_like(x_contig)
    n_elements = out.numel()
    if n_elements == 0:
        return out  # empty tensor: avoid launching a zero-sized grid
    # One program instance per BLOCK_SIZE chunk of the flattened tensor.
    grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
    relu6_kernel[grid](x_contig, out, n_elements, BLOCK_SIZE=1024)
    return out
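
A minimal sanity check, as a sketch rather than part of the measured module: it exercises whichever dispatch path the current machine supports (the Triton kernel on CUDA, the torch.clamp fallback on CPU) against torch.nn.functional.relu6 as the reference. The __main__ guard and the test shapes are illustrative assumptions.

if __name__ == "__main__":
    device = "cuda" if torch.cuda.is_available() else "cpu"
    x = torch.randn(10_000, device=device)
    # Reference implementation: torch's own ReLU6.
    torch.testing.assert_close(relu6(x), torch.nn.functional.relu6(x))
    # The keyword form should behave identically to the positional call.
    torch.testing.assert_close(relu6(input=x), relu6(x))
    print(f"relu6 OK on {device}")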