Coverage for src/flag_gems/experimental_ops/sinh_.py: 0% (38 statements)
import torch
import triton
import triton.language as tl


@triton.jit
def sinh_(x_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
    # Each program instance handles one contiguous block of BLOCK_SIZE elements.
    pid = tl.program_id(axis=0)
    block_start = pid * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements

    # Compute sinh(x) = (exp(x) - exp(-x)) / 2 in float32 for accuracy,
    # then cast back to the input dtype and store the result in place.
    x = tl.load(x_ptr + offsets, mask=mask, other=0.0)
    x_f32 = x.to(tl.float32)
    y = 0.5 * (tl.exp(x_f32) - tl.exp(-x_f32))
    y_cast = y.to(x.dtype)
    tl.store(x_ptr + offsets, y_cast, mask=mask)


# Keep a reference to the Triton kernel before defining the Python wrapper with the same name.
_sinh_kernel = sinh_


def sinh_(*args, **kwargs):
    # Accept various calling conventions: sinh_(tensor), sinh_(self=tensor), sinh_(input=tensor).
    x = None
    if args:
        x = args[0]
    else:
        x = kwargs.get("self", kwargs.get("input", None))
    if x is None:
        raise TypeError("sinh_ expected a Tensor as the first argument")

    if not isinstance(x, torch.Tensor):
        raise TypeError("sinh_ expected a torch.Tensor")

    if x.numel() == 0:
        return x

    if not x.is_cuda:
        raise RuntimeError("sinh_ Triton kernel requires a CUDA tensor")

    if not x.is_contiguous():
        raise RuntimeError(
            "sinh_ Triton kernel currently supports only contiguous tensors"
        )

    supported_dtypes = (torch.float16, torch.float32, torch.bfloat16)
    if x.dtype not in supported_dtypes:
        raise RuntimeError(
            f"sinh_ Triton kernel supports dtypes {supported_dtypes}, but got {x.dtype}"
        )

    n_elements = x.numel()
    BLOCK_SIZE = 1024
    grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)

    # Launch the kernel; x is modified in place and returned, matching the
    # in-place semantics of torch.Tensor.sinh_.
    _sinh_kernel[grid](x, n_elements, BLOCK_SIZE=BLOCK_SIZE)
    return x
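
A minimal usage sketch (not part of the module above): it assumes a CUDA device is available and that the module is importable as flag_gems.experimental_ops.sinh_, which is inferred from the file path and may differ in the installed package. It checks the in-place result against torch.sinh as a reference.

    import torch

    # Assumed import path based on the file location shown above.
    from flag_gems.experimental_ops.sinh_ import sinh_

    x = torch.randn(1 << 20, device="cuda", dtype=torch.float32)
    expected = torch.sinh(x)  # reference result, computed out of place
    sinh_(x)                  # mutates x in place and returns it
    torch.testing.assert_close(x, expected, rtol=1e-5, atol=1e-5)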