Coverage for src/flag_gems/experimental_ops/arcsinh_.py: 0% (30 statements)
import torch
import triton
import triton.language as tl
@triton.jit
def arcsinh_(x_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
    # Each program instance handles one contiguous block of BLOCK_SIZE elements.
    pid = tl.program_id(axis=0)
    block_start = pid * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements

    # Compute asinh(x) = ln(x + sqrt(x^2 + 1)) in float32 for precision,
    # then cast back to the input dtype.
    x = tl.load(x_ptr + offsets, mask=mask)
    x32 = x.to(tl.float32)
    x2 = x32 * x32
    tmp = tl.sqrt(x2 + 1.0)
    y32 = tl.log(x32 + tmp)
    y = y32.to(x.dtype)

    # In-place: write the result back over the input.
    tl.store(x_ptr + offsets, y, mask=mask)
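

# For reference, a plain-PyTorch sketch of the identity the kernel implements.
# This helper (arcsinh_reference) is illustrative only and not part of the
# module. Note the naive form log(x + sqrt(x^2 + 1)) loses accuracy for
# large-magnitude negative inputs, where torch.asinh uses a more careful
# formulation.
def arcsinh_reference(x: torch.Tensor) -> torch.Tensor:
    x32 = x.float()
    return torch.log(x32 + torch.sqrt(x32 * x32 + 1.0)).to(x.dtype)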
# Preserve a reference to the kernel before defining the wrapper with the same name.
arcsinh__kernel = arcsinh_
def arcsinh_(*args, **kwargs):
    if len(args) == 0:
        raise TypeError("arcsinh_ expected at least 1 argument (a Tensor)")
    x = args[0]
    if not isinstance(x, torch.Tensor):
        raise TypeError("arcsinh_ expected a torch.Tensor as the first argument")

    # Fall back to the eager ATen op for unsupported cases: CPU tensors,
    # non-contiguous layouts, and non-floating-point dtypes.
    if (not x.is_cuda) or (not x.is_contiguous()) or (not x.dtype.is_floating_point):
        torch.ops.aten.arcsinh_(x)
        return x
    # Launch one program per BLOCK_SIZE-element chunk of the flattened tensor.
    n_elements = x.numel()
    grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
    arcsinh__kernel[grid](x, n_elements, BLOCK_SIZE=1024)
    return x
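

# Minimal usage sketch, assuming a CUDA device is available (illustrative,
# not part of the module). Tolerances are loosened slightly because the
# naive log-based identity is less accurate than torch.asinh for negative
# inputs of larger magnitude.
if __name__ == "__main__":
    x = torch.randn(10_000, device="cuda")
    expected = torch.asinh(x)   # out-of-place reference, computed before mutation
    arcsinh_(x)                 # in-place Triton path
    torch.testing.assert_close(x, expected, rtol=1e-4, atol=1e-4)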