Coverage for src/flag_gems/experimental_ops/asinh_.py: 0%
45 statements
« prev ^ index » next coverage.py v7.6.9, created at 2026-03-10 02:30 +0800
« prev ^ index » next coverage.py v7.6.9, created at 2026-03-10 02:30 +0800
1import torch
2import triton
3import triton.language as tl
@triton.jit
def asinh_(x_ptr, n_elements, BLOCK_SIZE: tl.constexpr, COMPUTE_FP32: tl.constexpr):
    # In-place elementwise asinh over a flat buffer of n_elements values.
    # COMPUTE_FP32 upcasts half-precision inputs (fp16/bf16) to fp32 for the
    # intermediate math, then casts back on store.
    pid = tl.program_id(axis=0)
    block_start = pid * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements

    x = tl.load(x_ptr + offsets, mask=mask)

    if COMPUTE_FP32:
        xf = x.to(tl.float32)
    else:
        xf = x

    # asinh is odd: asinh(x) = sign(x) * asinh(|x|).  Evaluating the log on
    # |x| avoids the catastrophic cancellation of log(x + sqrt(x*x + 1)) for
    # negative x (e.g. in fp32, x = -1e4 makes x + sqrt(x*x+1) round to 0 and
    # the naive formula returns -inf instead of ~-9.9).
    ax = tl.abs(xf)
    yf = tl.log(ax + tl.sqrt(ax * ax + 1.0))
    yf = tl.where(xf < 0, -yf, yf)

    y = yf.to(x.dtype)
    tl.store(x_ptr + offsets, y, mask=mask)
25asinh__kernel = asinh_
def asinh_(*args, **kwargs):
    """In-place inverse hyperbolic sine, mirroring ``torch.Tensor.asinh_``.

    The tensor may be passed positionally or via the ``input``/``self``/``x``
    keyword.  The tensor is mutated in place and also returned.  Non-CUDA
    tensors and dtypes the Triton kernel does not support fall back to
    ``torch.ops.aten.asinh_``.

    Raises:
        ValueError: if no Tensor argument is found.
    """
    x = None
    if args and isinstance(args[0], torch.Tensor):
        x = args[0]
    else:
        for key in ("input", "self", "x"):
            val = kwargs.get(key)
            if isinstance(val, torch.Tensor):
                x = val
                break
    if x is None:
        raise ValueError("asinh_: expected a Tensor as the first argument")

    # Eager fallback for tensors the kernel cannot handle.
    if not x.is_cuda:
        return torch.ops.aten.asinh_(x)
    if x.dtype not in (torch.float16, torch.bfloat16, torch.float32, torch.float64):
        return torch.ops.aten.asinh_(x)

    n_elements = x.numel()
    if n_elements == 0:
        # Nothing to do; also avoids launching a zero-sized grid.
        return x

    BLOCK_SIZE = 1024
    # Half-precision inputs are upcast to fp32 inside the kernel for accuracy.
    COMPUTE_FP32 = x.dtype in (torch.float16, torch.bfloat16)
    grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)

    # The kernel assumes a dense 1-D layout: run on a contiguous buffer and
    # copy the result back when the input is strided.
    work = x if x.is_contiguous() else x.contiguous()
    asinh__kernel[grid](work, n_elements, BLOCK_SIZE=BLOCK_SIZE, COMPUTE_FP32=COMPUTE_FP32)
    if work is not x:
        x.copy_(work)
    return x