Coverage for src/flag_gems/experimental_ops/log1p_.py: 0%
36 statements
import torch
import triton
import triton.language as tl
@triton.jit
def log1p_(x_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
    # Each program instance handles one contiguous block of BLOCK_SIZE elements.
    pid = tl.program_id(axis=0)
    block_start = pid * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements

    x = tl.load(x_ptr + offsets, mask=mask)
    # Compute log(1 + x) in float32 for accuracy on half-precision inputs,
    # then cast back to the original element type before the in-place store.
    x_fp32 = x.to(tl.float32)
    y = tl.log(x_fp32 + 1.0)
    y_cast = y.to(x.dtype)

    tl.store(x_ptr + offsets, y_cast, mask=mask)
# The wrapper below reuses the public name `log1p_`, so keep a reference to the
# jitted kernel before it is shadowed.
_log1p_kernel = log1p_
def log1p_(*args, **kwargs):
    # Accept the tensor either positionally or via the keyword 'input'.
    if len(args) > 0:
        x = args[0]
    else:
        x = kwargs.get("input", None)

    if x is None:
        raise ValueError(
            "log1p_ expects a tensor as the first argument or keyword 'input'."
        )
    if not isinstance(x, torch.Tensor):
        raise TypeError("log1p_ expects a torch.Tensor as input.")

    # Fall back to the ATen in-place op for unsupported device, layout, or dtype.
    # float64 also falls back, since the kernel computes in float32 and would
    # otherwise lose precision.
    if not x.is_cuda:
        return torch.ops.aten.log1p_(x)
    if not x.is_contiguous():
        return torch.ops.aten.log1p_(x)
    if x.dtype not in (torch.float16, torch.bfloat16, torch.float32):
        return torch.ops.aten.log1p_(x)

    n_elements = x.numel()
    if n_elements == 0:
        return x

    # Launch enough programs to cover every element; the op mutates x in place.
    grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
    _log1p_kernel[grid](x, n_elements, BLOCK_SIZE=1024)
    return x
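
A quick way to exercise this op is to compare it against torch.log1p on a CUDA tensor. The snippet below is a minimal sketch, not part of the module above; it assumes the module is importable as flag_gems.experimental_ops.log1p_, following the path in the report header.

    # Usage sketch (hypothetical import path taken from the report header).
    import torch

    from flag_gems.experimental_ops.log1p_ import log1p_

    x = torch.rand(10_000, device="cuda", dtype=torch.float16)
    # Reference result computed in float32, then cast back like the kernel does.
    expected = torch.log1p(x.float()).to(x.dtype)

    log1p_(x)  # mutates x in place and also returns it

    torch.testing.assert_close(x, expected, rtol=1e-3, atol=1e-3)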