Coverage for src/flag_gems/experimental_ops/log10_.py: 0% of 38 statements
import torch
import triton
import triton.language as tl


# In-place elementwise log10: computed as log(x) * (1 / ln(10)) in fp32, then cast back to the input dtype.
@triton.jit
def log10_(x_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
    pid = tl.program_id(axis=0)
    block_start = pid * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements

    x = tl.load(x_ptr + offsets, mask=mask, other=0.0)
    x_fp32 = x.to(tl.float32)
    y_fp32 = tl.log(x_fp32) * 0.4342944819032518  # 1 / ln(10)
    y = y_fp32.to(x.dtype)
    tl.store(x_ptr + offsets, y, mask=mask)


# Keep a handle to the Triton kernel before defining the Python wrapper with the same name.
_log10__kernel = log10_


def log10_(*args, **kwargs):
    """In-place log10: runs the Triton kernel on supported CUDA tensors, otherwise falls back to PyTorch."""
    if len(args) == 0:
        raise TypeError(
            "log10_ expects at least one positional argument: a torch.Tensor."
        )
    x = args[0]
    if not isinstance(x, torch.Tensor):
        raise TypeError("log10_ expects a torch.Tensor as its first argument.")
    if x.numel() == 0:
        return x
    if x.device.type != "cuda":
        # Fall back to the PyTorch implementation for non-CUDA tensors.
        return x.log10_()
    if x.dtype not in (torch.float16, torch.bfloat16, torch.float32):
        # Fall back to PyTorch for unsupported dtypes (e.g. float64, complex).
        return x.log10_()

    BLOCK_SIZE = 1024
    if x.is_contiguous():
        n_elements = x.numel()
        grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
        _log10__kernel[grid](x, n_elements, BLOCK_SIZE=BLOCK_SIZE)
    else:
        # Operate on a contiguous copy, then write the result back into the original tensor.
        buf = x.contiguous()
        n_elements = buf.numel()
        grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
        _log10__kernel[grid](buf, n_elements, BLOCK_SIZE=BLOCK_SIZE)
        x.copy_(buf)

    return x
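A minimal usage sketch, not part of the covered module: it assumes the file above is importable as flag_gems.experimental_ops.log10_ (src layout) and that a CUDA device is available; the tensor shape and tolerances are illustrative.

# Usage sketch (assumptions: module path flag_gems.experimental_ops.log10_, CUDA device present).
import torch

from flag_gems.experimental_ops.log10_ import log10_

x = torch.rand(1000, device="cuda", dtype=torch.float16) + 0.5
expected = torch.log10(x.float())  # reference computed before the in-place update

log10_(x)  # modifies x in place via the Triton kernel

torch.testing.assert_close(x.float(), expected, rtol=1e-3, atol=1e-3)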