Coverage for src/flag_gems/ops/log1p_.py: 61%

38 statements  

# Generated by KernelGen: https://github.com/flagos-ai/KernelGen

import logging

import torch
import triton
import triton.language as tl

from flag_gems.runtime import torch_device_fn

logger = logging.getLogger(__name__)


@triton.jit
def log1p_kernel_(x_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
    # Each program instance handles one contiguous tile of BLOCK_SIZE elements.
    pid = tl.program_id(axis=0)
    block_start = pid * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements  # guards the final, possibly partial tile

    # Compute log(1 + x) in float32, then cast back to the input dtype.
    # Note: float64 inputs are also downcast to float32 here.
    x = tl.load(x_ptr + offsets, mask=mask)
    x_fp32 = x.to(tl.float32)
    y = tl.log(x_fp32 + 1.0)
    y_cast = y.to(x.dtype)

    # In-place op: the result overwrites the input buffer.
    tl.store(x_ptr + offsets, y_cast, mask=mask)


def log1p_(*args, **kwargs):
    logger.debug("GEMS LOG1P_")
    # Accept the tensor either positionally or as the keyword 'input'.
    if len(args) > 0:
        x = args[0]
    else:
        x = kwargs.get("input", None)

    if x is None:
        raise ValueError(
            "log1p_ expects a tensor as the first argument or keyword 'input'."
        )
    if not isinstance(x, torch.Tensor):
        raise TypeError("log1p_ expects a torch.Tensor as input.")

    # Fall back to ATen for layouts and dtypes the kernel does not handle.
    if not x.is_contiguous():
        return torch.ops.aten.log1p_(x)
    if x.dtype not in (torch.float16, torch.bfloat16, torch.float32, torch.float64):
        return torch.ops.aten.log1p_(x)

    n_elements = x.numel()
    if n_elements == 0:
        return x

    # Launch one program per BLOCK_SIZE-element tile of the flattened tensor.
    grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
    with torch_device_fn.device(x.device):
        log1p_kernel_[grid](x, n_elements, BLOCK_SIZE=1024)
    return x
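
Below is a minimal usage sketch, not part of the file above. It assumes flag_gems is installed, that this module is importable as flag_gems.ops.log1p_, and that a CUDA device is available; torch.log1p on a float32 copy serves as the reference.

# A minimal usage sketch, assuming flag_gems is installed, the module is
# importable as flag_gems.ops.log1p_, and a CUDA device is available.
import torch

from flag_gems.ops.log1p_ import log1p_

x = torch.rand(10_000, device="cuda", dtype=torch.float16)
ref = torch.log1p(x.to(torch.float32)).to(torch.float16)

y = log1p_(x)  # contiguous float16 tensor: takes the Triton path, mutates x
assert y is x  # the op is in-place and returns its input
torch.testing.assert_close(y, ref, rtol=1e-3, atol=1e-3)

# A non-contiguous view skips the kernel and falls back to torch.ops.aten.log1p_:
z = torch.rand(64, 64, device="cuda").t()
log1p_(z)

Because the wrapper mutates and returns its argument, the identity assert above distinguishes the in-place contract from an out-of-place one.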