Coverage for src/flag_gems/ops/sinh_.py: 62%

40 statements  

coverage.py v7.6.9, created at 2026-03-25 02:48 +0800

# Generated by KernelGen: https://github.com/flagos-ai/KernelGen
import logging

import torch
import triton
import triton.language as tl

from flag_gems.runtime import torch_device_fn

logger = logging.getLogger(__name__)


@triton.jit
def sinh_kernel_(x_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
    pid = tl.program_id(axis=0)
    block_start = pid * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements

    x = tl.load(x_ptr + offsets, mask=mask, other=0.0)
    x_f32 = x.to(tl.float32)
    y = 0.5 * (tl.exp(x_f32) - tl.exp(-x_f32))
    y_cast = y.to(x.dtype)
    tl.store(x_ptr + offsets, y_cast, mask=mask)


def sinh_(*args, **kwargs):
    logger.debug("GEMS SINH_")
    # Accept various calling conventions: sinh_(tensor), sinh_(self=tensor), sinh_(input=tensor)
    x = None
    if args:
        x = args[0]
    else:
        x = kwargs.get("self", kwargs.get("input", None))
    if x is None:
        raise TypeError("sinh_ expected a Tensor as the first argument")

    if not isinstance(x, torch.Tensor):
        raise TypeError("sinh_ expected a torch.Tensor")

    if x.numel() == 0:
        return x

    if not x.is_contiguous():
        raise RuntimeError(
            "sinh_ Triton kernel currently supports only contiguous tensors"
        )

    supported_dtypes = (torch.float16, torch.float32, torch.bfloat16)
    if x.dtype not in supported_dtypes:
        raise RuntimeError(
            f"sinh_ Triton kernel supports dtypes {supported_dtypes}, but got {x.dtype}"
        )

    n_elements = x.numel()
    BLOCK_SIZE = 1024
    grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)

    with torch_device_fn.device(x.device):
        sinh_kernel_[grid](x, n_elements, BLOCK_SIZE=BLOCK_SIZE)
    return x
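
A minimal pytest sketch of how sinh_ could be exercised: the happy path checked against torch.sinh, plus the validation branches that the 62% line rate suggests are currently untested. The module path flag_gems.ops.sinh_ and the "cuda" device string are assumptions; substitute the backend flag_gems is actually built for.

import pytest
import torch

from flag_gems.ops.sinh_ import sinh_  # assumed module path


def test_sinh__matches_torch_sinh():
    x = torch.randn(1024, device="cuda", dtype=torch.float32)
    expected = torch.sinh(x)
    out = sinh_(x)  # in-place: the returned tensor aliases x
    assert out is x
    torch.testing.assert_close(out, expected, rtol=1e-5, atol=1e-5)


def test_sinh__rejects_invalid_inputs():
    with pytest.raises(TypeError):
        sinh_()  # no tensor supplied at all
    with pytest.raises(TypeError):
        sinh_("not a tensor")  # wrong argument type
    with pytest.raises(RuntimeError):
        sinh_(torch.randn(4, 4, device="cuda").t())  # non-contiguous view
    with pytest.raises(RuntimeError):
        sinh_(torch.randn(8, device="cuda", dtype=torch.float64))  # unsupported dtype

The keyword forms sinh_(self=x) and sinh_(input=x) accept the same tensor and would cover the remaining branch of the argument-handling block.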