Coverage for src/flag_gems/runtime/backend/_kunlunxin/ops/zeros.py: 0%

31 statements  

« prev     ^ index     » next       coverage.py v7.6.9, created at 2026-03-19 02:32 +0800

1import logging 

2 

3import torch 

4import triton 

5import triton.language as tl 

6 

7from flag_gems.runtime import device, torch_device_fn 

8from flag_gems.utils import libentry 

9from flag_gems.utils import triton_lang_extension as tle 

10from flag_gems.utils.shape_utils import volume 

11 

# Child logger under the shared "flag_gems" root; lstrip(".") guards against a
# leading dot if __name__ were ever a relative module path.
logger = logging.getLogger("flag_gems").getChild(__name__.lstrip("."))
# Alias the runtime device descriptor so the `device` keyword parameter of
# `zeros` below does not shadow it.
device_ = device

14 

15 

@libentry()
@triton.jit
def zeros_kernel(
    output_ptr,
    n_elements: tl.constexpr,
    BLOCK_SIZE: tl.constexpr,
):
    """Write 0.0 into ``n_elements`` entries starting at ``output_ptr``.

    Each program instance clears one ``BLOCK_SIZE``-wide slice of the
    buffer; the 1D launch grid is expected to cover all elements.
    """
    # 1D launch grid, so only axis 0 carries a program id.
    program = tle.program_id(axis=0)
    first = program * BLOCK_SIZE
    idx = first + tl.arange(0, BLOCK_SIZE)
    # Guard the tail block when n_elements is not a multiple of BLOCK_SIZE.
    in_bounds = idx < n_elements
    tl.store(output_ptr + idx, 0.0, mask=in_bounds)

28 

29 

def zeros(size, *, dtype=None, layout=None, device=None, pin_memory=None):
    """Return a tensor of shape ``size`` filled with zeros.

    Mirrors the ``torch.zeros`` signature; ``layout`` and ``pin_memory``
    are accepted for API compatibility but are not used here.

    Args:
        size: Shape of the output tensor.
        dtype: Element type; defaults to ``torch.get_default_dtype()``.
        layout: Unused, kept for signature compatibility.
        device: Target device; defaults to the flag_gems runtime device.
        pin_memory: Unused, kept for signature compatibility.
    """
    logger.debug("GEMS ZEROS")
    dtype = torch.get_default_dtype() if dtype is None else dtype
    if device is None:
        device = torch.device(device_.name)

    out = torch.empty(size, device=device, dtype=dtype)
    numel = volume(size)
    # Fixed 12-program launch — presumably tuned to this backend's compute
    # cluster count (NOTE(review): confirm against kunlunxin hardware docs).
    # Each program clears ceil(numel / 12) elements, rounded up to a power
    # of two as tl.arange requires; BLOCK_SIZE=1 keeps the degenerate
    # numel == 0 launch valid (the mask then stores nothing).
    grid = (12, 1, 1)
    block = triton.next_power_of_2(triton.cdiv(numel, 12)) if numel > 0 else 1
    with torch_device_fn.device(device):
        zeros_kernel[grid](
            out,
            numel,
            BLOCK_SIZE=block,
            buffer_size_limit=2048,
            isCloseDtypeConvert=True,
        )
    return out