Coverage for src/flag_gems/runtime/backend/_ascend/ops/arange.py: 0%

38 statements  

« prev     ^ index     » next       coverage.py v7.6.9, created at 2026-03-26 15:32 +0800

1import logging 

2import math 

3 

4import torch 

5import triton 

6import triton.language as tl 

7 

8from flag_gems import runtime 

9from flag_gems.utils import libentry 

10from flag_gems.utils import triton_lang_extension as tle 

11 

# Module logger named after this file's basename, yielding e.g.
# "flag_gems.runtime._ascend.ops.arange".
logger = logging.getLogger(f'flag_gems.runtime._ascend.ops.{__name__.split(".")[-1]}')

13 

14 

@libentry()
@triton.jit
def arange_func(y_ptr, start, end, step, size, BLOCK_SIZE: tl.constexpr):
    """Fill one BLOCK_SIZE chunk of the arithmetic sequence into y_ptr.

    Program instance `pid` writes output elements
    [pid * BLOCK_SIZE, (pid + 1) * BLOCK_SIZE), each valued
    start + index * step, masked so only indices < size are stored.
    """
    pid = tle.program_id(0)
    # Advance the base pointer to this program's chunk of the output.
    y_ptr += pid * BLOCK_SIZE
    # Value offset contributed by all chunks before this one.
    step_offset = pid * BLOCK_SIZE * step

    cols = tl.arange(0, BLOCK_SIZE)
    arange_val = cols * step + step_offset + start
    # Despite the name, `mask` holds each lane's *global element index*;
    # the actual boolean mask is the `mask < size` comparison at the store.
    mask = cols + pid * BLOCK_SIZE
    # NOTE(review): there is no grid-stride loop here, yet the launcher
    # (arange_start) caps the grid at 65535 programs — sizes above
    # 65535 * BLOCK_SIZE would leave a tail unwritten. Confirm upstream
    # guarantees on the maximum size.
    tl.store(y_ptr + cols, arange_val, mask=mask < size)

26 

27 

def arange_start(
    start, end, step=1, *, dtype=None, layout=None, device=None, pin_memory=None
):
    """Return a 1-D tensor with values in [start, end) spaced by step.

    Mirrors ``torch.arange(start, end, step)`` but launches the Ascend
    triton kernel ``arange_func`` to fill the output.

    Args:
        start: First value of the sequence.
        end: Exclusive upper (or lower, for negative step) bound.
        step: Spacing between consecutive values (default 1).
        dtype: Output dtype; defaults to ``torch.int64`` when None.
        layout: Accepted for torch API compatibility; unused here.
        device: Target device; defaults to the runtime's device name
            (torch's default would be CPU, but triton targets the device).
        pin_memory: Passed through to ``torch.empty``; defaults to False.

    Returns:
        A 1-D tensor of length ``ceil((end - start) / step)``.
    """
    logger.debug("GEMS_ASCEND ARANGE")

    # Resolve the dtype default BEFORE computing size: the original code
    # tested `dtype is torch.int64` while dtype was still None, so the
    # default (int64) path fell through to float ceil division, which can
    # lose precision for very large integer ranges. With the default
    # applied first, int64 uses the exact integer formula.
    if dtype is None:
        dtype = torch.int64

    if dtype is torch.int64:
        sgn = (step > 0) - (step < 0)  # sign of step: ceil-division trick
        size = (end - start + step - sgn) // step
    else:
        size = math.ceil((end - start) / step)

    BLOCK_SIZE = 128
    # NOTE(review): grid is capped at 65535 and arange_func has no
    # grid-stride loop, so sizes above 65535 * BLOCK_SIZE would leave a
    # tail unwritten — confirm the expected size range.
    grid = min(triton.cdiv(size, BLOCK_SIZE), 65535)

    if pin_memory is None:
        pin_memory = False

    if device is None:
        device = (
            runtime.device.name
        )  # Note(Zhengzekang): Torch default value is CPU, but triton is target to GPU.

    result = torch.empty((size,), device=device, dtype=dtype, pin_memory=pin_memory)
    arange_func[grid,](result, start, end, step, size, BLOCK_SIZE)
    return result

55 

56 

def arange(end, *, dtype=None, layout=None, device=None, pin_memory=None):
    """Return a 1-D tensor of values 0, 1, ..., end - 1.

    Thin convenience wrapper: delegates to ``arange_start`` with
    ``start=0`` and ``step=1``, forwarding all keyword options unchanged.
    """
    options = dict(dtype=dtype, layout=layout, device=device, pin_memory=pin_memory)
    return arange_start(0, end, 1, **options)