Coverage for src/flag_gems/runtime/backend/_ascend/ops/arange.py: 0%
38 statements
coverage.py v7.6.9, created at 2026-03-21 14:31 +0800
import logging
import math

import torch
import triton
import triton.language as tl

from flag_gems import runtime
from flag_gems.utils import libentry
from flag_gems.utils import triton_lang_extension as tle

logger = logging.getLogger(f'flag_gems.runtime._ascend.ops.{__name__.split(".")[-1]}')
@libentry()
@triton.jit
def arange_func(y_ptr, start, end, step, size, BLOCK_SIZE: tl.constexpr):
    pid = tle.program_id(0)
    y_ptr += pid * BLOCK_SIZE
    step_offset = pid * BLOCK_SIZE * step

    cols = tl.arange(0, BLOCK_SIZE)
    arange_val = cols * step + step_offset + start
    global_idx = cols + pid * BLOCK_SIZE
    tl.store(y_ptr + cols, arange_val, mask=global_idx < size)
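# Each program instance `pid` writes BLOCK_SIZE consecutive values:
#   out[pid * BLOCK_SIZE + i] = start + (pid * BLOCK_SIZE + i) * step
# for i in range(BLOCK_SIZE), with stores masked off once the global index
# reaches `size`, so the final (partial) block does not write out of bounds.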
def arange_start(
    start, end, step=1, *, dtype=None, layout=None, device=None, pin_memory=None
):
    logger.debug("GEMS_ASCEND ARANGE")
    if dtype is torch.int64:
        sgn = (step > 0) - (step < 0)
        size = (end - start + step - sgn) // step
    else:
        size = math.ceil((end - start) / step)

    BLOCK_SIZE = 128
    # Launch a 1D grid, capped at 65535 programs.
    grid = min(triton.cdiv(size, BLOCK_SIZE), 65535)

    if dtype is None:
        dtype = torch.int64
    if pin_memory is None:
        pin_memory = False
    if device is None:
        # Note(Zhengzekang): Torch's default device is CPU, but Triton targets the GPU.
        device = runtime.device.name

    result = torch.empty((size,), device=device, dtype=dtype, pin_memory=pin_memory)
    arange_func[(grid,)](result, start, end, step, size, BLOCK_SIZE)
    return result
def arange(end, *, dtype=None, layout=None, device=None, pin_memory=None):
    return arange_start(
        0, end, 1, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory
    )
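A minimal usage sketch for the two entry points above, assuming the Ascend runtime is available and the functions are called directly (in practice FlagGems typically routes these through torch.arange once its op patching is enabled); the expected values in the comments follow the size/step arithmetic in arange_start:

    import torch

    out = arange(10, dtype=torch.float32)                   # tensor([0., 1., ..., 9.]) on the runtime device
    vals = arange_start(2, 11, step=3, dtype=torch.int64)   # size = (11 - 2 + 3 - 1) // 3 = 3 -> tensor([2, 5, 8])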