Coverage for src/flag_gems/runtime/backend/_cambricon/ops/arange.py: 0% (41 statements)
coverage.py v7.6.9, created at 2026-03-22 16:54 +0800
import logging
import math

import torch
import triton
import triton.language as tl

from flag_gems import runtime
from flag_gems.utils import libentry, libtuner

from ..utils import TOTAL_CORE_NUM

logger = logging.getLogger("flag_gems").getChild(__name__.lstrip("."))

@libentry()
@libtuner(
    configs=[
        triton.Config(kwargs={"BLOCK_SIZE": 1024}, num_stages=3, num_warps=1),
        triton.Config(kwargs={"BLOCK_SIZE": 4096}, num_stages=3, num_warps=1),
        triton.Config(kwargs={"BLOCK_SIZE": 8192}, num_stages=3, num_warps=1),
        triton.Config(kwargs={"BLOCK_SIZE": 16384}, num_stages=3, num_warps=1),
    ],
    key=["size"],
    strategy=["log"],
)
@triton.jit
def arange_func(y_ptr, start, end, step, size, BLOCK_SIZE: tl.constexpr):
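    # Each program writes one BLOCK_SIZE chunk per iteration, then strides
    # forward by num_programs * BLOCK_SIZE (a grid-stride loop); the store
    # mask below clips the final partial block at `size`.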
    pid = tl.program_id(axis=0)
    num_jobs = tl.num_programs(axis=0)
    block_start = pid * BLOCK_SIZE
    block_step = num_jobs * BLOCK_SIZE
    for block_start_offset in range(block_start, size, block_step):
        offset = tl.arange(0, BLOCK_SIZE) + block_start_offset
        arange_val = offset * step + start
        tl.store(y_ptr + offset, arange_val, mask=offset < size)

def arange_start(
    start, end, step=1, *, dtype=None, layout=None, device=None, pin_memory=None
):
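    """Equivalent of `torch.arange(start, end, step)` on the current backend.

    `layout` is accepted for signature compatibility but is not used.
    """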
    logger.debug("GEMS_CAMBRICON ARANGE")
    # Resolve defaults first so the int64 default takes the exact integer path
    # when computing `size`.
    if dtype is None:
        dtype = torch.int64
    if pin_memory is None:
        pin_memory = False
    if device is None:
        # Note(Zhengzekang): Torch's default device is CPU, but Triton targets
        # the accelerator.
        device = runtime.device.name

    if dtype is torch.int64:
        # Exact ceiling division for integers, valid for either sign of step.
        sgn = (step > 0) - (step < 0)
        size = (end - start + step - sgn) // step
    else:
        size = math.ceil((end - start) / step)

    assert (
        size < torch.iinfo(torch.int32).max
    ), f"Size {size} must be less than the maximum int32 value"

    # Launch at most TOTAL_CORE_NUM programs; the kernel grid-strides over the rest.
    grid = lambda META: (min(triton.cdiv(size, META["BLOCK_SIZE"]), TOTAL_CORE_NUM),)
    result = torch.empty((size,), device=device, dtype=dtype, pin_memory=pin_memory)
    arange_func[grid](result, start, end, step, size)
    return result

def arange(end, *, dtype=None, layout=None, device=None, pin_memory=None):
    return arange_start(
        0, end, 1, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory
    )
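
# A minimal smoke test, assuming this module is importable and the Cambricon
# device that `runtime.device.name` resolves to is available; an illustrative
# sketch, not part of the original module.
if __name__ == "__main__":
    out = arange(8)
    ref = torch.arange(8, device=out.device)
    assert torch.equal(out, ref), (out, ref)
    print("arange ok:", out.tolist())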