Coverage for src/flag_gems/runtime/backend/_ascend/ops/stack.py: 0%

36 statements  

« prev     ^ index     » next       coverage.py v7.6.9, created at 2026-03-17 02:35 +0800

1import itertools 

2import logging 

3from typing import List, Tuple, Union 

4 

5import torch 

6import triton 

7 

8from flag_gems.utils import pointwise_dynamic 

9from flag_gems.utils.tensor_wrapper import StridedBuffer 

10 

# Module-scoped logger named "flag_gems.runtime._ascend.ops.stack" so log
# output can be filtered per-op within the flag_gems logging hierarchy.
logger = logging.getLogger(f'flag_gems.runtime._ascend.ops.{__name__.split(".")[-1]}')

12 

13 

# Identity elementwise kernel: `pointwise_dynamic` wraps the Triton jit
# function so it can be instantiated for an arbitrary rank and used to copy
# an input view into an output view (`stack` below passes StridedBuffer
# views for both sides).
@pointwise_dynamic(is_tensor=[True], promotion_methods=[(0, "DEFAULT")])
@triton.jit
def copy_func(x):
    return x

18 

19 

def stack(
    tensors: Union[Tuple[torch.Tensor, ...], List[torch.Tensor]], dim: int = 0
) -> torch.Tensor:
    """Stack a sequence of equal-shaped tensors along a new dimension.

    Mirrors ``torch.stack``: every input must have the same shape, and the
    output gains a new axis of size ``len(tensors)`` at position ``dim``.
    Each input is copied into its slice of the pre-allocated output with the
    ``copy_func`` Triton kernel, using strided views instead of materializing
    intermediate tensors.

    Args:
        tensors: non-empty sequence of tensors, all with identical shape.
        dim: position of the new axis; valid range is
            ``[-(ndim + 1), ndim]`` where ``ndim`` is the rank of the inputs.

    Returns:
        A new tensor of shape ``inp_shape[:dim] + [len(tensors)] + inp_shape[dim:]``
        with the dtype and device of ``tensors[0]``.

    Raises:
        RuntimeError: if ``tensors`` is empty or the shapes differ.
        IndexError: if ``dim`` is out of range.
    """
    logger.debug("GEMS_ASCEND STACK")

    if len(tensors) == 0:
        raise RuntimeError("stack expected a non-empty TensorList")

    # Validate `dim` once against the first tensor.  The previous version only
    # checked tensors 1..n-1 inside the shape loop, so a single-tensor call
    # with an out-of-range `dim` slipped through unvalidated.
    ndim = tensors[0].dim()
    if dim < -ndim - 1 or dim > ndim:
        raise IndexError(
            "Dimension out of range (expected to be in range of [{}, {}], but got {})".format(
                -ndim - 1, ndim, dim
            )
        )

    inp_shapes = [list(t.shape) for t in tensors]
    inp0_shape = inp_shapes[0]
    for i, s in enumerate(inp_shapes[1:]):
        if s != inp0_shape:
            raise RuntimeError(
                f"stack expects each tensor to be equal size, but got {inp0_shape} at entry 0 and {s} at entry {i + 1}"
            )

    # Normalize a negative `dim` (the new axis makes the valid rank ndim + 1).
    if dim < 0:
        dim = dim + len(inp0_shape) + 1

    # Shape of each input viewed with a singleton axis inserted at `dim`, and
    # the final output shape with the stack axis of size len(tensors).
    in0_shape = inp0_shape[:dim] + [1] + inp0_shape[dim:]
    out_shape = inp0_shape[:dim] + [len(tensors)] + inp0_shape[dim:]
    out0 = torch.empty(out_shape, dtype=tensors[0].dtype, device=tensors[0].device)
    out0_strides = out0.stride()
    # Element offset of each input's slice along the stack axis: a running sum
    # of the output's stride at `dim`, starting from 0.
    out0_offsets = list(
        itertools.accumulate([out0_strides[dim] for _ in inp_shapes[:-1]], initial=0)
    )

    for a, out0_offset in zip(tensors, out0_offsets):
        # Reshape (not view) tolerates non-contiguous inputs; the kernel then
        # copies the strided input view into the offset output view in place.
        a = a.reshape(in0_shape)
        in_view = StridedBuffer(a, in0_shape, a.stride())
        out_view = StridedBuffer(out0, in0_shape, out0.stride(), offset=out0_offset)
        copy_func.instantiate(a.ndim)(in_view, out0=out_view)

    return out0