Coverage for src/flag_gems/runtime/backend/_cambricon/ops/hstack.py: 0%

50 statements  


import itertools
import logging
from typing import List, Tuple, Union

import torch
import triton

from flag_gems.utils.tensor_wrapper import StridedBuffer

from ..utils.pointwise_dynamic import pointwise_dynamic

logger = logging.getLogger("flag_gems").getChild(__name__.lstrip("."))

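# Identity kernel: the pointwise_dynamic wrapper generates the launch and the
# strided load/store code around it, so returning x unchanged is all that is
# needed to copy an input view into an output view.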

@pointwise_dynamic(is_tensor=[True], promotion_methods=[(0, "DEFAULT")])
@triton.jit
def copy_func(x):
    return x



def hstack(
    tensors: Union[Tuple[torch.Tensor, ...], List[torch.Tensor]]
) -> torch.Tensor:
    logger.debug("GEMS_CAMBRICON HSTACK")

    if len(tensors) == 0:
        raise RuntimeError("hstack expected a non-empty TensorList")

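    # Strategy: validate and normalize the inputs, allocate the output once,
    # then launch one strided copy per input directly into its slice of the
    # output buffer.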

    # Work on a copy: the list is mutated below (0-d promotion and dtype
    # casts), and the signature also allows tuples, which do not support
    # item assignment.
    tensors = list(tensors)

    # Promote a 0-d first tensor to 1-d so it has an axis to concatenate along.
    if tensors[0].ndim == 0:
        tensors[0] = tensors[0].view(1)
    inp0_shape = tensors[0].shape
    out_shape = list(inp0_shape)
    inp_shapes = [inp0_shape]

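    # Compute the common result dtype across all inputs (torch's promotion
    # rules) and cast any tensor that does not already have it.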

    dtypes = [t.dtype for t in tensors]
    dtype = dtypes[0]
    for ty in dtypes[1:]:
        dtype = torch.promote_types(dtype, ty)

    for i, tensor in enumerate(tensors):
        if tensor.dtype != dtype:
            tensors[i] = tensor.to(dtype)

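    # torch.hstack semantics: 1-d inputs are concatenated along dim 0,
    # higher-rank inputs along dim 1 (columns).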

    if len(inp0_shape) == 1:
        dim = 0
    else:
        dim = 1

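    # Check the remaining inputs against the first: every tensor must have the
    # same rank, and the same size on every dimension except `dim`.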

    for tensor_num, tensor in enumerate(tensors[1:]):
        if tensor.ndim == 0:
            tensor = tensor.view(1)
            # Write the 1-d view back so the copy loop below sees it as well.
            tensors[tensor_num + 1] = tensor
        if tensor.ndim != tensors[0].ndim:
            raise RuntimeError(
                f"Tensors must have same number of dimensions: got {tensors[0].ndim} and {tensor.ndim}"
            )

        inp_shape = tensor.shape
        inp_shapes.append(inp_shape)

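        # Reject size mismatches on any non-concatenation dimension.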

        for i in range(len(inp_shape)):
            if i != dim and inp_shape[i] != inp0_shape[i]:
                raise RuntimeError(
                    f"Sizes of tensors must match except in dimension {dim}. "
                    f"Expected size {inp0_shape[i]} but got size {inp_shape[i]} "
                    f"for tensor number {tensor_num + 1} in the list."
                )


    out_shape[dim] = sum(s[dim] for s in inp_shapes)

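    # Allocate the output once, then compute the element offset at which each
    # input's block starts: an exclusive prefix sum of the preceding extents
    # along `dim`, scaled by the output's stride for that dimension.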

    out0 = torch.empty(out_shape, dtype=dtype, device=tensors[0].device)
    out0_strides = out0.stride()
    out0_offsets = list(
        itertools.accumulate(
            [s[dim] * out0_strides[dim] for s in inp_shapes[:-1]], initial=0
        )
    )

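    # Copy each input into its block of the output. The output view reuses the
    # input's shape but the output's strides plus the block offset, so each
    # kernel launch writes directly into the final buffer with no staging copy.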

    for a, out0_offset in zip(tensors, out0_offsets):
        in_view = StridedBuffer(a, a.shape, a.stride())
        out_view = StridedBuffer(out0, a.shape, out0.stride(), offset=out0_offset)
        copy_func.instantiate(a.ndim)(in_view, out0=out_view)

    return out0
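
# Minimal usage sketch, assuming a Cambricon device is registered with
# flag_gems (the "mlu" device string is an assumption, not part of this file):
#
#   a = torch.randn(2, 3, device="mlu")
#   b = torch.randn(2, 5, device="mlu")
#   hstack([a, b]).shape   # 2-d inputs concatenate along dim 1 -> (2, 8)
#
#   x = torch.arange(3, device="mlu")
#   y = torch.arange(4, device="mlu")
#   hstack((x, y)).shape   # 1-d inputs concatenate along dim 0 -> (7,)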