Coverage for src/flag_gems/runtime/backend/_ascend/ops/hstack.py: 0%
43 statements
import itertools
import logging
from typing import List, Tuple, Union

import torch
import triton

from flag_gems.utils import pointwise_dynamic
from flag_gems.utils.tensor_wrapper import StridedBuffer

logger = logging.getLogger(f'flag_gems.runtime._ascend.ops.{__name__.split(".")[-1]}')

@pointwise_dynamic(is_tensor=[True], promotion_methods=[(0, "DEFAULT")])
@triton.jit
def copy_func(x):
    return x
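
# copy_func is an identity pointwise kernel: it returns its input unchanged,
# so instantiating it for a given rank and calling it with an input view and
# an output view performs a device-side strided copy between the two buffers.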

def hstack(
    tensors: Union[Tuple[torch.Tensor, ...], List[torch.Tensor]]
) -> torch.Tensor:
    logger.debug("GEMS_ASCEND HSTACK")

    if len(tensors) == 0:
        raise RuntimeError("hstack expected a non-empty TensorList")

    # Work on a list: the input may be a tuple, which does not support the
    # item assignment used when promoting 0-d tensors below.
    tensors = list(tensors)
    if tensors[0].ndim == 0:
        tensors[0] = tensors[0].view(1)
    inp0_shape = tensors[0].shape
    out_shape = list(inp0_shape)
    inp_shapes = [inp0_shape]

    if len(inp0_shape) == 1:
        dim = 0
    else:
        dim = 1
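
    # torch.hstack semantics: 1-D inputs are concatenated along dim 0,
    # everything else along dim 1, e.g. (2, 3) + (2, 5) -> (2, 8).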
    for tensor_num, tensor in enumerate(tensors[1:]):
        if tensor.ndim == 0:
            # Promote and write back, so the copy loop below sees the 1-d view.
            tensor = tensor.view(1)
            tensors[tensor_num + 1] = tensor
        if tensor.ndim != tensors[0].ndim:
            raise RuntimeError(
                f"Tensors must have same number of dimensions: got {tensors[0].ndim} and {tensor.ndim}"
            )

        inp_shape = tensor.shape
        inp_shapes.append(inp_shape)

        for i in range(len(inp_shape)):
            if i != dim and inp_shape[i] != inp0_shape[i]:
                raise RuntimeError(
                    f"Sizes of tensors must match except in dimension {dim}. "
                    f"Expected size {inp0_shape[i]} but got size {inp_shape[i]} "
                    f"for tensor number {tensor_num + 1} in the list."
                )

    out_shape[dim] = sum(s[dim] for s in inp_shapes)

    out0 = torch.empty(out_shape, dtype=tensors[0].dtype, device=tensors[0].device)
    out0_strides = out0.stride()
    out0_offsets = list(
        itertools.accumulate(
            [s[dim] * out0_strides[dim] for s in inp_shapes[:-1]], initial=0
        )
    )
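    # Worked example: inputs of shape (2, 3) and (2, 5) give dim == 1 and a
    # contiguous output of shape (2, 8) with stride (8, 1), so accumulate
    # produces offsets [0, 3 * 1] == [0, 3]: each input's view starts where
    # the previous input's columns end.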

    # Copy each input into its slice of the output via strided views; the
    # output view reuses out0's strides but starts at this input's offset.
    for a, out0_offset in zip(tensors, out0_offsets):
        in_view = StridedBuffer(a, a.shape, a.stride())
        out_view = StridedBuffer(out0, a.shape, out0.stride(), offset=out0_offset)
        copy_func.instantiate(a.ndim)(in_view, out0=out_view)

    return out0
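
# Usage sketch (illustration only, not part of the module): the backend op is
# expected to match torch.hstack on both the 1-D and N-D paths. The "npu"
# device string is an assumption about the Ascend deployment.
#
#     a = torch.arange(6, device="npu").reshape(2, 3)
#     b = torch.arange(10, device="npu").reshape(2, 5)
#     assert torch.equal(hstack([a, b]), torch.hstack([a, b]))   # dim=1 path
#
#     x = torch.tensor([1, 2], device="npu")
#     y = torch.tensor([3], device="npu")
#     assert torch.equal(hstack((x, y)), torch.hstack((x, y)))   # dim=0 path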