Coverage for src/flag_gems/runtime/backend/_kunlunxin/ops/add.py: 0% (33 statements)
import logging

import torch
import triton

from ..utils.pointwise_dynamic import pointwise_dynamic

logger = logging.getLogger("flag_gems").getChild(__name__.lstrip("."))
# Elementwise kernel for tensor + tensor * alpha; both operands are tensors,
# alpha is passed as a scalar.
@pointwise_dynamic(is_tensor=[True, True, False], promotion_methods=[(0, 1, "DEFAULT")])
@triton.jit
def add_func(x, y, alpha):
    return x + y * alpha
# Elementwise kernel for tensor + scalar * alpha; only the first operand is a tensor.
@pointwise_dynamic(
    is_tensor=[True, False, False], promotion_methods=[(0, 1, "DEFAULT")]
)
@triton.jit
def add_func_tensor_scalar(x, y, alpha):
    return x + y * alpha
# Elementwise kernel for scalar + tensor * alpha; only the second operand is a tensor.
@pointwise_dynamic(
    is_tensor=[False, True, False], promotion_methods=[(0, 1, "DEFAULT")]
)
@triton.jit
def add_func_scalar_tensor(x, y, alpha):
    return x + y * alpha
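# The pointwise_dynamic decorator (from ..utils.pointwise_dynamic) wraps each
# Triton kernel in a host-side launcher. From the usage here, is_tensor appears
# to mark which positional arguments are tensors (the others are passed as
# scalars), and promotion_methods=[(0, 1, "DEFAULT")] presumably applies the
# default dtype-promotion rule between arguments 0 and 1 to pick the output
# dtype; the generated wrapper also accepts an out0= keyword for writing the
# result into an existing tensor (used by add_ below).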
def add(A, B, *, alpha=1):
    """Out-of-place add: dispatch on which operands are tensors."""
    logger.debug("GEMS ADD")
    if isinstance(A, torch.Tensor) and isinstance(B, torch.Tensor):
        return add_func(A, B, alpha)
    elif isinstance(A, torch.Tensor):
        return add_func_tensor_scalar(A, B, alpha)
    elif isinstance(B, torch.Tensor):
        return add_func_scalar_tensor(A, B, alpha)
    else:
        # Both operands are Python scalars; fall back to a plain tensor result.
        return torch.tensor(A + B * alpha)
def add_(A, B, *, alpha=1.0):
    """In-place add: A must be a tensor because it also serves as the output."""
    logger.debug("GEMS ADD_")
    if isinstance(A, torch.Tensor) and isinstance(B, torch.Tensor):
        return add_func(A, B, alpha, out0=A)
    elif isinstance(A, torch.Tensor):
        return add_func_tensor_scalar(A, B, alpha, out0=A)
    else:
        # A scalar A cannot receive an in-place result, so the scalar/tensor
        # case handled in add() cannot occur here.
        raise ValueError("Unreachable.")
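# Minimal usage sketch (assumes flag_gems is installed with the kunlunxin
# backend active and a supported device available; the device string and
# tensor shapes below are illustrative, not taken from this file):
#
#     x = torch.randn(1024, device="cuda")
#     y = torch.randn(1024, device="cuda")
#     add(x, y, alpha=2)      # tensor + tensor  -> add_func
#     add(x, 3.0)             # tensor + scalar  -> add_func_tensor_scalar
#     add(3.0, y)             # scalar + tensor  -> add_func_scalar_tensor
#     add(3.0, 4.0)           # scalar + scalar  -> torch.tensor(7.0)
#     add_(x, y, alpha=0.5)   # in-place: writes x + 0.5 * y into x via out0=x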