Coverage for src/flag_gems/runtime/backend/_cambricon/ops/sub.py: 0%
31 statements
« prev ^ index » next coverage.py v7.6.9, created at 2026-03-26 15:32 +0800
1import logging
3import torch
4import triton
6from ..utils.pointwise_dynamic import pointwise_dynamic
8logger = logging.getLogger("flag_gems").getChild(__name__.lstrip("."))
11@pointwise_dynamic(
12 is_tensor=[True, True, False, False], promotion_methods=[(0, 1, "DEFAULT")]
13)
14@triton.jit
15def sub_func(x, y, alpha, inplace):
16 return x - y * alpha
19@pointwise_dynamic(
20 is_tensor=[True, False, False, False], promotion_methods=[(0, 1, "DEFAULT")]
21)
22@triton.jit
23def sub_func_tensor_scalar(x, y, alpha, inplace):
24 return x - y * alpha
27@pointwise_dynamic(
28 is_tensor=[False, True, False, False], promotion_methods=[(0, 1, "DEFAULT")]
29)
30@triton.jit
31def sub_func_scalar_tensor(x, y, alpha, inplace):
32 return x - y * alpha
35def sub(A, B, *, alpha=1):
36 logger.debug("GEMS_CAMBRICON SUB")
37 if isinstance(A, torch.Tensor) and isinstance(B, torch.Tensor):
38 return sub_func(A, B, alpha, False)
39 elif isinstance(A, torch.Tensor):
40 return sub_func_tensor_scalar(A, B, alpha, False)
41 elif isinstance(B, torch.Tensor):
42 return sub_func_scalar_tensor(A, B, alpha, False)
43 else:
44 # Both scalar
45 return torch.tensor(A - B * alpha)
48def sub_(A, B, *, alpha=1):
49 logger.debug("GEMS_CAMBRICON SUB_")
50 if isinstance(B, torch.Tensor):
51 return sub_func(A, B, alpha, True, out0=A)
52 else:
53 return sub_func_tensor_scalar(A, B, alpha, True, out0=A)