Coverage for src/flag_gems/runtime/backend/_kunlunxin/ops/div.py: 0%

179 statements  

« prev     ^ index     » next       coverage.py v7.6.9, created at 2026-03-15 02:11 +0800

import logging
import math

import torch
import triton
import triton.language as tl

from _kunlunxin.utils.codegen_config_utils import CodeGenConfig

from flag_gems.utils import tl_extra_shim

from ..utils.pointwise_dynamic import pointwise_dynamic

11 

# Module logger, namespaced as a child of the "flag_gems" root logger.
logger = logging.getLogger("flag_gems").getChild(__name__.lstrip("."))

# Division/rounding primitives re-exported from the backend shim:
# round-to-nearest division, round-toward-zero division, floating-point
# fmod, and truncation. (div_rz and trunc are unused in this file but may
# be consumed by other modules — do not remove.)
div_rn = tl_extra_shim.div_rn
div_rz = tl_extra_shim.div_rz
fmod = tl_extra_shim.fmod
trunc = tl_extra_shim.trunc
xpu_trunc_div = tl_extra_shim.xpu_trunc_div  # use it if we need to cmp result with xpu

18 

# Code-generation configuration passed to the true-division kernel below
# (via ``config=config_``).
# NOTE(review): the first four arguments are positional — presumably a tile
# size (512), per-dimension grid limits (65536 each), a warp/cluster count
# (32), and a boolean toggle — confirm against CodeGenConfig's signature.
config_ = CodeGenConfig(
    512,
    (65536, 65536, 65536),
    32,
    True,
    prefer_1d_tile=True,
    buffer_size_limit=4096,
    isCloseVectorization=True,  # per the name, disables vectorization — TODO confirm
    unroll_num=8,
)

29 

30 

# Elementwise true division kernel for tensor/tensor operands.
# Integer inputs are promoted to floating point per the INT_TO_FLOAT
# promotion method declared on the decorator.
@pointwise_dynamic(promotion_methods=[(0, 1, "INT_TO_FLOAT")], config=config_)
@triton.jit
def true_div_func(x, y):
    return x / y

35 

36 

# True division kernel specialized for a tensor dividend and scalar divisor
# (is_tensor=[True, False]); same INT_TO_FLOAT promotion as the
# tensor/tensor variant.
@pointwise_dynamic(is_tensor=[True, False], promotion_methods=[(0, 1, "INT_TO_FLOAT")])
@triton.jit
def true_div_func_tensor_scalar(x, y):
    return x / y

41 

42 

# True division kernel specialized for a scalar dividend and tensor divisor
# (is_tensor=[False, True]).
@pointwise_dynamic(is_tensor=[False, True], promotion_methods=[(0, 1, "INT_TO_FLOAT")])
@triton.jit
def true_div_func_scalar_tensor(x, y):
    return x / y

47 

48 

def true_divide(A, B):
    """Elementwise true division A / B, dispatching on operand kinds.

    Routes to the Triton kernel matching which operands are tensors;
    falls back to plain Python division when both are scalars.
    """
    logger.debug("GEMS TRUE_DIVIDE")
    a_is_tensor = isinstance(A, torch.Tensor)
    b_is_tensor = isinstance(B, torch.Tensor)
    if a_is_tensor and b_is_tensor:
        return true_div_func(A, B)
    if a_is_tensor:
        return true_div_func_tensor_scalar(A, B)
    if b_is_tensor:
        return true_div_func_scalar_tensor(A, B)
    # Neither operand is a tensor: wrap the Python result in a tensor.
    return torch.tensor(A / B)

60 

61 

def true_divide_out(A, B, out):
    """Elementwise true division with the result written into *out*."""
    logger.debug("GEMS TRUE_DIVIDE OUT")
    a_is_tensor = isinstance(A, torch.Tensor)
    b_is_tensor = isinstance(B, torch.Tensor)
    if a_is_tensor and b_is_tensor:
        return true_div_func(A, B, out0=out)
    if a_is_tensor:
        return true_div_func_tensor_scalar(A, B, out0=out)
    if b_is_tensor:
        return true_div_func_scalar_tensor(A, B, out0=out)
    # Both operands are plain scalars.
    if out is None:
        return torch.tensor(A / B)
    return out.fill_(A / B)

73 

74 

def true_divide_(A, B):
    """In-place elementwise true division: A /= B (A must be a tensor)."""
    logger.debug("GEMS TRUE_DIVIDE_")
    # Pick the kernel by whether the divisor is a tensor, then write into A.
    kernel = true_div_func if isinstance(B, torch.Tensor) else true_div_func_tensor_scalar
    return kernel(A, B, out0=A)

81 

82 

# Truncated (round-toward-zero) division kernel for tensor/tensor operands.
# xpu_trunc_div comes from the backend shim; per its name it rounds toward
# zero to match the XPU's result — TODO confirm against the shim.
@pointwise_dynamic(promotion_methods=[(0, 1, "DEFAULT")])
@triton.jit
def trunc_div_func(x, y):
    return xpu_trunc_div(x, y)

87 

88 

# Truncated division kernel for a tensor dividend and scalar divisor.
@pointwise_dynamic(is_tensor=[True, False], promotion_methods=[(0, 1, "DEFAULT")])
@triton.jit
def trunc_div_func_tensor_scalar(x, y):
    return xpu_trunc_div(x, y)

93 

94 

# Truncated division kernel for a scalar dividend and tensor divisor.
@pointwise_dynamic(is_tensor=[False, True], promotion_methods=[(0, 1, "DEFAULT")])
@triton.jit
def trunc_div_func_scalar_tensor(x, y):
    return xpu_trunc_div(x, y)

99 

100 

def trunc_divide(A, B):
    """Divide A by B, rounding the quotient toward zero.

    Dispatches to the Triton kernels when at least one operand is a
    tensor; falls back to pure Python when both are scalars.
    """
    logger.debug("GEMS TRUNC_DIVIDE")
    if isinstance(A, torch.Tensor) and isinstance(B, torch.Tensor):
        return trunc_div_func(A, B)
    elif isinstance(A, torch.Tensor):
        return trunc_div_func_tensor_scalar(A, B)
    elif isinstance(B, torch.Tensor):
        return trunc_div_func_scalar_tensor(A, B)
    else:
        # Both scalar. BUGFIX: previously returned torch.tensor(A / B),
        # which skipped the truncation entirely (7 / 2 gave 3.5 instead
        # of 3). math.trunc rounds toward zero, matching the kernel path.
        return torch.tensor(math.trunc(A / B))

112 

113 

def trunc_divide_(A, B):
    """In-place truncated division of tensor A by B (rounds toward zero)."""
    logger.debug("GEMS TRUNC_DIVIDE_")
    # Choose the kernel variant by the divisor's kind; results go into A.
    kernel = trunc_div_func if isinstance(B, torch.Tensor) else trunc_div_func_tensor_scalar
    return kernel(A, B, out0=A)

120 

121 

@triton.jit
def _int_floordiv(x, y):
    # TODO: request Triton to add an integer remainder builtin
    # The semantics of Triton floordiv differ from PyTorch/NumPy:
    # Triton floordiv equates to
    #   (x - np.fmod(x, y)) / y
    # whereas PyTorch floordiv is
    #   (x - np.remainder(x, y)) / y
    # The results show a one-off difference when
    #   C1) x and y have opposite signs,
    #   and C2) x is not a multiple of y.
    # Apart from the above, there's an erroneous case: x // 0 returns -1
    # whereas in PyTorch x // 0 returns -1 if x >= 0 and -2 if x < 0,
    # but that special case is coalesced into the C1/C2 check below, so
    # no extra handling is needed.
    r = x % y
    c1 = r != 0  # x is not a multiple of y
    c2 = (x < 0) ^ (y < 0)  # operands have opposite signs
    # Shift the truncated quotient down by one when both conditions hold.
    return tl.where(c1 & c2, x // y - 1, x // y)

141 

142 

# To be consistent with Python, NumPy and PyTorch, we have to implement it
# in the following way.

145# CPython 

146# https://github.com/python/cpython/blob/ace008c531dd685a30c1dd68f9b5ba35f20171cf/Objects/floatobject.c#L636 

147# numpy 

148# https://github.com/numpy/numpy/blob/a4ad142aa1282a77bbb05acd706cb57c9cc29846/numpy/_core/src/npymath/npy_math_internal.h.src#L532 

149# torch 

150# https://github.com/pytorch/pytorch/blob/d6d9183456cd07ca0b361a194b98c2fb196e7c36/c10/util/generic_math.h#L23 

@triton.jit
def _float_floordiv(x, y):
    # Floating-point floor division following the CPython/NumPy/PyTorch
    # algorithm referenced in the links above.
    # NOTE: fmod's sign is the same as the dividend
    remainder = fmod(x, y)
    imperfect = remainder != 0.0
    different_sign = (x < 0) ^ (y < 0)

    # NOTE: we have to use div_rn explicitly here
    q = div_rn(x - remainder, y)
    # Opposite signs with a non-zero remainder mean the true floor is one
    # below the computed quotient.
    q = tl.where(imperfect & different_sign, q - 1, q)

    # Snap q to an exact integer, rounding up when floor(q) landed more
    # than half a unit below q (guards against rounding drift in div_rn).
    floor_q = tl.math.floor(q)
    c = q - floor_q > 0.5
    floor_q = tl.where(c, floor_q + 1.0, floor_q)

    # Preserve IEEE signed zero: a zero quotient from opposite-sign
    # operands becomes -0.0, otherwise +0.0.
    q_is_zeros = q == 0.0
    floor_q = tl.where(q_is_zeros, tl.where(different_sign, -0.0, 0.0), floor_q)

    # Division by zero falls back to plain IEEE x / y (±inf or nan).
    is_div_by_zero = y == 0.0
    float_division = x / y
    out = tl.where(is_div_by_zero, float_division, floor_q)
    return out

173 

174 

# Floor-division kernel for tensor/tensor operands: integer inputs take the
# PyTorch-compatible integer correction, floats take the CPython-style path.
@pointwise_dynamic(promotion_methods=[(0, 1, "DEFAULT")])
@triton.jit
def floor_div_func(x, y):
    # BUGFIX: the condition previously tested x's type twice
    # (x.type.scalar.is_int() & x.type.scalar.is_int()); it must check
    # both operands before taking the integer path.
    if x.type.scalar.is_int() & y.type.scalar.is_int():
        return _int_floordiv(x, y)
    else:
        return _float_floordiv(x, y)

182 

183 

# Floor-division kernel for a tensor dividend and scalar divisor.
@pointwise_dynamic(is_tensor=[True, False], promotion_methods=[(0, 1, "DEFAULT")])
@triton.jit
def floor_div_func_tensor_scalar(x, y):
    # BUGFIX: previously tested x's type twice; both operands must be
    # integers to take the integer floor-division path.
    if x.type.scalar.is_int() & y.type.scalar.is_int():
        return _int_floordiv(x, y)
    else:
        return _float_floordiv(x, y)

191 

192 

# Floor-division kernel for a scalar dividend and tensor divisor.
@pointwise_dynamic(is_tensor=[False, True], promotion_methods=[(0, 1, "DEFAULT")])
@triton.jit
def floor_div_func_scalar_tensor(x, y):
    # BUGFIX: previously tested x's type twice; both operands must be
    # integers to take the integer floor-division path.
    if x.type.scalar.is_int() & y.type.scalar.is_int():
        return _int_floordiv(x, y)
    else:
        return _float_floordiv(x, y)

200 

201 

def floor_divide(A, B):
    """Elementwise floor division (quotient rounded toward -infinity)."""
    logger.debug("GEMS FLOOR_DIVIDE")
    a_is_tensor = isinstance(A, torch.Tensor)
    b_is_tensor = isinstance(B, torch.Tensor)
    if a_is_tensor and b_is_tensor:
        return floor_div_func(A, B)
    if a_is_tensor:
        return floor_div_func_tensor_scalar(A, B)
    if b_is_tensor:
        return floor_div_func_scalar_tensor(A, B)
    # Neither operand is a tensor; Python's // already floors.
    return torch.tensor(A // B)

213 

214 

def floor_divide_(A, B):
    """In-place floor division of tensor A by B."""
    logger.debug("GEMS FLOOR_DIVIDE_")
    # Select the kernel by the divisor's kind; write the result into A.
    kernel = floor_div_func if isinstance(B, torch.Tensor) else floor_div_func_tensor_scalar
    return kernel(A, B, out0=A)

221 

222 

def div_mode(A, B, rounding_mode=None):
    """Division with torch.div's rounding_mode semantics.

    rounding_mode: None for true division, "trunc" to round toward zero,
    "floor" to round toward negative infinity. Raises ValueError for any
    other value.
    """
    if rounding_mode is None:
        return true_divide(A, B)
    if rounding_mode == "trunc":
        return trunc_divide(A, B)
    if rounding_mode == "floor":
        return floor_divide(A, B)
    msg = f"div expected rounding_mode to be one of None, 'trunc', or 'floor' but found {rounding_mode}."
    raise ValueError(msg)

233 

234 

def div_mode_(A, B, rounding_mode=None):
    """In-place variant of div_mode: divides A by B, writing into A.

    Raises ValueError for a rounding_mode other than None, "trunc",
    or "floor".
    """
    if rounding_mode is None:
        return true_divide_(A, B)
    if rounding_mode == "trunc":
        return trunc_divide_(A, B)
    if rounding_mode == "floor":
        return floor_divide_(A, B)
    msg = f"div expected rounding_mode to be one of None, 'trunc', or 'floor' but found {rounding_mode}."
    raise ValueError(msg)

245 

246 

@triton.jit
def _remainder(x, y):
    # Python/PyTorch-style modulus: the result's sign follows the divisor.
    # NOTE(review): assumes Triton's `%` truncates like C (sign follows the
    # dividend) — the correction below converts that to floored modulus.
    r = x % y
    c1 = r != 0  # remainder is non-zero
    c2 = (x < 0) ^ (y < 0)  # operands have opposite signs
    # When both hold, shifting by y flips the sign to match the divisor.
    return tl.where(c1 & c2, r + y, r)

253 

254 

# Remainder kernel for tensor/tensor operands.
@pointwise_dynamic(promotion_methods=[(0, 1, "DEFAULT")])
@triton.jit
def rem_tt(x, y):
    return _remainder(x, y)

259 

260 

# Remainder kernel for a tensor dividend and scalar divisor.
@pointwise_dynamic(is_tensor=[True, False], promotion_methods=[(0, 1, "DEFAULT")])
@triton.jit
def rem_ts(x, y):
    return _remainder(x, y)

265 

266 

# Remainder kernel for a scalar dividend and tensor divisor.
@pointwise_dynamic(is_tensor=[False, True], promotion_methods=[(0, 1, "DEFAULT")])
@triton.jit
def rem_st(x, y):
    return _remainder(x, y)

271 

272 

def remainder(A, B):
    """Elementwise Python-style modulus (result's sign follows the divisor).

    Dispatches to the Triton kernels when at least one operand is a
    tensor; falls back to Python's % when both are scalars.
    """
    # BUGFIX: debug message previously said "GEMS FLOOR_DIVIDE" — a
    # copy-paste error from floor_divide.
    logger.debug("GEMS REMAINDER")
    if isinstance(A, torch.Tensor) and isinstance(B, torch.Tensor):
        return rem_tt(A, B)
    elif isinstance(A, torch.Tensor):
        return rem_ts(A, B)
    elif isinstance(B, torch.Tensor):
        return rem_st(A, B)
    else:
        # Both scalar: Python's % already has divisor-sign semantics.
        return torch.tensor(A % B)

284 

285 

def remainder_(A, B):
    """In-place modulus: A %= B, with the sign following the divisor."""
    logger.debug("GEMS REMAINDER_")
    # Pick the kernel by the divisor's kind; results are written into A.
    kernel = rem_tt if isinstance(B, torch.Tensor) else rem_ts
    return kernel(A, B, out0=A)