Coverage for src/flag_gems/runtime/backend/_nvidia/heuristics_config_utils.py: 83%

235 statements  

« prev     ^ index     » next       coverage.py v7.6.9, created at 2026-03-10 02:30 +0800

1import torch 

2import triton 

3 

# Shared tile-size bounds used by the mean_* heuristics below.
_MIN_TILE_N = 64  # floor applied to TILE_N in mean_heur_tile_n_non_inner
_MAX_TILE_N_PER_ROW = 4096  # budget for TILE_N * TILE_K per row (divided by TILE_K to cap TILE_N)
_MAX_ONE_TILE_N = 2048  # extra cap on the desired TILE_N before rounding to a power of two

7 

8 

def simple_elementwise_blocksize_heur(args):
    """Fixed launch block size for simple elementwise kernels."""
    BLOCK_SIZE = 1024
    return BLOCK_SIZE

11 

12 

def argmax_heur_tile_k(args):
    """Pick TILE_K for the non-inner argmax reduction.

    Starts from a 64-wide tile and doubles it while the launch would still
    produce more than one wave of blocks across the device's SMs, capped at
    512 and at K itself.  Small K and one hand-tuned shape short-circuit.
    """
    max_tile = 512
    sm_count = torch.cuda.get_device_properties(
        torch.cuda.current_device()
    ).multi_processor_count

    k = args["K"]
    m = args["M"]
    is_fp32 = args["inp"].dtype == torch.float32

    # Hand-tuned special case for the (M=64, K=512) shape.
    if m == 64 and k == 512:
        return 64 if is_fp32 else 128

    # Small reductions: largest power of two not exceeding K.
    if k <= 128:
        return 1 << (k.bit_length() - 1) if k > 0 else 1

    cap = min(k, max_tile)
    tile = 64
    while tile <= cap:
        waves = (m * triton.cdiv(k, tile)) / sm_count
        if waves > 1 and tile * 2 <= cap:
            tile *= 2
        else:
            break

    return tile

42 

43 

def argmax_heur_tile_n_non_inner(args):
    """Pick TILE_N for non-inner argmax, keeping TILE_N * TILE_K bounded."""
    n = args["N"]
    tile_k = args["TILE_K"]

    # Small rows: a single tile covers the whole row exactly.
    if n <= 128:
        return n

    tile_n = triton.next_power_of_2(min(8192, n))
    tile_n = min(max(tile_n, 64), 4096)  # clamp to [64, 4096]

    # Keep the per-block footprint TILE_N * TILE_K within 32768 elements.
    if tile_n * tile_k > 32768:
        tile_n = max(64, 32768 // tile_k)

    return tile_n

59 

60 

def argmax_heur_one_tile_per_cta(args):
    """True when a single tile already covers the whole reduced axis."""
    return not (args["TILE_N"] < args["N"])

63 

64 

def argmax_heur_num_warps_non_inner(args):
    """num_warps for non-inner argmax: wider tiles get more warps.

    fp32 inputs are capped at 4 warps.
    """
    tile_n = args["TILE_N"]

    if tile_n <= 32:
        warps = 2
    elif tile_n <= 128:
        warps = 4
    else:
        warps = 8

    # Cap at 4 warps when the input is float32.
    if args["inp"].dtype == torch.float32:
        warps = min(warps, 4)

    return warps

82 

83 

def argmax_heur_tile_n_inner(args):
    """TILE_N for inner-axis argmax: whole row (power of two) up to 32K, else 4096."""
    n = args["N"]
    return triton.next_power_of_2(n) if n <= 32 * 1024 else 4096

89 

90 

def argmax_heur_num_warps_inner(args):
    """Scale num_warps with the inner TILE_N."""
    tile = args["TILE_N"]
    if tile >= 4096:
        return 16
    if tile >= 2048:
        return 8
    return 4

99 

100 

def argmin_heur_block_m(args):
    """Rows per block for argmin: more rows once M reaches 4096."""
    return 8 if args["M"] >= 4096 else 4

103 

104 

def argmin_heur_block_n(args):
    """Column tile for argmin: next power of two of N, capped at 4096."""
    block_n = triton.next_power_of_2(args["N"])
    return min(block_n, 4096)

107 

108 

def bmm_heur_divisible_m(args):
    """True when M is an exact multiple of TILE_M (no M-edge masking needed)."""
    remainder = args["M"] % args["TILE_M"]
    return remainder == 0

111 

112 

def bmm_heur_divisible_n(args):
    """True when N is an exact multiple of TILE_N (no N-edge masking needed)."""
    _, rem = divmod(args["N"], args["TILE_N"])
    return rem == 0

115 

116 

def bmm_heur_divisible_k(args):
    """True when K is an exact multiple of TILE_K (no K-edge masking needed)."""
    return not args["K"] % args["TILE_K"]

119 

120 

def baddbmm_heur_divisible_m(args):
    """True when TILE_M evenly divides M for baddbmm."""
    return args["M"] % args["TILE_M"] == 0

123 

124 

def baddbmm_heur_divisible_n(args):
    """True when TILE_N evenly divides N for baddbmm."""
    tile = args["TILE_N"]
    return args["N"] % tile == 0

127 

128 

def baddbmm_heur_divisible_k(args):
    """True when TILE_K evenly divides K for baddbmm."""
    k, tile_k = args["K"], args["TILE_K"]
    return (k // tile_k) * tile_k == k

131 

132 

def dropout_heur_block(args):
    """Block size for dropout: 512 for short rows, 1024 otherwise."""
    return 512 if args["N"] <= 512 else 1024

138 

139 

def dropout_heur_num_warps(args):
    """num_warps for dropout, stepped up as N crosses 512 and 1024."""
    n = args["N"]
    if n > 1024:
        return 16
    if n > 512:
        return 8
    return 4

147 

148 

def exponential_heur_block(args):
    """Block size for exponential_: small rows use 512, larger use 1024."""
    small = args["N"] <= 512
    return 512 if small else 1024

154 

155 

def exponential_heur_num_warps(args):
    """num_warps for exponential_: grows with row length."""
    n = args["N"]
    for bound, warps in ((512, 4), (1024, 8)):
        if n <= bound:
            return warps
    return 16

163 

164 

def gather_heur_block_m(args):
    """Row-block for gather: grows with ceil(N / 2048) rounded up, at most 4."""
    rows = triton.next_power_of_2(triton.cdiv(args["N"], 2048))
    return min(rows, 4)

167 

168 

def gather_heur_block_n(args):
    """Column-block for gather: power-of-two cover of N, capped at 2048."""
    block_n = triton.next_power_of_2(args["N"])
    return min(block_n, 2048)

171 

172 

def index_select_heur_block_m(args):
    """Row-block for index_select: ceil(256 / N) rounded up to a power of two, at most 4."""
    block_m = triton.next_power_of_2(triton.cdiv(256, args["N"]))
    return block_m if block_m < 4 else 4

175 

176 

def index_select_heur_block_n(args):
    """Column-block for index_select: N/16 rounded up to a power of two, clamped to [16, 512]."""
    block_n = triton.next_power_of_2(triton.cdiv(args["N"], 16))
    return max(16, min(block_n, 512))

180 

181 

def mm_heur_even_k(args):
    """True when K divides evenly into BLOCK_K * SPLIT_K chunks."""
    step = args["BLOCK_K"] * args["SPLIT_K"]
    return args["K"] % step == 0

184 

185 

def rand_heur_block(args):
    """Block size for rand: 512 below the 512-element threshold, else 1024."""
    if args["N"] > 512:
        return 1024
    return 512

191 

192 

def rand_heur_num_warps(args):
    """num_warps for rand: 4/8/16 as N crosses 512 and 1024."""
    n = args["N"]
    if n <= 512:
        return 4
    return 8 if n <= 1024 else 16

200 

201 

def randn_heur_block(args):
    """Block size for randn: 512 for N <= 512, otherwise 1024."""
    threshold = 512
    return 512 if args["N"] <= threshold else 1024

207 

208 

def randn_heur_num_warps(args):
    """num_warps for randn, increasing with N."""
    n = args["N"]
    if n > 1024:
        return 16
    if n > 512:
        return 8
    return 4

216 

217 

def softmax_heur_tile_k(args):
    """TILE_K for non-inner softmax.

    Doubles TILE_K from 1 while the launch still produces more than one wave
    of blocks across the SMs, capped at 8192 and at K itself.
    """
    max_tile_k = 8192
    sm_count = torch.cuda.get_device_properties(
        torch.cuda.current_device()
    ).multi_processor_count

    cap = min(args["K"], max_tile_k)
    tile_k = 1
    while tile_k <= cap:
        blocks = args["M"] * triton.cdiv(args["K"], tile_k)
        if blocks / sm_count > 1 and tile_k * 2 <= cap:
            tile_k *= 2
        else:
            break
    return tile_k

233 

234 

def softmax_heur_tile_n_non_inner(args):
    """TILE_N sized so TILE_N * TILE_K covers roughly 8192 elements."""
    budget = 8192
    return triton.cdiv(budget, args["TILE_K"])

237 

238 

def softmax_heur_one_tile_per_cta(args):
    """True when one tile spans the full softmax row."""
    return args["N"] <= args["TILE_N"]

241 

242 

def softmax_heur_num_warps_non_inner(args):
    """num_warps derived from the TILE_N * TILE_K footprint."""
    footprint = args["TILE_N"] * args["TILE_K"]
    if footprint >= 4096:
        return 16
    if footprint >= 2048:
        return 8
    return 4

251 

252 

def softmax_heur_tile_n_inner(args):
    """Whole-row tile (power of two) for rows up to 32K elements, else fixed 4096."""
    n = args["N"]
    return 4096 if n > 32 * 1024 else triton.next_power_of_2(n)

258 

259 

def softmax_heur_num_warps_inner(args):
    """num_warps scaled by the inner TILE_N."""
    tile_n = args["TILE_N"]
    for limit, warps in ((2048, 4), (4096, 8)):
        if tile_n < limit:
            return warps
    return 16

268 

269 

def softmax_heur_tile_n_bwd_non_inner(args):
    """TILE_N for backward non-inner: keep TILE_N * TILE_K near 1024, minimum 1."""
    tile_n = 1024 // args["TILE_K"]
    return tile_n if tile_n > 1 else 1

272 

273 

def softmax_heur_tile_m(args):
    """TILE_M for backward inner: 1024-element budget split by TILE_N, minimum 1."""
    return max(1024 // args["TILE_N"], 1)

276 

277 

def uniform_heur_block(args):
    """Block size for uniform sampling: 1024 once N exceeds 512."""
    return 1024 if args["N"] > 512 else 512

283 

284 

def uniform_heur_num_warps(args):
    """num_warps for uniform sampling, stepped with N."""
    n = args["N"]
    if n <= 512:
        warps = 4
    elif n <= 1024:
        warps = 8
    else:
        warps = 16
    return warps

292 

293 

def var_mean_heur_block_n(args):
    """BLOCK_N: the partial-block count rounded up to a power of two."""
    block_num = args["BLOCK_NUM"]
    return triton.next_power_of_2(block_num)

296 

297 

def upsample_nearest1d_SAME_L(args):
    """True when output length equals input length (identity resize)."""
    return args["IL"] == args["OL"]

300 

301 

def upsample_nearest1d_USE_INT32_IDX(args):
    """True when the flat N*C*OL output index fits in a signed 32-bit int."""
    int32_max = 2**31 - 1
    total = args["N"] * args["C"] * args["OL"]
    return total <= int32_max

304 

305 

def upsample_nearest2d_SAME_H(args):
    """True when output height matches input height."""
    return args["IH"] == args["OH"]

308 

309 

def upsample_nearest2d_SAME_W(args):
    """True when output width matches input width."""
    return args["IW"] == args["OW"]

312 

313 

def upsample_nearest2d_USE_INT32_IDX(args):
    """True when the flat N*C*OH*OW output index fits in a signed 32-bit int."""
    numel = args["N"] * args["C"] * args["OH"] * args["OW"]
    return numel <= 2**31 - 1

316 

317 

def batch_norm_heur_block_m(args):
    """BLOCK_M over the batch dim: power of two, capped at 2048."""
    return min(triton.next_power_of_2(args["batch_dim"]), 2048)

320 

321 

def batch_norm_heur_block_n(args):
    """BLOCK_N over the spatial dim, so BLOCK_M * BLOCK_N stays within 2**14."""
    block_m = batch_norm_heur_block_m(args)
    block_n = triton.next_power_of_2(args["spatial_dim"])
    # A maximum of 16384 elements are loaded at once.
    budget = max(1, 2**14 // block_m)
    return min(block_n, budget)

327 

328 

def vdot_heur_block_size(args):
    """BLOCK_SIZE for vdot, stepped by the element count."""
    n = args["n_elements"]
    if n >= 8192:
        return 1024
    if n >= 1024:
        return 256
    return 32

337 

338 

def mean_heur_tile_k(args):
    """TILE_K for the non-inner mean reduction.

    Doubles TILE_K from 1 while the launch oversubscribes the SMs, subject to:
    - TILE_K <= min(K, 512),
    - TILE_K small enough that TILE_N can stay >= _MIN_TILE_N within the
      _MAX_TILE_N_PER_ROW per-row budget,
    then raised (to a power of two) so cdiv(K, TILE_K) fits the 65535 grid-Y
    limit.
    """
    max_tile_k = 512
    max_grid_y = 65535
    sm_count = torch.cuda.get_device_properties(
        torch.cuda.current_device()
    ).multi_processor_count

    cap = min(args["K"], max_tile_k)
    cap = min(cap, max(1, _MAX_TILE_N_PER_ROW // _MIN_TILE_N))

    tile_k = 1
    while tile_k <= cap:
        blocks = args["M"] * triton.cdiv(args["K"], tile_k)
        if blocks / sm_count > 1 and tile_k * 2 <= cap:
            tile_k *= 2
        else:
            break

    # Ensure grid Y dimension does not exceed the CUDA limit.
    floor_tile_k = triton.cdiv(args["K"], max_grid_y)
    if floor_tile_k > tile_k:
        tile_k = triton.next_power_of_2(floor_tile_k)
    return tile_k

361 

362 

def mean_heur_tile_n_non_inner(args):
    """TILE_N for non-inner mean, balancing row coverage against the
    TILE_N * TILE_K budget (_MAX_TILE_N_PER_ROW)."""
    tile_k = args.get("TILE_K", 1)
    n = args.get("N", 1)

    # Largest TILE_N that keeps TILE_N * TILE_K within the per-row budget.
    k_limit = max(1, _MAX_TILE_N_PER_ROW // tile_k)

    desired = max(n, _MIN_TILE_N)
    desired = min(desired, k_limit, _MAX_ONE_TILE_N)

    tile_n = triton.next_power_of_2(desired)
    tile_n = min(tile_n, k_limit)
    return max(tile_n, _MIN_TILE_N)

374 

375 

def mean_heur_one_tile_per_cta(args):
    """True when one tile covers the whole reduced row."""
    return args["N"] <= args["TILE_N"]

378 

379 

# Registry consumed by the runtime: maps an operator key to a dict of
# Triton meta-parameter names ("TILE_N", "num_warps", ...) -> heuristic
# callable.  Each callable receives the kernel's `args` mapping and returns
# the value to use for that meta-parameter.
HEURISTICS_CONFIGS = {
    "argmax_non_inner": {
        "TILE_K": argmax_heur_tile_k,
        "TILE_N": argmax_heur_tile_n_non_inner,
        "ONE_TILE_PER_CTA": argmax_heur_one_tile_per_cta,
        "num_warps": argmax_heur_num_warps_non_inner,
    },
    "argmax_inner": {
        "TILE_N": argmax_heur_tile_n_inner,
        "ONE_TILE_PER_CTA": argmax_heur_one_tile_per_cta,
        "num_warps": argmax_heur_num_warps_inner,
    },
    "argmin": {
        "BLOCK_M": argmin_heur_block_m,
        "BLOCK_N": argmin_heur_block_n,
    },
    "bmm": {
        "DIVISIBLE_M": bmm_heur_divisible_m,
        "DIVISIBLE_N": bmm_heur_divisible_n,
        "DIVISIBLE_K": bmm_heur_divisible_k,
    },
    "baddbmm": {
        "DIVISIBLE_M": baddbmm_heur_divisible_m,
        "DIVISIBLE_N": baddbmm_heur_divisible_n,
        "DIVISIBLE_K": baddbmm_heur_divisible_k,
    },
    "dropout": {
        "BLOCK": dropout_heur_block,
        "num_warps": dropout_heur_num_warps,
    },
    "exponential_": {
        "BLOCK": exponential_heur_block,
        "num_warps": exponential_heur_num_warps,
    },
    "gather": {
        "BLOCK_M": gather_heur_block_m,
        "BLOCK_N": gather_heur_block_n,
    },
    "index_select": {
        "BLOCK_M": index_select_heur_block_m,
        "BLOCK_N": index_select_heur_block_n,
    },
    "mm": {
        "EVEN_K": mm_heur_even_k,
    },
    "rand": {
        "BLOCK": rand_heur_block,
        "num_warps": rand_heur_num_warps,
    },
    "randn": {
        "BLOCK": randn_heur_block,
        "num_warps": randn_heur_num_warps,
    },
    "softmax_non_inner": {
        "TILE_K": softmax_heur_tile_k,
        "TILE_N": softmax_heur_tile_n_non_inner,
        "ONE_TILE_PER_CTA": softmax_heur_one_tile_per_cta,
        "num_warps": softmax_heur_num_warps_non_inner,
    },
    "mean_non_inner": {
        "TILE_K": mean_heur_tile_k,
        "TILE_N": mean_heur_tile_n_non_inner,
        "ONE_TILE_PER_CTA": mean_heur_one_tile_per_cta,
        # Deliberately reuses the softmax warp heuristic (same footprint rule).
        "num_warps": softmax_heur_num_warps_non_inner,
    },
    "softmax_inner": {
        "TILE_N": softmax_heur_tile_n_inner,
        "ONE_TILE_PER_CTA": softmax_heur_one_tile_per_cta,
        "num_warps": softmax_heur_num_warps_inner,
    },
    "softmax_backward_non_inner": {
        "TILE_N": softmax_heur_tile_n_bwd_non_inner,
        "ONE_TILE_PER_CTA": softmax_heur_one_tile_per_cta,
    },
    "softmax_backward_inner": {
        "TILE_M": softmax_heur_tile_m,
        "ONE_TILE_PER_CTA": softmax_heur_one_tile_per_cta,
    },
    "uniform": {
        "BLOCK": uniform_heur_block,
        "num_warps": uniform_heur_num_warps,
    },
    "upsample_nearest1d": {
        "SAME_L": upsample_nearest1d_SAME_L,
        "USE_INT32_IDX": upsample_nearest1d_USE_INT32_IDX,
    },
    "upsample_nearest2d": {
        "SAME_H": upsample_nearest2d_SAME_H,
        "SAME_W": upsample_nearest2d_SAME_W,
        "USE_INT32_IDX": upsample_nearest2d_USE_INT32_IDX,
    },
    "var_mean": {
        "BLOCK_N": var_mean_heur_block_n,
    },
    "batch_norm": {
        "BLOCK_M": batch_norm_heur_block_m,
        "BLOCK_N": batch_norm_heur_block_n,
    },
    "vdot": {
        "BLOCK_SIZE": vdot_heur_block_size,
    },
    # Fixed MHA tilings selected by block width; values are constants.
    "mha_block_128": {
        "BLOCK_M": lambda args: 128,
        "BLOCK_N": lambda args: 32,
        "num_warps": lambda args: 4,
        "num_stages": lambda args: 3,
    },
    "mha_block_64": {
        "BLOCK_M": lambda args: 64,
        "BLOCK_N": lambda args: 64,
        "num_warps": lambda args: 4,
        "num_stages": lambda args: 3,
    },
    "mha_block_32": {
        "BLOCK_M": lambda args: 32,
        "BLOCK_N": lambda args: 64,
        "num_warps": lambda args: 4,
        "num_stages": lambda args: 3,
    },
    "mha_block_16": {
        "BLOCK_M": lambda args: 16,
        "BLOCK_N": lambda args: 64,
        "num_warps": lambda args: 4,
        "num_stages": lambda args: 3,
    },
    "elementwise_generic": {
        "BLOCK_SIZE": simple_elementwise_blocksize_heur,
        "num_warps": lambda args: 8,
    },
}