Coverage for src/flag_gems/runtime/backend/_hygon/ops/randperm.py: 0%
266 statements
« prev ^ index » next coverage.py v7.6.9, created at 2026-03-12 02:21 +0800
« prev ^ index » next coverage.py v7.6.9, created at 2026-03-12 02:21 +0800
1import logging
3import torch
4import triton
5import triton.language as tl
7from flag_gems import runtime
8from flag_gems.ops.topk import argsort
9from flag_gems.runtime import device, torch_device_fn
10from flag_gems.utils import libentry
11from flag_gems.utils.random_utils import philox_backend_seed_offset
13logger = logging.getLogger(__name__)
14device_ = device
# Integer min/max sentinels for every key width used below, wrapped in
# tl.constexpr so Triton kernels can consume them as compile-time constants
# (dispatched by _get_iinfo_val). They serve as padding keys that sort to the
# tail of a partially filled block.
_MIN_INT8_VAL = tl.constexpr(torch.iinfo(torch.int8).min)
_MAX_INT8_VAL = tl.constexpr(torch.iinfo(torch.int8).max)
_MIN_INT16_VAL = tl.constexpr(torch.iinfo(torch.int16).min)
_MAX_INT16_VAL = tl.constexpr(torch.iinfo(torch.int16).max)
_MIN_INT32_VAL = tl.constexpr(torch.iinfo(torch.int32).min)
_MAX_INT32_VAL = tl.constexpr(torch.iinfo(torch.int32).max)
_MIN_INT64_VAL = tl.constexpr(torch.iinfo(torch.int64).min)
_MAX_INT64_VAL = tl.constexpr(torch.iinfo(torch.int64).max)
# uint32 bounds spelled out as literals (not taken from torch.iinfo).
_MAX_UINT32_VAL = tl.constexpr((1 << 32) - 1)
_MIN_UINT32_VAL = tl.constexpr(0)
# 24-bit signed range; randperm uses int32 keys with 24 valid bits for
# n in (2**16, 2**24].
_MIN_INT24_VAL = tl.constexpr(-(2**23))
_MAX_INT24_VAL = tl.constexpr(2**23 - 1)
# Compile-time helper: return the maximum (return_max=True) or minimum
# representable value of the given Triton integer dtype. `dtype` and
# `return_max` are resolved during Triton compilation, so the branch ladder
# collapses to a single constant in the generated kernel.
@triton.jit
def _get_iinfo_val(
    dtype,
    return_max,
):
    if dtype is tl.int64:
        if return_max:
            return _MAX_INT64_VAL
        else:
            return _MIN_INT64_VAL
    elif dtype is tl.int32:
        if return_max:
            return _MAX_INT32_VAL
        else:
            return _MIN_INT32_VAL
    elif dtype is tl.int16:
        if return_max:
            return _MAX_INT16_VAL
        else:
            return _MIN_INT16_VAL
    elif dtype is tl.int8:
        if return_max:
            return _MAX_INT8_VAL
        else:
            return _MIN_INT8_VAL
    elif dtype is tl.uint32:
        if return_max:
            return _MAX_UINT32_VAL
        else:
            return _MIN_UINT32_VAL
    else:
        # Unsupported dtype: fail at Triton compile time.
        raise ValueError("Unknown dtype")
# Sort one batch of N (key, index) pairs entirely inside a single program via
# bitonic argsort. BLOCK_SIZE is a power of two >= N; lanes beyond N are
# padded with the key dtype's extreme value so they sort to the tail.
@libentry()
@triton.jit
def bitonic_sortbykey_kernel(
    y_ptr,        # out: sorted keys, one row of N per batch
    index_ptr,    # out: permuted indices, one row of N per batch
    chunk_x,      # in: keys
    chunk_index,  # in: indices (values to carry along)
    N: tl.constexpr,
    BLOCK_SIZE: tl.constexpr,
    DESCENDING: tl.constexpr,
):
    # Advance all pointers to this batch's row.
    cur_batch = tl.program_id(0)
    chunk_x += cur_batch * N
    chunk_index += cur_batch * N
    index_ptr += cur_batch * N
    y_ptr += cur_batch * N
    cols = tl.arange(0, BLOCK_SIZE)
    mask = cols < N
    # Padding key: +max for ascending, min for descending, so padded lanes
    # always land after every real element.
    mask_val = _get_iinfo_val(chunk_x.dtype.element_ty, return_max=not DESCENDING)
    chunk_x_val = tl.load(chunk_x + cols, mask=mask, other=mask_val)
    chunk_index_val = tl.load(chunk_index + cols, mask=mask)
    sorted_chunk_x, sorted_chunk_index = argsort(
        chunk_x_val, chunk_index_val, 0, descending=DESCENDING
    )
    # Reuse the precomputed bounds mask instead of recomputing `cols < N`.
    tl.store(y_ptr + cols, sorted_chunk_x, mask=mask)
    tl.store(index_ptr + cols, sorted_chunk_index, mask=mask)
# Map a signed-integer key tensor to an order-preserving unsigned ("biased")
# representation, widened to int64, by flipping the sign bit: negatives land
# in the low half of the range and non-negatives in the high half, so a plain
# unsigned radix sort then produces signed order. The dtype branches are
# tl.constexpr and resolve at compile time; other dtypes pass through as-is.
@triton.jit
def radix_type_convert(k):
    ik = k.to(tl.int64)
    if tl.constexpr(k.dtype == tl.int8):
        # Arithmetic shift of the sign-extended value: 1 for negatives.
        mask = (ik >> 7) & 0x1
        # negative -> keep low 7 bits ([0, 0x7F]); non-negative -> set bit 7.
        o = tl.where(mask, ik & 0x7F, ik | 0x80)
    elif tl.constexpr(k.dtype == tl.int16):
        mask = (ik >> 15) & 0x1
        o = tl.where(mask, ik & 0x7FFF, ik | 0x8000)
    elif tl.constexpr(k.dtype == tl.int32):
        mask = (ik >> 31) & 0x1
        o = tl.where(mask, ik & 0x7FFFFFFF, ik | 0x80000000)
    elif tl.constexpr(k.dtype == tl.int64):
        mask = (ik >> 63) & 0x1
        o = tl.where(mask, ik & 0x7FFFFFFFFFFFFFFF, ik | 0x8000000000000000)
    else:
        o = k
    return o
# Build, for all radix passes in one sweep, a per-tile histogram of key
# digits. Grid axis 0 tiles the keys; axis 1 splits the bins into segments so
# each program counts a disjoint range of bins. Output layout is
# [passes, bins + 1, grid0] with the extra leading bin reserved so a later
# cumsum yields exclusive offsets.
@libentry()
@triton.jit
def digit_hist_kernel(
    digit_hist,    # out: flat [passes, bins + 1, grid0] counts
    key,
    n_elements,
    bits_per_pass,
    bins,
    passes,
    bit_mask,      # bins - 1, extracts one digit
    bins_segment,  # number of bins handled by each program along axis 1
    BLOCK_SIZE: tl.constexpr,
):
    bin_segid = tl.program_id(1)
    pid0 = tl.program_id(0)
    grid0 = tl.num_programs(0)
    key_offset = pid0.to(tl.int64) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    key_mask = key_offset < n_elements
    key_data = tl.load(key + key_offset, mask=key_mask)
    # Order-preserving unsigned representation before slicing digits.
    ikey_data = radix_type_convert(key_data)
    bit_offset = 0
    for p in range(passes):
        key_digit = (ikey_data >> bit_offset) & bit_mask
        blk_bin_start = bin_segid * bins_segment
        for s in range(bins_segment):
            bin_id = s + blk_bin_start
            digit_mask = tl.where(key_digit == bin_id and key_mask, 1, 0)
            digit_sum = tl.sum(digit_mask)
            # +1 for exclusive
            bin_offset = p * (bins + 1) * grid0 + (bin_id + 1) * grid0 + pid0
            # reduce rather than global atomic for perf issue
            tl.store(digit_hist + bin_offset, digit_sum)
        # Zero the leading slot of this pass so the cumsum is exclusive.
        tl.store(digit_hist + p * (bins + 1) * grid0 + pid0, 0, mask=bin_segid == 0)
        bit_offset += bits_per_pass
# One radix pass: scatter (key, value) pairs to their globally sorted
# positions for the current digit, computing cross-tile prefix sums with a
# decoupled-lookback scheme instead of a separate scan kernel.
#
# d_lookback entry encoding (int32): low 30 bits hold a count; bit 30 marks a
# tile-local ("partial") count, bit 31 an inclusive ("global") prefix. A value
# of 0 means "not published yet".
@libentry()
@triton.autotune(
    configs=runtime.get_tuned_config("randperm"),
    key=["n_elements"],
)
@triton.jit
def radix_sortbykey_scatter_kernel(
    key_out,
    value_out,
    key_in,
    value_in,
    digit_hist,            # exclusive per-digit base offsets for this portion
    d_lookback,            # cross-tile communication table (see encoding above)
    n_elements,
    bit_offset,            # bit position of the digit for this pass
    passes,
    p,                     # current pass index
    num_portions,
    portion_size,
    portion_id,
    bit_mask,
    bins_segment,          # bins handled per program along grid axis 1
    max_tiles_per_portion,
    bins: tl.constexpr,
    BLOCK_SIZE: tl.constexpr,
):
    LOOKBACK_PARTIAL_MASK = 1 << 30
    LOOKBACK_GLOBAL_MASK = 1 << 31
    LOOKBACK_KIND_MASK = LOOKBACK_PARTIAL_MASK | LOOKBACK_GLOBAL_MASK
    LOOKBACK_VALUE_MASK = ~LOOKBACK_KIND_MASK
    pid0 = tl.program_id(0)
    # 64-bit addressing: the global element offset can exceed int32 range.
    portion_id_i64 = portion_id
    portion_id_i64 = portion_id_i64.to(tl.int64)
    key_offset = (
        portion_id_i64 * portion_size
        + pid0.to(tl.int64) * BLOCK_SIZE
        + tl.arange(0, BLOCK_SIZE)
    )
    key_mask = key_offset < n_elements
    value_data = tl.load(value_in + key_offset, mask=key_mask)
    key_data = tl.load(key_in + key_offset, mask=key_mask)
    ikey_data = radix_type_convert(key_data)
    key_digit = (ikey_data >> bit_offset) & bit_mask
    blk_bin_start = tl.program_id(1) * bins_segment
    last_block = tl.program_id(0) == tl.num_programs(0) - 1
    for s in range(bins_segment):
        bin_id = s + blk_bin_start
        key_digit_mask = (key_digit == bin_id) & key_mask
        key_elem_mask = tl.where(key_digit_mask, 1, 0)
        # 0-based rank of each matching element within this tile.
        key_block_rank = tl.cumsum(key_elem_mask)
        key_block_rank = tl.where(key_digit_mask, key_block_rank - 1, 0)
        bin_of_bucket = tl.sum(key_elem_mask)
        # Publish this tile's local count so later tiles can look back at it.
        partial_counter = bin_of_bucket | LOOKBACK_PARTIAL_MASK
        tl.store(
            d_lookback
            + ((portion_id * passes + p) * max_tiles_per_portion + pid0) * bins
            + bin_id,
            partial_counter,
            cache_modifier=".cg",
        )
        bin_offset = p * (bins + 1) + bin_id
        prefix_offsets = tl.load(
            digit_hist + bin_offset + portion_id * passes * (bins + 1)
        )
        # Walk preceding tiles, accumulating counts, until one with a
        # finished inclusive ("global") prefix is reached.
        bk = pid0 - 1
        inc_sum = bin_of_bucket
        while bk >= 0:
            rd_lbk_offset = (
                (portion_id * passes + p) * max_tiles_per_portion + bk
            ) * bins + bin_id
            partial_prefix = 0
            # Spin until tile bk publishes; atomic_cas(ptr, 0, 0) acts as a
            # plain load with acquire semantics.
            while partial_prefix == 0:
                partial_prefix = tl.atomic_cas(
                    d_lookback + rd_lbk_offset, 0, 0, sem="acquire"
                )
            inc_sum += (partial_prefix & LOOKBACK_VALUE_MASK).to(tl.int32)
            if partial_prefix & LOOKBACK_GLOBAL_MASK:
                # break
                bk = -1
            else:
                bk -= 1
        # Upgrade our entry to an inclusive prefix so later tiles stop here.
        global_counter = inc_sum | LOOKBACK_GLOBAL_MASK
        tl.store(
            d_lookback
            + ((portion_id * passes + p) * max_tiles_per_portion + pid0) * bins
            + bin_id,
            global_counter,
            cache_modifier=".cg",
        )
        inc_bucket_offset = prefix_offsets.to(tl.int64) + inc_sum.to(tl.int64)
        # Last tile of a portion seeds the next portion's base offsets.
        if last_block and portion_id < num_portions - 1:
            tl.store(
                digit_hist + bin_offset + (portion_id + 1) * passes * (bins + 1),
                inc_bucket_offset,
            )
        global_offsets = (
            inc_bucket_offset - bin_of_bucket.to(tl.int64) + key_block_rank.to(tl.int64)
        )
        tl.store(key_out + global_offsets, key_data, mask=key_digit_mask)
        tl.store(value_out + global_offsets, value_data, mask=key_digit_mask)
# for parallelization, randomly shuffle the entire block rather than adjacent equal elements as pytorch GPU backend
@libentry()
@triton.jit(do_not_specialize=["philox_seed", "philox_offset"])
def duplicate_keys_shuffle_kernel(
    value_in, n_elements, philox_seed, philox_offset, BLOCK_SIZE: tl.constexpr
):
    # In-place random permutation of each BLOCK_SIZE-sized block of value_in:
    # draw one Philox random number per element, argsort those numbers, and
    # scatter the block's values according to the resulting permutation.
    pid0 = tl.program_id(0)
    offset_range = tl.arange(0, BLOCK_SIZE)
    value_offset = pid0.to(tl.int64) * BLOCK_SIZE + offset_range
    value_mask = value_offset < n_elements
    value_data = tl.load(value_in + value_offset, mask=value_mask)
    # Philox counter setup: split the 64-bit offset into two 32-bit words and
    # make the low word unique per element so streams don't collide.
    philox_seed = philox_seed.to(tl.int64)
    philox_offset = philox_offset.to(tl.int64)
    c0 = (philox_offset & 0xFFFFFFFF).to(tl.uint32)
    c1 = ((philox_offset >> 32) & 0xFFFFFFFF).to(tl.uint32)
    i4 = tl.program_id(0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    c0 += i4
    _O = c0 * 0
    r0, _, _, _ = tl.philox(philox_seed, c0, c1, _O, _O)
    _block_size = BLOCK_SIZE
    # Random sort keys in [0, BLOCK_SIZE); out-of-range lanes get the max
    # uint32 so they sort to the tail and never overwrite real data.
    r1 = r0 % _block_size.to(tl.uint32)
    mask_val = _get_iinfo_val(tl.uint32, True)
    r1 = tl.where(value_offset < n_elements, r1, mask_val)
    _, sorted_chunk_index = argsort(r1, offset_range, 0, descending=False)
    # Scatter values through the argsort permutation (ties broken by the
    # stable argsort, so this is a valid in-block permutation).
    store_offset = pid0.to(tl.int64) * BLOCK_SIZE + sorted_chunk_index.to(tl.int64)
    tl.store(value_in + store_offset, value_data, mask=store_offset < n_elements)
def sort_by_key(key, value, valid_bits, generator=None):
    """Sort `value` by `key` (ascending) and return the permuted values.

    Two strategies, chosen by problem size:

    * n <= 2048: a single-program bitonic sort-by-key.
    * otherwise: an LSD radix sort, 4 bits per pass, with a decoupled-lookback
      scatter kernel, followed by a random intra-block shuffle so elements
      with duplicate keys come out in random order.

    Args:
        key: 1-D integer tensor of sort keys.
        value: 1-D tensor of the same length as `key`.
        valid_bits: number of significant key bits; bounds the pass count.
        generator: optional torch RNG generator for the final shuffle.

    Returns:
        A new tensor holding `value` permuted into key order.
    """
    n_elements = key.numel()
    if n_elements > 2 * 1024:
        # ---- radix method ----
        BLOCK_SIZE = 1024
        bits_per_pass = 4
        bits_per_segment = 3
        passes = triton.cdiv(valid_bits, bits_per_pass)
        bins = 2**bits_per_pass
        bins_per_segment = 2**bits_per_segment  # bins per grid-axis-1 program
        bit_mask = bins - 1

        # Keys are processed in portions of at most 2**30 elements so tile
        # counters fit in int32 with 2 bits reserved for the lookback flags.
        portion_size = 2**30  # 2 bits reserved for mask
        num_portions = triton.cdiv(n_elements, portion_size)
        max_portion_items = portion_size if num_portions > 1 else n_elements
        max_tiles_per_portion = triton.cdiv(max_portion_items, BLOCK_SIZE)

        # Offsets may exceed int32 range only when there are multiple portions.
        hist_dtype = torch.int64 if num_portions > 1 else torch.int32
        grid_hist = (triton.cdiv(n_elements, BLOCK_SIZE), bins // bins_per_segment)

        # Per-tile digit counts, later reduced into exclusive prefix offsets.
        digit_hist_slice = torch.empty(
            (passes, bins + 1, grid_hist[0]), dtype=hist_dtype, device=key.device
        )
        digit_hist = torch.empty(
            (num_portions, passes, bins + 1), dtype=hist_dtype, device=key.device
        )
        # Lookback table: one int32 status/count per (portion, pass, tile, bin).
        d_lookback = torch.empty(
            num_portions * passes * bins * max_tiles_per_portion,
            dtype=torch.int32,
            device=key.device,
        )

        # Ping-pong buffers for the out-of-place scatter passes.
        key_out_p = torch.empty_like(key)
        key_out_q = torch.empty_like(key)
        value_out_p = torch.empty_like(value)
        value_out_q = torch.empty_like(value)

        # step1: histogram the digits of every pass in one sweep.
        d_lookback.zero_()  # entries must start as 0 ("not published")
        with torch_device_fn.device(key.device):
            digit_hist_kernel[grid_hist](
                digit_hist_slice,
                key,
                n_elements,
                bits_per_pass,
                bins,
                passes,
                bit_mask,
                bins_per_segment,
                BLOCK_SIZE,
            )

        # step2: reduce per-tile counts and form exclusive prefix offsets;
        # broadcast-copy the same base offsets to every portion.
        digit_hist_slice = torch.sum(digit_hist_slice, dim=2, keepdim=False)
        digit_hist_slice = digit_hist_slice.cumsum(dim=1)  # shape of [passes, bins + 1]
        digit_hist.copy_(digit_hist_slice)

        bit_offset = 0
        for p in range(passes):
            # Alternate input/output buffers each pass; pass 0 reads the
            # original key/value tensors.
            k_in = (key if p == 0 else key_out_p) if p % 2 == 0 else key_out_q
            v_in = (value if p == 0 else value_out_p) if p % 2 == 0 else value_out_q
            k_out = key_out_q if p % 2 == 0 else key_out_p
            v_out = value_out_q if p % 2 == 0 else value_out_p
            # step3: stable scatter by the current digit, portion by portion.
            for portion_id in range(num_portions):
                portion_items = min(
                    n_elements - portion_id * portion_size, portion_size
                )
                tiles_per_portion = triton.cdiv(portion_items, BLOCK_SIZE)
                grid_scatter = (tiles_per_portion, grid_hist[1])
                with torch_device_fn.device(key.device):
                    radix_sortbykey_scatter_kernel[grid_scatter](
                        k_out,
                        v_out,
                        k_in,
                        v_in,
                        digit_hist,
                        d_lookback,
                        n_elements,
                        bit_offset,
                        passes,
                        p,
                        num_portions,
                        portion_size,
                        portion_id,
                        bit_mask,
                        bins_per_segment,
                        max_tiles_per_portion,
                        bins,
                        BLOCK_SIZE,
                    )
            bit_offset += bits_per_pass

        # last step: randomly shuffle data within each block so duplicate
        # keys are emitted in random order.
        BLOCK_SIZE_SHUFFLE = 512
        grid_shuffle = (triton.cdiv(n_elements, BLOCK_SIZE_SHUFFLE),)
        philox_seed, philox_offset = philox_backend_seed_offset(
            n_elements, generator=generator
        )
        with torch_device_fn.device(key.device):
            duplicate_keys_shuffle_kernel[grid_shuffle](
                v_out,
                n_elements,
                philox_seed,
                philox_offset,
                BLOCK_SIZE_SHUFFLE,
                num_warps=4,
            )
        return v_out
    else:
        # ---- bitonic method (small n) ----
        BLOCK_SIZE = triton.next_power_of_2(n_elements)
        grid = (1,)
        k_out = torch.empty_like(key)
        v_out = torch.empty_like(value)
        with torch_device_fn.device(key.device):
            bitonic_sortbykey_kernel[grid](
                k_out, v_out, key, value, n_elements, BLOCK_SIZE, False
            )
        return v_out
def randperm(
    n,
    *,
    generator=None,
    out=None,
    dtype=torch.int64,
    layout=torch.strided,
    device=None,
    requires_grad=False,
    pin_memory=False,
):
    """Return a random permutation of the integers [0, n) as a 1-D tensor.

    Implementation: draw one random key per element and sort [0, n) by key.
    The key dtype is the narrowest integer type whose range covers n, which
    minimizes the number of radix passes in sort_by_key.

    `out`, `layout`, `requires_grad` and `pin_memory` are accepted for
    signature compatibility with torch.randperm but are not used here.
    """
    # NOTE(fix): message previously said "GEMS_MTHREADS" — copy-paste from
    # another vendor backend; this file is the hygon backend.
    logger.debug("GEMS_HYGON RANDPERM")
    assert dtype in (torch.int16, torch.int32, torch.int64)
    assert n <= _MAX_INT64_VAL, "n exceeds maximum int64"

    if device is None:
        device = torch.device(device_.name)
    in_range = torch.arange(n, dtype=dtype, device=device)

    # Unsigned-range thresholds for choosing the key width.
    u8max = 2**8
    u16max = 2**16
    u24max = 2**24
    u32max = 2**32

    if n <= u8max:
        valid_bits = 8
        key_dtype = torch.int8
        keymin = _MIN_INT8_VAL
        keymax = _MAX_INT8_VAL
    elif n <= u16max:
        valid_bits = 16
        key_dtype = torch.int16
        keymin = _MIN_INT16_VAL
        keymax = _MAX_INT16_VAL
    elif n <= u24max:
        # 24 valid bits stored in an int32 key: saves two radix passes
        # compared to sorting all 32 bits.
        valid_bits = 24
        key_dtype = torch.int32
        keymin = _MIN_INT24_VAL
        keymax = _MAX_INT24_VAL
    elif n <= u32max:
        valid_bits = 32
        key_dtype = torch.int32
        keymin = _MIN_INT32_VAL
        keymax = _MAX_INT32_VAL
    else:
        valid_bits = 64
        key_dtype = torch.int64
        keymin = _MIN_INT64_VAL
        keymax = _MAX_INT64_VAL

    rand_key = torch.randint(
        low=keymin, high=keymax, size=[n], dtype=key_dtype, device=device
    )
    perm_range = sort_by_key(rand_key, in_range, valid_bits, generator=generator)
    return perm_range