Coverage for src/flag_gems/ops/scatter.py: 99%
251 statements

import importlib
import logging
import os
from typing import Any, Callable, List, Mapping, Tuple

import torch

from flag_gems.utils.code_cache import code_cache_dir
from flag_gems.utils.code_utils import IndentedBuffer, write_atomic
from flag_gems.utils.shape_utils import (
    MemOverlap,
    has_internal_overlapping,
    restride_dim,
)

logger = logging.getLogger(__name__)


def generate_imports(code: IndentedBuffer) -> IndentedBuffer:
    code.writeline("import torch")
    code.writeline("import triton")
    code.writeline("import triton.language as tl")
    code.newline()
    code.writeline("from flag_gems.utils import libentry")
    code.writeline("from flag_gems import runtime")
    code.writeline("import flag_gems")
    # code.writeline("from flag_gems.utils import triton_lang_extension as tle")
    code.newline()
    code.newline()
    return code


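# For reference, generate_imports emits this header into every generated
# module:
#
#   import torch
#   import triton
#   import triton.language as tl
#
#   from flag_gems.utils import libentry
#   from flag_gems import runtime
#   import flag_gems

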
def generate_scatter_kernel(
    rank: int,
    kernel_name: str,
    code: IndentedBuffer,
) -> IndentedBuffer:
    code.newline()

    # heuristic functions consumed by @triton.heuristics below
43 code.writeline("def heur_block(args):")
44 with code.indent():
45 code.writeline("if(flag_gems.vendor_name in ['metax', 'iluvatar']):")
46 with code.indent():
47 code.writeline("return 256")
48 code.writeline("return 128")
49 code.newline()
50 code.newline()
52 code.writeline("def loop_count(args):")
53 with code.indent():
54 code.writeline("return 4")
55 code.newline()
56 code.newline()
58 # the decorators
59 code.writeline("@libentry()")
60 code.writeline("@triton.heuristics(")
61 with code.indent():
62 code.writeline("{")
63 with code.indent():
64 code.writeline('"BLOCK": heur_block,')
65 code.writeline('"LOOP": loop_count,')
66 code.writeline("}")
67 code.writeline(")")
68 inp_stride_vars = ",".join(f"'inp_stride_{i}'" for i in range(rank))
69 index_stride_vars = ",".join(f"'index_stride_{i}'" for i in range(rank))
70 src_stride_vars = ",".join(f"'src_stride_{i}'" for i in range(rank))
71 shape_vars = ",".join(f"'shape_{i}'" for i in range(rank))
72 code.writeline(
73 f"@triton.jit(do_not_specialize=['N','stride_dim','inp_size_dim',"
74 f"{inp_stride_vars},{index_stride_vars},{src_stride_vars},{shape_vars}])"
75 )

    # signature
    code.writeline(f"def {kernel_name}(")
    with code.indent():
        if rank > 0:
            code.writeline("src_strided,")
            code.writeline("index,")
            code.writeline("inp,")
            code.writeline("out,")

            stride_args = ", ".join(f"inp_stride_{i}: int" for i in range(rank))
            code.writeline(f"{stride_args}, # stride for inp")

            stride_args = ", ".join(f"index_stride_{i}: int" for i in range(rank))
            code.writeline(f"{stride_args}, # stride for index")

            stride_args = ", ".join(f"src_stride_{i}: int" for i in range(rank))
            code.writeline(f"{stride_args}, # stride for src")

            shape_args = ", ".join(f"shape_{i}: int" for i in range(rank))
            code.writeline(f"{shape_args}, # shape")

        code.writeline("inp_size_dim,")
        code.writeline("stride_dim,")
        code.writeline("N,")
        # reduce options
        code.writeline("IS_ADD: tl.constexpr,")
        code.writeline("IS_MUL: tl.constexpr,")
        code.writeline("BLOCK: tl.constexpr,")
        code.writeline("LOOP: tl.constexpr,")
        code.writeline("INT32_OFFSET: tl.constexpr")

    code.writeline("):")

    # kernel body
    with code.indent():
        code.writeline("pid = tl.program_id(0)")
        code.writeline("if not INT32_OFFSET:")
        with code.indent():
            code.writeline("pid = pid.to(tl.int64)")
        code.writeline("offsets = pid * LOOP * BLOCK + tl.arange(0, BLOCK)")

        # 1. calculate inp_offsets, idx_offsets and src_offsets
        code.writeline("for loop_iter in tl.static_range(LOOP):")
        with code.indent():
            code.writeline("mask = offsets < N")
            code.writeline("cur_idx = offsets")
            code.writeline("if INT32_OFFSET:")
            with code.indent():
                code.writeline("inp_offsets = tl.zeros((BLOCK, ), dtype=tl.int32)")
                code.writeline("idx_offsets = tl.zeros((BLOCK, ), dtype=tl.int32)")
                code.writeline("src_offsets = tl.zeros((BLOCK, ), dtype=tl.int32)")
            code.writeline("else:")
            with code.indent():
                code.writeline("inp_offsets = tl.zeros((BLOCK, ), dtype=tl.int64)")
                code.writeline("idx_offsets = tl.zeros((BLOCK, ), dtype=tl.int64)")
                code.writeline("src_offsets = tl.zeros((BLOCK, ), dtype=tl.int64)")
            # unravel the flat task id into per-dim coordinates, last dim first
            for i in reversed(range(rank)):
                code.writeline("if INT32_OFFSET:")
                with code.indent():
                    code.writeline(f"shape_{i} = shape_{i}.to(tl.int32)")
                    code.writeline(f"inp_stride_{i} = inp_stride_{i}.to(tl.int32)")
                    code.writeline(f"index_stride_{i} = index_stride_{i}.to(tl.int32)")
                    code.writeline(f"src_stride_{i} = src_stride_{i}.to(tl.int32)")
                code.writeline(f"mod = cur_idx % shape_{i}")
                code.writeline(f"inp_offsets += mod * inp_stride_{i}")
                code.writeline(f"idx_offsets += mod * index_stride_{i}")
                code.writeline(f"src_offsets += mod * src_stride_{i}")
                if i != 0:
                    code.writeline(f"cur_idx = cur_idx // shape_{i}")

            # 2. use the offsets to scatter
            code.writeline(
                "cur_src = tl.load(src_strided + src_offsets, mask=mask, other=0)"
            )
            code.writeline(
                "cur_index = tl.load(index + idx_offsets, mask=mask, other=0)"
            )
            code.writeline("if INT32_OFFSET:")
            with code.indent():
                code.writeline("cur_index = cur_index.to(tl.int32)")
                code.writeline("stride_dim = stride_dim.to(tl.int32)")

            code.writeline("dim_offsets = cur_index * stride_dim")
            code.writeline("inp_offsets += dim_offsets")
            code.newline()
            code.writeline("if IS_ADD:")
            with code.indent():
                code.writeline(
                    "tl.atomic_add(out + inp_offsets, cur_src, mask=mask, sem='relaxed')"
                )
            code.writeline("elif IS_MUL:")
            with code.indent():
                # emulate an atomic multiply with a compare-and-swap retry loop
                code.writeline("stop = tl.where(mask, 0, 1).to(tl.int1)")
                code.writeline("block_stop = False")
                code.writeline("while not block_stop:")
                with code.indent():
                    code.writeline(
                        "cur_inp = tl.load(out + inp_offsets, mask=mask, other=0)"
                    )
                    code.writeline("res = tl.where(stop, cur_inp, cur_inp * cur_src)")
                    code.writeline(
                        "cas_res = tl.atomic_cas(out + inp_offsets, cur_inp, res, sem='relaxed')"
                    )
                    code.writeline("stop |= cur_inp == cas_res")
                    code.writeline("block_stop = tl.sum(stop.to(tl.int32)) == BLOCK")

            code.writeline("else:")
            with code.indent():
                code.writeline("tl.store(out + inp_offsets, cur_src, mask=mask)")

            code.writeline("offsets += BLOCK")

    code.newline()
    code.newline()
    return code


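# For reference, with rank == 1 the generator above emits roughly the
# following kernel (abridged; the INT32_OFFSET casts and the IS_MUL
# compare-and-swap loop are elided):
#
#   @libentry()
#   @triton.heuristics({"BLOCK": heur_block, "LOOP": loop_count})
#   @triton.jit(do_not_specialize=['N', 'stride_dim', 'inp_size_dim',
#                                  'inp_stride_0', 'index_stride_0',
#                                  'src_stride_0', 'shape_0'])
#   def _scatter_jit_function(
#       src_strided, index, inp, out,
#       inp_stride_0: int, index_stride_0: int, src_stride_0: int,
#       shape_0: int, inp_size_dim, stride_dim, N,
#       IS_ADD: tl.constexpr, IS_MUL: tl.constexpr,
#       BLOCK: tl.constexpr, LOOP: tl.constexpr, INT32_OFFSET: tl.constexpr,
#   ):
#       pid = tl.program_id(0)
#       offsets = pid * LOOP * BLOCK + tl.arange(0, BLOCK)
#       for loop_iter in tl.static_range(LOOP):
#           mask = offsets < N
#           mod = offsets % shape_0
#           inp_offsets = mod * inp_stride_0
#           idx_offsets = mod * index_stride_0
#           src_offsets = mod * src_stride_0
#           cur_src = tl.load(src_strided + src_offsets, mask=mask, other=0)
#           cur_index = tl.load(index + idx_offsets, mask=mask, other=0)
#           inp_offsets += cur_index * stride_dim
#           if IS_ADD:
#               tl.atomic_add(out + inp_offsets, cur_src, mask=mask, sem='relaxed')
#           elif IS_MUL:
#               ...  # atomic_cas retry loop approximating an atomic multiply
#           else:
#               tl.store(out + inp_offsets, cur_src, mask=mask)
#           offsets += BLOCK

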
def parameter_for_wrapper() -> str:
    # wrapper parameters: src_strided, index, inp, out, dim_size, dim_stride,
    # N, reduce, int32_offset
    parameters: List[str] = []

    parameters.append("src_strided")
    parameters.append("index")
    parameters.append("inp")
    parameters.append("out")
    parameters.append("dim_size")
    parameters.append("dim_stride")
    parameters.append("N")
    parameters.append("reduce: tl.constexpr = None")
    parameters.append("int32_offset: tl.constexpr = None")

    return ", ".join(parameters)


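# The string built above is therefore:
#
#   "src_strided, index, inp, out, dim_size, dim_stride, N, "
#   "reduce: tl.constexpr = None, int32_offset: tl.constexpr = None"

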
def generate_destination_passing_wrapper(
    rank: int,
    wrapper_name: str,
    kernel_name: str,
    code: IndentedBuffer,
) -> IndentedBuffer:
    parameters: str = parameter_for_wrapper()
    wrapper_signature: str = f"def {wrapper_name}({parameters}):"
    code.writeline(wrapper_signature)

    with code.indent():
        code.writeline("inp_strides = list(inp.stride())")
        code.writeline("index_strides = index.stride()")
        code.writeline("src_strides = src_strided.stride()")
        code.writeline("index_shapes = list(index.shape)")
        code.writeline("inp_size_dim = dim_size")
        code.writeline("stride_dim = dim_stride")

        code.writeline('IS_ADD = reduce == "add"')
        code.writeline('IS_MUL = reduce == "multiply"')
        # default to int32 offsets only when the caller did not decide;
        # `int32_offset or True` would wrongly override an explicit False
        code.writeline("int32_offset = True if int32_offset is None else int32_offset")

        # kernel launch
        code.writeline("grid = lambda meta: (")
        with code.indent():
            code.writeline('triton.cdiv(N, meta["BLOCK"] * meta["LOOP"]),')
        code.writeline(")")

        kernel_launch: str = f"{kernel_name}[grid]("
        code.writeline(kernel_launch)

        with code.indent():
            code.writeline("src_strided, index, inp, out,")
            if rank > 0:
                s = ", ".join(f"inp_strides[{i}]" for i in range(rank))
                code.writeline(f"{s},")

                s = ", ".join(f"index_strides[{i}]" for i in range(rank))
                code.writeline(f"{s},")

                s = ", ".join(f"src_strides[{i}]" for i in range(rank))
                code.writeline(f"{s},")

                s = ", ".join(f"index_shapes[{i}]" for i in range(rank))
                code.writeline(f"{s},")

            code.writeline("inp_size_dim,")
            code.writeline("stride_dim,")
            code.writeline("N,")
            # reduce options
            code.writeline("IS_ADD,")
            code.writeline("IS_MUL,")
            code.writeline("INT32_OFFSET=int32_offset,")
        code.writeline(")")
        code.writeline("return out")

    return code


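# For rank == 1 the emitted wrapper looks roughly like this (abridged; the
# real output spreads the grid and the launch across more lines):
#
#   def _scatter_wrapper(src_strided, index, inp, out, dim_size, dim_stride,
#                        N, reduce: tl.constexpr = None,
#                        int32_offset: tl.constexpr = None):
#       inp_strides = list(inp.stride())
#       index_strides = index.stride()
#       src_strides = src_strided.stride()
#       index_shapes = list(index.shape)
#       IS_ADD = reduce == "add"
#       IS_MUL = reduce == "multiply"
#       int32_offset = True if int32_offset is None else int32_offset
#       grid = lambda meta: (triton.cdiv(N, meta["BLOCK"] * meta["LOOP"]),)
#       _scatter_jit_function[grid](
#           src_strided, index, inp, out,
#           inp_strides[0], index_strides[0], src_strides[0], index_shapes[0],
#           dim_size, dim_stride, N, IS_ADD, IS_MUL, INT32_OFFSET=int32_offset,
#       )
#       return out

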
def generate_code(
    inputs: Tuple[Any],
    wrapper_name: str,
    kernel_name: str,
    code: IndentedBuffer,
) -> IndentedBuffer:
    # inputs: (src_strided, index, inp, out, dim_size, dim_stride, N, reduce)
    # the index tensor determines the iteration space, hence the rank
    shape = inputs[1].shape
    rank = len(shape)

    code = generate_imports(code)
    code = generate_scatter_kernel(rank, kernel_name, code)
    code = generate_destination_passing_wrapper(rank, wrapper_name, kernel_name, code)
    return code


class ScatterFunction:
    def __init__(self):
        self.pid = os.getpid()
        self.overloads: Mapping[str, Callable] = {}

    def __call__(self, *args, **kwargs):
        key = f"{self.arg_key(*args)}"
        if key in self.overloads:
            overload = self.overloads[key]
        else:
            code = IndentedBuffer()
            code = generate_code(
                args,
                "_scatter_wrapper",
                "_scatter_jit_function",
                code,
            )

            file_name = f"scatter_rank_{key}.py"
            file_path = code_cache_dir() / file_name
            write_atomic(file_path, code.getvalue())

            # load
            spec = importlib.util.spec_from_file_location(
                f"_gen_module_rank_{key}",
                file_path,
            )
            m = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(m)
            overload = getattr(m, "_scatter_wrapper")
            self.overloads[key] = overload

        return overload(*args, **kwargs)

    def arg_key(self, *args):
        tensors = [item for item in args if torch.is_tensor(item)]
        max_rank = max(item.ndim for item in tensors)
        return max_rank


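# Each process caches one compiled overload per maximum tensor rank, so
# repeated scatters over same-rank tensors skip codegen entirely; e.g.
# (hypothetical call pattern):
#
#   _scatter_func(src3d, idx3d, inp3d, out3d, ...)  # rank 3: codegen + import
#   _scatter_func(src3d, idx3d, inp3d, out3d, ...)  # rank 3 again: cache hit
#   _scatter_func(src2d, idx2d, inp2d, out2d, ...)  # rank 2: new module

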
_scatter_func = ScatterFunction()


def scatter(inp, dim, index, src, reduce=None):
    logger.debug("GEMS SCATTER")
    out = inp.clone()

    if reduce is not None:
        assert inp.dtype not in (
            torch.bfloat16,
        ), "Unsupported operation: scatter with reduce does not support bfloat16 tensors."

    if has_internal_overlapping(out) == MemOverlap.Yes:
        out = out.contiguous()

    src_strided = src.as_strided(index.shape, src.stride())
    inp_restrided = restride_dim(inp, dim, index.shape)
    dim_size = inp.size(dim)
    dim_stride = inp.stride(dim)
    N = index.numel()

    # int32 offsets are used only if every tensor's extent along `dim` fits
    int32_size_dim = lambda x: x.stride(dim) * x.size(dim) < 2**32
    use_int32_offset = all(map(int32_size_dim, (inp, index, src)))
    _scatter_func(
        src_strided,
        index,
        inp_restrided,
        out,
        dim_size,
        dim_stride,
        N,
        reduce,
        int32_offset=use_int32_offset,
    )

    return out


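# A minimal usage sketch (hypothetical shapes; device="cuda" is an
# assumption, any device flag_gems supports works the same way):
#
#   inp = torch.zeros(3, 5, device="cuda")
#   src = torch.ones(2, 5, device="cuda")
#   index = torch.tensor([[0, 1, 2, 0, 0],
#                         [2, 0, 0, 1, 2]], device="cuda")
#   out = scatter(inp, 0, index, src)          # out[index[i][j]][j] = src[i][j]
#   out_add = scatter(inp, 0, index, src, reduce="add")

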
def scatter_(inp, dim, index, src, reduce=None):
    logger.debug("GEMS SCATTER_")
    out = inp

    if reduce is not None:
        assert inp.dtype not in (
            torch.bfloat16,
        ), "Unsupported operation: scatter with reduce does not support bfloat16 tensors."

    assert (
        has_internal_overlapping(out) != MemOverlap.Yes
    ), "Unsupported operation: trying to write in place to an internally overlapping tensor."

    src_restrided = src.as_strided(index.shape, src.stride())
    inp_restrided = restride_dim(inp, dim, index.shape)
    dim_size = inp.size(dim)
    dim_stride = inp.stride(dim)
    N = index.numel()

    # int32 offsets are used only if every tensor's extent along `dim` fits
    int32_size_dim = lambda x: x.stride(dim) * x.size(dim) < 2**32
    use_int32_offset = all(map(int32_size_dim, (inp, index, src)))
    _scatter_func(
        src_restrided,
        index,
        inp_restrided,
        out,
        dim_size,
        dim_stride,
        N,
        reduce,
        int32_offset=use_int32_offset,
    )

    return inp
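
# Unlike scatter, scatter_ mutates inp in place and returns it; reusing the
# tensors from the sketch above:
#
#   ret = scatter_(inp, 0, index, src)  # ret is inp, now updated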