Coverage for src/flag_gems/ops/zeros.py: 83%
35 statements
« prev ^ index » next coverage.py v7.6.9, created at 2026-03-21 14:31 +0800
import logging

import torch
import triton
import triton.language as tl

from flag_gems.runtime import device, torch_device_fn
from flag_gems.utils import triton_lang_extension as tle
from flag_gems.utils.shape_utils import volume
# Alias the runtime device descriptor so the local `device` keyword argument
# in zeros() below does not shadow it.
device_ = device
logger = logging.getLogger(__name__)
@triton.jit
def zeros_kernel(
    output_ptr,
    n_elements,
    BLOCK_SIZE: tl.constexpr,
):
    """Write 0.0 to the first ``n_elements`` slots of ``output_ptr``.

    Each program instance clears one contiguous BLOCK_SIZE-wide block;
    the mask guards the final, possibly partial, block.
    """
    pid = tle.program_id(axis=0)  # We use a 1D launch grid so axis is 0.
    block_start = pid * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements
    tl.store(output_ptr + offsets, 0.0, mask=mask)
def zeros(size, *, dtype=None, layout=None, device=None, pin_memory=None):
    """Return a new tensor of shape ``size`` filled with zeros.

    Mirrors the ``torch.zeros`` keyword interface. ``layout`` and
    ``pin_memory`` are accepted for signature compatibility but are not
    used by this implementation. When ``dtype``/``device`` are omitted,
    the torch default dtype and the flag_gems runtime device are used.
    """
    logger.debug("GEMS ZEROS")
    if dtype is None:
        dtype = torch.get_default_dtype()
    if device is None:
        device = torch.device(device_.name)

    out = torch.empty(size, device=device, dtype=dtype)
    N = volume(size)
    # One program per BLOCK_SIZE elements; grid sized from the launch meta.
    grid_fn = lambda meta: (triton.cdiv(N, meta["BLOCK_SIZE"]),)
    with torch_device_fn.device(device):
        zeros_kernel[grid_fn](out, N, BLOCK_SIZE=1024)
    return out
def zero_(x: torch.Tensor) -> torch.Tensor:
    """Fill ``x`` with zeros in place and return it (torch ``zero_`` semantics)."""
    logger.debug("GEMS ZERO_")
    N = x.numel()
    # One program per BLOCK_SIZE elements; grid sized from the launch meta.
    grid_fn = lambda meta: (triton.cdiv(N, meta["BLOCK_SIZE"]),)
    with torch_device_fn.device(x.device):
        zeros_kernel[grid_fn](x, N, BLOCK_SIZE=1024)
    return x