Coverage for src/flag_gems/ops/softshrink.py: 62%
52 statements
coverage.py v7.6.9, created at 2026-03-29 04:01 +0800
# Generated by KernelGen: https://github.com/flagos-ai/KernelGen
import torch
import triton
import triton.language as tl

from flag_gems.runtime import torch_device_fn


@triton.jit
def softshrink_kernel(x_ptr, out_ptr, n_elements, lambd, BLOCK_SIZE: tl.constexpr):
    pid = tl.program_id(axis=0)
    block_start = pid * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements

    x = tl.load(x_ptr + offsets, mask=mask, other=0)
    x32 = x.to(tl.float32)

    threshold = lambd  # scalar float32

    # softshrink(x) = x - lambd if x > lambd, x + lambd if x < -lambd, else 0
    gt = x32 > threshold
    lt = x32 < -threshold
    res32 = tl.where(gt, x32 - threshold, tl.where(lt, x32 + threshold, 0.0))

    # Propagate NaN: if x is NaN, keep it (NaN compares unequal to itself)
    res32 = tl.where(x32 != x32, x32, res32)

    res = res32.to(x.dtype)
    tl.store(out_ptr + offsets, res, mask=mask)


def _check_supported_dtype(t: torch.Tensor):
    if t.dtype not in (torch.float16, torch.bfloat16, torch.float32):
        raise TypeError(
            f"Unsupported dtype {t.dtype}. Supported dtypes are float16, bfloat16, and float32."
        )


def _launch_softshrink_kernel(x: torch.Tensor, out: torch.Tensor, lambd: float):
    n_elements = x.numel()
    if n_elements == 0:
        return
    BLOCK_SIZE = 1024
    grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
    with torch_device_fn.device(x.device):
        softshrink_kernel[grid](
            x,
            out,
            n_elements,
            float(lambd),
            BLOCK_SIZE=BLOCK_SIZE,
            num_warps=4,
        )


def softshrink(input: torch.Tensor, lambd: float = 0.5):
    _check_supported_dtype(input)
    x = input.contiguous()
    out = torch.empty_like(x)
    _launch_softshrink_kernel(x, out, lambd)
    return out.reshape_as(input)


def softshrink_out(input: torch.Tensor, lambd: float = 0.5, out: torch.Tensor = None):
    if out is None:
        raise ValueError("Argument 'out' must be provided for softshrink_out.")
    if input.shape != out.shape:
        raise ValueError(
            f"Shape mismatch: input.shape={input.shape}, out.shape={out.shape}"
        )
    if input.dtype != out.dtype:
        raise TypeError(
            f"Dtype mismatch: input.dtype={input.dtype}, out.dtype={out.dtype}"
        )
    _check_supported_dtype(input)

    x = input.contiguous()
    if out.is_contiguous():
        out_buf = out
    else:
        out_buf = torch.empty_like(out, memory_format=torch.contiguous_format)

    _launch_softshrink_kernel(x, out_buf, lambd)

    if out_buf.data_ptr() != out.data_ptr():
        out.copy_(out_buf)
    return out
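
For reference, here is a minimal smoke-test sketch of how these entry points might be exercised against the PyTorch reference. Assumptions not taken from the source: a CUDA device (flag_gems also targets other backends), the import path flag_gems.ops.softshrink, and the tolerance values, which are illustrative.

import torch
import torch.nn.functional as F

from flag_gems.ops.softshrink import softshrink, softshrink_out  # assumed path

x = torch.randn(1024, dtype=torch.float16, device="cuda")

# Functional form: compare against torch.nn.functional.softshrink,
# computed in float32 to mirror the kernel's internal precision.
y = softshrink(x, lambd=0.5)
ref = F.softshrink(x.float(), lambd=0.5).to(x.dtype)
torch.testing.assert_close(y, ref, rtol=1e-3, atol=1e-3)

# out= form: writes into a preallocated tensor of matching shape and dtype.
out = torch.empty_like(x)
softshrink_out(x, lambd=0.5, out=out)
torch.testing.assert_close(out, ref, rtol=1e-3, atol=1e-3)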