Coverage for src/flag_gems/experimental_ops/softshrink.py: 0%
54 statements
import torch
import triton
import triton.language as tl


@triton.jit
def softshrink_kernel(x_ptr, out_ptr, n_elements, lambd, BLOCK_SIZE: tl.constexpr):
    pid = tl.program_id(axis=0)
    block_start = pid * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements

    x = tl.load(x_ptr + offsets, mask=mask, other=0)
    x32 = x.to(tl.float32)

    threshold = lambd  # scalar float32

    gt = x32 > threshold
    lt = x32 < -threshold
    res32 = tl.where(gt, x32 - threshold, tl.where(lt, x32 + threshold, 0.0))

    # Propagate NaN: if x is NaN, keep it
    res32 = tl.where(x32 != x32, x32, res32)

    res = res32.to(x.dtype)
    tl.store(out_ptr + offsets, res, mask=mask)
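
# --- Illustrative reference (not part of the original module) ----------------
# A minimal eager-mode sketch of the same piecewise rule, useful for checking
# the Triton kernel on small inputs:
#     softshrink(x) = x - lambd  if x >  lambd
#                     x + lambd  if x < -lambd
#                     0          otherwise
# NaNs are propagated to match the kernel above. The helper name is made up
# for illustration.
def _softshrink_reference(x: torch.Tensor, lambd: float = 0.5) -> torch.Tensor:
    res = torch.where(
        x > lambd, x - lambd, torch.where(x < -lambd, x + lambd, torch.zeros_like(x))
    )
    return torch.where(torch.isnan(x), x, res)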

def _check_supported_dtype(t: torch.Tensor):
    if t.dtype not in (torch.float16, torch.bfloat16, torch.float32):
        raise TypeError(
            f"Unsupported dtype {t.dtype}. Supported dtypes are float16, bfloat16, and float32."
        )
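
# Example (illustrative): the guard above rejects integer and float64 tensors
# before any kernel launch, e.g.
#     _check_supported_dtype(torch.empty(4, dtype=torch.int64))    # raises TypeError
#     _check_supported_dtype(torch.empty(4, dtype=torch.float16))  # passes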

def _launch_softshrink_kernel(x: torch.Tensor, out: torch.Tensor, lambd: float):
    n_elements = x.numel()
    if n_elements == 0:
        return
    BLOCK_SIZE = 1024
    grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
    softshrink_kernel[grid](
        x,
        out,
        n_elements,
        float(lambd),
        BLOCK_SIZE=BLOCK_SIZE,
        num_warps=4,
    )
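
# Example (illustrative): for a flattened tensor with n_elements = 4096 and
# BLOCK_SIZE = 1024, the grid lambda above resolves to
#     (triton.cdiv(4096, 1024),) == (4,)
# so four program instances run, each covering one 1024-element block; any
# tail of a non-multiple size is handled by the `mask` inside the kernel.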

def softshrink(input: torch.Tensor, lambd: float = 0.5):
    if not input.is_cuda:
        raise ValueError("Input tensor must be on CUDA device.")
    _check_supported_dtype(input)
    x = input.contiguous()
    out = torch.empty_like(x)
    _launch_softshrink_kernel(x, out, lambd)
    return out.reshape_as(input)
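
# Example usage (a sketch, assuming a CUDA device is available):
#     x = torch.randn(1 << 20, device="cuda", dtype=torch.float32)
#     y = softshrink(x, lambd=0.5)
#     torch.testing.assert_close(y, torch.nn.functional.softshrink(x, 0.5))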

def softshrink_out(input: torch.Tensor, lambd: float = 0.5, out: torch.Tensor = None):
    if out is None:
        raise ValueError("Argument 'out' must be provided for softshrink_out.")
    if not input.is_cuda or not out.is_cuda:
        raise ValueError("Input and out tensors must be on CUDA device.")
    if input.shape != out.shape:
        raise ValueError(
            f"Shape mismatch: input.shape={input.shape}, out.shape={out.shape}"
        )
    if input.dtype != out.dtype:
        raise TypeError(
            f"Dtype mismatch: input.dtype={input.dtype}, out.dtype={out.dtype}"
        )
    _check_supported_dtype(input)

    x = input.contiguous()
    if out.is_contiguous():
        out_buf = out
    else:
        out_buf = torch.empty_like(out, memory_format=torch.contiguous_format)

    _launch_softshrink_kernel(x, out_buf, lambd)

    if out_buf.data_ptr() != out.data_ptr():
        out.copy_(out_buf)
    return out
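
if __name__ == "__main__":
    # Quick smoke test (an illustrative sketch, not part of the original file;
    # assumes a CUDA device). Checks both entry points against PyTorch's
    # built-in torch.nn.functional.softshrink.
    if torch.cuda.is_available():
        x = torch.randn(10_000, device="cuda", dtype=torch.float32)
        expected = torch.nn.functional.softshrink(x, 0.5)
        y = softshrink(x, lambd=0.5)
        torch.testing.assert_close(y, expected)
        out = torch.empty_like(x)
        softshrink_out(x, 0.5, out=out)
        torch.testing.assert_close(out, expected)
        print("softshrink smoke test passed")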