# This file is modified based on https://github.com/huggingface/transformers/blob/v4.52.4/src/transformers/models/qwen3/modeling_qwen3.py.
#
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/qwen3/modular_qwen3.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_qwen3.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2025 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Optional, Tuple, Union, List
import torch
from torch import nn
from einops import rearrange
from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache, SlidingWindowCache, StaticCache
from transformers.generation import GenerationMixin
from transformers.integrations import use_kernel_forward_from_hub
from transformers.modeling_attn_mask_utils import AttentionMaskConverter
from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
from transformers.modeling_layers import GradientCheckpointingLayer
from transformers.modeling_outputs import (
BaseModelOutputWithPast,
CausalLMOutputWithPast,
QuestionAnsweringModelOutput,
SequenceClassifierOutputWithPast,
TokenClassifierOutput,
)
from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from transformers.processing_utils import Unpack
from transformers.utils import LossKwargs, auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging
from configuration_sdar import SDARConfig
from fused_linear_diffusion_cross_entropy import FusedLinearDiffusionCrossEntropyLoss
from flash_attn.ops.triton.layer_norm import rms_norm_fn as flash_rms_norm
import torch.nn.functional as F
try:
from flash_attn import flash_attn_func, flash_attn_varlen_func
from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input
except ImportError:
    # flash-attn is optional at import time; code paths that call it will fail if it is unavailable.
    pass
try:
from liger_kernel.ops.swiglu import LigerSiLUMulFunction # noqa: F401
liger_kernel_is_available = True
except ImportError:
liger_kernel_is_available = False
if is_torch_flex_attn_available():
from torch.nn.attention.flex_attention import BlockMask, create_block_mask, flex_attention
from transformers.integrations.flex_attention import make_flex_block_causal_mask
logger = logging.get_logger(__name__)
def modify_padded_position_ids_2d(position_ids: torch.LongTensor) -> torch.LongTensor:
"""
使用完全向量化的 PyTorch 操作修改一个 batch 的 packed position_ids。
这个函数假设输入是一个 2D Tensor,形状为 (batch_size, sequence_length)。
它会独立地处理 batch 中的每一行。
Args:
position_ids: 二维 PyTorch Tensor, shape (batch_size, sequence_length).
Returns:
修改后的 position_ids Tensor, shape (batch_size, sequence_length).
"""
if position_ids.dim() != 2:
raise ValueError(f"Input tensor must be 2D, but got {position_ids.dim()} dimensions.")
batch_size, seq_len = position_ids.shape
device = position_ids.device
col_indices = torch.arange(seq_len, device=device, dtype=position_ids.dtype).expand(batch_size, -1)
mask = (position_ids != 0)
masked_indices = col_indices * mask
last_nonzero_idx = torch.max(masked_indices, dim=1).values
has_nonzero = torch.any(mask, dim=1)
pad_start_idx = torch.where(has_nonzero, last_nonzero_idx + 1, torch.tensor(0, device=device, dtype=position_ids.dtype))
padding_mask = col_indices >= pad_start_idx.unsqueeze(1)
new_pad_values = col_indices - pad_start_idx.unsqueeze(1)
position_ids = torch.where(padding_mask, new_pad_values, position_ids)
return position_ids
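# Illustrative behavior (a sketch with a made-up input; not used by the model code):
#   >>> modify_padded_position_ids_2d(torch.tensor([[0, 1, 2, 3, 0, 0]]))
#   tensor([[0, 1, 2, 3, 0, 1]])
# The two trailing padding zeros are re-numbered 0, 1 so the padded tail forms its own pseudo-sequence.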
def calculate_token_nums(position_ids: torch.Tensor):
"""
使用 PyTorch 高效计算一个批次中每个打包序列的长度。
Args:
position_ids (torch.Tensor): 一个 2D Tensor,形状为 (batch_size, sequence_length)。
例如:tensor([[0,1,2,3,4,0,1,2,3,4,5,0,1,2,3,0,0,0]])
Returns:
list[list[int]]: 一个嵌套列表,包含每个批次项中各个序列的长度。
例如:[[5, 6, 4, 1, 1, 1]]
"""
    # Validate that the input is a 2D tensor
    if position_ids.dim() != 2:
        raise ValueError(f"Input must be a 2D tensor, but got {position_ids.dim()}D")
all_lengths = []
    # Process the batch row by row. Because each row contains a different (ragged) number of
    # packed sequences, a Python loop over the batch dimension is the clearest approach;
    # everything inside the loop is fully vectorized.
for pids_row in position_ids:
        # Total length of the current row
seq_len = pids_row.shape[0]
        # 1. Find the indices of all elements equal to 0.
        #    pids_row == 0 yields a boolean tensor: [True, False, ..., True, ...];
        #    torch.nonzero returns the indices of the True values, and
        #    .flatten() turns the (N, 1) result into shape (N,).
zero_indices = torch.nonzero(pids_row == 0).flatten()
        # 2. Append the total sequence length as an extra split point at the end;
        #    this is essential for computing the length of the last sequence.
        #    Make sure the newly created tensor lives on the same device (cpu/cuda) as the input.
split_points = torch.cat([
zero_indices,
torch.tensor([seq_len], device=pids_row.device, dtype=zero_indices.dtype)
])
        # 3. The differences between adjacent split points are exactly the lengths we want.
        #    torch.diff([a, b, c, d]) returns [b-a, c-b, d-c].
lengths = torch.diff(split_points)
all_lengths.append(lengths)
return all_lengths
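# Illustrative behavior (a sketch; note that the return value is a list of tensors, one per batch row):
#   >>> calculate_token_nums(torch.tensor([[0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 0, 0, 0]]))
#   [tensor([5, 6, 4, 1, 1, 1])]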
def forward_add_noise_packed(
inputs_ids: torch.Tensor,
num_tokens_list: List[torch.Tensor],
prompt_mask: torch.Tensor,
mask_id: int,
eps: float = 1e-3,
max_tries: int = 10,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
为一批打包(packed)序列的 token ID 添加噪声。
此函数保留了为每个逻辑样本(在每个批次项内拼接)生成独立随机噪声率的逻辑。
它会随机将一部分 token 的 ID 替换为 mask_id。
这个过程会避开被 prompt_mask 标记的位置。
Args:
inputs_ids (torch.Tensor):
输入的 token ID 张量,形状为 (bsz, total_tokens)。
num_tokens_list (List[torch.Tensor]):
一个张量列表,长度为 bsz。列表中的每个张量记录了对应批次项中
每个逻辑样本的长度。例如: [tensor([len1, len2]), tensor([len3, len4, len5])].
prompt_mask (torch.Tensor):
布尔型张量,形状为 (bsz, total_tokens),值为 True 的位置表示是 prompt,
不应添加噪声。
mask_id (int):
用于替换的 mask token 的 ID。
eps (float):
微小值,用于防止噪声率 t 恰好为 0,确保 p_mask > 0。
max_tries (int):
为确保至少一个非 prompt token 被 mask,对每个批次项尝试的最大次数。
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
- noisy_input_ids (torch.Tensor):
添加噪声后的 token ID 张量,形状为 (bsz, total_tokens)。
- final_masked_indices (torch.Tensor):
布尔型张量,标记了哪些位置被实际 mask 了,形状为 (bsz, total_tokens)。
- p_masks (torch.Tensor):
一个一维张量,包含了被 mask 的 token 对应的实际噪声率。
"""
    # 1. Validate inputs and read shapes
bsz, total_tokens = inputs_ids.shape
device = inputs_ids.device
    # Check input consistency
    assert len(num_tokens_list) == bsz, f"len(num_tokens_list) ({len(num_tokens_list)}) must equal bsz ({bsz})"
    assert prompt_mask.shape == (bsz, total_tokens), f"prompt_mask shape mismatch: expected {(bsz, total_tokens)}, got {prompt_mask.shape}"
    # Prepare result containers
noisy_ids_list = []
final_masked_indices_list = []
p_masks_per_token_list = []
    # 2. Iterate over the batch dimension.
    #    This is the most direct and effective way to handle differing packing structures.
for i in range(bsz):
        # Extract the data of the current batch item
current_ids = inputs_ids[i:i+1] # shape: (1, total_tokens)
current_num_tokens = num_tokens_list[i]
current_prompt_mask = prompt_mask[i:i+1] # shape: (1, total_tokens)
num_samples_in_item = len(current_num_tokens)
        # Verify that the token count of this batch item matches
        assert total_tokens == torch.sum(current_num_tokens), \
            f"Sum of num_tokens for batch item {i} ({torch.sum(current_num_tokens)}) does not match total_tokens ({total_tokens})"
eligible_for_masking = ~current_prompt_mask
        # If no token can be masked, keep the original input and set p_mask to eps
if not eligible_for_masking.any():
noisy_ids_list.append(current_ids)
final_masked_indices_list.append(torch.zeros_like(current_prompt_mask, dtype=torch.bool))
            # p_mask_per_token must have shape (1, total_tokens) so it can be concatenated later
p_masks_per_token_list.append(torch.full((1, total_tokens), eps, device=device, dtype=torch.float))
continue
        # --- Try to generate a mask, ensuring at least one token gets masked ---
final_masked_indices_item = torch.zeros_like(current_prompt_mask, dtype=torch.bool)
p_mask_per_token = None
for _ in range(max_tries):
            # Sample an independent noise rate t for every logical sample
t = torch.rand(num_samples_in_item, device=device)
p_mask_per_sample = (1 - eps) * t + eps
            # Broadcast each sample's noise rate to all of its tokens
p_mask_per_token_1d = torch.repeat_interleave(p_mask_per_sample, current_num_tokens)
p_mask_per_token = p_mask_per_token_1d.unsqueeze(0) # shape: (1, total_tokens)
            # Draw a random mask according to the noise rate
masked_indices = torch.rand_like(p_mask_per_token) < p_mask_per_token
            # Apply the prompt mask so prompt tokens are never masked
final_masked_indices_item = masked_indices & eligible_for_masking
            # Stop retrying as soon as at least one token has been masked
if final_masked_indices_item.any():
break
        # If no token is masked after max_tries (very unlikely), force-mask one eligible token
if not final_masked_indices_item.any():
eligible_indices = torch.nonzero(eligible_for_masking.squeeze(0), as_tuple=True)[0]
if len(eligible_indices) > 0:
                # Randomly pick one eligible position
random_choice = torch.randint(0, len(eligible_indices), (1,)).item()
force_mask_idx = eligible_indices[random_choice]
final_masked_indices_item[0, force_mask_idx] = True
        # --- Produce the noised IDs from the final mask ---
noisy_ids_item = torch.where(
final_masked_indices_item,
mask_id,
current_ids
)
        # Store the results of this batch item
noisy_ids_list.append(noisy_ids_item)
final_masked_indices_list.append(final_masked_indices_item)
p_masks_per_token_list.append(p_mask_per_token)
    # 3. Stack the per-item results into the final batched tensors
noisy_input_ids = torch.cat(noisy_ids_list, dim=0)
final_masked_indices = torch.cat(final_masked_indices_list, dim=0)
p_mask_full = torch.cat(p_masks_per_token_list, dim=0)
    # 4. Extract the noise rates at the masked positions
p_masks = p_mask_full[final_masked_indices]
return noisy_input_ids, final_masked_indices, p_masks
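# Shape sketch (hypothetical packing; the masking itself is random): for inputs_ids of shape (1, 10),
# num_tokens_list=[tensor([6, 4])] and a (1, 10) boolean prompt_mask, the function returns
# noisy_input_ids (1, 10), final_masked_indices (1, 10) bool (with at least one True outside the prompt
# whenever any non-prompt token exists), and p_masks of shape (final_masked_indices.sum(),).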
def block_diff_mask(b, h, q_idx, kv_idx, block_size=None, n=None):
"""
Constructs the specialized block diffusion attention mask for training
composed of three masks:
- **Block Diagonal Mask (M_BD)**: Self-attention within noised blocks
- **Offset Block Causal Mask (M_OBC)**: Cross-attention for conditional context
- **Block Causal Mask (M_BC)**: Attention to update x0
    Args:
        b, h: Batch and head indices (ignored by the mask logic).
        q_idx, kv_idx: Query and key indices.
        block_size: Defines the block structure.
        n: Length of one copy of the sequence; indices < n belong to the noised copy xt and
            indices >= n to the clean copy x0.
    Returns:
        A boolean attention mask.
"""
# Indicate whether token belongs to xt or x0
x0_flag_q = q_idx >= n
x0_flag_kv = kv_idx >= n
# Compute block indices
block_q = torch.where(
x0_flag_q == 1, (q_idx - n) // block_size, q_idx // block_size
)
block_kv = torch.where(
x0_flag_kv == 1, (kv_idx - n) // block_size, kv_idx // block_size
)
# **1. Block Diagonal Mask (M_BD) **
block_diagonal = (block_q == block_kv) & (x0_flag_q == x0_flag_kv)
# **2. Offset Block-Causal Mask (M_OBC) **
offset_block_causal = (block_q > block_kv) & (
x0_flag_kv == 1) & (x0_flag_q == 0)
# **3. Block-Causal Mask (M_BC) **
block_causal = (block_q >= block_kv) & (x0_flag_kv == 1) & (x0_flag_q == 1)
# **4. Combine Masks **
return block_diagonal | offset_block_causal | block_causal
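# Semantics sketch with block_size=2 and n=4 (indices 0-3 are the noised copy xt, 4-7 the clean copy x0):
#   q=1 (xt, block 0), kv=0 (xt, block 0) -> True  (block-diagonal self-attention within a noised block)
#   q=2 (xt, block 1), kv=5 (x0, block 0) -> True  (offset block-causal: conditioning on earlier clean blocks)
#   q=1 (xt, block 0), kv=5 (x0, block 0) -> False (a noised block does not attend to its own clean block)
#   q=6 (x0, block 1), kv=4 (x0, block 0) -> True  (block-causal attention over x0)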
def block_attn_mask(num_tokens, block_size, device):
masks = []
for i in range(len(num_tokens)):
cur_masks = []
for num in num_tokens[i]:
            # Build a (2*num, 2*num) mask per packed sample: the noised copy xt occupies the
            # first num positions and the clean copy x0 the last num positions.
single_mask = block_diff_mask(
b=None,
h=None,
q_idx=torch.arange(num * 2, device=device)[:, None],
kv_idx=torch.arange(num * 2, device=device)[None, :],
block_size=block_size,
n=num,
)
cur_masks.append(single_mask)
masks.append(torch.block_diag(*cur_masks))
masks = torch.stack(masks, dim=0)
return masks
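# Shape sketch (hypothetical packing): num_tokens=[tensor([3, 2])] yields a block-diagonal boolean mask
# of size (2*3 + 2*2) x (2*3 + 2*2) = (10, 10) for the single batch item, stacked to (1, 10, 10).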
@torch.compile(fullgraph=True, mode="max-autotune-no-cudagraphs")
def fused_flex_attention(query, key, value, attention_mask, **kwargs):
return flex_attention(query, key, value, block_mask=attention_mask, **kwargs)
@use_kernel_forward_from_hub("RMSNorm")
class SDARRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
SDARRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
return flash_rms_norm(
hidden_states, weight=self.weight, bias=None, eps=self.variance_epsilon)
        # Reference eager implementation, kept for documentation:
        #   input_dtype = hidden_states.dtype
        #   hidden_states = hidden_states.to(torch.float32)
        #   variance = hidden_states.pow(2).mean(-1, keepdim=True)
        #   hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        #   return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
class SDARMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(
self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(
self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(
self.intermediate_size, self.hidden_size, bias=False)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
if liger_kernel_is_available:
return self.down_proj(LigerSiLUMulFunction.apply(self.gate_proj(x), self.up_proj(x)))
else:
down_proj = self.down_proj(self.act_fn(
self.gate_proj(x)) * self.up_proj(x))
return down_proj
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2:]
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(
batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs,
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(
attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(
attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
class SDARAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: SDARConfig, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(
config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.hidden_size = config.hidden_size
self.num_attention_heads = config.num_attention_heads
self.num_key_value_heads = config.num_key_value_heads
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
# unlike olmo, only on the head dim!
self.q_norm = SDARRMSNorm(self.head_dim, eps=config.rms_norm_eps)
# thus post q_norm does not need reshape
self.k_norm = SDARRMSNorm(self.head_dim, eps=config.rms_norm_eps)
self.sliding_window = config.sliding_window
if not (
self.config.use_sliding_window
and getattr(self.config, "sliding_window", None) is not None
and self.layer_idx >= self.config.max_window_layers
):
self.sliding_window = None
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_value: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
bsz, q_len = input_shape
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_norm(self.q_proj(
hidden_states).view(hidden_shape)).transpose(1, 2)
key_states = self.k_norm(self.k_proj(
hidden_states).view(hidden_shape)).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(
hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(
query_states, key_states, cos, sin)
if past_key_value is not None and kwargs.get("store_kv", False):
# sin and cos are specific to RoPE models; cache_position needed for the static cache
key_states, value_states = past_key_value.update(
key_states, value_states, self.layer_idx)
elif past_key_value is not None and not kwargs.get("store_kv", False) and len(past_key_value) > self.layer_idx:
            # only retrieve the cached KV, do not store new entries
past_key_states, past_value_states = past_key_value[self.layer_idx]
key_states = torch.cat(
[past_key_states, key_states], dim=-2)
value_states = torch.cat(
[past_value_states, value_states], dim=-2)
if self.training:
attn_output, attn_weights = fused_flex_attention(
query=query_states,
key=key_states,
value=value_states,
attention_mask=attention_mask,
enable_gqa=True,
scale=self.scaling,
return_lse=True
)
attn_weights = attn_weights.to(
value_states.dtype) if attn_weights is not None else None
attn_output = rearrange(attn_output, 'b h l d -> b l (h d)')
else:
attention_mask = attention_mask.bool() if attention_mask is not None else None
attn_weights = None
            if attention_mask is None or torch.all(attention_mask):  # decoding: full (non-causal) attention
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
attn_output = flash_attn_func(
query_states,
key_states,
value_states,
causal=False,
softmax_scale=self.scaling
)
attn_output = rearrange(attn_output, 'b l h d -> b l (h d)')
else: # prefilling
attn_output = F.scaled_dot_product_attention(
query=query_states,
key=key_states,
value=value_states,
attn_mask=attention_mask,
is_causal=False,
scale=self.scaling,
enable_gqa=True
)
attn_output = rearrange(attn_output, 'b h l d -> b l (h d)')
attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
class SDARDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: SDARConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = SDARAttention(config=config, layer_idx=layer_idx)
self.mlp = SDARMLP(config)
self.input_layernorm = SDARRMSNorm(
config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = SDARRMSNorm(
config.hidden_size, eps=config.rms_norm_eps)
if (
config.sliding_window and config._attn_implementation != "flash_attention_2"
): # diff with Llama is this warning
logger.warning_once(
f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; "
"unexpected results may be encountered."
)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
store_kv: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
# necessary, but kept here for BC
position_embeddings: Optional[Tuple[torch.Tensor,
torch.Tensor]] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
store_kv=store_kv,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
return outputs
@auto_docstring
class SDARPreTrainedModel(PreTrainedModel):
config_class = SDARConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["SDARDecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn_2 = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_cache_class = True
_supports_quantized_cache = True
_supports_static_cache = True
_supports_attention_backend = True
def _init_weights(self, module):
std = self.config.initializer_range
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, SDARRMSNorm):
module.weight.data.fill_(1.0)
class SDARRotaryEmbedding(nn.Module):
def __init__(self, config: SDARConfig, device=None):
super().__init__()
# BC: "rope_type" was originally "type"
if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
self.rope_type = config.rope_scaling.get(
"rope_type", config.rope_scaling.get("type"))
else:
self.rope_type = "default"
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(
self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
@torch.no_grad()
# power user: used with advanced RoPE types (e.g. dynamic rope)
@dynamic_rope_update
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(
position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(
x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @
position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
@auto_docstring
class SDARModel(SDARPreTrainedModel):
def __init__(self, config: SDARConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(
config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[SDARDecoderLayer(config, layer_idx)
for layer_idx in range(config.num_hidden_layers)]
)
self.norm = SDARRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = SDARRotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, value):
self.embed_tokens = value
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
store_kv: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**flash_attn_kwargs: Unpack[FlashAttentionKwargs],
) -> BaseModelOutputWithPast:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError(
"You must specify exactly one of input_ids or inputs_embeds")
if self.gradient_checkpointing and self.training and use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
)
use_cache = False
# TODO (joao): remove this exception in v4.56 -- it exists for users that try to pass a legacy cache
if not isinstance(past_key_values, (type(None), Cache)):
raise ValueError(
"The `past_key_values` should be either a `Cache` object or `None`.")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache()
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length(
) if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
# causal_mask = self._update_causal_mask(
# attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
# )
hidden_states = inputs_embeds
# create position embeddings to be shared across the decoder layers
position_embeddings = self.rotary_emb(hidden_states, position_ids)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
if output_hidden_states:
all_hidden_states += (hidden_states,)
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_value=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
store_kv=store_kv,
cache_position=cache_position,
position_embeddings=position_embeddings,
**flash_attn_kwargs,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
hidden_states = self.norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values if use_cache else None,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
def _update_causal_mask(
self,
attention_mask: Union[torch.Tensor, "BlockMask"],
input_tensor: torch.Tensor,
cache_position: torch.Tensor,
past_key_values: Cache,
output_attentions: bool = False,
):
if self.config._attn_implementation == "flash_attention_2":
if attention_mask is not None and past_key_values is not None:
is_padding_right = attention_mask[:, -
1].sum().item() != input_tensor.size()[0]
if is_padding_right:
raise ValueError(
"You are attempting to perform batched generation with padding_side='right'"
" this may lead to unexpected behaviour for Flash Attention version of Qwen3. Make sure to "
" call `tokenizer.padding_side = 'left'` before tokenizing the input. "
)
if attention_mask is not None and 0.0 in attention_mask:
return attention_mask
return None
if self.config._attn_implementation == "flex_attention":
if isinstance(attention_mask, torch.Tensor):
seq_len_q, seq_len_kv = attention_mask.shape
assert seq_len_q == seq_len_kv, f"got {attention_mask.shape=}"
attention_mask = create_block_mask(
# 2d bool tensor, shape: [2*seqlen, 2*seqlen]
lambda b, h, q_idx, kv_idx: attention_mask[q_idx, kv_idx],
B=None, H=None, Q_LEN=seq_len_q, KV_LEN=seq_len_kv,
)
else:
# Here we pass in flex mask computed externally
assert isinstance(attention_mask, BlockMask)
return attention_mask
# For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
# order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
# to infer the attention mask.
past_seen_tokens = past_key_values.get_seq_length(
) if past_key_values is not None else 0
using_static_cache = isinstance(past_key_values, StaticCache)
using_sliding_window_cache = isinstance(
past_key_values, SlidingWindowCache)
# When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
if (
self.config._attn_implementation == "sdpa"
and not (using_static_cache or using_sliding_window_cache)
and not output_attentions
):
if AttentionMaskConverter._ignore_causal_mask_sdpa(
attention_mask,
inputs_embeds=input_tensor,
past_key_values_length=past_seen_tokens,
sliding_window=self.config.sliding_window,
is_training=self.training,
):
return None
dtype = input_tensor.dtype
min_dtype = torch.finfo(dtype).min
sequence_length = input_tensor.shape[1]
# SlidingWindowCache or StaticCache
if using_sliding_window_cache or using_static_cache:
target_length = past_key_values.get_max_cache_shape()
# DynamicCache or no cache
else:
target_length = (
attention_mask.shape[-1]
if isinstance(attention_mask, torch.Tensor)
else past_seen_tokens + sequence_length + 1
)
# In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
attention_mask,
sequence_length=sequence_length,
target_length=target_length,
dtype=dtype,
cache_position=cache_position,
batch_size=input_tensor.shape[0],
config=self.config,
past_key_values=past_key_values,
)
if (
self.config._attn_implementation == "sdpa"
and attention_mask is not None
and attention_mask.device.type in ["cuda", "xpu", "npu"]
and not output_attentions
):
# Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
# using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
# Details: https://github.com/pytorch/pytorch/issues/110213
causal_mask = AttentionMaskConverter._unmask_unattended(
causal_mask, min_dtype)
return causal_mask
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(
attention_mask: torch.Tensor,
sequence_length: int,
target_length: int,
dtype: torch.dtype,
cache_position: torch.Tensor,
batch_size: int,
config: SDARConfig,
past_key_values: Cache,
):
"""
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`torch.Tensor`):
Batch size.
config (`SDARConfig`):
The model's configuration class
past_key_values (`Cache`):
The cache class that is being used currently to generate
"""
if attention_mask is not None and attention_mask.dim() == 4:
# In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
causal_mask = attention_mask
else:
min_dtype = torch.finfo(dtype).min
causal_mask = torch.full(
(sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
)
diagonal_attend_mask = torch.arange(target_length, device=cache_position.device) > cache_position.reshape(
-1, 1
)
text_config = config.get_text_config()
if getattr(text_config, "use_sliding_window", True) and text_config.sliding_window is not None:
# if we have sliding window, we should not attend to tokens beyond sliding window length, so we mask them out also
# the check is needed to verify is current checkpoint was trained with sliding window or not
if not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length:
sliding_attend_mask = torch.arange(target_length, device=cache_position.device) <= (
cache_position.reshape(-1, 1) -
text_config.sliding_window
)
diagonal_attend_mask.bitwise_or_(sliding_attend_mask)
causal_mask *= diagonal_attend_mask
causal_mask = causal_mask[None, None,
:, :].expand(batch_size, 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
if attention_mask.shape[-1] > target_length:
attention_mask = attention_mask[:, :target_length]
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
causal_mask.device
)
padding_mask = padding_mask == 0
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
padding_mask, min_dtype
)
return causal_mask
class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs):
...
@auto_docstring
class SDARForCausalLM(SDARPreTrainedModel, GenerationMixin):
_tied_weights_keys = ["lm_head.weight"]
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = SDARModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(
config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.model.embed_tokens
def set_input_embeddings(self, value):
self.model.embed_tokens = value
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def set_decoder(self, decoder):
self.model = decoder
def get_decoder(self):
return self.model
def prepare_for_bd_training(self, inputs_ids, position_ids, prompt_mask, masked_indices=None, p_mask_input=None):
bsz, seq_len = inputs_ids.shape
num_tokens = calculate_token_nums(position_ids) # List[torch.Tensor]
        # If masked_indices is passed in explicitly, use it directly
if masked_indices is not None:
            # Manual mask mode: used for RL training or fixed-mask experiments.
            # Note: the externally provided masked_indices is already restricted to the response part
            # (via & response_mask), so no further filtering is needed.
noisy_inputs_ids = torch.where(masked_indices, self.config.mask_token_id, inputs_ids)
logits_to_keep_half = masked_indices # (B, L) bool
            # Build a default p_mask: flattened noise rates of shape (M,), where M = sum(masked_indices).
            # The default value 0.5 represents a medium noise level (used by the diffusion loss).
M = masked_indices.sum().item()
p_mask = torch.full((M,), 0.5, device=inputs_ids.device, dtype=torch.float)
else:
            # Random mask mode: used for Block Diffusion pre-training.
            # Returns: noisy_inputs_ids (B, L), logits_to_keep_half (B, L) bool, p_mask (M,) float
noisy_inputs_ids, logits_to_keep_half, p_mask = forward_add_noise_packed(
inputs_ids=inputs_ids,
num_tokens_list=num_tokens,
prompt_mask=prompt_mask,
mask_id=self.config.mask_token_id,
)
        # Make sure both branches return consistent shapes:
        # logits_to_keep_half: (B, L) bool - marks which positions were masked
        # p_mask: (M,) float - noise rate of each masked position, where M = sum(logits_to_keep_half)
assert logits_to_keep_half.shape == (bsz, seq_len), f"logits_to_keep_half shape error: {logits_to_keep_half.shape}"
assert p_mask.shape == (logits_to_keep_half.sum(),), f"p_mask shape error: {p_mask.shape}, expected ({logits_to_keep_half.sum()},)"
        # If p_mask_input is provided (RL training), compute p_to_keep.
        # p_to_keep selects, among the masked positions, those where p_mask is True.
p_to_keep = None
if p_mask_input is not None:
            # Note: the externally provided p_mask_input is already restricted to the response part
            # (via & response_mask), so no further filtering is needed.
            # p_mask_input (B, L), logits_to_keep_half (B, L)
            # p_to_keep (M,) bool, where M = sum(logits_to_keep_half)
p_to_keep = p_mask_input[logits_to_keep_half]
router_noisy_part_list = []
for i in range(bsz):
cur_router_noisy_part = (torch.arange(num_tokens[i].shape[0] *2) % 2 == 0).to(inputs_ids.device)
cur_router_noisy_part = cur_router_noisy_part.repeat_interleave(num_tokens[i].repeat_interleave(2))
router_noisy_part_list.append(cur_router_noisy_part)
router_noisy_part = torch.stack(router_noisy_part_list, dim=0)
        # concatenated inputs_ids: (bsz, seq_len * 2)
concat_inputs_ids = inputs_ids.repeat(1, 2)
        # concatenated logits_to_keep: (bsz, seq_len * 2)
logits_to_keep = torch.zeros(
bsz, 2 * seq_len, dtype=torch.bool, device=inputs_ids.device)
        # concatenated position_ids: (bsz, seq_len * 2)
concat_position_ids = torch.zeros(
bsz, 2 * seq_len, dtype=position_ids.dtype, device=position_ids.device)
for i in range(bsz):
concat_inputs_ids[i][router_noisy_part[i]] = noisy_inputs_ids[i]
concat_inputs_ids[i][~router_noisy_part[i]] = inputs_ids[i]
logits_to_keep[i][router_noisy_part[i]] = logits_to_keep_half[i]
concat_position_ids[i][router_noisy_part[i]] = position_ids[i]
concat_position_ids[i][~router_noisy_part[i]] = position_ids[i]
# create flex_attention mask
attention_mask = block_attn_mask(num_tokens, self.config.block_size, inputs_ids.device)
flex_attention_mask_3d = create_block_mask(
lambda b, h, q_idx, kv_idx: attention_mask[b, q_idx, kv_idx],
B=attention_mask.size(0), H=None,
Q_LEN=attention_mask.size(1), KV_LEN=attention_mask.size(2),
)
return concat_inputs_ids, concat_position_ids, flex_attention_mask_3d, logits_to_keep_half, logits_to_keep, p_mask, p_to_keep
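    # Output shape sketch for prepare_for_bd_training, with B = bsz, L = seq_len and M = number of masked tokens:
    #   concat_inputs_ids (B, 2L), concat_position_ids (B, 2L), flex_attention_mask_3d (BlockMask over 2L x 2L),
    #   logits_to_keep_half (B, L) bool, logits_to_keep (B, 2L) bool, p_mask (M,) float,
    #   p_to_keep (M,) bool or None when p_mask_input is not provided.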
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
masked_indices: Optional[torch.Tensor] = None,
return_logits: bool = False,
# RL training parameters
compute_rl_loss: bool = False,
p_mask: Optional[torch.Tensor] = None,
adv: Optional[torch.Tensor] = None,
adv_optimization: bool = False,
logp_old_tok: Optional[torch.Tensor] = None,
logp_ref_tok: Optional[torch.Tensor] = None,
is_real: Optional[torch.Tensor] = None,
ppo_eps: float = 0.2,
kl_beta: float = 0.0,
use_kl_estimator_k3: bool = True,
return_entropy: bool = False,
dynamic_threshold: Optional[float] = None,
loss_mean: bool = True,
**kwargs: Unpack[KwargsForCausalLM],
) -> CausalLMOutputWithPast:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
if self.training:
            assert inputs_embeds is None, "only input_ids are supported during training"
prompt_mask = (labels == -100) if labels is not None else None
position_ids = modify_padded_position_ids_2d(position_ids)
(
concat_inputs_ids,
concat_position_ids,
flex_attention_mask_3d,
logits_to_keep_half,
logits_to_keep,
p_mask_out,
p_to_keep,
) = self.prepare_for_bd_training(
input_ids, position_ids, prompt_mask, masked_indices, p_mask_input=p_mask
)
outputs = self.model(
input_ids=concat_inputs_ids,
attention_mask=flex_attention_mask_3d,
position_ids=concat_position_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
hidden_states = hidden_states[logits_to_keep].contiguous()
            # Initialize entropy
entropy = torch.tensor(0.0, device=input_ids.device)
# ====================== RL loss(PPO) ======================
if compute_rl_loss:
assert p_to_keep is not None, "p_mask must be provided for RL loss computation."
assert adv is not None, "adv must be provided for RL loss computation."
assert is_real is not None, "is_real must be provided for RL loss computation."
assert labels is not None, "labels must be provided for RL loss computation."
assert masked_indices is not None, "masked_indices must be provided for RL loss computation."
device = input_ids.device
                # logits at the masked positions: (M, V)
logits = self.lm_head(hidden_states)
                # Combine p_mask with the per-sample is_real mask
is_real_tensor = (
is_real.to(device=device, dtype=torch.bool)
if torch.is_tensor(is_real)
else torch.tensor(is_real, dtype=torch.bool, device=device)
)
p_mask_real = p_mask & is_real_tensor.unsqueeze(1) # (B, L)
p_to_keep_real = p_mask_real[masked_indices] # (M,) bool
                # Select the logits at the positions kept for the policy loss
logits_p = logits[p_to_keep_real] # (N, V)
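                # Debug counters; not consumed by the loss computation below.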
N = p_to_keep_real.sum().item()
total_response_tokens = (labels != -100).sum().item()
total_p_mask = p_mask.sum().item()
total_masked_indices = masked_indices.sum().item()
total_is_real = is_real_tensor.sum().item() if is_real_tensor.dim() > 0 else (1 if is_real_tensor.item() else 0)
# log_softmax
log_probs_p = torch.nn.functional.log_softmax(logits_p, dim=-1)
                # Gather labels and per-token log-probabilities
labels_p = labels[masked_indices][p_to_keep_real] # (N,)
logp_p = log_probs_p.gather(dim=-1, index=labels_p.unsqueeze(-1)).squeeze(-1)
                # Entropy (optional)
if return_entropy:
with torch.no_grad():
entropy_p = -(log_probs_p.exp() * log_probs_p).sum(dim=-1)
entropy = entropy_p.mean() if entropy_p.numel() > 0 else torch.tensor(0.0, device=device)
del entropy_p
                # Advantage handling
adv_tensor = adv.to(device) if torch.is_tensor(adv) else torch.tensor(adv, dtype=torch.float, device=device)
                adv_optimization = False  # NOTE: force-disabled; the adv_optimization argument is currently ignored
if adv_optimization:
                    # Token-level optimization: take the maximum advantage over identical prefixes (pruned version)
response_mask = (labels != -100) # (B, L)
bsz, seq_len = input_ids.shape
                    # Precompute the response start position of every sample
response_starts = torch.full((bsz,), seq_len, dtype=torch.long, device=device)
for b in range(bsz):
if response_mask[b].any():
response_starts[b] = response_mask[b].long().argmax()
                    # Pruning 1: samples that already hold the maximum advantage are filled directly and skip the comparison
max_adv_value = adv_tensor.max()
is_max_adv = (adv_tensor == max_adv_value) # (B,) bool
                    # Create the optimized advantage map (B, L); keep the dtype consistent with adv_tensor
optimized_adv = torch.zeros_like(labels, dtype=adv_tensor.dtype)
                    # Fill samples that already carry the maximum advantage
for b in range(bsz):
if is_max_adv[b]:
optimized_adv[b][response_mask[b]] = max_adv_value
                    # Statistics
total_response_tokens = 0
updated_tokens = 0
skipped_tokens = 0
original_adv_sum = 0.0
optimized_adv_sum = 0.0
                    # Process position by position, comparing prefixes in batch
for pos in range(seq_len):
valid_samples = response_mask[:, pos] # (B,)
if not valid_samples.any():
continue
                        # Pruning 2: exclude samples that already hold the maximum advantage
valid_samples = valid_samples & ~is_max_adv
if not valid_samples.any():
                            # All remaining samples hold the maximum value; record statistics and skip
max_count = (response_mask[:, pos] & is_max_adv).sum().item()
total_response_tokens += max_count
skipped_tokens += max_count
original_adv_sum += max_adv_value.item() * max_count
optimized_adv_sum += max_adv_value.item() * max_count
continue
                        # Indices of all samples that still need processing
valid_indices = valid_samples.nonzero(as_tuple=True)[0] # (N,)
for b in valid_indices:
b_item = b.item()
response_start = response_starts[b_item].item()
prefix_len = pos + 1 - response_start
if prefix_len <= 0:
optimized_adv[b_item, pos] = adv_tensor[b_item]
continue
                            # Find all samples with the same response start that are valid at pos (including those at the maximum)
same_start_mask = (response_starts == response_start) & response_mask[:, pos]
same_start_indices = same_start_mask.nonzero(as_tuple=True)[0]
if len(same_start_indices) == 1:
                                # Only this sample; no comparison needed
optimized_adv[b_item, pos] = adv_tensor[b_item]
total_response_tokens += 1
original_adv_sum += adv_tensor[b_item].item()
optimized_adv_sum += adv_tensor[b_item].item()
continue
                            # Pruning 3: if a candidate carries the maximum advantage, the maximum can be used directly
has_max_in_candidates = (same_start_mask & is_max_adv).any()
prefix_end = pos + 1
current_prefix = input_ids[b_item, response_start:prefix_end]
                            # Batched comparison: extract the prefixes of all candidate samples
prefixes = input_ids[same_start_indices, response_start:prefix_end] # (M, prefix_len)
                            # Broadcast comparison: (M, prefix_len) vs (prefix_len,)
matches = (prefixes == current_prefix.unsqueeze(0)).all(dim=1) # (M,)
                            # Samples with a matching prefix
matching_indices = same_start_indices[matches]
                            # Take the maximum advantage among samples sharing the same prefix
original_adv_value = adv_tensor[b_item].item()
if matching_indices.numel() > 0:
                                # Pruning 4: if a matching sample holds the maximum, use it directly
if has_max_in_candidates and is_max_adv[matching_indices].any():
max_adv = max_adv_value
else:
max_adv = adv_tensor[matching_indices].max()
optimized_adv[b_item, pos] = max_adv
                                # Statistics
if abs(max_adv.item() - original_adv_value) > 1e-6:
updated_tokens += 1
original_adv_sum += original_adv_value
optimized_adv_sum += max_adv.item()
else:
optimized_adv[b_item, pos] = adv_tensor[b_item]
original_adv_sum += original_adv_value
optimized_adv_sum += original_adv_value
total_response_tokens += 1
                    # Report statistics
if total_response_tokens > 0:
update_ratio = updated_tokens / total_response_tokens
skip_ratio = skipped_tokens / total_response_tokens
avg_original = original_adv_sum / total_response_tokens
avg_optimized = optimized_adv_sum / total_response_tokens
print(f"[Adv Optimization] Total: {total_response_tokens}, "
f"Updated: {updated_tokens} ({update_ratio:.2%}), "
f"Skipped: {skipped_tokens} ({skip_ratio:.2%}), "
f"Avg adv: {avg_original:.4f} -> {avg_optimized:.4f} "
f"(+{avg_optimized - avg_original:.4f})")
                    # Use the optimized advantage
adv_expanded = optimized_adv
else:
                    # No optimization: use the raw advantage directly
adv_expanded = adv_tensor.unsqueeze(1).expand_as(p_mask)
adv_p = adv_expanded[masked_indices][p_to_keep_real]
# old logp
if logp_old_tok is not None and logp_old_tok.numel() > 0:
logp_old_p = logp_old_tok.to(device)[masked_indices][p_to_keep_real]
else:
logp_old_p = logp_p.detach()
# ratio/exp
ratio_p = (logp_p - logp_old_p).clamp(-10.0, 10.0).exp()
                clipped = ratio_p.clamp(1 - ppo_eps, 1 + ppo_eps + 0.08)  # asymmetric clip: the upper bound is widened by a hard-coded 0.08
surrogate_p = torch.minimum(ratio_p * adv_p, clipped * adv_p)
                # Track the ratio value farthest from 1 (debugging)
# if not torch.allclose(ratio_p, torch.ones_like(ratio_p)):
furthest_value = ratio_p[torch.abs(ratio_p - 1).argmax()]
# print(f"Furthest ratio from 1: {furthest_value.item()}")
# Policy loss: use mean or sum based on loss_mean parameter
num_masked = masked_indices.sum().item()
num_loss_elements = surrogate_p.numel()
print(f"masked_indices.sum()={num_masked}, surrogate_p.numel()={num_loss_elements}")
if loss_mean:
policy_loss = -surrogate_p.mean()
else:
policy_loss = -surrogate_p.sum()
                # KL penalty (optional)
kl_loss = torch.tensor(0.0, device=device)
if kl_beta > 0 and logp_ref_tok is not None:
logp_ref_p = logp_ref_tok.to(device)[masked_indices][p_to_keep_real]
kl_seq_p = logp_p - logp_ref_p
if use_kl_estimator_k3:
kl_seq_p = (-kl_seq_p).clamp(-10.0, 10.0).exp() - 1.0 + kl_seq_p
# KL loss: use mean or sum based on loss_mean parameter
if loss_mean:
kl_loss = kl_beta * kl_seq_p.mean()
else:
kl_loss = kl_beta * kl_seq_p.sum()
del logp_ref_p, kl_seq_p
loss = policy_loss + kl_loss
kl_loss_value = kl_loss.detach().clone()
                # Free intermediates
del logits, logits_p, log_probs_p, labels_p
del is_real_tensor, p_mask_real, p_to_keep_real
del adv_tensor, adv_expanded, adv_p
del logp_p, logp_old_p, ratio_p, clipped, surrogate_p
del policy_loss, kl_loss
logits = None
# ====================== GRPO / return logits ======================
elif return_logits:
logits = self.lm_head(hidden_states)
loss = None
# ====================== Block Diffusion fused loss ======================
else:
assert labels is not None, "Labels must be provided for training."
answer_len = (labels != -100).sum()
loss_fct = FusedLinearDiffusionCrossEntropyLoss(reduction="sum")
loss = loss_fct(
x=hidden_states,
target=labels[logits_to_keep_half].contiguous(),
weight=self.lm_head.weight,
bias=self.lm_head.bias,
p_mask=p_mask_out,
)
loss = loss / answer_len
logits = None
# ====================== eval / inference ======================
else:
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
hidden_states = hidden_states[:, slice_indices, :].contiguous()
fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training
if fuse_linear_and_cross_entropy:
logits = None
else:
logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
loss_fct = nn.CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
output = CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
if self.training and compute_rl_loss:
output.entropy = entropy
output.kl_loss = kl_loss_value if "kl_loss_value" in locals() else torch.tensor(0.0, device=input_ids.device)
return output
__all__ = [
"SDARForCausalLM",
"SDARModel",
"SDARPreTrainedModel",
]
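# Minimal loading sketch (illustrative only; "path/to/checkpoint" is a placeholder for an SDAR checkpoint
# directory containing this file and configuration_sdar.py):
#   from modeling_sdar import SDARForCausalLM
#   model = SDARForCausalLM.from_pretrained("path/to/checkpoint")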