13 | 13 | from ktransformers.models.configuration_llama import LlamaConfig |
14 | 14 | from ktransformers.models.modeling_llama import LlamaRotaryEmbedding |
15 | 15 | from ktransformers.models.modeling_deepseek import DeepseekV2Attention, apply_rotary_pos_emb |
16 | | -from ktransformers.models.modeling_qwen3_moe import Qwen3MoeAttention |
| 16 | +from ktransformers.models.modeling_qwen3_moe import Qwen3MoeAttention, Qwen3MoeRotaryEmbedding, rotate_half |
17 | | -from typing import Optional, Tuple |
| 17 | +from typing import Optional, Tuple, Callable |
18 | 18 | from ktransformers.operators.base_operator import BaseInjectedModule |
19 | 19 | from ktransformers.util.custom_loader import GGUFLoader |
20 | 20 | from ktransformers.util.utils import get_compute_capability |
21 | 21 | import logging |
22 | 22 | from transformers.configuration_utils import PretrainedConfig |
23 | 23 | from transformers.cache_utils import Cache |
| 24 | +from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS |
24 | 25 | from ktransformers.util.vendors import device_manager, get_device, to_device, GPUVendor |
25 | 26 |
26 | 27 | try: |
@@ -943,3 +944,140 @@ def forward( |
943 | 944 | attn_output = attn_output.reshape(*input_shape, -1).contiguous() |
944 | 945 | attn_output = self.o_proj(attn_output).to(input_dtype) |
945 | 946 | return attn_output, attn_weights |
| 947 | + |
| 948 | + |
| 949 | +def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: |
| 950 | + """ |
| 951 | + This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, |
| 952 | + num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) |
| 953 | + """ |
| 954 | + batch, num_key_value_heads, slen, head_dim = hidden_states.shape |
| 955 | + if n_rep == 1: |
| 956 | + return hidden_states |
| 957 | + hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) |
| 958 | + return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) |
| 959 | + |
| 960 | + |
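For context on the grouped-query expansion above, a small standalone check (not part of the patch) that `repeat_kv` matches `torch.repeat_interleave` on the head dimension; all shapes below are made up for illustration:

```python
import torch

# Hypothetical GQA shapes: batch 2, 4 KV heads, 5 tokens, head_dim 8,
# expanded for 4 * 3 = 12 query heads (n_rep = 3).
kv = torch.randn(2, 4, 5, 8)
expanded = repeat_kv(kv, n_rep=3)
assert expanded.shape == (2, 12, 5, 8)

# Each KV head is repeated n_rep times in place, exactly like repeat_interleave.
assert torch.equal(expanded, torch.repeat_interleave(kv, repeats=3, dim=1))
```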
| 961 | +def eager_attention_forward( |
| 962 | + module: nn.Module, |
| 963 | + query: torch.Tensor, |
| 964 | + key: torch.Tensor, |
| 965 | + value: torch.Tensor, |
| 966 | + attention_mask: Optional[torch.Tensor], |
| 967 | + scaling: float, |
| 968 | + dropout: float = 0.0, |
| 969 | + **kwargs, |
| 970 | +): |
| 971 | + key_states = repeat_kv(key, module.num_key_value_groups) |
| 972 | + value_states = repeat_kv(value, module.num_key_value_groups) |
| 973 | + |
| 974 | + attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling |
| 975 | + if attention_mask is not None: |
| 976 | + causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] |
| 977 | + attn_weights = attn_weights + causal_mask |
| 978 | + |
| 979 | + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) |
| 980 | + attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) |
| 981 | + attn_output = torch.matmul(attn_weights, value_states) |
| 982 | + attn_output = attn_output.transpose(1, 2).contiguous() |
| 983 | + |
| 984 | + return attn_output, attn_weights |
| 985 | + |
| 986 | + |
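As a sanity sketch (outside the patch), the eager path above should agree with `torch.nn.functional.scaled_dot_product_attention` when there is no mask and no dropout. The `SimpleNamespace` stub is hypothetical and only supplies the two attributes `eager_attention_forward` reads, and the `scale=` keyword assumes PyTorch 2.1+:

```python
import torch
import torch.nn.functional as F
from types import SimpleNamespace

# Hypothetical module stub: no GQA (num_key_value_groups=1), eval mode.
module = SimpleNamespace(num_key_value_groups=1, training=False)

q = torch.randn(1, 8, 16, 64)  # (batch, num_heads, seq_len, head_dim)
k = torch.randn(1, 8, 16, 64)
v = torch.randn(1, 8, 16, 64)
scaling = 64 ** -0.5

out_eager, _ = eager_attention_forward(module, q, k, v, attention_mask=None, scaling=scaling)
out_sdpa = F.scaled_dot_product_attention(q, k, v, scale=scaling).transpose(1, 2)

# Both come back as (batch, seq_len, num_heads, head_dim), equal up to fp tolerance.
assert torch.allclose(out_eager, out_sdpa, atol=1e-4)
```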
| 987 | +class KQwen3MoeAttention(BaseInjectedModule, Qwen3MoeAttention): |
| 988 | + def __init__(self, |
| 989 | + key: str, |
| 990 | + gguf_loader: GGUFLoader, |
| 991 | + config: PretrainedConfig, |
| 992 | + orig_module: nn.Module, |
| 993 | + prefill_device: str = "cuda", |
| 994 | + generate_device: str = "cuda", |
| 995 | + chunck_size: int = 1000, |
| 996 | + **kwargs): |
| 997 | + BaseInjectedModule.__init__(self, key, gguf_loader, config, orig_module, prefill_device, generate_device, |
| 998 | + **kwargs) |
| 999 | + self.orig_module.__init__(self.orig_module.config, |
| 1000 | + orig_module.layer_idx) |
| 1001 | + self.chunck_size = chunck_size # TODO, generate chunck_size automatically. |
| 1002 | + |
| 1003 | + # Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb |
| 1004 | + def apply_rotary_pos_emb(self, q, k, cos, sin, position_ids=None, unsqueeze_dim=1): |
| 1005 | + """Applies Rotary Position Embedding to the query and key tensors. |
| 1006 | +
| 1007 | + Args: |
| 1008 | + q (`torch.Tensor`): The query tensor. |
| 1009 | + k (`torch.Tensor`): The key tensor. |
| 1010 | + cos (`torch.Tensor`): The cosine part of the rotary embedding. |
| 1011 | + sin (`torch.Tensor`): The sine part of the rotary embedding. |
| 1012 | + position_ids (`torch.Tensor`): |
| 1013 | + Deprecated and unused. |
| 1014 | + unsqueeze_dim (`int`, *optional*, defaults to 1): |
| 1015 | + The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and |
| 1016 | + sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note |
| 1017 | + that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and |
| 1018 | + k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes |
| 1019 | + cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have |
| 1020 | + the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. |
| 1021 | + Returns: |
| 1022 | + `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. |
| 1023 | + """ |
| 1024 | + cos = cos.unsqueeze(unsqueeze_dim) |
| 1025 | + sin = sin.unsqueeze(unsqueeze_dim) |
| 1026 | + q_embed = (q * cos) + (rotate_half(q) * sin) |
| 1027 | + k_embed = (k * cos) + (rotate_half(k) * sin) |
| 1028 | + return q_embed, k_embed |
| 1029 | + |
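To make the `unsqueeze_dim` note above concrete, a shape-only sketch (not part of the patch); `rotate_half` is reproduced locally so the snippet is self-contained, and the cos/sin tables are random placeholders rather than real rotary values:

```python
import torch

def rotate_half(x):
    # Standard RoPE helper: rotate the two halves of the last dimension.
    x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)

batch, heads, seq, head_dim = 2, 8, 16, 64
q = torch.randn(batch, heads, seq, head_dim)
k = torch.randn(batch, heads, seq, head_dim)

# Placeholder per-position tables of shape (batch, seq, head_dim).
cos = torch.randn(batch, seq, head_dim)
sin = torch.randn(batch, seq, head_dim)

# unsqueeze_dim=1 inserts the head axis, giving (batch, 1, seq, head_dim),
# which broadcasts against the (batch, heads, seq, head_dim) q/k layout.
q_embed = (q * cos.unsqueeze(1)) + (rotate_half(q) * sin.unsqueeze(1))
k_embed = (k * cos.unsqueeze(1)) + (rotate_half(k) * sin.unsqueeze(1))
assert q_embed.shape == q.shape and k_embed.shape == k.shape
```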
| 1030 | + def forward(self, |
| 1031 | + hidden_states: torch.Tensor, |
| 1032 | + position_ids: Optional[torch.Tensor], |
| 1033 | + position_embeddings: Tuple[torch.Tensor, torch.Tensor], |
| 1034 | + attention_mask: Optional[torch.Tensor], |
| 1035 | + past_key_value: Optional[Cache] = None, |
| 1036 | + cache_position: Optional[torch.LongTensor] = None, |
| 1037 | + **kwargs |
| 1038 | + ): |
| 1039 | + input_shape = hidden_states.shape[:-1] |
| 1040 | + hidden_shape = (*input_shape, -1, self.head_dim) |
| 1041 | + |
| 1042 | + query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2) |
| 1043 | + key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2) |
| 1044 | + value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) |
| 1045 | + |
| 1046 | + if position_embeddings is None: |
| 1047 | + position_embeddings = self.rotary_emb(hidden_states, position_ids) |
| 1048 | + |
| 1049 | + cos, sin = position_embeddings |
| 1050 | + |
| 1051 | + query_states, key_states = self.apply_rotary_pos_emb(query_states, key_states, cos, sin) |
| 1052 | + |
| 1053 | + |
| 1054 | + if past_key_value is not None: |
| 1055 | + # sin and cos are specific to RoPE models; cache_position needed for the static cache |
| 1056 | + cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} |
| 1057 | + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) |
| 1058 | + |
| 1059 | + attention_interface: Callable = eager_attention_forward |
| 1060 | + if self.config._attn_implementation != "eager": |
| 1061 | + if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False): |
| 1062 | + logger.warning_once( |
| 1063 | + "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " |
| 1064 | + 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' |
| 1065 | + ) |
| 1066 | + else: |
| 1067 | + attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] |
| 1068 | + |
| 1069 | + attn_output, attn_weights = attention_interface( |
| 1070 | + self, |
| 1071 | + query_states, |
| 1072 | + key_states, |
| 1073 | + value_states, |
| 1074 | + attention_mask, |
| 1075 | + dropout=0.0 if not self.training else self.attention_dropout, |
| 1076 | + scaling=self.scaling, |
| 1077 | + sliding_window=self.sliding_window, # diff with Llama |
| 1078 | + **kwargs, |
| 1079 | + ) |
| 1080 | + |
| 1081 | + attn_output = attn_output.reshape(*input_shape, -1).contiguous() |
| 1082 | + attn_output = self.o_proj(attn_output) |
| 1083 | + return attn_output, attn_weights |
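For reference on the dispatch in `forward`, the newly imported `ALL_ATTENTION_FUNCTIONS` registry is keyed by the same string stored in `config._attn_implementation`; a minimal lookup sketch (not part of the patch):

```python
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS

# "eager" is served by the local eager_attention_forward above; any other
# value (e.g. "sdpa", or "flash_attention_2" if installed) is resolved here.
sdpa_interface = ALL_ATTENTION_FUNCTIONS["sdpa"]
assert callable(sdpa_interface)

# Registered interfaces share the calling convention used in forward():
#   interface(module, query, key, value, attention_mask,
#             dropout=..., scaling=..., sliding_window=..., **kwargs)
# and return (attn_output, attn_weights_or_None), with attn_output laid out as
# (batch, seq_len, num_heads, head_dim) before the final reshape and o_proj.
```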