
Commit 7f915f4

tjtanaa committed

qwen25vl does not need maybe_get_vit_flash_attn_backend

Signed-off-by: tjtanaa <[email protected]>

1 parent 5ed59e6 · commit 7f915f4

File tree: 1 file changed, +0 -8 lines changed


vllm/model_executor/models/qwen2_5_vl.py

Lines changed: 0 additions & 8 deletions
@@ -43,7 +43,6 @@
 )
 
 from vllm.attention.backends.registry import _MHA_Backend
-from vllm.attention.layer import maybe_get_vit_flash_attn_backend
 from vllm.attention.ops.vit_attn_wrappers import (
     vit_flash_attn_wrapper,
     vit_torch_sdpa_wrapper,
@@ -351,9 +350,6 @@ def __init__(
             disable_tp=use_data_parallel,
         )
         self.attn_backend = attn_backend
-        self.flash_attn_varlen_func = maybe_get_vit_flash_attn_backend(
-            self.attn_backend,
-        )
 
         self.is_flash_attn_backend = self.attn_backend in {
             _MHA_Backend.FLASH_ATTN,
@@ -681,10 +677,6 @@ def __init__(
             attn_backend_override=attn_backend_override,
         )
 
-        self.flash_attn_varlen_func = maybe_get_vit_flash_attn_backend(
-            self.attn_backend,
-        )
-
         if self.attn_backend not in {
             _MHA_Backend.FLASH_ATTN,
             _MHA_Backend.VLLM_FLASH_ATTN,
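For context on why the removed handle is unnecessary: after this change the vision attention code only records the backend enum and checks membership in the flash-attention set, and kernel dispatch goes through the vit_flash_attn_wrapper / vit_torch_sdpa_wrapper imports that remain. The sketch below is a minimal, self-contained illustration of that enum-gated dispatch pattern; MHABackend, VisionAttention, and the SDPA fallback are simplified stand-ins invented for this example, not vLLM's actual _MHA_Backend, model classes, or wrapper signatures.

# Minimal, hypothetical sketch of the enum-gated dispatch that remains after
# this commit. MHABackend and VisionAttention are simplified stand-ins, not
# vLLM's real _MHA_Backend or model classes.
import enum

import torch
import torch.nn.functional as F


class MHABackend(enum.Enum):
    FLASH_ATTN = "flash_attn"
    VLLM_FLASH_ATTN = "vllm_flash_attn"
    TORCH_SDPA = "torch_sdpa"


class VisionAttention(torch.nn.Module):
    def __init__(self, attn_backend: MHABackend) -> None:
        super().__init__()
        # Only the backend enum is stored; no per-instance function handle
        # (such as the removed flash_attn_varlen_func) is kept, because
        # forward() branches on the enum and calls a wrapper directly.
        self.attn_backend = attn_backend
        self.is_flash_attn_backend = self.attn_backend in {
            MHABackend.FLASH_ATTN,
            MHABackend.VLLM_FLASH_ATTN,
        }

    def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
        if self.is_flash_attn_backend:
            # In vLLM this branch would go through vit_flash_attn_wrapper;
            # a real flash-attention kernel is out of scope for this sketch.
            raise NotImplementedError("flash-attention path not modeled here")
        # Non-flash path: plain PyTorch scaled dot-product attention,
        # analogous to the vit_torch_sdpa_wrapper path.
        return F.scaled_dot_product_attention(q, k, v)


if __name__ == "__main__":
    attn = VisionAttention(MHABackend.TORCH_SDPA)
    q = k = v = torch.randn(1, 8, 16, 64)  # (batch, heads, seq_len, head_dim)
    print(attn(q, k, v).shape)  # torch.Size([1, 8, 16, 64])

Presumably the membership check plus the wrapper imports already determine the execution path, which is why the separately cached flash_attn_varlen_func handle can be dropped; the commit message states that qwen2_5_vl does not need it.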
