Commit f19421e

a-r-r-o-w and asomoza authored
Helper functions to return skip-layer compatible layers (#12048)
update

Co-authored-by: Álvaro Somoza <[email protected]>
1 parent 69cdc25 commit f19421e

2 files changed: +44 lines, −0 lines


src/diffusers/hooks/_helpers.py

Lines changed: 1 addition & 0 deletions
@@ -133,6 +133,7 @@ def _register_attention_processors_metadata():
             skip_processor_output_fn=_skip_proc_output_fn_Attention_WanAttnProcessor2_0,
         ),
     )
+
     # FluxAttnProcessor
     AttentionProcessorRegistry.register(
         model_class=FluxAttnProcessor,
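
For context, the one added line is a blank line inside `_register_attention_processors_metadata()`, separating two registry entries. A minimal sketch of the registration pattern this hunk belongs to, reconstructed by analogy with the visible Wan entry — the `AttentionProcessorMetadata` wrapper and the Flux skip-function name are assumptions, not shown in this hunk:

    # Hypothetical reconstruction of the registration pattern around this hunk.
    # AttentionProcessorMetadata and the Flux skip-function name are assumed by
    # analogy with the WanAttnProcessor2_0 entry visible above.
    AttentionProcessorRegistry.register(
        model_class=FluxAttnProcessor,
        metadata=AttentionProcessorMetadata(
            skip_processor_output_fn=_skip_proc_output_fn_Attention_FluxAttnProcessor,
        ),
    )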

src/diffusers/hooks/utils.py

Lines changed: 43 additions & 0 deletions
@@ -0,0 +1,43 @@
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch

from ._common import _ALL_TRANSFORMER_BLOCK_IDENTIFIERS, _ATTENTION_CLASSES, _FEEDFORWARD_CLASSES


def _get_identifiable_transformer_blocks_in_module(module: torch.nn.Module):
    module_list_with_transformer_blocks = []
    for name, submodule in module.named_modules():
        name_endswith_identifier = any(name.endswith(identifier) for identifier in _ALL_TRANSFORMER_BLOCK_IDENTIFIERS)
        is_modulelist = isinstance(submodule, torch.nn.ModuleList)
        if name_endswith_identifier and is_modulelist:
            module_list_with_transformer_blocks.append((name, submodule))
    return module_list_with_transformer_blocks


def _get_identifiable_attention_layers_in_module(module: torch.nn.Module):
    attention_layers = []
    for name, submodule in module.named_modules():
        if isinstance(submodule, _ATTENTION_CLASSES):
            attention_layers.append((name, submodule))
    return attention_layers


def _get_identifiable_feedforward_layers_in_module(module: torch.nn.Module):
    feedforward_layers = []
    for name, submodule in module.named_modules():
        if isinstance(submodule, _FEEDFORWARD_CLASSES):
            feedforward_layers.append((name, submodule))
    return feedforward_layers
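
Taken together, these helpers return `(qualified_name, submodule)` pairs for a model's transformer-block `ModuleList`s, attention layers, and feed-forward layers, which lets skip-layer hooks target layers by name. A minimal usage sketch, not part of the commit — the model class and checkpoint are assumptions, and any diffusers transformer should work:

    import torch

    # Hypothetical usage sketch (not part of the commit). The model class and
    # checkpoint are assumptions; any diffusers transformer should work.
    from diffusers import FluxTransformer2DModel
    from diffusers.hooks.utils import (
        _get_identifiable_attention_layers_in_module,
        _get_identifiable_feedforward_layers_in_module,
        _get_identifiable_transformer_blocks_in_module,
    )

    transformer = FluxTransformer2DModel.from_pretrained(
        "black-forest-labs/FLUX.1-dev", subfolder="transformer", torch_dtype=torch.bfloat16
    )

    # Each helper returns (qualified_name, submodule) pairs, so callers can
    # pick out specific layers by name when installing skip-layer hooks.
    blocks = _get_identifiable_transformer_blocks_in_module(transformer)
    attn_layers = _get_identifiable_attention_layers_in_module(transformer)
    ff_layers = _get_identifiable_feedforward_layers_in_module(transformer)

    print([name for name, _ in blocks])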

Comments (0)