Skip to content
Merged
Show file tree
Hide file tree
Changes from 29 commits
Commits
Show all changes
31 commits
Select commit Hold shift + click to select a range
6001899
xx
yao-matrix Jun 10, 2025
8a1d6e5
fix
yao-matrix Jun 11, 2025
603257b
Update model_loading_utils.py
yao-matrix Jun 11, 2025
8cdfdd8
Update test_models_unet_2d_condition.py
yao-matrix Jun 11, 2025
45e29bd
Update test_models_unet_2d_condition.py
yao-matrix Jun 11, 2025
2a7c17d
Merge branch 'main' into xpu
yao-matrix Jun 11, 2025
fae7c70
fix style
yao-matrix Jun 11, 2025
80fdbfc
Merge branch 'main' into xpu
yao-matrix Jun 11, 2025
97a37a1
Merge branch 'main' into xpu
yao-matrix Jun 11, 2025
5f0c794
Merge branch 'main' into xpu
yao-matrix Jun 12, 2025
8cd06b3
Merge branch 'main' into xpu
yao-matrix Jun 13, 2025
02a6a35
Merge branch 'main' into xpu
yao-matrix Jun 17, 2025
ed1a788
Merge branch 'main' into xpu
yao-matrix Jun 18, 2025
220ce94
Merge branch 'main' into xpu
yao-matrix Jun 18, 2025
e59cb0c
Merge branch 'main' into xpu
yao-matrix Jun 24, 2025
fd618b5
Merge branch 'main' into xpu
yao-matrix Jun 24, 2025
c340f9e
Merge branch 'main' into xpu
yao-matrix Jun 24, 2025
e674ce7
Merge branch 'main' into xpu
yao-matrix Jun 27, 2025
d389758
Merge branch 'main' into xpu
yao-matrix Jun 30, 2025
7e8ae22
Merge branch 'main' into xpu
yao-matrix Jul 1, 2025
c43bb19
Merge branch 'main' into xpu
yao-matrix Jul 3, 2025
49ac5d4
Merge branch 'main' into xpu
yao-matrix Jul 7, 2025
b7148d6
Merge branch 'main' into xpu
yao-matrix Jul 8, 2025
bda0afd
Merge branch 'main' into xpu
yao-matrix Jul 18, 2025
1ba8a88
fix comments
yao-matrix Jul 18, 2025
fd9fa99
Update unet_2d_blocks.py
yao-matrix Jul 18, 2025
692f0bd
Merge branch 'main' into xpu
sayakpaul Jul 18, 2025
ab5f55c
Merge branch 'main' into xpu
sayakpaul Jul 18, 2025
9b41f3a
Merge branch 'main' into xpu
yao-matrix Jul 19, 2025
9948c9c
update
yao-matrix Jul 19, 2025
38ff983
Merge branch 'main' into xpu
yao-matrix Jul 21, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion src/diffusers/models/unets/unet_2d_condition.py
Original file line number Diff line number Diff line change
Expand Up @@ -165,7 +165,7 @@ class conditioning with `class_embed_type` equal to `None`.
"""

_supports_gradient_checkpointing = True
_no_split_modules = ["BasicTransformerBlock", "ResnetBlock2D", "CrossAttnUpBlock2D"]
_no_split_modules = ["BasicTransformerBlock", "ResnetBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"]
_skip_layerwise_casting_patterns = ["norm"]
_repeated_blocks = ["BasicTransformerBlock"]

Expand Down
13 changes: 6 additions & 7 deletions tests/models/test_modeling_common.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,6 @@
require_torch_2,
require_torch_accelerator,
require_torch_accelerator_with_training,
require_torch_gpu,
require_torch_multi_accelerator,
require_torch_version_greater,
run_test_in_subprocess,
Expand Down Expand Up @@ -1829,8 +1828,8 @@ def test_wrong_device_map_raises_error(self, device_map, msg_substring):

assert msg_substring in str(err_ctx.exception)

@parameterized.expand([0, "cuda", torch.device("cuda")])
@require_torch_gpu
@parameterized.expand([0, torch_device, torch.device(torch_device)])
@require_torch_accelerator
def test_passing_non_dict_device_map_works(self, device_map):
init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
model = self.model_class(**init_dict).eval()
Expand All @@ -1839,8 +1838,8 @@ def test_passing_non_dict_device_map_works(self, device_map):
loaded_model = self.model_class.from_pretrained(tmpdir, device_map=device_map)
_ = loaded_model(**inputs_dict)

@parameterized.expand([("", "cuda"), ("", torch.device("cuda"))])
@require_torch_gpu
@parameterized.expand([("", torch_device), ("", torch.device(torch_device))])
@require_torch_accelerator
def test_passing_dict_device_map_works(self, name, device):
# There are other valid dict-based `device_map` values too. It's best to refer to
# the docs for those: https://huggingface.co/docs/accelerate/en/concept_guides/big_model_inference#the-devicemap.
Expand Down Expand Up @@ -1945,7 +1944,7 @@ def test_push_to_hub_library_name(self):
delete_repo(self.repo_id, token=TOKEN)


@require_torch_gpu
@require_torch_accelerator
@require_torch_2
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This change is unrelated to this PR. Going forward, please prefer not to include unrelated changes in a single PR.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Sure, I will follow this rule going forward.

@is_torch_compile
@slow
Expand Down Expand Up @@ -2013,7 +2012,7 @@ def test_compile_with_group_offloading(self):
model.eval()
# TODO: Can test for other group offloading kwargs later if needed.
group_offload_kwargs = {
"onload_device": "cuda",
"onload_device": torch_device,
"offload_device": "cpu",
"offload_type": "block_level",
"num_blocks_per_group": 1,
Expand Down