Skip to content

Commit a74e210

Browse files
authored
Release/3.1.0 (#4397)
## What type of PR is this? (check all applicable) This is the 3.1.0 release candidate. Minor bugfixes will be applied here during testing and then merged into main upon release.
2 parents 2bd3cf2 + ca5689d commit a74e210

30 files changed

+794
-784
lines changed

installer/create_installer.sh

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,7 @@ if [[ $(python -c 'from importlib.util import find_spec; print(find_spec("build"
4646
pip install --user build
4747
fi
4848

49+
rm -r ../build
4950
python -m build --wheel --outdir dist/ ../.
5051

5152
# ----------------------

invokeai/app/invocations/compel.py

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -302,6 +302,29 @@ def invoke(self, context: InvocationContext) -> ConditioningOutput:
302302

303303
add_time_ids = torch.tensor([original_size + crop_coords + target_size])
304304

305+
# [1, 77, 768], [1, 154, 1280]
306+
if c1.shape[1] < c2.shape[1]:
307+
c1 = torch.cat(
308+
[
309+
c1,
310+
torch.zeros(
311+
(c1.shape[0], c2.shape[1] - c1.shape[1], c1.shape[2]), device=c1.device, dtype=c1.dtype
312+
),
313+
],
314+
dim=1,
315+
)
316+
317+
elif c1.shape[1] > c2.shape[1]:
318+
c2 = torch.cat(
319+
[
320+
c2,
321+
torch.zeros(
322+
(c2.shape[0], c1.shape[1] - c2.shape[1], c2.shape[2]), device=c2.device, dtype=c2.dtype
323+
),
324+
],
325+
dim=1,
326+
)
327+
305328
conditioning_data = ConditioningFieldData(
306329
conditionings=[
307330
SDXLConditioningInfo(

invokeai/app/invocations/latent.py

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -445,8 +445,14 @@ def invoke(self, context: InvocationContext) -> LatentsOutput:
445445
latents = context.services.latents.get(self.latents.latents_name)
446446
if seed is None:
447447
seed = self.latents.seed
448-
else:
448+
449+
if noise is not None and noise.shape[1:] != latents.shape[1:]:
450+
raise Exception(f"Incompatable 'noise' and 'latents' shapes: {latents.shape=} {noise.shape=}")
451+
452+
elif noise is not None:
449453
latents = torch.zeros_like(noise)
454+
else:
455+
raise Exception("'latents' or 'noise' must be provided!")
450456

451457
if seed is None:
452458
seed = 0

invokeai/app/invocations/primitives.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -109,7 +109,7 @@ class IntegerCollectionInvocation(BaseInvocation):
109109
"""A collection of integer primitive values"""
110110

111111
collection: list[int] = InputField(
112-
default=0, description="The collection of integer values", ui_type=UIType.IntegerCollection
112+
default_factory=list, description="The collection of integer values", ui_type=UIType.IntegerCollection
113113
)
114114

115115
def invoke(self, context: InvocationContext) -> IntegerCollectionOutput:
@@ -261,7 +261,7 @@ class ImageCollectionInvocation(BaseInvocation):
261261
"""A collection of image primitive values"""
262262

263263
collection: list[ImageField] = InputField(
264-
default=0, description="The collection of image values", ui_type=UIType.ImageCollection
264+
default_factory=list, description="The collection of image values", ui_type=UIType.ImageCollection
265265
)
266266

267267
def invoke(self, context: InvocationContext) -> ImageCollectionOutput:
@@ -451,7 +451,9 @@ class ConditioningCollectionInvocation(BaseInvocation):
451451
"""A collection of conditioning tensor primitive values"""
452452

453453
collection: list[ConditioningField] = InputField(
454-
default=0, description="The collection of conditioning tensors", ui_type=UIType.ConditioningCollection
454+
default_factory=list,
455+
description="The collection of conditioning tensors",
456+
ui_type=UIType.ConditioningCollection,
455457
)
456458

457459
def invoke(self, context: InvocationContext) -> ConditioningCollectionOutput:

invokeai/app/services/config/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,3 +6,4 @@
66
InvokeAIAppConfig,
77
get_invokeai_config,
88
)
9+
from .base import PagingArgumentParser # noqa F401

invokeai/backend/install/migrate_to_3.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -492,10 +492,10 @@ def _parse_legacy_yamlfile(root: Path, initfile: Path) -> ModelPaths:
492492
loras = paths.get("lora_dir", "loras")
493493
controlnets = paths.get("controlnet_dir", "controlnets")
494494
return ModelPaths(
495-
models=root / models,
496-
embeddings=root / embeddings,
497-
loras=root / loras,
498-
controlnets=root / controlnets,
495+
models=root / models if models else None,
496+
embeddings=root / embeddings if embeddings else None,
497+
loras=root / loras if loras else None,
498+
controlnets=root / controlnets if controlnets else None,
499499
)
500500

501501

invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -265,7 +265,7 @@ def einsum_op_mps_v1(self, q, k, v):
265265
if q.shape[1] <= 4096: # (512x512) max q.shape[1]: 4096
266266
return self.einsum_lowest_level(q, k, v, None, None, None)
267267
else:
268-
slice_size = math.floor(2**30 / (q.shape[0] * q.shape[1]))
268+
slice_size = math.floor(2 ** 30 / (q.shape[0] * q.shape[1]))
269269
return self.einsum_op_slice_dim1(q, k, v, slice_size)
270270

271271
def einsum_op_mps_v2(self, q, k, v):

invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py

Lines changed: 4 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -215,10 +215,7 @@ def do_controlnet_step(
215215
dim=0,
216216
),
217217
}
218-
(
219-
encoder_hidden_states,
220-
encoder_attention_mask,
221-
) = self._concat_conditionings_for_batch(
218+
(encoder_hidden_states, encoder_attention_mask,) = self._concat_conditionings_for_batch(
222219
conditioning_data.unconditioned_embeddings.embeds,
223220
conditioning_data.text_embeddings.embeds,
224221
)
@@ -280,32 +277,23 @@ def do_unet_step(
280277
wants_cross_attention_control = len(cross_attention_control_types_to_do) > 0
281278

282279
if wants_cross_attention_control:
283-
(
284-
unconditioned_next_x,
285-
conditioned_next_x,
286-
) = self._apply_cross_attention_controlled_conditioning(
280+
(unconditioned_next_x, conditioned_next_x,) = self._apply_cross_attention_controlled_conditioning(
287281
sample,
288282
timestep,
289283
conditioning_data,
290284
cross_attention_control_types_to_do,
291285
**kwargs,
292286
)
293287
elif self.sequential_guidance:
294-
(
295-
unconditioned_next_x,
296-
conditioned_next_x,
297-
) = self._apply_standard_conditioning_sequentially(
288+
(unconditioned_next_x, conditioned_next_x,) = self._apply_standard_conditioning_sequentially(
298289
sample,
299290
timestep,
300291
conditioning_data,
301292
**kwargs,
302293
)
303294

304295
else:
305-
(
306-
unconditioned_next_x,
307-
conditioned_next_x,
308-
) = self._apply_standard_conditioning(
296+
(unconditioned_next_x, conditioned_next_x,) = self._apply_standard_conditioning(
309297
sample,
310298
timestep,
311299
conditioning_data,

invokeai/backend/stable_diffusion/image_degradation/bsrgan.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -395,7 +395,7 @@ def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
395395
D = np.diag(np.random.rand(3))
396396
U = orth(np.random.rand(3, 3))
397397
conv = np.dot(np.dot(np.transpose(U), D), U)
398-
img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L**2 * conv), img.shape[:2]).astype(np.float32)
398+
img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
399399
img = np.clip(img, 0.0, 1.0)
400400
return img
401401

@@ -413,7 +413,7 @@ def add_speckle_noise(img, noise_level1=2, noise_level2=25):
413413
D = np.diag(np.random.rand(3))
414414
U = orth(np.random.rand(3, 3))
415415
conv = np.dot(np.dot(np.transpose(U), D), U)
416-
img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L**2 * conv), img.shape[:2]).astype(np.float32)
416+
img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
417417
img = np.clip(img, 0.0, 1.0)
418418
return img
419419

invokeai/backend/stable_diffusion/image_degradation/bsrgan_light.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -399,7 +399,7 @@ def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
399399
D = np.diag(np.random.rand(3))
400400
U = orth(np.random.rand(3, 3))
401401
conv = np.dot(np.dot(np.transpose(U), D), U)
402-
img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L**2 * conv), img.shape[:2]).astype(np.float32)
402+
img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
403403
img = np.clip(img, 0.0, 1.0)
404404
return img
405405

@@ -417,7 +417,7 @@ def add_speckle_noise(img, noise_level1=2, noise_level2=25):
417417
D = np.diag(np.random.rand(3))
418418
U = orth(np.random.rand(3, 3))
419419
conv = np.dot(np.dot(np.transpose(U), D), U)
420-
img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L**2 * conv), img.shape[:2]).astype(np.float32)
420+
img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
421421
img = np.clip(img, 0.0, 1.0)
422422
return img
423423

0 commit comments

Comments (0)