Commit f268959

Formatting

Author: Billy
1 parent 551c78d · commit f268959

11 files changed (+64, -60 lines)


invokeai/app/invocations/baseinvocation.py

Lines changed: 1 addition & 1 deletion
@@ -417,7 +417,7 @@ def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None

        ui_type = field.json_schema_extra.get("ui_type", None)
        if isinstance(ui_type, str) and ui_type.startswith("DEPRECATED_"):
-            logger.warn(f"\"UIType.{ui_type.split('_')[-1]}\" is deprecated, ignoring")
+            logger.warn(f'"UIType.{ui_type.split("_")[-1]}" is deprecated, ignoring')
            field.json_schema_extra.pop("ui_type")
    return None
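Note: the change above only flips the f-string quoting so that the double quotes in the message no longer need backslash escapes; the rendered text is identical. A minimal sketch, using a made-up ui_type value:

ui_type = "DEPRECATED_Scheduler"  # hypothetical value, for illustration only

# Old spelling: double-quoted f-string, inner double quotes escaped.
old = f"\"UIType.{ui_type.split('_')[-1]}\" is deprecated, ignoring"

# New spelling: single-quoted f-string, no escapes needed.
new = f'"UIType.{ui_type.split("_")[-1]}" is deprecated, ignoring'

assert old == new
print(new)  # "UIType.Scheduler" is deprecated, ignoring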

invokeai/app/invocations/compel.py

Lines changed: 1 addition & 1 deletion
@@ -513,7 +513,7 @@ def log_tokenization_for_text(
            usedTokens += 1

    if usedTokens > 0:
-        print(f'\n>> [TOKENLOG] Tokens {display_label or ""} ({usedTokens}):')
+        print(f"\n>> [TOKENLOG] Tokens {display_label or ''} ({usedTokens}):")
        print(f"{tokenized}\x1b[0m")

    if discarded != "":

invokeai/app/invocations/segment_anything.py

Lines changed: 3 additions & 3 deletions
@@ -185,9 +185,9 @@ def _filter_masks(
            # Find the largest mask.
            return [max(masks, key=lambda x: float(x.sum()))]
        elif self.mask_filter == "highest_box_score":
-            assert (
-                bounding_boxes is not None
-            ), "Bounding boxes must be provided to use the 'highest_box_score' mask filter."
+            assert bounding_boxes is not None, (
+                "Bounding boxes must be provided to use the 'highest_box_score' mask filter."
+            )
            assert len(masks) == len(bounding_boxes)
            # Find the index of the bounding box with the highest score.
            # Note that we fallback to -1.0 if the score is None. This is mainly to satisfy the type checker. In most
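Note: the assert rewrite above moves the parentheses from the condition to the message; the condition and the raised AssertionError are unchanged. A standalone sketch with a placeholder bounding_boxes variable:

bounding_boxes = None  # placeholder; the real value comes from the invocation inputs

# Old layout: wrapped condition, message trailing the closing paren.
try:
    assert (
        bounding_boxes is not None
    ), "Bounding boxes must be provided to use the 'highest_box_score' mask filter."
except AssertionError as e:
    old_msg = str(e)

# New layout: inline condition, parenthesized message.
try:
    assert bounding_boxes is not None, (
        "Bounding boxes must be provided to use the 'highest_box_score' mask filter."
    )
except AssertionError as e:
    new_msg = str(e)

assert old_msg == new_msg  # identical error message either way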

invokeai/app/services/config/config_default.py

Lines changed: 3 additions & 3 deletions
@@ -476,9 +476,9 @@ def load_and_migrate_config(config_path: Path) -> InvokeAIAppConfig:
    try:
        # Meta is not included in the model fields, so we need to validate it separately
        config = InvokeAIAppConfig.model_validate(loaded_config_dict)
-        assert (
-            config.schema_version == CONFIG_SCHEMA_VERSION
-        ), f"Invalid schema version, expected {CONFIG_SCHEMA_VERSION}: {config.schema_version}"
+        assert config.schema_version == CONFIG_SCHEMA_VERSION, (
+            f"Invalid schema version, expected {CONFIG_SCHEMA_VERSION}: {config.schema_version}"
+        )
        return config
    except Exception as e:
        raise RuntimeError(f"Failed to load config file {config_path}: {e}") from e

invokeai/backend/image_util/pngwriter.py

Lines changed: 5 additions & 5 deletions
@@ -91,10 +91,10 @@ def normalize_prompt(self):

        switches = []
        switches.append(f'"{opt.prompt}"')
-        switches.append(f"-s{opt.steps or t2i.steps}")
-        switches.append(f"-W{opt.width or t2i.width}")
-        switches.append(f"-H{opt.height or t2i.height}")
-        switches.append(f"-C{opt.cfg_scale or t2i.cfg_scale}")
+        switches.append(f"-s{opt.steps or t2i.steps}")
+        switches.append(f"-W{opt.width or t2i.width}")
+        switches.append(f"-H{opt.height or t2i.height}")
+        switches.append(f"-C{opt.cfg_scale or t2i.cfg_scale}")
        switches.append(f"-A{opt.sampler_name or t2i.sampler_name}")
        # to do: put model name into the t2i object
        # switches.append(f'--model{t2i.model_name}')
@@ -109,7 +109,7 @@ def normalize_prompt(self):
        if opt.gfpgan_strength:
            switches.append(f"-G{opt.gfpgan_strength}")
        if opt.upscale:
-            switches.append(f'-U {" ".join([str(u) for u in opt.upscale])}')
+            switches.append(f"-U {' '.join([str(u) for u in opt.upscale])}")
        if opt.variation_amount > 0:
            switches.append(f"-v{opt.variation_amount}")
        if opt.with_variations:

invokeai/backend/model_manager/load/memory_snapshot.py

Lines changed: 1 addition & 1 deletion
@@ -70,7 +70,7 @@ def get_pretty_snapshot_diff(snapshot_1: Optional[MemorySnapshot], snapshot_2: O

    def get_msg_line(prefix: str, val1: int, val2: int) -> str:
        diff = val2 - val1
-        return f"{prefix: <30} ({(diff/GB):+5.3f}): {(val1/GB):5.3f}GB -> {(val2/GB):5.3f}GB\n"
+        return f"{prefix: <30} ({(diff / GB):+5.3f}): {(val1 / GB):5.3f}GB -> {(val2 / GB):5.3f}GB\n"

    msg = ""
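Note: only the spacing inside the replacement fields changes here; whitespace inside {...} never reaches the rendered string. A quick check, assuming GB is the usual 2**30 bytes constant used by this module:

GB = 2**30  # assumed value of the GB constant

prefix, val1, val2 = "RAM", 3 * GB, 5 * GB
diff = val2 - val1

old = f"{prefix: <30} ({(diff/GB):+5.3f}): {(val1/GB):5.3f}GB -> {(val2/GB):5.3f}GB\n"
new = f"{prefix: <30} ({(diff / GB):+5.3f}): {(val1 / GB):5.3f}GB -> {(val2 / GB):5.3f}GB\n"

assert old == new  # spacing inside the expression is purely cosmetic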

invokeai/backend/model_manager/load/model_cache/model_cache.py

Lines changed: 16 additions & 14 deletions
@@ -192,7 +192,7 @@ def put(self, key: str, model: AnyModel) -> None:
        self._cached_models[key] = cache_record
        self._cache_stack.append(key)
        self._logger.debug(
-            f"Added model {key} (Type: {model.__class__.__name__}, Wrap mode: {wrapped_model.__class__.__name__}, Model size: {size/MB:.2f}MB)"
+            f"Added model {key} (Type: {model.__class__.__name__}, Wrap mode: {wrapped_model.__class__.__name__}, Model size: {size / MB:.2f}MB)"
        )

    @synchronized
@@ -303,7 +303,7 @@ def _load_locked_model(self, cache_entry: CacheRecord, working_mem_bytes: Option
        # 2. If the model can't fit fully into VRAM, then unload all other models and load as much of the model as
        # possible.
        vram_bytes_freed = self._offload_unlocked_models(model_vram_needed, working_mem_bytes)
-        self._logger.debug(f"Unloaded models (if necessary): vram_bytes_freed={(vram_bytes_freed/MB):.2f}MB")
+        self._logger.debug(f"Unloaded models (if necessary): vram_bytes_freed={(vram_bytes_freed / MB):.2f}MB")

        # Check the updated vram_available after offloading.
        vram_available = self._get_vram_available(working_mem_bytes)
@@ -317,7 +317,7 @@ def _load_locked_model(self, cache_entry: CacheRecord, working_mem_bytes: Option
            vram_bytes_freed_from_own_model = self._move_model_to_ram(cache_entry, -vram_available)
            vram_available = self._get_vram_available(working_mem_bytes)
            self._logger.debug(
-                f"Unloaded {vram_bytes_freed_from_own_model/MB:.2f}MB from the model being locked ({cache_entry.key})."
+                f"Unloaded {vram_bytes_freed_from_own_model / MB:.2f}MB from the model being locked ({cache_entry.key})."
            )

        # Move as much of the model as possible into VRAM.
@@ -333,10 +333,12 @@ def _load_locked_model(self, cache_entry: CacheRecord, working_mem_bytes: Option
        self._logger.info(
            f"Loaded model '{cache_entry.key}' ({cache_entry.cached_model.model.__class__.__name__}) onto "
            f"{self._execution_device.type} device in {(time.time() - start_time):.2f}s. "
-            f"Total model size: {model_total_bytes/MB:.2f}MB, "
-            f"VRAM: {model_cur_vram_bytes/MB:.2f}MB ({loaded_percent:.1%})"
+            f"Total model size: {model_total_bytes / MB:.2f}MB, "
+            f"VRAM: {model_cur_vram_bytes / MB:.2f}MB ({loaded_percent:.1%})"
+        )
+        self._logger.debug(
+            f"Loaded model onto execution device: model_bytes_loaded={(model_bytes_loaded / MB):.2f}MB, "
        )
-        self._logger.debug(f"Loaded model onto execution device: model_bytes_loaded={(model_bytes_loaded/MB):.2f}MB, ")
        self._logger.debug(
            f"After loading: {self._get_vram_state_str(model_cur_vram_bytes, model_total_bytes, vram_available)}"
        )
@@ -495,10 +497,10 @@ def _get_vram_state_str(self, model_cur_vram_bytes: int, model_total_bytes: int,
        """Helper function for preparing a VRAM state log string."""
        model_cur_vram_bytes_percent = model_cur_vram_bytes / model_total_bytes if model_total_bytes > 0 else 0
        return (
-            f"model_total={model_total_bytes/MB:.0f} MB, "
-            + f"model_vram={model_cur_vram_bytes/MB:.0f} MB ({model_cur_vram_bytes_percent:.1%} %), "
+            f"model_total={model_total_bytes / MB:.0f} MB, "
+            + f"model_vram={model_cur_vram_bytes / MB:.0f} MB ({model_cur_vram_bytes_percent:.1%} %), "
            # + f"vram_total={int(self._max_vram_cache_size * GB)/MB:.0f} MB, "
-            + f"vram_available={(vram_available/MB):.0f} MB, "
+            + f"vram_available={(vram_available / MB):.0f} MB, "
        )

    def _offload_unlocked_models(self, vram_bytes_required: int, working_mem_bytes: Optional[int] = None) -> int:
@@ -509,7 +511,7 @@ def _offload_unlocked_models(self, vram_bytes_required: int, working_mem_bytes:
            int: The number of bytes freed based on believed model sizes. The actual change in VRAM may be different.
        """
        self._logger.debug(
-            f"Offloading unlocked models with goal of making room for {vram_bytes_required/MB:.2f}MB of VRAM."
+            f"Offloading unlocked models with goal of making room for {vram_bytes_required / MB:.2f}MB of VRAM."
        )
        vram_bytes_freed = 0
        # TODO(ryand): Give more thought to the offloading policy used here.
@@ -527,7 +529,7 @@ def _offload_unlocked_models(self, vram_bytes_required: int, working_mem_bytes:
            cache_entry_bytes_freed = self._move_model_to_ram(cache_entry, vram_bytes_to_free)
            if cache_entry_bytes_freed > 0:
                self._logger.debug(
-                    f"Unloaded {cache_entry.key} from VRAM to free {(cache_entry_bytes_freed/MB):.0f} MB."
+                    f"Unloaded {cache_entry.key} from VRAM to free {(cache_entry_bytes_freed / MB):.0f} MB."
                )
            vram_bytes_freed += cache_entry_bytes_freed

@@ -609,7 +611,7 @@ def make_room(self, bytes_needed: int) -> None:
        external references to the model, there's nothing that the cache can do about it, and those models will not be
        garbage-collected.
        """
-        self._logger.debug(f"Making room for {bytes_needed/MB:.2f}MB of RAM.")
+        self._logger.debug(f"Making room for {bytes_needed / MB:.2f}MB of RAM.")
        self._log_cache_state(title="Before dropping models:")

        ram_bytes_available = self._get_ram_available()
@@ -625,7 +627,7 @@ def make_room(self, bytes_needed: int) -> None:
            if not cache_entry.is_locked:
                ram_bytes_freed += cache_entry.cached_model.total_bytes()
                self._logger.debug(
-                    f"Dropping {model_key} from RAM cache to free {(cache_entry.cached_model.total_bytes()/MB):.2f}MB."
+                    f"Dropping {model_key} from RAM cache to free {(cache_entry.cached_model.total_bytes() / MB):.2f}MB."
                )
                self._delete_cache_entry(cache_entry)
                del cache_entry
@@ -650,7 +652,7 @@ def make_room(self, bytes_needed: int) -> None:
        gc.collect()

        TorchDevice.empty_cache()
-        self._logger.debug(f"Dropped {models_cleared} models to free {ram_bytes_freed/MB:.2f}MB of RAM.")
+        self._logger.debug(f"Dropped {models_cleared} models to free {ram_bytes_freed / MB:.2f}MB of RAM.")
        self._log_cache_state(title="After dropping models:")

    def _delete_cache_entry(self, cache_entry: CacheRecord) -> None:
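Note: the hunk at lines 333-344 is the only one in this file that changes line structure rather than just spacing: the over-long single-line debug call is split so the f-string sits on its own line inside the call's parentheses, and the logged text is unchanged. A sketch using the standard logging module in place of self._logger, with MB assumed to be 2**20:

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("model_cache_example")  # stand-in for self._logger

MB = 2**20  # assumed value of the MB constant
model_bytes_loaded = 512 * MB

# Old: one over-long line.
logger.debug(f"Loaded model onto execution device: model_bytes_loaded={(model_bytes_loaded/MB):.2f}MB, ")

# New: the same call, wrapped inside the call's parentheses.
logger.debug(
    f"Loaded model onto execution device: model_bytes_loaded={(model_bytes_loaded / MB):.2f}MB, "
)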

invokeai/backend/model_manager/merge.py

Lines changed: 9 additions & 9 deletions
@@ -115,19 +115,19 @@ def merge_diffusion_models_and_save(
        base_models: Set[BaseModelType] = set()
        variant = None if self._installer.app_config.precision == "float32" else "fp16"

-        assert (
-            len(model_keys) <= 2 or interp == MergeInterpolationMethod.AddDifference
-        ), "When merging three models, only the 'add_difference' merge method is supported"
+        assert len(model_keys) <= 2 or interp == MergeInterpolationMethod.AddDifference, (
+            "When merging three models, only the 'add_difference' merge method is supported"
+        )

        for key in model_keys:
            info = store.get_model(key)
            model_names.append(info.name)
-            assert isinstance(
-                info, MainDiffusersConfig
-            ), f"{info.name} ({info.key}) is not a diffusers model. It must be optimized before merging"
-            assert info.variant == ModelVariantType(
-                "normal"
-            ), f"{info.name} ({info.key}) is a {info.variant} model, which cannot currently be merged"
+            assert isinstance(info, MainDiffusersConfig), (
+                f"{info.name} ({info.key}) is not a diffusers model. It must be optimized before merging"
+            )
+            assert info.variant == ModelVariantType("normal"), (
+                f"{info.name} ({info.key}) is a {info.variant} model, which cannot currently be merged"
+            )

            # tally base models used
            base_models.add(info.base)

invokeai/backend/model_manager/util/libc_util.py

Lines changed: 8 additions & 6 deletions
@@ -37,19 +37,21 @@ class Struct_mallinfo2(ctypes.Structure):

    def __str__(self) -> str:
        s = ""
-        s += f"{'arena': <10}= {(self.arena/2**30):15.5f} # Non-mmapped space allocated (GB) (uordblks + fordblks)\n"
+        s += (
+            f"{'arena': <10}= {(self.arena / 2**30):15.5f} # Non-mmapped space allocated (GB) (uordblks + fordblks)\n"
+        )
        s += f"{'ordblks': <10}= {(self.ordblks): >15} # Number of free chunks\n"
        s += f"{'smblks': <10}= {(self.smblks): >15} # Number of free fastbin blocks \n"
        s += f"{'hblks': <10}= {(self.hblks): >15} # Number of mmapped regions \n"
-        s += f"{'hblkhd': <10}= {(self.hblkhd/2**30):15.5f} # Space allocated in mmapped regions (GB)\n"
+        s += f"{'hblkhd': <10}= {(self.hblkhd / 2**30):15.5f} # Space allocated in mmapped regions (GB)\n"
        s += f"{'usmblks': <10}= {(self.usmblks): >15} # Unused\n"
-        s += f"{'fsmblks': <10}= {(self.fsmblks/2**30):15.5f} # Space in freed fastbin blocks (GB)\n"
+        s += f"{'fsmblks': <10}= {(self.fsmblks / 2**30):15.5f} # Space in freed fastbin blocks (GB)\n"
        s += (
-            f"{'uordblks': <10}= {(self.uordblks/2**30):15.5f} # Space used by in-use allocations (non-mmapped)"
+            f"{'uordblks': <10}= {(self.uordblks / 2**30):15.5f} # Space used by in-use allocations (non-mmapped)"
            " (GB)\n"
        )
-        s += f"{'fordblks': <10}= {(self.fordblks/2**30):15.5f} # Space in free blocks (non-mmapped) (GB)\n"
-        s += f"{'keepcost': <10}= {(self.keepcost/2**30):15.5f} # Top-most, releasable space (GB)\n"
+        s += f"{'fordblks': <10}= {(self.fordblks / 2**30):15.5f} # Space in free blocks (non-mmapped) (GB)\n"
+        s += f"{'keepcost': <10}= {(self.keepcost / 2**30):15.5f} # Top-most, releasable space (GB)\n"
        return s

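Note: besides the operator spacing, the arena line is wrapped in parentheses because it would otherwise run past the line-length limit; the parentheses only group the expression and the appended string is unchanged. A sketch with a stand-in value for self.arena:

arena = 3 * 2**30  # stand-in for self.arena, to keep the snippet self-contained

s_old = ""
s_old += f"{'arena': <10}= {(arena/2**30):15.5f} # Non-mmapped space allocated (GB) (uordblks + fordblks)\n"

s_new = ""
s_new += (
    f"{'arena': <10}= {(arena / 2**30):15.5f} # Non-mmapped space allocated (GB) (uordblks + fordblks)\n"
)

assert s_old == s_new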

invokeai/backend/patches/lora_conversions/sdxl_lora_conversion_utils.py

Lines changed: 11 additions & 11 deletions
@@ -73,36 +73,36 @@ def _make_sdxl_unet_conversion_map() -> List[Tuple[str, str]]:
        for j in range(2):
            # loop over resnets/attentions for downblocks
            hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
-            sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
+            sd_down_res_prefix = f"input_blocks.{3 * i + j + 1}.0."
            unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

            if i < 3:
                # no attention layers in down_blocks.3
                hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
-                sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
+                sd_down_atn_prefix = f"input_blocks.{3 * i + j + 1}.1."
                unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

        for j in range(3):
            # loop over resnets/attentions for upblocks
            hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
-            sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
+            sd_up_res_prefix = f"output_blocks.{3 * i + j}.0."
            unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

            # if i > 0: commentout for sdxl
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
-            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
+            sd_up_atn_prefix = f"output_blocks.{3 * i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

        if i < 3:
            # no downsample in down_blocks.3
            hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
-            sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
+            sd_downsample_prefix = f"input_blocks.{3 * (i + 1)}.0.op."
            unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

            # no upsample in up_blocks.3
            hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
-            sd_upsample_prefix = f"output_blocks.{3*i + 2}.{2}."  # change for sdxl
+            sd_upsample_prefix = f"output_blocks.{3 * i + 2}.{2}."  # change for sdxl
            unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

    hf_mid_atn_prefix = "mid_block.attentions.0."
@@ -111,7 +111,7 @@ def _make_sdxl_unet_conversion_map() -> List[Tuple[str, str]]:

    for j in range(2):
        hf_mid_res_prefix = f"mid_block.resnets.{j}."
-        sd_mid_res_prefix = f"middle_block.{2*j}."
+        sd_mid_res_prefix = f"middle_block.{2 * j}."
        unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))

    unet_conversion_map_resnet = [
@@ -133,13 +133,13 @@ def _make_sdxl_unet_conversion_map() -> List[Tuple[str, str]]:
        unet_conversion_map.append((sd, hf))

    for j in range(2):
-        hf_time_embed_prefix = f"time_embedding.linear_{j+1}."
-        sd_time_embed_prefix = f"time_embed.{j*2}."
+        hf_time_embed_prefix = f"time_embedding.linear_{j + 1}."
+        sd_time_embed_prefix = f"time_embed.{j * 2}."
        unet_conversion_map.append((sd_time_embed_prefix, hf_time_embed_prefix))

    for j in range(2):
-        hf_label_embed_prefix = f"add_embedding.linear_{j+1}."
-        sd_label_embed_prefix = f"label_emb.0.{j*2}."
+        hf_label_embed_prefix = f"add_embedding.linear_{j + 1}."
+        sd_label_embed_prefix = f"label_emb.0.{j * 2}."
        unet_conversion_map.append((sd_label_embed_prefix, hf_label_embed_prefix))

    unet_conversion_map.append(("input_blocks.0.0.", "conv_in."))
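Note: as in the other files, the spaces added around * and + inside the replacement fields are purely cosmetic; the generated SD/HF prefix strings are identical. A quick check with illustrative loop indices:

i, j = 1, 0  # illustrative indices; the real loops walk the UNet block layout

assert f"input_blocks.{3*i + j + 1}.0." == f"input_blocks.{3 * i + j + 1}.0."
assert f"time_embed.{j*2}." == f"time_embed.{j * 2}."
print(f"input_blocks.{3 * i + j + 1}.0.")  # input_blocks.4.0.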
