From c8bfec86b3d1a548008c6a7a8cd81bdb447d9787 Mon Sep 17 00:00:00 2001
From: AIGCZero
Date: Mon, 15 Sep 2025 13:50:49 +0800
Subject: [PATCH 1/4] Add TextEncodeQwenImageEdit node with intelligent scaling

- Implements Qwen image editing functionality with CLIP text encoding
- Features intelligent scaling algorithm selection:
  - Uses 'area' method for downscaling to preserve details
  - Uses 'lanczos' method for upscaling for better quality
- Supports optional VAE encoding for reference latents
- Maintains aspect ratio with 'disabled' crop method
- Scales images to a ~1024x1024-pixel target area with 8-pixel alignment
---
 comfy_extras/nodes_qwen.py | 28 ++++++++++++++++++++++------
 1 file changed, 22 insertions(+), 6 deletions(-)
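
The selection rule reduces to the standalone sketch below (a minimal
illustration; the helper name pick_scale is ours, not part of the node):

    import math

    def pick_scale(width, height, total=1024 * 1024):
        # Scale to ~total pixels, snapping each side down to a multiple of 8.
        scale_by = math.sqrt(total / (width * height))
        new_w = math.floor(width * scale_by / 8) * 8
        new_h = math.floor(height * scale_by / 8) * 8
        # Downscaling uses "area" to preserve detail; upscaling uses "lanczos".
        method = "area" if (new_w < width or new_h < height) else "lanczos"
        return new_w, new_h, method

    print(pick_scale(1920, 1080))  # (1360, 768, 'area')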

diff --git a/comfy_extras/nodes_qwen.py b/comfy_extras/nodes_qwen.py
index fff89556f4fb..afba4006916d 100644
--- a/comfy_extras/nodes_qwen.py
+++ b/comfy_extras/nodes_qwen.py
@@ -1,4 +1,4 @@
-import node_helpers
+import node_helpers
 import comfy.utils
 import math
 
@@ -13,13 +13,15 @@ def INPUT_TYPES(s):
             "optional": {"vae": ("VAE", ),
                          "image": ("IMAGE", ),}}
 
-    RETURN_TYPES = ("CONDITIONING",)
+    RETURN_TYPES = ("CONDITIONING", "IMAGE", "LATENT")
     FUNCTION = "encode"
 
     CATEGORY = "advanced/conditioning"
 
     def encode(self, clip, prompt, vae=None, image=None):
         ref_latent = None
+        output_image = None
+
         if image is None:
             images = []
         else:
@@ -27,12 +29,23 @@ def encode(self, clip, prompt, vae=None, image=None):
             total = int(1024 * 1024)
 
             scale_by = math.sqrt(total / (samples.shape[3] * samples.shape[2]))
-            width = round(samples.shape[3] * scale_by)
-            height = round(samples.shape[2] * scale_by)
 
-            s = comfy.utils.common_upscale(samples, width, height, "area", "disabled")
+            width = math.floor(samples.shape[3] * scale_by / 8) * 8
+            height = math.floor(samples.shape[2] * scale_by / 8) * 8
+
+            original_width = samples.shape[3]
+            original_height = samples.shape[2]
+
+            if width < original_width or height < original_height:
+                upscale_method = "area"
+            else:
+                upscale_method = "lanczos"
+
+            s = comfy.utils.common_upscale(samples, width, height, upscale_method, "disabled")
             image = s.movedim(1, -1)
             images = [image[:, :, :, :3]]
+            output_image = image[:, :, :, :3]
+
             if vae is not None:
                 ref_latent = vae.encode(image[:, :, :, :3])
 
@@ -40,7 +53,10 @@ def encode(self, clip, prompt, vae=None, image=None):
         conditioning = clip.encode_from_tokens_scheduled(tokens)
         if ref_latent is not None:
             conditioning = node_helpers.conditioning_set_values(conditioning, {"reference_latents": [ref_latent]}, append=True)
-        return (conditioning, )
+
+        latent_output = {"samples": ref_latent} if ref_latent is not None else None
+
+        return (conditioning, output_image, latent_output)
 
 
 NODE_CLASS_MAPPINGS = {

From cc1c5da4f63033a70100cd3ef119413ebd417426 Mon Sep 17 00:00:00 2001
From: AIGCZero
Date: Mon, 15 Sep 2025 13:53:09 +0800
Subject: [PATCH 2/4] Add intelligent scaling algorithm selection for Qwen image edit node

- Implement automatic algorithm selection: area for downscaling, lanczos for upscaling
- Improve image quality by choosing the optimal scaling method based on the target size
- Add explanatory comments for better code documentation
- Ensure 8-pixel alignment for better compatibility with diffusion models
---
 comfy_extras/nodes_qwen.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)
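
For reference, ComfyUI's LATENT type is a dict holding a "samples" tensor,
which is why ref_latent gets wrapped before being returned. A minimal
sketch (the 4-channel shape is an assumption for illustration; the actual
channel count depends on the model's VAE):

    import torch

    latent_tensor = torch.zeros(1, 4, 128, 128)  # stand-in for vae.encode(...)
    latent_output = {"samples": latent_tensor}

    # Downstream latent nodes read the tensor back out of the dict:
    assert latent_output["samples"].shape == (1, 4, 128, 128)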

diff --git a/comfy_extras/nodes_qwen.py b/comfy_extras/nodes_qwen.py
index afba4006916d..c7e9fdecc3c5 100644
--- a/comfy_extras/nodes_qwen.py
+++ b/comfy_extras/nodes_qwen.py
@@ -29,16 +29,19 @@ def encode(self, clip, prompt, vae=None, image=None):
             total = int(1024 * 1024)
 
             scale_by = math.sqrt(total / (samples.shape[3] * samples.shape[2]))
-
+            # Scaling rule: snap each dimension down to a multiple of 8
             width = math.floor(samples.shape[3] * scale_by / 8) * 8
             height = math.floor(samples.shape[2] * scale_by / 8) * 8
 
+            # Choose the algorithm by scale direction: area to shrink, lanczos to enlarge
             original_width = samples.shape[3]
             original_height = samples.shape[2]
 
             if width < original_width or height < original_height:
+                # Downscaling: use the area algorithm to preserve detail
                 upscale_method = "area"
             else:
+                # Upscaling: use the lanczos algorithm for better quality
                 upscale_method = "lanczos"
 
             s = comfy.utils.common_upscale(samples, width, height, upscale_method, "disabled")
@@ -54,6 +57,7 @@ def encode(self, clip, prompt, vae=None, image=None):
         if ref_latent is not None:
             conditioning = node_helpers.conditioning_set_values(conditioning, {"reference_latents": [ref_latent]}, append=True)
 
+        # Wrap ref_latent in the dict format required by ComfyUI's LATENT type
         latent_output = {"samples": ref_latent} if ref_latent is not None else None
 
         return (conditioning, output_image, latent_output)

From 2debbcf50e296ac2b687bbfc67d2353fa5bc0818 Mon Sep 17 00:00:00 2001
From: AIGCZero
Date: Mon, 15 Sep 2025 14:00:33 +0800
Subject: [PATCH 3/4] Remove explanatory comments from Qwen image edit node

---
 comfy_extras/nodes_qwen.py | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/comfy_extras/nodes_qwen.py b/comfy_extras/nodes_qwen.py
index c7e9fdecc3c5..afba4006916d 100644
--- a/comfy_extras/nodes_qwen.py
+++ b/comfy_extras/nodes_qwen.py
@@ -29,19 +29,16 @@ def encode(self, clip, prompt, vae=None, image=None):
             total = int(1024 * 1024)
 
             scale_by = math.sqrt(total / (samples.shape[3] * samples.shape[2]))
-            # Scaling rule: snap each dimension down to a multiple of 8
+
             width = math.floor(samples.shape[3] * scale_by / 8) * 8
             height = math.floor(samples.shape[2] * scale_by / 8) * 8
 
-            # Choose the algorithm by scale direction: area to shrink, lanczos to enlarge
             original_width = samples.shape[3]
             original_height = samples.shape[2]
 
             if width < original_width or height < original_height:
-                # Downscaling: use the area algorithm to preserve detail
                 upscale_method = "area"
             else:
-                # Upscaling: use the lanczos algorithm for better quality
                 upscale_method = "lanczos"
 
             s = comfy.utils.common_upscale(samples, width, height, upscale_method, "disabled")
@@ -57,7 +54,6 @@ def encode(self, clip, prompt, vae=None, image=None):
         if ref_latent is not None:
             conditioning = node_helpers.conditioning_set_values(conditioning, {"reference_latents": [ref_latent]}, append=True)
 
-        # Wrap ref_latent in the dict format required by ComfyUI's LATENT type
         latent_output = {"samples": ref_latent} if ref_latent is not None else None
 
         return (conditioning, output_image, latent_output)

From ffad7324822ea7b291086416f708f7ab95ca110e Mon Sep 17 00:00:00 2001
From: AIGCZero
Date: Thu, 25 Sep 2025 13:33:35 +0800
Subject: [PATCH 4/4] Fix alpha channel handling in Qwen image edit nodes

- Fix TextEncodeQwenImageEditPlus to ensure only RGB channels are used
- Prevents RuntimeError when input images have alpha channels
- Ensures proper tensor shape for vision language models
---
 comfy_extras/nodes_qwen.py | 83 ++++++++++++++++++++++++++++----------
 1 file changed, 61 insertions(+), 22 deletions(-)
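
The fix comes down to slicing off the alpha channel before tensors reach
the vision encoder or the VAE. A minimal sketch of the guard (the shapes
are illustrative; ComfyUI IMAGE tensors are laid out
[batch, height, width, channels]):

    import torch

    rgba = torch.rand(1, 512, 512, 4)  # stand-in for a loaded RGBA image
    rgb = rgba[:, :, :, :3]            # drop alpha before CLIP vision / VAE
    assert rgb.shape == (1, 512, 512, 3)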

diff --git a/comfy_extras/nodes_qwen.py b/comfy_extras/nodes_qwen.py
index afba4006916d..7876d0e80b46 100644
--- a/comfy_extras/nodes_qwen.py
+++ b/comfy_extras/nodes_qwen.py
@@ -1,4 +1,4 @@
-import node_helpers
+import node_helpers
 import comfy.utils
 import math
 
@@ -13,15 +13,13 @@ def INPUT_TYPES(s):
             "optional": {"vae": ("VAE", ),
                          "image": ("IMAGE", ),}}
 
-    RETURN_TYPES = ("CONDITIONING", "IMAGE", "LATENT")
+    RETURN_TYPES = ("CONDITIONING",)
     FUNCTION = "encode"
 
     CATEGORY = "advanced/conditioning"
 
     def encode(self, clip, prompt, vae=None, image=None):
         ref_latent = None
-        output_image = None
-
         if image is None:
             images = []
         else:
@@ -29,23 +27,12 @@ def encode(self, clip, prompt, vae=None, image=None):
             total = int(1024 * 1024)
 
             scale_by = math.sqrt(total / (samples.shape[3] * samples.shape[2]))
+            width = round(samples.shape[3] * scale_by)
+            height = round(samples.shape[2] * scale_by)
 
-            width = math.floor(samples.shape[3] * scale_by / 8) * 8
-            height = math.floor(samples.shape[2] * scale_by / 8) * 8
-
-            original_width = samples.shape[3]
-            original_height = samples.shape[2]
-
-            if width < original_width or height < original_height:
-                upscale_method = "area"
-            else:
-                upscale_method = "lanczos"
-
-            s = comfy.utils.common_upscale(samples, width, height, upscale_method, "disabled")
+            s = comfy.utils.common_upscale(samples, width, height, "area", "disabled")
             image = s.movedim(1, -1)
             images = [image[:, :, :, :3]]
-            output_image = image[:, :, :, :3]
-
             if vae is not None:
                 ref_latent = vae.encode(image[:, :, :, :3])
 
@@ -53,12 +40,64 @@ def encode(self, clip, prompt, vae=None, image=None):
         conditioning = clip.encode_from_tokens_scheduled(tokens)
         if ref_latent is not None:
             conditioning = node_helpers.conditioning_set_values(conditioning, {"reference_latents": [ref_latent]}, append=True)
-
-        latent_output = {"samples": ref_latent} if ref_latent is not None else None
-
-        return (conditioning, output_image, latent_output)
+        return (conditioning, )
+
+
+class TextEncodeQwenImageEditPlus:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+            "clip": ("CLIP", ),
+            "prompt": ("STRING", {"multiline": True, "dynamicPrompts": True}),
+            },
+            "optional": {"vae": ("VAE", ),
+                         "image1": ("IMAGE", ),
+                         "image2": ("IMAGE", ),
+                         "image3": ("IMAGE", ),
+                         }}
+
+    RETURN_TYPES = ("CONDITIONING",)
+    FUNCTION = "encode"
+
+    CATEGORY = "advanced/conditioning"
+
+    def encode(self, clip, prompt, vae=None, image1=None, image2=None, image3=None):
+        ref_latents = []
+        images = [image1, image2, image3]
+        images_vl = []
+        llama_template = "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n"
+        image_prompt = ""
+
+        for i, image in enumerate(images):
+            if image is not None:
+                samples = image.movedim(-1, 1)
+                total = int(384 * 384)
+
+                scale_by = math.sqrt(total / (samples.shape[3] * samples.shape[2]))
+                width = round(samples.shape[3] * scale_by)
+                height = round(samples.shape[2] * scale_by)
+
+                s = comfy.utils.common_upscale(samples, width, height, "area", "disabled")
+                images_vl.append(s.movedim(1, -1)[:, :, :, :3])
+                if vae is not None:
+                    total = int(1024 * 1024)
+                    scale_by = math.sqrt(total / (samples.shape[3] * samples.shape[2]))
+                    width = round(samples.shape[3] * scale_by / 8.0) * 8
+                    height = round(samples.shape[2] * scale_by / 8.0) * 8
+
+                    s = comfy.utils.common_upscale(samples, width, height, "area", "disabled")
+                    ref_latents.append(vae.encode(s.movedim(1, -1)[:, :, :, :3]))
+
+                image_prompt += "Picture {}: <|vision_start|><|image_pad|><|vision_end|>".format(i + 1)
+
+        tokens = clip.tokenize(image_prompt + prompt, images=images_vl, llama_template=llama_template)
+        conditioning = clip.encode_from_tokens_scheduled(tokens)
+        if len(ref_latents) > 0:
+            conditioning = node_helpers.conditioning_set_values(conditioning, {"reference_latents": ref_latents}, append=True)
+        return (conditioning, )
 
 
 NODE_CLASS_MAPPINGS = {
     "TextEncodeQwenImageEdit": TextEncodeQwenImageEdit,
+    "TextEncodeQwenImageEditPlus": TextEncodeQwenImageEditPlus,
 }
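
A closing note on TextEncodeQwenImageEditPlus: each input image is resized
twice, once to roughly 384x384 total pixels for the vision-language
encoder and once to roughly 1024x1024 total pixels (8-pixel aligned) for
the VAE reference latent. A sketch of the two sizes it derives (the helper
name is ours, for illustration only):

    import math

    def plus_node_sizes(width, height):
        # Vision-language input: ~384*384 pixels, aspect ratio preserved.
        vl_scale = math.sqrt(384 * 384 / (width * height))
        vl_size = (round(width * vl_scale), round(height * vl_scale))
        # VAE reference: ~1024*1024 pixels, rounded to multiples of 8.
        vae_scale = math.sqrt(1024 * 1024 / (width * height))
        vae_size = (round(width * vae_scale / 8.0) * 8,
                    round(height * vae_scale / 8.0) * 8)
        return vl_size, vae_size

    print(plus_node_sizes(1920, 1080))  # ((512, 288), (1368, 768))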