├── .gitignore
├── README.md
├── __init__.py
├── img
│   ├── node1.png
│   └── node2.png
├── key.json
├── nodes
│   ├── GLM.py
│   ├── IPAdapterLayerWeight.py
│   └── imageUtils.py
├── requirements.txt
└── web
    └── js
        └── showText.js
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea/
2 | .venv/
3 | __pycache__/
4 | *.bak
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ComfyUI_fsdymy
2 | **Features:**
3 |
4 | 1. Save/preview images with metadata stripped, i.e. the workflow is not embedded in the file
5 | 2. Expand prompts with a Chinese large language model (ZhiPu AI)
6 | 3. IPAdapter per-layer weight node
7 |
8 |
9 |
10 |
11 |
12 | * ZhiPu AI: https://bigmodel.cn/
13 | * Configure the API key in `key.json` under the plugin directory `ComfyUI\custom_nodes\ComfyUI_fsdymy`
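14 |
15 | A minimal `key.json` looks like the one shipped with this repo (replace `"xxx"` with your own ZhiPu AI API key; the `url` is the default ZhiPu endpoint):
16 |
17 | ```json
18 | {
19 |     "url": "https://open.bigmodel.cn/api/paas/v4",
20 |     "key": "xxx"
21 | }
22 | ```
23 |
24 | The only Python dependency is the `zhipuai` SDK, installable with `pip install -r requirements.txt`.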
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
1 | from .nodes.GLM import ZhiPuAiNode, ShowText
2 | from .nodes.IPAdapterLayerWeight import IPAdapterLayerWeight
3 | from .nodes.imageUtils import SaveImageWithoutMetadata, PreviewImageWithoutMetadata
4 |
5 | NODE_CLASS_MAPPINGS = {
6 | "SaveImageWithoutMetadata": SaveImageWithoutMetadata,
7 | "PreviewImageWithoutMetadata": PreviewImageWithoutMetadata,
8 | "ZhiPuAiNode": ZhiPuAiNode,
9 | "ShowText": ShowText,
10 | "IPAdapterLayerWeight": IPAdapterLayerWeight,
11 | }
12 | NODE_DISPLAY_NAME_MAPPINGS = {
13 | "SaveImageWithoutMetadata": "Save Image Without Metadata (fsdymy)",
14 | "PreviewImageWithoutMetadata": "Preview Image Without Metadata (fsdymy)",
15 | "ZhiPuAiNode": "ZhiPu Ai Node (fsdymy)",
16 | "ShowText": "Show Text (fsdymy)",
17 | "IPAdapterLayerWeight": "IPAdapter Layer Weight (fsdymy)",
18 | }
19 |
20 | WEB_DIRECTORY = "./web"
21 | __all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS']
22 |
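23 | # ComfyUI reads NODE_CLASS_MAPPINGS / NODE_DISPLAY_NAME_MAPPINGS from this package to
24 | # register the nodes, and serves files under WEB_DIRECTORY (web/js/showText.js) to the UI.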
--------------------------------------------------------------------------------
/img/node1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fsdymy1024/ComfyUI_fsdymy/2be45bafe02569eb71c72df0a783986d0bfc1760/img/node1.png
--------------------------------------------------------------------------------
/img/node2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fsdymy1024/ComfyUI_fsdymy/2be45bafe02569eb71c72df0a783986d0bfc1760/img/node2.png
--------------------------------------------------------------------------------
/key.json:
--------------------------------------------------------------------------------
1 | {
2 | "url": "https://open.bigmodel.cn/api/paas/v4",
3 | "key": "xxx",
4 |     "key.json": "Fill in the ZhiPu AI api_url and key above."
5 | }
--------------------------------------------------------------------------------
/nodes/GLM.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | from zhipuai import ZhipuAI
4 |
5 |
6 | def zhipu_client(api_key=None):
7 | client = ZhipuAI(api_key=api_key)
8 | return client
9 |
10 | def from_file_get_key(file_path):
11 | with open(file_path, 'r', encoding='utf-8') as f:
12 | data = f.read()
13 | try:
14 | json_object = json.loads(data)
15 | # print(json_object)
16 | url = json_object.get('url')
17 | api_key = json_object.get('key')
18 | # print(url, api_key)
19 | return url, api_key
20 | except json.JSONDecodeError as e:
21 |         print(f"JSON decode error: {e}")
22 | return None, None
23 |
24 |
25 |
26 | class ZhiPuAiNode:
27 | def __init__(self):
28 | # self.__client = OpenAI()
29 |         self.session_history = []  # list used to store the conversation history
30 | # self.seed=0
31 |         self.system_content="Expand text-to-image prompts in English."
32 |
33 | @classmethod
34 | def INPUT_TYPES(cls):
35 | model_list=[
36 | "glm-4",
37 | "glm-4-0520",
38 | "glm-4-alltools",
39 | "glm-4v",
40 | "glm-4-flash",
41 | "glm-4-airx",
42 | "glm-4-air",
43 | "embedding-2",
44 | "charglm-3",
45 | "glm-3-turbo",
46 | ]
47 |         str_ai = """You are Creative Dream Painter, a graphic-creativity assistant. Your task is to generate creative, artistic imagery from the user's prompt. Your abilities are:
48 | 1. Understand the user's prompt and analyse the intent and needs behind it.
49 | 2. Combine advanced AI techniques with artistic creativity to produce unique imagery.
50 | 3. Offer a variety of styles and elements to suit different aesthetic preferences.
51 | 4. Answer in English.
52 | """
53 | return {
54 | "required": {
55 | # "api_key":("STRING", {"default": "bd5bcd05c6352b6c27875e5011813ecd.FVMVbSe0rzQ2ITe0", "multiline": False, "dynamicPrompts": False}),
56 | # "api_url":("STRING", {"default": "https://open.bigmodel.cn/api/paas/v4", "multiline": False, "dynamicPrompts": False}),
57 | "prompt": ("STRING", {"default": "1girl", "multiline": True, "dynamicPrompts": False}),
58 | "system_content": ("STRING", {"default": str_ai, "multiline": True,"dynamicPrompts": False}),
59 | "model": ( model_list, {"default": model_list[0]}),
60 | "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "step": 1}),
61 | "context_size":("INT", {"default": 1, "min": 0, "max":30, "step": 1}),
62 | },
63 | "hidden": {
64 | "unique_id": "UNIQUE_ID",
65 | "extra_pnginfo": "EXTRA_PNGINFO",
66 | },
67 | }
68 |
69 | RETURN_TYPES = ("STRING","STRING","STRING",)
70 | RETURN_NAMES = ("text","messages","session_history",)
71 | FUNCTION = "generate_contextual_text"
72 | CATEGORY = "fsdymy"
73 | INPUT_IS_LIST = False
74 | OUTPUT_IS_LIST = (False,False,False,)
75 |
76 |
77 | def generate_contextual_text(self,
78 | # api_key,
79 | # api_url,
80 | prompt,
81 | system_content,
82 | model,
83 | seed,context_size,unique_id = None, extra_pnginfo=None):
84 | # print(api_key!='',api_url,prompt,system_content,model,seed)
85 |         # Optionally keep the session history to preserve context across calls,
86 |         # or clear it here with self.session_history.clear()
87 | # if seed!=self.seed:
88 | # self.seed=seed
89 | # self.session_history=[]
90 |
91 |         # Add the system message (and any initial content) to the session history
92 | if system_content:
93 | self.system_content=system_content
94 | # self.session_history=[]
95 | # self.session_history.append({"role": "system", "content": system_content})
96 |
97 | #
98 | file_path = os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), "key.json")
99 | _, api_key = from_file_get_key(file_path)
100 | client = zhipu_client(api_key=api_key)
101 |
102 |         # Append the user's prompt to the session history
103 |         # and pass the whole history when calling the API
104 |
105 | def crop_list_tail(lst, size):
106 | if size >= len(lst):
107 | return lst
108 | elif size==0:
109 | return []
110 | else:
111 | return lst[-size:]
112 |
113 | session_history=crop_list_tail(self.session_history,context_size)
114 |
115 | messages=[{"role": "system", "content": self.system_content}]+session_history+[{"role": "user", "content": prompt}]
116 | response = client.chat.completions.create(model=model, messages=messages)
117 | # print(response)
118 |
119 | finish_reason = response.choices[0].finish_reason
120 | if finish_reason != "stop":
121 | raise RuntimeError("API finished with unexpected reason: " + finish_reason)
122 |
123 | content=""
124 | try:
125 | content=response.choices[0].message.content
126 |         except (AttributeError, KeyError, TypeError):  # fall back for streaming-style responses that expose a delta
127 | content=response.choices[0].delta['content']
128 |
129 | self.session_history=self.session_history+[{"role": "user", "content": prompt}]+[{'role':'assistant',"content":content}]
130 |
131 |
132 | # if unique_id and extra_pnginfo and "workflow" in extra_pnginfo[0]:
133 | # workflow = extra_pnginfo[0]["workflow"]
134 | # node = next((x for x in workflow["nodes"] if str(x["id"]) == unique_id[0]), None)
135 | # if node:
136 | # node["widgets_values"] = ["",
137 | # api_url,
138 | # prompt,
139 | # system_content,
140 | # model,
141 | # seed,
142 | # context_size]
143 |
144 | return (content,json.dumps(messages, indent=4),json.dumps(self.session_history, indent=4),)
145 |
146 | class ShowText:
147 | def __init__(self):
148 | pass
149 |
150 | @classmethod
151 | def INPUT_TYPES(cls):
152 | return {
153 | "required": {
154 | "text": ("STRING", {"forceInput": True}),
155 | },
156 | "hidden": {
157 | "unique_id": "UNIQUE_ID",
158 | "extra_pnginfo": "EXTRA_PNGINFO",
159 | },
160 | }
161 |
162 | INPUT_IS_LIST = True
163 | RETURN_TYPES = ("STRING",)
164 | FUNCTION = "notify"
165 | OUTPUT_NODE = True
166 | OUTPUT_IS_LIST = (True,)
167 | CATEGORY = "fsdymy"
168 |
169 | def notify(self, text, unique_id=None, extra_pnginfo=None):
170 | # print(unique_id, extra_pnginfo)
171 | if unique_id is not None and extra_pnginfo is not None:
172 | if not isinstance(extra_pnginfo, list):
173 | print("Error: extra_pnginfo is not a list")
174 | elif (
175 | not isinstance(extra_pnginfo[0], dict)
176 | or "workflow" not in extra_pnginfo[0]
177 | ):
178 | print("Error: extra_pnginfo[0] is not a dict or missing 'workflow' key")
179 | else:
180 | workflow = extra_pnginfo[0]["workflow"]
181 | node = next(
182 | (x for x in workflow["nodes"] if str(x["id"]) == str(unique_id[0])),
183 | None,
184 | )
185 | if node:
186 | node["widgets_values"] = [text]
187 |
188 | return {"ui": {"text": text}, "result": (text,)}
189 |
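190 |
191 | # Minimal standalone sketch (not part of the ComfyUI node flow): it repeats the same zhipuai
192 | # call ZhiPuAiNode makes, reading the key from key.json one directory above this file.
193 | # The prompt "1girl" and the model "glm-4" are illustrative defaults taken from the node above.
194 | if __name__ == "__main__":
195 |     key_path = os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), "key.json")
196 |     _, demo_key = from_file_get_key(key_path)
197 |     demo_client = zhipu_client(api_key=demo_key)
198 |     demo_messages = [
199 |         {"role": "system", "content": "Expand text-to-image prompts in English."},
200 |         {"role": "user", "content": "1girl"},
201 |     ]
202 |     demo_response = demo_client.chat.completions.create(model="glm-4", messages=demo_messages)
203 |     print(demo_response.choices[0].message.content)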
--------------------------------------------------------------------------------
/nodes/IPAdapterLayerWeight.py:
--------------------------------------------------------------------------------
1 | class IPAdapterLayerWeight:
2 | def __init__(self):
3 | self.unfold_batch = False
4 |
5 | @classmethod
6 | def INPUT_TYPES(cls):
7 | weights = {
8 | "index_00": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 5.0, "step": 0.1}),
9 | "index_01": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 5.0, "step": 0.1}),
10 | "index_02": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 5.0, "step": 0.1}),
11 | "index_03": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 5.0, "step": 0.1}),
12 | "index_04": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 5.0, "step": 0.1}),
13 | "index_05": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 5.0, "step": 0.1}),
14 | "index_06": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 5.0, "step": 0.1}),
15 | "index_07": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 5.0, "step": 0.1}),
16 | "index_08": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 5.0, "step": 0.1}),
17 | "index_09": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 5.0, "step": 0.1}),
18 | "index_10": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 5.0, "step": 0.1}),
19 | "index_11": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 5.0, "step": 0.1}),
20 | }
21 | return {
22 | "required":
23 | weights
24 | }
25 |
26 | RETURN_TYPES = ("STRING",)
27 | RETURN_NAMES = ("ipadapter_layer_weight",)
28 | FUNCTION = "get_weight"
29 | CATEGORY = "fsdymy/ipadapter"
30 |
31 | def get_weight(self, index_00, index_01, index_02, index_03, index_04, index_05, index_06, index_07, index_08, index_09, index_10, index_11):
32 | text = f"0: {index_00}, 1: {index_01}, 2: {index_02}, 3: {index_03}, 4: {index_04}, 5: {index_05}, 6: {index_06}, 7: {index_07}, 8: {index_08}, 9: {index_09}, 10: {index_10}, 11: {index_11}"
33 | return (text,)
34 |
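35 | # Example: with every index left at its default of 1.0, get_weight returns
36 | # "0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0, 5: 1.0, 6: 1.0, 7: 1.0, 8: 1.0, 9: 1.0, 10: 1.0, 11: 1.0",
37 | # a format intended for IPAdapter nodes that accept a per-layer weight string.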
--------------------------------------------------------------------------------
/nodes/imageUtils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import random
3 |
4 | import folder_paths
5 | from PIL import Image
6 | import numpy as np
7 |
8 |
9 | class SaveImageWithoutMetadata:
10 | def __init__(self):
11 | self.output_dir = folder_paths.get_output_directory()
12 | self.type = "output"
13 | self.prefix_append = ""
14 | self.compress_level = 4
15 |
16 | @classmethod
17 | def INPUT_TYPES(s):
18 | return {"required":
19 | {"images": ("IMAGE", ),
20 | "filename_prefix": ("STRING", {"default": "ComfyUInoMetadata"})},
21 | "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
22 | }
23 |
24 | RETURN_TYPES = ()
25 | FUNCTION = "save_images"
26 |
27 | OUTPUT_NODE = True
28 |
29 | CATEGORY = "fsdymy/image"
30 |
31 | def save_images(self, images, filename_prefix="ComfyUInoMetadata", prompt=None, extra_pnginfo=None):
32 | filename_prefix += self.prefix_append
33 | full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
34 | results = list()
35 | for (batch_number, image) in enumerate(images):
36 | i = 255. * image.cpu().numpy()
37 | img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
38 |             metadata = None  # intentionally no PngInfo: the prompt/workflow is not written into the PNG
39 | filename_with_batch_num = filename.replace("%batch_num%", str(batch_number))
40 | file = f"{filename_with_batch_num}_{counter:05}_.png"
41 | img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=self.compress_level)
42 | results.append({
43 | "filename": file,
44 | "subfolder": subfolder,
45 | "type": self.type
46 | })
47 | counter += 1
48 |
49 | return { "ui": { "images": results } }
50 |
51 |
52 | class PreviewImageWithoutMetadata(SaveImageWithoutMetadata):
53 | def __init__(self):
54 | self.output_dir = folder_paths.get_temp_directory()
55 | self.type = "temp"
56 |         self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstuvwxyz") for x in range(5))
57 | self.compress_level = 1
58 |
59 | @classmethod
60 | def INPUT_TYPES(cls):
61 | return {"required":
62 | {"images": ("IMAGE", ), },
63 | "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
64 | }
65 |
66 | NODE_CLASS_MAPPINGS = {
67 | "Save Image Without Metadata": SaveImageWithoutMetadata,
68 | "Preview Image Without Metadata": PreviewImageWithoutMetadata,
69 | }
70 |
71 | NODE_DISPLAY_NAME_MAPPINGS = {
72 | "Save Image Without Metadata": "Save Image Without Metadata❌",
73 | "Preview Image Without Metadata": "Preview Image Without Metadata❌"
74 | }
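75 |
76 | # Quick sanity-check sketch (the file name below is hypothetical; point it at a real output):
77 | # a PNG written with pnginfo=None carries no text chunks, so Pillow reports no prompt/workflow.
78 | # img = Image.open("output/ComfyUInoMetadata_00001_.png")
79 | # assert not img.text and "prompt" not in img.info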
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | zhipuai
--------------------------------------------------------------------------------
/web/js/showText.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../../scripts/app.js";
2 | import { ComfyWidgets } from "../../../scripts/widgets.js";
3 |
4 | // Displays input text on a node
5 | app.registerExtension({
6 | name: "fsdymy.ShowText",
7 | async beforeRegisterNodeDef(nodeType, nodeData, app) {
8 | if (nodeData.name === "ShowText") {
9 | function populate(text) {
10 | if (this.widgets) {
11 | for (let i = 1; i < this.widgets.length; i++) {
12 | this.widgets[i].onRemove?.();
13 | }
14 | this.widgets.length = 1;
15 | }
16 |
17 | const v = [...text];
18 | if (!v[0]) {
19 | v.shift();
20 | }
21 | for (const list of v) {
22 | const w = ComfyWidgets["STRING"](this, "text", ["STRING", { multiline: true }], app).widget;
23 | w.inputEl.readOnly = true;
24 | w.inputEl.style.opacity = 0.6;
25 | w.value = list;
26 | }
27 |
28 | requestAnimationFrame(() => {
29 | const sz = this.computeSize();
30 | if (sz[0] < this.size[0]) {
31 | sz[0] = this.size[0];
32 | }
33 | if (sz[1] < this.size[1]) {
34 | sz[1] = this.size[1];
35 | }
36 | this.onResize?.(sz);
37 | app.graph.setDirtyCanvas(true, false);
38 | });
39 | }
40 |
41 | // When the node is executed we will be sent the input text, display this in the widget
42 | const onExecuted = nodeType.prototype.onExecuted;
43 | nodeType.prototype.onExecuted = function (message) {
44 | onExecuted?.apply(this, arguments);
45 | populate.call(this, message.text);
46 | };
47 |
48 | const onConfigure = nodeType.prototype.onConfigure;
49 | nodeType.prototype.onConfigure = function () {
50 | onConfigure?.apply(this, arguments);
51 | if (this.widgets_values?.length) {
52 | populate.call(this, this.widgets_values);
53 | }
54 | };
55 | }
56 | },
57 | });
58 |
--------------------------------------------------------------------------------