├── README.md ├── __init__.py ├── cxh_gemini.py ├── cxh_prompt.py ├── folder └── prompt │ ├── 描述图片.txt │ └── 故事.txt ├── key.txt ├── local_api.py ├── requirements.txt ├── web └── custom_Prompt.js └── workflow.png /README.md: -------------------------------------------------------------------------------- 1 | # Comfyui_Gemini2 2 | Comfyui_Gemini2 3 | 4 | 1.pip install -r requirements.txt 5 | 6 | 2.Apply for key and write to key.txt https://aistudio.google.com/ 7 | 8 | 9 | ![workflow](https://github.com/user-attachments/assets/70fa4244-aa58-43ac-aaf9-957508bf04ee) 10 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | from .cxh_gemini import CXH_Gemini2_TX, CXH_Gemini2_Vision 2 | from .cxh_prompt import CXH_Local_Prompt 3 | # Other 4 | from .local_api import register_routes 5 | 6 | NODE_CLASS_MAPPINGS = { 7 | "CXH_Gemini2_TX":CXH_Gemini2_TX, 8 | "CXH_Gemini2_Vision":CXH_Gemini2_Vision, 9 | "CXH_Local_Prompt":CXH_Local_Prompt 10 | } 11 | 12 | NODE_DISPLAY_NAME_MAPPINGS = { 13 | "CXH_Gemini2_TX":"CXH_Gemini2_TX", 14 | "CXH_Gemini2_Vision":"CXH_Gemini2_Vision", 15 | "CXH_Local_Prompt":"CXH_Local_Prompt" 16 | } 17 | 18 | WEB_DIRECTORY = "./web" 19 | 20 | # 注册api 21 | register_routes() -------------------------------------------------------------------------------- /cxh_gemini.py: -------------------------------------------------------------------------------- 1 | 2 | import os 3 | import torch 4 | from PIL import Image 5 | import folder_paths 6 | import json 7 | import google.generativeai as genai 8 | import numpy as np 9 | import cv2 as cv 10 | 11 | current_folder = os.path.dirname(os.path.abspath(__file__)) 12 | 13 | def get_gemini_key(): 14 | try: 15 | config_path = os.path.join(current_folder, 'key.txt') 16 | # 读取整个文件内容 17 | with open(config_path, 'r', encoding='utf-8') as file: 18 | content = file.read() 19 | return content 20 | except: 21 
|         print("Error: Gemini API key is required")
22 |         return ""
23 | 
24 | def tensor2pil(t_image: torch.Tensor) -> Image:
25 |     # Convert a ComfyUI image tensor (float 0-1) to a PIL image (uint8 0-255)
26 |     return Image.fromarray(np.clip(255.0 * t_image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
27 | 
28 | def pil2tensor(image:Image) -> torch.Tensor:
29 |     # Convert a PIL image back to a batched float tensor in the 0-1 range
30 |     return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)
31 | 
32 | 
33 | class CXH_Gemini2_TX:
34 |     # ComfyUI node: text-only prompt to a Gemini model, returns the generated text.
35 | 
36 |     def __init__(self):
37 |         self.gemini_key = get_gemini_key()
38 |         if self.gemini_key:
39 |             genai.configure(api_key=self.gemini_key, transport='rest')
40 | 
41 |     @classmethod
42 |     def INPUT_TYPES(s):
43 |         return {
44 |             "required": {
45 |                 "model": (["gemini-2.0-flash-exp","gemini-1.5-pro", "gemini-1.5-flash", "gemini-1.5-flash-8b","learnlm-1.5-pro-experimental","gemini-exp-1114","gemini-exp-1121"],),
46 |                 "prompt": ("STRING", {"multiline": True, "default": ""},), # forceInput would show this input directly at the connection point
47 |             }
48 |         }
49 | 
50 |     RETURN_TYPES = ("STRING",)
51 |     RETURN_NAMES = ("out",)
52 |     FUNCTION = "gen"
53 |     OUTPUT_NODE = False
54 |     CATEGORY = "CXH/gemini"
55 | 
56 |     def gen(self, model,prompt):
57 |         if not self.gemini_key:
58 |             raise ValueError("Gemini API key is required")
59 | 
60 |         model = genai.GenerativeModel(model)
61 | 
62 |         response = model.generate_content(prompt)
63 |         print(response.text)
64 |         # Return the generated text as a one-tuple (ComfyUI output convention)
65 |         return (response.text,)
66 | 
67 | class CXH_Gemini2_Vision:
68 |     # ComfyUI node: image + prompt to a Gemini vision model, returns the generated text.
69 | 
70 |     def __init__(self):
71 |         self.gemini_key = get_gemini_key()
72 |         if self.gemini_key:
73 |             genai.configure(api_key=self.gemini_key, transport='rest')
74 | 
75 |     @classmethod
76 |     def INPUT_TYPES(s):
77 |         return {
78 |             "required": {
79 |                 "image": ("IMAGE",{"forceInput": True},),
80 |                 "model": (["gemini-2.0-flash-exp","gemini-1.5-pro", "gemini-1.5-flash", "gemini-1.5-flash-8b","learnlm-1.5-pro-experimental","gemini-exp-1114","gemini-exp-1121"],),
81 |                 "prompt": ("STRING", {"multiline": True, "default": ""},), # forceInput would show this input directly at the connection point
82 |             }
83 |         }
84 | 
85 |     RETURN_TYPES = ("STRING",)
86 | 
RETURN_NAMES = ("out",)
83 |     FUNCTION = "gen"
84 |     OUTPUT_NODE = False
85 |     CATEGORY = "CXH/gemini"
86 | 
87 |     def gen(self,image, model,prompt):
88 |         if not self.gemini_key:
89 |             raise ValueError("Gemini API key is required")
90 | 
91 |         model = genai.GenerativeModel(model)
92 |         pil_image = tensor2pil(image)
93 |         response = model.generate_content([prompt, pil_image])
94 |         print(response.text)
95 |         # Return the generated text as a one-tuple (ComfyUI output convention)
96 |         return (response.text,)
97 | 
98 | 
-------------------------------------------------------------------------------- /cxh_prompt.py: --------------------------------------------------------------------------------
1 | 
2 | import os
3 | import torch
4 | from PIL import Image
5 | import folder_paths
6 | import random
7 | import string
8 | import glob
9 | 
10 | current_folder = os.path.dirname(os.path.abspath(__file__))
11 | 
12 | def list_files_names(input_dir, ext):
13 |     """List file names (without extension) in *input_dir* matching *ext*, creating the dir if needed."""
14 |     # Make sure the directory exists; create it if not
15 |     if not os.path.exists(input_dir):
16 |         os.makedirs(input_dir)
17 | 
18 |     # Use glob to find files with the given extension
19 |     file_paths = glob.glob(os.path.join(input_dir, '*' + ext))
20 | 
21 |     # Collect the file names (without extension)
22 |     file_names = []
23 | 
24 |     # Walk the paths, extract each base name and drop its extension
25 |     for file_path in file_paths:
26 |         # os.path.splitext() returns (name, ext); we only need the name part
27 |         filename, _ = os.path.splitext(os.path.basename(file_path))
28 |         file_names.append(filename)
29 | 
30 |     # Return the list of file names
31 |     return file_names
32 | 
33 | class CXH_Local_Prompt:
34 |     # ComfyUI node: pick a local prompt file (folder/prompt/*.txt) and output its text.
35 | 
36 |     def __init__(self):
37 |         pass
38 | 
39 |     @classmethod
40 |     def INPUT_TYPES(s):
41 |         input_dir = os.path.join(current_folder,"folder","prompt")
42 |         if not os.path.exists(input_dir):
43 |             os.makedirs(input_dir)
44 |             # Seed a starter prompt file so the dropdown is never empty
45 |             # (only done when the directory is first created)
46 |             file_path = os.path.join(input_dir, "prompt.txt")
47 |             with open(file_path, 'w', encoding='utf-8') as file:
48 |                 file.write("write prompt")
49 |         FILE_LIST = list_files_names(input_dir,".txt")
50 |         return {
51 |             "required": {
52 |                 "prompt": (FILE_LIST,),
53 |             }
54 |         }
55 | 
56 |     RETURN_TYPES = ("STRING",)
57 |     RETURN_NAMES = ("out",)
58 
FUNCTION = "gen"
56 |     OUTPUT_NODE = False
57 |     CATEGORY = "CXH/LLM"
58 | 
59 |     def gen(self, prompt):
60 |         # `prompt` is the selected file name (no extension) from the dropdown
61 |         file_path = os.path.join(current_folder,"folder","prompt",prompt+".txt")
62 |         new_string = ""
63 |         # Open the selected prompt file
64 |         with open(file_path, 'r', encoding='utf-8') as file:
65 |             # Read the entire file content
66 |             new_string = file.read()
67 |         return (new_string,)
-------------------------------------------------------------------------------- /folder/prompt/描述图片.txt: --------------------------------------------------------------------------------
1 | Detailed description of the picture below
-------------------------------------------------------------------------------- /folder/prompt/故事.txt: --------------------------------------------------------------------------------
1 | Write a story about a magic backpack.
-------------------------------------------------------------------------------- /key.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/StartHua/Comfyui_Gemini2/1f01f21a1c8237bd6d929e6ef7d739005c8ed55b/key.txt
-------------------------------------------------------------------------------- /local_api.py: --------------------------------------------------------------------------------
1 | import folder_paths
2 | import os
3 | import nodes
4 | from server import PromptServer
5 | from aiohttp import web
6 | 
7 | 
8 | comfy_path = os.path.dirname(folder_paths.__file__)
9 | custom_nodes_path = os.path.join(comfy_path, "custom_nodes")
10 | 
11 | mode_path = folder_paths.models_dir
12 | current_folder = os.path.dirname(os.path.abspath(__file__))
13 | 
14 | prompt_dir =os.path.join(current_folder,"folder","prompt")
15 | 
16 | def register_routes():
17 |     # Register the prompt-folder helper endpoints on the running ComfyUI server.
18 |     @PromptServer.instance.routes.get("/cxh/cmf/gemini/open")
19 |     async def get_comfyui_folderInfo(request):
20 |         os.startfile(prompt_dir)  # NOTE(review): os.startfile is Windows-only — raises AttributeError on Linux/macOS
21 |         return web.json_response({"success": "ok"},status=200)
22 | 
23 |     @PromptServer.instance.routes.post("/cxh/cmf/gemini/open/file")
24 | async def open_tag_folder(request): 25 | data = await request.json() 26 | path = data.get("path", "./") 27 | 28 | file = os.path.join(prompt_dir,path + ".txt") 29 | print(file) 30 | 31 | 32 | os.startfile(file) 33 | return web.json_response({"success": "ok"},status=200) -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | google-generativeai 2 | requests -------------------------------------------------------------------------------- /web/custom_Prompt.js: -------------------------------------------------------------------------------- 1 | import { app } from "../../scripts/app.js"; 2 | 3 | app.registerExtension({ 4 | name: "CXH_Local_Prompt", // Extension name 5 | async nodeCreated(node) { 6 | 7 | // 开头做一个小隔离 8 | if(!node.comfyClass.startsWith("CXH")) { 9 | return; 10 | } 11 | node.color = "#1b4669"; 12 | // node.bgcolor = "#0198cb"; 13 | 14 | if(node.comfyClass === "CXH_Local_Prompt"){ 15 | node.setSize([600, 120]); 16 | const dir_pathWidget = node.widgets.find(w => w.name === "prompt"); 17 | dir_pathWidget.hidden = false; 18 | let dir = dir_pathWidget.value; 19 | node.addWidget("button", "openFile", null, () => { 20 | let file = dir_pathWidget.value; 21 | // api方式获取路径 22 | fetch('/api/cxh/cmf/gemini/open/file', { 23 | method: 'POST', 24 | headers: { 'Content-Type': 'application/json' }, 25 | body: JSON.stringify({ path: file }) 26 | }).then(response => response.json()) 27 | .then(data => { 28 | }).catch(error => console.error("API请求失败:", error)); 29 | }); 30 | node.addWidget("button", "OpenDir", null, () => { 31 | console.log("点击") 32 | // api方式获取路径 33 | fetch('/api/cxh/cmf/gemini/open') 34 | .then(response => response.json()) 35 | .then(data => {}) 36 | .catch(error => console.error("API请求失败:", error)); 37 | }); 38 | } 39 | } 40 | }); 41 | -------------------------------------------------------------------------------- 
/workflow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/StartHua/Comfyui_Gemini2/1f01f21a1c8237bd6d929e6ef7d739005c8ed55b/workflow.png --------------------------------------------------------------------------------