├── LLM+DeepSeek,OpenAI+AP.json ├── README.md ├── __init__.py └── node.py /LLM+DeepSeek,OpenAI+AP.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_link_id":3, 3 | "nodes":[ 4 | { 5 | "mode":0, 6 | "outputs":[ 7 | { 8 | "shape":3, 9 | "name":"describe", 10 | "slot_index":0, 11 | "links":[ 12 | 2 13 | ], 14 | "label":"describe", 15 | "type":"STRING" 16 | } 17 | ], 18 | "size":{ 19 | "0":400, 20 | "1":296 21 | }, 22 | "pos":[ 23 | 796.6666870117188, 24 | 304.8958435058594 25 | ], 26 | "widgets_values":[ 27 | "", 28 | "", 29 | "", 30 | "You are a helpful assistant", 31 | "Describe this image.", 32 | 0.6, 33 | 300, 34 | "randomize" 35 | ], 36 | "inputs":[ 37 | { 38 | "name":"ref_image", 39 | "link":1, 40 | "slot_index":0, 41 | "label":"ref_image", 42 | "type":"IMAGE" 43 | } 44 | ], 45 | "flags":{ 46 | 47 | }, 48 | "id":1, 49 | "type":"RH_LLMAPI_NODE", 50 | "properties":{ 51 | "Node name for S&R":"RH_LLMAPI_NODE" 52 | }, 53 | "order":1 54 | }, 55 | { 56 | "mode":0, 57 | "outputs":[ 58 | { 59 | "shape":3, 60 | "name":"IMAGE", 61 | "slot_index":0, 62 | "links":[ 63 | 1, 64 | 3 65 | ], 66 | "label":"IMAGE", 67 | "type":"IMAGE" 68 | }, 69 | { 70 | "shape":3, 71 | "name":"MASK", 72 | "label":"MASK", 73 | "type":"MASK" 74 | } 75 | ], 76 | "size":[ 77 | 315, 78 | 314 79 | ], 80 | "pos":[ 81 | 436, 82 | 296 83 | ], 84 | "widgets_values":[ 85 | "1ba9ed13e32828b15843f481dca12bfeb5f21f55f5a71d535c7205195819dfb5.png", 86 | "image" 87 | ], 88 | "flags":{ 89 | 90 | }, 91 | "id":2, 92 | "type":"LoadImage", 93 | "properties":{ 94 | "Node name for S&R":"LoadImage" 95 | }, 96 | "order":0 97 | }, 98 | { 99 | "mode":0, 100 | "size":[ 101 | 315, 102 | 270 103 | ], 104 | "pos":[ 105 | 451, 106 | 658 107 | ], 108 | "widgets_values":[ 109 | "ComfyUI" 110 | ], 111 | "inputs":[ 112 | { 113 | "name":"images", 114 | "link":3, 115 | "label":"images", 116 | "type":"IMAGE" 117 | } 118 | ], 119 | "flags":{ 120 | 121 | }, 122 | "id":5, 123 | 
"type":"SaveImage", 124 | "properties":{ 125 | "Node name for S&R":"SaveImage" 126 | }, 127 | "order":2 128 | }, 129 | { 130 | "mode":0, 131 | "outputs":[ 132 | { 133 | "shape":6, 134 | "name":"STRING", 135 | "label":"STRING", 136 | "type":"STRING" 137 | } 138 | ], 139 | "size":[ 140 | 405.9541276073287, 141 | 380.66760093760433 142 | ], 143 | "pos":[ 144 | 1223, 145 | 308 146 | ], 147 | "widgets_values":[ 148 | "", 149 | "The image shows a person wearing a dark jacket with a visible collar. The background consists of a softly blurred interior setting, possibly near a window, as light is filtering through. Strands of light brown or blonde hair are visible on the left side." 150 | ], 151 | "inputs":[ 152 | { 153 | "widget":{ 154 | "name":"text" 155 | }, 156 | "name":"text", 157 | "link":2, 158 | "type":"STRING" 159 | } 160 | ], 161 | "flags":{ 162 | 163 | }, 164 | "id":4, 165 | "type":"ShowText|pysssss", 166 | "properties":{ 167 | "Node name for S&R":"ShowText|pysssss" 168 | }, 169 | "order":3 170 | } 171 | ], 172 | "extra":{ 173 | "0246.VERSION":[ 174 | 0, 175 | 0, 176 | 4 177 | ], 178 | "ds":{ 179 | "offset":{ 180 | "0":-491.2147794944354, 181 | "1":-115.99652408830687 182 | }, 183 | "scale":1.610510000000001 184 | } 185 | }, 186 | "groups":[ 187 | 188 | ], 189 | "links":[ 190 | [ 191 | 1, 192 | 2, 193 | 0, 194 | 1, 195 | 0, 196 | "IMAGE" 197 | ], 198 | [ 199 | 2, 200 | 1, 201 | 0, 202 | 4, 203 | 0, 204 | "STRING" 205 | ], 206 | [ 207 | 3, 208 | 2, 209 | 0, 210 | 5, 211 | 0, 212 | "IMAGE" 213 | ] 214 | ], 215 | "config":{ 216 | 217 | }, 218 | "version":0.4, 219 | "last_node_id":5 220 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ComfyUI_RH_LLM_API 2 | **Very easy to use. 
LLM DeepSeek, OpenAI API compatible plugin** 3 | ## 4 | Because the vast majority of LLM APIs are compatible with OpenAI's API interface specifications, this plugin was created 5 | Through triples, any LLM model compatible with the OpenAI interface can be accessed and called, such as deepseek, Qianwen, Doubao, GLM, MinMax, etc. API 6 | - **baseurl** 7 | - **apikey** 8 | - **model** 9 | 10 | **Online Demo** 11 | https://www.runninghub.ai/post/1890402871119368194 12 | ![image](https://github.com/user-attachments/assets/31b35db4-4d61-4767-a41c-6f1445fbea5e) 13 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | from .node import * 2 | 3 | NODE_CLASS_MAPPINGS = { 4 | "RH_LLMAPI_NODE": RH_LLMAPI_Node, 5 | } 6 | NODE_DISPLAY_NAME_MAPPINGS = { 7 | "RH_LLMAPI_NODE": "Runninghub LLM API Node", 8 | } 9 | 10 | __all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS'] -------------------------------------------------------------------------------- /node.py: -------------------------------------------------------------------------------- 1 | from openai import OpenAI 2 | import time 3 | from PIL import Image 4 | import numpy as np 5 | import base64 6 | import os 7 | 8 | def encode_image_b64(ref_image): 9 | i = 255. 
def encode_image_b64(ref_image):
    """Convert a ComfyUI IMAGE tensor (B, H, W, C floats in [0, 1]) to a base64 WEBP string.

    Only the first image of the batch is used. Images whose longest side
    exceeds 2048 px are downscaled by the smallest power of two that brings
    them under that limit, to keep the request payload reasonably small.
    """
    import io  # local import: io is not in the module's top-level imports

    # Tensor -> uint8 numpy array -> PIL image (first batch element only).
    frame = 255. * ref_image.cpu().numpy()[0]
    img = Image.fromarray(np.clip(frame, 0, 255).astype(np.uint8))

    # Halve repeatedly until the longest side fits within 2048 px.
    longest = max(img.size)
    factor = 1
    while longest / factor > 2048:
        factor *= 2
    if factor > 1:  # only resample when a reduction is actually needed
        img = img.resize((img.size[0] // factor, img.size[1] // factor))

    # Encode in memory rather than via a temp file named by time.time():
    # that scheme risked name collisions/races and leaked the file if an
    # exception occurred between save and os.remove.
    buffer = io.BytesIO()
    img.save(buffer, 'WEBP')
    return base64.b64encode(buffer.getvalue()).decode('utf-8')


class RH_LLMAPI_Node():
    """ComfyUI node that calls any OpenAI-compatible chat-completion API.

    Given a base URL, API key, and model name, it works with DeepSeek, Qwen,
    Doubao, GLM, MiniMax, OpenAI itself, etc. An optional reference image is
    sent as a base64 data URL for vision-capable models.
    """

    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "api_baseurl": ("STRING", {"multiline": True}),
                "api_key": ("STRING", {"default": ""}),
                "model": ("STRING", {"default": ""}),
                "role": ("STRING", {"multiline": True, "default": "You are a helpful assistant"}),
                "prompt": ("STRING", {"multiline": True, "default": "Hello"}),
                "temperature": ("FLOAT", {"default": 0.6}),
                # NOTE: seed is not forwarded to the API; it only exists so
                # ComfyUI re-executes the node when the widget changes.
                "seed": ("INT", {"default": 100}),
            },
            "optional": {
                "ref_image": ("IMAGE",),
            }
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("describe",)
    FUNCTION = "rh_run_llmapi"
    CATEGORY = "Runninghub"

    def rh_run_llmapi(self, api_baseurl, api_key, model, role, prompt, temperature, seed, ref_image=None):
        """Send `prompt` (optionally with `ref_image`) to the chat API.

        Returns a 1-tuple with the model's reply text, or ('Error',) when the
        response is missing or has no choices.
        """
        client = OpenAI(api_key=api_key, base_url=api_baseurl)

        if ref_image is None:
            messages = [
                {'role': 'system', 'content': role},
                {'role': 'user', 'content': prompt},
            ]
        else:
            base64_image = encode_image_b64(ref_image)
            messages = [
                {'role': 'system', 'content': role},
                {'role': 'user',
                 'content': [
                     {
                         "type": "text",
                         "text": prompt
                     },
                     {
                         "type": "image_url",
                         "image_url": {
                             # encode_image_b64 produces WEBP, so declare the
                             # matching MIME type (was wrongly image/jpeg).
                             "url": f"data:image/webp;base64,{base64_image}"
                         }
                     },
                 ]},
            ]

        completion = client.chat.completions.create(model=model, messages=messages, temperature=temperature)
        # Guard against a missing/malformed response AND an empty choices
        # list (the original would IndexError on choices[0] in that case).
        if completion is not None and hasattr(completion, 'choices') and completion.choices:
            return (completion.choices[0].message.content,)
        return ('Error',)