├── .gitignore
├── flowy
│   ├── lib_omost
│   │   ├── __init__.py
│   │   ├── utils.py
│   │   ├── greedy_encode.py
│   │   └── canvas.py
│   ├── api_nodes
│   │   ├── replicate
│   │   │   ├── requirements.txt
│   │   │   ├── __init__.py
│   │   │   ├── pyproject.toml
│   │   │   ├── LICENSE
│   │   │   ├── supported_models.json
│   │   │   ├── import_schemas.py
│   │   │   ├── README.md
│   │   │   ├── example_workflows
│   │   │   │   ├── bark_and_musicgen.json
│   │   │   │   ├── simple-llava.json
│   │   │   │   ├── simple-garment-try-on.json
│   │   │   │   ├── flux.json
│   │   │   │   ├── simple-llama3.json
│   │   │   │   └── llama3-405b.json
│   │   │   ├── replicate_bridge.py
│   │   │   ├── schema_to_node.py
│   │   │   └── schemas
│   │   │       ├── salesforce_blip.json
│   │   │       └── jingyunliang_swinir.json
│   │   ├── __init__.py
│   │   ├── hailuo.py
│   │   ├── fluxultra.py
│   │   ├── keling.py
│   │   ├── luma.py
│   │   ├── flux.py
│   │   ├── ideogram.py
│   │   ├── fluxdevlora.py
│   │   ├── recraft.py
│   │   ├── clarityupscaler.py
│   │   └── base.py
│   ├── api_key_manager.py
│   ├── nodes_previewvideo.py
│   ├── utils.py
│   ├── nodes_llm.py
│   ├── nodes_http.py
│   ├── types.py
│   ├── nodes.py
│   └── nodes_json.py
├── requirements.txt
├── makefile
├── images
│   ├── LLM.png
│   ├── Logo.png
│   ├── API_Key.png
│   ├── clarity.png
│   ├── Omost_LLM.png
│   ├── API_Key_Node.png
│   └── comflowy_banner.png
├── __init__.py
├── nodes_test.py
├── README_CN.md
├── web
│   └── js
│       └── previewVideo.js
├── README.md
└── workflows
    ├── LLM_CN.json
    └── Omost_LLM.json

/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
--------------------------------------------------------------------------------
/flowy/lib_omost/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | requests
--------------------------------------------------------------------------------
/flowy/api_nodes/replicate/requirements.txt:
--------------------------------------------------------------------------------
1 | replicate>=1.0.3
2 | 
--------------------------------------------------------------------------------
/makefile:
--------------------------------------------------------------------------------
1 | .PHONY: test
2 | 
3 | test:
4 | 	python -m nodes_test
--------------------------------------------------------------------------------
/images/LLM.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/6174/comflowy-nodes/HEAD/images/LLM.png
--------------------------------------------------------------------------------
/images/Logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/6174/comflowy-nodes/HEAD/images/Logo.png
--------------------------------------------------------------------------------
/images/API_Key.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/6174/comflowy-nodes/HEAD/images/API_Key.png
--------------------------------------------------------------------------------
/images/clarity.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/6174/comflowy-nodes/HEAD/images/clarity.png
--------------------------------------------------------------------------------
/images/Omost_LLM.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/6174/comflowy-nodes/HEAD/images/Omost_LLM.png
-------------------------------------------------------------------------------- /images/API_Key_Node.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/6174/comflowy-nodes/HEAD/images/API_Key_Node.png -------------------------------------------------------------------------------- /images/comflowy_banner.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/6174/comflowy-nodes/HEAD/images/comflowy_banner.png -------------------------------------------------------------------------------- /flowy/api_nodes/replicate/__init__.py: -------------------------------------------------------------------------------- 1 | from .replicate_bridge import REPLICATE_NODE_CLASS_MAPPINGS 2 | 3 | __all__ = ["REPLICATE_NODE_CLASS_MAPPINGS"] -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | from .flowy.nodes import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS 2 | WEB_DIRECTORY = "./web" 3 | __all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS"] 4 | -------------------------------------------------------------------------------- /flowy/api_nodes/replicate/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "comfyui-replicate" 3 | description = "Run Replicate models in ComfyUI" 4 | version = "1.1.0" 5 | license = { file = "LICENSE" } 6 | 7 | [project.urls] 8 | Repository = "https://github.com/replicate/comfyui-replicate" 9 | # Used by Comfy Registry https://comfyregistry.org 10 | 11 | [tool.comfy] 12 | PublisherId = "fofr" 13 | DisplayName = "ComfyUI-Replicate" 14 | Icon = "" 15 | -------------------------------------------------------------------------------- /flowy/api_nodes/__init__.py: -------------------------------------------------------------------------------- 1 | from .clarityupscaler import FlowyClarityUpscale 2 | from .flux import FlowyFlux 3 | from .fluxultra import FlowyFluxProUltra 4 | from .hailuo import FlowyHailuo 5 | from .ideogram import FlowyIdeogram 6 | from .keling import FlowyKling 7 | from .luma import FlowyLuma 8 | from .recraft import FlowyRecraft 9 | from .fluxdevlora import FlowyFluxDevLora 10 | from .replicate import REPLICATE_NODE_CLASS_MAPPINGS 11 | 12 | __all__ = [ 13 | "FlowyClarityUpscale", 14 | "FlowyFlux", 15 | "FlowyFluxProUltra", 16 | "FlowyHailuo", 17 | "FlowyIdeogram", 18 | "FlowyKling", 19 | "FlowyLuma", 20 | "FlowyRecraft", 21 | "FlowyFluxDevLora", 22 | "REPLICATE_NODE_CLASS_MAPPINGS", 23 | ] 24 | -------------------------------------------------------------------------------- /flowy/api_key_manager.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import logging 4 | 5 | logger = logging.getLogger(__name__) 6 | 7 | API_KEY_FILE = os.path.join(os.path.dirname(__file__), "api_key.json") 8 | 9 | def save_api_key(api_key): 10 | """ 11 | Save the API key to a JSON file. 12 | 13 | Args: 14 | api_key (str): The API key to be saved. 15 | """ 16 | try: 17 | with open(API_KEY_FILE, "w") as f: 18 | json.dump({"api_key": api_key}, f) 19 | logger.debug(f"API Key saved to {API_KEY_FILE}") 20 | except Exception as e: 21 | logger.error(f"Failed to save API Key: {str(e)}") 22 | 23 | def load_api_key(): 24 | """ 25 | Load the API key from the JSON file. 
26 | 27 | Returns: 28 | str or None: The loaded API key, or None if not found or an error occurred. 29 | """ 30 | try: 31 | if os.path.exists(API_KEY_FILE): 32 | with open(API_KEY_FILE, "r") as f: 33 | data = json.load(f) 34 | return data.get("api_key") 35 | except Exception as e: 36 | logger.error(f"Failed to load API Key: {str(e)}") 37 | return None 38 | -------------------------------------------------------------------------------- /flowy/api_nodes/replicate/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Replicate 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /flowy/api_nodes/replicate/supported_models.json: -------------------------------------------------------------------------------- 1 | { 2 | "models": [ 3 | "andreasjansson/blip-2", 4 | "batouresearch/high-resolution-controlnet-tile", 5 | "batouresearch/magic-image-refiner", 6 | "batouresearch/magic-style-transfer", 7 | "bytedance/sdxl-lightning-4step", 8 | "cjwbw/hyper-sdxl-1step-t2i", 9 | "cjwbw/supir", 10 | "cuuupid/glm-4v-9b", 11 | "cuuupid/idm-vton", 12 | "declare-lab/tango", 13 | "falcons-ai/nsfw_image_detection", 14 | "fofr/consistent-character", 15 | "fofr/face-to-many", 16 | "fofr/latent-consistency-model", 17 | "fofr/style-transfer", 18 | "jingyunliang/swinir", 19 | "lucataco/hunyuandit-v1.1", 20 | "lucataco/llama-3-vision-alpha", 21 | "lucataco/pasd-magnify", 22 | "lucataco/xtts-v2", 23 | "meta/meta-llama-3.1-405b-instruct", 24 | "meta/llama-2-70b-chat", 25 | "meta/llama-2-7b-chat", 26 | "meta/musicgen", 27 | "okaris/omni-zero-couples", 28 | "okaris/omni-zero", 29 | "pharmapsychotic/clip-interrogator", 30 | "salesforce/blip", 31 | "smoretalk/rembg-enhance", 32 | "stability-ai/sdxl", 33 | "stability-ai/stable-diffusion", 34 | "stability-ai/stable-diffusion-3", 35 | "suno-ai/bark", 36 | "tstramer/material-diffusion", 37 | "xiankgx/face-swap", 38 | "tencentarc/photomaker", 39 | "tencentarc/photomaker-style", 40 | "yorickvp/llava-13b", 41 | "yorickvp/llava-v1.6-34b", 42 | "yorickvp/llava-v1.6-mistral-7b", 43 | "zsxkib/realistic-voice-cloning" 44 | ] 45 | } 46 | -------------------------------------------------------------------------------- /flowy/api_nodes/hailuo.py: -------------------------------------------------------------------------------- 1 | from .base import FlowyApiNode 2 | from ..types import STRING, 
INT, BOOLEAN, get_modal_cloud_web_url 3 | 4 | class FlowyHailuo(FlowyApiNode): 5 | @classmethod 6 | def INPUT_TYPES(cls): 7 | return { 8 | "required": { 9 | "image": ("IMAGE",), 10 | "prompt": ("STRING", {"multiline": True}), 11 | "prompt_optimizer": BOOLEAN, 12 | "seed": ("INT", {"default": 0, "min": 0, "max": 2147483647}), 13 | } 14 | } 15 | 16 | RETURN_TYPES = ("VIDEO",) 17 | RETURN_NAMES = ("video",) 18 | OUTPUT_IS_PREVIEW = True 19 | FUNCTION = "generate" 20 | DESCRIPTION = """ 21 | Nodes from https://comflowy.com: 22 | - Description: A service to generate videos from images by Hailuo AI. 23 | - How to use: 24 | - Provide an image and a prompt. 25 | - Make sure to set your API Key using the 'Comflowy Set API Key' node before using this node. 26 | - Output: Returns the generated video. 27 | """ 28 | 29 | def get_model_type(self) -> str: 30 | return "hailuo" 31 | 32 | def get_api_host(self) -> str: 33 | API_HOST = get_modal_cloud_web_url() 34 | return f"{API_HOST}/api/open/v0/flowy" 35 | 36 | def prepare_payload(self, **kwargs) -> dict: 37 | image_base64 = self.image_to_base64(kwargs["image"]) 38 | return { 39 | "image": image_base64, 40 | "prompt": kwargs["prompt"], 41 | "prompt_optimizer": kwargs["prompt_optimizer"], 42 | "seed": kwargs["seed"], 43 | } 44 | -------------------------------------------------------------------------------- /flowy/lib_omost/utils.py: -------------------------------------------------------------------------------- 1 | from contextlib import contextmanager 2 | 3 | import torch 4 | import numpy as np 5 | 6 | 7 | @torch.inference_mode() 8 | def numpy2pytorch(imgs: list[np.ndarray]): 9 | """Convert a list of numpy images to a pytorch tensor. 10 | Input: images in list[[H, W, C]] format. 11 | Output: images in [B, H, W, C] format. 12 | 13 | Note: ComfyUI expects [B, H, W, C] format instead of [B, C, H, W] format. 
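    Example: three 512x512 RGB uint8 frames become a float tensor of shape [3, 512, 512, 3] with values in [0, 1].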
14 | """ 15 | assert len(imgs) > 0 16 | assert all(img.ndim == 3 for img in imgs) 17 | h = torch.from_numpy(np.stack(imgs, axis=0)).float() / 255.0 18 | return h 19 | 20 | 21 | @contextmanager 22 | def scoped_numpy_random(seed: int): 23 | state = np.random.get_state() # Save the current state 24 | np.random.seed(seed) # Set the seed 25 | try: 26 | yield 27 | finally: 28 | np.random.set_state(state) # Restore the original state 29 | 30 | 31 | @contextmanager 32 | def scoped_torch_random(seed: int): 33 | cpu_state = torch.random.get_rng_state() 34 | gpu_states = [] 35 | if torch.cuda.is_available(): 36 | gpu_states = [ 37 | torch.cuda.get_rng_state(device) 38 | for device in range(torch.cuda.device_count()) 39 | ] 40 | 41 | try: 42 | torch.manual_seed(seed) 43 | if torch.cuda.is_available(): 44 | torch.cuda.manual_seed_all(seed) 45 | yield 46 | finally: 47 | torch.random.set_rng_state(cpu_state) 48 | if torch.cuda.is_available(): 49 | for idx, state in enumerate(gpu_states): 50 | torch.cuda.set_rng_state(state, device=idx) 51 | -------------------------------------------------------------------------------- /flowy/nodes_previewvideo.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | 4 | # 设置日志记录器 5 | logger = logging.getLogger(__name__) 6 | 7 | class PreviewVideo: 8 | @classmethod 9 | def INPUT_TYPES(s): 10 | return {"required": { 11 | "video": ("VIDEO",), 12 | }} 13 | 14 | CATEGORY = "Comflowy" 15 | DESCRIPTION = "Preview Video Node" 16 | RETURN_TYPES = () 17 | OUTPUT_NODE = True 18 | FUNCTION = "load_video" 19 | 20 | def load_video(self, video): 21 | logger.info(f"PreviewVideo.load_video called with video: {video}") 22 | logger.info(f"Video type: {type(video)}") 23 | 24 | # 确保视频路径是有效的字符串 25 | if not video or not isinstance(video, str): 26 | logger.error(f'Invalid video path or type: {video}') 27 | return {"ui": {"video": ["error.mp4", "output"]}} 28 | 29 | # 检查文件是否存在 30 | if not os.path.exists(video): 31 | logger.error(f'Video file does not exist at path: {video}') 32 | return {"ui": {"video": ["error.mp4", "output"]}} 33 | 34 | # 获取完整的视频文件名和目录 35 | video_filename = os.path.basename(video) 36 | video_dir = os.path.dirname(video) 37 | 38 | logger.info(f'Video filename: {video_filename}') 39 | logger.info(f'Video directory: {video_dir}') 40 | logger.info(f'Full video path: {video}') 41 | logger.info(f'File exists: {os.path.exists(video)}') 42 | logger.info(f'File size: {os.path.getsize(video)} bytes') 43 | 44 | result = {"ui": {"video": [video, video_dir]}} 45 | logger.info(f'Returning result: {result}') 46 | 47 | return result -------------------------------------------------------------------------------- /flowy/api_nodes/fluxultra.py: -------------------------------------------------------------------------------- 1 | from .base import FlowyApiNode 2 | from ..types import STRING, INT, SAFETY_TOLERANCE, BOOLEAN_FALSE 3 | 4 | 5 | class FlowyFluxProUltra(FlowyApiNode): 6 | @classmethod 7 | def INPUT_TYPES(cls): 8 | return { 9 | "required": { 10 | "prompt": ("STRING", {"multiline": True}), 11 | "image_size": ( 12 | ["21:9", "16:9", "4:3", "1:1", "3:4", "9:16", "9:21"], 13 | ), 14 | "raw": BOOLEAN_FALSE, 15 | "seed": ("INT", {"default": 0, "min": 0, "max": 2147483647}), 16 | "safety_tolerance": (SAFETY_TOLERANCE,), 17 | "num_images": ("INT", {"default": 1, "min": 1, "max": 4}), 18 | } 19 | } 20 | 21 | RETURN_TYPES = ("IMAGE",) 22 | DESCRIPTION = """ 23 | Nodes from https://comflowy.com: 24 | - Description: A service to 
generate images using Flux AI. 25 | - How to use: 26 | - Provide a prompt to generate an image. 27 | - Raw: Generate less processed, more natural-looking images 28 | - Make sure to set your API Key using the 'Comflowy Set API Key' node before using this node. 29 | - Output: Returns the generated image. 30 | """ 31 | 32 | def get_model_type(self) -> str: 33 | return "fluxproultra" 34 | 35 | def prepare_payload(self, **kwargs) -> dict: 36 | return { 37 | "prompt": kwargs["prompt"], 38 | "image_size": kwargs["image_size"], 39 | "raw": kwargs["raw"], 40 | "seed": kwargs["seed"], 41 | "safety_tolerance": kwargs["safety_tolerance"], 42 | "num_images": kwargs["num_images"], 43 | } 44 | -------------------------------------------------------------------------------- /flowy/api_nodes/keling.py: -------------------------------------------------------------------------------- 1 | from .base import FlowyApiNode 2 | from ..types import STRING, INT 3 | 4 | class FlowyKling(FlowyApiNode): 5 | @classmethod 6 | def INPUT_TYPES(cls): 7 | return { 8 | "required": { 9 | "image": ("IMAGE",), 10 | "prompt": ("STRING", {"multiline": True}), 11 | "version": (["standard", "pro"],), 12 | "aspect_ratio": (["16:9", "9:16", "1:1"],), 13 | "duration": ([5, 10],), 14 | "seed": ("INT", {"default": 0, "min": 0, "max": 2147483647}), 15 | } 16 | } 17 | 18 | RETURN_TYPES = ("VIDEO",) 19 | RETURN_NAMES = ("video",) 20 | OUTPUT_IS_PREVIEW = True 21 | FUNCTION = "generate" # Changed from image_to_video to match parent class 22 | DESCRIPTION = """ 23 | Nodes from https://comflowy.com: 24 | - Description: A service to generate videos from images by Kling AI. 25 | - How to use: 26 | - Provide an image and a prompt. 27 | - Make sure to set your API Key using the 'Comflowy Set API Key' node before using this node. 28 | - Pro costs 1250 per second of video. Standard will cost 300 per second of video. 29 | - Output: Returns the generated video. 30 | """ 31 | 32 | def get_model_type(self) -> str: 33 | return "kling" 34 | 35 | def prepare_payload(self, **kwargs) -> dict: 36 | image_base64 = self.image_to_base64(kwargs["image"]) 37 | return { 38 | "image": image_base64, 39 | "prompt": kwargs["prompt"], 40 | "version": kwargs["version"], 41 | "aspect_ratio": kwargs["aspect_ratio"], 42 | "duration": kwargs["duration"], 43 | "seed": kwargs["seed"], 44 | } 45 | -------------------------------------------------------------------------------- /flowy/api_nodes/luma.py: -------------------------------------------------------------------------------- 1 | from .base import FlowyApiNode 2 | from ..types import STRING, INT, BOOLEAN_FALSE 3 | 4 | 5 | class FlowyLuma(FlowyApiNode): 6 | @classmethod 7 | def INPUT_TYPES(cls): 8 | return { 9 | "required": { 10 | "image": ("IMAGE",), 11 | "prompt": ("STRING", {"multiline": True}), 12 | "aspect_ratio": (["16:9", "9:16", "1:1"],), 13 | "seed": ("INT", {"default": 0, "min": 0, "max": 2147483647}), 14 | }, 15 | "optional": { 16 | "end_image_optional": ("IMAGE",), 17 | "loop": BOOLEAN_FALSE, 18 | }, 19 | } 20 | 21 | RETURN_TYPES = ("VIDEO",) 22 | RETURN_NAMES = ("video",) 23 | OUTPUT_IS_PREVIEW = True 24 | FUNCTION = "generate" # Changed from image_to_video to match parent class 25 | DESCRIPTION = """ 26 | Nodes from https://comflowy.com: 27 | - Description: A service to generate videos from images by Luma AI. 28 | - How to use: 29 | - Provide an image and a prompt. 30 | - Loop: Whether the video should loop (end of video is blended with the beginning). 
31 |     - Make sure to set your API Key using the 'Comflowy Set API Key' node before using this node.
32 |     - Output: Returns the generated video.
33 |     """
34 | 
35 |     def get_model_type(self) -> str:
36 |         return "luma"
37 | 
38 |     def prepare_payload(self, **kwargs) -> dict:
39 |         image_base64 = self.image_to_base64(kwargs["image"])
40 |         return {
41 |             "image": image_base64,
42 |             "prompt": kwargs["prompt"],
43 |             "aspect_ratio": kwargs["aspect_ratio"],
44 |             "end_image": kwargs.get("end_image_optional"),  # Optional parameter
45 |             "loop": kwargs.get("loop", False),  # Optional parameter with default
46 |             "seed": kwargs["seed"],
47 |         }
48 | 
--------------------------------------------------------------------------------
/nodes_test.py:
--------------------------------------------------------------------------------
1 | import json
2 | import unittest
3 | from flowy.nodes import FlowyHttpRequest
4 | 
5 | class TestFlowyHttpRequest(unittest.TestCase):
6 |     def test_send_http_request_get(self):
7 |         # Arrange
8 |         flowy_http_request = FlowyHttpRequest()
9 |         url = "https://jsonplaceholder.typicode.com/posts/1"
10 |         method = "GET"
11 |         headers = ""  # the node expects headers/body as JSON strings
12 |         body = ""
13 |         output_type = "TEXT"
14 | 
15 |         # Act
16 |         result = flowy_http_request.send_http_request(
17 |             url, method, headers, body
18 |         )
19 | 
20 |         # Parse the result
21 |         parsed_result = result["result"][0]
22 | 
23 |         print(parsed_result)
24 | 
25 |         # Assert
26 |         self.assertIn("userId", parsed_result)
27 |         self.assertIn("id", parsed_result)
28 |         self.assertIn("title", parsed_result)
29 |         self.assertIn("body", parsed_result)
30 | 
31 |     def test_send_http_request_post(self):
32 |         # Arrange
33 |         flowy_http_request = FlowyHttpRequest()
34 |         url = "https://jsonplaceholder.typicode.com/posts"
35 |         method = "POST"
36 |         headers = json.dumps({"Content-type": "application/json; charset=UTF-8"})
37 |         body = json.dumps({"title": "foo", "body": "bar", "userId": 1})
38 |         output_type = "TEXT"
39 | 
40 |         # Act
41 |         result = flowy_http_request.send_http_request(
42 |             url, method, headers, body
43 |         )
44 | 
45 |         # Parse the result
46 |         parsed_result = result["result"][0]
47 | 
48 |         print(parsed_result)
49 |         # Assert
50 |         self.assertIn("id", parsed_result)
51 |         self.assertEqual(parsed_result["title"], "foo")
52 |         self.assertEqual(parsed_result["body"], "bar")
53 |         self.assertEqual(parsed_result["userId"], 1)
54 | 
55 |     # Add more tests for PUT, DELETE, PATCH, and error handling
56 | 
57 | 
58 | if __name__ == "__main__":
59 |     unittest.main()
60 | 
--------------------------------------------------------------------------------
/flowy/api_nodes/replicate/import_schemas.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import replicate
3 | import json
4 | import os
5 | import argparse
6 | 
7 | 
8 | def format_json_file(file_path):
9 |     try:
10 |         with open(file_path, "r") as f:
11 |             data = json.load(f)
12 |             data["run_count"] = 0
13 | 
14 |         with open(file_path, "w") as f:
15 |             json.dump(data, f, indent=4, ensure_ascii=False)
16 |     except json.JSONDecodeError:
17 |         print(f"Error: {file_path} contains invalid JSON")
18 |     except IOError:
19 |         print(f"Error: Could not read or write to {file_path}")
20 | 
21 | 
22 | def format_json_files_in_directory(directory):
23 |     for filename in os.listdir(directory):
24 |         if filename.endswith(".json"):
25 |             file_path = os.path.join(directory, filename)
26 |             format_json_file(file_path)
27 | 
28 | 
29 | def update_schemas(update=False):
30 |     with open("supported_models.json", "r", encoding="utf-8") as f:
31 |         supported_models = json.load(f)
32 | 
33 |     schemas_directory = "schemas"
34 |     existing_schemas = set(os.listdir(schemas_directory))
35 | 
36 |     for model in supported_models["models"]:
37 |         schema_filename = f"{model.replace('/', '_')}.json"
38 |         schema_path = os.path.join(schemas_directory, schema_filename)
39 | 
40 |         if update or schema_filename not in existing_schemas:
41 |             try:
42 |                 m = replicate.models.get(model)
43 |                 with open(schema_path, "w", encoding="utf-8") as f:
44 |                     f.write(m.json())
45 |                 print(f"{'Updated' if update else 'Added'} schema for {model}")
46 |             except replicate.exceptions.ReplicateError as e:
47 |                 print(f"Error fetching schema for {model}: {str(e)}")
48 |                 continue
49 | 
50 |     format_json_files_in_directory(schemas_directory)
51 | 
52 | 
53 | if __name__ == "__main__":
54 |     parser = argparse.ArgumentParser(description="Update model schemas")
55 |     parser.add_argument("--update", action="store_true", help="Update all schemas, not just new ones")
56 |     args = parser.parse_args()
57 | 
58 |     update_schemas(update=args.update)
59 | 
--------------------------------------------------------------------------------
/flowy/api_nodes/replicate/README.md:
--------------------------------------------------------------------------------
1 | # comfyui-replicate
2 | 
3 | Custom nodes for running [Replicate models](https://replicate.com/explore) in ComfyUI.
4 | 
5 | Take a look at the [example workflows](https://github.com/replicate/comfyui-replicate/tree/main/example_workflows) and the [supported Replicate models](https://github.com/replicate/comfyui-replicate/blob/main/supported_models.json) to get started.
6 | 
7 | ![example-screenshot](https://github.com/replicate/comfyui-replicate/assets/319055/0eedb026-de3e-402a-b8fc-0a14c2fd209e)
8 | 
9 | ## Set your Replicate API token before running
10 | 
11 | Make sure you set your REPLICATE_API_TOKEN in your environment. Get your API token here (we recommend creating a new one):
12 | 
13 | https://replicate.com/account/api-tokens
14 | 
15 | To pass in your API token when running ComfyUI you could do:
16 | 
17 | On macOS or Linux:
18 | 
19 | ```sh
20 | export REPLICATE_API_TOKEN="r8_************"; python main.py
21 | ```
22 | 
23 | On Windows:
24 | 
25 | ```sh
26 | set "REPLICATE_API_TOKEN=r8_************" && python main.py
27 | ```
28 | 
29 | ## Direct installation
30 | 
31 | ```sh
32 | cd ComfyUI/custom_nodes
33 | git clone https://github.com/replicate/comfyui-replicate
34 | cd comfyui-replicate
35 | pip install -r requirements.txt
36 | ```
37 | 
38 | ## Supported Replicate models
39 | 
40 | View the `supported_models.json` to see which models are packaged by default.
41 | 
42 | ## Update Replicate models
43 | 
44 | Simply run `./import_schemas.py` to update all model nodes. The latest version of a model is used by default.
45 | 
46 | ## Add more models
47 | 
48 | Only models that return simple text or image outputs are currently supported. If a model returns audio, video, JSON objects, or a combination of outputs, the node will not work as expected.
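
Before adding a model, you can check what it returns by fetching its schema with the same client call that `import_schemas.py` uses (a minimal sketch; it assumes the `replicate` package is installed and `REPLICATE_API_TOKEN` is set in your environment):

```python
# Print a model's schema JSON so you can confirm its output type is plain
# text or an image before adding it to supported_models.json.
import replicate

model = replicate.models.get("fofr/consistent-character")
print(model.json())
```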
49 | 
50 | If you want to add more models, you can:
51 | 
52 | - add the model to `supported_models.json` (for example, `fofr/consistent-character`)
53 | - run `./import_schemas.py`; this will update all schemas and import your new model
54 | - restart ComfyUI
55 | - use the model in a workflow; it’ll have the title ‘Replicate [model author/model name]’
56 | 
57 | ## Roadmap
58 | 
59 | Things to investigate and add to this custom node package:
60 | 
61 | - support for more types of Replicate models (audio and video first)
62 | - showing logs, prediction status and progress (via tqdm)
63 | 
64 | ## Contributing
65 | 
66 | If you add models that others would find useful, feel free to raise PRs.
67 | 
--------------------------------------------------------------------------------
/flowy/utils.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import requests
3 | from .types import API_HOST
4 | 
5 | logger = logging.getLogger(__name__)
6 | 
7 | def llm_request(prompt, system_prompt, llm_model, api_key, max_tokens=3000, timeout=10):
8 |     """
9 |     Send a request to the Comflowy LLM API.
10 | 
11 |     Args:
12 |         prompt (str): The main prompt for the LLM.
13 |         system_prompt (str): The system prompt for the LLM.
14 |         llm_model (str): The LLM model to use.
15 |         api_key (str): The API key for authentication.
16 |         max_tokens (int, optional): Maximum number of tokens to generate. Defaults to 3000.
17 |         timeout (int, optional): Timeout for the request in seconds. Defaults to 10.
18 | 
19 |     Returns:
20 |         str: The generated text from the LLM.
21 | 
22 |     Raises:
23 |         Exception: If there's an error in the API request or response.
24 |     """
25 |     try:
26 |         response = requests.post(
27 |             f"{API_HOST}/api/open/v0/prompt",
28 |             headers={
29 |                 "Content-Type": "application/json",
30 |                 "Authorization": f"Bearer {api_key}",
31 |             },
32 |             json={
33 |                 "prompt": prompt,
34 |                 "system_prompt": system_prompt,
35 |                 "model": llm_model,
36 |                 "max_tokens": max_tokens,
37 |             },
38 |             timeout=timeout,
39 |         )
40 | 
41 |         response.raise_for_status()  # Raise an HTTPError for bad responses
42 | 
43 |         ret = response.json()
44 |         if ret.get("success"):
45 |             return ret.get("text")
46 |         else:
47 |             raise Exception(f"Error: {ret.get('error')}")
48 |     except Exception as e:
49 |         raise Exception(f"Failed to get response from LLM model with {API_HOST}/api/open/v0/prompt, error: {str(e)}")
50 | 
51 | def get_nested_value(obj, path, default=None):
52 |     """
53 |     Get a nested value from a dictionary using a dot-separated path.
54 | 
55 |     Args:
56 |         obj (dict): The dictionary to search in.
57 |         path (str): The dot-separated path to the desired value.
58 |         default: The value to return if the path is not found.
59 | 
60 |     Returns:
61 |         The value at the specified path, or the default value if not found.
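        Example: get_nested_value({"a": {"b": 1}}, "a.b") returns 1; a missing key returns the default.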
62 | """ 63 | keys = path.split('.') 64 | for key in keys: 65 | if isinstance(obj, dict): 66 | obj = obj.get(key, default) 67 | else: 68 | return default 69 | return obj 70 | 71 | # Make sure to export the functions 72 | __all__ = ['llm_request', 'get_nested_value'] 73 | -------------------------------------------------------------------------------- /flowy/nodes_llm.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from .api_key_manager import load_api_key 3 | from .types import ( 4 | API_HOST, 5 | LLM_MODELS, 6 | STRING, 7 | STRING_ML, 8 | ) 9 | from .utils import llm_request 10 | 11 | logger = logging.getLogger(__name__) 12 | 13 | class FlowyLLM: 14 | """ 15 | A node for making requests to the Comflowy LLM service. 16 | """ 17 | @classmethod 18 | def INPUT_TYPES(s): 19 | return { 20 | "required": { 21 | "prompt": STRING_ML, 22 | "system_prompt": STRING_ML, 23 | "llm_model": (LLM_MODELS,), 24 | "seed": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFFFFFFFFFFFF}), 25 | } 26 | } 27 | 28 | RETURN_TYPES = ("STRING",) 29 | FUNCTION = "llm_request" 30 | OUTPUT_NODE = True 31 | CATEGORY = "Comflowy" 32 | DESCRIPTION = """ 33 | Nodes from https://comflowy.com: 34 | - Description: A free service to send a prompt to a LLM model, and get the response. 35 | - How to use: 36 | - Provide a prompt and a system prompt to generate a response from the LLM model. 37 | - Choose the LLM model from the available options. 38 | - Make sure to set your API Key using the 'Comflowy Set API Key' node before using this node. 39 | - Output: Return the generated text from the LLM model. 40 | """ 41 | 42 | def llm_request(self, prompt, system_prompt, llm_model, seed, timeout=10): 43 | """ 44 | Make a request to the Comflowy LLM service. 45 | 46 | Args: 47 | prompt (str): The main prompt for the LLM. 48 | system_prompt (str): The system prompt for the LLM. 49 | llm_model (str): The LLM model to use. 50 | seed (int): The seed for random number generation. 51 | timeout (int, optional): Timeout for the request in seconds. Defaults to 10. 52 | 53 | Returns: 54 | dict: A dictionary containing the UI output and the result. 55 | """ 56 | if seed > 0xFFFFFFFF: 57 | seed = seed & 0xFFFFFFFF 58 | logger.warning("Seed is too large. Truncating to 32-bit: %d", seed) 59 | 60 | api_key = load_api_key() 61 | 62 | if not api_key: 63 | error_msg = "API Key is not set. Please use the 'Comflowy Set API Key' node to set a global API Key before using this node." 
64 |             logger.error(error_msg)
65 |             raise ValueError(error_msg)
66 | 
67 |         try:
68 |             generated_text = llm_request(
69 |                 prompt=prompt,
70 |                 llm_model=llm_model,
71 |                 system_prompt=system_prompt,
72 |                 api_key=api_key,
73 |                 max_tokens=4000,
74 |                 timeout=timeout
75 |             )
76 |             return {"ui": {"text": [generated_text]}, "result": (generated_text,)}
77 |         except Exception as e:
78 |             logger.error(f"Error in LLM request: {str(e)}")
79 |             return {"ui": {"text": [str(e)]}, "result": (str(e),)}
--------------------------------------------------------------------------------
/flowy/api_nodes/flux.py:
--------------------------------------------------------------------------------
1 | import time
2 | import requests
3 | import base64
4 | import io
5 | from PIL import Image
6 | import torch
7 | import numpy as np
8 | import logging
9 | import json
10 | from ..types import STRING, INT, SAFETY_TOLERANCE, BOOLEAN, NUM_IMAGES
11 | from ..utils import get_nested_value
12 | from ..api_key_manager import load_api_key
13 | 
14 | logger = logging.getLogger(__name__)
15 | from .base import FlowyApiNode
16 | 
17 | class FlowyFlux(FlowyApiNode):
18 |     @classmethod
19 |     def INPUT_TYPES(cls):
20 |         return {
21 |             "required": {
22 |                 "prompt": ("STRING", {"multiline": True}),
23 |                 "version": (["flux-1.1-pro", "flux-pro", "flux-dev"],),
24 |                 "image_size": (
25 |                     [
26 |                         "square_hd",
27 |                         "square",
28 |                         "portrait_4_3",
29 |                         "portrait_16_9",
30 |                         "landscape_4_3",
31 |                         "landscape_16_9",
32 |                         "custom",
33 |                     ],
34 |                 ),
35 |                 "seed": ("INT", {"default": 0, "min": 0, "max": 2147483647}),
36 |                 "safety_tolerance": (SAFETY_TOLERANCE,),
37 |                 "num_images": (NUM_IMAGES,),
38 |             },
39 |             "optional": {
40 |                 "height": (
41 |                     "INT",
42 |                     {
43 |                         "default": 512,
44 |                         "min": 256,
45 |                         "max": 2048,
46 |                         "hidden": "image_size != 'custom'",
47 |                     },
48 |                 ),
49 |                 "width": (
50 |                     "INT",
51 |                     {
52 |                         "default": 512,
53 |                         "min": 256,
54 |                         "max": 2048,
55 |                         "hidden": "image_size != 'custom'",
56 |                     },
57 |                 ),
58 |             },
59 |         }
60 | 
61 |     RETURN_TYPES = ("IMAGE",)
62 |     FUNCTION = "generate"
63 |     DESCRIPTION = """Nodes from https://comflowy.com:
64 |     - Description: A service to generate images using Flux AI.
65 |     - How to use:
66 |         - Provide a prompt to generate an image.
67 |         - Choose version, image_size, height, width, and seed.
68 |         - Height and width are only used when image_size=custom.
69 |         - Make sure to set your API Key using the 'Comflowy Set API Key' node first.
70 |     - Output: Returns the generated image."""
71 | 
72 |     def get_model_type(self) -> str:
73 |         return "flux"
74 | 
75 |     def prepare_payload(self, **kwargs) -> dict:
76 |         return {
77 |             "prompt": kwargs["prompt"],
78 |             "version": kwargs["version"],
79 |             "image_size": kwargs["image_size"],
80 |             "height": kwargs["height"],
81 |             "width": kwargs["width"],
82 |             "seed": kwargs["seed"],
83 |             "safety_tolerance": kwargs["safety_tolerance"],
84 |             "num_images": kwargs["num_images"],
85 |         }
86 | 
--------------------------------------------------------------------------------
/flowy/nodes_http.py:
--------------------------------------------------------------------------------
1 | import json
2 | import requests
3 | 
4 | # This node sends an HTTP request (GET/POST/PUT/DELETE/PATCH) to a URL.
5 | # Headers and body are provided as JSON strings, parsed before the request
6 | # is sent, and the parsed JSON response is returned as the node's output.
7 | # Requests time out after 10 seconds.
8 | 
9 | 
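# Example node inputs, mirroring the POST case in nodes_test.py
# (headers_json and body_json are entered as JSON strings):
#   url:          https://jsonplaceholder.typicode.com/posts
#   method:       POST
#   headers_json: {"Content-type": "application/json; charset=UTF-8"}
#   body_json:    {"title": "foo", "body": "bar", "userId": 1}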
10 | from .types import ( 11 | HTTP_REQUEST_METHOD, 12 | STRING, 13 | STRING_ML, 14 | ) 15 | 16 | class FlowyHttpRequest: 17 | @classmethod 18 | def INPUT_TYPES(s): 19 | return { 20 | "required": {"url": STRING, "method": (HTTP_REQUEST_METHOD,)}, 21 | "optional": { 22 | "headers_json": STRING_ML, 23 | "body_json": STRING_ML, 24 | # "json_path": STRING, 25 | }, 26 | } 27 | 28 | RETURN_TYPES = ("JSON",) 29 | FUNCTION = "send_http_request" 30 | OUTPUT_NODE = True 31 | CATEGORY = "Comflowy" 32 | DESCRIPTION = """ 33 | Nodes from https://comflowy.com: 34 | - Description: Send an HTTP request to a URL. 35 | - Output: Return a JSON result from the request. 36 | """ 37 | 38 | def send_http_request(self, url, method, headers_json, body_json): 39 | try: 40 | timeout = 10 41 | try: 42 | headers = json.loads(headers_json) if headers_json else {} 43 | body = json.loads(body_json) if body_json else {} 44 | except Exception as e: 45 | raise ValueError(f"Invalid headers or body: {e}") 46 | print("http request with", url, headers, body) 47 | 48 | response = None 49 | if method == "GET": 50 | response = requests.get(url, headers=headers, timeout=timeout) 51 | elif method == "POST": 52 | response = requests.post( 53 | url, headers=headers, json=body, timeout=timeout 54 | ) 55 | elif method == "PUT": 56 | response = requests.put( 57 | url, headers=headers, json=body, timeout=timeout 58 | ) 59 | elif method == "DELETE": 60 | response = requests.delete(url, headers=headers, timeout=timeout) 61 | elif method == "PATCH": 62 | response = requests.patch( 63 | url, headers=headers, json=body, timeout=timeout 64 | ) 65 | else: 66 | raise ValueError(f"Invalid method {method}") 67 | 68 | response.raise_for_status() # Raise an HTTPError for bad responses 69 | 70 | ret = response.json() 71 | 72 | print("http request result", ret) 73 | # if json_path: 74 | # print("json_path", json_path) 75 | # ret = get_nested_value[json_path] 76 | 77 | return {"ui": {"text": [json.dumps(ret, indent=4)]}, "result": (ret,)} 78 | 79 | except requests.exceptions.RequestException as e: 80 | print("http request error", e) 81 | return {"ui": {"text": [str(e)]}, "result": (str(e),)} 82 | -------------------------------------------------------------------------------- /flowy/api_nodes/ideogram.py: -------------------------------------------------------------------------------- 1 | from .base import FlowyApiNode 2 | from ..types import STRING, INT, get_api_host 3 | 4 | 5 | class FlowyIdeogram(FlowyApiNode): 6 | @classmethod 7 | def INPUT_TYPES(cls): 8 | return { 9 | "required": { 10 | "prompt": ("STRING", {"multiline": True}), 11 | "negative_prompt": ("STRING", {"multiline": True}), 12 | "version": (["ideogram-v2-turbo", "ideogram-v2"],), 13 | "resolution": ( [ "None", "512x1536", "576x1408", "576x1472", "576x1536", "640x1024", "640x1344", "640x1408", "640x1472", "640x1536", "704x1152", "704x1216", "704x1280", "704x1344", "704x1408", "704x1472", "720x1280", "736x1312", "768x1024", "768x1088", "768x1152", "768x1216", "768x1232", "768x1280", "768x1344", "832x960", "832x1024", "832x1088", "832x1152", "832x1216", "832x1248", "864x1152", "896x960", "896x1024", "896x1088", "896x1120", "896x1152", "960x832", "960x896", "960x1024", "960x1088", "1024x640", "1024x768", "1024x832", "1024x896", "1024x960", "1024x1024", "1088x768", "1088x832", "1088x896", "1088x960", "1120x896", "1152x704", "1152x768", "1152x832", "1152x864", "1152x896", "1216x704", "1216x768", "1216x832", "1232x768", "1248x832", "1280x704", "1280x720", "1280x768", "1280x800", "1312x736", 
"1344x640", "1344x704", "1344x768", "1408x576", "1408x640", "1408x704", "1472x576", "1472x640", "1472x704", "1536x512", "1536x576", "1536x640", ], ), 14 | "style_type": ( 15 | ["None", "Auto", "Realistic", "Design", "Anime", "Render 3D"], 16 | ), 17 | "aspect_ratio": ( [ "1:1", "4:3", "3:4", "16:9", "9:16", "3:2", "2:3", "16:10", "10:16", "3:1", "1:3", ], ), 18 | "magic_prompt_option": (["On", "Off"],), 19 | "seed": ("INT", {"default": 0, "min": 0, "max": 2147483647}), 20 | } 21 | } 22 | 23 | RETURN_TYPES = ("IMAGE",) 24 | DESCRIPTION = """ 25 | Nodes from https://comflowy.com: 26 | - Description: A service to generate images using Ideogram AI. 27 | - How to use: 28 | - Provide a prompt to generate an image. 29 | - Choose resolution, style type, aspect ratio, and magic prompt option. 30 | - Resolution overrides aspect ratio. 31 | - Magic Prompt will interpret your prompt and optimize it to maximize variety and quality of the images generated. You can also use it to write prompts in different languages. 32 | - Make sure to set your API Key using the 'Comflowy Set API Key' node before using this node. 33 | - Output: Returns the generated image. 34 | """ 35 | 36 | def get_model_type(self) -> str: 37 | return "ideogram" 38 | 39 | def get_api_host(self) -> str: 40 | API_HOST = get_api_host() 41 | return f"{API_HOST}/api/open/v0/ideogram" 42 | 43 | def prepare_payload(self, **kwargs) -> dict: 44 | return { 45 | "prompt": kwargs["prompt"], 46 | "negative_prompt": kwargs["negative_prompt"], 47 | "version": kwargs["version"], 48 | "resolution": ( 49 | kwargs["resolution"] if kwargs["resolution"] != "None" else None 50 | ), 51 | "style_type": ( 52 | kwargs["style_type"] if kwargs["style_type"] != "None" else None 53 | ), 54 | "aspect_ratio": kwargs["aspect_ratio"], 55 | "magic_prompt_option": kwargs["magic_prompt_option"], 56 | "seed": kwargs["seed"], 57 | } 58 | -------------------------------------------------------------------------------- /flowy/types.py: -------------------------------------------------------------------------------- 1 | import json 2 | import sys 3 | import os 4 | 5 | API_HOST = "https://app.comflowy.com" 6 | # API_HOST = "http://127.0.0.1:3000" 7 | PPT_TOKEN = "" 8 | RUN_ID = "" 9 | ENV = "pro" 10 | # ENV = "preview" 11 | 12 | def _read_config(): 13 | try: 14 | # 首先尝试从线程上下文获取 15 | thread_context_module = sys.modules.get("flowy_execute_thread_context") 16 | if thread_context_module and hasattr(thread_context_module, "get_run_context"): 17 | options = thread_context_module.get_run_context("options") 18 | if options: 19 | print("get_run_context", options.get("custom_node_api_config", {})) 20 | return options.get("custom_node_api_config", {}) 21 | except Exception as e: 22 | print(f"Error getting context from thread: {e}") 23 | 24 | # 回退到文件读取 25 | try: 26 | config_path = "/comfyui/custom_node_api_config.json" 27 | if os.path.exists(config_path): 28 | with open(config_path, "r") as f: 29 | ret = json.load(f) 30 | print("read_config", ret) 31 | return ret 32 | except Exception as e: 33 | print(f"Error reading custom node api config file: {e}") 34 | 35 | # 如果都失败了,返回空字典 36 | return {} 37 | 38 | 39 | def get_api_host(): 40 | config = _read_config() 41 | return config.get("domain", API_HOST) 42 | 43 | def get_modal_cloud_web_url(): 44 | config = _read_config() 45 | env = config.get("env", ENV) 46 | if env == "dev": 47 | return "https://comflowy--cloud-web-dev.modal.run" 48 | else: 49 | return "https://comflowy--comflowyspacecloud-web-main.modal.run" 50 | 51 | def get_ppt_token(): 52 | 
config = _read_config() 53 | return config.get("ppt_token", PPT_TOKEN) 54 | 55 | def get_run_id(): 56 | config = _read_config() 57 | return config.get("run_id", RUN_ID) 58 | 59 | FLOAT = ( 60 | "FLOAT", 61 | {"default": 1, "min": -sys.float_info.max, "max": sys.float_info.max, "step": 0.01}, 62 | ) 63 | 64 | BOOLEAN = ("BOOLEAN", {"default": True}) 65 | BOOLEAN_FALSE = ("BOOLEAN", {"default": False}) 66 | 67 | INT = ("INT", {"default": 1, "min": -sys.maxsize, "max": sys.maxsize, "step": 1}) 68 | 69 | STRING = ("STRING", {"default": ""}) 70 | 71 | STRING_ML = ("STRING", {"multiline": True, "default": ""}) 72 | 73 | STRING_WIDGET = ("STRING", {"forceInput": True}) 74 | 75 | JSON_WIDGET = ("JSON", {"forceInput": True}) 76 | 77 | METADATA_RAW = ("METADATA_RAW", {"forceInput": True}) 78 | 79 | HTTP_REQUEST_METHOD = ["GET", "POST", "PUT", "DELETE", "PATCH"] 80 | 81 | HTTP_REQUEST_TYPE = ["application/json", "application/x-www-form-urlencoded", "multipart/form-data"] 82 | 83 | HTTP_REQUEST_RETURN_TYPE = ["TEXT", "JSON"] 84 | 85 | LLM_MODELS = [ 86 | "Qwen/Qwen2-7B-Instruct", 87 | "Qwen/Qwen2-1.5B-Instruct", 88 | "THUDM/glm-4-9b-chat", 89 | "THUDM/chatglm3-6b", 90 | "01-ai/Yi-1.5-9B-Chat-16K", 91 | "01-ai/Yi-1.5-6B-Chat", 92 | "internlm/internlm2_5-7b-chat" 93 | ] 94 | 95 | SAFETY_TOLERANCE = ["1", "2", "3", "4", "5", "6"] 96 | 97 | NUM_IMAGES = ["1", "2", "3", "4"] 98 | 99 | OUTPUT_FORMAT = ["jpeg", "png"] 100 | 101 | class AnyType(str): 102 | """A special class that is always equal in not equal comparisons. Credit to pythongosssss""" 103 | 104 | def __eq__(self, _) -> bool: 105 | return True 106 | 107 | def __ne__(self, __value: object) -> bool: 108 | return False 109 | 110 | any = AnyType("*") 111 | -------------------------------------------------------------------------------- /flowy/api_nodes/replicate/example_workflows/bark_and_musicgen.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 7, 3 | "last_link_id": 2, 4 | "nodes": [ 5 | { 6 | "id": 5, 7 | "type": "Replicate suno-ai/bark", 8 | "pos": [ 9 | 540, 10 | 264 11 | ], 12 | "size": { 13 | "0": 400, 14 | "1": 266 15 | }, 16 | "flags": {}, 17 | "order": 0, 18 | "mode": 0, 19 | "outputs": [ 20 | { 21 | "name": "AUDIO", 22 | "type": "AUDIO", 23 | "links": [ 24 | 1 25 | ], 26 | "shape": 3, 27 | "slot_index": 0 28 | }, 29 | { 30 | "name": "STRING", 31 | "type": "STRING", 32 | "links": null, 33 | "shape": 3 34 | } 35 | ], 36 | "properties": { 37 | "Node name for S&R": "Replicate suno-ai/bark" 38 | }, 39 | "widgets_values": [ 40 | "Hello, my name is Suno. And, uh — and I like pizza. 
[laughs] But I also have other interests such as playing tic tac toe.", 41 | "announcer", 42 | "", 43 | 0.7, 44 | 0.7, 45 | false, 46 | false 47 | ] 48 | }, 49 | { 50 | "id": 7, 51 | "type": "PreviewAudio", 52 | "pos": [ 53 | 1457, 54 | 272 55 | ], 56 | "size": { 57 | "0": 315, 58 | "1": 76 59 | }, 60 | "flags": {}, 61 | "order": 2, 62 | "mode": 0, 63 | "inputs": [ 64 | { 65 | "name": "audio", 66 | "type": "AUDIO", 67 | "link": 2 68 | } 69 | ], 70 | "properties": { 71 | "Node name for S&R": "PreviewAudio" 72 | }, 73 | "widgets_values": [ 74 | null 75 | ] 76 | }, 77 | { 78 | "id": 6, 79 | "type": "Replicate meta/musicgen", 80 | "pos": [ 81 | 1001, 82 | 269 83 | ], 84 | "size": { 85 | "0": 400, 86 | "1": 436 87 | }, 88 | "flags": {}, 89 | "order": 1, 90 | "mode": 0, 91 | "inputs": [ 92 | { 93 | "name": "input_audio", 94 | "type": "AUDIO", 95 | "link": 1 96 | } 97 | ], 98 | "outputs": [ 99 | { 100 | "name": "AUDIO", 101 | "type": "AUDIO", 102 | "links": [ 103 | 2 104 | ], 105 | "shape": 3, 106 | "slot_index": 0 107 | } 108 | ], 109 | "properties": { 110 | "Node name for S&R": "Replicate meta/musicgen" 111 | }, 112 | "widgets_values": [ 113 | "stereo-melody-large", 114 | "rap, rock", 115 | 20, 116 | true, 117 | 0, 118 | 6, 119 | false, 120 | "loudness", 121 | 250, 122 | 0, 123 | 1, 124 | 3, 125 | "wav", 126 | 1607, 127 | "randomize", 128 | false 129 | ] 130 | } 131 | ], 132 | "links": [ 133 | [ 134 | 1, 135 | 5, 136 | 0, 137 | 6, 138 | 0, 139 | "AUDIO" 140 | ], 141 | [ 142 | 2, 143 | 6, 144 | 0, 145 | 7, 146 | 0, 147 | "AUDIO" 148 | ] 149 | ], 150 | "groups": [], 151 | "config": {}, 152 | "extra": { 153 | "ds": { 154 | "scale": 1, 155 | "offset": [ 156 | -123.2666015625, 157 | -48.2666015625 158 | ] 159 | } 160 | }, 161 | "version": 0.4 162 | } -------------------------------------------------------------------------------- /flowy/api_nodes/fluxdevlora.py: -------------------------------------------------------------------------------- 1 | import time 2 | import requests 3 | import base64 4 | import io 5 | from PIL import Image 6 | import torch 7 | import numpy as np 8 | import logging 9 | import json 10 | from ..types import STRING, INT, SAFETY_TOLERANCE, BOOLEAN, FLOAT, NUM_IMAGES, OUTPUT_FORMAT 11 | from ..utils import logger, get_nested_value 12 | from ..api_key_manager import load_api_key 13 | 14 | logger = logging.getLogger(__name__) 15 | from .base import FlowyApiNode 16 | 17 | class FlowyFluxDevLora(FlowyApiNode): 18 | @classmethod 19 | def INPUT_TYPES(cls): 20 | return { 21 | "required": { 22 | "prompt": ("STRING", {"multiline": True}), 23 | "lora_path": ("STRING", {"multiline": True}), 24 | "lora_scale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 4.0, "step": 0.1}), 25 | "image_size": ( 26 | [ 27 | "square_hd", 28 | "square", 29 | "portrait_4_3", 30 | "portrait_16_9", 31 | "landscape_4_3", 32 | "landscape_16_9", 33 | "custom", 34 | ], 35 | ), 36 | "seed": ("INT", {"default": 0, "min": 0, "max": 2147483647}), 37 | "num_inference_steps": ("INT", {"default": 28, "min": 1, "max": 50}), 38 | "guidance_scale": ("FLOAT", {"default": 3.5, "min": 1.0, "max": 20.0, "step": 0.1}), 39 | "num_images": (NUM_IMAGES,), 40 | "safety_tolerance": (SAFETY_TOLERANCE,), 41 | "output_format": (OUTPUT_FORMAT,), 42 | }, 43 | "optional": { 44 | "height": ( 45 | "INT", 46 | { 47 | "default": 512, 48 | "min": 256, 49 | "max": 2048, 50 | "hidden": "image_size != 'custom'", 51 | }, 52 | ), 53 | "width": ( 54 | "INT", 55 | { 56 | "default": 512, 57 | "min": 256, 58 | "max": 2048, 59 | "hidden": "image_size != 
'custom'", 60 | }, 61 | ), 62 | }, 63 | } 64 | 65 | RETURN_TYPES = ("IMAGE",) 66 | FUNCTION = "generate" 67 | DESCRIPTION = """Nodes from https://comflowy.com: 68 | - Description: A service to generate images using Flux AI. 69 | - How to use: 70 | - Provide a prompt to generate an image. 71 | - Choose version, image_size, height, width, seed, and other parameters. 72 | - Height and width are only used when image_size=custom. 73 | - Make sure to set your API Key using the 'Comflowy Set API Key' node first. 74 | - Output: Returns the generated image.""" 75 | 76 | def get_model_type(self) -> str: 77 | return "fluxdevlora" 78 | 79 | def prepare_payload(self, **kwargs) -> dict: 80 | return { 81 | "prompt": kwargs["prompt"], 82 | "lora_path": kwargs["lora_path"], 83 | "lora_scale": kwargs["lora_scale"], 84 | "image_size": kwargs["image_size"], 85 | "seed": kwargs["seed"], 86 | "num_inference_steps": kwargs["num_inference_steps"], 87 | "guidance_scale": kwargs["guidance_scale"], 88 | "num_images": kwargs["num_images"], 89 | "safety_tolerance": kwargs["safety_tolerance"], 90 | "output_format": kwargs["output_format"], 91 | "height": kwargs["height"], 92 | "width": kwargs["width"], 93 | } 94 | -------------------------------------------------------------------------------- /flowy/api_nodes/recraft.py: -------------------------------------------------------------------------------- 1 | from .base import FlowyApiNode 2 | from ..types import STRING, INT 3 | 4 | class FlowyRecraft(FlowyApiNode): 5 | @classmethod 6 | def INPUT_TYPES(cls): 7 | return { 8 | "required": { 9 | "prompt": ("STRING", {"multiline": True}), 10 | "image_size": ( 11 | [ 12 | "square_hd", 13 | "square", 14 | "portrait_4_3", 15 | "portrait_16_9", 16 | "landscape_4_3", 17 | "landscape_16_9", 18 | "custom", 19 | ], 20 | ), 21 | "style": ( 22 | [ 23 | "realistic_image", 24 | "digital_illustration", 25 | "vector_illustration", 26 | "realistic_image/b_and_w", 27 | "realistic_image/hard_flash", 28 | "realistic_image/hdr", 29 | "realistic_image/natural_light", 30 | "realistic_image/studio_portrait", 31 | "realistic_image/enterprise", 32 | "realistic_image/motion_blur", 33 | "digital_illustration/pixel_art", 34 | "digital_illustration/hand_drawn", 35 | "digital_illustration/grain", 36 | "digital_illustration/infantile_sketch", 37 | "digital_illustration/2d_art_poster", 38 | "digital_illustration/handmade_3d", 39 | "digital_illustration/hand_drawn_outline", 40 | "digital_illustration/engraving_color", 41 | "digital_illustration/2d_art_poster_2", 42 | "vector_illustration/engraving", 43 | "vector_illustration/line_art", 44 | "vector_illustration/line_circuit", 45 | "vector_illustration/linocut", 46 | ], 47 | ), 48 | "seed": ("INT", {"default": 0, "min": 0, "max": 2147483647}), 49 | }, 50 | "optional": { 51 | "height": ( 52 | "INT", 53 | { 54 | "default": 512, 55 | "min": 256, 56 | "max": 2048, 57 | "hidden": "image_size != 'custom'", 58 | }, 59 | ), 60 | "width": ( 61 | "INT", 62 | { 63 | "default": 512, 64 | "min": 256, 65 | "max": 2048, 66 | "hidden": "image_size != 'custom'", 67 | }, 68 | ), 69 | }, 70 | } 71 | 72 | RETURN_TYPES = ("IMAGE",) 73 | DESCRIPTION = """ 74 | Nodes from https://comflowy.com: 75 | - Description: A service to generate images using Recraft AI. 76 | - How to use: 77 | - Provide a prompt to generate an image. 78 | - Style: The style of the generated images. Vector images cost 2X as much. 79 | - Output: Returns the generated image. 
80 | """ 81 | 82 | def get_model_type(self) -> str: 83 | return "recraft" 84 | 85 | def prepare_payload(self, **kwargs) -> dict: 86 | return { 87 | "prompt": kwargs["prompt"], 88 | "image_size": kwargs["image_size"], 89 | "height": kwargs.get("height"), 90 | "width": kwargs.get("width"), 91 | "style": kwargs["style"], 92 | "seed": kwargs["seed"], 93 | } 94 | -------------------------------------------------------------------------------- /flowy/api_nodes/replicate/example_workflows/simple-llava.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 5, 3 | "last_link_id": 4, 4 | "nodes": [ 5 | { 6 | "id": 4, 7 | "type": "Replicate yorickvp/llava-v1.6-34b", 8 | "pos": [ 9 | 900, 10 | 386 11 | ], 12 | "size": { 13 | "0": 400, 14 | "1": 200 15 | }, 16 | "flags": {}, 17 | "order": 1, 18 | "mode": 0, 19 | "inputs": [ 20 | { 21 | "name": "image", 22 | "type": "IMAGE", 23 | "link": 3 24 | } 25 | ], 26 | "outputs": [ 27 | { 28 | "name": "STRING", 29 | "type": "STRING", 30 | "links": [ 31 | 4 32 | ], 33 | "shape": 3, 34 | "slot_index": 0 35 | } 36 | ], 37 | "properties": { 38 | "Node name for S&R": "Replicate yorickvp/llava-v1.6-34b" 39 | }, 40 | "widgets_values": [ 41 | "Describe this image", 42 | 1, 43 | 0.2, 44 | 1024, 45 | "", 46 | false 47 | ] 48 | }, 49 | { 50 | "id": 5, 51 | "type": "ShowText|pysssss", 52 | "pos": [ 53 | 1348, 54 | 390 55 | ], 56 | "size": [ 57 | 366.6171875, 58 | 247.916015625 59 | ], 60 | "flags": {}, 61 | "order": 2, 62 | "mode": 0, 63 | "inputs": [ 64 | { 65 | "name": "text", 66 | "type": "STRING", 67 | "link": 4, 68 | "widget": { 69 | "name": "text" 70 | } 71 | } 72 | ], 73 | "outputs": [ 74 | { 75 | "name": "STRING", 76 | "type": "STRING", 77 | "links": null, 78 | "shape": 6 79 | } 80 | ], 81 | "properties": { 82 | "Node name for S&R": "ShowText|pysssss" 83 | }, 84 | "widgets_values": [ 85 | "", 86 | "The image appears to be a stylized drawing of two individuals, likely a man and a woman, depicted in a cartoon or caricature style. The man has short, light-colored hair and a beard, and is wearing a dark suit with a white shirt and a black tie. The woman has long, blonde hair and is wearing what seems to be a black dress. Both characters are smiling and looking directly at the viewer. The background is a solid light blue color. There is a watermark or logo in the upper right corner of the image, but the text is not legible in this description. The overall style of the image is playful and artistic, with a focus on the facial features and expressions of the characters." 
87 | ] 88 | }, 89 | { 90 | "id": 1, 91 | "type": "LoadImage", 92 | "pos": [ 93 | 551, 94 | 393 95 | ], 96 | "size": [ 97 | 315, 98 | 314 99 | ], 100 | "flags": {}, 101 | "order": 0, 102 | "mode": 0, 103 | "outputs": [ 104 | { 105 | "name": "IMAGE", 106 | "type": "IMAGE", 107 | "links": [ 108 | 3 109 | ], 110 | "shape": 3, 111 | "slot_index": 0 112 | }, 113 | { 114 | "name": "MASK", 115 | "type": "MASK", 116 | "links": null, 117 | "shape": 3 118 | } 119 | ], 120 | "properties": { 121 | "Node name for S&R": "LoadImage" 122 | }, 123 | "widgets_values": [ 124 | "R8__00002_-4.webp", 125 | "image" 126 | ] 127 | } 128 | ], 129 | "links": [ 130 | [ 131 | 3, 132 | 1, 133 | 0, 134 | 4, 135 | 0, 136 | "IMAGE" 137 | ], 138 | [ 139 | 4, 140 | 4, 141 | 0, 142 | 5, 143 | 0, 144 | "STRING" 145 | ] 146 | ], 147 | "groups": [], 148 | "config": {}, 149 | "extra": { 150 | "ds": { 151 | "scale": 1, 152 | "offset": [ 153 | 0, 154 | 0 155 | ] 156 | } 157 | }, 158 | "version": 0.4 159 | } -------------------------------------------------------------------------------- /flowy/api_nodes/replicate/example_workflows/simple-garment-try-on.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 4, 3 | "last_link_id": 3, 4 | "nodes": [ 5 | { 6 | "id": 3, 7 | "type": "LoadImage", 8 | "pos": [ 9 | 237, 10 | 507 11 | ], 12 | "size": [ 13 | 315, 14 | 314 15 | ], 16 | "flags": {}, 17 | "order": 0, 18 | "mode": 0, 19 | "outputs": [ 20 | { 21 | "name": "IMAGE", 22 | "type": "IMAGE", 23 | "links": [ 24 | 1 25 | ], 26 | "shape": 3, 27 | "slot_index": 0 28 | }, 29 | { 30 | "name": "MASK", 31 | "type": "MASK", 32 | "links": null, 33 | "shape": 3 34 | } 35 | ], 36 | "properties": { 37 | "Node name for S&R": "LoadImage" 38 | }, 39 | "widgets_values": [ 40 | "KakaoTalk_Photo_2024-04-04-21-44-45-1.png", 41 | "image" 42 | ] 43 | }, 44 | { 45 | "id": 2, 46 | "type": "LoadImage", 47 | "pos": [ 48 | 240, 49 | 137 50 | ], 51 | "size": [ 52 | 315, 53 | 314 54 | ], 55 | "flags": {}, 56 | "order": 1, 57 | "mode": 0, 58 | "outputs": [ 59 | { 60 | "name": "IMAGE", 61 | "type": "IMAGE", 62 | "links": [ 63 | 2 64 | ], 65 | "shape": 3, 66 | "slot_index": 0 67 | }, 68 | { 69 | "name": "MASK", 70 | "type": "MASK", 71 | "links": null, 72 | "shape": 3 73 | } 74 | ], 75 | "properties": { 76 | "Node name for S&R": "LoadImage" 77 | }, 78 | "widgets_values": [ 79 | "sweater.webp", 80 | "image" 81 | ] 82 | }, 83 | { 84 | "id": 1, 85 | "type": "Replicate cuuupid/idm-vton", 86 | "pos": [ 87 | 915, 88 | 300 89 | ], 90 | "size": { 91 | "0": 315, 92 | "1": 290 93 | }, 94 | "flags": {}, 95 | "order": 2, 96 | "mode": 0, 97 | "inputs": [ 98 | { 99 | "name": "garm_img", 100 | "type": "IMAGE", 101 | "link": 2 102 | }, 103 | { 104 | "name": "human_img", 105 | "type": "IMAGE", 106 | "link": 1 107 | }, 108 | { 109 | "name": "mask_img", 110 | "type": "IMAGE", 111 | "link": null 112 | } 113 | ], 114 | "outputs": [ 115 | { 116 | "name": "IMAGE", 117 | "type": "IMAGE", 118 | "links": [ 119 | 3 120 | ], 121 | "shape": 3, 122 | "slot_index": 0 123 | } 124 | ], 125 | "properties": { 126 | "Node name for S&R": "Replicate cuuupid/idm-vton" 127 | }, 128 | "widgets_values": [ 129 | "", 130 | "upper_body", 131 | false, 132 | false, 133 | false, 134 | 30, 135 | 71, 136 | "randomize", 137 | false 138 | ] 139 | }, 140 | { 141 | "id": 4, 142 | "type": "SaveImage", 143 | "pos": [ 144 | 1312, 145 | 296 146 | ], 147 | "size": [ 148 | 368.7568359375, 149 | 308.6533203125 150 | ], 151 | "flags": {}, 152 | "order": 3, 153 | "mode": 0, 
154 | "inputs": [ 155 | { 156 | "name": "images", 157 | "type": "IMAGE", 158 | "link": 3 159 | } 160 | ], 161 | "properties": {}, 162 | "widgets_values": [ 163 | "ComfyUI" 164 | ] 165 | } 166 | ], 167 | "links": [ 168 | [ 169 | 1, 170 | 3, 171 | 0, 172 | 1, 173 | 1, 174 | "IMAGE" 175 | ], 176 | [ 177 | 2, 178 | 2, 179 | 0, 180 | 1, 181 | 0, 182 | "IMAGE" 183 | ], 184 | [ 185 | 3, 186 | 1, 187 | 0, 188 | 4, 189 | 0, 190 | "IMAGE" 191 | ] 192 | ], 193 | "groups": [], 194 | "config": {}, 195 | "extra": { 196 | "ds": { 197 | "scale": 1, 198 | "offset": [ 199 | 33.1787109375, 200 | -26.455078125 201 | ] 202 | } 203 | }, 204 | "version": 0.4 205 | } -------------------------------------------------------------------------------- /flowy/nodes.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import logging 4 | from .types import STRING 5 | from .api_key_manager import save_api_key 6 | 7 | # 设置日志 8 | logging.basicConfig(level=logging.DEBUG) 9 | logger = logging.getLogger(__name__) 10 | 11 | from .nodes_omost import ( 12 | OmostLLMNode, 13 | OmostToConditioning, 14 | ComflowyOmostPreviewNode, 15 | ComflowyOmostLoadCanvasPythonCodeNode, 16 | ComflowyOmostLoadCanvasConditioningNode, 17 | ) 18 | 19 | from .nodes_json import FlowyPreviewJSON, FlowyExtractJSON, ComflowyLoadJSON 20 | from .nodes_http import FlowyHttpRequest 21 | from .nodes_llm import FlowyLLM 22 | from .api_nodes import ( 23 | FlowyClarityUpscale, 24 | FlowyFlux, 25 | FlowyFluxProUltra, 26 | FlowyFluxDevLora, 27 | FlowyHailuo, 28 | FlowyIdeogram, 29 | FlowyKling, 30 | FlowyLuma, 31 | FlowyRecraft, 32 | REPLICATE_NODE_CLASS_MAPPINGS, 33 | ) 34 | 35 | from .nodes_previewvideo import PreviewVideo 36 | 37 | API_KEY_FILE = os.path.join(os.path.dirname(__file__), "api_key.json") 38 | 39 | class ComflowySetAPIKey: 40 | """ 41 | A node for setting the global Comflowy API Key. 42 | """ 43 | @classmethod 44 | def INPUT_TYPES(s): 45 | return {"required": {"api_key": STRING}} 46 | 47 | RETURN_TYPES = () 48 | FUNCTION = "set_api_key" 49 | OUTPUT_NODE = True 50 | CATEGORY = "Comflowy" 51 | 52 | def set_api_key(self, api_key): 53 | """ 54 | Set the global API key for Comflowy. 55 | 56 | Args: 57 | api_key (str): The API key to be set. 58 | 59 | Returns: 60 | tuple: An empty tuple as this node doesn't produce any output. 61 | 62 | Raises: 63 | ValueError: If the provided API key is empty. 
64 | """ 65 | if not api_key.strip(): 66 | raise ValueError("API Key cannot be empty") 67 | save_api_key(api_key) 68 | print("Comflowy API Key has been set globally") 69 | return () 70 | 71 | NODE_CLASS_MAPPINGS = { 72 | "Comflowy_Http_Request": FlowyHttpRequest, 73 | "Comflowy_LLM": FlowyLLM, 74 | "Comflowy_Preview_JSON": FlowyPreviewJSON, 75 | "Comflowy_Extract_JSON": FlowyExtractJSON, 76 | "Comflowy_Load_JSON": ComflowyLoadJSON, 77 | "Comflowy_Omost_LLM": OmostLLMNode, 78 | "Comflowy_Omost_To_Conditioning": OmostToConditioning, 79 | "Comflowy_Omost_Preview": ComflowyOmostPreviewNode, 80 | "Comflowy_Omost_Load_Canvas_Python_Code": ComflowyOmostLoadCanvasPythonCodeNode, 81 | "Comflowy_Omost_Load_Canvas_Conditioning": ComflowyOmostLoadCanvasConditioningNode, 82 | "Comflowy_Set_API_Key": ComflowySetAPIKey, 83 | "Comflowy_Clarity_Upscale": FlowyClarityUpscale, 84 | "Comflowy_Ideogram": FlowyIdeogram, 85 | "Comflowy_Flux": FlowyFlux, 86 | "Comflowy_Recraft": FlowyRecraft, 87 | "Comflowy_Hailuo": FlowyHailuo, 88 | "Comflowy_Preview_Video": PreviewVideo, 89 | "Comflowy_Luma": FlowyLuma, 90 | "Comflowy_Kling": FlowyKling, 91 | "Comflowy_Flux_Pro_Ultra": FlowyFluxProUltra, 92 | "Comflowy_Flux_Dev_Lora": FlowyFluxDevLora, 93 | **REPLICATE_NODE_CLASS_MAPPINGS 94 | } 95 | 96 | 97 | NODE_DISPLAY_NAME_MAPPINGS = { 98 | "Comflowy_Http_Request": "Comflowy Http Request", 99 | "Comflowy_LLM": "Comflowy LLM", 100 | "Comflowy_Preview_JSON": "Comflowy Preview JSON", 101 | "Comflowy_Extract_JSON": "Comflowy Extract JSON", 102 | "Comflowy_Load_JSON": "Comflowy Load JSON", 103 | "Comflowy_Omost_LLM": "Comflowy Omost LLM", 104 | "Comflowy_Omost_To_Conditioning": "Comflowy Omost To Conditioning", 105 | "Comflowy_Omost_Preview": "Comflowy Omost Preview", 106 | "Comflowy_Omost_Load_Canvas_Python_Code": "Comflowy Omost Load Canvas Python Code", 107 | "Comflowy_Omost_Load_Canvas_Conditioning": "Comflowy Omost Load Canvas Conditioning", 108 | "Comflowy_Set_API_Key": "Comflowy Set API Key", 109 | "Comflowy_Clarity_Upscale": "Comflowy Clarity Upscale", 110 | "Comflowy_Ideogram": "Comflowy Ideogram", 111 | "Comflowy_Flux": "Comflowy Flux", 112 | "Comflowy_Recraft": "Comflowy Recraft", 113 | "Comflowy_Hailuo": "Comflowy Hailuo", 114 | "Comflowy_Preview_Video": "Comflowy Preview Video", 115 | "Comflowy_Luma": "Comflowy Luma", 116 | "Comflowy_Kling": "Comflowy Kling", 117 | "Comflowy_Flux_Pro_Ultra": "Comflowy Flux Pro Ultra", 118 | "Comflowy_Flux_Dev_Lora": "Comflowy Flux Dev Lora", 119 | } 120 | -------------------------------------------------------------------------------- /flowy/api_nodes/replicate/replicate_bridge.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import time 4 | 5 | import torch 6 | from ..base import FlowyApiNode 7 | from .schema_to_node import ( 8 | schema_to_comfyui_input_types, 9 | get_return_type, 10 | name_and_version, 11 | inputs_that_need_arrays, 12 | ) 13 | from ...api_key_manager import load_api_key 14 | 15 | def create_comfyui_node(schema): 16 | replicate_model, node_name = name_and_version(schema) 17 | return_type = get_return_type(schema) 18 | 19 | class ReplicateNode(FlowyApiNode): 20 | @classmethod 21 | def IS_CHANGED(cls, **kwargs): 22 | return time.time() if kwargs["force_rerun"] else "" 23 | 24 | @classmethod 25 | def INPUT_TYPES(cls): 26 | return schema_to_comfyui_input_types(schema) 27 | 28 | RETURN_TYPES = ( 29 | tuple(return_type.values()) 30 | if isinstance(return_type, dict) 31 | else (return_type,) 32 | ) 33 
| CATEGORY = "Comflowy Replicate"
34 | 
35 |         def get_model_type(self) -> str:
36 |             return "replicate"
37 | 
38 |         def get_api_key(self) -> str:
39 |             """Fetch the Comflowy API key."""
40 |             api_key = load_api_key()
41 |             if not api_key:
42 |                 raise ValueError("Comflowy API key not found. Please set your API key first.")
43 |             return api_key
44 | 
45 |         def prepare_payload(self, **kwargs):
46 |             # Remove force_rerun from kwargs
47 |             kwargs = {k: v for k, v in kwargs.items() if k != "force_rerun"}
48 | 
49 |             # Handle array inputs
50 |             array_inputs = inputs_that_need_arrays(schema)
51 |             for input_name in array_inputs:
52 |                 if input_name in kwargs:
53 |                     if isinstance(kwargs[input_name], str):
54 |                         kwargs[input_name] = (
55 |                             []
56 |                             if kwargs[input_name] == ""
57 |                             else kwargs[input_name].split("\n")
58 |                         )
59 |                     else:
60 |                         kwargs[input_name] = [kwargs[input_name]]
61 | 
62 |             # Process image and audio inputs
63 |             for key, value in kwargs.items():
64 |                 if value is not None:
65 |                     input_type = (
66 |                         self.INPUT_TYPES()["required"].get(key, (None,))[0]
67 |                         or self.INPUT_TYPES().get("optional", {}).get(key, (None,))[0]
68 |                     )
69 |                     if input_type == "IMAGE":
70 |                         kwargs[key] = self.image_to_base64(value)
71 |                     elif input_type == "AUDIO":
72 |                         kwargs[key] = self.audio_to_base64(value)
73 | 
74 |             # Remove empty optional inputs
75 |             optional_inputs = self.INPUT_TYPES().get("optional", {})
76 |             for key in list(kwargs.keys()):
77 |                 if key in optional_inputs:
78 |                     if isinstance(kwargs[key], torch.Tensor):
79 |                         continue
80 |                     elif not kwargs[key]:
81 |                         del kwargs[key]
82 | 
83 |             # Add the API key to the payload
84 |             return {
85 |                 "replicate_model": replicate_model,
86 |                 "input": kwargs,
87 |                 "api_key": self.get_api_key()  # attach the Comflowy API key
88 |             }
89 | 
90 |     return node_name, ReplicateNode
91 | 
92 | 
93 | def create_comfyui_nodes_from_schemas(schemas_dir):
94 |     nodes = {}
95 |     current_path = os.path.dirname(os.path.abspath(__file__))
96 |     schemas_dir_path = os.path.join(current_path, schemas_dir)
97 |     for schema_file in os.listdir(schemas_dir_path):
98 |         if schema_file.endswith(".json"):
99 |             with open(
100 |                 os.path.join(schemas_dir_path, schema_file), "r", encoding="utf-8"
101 |             ) as f:
102 |                 schema = json.load(f)
103 |                 node_name, node_class = create_comfyui_node(schema)
104 |                 nodes[node_name] = node_class
105 |     return nodes
106 | 
107 | 
108 | _cached_node_class_mappings = None
109 | 
110 | def get_node_class_mappings():
111 |     global _cached_node_class_mappings
112 |     if _cached_node_class_mappings is None:
113 |         _cached_node_class_mappings = create_comfyui_nodes_from_schemas("schemas")
114 |     return _cached_node_class_mappings
115 | 
116 | 
117 | REPLICATE_NODE_CLASS_MAPPINGS = get_node_class_mappings()
--------------------------------------------------------------------------------
/flowy/lib_omost/greedy_encode.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | import logging
3 | import torch
4 | from typing import Callable, NamedTuple, TypedDict
5 | 
6 | 
7 | class SpecialTokens(TypedDict):
8 |     start: int
9 |     end: int
10 |     pad: int
11 | 
12 | 
13 | class CLIPTokens(NamedTuple):
14 |     clip_l_tokens: list[int]
15 |     clip_g_tokens: list[int] | None = None
16 | 
17 |     @classmethod
18 |     def empty_tokens(cls) -> CLIPTokens:
19 |         return CLIPTokens(clip_l_tokens=[], clip_g_tokens=[])
20 | 
21 |     @property
22 |     def length(self) -> int:
23 |         return len(self.clip_l_tokens)
24 | 
25 |     def __repr__(self) -> str:
26 |         return f"CLIPTokens(clip_l_tokens({len(self.clip_l_tokens)}), 
clip_g_tokens={len(self.clip_g_tokens) if self.clip_g_tokens else None})"
27 | 
28 |     def __add__(self, other: CLIPTokens) -> CLIPTokens:
29 |         if self.clip_g_tokens is None or other.clip_g_tokens is None:
30 |             clip_g_tokens = None
31 |         else:
32 |             clip_g_tokens = self.clip_g_tokens + other.clip_g_tokens
33 | 
34 |         return CLIPTokens(
35 |             clip_l_tokens=self.clip_l_tokens + other.clip_l_tokens,
36 |             clip_g_tokens=clip_g_tokens,
37 |         )
38 | 
39 |     @staticmethod
40 |     def _get_77_tokens(subprompt_inds: list[int]) -> list[int]:
41 |         # Note that all subprompts are theoretically less than 75 tokens (without bos/eos)
42 |         result = (
43 |             [SPECIAL_TOKENS["start"]]
44 |             + subprompt_inds[:75]
45 |             + [SPECIAL_TOKENS["end"]]
46 |             + [SPECIAL_TOKENS["pad"]] * 75
47 |         )
48 |         return result[:77]
49 | 
50 |     def clamp_to_77_tokens(self) -> CLIPTokens:
51 |         return CLIPTokens(
52 |             clip_l_tokens=self._get_77_tokens(self.clip_l_tokens),
53 |             clip_g_tokens=(
54 |                 self._get_77_tokens(self.clip_g_tokens)
55 |                 if self.clip_g_tokens
56 |                 else None
57 |             ),
58 |         )
59 | 
60 | 
61 | class EncoderOutput(NamedTuple):
62 |     cond: torch.Tensor
63 |     pooler: torch.Tensor
64 | 
65 | 
66 | TokenizeFunc = Callable[[str], CLIPTokens]
67 | EncodeFunc = Callable[[CLIPTokens], EncoderOutput]
68 | 
69 | 
70 | # ComfyUI protocol. See sd1_clip.py/sdxl_clip.py for the actual implementation.
71 | SPECIAL_TOKENS: SpecialTokens = {"start": 49406, "end": 49407, "pad": 49407}
72 | 
73 | 
74 | def greedy_partition(items: list[CLIPTokens], max_sum: int) -> list[list[CLIPTokens]]:
75 |     bags: list[list[CLIPTokens]] = []
76 |     current_bag: list[CLIPTokens] = []
77 |     current_sum: int = 0
78 | 
79 |     for item in items:
80 |         num = item.length
81 |         if current_sum + num > max_sum:
82 |             if current_bag:
83 |                 bags.append(current_bag)
84 |             current_bag = [item]
85 |             current_sum = num
86 |         else:
87 |             current_bag.append(item)
88 |             current_sum += num
89 | 
90 |     if current_bag:
91 |         bags.append(current_bag)
92 | 
93 |     return bags
94 | 
95 | 
96 | def encode_bag_of_subprompts_greedy(
97 |     prefixes: list[str],
98 |     suffixes: list[str],
99 |     tokenize_func: TokenizeFunc,
100 |     encode_func: EncodeFunc,
101 |     logger: logging.Logger | None = None,
102 | ) -> EncoderOutput:
103 |     """
104 |     Note: tokenize_func is expected to clamp the tokens to 75 tokens.
105 |     """
106 |     if logger is None:
107 |         logger = logging.getLogger(__name__)
108 | 
109 |     # Begin with tokenizing prefixes
110 |     prefix_tokens: CLIPTokens = sum(
111 |         [tokenize_func(prefix) for prefix in prefixes], CLIPTokens.empty_tokens()
112 |     )
113 |     logger.debug(f"Prefix tokens: {prefix_tokens}")
114 | 
115 |     # Then tokenizing suffixes
116 |     allowed_suffix_length = 75 - prefix_tokens.length
117 |     logger.debug(f"Allowed suffix length: {allowed_suffix_length}")
118 |     suffix_targets: list[CLIPTokens] = [
119 |         tokenize_func(subprompt) for subprompt in suffixes
120 |     ]
121 |     logger.debug(f"Suffix targets: {suffix_targets}")
122 | 
123 |     # Then merge prefix and suffix tokens
124 |     suffix_targets = greedy_partition(suffix_targets, max_sum=allowed_suffix_length)
125 |     targets = [
126 |         sum([prefix_tokens, *b], CLIPTokens.empty_tokens()).clamp_to_77_tokens()
127 |         for b in suffix_targets
128 |     ]
129 | 
130 |     # Encode!
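    # Each clamped 77-token bag is encoded independently; the per-bag conds are
    # then concatenated along the token axis, and the pooled output of the first
    # bag serves as the pooler for the whole prompt.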
131 |     encoded_embeds = [encode_func(target) for target in targets]
132 |     conds_merged = torch.concat([embed.cond for embed in encoded_embeds], dim=1)
133 |     poolers_merged = encoded_embeds[0].pooler
134 |     logger.debug(f"merged conds: {conds_merged.shape}, pooler: {poolers_merged.shape}")
135 | 
136 |     return EncoderOutput(cond=conds_merged, pooler=poolers_merged)
--------------------------------------------------------------------------------
/flowy/nodes_json.py:
--------------------------------------------------------------------------------
1 | import json
2 | from typing import Any, Tuple
3 | 
4 | # Helper nodes for working with JSON values in ComfyUI workflows:
5 | # - FlowyExtractJSON: pull nested values out of a JSON object via json paths
6 | # - FlowyPreviewJSON: show a JSON value in a human-readable format
7 | # - ComflowyLoadJSON: parse a JSON string into a JSON object
8 | #
9 | # Note that all of these nodes operate on the custom "JSON" socket type.
10 | from .types import (
11 |     STRING,
12 | )
13 | from .utils import get_nested_value, logger
14 | 
15 | class FlowyExtractJSON:
16 |     @classmethod
17 |     def INPUT_TYPES(s):
18 |         return {
19 |             "required": {"json_value": ("JSON",)},
20 |             "optional": {
21 |                 "json_path1": STRING,
22 |                 "json_path2": STRING,
23 |                 "json_path3": STRING,
24 |                 "json_path4": STRING,
25 |                 "json_path5": STRING,
26 |             },
27 |         }
28 | 
29 |     CATEGORY = "Comflowy"
30 |     RETURN_TYPES = ("STRING", "STRING", "STRING", "STRING", "STRING")
31 |     RETURN_NAMES = ("text1", "text2", "text3", "text4", "text5")
32 |     OUTPUT_NODE = True
33 |     FUNCTION = "extract_json"
34 |     DESCRIPTION = """
35 | Nodes from https://comflowy.com:
36 | - Description: Extract values from a JSON object.
37 | - How to use: Provide a JSON object and the json_path to extract the value.
38 |     - eg1. json_path: "a.b.c"
39 |     - eg2. json_path: "outputs.0.text" if there is an array in the json object.
40 | - Output: All output values are strings. If the value at the given `json_path` is not a string, its JSON dump is returned.
41 | - Note: If the json_path is not found, it will return an error string.
42 | """
43 | 
44 |     def extract_json(
45 |         self,
46 |         json_value=None,
47 |         json_path1=None,
48 |         json_path2=None,
49 |         json_path3=None,
50 |         json_path4=None,
51 |         json_path5=None,
52 |     ):
53 |         ret = ["", "", "", "", ""]
54 |         paths = [json_path1, json_path2, json_path3, json_path4, json_path5]
55 |         # return {"ui": {"text": [text]}, "result": (text,)}
56 |         if json_value is not None:
57 |             for i, path in enumerate(paths):
58 |                 if path:
59 |                     try:
60 |                         ret[i] = json.dumps(
61 |                             get_nested_value(json_value, path), indent=4
62 |                         )
63 |                     except Exception as e:
64 |                         ret[i] = f"Extract error: {e}"
65 |                         logger.warning(e)
66 | 
67 |         logger.info(f"Extract json is running: {ret}")
68 |         all_text = "\n".join(ret)
69 |         return {"ui": {"text": [all_text]}, "result": tuple(ret)}
70 | 
71 | 
72 | class FlowyPreviewJSON:
73 | 
74 |     @classmethod
75 |     def INPUT_TYPES(s):
76 |         return {
77 |             "required": {"json_value": ("JSON",)},
78 |         }
79 | 
80 |     CATEGORY = "Comflowy"
81 |     RETURN_TYPES = ("STRING",)
82 |     OUTPUT_NODE = True
83 |     FUNCTION = "preview_json"
84 |     DESCRIPTION = """
85 | Nodes from https://comflowy.com:
86 | - Description: Show a JSON in a human-readable format.
87 | - Output: Returns the JSON value as a string.
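- Note: Inputs that are neither a dict nor a list are returned via their `str()` representation.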
88 | """ 89 | 90 | def preview_json(self, json_value=None): 91 | text = "" 92 | logger.info(f"preview json", json_value) 93 | # return {"ui": {"text": [text]}, "result": (text,)} 94 | if json_value is not None: 95 | if isinstance(json_value, dict): 96 | try: 97 | text = json.dumps(json_value, indent=4) 98 | except Exception as e: 99 | text = "The input is a dict, but could not be serialized.\n" 100 | logger.warn(e) 101 | 102 | elif isinstance(json_value, list): 103 | try: 104 | text = json.dumps(json_value, indent=4) 105 | except Exception as e: 106 | text = "The input is a list, but could not be serialized.\n" 107 | logger.warn(e) 108 | 109 | else: 110 | text = str(json_value) 111 | 112 | return {"ui": {"text": [text]}, "result": (text,)} 113 | 114 | 115 | class ComflowyLoadJSON: 116 | @classmethod 117 | def INPUT_TYPES(s): 118 | return { 119 | "required": { 120 | "json_str": ("STRING", {"multiline": True}), 121 | } 122 | } 123 | 124 | RETURN_TYPES = ("JSON",) 125 | FUNCTION = "load_json" 126 | CATEGORY = "Comflowy" 127 | 128 | def load_json(self, json_str: str) -> Tuple[list[any]]: 129 | """Load canvas from file""" 130 | return (json.loads(json_str),) 131 | -------------------------------------------------------------------------------- /flowy/api_nodes/clarityupscaler.py: -------------------------------------------------------------------------------- 1 | from .base import FlowyApiNode 2 | from ..types import STRING, INT, get_api_host 3 | 4 | class FlowyClarityUpscale(FlowyApiNode): 5 | @classmethod 6 | def INPUT_TYPES(cls): 7 | return { 8 | "required": { 9 | "image": ("IMAGE",), 10 | "scale_factor": ( 11 | "FLOAT", 12 | {"default": 2.0, "min": 1.0, "max": 4.0, "step": 0.1}, 13 | ), 14 | "dynamic": ( 15 | "FLOAT", 16 | {"default": 6.0, "min": 1.0, "max": 50.0, "step": 0.1}, 17 | ), 18 | "creativity": ( 19 | "FLOAT", 20 | {"default": 0.35, "min": 0.0, "max": 1.0, "step": 0.01}, 21 | ), 22 | "resemblance": ( 23 | "FLOAT", 24 | {"default": 0.6, "min": 0.0, "max": 3.0, "step": 0.01}, 25 | ), 26 | "tiling_width": ( [ 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, 256, ], {"default": 112}, ), 27 | "tiling_height": ( [ 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, 256, ], {"default": 144}, ), 28 | "num_inference_steps": ("INT", {"default": 18, "min": 1, "max": 100}), 29 | "seed": ("INT", {"default": 1337, "min": 0, "max": 2147483647}), 30 | "handfix": ( 31 | ["disabled", "hands_only", "image_and_hands"], 32 | {"default": "disabled"}, 33 | ), 34 | "pattern": ("BOOLEAN", {"default": False}), 35 | "sharpen": ( 36 | "FLOAT", 37 | {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.1}, 38 | ), 39 | "downscaling": ("BOOLEAN", {"default": False}), 40 | "downscaling_resolution": ( 41 | "INT", 42 | {"default": 768, "min": 256, "max": 2048}, 43 | ), 44 | "sd_model": ( 45 | [ 46 | "epicrealism_naturalSinRC1VAE.safetensors [84d76a0328]", 47 | "juggernaut_reborn.safetensors [338b85bc4f]", 48 | "flat2DAnimerge_v45Sharp.safetensors", 49 | ], 50 | {"default": "juggernaut_reborn.safetensors [338b85bc4f]"}, 51 | ), 52 | "scheduler": ( [ "DPM++ 2M Karras", "DPM++ SDE Karras", "DPM++ 2M SDE Exponential", "DPM++ 2M SDE Karras", "Euler a", "Euler", "LMS", "Heun", "DPM2", "DPM2 a", "DPM++ 2S a", "DPM++ 2M", "DPM++ SDE", "DPM++ 2M SDE", "DPM++ 2M SDE Heun", "DPM++ 2M SDE Heun Karras", "DPM++ 2M SDE Heun Exponential", "DPM++ 3M SDE", "DPM++ 3M SDE Karras", "DPM++ 3M SDE Exponential", "DPM fast", "DPM adaptive", "LMS Karras", "DPM2 Karras", "DPM2 a Karras", "DPM++ 2S a 
Karras", "Restart", "DDIM", "PLMS", "UniPC", ], {"default": "DPM++ 3M SDE Karras"}, ), 53 | } 54 | } 55 | 56 | RETURN_TYPES = ("IMAGE",) 57 | FUNCTION = "generate" # Changed from upscale to match parent class 58 | DESCRIPTION = """ 59 | Nodes from https://comflowy.com: 60 | - Description: A service to upscale images using AI models. 61 | - How to use: 62 | - Provide an image to upscale. 63 | - Dynamic: HDR, try from 3 - 9. 64 | - Pattern: Upscale a pattern with seamless tiling. 65 | - Creativity: Try from 0.3 - 0.9. 66 | - Downscaling: Downscale the image before upscaling. Can improve quality and speed for images with high resolution but lower quality. 67 | - Resemblance: Try from 0.3 - 1.6. 68 | - Make sure to set your API Key using the 'Comflowy Set API Key' node before using this node. 69 | - Output: Returns the upscaled image. 70 | """ 71 | 72 | def get_model_type(self) -> str: 73 | return "clarityupscaler" 74 | 75 | def get_api_host(self) -> str: 76 | API_HOST = get_api_host() 77 | return f"{API_HOST}/api/open/v0/clarityupscaler" 78 | 79 | def prepare_payload(self, **kwargs) -> dict: 80 | image_base64 = self.image_to_base64(kwargs["image"]) 81 | return { 82 | "image": image_base64, 83 | "scale_factor": kwargs["scale_factor"], 84 | "dynamic": kwargs["dynamic"], 85 | "creativity": kwargs["creativity"], 86 | "resemblance": kwargs["resemblance"], 87 | "tiling_width": kwargs["tiling_width"], 88 | "tiling_height": kwargs["tiling_height"], 89 | "num_inference_steps": kwargs["num_inference_steps"], 90 | "seed": kwargs["seed"], 91 | "handfix": kwargs["handfix"], 92 | "pattern": kwargs["pattern"], 93 | "sharpen": kwargs["sharpen"], 94 | "downscaling": kwargs["downscaling"], 95 | "downscaling_resolution": kwargs["downscaling_resolution"], 96 | "sd_model": kwargs["sd_model"], 97 | "scheduler": kwargs["scheduler"], 98 | } 99 | -------------------------------------------------------------------------------- /README_CN.md: -------------------------------------------------------------------------------- 1 | banner 2 | 3 | # Comflowy 插件 4 | 5 |
6 | 7 | ![Version](https://img.shields.io/badge/node_version-0.2-lightblue) 8 | 9 | 10 | 11 | 12 |
13 | 14 | 在使用 ComfyUI 的时候,我们使用的模型基本上都是开源的模型。有些效果很不错的闭源模型,却无法在 ComfyUI 里使用。为了解决这个问题,我们开发了 Comflowy 插件。希望能将这些效果不错的闭源模型也接入到 ComfyUI 里,这样各位就能通过 ComfyUI 将各种闭源模型串联起来使用了。 15 | 16 | ## 一、节点列表 17 | 18 | 1. **Comflowy LLM 节点:** 这是一个调用 LLM 的节点。你可以用它来实现类似 Prompt Generator 的功能。它与市面上的 LLM 节点不同的是,它是通过调用 API 的方式获取结果,这就意味着你不需要安装 Ollama 就能调用 LLM 模型。不再需要担心你的电脑配置是否足够运行这些 LLM 模型。**同时它还是免费的**。 19 | * 你可以使用我们的在线版 Comflowy 运行包含此节点的 [工作流](https://app.comflowy.com/template/84bea01c-e109-41f2-89c6-914fc999a1cf) 。 20 | * 也可以下载 [工作流文件](workflows/LLM_CN.json) 并导入到 ComfyUI 里使用。 21 | *
22 | 工作流截图 23 |
24 | 25 | ![image](images/LLM.png) 26 | 27 | 2. **Comflowy Omost 节点:** [Omost](https://github.com/lllyasviel/Omost) 插件是一个能帮助你撰写 Prompt 的插件,但本地运行此插件,需要配置较高的电脑。我们基于对 Omost 的理解,实现了一款类似的节点,但与之稍微不同的是,我们并没有运行 Omost 官方的模型,而是通过 Prompt Engineering 的方式实现。这样运行的速度会更快一些。 28 | * 在线版 [工作流](https://app.comflowy.com/template/1ce47688-4c85-42af-88ad-290f283eb9ec)。 29 | * 本地版 [工作流文件](workflows/Omost_LLM.json) 。 30 | *
31 | 工作流截图 32 |
33 | 34 | ![image](images/Omost_LLM.png) 35 | 36 | 3. **Comflowy Flux Pro 节点:** Flux Pro 是一个非开源的模型,所以大多数情况下,你无法在 ComfyUI 里使用这个模型。为了解决这个问题,我们开发了这个节点,它允许你直接在 ComfyUI 里生成图片。但请注意,这个模型是一个商业模型,所以每次使用都会扣除你的积分。 37 | * 在线版 [应用](https://app.comflowy.com/app/app-general-image-by-flux-153b)(无需 Workflow 即可使用)。 38 | *
39 | 工作流截图 40 |
41 | 42 | ![image](images/flux.png) 43 |
44 | 4. **Comflowy Flux Pro Ultra 节点:** Flux Pro Ultra 模型是 Flux 最新推出的高清模型。并且你还可以将 Raw 参数设置为 True，这样 Flux 输出的图片会更加真实与精细。
45 | 5. **Comflowy Flux Dev Lora 节点:** Flux Dev Lora 节点是一个可以让你任意加载 Flux LoRA 的节点，你只需要将 Flux LoRA 的模型下载地址填写到节点里，就可以加载使用。
46 | 6. **Comflowy Ideogram 节点:** 与 Flux 类似，Ideogram 也是一个非开源的模型，所以我们还开发了这个节点，它允许你直接在 ComfyUI 里生成图片。但请注意，这个模型是一个商业模型，所以每次使用都会扣除你的积分。这个模型非常适合生成带字的海报。效果个人感觉比 Flux 更好。
47 |     * 在线版 [应用](https://app.comflowy.com/app/app-general-image-by-ideogram-b453)（无需 Workflow 即可使用）。
48 |     *
49 | 工作流截图 50 |
51 | 52 | ![image](images/ideogram.png) 53 |
54 | 7. **Comflowy Recraft 节点:** 与 Flux 类似，Recraft 也是一个非开源的模型，所以我们还开发了这个节点，它允许你直接在 ComfyUI 里生成图片。但请注意，这个模型是一个商业模型，所以每次使用都会扣除你的积分。这个模型与 Ideogram 类似，非常适合生成带字的海报或图片，另外，因为其预设了很多效果，使用起来会更加方便。
55 | 8. **Comflowy Clarity Upscale 节点:** 这是一个能将图片放大，提升图片质量的节点。这个节点号称是 Magnific 的替代模型。海外网红开发者 [levelsio](https://twitter.com/levelsio/status/1827404021684170902) 盛赞过此模型。
56 |     * 在线版 [应用](https://app.comflowy.com/app/app-clarity-upscale-4257)（无需 Workflow 即可使用）。
57 |     *
58 | 工作流截图 59 |
60 | 61 | ![image](images/clarity.png) 62 |
63 | 9. **Comflowy Replicate 节点:** 感谢 [Replicate](https://github.com/replicate/comfyui-replicate) 提供的服务和代码，现在你可以使用 Replicate 里的模型了。
64 | 10. **Comflowy Hailuo 视频节点:** 这个节点使用的是 Hailuo AI 的模型，它可以将图片转换为视频。需要注意，使用此节点需要搭配 Comflowy Preview Video 节点使用。
65 | 11. **Comflowy Kling 视频节点:** 这个节点使用的是 Kling AI 的模型，它可以将图片转换为视频。需要注意，使用此节点需要搭配 Comflowy Preview Video 节点使用。
66 | 
67 | ## 二、价格
68 | 
69 | | 节点 | 价格 |
70 | | --- | --- |
71 | | LLM | 免费 |
72 | | Omost | 免费 |
73 | | Flux Pro | Flux-1.1-pro 每张图大约消耗 400 积分。Flux-pro 每张图大约消耗 550 积分。 |
74 | | Flux Pro Ultra | 每张图大约消耗 600 积分。 |
75 | | Flux Dev Lora | 每张图大约消耗 350 积分。 |
76 | | Ideogram | Ideogram-v2-turbo 每张图大约消耗 800 积分。Ideogram-v2 每张图大约消耗 500 积分。 |
77 | | Recraft | 每张图大约消耗 400~800 积分不等。 |
78 | | Replicate | 这个取决于你使用了何种模型，以及所消耗的时间。 |
79 | | Hailuo | 每个视频大约消耗 5000 积分。 |
80 | | Kling | 消耗的积分取决于视频的长度。 |
81 | 
82 | 
83 | ## 三、如何使用
84 | 
85 | > [!NOTE]
86 | > 需要注意，在使用 Comflowy 插件的时候，有可能会出现因为网络问题导致无法正常使用的情况。如果遇到类似 `Failed to get response from LLM model with https://app.comflowy.com/api/open/v0/prompt` 的报错，请检查一下网络状态。
87 | 
88 | 
89 | Step 1: 安装 Comflowy 插件
90 | 
91 | - 方法一：使用 [ComfyUI Manager](https://github.com/ltdrdata/ComfyUI-Manager) 安装（推荐）
92 | - 方法二：Git 安装
93 | 
94 |   在 ComfyUI 插件目录（例如 “ComfyUI\custom_nodes\”）中打开 cmd 窗口，键入以下命令：
95 | 
96 | ```shell
97 | git clone https://github.com/6174/comflowy-nodes.git
98 | ```
99 | 
100 | 然后启动 ComfyUI。
101 | 
102 | - 方法三：下载zip文件
103 | 
104 | 或者下载解压zip文件，将得到的文件夹复制到 ```ComfyUI\custom_nodes\``` 目录下，然后启动 ComfyUI。
105 | 
106 | 
107 | 108 |
109 | Step 2: 获取 Comflowy API Key 110 | 111 | 接着你需要获取 Comflowy 的 API Key,点击左下角的头像(图①),再点击设置(图②), 最后找到 API Key(图③) 并复制它。**注意,为了后续使用的安全性,请不要将你的 API Key 泄露给他人。** 112 | 113 | ![image](images/API_Key.png) 114 |
115 | 116 |
117 | Step 3: 输入 Comflowy API Key 118 | 119 | 最后需要将 API Key 输入到 Comflowy Set API Key 节点里。输入完后,你可以删除此节点。然后使用 Comflowy 的其他节点。如果你没有输入这个节点,那么你将无法使用 Comflowy 的节点。 120 | 121 | ![image](images/API_Key_Node.png) 122 |
123 | 124 | ## 四、更新记录 125 | 126 | * V0.2:新增 Flux 节点、Ideogram 节点。 127 | * V0.1:支持 LLM 节点、Omost 节点、Http 节点。 128 | 129 | ## 五、感谢 130 | 131 | 1. 感谢 [SiliconFlow](https://siliconflow.cn/) 提供的免费 LLM 服务。 132 | 2. 感谢 [Omost](https://github.com/lllyasviel/Omost) 作者以及 [ComfyUI-Omost](https://github.com/huchenlei/ComfyUI_omost?tab=readme-ov-file) 插件的作者。 133 | 3. 感谢所有为此开源项目做出贡献的人: 134 | 135 | 136 | 137 | 138 | -------------------------------------------------------------------------------- /flowy/api_nodes/replicate/example_workflows/flux.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 21, 3 | "last_link_id": 27, 4 | "nodes": [ 5 | { 6 | "id": 17, 7 | "type": "DF_Text_Box", 8 | "pos": [ 9 | -55, 10 | -228 11 | ], 12 | "size": { 13 | "0": 400, 14 | "1": 200 15 | }, 16 | "flags": {}, 17 | "order": 0, 18 | "mode": 0, 19 | "outputs": [ 20 | { 21 | "name": "STRING", 22 | "type": "STRING", 23 | "links": [ 24 | 22, 25 | 23, 26 | 24 27 | ], 28 | "shape": 3, 29 | "slot_index": 0 30 | } 31 | ], 32 | "properties": { 33 | "Node name for S&R": "DF_Text_Box" 34 | }, 35 | "widgets_values": [ 36 | "a neon light saying \"flux now in comfyui-replicate\"" 37 | ] 38 | }, 39 | { 40 | "id": 13, 41 | "type": "SaveImage", 42 | "pos": [ 43 | 1023, 44 | -699 45 | ], 46 | "size": { 47 | "0": 315, 48 | "1": 270 49 | }, 50 | "flags": {}, 51 | "order": 4, 52 | "mode": 0, 53 | "inputs": [ 54 | { 55 | "name": "images", 56 | "type": "IMAGE", 57 | "link": 25 58 | } 59 | ], 60 | "properties": {}, 61 | "widgets_values": [ 62 | "flux-schnell" 63 | ] 64 | }, 65 | { 66 | "id": 21, 67 | "type": "Replicate black-forest-labs/flux-schnell", 68 | "pos": [ 69 | 580, 70 | -697 71 | ], 72 | "size": [ 73 | 400, 74 | 268 75 | ], 76 | "flags": {}, 77 | "order": 1, 78 | "mode": 0, 79 | "inputs": [ 80 | { 81 | "name": "prompt", 82 | "type": "STRING", 83 | "link": 22, 84 | "widget": { 85 | "name": "prompt" 86 | } 87 | } 88 | ], 89 | "outputs": [ 90 | { 91 | "name": "IMAGE", 92 | "type": "IMAGE", 93 | "links": [ 94 | 25 95 | ], 96 | "shape": 3, 97 | "slot_index": 0 98 | } 99 | ], 100 | "properties": { 101 | "Node name for S&R": "Replicate black-forest-labs/flux-schnell" 102 | }, 103 | "widgets_values": [ 104 | "", 105 | "3:2", 106 | 1, 107 | 1331, 108 | "randomize", 109 | "webp", 110 | 80, 111 | false, 112 | false 113 | ] 114 | }, 115 | { 116 | "id": 14, 117 | "type": "SaveImage", 118 | "pos": [ 119 | 1027, 120 | -375 121 | ], 122 | "size": { 123 | "0": 315, 124 | "1": 270 125 | }, 126 | "flags": {}, 127 | "order": 5, 128 | "mode": 0, 129 | "inputs": [ 130 | { 131 | "name": "images", 132 | "type": "IMAGE", 133 | "link": 26 134 | } 135 | ], 136 | "properties": {}, 137 | "widgets_values": [ 138 | "flux-dev" 139 | ] 140 | }, 141 | { 142 | "id": 20, 143 | "type": "Replicate black-forest-labs/flux-dev", 144 | "pos": [ 145 | 579, 146 | -377 147 | ], 148 | "size": [ 149 | 400, 150 | 316 151 | ], 152 | "flags": {}, 153 | "order": 2, 154 | "mode": 0, 155 | "inputs": [ 156 | { 157 | "name": "image", 158 | "type": "IMAGE", 159 | "link": null 160 | }, 161 | { 162 | "name": "prompt", 163 | "type": "STRING", 164 | "link": 23, 165 | "widget": { 166 | "name": "prompt" 167 | } 168 | } 169 | ], 170 | "outputs": [ 171 | { 172 | "name": "IMAGE", 173 | "type": "IMAGE", 174 | "links": [ 175 | 26 176 | ], 177 | "shape": 3, 178 | "slot_index": 0 179 | } 180 | ], 181 | "properties": { 182 | "Node name for S&R": "Replicate black-forest-labs/flux-dev" 183 | }, 184 | "widgets_values": [ 185 | "", 186 | "3:2", 187 | 
0.8, 188 | 1, 189 | 3.5, 190 | 1454, 191 | "randomize", 192 | "webp", 193 | 80, 194 | false, 195 | false 196 | ] 197 | }, 198 | { 199 | "id": 15, 200 | "type": "SaveImage", 201 | "pos": [ 202 | 1030, 203 | -7 204 | ], 205 | "size": { 206 | "0": 315, 207 | "1": 270 208 | }, 209 | "flags": {}, 210 | "order": 6, 211 | "mode": 0, 212 | "inputs": [ 213 | { 214 | "name": "images", 215 | "type": "IMAGE", 216 | "link": 27 217 | } 218 | ], 219 | "properties": {}, 220 | "widgets_values": [ 221 | "flux-pro" 222 | ] 223 | }, 224 | { 225 | "id": 19, 226 | "type": "Replicate black-forest-labs/flux-pro", 227 | "pos": [ 228 | 579, 229 | -3 230 | ], 231 | "size": [ 232 | 400, 233 | 268 234 | ], 235 | "flags": {}, 236 | "order": 3, 237 | "mode": 0, 238 | "inputs": [ 239 | { 240 | "name": "prompt", 241 | "type": "STRING", 242 | "link": 24, 243 | "widget": { 244 | "name": "prompt" 245 | } 246 | } 247 | ], 248 | "outputs": [ 249 | { 250 | "name": "IMAGE", 251 | "type": "IMAGE", 252 | "links": [ 253 | 27 254 | ], 255 | "shape": 3, 256 | "slot_index": 0 257 | } 258 | ], 259 | "properties": { 260 | "Node name for S&R": "Replicate black-forest-labs/flux-pro" 261 | }, 262 | "widgets_values": [ 263 | "", 264 | "3:2", 265 | 25, 266 | 3, 267 | 2, 268 | 2, 269 | 1262, 270 | "randomize", 271 | false 272 | ] 273 | } 274 | ], 275 | "links": [ 276 | [ 277 | 22, 278 | 17, 279 | 0, 280 | 21, 281 | 0, 282 | "STRING" 283 | ], 284 | [ 285 | 23, 286 | 17, 287 | 0, 288 | 20, 289 | 1, 290 | "STRING" 291 | ], 292 | [ 293 | 24, 294 | 17, 295 | 0, 296 | 19, 297 | 0, 298 | "STRING" 299 | ], 300 | [ 301 | 25, 302 | 21, 303 | 0, 304 | 13, 305 | 0, 306 | "IMAGE" 307 | ], 308 | [ 309 | 26, 310 | 20, 311 | 0, 312 | 14, 313 | 0, 314 | "IMAGE" 315 | ], 316 | [ 317 | 27, 318 | 19, 319 | 0, 320 | 15, 321 | 0, 322 | "IMAGE" 323 | ] 324 | ], 325 | "groups": [], 326 | "config": {}, 327 | "extra": { 328 | "ds": { 329 | "scale": 1, 330 | "offset": [ 331 | 189.2626953125, 332 | 773.8525390625 333 | ] 334 | } 335 | }, 336 | "version": 0.4 337 | } -------------------------------------------------------------------------------- /web/js/previewVideo.js: -------------------------------------------------------------------------------- 1 | import { app } from "../../../scripts/app.js"; 2 | import { api } from '../../../scripts/api.js' 3 | 4 | function fitHeight(node) { 5 | node.setSize([node.size[0], node.computeSize([node.size[0], node.size[1]])[1]]) 6 | node?.graph?.setDirtyCanvas(true); 7 | } 8 | function chainCallback(object, property, callback) { 9 | if (object == undefined) { 10 | //This should not happen. 
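// Log the mistake and return instead of throwing, so the rest of node setup can continue.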
11 |         console.error("Tried to add callback to non-existent object")
12 |         return;
13 |     }
14 |     if (property in object) {
15 |         const callback_orig = object[property]
16 |         object[property] = function () {
17 |             const r = callback_orig.apply(this, arguments);
18 |             callback.apply(this, arguments);
19 |             return r
20 |         };
21 |     } else {
22 |         object[property] = callback;
23 |     }
24 | }
25 | 
26 | function addPreviewOptions(nodeType) {
27 |     chainCallback(nodeType.prototype, "getExtraMenuOptions", function (_, options) {
28 |         // The intended way of appending options is returning a list of extra options,
29 |         // but this isn't used in widgetInputs.js and would require
30 |         // less generalization of chainCallback
31 |         let optNew = []
32 |         try {
33 |             const previewWidget = this.widgets.find((w) => w.name === "videopreview");
34 | 
35 |             let url = null
36 |             if (previewWidget.videoEl?.hidden == false && previewWidget.videoEl.src) {
37 |                 //Use full quality video
38 |                 //url = api.apiURL('/view?' + new URLSearchParams(previewWidget.value.params));
39 |                 url = previewWidget.videoEl.src
40 |             }
41 |             if (url) {
42 |                 optNew.push(
43 |                     {
44 |                         content: "Open preview",
45 |                         callback: () => {
46 |                             window.open(url, "_blank")
47 |                         },
48 |                     },
49 |                     {
50 |                         content: "Save preview",
51 |                         callback: () => {
52 |                             const a = document.createElement("a");
53 |                             a.href = url;
54 |                             a.setAttribute("download", new URLSearchParams(previewWidget.value.params).get("filename"));
55 |                             document.body.append(a);
56 |                             a.click();
57 |                             requestAnimationFrame(() => a.remove());
58 |                         },
59 |                     }
60 |                 );
61 |             }
62 |             if (options.length > 0 && options[0] != null && optNew.length > 0) {
63 |                 optNew.push(null);
64 |             }
65 |             options.unshift(...optNew);
66 | 
67 |         } catch (error) {
68 |             console.log(error);
69 |         }
70 | 
71 |     });
72 | }
73 | function previewVideo(node, file, type) {
74 |     console.log("========== Preview Video Debug Info ==========");
75 |     console.log("Preview Video params:", { file, type });
76 |     console.log("Node:", node);
77 | 
78 |     var element = document.createElement("div");
79 |     console.log("Created element:", element);
80 | 
81 |     const previewNode = node;
82 |     var previewWidget = node.addDOMWidget("videopreview", "preview", element, {
83 |         serialize: false,
84 |         hideOnZoom: false,
85 |         getValue() {
86 |             return element.value;
87 |         },
88 |         setValue(v) {
89 |             element.value = v;
90 |         },
91 |     });
92 |     console.log("Created preview widget:", previewWidget);
93 | 
94 |     previewWidget.computeSize = function (width) {
95 |         if (this.aspectRatio && !this.parentEl.hidden) {
96 |             let height = (previewNode.size[0] - 20) / this.aspectRatio + 10;
97 |             if (!(height > 0)) {
98 |                 height = 0;
99 |             }
100 |             this.computedHeight = height + 10;
101 |             return [width, height];
102 |         }
103 |         return [width, -4];//no loaded src, widget should not display
104 |     }
105 |     // element.style['pointer-events'] = "none"
106 |     previewWidget.value = { hidden: false, paused: false, params: {} }
107 |     previewWidget.parentEl = document.createElement("div");
108 |     previewWidget.parentEl.className = "video_preview";
109 |     previewWidget.parentEl.style['width'] = "100%"
110 |     element.appendChild(previewWidget.parentEl);
111 |     previewWidget.videoEl = document.createElement("video");
112 |     previewWidget.videoEl.controls = true;
113 |     previewWidget.videoEl.loop = false;
114 |     previewWidget.videoEl.muted = false;
115 |     previewWidget.videoEl.style['width'] = "100%"
116 |     previewWidget.videoEl.addEventListener("loadedmetadata", () => {
117 |         previewWidget.aspectRatio = previewWidget.videoEl.videoWidth / 
previewWidget.videoEl.videoHeight;
118 |         fitHeight(previewNode);
119 |     });
120 |     previewWidget.videoEl.addEventListener("error", () => {
121 |         //TODO: consider a way to properly notify the user why a preview isn't shown.
122 |         previewWidget.parentEl.hidden = true;
123 |         fitHeight(previewNode);
124 |     });
125 | 
126 |     let params = {
127 |         "filename": file,
128 |         "type": type,
129 |     }
130 |     console.log("Preview Video URL params:", params);
131 |     console.log("File path check:", {
132 |         exists: file ? "yes" : "no",
133 |         filePath: file,
134 |         type: type
135 |     });
136 | 
137 |     previewWidget.parentEl.hidden = previewWidget.value.hidden;
138 |     console.log("Parent element hidden state:", previewWidget.parentEl.hidden);
139 | 
140 |     previewWidget.videoEl.autoplay = !previewWidget.value.paused && !previewWidget.value.hidden;
141 |     console.log("Video autoplay state:", previewWidget.videoEl.autoplay);
142 | 
143 |     let target_width = 256
144 |     if (element.style?.width) {
145 |         //overscale to allow scrolling. Endpoint won't return higher than native
146 |         target_width = element.style.width.slice(0, -2) * 2;
147 |     }
148 |     if (!params.force_size || params.force_size.includes("?") || params.force_size == "Disabled") {
149 |         params.force_size = target_width + "x?"
150 |     } else {
151 |         let size = params.force_size.split("x")
152 |         let ar = parseInt(size[0]) / parseInt(size[1])
153 |         params.force_size = target_width + "x" + (target_width / ar)
154 |     }
155 | 
156 |     const apiUrl = api.apiURL('/view?' + new URLSearchParams(params));
157 |     console.log("Final video URL:", apiUrl);
158 |     console.log("API URL components:", {
159 |         baseUrl: api.apiURL(''),
160 |         params: new URLSearchParams(params).toString()
161 |     });
162 | 
163 |     previewWidget.videoEl.src = apiUrl;
164 |     console.log("Set video element source:", previewWidget.videoEl.src);
165 | 
166 |     previewWidget.videoEl.hidden = false;
167 |     previewWidget.parentEl.appendChild(previewWidget.videoEl);
168 | 
169 |     // Listen for video load-state events
170 |     previewWidget.videoEl.addEventListener('loadstart', () => {
171 |         console.log('Video loading started');
172 |     });
173 | 
174 |     previewWidget.videoEl.addEventListener('loadeddata', () => {
175 |         console.log('Video data loaded');
176 |     });
177 | 
178 |     previewWidget.videoEl.addEventListener('error', (e) => {
179 |         console.error('Video loading error:', e);
180 |         console.error('Video error details:', previewWidget.videoEl.error);
181 |     });
182 | 
183 |     console.log("========== End Preview Video Debug Info ==========");
184 | }
185 | 
186 | app.registerExtension({
187 |     name: "Comflowy.VideoPreviewer",
188 |     async init() {
189 |         console.log("Comflowy Video Previewer Extension Initialized");
190 |     },
191 |     async setup() {
192 |         console.log("Comflowy Video Previewer Extension Setup");
193 |     },
194 |     async beforeRegisterNodeDef(nodeType, nodeData, app) {
195 |         console.log("Registering node def for:", nodeData?.name);
196 |         if (nodeData?.name === "Comflowy_Preview_Video") {
197 |             console.log("Configuring Preview Video node");
198 |             nodeType.prototype.onExecuted = function (data) {
199 |                 console.log("Preview Video onExecuted called with data:", data);
200 |                 previewVideo(this, data.video[0], data.video[1]);
201 |             }
202 |             addPreviewOptions(nodeType)
203 |         }
204 |     }
205 | });
206 | 
207 | // Extra global debug log
208 | console.log("Comflowy Video Previewer JS Loaded");
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | banner
2 | 
3 | # Comflowy 
ComfyUI Extension 4 | 5 |
6 | 7 | ![Version](https://img.shields.io/badge/node_version-0.2-lightblue) 8 | 9 | 10 | 11 | 12 | [中文说明](./README_CN.md) 13 |
14 | 
15 | When using ComfyUI, most of the models we use are open-source models. Some closed-source models with excellent results cannot be used in ComfyUI. To solve this problem, we developed the Comflowy extension. We hope to integrate these high-quality closed-source models into ComfyUI, so that users can chain together various closed-source models through ComfyUI.
16 | 
17 | 
18 | ## I. Node List
19 | 
20 | 1. **Comflowy LLM Node:** This is a node that calls an LLM. You can use it to implement functions similar to Prompt Generator. Unlike other LLM nodes on the market, it obtains results by calling APIs, which means you don't need to install Ollama to call LLM models. No need to worry about whether your computer configuration is sufficient to run these LLM models. **It's also free**.
21 |     * You can use our online version of Comflowy to run [workflows](https://app.comflowy.com/template/84bea01c-e109-41f2-89c6-914fc999a1cf) containing this node.
22 |     * You can also download the [workflow file](workflows/LLM_CN.json) and import it into ComfyUI for use.
23 |     *
24 | Workflow Screenshot 25 |
26 | 27 | ![image](images/LLM.png) 28 |
29 | 2. **Comflowy Omost Node:** The [Omost](https://github.com/lllyasviel/Omost) extension helps you write prompts, but running it locally requires a fairly powerful machine. Based on our understanding of Omost, we implemented a similar node; the difference is that it doesn't run Omost's official model but achieves the effect through prompt engineering, which makes it run faster.
30 |     * Online version [workflow](https://app.comflowy.com/template/1ce47688-4c85-42af-88ad-290f283eb9ec).
31 |     * Local version [workflow file](workflows/Omost_LLM.json).
32 |     *
33 | Workflow Screenshot 34 |
35 | 36 | ![image](images/Omost_LLM.png) 37 |
38 | 3. **Comflowy Flux Pro Node:** This node generates images with Flux Pro. Flux Pro is a closed-source model, so normally you cannot use it in ComfyUI. To solve this problem, we developed this node, which allows you to generate images directly in ComfyUI. But please note that this is a commercial model, so each use deducts credits from your account.
39 |     * Online version [App](https://app.comflowy.com/app/app-general-image-by-flux-153b).
40 |     *
41 | Workflow Screenshot 42 |
43 | 44 | ![image](images/flux.png) 45 |
46 | 4. **Comflowy Flux Pro Ultra Node:** Flux Pro Ultra is the latest high-definition model in the Flux family. You can also set the Raw parameter to True so that the output images are more realistic and detailed.
47 | 5. **Comflowy Flux Dev Lora Node:** The Flux Dev Lora node lets you load any Flux LoRA: just fill in the download URL of the Flux LoRA model and the node will load and use it.
48 | 6. **Comflowy Ideogram Node:** This node generates images with Ideogram. Like Flux, Ideogram is a closed-source model, so normally you cannot use it in ComfyUI. To solve this problem, we developed this node, which allows you to generate images directly in ComfyUI. But please note that this is a commercial model, so each use deducts credits from your account.
49 |     * Online version [App](https://app.comflowy.com/app/app-general-image-by-ideogram-b453).
50 |     *
51 | Workflow Screenshot 52 |
53 | 54 | ![image](images/ideogram.png) 55 |
56 | 7. **Comflowy Recraft Node:** Like Flux, Recraft is a closed-source commercial model, so we also built this node to let you generate images with it directly in ComfyUI; each use deducts credits from your account. Recraft is similar to Ideogram and is very suitable for generating images with text.
57 | 8. **Comflowy Clarity Upscale Node:** This node upscales images and improves their quality. It is claimed to be a replacement for Magnific, and the well-known indie developer [levelsio](https://twitter.com/levelsio/status/1827404021684170902) has praised this model.
58 |     * Online version [App](https://app.comflowy.com/app/app-clarity-upscale-4257).
59 |     *
60 | Workflow Screenshot 61 |
62 | 63 | ![image](images/clarity.png) 64 |
65 | 9. **Comflowy Replicate Node:** Thanks to [Replicate](https://github.com/replicate/comfyui-replicate) for providing services and code, you can now use the models on Replicate.
66 | 10. **Comflowy Hailuo Video Node:** This node uses the Hailuo AI model, which can convert images to videos. Note that it must be used together with the Comflowy Preview Video node.
67 | 11. **Comflowy Kling Video Node:** This node uses the Kling AI model, which can convert images to videos. Note that it must be used together with the Comflowy Preview Video node.
68 | 
69 | 
70 | ## II. Price
71 | 
72 | | Node | Price |
73 | | --- | --- |
74 | | LLM | Free |
75 | | Omost | Free |
76 | | Flux Pro | Flux-1.1-pro costs approximately 400 credits per image. Flux-pro costs 550 credits per image. |
77 | | Flux Pro Ultra | 600 credits per image. |
78 | | Flux Dev Lora | 350 credits per image. |
79 | | Ideogram | Ideogram-v2-turbo costs approximately 800 credits per image. Ideogram-v2 costs 500 credits per image. |
80 | | Recraft | This model costs approximately 400~800 credits per image, depending on your inputs. |
81 | | Replicate | This depends on the model you use and the time consumed. |
82 | | Hailuo | This costs approximately 5000 credits per video. |
83 | | Kling | This costs credits based on the length of the video. |
84 | 
85 | ## III. How to Use
86 | 
87 | > [!NOTE]
88 | > When using the Comflowy extension, network problems may occasionally prevent it from working. If you encounter an error like `Failed to get response from LLM model with https://app.comflowy.com/api/open/v0/prompt`, check your network status (see the quick check below).
89 | 
90 | 
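To quickly rule out network issues before digging further, you can check that the API host is reachable (a minimal sketch; it uses the `requests` package that this repo already lists in requirements.txt):

```python
# Prints an HTTP status code if the Comflowy API host is reachable;
# raises a ConnectionError / Timeout if the network path is broken.
import requests

print(requests.head("https://app.comflowy.com", timeout=10).status_code)
```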
91 | Step 1: Install Comflowy ComfyUI Extension
92 | 
93 | - Method 1: Install using [ComfyUI Manager](https://github.com/ltdrdata/ComfyUI-Manager) (recommended)
94 | - Method 2: Git installation
95 | 
96 |   Open a cmd window in the ComfyUI extension directory (e.g., "ComfyUI\custom_nodes\") and type the following command:
97 | 
98 | ```shell
99 | git clone https://github.com/6174/comflowy-nodes.git
100 | ```
101 | 
102 | - Method 3: Download zip file
103 | 
104 | Or download and unzip the zip file, copy the resulting folder to the ```ComfyUI\custom_nodes\``` directory, then start ComfyUI.
105 | 
106 | 
107 | 108 |
109 | Step 2: Obtain Comflowy API Key 110 | 111 | Next, you need to obtain the Comflowy API Key. Click on the avatar in the bottom left corner (Figure ①), then click on Settings (Figure ②), and finally find the API Key (Figure ③) and copy it. **Note: For security reasons in future use, please do not disclose your API Key to others.** 112 | 113 | ![image](images/API_Key.png) 114 |
115 | 116 |
117 | Step 3: Enter Comflowy API Key 118 | 119 | Lastly, you need to input the API Key into the Comflowy Set API Key node. After entering it, you can delete this node. Then you can use other Comflowy nodes. If you don't input this node, you won't be able to use Comflowy nodes. 120 | 121 | ![image](images/API_Key_Node.png) 122 |
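Under the hood, this node simply persists the key with `save_api_key` from `flowy/api_key_manager.py`, so you can also set it from a Python shell (a minimal sketch; it assumes you run it from this repository's root directory so that the `flowy` package is importable, and the key shown is a placeholder):

```python
# Writes the key to flowy/api_key.json, the same file the Comflowy nodes read
from flowy.api_key_manager import save_api_key

save_api_key("your-comflowy-api-key")
```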
123 | 124 | ## IV. Update Log 125 | 126 | * V0.2: Added Flux node, Ideogram node. 127 | * V0.1: Support for LLM node, Omost node, Http node. 128 | 129 | ## V. Acknowledgements 130 | 131 | 1. Thanks to [SiliconFlow](https://siliconflow.cn/) for providing free LLM services. 132 | 2. Thanks to the author of [Omost](https://github.com/lllyasviel/Omost) and the author of the [ComfyUI-Omost](https://github.com/huchenlei/ComfyUI_omost?tab=readme-ov-file) extension. 133 | 3. Thanks to all who contributed to this open source project: 134 | 135 | 136 | 137 | -------------------------------------------------------------------------------- /flowy/api_nodes/replicate/schema_to_node.py: -------------------------------------------------------------------------------- 1 | DEFAULT_STEP = 0.01 2 | DEFAULT_ROUND = 0.001 3 | 4 | IMAGE_EXTENSIONS = (".png", ".jpg", ".jpeg", ".gif", ".webp") 5 | VIDEO_EXTENSIONS = (".mp4", ".mkv", ".webm", ".mov", ".mpg", ".mpeg") 6 | AUDIO_EXTENSIONS = (".mp3", ".wav", ".flac", ".mpga", ".m4a") 7 | 8 | TYPE_MAPPING = { 9 | "string": "STRING", 10 | "integer": "INT", 11 | "number": "FLOAT", 12 | "boolean": "BOOLEAN", 13 | } 14 | 15 | 16 | def convert_to_comfyui_input_type( 17 | input_name, openapi_type, openapi_format=None, default_example_input=None 18 | ): 19 | if openapi_type == "string" and openapi_format == "uri": 20 | if ( 21 | default_example_input 22 | and isinstance(default_example_input, dict) 23 | and input_name in default_example_input 24 | ): 25 | if is_type(default_example_input[input_name], IMAGE_EXTENSIONS): 26 | return "IMAGE" 27 | elif is_type(default_example_input[input_name], VIDEO_EXTENSIONS): 28 | return "VIDEO" 29 | elif is_type(default_example_input[input_name], AUDIO_EXTENSIONS): 30 | return "AUDIO" 31 | elif any(x in input_name.lower() for x in ["image", "mask"]): 32 | return "IMAGE" 33 | elif "audio" in input_name.lower(): 34 | return "AUDIO" 35 | else: 36 | return "STRING" 37 | 38 | return TYPE_MAPPING.get(openapi_type, "STRING") 39 | 40 | 41 | def name_and_version(schema): 42 | author = schema["owner"] 43 | name = schema["name"] 44 | version = schema["latest_version"]["id"] 45 | replicate_model = f"{author}/{name}:{version}" 46 | node_name = f"Replicate {author}/{name}" 47 | return replicate_model, node_name 48 | 49 | 50 | def resolve_schema(prop_data, openapi_schema): 51 | if "$ref" in prop_data: 52 | ref_path = prop_data["$ref"].split("/") 53 | current = openapi_schema 54 | for path in ref_path[1:]: # Skip the first '#' element 55 | if path not in current: 56 | return prop_data # Return original if path is invalid 57 | current = current[path] 58 | return current 59 | return prop_data 60 | 61 | 62 | def schema_to_comfyui_input_types(schema): 63 | openapi_schema = schema["latest_version"]["openapi_schema"] 64 | input_schema = openapi_schema["components"]["schemas"]["Input"] 65 | input_types = {"required": {}, "optional": {}} 66 | default_example_input = get_default_example_input(schema) 67 | 68 | required_props = input_schema.get("required", []) 69 | 70 | for prop_name, prop_data in input_schema["properties"].items(): 71 | prop_data = resolve_schema(prop_data, openapi_schema) 72 | default_value = prop_data.get("default", None) 73 | 74 | if "allOf" in prop_data: 75 | prop_data = resolve_schema(prop_data["allOf"][0], openapi_schema) 76 | 77 | if "enum" in prop_data: 78 | input_type = prop_data["enum"] 79 | elif "type" in prop_data: 80 | input_type = convert_to_comfyui_input_type( 81 | prop_name, 82 | prop_data["type"], 83 | prop_data.get("format"), 84 
| default_example_input, 85 | ) 86 | else: 87 | input_type = "STRING" 88 | 89 | input_config = {"default": default_value} if default_value is not None else {} 90 | 91 | if "minimum" in prop_data: 92 | input_config["min"] = prop_data["minimum"] 93 | if "maximum" in prop_data: 94 | input_config["max"] = prop_data["maximum"] 95 | if input_type == "FLOAT": 96 | input_config["step"] = DEFAULT_STEP 97 | input_config["round"] = DEFAULT_ROUND 98 | 99 | if "prompt" in prop_name and prop_data.get("type") == "string": 100 | input_config["multiline"] = True 101 | 102 | # Meta prompt_template needs `{prompt}` to be sent through 103 | # dynamicPrompts would strip it out 104 | if "template" not in prop_name: 105 | input_config["dynamicPrompts"] = True 106 | 107 | if prop_name in required_props: 108 | input_types["required"][prop_name] = (input_type, input_config) 109 | else: 110 | input_types["optional"][prop_name] = (input_type, input_config) 111 | 112 | input_types["optional"]["force_rerun"] = ("BOOLEAN", {"default": False}) 113 | 114 | return order_inputs(input_types, input_schema) 115 | 116 | 117 | def order_inputs(input_types, input_schema): 118 | ordered_input_types = {"required": {}, "optional": {}} 119 | sorted_properties = sorted( 120 | input_schema["properties"].items(), 121 | key=lambda x: x[1].get("x-order", float("inf")), 122 | ) 123 | 124 | for prop_name, _ in sorted_properties: 125 | if prop_name in input_types["required"]: 126 | ordered_input_types["required"][prop_name] = input_types["required"][ 127 | prop_name 128 | ] 129 | elif prop_name in input_types["optional"]: 130 | ordered_input_types["optional"][prop_name] = input_types["optional"][ 131 | prop_name 132 | ] 133 | 134 | ordered_input_types["optional"]["force_rerun"] = input_types["optional"][ 135 | "force_rerun" 136 | ] 137 | 138 | return ordered_input_types 139 | 140 | 141 | def inputs_that_need_arrays(schema): 142 | openapi_schema = schema["latest_version"]["openapi_schema"] 143 | input_schema = openapi_schema["components"]["schemas"]["Input"] 144 | array_inputs = [] 145 | for prop_name, prop_data in input_schema["properties"].items(): 146 | if prop_data.get("type") == "array": 147 | array_inputs.append(prop_name) 148 | 149 | return array_inputs 150 | 151 | 152 | def is_type(default_example_output, extensions): 153 | if isinstance( 154 | default_example_output, str 155 | ) and default_example_output.lower().endswith(extensions): 156 | return True 157 | elif ( 158 | isinstance(default_example_output, list) 159 | and default_example_output 160 | and isinstance(default_example_output[0], str) 161 | and default_example_output[0].lower().endswith(extensions) 162 | ): 163 | return True 164 | return False 165 | 166 | 167 | def get_default_example(schema): 168 | default_example = schema.get("default_example") 169 | return default_example if default_example else None 170 | 171 | 172 | def get_default_example_input(schema): 173 | default_example = get_default_example(schema) 174 | return default_example.get("input") if default_example else None 175 | 176 | 177 | def get_default_example_output(schema): 178 | default_example = get_default_example(schema) 179 | return default_example.get("output") if default_example else None 180 | 181 | 182 | def get_return_type(schema): 183 | openapi_schema = schema["latest_version"]["openapi_schema"] 184 | output_schema = ( 185 | openapi_schema.get("components", {}).get("schemas", {}).get("Output") 186 | ) 187 | default_example_output = get_default_example_output(schema) 188 | 189 | if output_schema and 
"$ref" in output_schema: 190 | output_schema = resolve_schema(output_schema, openapi_schema) 191 | 192 | if isinstance(output_schema, dict) and output_schema.get("properties"): 193 | return_types = {} 194 | for prop_name, prop_data in output_schema["properties"].items(): 195 | if isinstance(default_example_output, dict): 196 | prop_value = default_example_output.get(prop_name) 197 | 198 | if is_type(prop_value, IMAGE_EXTENSIONS): 199 | return_types[prop_name] = "IMAGE" 200 | elif is_type(prop_value, AUDIO_EXTENSIONS): 201 | return_types[prop_name] = "AUDIO" 202 | elif is_type(prop_value, VIDEO_EXTENSIONS): 203 | return_types[prop_name] = "VIDEO_URI" 204 | else: 205 | return_types[prop_name] = "STRING" 206 | elif prop_data.get("format") == "uri": 207 | if "audio" in prop_name.lower(): 208 | return_types[prop_name] = "AUDIO" 209 | elif "image" in prop_name.lower(): 210 | return_types[prop_name] = "IMAGE" 211 | else: 212 | return_types[prop_name] = "STRING" 213 | elif prop_data.get("type") == "string": 214 | return_types[prop_name] = "STRING" 215 | else: 216 | return_types[prop_name] = "STRING" 217 | 218 | return return_types 219 | 220 | if is_type(default_example_output, IMAGE_EXTENSIONS): 221 | return "IMAGE" 222 | elif is_type(default_example_output, VIDEO_EXTENSIONS): 223 | return "VIDEO_URI" 224 | elif is_type(default_example_output, AUDIO_EXTENSIONS): 225 | return "AUDIO" 226 | 227 | if output_schema: 228 | if ( 229 | output_schema.get("type") == "string" 230 | and output_schema.get("format") == "uri" 231 | ): 232 | # Handle single image output 233 | return "IMAGE" 234 | elif ( 235 | output_schema.get("type") == "array" 236 | and output_schema.get("items", {}).get("type") == "string" 237 | and output_schema.get("items", {}).get("format") == "uri" 238 | ): 239 | # Handle multiple image output 240 | return "IMAGE" 241 | 242 | return "STRING" -------------------------------------------------------------------------------- /flowy/api_nodes/replicate/example_workflows/simple-llama3.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 12, 3 | "last_link_id": 11, 4 | "nodes": [ 5 | { 6 | "id": 7, 7 | "type": "CLIPTextEncode", 8 | "pos": [ 9 | 413, 10 | 389 11 | ], 12 | "size": { 13 | "0": 425.27801513671875, 14 | "1": 180.6060791015625 15 | }, 16 | "flags": {}, 17 | "order": 3, 18 | "mode": 0, 19 | "inputs": [ 20 | { 21 | "name": "clip", 22 | "type": "CLIP", 23 | "link": 5 24 | } 25 | ], 26 | "outputs": [ 27 | { 28 | "name": "CONDITIONING", 29 | "type": "CONDITIONING", 30 | "links": [ 31 | 6 32 | ], 33 | "slot_index": 0 34 | } 35 | ], 36 | "properties": { 37 | "Node name for S&R": "CLIPTextEncode" 38 | }, 39 | "widgets_values": [ 40 | "text, watermark" 41 | ] 42 | }, 43 | { 44 | "id": 5, 45 | "type": "EmptyLatentImage", 46 | "pos": [ 47 | 473, 48 | 609 49 | ], 50 | "size": { 51 | "0": 315, 52 | "1": 106 53 | }, 54 | "flags": {}, 55 | "order": 0, 56 | "mode": 0, 57 | "outputs": [ 58 | { 59 | "name": "LATENT", 60 | "type": "LATENT", 61 | "links": [ 62 | 2 63 | ], 64 | "slot_index": 0 65 | } 66 | ], 67 | "properties": { 68 | "Node name for S&R": "EmptyLatentImage" 69 | }, 70 | "widgets_values": [ 71 | 512, 72 | 512, 73 | 1 74 | ] 75 | }, 76 | { 77 | "id": 8, 78 | "type": "VAEDecode", 79 | "pos": [ 80 | 1209, 81 | 188 82 | ], 83 | "size": { 84 | "0": 210, 85 | "1": 46 86 | }, 87 | "flags": {}, 88 | "order": 7, 89 | "mode": 0, 90 | "inputs": [ 91 | { 92 | "name": "samples", 93 | "type": "LATENT", 94 | "link": 7 95 | }, 96 | { 97 | "name": 
"vae", 98 | "type": "VAE", 99 | "link": 8 100 | } 101 | ], 102 | "outputs": [ 103 | { 104 | "name": "IMAGE", 105 | "type": "IMAGE", 106 | "links": [ 107 | 9 108 | ], 109 | "slot_index": 0 110 | } 111 | ], 112 | "properties": { 113 | "Node name for S&R": "VAEDecode" 114 | } 115 | }, 116 | { 117 | "id": 6, 118 | "type": "CLIPTextEncode", 119 | "pos": [ 120 | 415, 121 | 187 122 | ], 123 | "size": [ 124 | 422.84503173828125, 125 | 164.31304931640625 126 | ], 127 | "flags": {}, 128 | "order": 4, 129 | "mode": 0, 130 | "inputs": [ 131 | { 132 | "name": "clip", 133 | "type": "CLIP", 134 | "link": 3 135 | }, 136 | { 137 | "name": "text", 138 | "type": "STRING", 139 | "link": 10, 140 | "widget": { 141 | "name": "text" 142 | } 143 | } 144 | ], 145 | "outputs": [ 146 | { 147 | "name": "CONDITIONING", 148 | "type": "CONDITIONING", 149 | "links": [ 150 | 4 151 | ], 152 | "slot_index": 0 153 | } 154 | ], 155 | "properties": { 156 | "Node name for S&R": "CLIPTextEncode" 157 | }, 158 | "widgets_values": [ 159 | "beautiful scenery nature glass bottle landscape, , purple galaxy bottle," 160 | ] 161 | }, 162 | { 163 | "id": 4, 164 | "type": "CheckpointLoaderSimple", 165 | "pos": [ 166 | 43, 167 | 437 168 | ], 169 | "size": { 170 | "0": 315, 171 | "1": 98 172 | }, 173 | "flags": {}, 174 | "order": 1, 175 | "mode": 0, 176 | "outputs": [ 177 | { 178 | "name": "MODEL", 179 | "type": "MODEL", 180 | "links": [ 181 | 1 182 | ], 183 | "slot_index": 0 184 | }, 185 | { 186 | "name": "CLIP", 187 | "type": "CLIP", 188 | "links": [ 189 | 3, 190 | 5 191 | ], 192 | "slot_index": 1 193 | }, 194 | { 195 | "name": "VAE", 196 | "type": "VAE", 197 | "links": [ 198 | 8 199 | ], 200 | "slot_index": 2 201 | } 202 | ], 203 | "properties": { 204 | "Node name for S&R": "CheckpointLoaderSimple" 205 | }, 206 | "widgets_values": [ 207 | "dreamshaperXL_lightningDPMSDE.safetensors" 208 | ] 209 | }, 210 | { 211 | "id": 3, 212 | "type": "KSampler", 213 | "pos": [ 214 | 863, 215 | 186 216 | ], 217 | "size": { 218 | "0": 315, 219 | "1": 262 220 | }, 221 | "flags": {}, 222 | "order": 6, 223 | "mode": 0, 224 | "inputs": [ 225 | { 226 | "name": "model", 227 | "type": "MODEL", 228 | "link": 1 229 | }, 230 | { 231 | "name": "positive", 232 | "type": "CONDITIONING", 233 | "link": 4 234 | }, 235 | { 236 | "name": "negative", 237 | "type": "CONDITIONING", 238 | "link": 6 239 | }, 240 | { 241 | "name": "latent_image", 242 | "type": "LATENT", 243 | "link": 2 244 | } 245 | ], 246 | "outputs": [ 247 | { 248 | "name": "LATENT", 249 | "type": "LATENT", 250 | "links": [ 251 | 7 252 | ], 253 | "slot_index": 0 254 | } 255 | ], 256 | "properties": { 257 | "Node name for S&R": "KSampler" 258 | }, 259 | "widgets_values": [ 260 | 106695855998949, 261 | "randomize", 262 | 4, 263 | 2, 264 | "dpmpp_sde", 265 | "karras", 266 | 1 267 | ] 268 | }, 269 | { 270 | "id": 9, 271 | "type": "SaveImage", 272 | "pos": [ 273 | 1451, 274 | 189 275 | ], 276 | "size": [ 277 | 210, 278 | 270 279 | ], 280 | "flags": {}, 281 | "order": 8, 282 | "mode": 0, 283 | "inputs": [ 284 | { 285 | "name": "images", 286 | "type": "IMAGE", 287 | "link": 9 288 | } 289 | ], 290 | "properties": {}, 291 | "widgets_values": [ 292 | "ComfyUI" 293 | ] 294 | }, 295 | { 296 | "id": 12, 297 | "type": "ShowText|pysssss", 298 | "pos": [ 299 | 50, 300 | 85 301 | ], 302 | "size": [ 303 | 320.3125, 304 | 217.568359375 305 | ], 306 | "flags": {}, 307 | "order": 5, 308 | "mode": 0, 309 | "inputs": [ 310 | { 311 | "name": "text", 312 | "type": "STRING", 313 | "link": 11, 314 | "widget": { 315 | "name": "text" 316 | 
} 317 | } 318 | ], 319 | "outputs": [ 320 | { 321 | "name": "STRING", 322 | "type": "STRING", 323 | "links": null, 324 | "shape": 6 325 | } 326 | ], 327 | "properties": { 328 | "Node name for S&R": "ShowText|pysssss" 329 | }, 330 | "widgets_values": [ 331 | "", 332 | "cozy living room with large windows, plush couch, crackling fireplace, vintage rug, warm lighting, and a beautiful cityscape view outside" 333 | ] 334 | }, 335 | { 336 | "id": 10, 337 | "type": "Replicate meta/meta-llama-3-70b-instruct", 338 | "pos": [ 339 | -446, 340 | 83 341 | ], 342 | "size": [ 343 | 449.5546875, 344 | 696.453125 345 | ], 346 | "flags": {}, 347 | "order": 2, 348 | "mode": 0, 349 | "outputs": [ 350 | { 351 | "name": "STRING", 352 | "type": "STRING", 353 | "links": [ 354 | 10, 355 | 11 356 | ], 357 | "shape": 3, 358 | "slot_index": 0 359 | } 360 | ], 361 | "properties": { 362 | "Node name for S&R": "Replicate meta/meta-llama-3-70b-instruct" 363 | }, 364 | "widgets_values": [ 365 | "A living room", 366 | "You are a helpful text to image prompt assistant. You write short comma separated prompts for image generators. Return only the prompt, do not use quotes. Embellish the request.", 367 | 512, 368 | 0, 369 | 1, 370 | 0.95, 371 | 0, 372 | "<|end_of_text|>,<|eot_id|>", 373 | 1, 374 | 0, 375 | 1840, 376 | "randomize", 377 | "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system_prompt}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>", 378 | false, 379 | false 380 | ] 381 | } 382 | ], 383 | "links": [ 384 | [ 385 | 1, 386 | 4, 387 | 0, 388 | 3, 389 | 0, 390 | "MODEL" 391 | ], 392 | [ 393 | 2, 394 | 5, 395 | 0, 396 | 3, 397 | 3, 398 | "LATENT" 399 | ], 400 | [ 401 | 3, 402 | 4, 403 | 1, 404 | 6, 405 | 0, 406 | "CLIP" 407 | ], 408 | [ 409 | 4, 410 | 6, 411 | 0, 412 | 3, 413 | 1, 414 | "CONDITIONING" 415 | ], 416 | [ 417 | 5, 418 | 4, 419 | 1, 420 | 7, 421 | 0, 422 | "CLIP" 423 | ], 424 | [ 425 | 6, 426 | 7, 427 | 0, 428 | 3, 429 | 2, 430 | "CONDITIONING" 431 | ], 432 | [ 433 | 7, 434 | 3, 435 | 0, 436 | 8, 437 | 0, 438 | "LATENT" 439 | ], 440 | [ 441 | 8, 442 | 4, 443 | 2, 444 | 8, 445 | 1, 446 | "VAE" 447 | ], 448 | [ 449 | 9, 450 | 8, 451 | 0, 452 | 9, 453 | 0, 454 | "IMAGE" 455 | ], 456 | [ 457 | 10, 458 | 10, 459 | 0, 460 | 6, 461 | 1, 462 | "STRING" 463 | ], 464 | [ 465 | 11, 466 | 10, 467 | 0, 468 | 12, 469 | 0, 470 | "STRING" 471 | ] 472 | ], 473 | "groups": [], 474 | "config": {}, 475 | "extra": { 476 | "ds": { 477 | "scale": 1, 478 | "offset": { 479 | "0": 574.140625, 480 | "1": 77.529296875 481 | } 482 | } 483 | }, 484 | "version": 0.4 485 | } -------------------------------------------------------------------------------- /flowy/api_nodes/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | import time 3 | import requests 4 | import base64 5 | import io 6 | from PIL import Image 7 | import torch 8 | import numpy as np 9 | import logging 10 | import json 11 | import os 12 | from ..types import get_api_host 13 | from ..api_key_manager import load_api_key 14 | import soundfile as sf 15 | 16 | import folder_paths 17 | 18 | logger = logging.getLogger(__name__) 19 | 20 | class FlowyApiNode(ABC): 21 | CATEGORY = "Comflowy" 22 | FUNCTION = "generate" 23 | RETURN_TYPES = [] 24 | 25 | @classmethod 26 | @abstractmethod 27 | def INPUT_TYPES(cls): 28 | """Define input types for the node""" 29 | pass 30 | 31 | # Return the model type identifier 32 | @abstractmethod 33 | def 
get_model_type(self) -> str: 34 | """Return the model type identifier""" 35 | pass 36 | 37 | # Prepare the payload for the API request 38 | @abstractmethod 39 | def prepare_payload(self, **kwargs) -> dict: 40 | """Prepare the API request payload""" 41 | pass 42 | 43 | # Return the full API endpoint URL; defaults to {API_HOST}/api/open/v0/flowy, and subclasses override this for models served from a dedicated endpoint 44 | def get_api_host(self) -> str: 45 | API_HOST = get_api_host() 46 | API_URL = f"{API_HOST}/api/open/v0/flowy" 47 | return API_URL 48 | 49 | # Download an image from a URL and convert it to a tensor 50 | def parse_image_output(self, output_url: str) -> torch.Tensor: 51 | """Convert image URL to tensor""" 52 | start_time = time.time() 53 | for attempt in range(3): 54 | try: 55 | img_response = requests.get(output_url, stream=True) 56 | img_response.raise_for_status() 57 | break 58 | except requests.RequestException as e: 59 | if attempt == 2: # Last attempt 60 | logger.error(f"Unable to access output URL after 3 attempts: {str(e)}") 61 | raise Exception(f"Unable to access output URL: {str(e)}") 62 | logger.warning(f"Attempt {attempt + 1} failed, retrying in 1s...") 63 | time.sleep(1) 64 | 65 | logger.info(f"[Timing] Image request took {time.time() - start_time:.2f}s") 66 | 67 | # stream=True defers the body download until Image.open() reads the raw 68 | # stream below, so the processing timer also covers the actual download. 69 | process_start = time.time() 70 | 71 | # Convert image data to PIL Image 72 | img = Image.open(img_response.raw) 73 | 74 | # Convert to numpy array 75 | img_np = np.array(img) 76 | 77 | # Ensure image has 3 RGB channels 78 | if len(img_np.shape) == 2: # Grayscale image 79 | img_np = np.stack([img_np] * 3, axis=-1) 80 | elif img_np.shape[-1] == 4: # RGBA image 81 | img_np = img_np[:, :, :3] 82 | 83 | # Convert to float32 and normalize to 0-1 range 84 | img_np = img_np.astype(np.float32) / 255.0 85 | 86 | # Convert to torch tensor, ensure shape is [B,H,W,C] 87 | img_tensor = torch.from_numpy(img_np).unsqueeze(0) # Add batch dimension 88 | 89 | logger.info(f"[Timing] Image processing took {time.time() - process_start:.2f}s") 90 | logger.info(f"[Timing] Total image parsing took {time.time() - start_time:.2f}s") 91 | 92 | return img_tensor 93 | 94 | # Download a video from a URL and return its local path 95 | def parse_video_output(self, output_url: str) -> str: 96 | """Download video from URL and return local path""" 97 | output_dir = folder_paths.get_output_directory() 98 | curr_time = time.time() 99 | vid_name = f"output_{curr_time}.mp4" 100 | output_video_path = os.path.join(output_dir, vid_name) 101 | 102 | response = requests.get(output_url) 103 | response.raise_for_status() 104 | 105 | with open(output_video_path, "wb") as f: 106 | f.write(response.content) 107 | 108 | return output_video_path 109 | 110 | def parse_audio_output(self, output_url: str) -> str: 111 | """Download audio from URL and return local path""" 112 | output_dir = folder_paths.get_output_directory() 113 | curr_time = time.time() 114 | audio_name = f"output_{curr_time}.wav" 115 | output_audio_path = os.path.join(output_dir, audio_name) 116 | 117 | response = requests.get(output_url) 118 | response.raise_for_status() 119 | 120 | with open(output_audio_path, "wb") as f: 121 | f.write(response.content) 122 | 123 | return output_audio_path 124 | 125 | # Encode an input image as a base64 data URI 126 | def image_to_base64(self, image) -> str: 127 | """Process input image to base64 string""" 128 | if isinstance(image, torch.Tensor): 129 | if image.dim() == 4: 130 | image = image.squeeze(0) # Remove batch dimension 131 | if image.shape[-1] == 3: 132 | image = (image.cpu().numpy() * 255).astype(np.uint8) 133 | elif 
image.shape[0] == 3: 134 | image = (image.permute(1, 2, 0).cpu().numpy() * 255).astype(np.uint8) 135 | else: 136 | raise ValueError(f"Unsupported image shape: {image.shape}") 137 | elif isinstance(image, np.ndarray): 138 | if image.ndim == 2: 139 | image = np.stack([image] * 3, axis=-1) 140 | elif image.shape[-1] == 1: 141 | image = np.repeat(image, 3, axis=-1) 142 | elif image.shape[-1] != 3: 143 | raise ValueError(f"Unsupported number of channels: {image.shape[-1]}") 144 | image = (image * 255).astype(np.uint8) 145 | else: 146 | raise ValueError(f"Unsupported image type: {type(image)}") 147 | 148 | buffered = io.BytesIO() 149 | Image.fromarray(image).save(buffered, format="JPEG", quality=85) 150 | return ( 151 | f"data:image/jpeg;base64,{base64.b64encode(buffered.getvalue()).decode()}" 152 | ) 153 | 154 | def audio_to_base64(self, audio): 155 | if isinstance(audio, dict) and "waveform" in audio and "sample_rate" in audio: 156 | waveform = audio["waveform"] 157 | sample_rate = audio["sample_rate"] 158 | else: 159 | waveform, sample_rate = audio 160 | 161 | # Ensure waveform is 2D 162 | if waveform.dim() == 1: 163 | waveform = waveform.unsqueeze(0) 164 | elif waveform.dim() > 2: 165 | waveform = waveform.squeeze() 166 | if waveform.dim() > 2: 167 | raise ValueError("Waveform must be 1D or 2D") 168 | 169 | buffer = io.BytesIO() 170 | sf.write(buffer, waveform.numpy().T, sample_rate, format="wav") 171 | buffer.seek(0) 172 | audio_str = base64.b64encode(buffer.getvalue()).decode() 173 | return f"data:audio/wav;base64,{audio_str}" 174 | 175 | # Send the API request 176 | def make_api_request(self, payload: dict): 177 | """Make API request and return response""" 178 | api_key = load_api_key() 179 | if not api_key: 180 | raise ValueError( 181 | "API Key is not set. Please use the 'Comflowy Set API Key' node first." 182 | ) 183 | 184 | API_URL = self.get_api_host() 185 | response = requests.post( 186 | API_URL, 187 | headers={ 188 | "Authorization": f"Bearer {api_key}", 189 | "Content-Type": "application/json", 190 | }, 191 | json=payload, 192 | ) 193 | response.raise_for_status() 194 | return response.json() 195 | 196 | # Main generation entry point 197 | def generate(self, **kwargs): 198 | """Main generation method""" 199 | try: 200 | start_time = time.time() 201 | 202 | # Prepare payload with common fields 203 | payload = self.prepare_payload(**kwargs) 204 | payload["model_type"] = self.get_model_type() 205 | 206 | logger.info(f"[Timing] Payload preparation took {time.time() - start_time:.2f}s") 207 | api_start = time.time() 208 | 209 | # Make API request 210 | result = self.make_api_request(payload) 211 | 212 | logger.info(f"[Timing] API request took {time.time() - api_start:.2f}s") 213 | 214 | if not result.get("success"): 215 | raise Exception( 216 | f"API request failed. 
Response: {json.dumps(result, indent=2)}" 217 | ) 218 | 219 | output_urls = result.get("data", {}).get("output") 220 | if not output_urls or not isinstance(output_urls, list): 221 | raise Exception( 222 | f"Invalid output URLs in response: {json.dumps(result, indent=2)}" 223 | ) 224 | 225 | parse_start = time.time() 226 | 227 | # Handle different return types based on RETURN_TYPES 228 | if self.RETURN_TYPES[0] == "IMAGE": 229 | if len(output_urls) == 1: 230 | # Single image: return it directly 231 | result = (self.parse_image_output(output_urls[0]),) 232 | else: 233 | # Multiple images: concatenate into one batch 234 | tensors = [self.parse_image_output(url) for url in output_urls] 235 | result = (torch.cat(tensors, dim=0),) 236 | elif self.RETURN_TYPES[0] == "VIDEO": 237 | if len(output_urls) == 1: 238 | result = (self.parse_video_output(output_urls[0]),) 239 | else: 240 | result = tuple(self.parse_video_output(url) for url in output_urls) 241 | elif self.RETURN_TYPES[0] == "AUDIO": 242 | if len(output_urls) == 1: 243 | result = (self.parse_audio_output(output_urls[0]),) 244 | else: 245 | result = tuple(self.parse_audio_output(url) for url in output_urls) 246 | elif self.RETURN_TYPES[0] == "STRING": 247 | if len(output_urls) == 1: 248 | result = (output_urls[0],) 249 | else: 250 | result = tuple(output_urls) 251 | else: 252 | raise ValueError(f"Unsupported return type: {self.RETURN_TYPES[0]}") 253 | 254 | logger.info(f"[Timing] Output parsing took {time.time() - parse_start:.2f}s") 255 | logger.info(f"[Timing] Total generation took {time.time() - start_time:.2f}s") 256 | 257 | return result 258 | 259 | except Exception as e: 260 | logger.error(f"Error during generation: {str(e)}") 261 | logger.exception("Detailed error information:") 262 | 263 | if self.RETURN_TYPES[0] == "IMAGE": 264 | return (torch.zeros((1, 100, 400, 3), dtype=torch.float32),) 265 | elif self.RETURN_TYPES[0] == "VIDEO": 266 | return (os.path.join("output", "error.mp4"),) 267 | elif self.RETURN_TYPES[0] == "AUDIO": 268 | return (os.path.join("output", "error.wav"),) 269 | elif self.RETURN_TYPES[0] == "STRING": 270 | return (str(e),) 271 | else: 272 | raise ValueError(f"Unsupported return type: {self.RETURN_TYPES[0]}") 273 | -------------------------------------------------------------------------------- /flowy/api_nodes/replicate/schemas/salesforce_blip.json: -------------------------------------------------------------------------------- 1 | { 2 | "url": "https://replicate.com/salesforce/blip", 3 | "owner": "salesforce", 4 | "name": "blip", 5 | "description": "Generate image captions", 6 | "visibility": "public", 7 | "github_url": "https://github.com/salesforce/BLIP", 8 | "paper_url": "https://arxiv.org/abs/2201.12086", 9 | "license_url": "https://github.com/salesforce/BLIP/blob/main/LICENSE.txt", 10 | "run_count": 0, 11 | "cover_image_url": "https://tjzk.replicate.delivery/models_models_featured_image/b59b459c-c475-414f-ba67-c424a7e6e6ca/demo.jpg", 12 | "default_example": { 13 | "id": "i5xhgehjkjfyhlx7dg2f5tnrti", 14 | "model": "salesforce/blip", 15 | "version": "5a977fcb091363cc3938cc870c9f6242f5938267040e6837373387bb90d23099", 16 | "status": "succeeded", 17 | "input": { 18 | "task": "image_captioning", 19 | "image": "https://replicate.delivery/mgxm/f4e50a7b-e8ca-432f-8e68-082034ebcc70/demo.jpg" 20 | }, 21 | "output": [ 22 | { 23 | "text": "Caption: a woman sitting on the beach with a dog" 24 | } 25 | ], 26 | "logs": null, 27 | "error": null, 28 | "metrics": { 29 | "predict_time": 6.644098, 30 | "total_time": 100.906686 31 | }, 32 | "created_at": 
"2022-02-06T17:52:02.993876Z", 33 | "started_at": "2022-02-06T17:53:37.256464Z", 34 | "completed_at": "2022-02-06T17:53:43.900562Z", 35 | "urls": { 36 | "get": "https://api.replicate.com/v1/predictions/i5xhgehjkjfyhlx7dg2f5tnrti", 37 | "cancel": "https://api.replicate.com/v1/predictions/i5xhgehjkjfyhlx7dg2f5tnrti/cancel" 38 | } 39 | }, 40 | "latest_version": { 41 | "id": "2e1dddc8621f72155f24cf2e0adbde548458d3cab9f00c0139eea840d0ac4746", 42 | "created_at": "2022-09-29T02:21:53.164660+00:00", 43 | "cog_version": "0.4.1", 44 | "openapi_schema": { 45 | "info": { 46 | "title": "Cog", 47 | "version": "0.1.0" 48 | }, 49 | "paths": { 50 | "/": { 51 | "get": { 52 | "summary": "Root", 53 | "responses": { 54 | "200": { 55 | "content": { 56 | "application/json": { 57 | "schema": {} 58 | } 59 | }, 60 | "description": "Successful Response" 61 | } 62 | }, 63 | "operationId": "root__get" 64 | } 65 | }, 66 | "/predictions": { 67 | "post": { 68 | "summary": "Predict", 69 | "responses": { 70 | "200": { 71 | "content": { 72 | "application/json": { 73 | "schema": { 74 | "$ref": "#/components/schemas/Response" 75 | } 76 | } 77 | }, 78 | "description": "Successful Response" 79 | }, 80 | "422": { 81 | "content": { 82 | "application/json": { 83 | "schema": { 84 | "$ref": "#/components/schemas/HTTPValidationError" 85 | } 86 | } 87 | }, 88 | "description": "Validation Error" 89 | } 90 | }, 91 | "description": "Run a single prediction on the model", 92 | "operationId": "predict_predictions_post", 93 | "requestBody": { 94 | "content": { 95 | "application/json": { 96 | "schema": { 97 | "$ref": "#/components/schemas/Request" 98 | } 99 | } 100 | } 101 | } 102 | } 103 | } 104 | }, 105 | "openapi": "3.0.2", 106 | "components": { 107 | "schemas": { 108 | "task": { 109 | "enum": [ 110 | "image_captioning", 111 | "visual_question_answering", 112 | "image_text_matching" 113 | ], 114 | "type": "string", 115 | "title": "task", 116 | "description": "An enumeration." 117 | }, 118 | "Input": { 119 | "type": "object", 120 | "title": "Input", 121 | "required": [ 122 | "image" 123 | ], 124 | "properties": { 125 | "task": { 126 | "allOf": [ 127 | { 128 | "$ref": "#/components/schemas/task" 129 | } 130 | ], 131 | "default": "image_captioning", 132 | "x-order": 1, 133 | "description": "Choose a task." 134 | }, 135 | "image": { 136 | "type": "string", 137 | "title": "Image", 138 | "format": "uri", 139 | "x-order": 0, 140 | "description": "Input image" 141 | }, 142 | "caption": { 143 | "type": "string", 144 | "title": "Caption", 145 | "x-order": 3, 146 | "description": "Type caption for the input image for image text matching task." 147 | }, 148 | "question": { 149 | "type": "string", 150 | "title": "Question", 151 | "x-order": 2, 152 | "description": "Type question for the input image for visual question answering task." 153 | } 154 | } 155 | }, 156 | "Output": { 157 | "type": "string", 158 | "title": "Output" 159 | }, 160 | "Status": { 161 | "enum": [ 162 | "processing", 163 | "succeeded", 164 | "failed" 165 | ], 166 | "type": "string", 167 | "title": "Status", 168 | "description": "An enumeration." 
169 | }, 170 | "Request": { 171 | "type": "object", 172 | "title": "Request", 173 | "properties": { 174 | "input": { 175 | "$ref": "#/components/schemas/Input" 176 | }, 177 | "output_file_prefix": { 178 | "type": "string", 179 | "title": "Output File Prefix" 180 | } 181 | }, 182 | "description": "The request body for a prediction" 183 | }, 184 | "Response": { 185 | "type": "object", 186 | "title": "Response", 187 | "required": [ 188 | "status" 189 | ], 190 | "properties": { 191 | "error": { 192 | "type": "string", 193 | "title": "Error" 194 | }, 195 | "output": { 196 | "$ref": "#/components/schemas/Output" 197 | }, 198 | "status": { 199 | "$ref": "#/components/schemas/Status" 200 | } 201 | }, 202 | "description": "The response body for a prediction" 203 | }, 204 | "ValidationError": { 205 | "type": "object", 206 | "title": "ValidationError", 207 | "required": [ 208 | "loc", 209 | "msg", 210 | "type" 211 | ], 212 | "properties": { 213 | "loc": { 214 | "type": "array", 215 | "items": { 216 | "anyOf": [ 217 | { 218 | "type": "string" 219 | }, 220 | { 221 | "type": "integer" 222 | } 223 | ] 224 | }, 225 | "title": "Location" 226 | }, 227 | "msg": { 228 | "type": "string", 229 | "title": "Message" 230 | }, 231 | "type": { 232 | "type": "string", 233 | "title": "Error Type" 234 | } 235 | } 236 | }, 237 | "HTTPValidationError": { 238 | "type": "object", 239 | "title": "HTTPValidationError", 240 | "properties": { 241 | "detail": { 242 | "type": "array", 243 | "items": { 244 | "$ref": "#/components/schemas/ValidationError" 245 | }, 246 | "title": "Detail" 247 | } 248 | } 249 | } 250 | } 251 | } 252 | } 253 | } 254 | } -------------------------------------------------------------------------------- /flowy/api_nodes/replicate/schemas/jingyunliang_swinir.json: -------------------------------------------------------------------------------- 1 | { 2 | "url": "https://replicate.com/jingyunliang/swinir", 3 | "owner": "jingyunliang", 4 | "name": "swinir", 5 | "description": "Image Restoration Using Swin Transformer", 6 | "visibility": "public", 7 | "github_url": "https://github.com/JingyunLiang/SwinIR", 8 | "paper_url": "https://arxiv.org/abs/2108.10257", 9 | "license_url": "https://github.com/JingyunLiang/SwinIR/blob/main/LICENSE", 10 | "run_count": 0, 11 | "cover_image_url": "https://tjzk.replicate.delivery/models_models_featured_image/c62290f9-ba1d-419b-95b8-eedfe5863122/out.png", 12 | "default_example": { 13 | "id": "csp4aecvzneq7pmzoyzyuwu4ci", 14 | "model": "jingyunliang/swinir", 15 | "version": "a6655af5d286c0362310303ace66a638b0e1e01be584a327f18d0d6c8c00025a", 16 | "status": "succeeded", 17 | "input": { 18 | "jpeg": "40", 19 | "image": "https://replicate.delivery/mgxm/efd1b6b0-4d79-4a42-ab31-2dcd29754a2d/chip.png", 20 | "noise": "15", 21 | "task_type": "Real-World Image Super-Resolution-Large" 22 | }, 23 | "output": [ 24 | { 25 | "file": "https://replicate.delivery/mgxm/1e3c0b87-01a7-4795-abac-aaf17479cf84/out.png" 26 | } 27 | ], 28 | "logs": "", 29 | "error": "", 30 | "metrics": { 31 | "total_time": 0.96962 32 | }, 33 | "created_at": "2021-09-15T13:53:54.030380Z", 34 | "started_at": "2021-12-18T09:22:11.531194Z", 35 | "completed_at": "2021-09-15T13:53:55Z", 36 | "urls": { 37 | "get": "https://api.replicate.com/v1/predictions/csp4aecvzneq7pmzoyzyuwu4ci", 38 | "cancel": "https://api.replicate.com/v1/predictions/csp4aecvzneq7pmzoyzyuwu4ci/cancel" 39 | } 40 | }, 41 | "latest_version": { 42 | "id": "660d922d33153019e8c263a3bba265de882e7f4f70396546b6c9c8f9d47a021a", 43 | "created_at": 
"2022-09-29T13:41:56.854576+00:00", 44 | "cog_version": "0.4.1", 45 | "openapi_schema": { 46 | "info": { 47 | "title": "Cog", 48 | "version": "0.1.0" 49 | }, 50 | "paths": { 51 | "/": { 52 | "get": { 53 | "summary": "Root", 54 | "responses": { 55 | "200": { 56 | "content": { 57 | "application/json": { 58 | "schema": {} 59 | } 60 | }, 61 | "description": "Successful Response" 62 | } 63 | }, 64 | "operationId": "root__get" 65 | } 66 | }, 67 | "/predictions": { 68 | "post": { 69 | "summary": "Predict", 70 | "responses": { 71 | "200": { 72 | "content": { 73 | "application/json": { 74 | "schema": { 75 | "$ref": "#/components/schemas/Response" 76 | } 77 | } 78 | }, 79 | "description": "Successful Response" 80 | }, 81 | "422": { 82 | "content": { 83 | "application/json": { 84 | "schema": { 85 | "$ref": "#/components/schemas/HTTPValidationError" 86 | } 87 | } 88 | }, 89 | "description": "Validation Error" 90 | } 91 | }, 92 | "description": "Run a single prediction on the model", 93 | "operationId": "predict_predictions_post", 94 | "requestBody": { 95 | "content": { 96 | "application/json": { 97 | "schema": { 98 | "$ref": "#/components/schemas/Request" 99 | } 100 | } 101 | } 102 | } 103 | } 104 | } 105 | }, 106 | "openapi": "3.0.2", 107 | "components": { 108 | "schemas": { 109 | "Input": { 110 | "type": "object", 111 | "title": "Input", 112 | "required": [ 113 | "image" 114 | ], 115 | "properties": { 116 | "jpeg": { 117 | "type": "integer", 118 | "title": "Jpeg", 119 | "default": 40, 120 | "x-order": 3, 121 | "description": "scale factor, activated for JPEG Compression Artifact Reduction. Leave it as default or arbitrary if other tasks are selected" 122 | }, 123 | "image": { 124 | "type": "string", 125 | "title": "Image", 126 | "format": "uri", 127 | "x-order": 0, 128 | "description": "input image" 129 | }, 130 | "noise": { 131 | "allOf": [ 132 | { 133 | "$ref": "#/components/schemas/noise" 134 | } 135 | ], 136 | "default": 15, 137 | "x-order": 2, 138 | "description": "noise level, activated for Grayscale Image Denoising and Color Image Denoising. Leave it as default or arbitrary if other tasks are selected" 139 | }, 140 | "task_type": { 141 | "allOf": [ 142 | { 143 | "$ref": "#/components/schemas/task_type" 144 | } 145 | ], 146 | "default": "Real-World Image Super-Resolution-Large", 147 | "x-order": 1, 148 | "description": "Choose a task" 149 | } 150 | } 151 | }, 152 | "noise": { 153 | "enum": [ 154 | 15, 155 | 25, 156 | 50 157 | ], 158 | "type": "integer", 159 | "title": "noise", 160 | "description": "An enumeration." 161 | }, 162 | "Output": { 163 | "type": "string", 164 | "title": "Output", 165 | "format": "uri" 166 | }, 167 | "Status": { 168 | "enum": [ 169 | "processing", 170 | "succeeded", 171 | "failed" 172 | ], 173 | "type": "string", 174 | "title": "Status", 175 | "description": "An enumeration." 
176 | }, 177 | "Request": { 178 | "type": "object", 179 | "title": "Request", 180 | "properties": { 181 | "input": { 182 | "$ref": "#/components/schemas/Input" 183 | }, 184 | "output_file_prefix": { 185 | "type": "string", 186 | "title": "Output File Prefix" 187 | } 188 | }, 189 | "description": "The request body for a prediction" 190 | }, 191 | "Response": { 192 | "type": "object", 193 | "title": "Response", 194 | "required": [ 195 | "status" 196 | ], 197 | "properties": { 198 | "error": { 199 | "type": "string", 200 | "title": "Error" 201 | }, 202 | "output": { 203 | "$ref": "#/components/schemas/Output" 204 | }, 205 | "status": { 206 | "$ref": "#/components/schemas/Status" 207 | } 208 | }, 209 | "description": "The response body for a prediction" 210 | }, 211 | "task_type": { 212 | "enum": [ 213 | "Real-World Image Super-Resolution-Large", 214 | "Real-World Image Super-Resolution-Medium", 215 | "Grayscale Image Denoising", 216 | "Color Image Denoising", 217 | "JPEG Compression Artifact Reduction" 218 | ], 219 | "type": "string", 220 | "title": "task_type", 221 | "description": "An enumeration." 222 | }, 223 | "ValidationError": { 224 | "type": "object", 225 | "title": "ValidationError", 226 | "required": [ 227 | "loc", 228 | "msg", 229 | "type" 230 | ], 231 | "properties": { 232 | "loc": { 233 | "type": "array", 234 | "items": { 235 | "anyOf": [ 236 | { 237 | "type": "string" 238 | }, 239 | { 240 | "type": "integer" 241 | } 242 | ] 243 | }, 244 | "title": "Location" 245 | }, 246 | "msg": { 247 | "type": "string", 248 | "title": "Message" 249 | }, 250 | "type": { 251 | "type": "string", 252 | "title": "Error Type" 253 | } 254 | } 255 | }, 256 | "HTTPValidationError": { 257 | "type": "object", 258 | "title": "HTTPValidationError", 259 | "properties": { 260 | "detail": { 261 | "type": "array", 262 | "items": { 263 | "$ref": "#/components/schemas/ValidationError" 264 | }, 265 | "title": "Detail" 266 | } 267 | } 268 | } 269 | } 270 | } 271 | } 272 | } 273 | } -------------------------------------------------------------------------------- /flowy/api_nodes/replicate/example_workflows/llama3-405b.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 10, 3 | "last_link_id": 13, 4 | "nodes": [ 5 | { 6 | "id": 5, 7 | "type": "CLIPTextEncode", 8 | "pos": [ 9 | 1199, 10 | 389 11 | ], 12 | "size": [ 13 | 400, 14 | 200 15 | ], 16 | "flags": {}, 17 | "order": 6, 18 | "mode": 0, 19 | "inputs": [ 20 | { 21 | "name": "clip", 22 | "type": "CLIP", 23 | "link": 4 24 | }, 25 | { 26 | "name": "text", 27 | "type": "STRING", 28 | "link": 3, 29 | "widget": { 30 | "name": "text" 31 | } 32 | } 33 | ], 34 | "outputs": [ 35 | { 36 | "name": "CONDITIONING", 37 | "type": "CONDITIONING", 38 | "links": [ 39 | 5 40 | ], 41 | "shape": 3, 42 | "slot_index": 0 43 | } 44 | ], 45 | "properties": { 46 | "Node name for S&R": "CLIPTextEncode" 47 | }, 48 | "widgets_values": [ 49 | "" 50 | ] 51 | }, 52 | { 53 | "id": 4, 54 | "type": "CLIPTextEncode", 55 | "pos": [ 56 | 1195, 57 | 649 58 | ], 59 | "size": { 60 | "0": 400, 61 | "1": 200 62 | }, 63 | "flags": {}, 64 | "order": 3, 65 | "mode": 0, 66 | "inputs": [ 67 | { 68 | "name": "clip", 69 | "type": "CLIP", 70 | "link": 2 71 | } 72 | ], 73 | "outputs": [ 74 | { 75 | "name": "CONDITIONING", 76 | "type": "CONDITIONING", 77 | "links": [ 78 | 6 79 | ], 80 | "shape": 3, 81 | "slot_index": 0 82 | } 83 | ], 84 | "properties": { 85 | "Node name for S&R": "CLIPTextEncode" 86 | }, 87 | "widgets_values": [ 88 | "" 89 | ] 90 | }, 91 | 
{ 92 | "id": 6, 93 | "type": "KSampler", 94 | "pos": [ 95 | 1664, 96 | 404 97 | ], 98 | "size": { 99 | "0": 315, 100 | "1": 262 101 | }, 102 | "flags": {}, 103 | "order": 7, 104 | "mode": 0, 105 | "inputs": [ 106 | { 107 | "name": "model", 108 | "type": "MODEL", 109 | "link": 11 110 | }, 111 | { 112 | "name": "positive", 113 | "type": "CONDITIONING", 114 | "link": 5 115 | }, 116 | { 117 | "name": "negative", 118 | "type": "CONDITIONING", 119 | "link": 6 120 | }, 121 | { 122 | "name": "latent_image", 123 | "type": "LATENT", 124 | "link": 7 125 | } 126 | ], 127 | "outputs": [ 128 | { 129 | "name": "LATENT", 130 | "type": "LATENT", 131 | "links": [ 132 | 8 133 | ], 134 | "shape": 3, 135 | "slot_index": 0 136 | } 137 | ], 138 | "properties": { 139 | "Node name for S&R": "KSampler" 140 | }, 141 | "widgets_values": [ 142 | 690393467025334, 143 | "randomize", 144 | 25, 145 | 7, 146 | "dpmpp_2m_sde_gpu", 147 | "karras", 148 | 1 149 | ] 150 | }, 151 | { 152 | "id": 8, 153 | "type": "VAEDecode", 154 | "pos": [ 155 | 2061, 156 | 406 157 | ], 158 | "size": { 159 | "0": 210, 160 | "1": 46 161 | }, 162 | "flags": {}, 163 | "order": 8, 164 | "mode": 0, 165 | "inputs": [ 166 | { 167 | "name": "samples", 168 | "type": "LATENT", 169 | "link": 8 170 | }, 171 | { 172 | "name": "vae", 173 | "type": "VAE", 174 | "link": 9 175 | } 176 | ], 177 | "outputs": [ 178 | { 179 | "name": "IMAGE", 180 | "type": "IMAGE", 181 | "links": [ 182 | 10 183 | ], 184 | "shape": 3, 185 | "slot_index": 0 186 | } 187 | ], 188 | "properties": { 189 | "Node name for S&R": "VAEDecode" 190 | } 191 | }, 192 | { 193 | "id": 3, 194 | "type": "CheckpointLoaderSimple", 195 | "pos": [ 196 | 735, 197 | 527 198 | ], 199 | "size": { 200 | "0": 315, 201 | "1": 98 202 | }, 203 | "flags": {}, 204 | "order": 0, 205 | "mode": 0, 206 | "outputs": [ 207 | { 208 | "name": "MODEL", 209 | "type": "MODEL", 210 | "links": [ 211 | 11 212 | ], 213 | "shape": 3, 214 | "slot_index": 0 215 | }, 216 | { 217 | "name": "CLIP", 218 | "type": "CLIP", 219 | "links": [ 220 | 2, 221 | 4 222 | ], 223 | "shape": 3, 224 | "slot_index": 1 225 | }, 226 | { 227 | "name": "VAE", 228 | "type": "VAE", 229 | "links": [ 230 | 9 231 | ], 232 | "shape": 3, 233 | "slot_index": 2 234 | } 235 | ], 236 | "properties": { 237 | "Node name for S&R": "CheckpointLoaderSimple" 238 | }, 239 | "widgets_values": [ 240 | "proteusV0.5.safetensors" 241 | ] 242 | }, 243 | { 244 | "id": 7, 245 | "type": "EmptyLatentImage", 246 | "pos": [ 247 | 1270, 248 | 219 249 | ], 250 | "size": [ 251 | 315, 252 | 106 253 | ], 254 | "flags": {}, 255 | "order": 4, 256 | "mode": 0, 257 | "inputs": [ 258 | { 259 | "name": "width", 260 | "type": "INT", 261 | "link": 12, 262 | "widget": { 263 | "name": "width" 264 | } 265 | }, 266 | { 267 | "name": "height", 268 | "type": "INT", 269 | "link": 13, 270 | "widget": { 271 | "name": "height" 272 | } 273 | } 274 | ], 275 | "outputs": [ 276 | { 277 | "name": "LATENT", 278 | "type": "LATENT", 279 | "links": [ 280 | 7 281 | ], 282 | "shape": 3, 283 | "slot_index": 0 284 | } 285 | ], 286 | "properties": { 287 | "Node name for S&R": "EmptyLatentImage" 288 | }, 289 | "widgets_values": [ 290 | 1024, 291 | 1024, 292 | 1 293 | ] 294 | }, 295 | { 296 | "id": 10, 297 | "type": "Width and height from aspect ratio 🪴", 298 | "pos": [ 299 | 838, 300 | 183 301 | ], 302 | "size": { 303 | "0": 315, 304 | "1": 126 305 | }, 306 | "flags": {}, 307 | "order": 1, 308 | "mode": 0, 309 | "outputs": [ 310 | { 311 | "name": "width", 312 | "type": "INT", 313 | "links": [ 314 | 12 315 | ], 316 | 
"shape": 3, 317 | "slot_index": 0 318 | }, 319 | { 320 | "name": "height", 321 | "type": "INT", 322 | "links": [ 323 | 13 324 | ], 325 | "shape": 3, 326 | "slot_index": 1 327 | } 328 | ], 329 | "properties": { 330 | "Node name for S&R": "Width and height from aspect ratio 🪴" 331 | }, 332 | "widgets_values": [ 333 | "16:9", 334 | 1024, 335 | 8 336 | ] 337 | }, 338 | { 339 | "id": 9, 340 | "type": "SaveImage", 341 | "pos": [ 342 | 157, 343 | 474 344 | ], 345 | "size": [ 346 | 500.09607697684555, 347 | 307.000781640146 348 | ], 349 | "flags": {}, 350 | "order": 9, 351 | "mode": 0, 352 | "inputs": [ 353 | { 354 | "name": "images", 355 | "type": "IMAGE", 356 | "link": 10 357 | } 358 | ], 359 | "properties": {}, 360 | "widgets_values": [ 361 | "ComfyUI" 362 | ] 363 | }, 364 | { 365 | "id": 2, 366 | "type": "ShowText|pysssss", 367 | "pos": [ 368 | 153, 369 | 234 370 | ], 371 | "size": [ 372 | 450.830078125, 373 | 192.2900390625 374 | ], 375 | "flags": {}, 376 | "order": 5, 377 | "mode": 0, 378 | "inputs": [ 379 | { 380 | "name": "text", 381 | "type": "STRING", 382 | "link": 1, 383 | "widget": { 384 | "name": "text" 385 | } 386 | } 387 | ], 388 | "outputs": [ 389 | { 390 | "name": "STRING", 391 | "type": "STRING", 392 | "links": null, 393 | "shape": 6 394 | } 395 | ], 396 | "properties": { 397 | "Node name for S&R": "ShowText|pysssss" 398 | }, 399 | "widgets_values": [ 400 | "", 401 | "A film still of a sci-fi anime movie, two space explorers in a tense, mystical confrontation on a bizarre, abandoned space station, weird, dreamlike, muted metallic colors, soft, ethereal lighting, unusual, organic architecture, anime style, intricate details, dramatic posing." 402 | ] 403 | }, 404 | { 405 | "id": 1, 406 | "type": "Replicate meta/meta-llama-3.1-405b-instruct", 407 | "pos": [ 408 | -274, 409 | 235 410 | ], 411 | "size": [ 412 | 398.7880859375, 413 | 447.314453125 414 | ], 415 | "flags": {}, 416 | "order": 2, 417 | "mode": 0, 418 | "outputs": [ 419 | { 420 | "name": "STRING", 421 | "type": "STRING", 422 | "links": [ 423 | 1, 424 | 3 425 | ], 426 | "shape": 3, 427 | "slot_index": 0 428 | } 429 | ], 430 | "properties": { 431 | "Node name for S&R": "Replicate meta/meta-llama-3.1-405b-instruct" 432 | }, 433 | "widgets_values": [ 434 | "an awesome scifi anime scene, with some dynamic interaction between two people, don't use neon stuff though, be weird", 435 | "You are a helpful text to image prompt assistant. You enhance prompts using short sentences and comma separated keywords, subject first then style. Return just the prompt. 
Make sure to keep all details from given prompt (like if it's a photo, etc)\n\nGood prompt example:\nA film still of an anime movie, two space explorers in a tense standoff on a desolate alien planet, surreal, muted color palette, eerie atmospheric lighting, unusual architecture, anime style, detailed textures, dramatic posing.", 436 | 0, 437 | 512, 438 | 0.6, 439 | 0.9, 440 | 50, 441 | 0, 442 | 0, 443 | "", 444 | false 445 | ] 446 | } 447 | ], 448 | "links": [ 449 | [ 450 | 1, 451 | 1, 452 | 0, 453 | 2, 454 | 0, 455 | "STRING" 456 | ], 457 | [ 458 | 2, 459 | 3, 460 | 1, 461 | 4, 462 | 0, 463 | "CLIP" 464 | ], 465 | [ 466 | 3, 467 | 1, 468 | 0, 469 | 5, 470 | 1, 471 | "STRING" 472 | ], 473 | [ 474 | 4, 475 | 3, 476 | 1, 477 | 5, 478 | 0, 479 | "CLIP" 480 | ], 481 | [ 482 | 5, 483 | 5, 484 | 0, 485 | 6, 486 | 1, 487 | "CONDITIONING" 488 | ], 489 | [ 490 | 6, 491 | 4, 492 | 0, 493 | 6, 494 | 2, 495 | "CONDITIONING" 496 | ], 497 | [ 498 | 7, 499 | 7, 500 | 0, 501 | 6, 502 | 3, 503 | "LATENT" 504 | ], 505 | [ 506 | 8, 507 | 6, 508 | 0, 509 | 8, 510 | 0, 511 | "LATENT" 512 | ], 513 | [ 514 | 9, 515 | 3, 516 | 2, 517 | 8, 518 | 1, 519 | "VAE" 520 | ], 521 | [ 522 | 10, 523 | 8, 524 | 0, 525 | 9, 526 | 0, 527 | "IMAGE" 528 | ], 529 | [ 530 | 11, 531 | 3, 532 | 0, 533 | 6, 534 | 0, 535 | "MODEL" 536 | ], 537 | [ 538 | 12, 539 | 10, 540 | 0, 541 | 7, 542 | 0, 543 | "INT" 544 | ], 545 | [ 546 | 13, 547 | 10, 548 | 1, 549 | 7, 550 | 1, 551 | "INT" 552 | ] 553 | ], 554 | "groups": [], 555 | "config": {}, 556 | "extra": { 557 | "ds": { 558 | "scale": 0.6209213230591553, 559 | "offset": [ 560 | 357.0363126973379, 561 | 28.753132988760044 562 | ] 563 | } 564 | }, 565 | "version": 0.4 566 | } -------------------------------------------------------------------------------- /workflows/LLM_CN.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 15, 3 | "last_link_id": 12, 4 | "nodes": [ 5 | { 6 | "id": 5, 7 | "type": "EmptyLatentImage", 8 | "pos": [ 9 | 473, 10 | 609 11 | ], 12 | "size": { 13 | "0": 315, 14 | "1": 106 15 | }, 16 | "flags": {}, 17 | "order": 0, 18 | "mode": 0, 19 | "outputs": [ 20 | { 21 | "name": "LATENT", 22 | "type": "LATENT", 23 | "links": [ 24 | 2 25 | ], 26 | "slot_index": 0, 27 | "label": "LATENT" 28 | } 29 | ], 30 | "properties": { 31 | "Node name for S&R": "EmptyLatentImage" 32 | }, 33 | "widgets_values": [ 34 | 512, 35 | 512, 36 | 1 37 | ] 38 | }, 39 | { 40 | "id": 8, 41 | "type": "VAEDecode", 42 | "pos": [ 43 | 1209, 44 | 188 45 | ], 46 | "size": { 47 | "0": 210, 48 | "1": 46 49 | }, 50 | "flags": {}, 51 | "order": 7, 52 | "mode": 0, 53 | "inputs": [ 54 | { 55 | "name": "samples", 56 | "type": "LATENT", 57 | "link": 7, 58 | "label": "samples" 59 | }, 60 | { 61 | "name": "vae", 62 | "type": "VAE", 63 | "link": 8, 64 | "label": "vae" 65 | } 66 | ], 67 | "outputs": [ 68 | { 69 | "name": "IMAGE", 70 | "type": "IMAGE", 71 | "links": [ 72 | 9 73 | ], 74 | "slot_index": 0, 75 | "label": "IMAGE" 76 | } 77 | ], 78 | "properties": { 79 | "Node name for S&R": "VAEDecode" 80 | } 81 | }, 82 | { 83 | "id": 3, 84 | "type": "KSampler", 85 | "pos": [ 86 | 863, 87 | 186 88 | ], 89 | "size": { 90 | "0": 338.0391540527344, 91 | "1": 571.4179077148438 92 | }, 93 | "flags": {}, 94 | "order": 6, 95 | "mode": 0, 96 | "inputs": [ 97 | { 98 | "name": "model", 99 | "type": "MODEL", 100 | "link": 1, 101 | "label": "model" 102 | }, 103 | { 104 | "name": "positive", 105 | "type": "CONDITIONING", 106 | "link": 4, 107 | "label": "positive" 108 | }, 109 | 
{ 110 | "name": "negative", 111 | "type": "CONDITIONING", 112 | "link": 6, 113 | "label": "negative" 114 | }, 115 | { 116 | "name": "latent_image", 117 | "type": "LATENT", 118 | "link": 2, 119 | "label": "latent_image" 120 | } 121 | ], 122 | "outputs": [ 123 | { 124 | "name": "LATENT", 125 | "type": "LATENT", 126 | "links": [ 127 | 7 128 | ], 129 | "slot_index": 0, 130 | "label": "LATENT" 131 | } 132 | ], 133 | "properties": { 134 | "Node name for S&R": "KSampler" 135 | }, 136 | "widgets_values": [ 137 | 817735087992348, 138 | "randomize", 139 | 20, 140 | 8, 141 | "dpmpp_2m", 142 | "karras", 143 | 1 144 | ] 145 | }, 146 | { 147 | "id": 4, 148 | "type": "CheckpointLoaderSimple", 149 | "pos": [ 150 | -125, 151 | 488 152 | ], 153 | "size": { 154 | "0": 315, 155 | "1": 98 156 | }, 157 | "flags": {}, 158 | "order": 1, 159 | "mode": 0, 160 | "outputs": [ 161 | { 162 | "name": "MODEL", 163 | "type": "MODEL", 164 | "links": [ 165 | 1 166 | ], 167 | "slot_index": 0, 168 | "label": "MODEL" 169 | }, 170 | { 171 | "name": "CLIP", 172 | "type": "CLIP", 173 | "links": [ 174 | 3, 175 | 5 176 | ], 177 | "slot_index": 1, 178 | "label": "CLIP" 179 | }, 180 | { 181 | "name": "VAE", 182 | "type": "VAE", 183 | "links": [ 184 | 8 185 | ], 186 | "slot_index": 2, 187 | "label": "VAE" 188 | } 189 | ], 190 | "properties": { 191 | "Node name for S&R": "CheckpointLoaderSimple" 192 | }, 193 | "widgets_values": [ 194 | "dreamshaper_8.safetensors" 195 | ] 196 | }, 197 | { 198 | "id": 9, 199 | "type": "SaveImage", 200 | "pos": [ 201 | 1451, 202 | 189 203 | ], 204 | "size": { 205 | "0": 239.60360717773438, 206 | "1": 334.3671569824219 207 | }, 208 | "flags": {}, 209 | "order": 8, 210 | "mode": 0, 211 | "inputs": [ 212 | { 213 | "name": "images", 214 | "type": "IMAGE", 215 | "link": 9, 216 | "label": "images" 217 | } 218 | ], 219 | "properties": { 220 | "Node name for S&R": "SaveImage" 221 | }, 222 | "widgets_values": [ 223 | "ComfyUI" 224 | ] 225 | }, 226 | { 227 | "id": 7, 228 | "type": "CLIPTextEncode", 229 | "pos": [ 230 | 414, 231 | 370 232 | ], 233 | "size": { 234 | "0": 425.27801513671875, 235 | "1": 180.6060791015625 236 | }, 237 | "flags": {}, 238 | "order": 3, 239 | "mode": 0, 240 | "inputs": [ 241 | { 242 | "name": "clip", 243 | "type": "CLIP", 244 | "link": 5, 245 | "label": "clip" 246 | } 247 | ], 248 | "outputs": [ 249 | { 250 | "name": "CONDITIONING", 251 | "type": "CONDITIONING", 252 | "links": [ 253 | 6 254 | ], 255 | "slot_index": 0, 256 | "label": "CONDITIONING" 257 | } 258 | ], 259 | "properties": { 260 | "Node name for S&R": "CLIPTextEncode" 261 | }, 262 | "widgets_values": [ 263 | "text, watermark", 264 | false 265 | ] 266 | }, 267 | { 268 | "id": 6, 269 | "type": "CLIPTextEncode", 270 | "pos": [ 271 | 411, 272 | 137 273 | ], 274 | "size": { 275 | "0": 422.84503173828125, 276 | "1": 164.31304931640625 277 | }, 278 | "flags": {}, 279 | "order": 4, 280 | "mode": 0, 281 | "inputs": [ 282 | { 283 | "name": "clip", 284 | "type": "CLIP", 285 | "link": 3, 286 | "label": "clip" 287 | }, 288 | { 289 | "name": "text", 290 | "type": "STRING", 291 | "link": 11, 292 | "widget": { 293 | "name": "text" 294 | }, 295 | "label": "text" 296 | } 297 | ], 298 | "outputs": [ 299 | { 300 | "name": "CONDITIONING", 301 | "type": "CONDITIONING", 302 | "links": [ 303 | 4 304 | ], 305 | "slot_index": 0, 306 | "label": "CONDITIONING" 307 | } 308 | ], 309 | "properties": { 310 | "Node name for S&R": "CLIPTextEncode" 311 | }, 312 | "widgets_values": [ 313 | "beautiful scenery nature glass bottle landscape, , purple galaxy bottle,", 
314 | false 315 | ] 316 | }, 317 | { 318 | "id": 14, 319 | "type": "ShowText|pysssss", 320 | "pos": [ 321 | 392, 322 | -219 323 | ], 324 | "size": { 325 | "0": 341.71337890625, 326 | "1": 261.8714599609375 327 | }, 328 | "flags": {}, 329 | "order": 5, 330 | "mode": 0, 331 | "inputs": [ 332 | { 333 | "name": "text", 334 | "type": "STRING", 335 | "link": 12, 336 | "widget": { 337 | "name": "text" 338 | }, 339 | "label": "text" 340 | } 341 | ], 342 | "outputs": [ 343 | { 344 | "name": "STRING", 345 | "type": "STRING", 346 | "links": null, 347 | "shape": 6, 348 | "label": "STRING" 349 | } 350 | ], 351 | "properties": { 352 | "Node name for S&R": "ShowText|pysssss" 353 | }, 354 | "widgets_values": [ 355 | "", 356 | "\n(best quality,4k,8k,highres,masterpiece:1.2),ultra-detailed,(realistic,photorealistic,photo-realistic:1.37),ancient warship,wooden hull,bronze cannons,bearded warriors in armor,rigged sails,stormy sea,crimson flag,medieval architecture,stone decks,rigid ropes,shiny metal,weathered wood,stormy sky,crashing waves,extreme detail description,professional,vivid colors,bokeh" 357 | ] 358 | }, 359 | { 360 | "id": 13, 361 | "type": "Comflowy_LLM", 362 | "pos": [ 363 | -133, 364 | -228 365 | ], 366 | "size": { 367 | "0": 362.8030700683594, 368 | "1": 619.6361083984375 369 | }, 370 | "flags": {}, 371 | "order": 2, 372 | "mode": 0, 373 | "outputs": [ 374 | { 375 | "name": "STRING", 376 | "type": "STRING", 377 | "links": [ 378 | 11, 379 | 12 380 | ], 381 | "slot_index": 0, 382 | "shape": 3, 383 | "label": "STRING" 384 | } 385 | ], 386 | "properties": { 387 | "Node name for S&R": "Comflowy_LLM" 388 | }, 389 | "widgets_values": [ 390 | "一艘古代战舰", 391 | "# Role: Stable Diffusion prompt 助理\n你来充当一位有艺术气息的Stable Diffusion prompt 助理。\n\n## 任务\n我用自然语言告诉你要生成的prompt的主题,你的任务是根据这个主题想象一幅完整的画面,然后转化成一份详细的、高质量的prompt,让Stable Diffusion可以生成高质量的图像。\n\n## 背景介绍\nStable Diffusion是一款利用深度学习的文生图模型,支持通过使用 prompt 来产生新的图像,描述要包含或省略的元素。\n\n## prompt 概念\n- prompt 用来描述图像,由普通常见的单词构成,使用英文半角\",\"做为分隔符。\n- negative prompt用来描述你不想在生成的图像中出现的内容。\n- 以\",\"分隔的每个单词或词组称为 tag。所以prompt是由系列由\",\"分隔的tag组成的。\n\n## () 和 [] 语法\n调整关键字强度的等效方法是使用 () 和 []。 (keyword) 将tag的强度增加 1.1 倍,与 (keyword:1.1) 相同,最多可加三层。 [keyword] 将强度降低 0.9 倍,与 (keyword:0.9) 相同。\n\n## Prompt 格式要求\n下面我将说明 prompt 的生成步骤,这里的 prompt 可用于描述人物、风景、物体或抽象数字艺术图画。你可以根据需要添加合理的、但不少于5处的画面细节。\n\n### prompt 要求\n- prompt 内容包含画面主体、材质、附加细节、图像质量、艺术风格、色彩色调、灯光等部分,但你输出的 prompt 不能分段,例如类似\"medium:\"这样的分段描述是不需要的,也不能包含\":\"和\".\"。\n- 画面主体:不简短的英文描述画面主体, 如 A girl in a garden,主体细节概括(主体可以是人、事、物、景)画面核心内容。这部分根据我每次给你的主题来生成。你可以添加更多主题相关的合理的细节。\n- 对于人物主题,你必须描述人物的眼睛、鼻子、嘴唇,例如'beautiful detailed eyes,beautiful detailed lips,extremely detailed eyes and face,longeyelashes',以免Stable Diffusion随机生成变形的面部五官,这点非常重要。你还可以描述人物的外表、情绪、衣服、姿势、视角、动作、背景等。人物属性中,1girl表示一个女孩,2girls表示两个女孩。\n- 材质:用来制作艺术品的材料。 例如:插图、油画、3D 渲染和摄影。 Medium 有很强的效果,因为一个关键字就可以极大地改变风格。\n- 附加细节:画面场景细节,或人物细节,描述画面细节内容,让图像看起来更充实和合理。这部分是可选的,要注意画面的整体和谐,不能与主题冲突。\n- 图像质量:这部分内容开头永远要加上“(best quality,4k,8k,highres,masterpiece:1.2),ultra-detailed,(realistic,photorealistic,photo-realistic:1.37)”, 这是高质量的标志。其它常用的提高质量的tag还有,你可以根据主题的需求添加:HDR,UHD,studio lighting,ultra-fine painting,sharp focus,physically-based rendering,extreme detail description,professional,vivid colors,bokeh。\n- 艺术风格:这部分描述图像的风格。加入恰当的艺术风格,能提升生成的图像效果。常用的艺术风格例如:portraits,landscape,horror,anime,sci-fi,photography,concept artists等。\n- 色彩色调:颜色,通过添加颜色来控制画面的整体颜色。\n- 灯光:整体画面的光线效果。\n\n### 限制:\n- tag 内容用英语单词或短语来描述,并不局限于我给你的单词。注意只能包含关键词或词组。\n- 注意不要输出句子,不要有任何解释。\n- tag数量限制40个以内,单词数量限制在60个以内。\n- tag不要带引号(\"\")。\n- 
使用英文半角\",\"做分隔符。\n- tag 按重要性从高到低的顺序排列。\n- 我给你的主题可能是用中文描述,你给出的prompt和negative prompt只用英文,不要包含中文。", 392 | "THUDM/glm-4-9b-chat", 393 | "", 394 | 28799307290994, 395 | "randomize", 396 | false, 397 | false 398 | ] 399 | } 400 | ], 401 | "links": [ 402 | [ 403 | 1, 404 | 4, 405 | 0, 406 | 3, 407 | 0, 408 | "MODEL" 409 | ], 410 | [ 411 | 2, 412 | 5, 413 | 0, 414 | 3, 415 | 3, 416 | "LATENT" 417 | ], 418 | [ 419 | 3, 420 | 4, 421 | 1, 422 | 6, 423 | 0, 424 | "CLIP" 425 | ], 426 | [ 427 | 4, 428 | 6, 429 | 0, 430 | 3, 431 | 1, 432 | "CONDITIONING" 433 | ], 434 | [ 435 | 5, 436 | 4, 437 | 1, 438 | 7, 439 | 0, 440 | "CLIP" 441 | ], 442 | [ 443 | 6, 444 | 7, 445 | 0, 446 | 3, 447 | 2, 448 | "CONDITIONING" 449 | ], 450 | [ 451 | 7, 452 | 3, 453 | 0, 454 | 8, 455 | 0, 456 | "LATENT" 457 | ], 458 | [ 459 | 8, 460 | 4, 461 | 2, 462 | 8, 463 | 1, 464 | "VAE" 465 | ], 466 | [ 467 | 9, 468 | 8, 469 | 0, 470 | 9, 471 | 0, 472 | "IMAGE" 473 | ], 474 | [ 475 | 11, 476 | 13, 477 | 0, 478 | 6, 479 | 1, 480 | "STRING" 481 | ], 482 | [ 483 | 12, 484 | 13, 485 | 0, 486 | 14, 487 | 0, 488 | "STRING" 489 | ] 490 | ], 491 | "groups": [], 492 | "config": {}, 493 | "extra": { 494 | "ds": { 495 | "scale": 0.8264462809917354, 496 | "offset": [ 497 | 255.78564012490165, 498 | 461.2748835297254 499 | ] 500 | } 501 | }, 502 | "version": 0.4 503 | } -------------------------------------------------------------------------------- /workflows/Omost_LLM.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 23, 3 | "last_link_id": 20, 4 | "nodes": [ 5 | { 6 | "id": 5, 7 | "type": "EmptyLatentImage", 8 | "pos": [ 9 | 473, 10 | 609 11 | ], 12 | "size": { 13 | "0": 315, 14 | "1": 106 15 | }, 16 | "flags": {}, 17 | "order": 0, 18 | "mode": 0, 19 | "outputs": [ 20 | { 21 | "name": "LATENT", 22 | "type": "LATENT", 23 | "links": [ 24 | 2 25 | ], 26 | "slot_index": 0, 27 | "label": "LATENT" 28 | } 29 | ], 30 | "properties": { 31 | "Node name for S&R": "EmptyLatentImage" 32 | }, 33 | "widgets_values": [ 34 | 512, 35 | 512, 36 | 1 37 | ] 38 | }, 39 | { 40 | "id": 8, 41 | "type": "VAEDecode", 42 | "pos": [ 43 | 1209, 44 | 188 45 | ], 46 | "size": { 47 | "0": 210, 48 | "1": 46 49 | }, 50 | "flags": {}, 51 | "order": 9, 52 | "mode": 0, 53 | "inputs": [ 54 | { 55 | "name": "samples", 56 | "type": "LATENT", 57 | "link": 7, 58 | "label": "samples" 59 | }, 60 | { 61 | "name": "vae", 62 | "type": "VAE", 63 | "link": 8, 64 | "label": "vae" 65 | } 66 | ], 67 | "outputs": [ 68 | { 69 | "name": "IMAGE", 70 | "type": "IMAGE", 71 | "links": [ 72 | 9 73 | ], 74 | "slot_index": 0, 75 | "label": "IMAGE" 76 | } 77 | ], 78 | "properties": { 79 | "Node name for S&R": "VAEDecode" 80 | } 81 | }, 82 | { 83 | "id": 3, 84 | "type": "KSampler", 85 | "pos": [ 86 | 863, 87 | 186 88 | ], 89 | "size": { 90 | "0": 338.0391540527344, 91 | "1": 571.4179077148438 92 | }, 93 | "flags": {}, 94 | "order": 7, 95 | "mode": 0, 96 | "inputs": [ 97 | { 98 | "name": "model", 99 | "type": "MODEL", 100 | "link": 1, 101 | "label": "model" 102 | }, 103 | { 104 | "name": "positive", 105 | "type": "CONDITIONING", 106 | "link": 16, 107 | "label": "positive" 108 | }, 109 | { 110 | "name": "negative", 111 | "type": "CONDITIONING", 112 | "link": 6, 113 | "label": "negative" 114 | }, 115 | { 116 | "name": "latent_image", 117 | "type": "LATENT", 118 | "link": 2, 119 | "label": "latent_image" 120 | } 121 | ], 122 | "outputs": [ 123 | { 124 | "name": "LATENT", 125 | "type": "LATENT", 126 | "links": [ 127 | 7 128 | ], 129 | "slot_index": 
0, 130 | "label": "LATENT" 131 | } 132 | ], 133 | "properties": { 134 | "Node name for S&R": "KSampler" 135 | }, 136 | "widgets_values": [ 137 | 222261456855657, 138 | "randomize", 139 | 20, 140 | 8, 141 | "dpmpp_2m", 142 | "karras", 143 | 1 144 | ] 145 | }, 146 | { 147 | "id": 9, 148 | "type": "SaveImage", 149 | "pos": [ 150 | 1451, 151 | 189 152 | ], 153 | "size": { 154 | "0": 239.60360717773438, 155 | "1": 334.3671569824219 156 | }, 157 | "flags": {}, 158 | "order": 10, 159 | "mode": 0, 160 | "inputs": [ 161 | { 162 | "name": "images", 163 | "type": "IMAGE", 164 | "link": 9, 165 | "label": "images" 166 | } 167 | ], 168 | "properties": { 169 | "Node name for S&R": "SaveImage" 170 | }, 171 | "widgets_values": [ 172 | "ComfyUI" 173 | ] 174 | }, 175 | { 176 | "id": 17, 177 | "type": "Comflowy_Omost_To_Conditioning", 178 | "pos": [ 179 | 400, 180 | 122 181 | ], 182 | "size": { 183 | "0": 317.4000244140625, 184 | "1": 126 185 | }, 186 | "flags": {}, 187 | "order": 4, 188 | "mode": 0, 189 | "inputs": [ 190 | { 191 | "name": "canvas_conds", 192 | "type": "OMOST_CANVAS_CONDITIONING", 193 | "link": 15, 194 | "label": "canvas_conds" 195 | }, 196 | { 197 | "name": "clip", 198 | "type": "CLIP", 199 | "link": 14, 200 | "label": "clip" 201 | } 202 | ], 203 | "outputs": [ 204 | { 205 | "name": "CONDITIONING", 206 | "type": "CONDITIONING", 207 | "links": [ 208 | 16 209 | ], 210 | "slot_index": 0, 211 | "shape": 3, 212 | "label": "CONDITIONING" 213 | } 214 | ], 215 | "properties": { 216 | "Node name for S&R": "Comflowy_Omost_To_Conditioning" 217 | }, 218 | "widgets_values": [ 219 | 0.2, 220 | 0.8, 221 | "average" 222 | ] 223 | }, 224 | { 225 | "id": 7, 226 | "type": "CLIPTextEncode", 227 | "pos": [ 228 | 408, 229 | 348 230 | ], 231 | "size": { 232 | "0": 425.27801513671875, 233 | "1": 180.6060791015625 234 | }, 235 | "flags": {}, 236 | "order": 3, 237 | "mode": 0, 238 | "inputs": [ 239 | { 240 | "name": "clip", 241 | "type": "CLIP", 242 | "link": 5, 243 | "label": "clip" 244 | } 245 | ], 246 | "outputs": [ 247 | { 248 | "name": "CONDITIONING", 249 | "type": "CONDITIONING", 250 | "links": [ 251 | 6 252 | ], 253 | "slot_index": 0, 254 | "label": "CONDITIONING" 255 | } 256 | ], 257 | "properties": { 258 | "Node name for S&R": "CLIPTextEncode" 259 | }, 260 | "widgets_values": [ 261 | "text, watermark", 262 | false 263 | ] 264 | }, 265 | { 266 | "id": 4, 267 | "type": "CheckpointLoaderSimple", 268 | "pos": [ 269 | -127, 270 | 394 271 | ], 272 | "size": { 273 | "0": 315, 274 | "1": 98 275 | }, 276 | "flags": {}, 277 | "order": 1, 278 | "mode": 0, 279 | "outputs": [ 280 | { 281 | "name": "MODEL", 282 | "type": "MODEL", 283 | "links": [ 284 | 1 285 | ], 286 | "slot_index": 0, 287 | "label": "MODEL" 288 | }, 289 | { 290 | "name": "CLIP", 291 | "type": "CLIP", 292 | "links": [ 293 | 5, 294 | 14 295 | ], 296 | "slot_index": 1, 297 | "label": "CLIP" 298 | }, 299 | { 300 | "name": "VAE", 301 | "type": "VAE", 302 | "links": [ 303 | 8 304 | ], 305 | "slot_index": 2, 306 | "label": "VAE" 307 | } 308 | ], 309 | "properties": { 310 | "Node name for S&R": "CheckpointLoaderSimple" 311 | }, 312 | "widgets_values": [ 313 | "dreamshaper_8.safetensors" 314 | ] 315 | }, 316 | { 317 | "id": 22, 318 | "type": "Comflowy_Omost_Preview", 319 | "pos": [ 320 | -198, 321 | 148 322 | ], 323 | "size": { 324 | "0": 229.20001220703125, 325 | "1": 46 326 | }, 327 | "flags": {}, 328 | "order": 5, 329 | "mode": 0, 330 | "inputs": [ 331 | { 332 | "name": "canvas_conds", 333 | "type": "OMOST_CANVAS_CONDITIONING", 334 | "link": 19, 335 | "label": 
"canvas_conds" 336 | } 337 | ], 338 | "outputs": [ 339 | { 340 | "name": "IMAGE", 341 | "type": "IMAGE", 342 | "links": [ 343 | 20 344 | ], 345 | "slot_index": 0, 346 | "shape": 3, 347 | "label": "IMAGE" 348 | }, 349 | { 350 | "name": "JSON", 351 | "type": "JSON", 352 | "links": null, 353 | "shape": 3, 354 | "label": "JSON" 355 | } 356 | ], 357 | "properties": { 358 | "Node name for S&R": "Comflowy_Omost_Preview" 359 | } 360 | }, 361 | { 362 | "id": 14, 363 | "type": "ShowText|pysssss", 364 | "pos": [ 365 | -216, 366 | -299 367 | ], 368 | "size": [ 369 | 328.5512870484456, 370 | 374.97700441128086 371 | ], 372 | "flags": {}, 373 | "order": 6, 374 | "mode": 0, 375 | "inputs": [ 376 | { 377 | "name": "text", 378 | "type": "STRING", 379 | "link": 13, 380 | "widget": { 381 | "name": "text" 382 | }, 383 | "label": "text" 384 | } 385 | ], 386 | "outputs": [ 387 | { 388 | "name": "STRING", 389 | "type": "STRING", 390 | "links": null, 391 | "shape": 6, 392 | "label": "STRING" 393 | } 394 | ], 395 | "properties": { 396 | "Node name for S&R": "ShowText|pysssss" 397 | }, 398 | "widgets_values": [ 399 | "", 400 | "\n\n{\n \"global_description\": {\n \"description\": \"An ancient warship, weathered by time, dominates the center of the canvas.\",\n \"detailed_descriptions\": [\n \"The hull is covered in barnacles and moss, hinting at its long journey across the seas.\",\n \"The sails are tattered and faded, yet they still hold a sense of grandeur.\",\n \"The ship's rigging is intricate, showcasing the craftsmanship of a bygone era.\"\n ],\n \"tags\": \"ancient, warship, sea, weathered, barnacles, moss, sails, rigging\",\n \"HTML_web_color_name\": \"navy\"\n },\n \"local_descriptions\": [\n {\n \"location\": \"center\",\n \"offset\": \"no\",\n \"area\": \"a large square area\",\n \"distance_to_viewer\": 10.0,\n \"description\": \"An ancient warship stands prominently, its silhouette against the horizon.\",\n \"detailed_descriptions\": [\n \"The warship's silhouette is defined by its towering masts and the curve of its hull.\",\n \"The ship is adorned with intricate carvings and symbols of ancient cultures.\"\n ],\n \"tags\": \"ancient, warship, silhouette, masts, hull, carvings, symbols\",\n \"atmosphere\": \"nostalgic, mysterious\",\n \"style\": \"realistic\",\n \"quality_meta\": \"vivid\",\n \"HTML_web_color_name\": \"navy\"\n },\n {\n \"location\": \"top-right\",\n \"offset\": \"upper-right\",\n \"area\": \"a small square area\",\n \"distance_to_viewer\": 7.0,\n \"description\": \"A distant view of the ship's stern, with a flag waving in the breeze.\",\n \"detailed_descriptions\": [\n \"The flag is a deep red, symbolizing the bravery of the crew who once sailed this vessel.\"\n ],\n \"tags\": \"ship's stern, flag, red, bravery\",\n \"atmosphere\": \"victorious, nostalgic\",\n \"style\": \"realistic\",\n \"quality_meta\": \"vivid\",\n \"HTML_web_color_name\": \"crimson\"\n }\n ]\n}\n" 401 | ] 402 | }, 403 | { 404 | "id": 23, 405 | "type": "PreviewImage", 406 | "pos": [ 407 | 138, 408 | 87 409 | ], 410 | "size": [ 411 | 210, 412 | 246 413 | ], 414 | "flags": {}, 415 | "order": 8, 416 | "mode": 0, 417 | "inputs": [ 418 | { 419 | "name": "images", 420 | "type": "IMAGE", 421 | "link": 20, 422 | "label": "images" 423 | } 424 | ], 425 | "properties": { 426 | "Node name for S&R": "PreviewImage" 427 | } 428 | }, 429 | { 430 | "id": 16, 431 | "type": "Comflowy_Omost_LLM", 432 | "pos": [ 433 | -695, 434 | -31 435 | ], 436 | "size": { 437 | "0": 400, 438 | "1": 200 439 | }, 440 | "flags": {}, 441 | "order": 2, 
442 | "mode": 0, 443 | "outputs": [ 444 | { 445 | "name": "canvas_conds", 446 | "type": "OMOST_CANVAS_CONDITIONING", 447 | "links": [ 448 | 15, 449 | 19 450 | ], 451 | "slot_index": 0, 452 | "shape": 3, 453 | "label": "canvas_conds" 454 | }, 455 | { 456 | "name": "generated_text", 457 | "type": "STRING", 458 | "links": [ 459 | 13 460 | ], 461 | "slot_index": 1, 462 | "shape": 3, 463 | "label": "generated_text" 464 | } 465 | ], 466 | "properties": { 467 | "Node name for S&R": "Comflowy_Omost_LLM" 468 | }, 469 | "widgets_values": [ 470 | "An ancient warship", 471 | "THUDM/glm-4-9b-chat", 472 | "", 473 | 673046078093462, 474 | "randomize", 475 | false 476 | ] 477 | } 478 | ], 479 | "links": [ 480 | [ 481 | 1, 482 | 4, 483 | 0, 484 | 3, 485 | 0, 486 | "MODEL" 487 | ], 488 | [ 489 | 2, 490 | 5, 491 | 0, 492 | 3, 493 | 3, 494 | "LATENT" 495 | ], 496 | [ 497 | 5, 498 | 4, 499 | 1, 500 | 7, 501 | 0, 502 | "CLIP" 503 | ], 504 | [ 505 | 6, 506 | 7, 507 | 0, 508 | 3, 509 | 2, 510 | "CONDITIONING" 511 | ], 512 | [ 513 | 7, 514 | 3, 515 | 0, 516 | 8, 517 | 0, 518 | "LATENT" 519 | ], 520 | [ 521 | 8, 522 | 4, 523 | 2, 524 | 8, 525 | 1, 526 | "VAE" 527 | ], 528 | [ 529 | 9, 530 | 8, 531 | 0, 532 | 9, 533 | 0, 534 | "IMAGE" 535 | ], 536 | [ 537 | 13, 538 | 16, 539 | 1, 540 | 14, 541 | 0, 542 | "STRING" 543 | ], 544 | [ 545 | 14, 546 | 4, 547 | 1, 548 | 17, 549 | 1, 550 | "CLIP" 551 | ], 552 | [ 553 | 15, 554 | 16, 555 | 0, 556 | 17, 557 | 0, 558 | "OMOST_CANVAS_CONDITIONING" 559 | ], 560 | [ 561 | 16, 562 | 17, 563 | 0, 564 | 3, 565 | 1, 566 | "CONDITIONING" 567 | ], 568 | [ 569 | 19, 570 | 16, 571 | 0, 572 | 22, 573 | 0, 574 | "OMOST_CANVAS_CONDITIONING" 575 | ], 576 | [ 577 | 20, 578 | 22, 579 | 0, 580 | 23, 581 | 0, 582 | "IMAGE" 583 | ] 584 | ], 585 | "groups": [], 586 | "config": {}, 587 | "extra": { 588 | "ds": { 589 | "scale": 0.6209213230591554, 590 | "offset": [ 591 | 739.9752097484295, 592 | 420.49411898715607 593 | ] 594 | } 595 | }, 596 | "version": 0.4 597 | } -------------------------------------------------------------------------------- /flowy/lib_omost/canvas.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | import re 3 | import difflib 4 | import torch 5 | import numpy as np 6 | from typing import TypedDict 7 | 8 | 9 | system_prompt = r"""You are a helpful AI assistant to compose images using the below python class `Canvas`: 10 | 11 | ```python 12 | class Canvas: 13 | def set_global_description(self, description: str, detailed_descriptions: list[str], tags: str, HTML_web_color_name: str): 14 | pass 15 | 16 | def add_local_description(self, location: str, offset: str, area: str, distance_to_viewer: float, description: str, detailed_descriptions: list[str], tags: str, atmosphere: str, style: str, quality_meta: str, HTML_web_color_name: str): 17 | assert location in ["in the center", "on the left", "on the right", "on the top", "on the bottom", "on the top-left", "on the top-right", "on the bottom-left", "on the bottom-right"] 18 | assert offset in ["no offset", "slightly to the left", "slightly to the right", "slightly to the upper", "slightly to the lower", "slightly to the upper-left", "slightly to the upper-right", "slightly to the lower-left", "slightly to the lower-right"] 19 | assert area in ["a small square area", "a small vertical area", "a small horizontal area", "a medium-sized square area", "a medium-sized vertical area", "a medium-sized horizontal area", "a large square area", "a large vertical area", 
"a large horizontal area"] 20 | assert distance_to_viewer > 0 21 | pass 22 | ```""" 23 | 24 | valid_colors = { # r, g, b 25 | "aliceblue": (240, 248, 255), 26 | "antiquewhite": (250, 235, 215), 27 | "aqua": (0, 255, 255), 28 | "aquamarine": (127, 255, 212), 29 | "azure": (240, 255, 255), 30 | "beige": (245, 245, 220), 31 | "bisque": (255, 228, 196), 32 | "black": (0, 0, 0), 33 | "blanchedalmond": (255, 235, 205), 34 | "blue": (0, 0, 255), 35 | "blueviolet": (138, 43, 226), 36 | "brown": (165, 42, 42), 37 | "burlywood": (222, 184, 135), 38 | "cadetblue": (95, 158, 160), 39 | "chartreuse": (127, 255, 0), 40 | "chocolate": (210, 105, 30), 41 | "coral": (255, 127, 80), 42 | "cornflowerblue": (100, 149, 237), 43 | "cornsilk": (255, 248, 220), 44 | "crimson": (220, 20, 60), 45 | "cyan": (0, 255, 255), 46 | "darkblue": (0, 0, 139), 47 | "darkcyan": (0, 139, 139), 48 | "darkgoldenrod": (184, 134, 11), 49 | "darkgray": (169, 169, 169), 50 | "darkgrey": (169, 169, 169), 51 | "darkgreen": (0, 100, 0), 52 | "darkkhaki": (189, 183, 107), 53 | "darkmagenta": (139, 0, 139), 54 | "darkolivegreen": (85, 107, 47), 55 | "darkorange": (255, 140, 0), 56 | "darkorchid": (153, 50, 204), 57 | "darkred": (139, 0, 0), 58 | "darksalmon": (233, 150, 122), 59 | "darkseagreen": (143, 188, 143), 60 | "darkslateblue": (72, 61, 139), 61 | "darkslategray": (47, 79, 79), 62 | "darkslategrey": (47, 79, 79), 63 | "darkturquoise": (0, 206, 209), 64 | "darkviolet": (148, 0, 211), 65 | "deeppink": (255, 20, 147), 66 | "deepskyblue": (0, 191, 255), 67 | "dimgray": (105, 105, 105), 68 | "dimgrey": (105, 105, 105), 69 | "dodgerblue": (30, 144, 255), 70 | "firebrick": (178, 34, 34), 71 | "floralwhite": (255, 250, 240), 72 | "forestgreen": (34, 139, 34), 73 | "fuchsia": (255, 0, 255), 74 | "gainsboro": (220, 220, 220), 75 | "ghostwhite": (248, 248, 255), 76 | "gold": (255, 215, 0), 77 | "goldenrod": (218, 165, 32), 78 | "gray": (128, 128, 128), 79 | "grey": (128, 128, 128), 80 | "green": (0, 128, 0), 81 | "greenyellow": (173, 255, 47), 82 | "honeydew": (240, 255, 240), 83 | "hotpink": (255, 105, 180), 84 | "indianred": (205, 92, 92), 85 | "indigo": (75, 0, 130), 86 | "ivory": (255, 255, 240), 87 | "khaki": (240, 230, 140), 88 | "lavender": (230, 230, 250), 89 | "lavenderblush": (255, 240, 245), 90 | "lawngreen": (124, 252, 0), 91 | "lemonchiffon": (255, 250, 205), 92 | "lightblue": (173, 216, 230), 93 | "lightcoral": (240, 128, 128), 94 | "lightcyan": (224, 255, 255), 95 | "lightgoldenrodyellow": (250, 250, 210), 96 | "lightgray": (211, 211, 211), 97 | "lightgrey": (211, 211, 211), 98 | "lightgreen": (144, 238, 144), 99 | "lightpink": (255, 182, 193), 100 | "lightsalmon": (255, 160, 122), 101 | "lightseagreen": (32, 178, 170), 102 | "lightskyblue": (135, 206, 250), 103 | "lightslategray": (119, 136, 153), 104 | "lightslategrey": (119, 136, 153), 105 | "lightsteelblue": (176, 196, 222), 106 | "lightyellow": (255, 255, 224), 107 | "lime": (0, 255, 0), 108 | "limegreen": (50, 205, 50), 109 | "linen": (250, 240, 230), 110 | "magenta": (255, 0, 255), 111 | "maroon": (128, 0, 0), 112 | "mediumaquamarine": (102, 205, 170), 113 | "mediumblue": (0, 0, 205), 114 | "mediumorchid": (186, 85, 211), 115 | "mediumpurple": (147, 112, 219), 116 | "mediumseagreen": (60, 179, 113), 117 | "mediumslateblue": (123, 104, 238), 118 | "mediumspringgreen": (0, 250, 154), 119 | "mediumturquoise": (72, 209, 204), 120 | "mediumvioletred": (199, 21, 133), 121 | "midnightblue": (25, 25, 112), 122 | "mintcream": (245, 255, 250), 123 | "mistyrose": (255, 228, 225), 
124 | "moccasin": (255, 228, 181), 125 | "navajowhite": (255, 222, 173), 126 | "navy": (0, 0, 128), 127 | "navyblue": (0, 0, 128), 128 | "oldlace": (253, 245, 230), 129 | "olive": (128, 128, 0), 130 | "olivedrab": (107, 142, 35), 131 | "orange": (255, 165, 0), 132 | "orangered": (255, 69, 0), 133 | "orchid": (218, 112, 214), 134 | "palegoldenrod": (238, 232, 170), 135 | "palegreen": (152, 251, 152), 136 | "paleturquoise": (175, 238, 238), 137 | "palevioletred": (219, 112, 147), 138 | "papayawhip": (255, 239, 213), 139 | "peachpuff": (255, 218, 185), 140 | "peru": (205, 133, 63), 141 | "pink": (255, 192, 203), 142 | "plum": (221, 160, 221), 143 | "powderblue": (176, 224, 230), 144 | "purple": (128, 0, 128), 145 | "rebeccapurple": (102, 51, 153), 146 | "red": (255, 0, 0), 147 | "rosybrown": (188, 143, 143), 148 | "royalblue": (65, 105, 225), 149 | "saddlebrown": (139, 69, 19), 150 | "salmon": (250, 128, 114), 151 | "sandybrown": (244, 164, 96), 152 | "seagreen": (46, 139, 87), 153 | "seashell": (255, 245, 238), 154 | "sienna": (160, 82, 45), 155 | "silver": (192, 192, 192), 156 | "skyblue": (135, 206, 235), 157 | "slateblue": (106, 90, 205), 158 | "slategray": (112, 128, 144), 159 | "slategrey": (112, 128, 144), 160 | "snow": (255, 250, 250), 161 | "springgreen": (0, 255, 127), 162 | "steelblue": (70, 130, 180), 163 | "tan": (210, 180, 140), 164 | "teal": (0, 128, 128), 165 | "thistle": (216, 191, 216), 166 | "tomato": (255, 99, 71), 167 | "turquoise": (64, 224, 208), 168 | "violet": (238, 130, 238), 169 | "wheat": (245, 222, 179), 170 | "white": (255, 255, 255), 171 | "whitesmoke": (245, 245, 245), 172 | "yellow": (255, 255, 0), 173 | "yellowgreen": (154, 205, 50), 174 | } 175 | 176 | valid_locations = { # x, y in 90*90 177 | "in the center": (45, 45), 178 | "on the left": (15, 45), 179 | "on the right": (75, 45), 180 | "on the top": (45, 15), 181 | "on the bottom": (45, 75), 182 | "on the top-left": (15, 15), 183 | "on the top-right": (75, 15), 184 | "on the bottom-left": (15, 75), 185 | "on the bottom-right": (75, 75), 186 | "center": (45, 45), 187 | "left": (15, 45), 188 | "right": (75, 45), 189 | "top": (45, 15), 190 | "bottom": (45, 75), 191 | "top-left": (15, 15), 192 | "top-right": (75, 15), 193 | "bottom-left": (15, 75), 194 | "bottom-right": (75, 75), 195 | } 196 | 197 | valid_offsets = { # x, y in 90*90 198 | "no offset": (0, 0), 199 | "slightly to the left": (-10, 0), 200 | "slightly to the right": (10, 0), 201 | "slightly to the upper": (0, -10), 202 | "slightly to the lower": (0, 10), 203 | "slightly to the upper-left": (-10, -10), 204 | "slightly to the upper-right": (10, -10), 205 | "slightly to the lower-left": (-10, 10), 206 | "slightly to the lower-right": (10, 10), 207 | "no": (0, 0), 208 | "left": (-10, 0), 209 | "right": (10, 0), 210 | "upper": (0, -10), 211 | "lower": (0, 10), 212 | "upper-left": (-10, -10), 213 | "upper-right": (10, -10), 214 | "lower-left": (-10, 10), 215 | "lower-right": (10, 10), 216 | } 217 | 218 | valid_areas = { # w, h in 90*90 219 | "a small square area": (50, 50), 220 | "a small vertical area": (40, 60), 221 | "a small horizontal area": (60, 40), 222 | "a medium-sized square area": (60, 60), 223 | "a medium-sized vertical area": (50, 80), 224 | "a medium-sized horizontal area": (80, 50), 225 | "a large square area": (70, 70), 226 | "a large vertical area": (60, 90), 227 | "a large horizontal area": (90, 60), 228 | } 229 | 230 | 231 | def closest_name(input_str, options): 232 | input_str = input_str.lower() 233 | 234 | closest_match = 
difflib.get_close_matches( 235 | input_str, list(options.keys()), n=1, cutoff=0.5 236 | ) 237 | assert ( 238 | isinstance(closest_match, list) and len(closest_match) > 0 239 | ), f"The value [{input_str}] is not valid!" 240 | result = closest_match[0] 241 | 242 | if result != input_str: 243 | print(f"Automatically corrected [{input_str}] -> [{result}].") 244 | 245 | return result 246 | 247 | 248 | def safe_str(x): 249 | return x.strip(",. ") + "." 250 | 251 | 252 | def binary_nonzero_positions(n, offset=0): 253 | binary_str = bin(n)[2:] 254 | positions = [i + offset for i, bit in enumerate(reversed(binary_str)) if bit == "1"] 255 | return positions 256 | 257 | 258 | class OmostCanvasCondition(TypedDict): 259 | prefixes: list[str] 260 | suffixes: list[str] 261 | rect: tuple[int, int, int, int] 262 | color: tuple[int, int, int] 263 | 264 | 265 | class Canvas: 266 | @staticmethod 267 | def from_bot_response(response: str) -> Canvas: 268 | matched = re.search(r"```python\n(.*?)\n```", response, re.DOTALL) 269 | assert matched, f"Response does not contain codes!\n{response}" 270 | code_content = matched.group(1) 271 | assert ( 272 | "canvas = Canvas()" in code_content 273 | ), f"Code block must include valid canvas var!\n{response}" 274 | return Canvas.from_python_code(code_content) 275 | 276 | @staticmethod 277 | def from_python_code(code: str) -> Canvas: 278 | local_vars = {"Canvas": Canvas} 279 | exec(code, {}, local_vars) 280 | canvas = local_vars.get("canvas", None) 281 | assert isinstance(canvas, Canvas), "Code must produce valid canvas var!" 282 | return canvas 283 | 284 | def __init__(self): 285 | self.components = [] 286 | self.color = None 287 | self.record_tags = True 288 | self.prefixes = [] 289 | self.suffixes = [] 290 | return 291 | 292 | def set_global_description( 293 | self, 294 | description: str, 295 | detailed_descriptions: list[str], 296 | tags: str, 297 | HTML_web_color_name: str, 298 | ): 299 | assert isinstance(description, str), "Global description is not valid!" 300 | assert isinstance(detailed_descriptions, list) and all( 301 | isinstance(item, str) for item in detailed_descriptions 302 | ), "Global detailed_descriptions is not valid!" 303 | assert isinstance(tags, str), "Global tags is not valid!" 304 | 305 | HTML_web_color_name = closest_name(HTML_web_color_name, valid_colors) 306 | self.color = valid_colors[HTML_web_color_name] 307 | 308 | self.prefixes = [description] 309 | self.suffixes = detailed_descriptions 310 | 311 | if self.record_tags: 312 | self.suffixes = self.suffixes + [tags] 313 | 314 | self.prefixes = [safe_str(x) for x in self.prefixes] 315 | self.suffixes = [safe_str(x) for x in self.suffixes] 316 | 317 | return 318 | 319 | def add_local_description( 320 | self, 321 | location: str, 322 | offset: str, 323 | area: str, 324 | distance_to_viewer: float, 325 | description: str, 326 | detailed_descriptions: list[str], 327 | tags: str, 328 | atmosphere: str, 329 | style: str, 330 | quality_meta: str, 331 | HTML_web_color_name: str, 332 | ): 333 | assert isinstance(description, str), "Local description is wrong!" 334 | assert ( 335 | isinstance(distance_to_viewer, (int, float)) and distance_to_viewer > 0 336 | ), f"The distance_to_viewer for [{description}] is not positive float number!" 337 | assert isinstance(detailed_descriptions, list) and all( 338 | isinstance(item, str) for item in detailed_descriptions 339 | ), f"The detailed_descriptions for [{description}] is not valid!" 
340 |         assert isinstance(tags, str), f"The tags for [{description}] is not valid!"
341 |         assert isinstance(
342 |             atmosphere, str
343 |         ), f"The atmosphere for [{description}] is not valid!"
344 |         assert isinstance(style, str), f"The style for [{description}] is not valid!"
345 |         assert isinstance(
346 |             quality_meta, str
347 |         ), f"The quality_meta for [{description}] is not valid!"
348 | 
349 |         location = closest_name(location, valid_locations)
350 |         offset = closest_name(offset, valid_offsets)
351 |         area = closest_name(area, valid_areas)
352 |         HTML_web_color_name = closest_name(HTML_web_color_name, valid_colors)
353 | 
354 |         xb, yb = valid_locations[location]
355 |         xo, yo = valid_offsets[offset]
356 |         w, h = valid_areas[area]
357 |         rect = (yb + yo - h // 2, yb + yo + h // 2, xb + xo - w // 2, xb + xo + w // 2)
358 |         rect = [max(0, min(90, i)) for i in rect]
359 |         color = valid_colors[HTML_web_color_name]
360 | 
361 |         prefixes = self.prefixes + [description]
362 |         suffixes = detailed_descriptions
363 | 
364 |         if self.record_tags:
365 |             suffixes = suffixes + [tags, atmosphere, style, quality_meta]
366 | 
367 |         prefixes = [safe_str(x) for x in prefixes]
368 |         suffixes = [safe_str(x) for x in suffixes]
369 | 
370 |         self.components.append(
371 |             dict(
372 |                 rect=rect,
373 |                 distance_to_viewer=distance_to_viewer,
374 |                 color=color,
375 |                 prefixes=prefixes,
376 |                 suffixes=suffixes,
377 |             )
378 |         )
379 | 
380 |         return
381 | 
382 |     @staticmethod
383 |     def render_initial_latent(conds: list[OmostCanvasCondition]) -> np.ndarray:
384 |         def np_color(rgb: tuple[int, int, int]) -> np.ndarray:
385 |             return np.array([[rgb]], dtype=np.uint8)
386 | 
387 |         initial_latent = np.zeros(shape=(90, 90, 3), dtype=np.float32) + np_color(
388 |             conds[0]["color"]
389 |         )
390 | 
391 |         for cond in conds[1:]:
392 |             a, b, c, d = cond["rect"]
393 |             initial_latent[a:b, c:d] = (
394 |                 0.7 * np_color(cond["color"]) + 0.3 * initial_latent[a:b, c:d]
395 |             )
396 | 
397 |         initial_latent = initial_latent.clip(0, 255).astype(np.uint8)
398 | 
399 |         return initial_latent
400 | 
401 |     @staticmethod
402 |     def render_mask(cond: OmostCanvasCondition) -> torch.Tensor:
403 |         """Returns mask of shape [H, W]"""
404 |         mask = torch.zeros([90, 90], dtype=torch.float32)
405 |         a, b, c, d = cond["rect"]
406 |         mask[a:b, c:d] = 1.0
407 |         return mask
408 | 
409 |     def process(self) -> list[OmostCanvasCondition]:
410 |         # sort components from farthest to nearest, so nearer regions come last
411 |         self.components = sorted(
412 |             self.components, key=lambda x: x["distance_to_viewer"], reverse=True
413 |         )
414 | 
415 |         # the global canvas condition comes first, followed by one per region
416 |         bag_of_conditions = [
417 |             dict(
418 |                 rect=(0, 90, 0, 90),
419 |                 prefixes=self.prefixes,
420 |                 suffixes=self.suffixes,
421 |                 color=self.color,
422 |             )
423 |         ]
424 | 
425 |         for component in self.components:
426 |             bag_of_conditions.append(
427 |                 dict(
428 |                     color=component["color"],
429 |                     rect=component["rect"],
430 |                     prefixes=component["prefixes"],
431 |                     suffixes=component["suffixes"],
432 |                 )
433 |             )
434 | 
435 |         return bag_of_conditions
436 | 
--------------------------------------------------------------------------------
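For orientation, here is a minimal sketch of how the `Canvas` class above is driven end to end: parse an LLM-generated canvas script, compute the bag of region conditions, and render the 90x90 color layout plus a per-region mask. This is only an illustration of the API shown in canvas.py; the import path `flowy.lib_omost.canvas` and the hand-written sample script are assumptions, not code taken from this repository.

```python
# Minimal usage sketch. Assumptions: the module is importable as
# flowy.lib_omost.canvas, and the `code` string below stands in for
# what the Omost LLM would emit inside a fenced python code block.
from flowy.lib_omost.canvas import Canvas

code = """
canvas = Canvas()
canvas.set_global_description(
    description="An ancient warship at sea",
    detailed_descriptions=["The hull is covered in barnacles and moss."],
    tags="ancient, warship, sea",
    HTML_web_color_name="navy",
)
canvas.add_local_description(
    location="in the center",
    offset="no offset",
    area="a large square area",
    distance_to_viewer=10.0,
    description="The warship silhouette against the horizon",
    detailed_descriptions=["Towering masts and a curved hull."],
    tags="warship, silhouette, masts",
    atmosphere="nostalgic, mysterious",
    style="realistic",
    quality_meta="vivid",
    HTML_web_color_name="navy",
)
"""

canvas = Canvas.from_python_code(code)        # execs the script, returns the Canvas
conds = canvas.process()                      # global condition first, then regions
latent = Canvas.render_initial_latent(conds)  # (90, 90, 3) uint8 color layout
mask = Canvas.render_mask(conds[1])           # [90, 90] float mask for the region
print(latent.shape, mask.shape)               # (90, 90, 3) torch.Size([90, 90])
```

Note that `Canvas.from_bot_response` would additionally require the script to be wrapped in a fenced python block, since it extracts the code with a regex before delegating to `from_python_code`.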