├── .gitignore ├── LICENSE ├── README.md ├── __init__.py ├── assets └── screenshot.jpg ├── clientlibs ├── main.js ├── types.js └── utils.js ├── modules ├── apiManager.py ├── bgremoval.py ├── bridges │ └── mainRoute.py ├── controlNet.py ├── controlNetCombine.py ├── controlNetPreprocessor.py ├── deepCache.py ├── embeddingSearch.py ├── embeddingsCombine.py ├── imageCaptioning.py ├── imageInference.py ├── imageMasking.py ├── ipAdapter.py ├── ipAdapterCombine.py ├── loraCombine.py ├── loraSearch.py ├── modelSearch.py ├── multiInference.py ├── outpaintSettings.py ├── photoMaker.py ├── referenceImages.py ├── refiner.py ├── runwareBFL.py ├── teaCache.py ├── upscaler.py ├── utils │ └── runwareUtils.py └── vaeSearch.py ├── requirements.txt └── workflows ├── Runware_Adetailer.json ├── Runware_Basic_Background_Removal.json ├── Runware_Basic_Image_Inference.json ├── Runware_Basic_Image_Inference_IPAdapter.json ├── Runware_Basic_Image_Inference_Lora.json ├── Runware_Basic_Image_Upscaling.json ├── Runware_ControlNet.json ├── Runware_Image_Inference_Captioning.json ├── Runware_Image_Inference_Embedding.json ├── Runware_Image_Inference_Refiner.json ├── Runware_Image_Inference_VAE.json ├── Runware_Inpainting.json ├── Runware_Multi_Inference.json ├── Runware_Outpainting.json └── Runware_PhotoMaker_v2.json /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | *.py[cod] 3 | *$py.class 4 | imagesCache.json 5 | 6 | .env 7 | .venv 8 | env/ 9 | venv/ 10 | ENV/ 11 | env.bak/ 12 | venv.bak/ 13 | 14 | cython_debug/ 15 | .pyre/ 16 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Runware 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ComfyUI-Runware Integration 🚀 2 | 3 | Welcome to **ComfyUI-Runware**! 🌟 This is the official integration for **[Runware](https://runware.ai/?utm_source=github&utm_medium=referral&utm_campaign=comfyui)**, bringing you the power of **inference-as-a-service** directly into **ComfyUI**! 4 | 5 | ## 🌟 What is ComfyUI-Runware? 6 | 7 | Imagine creating stunning images, refining details, or even removing backgrounds—all without needing a powerful GPU. With **Runware**, all the heavy lifting happens on our servers. No GPU? No problem! 🖥️💨 8 | 9 | This integration adds a suite of custom nodes to ComfyUI, allowing you to: 10 | - Perform advanced image inference tasks, including image generation and editing with powerful models. 11 | - Utilize PhotoMaker V2 pipelines 12 | - Leverage cutting-edge models like **Flux .1 Kontext Pro** and **Flux .1 Kontext Max** for superior image generation and editing. 13 | - Upscale images 14 | - Remove backgrounds 15 | - Leverage specialized models and LoRAs 16 | - Use your custom uploaded models and LoRAs 17 | 18 | ... and so much more! Let’s make your workflows seamless and efficient with **Runware**. 🖌️✨ 19 | 20 | --- 21 | 22 | ## 📖 Table of Contents 23 | 24 | 1. [Introduction](#-what-is-comfyui-runware) 25 | 2. [Installation](#-installation) 26 | 3. [Node Features](#-node-features) 27 | 4. [Support & Community](#-support--community) 28 | 29 | --- 30 | 31 | Here’s a sneak peek of the ComfyUI-Runware nodes in action: 32 | 33 | ![Screenshot](assets/screenshot.jpg) 34 | 35 | --- 36 | 37 | ## 🔧 Installation 38 | 39 | ### Step 1: Install ComfyUI 40 | 41 | First, ensure you have ComfyUI installed. You can follow the [pre-built package guide](https://docs.comfy.org/get_started/pre_package) or the [manual installation guide](https://docs.comfy.org/get_started/manual_install). 42 | 43 | Make sure your system meets these requirements: 44 | - **Python 3.10+** 45 | - **ComfyUI installed** 46 | 47 | ### Step 2: Install ComfyUI-Runware 48 | 49 | You have two options to install this integration: 50 | 51 | #### Option 1: Using ComfyUI Manager (Recommended) 52 | First, ensure you have the `ComfyUI-Manager` custom node installed. If you don't have it or are using the beta ComfyUI desktop version, follow the instructions on this [GitHub Repo](https://github.com/ltdrdata/ComfyUI-Manager?tab=readme-ov-file#installation). 53 | 54 | After installing ComfyUI-Manager, open it and click on "Custom Nodes Manager". Search for "Runware" or **"Runware.ai"** and then click on install or update. Finally, restart your ComfyUI. ✨ 55 | 56 | #### Option 2: Manual Installation 57 | 58 | 1. Navigate to the custom nodes directory: 59 | ```bash 60 | cd custom_nodes 61 | ``` 62 | 63 | 2. Clone the repository: 64 | ```bash 65 | git clone https://github.com/Runware/ComfyUI-Runware.git 66 | ``` 67 | 68 | 3. Navigate to the repository folder: 69 | ```bash 70 | cd ComfyUI-Runware 71 | ``` 72 | 73 | 4. Install dependencies: 74 | ```bash 75 | pip install -r requirements.txt 76 | ``` 77 | 78 | ### Step 3: Run ComfyUI 79 | 80 | Install latest ComfyUI frontend package 81 | ```bash 82 | pip install -U comfyui-frontend-package 83 | ```` 84 | 85 | Start ComfyUI with the following command: 86 | ```bash 87 | python main.py 88 | ``` 89 | 90 | #### Optional: CPU-Only Mode 91 | If you want to run ComfyUI without a GPU, add the `--cpu` flag: 92 | ```bash 93 | python main.py --cpu 94 | ``` 95 | 96 | ### Step 4: Explore Workflows 97 | Inside the `ComfyUI-Runware` custom node folder, you’ll find a `workflows` folder with pre-made workflows to get you started! 🚀 98 | 99 | --- 100 | 101 | ## 🧩 Node Features 102 | 103 | Here’s a breakdown of the amazing nodes included in this integration: 104 | 105 | - **Runware API Manager**: Set or change your API keys, adjust the max connection timeout, adjust the image output quality and format, and enable or disable image caching directly in ComfyUI—no need to edit config files manually! 🔑 106 | - **Runware Image Inference**: Perform advanced tasks like inpainting, outpainting, and more. 🎨 107 | - **Runware PhotoMakerV2**: Create consistent identities with our photomaker pipeline. 🖼️ 108 | - **Runware Image Upscale**: Upscale your images up to 4x. 🔍 109 | - **Runware Background Removal**: Effortlessly remove image backgrounds. 🧹 110 | - **Runware LoRA**: Search and select LoRAs to enhance your workflow. 📂 111 | - **Runware Model**: Choose specific models to connect with image inference. 🤖 112 | - **Runware ControlNet**: Guide your image generation with ControlNet and guide images. 🗺️ 113 | - **Runware Refiner**: Refine your images with advanced tools. ✨ 114 | - **Runware LoRA Combine**: Combine up to 3 LoRAs together. 🔗 115 | - **Runware ControlNet Combine**: Combine multiple ControlNets for complex workflows. 🧩 116 | - **Runware Image Masking**: Automatically mask elements like faces, hands, and more. 🖌️ 117 | - **Runware ControlNet Preprocessor**: Preprocess images before using them as guide images in ControlNet. 🔄 118 | - **Runware VAE**: Search and connect a VAE to Image inference. 🖼️ 119 | - **Runware Embedding**: Search and connect Embedding to image inference. 🧩 120 | - **Runware Embedding Combine**: Combine multiple embeddings together. 🔗 121 | - **Runware Image Caption**: Generate descriptive text from images for further workflow integration. 🖼️ 122 | - **Runware IPAdapter**: Use reference images to guide the style and content of generated images. 🖌️ 123 | - **Runware IPAdapters Combine**: Combine multiple IP-Adapter inputs for sophisticated image conditioning. 🔗 124 | 125 | --- 126 | 127 | ## 🤝 Support & Community 128 | 129 | This is the **official Runware integration**, maintained by **Runware Inc**. We’re here to help you every step of the way! 💬 130 | 131 | Join our community on Discord for support, updates, and to connect with fellow creators: [Runware Discord](https://discord.com/invite/aJ4UzvBqNU) 🎉 132 | 133 | --- 134 | 135 | Thank you for using **ComfyUI-Runware**! Let’s create something amazing together. 🌟 136 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | from .modules.imageInference import txt2img 2 | from .modules.outpaintSettings import outpaintSettings 3 | from .modules.bgremoval import bgremoval 4 | from .modules.photoMaker import photoMaker 5 | from .modules.upscaler import upscaler 6 | from .modules.modelSearch import modelSearch 7 | from .modules.bridges import mainRoute 8 | from .modules.controlNet import controlNet 9 | from .modules.multiInference import multiInference 10 | from .modules.runwareBFL import runwareKontext 11 | from .modules.teaCache import teaCache 12 | from .modules.deepCache import deepCache 13 | from .modules.loraSearch import loraSearch 14 | from .modules.loraCombine import loraCombine 15 | from .modules.refiner import refiner 16 | from .modules.imageMasking import imageMasking 17 | from .modules.controlNetPreprocessor import controlNetPreprocessor 18 | from .modules.apiManager import apiManager 19 | from .modules.imageCaptioning import imageCaptioning 20 | from .modules.controlNetCombine import controlNetCombine 21 | from .modules.embeddingSearch import embeddingSearch 22 | from .modules.embeddingsCombine import embeddingsCombine 23 | from .modules.ipAdapter import ipAdapter 24 | from .modules.ipAdapterCombine import ipAdapterCombine 25 | from .modules.vaeSearch import vaeSearch 26 | from .modules.referenceImages import referenceImages 27 | 28 | RUNWARE_COMFYUI_VERSION = "1.1.0 Beta" 29 | 30 | RESET_COLOR = "\033[0m" 31 | BLUE_COLOR = "\033[94m" 32 | GREEN_COLOR = "\033[92m" 33 | print(BLUE_COLOR + "##############################################################" + RESET_COLOR) 34 | print(GREEN_COLOR + " Runware ComfyUI Inference Services Are Loaded Successfully" + RESET_COLOR) 35 | print(GREEN_COLOR + " Version: " + RUNWARE_COMFYUI_VERSION + " | Maintained by: Runware Inc" + RESET_COLOR) 36 | print(GREEN_COLOR + " Official Website: https://my.runware.ai" + RESET_COLOR) 37 | print(BLUE_COLOR + "##############################################################" + RESET_COLOR) 38 | 39 | NODE_CLASS_MAPPINGS = { 40 | "Runware Image Inference": txt2img, 41 | "Runware Outpaint": outpaintSettings, 42 | "Runware Background Removal": bgremoval, 43 | "Runware PhotoMaker V2": photoMaker, 44 | "Runware Image Upscaler": upscaler, 45 | "Runware Model Search": modelSearch, 46 | "Runware Kontext Inference": runwareKontext, 47 | "Runware Multi Inference": multiInference, 48 | "Runware TeaCache": teaCache, 49 | "Runware DeepCache": deepCache, 50 | "Runware Lora Search": loraSearch, 51 | "Runware Embedding Search": embeddingSearch, 52 | "Runware VAE Search": vaeSearch, 53 | "Runware Embeddings Combine": embeddingsCombine, 54 | "Runware ControlNet": controlNet, 55 | "Runware Lora Combine": loraCombine, 56 | "Runware Refiner": refiner, 57 | "Runware Image Masking": imageMasking, 58 | "Runware ControlNet PreProcessor": controlNetPreprocessor, 59 | "Runware API Manager": apiManager, 60 | "Runware Image Caption": imageCaptioning, 61 | "Runware ControlNet Combine": controlNetCombine, 62 | "Runware IPAdapter": ipAdapter, 63 | "Runware IPAdapters Combine": ipAdapterCombine, 64 | "Runware Reference Images": referenceImages, 65 | } 66 | 67 | NODE_DISPLAY_NAME_MAPPINGS = { 68 | "Runware Model Search": "Runware Model", 69 | "Runware Lora Search": "Runware Lora", 70 | "Runware Embedding Search": "Runware Embedding", 71 | "Runware VAE Search": "Runware VAE", 72 | "Runware Multi Inference": "Runware Multi Inference [BETA]", 73 | } 74 | 75 | WEB_DIRECTORY = "./clientlibs" 76 | __all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS", "WEB_DIRECTORY"] -------------------------------------------------------------------------------- /assets/screenshot.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Runware/ComfyUI-Runware/7d4cca13cfd04dd99f180d7d894de9fd4d4060d8/assets/screenshot.jpg -------------------------------------------------------------------------------- /clientlibs/main.js: -------------------------------------------------------------------------------- 1 | import { app } from "../../scripts/app.js"; 2 | import { api } from "../../scripts/api.js"; 3 | import { promptEnhanceHandler, syncDimensionsNodeHandler, searchNodeHandler, APIKeyHandler, captionNodeHandler, handleCustomErrors } from "./utils.js"; 4 | import { RUNWARE_NODE_TYPES, RUNWARE_NODE_PROPS, SEARCH_TERMS } from "./types.js"; 5 | 6 | const nodeInitList = []; 7 | app.registerExtension({ 8 | name: "runware.ai", 9 | async setup() { 10 | api.addEventListener('runwareError', handleCustomErrors); 11 | api.addEventListener('runwareImageCaption', captionNodeHandler); 12 | }, 13 | 14 | async nodeCreated(node) { 15 | const nodeClass = node.comfyClass; 16 | let crNodeProps = false; 17 | if(typeof nodeClass === "string" && Object.values(RUNWARE_NODE_TYPES).includes(nodeClass)) { 18 | crNodeProps = RUNWARE_NODE_PROPS[nodeClass]; 19 | } else { 20 | return; 21 | } 22 | 23 | node.bgcolor = crNodeProps.bgColor; 24 | if(nodeClass === RUNWARE_NODE_TYPES.APIMANAGER) { 25 | APIKeyHandler(node); 26 | } else if(nodeClass === RUNWARE_NODE_TYPES.IMAGECAPTION) { 27 | const captionInput = node.widgets[1].inputEl; 28 | captionInput.style.outline = "none"; 29 | captionInput.readOnly = true; 30 | return; 31 | } 32 | 33 | if(crNodeProps.colorModeOnly === true) return; 34 | const nodeWidgets = node.widgets; 35 | if(nodeWidgets.length <= 0) return; 36 | 37 | for(const nodeWidget of nodeWidgets) { 38 | const widgetName = nodeWidget.name; 39 | const widgetType = nodeWidget.type; 40 | 41 | if(crNodeProps.promptEnhancer === true && widgetType === "customtext") { 42 | if (widgetName === "positivePrompt" || widgetName === "negativePrompt") { 43 | nodeWidget.inputEl.addEventListener('keydown', promptEnhanceHandler); 44 | } 45 | } else if(crNodeProps.liveDimensions === true && widgetType === "combo" && widgetName === "dimensions") { 46 | syncDimensionsNodeHandler(node, nodeWidget); 47 | } 48 | 49 | if(crNodeProps.liveSearch === true) { 50 | if(widgetType === "text" && SEARCH_TERMS.includes(widgetName)) { 51 | node.callback = function(){}; 52 | searchNodeHandler(node, nodeWidget); 53 | nodeInitList.push(node); 54 | } 55 | } 56 | } 57 | }, 58 | loadedGraphNode(node) { 59 | if(nodeInitList.includes(node)) node.callback(); 60 | } 61 | }) -------------------------------------------------------------------------------- /clientlibs/types.js: -------------------------------------------------------------------------------- 1 | const DEFAULT_BGCOLOR = "#5345bf"; 2 | 3 | const DEFAULT_DIMENSIONS_LIST = { 4 | "Square (512x512)": "512x512", 5 | "Square HD (1024x1024)": "1024x1024", 6 | "Portrait 3:4 (768x1024)": "768x1024", 7 | "Portrait 9:16 (576x1024)": "576x1024", 8 | "Landscape 4:3 (1024x768)": "1024x768", 9 | "Landscape 16:9 (1024x576)": "1024x576" 10 | }; 11 | 12 | const DEFAULT_MODELS_ARCH_LIST = { 13 | "All": "all", 14 | "FLUX.1-Schnell": "flux1s", 15 | "FLUX.1-Dev": "flux1d", 16 | "Pony": "pony", 17 | "SD 1.5": "sd1x", 18 | "SD 1.5 Hyper": "sdhyper", 19 | "SD 1.5 LCM": "sd1xlcm", 20 | "SD 3": "sd3", 21 | "SDXL 1.0": "sdxl", 22 | "SDXL 1.0 LCM": "sdxllcm", 23 | "SDXL Distilled": "sdxldistilled", 24 | "SDXL Hyper": "sdxlhyper", 25 | "SDXL Lightning": "sdxllightning", 26 | "SDXL Turbo": "sdxlturbo", 27 | }; 28 | 29 | const DEFAULT_CONTROLNET_CONDITIONING_LIST = { 30 | "All": "all", 31 | "Canny": "canny", 32 | "Depth": "depth", 33 | "MLSD": "mlsd", 34 | "Normal BAE": "normalbae", 35 | "Open Pose": "openpose", 36 | "Tile": "tile", 37 | "Seg": "seg", 38 | "Line Art": "lineart", 39 | "Line Art Anime": "lineart_anime", 40 | "Shuffle": "shuffle", 41 | "Scribble": "scribble", 42 | "Soft Edge": "softedge", 43 | }; 44 | 45 | const SEARCH_TERMS = ["Model Search", "Lora Search", "ControlNet Search", "Embedding Search", "VAE Search"]; 46 | const MODEL_TYPES_TERMS = ["ModelType", "LoraType", "ControlNetType"]; 47 | const MODEL_LIST_TERMS = ["ModelList", "LoraList", "ControlNetList", "EmbeddingList", "VAEList"]; 48 | 49 | const RUNWARE_NODE_TYPES = { 50 | IMAGEINFERENCE: "Runware Image Inference", 51 | KONTEXTINFERENCE: "Runware Kontext Inference", 52 | OUTPAINT: "Runware Outpaint", 53 | PHOTOMAKER: "Runware PhotoMaker V2", 54 | MODELSEARCH: "Runware Model Search", 55 | MULTIINFERENCE: "Runware Multi Inference", 56 | TEACACHE: "Runware TeaCache", 57 | DEEPCACHE: "Runware DeepCache", 58 | LORASEARCH: "Runware Lora Search", 59 | CONTROLNET: "Runware ControlNet", 60 | BGREMOVAL: "Runware Background Removal", 61 | UPSCALER: "Runware Image Upscaler", 62 | REFINER: "Runware Refiner", 63 | LORACOMBINE: "Runware Lora Combine", 64 | CONTROLNETCOMBINE: "Runware ControlNet Combine", 65 | IPADAPTER: "Runware IPAdapter", 66 | IPADAPTERSCOMBINE: "Runware IPAdapters Combine", 67 | IMAGEMASKING: "Runware Image Masking", 68 | CONTROLNETPREPROCESSING: "Runware ControlNet PreProcessor", 69 | APIMANAGER: "Runware API Manager", 70 | IMAGECAPTION: "Runware Image Caption", 71 | EMBEDDING: "Runware Embedding Search", 72 | EMBEDDINGCOMBINE: "Runware Embedding Combine", 73 | VAE: "Runware VAE Search", 74 | REFERENCEIMAGES: "Runware Reference Images", 75 | }; 76 | 77 | const RUNWARE_NODE_PROPS = { 78 | [RUNWARE_NODE_TYPES.IMAGEINFERENCE]: { 79 | bgColor: DEFAULT_BGCOLOR, 80 | liveDimensions: true, 81 | promptEnhancer: true, 82 | }, 83 | [RUNWARE_NODE_TYPES.KONTEXTINFERENCE]: { 84 | bgColor: DEFAULT_BGCOLOR, 85 | promptEnhancer: true, 86 | }, 87 | [RUNWARE_NODE_TYPES.REFERENCEIMAGES]: { 88 | bgColor: DEFAULT_BGCOLOR, 89 | colorModeOnly: true, 90 | }, 91 | [RUNWARE_NODE_TYPES.OUTPAINT]: { 92 | bgColor: DEFAULT_BGCOLOR, 93 | colorModeOnly: true, 94 | }, 95 | [RUNWARE_NODE_TYPES.PHOTOMAKER]: { 96 | bgColor: DEFAULT_BGCOLOR, 97 | liveDimensions: true, 98 | promptEnhancer: true, 99 | }, 100 | [RUNWARE_NODE_TYPES.MODELSEARCH]: { 101 | bgColor: DEFAULT_BGCOLOR, 102 | liveSearch: true, 103 | }, 104 | [RUNWARE_NODE_TYPES.MULTIINFERENCE]: { 105 | bgColor: DEFAULT_BGCOLOR, 106 | colorModeOnly: true, 107 | }, 108 | [RUNWARE_NODE_TYPES.TEACACHE]: { 109 | bgColor: DEFAULT_BGCOLOR, 110 | colorModeOnly: true, 111 | }, 112 | [RUNWARE_NODE_TYPES.DEEPCACHE]: { 113 | bgColor: DEFAULT_BGCOLOR, 114 | colorModeOnly: true, 115 | }, 116 | [RUNWARE_NODE_TYPES.LORASEARCH]: { 117 | bgColor: DEFAULT_BGCOLOR, 118 | liveSearch: true, 119 | }, 120 | [RUNWARE_NODE_TYPES.EMBEDDING]: { 121 | bgColor: DEFAULT_BGCOLOR, 122 | liveSearch: true, 123 | }, 124 | [RUNWARE_NODE_TYPES.CONTROLNET]: { 125 | bgColor: DEFAULT_BGCOLOR, 126 | liveSearch: true, 127 | }, 128 | [RUNWARE_NODE_TYPES.VAE]: { 129 | bgColor: DEFAULT_BGCOLOR, 130 | liveSearch: true, 131 | }, 132 | [RUNWARE_NODE_TYPES.EMBEDDINGCOMBINE]: { 133 | bgColor: DEFAULT_BGCOLOR, 134 | colorModeOnly: true, 135 | }, 136 | [RUNWARE_NODE_TYPES.CONTROLNETCONDITIONING]: { 137 | bgColor: DEFAULT_BGCOLOR, 138 | liveSearch: true, 139 | }, 140 | [RUNWARE_NODE_TYPES.BGREMOVAL]: { 141 | bgColor: DEFAULT_BGCOLOR, 142 | colorModeOnly: true, 143 | }, 144 | [RUNWARE_NODE_TYPES.UPSCALER]: { 145 | bgColor: DEFAULT_BGCOLOR, 146 | colorModeOnly: true, 147 | }, 148 | [RUNWARE_NODE_TYPES.REFINER]: { 149 | bgColor: DEFAULT_BGCOLOR, 150 | colorModeOnly: true, 151 | }, 152 | [RUNWARE_NODE_TYPES.LORACOMBINE]: { 153 | bgColor: DEFAULT_BGCOLOR, 154 | colorModeOnly: true, 155 | }, 156 | [RUNWARE_NODE_TYPES.IPADAPTER]: { 157 | bgColor: DEFAULT_BGCOLOR, 158 | colorModeOnly: true, 159 | }, 160 | [RUNWARE_NODE_TYPES.IPADAPTERSCOMBINE]: { 161 | bgColor: DEFAULT_BGCOLOR, 162 | colorModeOnly: true, 163 | }, 164 | [RUNWARE_NODE_TYPES.CONTROLNETCOMBINE]: { 165 | bgColor: DEFAULT_BGCOLOR, 166 | colorModeOnly: true, 167 | }, 168 | [RUNWARE_NODE_TYPES.IMAGEMASKING]: { 169 | bgColor: DEFAULT_BGCOLOR, 170 | colorModeOnly: true, 171 | }, 172 | [RUNWARE_NODE_TYPES.CONTROLNETPREPROCESSING]: { 173 | bgColor: DEFAULT_BGCOLOR, 174 | liveDimensions: true, 175 | }, 176 | [RUNWARE_NODE_TYPES.APIMANAGER]: { 177 | bgColor: DEFAULT_BGCOLOR, 178 | colorModeOnly: true, 179 | }, 180 | [RUNWARE_NODE_TYPES.IMAGECAPTION]: { 181 | bgColor: DEFAULT_BGCOLOR, 182 | colorModeOnly: true, 183 | }, 184 | }; 185 | 186 | export { 187 | DEFAULT_BGCOLOR, 188 | DEFAULT_DIMENSIONS_LIST, 189 | DEFAULT_MODELS_ARCH_LIST, 190 | DEFAULT_CONTROLNET_CONDITIONING_LIST, 191 | SEARCH_TERMS, 192 | MODEL_TYPES_TERMS, 193 | MODEL_LIST_TERMS, 194 | RUNWARE_NODE_TYPES, 195 | RUNWARE_NODE_PROPS 196 | } -------------------------------------------------------------------------------- /modules/apiManager.py: -------------------------------------------------------------------------------- 1 | from .utils import runwareUtils as rwUtils 2 | 3 | class apiManager: 4 | @classmethod 5 | def INPUT_TYPES(cls): 6 | return { 7 | "required": { 8 | "API Key": ( 9 | "STRING", 10 | { 11 | "tooltip": "Define Or Change Your Runware API Key To Start Using Runware Image Inference Services.\n\n(e.g; BcVr1JVQM8FGzrRwb3GsbCq5ww1QwabV)\n\nIf You Don't Have One, You Can Get It From: https://my.runware.ai/keys", 12 | }, 13 | ), 14 | "Max Timeout": ( 15 | "INT", 16 | { 17 | "tooltip": "Change Runware API Request Timeout In Seconds.\n\n(e.g; 90 Seconds).", 18 | "min": 5, 19 | "max": 99, 20 | "default": int(rwUtils.getTimeout()), 21 | }, 22 | ), 23 | "Image Output Quality": ( 24 | "INT", 25 | { 26 | "tooltip": "Sets the compression quality of the output image. Higher values preserve more quality but increase file size, lower values reduce file size but decrease quality.", 27 | "min": 20, 28 | "max": 99, 29 | "default": int(rwUtils.getOutputQuality()), 30 | }, 31 | ), 32 | "Image Output Format": ( 33 | ["WEBP", "PNG", "JPEG"], 34 | { 35 | "tooltip": "Change the Default Image Output Format.", 36 | "default": rwUtils.getOutputFormat(), 37 | }, 38 | ), 39 | "Enable Images Caching": ( 40 | "BOOLEAN", 41 | { 42 | "label_on": "Enabled", 43 | "label_off": "Disabled", 44 | "tooltip": "Enable or disable image caching functionality.\n\nWhen enabled, images will be cached to improve performance and reduce redundant processing.", 45 | "default": rwUtils.getEnableImagesCaching(), 46 | }, 47 | ), 48 | "Min Image Cache Size": ( 49 | "INT", 50 | { 51 | "tooltip": "Set the minimum size (in KB) for images to be cached.", 52 | "min": 30, 53 | "max": 4096, 54 | "step": 1, 55 | "default": int(rwUtils.getMinImageCacheSize()), 56 | }, 57 | ), 58 | }, 59 | } 60 | 61 | DESCRIPTION = "API Managers is a Runware Utility That helps you define or change your API Key, Session Timeout, Image Output Format & Quality, and Image Caching settings From The ComfyUI Interface Without having to adjust the env file locally." 62 | CATEGORY = "Runware" 63 | RETURN_TYPES = () -------------------------------------------------------------------------------- /modules/bgremoval.py: -------------------------------------------------------------------------------- 1 | from .utils import runwareUtils as rwUtils 2 | 3 | class bgremoval: 4 | RUNWARE_RMBG_MODELS = { 5 | "RemBG 1.4": "runware:109@1", 6 | "Bria RMBG 2.0": "runware:110@1", 7 | "BiRefNet v1 Base": "runware:112@1", 8 | "BiRefNet v1 Base - COD": "runware:112@2", 9 | "BiRefNet Dis": "runware:112@3", 10 | "BiRefNet General": "runware:112@5", 11 | "BiRefNet General RES 512x512": "runware:112@6", 12 | "BiRefNet HRSOD DHU": "runware:112@7", 13 | "BiRefNet Massive TR DIS5K TES": "runware:112@8", 14 | "BiRefNet Matting": "runware:112@9", 15 | "BiRefNet Portrait": "runware:112@10" 16 | } 17 | 18 | @classmethod 19 | def INPUT_TYPES(cls): 20 | return { 21 | "required": { 22 | "Image": ("IMAGE", { 23 | "tooltip": "Specifies the input image to be processed." 24 | }), 25 | "Model": (list(cls.RUNWARE_RMBG_MODELS.keys()), { 26 | "tooltip": "Select the background removal model to use. Different models have varying strengths depending on image content and complexity.", 27 | "default": "RemBG 1.4", 28 | }), 29 | }, 30 | "optional": { 31 | "Post Process Mask": ("BOOLEAN", { 32 | "tooltip": "Controls whether the mask should undergo additional post-processing. This step can improve the accuracy and quality of the background removal mask.", 33 | "default": False, 34 | "label_on": "Enabled", 35 | "label_off": "Disabled", 36 | }), 37 | "Return Only Mask": ("BOOLEAN", { 38 | "tooltip": "Whether to return only the mask. The mask is the opposite of the image background removal.", 39 | "default": False, 40 | "label_on": "Enabled", 41 | "label_off": "Disabled", 42 | }), 43 | "Alpha Matting": ("BOOLEAN", { 44 | "tooltip": "Alpha matting is a post-processing technique that enhances the quality of the output by refining the edges of the foreground object.", 45 | "default": False, 46 | "label_on": "Enabled", 47 | "label_off": "Disabled", 48 | }), 49 | "Alpha Matting Foreground Threshold": ("INT", { 50 | "tooltip": "Threshold value used in alpha matting to distinguish the foreground from the background. Adjusting this parameter affects the sharpness and accuracy of the foreground object edges.", 51 | "default": 240, 52 | "min": 1, 53 | "max": 255, 54 | }), 55 | "Alpha Matting Background Threshold": ("INT", { 56 | "tooltip": "Threshold value used in alpha matting to refine the background areas. It influences how aggressively the algorithm removes the background while preserving image details. The higher the value, the more computation is needed and therefore the more expensive the operation is.", 57 | "default": 10, 58 | "min": 1, 59 | "max": 255, 60 | }), 61 | "Alpha Matting Erode Size": ("INT", { 62 | "tooltip": "Specifies the size of the erosion operation used in alpha matting. Erosion helps in smoothing the edges of the foreground object for a cleaner removal of the background.", 63 | "default": 10, 64 | "min": 1, 65 | "max": 255, 66 | }), 67 | **rwUtils.RUNWARE_REMBG_OUTPUT_FORMATS, 68 | } 69 | } 70 | 71 | DESCRIPTION = "Remove backgrounds from images effortlessly using Runware's low-cost image editing Inference." 72 | FUNCTION = "rembg" 73 | RETURN_TYPES = ("IMAGE",) 74 | CATEGORY = "Runware" 75 | 76 | def rembg(self, **kwargs): 77 | image = kwargs.get("Image") 78 | modelName = kwargs.get("Model", "RemBG 1.4") 79 | postProcessMask = kwargs.get("Post Process Mask", False) 80 | returnOnlyMask = kwargs.get("Return Only Mask", False) 81 | alphaMatting = kwargs.get("Alpha Matting", False) 82 | alphaMattingForegroundThreshold = kwargs.get("Alpha Matting Foreground Threshold", 240) 83 | alphaMattingBackgroundThreshold = kwargs.get("Alpha Matting Background Threshold", 10) 84 | alphaMattingErodeSize = kwargs.get("Alpha Matting Erode Size", 10) 85 | outputFormat = kwargs.get("outputFormat", "WEBP") 86 | modelAIR = self.RUNWARE_RMBG_MODELS.get(modelName, "runware:109@1") 87 | includeExtraSettings = postProcessMask or returnOnlyMask or alphaMatting 88 | 89 | if modelAIR != "runware:109@1" and includeExtraSettings: 90 | raise ValueError( 91 | f"Oops! The selected model '{modelName}' does not support additional settings!\n" 92 | "Please switch to 'RemBG 1.4' if you wish to use these features." 93 | ) 94 | 95 | genConfig = { 96 | "taskType": "imageBackgroundRemoval", 97 | "taskUUID": rwUtils.genRandUUID(), 98 | "inputImage": rwUtils.convertTensor2IMG(image), 99 | "model": modelAIR, 100 | "outputFormat": outputFormat, 101 | "outputQuality": rwUtils.OUTPUT_QUALITY, 102 | "outputType": "base64Data", 103 | } 104 | 105 | if includeExtraSettings: 106 | settings = { 107 | "postProcessMask": postProcessMask, 108 | "returnOnlyMask": returnOnlyMask, 109 | "alphaMatting": alphaMatting, 110 | } 111 | if alphaMatting: 112 | settings["alphaMattingForegroundThreshold"] = alphaMattingForegroundThreshold 113 | settings["alphaMattingBackgroundThreshold"] = alphaMattingBackgroundThreshold 114 | settings["alphaMattingErodeSize"] = alphaMattingErodeSize 115 | genConfig["settings"] = settings 116 | 117 | genResult = rwUtils.inferenecRequest([genConfig]) 118 | images = rwUtils.convertImageB64List(genResult) 119 | return (images, ) -------------------------------------------------------------------------------- /modules/bridges/mainRoute.py: -------------------------------------------------------------------------------- 1 | from ..utils import runwareUtils as rwUtils 2 | from server import PromptServer 3 | from aiohttp import web 4 | 5 | routes = PromptServer.instance.routes 6 | 7 | @routes.post('/setAPIKey') 8 | async def setAPIKey(reqPayload): 9 | reqData = await reqPayload.json() 10 | apiKey = reqData.get('apiKey', None) 11 | if(apiKey is None or apiKey == "" or len(apiKey) < 30): 12 | return web.json_response({'success': False, 'error': 'Invalid API Key!'}) 13 | try: 14 | apiKey = apiKey.strip() 15 | apiCheckResult = rwUtils.checkAPIKey(apiKey) 16 | if(apiCheckResult == False): 17 | return web.json_response({'success': False, 'error': 'Failed To Set Your API Key, Please Try Again!'}) 18 | elif(apiCheckResult != True): 19 | return web.json_response({'success': False, 'error': apiCheckResult}) 20 | rwUtils.setAPIKey(apiKey) 21 | except Exception as e: 22 | return web.json_response({'success': False, 'error': str(e)}) 23 | return web.json_response({'success': True}) 24 | 25 | @routes.post('/setMaxTimeout') 26 | async def setMaxTimeout(reqPayload): 27 | reqData = await reqPayload.json() 28 | maxTimeout = reqData.get('maxTimeout', 90) 29 | if(maxTimeout < 5 or maxTimeout > 99): 30 | return web.json_response({'success': False, 'error': 'Invalid Timeout Value!'}) 31 | try: 32 | rwUtils.setTimeout(maxTimeout) 33 | except Exception as e: 34 | return web.json_response({'success': False, 'error': str(e)}) 35 | return web.json_response({'success': True}) 36 | 37 | @routes.post('/setOutputFormat') 38 | async def setOutputFormat(reqPayload): 39 | reqData = await reqPayload.json() 40 | outputFormat = reqData.get('outputFormat', 'WEBP') 41 | if outputFormat not in ['WEBP', 'PNG', 'JPEG']: 42 | return web.json_response({'success': False, 'error': 'Invalid Output Format!'}) 43 | try: 44 | rwUtils.setOutputFormat(outputFormat) 45 | except Exception as e: 46 | return web.json_response({'success': False, 'error': str(e)}) 47 | return web.json_response({'success': True}) 48 | 49 | @routes.post('/setOutputQuality') 50 | async def setOutputQuality(reqPayload): 51 | reqData = await reqPayload.json() 52 | outputQuality = reqData.get('outputQuality', 95) 53 | if not isinstance(outputQuality, int) or outputQuality < 20 or outputQuality > 99: 54 | return web.json_response({'success': False, 'error': 'Invalid Output Quality!'}) 55 | try: 56 | rwUtils.setOutputQuality(outputQuality) 57 | except Exception as e: 58 | return web.json_response({'success': False, 'error': str(e)}) 59 | return web.json_response({'success': True}) 60 | 61 | @routes.post('/setEnableImagesCaching') 62 | async def setEnableImagesCaching(reqPayload): 63 | reqData = await reqPayload.json() 64 | enableCaching = reqData.get('enableCaching', True) 65 | if not isinstance(enableCaching, bool): 66 | return web.json_response({'success': False, 'error': 'Invalid value for enable caching!'}) 67 | try: 68 | rwUtils.setEnableImagesCaching(enableCaching) 69 | except Exception as e: 70 | return web.json_response({'success': False, 'error': str(e)}) 71 | return web.json_response({'success': True}) 72 | 73 | @routes.post('/setMinImageCacheSize') 74 | async def setMinImageCacheSize(reqPayload): 75 | reqData = await reqPayload.json() 76 | minCacheSize = reqData.get('minCacheSize', 100) 77 | if not isinstance(minCacheSize, int) or minCacheSize < 30 or minCacheSize > 4096: 78 | return web.json_response({'success': False, 'error': 'Invalid minimum cache size!'}) 79 | try: 80 | rwUtils.setMinImageCacheSize(minCacheSize) 81 | except Exception as e: 82 | return web.json_response({'success': False, 'error': str(e)}) 83 | return web.json_response({'success': True}) 84 | 85 | @routes.post('/promptEnhance') 86 | async def promptEnhance(reqPayload): 87 | reqData = await reqPayload.json() 88 | userPrompt = reqData.get('userPrompt') 89 | utilityConfig = [{ 90 | "taskType": "promptEnhance", 91 | "taskUUID": rwUtils.genRandUUID(), 92 | "prompt": userPrompt, 93 | "promptMaxLength": 300, 94 | "promptVersions": 1, 95 | }] 96 | 97 | try: 98 | utilityResults = rwUtils.inferenecRequest(utilityConfig) 99 | except Exception as e: 100 | return web.json_response({'success': False, 'error': str(e)}) 101 | enhancedPrompt = utilityResults["data"][0]["text"] 102 | return web.json_response({'success': True, 'enhancedPrompt': enhancedPrompt}) 103 | 104 | @routes.post('/modelSearch') 105 | async def modelSearch(reqPayload): 106 | reqData = await reqPayload.json() 107 | modelQuery = reqData.get('modelQuery', "") 108 | modelArch = reqData.get('modelArch', "all") 109 | modelCategory = reqData.get('modelCat', "checkpoint") 110 | modelType = reqData.get('modelType', "base") 111 | controlNetConditioning = reqData.get('condtioning', "all") 112 | 113 | utilityConfig = [{ 114 | "taskType": "modelSearch", 115 | "taskUUID": rwUtils.genRandUUID(), 116 | "category": modelCategory, 117 | "limit": 25, 118 | "sort": "-downloadCount", 119 | }] 120 | 121 | aclTypes = ["controlnet", "lora", "lycoris", "embeddings", "vae"] 122 | 123 | if(modelCategory not in aclTypes): 124 | utilityConfig[0]["type"] = modelType 125 | elif(modelCategory == "controlnet" and controlNetConditioning != "all"): 126 | utilityConfig[0]["conditioning"] = controlNetConditioning 127 | if(modelArch != "all"): 128 | utilityConfig[0]["architecture"] = modelArch 129 | if(modelQuery != ""): 130 | utilityConfig[0]["search"] = modelQuery 131 | 132 | try: 133 | utilityResults = rwUtils.inferenecRequest(utilityConfig) 134 | except Exception as e: 135 | return web.json_response({'success': False, 'error': str(e)}) 136 | totalResults = utilityResults["data"][0]["totalResults"] 137 | if(totalResults < 1): 138 | return web.json_response({'success': False, 'error': 'No Results Found!'}) 139 | results = utilityResults["data"][0].get("results", []) 140 | if not results: 141 | return web.json_response({'success': False, 'error': 'No Results Found!'}) 142 | modelList = results 143 | return web.json_response({'success': True, 'modelList': modelList}) -------------------------------------------------------------------------------- /modules/controlNet.py: -------------------------------------------------------------------------------- 1 | from .utils import runwareUtils as rwUtils 2 | 3 | class controlNet: 4 | @classmethod 5 | def INPUT_TYPES(cls): 6 | return { 7 | "required": { 8 | "Guide Image": ("IMAGE", { 9 | "tooltip": "Specifies the preprocessed image to be used as guide to control the image generation process." 10 | }), 11 | "ControlNet Search": ("STRING", { 12 | "tooltip": "Searchg For Specific ControlNet By Name Or Civit AIR Code (eg: Canny).", 13 | }), 14 | "Model Architecture": ([ 15 | "All", 16 | "FLUX.1-Schnell", 17 | "FLUX.1-Dev", 18 | "Pony", 19 | "SD 1.5", 20 | "SD 1.5 Hyper", 21 | "SD 1.5 LCM", 22 | "SD 3", 23 | "SDXL 1.0", 24 | "SDXL 1.0 LCM", 25 | "SDXL Distilled", 26 | "SDXL Hyper", 27 | "SDXL Lightning", 28 | "SDXL Turbo", 29 | ], { 30 | "tooltip": "Choose ControlNet Model Architecture To Filter Results.", 31 | "default": "All", 32 | }), 33 | "ControlNetType": ([ 34 | "All", 35 | "Canny", 36 | "Depth", 37 | "MLSD", 38 | "Normal BAE", 39 | "Open Pose", 40 | "Tile", 41 | "Seg", 42 | "Line Art", 43 | "Line Art Anime", 44 | "Shuffle", 45 | "Scribble", 46 | "Soft Edge", 47 | ], { 48 | "tooltip": "Choose ControlNet Type To Filter Results.", 49 | "default": "All", 50 | }), 51 | "ControlNetList": ([ 52 | "civitai:38784@44716 (SD1.5 Canny)", 53 | "civitai:38784@44876 (SD1.5 Inpaint)", 54 | "civitai:38784@44877 (SD1.5 Lineart)", 55 | "civitai:38784@44795 (SD1.5 MLSD)", 56 | "civitai:38784@44774 (SD1.5 NormalBAE)", 57 | "runware:20@1 (SDXL Canny)", 58 | "runware:25@1 (Flux Dev Canny)", 59 | "runware:26@1 (Flux Dev Tile)", 60 | "runware:28@1 (Flux Dev Blur)", 61 | "runware:29@1 (Flux Dev OpenPose)", 62 | "runware:30@1 (Flux Dev Gray)", 63 | "runware:31@1 (Flux Dev Low Quality)", 64 | ], { 65 | "tooltip": "ControlNet Results Will Show UP Here So You Could Choose From.", 66 | "default": "civitai:38784@44716 (SD1.5 Canny)", 67 | }), 68 | "startStep": ("INT", { 69 | "tooltip": "Represents the step number at which the ControlNet model starts to control the inference process. (Enter -1 To Disable)", 70 | "min": -1, 71 | "max": 99, 72 | "default": -1, 73 | }), 74 | "startStepPercentage": ("INT", { 75 | "tooltip": "Represents the percentage of steps at which the ControlNet model starts to control the inference process. (Enter -1 To Disable)", 76 | "min": -1, 77 | "max": 99, 78 | "default": 0, 79 | }), 80 | "endStep": ("INT", { 81 | "tooltip": "Represents the step number at which the ControlNet preprocessor ends to control the inference process. (Enter -1 To Disable)", 82 | "min": -1, 83 | "max": 100, 84 | "default": -1, 85 | }), 86 | "endStepPercentage": ("INT", { 87 | "tooltip": "Represents the percentage of steps at which the ControlNet model ends to control the inference process. (Enter -1 To Disable)", 88 | "min": -1, 89 | "max": 100, 90 | "default": 80, 91 | }), 92 | "Control Mode": (["prompt", "controlnet", "balanced"], { 93 | "tooltip": "Choose Control Mode To Control The Inference Process.", 94 | "default": "balanced", 95 | }), 96 | "weight": ("FLOAT", { 97 | "tooltip": "Represents the weight (strength) of the ControlNet model in the image.", 98 | "default": 1.0, 99 | "min": 0, 100 | "max": 1.0, 101 | "step": 0.1, 102 | }), 103 | "Use Search Value": ("BOOLEAN", { 104 | "tooltip": "When Enabled, the value you've set in the search input will be used instead.\n\nThis is useful in case the model search API is down or you prefer to set the model manually.", 105 | "default": False, 106 | "label_on": "Enabled", 107 | "label_off": "Disabled", 108 | }), 109 | }, 110 | } 111 | 112 | DESCRIPTION = "Directly Search and Configure ControlNet Guidance to Connect It With Runware Image Inference Nodes In ComfyUI." 113 | FUNCTION = "controlNet" 114 | RETURN_TYPES = ("RUNWARECONTROLNET",) 115 | RETURN_NAMES = ("Runware ControlNet",) 116 | CATEGORY = "Runware" 117 | 118 | @classmethod 119 | def VALIDATE_INPUTS(cls, ControlNetList, startStep, startStepPercentage, endStep, endStepPercentage): 120 | if((startStep == -1 and startStepPercentage == -1) or (startStep != -1 and startStepPercentage != -1)): 121 | raise Exception("Error: Please Provide Either Start Step or Start Step Percentage!") 122 | if((endStep == -1 and endStepPercentage == -1) or (endStep != -1 and endStepPercentage != -1)): 123 | raise Exception("Error: Please Provide Either End Step or End Step Percentage!") 124 | if((startStep != -1 and endStep != -1) and (startStep > endStep)): 125 | raise Exception("Error: Start Step Cannot Be Greater Than End Step!") 126 | elif((startStep != -1 and endStep != -1) and (startStep == endStep)): 127 | raise Exception("Error: Start Step Cannot Be Equal To End Step!") 128 | if((startStepPercentage != -1 and endStepPercentage != -1) and (startStepPercentage > endStepPercentage)): 129 | raise Exception("Error: Start Step Percentage Cannot Be Greater Than End Step Percentage!") 130 | elif((startStepPercentage != -1 and endStepPercentage != -1) and (startStepPercentage == endStepPercentage)): 131 | raise Exception("Error: Start Step Percentage Cannot Be Equal To End Step Percentage!") 132 | return True 133 | 134 | def controlNet(self, **kwargs): 135 | guideImage = kwargs.get("Guide Image") 136 | enableSearchValue = kwargs.get("Use Search Value", False) 137 | searchInput = kwargs.get("ControlNet Search") 138 | 139 | if enableSearchValue: 140 | modelAIRCode = searchInput 141 | else: 142 | CRModel = kwargs.get("ControlNetList") 143 | modelAIRCode = CRModel.split(" ")[0] 144 | 145 | startStep = kwargs.get("startStep") 146 | startStepPercentage = kwargs.get("startStepPercentage") 147 | endStep = kwargs.get("endStep") 148 | endStepPercentage = kwargs.get("endStepPercentage") 149 | controlMode = kwargs.get("Control Mode") 150 | weight = kwargs.get("weight") 151 | 152 | controlNetGuideOBJ = [{ 153 | "model": modelAIRCode, 154 | "guideImage": rwUtils.convertTensor2IMG(guideImage), 155 | "weight": round(weight, 2), 156 | "controlMode": controlMode, 157 | }] 158 | 159 | if(startStep != -1): 160 | controlNetGuideOBJ[0]["startStep"] = startStep 161 | elif(startStepPercentage != -1): 162 | controlNetGuideOBJ[0]["startStepPercentage"] = startStepPercentage 163 | if(endStep != -1): 164 | controlNetGuideOBJ[0]["endStep"] = endStep 165 | elif(endStepPercentage != -1): 166 | controlNetGuideOBJ[0]["endStepPercentage"] = endStepPercentage 167 | 168 | return (controlNetGuideOBJ,) -------------------------------------------------------------------------------- /modules/controlNetCombine.py: -------------------------------------------------------------------------------- 1 | class controlNetCombine: 2 | @classmethod 3 | def INPUT_TYPES(cls): 4 | return { 5 | "required": { 6 | "ControlNet 1": ("RUNWARECONTROLNET", { 7 | "tooltip": "Connect a Runware ControlNet Node.", 8 | }), 9 | }, 10 | "optional": { 11 | "ControlNet 2": ("RUNWARECONTROLNET", { 12 | "tooltip": "Connect a Runware ControlNet Node.", 13 | }), 14 | "ControlNet 3": ("RUNWARECONTROLNET", { 15 | "tooltip": "Connect a Runware ControlNet Node.", 16 | }), 17 | }, 18 | } 19 | 20 | DESCRIPTION = "Combine One or More ControlNet's To Guide Image Generation Process in Runware Image Inference." 21 | FUNCTION = "controlNetCombine" 22 | RETURN_TYPES = ("RUNWARECONTROLNET",) 23 | RETURN_NAMES = ("Runware ControlNet's",) 24 | CATEGORY = "Runware" 25 | 26 | def controlNetCombine(self, **kwargs): 27 | controlNet1 = kwargs.get("ControlNet 1") 28 | controlNet2 = kwargs.get("ControlNet 2", None) 29 | controlNet3 = kwargs.get("ControlNet 3", None) 30 | 31 | controlNetObjectArray = controlNet1 32 | if(controlNet2 is not None): 33 | controlNetObjectArray = controlNetObjectArray + controlNet2 34 | if(controlNet3 is not None): 35 | controlNetObjectArray = controlNetObjectArray + controlNet3 36 | print(controlNetObjectArray) 37 | return (controlNetObjectArray,) -------------------------------------------------------------------------------- /modules/controlNetPreprocessor.py: -------------------------------------------------------------------------------- 1 | from .utils import runwareUtils as rwUtils 2 | 3 | class controlNetPreprocessor: 4 | @classmethod 5 | def INPUT_TYPES(cls): 6 | return { 7 | "required": { 8 | "Image": ("IMAGE", { 9 | "tooltip": "Specifies the input image to be preprocessed to generate a guide image. This guide image will be used as a reference for image generation processes, guiding the AI to generate images that are more aligned with the input image." 10 | }), 11 | "PreProcessor Type": ([ 12 | "Canny", 13 | "Depth", 14 | "MLSD", 15 | "NormalBAE", 16 | "OpenPose", 17 | "Tile", 18 | "Seg", 19 | "LineArt", 20 | "LineArtAnime", 21 | "Shuffle", 22 | "Scribble", 23 | "SoftEdge", 24 | ], { 25 | "tooltip": "Choose Preprocessor Type To Generate The Guide Image.", 26 | "default": "Canny", 27 | }), 28 | "Include Hands And Faces [ OpenPose ]": ("BOOLEAN", { 29 | "default": False, 30 | "tooltip": "Include the hands and face in the pose outline when using the OpenPose preprocessor.", 31 | "label_on": "Enabled", 32 | "label_off": "Disabled", 33 | }), 34 | "Canny Low Threshold": ("INT", { 35 | "tooltip": "Defines the lower threshold when using the Canny edge detection preprocessor.", 36 | "default": 100, 37 | "min": 0, 38 | "max": 255, 39 | }), 40 | "Canny High Threshold": ("INT", { 41 | "tooltip": "Defines the high threshold when using the Canny edge detection preprocessor.", 42 | "default": 200, 43 | "min": 0, 44 | "max": 255, 45 | }), 46 | "Image Resizing": ("BOOLEAN", { 47 | "default": False, 48 | "tooltip": "It Allows Image Resizing if the dimension of the image is higher than a specific value it will be resized.", 49 | "label_on": "Enabled", 50 | "label_off": "Disabled", 51 | }), 52 | "dimensions": ([ 53 | "Square (512x512)", "Square HD (1024x1024)", "Portrait 3:4 (768x1024)", 54 | "Portrait 9:16 (576x1024)", "Landscape 4:3 (1024x768)", 55 | "Landscape 16:9 (1024x576)", "Custom" 56 | ], { 57 | "default": "Square HD (1024x1024)", 58 | "tooltip": "Resize the dimensions of the generated Guide image by specifying its width and height in pixels, or select from the predefined options. Image dimensions must be multiples of 64 (e.g., 512x512, 1024x768).", 59 | }), 60 | "width": ("INT", { 61 | "tooltip": "If the Image height dimension is larger than this value, the output image will be resized to the specified height.", 62 | "default": 1024, 63 | "min": 512, 64 | "max": 2048, 65 | "step": 64, 66 | }), 67 | "height": ("INT", { 68 | "tooltip": "If the Image width dimension is larger than this value, the output image will be resized to the specified width.", 69 | "default": 1024, 70 | "min": 512, 71 | "max": 2048, 72 | "step": 64, 73 | }), 74 | }, 75 | } 76 | 77 | DESCRIPTION = "ControlNet offers advanced capabilities for precise image processing through the use of guide images in specific formats, known as preprocessed images. This powerful tool enhances the control and customization of image generation, enabling users to achieve desired artistic styles and detailed adjustments effectively." 78 | FUNCTION = "controlNetPreProcess" 79 | RETURN_TYPES = ("IMAGE",) 80 | RETURN_NAMES = ("GUIDE IMAGE",) 81 | CATEGORY = "Runware" 82 | 83 | def controlNetPreProcess(self, **kwargs): 84 | image = kwargs.get("Image") 85 | preProcessorType = kwargs.get("PreProcessor Type") 86 | includeHandsAndFaces = kwargs.get("Include Hands And Faces [ OpenPose ]") 87 | cannyLowThreshold = kwargs.get("Canny Low Threshold") 88 | cannyHighThreshold = kwargs.get("Canny High Threshold") 89 | imageResizing = kwargs.get("Image Resizing") 90 | width = kwargs.get("width") 91 | height = kwargs.get("height") 92 | 93 | genConfig = [ 94 | { 95 | "taskType": "imageControlNetPreProcess", 96 | "taskUUID": rwUtils.genRandUUID(), 97 | "inputImage": rwUtils.convertTensor2IMG(image), 98 | "preProcessorType": preProcessorType, 99 | "outputFormat": rwUtils.OUTPUT_FORMAT, 100 | "outputQuality": rwUtils.OUTPUT_QUALITY, 101 | "outputType": "base64Data", 102 | } 103 | ] 104 | 105 | # For Debugging Purposes Only 106 | print(f"[Debugging] Task UUID: {genConfig[0]['taskUUID']}") 107 | 108 | if(imageResizing): 109 | genConfig[0]["width"] = width 110 | genConfig[0]["height"] = height 111 | if(preProcessorType == "OpenPose" and includeHandsAndFaces): 112 | genConfig[0]["includeHandsAndFaceOpenPose"] = includeHandsAndFaces 113 | elif(preProcessorType == "Canny"): 114 | genConfig[0]["lowThresholdCanny"] = cannyLowThreshold 115 | genConfig[0]["highThresholdCanny"] = cannyHighThreshold 116 | 117 | genResult = rwUtils.inferenecRequest(genConfig) 118 | images = rwUtils.convertImageB64List(genResult) 119 | return (images, ) -------------------------------------------------------------------------------- /modules/deepCache.py: -------------------------------------------------------------------------------- 1 | class deepCache: 2 | @classmethod 3 | def INPUT_TYPES(cls): 4 | return { 5 | "required": { 6 | "DeepCache Interval": ( 7 | "INT", 8 | { 9 | "tooltip": "Represents the frequency of feature caching, specified as the number of steps between each cache operation.\n\nA larger interval value will make inference faster but may impact quality. A smaller interval prioritizes quality over speed.", 10 | "default": 3, 11 | "min": 1, 12 | "max": 5, 13 | "step": 1, 14 | }, 15 | ), 16 | "DeepCache BranchId": ( 17 | "INT", 18 | { 19 | "tooltip": "Determines which branch of the network (ordered from the shallowest to the deepest layer) is responsible for executing the caching processes.\n\nLower branch IDs (e.g., 0) result in more aggressive caching for faster generation, while higher branch IDs produce more conservative caching with potentially higher quality results.", 20 | "default": 0, 21 | "min": 0, 22 | "max": 5, 23 | "step": 1, 24 | }, 25 | ), 26 | }, 27 | } 28 | 29 | DESCRIPTION = "Can Be connected to Runware Inference to accelerate image generation by reusing past computations.\n\nDeepCache feature, which speeds up diffusion-based image generation by caching internal feature maps from the neural network.\n\nDeepCache can provide significant performance improvements for high-throughput scenarios or when generating multiple similar images." 30 | FUNCTION = "deepCache" 31 | RETURN_TYPES = ("RUNWAREACCELERATOR",) 32 | RETURN_NAMES = ("Runware Accelerator",) 33 | CATEGORY = "Runware" 34 | 35 | def deepCache(self, **kwargs): 36 | deepCacheInterval = kwargs.get("DeepCache Interval") 37 | deepCacheBranchId = kwargs.get("DeepCache BranchId") 38 | 39 | return ( 40 | { 41 | "deepCache": True, 42 | # "deepCacheInterval": deepCacheInterval, 43 | "deepCacheBranchId": deepCacheBranchId, 44 | }, 45 | ) 46 | -------------------------------------------------------------------------------- /modules/embeddingSearch.py: -------------------------------------------------------------------------------- 1 | class embeddingSearch: 2 | @classmethod 3 | def INPUT_TYPES(cls): 4 | return { 5 | "required": { 6 | "Embedding Search": ("STRING", { 7 | "tooltip": "Search For A Specific Embedding By Name Or Civit AIR Code (eg: EasyNegative).", 8 | }), 9 | "Model Architecture": ([ 10 | "All", 11 | "FLUX.1-Schnell", 12 | "FLUX.1-Dev", 13 | "Pony", 14 | "SD 1.5", 15 | "SD 1.5 Hyper", 16 | "SD 1.5 LCM", 17 | "SD 3", 18 | "SDXL 1.0", 19 | "SDXL 1.0 LCM", 20 | "SDXL Distilled", 21 | "SDXL Hyper", 22 | "SDXL Lightning", 23 | "SDXL Turbo", 24 | ], { 25 | "tooltip": "Choose Embedding Model Architecture To Filter Out The Results.", 26 | "default": "SD 1.5", 27 | }), 28 | "EmbeddingList": ([ 29 | "civitai:7808@9208 (EasyNegative)", 30 | "civitai:4629@5637 (Deep Negative V1.x)", 31 | "civitai:56519@60938 (negative_hand Negative Embedding)", 32 | "civitai:72437@77169 (BadDream + UnrealisticDream (Negative Embeddings))", 33 | "civitai:11772@25820 (veryBadImageNegative)", 34 | "civitai:71961@94057 (Fast Negative Embedding (+ FastNegativeV2))", 35 | ], { 36 | "tooltip": "Embedding Results Will Show UP Here So You Could Choose From.", 37 | "default": "civitai:7808@9208 (EasyNegative)", 38 | }), 39 | "weight": ("FLOAT", { 40 | "tooltip": "Defines the strength or influence of the Embedding on the generation process.", 41 | "default": 1.0, 42 | "min": 0, 43 | "max": 1, 44 | "step": 0.1, 45 | }), 46 | "Use Search Value": ("BOOLEAN", { 47 | "tooltip": "When Enabled, the value you've set in the search input will be used instead.\n\nThis is useful in case the model search API is down or you prefer to set the model manually.", 48 | "default": False, 49 | "label_on": "Enabled", 50 | "label_off": "Disabled", 51 | }), 52 | }, 53 | } 54 | 55 | DESCRIPTION = "Directly Search and Connect Embeddings to Runware Inference Nodes In ComfyUI." 56 | FUNCTION = "embeddingSearch" 57 | RETURN_TYPES = ("RUNWAREEMBEDDING",) 58 | RETURN_NAMES = ("Runware Embedding",) 59 | CATEGORY = "Runware" 60 | 61 | @classmethod 62 | def VALIDATE_INPUTS(cls, EmbeddingList): 63 | return True 64 | 65 | def embeddingSearch(self, **kwargs): 66 | enableSearchValue = kwargs.get("Use Search Value", False) 67 | searchInput = kwargs.get("Embedding Search") 68 | embedding_weight = kwargs.get("weight") 69 | 70 | if enableSearchValue: 71 | modelAIRCode = searchInput 72 | else: 73 | crModel = kwargs.get("EmbeddingList") 74 | modelAIRCode = crModel.split(" ")[0] 75 | 76 | return ({ 77 | "model": modelAIRCode, 78 | "weight": round(embedding_weight, 2), 79 | },) -------------------------------------------------------------------------------- /modules/embeddingsCombine.py: -------------------------------------------------------------------------------- 1 | class embeddingsCombine: 2 | @classmethod 3 | def INPUT_TYPES(cls): 4 | return { 5 | "required": { 6 | "Embedding 1": ("RUNWAREEMBEDDING", { 7 | "tooltip": "Connect a Runware Embedding From an Embedding Node.", 8 | }), 9 | }, 10 | "optional": { 11 | "Embedding 2": ("RUNWAREEMBEDDING", { 12 | "tooltip": "Connect a Runware Embedding From an Embedding Node.", 13 | }), 14 | "Embedding 3": ("RUNWAREEMBEDDING", { 15 | "tooltip": "Connect a Runware Embedding From an Embedding Node.", 16 | }), 17 | }, 18 | } 19 | 20 | DESCRIPTION = "Combine One or More Embeddings To Connect It With Runware Image Inference." 21 | FUNCTION = "embeddingsCombine" 22 | RETURN_TYPES = ("RUNWAREEMBEDDING",) 23 | RETURN_NAMES = ("Runware Embeddings",) 24 | CATEGORY = "Runware" 25 | 26 | def embeddingsCombine(self, **kwargs): 27 | embeddingV1 = kwargs.get("Embedding 1") 28 | embeddingV2 = kwargs.get("Embedding 2", None) 29 | embeddingV3 = kwargs.get("Embedding 3", None) 30 | 31 | embeddingsObjArray = [] 32 | embeddingsObjArray.append(embeddingV1) 33 | if(embeddingV2): 34 | embeddingsObjArray.append(embeddingV2) 35 | if(embeddingV3): 36 | embeddingsObjArray.append(embeddingV3) 37 | return (embeddingsObjArray,) -------------------------------------------------------------------------------- /modules/imageCaptioning.py: -------------------------------------------------------------------------------- 1 | from .utils import runwareUtils as rwUtils 2 | 3 | class imageCaptioning: 4 | @classmethod 5 | def INPUT_TYPES(cls): 6 | return { 7 | "required": { 8 | "Always Recaption": ("BOOLEAN", { 9 | "default": False, 10 | "tooltip": "Enable this option to always recaption the image each time you run the workflow.", 11 | "label_on": "Enabled", 12 | "label_off": "Disabled", 13 | }), 14 | "Image": ("IMAGE", { 15 | "tooltip": "Specify The Image to be Captioned." 16 | }), 17 | }, 18 | "optional": { 19 | "Image Description": ("STRING", { 20 | "multiline": True, 21 | "placeholder": "You Don't Have to write Anything here.\nThe Image Description Will Be Generated Automatically.", 22 | }), 23 | }, 24 | "hidden": { "node_id": "UNIQUE_ID" } 25 | } 26 | 27 | DESCRIPTION = "Image to text, also known as image captioning, allows you to obtain descriptive text prompts based on uploaded or previously generated images. This process is instrumental in generating textual descriptions that can be used to create additional images or provide detailed insights into visual content." 28 | 29 | FUNCTION = "imageCaptioning" 30 | RETURN_TYPES = ("STRING",) 31 | RETURN_NAMES = ("IMAGE PROMPT",) 32 | CATEGORY = "Runware" 33 | OUTPUT_NODE = True 34 | 35 | @classmethod 36 | def IS_CHANGED(s, **kwargs): 37 | alwaysRecaption = kwargs.get("Always Recaption") 38 | if(alwaysRecaption): 39 | return float("NAN") 40 | return True 41 | 42 | def imageCaptioning(self, **kwargs): 43 | image = kwargs.get("Image") 44 | genConfig = [ 45 | { 46 | "taskType": "imageCaption", 47 | "taskUUID": rwUtils.genRandUUID(), 48 | "inputImage": rwUtils.convertTensor2IMG(image) 49 | } 50 | ] 51 | 52 | genResult = rwUtils.inferenecRequest(genConfig) 53 | genText = genResult["data"][0]["text"] 54 | rwUtils.sendImageCaption(genText, kwargs.get("node_id")) 55 | return (genText, ) -------------------------------------------------------------------------------- /modules/imageInference.py: -------------------------------------------------------------------------------- 1 | from .utils import runwareUtils as rwUtils 2 | 3 | 4 | class txt2img: 5 | @classmethod 6 | def INPUT_TYPES(cls): 7 | return { 8 | "required": { 9 | "Model": ("RUNWAREMODEL", { 10 | "tooltip": "Connect a Runware Model From Runware Model Node.", 11 | }), 12 | "positivePrompt": ("STRING", { 13 | "multiline": True, 14 | "placeholder": "Positive Prompt: a text instruction to guide the model on generating the image. It is usually a sentence or a paragraph that provides positive guidance for the task. This parameter is essential to shape the desired results.\n\nYou Can Press (Ctrl + Alt + E) To Enhance The Prompt!", 15 | "tooltip": "Positive Prompt: a text instruction to guide the model on generating the image. You Can Also Press (Ctrl + Alt + E) To Enhance The Prompt!" 16 | }), 17 | "negativePrompt": ("STRING", { 18 | "multiline": True, 19 | "placeholder": "Negative Prompt: a text instruction to guide the model on generating the image. It is usually a sentence or a paragraph that provides negative guidance for the task. This parameter helps to avoid certain undesired results.", 20 | "tooltip": "Negative Prompt: a text instruction to guide the model on generating the image." 21 | }), 22 | "Multi Inference Mode": ("BOOLEAN", { 23 | "tooltip": "If Enabled the node will skip the image generation process and will only return the Runware Task Object to be used in the Multi Inference Node.", 24 | "default": False, 25 | "label_on": "Enabled", 26 | "label_off": "Disabled", 27 | }), 28 | "Prompt Weighting": (["Disabled", "sdEmbeds", "Compel"], { 29 | "default": "Disabled", 30 | "tooltip": "Prompt weighting allows you to adjust how strongly different parts of your prompt influence the generated image.\n\nChoose between \"compel\" notation with advanced weighting operations or \"sdEmbeds\" for simple emphasis adjustments.\n\nCompel Example: \"small+ dog, pixar style\"\n\nsdEmbeds Example: \"(small:2.5) dog, pixar style\"", 31 | }), 32 | "dimensions": ([ 33 | "Square (512x512)", "Square HD (1024x1024)", "Portrait 3:4 (768x1024)", 34 | "Portrait 9:16 (576x1024)", "Landscape 4:3 (1024x768)", 35 | "Landscape 16:9 (1024x576)", "Custom" 36 | ], { 37 | "default": "Square (512x512)", 38 | "tooltip": "Adjust the dimensions of the generated image by specifying its width and height in pixels, or select from the predefined options. Image dimensions must be multiples of 64 (e.g., 512x512, 1024x768).", 39 | }), 40 | "width": ("INT", { 41 | "tooltip": "The Width of the image in pixels.", 42 | "default": 512, 43 | "min": 128, 44 | "max": 2048, 45 | "step": 64, 46 | }), 47 | "height": ("INT", { 48 | "tooltip": "The Height of the image in pixels.", 49 | "default": 512, 50 | "min": 128, 51 | "max": 2048, 52 | "step": 64, 53 | }), 54 | "steps": ("INT", { 55 | "tooltip": "The number of steps is the number of iterations the model will perform to generate the image. The higher the number of steps, the more detailed the image will be. However, increasing the number of steps will also increase the time it takes to generate the image and may not always result in a better image.", 56 | "default": 4, 57 | "min": 1, 58 | "max": 100, 59 | }), 60 | "scheduler": (['Default', 'DDIM', 'DDIMScheduler', 'DDPMScheduler', 'DEISMultistepScheduler', 'DPMSolverSinglestepScheduler', 'DPMSolverMultistepScheduler', 'DPMSolverMultistepInverse', 'DPM++', 'DPM++ Karras', 'DPM++ 2M', 'DPM++ 2M Karras', 'DPM++ 2M SDE Karras', 'DPM++ 2M SDE', 'DPM++ 3M', 'DPM++ 3M Karras', 'DPM++ SDE Karras', 'DPM++ SDE', 'EDMEulerScheduler', 'EDMDPMSolverMultistepScheduler', 'Euler', 'EulerDiscreteScheduler', 'Euler Karras', 'Euler a', 'EulerAncestralDiscreteScheduler', 'FlowMatchEulerDiscreteScheduler', 'Heun', 'HeunDiscreteScheduler', 'Heun Karras', 'IPNDMScheduler', 'KDPM2DiscreteScheduler', 'KDPM2AncestralDiscreteScheduler', 'LCM', 'LCMScheduler', 'LMS', 'LMSDiscreteScheduler', 'LMS Karras', 'PNDMScheduler', 'TCDScheduler', 'UniPC', 'UniPCMultistepScheduler', 'UniPC Karras', 'UniPC 2M', 'UniPC 2M Karras', 'UniPC 3M', 'UniPC 3M Karras'], { 61 | "tooltip": "An scheduler is a component that manages the inference process. Different schedulers can be used to achieve different results like more detailed images, faster inference, or more accurate results.", 62 | "default": "Default", 63 | }), 64 | "cfgScale": ("FLOAT", { 65 | "tooltip": "Guidance scale represents how closely the images will resemble the prompt or how much freedom the AI model has. Higher values are closer to the prompt. Low values may reduce the quality of the results.", 66 | "default": 6.5, 67 | "min": 1.0, 68 | "max": 30.0, 69 | "step": 0.1, 70 | }), 71 | "seed": ("INT", { 72 | "tooltip": "A value used to randomize the image generation. If you want to make images reproducible (generate the same image multiple times), you can use the same seed value.", 73 | "default": rwUtils.genRandSeed(), 74 | "min": 1, 75 | "max": 9223372036854776000, 76 | }), 77 | "clipSkip": ("INT", { 78 | "tooltip": "Enables skipping layers of the CLIP embedding process, leading to quicker and more varied image generation.", 79 | "default": 0, 80 | "min": 0, 81 | "max": 2, 82 | }), 83 | "strength": ("FLOAT", { 84 | "tooltip": "When doing Image-to-Image or Inpainting, this parameter is used to determine the influence of the seedImage image in the generated output. A lower value results in more influence from the original image, while a higher value allows more creative deviation.", 85 | "default": 0.8, 86 | "min": 0.0, 87 | "max": 1.0, 88 | "step": 0.01, 89 | }), 90 | "Mask Margin": ("BOOLEAN", { 91 | "tooltip": "Enables Or Disables Mask Margin Feature.", 92 | "default": False, 93 | "label_on": "Enabled", 94 | "label_off": "Disabled", 95 | }), 96 | "maskMargin": ("INT", { 97 | "tooltip": "Adds extra context pixels around the masked region during inpainting. When this parameter is present, the model will zoom into the masked area, considering these additional pixels to create more coherent and well-integrated details.", 98 | "default": 32, 99 | "min": 32, 100 | "max": 128, 101 | }), 102 | "batchSize": ("INT", { 103 | "tooltip": "The number of images to generate in a single request.", 104 | "default": 1, 105 | "min": 1, 106 | "max": 10, 107 | }), 108 | }, 109 | "optional": { 110 | "Accelerator": ("RUNWAREACCELERATOR", { 111 | "tooltip": "Connect a Runware Accelerator Node (TeaCache, DeepCache) to speed up the image generation process.", 112 | }), 113 | "Lora": ("RUNWARELORA", { 114 | "tooltip": "Connect a Runware Lora From Lora Search Node Or Lora Combine For Multiple Lora's Together.", 115 | }), 116 | "Outpainting": ("RUNWAREOUTPAINT", { 117 | "tooltip": "Connect a Runware Outpainting Node to extend the image boundaries in different directions.", 118 | }), 119 | "IPAdapters": ("RUNWAREIPAdapter", { 120 | "tooltip": "Connect a Runware IP Adapter node or IP Adapter Combine node to use reference images for guiding the generation.", 121 | }), 122 | "ControlNet": ("RUNWARECONTROLNET", { 123 | "tooltip": "Connect a Runware ControlNet Guidance Node to help the model generate images that align with the desired structure.", 124 | }), 125 | "Refiner": ("RUNWAREREFINER", { 126 | "tooltip": "Connect a Runware Refiner Node to help create higher quality image outputs by incorporating specialized models designed to enhance image details and overall coherence.", 127 | }), 128 | "seedImage": ("IMAGE", { 129 | "tooltip": "Specifies the seed image to be used for the diffusion process, when doing Image-to-Image, Inpainting or Outpainting, this parameter is required.", 130 | }), 131 | "maskImage": ("MASK", { 132 | "tooltip": "Specifies the mask image to be used for the inpainting process, when doing Inpainting, this parameter is required.", 133 | }), 134 | "Embeddings": ("RUNWAREEMBEDDING", { 135 | "tooltip": "Connect a Runware Embedding Node to help the model generate images that align with the desired structure.", 136 | }), 137 | "VAE": ("RUNWAREVAE", { 138 | "tooltip": "Connect a Runware VAE Node to help the model generate images that align with the desired structure.", 139 | }), 140 | } 141 | } 142 | 143 | @classmethod 144 | def VALIDATE_INPUTS(cls, positivePrompt, negativePrompt): 145 | if (positivePrompt is not None and (positivePrompt == "" or len(positivePrompt) < 3 or len(positivePrompt) > 2000)): 146 | raise Exception( 147 | "Positive Prompt Must Be Between 3 And 2000 characters!") 148 | if (negativePrompt is not None and negativePrompt != "" and (len(negativePrompt) < 3 or len(negativePrompt) > 2000)): 149 | raise Exception( 150 | "Negative Prompt Must Be Between 3 And 2000 characters!") 151 | return True 152 | 153 | DESCRIPTION = "Generates Images Lightning Fast With Runware Image Inference Sonic Engine." 154 | FUNCTION = "generateImage" 155 | RETURN_TYPES = ("IMAGE", "RUNWARETASK") 156 | RETURN_NAMES = ("IMAGE", "RW-Task") 157 | CATEGORY = "Runware" 158 | 159 | def generateImage(self, **kwargs): 160 | runwareModel = kwargs.get("Model") 161 | positivePrompt = kwargs.get("positivePrompt") 162 | negativePrompt = kwargs.get("negativePrompt", None) 163 | multiInferenceMode = kwargs.get("Multi Inference Mode", False) 164 | promptWeighting = kwargs.get("Prompt Weighting", "Disabled") 165 | runwareControlNet = kwargs.get("ControlNet", None) 166 | runwareAccelerator = kwargs.get("Accelerator", None) 167 | runwareLora = kwargs.get("Lora", None) 168 | runwareOutpainting = kwargs.get("Outpainting", None) 169 | runwareIPAdapters = kwargs.get("IPAdapters", None) 170 | runwareRefiner = kwargs.get("Refiner", None) 171 | runwareEmbedding = kwargs.get("Embeddings", None) 172 | runwareVAE = kwargs.get("VAE", None) 173 | seedImage = kwargs.get("seedImage", None) 174 | seedImageStrength = kwargs.get("strength", 0.8) 175 | maskImage = kwargs.get("maskImage", None) 176 | enableMaskMargin = kwargs.get("Mask Margin", False) 177 | maskImageMargin = kwargs.get("maskMargin", 32) 178 | clipSkip = kwargs.get("clipSkip", 0) 179 | height = kwargs.get("height", 512) 180 | width = kwargs.get("width", 512) 181 | steps = kwargs.get("steps", 4) 182 | scheduler = kwargs.get("scheduler", "Default") 183 | cfgScale = kwargs.get("cfgScale", 6.5) 184 | seed = kwargs.get("seed") 185 | batchSize = kwargs.get("batchSize", 1) 186 | 187 | if (maskImage is not None and seedImage is None): 188 | raise Exception("Mask Image Requires Seed Image To Be Provided!") 189 | 190 | genConfig = [ 191 | { 192 | "taskType": "imageInference", 193 | "taskUUID": rwUtils.genRandUUID(), 194 | "positivePrompt": positivePrompt, 195 | "height": height, 196 | "width": width, 197 | "model": runwareModel, 198 | "steps": steps, 199 | "CFGScale": cfgScale, 200 | "scheduler": scheduler, 201 | "clipSkip": clipSkip, 202 | "seed": seed, 203 | "outputType": "base64Data", 204 | "outputFormat": rwUtils.OUTPUT_FORMAT, 205 | "outputQuality": rwUtils.OUTPUT_QUALITY, 206 | "numberResults": batchSize, 207 | } 208 | ] 209 | 210 | # For Debugging Purposes Only 211 | print(f"[Debugging] Task UUID: {genConfig[0]['taskUUID']}") 212 | 213 | if (negativePrompt is not None and negativePrompt != ""): 214 | genConfig[0]["negativePrompt"] = negativePrompt 215 | if (promptWeighting != "Disabled"): 216 | if (promptWeighting == "sdEmbeds"): 217 | genConfig[0]["promptWeighting"] = "sdEmbeds" 218 | else: 219 | genConfig[0]["promptWeighting"] = "compel" 220 | if(runwareAccelerator is not None): 221 | genConfig[0]["acceleratorOptions"] = runwareAccelerator 222 | if (runwareLora is not None): 223 | if (isinstance(runwareLora, list)): 224 | genConfig[0]["lora"] = runwareLora 225 | elif (isinstance(runwareLora, dict)): 226 | genConfig[0]["lora"] = [runwareLora] 227 | if (runwareIPAdapters is not None): 228 | if (isinstance(runwareIPAdapters, list)): 229 | genConfig[0]["ipAdapters"] = runwareIPAdapters 230 | elif (isinstance(runwareIPAdapters, dict)): 231 | genConfig[0]["ipAdapters"] = [runwareIPAdapters] 232 | if (runwareEmbedding is not None): 233 | if (isinstance(runwareEmbedding, list)): 234 | genConfig[0]["embeddings"] = runwareEmbedding 235 | elif (isinstance(runwareEmbedding, dict)): 236 | genConfig[0]["embeddings"] = [runwareEmbedding] 237 | if (runwareOutpainting is not None): 238 | genConfig[0]["outpaint"] = runwareOutpainting 239 | if (runwareVAE is not None): 240 | genConfig[0]["vae"] = runwareVAE 241 | if (runwareControlNet is not None): 242 | genConfig[0]["controlNet"] = runwareControlNet 243 | if (runwareRefiner is not None): 244 | genConfig[0]["refiner"] = runwareRefiner 245 | if (seedImage is not None): 246 | seedImage = rwUtils.convertTensor2IMG(seedImage) 247 | genConfig[0]["seedImage"] = seedImage 248 | genConfig[0]["strength"] = seedImageStrength 249 | if (maskImage is not None): 250 | maskImage = rwUtils.convertTensor2IMG(maskImage) 251 | genConfig[0]["maskImage"] = maskImage 252 | if (enableMaskMargin): 253 | genConfig[0]["maskMargin"] = maskImageMargin 254 | 255 | if (multiInferenceMode): 256 | return (None, genConfig) 257 | else: 258 | genResult = rwUtils.inferenecRequest(genConfig) 259 | images = rwUtils.convertImageB64List(genResult) 260 | return (images, None) 261 | -------------------------------------------------------------------------------- /modules/imageMasking.py: -------------------------------------------------------------------------------- 1 | from .utils import runwareUtils as rwUtils 2 | 3 | 4 | class imageMasking: 5 | @classmethod 6 | def INPUT_TYPES(cls): 7 | return { 8 | "required": { 9 | "Image": ("IMAGE", { 10 | "tooltip": "Specifies the input image to be processed for mask generation." 11 | }), 12 | "Detection Model": ([ 13 | "face_yolov8n", "face_yolov8s", "mediapipe_face_full", "mediapipe_face_short", "mediapipe_face_mesh", 14 | "mediapipe_face_mesh_eyes_only", "eyes_mesh_mediapipe", "nose_mesh_mediapipe", "lips_mesh_mediapipe", 15 | "eyes_lips_mesh", "nose_eyes_mesh", "nose_lips_mesh", "hand_yolov8n", "person_yolov8n-seg", "person_yolov8s-seg" 16 | ], { 17 | "tooltip": "Specifies the specialized detection model to use for mask generation.", 18 | "default": "face_yolov8n", 19 | }), 20 | "Confidence": ("FLOAT", { 21 | "tooltip": "Confidence threshold for detections. Only detections with confidence scores above this threshold will be included in the mask.", 22 | "default": 0.25, 23 | "min": 0, 24 | "max": 1, 25 | "step": 0.01, 26 | }), 27 | "Max Detections": ("INT", { 28 | "tooltip": "Limits the maximum number of elements (faces, hands, or people) that will be detected and masked in the image. If there are more elements than this value, only the ones with highest confidence scores will be included.", 29 | "default": 6, 30 | "min": 1, 31 | "max": 20, 32 | }), 33 | "Mask Padding": ("INT", { 34 | "tooltip": "Extends or reduces the detected mask area by the specified number of pixels. Positive values create a larger masked region (useful when you want to ensure complete coverage of the element), while negative values shrink the mask (useful for tighter, more focused areas).", 35 | "default": 4, 36 | "min": -40, 37 | "max": 40, 38 | }), 39 | "Mask Blur": ("INT", { 40 | "tooltip": "Extends the mask by the specified number of pixels with a gradual fade-out effect, creating smooth transitions between masked and unmasked regions in the final result.", 41 | "default": 4, 42 | "min": 0, 43 | "max": 20, 44 | }), 45 | }, 46 | } 47 | 48 | DESCRIPTION = "Image Masking provides intelligent detection and mask generation for specific elements in images, particularly optimized for faces, hands, and people. Built on advanced detection models, this feature enhances the inpainting workflow by automatically creating precise masks around detected elements, enabling targeted enhancement and detailing." 49 | 50 | FUNCTION = "imageMasking" 51 | RETURN_TYPES = ("IMAGE", "IMAGE", "MASK") 52 | RETURN_NAMES = ("Image", "Mask Preview", "Mask") 53 | CATEGORY = "Runware" 54 | 55 | def imageMasking(self, **kwargs): 56 | image = kwargs.get("Image") 57 | detectionModel = kwargs.get("Detection Model") 58 | confidence = kwargs.get("Confidence") 59 | maxDetections = kwargs.get("Max Detections") 60 | maskPadding = kwargs.get("Mask Padding") 61 | maskBlur = kwargs.get("Mask Blur") 62 | 63 | model_mapping = { 64 | "face_yolov8n": "runware:35@1", 65 | "face_yolov8s": "runware:35@2", 66 | "mediapipe_face_full": "runware:35@6", 67 | "mediapipe_face_short": "runware:35@7", 68 | "mediapipe_face_mesh": "runware:35@8", 69 | "mediapipe_face_mesh_eyes_only": "runware:35@9", 70 | "eyes_mesh_mediapipe": "runware:35@15", 71 | "nose_mesh_mediapipe": "runware:35@13", 72 | "lips_mesh_mediapipe": "runware:35@14", 73 | "eyes_lips_mesh": "runware:35@10", 74 | "nose_eyes_mesh": "runware:35@11", 75 | "nose_lips_mesh": "runware:35@12", 76 | "hand_yolov8n": "runware:35@3", 77 | "person_yolov8n-seg": "runware:35@4", 78 | "person_yolov8s-seg": "runware:35@5" 79 | } 80 | detectionModel = model_mapping.get(detectionModel, detectionModel) 81 | 82 | genConfig = [ 83 | { 84 | "taskType": "imageMasking", 85 | "taskUUID": rwUtils.genRandUUID(), 86 | "inputImage": rwUtils.convertTensor2IMG(image), 87 | "model": detectionModel, 88 | "confidence": confidence, 89 | "maxDetections": maxDetections, 90 | "outputType": "base64Data", 91 | "outputFormat": rwUtils.OUTPUT_FORMAT, 92 | "outputQuality": rwUtils.OUTPUT_QUALITY, 93 | } 94 | ] 95 | 96 | genConfig[0]["maskPadding"] = maskPadding 97 | genConfig[0]["maskBlur"] = maskBlur 98 | 99 | genResult = rwUtils.inferenecRequest(genConfig) 100 | images = rwUtils.convertImageB64List(genResult) 101 | return (image, images[0], images[0]) 102 | -------------------------------------------------------------------------------- /modules/ipAdapter.py: -------------------------------------------------------------------------------- 1 | from .utils import runwareUtils as rwUtils 2 | 3 | class ipAdapter: 4 | RUNWARE_IPADAPTER_MODELS = { 5 | "FLUX.1 Dev Redux": "runware:105@1", 6 | "IP Adapter SDXL": "runware:55@1", 7 | "IP Adapter SDXL Plus": "runware:55@2", 8 | "IP Adapter SDXL Plus Face": "runware:55@3", 9 | "IP Adapter SDXL Vit-H": "runware:55@4", 10 | "IP Adapter SD 1.5": "runware:55@5", 11 | "IP Adapter SD 1.5 Plus": "runware:55@6", 12 | "IP Adapter SD 1.5 Light": "runware:55@7", 13 | "IP Adapter SD 1.5 Plus Face": "runware:55@8", 14 | "IP Adapter SD 1.5 Full Face": "runware:55@9", 15 | "IP Adapter SD 1.5 Vit-G": "runware:55@10" 16 | } 17 | 18 | @classmethod 19 | def INPUT_TYPES(cls): 20 | return { 21 | "required": { 22 | "Reference Image": ("IMAGE",), 23 | "Model": (list(cls.RUNWARE_IPADAPTER_MODELS.keys()), { 24 | "tooltip": "Choose IP Adapter model to use for reference-based image generation.", 25 | "default": "IP Adapter SDXL", 26 | }), 27 | "weight": ("FLOAT", { 28 | "default": 1.0, 29 | "min": 0.0, 30 | "max": 1.0, 31 | "step": 0.1, 32 | "tooltip": "Represents the strength or influence of this IP-Adapter in the generation process.\n\nA value of 0 means no influence, while 1 means maximum influence.\n\nNote: This Value Is Ignored For Flux Redux.", 33 | }), 34 | }, 35 | } 36 | 37 | RETURN_TYPES = ("RUNWAREIPAdapter",) 38 | RETURN_NAMES = ("IPAdapter",) 39 | FUNCTION = "ipAdapter" 40 | CATEGORY = "Runware" 41 | DESCRIPTION = "IP-Adapters enable image-prompted generation, allowing you to use reference images to guide the style and content of your generations. Multiple IP Adapters can be used simultaneously." 42 | 43 | def ipAdapter(self, **kwargs): 44 | refImage = kwargs.get("Reference Image") 45 | modelName = kwargs.get("Model") 46 | weight = kwargs.get("weight") 47 | modelAirCode = self.RUNWARE_IPADAPTER_MODELS.get(modelName) 48 | guideImage = rwUtils.convertTensor2IMG(refImage) 49 | return ({ 50 | "model": modelAirCode, 51 | "guideImage": guideImage, 52 | "weight": round(weight, 2), 53 | },) -------------------------------------------------------------------------------- /modules/ipAdapterCombine.py: -------------------------------------------------------------------------------- 1 | class ipAdapterCombine: 2 | @classmethod 3 | def INPUT_TYPES(cls): 4 | return { 5 | "required": { 6 | "IPAdapter 1": ("RUNWAREIPAdapter", { 7 | "tooltip": "Connect an IPAdapter from Runware IPAdapter node.", 8 | }), 9 | }, 10 | "optional": { 11 | "IPAdapter 2": ("RUNWAREIPAdapter", { 12 | "tooltip": "Connect an IPAdapter from Runware IPAdapter node.", 13 | }), 14 | "IPAdapter 3": ("RUNWAREIPAdapter", { 15 | "tooltip": "Connect an IPAdapter from Runware IPAdapter node.", 16 | }), 17 | }, 18 | } 19 | 20 | DESCRIPTION = "Combine multiple IPAdapters to connect with Runware Image Inference." 21 | FUNCTION = "ipAdapterCombine" 22 | RETURN_TYPES = ("RUNWAREIPAdapter",) 23 | RETURN_NAMES = ("IPAdapters",) 24 | CATEGORY = "Runware" 25 | 26 | def ipAdapterCombine(self, **kwargs): 27 | ipAdapter1 = kwargs.get("IPAdapter 1") 28 | ipAdapter2 = kwargs.get("IPAdapter 2", None) 29 | ipAdapter3 = kwargs.get("IPAdapter 3", None) 30 | 31 | ipAdapterArray = [] 32 | ipAdapterArray.append(ipAdapter1) 33 | if(ipAdapter2): 34 | ipAdapterArray.append(ipAdapter2) 35 | if(ipAdapter3): 36 | ipAdapterArray.append(ipAdapter3) 37 | return (ipAdapterArray,) -------------------------------------------------------------------------------- /modules/loraCombine.py: -------------------------------------------------------------------------------- 1 | class loraCombine: 2 | @classmethod 3 | def INPUT_TYPES(cls): 4 | return { 5 | "required": { 6 | "Lora 1": ("RUNWARELORA", { 7 | "tooltip": "Connect a Runware Lora From Lora Search Node.", 8 | }), 9 | }, 10 | "optional": { 11 | "Lora 2": ("RUNWARELORA", { 12 | "tooltip": "Connect a Runware Lora From Lora Search Node.", 13 | }), 14 | "Lora 3": ("RUNWARELORA", { 15 | "tooltip": "Connect a Runware Lora From Lora Search Node.", 16 | }), 17 | }, 18 | } 19 | 20 | DESCRIPTION = "Combine One or More Lora's To Connect It With Runware Image Inference." 21 | FUNCTION = "loraCombine" 22 | RETURN_TYPES = ("RUNWARELORA",) 23 | RETURN_NAMES = ("Runware Lora's",) 24 | CATEGORY = "Runware" 25 | 26 | def loraCombine(self, **kwargs): 27 | runwareLora1 = kwargs.get("Lora 1") 28 | runwareLora2 = kwargs.get("Lora 2", None) 29 | runwareLora3 = kwargs.get("Lora 3", None) 30 | 31 | loraObjectArray = [] 32 | loraObjectArray.append(runwareLora1) 33 | if(runwareLora2): 34 | loraObjectArray.append(runwareLora2) 35 | if(runwareLora3): 36 | loraObjectArray.append(runwareLora3) 37 | return (loraObjectArray,) -------------------------------------------------------------------------------- /modules/loraSearch.py: -------------------------------------------------------------------------------- 1 | class loraSearch: 2 | @classmethod 3 | def INPUT_TYPES(cls): 4 | return { 5 | "required": { 6 | "Lora Search": ("STRING", { 7 | "tooltip": "Search For Specific Lora By Name Or Civit AIR Code (eg: Cyberpunk).", 8 | }), 9 | "Model Architecture": ([ 10 | "All", 11 | "FLUX.1-Schnell", 12 | "FLUX.1-Dev", 13 | "Pony", 14 | "SD 1.5", 15 | "SD 1.5 Hyper", 16 | "SD 1.5 LCM", 17 | "SD 3", 18 | "SDXL 1.0", 19 | "SDXL 1.0 LCM", 20 | "SDXL Distilled", 21 | "SDXL Hyper", 22 | "SDXL Lightning", 23 | "SDXL Turbo", 24 | ], { 25 | "tooltip": "Choose Lora Model Architecture To Filter Results.", 26 | "default": "All", 27 | }), 28 | "LoraType": ([ 29 | "Lora", 30 | "LyCORIS" 31 | ], { 32 | "tooltip": "Choose Lora Type To Filter Out The Results.", 33 | "default": "Lora", 34 | }), 35 | "LoraList": ([ 36 | "civitai:58390@62833 (Detail Tweaker LoRA (细节调整LoRA))", 37 | "civitai:82098@87153 (Add More Details - Detail Enhancer / Tweaker (细节调整) LoRA)", 38 | "civitai:122359@135867 (Detail Tweaker XL)", 39 | "civitai:14171@16677 (Cute_girl_mix4)", 40 | "civitai:13941@16576 (epi_noiseoffset)", 41 | "civitai:25995@32988 (blindbox/大概是盲盒)", 42 | ], { 43 | "tooltip": "Lora Results Will Show UP Here So You Could Choose From.", 44 | "default": "civitai:58390@62833 (Detail Tweaker LoRA (细节调整LoRA))", 45 | }), 46 | "weight": ("FLOAT", { 47 | "tooltip": "Defines the strength or influence of the LoRA model in the generation process.", 48 | "default": 1.0, 49 | "min": -4.0, 50 | "max": 4.0, 51 | "step": 0.1, 52 | }), 53 | "Use Search Value": ("BOOLEAN", { 54 | "tooltip": "When Enabled, the value you've set in the search input will be used instead.\n\nThis is useful in case the model search API is down or you prefer to set the model manually.", 55 | "default": False, 56 | "label_on": "Enabled", 57 | "label_off": "Disabled", 58 | }), 59 | }, 60 | } 61 | 62 | DESCRIPTION = "Directly Search and Connect Lora's to Runware Inference Nodes In ComfyUI." 63 | FUNCTION = "loraSearch" 64 | RETURN_TYPES = ("RUNWARELORA",) 65 | RETURN_NAMES = ("Runware Lora",) 66 | CATEGORY = "Runware" 67 | 68 | @classmethod 69 | def VALIDATE_INPUTS(cls, LoraList): 70 | return True 71 | 72 | def loraSearch(self, **kwargs): 73 | enableSearchValue = kwargs.get("Use Search Value", False) 74 | searchInput = kwargs.get("Lora Search") 75 | lora_weight = kwargs.get("weight") 76 | 77 | if enableSearchValue: 78 | modelAIRCode = searchInput 79 | else: 80 | crModel = kwargs.get("LoraList") 81 | modelAIRCode = crModel.split(" ")[0] 82 | 83 | return ({ 84 | "model": modelAIRCode, 85 | "weight": round(lora_weight, 2), 86 | },) -------------------------------------------------------------------------------- /modules/modelSearch.py: -------------------------------------------------------------------------------- 1 | class modelSearch: 2 | @classmethod 3 | def INPUT_TYPES(cls): 4 | return { 5 | "required": { 6 | "Model Search": ("STRING", { 7 | "tooltip": "Search For Specific Model By Name Or Civit AIR Code (eg: Juggernaut).", 8 | }), 9 | "Model Architecture": ([ 10 | "All", 11 | "FLUX.1-Schnell", 12 | "FLUX.1-Dev", 13 | "Pony", 14 | "SD 1.5", 15 | "SD 1.5 Hyper", 16 | "SD 1.5 LCM", 17 | "SD 3", 18 | "SDXL 1.0", 19 | "SDXL 1.0 LCM", 20 | "SDXL Distilled", 21 | "SDXL Hyper", 22 | "SDXL Lightning", 23 | "SDXL Turbo", 24 | ], { 25 | "tooltip": "Choose Model Architecture To Filter Results.", 26 | "default": "All", 27 | }), 28 | "ModelType": ([ 29 | "Base Model", 30 | "Inpainting Model", 31 | ], { 32 | "tooltip": "Choose Model Type To Filter Results.", 33 | "default": "Base Model", 34 | }), 35 | "ModelList": ([ 36 | "rundiffusion:110@101 (Juggernaut Lightning Flux by RunDiffusion)", 37 | "rundiffusion:130@100 (Juggernaut Pro Flux by RunDiffusion)", 38 | "runware:100@1 (Flux Schnell)", 39 | "runware:101@1 (Flux Dev)", 40 | "runware:5@1 (SD3)", 41 | "civitai:4384@128713 (SDXL 1.5 DreamShaper)", 42 | "civitai:43331@176425 (SD 1.5 majicMIX realistic 麦橘写实)", 43 | "civitai:101055@128078 (SDXL v1.0 VAE fix)", 44 | "civitai:133005@288982 (SDXL Juggernaut XL V8)", 45 | ], { 46 | "tooltip": "Model Results Will Show UP Here So You Could Choose From. If You didn't Search For Anything this will show featured Model List.", 47 | "default": "runware:100@1 (Flux Schnell)", 48 | }), 49 | "Use Search Value": ("BOOLEAN", { 50 | "tooltip": "When Enabled, the value you've set in the search input will be used instead.\n\nThis is useful in case the model search API is down or you prefer to set the model manually.", 51 | "default": False, 52 | "label_on": "Enabled", 53 | "label_off": "Disabled", 54 | }), 55 | }, 56 | } 57 | 58 | DESCRIPTION = "Directly Search and Connect Model Checkpoints to Runware Inference Nodes In ComfyUI." 59 | FUNCTION = "modelSearch" 60 | RETURN_TYPES = ("RUNWAREMODEL",) 61 | RETURN_NAMES = ("Runware Model",) 62 | CATEGORY = "Runware" 63 | 64 | @classmethod 65 | def VALIDATE_INPUTS(cls, ModelList): 66 | return True 67 | 68 | def modelSearch(self, **kwargs): 69 | enableSearchValue = kwargs.get("Use Search Value", False) 70 | searchInput = kwargs.get("Model Search") 71 | 72 | if enableSearchValue: 73 | modelAIRCode = searchInput 74 | else: 75 | crModel = kwargs.get("ModelList") 76 | modelAIRCode = crModel.split(" ")[0] 77 | 78 | return (modelAIRCode,) -------------------------------------------------------------------------------- /modules/multiInference.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from .utils import runwareUtils as rwUtils 3 | from comfy_execution.graph import ExecutionBlocker 4 | 5 | 6 | class multiInference: 7 | @classmethod 8 | def INPUT_TYPES(cls): 9 | return { 10 | "required": { 11 | "Task 1": ( 12 | "RUNWARETASK", 13 | { 14 | "tooltip": "Connect a Runware Task From Any Inference Node.", 15 | }, 16 | ), 17 | }, 18 | "optional": { 19 | "Task 2": ( 20 | "RUNWARETASK", 21 | { 22 | "tooltip": "Connect a Runware Task From Any Inference Node.", 23 | }, 24 | ), 25 | "Task 3": ( 26 | "RUNWARETASK", 27 | { 28 | "tooltip": "Connect a Runware Task From Any Inference Node.", 29 | }, 30 | ), 31 | "Task 4": ( 32 | "RUNWARETASK", 33 | { 34 | "tooltip": "Connect a Runware Task From Any Inference Node.", 35 | }, 36 | ), 37 | }, 38 | } 39 | 40 | DESCRIPTION = "Allows you to Run Multiple Inference Tasks in Parallel." 41 | FUNCTION = "multiInference" 42 | RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "IMAGE") 43 | RETURN_NAMES = ("Result 1", "Result 2", "Result 3", "Result 4") 44 | CATEGORY = "Runware" 45 | 46 | def multiInference(self, **kwargs): 47 | tasksData = [kwargs.get(f"Task {i}", None) for i in range(1, 5)] 48 | validTasks = [task for task in tasksData if task is not None] 49 | 50 | if not validTasks: 51 | raise Exception("Error: No valid tasks provided for Multi Inference!") 52 | 53 | taskIncidents = {} 54 | 55 | async def runInference(taskData, taskIndex, oindex): 56 | try: 57 | print(f"[Debugging] Task {oindex + 1} UUID => {taskData[0]['taskUUID']}") 58 | loop = asyncio.get_running_loop() 59 | result = await loop.run_in_executor( 60 | None, rwUtils.inferenecRequest, taskData 61 | ) 62 | images = rwUtils.convertImageB64List(result) 63 | return images 64 | except Exception as e: 65 | print(f"\n---- Runware Multi Inference Task {oindex + 1} Failed ----") 66 | print(f"Task-Type: {taskData[0]['taskType']}") 67 | print(f"Task-UUID: {taskData[0]['taskUUID']}") 68 | print(f"{e}") 69 | print(f"-------------------\n") 70 | taskIncidents[taskIndex] = e 71 | return ExecutionBlocker(None) 72 | 73 | async def main(): 74 | coroutines = [] 75 | valid_indices = [idx for idx, t in enumerate(tasksData) if t is not None] 76 | for i, config in enumerate(validTasks): 77 | original_index = valid_indices[i] 78 | coroutines.append(runInference(config, i, original_index)) 79 | results = await asyncio.gather(*coroutines, return_exceptions=False) 80 | return results 81 | 82 | try: 83 | loop = asyncio.get_event_loop() 84 | except RuntimeError: 85 | loop = asyncio.new_event_loop() 86 | asyncio.set_event_loop(loop) 87 | 88 | finalResults = loop.run_until_complete(main()) 89 | 90 | optResult = [ExecutionBlocker(None)] * 4 91 | rindex = 0 92 | workingTasks = 0 93 | 94 | for i, task_config in enumerate(tasksData): 95 | if task_config is not None: 96 | result = finalResults[rindex] 97 | if result is not None and not isinstance(result, ExecutionBlocker): 98 | workingTasks += 1 99 | optResult[i] = result 100 | rindex += 1 101 | 102 | if workingTasks == 0 and len(validTasks) > 0: 103 | firstError = min(taskIncidents.keys()) 104 | raise Exception( 105 | f"Error in Task {firstError + 1} => {taskIncidents[firstError]}" 106 | ) 107 | 108 | return tuple(optResult) 109 | -------------------------------------------------------------------------------- /modules/outpaintSettings.py: -------------------------------------------------------------------------------- 1 | class outpaintSettings: 2 | @classmethod 3 | def INPUT_TYPES(cls): 4 | return { 5 | "required": { 6 | "Top": ( 7 | "INT", 8 | { 9 | "tooltip": "Number of pixels to extend at the top of the image. Must be a multiple of 64.", 10 | "default": 64, 11 | "min": 0, 12 | "max": 2048, 13 | "step": 64, 14 | }, 15 | ), 16 | "Right": ( 17 | "INT", 18 | { 19 | "tooltip": "Number of pixels to extend at the right of the image. Must be a multiple of 64.", 20 | "default": 64, 21 | "min": 0, 22 | "max": 2048, 23 | "step": 64, 24 | }, 25 | ), 26 | "Bottom": ( 27 | "INT", 28 | { 29 | "tooltip": "Number of pixels to extend at the bottom of the image. Must be a multiple of 64.", 30 | "default": 64, 31 | "min": 0, 32 | "max": 2048, 33 | "step": 64, 34 | }, 35 | ), 36 | "Left": ( 37 | "INT", 38 | { 39 | "tooltip": "Number of pixels to extend at the left of the image. Must be a multiple of 64.", 40 | "default": 64, 41 | "min": 0, 42 | "max": 2048, 43 | "step": 64, 44 | }, 45 | ), 46 | "Blur": ( 47 | "INT", 48 | { 49 | "tooltip": "The amount of blur to apply at the boundaries between the original image and the extended areas, measured in pixels.", 50 | "default": 0, 51 | "min": 0, 52 | "max": 32, 53 | "step": 1, 54 | }, 55 | ), 56 | }, 57 | } 58 | 59 | DESCRIPTION = "Can Be Added To Image Inference Nodes To Extend the image boundaries in specified directions. \n\nYou must provide the final dimensions using width and height parameters, which should account for the original image size plus the total extension (seedImage dimensions + top + bottom, left + right)." 60 | FUNCTION = "outpaintSettings" 61 | RETURN_TYPES = ("RUNWAREOUTPAINT",) 62 | RETURN_NAMES = ("Outpaint Settings",) 63 | CATEGORY = "Runware" 64 | 65 | def outpaintSettings(self, **kwargs): 66 | top = kwargs.get("Top", 0) 67 | right = kwargs.get("Right", 0) 68 | bottom = kwargs.get("Bottom", 0) 69 | left = kwargs.get("Left", 0) 70 | blur = kwargs.get("Blur", 0) 71 | 72 | outpaint = { 73 | "top": top, 74 | "right": right, 75 | "bottom": bottom, 76 | "left": left, 77 | "blur": blur, 78 | } 79 | 80 | return (outpaint,) 81 | -------------------------------------------------------------------------------- /modules/photoMaker.py: -------------------------------------------------------------------------------- 1 | from .utils import runwareUtils as rwUtils 2 | 3 | 4 | class photoMaker: 5 | @classmethod 6 | def INPUT_TYPES(cls): 7 | return { 8 | "required": { 9 | "Model": ("RUNWAREMODEL", { 10 | "tooltip": "Connect a Runware Model From Runware Model Node.", 11 | }), 12 | "Image 1": ("IMAGE", { 13 | "tooltip": "Specifies Input Image 1 that will be used as reference for the subject. These reference images help the AI to maintain identity consistency during the generation process." 14 | }), 15 | "positivePrompt": ("STRING", { 16 | "multiline": True, 17 | "placeholder": "Positive Prompt: a text instruction to guide the model on generating the image. It is usually a sentence or a paragraph that provides positive guidance for the task. This parameter is essential to shape the desired results.\n\nThe PhotoMaker positive prompt must follow a specific format: the class word (like \"man\", \"woman\", \"girl\") must be followed by the trigger word \"img\".\nExample: man img, wearing a suit", 18 | "tooltip": "Positive Prompt: a text instruction to guide the model on generating the image.\nDo not Forget To Add trigger word \"img\" after the class word (like \"man\", \"woman\", \"girl\")", 19 | }), 20 | "negativePrompt": ("STRING", { 21 | "multiline": True, 22 | "placeholder": "Negative Prompt: a text instruction to guide the model on generating the image. It is usually a sentence or a paragraph that provides negative guidance for the task. This parameter helps to avoid certain undesired results.", 23 | "tooltip": "Negative Prompt: a text instruction to guide the model on generating the image." 24 | }), 25 | "Prompt Weighting": (["Disabled", "Compel"], { 26 | "default": "Disabled", 27 | "tooltip": "Prompt weighting allows you to adjust how strongly different parts of your prompt influence the generated image.", 28 | }), 29 | "style": ([ 30 | "No style", "Cinematic", "Disney Character", "Digital Art", "Photographic", "Fantasy art", 31 | "Neonpunk", "Enhance", "Comic book", "Lowpoly", "Line art" 32 | ], { 33 | "default": "No style", 34 | "tooltip": "Specifies the artistic style to be applied to the generated images." 35 | }), 36 | "dimensions": ([ 37 | "Square (512x512)", "Square HD (1024x1024)", "Portrait 3:4 (768x1024)", 38 | "Portrait 9:16 (576x1024)", "Landscape 4:3 (1024x768)", 39 | "Landscape 16:9 (1024x576)", "Custom" 40 | ], { 41 | "default": "Square HD (1024x1024)", 42 | "tooltip": "Adjust the dimensions of the generated image by specifying its width and height in pixels, or select from the predefined options. Image dimensions must be multiples of 64 (e.g., 512x512, 1024x768).", 43 | }), 44 | "width": ("INT", { 45 | "tooltip": "The Width of the image in pixels.", 46 | "default": 1024, 47 | "min": 512, 48 | "max": 2048, 49 | "step": 64, 50 | }), 51 | "height": ("INT", { 52 | "tooltip": "The Height of the image in pixels.", 53 | "default": 1024, 54 | "min": 512, 55 | "max": 2048, 56 | "step": 64, 57 | }), 58 | "steps": ("INT", { 59 | "tooltip": "The number of steps is the number of iterations the model will perform to generate the image. The higher the number of steps, the more detailed the image will be. However, increasing the number of steps will also increase the time it takes to generate the image and may not always result in a better image.", 60 | "default": 25, 61 | "min": 1, 62 | "max": 100, 63 | }), 64 | "scheduler": (['Default', 'DDIM', 'DDIMScheduler', 'DDPMScheduler', 'DEISMultistepScheduler', 'DPMSolverSinglestepScheduler', 'DPMSolverMultistepScheduler', 'DPMSolverMultistepInverse', 'DPM++', 'DPM++ Karras', 'DPM++ 2M', 'DPM++ 2M Karras', 'DPM++ 2M SDE Karras', 'DPM++ 2M SDE', 'DPM++ 3M', 'DPM++ 3M Karras', 'DPM++ SDE Karras', 'DPM++ SDE', 'EDMEulerScheduler', 'EDMDPMSolverMultistepScheduler', 'Euler', 'EulerDiscreteScheduler', 'Euler Karras', 'Euler a', 'EulerAncestralDiscreteScheduler', 'FlowMatchEulerDiscreteScheduler', 'Heun', 'HeunDiscreteScheduler', 'Heun Karras', 'IPNDMScheduler', 'KDPM2DiscreteScheduler', 'KDPM2AncestralDiscreteScheduler', 'LCM', 'LCMScheduler', 'LMS', 'LMSDiscreteScheduler', 'LMS Karras', 'PNDMScheduler', 'TCDScheduler', 'UniPC', 'UniPCMultistepScheduler', 'UniPC Karras', 'UniPC 2M', 'UniPC 2M Karras', 'UniPC 3M', 'UniPC 3M Karras'], { 65 | "tooltip": "An scheduler is a component that manages the inference process. Different schedulers can be used to achieve different results like more detailed images, faster inference, or more accurate results.", 66 | "default": "Default", 67 | }), 68 | "cfgScale": ("FLOAT", { 69 | "tooltip": "Guidance scale represents how closely the images will resemble the prompt or how much freedom the AI model has. Higher values are closer to the prompt. Low values may reduce the quality of the results.", 70 | "default": 6.5, 71 | "min": 1.0, 72 | "max": 30.0, 73 | "step": 0.5, 74 | }), 75 | "strength": ("INT", { 76 | "default": 15, 77 | "min": 15, 78 | "max": 50, 79 | "tooltip": "Controls the balance between preserving the subject's original features and the creative transformation specified in the prompt.\n- Lower values provide stronger subject fidelity.\n- Higher values allow more creative freedom in the transformation." 80 | }), 81 | "clipSkip": ("INT", { 82 | "tooltip": "Enables skipping layers of the CLIP embedding process, leading to quicker and more varied image generation.", 83 | "default": 0, 84 | "min": 0, 85 | "max": 2, 86 | }), 87 | "seed": ("INT", { 88 | "tooltip": "A value used to randomize the image generation. If you want to make images reproducible (generate the same image multiple times), you can use the same seed value.", 89 | "default": rwUtils.genRandSeed(), 90 | "min": 1, 91 | "max": 9223372036854776000, 92 | }), 93 | "batchSize": ("INT", { 94 | "tooltip": "The number of images to generate in a single request.", 95 | "default": 1, 96 | "min": 1, 97 | "max": 10, 98 | }), 99 | }, 100 | "optional": { 101 | "Image 2": ("IMAGE", { 102 | "tooltip": "Specifies Input Image 2 that will be used as reference for the subject. These reference images help the AI to maintain identity consistency during the generation process." 103 | }), 104 | "Image 3": ("IMAGE", { 105 | "tooltip": "Specifies Input Image 3 that will be used as reference for the subject. These reference images help the AI to maintain identity consistency during the generation process." 106 | }), 107 | "Image 4": ("IMAGE", { 108 | "tooltip": "Specifies Input Image 4 that will be used as reference for the subject. These reference images help the AI to maintain identity consistency during the generation process." 109 | }), 110 | } 111 | } 112 | 113 | DESCRIPTION = "Transform and style images using PhotoMaker's advanced personalization technology. Create consistent, high-quality image variations with precise subject fidelity and style control." 114 | FUNCTION = "photoMaker" 115 | RETURN_TYPES = ("IMAGE",) 116 | CATEGORY = "Runware" 117 | 118 | @classmethod 119 | def VALIDATE_INPUTS(cls, positivePrompt, negativePrompt): 120 | if (positivePrompt is not None and (positivePrompt == "" or len(positivePrompt) < 3 or len(positivePrompt) > 2000)): 121 | raise Exception( 122 | "Positive Prompt Must Be Between 3 And 2000 characters!") 123 | if (negativePrompt is not None and negativePrompt != "" and (len(negativePrompt) < 3 or len(negativePrompt) > 2000)): 124 | raise Exception( 125 | "Negative Prompt Must Be Between 3 And 2000 characters!") 126 | return True 127 | 128 | def photoMaker(self, **kwargs): 129 | runwareModel = kwargs.get("Model") 130 | image1 = kwargs.get("Image 1") 131 | image2 = kwargs.get("Image 2", None) 132 | image3 = kwargs.get("Image 3", None) 133 | image4 = kwargs.get("Image 4", None) 134 | positivePrompt = kwargs.get("positivePrompt") 135 | negativePrompt = kwargs.get("negativePrompt", None) 136 | promptWeighting = kwargs.get("Prompt Weighting", "Disabled") 137 | style = kwargs.get("style", "No style") 138 | dimensions = kwargs.get("dimensions", "Square HD (1024x1024)") 139 | width = kwargs.get("width", 1024) 140 | height = kwargs.get("height", 1024) 141 | steps = kwargs.get("steps", 25) 142 | scheduler = kwargs.get("scheduler", "Default") 143 | cfgScale = kwargs.get("cfgScale", 6.5) 144 | strength = kwargs.get("strength", 15) 145 | clipSkip = kwargs.get("clipSkip", 0) 146 | seed = kwargs.get("seed") 147 | batchSize = kwargs.get("batchSize", 1) 148 | 149 | imageList = [rwUtils.convertTensor2IMG(image1)] 150 | if (image2 is not None): 151 | imageList.append(rwUtils.convertTensor2IMG(image2)) 152 | if (image3 is not None): 153 | imageList.append(rwUtils.convertTensor2IMG(image3)) 154 | if (image4 is not None): 155 | imageList.append(rwUtils.convertTensor2IMG(image4)) 156 | 157 | genConfig = [ 158 | { 159 | "taskType": "photoMaker", 160 | "taskUUID": rwUtils.genRandUUID(), 161 | "model": runwareModel, 162 | "inputImages": imageList, 163 | "positivePrompt": positivePrompt, 164 | "style": style, 165 | "strength": strength, 166 | "height": height, 167 | "width": width, 168 | "steps": steps, 169 | "scheduler": scheduler, 170 | "seed": seed, 171 | "CFGScale": cfgScale, 172 | "clipSkip": clipSkip, 173 | "numberResults": batchSize, 174 | "outputType": "base64Data", 175 | "outputFormat": rwUtils.OUTPUT_FORMAT, 176 | "outputQuality": rwUtils.OUTPUT_QUALITY, 177 | } 178 | ] 179 | 180 | if (negativePrompt is not None and negativePrompt != ""): 181 | genConfig[0]["negativePrompt"] = negativePrompt 182 | if (promptWeighting != "Disabled"): 183 | if (promptWeighting == "sdEmbeds"): 184 | genConfig[0]["promptWeighting"] = "sdEmbeds" 185 | else: 186 | genConfig[0]["promptWeighting"] = "compel" 187 | 188 | genResult = rwUtils.inferenecRequest(genConfig) 189 | images = rwUtils.convertImageB64List(genResult) 190 | return (images, ) 191 | -------------------------------------------------------------------------------- /modules/referenceImages.py: -------------------------------------------------------------------------------- 1 | from .utils import runwareUtils as rwUtils 2 | 3 | class referenceImages: 4 | @classmethod 5 | def INPUT_TYPES(cls): 6 | return { 7 | "required": { 8 | "Image 1": ("IMAGE", { 9 | "tooltip": "Specifies Reference Image 1 that will be used as reference for the subject. These reference images help the AI to maintain identity consistency during the generation process." 10 | }), 11 | }, 12 | "optional": { 13 | "Image 2": ("IMAGE", { 14 | "tooltip": "Specifies Reference Image 2 that will be used as reference for the subject. These reference images help the AI to maintain identity consistency during the generation process." 15 | }), 16 | "Image 3": ("IMAGE", { 17 | "tooltip": "Specifies Reference Image 3 that will be used as reference for the subject. These reference images help the AI to maintain identity consistency during the generation process." 18 | }), 19 | "Image 4": ("IMAGE", { 20 | "tooltip": "Specifies Reference Image 4 that will be used as reference for the subject. These reference images help the AI to maintain identity consistency during the generation process." 21 | }), 22 | } 23 | } 24 | 25 | DESCRIPTION = "Used With Runware Tasks To Provide Reference Images For The Subject. These Reference Images Help The AI To Maintain Identity Consistency During The Generation Process." 26 | FUNCTION = "referenceImages" 27 | RETURN_TYPES = ("RUNWAREREFERENCEIMAGES",) 28 | RETURN_NAMES = ("Reference Images",) 29 | CATEGORY = "Runware" 30 | 31 | def referenceImages(self, **kwargs): 32 | image1 = kwargs.get("Image 1") 33 | image2 = kwargs.get("Image 2", None) 34 | image3 = kwargs.get("Image 3", None) 35 | image4 = kwargs.get("Image 4", None) 36 | 37 | imageList = [rwUtils.convertTensor2IMG(image1)] 38 | if (image2 is not None): 39 | imageList.append(rwUtils.convertTensor2IMG(image2)) 40 | if (image3 is not None): 41 | imageList.append(rwUtils.convertTensor2IMG(image3)) 42 | if (image4 is not None): 43 | imageList.append(rwUtils.convertTensor2IMG(image4)) 44 | 45 | return (imageList, ) 46 | -------------------------------------------------------------------------------- /modules/refiner.py: -------------------------------------------------------------------------------- 1 | class refiner: 2 | @classmethod 3 | def INPUT_TYPES(cls): 4 | return { 5 | "required": { 6 | "Model": ("RUNWAREMODEL", { 7 | "tooltip": "Connect a Runware Model From Model Search Node.", 8 | }), 9 | "startStep": ("INT", { 10 | "tooltip": "Represents the step number at which the refinement process begins. The initial model will generate the image up to this step, after which the refiner model takes over to enhance the result.", 11 | "min": 0, 12 | "max": 99, 13 | "default": 0, 14 | }), 15 | "startStepPercentage": ("INT", { 16 | "tooltip": "Represents the percentage of total steps at which the refinement process begins. The initial model will generate the image up to this percentage of steps before the refiner takes over.", 17 | "min": 0, 18 | "max": 99, 19 | "default": 0 20 | }), 21 | }, 22 | } 23 | 24 | DESCRIPTION = "Refiner models help create higher quality image outputs by incorporating specialized models designed to enhance image details and overall coherence. This can be particularly useful when you need results with superior quality, photorealism, or specific aesthetic refinements. (Note that refiner models are only SDXL based)" 25 | FUNCTION = "refiner" 26 | RETURN_TYPES = ("RUNWAREREFINER",) 27 | CATEGORY = "Runware" 28 | 29 | @classmethod 30 | def VALIDATE_INPUTS(cls, startStep, startStepPercentage): 31 | if(startStep != 0 and startStepPercentage != 0): 32 | raise Exception("Please provide either startStep or startStepPercentage, not both.") 33 | elif(startStep == 0 and startStepPercentage == 0): 34 | raise Exception("Please provide either startStep or startStepPercentage.") 35 | else: 36 | return True 37 | 38 | def refiner(self, **kwargs): 39 | runwareModel = kwargs.get("Model") 40 | startStep = kwargs.get("startStep") 41 | startStepPercentage = kwargs.get("startStepPercentage") 42 | refinerResult = { 43 | "model": runwareModel, 44 | } 45 | if(startStep != 0): 46 | refinerResult["startStep"] = startStep 47 | else: 48 | refinerResult["startStepPercentage"] = startStepPercentage 49 | return (refinerResult,) -------------------------------------------------------------------------------- /modules/runwareBFL.py: -------------------------------------------------------------------------------- 1 | from .utils import runwareUtils as rwUtils 2 | 3 | class runwareKontext: 4 | @classmethod 5 | def INPUT_TYPES(cls): 6 | return { 7 | "required": { 8 | "Model": ([ 9 | "bfl:3@1 (FLUX.1 Kontext Pro)", 10 | "bfl:4@1 (FLUX.1 Kontext Max)", 11 | ], { 12 | "tooltip": "Select The Model You Want For Image Generation or Image Editing.", 13 | "default": "bfl:3@1 (FLUX.1 Kontext Pro)", 14 | }), 15 | "positivePrompt": ("STRING", { 16 | "multiline": True, 17 | "placeholder": "Text instruction to guide the model on generating the image. It is usually a sentence or a paragraph that provides positive guidance for the task. This parameter is essential to shape the desired results.\n\nYou Can Press (Ctrl + Alt + E) To Enhance The Prompt!", 18 | "tooltip": "Text instruction to guide the model on generating the image. You Can Also Press (Ctrl + Alt + E) To Enhance The Prompt!" 19 | }), 20 | "Prompt Upsampling": ("BOOLEAN", { 21 | "tooltip": "If Enabled it Will automatically modify the prompt at generation time for more creative results.", 22 | "default": False, 23 | "label_on": "Enabled", 24 | "label_off": "Disabled", 25 | }), 26 | "Aspect Ratio": ([ 27 | "21:9 (Ultra-Wide / Landscape)", 28 | "16:9 (Wide / Landscape)", 29 | "4:3 (Standard / Landscape)", 30 | "3:2 (Classic Photo / Landscape)", 31 | "1:1 (Square)", 32 | "2:3 (Classic Photo / Portrait)", 33 | "3:4 (Standard / Portrait)", 34 | "9:16 (Vertical Video / Portrait)", 35 | "9:21 (Ultra-Tall / Portrait)" 36 | ], { 37 | "default": "16:9 (Wide / Landscape)", 38 | "tooltip": "Adjust the dimensions of the generated image. This setting allows you to choose from various aspect ratios, such as 16:9 for wide images or 1:1 for square images. The width and height will be automatically adjusted based on the selected aspect ratio.", 39 | }), 40 | "seed": ("INT", { 41 | "tooltip": "A value used to randomize the image generation. If you want to make images reproducible (generate the same image multiple times), you can use the same seed value.", 42 | "default": rwUtils.genRandSeed(), 43 | "min": 1, 44 | "max": 9223372036854776000, 45 | }), 46 | "Multi Inference Mode": ("BOOLEAN", { 47 | "tooltip": "If Enabled the node will skip the image generation process and will only return the Runware Task Object to be used in the Multi Inference Node.", 48 | "default": False, 49 | "label_on": "Enabled", 50 | "label_off": "Disabled", 51 | }), 52 | "batchSize": ("INT", { 53 | "tooltip": "The number of images to generate in a single request.", 54 | "default": 1, 55 | "min": 1, 56 | "max": 10, 57 | }), 58 | }, 59 | "optional": { 60 | "Reference Images": ("RUNWAREREFERENCEIMAGES", { 61 | "tooltip": "Connect a Runware Reference Images Node to use reference images for image editing or generation.", 62 | }), 63 | } 64 | } 65 | 66 | @classmethod 67 | def VALIDATE_INPUTS(cls, positivePrompt): 68 | if (positivePrompt is not None and (positivePrompt == "" or len(positivePrompt) < 3 or len(positivePrompt) > 2000)): 69 | raise Exception( 70 | "Positive Prompt Must Be Between 3 And 2000 characters!") 71 | return True 72 | 73 | DESCRIPTION = "Generate And Edit Images Using Runware BFL Kontext Models." 74 | FUNCTION = "kontextInference" 75 | RETURN_TYPES = ("IMAGE", "RUNWARETASK") 76 | RETURN_NAMES = ("IMAGE", "RW-Task") 77 | CATEGORY = "Runware" 78 | 79 | def kontextInference(self, **kwargs): 80 | runwareModel = kwargs.get("Model") 81 | runwareModel = runwareModel.split(" ")[0] 82 | positivePrompt = kwargs.get("positivePrompt") 83 | promptUpsampling = kwargs.get("Prompt Upsampling") 84 | aspectRatio = kwargs.get("Aspect Ratio") 85 | multiInferenceMode = kwargs.get("Multi Inference Mode", False) 86 | referenceImages = kwargs.get("Reference Images", None) 87 | seed = kwargs.get("seed") 88 | batchSize = kwargs.get("batchSize", 1) 89 | 90 | aspectRatioMap = { 91 | "21:9": (1568, 672), 92 | "16:9": (1392, 752), 93 | "4:3": (1184, 880), 94 | "3:2": (1248, 832), 95 | "1:1": (1024, 1024), 96 | "2:3": (832, 1248), 97 | "3:4": (880, 1184), 98 | "9:16": (752, 1392), 99 | "9:21": (672, 1568), 100 | } 101 | 102 | secAspectRatio = aspectRatio.split(" ")[0] 103 | width, height = aspectRatioMap.get(secAspectRatio, (1024, 1024)) 104 | 105 | genConfig = [ 106 | { 107 | "taskType": "imageInference", 108 | "taskUUID": rwUtils.genRandUUID(), 109 | "positivePrompt": positivePrompt, 110 | "height": height, 111 | "width": width, 112 | "model": runwareModel, 113 | "seed": seed, 114 | "outputType": "base64Data", 115 | "outputFormat": rwUtils.OUTPUT_FORMAT, 116 | "outputQuality": rwUtils.OUTPUT_QUALITY, 117 | "numberResults": batchSize, 118 | } 119 | ] 120 | 121 | if (referenceImages is not None): 122 | genConfig[0]["referenceImages"] = referenceImages 123 | 124 | if (promptUpsampling): 125 | genConfig[0]["providerSettings"] = { 126 | "bfl": { 127 | "promptUpsampling": True 128 | } 129 | } 130 | 131 | # For Debugging Purposes Only 132 | print(f"[Debugging] Task UUID: {genConfig[0]['taskUUID']}") 133 | 134 | if (multiInferenceMode): 135 | return (None, genConfig) 136 | else: 137 | genResult = rwUtils.inferenecRequest(genConfig) 138 | images = rwUtils.convertImageB64List(genResult) 139 | return (images, None) 140 | -------------------------------------------------------------------------------- /modules/teaCache.py: -------------------------------------------------------------------------------- 1 | class teaCache: 2 | @classmethod 3 | def INPUT_TYPES(cls): 4 | return { 5 | "required": { 6 | "TeaCache Distance": ( 7 | "FLOAT", 8 | { 9 | "tooltip": "Controls the aggressiveness of the TeaCache feature.\n\nValues range from 0.0 (most conservative) to 1.0 (most aggressive).\n\nLower values prioritize quality by being more selective about which computations to reuse, while higher values prioritize speed by reusing more computations.", 10 | "default": 0.5, 11 | "min": 0.0, 12 | "max": 1.0, 13 | "step": 0.1, 14 | }, 15 | ), 16 | }, 17 | } 18 | 19 | DESCRIPTION = "Can Be connected to Runware Inference to accelerate image generation by reusing past computations.\n\nTeaCache is specifically designed for transformer-based models such as Flux and SD3, and does not work with UNet models like SDXL or SD1.5." 20 | FUNCTION = "teaCache" 21 | RETURN_TYPES = ("RUNWAREACCELERATOR",) 22 | RETURN_NAMES = ("Runware Accelerator",) 23 | CATEGORY = "Runware" 24 | 25 | def teaCache(self, **kwargs): 26 | teaCacheDistance = kwargs.get("TeaCache Distance") 27 | 28 | return ( 29 | { 30 | "teaCache": True, 31 | "teaCacheDistance": round(teaCacheDistance, 2), 32 | }, 33 | ) 34 | -------------------------------------------------------------------------------- /modules/upscaler.py: -------------------------------------------------------------------------------- 1 | from .utils import runwareUtils as rwUtils 2 | 3 | class upscaler: 4 | @classmethod 5 | def INPUT_TYPES(cls): 6 | return { 7 | "required": { 8 | "Image": ("IMAGE", { 9 | "tooltip": "Specifies the input image to be upscaled." 10 | }), 11 | "upscaleFactor": ("INT", { 12 | "tooltip": "Each level will increase the size of the image by the corresponding factor.", 13 | "default": 2, 14 | "min": 2, 15 | "max": 4, 16 | }), 17 | }, 18 | } 19 | 20 | DESCRIPTION = "Enhance the resolution and quality of your images using Runware's advanced upscaling API. Transform low-resolution images into sharp, high-definition visuals." 21 | FUNCTION = "upscale" 22 | RETURN_TYPES = ("IMAGE",) 23 | CATEGORY = "Runware" 24 | 25 | def upscale(self, **kwargs): 26 | image = kwargs.get("Image") 27 | upscaleFactor = kwargs.get("upscaleFactor", 2) 28 | 29 | genConfig = [ 30 | { 31 | "taskType": "imageUpscale", 32 | "taskUUID": rwUtils.genRandUUID(), 33 | "inputImage": rwUtils.convertTensor2IMG(image), 34 | "upscaleFactor": upscaleFactor, 35 | "outputFormat": rwUtils.OUTPUT_FORMAT, 36 | "outputQuality": rwUtils.OUTPUT_QUALITY, 37 | "outputType": "base64Data", 38 | } 39 | ] 40 | 41 | genResult = rwUtils.inferenecRequest(genConfig) 42 | images = rwUtils.convertImageB64List(genResult) 43 | return (images, ) -------------------------------------------------------------------------------- /modules/utils/runwareUtils.py: -------------------------------------------------------------------------------- 1 | from comfy.model_management import InterruptProcessingException 2 | from requests.adapters import HTTPAdapter 3 | from datetime import datetime, timedelta 4 | from server import PromptServer 5 | from dotenv import load_dotenv 6 | from pathlib import Path 7 | from PIL import Image 8 | import numpy as np 9 | import requests 10 | import hashlib 11 | import asyncio 12 | import random 13 | import base64 14 | import torch 15 | import uuid 16 | import time 17 | import json 18 | import os 19 | import io 20 | import threading 21 | 22 | load_dotenv() 23 | 24 | BASEFOLDER = Path(__file__).parent.parent.parent 25 | IMAGE_CACHE_FILE = BASEFOLDER / "imagesCache.json" 26 | 27 | if not IMAGE_CACHE_FILE.exists(): 28 | with open(IMAGE_CACHE_FILE, "w") as f: 29 | print("[Runware] Initializing images cache...") 30 | json.dump({}, f) 31 | 32 | RUNWARE_REMBG_OUTPUT_FORMATS = { 33 | "outputFormat": ( 34 | ["WEBP", "PNG"], 35 | {"default": "WEBP", "tooltip": "Choose the output image format."}, 36 | ) 37 | } 38 | 39 | MAX_RETRIES = 4 40 | RETRY_COOLDOWNS = [1, 2, 5, 10] 41 | 42 | session = requests.Session() 43 | adapter = HTTPAdapter(pool_connections=10, pool_maxsize=10) 44 | session.mount("http://", adapter) 45 | session.mount("https://", adapter) 46 | 47 | def generalRequestWrapper(recaller, *args, **kwargs): 48 | for attempt in range(MAX_RETRIES + 1): 49 | try: 50 | return recaller(*args, **kwargs) 51 | except (requests.exceptions.ConnectionError, requests.exceptions.SSLError) as e: 52 | if attempt == MAX_RETRIES: 53 | raise 54 | else: 55 | cooldown = RETRY_COOLDOWNS[attempt] 56 | print(f"[Runware] Error API Request Failed! Retrying in {cooldown} seconds... (Attempt {attempt+1}/{MAX_RETRIES})") 57 | time.sleep(cooldown) 58 | continue 59 | return False 60 | 61 | def getAPIKey(): 62 | apiKey = os.getenv("RUNWARE_API_KEY") 63 | if apiKey and isinstance(apiKey, str) and len(apiKey) > 24: 64 | return apiKey 65 | return False 66 | 67 | def getTimeout(): 68 | timeout = os.getenv("RUNWARE_TIMEOUT") 69 | if timeout and timeout.isdigit(): 70 | return int(timeout) 71 | else: 72 | timeout = 90 73 | os.environ["RUNWARE_TIMEOUT"] = str(timeout) 74 | return timeout 75 | 76 | def getOutputQuality(): 77 | output_quality = os.getenv("RUNWARE_OUTPUT_QUALITY") 78 | if output_quality and isinstance(output_quality, str) and output_quality.isdigit(): 79 | return int(output_quality) 80 | else: 81 | output_quality = 95 82 | os.environ["RUNWARE_OUTPUT_QUALITY"] = str(output_quality) 83 | return output_quality 84 | 85 | def getOutputFormat(): 86 | output_format = os.getenv("RUNWARE_OUTPUT_FORMAT") 87 | if output_format and isinstance(output_format, str): 88 | return output_format 89 | else: 90 | output_format = "WEBP" 91 | os.environ["RUNWARE_OUTPUT_FORMAT"] = output_format 92 | return output_format 93 | 94 | def getEnableImagesCaching(): 95 | enable_images_caching = os.getenv("RUNWARE_ENABLE_IMAGES_CACHING") 96 | if enable_images_caching and enable_images_caching.lower() in ["true", "false"]: 97 | return enable_images_caching.lower() == "true" 98 | else: 99 | enable_images_caching = True 100 | os.environ["RUNWARE_ENABLE_IMAGES_CACHING"] = str(enable_images_caching) 101 | return enable_images_caching 102 | 103 | def getMinImageCacheSize(): 104 | min_image_cache_size = os.getenv("RUNWARE_MIN_IMAGE_CACHE_SIZE") 105 | if min_image_cache_size and min_image_cache_size.isdigit(): 106 | return int(min_image_cache_size) 107 | else: 108 | min_image_cache_size = 150 109 | os.environ["RUNWARE_MIN_IMAGE_CACHE_SIZE"] = str(min_image_cache_size) 110 | return min_image_cache_size 111 | 112 | def getCustomEndpoint(): 113 | custom_endpoint = os.getenv("RUNWARE_CUSTOM_ENDPOINT") 114 | if custom_endpoint and isinstance(custom_endpoint, str): 115 | return custom_endpoint 116 | return "https://api.runware.ai/v1" 117 | 118 | SESSION_TIMEOUT = getTimeout() 119 | RUNWARE_API_KEY = getAPIKey() 120 | OUTPUT_FORMAT = getOutputFormat() 121 | OUTPUT_QUALITY = getOutputQuality() 122 | ENABLE_IMAGES_CACHING = getEnableImagesCaching() 123 | MIN_IMAGE_CACHE_SIZE = getMinImageCacheSize() 124 | RUNWARE_API_BASE_URL = getCustomEndpoint() 125 | 126 | def setEnvKey(keyName, keyValue): 127 | comfyNodeRoot = Path(__file__).parent.parent.parent 128 | envFilePath = comfyNodeRoot / ".env" 129 | if not envFilePath.exists(): 130 | envFilePath.touch() 131 | with open(envFilePath, "r") as f: 132 | lines = f.readlines() 133 | key_exists = False 134 | new_lines = [] 135 | for line in lines: 136 | if line.startswith(f"{keyName}="): 137 | key_exists = True 138 | new_lines.append(f"{keyName}={keyValue}\n") 139 | else: 140 | new_lines.append(line) 141 | if not key_exists: 142 | new_lines.append(f"{keyName}={keyValue}\n") 143 | with open(envFilePath, "w") as f: 144 | f.writelines(new_lines) 145 | return True 146 | 147 | def setAPIKey(apiKey: str): 148 | global RUNWARE_API_KEY 149 | envSetRes = setEnvKey("RUNWARE_API_KEY", apiKey) 150 | if envSetRes: 151 | RUNWARE_API_KEY = apiKey 152 | os.environ["RUNWARE_API_KEY"] = apiKey 153 | return True 154 | 155 | def setTimeout(timeout: int): 156 | envSetRes = setEnvKey("RUNWARE_TIMEOUT", str(timeout)) 157 | if envSetRes: 158 | global SESSION_TIMEOUT 159 | SESSION_TIMEOUT = timeout 160 | os.environ["RUNWARE_TIMEOUT"] = str(timeout) 161 | return True 162 | 163 | def setOutputFormat(format: str): 164 | envSetRes = setEnvKey("RUNWARE_OUTPUT_FORMAT", format) 165 | if envSetRes: 166 | global OUTPUT_FORMAT 167 | OUTPUT_FORMAT = format 168 | os.environ["RUNWARE_OUTPUT_FORMAT"] = format 169 | return True 170 | 171 | def setOutputQuality(quality: int): 172 | envSetRes = setEnvKey("RUNWARE_OUTPUT_QUALITY", str(quality)) 173 | if envSetRes: 174 | global OUTPUT_QUALITY 175 | OUTPUT_QUALITY = quality 176 | os.environ["RUNWARE_OUTPUT_QUALITY"] = str(quality) 177 | return True 178 | 179 | def setEnableImagesCaching(enabled: bool): 180 | envSetRes = setEnvKey("RUNWARE_ENABLE_IMAGES_CACHING", str(enabled)) 181 | if envSetRes: 182 | global ENABLE_IMAGES_CACHING 183 | ENABLE_IMAGES_CACHING = enabled 184 | os.environ["RUNWARE_ENABLE_IMAGES_CACHING"] = str(enabled) 185 | return True 186 | 187 | def setMinImageCacheSize(size: int): 188 | envSetRes = setEnvKey("RUNWARE_MIN_IMAGE_CACHE_SIZE", str(size)) 189 | if envSetRes: 190 | global MIN_IMAGE_CACHE_SIZE 191 | MIN_IMAGE_CACHE_SIZE = size 192 | os.environ["RUNWARE_MIN_IMAGE_CACHE_SIZE"] = str(size) 193 | return True 194 | 195 | def genRandSeed(minSeed=1000, maxSeed=9223372036854776000): 196 | return random.randint(minSeed, maxSeed) 197 | 198 | def genRandUUID(): 199 | return str(uuid.uuid4()) 200 | 201 | def checkAPIKey(apiKey): 202 | headers = { 203 | "Accept": "application/json", 204 | "Content-Type": "application/json", 205 | "Accept-Encoding": "gzip, deflate, br, zstd", 206 | } 207 | 208 | genConfig = [ 209 | { 210 | "taskType": "authentication", 211 | "apiKey": apiKey, 212 | } 213 | ] 214 | 215 | try: 216 | 217 | def recaller(): 218 | return session.post( 219 | RUNWARE_API_BASE_URL, 220 | headers=headers, 221 | json=genConfig, 222 | timeout=10, 223 | allow_redirects=False, 224 | stream=True, 225 | ) 226 | 227 | genResult = generalRequestWrapper(recaller) 228 | genResult = genResult.json() 229 | if "errors" in genResult: 230 | return str(genResult["errors"][0]["message"]) 231 | else: 232 | return True 233 | except Exception as e: 234 | return False 235 | 236 | def sendImageCaption(captionText, nodeID): 237 | PromptServer.instance.send_sync( 238 | "runwareImageCaption", 239 | { 240 | "success": True, 241 | "captionText": captionText, 242 | "nodeID": nodeID, 243 | }, 244 | ) 245 | 246 | def inferenecRequest(genConfig): 247 | global RUNWARE_API_KEY, RUNWARE_API_BASE_URL, SESSION_TIMEOUT 248 | RUNWARE_API_KEY = os.getenv("RUNWARE_API_KEY") 249 | SESSION_TIMEOUT = int(os.getenv("RUNWARE_TIMEOUT")) 250 | headers = { 251 | "Authorization": f"Bearer {RUNWARE_API_KEY}", 252 | "Content-Type": "application/json", 253 | "Accept-Encoding": "gzip, deflate, br, zstd", 254 | } 255 | 256 | try: 257 | def recaller(): 258 | return session.post( 259 | RUNWARE_API_BASE_URL, 260 | headers=headers, 261 | json=genConfig, 262 | timeout=SESSION_TIMEOUT, 263 | allow_redirects=False, 264 | stream=True, 265 | ) 266 | 267 | genResult = generalRequestWrapper(recaller) 268 | try: 269 | genResult = genResult.json() 270 | except json.JSONDecodeError as e: 271 | print(f"[Debugging] Runware JSON Decode Error: {str(e)}") 272 | print(f"[Debugging] Runware Response Status Code: {genResult.status_code}") 273 | print(f"[Debugging] Runware Response Headers: {genResult.headers}") 274 | print(f"[Debugging] Runware Raw Response Content: {genResult.content}") 275 | raise Exception("Error: Invalid JSON response from API!") 276 | if "errors" in genResult: 277 | raise Exception(genResult["errors"][0]["message"]) 278 | else: 279 | return genResult 280 | except requests.exceptions.Timeout: 281 | raise Exception( 282 | f"Error: Request Timed Out After {SESSION_TIMEOUT} Seconds - Please Try Again!" 283 | ) 284 | except requests.exceptions.RequestException: 285 | raise Exception("Error: Runware Request Failed!") 286 | except Exception as e: 287 | if "invalid api key" in str(e).lower(): 288 | PromptServer.instance.send_sync( 289 | "runwareError", 290 | { 291 | "success": False, 292 | "errorMessage": str(e), 293 | "errorCode": 401, 294 | }, 295 | ) 296 | raise InterruptProcessingException() 297 | else: 298 | raise Exception(f"Error: {e}") 299 | return False 300 | 301 | 302 | async def uploadImage(imageDataUri): 303 | uploadTaskConfig = [ 304 | {"taskType": "imageUpload", "taskUUID": genRandUUID(), "image": imageDataUri} 305 | ] 306 | 307 | try: 308 | uploadResult = inferenecRequest(uploadTaskConfig) 309 | if ( 310 | uploadResult 311 | and "data" in uploadResult 312 | and "imageUUID" in uploadResult["data"][0] 313 | ): 314 | imageUUID = uploadResult["data"][0]["imageUUID"] 315 | return imageUUID 316 | except Exception as e: 317 | return False 318 | 319 | return False 320 | 321 | async def uploadAndCacheImage(imgSig: str, imgDataUri: str): 322 | try: 323 | imageUUID = await uploadImage(imgDataUri) 324 | if imageUUID: 325 | await imageStoreSet(imgSig, imageUUID) 326 | except Exception as e: 327 | return False 328 | 329 | async def imageStoreSet(imgHash: str, imgUUID: str) -> bool: 330 | try: 331 | with open(IMAGE_CACHE_FILE, "r") as f: 332 | cache = json.load(f) 333 | expires = (datetime.now() + timedelta(days=30)).isoformat() 334 | cache[imgHash] = {"uuid": imgUUID, "expires": expires} 335 | with open(IMAGE_CACHE_FILE, "w") as f: 336 | json.dump(cache, f, indent=2) 337 | return True 338 | except Exception as e: 339 | return False 340 | 341 | def imageStoreGet(imgHash: str) -> str | bool: 342 | try: 343 | with open(IMAGE_CACHE_FILE, "r") as f: 344 | cache = json.load(f) 345 | if imgHash not in cache: 346 | return False 347 | entry = cache[imgHash] 348 | expires = datetime.fromisoformat(entry["expires"]) 349 | if datetime.now() > expires: 350 | del cache[imgHash] 351 | with open(IMAGE_CACHE_FILE, "w") as f: 352 | json.dump(cache, f, indent=2) 353 | return False 354 | return entry["uuid"] 355 | except Exception as e: 356 | return False 357 | 358 | def convertTensor2IMG(tensorImage): 359 | global ENABLE_IMAGES_CACHING, MIN_IMAGE_CACHE_SIZE 360 | ENABLE_IMAGES_CACHING = getEnableImagesCaching() 361 | MIN_IMAGE_CACHE_SIZE = int(os.getenv("RUNWARE_MIN_IMAGE_CACHE_SIZE")) 362 | 363 | imageNP = (tensorImage.squeeze().numpy() * 255).astype(np.uint8) 364 | imgSig = hashlib.sha256(imageNP.tobytes()).hexdigest() 365 | 366 | if ENABLE_IMAGES_CACHING: 367 | imgUUID = imageStoreGet(imgSig) 368 | if imgUUID: 369 | return imgUUID 370 | 371 | image = Image.fromarray(imageNP) 372 | with io.BytesIO() as buffer: 373 | image.save( 374 | buffer, format="webp", quality=100, subsampling=0, method=6, exact=True 375 | ) 376 | imageRawData = buffer.getvalue() 377 | imgBytes = len(imageRawData) 378 | imgSize = int(imgBytes / 1024) 379 | imgb64 = base64.b64encode(imageRawData).decode("utf-8") 380 | imgDataUri = f"data:image/webp;base64,{imgb64}" 381 | 382 | if ENABLE_IMAGES_CACHING and imgSize >= MIN_IMAGE_CACHE_SIZE: 383 | try: 384 | loop = asyncio.get_running_loop() 385 | loop.call_soon(lambda: asyncio.ensure_future(uploadAndCacheImage(imgSig, imgDataUri))) 386 | except RuntimeError: 387 | def run_coro(): 388 | new_loop = asyncio.new_event_loop() 389 | asyncio.set_event_loop(new_loop) 390 | new_loop.run_until_complete(uploadAndCacheImage(imgSig, imgDataUri)) 391 | new_loop.close() 392 | threading.Thread(target=run_coro).start() 393 | return imgDataUri 394 | 395 | def convertIMG2Tensor(b64img): 396 | imgbytes = base64.b64decode(b64img) 397 | image = Image.open(io.BytesIO(imgbytes)) 398 | imageNP = np.array(image).astype(np.float32) / 255.0 399 | tensorImage = torch.from_numpy(imageNP).squeeze() 400 | return tensorImage 401 | 402 | def convertImageB64List(imageDataObject): 403 | images = () 404 | for result in imageDataObject["data"]: 405 | generatedImage = result.get( 406 | "imageBase64Data", 407 | result.get( 408 | "maskImageBase64Data", result.get("guideImageBase64Data", False) 409 | ), 410 | ) 411 | generatedImage = convertIMG2Tensor(generatedImage) 412 | images += (generatedImage,) 413 | images = torch.stack(images, dim=0) 414 | return images 415 | -------------------------------------------------------------------------------- /modules/vaeSearch.py: -------------------------------------------------------------------------------- 1 | class vaeSearch: 2 | @classmethod 3 | def INPUT_TYPES(cls): 4 | return { 5 | "required": { 6 | "VAE Search": ("STRING", { 7 | "tooltip": "Search For A Specific VAE By Name Or Civit AIR Code (eg: ClearVAE).", 8 | }), 9 | "Model Architecture": ([ 10 | "All", 11 | "FLUX.1-Schnell", 12 | "FLUX.1-Dev", 13 | "Pony", 14 | "SD 1.5", 15 | "SD 1.5 Hyper", 16 | "SD 1.5 LCM", 17 | "SD 3", 18 | "SDXL 1.0", 19 | "SDXL 1.0 LCM", 20 | "SDXL Distilled", 21 | "SDXL Hyper", 22 | "SDXL Lightning", 23 | "SDXL Turbo", 24 | ], { 25 | "tooltip": "Choose VAE's Model Architecture To Filter Out The Results.", 26 | "default": "SD 1.5", 27 | }), 28 | "VAEList": ([ 29 | "civitai:23906@28569 (kl-f8-anime2 VAE)", 30 | "civitai:22354@88156 (ClearVAE(SD1.5))", 31 | "civitai:276082@311162 (vae-ft-mse-840000-ema-pruned | 840000 | 840k SD1.5 VAE)", 32 | "civitai:70248@83798 (Color101 VAE)", 33 | "civitai:22354@26689 (ClearVAE (SD1.5))", 34 | "civitai:88390@94036 (difConsistency RAW VAE (Pack))", 35 | ], { 36 | "tooltip": "VAE Results Will Show UP Here So You Could Choose From.", 37 | "default": "civitai:23906@28569 (kl-f8-anime2 VAE)", 38 | }), 39 | "Use Search Value": ("BOOLEAN", { 40 | "tooltip": "When Enabled, the value you've set in the search input will be used instead.\n\nThis is useful in case the model search API is down or you prefer to set the model manually.", 41 | "default": False, 42 | "label_on": "Enabled", 43 | "label_off": "Disabled", 44 | }), 45 | }, 46 | } 47 | 48 | DESCRIPTION = "Directly Search and Connect VAE's to Runware Inference Nodes In ComfyUI." 49 | FUNCTION = "vaeSearch" 50 | RETURN_TYPES = ("RUNWAREVAE",) 51 | RETURN_NAMES = ("Runware VAE",) 52 | CATEGORY = "Runware" 53 | 54 | @classmethod 55 | def VALIDATE_INPUTS(cls, VAEList): 56 | return True 57 | 58 | def vaeSearch(self, **kwargs): 59 | enableSearchValue = kwargs.get("Use Search Value", False) 60 | searchInput = kwargs.get("VAE Search") 61 | 62 | if enableSearchValue: 63 | modelAIRCode = searchInput 64 | else: 65 | crModel = kwargs.get("VAEList") 66 | modelAIRCode = crModel.split(" ")[0] 67 | 68 | return (modelAIRCode,) -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | python-dotenv 2 | brotli 3 | zstd -------------------------------------------------------------------------------- /workflows/Runware_Adetailer.json: -------------------------------------------------------------------------------- 1 | {"last_node_id":7,"last_link_id":8,"nodes":[{"id":4,"type":"Runware Image Masking","pos":[351.7330627441406,132.90028381347656],"size":[315,194],"flags":{},"order":4,"mode":0,"inputs":[{"name":"Image","type":"IMAGE","link":3}],"outputs":[{"name":"Image","type":"IMAGE","links":[4],"slot_index":0},{"name":"Mask Preview","type":"IMAGE","links":null},{"name":"Mask","type":"MASK","links":[5],"slot_index":2}],"properties":{"Node name for S&R":"Runware Image Masking"},"widgets_values":["face_yolov8n",0.25,6,4,4],"bgcolor":"#6c5ce7"},{"id":2,"type":"Runware Model Search","pos":[-75.49443817138672,-288.33795166015625],"size":[315,130],"flags":{"collapsed":true},"order":0,"mode":0,"inputs":[],"outputs":[{"name":"Runware Model","type":"RUNWAREMODEL","links":[2],"slot_index":0}],"properties":{"Node name for S&R":"Runware Model Search"},"widgets_values":["","All","Base Model","civitai:101055@128078 (SDXL v1.0 VAE fix)"],"bgcolor":"#6c5ce7"},{"id":6,"type":"PreviewImage","pos":[1151.2889404296875,-264.380859375],"size":[552.3806762695312,564.0267944335938],"flags":{},"order":6,"mode":0,"inputs":[{"name":"images","type":"IMAGE","link":7}],"outputs":[],"properties":{"Node name for S&R":"PreviewImage"},"widgets_values":[]},{"id":1,"type":"PreviewImage","pos":[357.7220764160156,-258.3918762207031],"size":[307.82305908203125,331.447509765625],"flags":{},"order":3,"mode":0,"inputs":[{"name":"images","type":"IMAGE","link":1}],"outputs":[],"properties":{"Node name for S&R":"PreviewImage"},"widgets_values":[]},{"id":7,"type":"Runware Model Search","pos":[691.1177978515625,-322.2765808105469],"size":[315,130],"flags":{"collapsed":true},"order":1,"mode":0,"inputs":[],"outputs":[{"name":"Runware Model","type":"RUNWAREMODEL","links":[8],"slot_index":0}],"properties":{"Node name for S&R":"Runware Model Search"},"widgets_values":["","All","Base Model","runware:101@1 (Flux Dev)"],"bgcolor":"#6c5ce7"},{"id":3,"type":"Runware Image Inference","pos":[-77.49113464355469,-239.42645263671875],"size":[400,602],"flags":{},"order":2,"mode":0,"inputs":[{"name":"Model","type":"RUNWAREMODEL","link":2},{"name":"Lora","type":"RUNWARELORA","link":null,"shape":7},{"name":"ControlNet","type":"RUNWARECONTROLNET","link":null,"shape":7},{"name":"Refiner","type":"RUNWAREREFINER","link":null,"shape":7},{"name":"seedImage","type":"IMAGE","link":null,"shape":7},{"name":"maskImage","type":"MASK","link":null,"shape":7},{"name":"Embeddings","type":"RUNWAREEMBEDDING","link":null,"shape":7},{"name":"VAE","type":"RUNWAREVAE","link":null,"shape":7}],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[1,3],"slot_index":0}],"properties":{"Node name for S&R":"Runware Image Inference"},"widgets_values":["a man standing in the woods and looking serious, wide screen, zoomed out, realistic, HD, details","","Disabled","Square HD (1024x1024)",1024,1024,20,"Default",4.5,502840077979554,"randomize",0,0.8,false,32,1],"bgcolor":"#6c5ce7"},{"id":5,"type":"Runware Image Inference","pos":[700.1026611328125,-276.35955810546875],"size":[421.96026611328125,602],"flags":{},"order":5,"mode":0,"inputs":[{"name":"Model","type":"RUNWAREMODEL","link":8},{"name":"Lora","type":"RUNWARELORA","link":null,"shape":7},{"name":"ControlNet","type":"RUNWARECONTROLNET","link":null,"shape":7},{"name":"Refiner","type":"RUNWAREREFINER","link":null,"shape":7},{"name":"seedImage","type":"IMAGE","link":4,"shape":7},{"name":"maskImage","type":"MASK","link":5,"shape":7},{"name":"Embeddings","type":"RUNWAREEMBEDDING","link":null,"shape":7},{"name":"VAE","type":"RUNWAREVAE","link":null,"shape":7}],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[7],"slot_index":0}],"properties":{"Node name for S&R":"Runware Image Inference"},"widgets_values":["a man standing in the woods and looking serious, wide screen, zoomed out, realistic, HD, details","","Disabled","Square HD (1024x1024)",1024,1024,30,"Default",2,1108798832542204,"randomize",0,0.8,true,32,1],"bgcolor":"#6c5ce7"}],"links":[[1,3,0,1,0,"IMAGE"],[2,2,0,3,0,"RUNWAREMODEL"],[3,3,0,4,0,"IMAGE"],[4,4,0,5,4,"IMAGE"],[5,4,2,5,5,"MASK"],[7,5,0,6,0,"IMAGE"],[8,7,0,5,0,"RUNWAREMODEL"]],"groups":[],"config":{},"extra":{"ds":{"scale":1.001808834538306,"offset":[518.6927968058118,348.6661916601252]}},"version":0.4} -------------------------------------------------------------------------------- /workflows/Runware_Basic_Background_Removal.json: -------------------------------------------------------------------------------- 1 | {"last_node_id":5,"last_link_id":6,"nodes":[{"id":5,"type":"Runware Background Removal","pos":[481.94317626953125,-300.1940612792969],"size":[333.4461364746094,178],"flags":{},"order":1,"mode":0,"inputs":[{"name":"Image","type":"IMAGE","link":5}],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[6],"slot_index":0}],"properties":{"Node name for S&R":"Runware Background Removal"},"widgets_values":[false,false,false,240,10,10],"bgcolor":"#6c5ce7"},{"id":2,"type":"PreviewImage","pos":[479.01971435546875,-74.14266204833984],"size":[343.48712158203125,246],"flags":{},"order":2,"mode":0,"inputs":[{"name":"images","type":"IMAGE","link":6}],"outputs":[],"properties":{"Node name for S&R":"PreviewImage"}},{"id":1,"type":"LoadImage","pos":[91.22550201416016,-285.5789794921875],"size":[363.7179260253906,459.1794128417969],"flags":{},"order":0,"mode":0,"inputs":[],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[5],"slot_index":0},{"name":"MASK","type":"MASK","links":null}],"properties":{"Node name for S&R":"LoadImage"},"widgets_values":["r2rs1PrivateFLux.png","image"]}],"links":[[5,1,0,5,0,"IMAGE"],[6,5,0,2,0,"IMAGE"]],"groups":[],"config":{},"extra":{"ds":{"scale":1.0263162364614136,"offset":[464.15888590882696,406.82531520554556]}},"version":0.4} -------------------------------------------------------------------------------- /workflows/Runware_Basic_Image_Inference.json: -------------------------------------------------------------------------------- 1 | {"last_node_id":79,"last_link_id":52,"nodes":[{"id":76,"type":"PreviewImage","pos":[890.173095703125,-194.4432373046875],"size":[442.2727355957031,469.6363525390625],"flags":{},"order":3,"mode":0,"inputs":[{"name":"images","type":"IMAGE","link":51}],"outputs":[],"properties":{"Node name for S&R":"PreviewImage"},"widgets_values":[]},{"id":78,"type":"Runware Model Search","pos":[5.445709228515625,-324.625],"size":[315,130],"flags":{"collapsed":false},"order":0,"mode":0,"inputs":[],"outputs":[{"name":"Runware Model","type":"RUNWAREMODEL","links":[52],"slot_index":0}],"properties":{"Node name for S&R":"Runware Model Search"},"widgets_values":["","All","Base Model","runware:100@1 (Flux Schnell)"],"bgcolor":"#6c5ce7"},{"id":77,"type":"Runware Image Inference","pos":[401.4457092285156,-337.6249694824219],"size":[462.1487731933594,735.272705078125],"flags":{},"order":2,"mode":0,"inputs":[{"name":"Model","type":"RUNWAREMODEL","link":52},{"name":"Lora","type":"RUNWARELORA","link":null,"shape":7},{"name":"ControlNet","type":"RUNWARECONTROLNET","link":null,"shape":7},{"name":"Refiner","type":"RUNWAREREFINER","link":null,"shape":7},{"name":"seedImage","type":"IMAGE","link":null,"shape":7},{"name":"maskImage","type":"MASK","link":null,"shape":7},{"name":"Embeddings","type":"RUNWAREEMBEDDING","link":null,"shape":7},{"name":"VAE","type":"RUNWAREVAE","link":null,"shape":7}],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[51],"slot_index":0}],"properties":{"Node name for S&R":"Runware Image Inference"},"widgets_values":["abstract, 1girl, solo, 25 years old, perfect face, close view, dark purple hair, spiked hair, ear piercing, spiked collar, black hoodie, winter, blush, shy smile, portrait, face focus, depth of field, looking at viewer, cinematic lighting, studio quality, symmetrical centered symmetrical artwork, style of krenz cushart, Jordan Grimmer, donato giancola, wlop, artgerm, ultra delicate minimalism, ilya kuvshinov, rossdraws, Norman Rockwell,","","Disabled","Square (512x512)",512,512,4,"Default",6.5,1115161573669306,"randomize",2,0.8,false,32,1],"bgcolor":"#6c5ce7"},{"id":79,"type":"Note","pos":[-61.95427703857422,-118.02499389648438],"size":[432.87603759765625,351.79339599609375],"flags":{},"order":1,"mode":0,"inputs":[],"outputs":[],"properties":{},"widgets_values":["Hello there! 🌟\n\nWelcome to the Runware Image Inference workflow. 🎉\n\nHere’s what you need to know to get started: \n\n- You will need to add your API Key just run the workflow and you will be prompted for that, if you don't have an account you can create a new account and you will get \"Free Credits\" to start :)\n\n- You can explore and search from a variety of models. 🧠✨\n \n- Remember to adjust the settings appropriately to achieve the desired results. ⚙️✅\n\n- Don’t forget to tweak the \"steps\" for optimal performance. 🛠️📈\n\nThe best part? We support a wide range of models, so dive in and have fun experimenting! 🚀❤️\n\ntip: Press \"Ctrl + Alt + E\" To Automatically Enhance Your Prompt\n\ntip: You can reset the model, lora search by clearing the search input and everything will get back to defaults."],"color":"#432","bgcolor":"#653"}],"links":[[51,77,0,76,0,"IMAGE"],[52,78,0,77,0,"RUNWAREMODEL"]],"groups":[],"config":{},"extra":{"ds":{"scale":0.9090909090909091,"offset":[533.379187367039,448.4813355624458]}},"version":0.4} -------------------------------------------------------------------------------- /workflows/Runware_Basic_Image_Inference_IPAdapter.json: -------------------------------------------------------------------------------- 1 | {"last_node_id":5,"last_link_id":4,"nodes":[{"id":5,"type":"LoadImage","pos":[-748.4779663085938,-211.1426239013672],"size":[328.1999816894531,359.1000061035156],"flags":{},"order":0,"mode":0,"inputs":[],"outputs":[{"name":"IMAGE","localized_name":"IMAGE","type":"IMAGE","links":[4]},{"name":"MASK","localized_name":"MASK","type":"MASK","links":null}],"properties":{"Node name for S&R":"LoadImage"},"widgets_values":["ggasdw.jpg","image"]},{"id":1,"type":"PreviewImage","pos":[134.92190551757812,-558.04248046875],"size":[716.7999877929688,717.5],"flags":{},"order":4,"mode":0,"inputs":[{"name":"images","localized_name":"images","type":"IMAGE","link":1}],"outputs":[],"properties":{"Node name for S&R":"PreviewImage"},"widgets_values":[]},{"id":3,"type":"Runware Model Search","pos":[-731.1779174804688,-549.74267578125],"size":[310.5999755859375,154],"flags":{},"order":1,"mode":0,"inputs":[],"outputs":[{"name":"Runware Model","localized_name":"Runware Model","type":"RUNWAREMODEL","links":[2]}],"properties":{"Node name for S&R":"Runware Model Search"},"widgets_values":["","All","Base Model","civitai:101055@128078 (SDXL v1.0 VAE fix)",false],"bgcolor":"#5345bf"},{"id":4,"type":"Runware IPAdapter","pos":[-736.3779907226562,-344.2424011230469],"size":[317.4000244140625,82],"flags":{},"order":2,"mode":0,"inputs":[{"name":"Reference Image","localized_name":"Reference Image","type":"IMAGE","link":4}],"outputs":[{"name":"IPAdapter","localized_name":"IPAdapter","type":"RUNWAREIPAdapter","links":[3]}],"properties":{"Node name for S&R":"Runware IPAdapter"},"widgets_values":["IP Adapter SDXL",1],"bgcolor":"#5345bf"},{"id":2,"type":"Runware Image Inference","pos":[-375.5780334472656,-558.5426635742188],"size":[487,719],"flags":{},"order":3,"mode":0,"inputs":[{"name":"Model","localized_name":"Model","type":"RUNWAREMODEL","link":2},{"name":"Lora","localized_name":"Lora","type":"RUNWARELORA","shape":7,"link":null},{"name":"IPAdapters","localized_name":"IPAdapters","type":"RUNWAREIPAdapter","shape":7,"link":3},{"name":"ControlNet","localized_name":"ControlNet","type":"RUNWARECONTROLNET","shape":7,"link":null},{"name":"Refiner","localized_name":"Refiner","type":"RUNWAREREFINER","shape":7,"link":null},{"name":"seedImage","localized_name":"seedImage","type":"IMAGE","shape":7,"link":null},{"name":"maskImage","localized_name":"maskImage","type":"MASK","shape":7,"link":null},{"name":"Embeddings","localized_name":"Embeddings","type":"RUNWAREEMBEDDING","shape":7,"link":null},{"name":"VAE","localized_name":"VAE","type":"RUNWAREVAE","shape":7,"link":null}],"outputs":[{"name":"IMAGE","localized_name":"IMAGE","type":"IMAGE","links":[1]}],"properties":{"Node name for S&R":"Runware Image Inference"},"widgets_values":["A stylish young woman standing confidently in a vibrant urban setting, wearing a fitted black t-shirt with a bold white skull design, detailed with subtle cracks and shading, paired with ripped blue jeans and combat boots, her hair flowing naturally in the wind, cinematic lighting, highly detailed, photorealistic, 8k resolution, sharp focus, rich textures, dynamic composition, soft bokeh background, vivid colors, intricate details on the skull graphic","blurry, low resolution, distorted, overexposed, underexposed, grainy, artifacts, extra limbs, missing details, poorly drawn hands, unnatural poses, dull colors, oversaturated, flat background, text errors, watermark, crowded composition, deformed face","Disabled","Square HD (1024x1024)",1024,1024,50,"DPM++ 2M SDE",8,599278031163006,"randomize",0,0.8,false,32,4],"bgcolor":"#5345bf"}],"links":[[1,2,0,1,0,"IMAGE"],[2,3,0,2,0,"RUNWAREMODEL"],[3,4,0,2,2,"RUNWAREIPAdapter"],[4,5,0,4,0,"IMAGE"]],"groups":[],"config":{},"extra":{"ds":{"scale":0.9090909090909091,"offset":[1022.3779979367997,667.6425882124634]}},"version":0.4} -------------------------------------------------------------------------------- /workflows/Runware_Basic_Image_Inference_Lora.json: -------------------------------------------------------------------------------- 1 | {"last_node_id":4,"last_link_id":3,"nodes":[{"id":3,"type":"Runware Model Search","pos":[-170.87696838378906,-336.2454528808594],"size":[315,130],"flags":{},"order":0,"mode":0,"inputs":[],"outputs":[{"name":"Runware Model","type":"RUNWAREMODEL","links":[2],"slot_index":0}],"properties":{"Node name for S&R":"Runware Model Search"},"widgets_values":["","All","Base Model","runware:100@1 (Flux Schnell)"],"bgcolor":"#6c5ce7"},{"id":4,"type":"Runware Lora Search","pos":[-162.10772705078125,-143.322265625],"size":[315,196],"flags":{},"order":1,"mode":0,"inputs":[],"outputs":[{"name":"Runware Lora","type":"RUNWARELORA","links":[3],"slot_index":0}],"properties":{"Node name for S&R":"Runware Lora Search"},"widgets_values":["niji","FLUX.1-Schnell","Lora","civitai:768440@859463 (Flux-Better-Art-With-Niji Schnell)",1,null],"bgcolor":"#6c5ce7"},{"id":1,"type":"PreviewImage","pos":[627.12255859375,-365.4764404296875],"size":[458.46142578125,484.9228515625],"flags":{},"order":3,"mode":0,"inputs":[{"name":"images","type":"IMAGE","link":1}],"outputs":[],"properties":{"Node name for S&R":"PreviewImage"},"widgets_values":[]},{"id":2,"type":"Runware Image Inference","pos":[191.5845489501953,-370.3477478027344],"size":[411.69232177734375,628.3076782226562],"flags":{},"order":2,"mode":0,"inputs":[{"name":"Model","type":"RUNWAREMODEL","link":2},{"name":"Lora","type":"RUNWARELORA","link":3,"shape":7},{"name":"ControlNet","type":"RUNWARECONTROLNET","link":null,"shape":7},{"name":"Refiner","type":"RUNWAREREFINER","link":null,"shape":7},{"name":"seedImage","type":"IMAGE","link":null,"shape":7},{"name":"maskImage","type":"MASK","link":null,"shape":7},{"name":"Embeddings","type":"RUNWAREEMBEDDING","link":null,"shape":7},{"name":"VAE","type":"RUNWAREVAE","link":null,"shape":7}],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[1],"slot_index":0}],"properties":{"Node name for S&R":"Runware Image Inference"},"widgets_values":["a guy wearing red glasses, anime, pixiv top monthly, trending on artstation, cinematic, danbooru, zerochan art, kyoto animation, niji","","Disabled","Square (512x512)",512,512,4,"Default",6.5,258656211183721,"randomize",0,0.8,false,32,1],"bgcolor":"#6c5ce7"}],"links":[[1,2,0,1,0,"IMAGE"],[2,3,0,2,0,"RUNWAREMODEL"],[3,4,0,2,1,"RUNWARELORA"]],"groups":[],"config":{},"extra":{"ds":{"scale":1.0263162364614136,"offset":[606.4152342088262,463.3381111055449]}},"version":0.4} -------------------------------------------------------------------------------- /workflows/Runware_Basic_Image_Upscaling.json: -------------------------------------------------------------------------------- 1 | {"last_node_id":5,"last_link_id":3,"nodes":[{"id":4,"type":"Runware Image Upscaler","pos":[139.44570922851562,-279.625],"size":[315,58],"flags":{},"order":1,"mode":0,"inputs":[{"name":"Image","type":"IMAGE","link":3}],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[2],"slot_index":0}],"properties":{"Node name for S&R":"Runware Image Upscaler"},"widgets_values":[4],"bgcolor":"#6c5ce7"},{"id":1,"type":"PreviewImage","pos":[497.6885986328125,-288.6935729980469],"size":[593.5640869140625,614.8204345703125],"flags":{},"order":2,"mode":0,"inputs":[{"name":"images","type":"IMAGE","link":2}],"outputs":[],"properties":{"Node name for S&R":"PreviewImage"},"widgets_values":[]},{"id":5,"type":"LoadImage","pos":[140.2388916015625,-145.11216735839844],"size":[320.84613037109375,411.43585205078125],"flags":{},"order":0,"mode":0,"inputs":[],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[3],"slot_index":0},{"name":"MASK","type":"MASK","links":null}],"properties":{"Node name for S&R":"LoadImage"},"widgets_values":["qwd2s.jpg","image"]}],"links":[[2,4,0,1,0,"IMAGE"],[3,5,0,4,0,"IMAGE"]],"groups":[],"config":{},"extra":{"ds":{"scale":1.0263162364614136,"offset":[452.4665833088276,458.4663183555447]}},"version":0.4} -------------------------------------------------------------------------------- /workflows/Runware_ControlNet.json: -------------------------------------------------------------------------------- 1 | {"last_node_id":6,"last_link_id":6,"nodes":[{"id":4,"type":"LoadImage","pos":[0.36829668283462524,-416.1068420410156],"size":[401.6533203125,314],"flags":{},"order":0,"mode":0,"inputs":[],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[4],"slot_index":0},{"name":"MASK","type":"MASK","links":null}],"properties":{"Node name for S&R":"LoadImage"},"widgets_values":["qwd2qdasf.jpg","image"]},{"id":6,"type":"Runware Model Search","pos":[426.5971374511719,-462.02398681640625],"size":[315,130],"flags":{},"order":1,"mode":0,"inputs":[],"outputs":[{"name":"Runware Model","type":"RUNWAREMODEL","links":[5],"slot_index":0}],"properties":{"Node name for S&R":"Runware Model Search"},"widgets_values":["","All","Base Model","runware:101@1 (Flux Dev)"],"bgcolor":"#6c5ce7"},{"id":5,"type":"Runware ControlNet PreProcessor","pos":[-3.6244616508483887,-163.56333923339844],"size":[390.5999755859375,226],"flags":{},"order":2,"mode":0,"inputs":[{"name":"Image","type":"IMAGE","link":4}],"outputs":[{"name":"GUIDE IMAGE","type":"IMAGE","links":[6],"slot_index":0}],"properties":{"Node name for S&R":"Runware ControlNet PreProcessor"},"widgets_values":["OpenPose",true,100,200,false,"Square HD (1024x1024)",1024,1024],"bgcolor":"#6c5ce7"},{"id":1,"type":"PreviewImage","pos":[843.8425903320312,-253.40106201171875],"size":[593.306640625,620.9238891601562],"flags":{},"order":5,"mode":0,"inputs":[{"name":"images","type":"IMAGE","link":1}],"outputs":[],"properties":{"Node name for S&R":"PreviewImage"},"widgets_values":[]},{"id":3,"type":"Runware ControlNet","pos":[4.361094951629639,107.9453125],"size":[380.4000244140625,274],"flags":{},"order":3,"mode":0,"inputs":[{"name":"Guide Image","type":"IMAGE","link":6}],"outputs":[{"name":"Runware ControlNet","type":"RUNWARECONTROLNET","links":[2],"slot_index":0}],"properties":{"Node name for S&R":"Runware ControlNet"},"widgets_values":["","FLUX.1-Dev","Open Pose","runware:29@1 (ControlNetFlux (CNFlux) instantx-union)",-1,0,-1,65,"balanced",0.8],"bgcolor":"#6c5ce7"},{"id":2,"type":"Runware Image Inference","pos":[415.61724853515625,-281.35052490234375],"size":[413.9747009277344,664.8140258789062],"flags":{},"order":4,"mode":0,"inputs":[{"name":"Model","type":"RUNWAREMODEL","link":5},{"name":"Lora","type":"RUNWARELORA","link":null,"shape":7},{"name":"ControlNet","type":"RUNWARECONTROLNET","link":2,"shape":7},{"name":"Refiner","type":"RUNWAREREFINER","link":null,"shape":7},{"name":"seedImage","type":"IMAGE","link":null,"shape":7},{"name":"maskImage","type":"MASK","link":null,"shape":7},{"name":"Embeddings","type":"RUNWAREEMBEDDING","link":null,"shape":7},{"name":"VAE","type":"RUNWAREVAE","link":null,"shape":7}],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[1],"slot_index":0}],"properties":{"Node name for S&R":"Runware Image Inference"},"widgets_values":["a girl wearing a red dress, with curly hair, very anime!!! fine - face, pretty face, realistic shaded perfect face, fine details by stanley artgerm lau, wlop, rossdraws, james jean, andrei riabovitchev, marc simonetti, and sakimichan, trending on artstation","","Disabled","Square HD (1024x1024)",1024,1024,30,"Default",2.5,164154394413164,"randomize",0,0.8,false,32,1],"bgcolor":"#6c5ce7"}],"links":[[1,2,0,1,0,"IMAGE"],[2,3,0,2,2,"RUNWARECONTROLNET"],[4,4,0,5,0,"IMAGE"],[5,6,0,2,0,"RUNWAREMODEL"],[6,5,0,3,0,"IMAGE"]],"groups":[],"config":{},"extra":{"ds":{"scale":1.001808834538306,"offset":[326.04127153857115,393.5849410747678]}},"version":0.4} -------------------------------------------------------------------------------- /workflows/Runware_Image_Inference_Captioning.json: -------------------------------------------------------------------------------- 1 | {"last_node_id":14,"last_link_id":13,"nodes":[{"id":11,"type":"Runware Image Inference","pos":[831.05029296875,183.88824462890625],"size":[400,602],"flags":{},"order":3,"mode":0,"inputs":[{"name":"Model","type":"RUNWAREMODEL","link":13},{"name":"Lora","type":"RUNWARELORA","link":null,"shape":7},{"name":"ControlNet","type":"RUNWARECONTROLNET","link":null,"shape":7},{"name":"Refiner","type":"RUNWAREREFINER","link":null,"shape":7},{"name":"seedImage","type":"IMAGE","link":null,"shape":7},{"name":"maskImage","type":"MASK","link":null,"shape":7},{"name":"Embeddings","type":"RUNWAREEMBEDDING","link":null,"shape":7},{"name":"VAE","type":"RUNWAREVAE","link":null,"shape":7},{"name":"positivePrompt","type":"STRING","link":11,"widget":{"name":"positivePrompt"}}],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[10],"slot_index":0}],"properties":{"Node name for S&R":"Runware Image Inference"},"widgets_values":["","","Disabled","Square (512x512)",512,512,4,"Default",6.5,229459996454677,"randomize",0,0.9,false,32,1],"bgcolor":"#6c5ce7"},{"id":14,"type":"Runware Model Search","pos":[497.6798095703125,175.9790802001953],"size":[315,130],"flags":{"collapsed":false},"order":0,"mode":0,"inputs":[],"outputs":[{"name":"Runware Model","type":"RUNWAREMODEL","links":[13],"slot_index":0}],"properties":{"Node name for S&R":"Runware Model Search"},"widgets_values":["","All","Base Model","runware:100@1 (Flux Schnell)"],"bgcolor":"#6c5ce7"},{"id":10,"type":"LoadImage","pos":[241.08689880371094,350.3967590332031],"size":[236.30767822265625,325.9602966308594],"flags":{},"order":1,"mode":0,"inputs":[],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[12],"slot_index":0},{"name":"MASK","type":"MASK","links":null}],"properties":{"Node name for S&R":"LoadImage"},"widgets_values":["qwd2s.jpg","image"]},{"id":13,"type":"Runware Image Caption","pos":[513.2827758789062,373.235595703125],"size":[278.0941162109375,190.92190551757812],"flags":{},"order":2,"mode":0,"inputs":[{"name":"Image","type":"IMAGE","link":12}],"outputs":[{"name":"IMAGE PROMPT","type":"STRING","links":[11],"slot_index":0}],"properties":{"Node name for S&R":"Runware Image Caption"},"widgets_values":[false,"cartoon girl with a skull on her and a city in the background, profile picture 1024px, 🎀 🧟 🍓 🧚, very short curly hair, comicbook art, icon for an ai app, dressed black hoodie, e-girl, anime colours"],"bgcolor":"#6c5ce7"},{"id":12,"type":"PreviewImage","pos":[1269.0423583984375,236.072021484375],"size":[515.3893432617188,538.380126953125],"flags":{},"order":4,"mode":0,"inputs":[{"name":"images","type":"IMAGE","link":10}],"outputs":[],"properties":{"Node name for S&R":"PreviewImage"},"widgets_values":[]}],"links":[[10,11,0,12,0,"IMAGE"],[11,13,0,11,8,"STRING"],[12,10,0,13,0,"IMAGE"],[13,14,0,11,0,"RUNWAREMODEL"]],"groups":[],"config":{},"extra":{"ds":{"scale":1.026316236461414,"offset":[-218.03843002977476,-95.65528646365456]}},"version":0.4} -------------------------------------------------------------------------------- /workflows/Runware_Image_Inference_Embedding.json: -------------------------------------------------------------------------------- 1 | {"last_node_id":5,"last_link_id":4,"nodes":[{"id":1,"type":"Runware Model Search","pos":[438.6363220214844,72.83195495605469],"size":[315,130],"flags":{},"order":0,"mode":0,"inputs":[],"outputs":[{"name":"Runware Model","type":"RUNWAREMODEL","links":[2],"slot_index":0}],"properties":{"Node name for S&R":"Runware Model Search"},"widgets_values":["","All","Base Model","civitai:133005@288982 (SDXL Juggernaut XL V8)"],"bgcolor":"#6c5ce7"},{"id":3,"type":"PreviewImage","pos":[1289.6943359375,108.03620147705078],"size":[682.38671875,714.5171508789062],"flags":{},"order":3,"mode":0,"inputs":[{"name":"images","type":"IMAGE","link":1}],"outputs":[],"properties":{"Node name for S&R":"PreviewImage"}},{"id":4,"type":"Runware Image Inference","pos":[791.6922607421875,67.16986083984375],"size":[444.3775939941406,702.0932006835938],"flags":{},"order":2,"mode":0,"inputs":[{"name":"Model","type":"RUNWAREMODEL","link":2},{"name":"Lora","type":"RUNWARELORA","link":null,"shape":7},{"name":"ControlNet","type":"RUNWARECONTROLNET","link":null,"shape":7},{"name":"Refiner","type":"RUNWAREREFINER","link":null,"shape":7},{"name":"seedImage","type":"IMAGE","link":null,"shape":7},{"name":"maskImage","type":"MASK","link":null,"shape":7},{"name":"Embeddings","type":"RUNWAREEMBEDDING","link":4,"shape":7},{"name":"VAE","type":"RUNWAREVAE","link":null,"shape":7}],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[1],"slot_index":0}],"properties":{"Node name for S&R":"Runware Image Inference"},"widgets_values":["analog film photo photograph of a majestic penguin breaking free from a intricately wrapped Christmas gift box, showcasing a delightful mix of festive and wildlife elements. Capture the penguin's exhilarating moment as it bursts forth from the box, its tuxedo-like feathers glistening with dew and holiday cheer.\nCompositionally, frame the scene from a low angle, placing the gift box at the top of the image, allowing the viewer's gaze to follow the penguin's trajectory upward. Employ a shallow depth of field, blurring the surrounding wrapping paper and gift embellishments to emphasize the penguin's heroic breakout. Soft, warm artificial lighting will add a cozy atmosphere, while also highlighting the festive colors and textures of the box.\nanalog film photo photograph of a majestic penguin breaking free from a intricately wrapped Christmas gift box, showcasing a delightful mix of festive and wildlife elements. Capture the penguin's exhilarating moment as it bursts forth from the box, its tuxedo-like feathers glistening with dew and holiday cheer. Compositionally, frame the scene from a low angle, placing the gift box at the top of the image, allowing the viewer's gaze to follow the penguin's trajectory upward. Employ a shallow depth of field, blurring the surrounding wrapping paper and gift embellishments to emphasize the penguin's heroic breakout.Artistically, draw inspiration from the whimsical world of children's illustrations and classic animated films, where penguins are often depicted in charming, expressive poses. The overall mood should be one of joyful abandon, capturing the magic and surprise of the moment when the penguin finally achieves its freedom.\nTechnical specifications: Resolution: 4096 x 3040 pixels; Aspect Ratio: 16:9; File Format: JPEG or PNG; Camera Angle: Low-angle shot with a slight tilt upwards","unaestheticXL2v10","Disabled","Square HD (1024x1024)",1024,1024,30,"DPM++ 2M SDE Karras",8,754029311997231,"fixed",0,0.8,false,32,1],"bgcolor":"#6c5ce7"},{"id":5,"type":"Runware Embedding Search","pos":[430.3169860839844,265.5909729003906],"size":[315,172],"flags":{},"order":1,"mode":0,"inputs":[],"outputs":[{"name":"Runware Embedding","type":"RUNWAREEMBEDDING","links":[4],"slot_index":0}],"properties":{"Node name for S&R":"Runware Embedding Search"},"widgets_values":["","SDXL 1.0","civitai:119032@302265 (unaestheticXL | Negative TI 2v10)",1,null],"bgcolor":"#6c5ce7"}],"links":[[1,4,0,3,0,"IMAGE"],[2,1,0,4,0,"RUNWAREMODEL"],[4,5,0,4,6,"RUNWAREEMBEDDING"]],"groups":[],"config":{},"extra":{"ds":{"scale":0.7710865788590636,"offset":[212.93107457927547,155.16249707244557]}},"version":0.4} -------------------------------------------------------------------------------- /workflows/Runware_Image_Inference_Refiner.json: -------------------------------------------------------------------------------- 1 | {"last_node_id":6,"last_link_id":6,"nodes":[{"id":1,"type":"Runware Model Search","pos":[-180.6204376220703,-331.3739318847656],"size":[315,130],"flags":{},"order":0,"mode":0,"inputs":[],"outputs":[{"name":"Runware Model","type":"RUNWAREMODEL","links":[1],"slot_index":0}],"properties":{"Node name for S&R":"Runware Model Search"},"widgets_values":["","All","Base Model","civitai:101055@128078 (SDXL v1.0 VAE fix)"],"bgcolor":"#6c5ce7"},{"id":4,"type":"PreviewImage","pos":[616.4048461914062,-342.0920104980469],"size":[572.4613647460938,603.7946166992188],"flags":{},"order":4,"mode":0,"inputs":[{"name":"images","type":"IMAGE","link":3}],"outputs":[],"properties":{"Node name for S&R":"PreviewImage"},"widgets_values":[]},{"id":6,"type":"Runware Model Search","pos":[-169.90292358398438,-124.80946350097656],"size":[315,130],"flags":{"collapsed":false},"order":1,"mode":0,"inputs":[],"outputs":[{"name":"Runware Model","type":"RUNWAREMODEL","links":[6],"slot_index":0}],"properties":{"Node name for S&R":"Runware Model Search"},"widgets_values":["","All","Base Model","civitai:133005@288982 (SDXL Juggernaut XL V8)"],"bgcolor":"#6c5ce7"},{"id":5,"type":"Runware Refiner","pos":[-175.74874877929688,74.93402099609375],"size":[315,82],"flags":{},"order":2,"mode":0,"inputs":[{"name":"Model","type":"RUNWAREMODEL","link":6}],"outputs":[{"name":"RUNWAREREFINER","type":"RUNWAREREFINER","links":[4],"slot_index":0}],"properties":{"Node name for S&R":"Runware Refiner"},"widgets_values":[0,60],"bgcolor":"#6c5ce7"},{"id":2,"type":"Runware Image Inference","pos":[183.7898406982422,-340.14276123046875],"size":[413.6410217285156,654.6153564453125],"flags":{},"order":3,"mode":0,"inputs":[{"name":"Model","type":"RUNWAREMODEL","link":1},{"name":"Lora","type":"RUNWARELORA","link":null,"shape":7},{"name":"ControlNet","type":"RUNWARECONTROLNET","link":null,"shape":7},{"name":"Refiner","type":"RUNWAREREFINER","link":4,"shape":7},{"name":"seedImage","type":"IMAGE","link":null,"shape":7},{"name":"maskImage","type":"MASK","link":null,"shape":7},{"name":"Embeddings","type":"RUNWAREEMBEDDING","link":null,"shape":7},{"name":"VAE","type":"RUNWAREVAE","link":null,"shape":7}],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[3],"slot_index":0}],"properties":{"Node name for S&R":"Runware Image Inference"},"widgets_values":["A juicy burger on the lunar surface, an astronaut in a sleek white spacesuit gestures towards it, with the vibrant blue Earth rising behind him, soft golden light illuminating the burger's crispy bun and melted cheese, casting a warm glow on the desolate moon rocks.","","Disabled","Square HD (1024x1024)",1024,1024,50,"Default",6.5,362638305241772,"randomize",0,0.8,false,32,1],"bgcolor":"#6c5ce7"}],"links":[[1,1,0,2,0,"RUNWAREMODEL"],[3,2,0,4,0,"IMAGE"],[4,5,0,2,3,"RUNWAREREFINER"],[6,6,0,5,0,"RUNWAREMODEL"]],"groups":[],"config":{},"extra":{"ds":{"scale":1.0263162364614136,"offset":[568.4152507588258,429.23556185554514]}},"version":0.4} -------------------------------------------------------------------------------- /workflows/Runware_Image_Inference_VAE.json: -------------------------------------------------------------------------------- 1 | {"last_node_id":6,"last_link_id":3,"nodes":[{"id":4,"type":"Runware Model Search","pos":[201.85939025878906,-115.19313049316406],"size":[315,130],"flags":{},"order":0,"mode":0,"inputs":[],"outputs":[{"name":"Runware Model","type":"RUNWAREMODEL","links":[2],"slot_index":0}],"properties":{"Node name for S&R":"Runware Model Search"},"widgets_values":["","All","Base Model","civitai:133005@288982 (SDXL Juggernaut XL V8)"],"bgcolor":"#6c5ce7"},{"id":5,"type":"Runware VAE Search","pos":[197.4307403564453,76.13542938232422],"size":[315,106],"flags":{},"order":1,"mode":0,"inputs":[],"outputs":[{"name":"Runware VAE","type":"RUNWAREVAE","links":[3],"slot_index":0}],"properties":{"Node name for S&R":"Runware VAE Search"},"widgets_values":["","SDXL 1.0","civitai:357506@399618 (Grey VAE For SDXL v1)"],"bgcolor":"#6c5ce7"},{"id":3,"type":"PreviewImage","pos":[1052.9173583984375,-79.98888397216797],"size":[682.38671875,714.5171508789062],"flags":{},"order":3,"mode":0,"inputs":[{"name":"images","type":"IMAGE","link":1}],"outputs":[],"properties":{"Node name for S&R":"PreviewImage"}},{"id":2,"type":"Runware Image Inference","pos":[554.9153442382812,-120.855224609375],"size":[444.3775939941406,702.0932006835938],"flags":{},"order":2,"mode":0,"inputs":[{"name":"Model","type":"RUNWAREMODEL","link":2},{"name":"Lora","type":"RUNWARELORA","link":null,"shape":7},{"name":"ControlNet","type":"RUNWARECONTROLNET","link":null,"shape":7},{"name":"Refiner","type":"RUNWAREREFINER","link":null,"shape":7},{"name":"seedImage","type":"IMAGE","link":null,"shape":7},{"name":"maskImage","type":"MASK","link":null,"shape":7},{"name":"Embeddings","type":"RUNWAREEMBEDDING","link":null,"shape":7},{"name":"VAE","type":"RUNWAREVAE","link":3,"shape":7}],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[1],"slot_index":0}],"properties":{"Node name for S&R":"Runware Image Inference"},"widgets_values":["analog film photo photograph of a majestic penguin breaking free from a intricately wrapped Christmas gift box, showcasing a delightful mix of festive and wildlife elements. Capture the penguin's exhilarating moment as it bursts forth from the box, its tuxedo-like feathers glistening with dew and holiday cheer.\nCompositionally, frame the scene from a low angle, placing the gift box at the top of the image, allowing the viewer's gaze to follow the penguin's trajectory upward. Employ a shallow depth of field, blurring the surrounding wrapping paper and gift embellishments to emphasize the penguin's heroic breakout. Soft, warm artificial lighting will add a cozy atmosphere, while also highlighting the festive colors and textures of the box.\nanalog film photo photograph of a majestic penguin breaking free from a intricately wrapped Christmas gift box, showcasing a delightful mix of festive and wildlife elements. Capture the penguin's exhilarating moment as it bursts forth from the box, its tuxedo-like feathers glistening with dew and holiday cheer. Compositionally, frame the scene from a low angle, placing the gift box at the top of the image, allowing the viewer's gaze to follow the penguin's trajectory upward. Employ a shallow depth of field, blurring the surrounding wrapping paper and gift embellishments to emphasize the penguin's heroic breakout.Artistically, draw inspiration from the whimsical world of children's illustrations and classic animated films, where penguins are often depicted in charming, expressive poses. The overall mood should be one of joyful abandon, capturing the magic and surprise of the moment when the penguin finally achieves its freedom.\nTechnical specifications: Resolution: 4096 x 3040 pixels; Aspect Ratio: 16:9; File Format: JPEG or PNG; Camera Angle: Low-angle shot with a slight tilt upwards","","Disabled","Square HD (1024x1024)",1024,1024,30,"DPM++ 2M SDE Karras",8,754029311997231,"fixed",0,0.8,false,32,1],"bgcolor":"#6c5ce7"}],"links":[[1,2,0,3,0,"IMAGE"],[2,4,0,2,0,"RUNWAREMODEL"],[3,5,0,2,7,"RUNWAREVAE"]],"groups":[],"config":{},"extra":{"ds":{"scale":0.7710865788590636,"offset":[520.2895561011256,319.8651432887956]}},"version":0.4} -------------------------------------------------------------------------------- /workflows/Runware_Inpainting.json: -------------------------------------------------------------------------------- 1 | {"last_node_id":30,"last_link_id":36,"nodes":[{"id":26,"type":"LoadImage","pos":[-440.0780029296875,-294.6051025390625],"size":[371,462],"flags":{},"order":0,"mode":0,"inputs":[],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[31],"slot_index":0},{"name":"MASK","type":"MASK","links":[32],"slot_index":1}],"properties":{"Node name for S&R":"LoadImage"},"widgets_values":["clipspace/clipspace-mask-6099630.100000024.png [input]","image"]},{"id":23,"type":"PreviewImage","pos":[423.9219970703125,-451.6051025390625],"size":[553,581],"flags":{},"order":3,"mode":0,"inputs":[{"name":"images","type":"IMAGE","link":29}],"outputs":[],"properties":{"Node name for S&R":"PreviewImage"},"widgets_values":[]},{"id":25,"type":"Runware Model Search","pos":[-414.0780029296875,-472.6051025390625],"size":[315,130],"flags":{"collapsed":false},"order":1,"mode":0,"inputs":[],"outputs":[{"name":"Runware Model","type":"RUNWAREMODEL","links":[30],"slot_index":0}],"properties":{"Node name for S&R":"Runware Model Search"},"widgets_values":["","SDXL Lightning","Base Model","civitai:133005@357609 (Juggernaut XL from RunDiffusion V9+RDPhoto2-Lightning_4S)"],"bgcolor":"#6c5ce7"},{"id":24,"type":"Runware Image Inference","pos":[-42.0780029296875,-489.6051025390625],"size":[443,656],"flags":{},"order":2,"mode":0,"inputs":[{"name":"Model","type":"RUNWAREMODEL","link":30},{"name":"Lora","type":"RUNWARELORA","link":null,"shape":7},{"name":"ControlNet","type":"RUNWARECONTROLNET","link":null,"shape":7},{"name":"Refiner","type":"RUNWAREREFINER","link":null,"shape":7},{"name":"seedImage","type":"IMAGE","link":31,"shape":7},{"name":"maskImage","type":"MASK","link":32,"shape":7},{"name":"Embeddings","type":"RUNWAREEMBEDDING","link":null,"shape":7},{"name":"VAE","type":"RUNWAREVAE","link":null,"shape":7}],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[29],"slot_index":0}],"properties":{"Node name for S&R":"Runware Image Inference"},"widgets_values":["a cat","","Disabled","Square HD (1024x1024)",1024,1024,8,"Default",2.5,696000855492372,"randomize",0,0.79,true,50,1],"bgcolor":"#6c5ce7"}],"links":[[29,24,0,23,0,"IMAGE"],[30,25,0,24,0,"RUNWAREMODEL"],[31,26,0,24,4,"IMAGE"],[32,26,1,24,5,"MASK"]],"groups":[],"config":{},"extra":{"ds":{"scale":1,"offset":[840.0779979367994,566.0425882124623]}},"version":0.4} -------------------------------------------------------------------------------- /workflows/Runware_Multi_Inference.json: -------------------------------------------------------------------------------- 1 | {"id":"33be6027-1bdd-47dc-9bce-9a9b667aff69","revision":0,"last_node_id":54,"last_link_id":78,"nodes":[{"id":14,"type":"Runware Image Inference","pos":[63.60523986816406,-263.4772033691406],"size":[400,702],"flags":{"collapsed":true},"order":5,"mode":0,"inputs":[{"localized_name":"Model","name":"Model","type":"RUNWAREMODEL","link":13},{"localized_name":"Accelerator","name":"Accelerator","shape":7,"type":"RUNWAREACCELERATOR","link":null},{"localized_name":"Lora","name":"Lora","shape":7,"type":"RUNWARELORA","link":null},{"localized_name":"Outpainting","name":"Outpainting","shape":7,"type":"RUNWAREOUTPAINT","link":null},{"localized_name":"IPAdapters","name":"IPAdapters","shape":7,"type":"RUNWAREIPAdapter","link":null},{"localized_name":"ControlNet","name":"ControlNet","shape":7,"type":"RUNWARECONTROLNET","link":null},{"localized_name":"Refiner","name":"Refiner","shape":7,"type":"RUNWAREREFINER","link":null},{"localized_name":"seedImage","name":"seedImage","shape":7,"type":"IMAGE","link":null},{"localized_name":"maskImage","name":"maskImage","shape":7,"type":"MASK","link":null},{"localized_name":"Embeddings","name":"Embeddings","shape":7,"type":"RUNWAREEMBEDDING","link":null},{"localized_name":"VAE","name":"VAE","shape":7,"type":"RUNWAREVAE","link":null},{"localized_name":"positivePrompt","name":"positivePrompt","type":"STRING","widget":{"name":"positivePrompt"},"link":41},{"localized_name":"negativePrompt","name":"negativePrompt","type":"STRING","widget":{"name":"negativePrompt"},"link":null},{"localized_name":"Multi Inference Mode","name":"Multi Inference Mode","type":"BOOLEAN","widget":{"name":"Multi Inference Mode"},"link":null},{"localized_name":"Prompt Weighting","name":"Prompt Weighting","type":"COMBO","widget":{"name":"Prompt Weighting"},"link":null},{"localized_name":"dimensions","name":"dimensions","type":"COMBO","widget":{"name":"dimensions"},"link":null},{"localized_name":"width","name":"width","type":"INT","widget":{"name":"width"},"link":null},{"localized_name":"height","name":"height","type":"INT","widget":{"name":"height"},"link":null},{"localized_name":"steps","name":"steps","type":"INT","widget":{"name":"steps"},"link":null},{"localized_name":"scheduler","name":"scheduler","type":"COMBO","widget":{"name":"scheduler"},"link":null},{"localized_name":"cfgScale","name":"cfgScale","type":"FLOAT","widget":{"name":"cfgScale"},"link":null},{"localized_name":"seed","name":"seed","type":"INT","widget":{"name":"seed"},"link":null},{"localized_name":"clipSkip","name":"clipSkip","type":"INT","widget":{"name":"clipSkip"},"link":null},{"localized_name":"strength","name":"strength","type":"FLOAT","widget":{"name":"strength"},"link":null},{"localized_name":"Mask Margin","name":"Mask Margin","type":"BOOLEAN","widget":{"name":"Mask Margin"},"link":null},{"localized_name":"maskMargin","name":"maskMargin","type":"INT","widget":{"name":"maskMargin"},"link":null},{"localized_name":"batchSize","name":"batchSize","type":"INT","widget":{"name":"batchSize"},"link":null}],"outputs":[{"localized_name":"IMAGE","name":"IMAGE","type":"IMAGE","links":[]},{"localized_name":"RW-Task","name":"RW-Task","type":"RUNWARETASK","links":[42]}],"properties":{"Node name for S&R":"Runware Image Inference"},"widgets_values":["a very cute cat in studio ghibli style, big eyes, details, 4k, anime, cinematic scene, neon lights","",true,"Disabled","Square (512x512)",512,512,4,"DPM++ 2M SDE",3,1007228176077577,"randomize",0,0.8,false,32,1],"bgcolor":"#5345bf"},{"id":48,"type":"Runware Image Inference","pos":[67.12537384033203,-116.10993957519531],"size":[400,702],"flags":{"collapsed":true},"order":6,"mode":0,"inputs":[{"localized_name":"Model","name":"Model","type":"RUNWAREMODEL","link":72},{"localized_name":"Accelerator","name":"Accelerator","shape":7,"type":"RUNWAREACCELERATOR","link":null},{"localized_name":"Lora","name":"Lora","shape":7,"type":"RUNWARELORA","link":null},{"localized_name":"Outpainting","name":"Outpainting","shape":7,"type":"RUNWAREOUTPAINT","link":null},{"localized_name":"IPAdapters","name":"IPAdapters","shape":7,"type":"RUNWAREIPAdapter","link":null},{"localized_name":"ControlNet","name":"ControlNet","shape":7,"type":"RUNWARECONTROLNET","link":null},{"localized_name":"Refiner","name":"Refiner","shape":7,"type":"RUNWAREREFINER","link":null},{"localized_name":"seedImage","name":"seedImage","shape":7,"type":"IMAGE","link":null},{"localized_name":"maskImage","name":"maskImage","shape":7,"type":"MASK","link":null},{"localized_name":"Embeddings","name":"Embeddings","shape":7,"type":"RUNWAREEMBEDDING","link":null},{"localized_name":"VAE","name":"VAE","shape":7,"type":"RUNWAREVAE","link":null},{"localized_name":"positivePrompt","name":"positivePrompt","type":"STRING","widget":{"name":"positivePrompt"},"link":69},{"localized_name":"negativePrompt","name":"negativePrompt","type":"STRING","widget":{"name":"negativePrompt"},"link":null},{"localized_name":"Multi Inference Mode","name":"Multi Inference Mode","type":"BOOLEAN","widget":{"name":"Multi Inference Mode"},"link":null},{"localized_name":"Prompt Weighting","name":"Prompt Weighting","type":"COMBO","widget":{"name":"Prompt Weighting"},"link":null},{"localized_name":"dimensions","name":"dimensions","type":"COMBO","widget":{"name":"dimensions"},"link":null},{"localized_name":"width","name":"width","type":"INT","widget":{"name":"width"},"link":null},{"localized_name":"height","name":"height","type":"INT","widget":{"name":"height"},"link":null},{"localized_name":"steps","name":"steps","type":"INT","widget":{"name":"steps"},"link":null},{"localized_name":"scheduler","name":"scheduler","type":"COMBO","widget":{"name":"scheduler"},"link":null},{"localized_name":"cfgScale","name":"cfgScale","type":"FLOAT","widget":{"name":"cfgScale"},"link":null},{"localized_name":"seed","name":"seed","type":"INT","widget":{"name":"seed"},"link":null},{"localized_name":"clipSkip","name":"clipSkip","type":"INT","widget":{"name":"clipSkip"},"link":null},{"localized_name":"strength","name":"strength","type":"FLOAT","widget":{"name":"strength"},"link":null},{"localized_name":"Mask Margin","name":"Mask Margin","type":"BOOLEAN","widget":{"name":"Mask Margin"},"link":null},{"localized_name":"maskMargin","name":"maskMargin","type":"INT","widget":{"name":"maskMargin"},"link":null},{"localized_name":"batchSize","name":"batchSize","type":"INT","widget":{"name":"batchSize"},"link":null}],"outputs":[{"localized_name":"IMAGE","name":"IMAGE","type":"IMAGE","links":[]},{"localized_name":"RW-Task","name":"RW-Task","type":"RUNWARETASK","links":[75]}],"properties":{"Node name for S&R":"Runware Image Inference"},"widgets_values":["a cool dog looks very happy, snow, cozy","",true,"Disabled","Square (512x512)",512,512,4,"DPM++ 2M SDE",3,814536205716626,"randomize",0,0.8,false,32,1],"bgcolor":"#5345bf"},{"id":50,"type":"Runware Image Inference","pos":[69.54536437988281,25.459915161132812],"size":[400,702],"flags":{"collapsed":true},"order":7,"mode":0,"inputs":[{"localized_name":"Model","name":"Model","type":"RUNWAREMODEL","link":73},{"localized_name":"Accelerator","name":"Accelerator","shape":7,"type":"RUNWAREACCELERATOR","link":null},{"localized_name":"Lora","name":"Lora","shape":7,"type":"RUNWARELORA","link":null},{"localized_name":"Outpainting","name":"Outpainting","shape":7,"type":"RUNWAREOUTPAINT","link":null},{"localized_name":"IPAdapters","name":"IPAdapters","shape":7,"type":"RUNWAREIPAdapter","link":null},{"localized_name":"ControlNet","name":"ControlNet","shape":7,"type":"RUNWARECONTROLNET","link":null},{"localized_name":"Refiner","name":"Refiner","shape":7,"type":"RUNWAREREFINER","link":null},{"localized_name":"seedImage","name":"seedImage","shape":7,"type":"IMAGE","link":null},{"localized_name":"maskImage","name":"maskImage","shape":7,"type":"MASK","link":null},{"localized_name":"Embeddings","name":"Embeddings","shape":7,"type":"RUNWAREEMBEDDING","link":null},{"localized_name":"VAE","name":"VAE","shape":7,"type":"RUNWAREVAE","link":null},{"localized_name":"positivePrompt","name":"positivePrompt","type":"STRING","widget":{"name":"positivePrompt"},"link":70},{"localized_name":"negativePrompt","name":"negativePrompt","type":"STRING","widget":{"name":"negativePrompt"},"link":null},{"localized_name":"Multi Inference Mode","name":"Multi Inference Mode","type":"BOOLEAN","widget":{"name":"Multi Inference Mode"},"link":null},{"localized_name":"Prompt Weighting","name":"Prompt Weighting","type":"COMBO","widget":{"name":"Prompt Weighting"},"link":null},{"localized_name":"dimensions","name":"dimensions","type":"COMBO","widget":{"name":"dimensions"},"link":null},{"localized_name":"width","name":"width","type":"INT","widget":{"name":"width"},"link":null},{"localized_name":"height","name":"height","type":"INT","widget":{"name":"height"},"link":null},{"localized_name":"steps","name":"steps","type":"INT","widget":{"name":"steps"},"link":null},{"localized_name":"scheduler","name":"scheduler","type":"COMBO","widget":{"name":"scheduler"},"link":null},{"localized_name":"cfgScale","name":"cfgScale","type":"FLOAT","widget":{"name":"cfgScale"},"link":null},{"localized_name":"seed","name":"seed","type":"INT","widget":{"name":"seed"},"link":null},{"localized_name":"clipSkip","name":"clipSkip","type":"INT","widget":{"name":"clipSkip"},"link":null},{"localized_name":"strength","name":"strength","type":"FLOAT","widget":{"name":"strength"},"link":null},{"localized_name":"Mask Margin","name":"Mask Margin","type":"BOOLEAN","widget":{"name":"Mask Margin"},"link":null},{"localized_name":"maskMargin","name":"maskMargin","type":"INT","widget":{"name":"maskMargin"},"link":null},{"localized_name":"batchSize","name":"batchSize","type":"INT","widget":{"name":"batchSize"},"link":null}],"outputs":[{"localized_name":"IMAGE","name":"IMAGE","type":"IMAGE","links":[]},{"localized_name":"RW-Task","name":"RW-Task","type":"RUNWARETASK","links":[76]}],"properties":{"Node name for S&R":"Runware Image Inference"},"widgets_values":["a fish inside a fish tank, realistic, 4k, detailed","",true,"Disabled","Square (512x512)",512,512,4,"DPM++ 2M SDE",3,376518581510528,"randomize",0,0.8,false,32,1],"bgcolor":"#5345bf"},{"id":52,"type":"Runware Image Inference","pos":[65.91541290283203,169.4499053955078],"size":[400,702],"flags":{"collapsed":true},"order":8,"mode":0,"inputs":[{"localized_name":"Model","name":"Model","type":"RUNWAREMODEL","link":74},{"localized_name":"Accelerator","name":"Accelerator","shape":7,"type":"RUNWAREACCELERATOR","link":null},{"localized_name":"Lora","name":"Lora","shape":7,"type":"RUNWARELORA","link":null},{"localized_name":"Outpainting","name":"Outpainting","shape":7,"type":"RUNWAREOUTPAINT","link":null},{"localized_name":"IPAdapters","name":"IPAdapters","shape":7,"type":"RUNWAREIPAdapter","link":null},{"localized_name":"ControlNet","name":"ControlNet","shape":7,"type":"RUNWARECONTROLNET","link":null},{"localized_name":"Refiner","name":"Refiner","shape":7,"type":"RUNWAREREFINER","link":null},{"localized_name":"seedImage","name":"seedImage","shape":7,"type":"IMAGE","link":null},{"localized_name":"maskImage","name":"maskImage","shape":7,"type":"MASK","link":null},{"localized_name":"Embeddings","name":"Embeddings","shape":7,"type":"RUNWAREEMBEDDING","link":null},{"localized_name":"VAE","name":"VAE","shape":7,"type":"RUNWAREVAE","link":null},{"localized_name":"positivePrompt","name":"positivePrompt","type":"STRING","widget":{"name":"positivePrompt"},"link":71},{"localized_name":"negativePrompt","name":"negativePrompt","type":"STRING","widget":{"name":"negativePrompt"},"link":null},{"localized_name":"Multi Inference Mode","name":"Multi Inference Mode","type":"BOOLEAN","widget":{"name":"Multi Inference Mode"},"link":null},{"localized_name":"Prompt Weighting","name":"Prompt Weighting","type":"COMBO","widget":{"name":"Prompt Weighting"},"link":null},{"localized_name":"dimensions","name":"dimensions","type":"COMBO","widget":{"name":"dimensions"},"link":null},{"localized_name":"width","name":"width","type":"INT","widget":{"name":"width"},"link":null},{"localized_name":"height","name":"height","type":"INT","widget":{"name":"height"},"link":null},{"localized_name":"steps","name":"steps","type":"INT","widget":{"name":"steps"},"link":null},{"localized_name":"scheduler","name":"scheduler","type":"COMBO","widget":{"name":"scheduler"},"link":null},{"localized_name":"cfgScale","name":"cfgScale","type":"FLOAT","widget":{"name":"cfgScale"},"link":null},{"localized_name":"seed","name":"seed","type":"INT","widget":{"name":"seed"},"link":null},{"localized_name":"clipSkip","name":"clipSkip","type":"INT","widget":{"name":"clipSkip"},"link":null},{"localized_name":"strength","name":"strength","type":"FLOAT","widget":{"name":"strength"},"link":null},{"localized_name":"Mask Margin","name":"Mask Margin","type":"BOOLEAN","widget":{"name":"Mask Margin"},"link":null},{"localized_name":"maskMargin","name":"maskMargin","type":"INT","widget":{"name":"maskMargin"},"link":null},{"localized_name":"batchSize","name":"batchSize","type":"INT","widget":{"name":"batchSize"},"link":null}],"outputs":[{"localized_name":"IMAGE","name":"IMAGE","type":"IMAGE","links":[]},{"localized_name":"RW-Task","name":"RW-Task","type":"RUNWARETASK","links":[77]}],"properties":{"Node name for S&R":"Runware Image Inference"},"widgets_values":["Closeup body, A hyper-stylistic,, extremely detailed image is a breathtaking painting created from a combination of smeared black and gold metallic colors. The silhouette of a young woman is captured in streaks as if they were moving rapidly in the same direction. The black paint is applied in bold, dynamic strokes, creating a sense of energy and movement that contrasts beautifully with the gold accents. Gold paint is used to highlight the female silhouette, adding a sense of depth and dimension to the image. The overall effect is an exciting, surreal dynamism, as if the viewer has captured a fleeting moment in time. Every detail of the image is meticulously detailed, from the subtle, swirling patterns of the gold paint to the complex, textural patterns of the black paint. The contrasts between light and dark, color and texture create a sense of tension and energy that draws the viewer in, inviting them to explore the kinetic world of the painting. the highest quality, intricate detail, visually stunning, masterpiece, black and gold street backdrop","",true,"Disabled","Square (512x512)",512,512,4,"DPM++ 2M SDE",3,1048781623064936,"randomize",0,0.8,false,32,1],"bgcolor":"#5345bf"},{"id":29,"type":"PrimitiveNode","pos":[-313.58447265625,-287.6775817871094],"size":[295.9100036621094,88],"flags":{},"order":0,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","widget":{"name":"positivePrompt"},"links":[41]}],"title":"positivePrompt","properties":{"Run widget replace on values":false},"widgets_values":["a very cute cat in studio ghibli style, big eyes, details, 4k, anime, cinematic scene, neon lights"]},{"id":49,"type":"PrimitiveNode","pos":[-313.694580078125,-148.7803497314453],"size":[294.70001220703125,88],"flags":{},"order":1,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","widget":{"name":"positivePrompt"},"links":[69]}],"title":"positivePrompt","properties":{"Run widget replace on values":false},"widgets_values":["a cool dog looks very happy, snow, cozy"]},{"id":51,"type":"PrimitiveNode","pos":[-312.48443603515625,-7.210506439208984],"size":[293.489990234375,88],"flags":{},"order":2,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","widget":{"name":"positivePrompt"},"links":[70]}],"title":"positivePrompt","properties":{"Run widget replace on values":false},"widgets_values":["a fish inside a fish tank, realistic, 4k, detailed"]},{"id":54,"type":"PreviewImage","pos":[787.405029296875,-20.520469665527344],"size":[338.6278991699219,354.0370178222656],"flags":{},"order":13,"mode":0,"inputs":[{"localized_name":"images","name":"images","type":"IMAGE","link":78}],"outputs":[],"properties":{"Node name for S&R":"PreviewImage"},"widgets_values":[]},{"id":16,"type":"Runware Multi Inference","pos":[43.991127014160156,-430.8931579589844],"size":[289.79998779296875,86],"flags":{},"order":9,"mode":0,"inputs":[{"localized_name":"Task 1","name":"Task 1","type":"RUNWARETASK","link":42},{"localized_name":"Task 2","name":"Task 2","shape":7,"type":"RUNWARETASK","link":75},{"localized_name":"Task 3","name":"Task 3","shape":7,"type":"RUNWARETASK","link":76},{"localized_name":"Task 4","name":"Task 4","shape":7,"type":"RUNWARETASK","link":77}],"outputs":[{"localized_name":"Result 1","name":"Result 1","type":"IMAGE","links":[43]},{"localized_name":"Result 2","name":"Result 2","type":"IMAGE","links":[50]},{"localized_name":"Result 3","name":"Result 3","type":"IMAGE","links":[51]},{"localized_name":"Result 4","name":"Result 4","type":"IMAGE","links":[78]}],"properties":{"Node name for S&R":"Runware Multi Inference"},"widgets_values":[],"bgcolor":"#5345bf"},{"id":24,"type":"PreviewImage","pos":[384.4295959472656,-428.31951904296875],"size":[338.6278991699219,350.4070129394531],"flags":{},"order":10,"mode":0,"inputs":[{"localized_name":"images","name":"images","type":"IMAGE","link":43}],"outputs":[],"properties":{"Node name for S&R":"PreviewImage"},"widgets_values":[]},{"id":34,"type":"PreviewImage","pos":[784.937744140625,-427.1095886230469],"size":[338.6278991699219,354.0370178222656],"flags":{},"order":11,"mode":0,"inputs":[{"localized_name":"images","name":"images","type":"IMAGE","link":50}],"outputs":[],"properties":{"Node name for S&R":"PreviewImage"},"widgets_values":[]},{"id":35,"type":"PreviewImage","pos":[385.6392822265625,-24.179574966430664],"size":[338.6278991699219,354.0370178222656],"flags":{},"order":12,"mode":0,"inputs":[{"localized_name":"images","name":"images","type":"IMAGE","link":51}],"outputs":[],"properties":{"Node name for S&R":"PreviewImage"},"widgets_values":[]},{"id":53,"type":"PrimitiveNode","pos":[-308.8545837402344,135.56948852539062],"size":[291.07000732421875,88],"flags":{},"order":4,"mode":0,"inputs":[],"outputs":[{"name":"STRING","type":"STRING","widget":{"name":"positivePrompt"},"links":[71]}],"title":"positivePrompt","properties":{"Run widget replace on values":false},"widgets_values":["Closeup body, A hyper-stylistic,, extremely detailed image is a breathtaking painting created from a combination of smeared black and gold metallic colors. The silhouette of a young woman is captured in streaks as if they were moving rapidly in the same direction. The black paint is applied in bold, dynamic strokes, creating a sense of energy and movement that contrasts beautifully with the gold accents. Gold paint is used to highlight the female silhouette, adding a sense of depth and dimension to the image. The overall effect is an exciting, surreal dynamism, as if the viewer has captured a fleeting moment in time. Every detail of the image is meticulously detailed, from the subtle, swirling patterns of the gold paint to the complex, textural patterns of the black paint. The contrasts between light and dark, color and texture create a sense of tension and energy that draws the viewer in, inviting them to explore the kinetic world of the painting. the highest quality, intricate detail, visually stunning, masterpiece, black and gold street backdrop"]},{"id":3,"type":"Runware Model Search","pos":[-345.5522155761719,-487.4124450683594],"size":[339.04998779296875,154],"flags":{"collapsed":false},"order":3,"mode":0,"inputs":[{"localized_name":"Model Search","name":"Model Search","type":"STRING","widget":{"name":"Model Search"},"link":null},{"localized_name":"Model Architecture","name":"Model Architecture","type":"COMBO","widget":{"name":"Model Architecture"},"link":null},{"localized_name":"ModelType","name":"ModelType","type":"COMBO","widget":{"name":"ModelType"},"link":null},{"localized_name":"ModelList","name":"ModelList","type":"COMBO","widget":{"name":"ModelList"},"link":null},{"localized_name":"Use Search Value","name":"Use Search Value","type":"BOOLEAN","widget":{"name":"Use Search Value"},"link":null}],"outputs":[{"localized_name":"Runware Model","name":"Runware Model","type":"RUNWAREMODEL","links":[13,72,73,74]}],"properties":{"Node name for S&R":"Runware Model Search"},"widgets_values":["","All","Base Model","runware:100@1 (Flux Schnell)",false],"bgcolor":"#5345bf"}],"links":[[13,3,0,14,0,"RUNWAREMODEL"],[41,29,0,14,11,"STRING"],[42,14,1,16,0,"RUNWARETASK"],[43,16,0,24,0,"IMAGE"],[50,16,1,34,0,"IMAGE"],[51,16,2,35,0,"IMAGE"],[69,49,0,48,11,"STRING"],[70,51,0,50,11,"STRING"],[71,53,0,52,11,"STRING"],[72,3,0,48,0,"RUNWAREMODEL"],[73,3,0,50,0,"RUNWAREMODEL"],[74,3,0,52,0,"RUNWAREMODEL"],[75,48,1,16,1,"RUNWARETASK"],[76,50,1,16,2,"RUNWARETASK"],[77,52,1,16,3,"RUNWARETASK"],[78,16,3,54,0,"IMAGE"]],"groups":[],"config":{},"extra":{"ds":{"scale":0.8264462809917354,"offset":[779.5444598246156,604.9504716404363]},"frontendVersion":"1.17.11"},"version":0.4} -------------------------------------------------------------------------------- /workflows/Runware_Outpainting.json: -------------------------------------------------------------------------------- 1 | {"id":"64d5095f-01d6-48a2-b4b2-a936b07f976e","revision":0,"last_node_id":8,"last_link_id":4,"nodes":[{"id":1,"type":"LoadImage","pos":[-307.6441955566406,-225.01039123535156],"size":[334.1299743652344,422.8999938964844],"flags":{},"order":1,"mode":0,"inputs":[{"localized_name":"image","name":"image","type":"COMBO","widget":{"name":"image"},"link":null},{"localized_name":"choose file to upload","name":"upload","type":"IMAGEUPLOAD","widget":{"name":"upload"},"link":null}],"outputs":[{"localized_name":"IMAGE","name":"IMAGE","type":"IMAGE","links":[3]},{"localized_name":"MASK","name":"MASK","type":"MASK","links":null}],"properties":{"Node name for S&R":"LoadImage"},"widgets_values":["21sdfgas.jpg","image"]},{"id":2,"type":"Runware Image Inference","pos":[67.45526885986328,-502.1003112792969],"size":[400,702],"flags":{},"order":3,"mode":0,"inputs":[{"localized_name":"Model","name":"Model","type":"RUNWAREMODEL","link":1},{"localized_name":"Accelerator","name":"Accelerator","shape":7,"type":"RUNWAREACCELERATOR","link":null},{"localized_name":"Lora","name":"Lora","shape":7,"type":"RUNWARELORA","link":null},{"localized_name":"Outpainting","name":"Outpainting","shape":7,"type":"RUNWAREOUTPAINT","link":2},{"localized_name":"IPAdapters","name":"IPAdapters","shape":7,"type":"RUNWAREIPAdapter","link":null},{"localized_name":"ControlNet","name":"ControlNet","shape":7,"type":"RUNWARECONTROLNET","link":null},{"localized_name":"Refiner","name":"Refiner","shape":7,"type":"RUNWAREREFINER","link":null},{"localized_name":"seedImage","name":"seedImage","shape":7,"type":"IMAGE","link":3},{"localized_name":"maskImage","name":"maskImage","shape":7,"type":"MASK","link":null},{"localized_name":"Embeddings","name":"Embeddings","shape":7,"type":"RUNWAREEMBEDDING","link":null},{"localized_name":"VAE","name":"VAE","shape":7,"type":"RUNWAREVAE","link":null},{"localized_name":"positivePrompt","name":"positivePrompt","type":"STRING","widget":{"name":"positivePrompt"},"link":null},{"localized_name":"negativePrompt","name":"negativePrompt","type":"STRING","widget":{"name":"negativePrompt"},"link":null},{"localized_name":"Multi Inference Mode","name":"Multi Inference Mode","type":"BOOLEAN","widget":{"name":"Multi Inference Mode"},"link":null},{"localized_name":"Prompt Weighting","name":"Prompt Weighting","type":"COMBO","widget":{"name":"Prompt Weighting"},"link":null},{"localized_name":"dimensions","name":"dimensions","type":"COMBO","widget":{"name":"dimensions"},"link":null},{"localized_name":"width","name":"width","type":"INT","widget":{"name":"width"},"link":null},{"localized_name":"height","name":"height","type":"INT","widget":{"name":"height"},"link":null},{"localized_name":"steps","name":"steps","type":"INT","widget":{"name":"steps"},"link":null},{"localized_name":"scheduler","name":"scheduler","type":"COMBO","widget":{"name":"scheduler"},"link":null},{"localized_name":"cfgScale","name":"cfgScale","type":"FLOAT","widget":{"name":"cfgScale"},"link":null},{"localized_name":"seed","name":"seed","type":"INT","widget":{"name":"seed"},"link":null},{"localized_name":"clipSkip","name":"clipSkip","type":"INT","widget":{"name":"clipSkip"},"link":null},{"localized_name":"strength","name":"strength","type":"FLOAT","widget":{"name":"strength"},"link":null},{"localized_name":"Mask Margin","name":"Mask Margin","type":"BOOLEAN","widget":{"name":"Mask Margin"},"link":null},{"localized_name":"maskMargin","name":"maskMargin","type":"INT","widget":{"name":"maskMargin"},"link":null},{"localized_name":"batchSize","name":"batchSize","type":"INT","widget":{"name":"batchSize"},"link":null}],"outputs":[{"localized_name":"IMAGE","name":"IMAGE","type":"IMAGE","links":[4]},{"localized_name":"RW-Task","name":"RW-Task","type":"RUNWARETASK","links":null}],"properties":{"Node name for S&R":"Runware Image Inference"},"widgets_values":["__blank__","",false,"Disabled","Custom",1536,1536,30,"Default",3,779910374698910,"randomize",0,0.8,false,32,1],"bgcolor":"#5345bf"},{"id":7,"type":"PreviewImage","pos":[500.6356201171875,-499.6804504394531],"size":[696.1900024414062,697.1900634765625],"flags":{},"order":4,"mode":0,"inputs":[{"localized_name":"images","name":"images","type":"IMAGE","link":4}],"outputs":[],"properties":{"Node name for S&R":"PreviewImage"},"widgets_values":[]},{"id":4,"type":"Runware Outpaint","pos":[-299.12445068359375,-431.9206848144531],"size":[315,154],"flags":{},"order":2,"mode":0,"inputs":[{"localized_name":"Top","name":"Top","type":"INT","widget":{"name":"Top"},"link":null},{"localized_name":"Right","name":"Right","type":"INT","widget":{"name":"Right"},"link":null},{"localized_name":"Bottom","name":"Bottom","type":"INT","widget":{"name":"Bottom"},"link":null},{"localized_name":"Left","name":"Left","type":"INT","widget":{"name":"Left"},"link":null},{"localized_name":"Blur","name":"Blur","type":"INT","widget":{"name":"Blur"},"link":null}],"outputs":[{"localized_name":"Outpaint Settings","name":"Outpaint Settings","type":"RUNWAREOUTPAINT","links":[2]}],"properties":{"Node name for S&R":"Runware Outpaint"},"widgets_values":[256,256,256,256,0],"bgcolor":"#5345bf"},{"id":5,"type":"Runware Model Search","pos":[-289.864501953125,-477.9004211425781],"size":[342.6799621582031,154],"flags":{"collapsed":true},"order":0,"mode":0,"inputs":[{"localized_name":"Model Search","name":"Model Search","type":"STRING","widget":{"name":"Model Search"},"link":null},{"localized_name":"Model Architecture","name":"Model Architecture","type":"COMBO","widget":{"name":"Model Architecture"},"link":null},{"localized_name":"ModelType","name":"ModelType","type":"COMBO","widget":{"name":"ModelType"},"link":null},{"localized_name":"ModelList","name":"ModelList","type":"COMBO","widget":{"name":"ModelList"},"link":null},{"localized_name":"Use Search Value","name":"Use Search Value","type":"BOOLEAN","widget":{"name":"Use Search Value"},"link":null}],"outputs":[{"localized_name":"Runware Model","name":"Runware Model","type":"RUNWAREMODEL","links":[1]}],"properties":{"Node name for S&R":"Runware Model Search"},"widgets_values":["","FLUX.1-Dev","Inpainting Model","runware:102@1 (FLUX Dev Fill 1)",false],"bgcolor":"#5345bf"}],"links":[[1,5,0,2,0,"RUNWAREMODEL"],[2,4,0,2,3,"RUNWAREOUTPAINT"],[3,1,0,2,7,"IMAGE"],[4,2,0,7,0,"IMAGE"]],"groups":[],"config":{},"extra":{"ds":{"scale":1,"offset":[512.6044598246152,589.6904716404363]},"frontendVersion":"1.17.11"},"version":0.4} -------------------------------------------------------------------------------- /workflows/Runware_PhotoMaker_v2.json: -------------------------------------------------------------------------------- 1 | {"last_node_id":15,"last_link_id":19,"nodes":[{"id":10,"type":"LoadImage","pos":[-11.610055923461914,-225.45167541503906],"size":[324.9819641113281,419.80859375],"flags":{},"order":0,"mode":0,"inputs":[],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[15],"slot_index":0},{"name":"MASK","type":"MASK","links":null}],"properties":{"Node name for S&R":"LoadImage"},"widgets_values":["ggasdw.jpg","image"]},{"id":11,"type":"LoadImage","pos":[28.317718505859375,-268.37384033203125],"size":[315,314],"flags":{"collapsed":true},"order":1,"mode":0,"inputs":[],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[17],"slot_index":0},{"name":"MASK","type":"MASK","links":null}],"properties":{"Node name for S&R":"LoadImage"},"widgets_values":["qwd2qdasf.jpg","image"]},{"id":12,"type":"LoadImage","pos":[172.05775451660156,-269.3723449707031],"size":[315,314],"flags":{"collapsed":true},"order":2,"mode":0,"inputs":[],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[16]},{"name":"MASK","type":"MASK","links":null}],"properties":{"Node name for S&R":"LoadImage"},"widgets_values":["qwd2s.jpg","image"]},{"id":13,"type":"Runware Model Search","pos":[108.17327117919922,-327.2672424316406],"size":[315,130],"flags":{"collapsed":true},"order":3,"mode":0,"inputs":[],"outputs":[{"name":"Runware Model","type":"RUNWAREMODEL","links":[18],"slot_index":0}],"properties":{"Node name for S&R":"Runware Model Search"},"widgets_values":["","All","Base Model","civitai:133005@288982 (SDXL Juggernaut XL V8)"],"bgcolor":"#6c5ce7"},{"id":7,"type":"PreviewImage","pos":[797.9251098632812,-290.334716796875],"size":[528.4240112304688,550.0520629882812],"flags":{},"order":5,"mode":0,"inputs":[{"name":"images","type":"IMAGE","link":19}],"outputs":[],"properties":{"Node name for S&R":"PreviewImage"},"widgets_values":[]},{"id":15,"type":"Runware PhotoMaker V2","pos":[343.7464599609375,-301.3145751953125],"size":[400,569.9061279296875],"flags":{},"order":4,"mode":0,"inputs":[{"name":"Model","type":"RUNWAREMODEL","link":18},{"name":"Image 1","type":"IMAGE","link":15},{"name":"Image 2","type":"IMAGE","link":16,"shape":7},{"name":"Image 3","type":"IMAGE","link":17,"shape":7},{"name":"Image 4","type":"IMAGE","link":null,"shape":7}],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[19],"slot_index":0}],"properties":{"Node name for S&R":"Runware PhotoMaker V2"},"widgets_values":["img of a girl, solo, perfect face, close view, dark purple hair, spiked hair, ear piercing, spiked collar, black hoodie, winter, blush, shy smile, portrait, face focus, depth of field, looking at viewer, blushy white studio, by Makoto Shinkai, Stanley Artgerm Lau, WLOP, Rossdraws, James Jean, Andrei Riabovitchev, Marc Simonetti, krenz cushart, Sakimichan, D&D trending on ArtStation, digital art","nsfw, bad quality, deformed, out of frame, watermark, text","Disabled","Fantasy art","Square HD (1024x1024)",1024,1024,25,"Default",6.5,15,0,722752061398001,"randomize",1],"bgcolor":"#6c5ce7"}],"links":[[15,10,0,15,1,"IMAGE"],[16,12,0,15,2,"IMAGE"],[17,11,0,15,3,"IMAGE"],[18,13,0,15,0,"RUNWAREMODEL"],[19,15,0,7,0,"IMAGE"]],"groups":[],"config":{},"extra":{"ds":{"scale":1.001808834538306,"offset":[394.91668730768725,442.4964682151535]}},"version":0.4} --------------------------------------------------------------------------------