├── .dockerignore ├── .gitignore ├── .gitmodules ├── LICENSE ├── MAKING_A_MODEL_GUIDE.md ├── README.md ├── cog.yaml ├── cog_model_helpers ├── __init__.py ├── optimise_images.py └── seed.py ├── comfyui.py ├── custom_node_configs ├── comfy.settings.json └── was_suite_config.json ├── custom_node_helper.py ├── custom_node_helpers ├── ComfyUI_BRIA_AI_RMBG.py ├── ComfyUI_Controlnet_Aux.py ├── ComfyUI_Essentials.py ├── ComfyUI_IPAdapter_plus.py ├── ComfyUI_InstantID.py ├── ComfyUI_KJNodes.py ├── ComfyUI_LayerDiffuse.py ├── WAS_Node_Suite.py └── __init__.py ├── custom_nodes.json ├── feature-extractor └── preprocessor_config.json ├── node.py ├── predict.py ├── reset.json ├── safety_checker.py ├── scripts ├── get_weights.py ├── install_custom_nodes.py ├── prepare_template.py ├── reset.py └── start.sh ├── weights.json ├── weights_downloader.py ├── weights_manifest.py ├── workflow_api.json └── workflow_ui.json /.dockerignore: -------------------------------------------------------------------------------- 1 | **/__pycache__ 2 | **/.git 3 | **/.github 4 | **/.ci 5 | 6 | # Outputs 7 | *.jpg 8 | *.png 9 | *.gif 10 | *.mp4 11 | *.zip 12 | *.tar 13 | *.webp 14 | *.webm 15 | 16 | # Models 17 | *.ckpt 18 | *.safetensors 19 | *.pth 20 | *.bin 21 | *.onnx 22 | *.torchscript 23 | # .pt files are used by efficiency-nodes-comfyui 24 | 25 | # Files 26 | scripts/* 27 | test/* 28 | updated_weights.json 29 | 30 | # Extension files 31 | *.ipynb 32 | *.bat 33 | 34 | # ComfyUI 35 | ComfyUI/venv 36 | ComfyUI/temp 37 | ComfyUI/user 38 | ComfyUI/models 39 | ComfyUI/custom_nodes/comfyUI_controlnet_aux/ckpts 40 | ComfyUI/custom_nodes/ComfyUI-AnimateDiff-Evolved/models 41 | ComfyUI/custom_nodes/ComfyUI-AnimateDiff-Evolved/motion_lora 42 | ComfyUI/custom_nodes/efficiency-nodes-comfyui/images 43 | 44 | # ComfyUI bits we just don’t need 45 | ComfyUI/tests 46 | ComfyUI/tests-ui 47 | ComfyUI/notebooks 48 | ComfyUI/script_examples 49 | ComfyUI/comfyui_screenshot.png 50 | 51 | # Safety checker 52 | safety-cache/ 53 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | *.png 3 | *.jpg 4 | *.gif 5 | *.mp4 6 | *.zip 7 | *.ckpt 8 | *.safetensors 9 | *.pth 10 | *.bin 11 | *.torchscript 12 | *.webp 13 | weights.txt 14 | manifest.txt 15 | updated_weights.json 16 | .cog 17 | safety-cache/ 18 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "ComfyUI"] 2 | path = ComfyUI 3 | url = https://github.com/comfyanonymous/ComfyUI.git 4 | commit = eecd69b53a896343775bcb02a4f8349e7442ffd1 5 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 fofr 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or 
substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/MAKING_A_MODEL_GUIDE.md:
--------------------------------------------------------------------------------
1 | # Adapting this template for your model
2 |
3 | This guide will help you adapt the `cog-comfyui` template repository for your own model.
4 |
5 | If you haven't used `cog` before or pushed a model to Replicate, these are good starting guides:
6 |
7 | - https://cog.run/getting-started-own-model
8 | - https://replicate.com/docs/guides/push-a-model
9 |
10 |
11 | ## Create a new repo from the template
12 |
13 | Use https://github.com/fofr/cog-comfyui as a template to create a new repository.
14 |
15 | ## Prepare the template
16 |
17 | After you have cloned your new repository locally, including submodules, run the `prepare_template.py` script.
18 |
19 | This will:
20 |
21 | - delete unnecessary files and directories
22 | - overwrite the `README.md` with a blank file containing only a header
23 | - replace `predict.py` with `example_predict.py`
24 |
25 | Run this script:
26 |
27 | ```sh
28 | python scripts/prepare_template.py
29 | ```
30 |
31 | Check what has been deleted and replaced before committing:
32 |
33 | ```sh
34 | git status
35 | git diff
36 | ```
37 |
38 | ## Add your workflow
39 |
40 | You should save the API version of your workflow as `workflow_api.json`.
41 |
42 | It also helps to keep a copy of the UI version, as `workflow_ui.json`.
43 |
44 | ## Update the inputs to your model
45 |
46 | `predict.py` is the entrypoint to your model. You can read about `predict.py` and the inputs you can use in the [Cog documentation](https://cog.run/python/#predictorpredictkwargs).
47 |
48 | You'll end up with something like this:
49 |
50 | ```python
51 | def predict(
52 |     self,
53 |     prompt: str = Input(
54 |         default="",
55 |     ),
56 |     negative_prompt: str = Input(
57 |         description="Things you do not want to see in your image",
58 |         default="",
59 |     ),
60 |     image: Path = Input(
61 |         description="An input image",
62 |         default=None,
63 |     ),
64 |     ...
65 | ) -> List[Path]:
66 |     """Run a single prediction on the model"""
67 |     ...
68 | ```
69 |
70 | To make sure these inputs carry over to the workflow, you'll need to update the JSON object with the new values.
`example_predict.py` includes an example of this:
71 |
72 | Within the predict method:
73 |
74 | ```python
75 | self.update_workflow(
76 |     workflow,
77 |     prompt=prompt,
78 |     negative_prompt=negative_prompt,
79 |     seed=seed,
80 | )
81 | ```
82 |
83 | And in the `update_workflow` method (in the Predictor class):
84 |
85 | ```python
86 | def update_workflow(self, workflow, **kwargs):
87 |     # The node to update
88 |     positive_prompt = workflow["6"]["inputs"]
89 |
90 |     # Updating one of the node's inputs
91 |     positive_prompt["text"] = kwargs["prompt"]
92 |
93 |     negative_prompt = workflow["7"]["inputs"]
94 |     negative_prompt["text"] = f"nsfw, {kwargs['negative_prompt']}"
95 |
96 |     sampler = workflow["3"]["inputs"]
97 |     sampler["seed"] = kwargs["seed"]
98 | ```
99 |
100 | ## Remove any custom nodes you do not need
101 |
102 | To remove a custom node you should:
103 |
104 | - remove its corresponding entry from the `custom_nodes.json` file
105 | - optional: delete any corresponding helpers in `custom_node_helpers`
106 | - optional: delete any configs from `custom_node_configs`
107 | - optional: delete dependencies from `cog.yaml`
108 |
109 | If you've already installed the nodes, make sure to also remove them from the `ComfyUI/custom_nodes` directory.
110 |
111 | ## Add your own custom nodes
112 |
113 | The simplest way to add new nodes is to:
114 |
115 | - add a new entry to the `custom_nodes.json` file, with the repo URL and the commit hash you want to use (usually the latest); see the example entry below
116 | - add any dependencies from the custom node's `requirements.txt` to the `cog.yaml` file (if they are not already there)
117 | - rerun `scripts/install_custom_nodes.py` to install the new nodes
118 |
119 | Some nodes will try to download weights on demand. You might want to avoid doing this from your Replicate model. The easiest fix is to make sure the downloaded weights are also pushed with your container. If you choose to do this, make sure to also update `.dockerignore`, which by default excludes weights from the container.
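For reference, each entry in `custom_nodes.json` is a small JSON object with the repo URL and a pinned commit, matching the format of this repository's existing `custom_nodes.json`. The repository URL and hash below are placeholders:

```json
{
  "repo": "https://github.com/<author>/<custom-node-repo>",
  "commit": "<short-commit-hash>"
}
```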
120 |
121 | ## Running the model locally
122 |
123 | You can run the model with defaults via cog:
124 |
125 | ```sh
126 | cog predict
127 | ```
128 |
129 | Or if you want to pass inputs:
130 |
131 | ```sh
132 | cog predict -i prompt="something" -i image=@/path/to/image.jpg
133 | ```
134 |
135 | ## Deploying your model to Replicate
136 |
137 | Create a new model at https://replicate.com/create
138 |
139 | Push your cog container to Replicate:
140 |
141 | ```sh
142 | cog push r8.im/<your-username>/<your-model-name>
143 | ```
144 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Consistent character
2 |
3 | Create images of a given character in different poses
4 |
5 | Run on Replicate:
6 |
7 | https://replicate.com/fofr/consistent-character
8 |
9 | There is a workflow that you can use directly in ComfyUI, but it will only produce one pose at a time:
10 |
11 | https://github.com/fofr/cog-consistent-character/blob/main/workflow_ui.json
12 |
13 | ## Developing locally
14 |
15 | Clone this repository:
16 |
17 | ```sh
18 | git clone --recurse-submodules https://github.com/fofr/cog-consistent-character.git
19 | ```
20 |
21 | Run the [following script](https://github.com/fofr/cog-consistent-character/blob/main/scripts/install_custom_nodes.py) to install all the custom nodes:
22 |
23 | ```sh
24 | ./scripts/install_custom_nodes.py
25 | ```
26 |
27 | You can view the list of nodes in [custom_nodes.json](https://github.com/fofr/cog-consistent-character/blob/main/custom_nodes.json)
28 |
29 | ### Running the Web UI from your Cog container
30 |
31 | 1. **GPU Machine**: Start the Cog container and expose port 8188:
32 |    ```sh
33 |    sudo cog run -p 8188 bash
34 |    ```
35 |    Running this command starts up the Cog container and lets you access it.
36 |
37 | 2. **Inside Cog Container**: Now that we have access to the Cog container, we start the server, binding to all network interfaces:
38 |    ```sh
39 |    cd ComfyUI/
40 |    python main.py --listen 0.0.0.0
41 |    ```
42 |
43 | 3. **Local Machine**: Access the server using the GPU machine's IP and the exposed port (8188):
44 |    `http://<gpu-machine-ip>:8188`
45 |
46 | When you go to `http://<gpu-machine-ip>:8188` you'll see the classic ComfyUI web UI!
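### Running a prediction

Once the custom nodes are installed you can also run predictions from the repo root. A sketch of a typical invocation, using the input names defined in `predict.py` (the subject image path is illustrative):

```sh
cog predict -i prompt="A headshot photo" -i subject=@path/to/subject.jpg -i number_of_outputs=3
```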
47 | -------------------------------------------------------------------------------- /cog.yaml: -------------------------------------------------------------------------------- 1 | build: 2 | gpu: true 3 | cuda: "12.1" 4 | system_packages: 5 | - ffmpeg 6 | python_version: "3.10.6" 7 | python_packages: 8 | - torch 9 | - torchvision 10 | - torchaudio 11 | - torchsde 12 | - einops 13 | - transformers>=4.25.1 14 | - safetensors>=0.3.0 15 | - aiohttp 16 | - accelerate 17 | - pyyaml 18 | - Pillow 19 | - scipy 20 | - tqdm 21 | - psutil 22 | - kornia>=0.7.1 23 | - websocket-client==1.6.3 24 | 25 | # layerdiffuse 26 | - diffusers>=0.25.0 27 | 28 | # fix for pydantic issues in cog 29 | # https://github.com/replicate/cog/issues/1623 30 | - albumentations==1.4.3 31 | 32 | # was-node-suite-comfyui 33 | # https://github.com/WASasquatch/was-node-suite-comfyui/blob/main/requirements.txt 34 | - cmake 35 | - imageio 36 | - joblib 37 | - matplotlib 38 | - pilgram 39 | - scikit-learn 40 | - rembg 41 | 42 | # ComfyUI_essentials 43 | - numba 44 | 45 | # ComfyUI_FizzNodes 46 | - pandas 47 | - numexpr 48 | 49 | # comfyui-reactor-node 50 | - insightface 51 | - onnx 52 | 53 | # ComfyUI-Impact-Pack 54 | - segment-anything 55 | - piexif 56 | 57 | # ComfyUI-Impact-Subpack 58 | - ultralytics!=8.0.177 59 | 60 | # comfyui_segment_anything 61 | - timm 62 | 63 | # comfyui_controlnet_aux 64 | # https://github.com/Fannovel16/comfyui_controlnet_aux/blob/main/requirements.txt 65 | - importlib_metadata 66 | - opencv-python-headless>=4.0.1.24 67 | - filelock 68 | - numpy 69 | - einops 70 | - pyyaml 71 | - scikit-image 72 | - python-dateutil 73 | - mediapipe 74 | - svglib 75 | - fvcore 76 | - yapf 77 | - omegaconf 78 | - ftfy 79 | - addict 80 | - yacs 81 | - trimesh[easy] 82 | 83 | # ComfyUI-KJNodes 84 | - librosa 85 | - color-matcher 86 | 87 | # PuLID 88 | - facexlib 89 | run: 90 | - curl -o /usr/local/bin/pget -L "https://github.com/replicate/pget/releases/download/v0.8.1/pget_linux_x86_64" && chmod +x /usr/local/bin/pget 91 | - pip install onnxruntime-gpu --extra-index-url https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple/ 92 | predict: "predict.py:Predictor" 93 | -------------------------------------------------------------------------------- /cog_model_helpers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fofr/cog-consistent-character/25254c13bf1c5873ba85b20f3a22701390893640/cog_model_helpers/__init__.py -------------------------------------------------------------------------------- /cog_model_helpers/optimise_images.py: -------------------------------------------------------------------------------- 1 | from cog import Input 2 | from PIL import Image 3 | 4 | IMAGE_FILE_EXTENSIONS = [".jpg", ".jpeg", ".png"] 5 | FORMAT_CHOICES = ["webp", "jpg", "png"] 6 | DEFAULT_FORMAT = "webp" 7 | DEFAULT_QUALITY = 80 8 | 9 | 10 | def predict_output_format() -> str: 11 | return Input( 12 | description="Format of the output images", 13 | choices=FORMAT_CHOICES, 14 | default=DEFAULT_FORMAT, 15 | ) 16 | 17 | 18 | def predict_output_quality() -> int: 19 | return Input( 20 | description="Quality of the output images, from 0 to 100. 
100 is best quality, 0 is lowest quality.", 21 | default=DEFAULT_QUALITY, 22 | ge=0, 23 | le=100, 24 | ) 25 | 26 | 27 | def should_optimise_images(output_format: str, output_quality: int): 28 | return output_quality < 100 or output_format in [ 29 | "webp", 30 | "jpg", 31 | ] 32 | 33 | 34 | def optimise_image_files( 35 | output_format: str = DEFAULT_FORMAT, output_quality: int = DEFAULT_QUALITY, files=[] 36 | ): 37 | if should_optimise_images(output_format, output_quality): 38 | optimised_files = [] 39 | for file in files: 40 | if file.is_file() and file.suffix in IMAGE_FILE_EXTENSIONS: 41 | image = Image.open(file) 42 | optimised_file_path = file.with_suffix(f".{output_format}") 43 | image.save( 44 | optimised_file_path, 45 | quality=output_quality, 46 | optimize=True, 47 | ) 48 | optimised_files.append(optimised_file_path) 49 | else: 50 | optimised_files.append(file) 51 | 52 | return optimised_files 53 | else: 54 | return files 55 | -------------------------------------------------------------------------------- /cog_model_helpers/seed.py: -------------------------------------------------------------------------------- 1 | from cog import Input 2 | import random 3 | 4 | 5 | def predict_seed() -> int: 6 | return Input( 7 | description="Set a seed for reproducibility. Random by default.", 8 | default=None, 9 | ) 10 | 11 | 12 | def generate(seed: int) -> int: 13 | if seed is None or seed == 0 or seed == -1: 14 | seed = random.randint(0, 2**32 - 1) 15 | print(f"Random seed set to: {seed}") 16 | else: 17 | print(f"Seed set to: {seed}") 18 | return seed 19 | -------------------------------------------------------------------------------- /comfyui.py: -------------------------------------------------------------------------------- 1 | import os 2 | import urllib.request 3 | import subprocess 4 | import threading 5 | import time 6 | import json 7 | import urllib 8 | import uuid 9 | import websocket 10 | import random 11 | import requests 12 | import shutil 13 | import custom_node_helpers as helpers 14 | from cog import Path 15 | from node import Node 16 | from weights_downloader import WeightsDownloader 17 | from urllib.error import URLError 18 | 19 | 20 | class ComfyUI: 21 | def __init__(self, server_address): 22 | self.weights_downloader = WeightsDownloader() 23 | self.server_address = server_address 24 | 25 | def start_server(self, output_directory, input_directory): 26 | self.input_directory = input_directory 27 | self.output_directory = output_directory 28 | self.apply_helper_methods("prepare", weights_downloader=self.weights_downloader) 29 | 30 | start_time = time.time() 31 | server_thread = threading.Thread( 32 | target=self.run_server, args=(output_directory, input_directory) 33 | ) 34 | server_thread.start() 35 | while not self.is_server_running(): 36 | if time.time() - start_time > 60: 37 | raise TimeoutError("Server did not start within 60 seconds") 38 | time.sleep(0.5) 39 | 40 | elapsed_time = time.time() - start_time 41 | print(f"Server started in {elapsed_time:.2f} seconds") 42 | 43 | def run_server(self, output_directory, input_directory): 44 | command = f"python ./ComfyUI/main.py --output-directory {output_directory} --input-directory {input_directory} --disable-metadata --highvram" 45 | server_process = subprocess.Popen(command, shell=True) 46 | server_process.wait() 47 | 48 | def is_server_running(self): 49 | try: 50 | with urllib.request.urlopen( 51 | "http://{}/history/{}".format(self.server_address, "123") 52 | ) as response: 53 | return response.status == 200 54 | except 
URLError: 55 | return False 56 | 57 | def apply_helper_methods(self, method_name, *args, **kwargs): 58 | # Dynamically applies a method from helpers module with given args. 59 | # Example usage: self.apply_helper_methods("add_weights", weights_to_download, node) 60 | for module_name in dir(helpers): 61 | module = getattr(helpers, module_name) 62 | method = getattr(module, method_name, None) 63 | if callable(method): 64 | method(*args, **kwargs) 65 | 66 | def handle_weights(self, workflow, weights_to_download=[]): 67 | print("Checking weights") 68 | embeddings = self.weights_downloader.get_weights_by_type("EMBEDDINGS") 69 | embedding_to_fullname = {emb.split(".")[0]: emb for emb in embeddings} 70 | weights_filetypes = self.weights_downloader.supported_filetypes 71 | 72 | for node in workflow.values(): 73 | self.apply_helper_methods("add_weights", weights_to_download, Node(node)) 74 | 75 | for input in node["inputs"].values(): 76 | if isinstance(input, str): 77 | if any(key in input for key in embedding_to_fullname): 78 | weights_to_download.extend( 79 | embedding_to_fullname[key] 80 | for key in embedding_to_fullname 81 | if key in input 82 | ) 83 | elif any(input.endswith(ft) for ft in weights_filetypes): 84 | weights_to_download.append(input) 85 | 86 | weights_to_download = list(set(weights_to_download)) 87 | 88 | for weight in weights_to_download: 89 | self.weights_downloader.download_weights(weight) 90 | print(f"✅ {weight}") 91 | 92 | print("====================================") 93 | 94 | def is_image_or_video_value(self, value): 95 | filetypes = [".png", ".jpg", ".jpeg", ".webp", ".mp4", ".webm"] 96 | return isinstance(value, str) and any( 97 | value.lower().endswith(ft) for ft in filetypes 98 | ) 99 | 100 | def handle_known_unsupported_nodes(self, workflow): 101 | for node in workflow.values(): 102 | self.apply_helper_methods("check_for_unsupported_nodes", Node(node)) 103 | 104 | def handle_inputs(self, workflow): 105 | print("Checking inputs") 106 | seen_inputs = set() 107 | for node in workflow.values(): 108 | if "inputs" in node: 109 | for input_key, input_value in node["inputs"].items(): 110 | if isinstance(input_value, str) and input_value not in seen_inputs: 111 | seen_inputs.add(input_value) 112 | if input_value.startswith(("http://", "https://")): 113 | filename = os.path.join( 114 | self.input_directory, os.path.basename(input_value) 115 | ) 116 | if not os.path.exists(filename): 117 | print(f"Downloading {input_value} to {filename}") 118 | try: 119 | response = requests.get(input_value) 120 | response.raise_for_status() 121 | with open(filename, "wb") as file: 122 | file.write(response.content) 123 | node["inputs"][input_key] = filename 124 | print(f"✅ {filename}") 125 | except requests.exceptions.RequestException as e: 126 | print(f"❌ Error downloading {input_value}: {e}") 127 | 128 | elif self.is_image_or_video_value(input_value): 129 | filename = os.path.join( 130 | self.input_directory, os.path.basename(input_value) 131 | ) 132 | if not os.path.exists(filename): 133 | print(f"❌ {filename} not provided") 134 | else: 135 | print(f"✅ {filename}") 136 | 137 | print("====================================") 138 | 139 | def connect(self): 140 | self.client_id = str(uuid.uuid4()) 141 | self.ws = websocket.WebSocket() 142 | self.ws.connect(f"ws://{self.server_address}/ws?clientId={self.client_id}") 143 | 144 | def post_request(self, endpoint, data=None): 145 | url = f"http://{self.server_address}{endpoint}" 146 | headers = {"Content-Type": "application/json"} if data else {} 147 
| json_data = json.dumps(data).encode("utf-8") if data else None 148 | req = urllib.request.Request( 149 | url, data=json_data, headers=headers, method="POST" 150 | ) 151 | with urllib.request.urlopen(req) as response: 152 | if response.status != 200: 153 | print(f"Failed: {endpoint}, status code: {response.status}") 154 | 155 | # https://github.com/comfyanonymous/ComfyUI/blob/master/server.py 156 | def clear_queue(self): 157 | self.post_request("/queue", {"clear": True}) 158 | self.post_request("/interrupt") 159 | 160 | def queue_prompt(self, prompt): 161 | try: 162 | # Prompt is the loaded workflow (prompt is the label comfyUI uses) 163 | p = {"prompt": prompt, "client_id": self.client_id} 164 | data = json.dumps(p).encode("utf-8") 165 | req = urllib.request.Request( 166 | f"http://{self.server_address}/prompt?{self.client_id}", data=data 167 | ) 168 | 169 | output = json.loads(urllib.request.urlopen(req).read()) 170 | return output["prompt_id"] 171 | except urllib.error.HTTPError as e: 172 | print(f"ComfyUI error: {e.code} {e.reason}") 173 | http_error = True 174 | 175 | if http_error: 176 | raise Exception( 177 | "ComfyUI Error – Your workflow could not be run. This usually happens if you’re trying to use an unsupported node. Check the logs for 'KeyError: ' details, and go to https://github.com/fofr/cog-comfyui to see the list of supported custom nodes." 178 | ) 179 | 180 | def wait_for_prompt_completion(self, workflow, prompt_id): 181 | while True: 182 | out = self.ws.recv() 183 | if isinstance(out, str): 184 | message = json.loads(out) 185 | if message["type"] == "executing": 186 | data = message["data"] 187 | if data["node"] is None and data["prompt_id"] == prompt_id: 188 | break 189 | elif data["prompt_id"] == prompt_id: 190 | node = workflow.get(data["node"], {}) 191 | meta = node.get("_meta", {}) 192 | class_type = node.get("class_type", "Unknown") 193 | print( 194 | f"Executing node {data['node']}, title: {meta.get('title', 'Unknown')}, class type: {class_type}" 195 | ) 196 | else: 197 | continue 198 | 199 | def load_workflow(self, workflow): 200 | if not isinstance(workflow, dict): 201 | wf = json.loads(workflow) 202 | else: 203 | wf = workflow 204 | 205 | # There are two types of ComfyUI JSON 206 | # We need the API version 207 | if any(key in wf.keys() for key in ["last_node_id", "last_link_id", "version"]): 208 | raise ValueError( 209 | "You need to use the API JSON version of a ComfyUI workflow. To do this go to your ComfyUI settings and turn on 'Enable Dev mode Options'. Then you can save your ComfyUI workflow via the 'Save (API Format)' button." 
210 | ) 211 | 212 | self.handle_known_unsupported_nodes(wf) 213 | self.handle_inputs(wf) 214 | self.handle_weights(wf) 215 | return wf 216 | 217 | def reset_execution_cache(self): 218 | print("Resetting execution cache") 219 | with open("reset.json", "r") as file: 220 | reset_workflow = json.loads(file.read()) 221 | self.queue_prompt(reset_workflow) 222 | 223 | def randomise_input_seed(self, input_key, inputs): 224 | if input_key in inputs and isinstance(inputs[input_key], (int, float)): 225 | new_seed = random.randint(0, 2**32 - 1) 226 | print(f"Randomising {input_key} to {new_seed}") 227 | inputs[input_key] = new_seed 228 | 229 | def randomise_seeds(self, workflow): 230 | for node_id, node in workflow.items(): 231 | inputs = node.get("inputs", {}) 232 | seed_keys = ["seed", "noise_seed", "rand_seed"] 233 | for seed_key in seed_keys: 234 | self.randomise_input_seed(seed_key, inputs) 235 | 236 | def run_workflow(self, workflow): 237 | print("Running workflow") 238 | prompt_id = self.queue_prompt(workflow) 239 | self.wait_for_prompt_completion(workflow, prompt_id) 240 | output_json = self.get_history(prompt_id) 241 | print("outputs: ", output_json) 242 | print("====================================") 243 | 244 | def get_history(self, prompt_id): 245 | with urllib.request.urlopen( 246 | f"http://{self.server_address}/history/{prompt_id}" 247 | ) as response: 248 | output = json.loads(response.read()) 249 | return output[prompt_id]["outputs"] 250 | 251 | def get_files(self, directories, prefix=""): 252 | files = [] 253 | if isinstance(directories, str): 254 | directories = [directories] 255 | 256 | for directory in directories: 257 | for f in os.listdir(directory): 258 | if f == "__MACOSX": 259 | continue 260 | path = os.path.join(directory, f) 261 | if os.path.isfile(path): 262 | print(f"{prefix}{f}") 263 | files.append(Path(path)) 264 | elif os.path.isdir(path): 265 | print(f"{prefix}{f}/") 266 | files.extend(self.get_files(path, prefix=f"{prefix}{f}/")) 267 | 268 | return sorted(files) 269 | 270 | def cleanup(self, directories): 271 | self.clear_queue() 272 | for directory in directories: 273 | if directory == "/tmp/inputs": 274 | subject_path = os.path.join(directory, "subject.png") 275 | if os.path.exists(subject_path): 276 | os.remove(subject_path) 277 | else: 278 | if os.path.exists(directory): 279 | shutil.rmtree(directory) 280 | os.makedirs(directory) 281 | -------------------------------------------------------------------------------- /custom_node_configs/comfy.settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "pysssss.ShowImageOnMenu": false, 3 | "Comfy.DevMode": true 4 | } -------------------------------------------------------------------------------- /custom_node_configs/was_suite_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "run_requirements": true, 3 | "suppress_uncomfy_warnings": false, 4 | "show_startup_junk": true, 5 | "show_inspiration_quote": false, 6 | "text_nodes_type": "STRING", 7 | "webui_styles": null, 8 | "webui_styles_persistent_update": true, 9 | "blip_model_url": "", 10 | "blip_model_vqa_url": "", 11 | "sam_model_vith_url": "", 12 | "sam_model_vitl_url": "", 13 | "sam_model_vitb_url": "", 14 | "history_display_limit": 1, 15 | "use_legacy_ascii_text": false, 16 | "ffmpeg_bin_path": "/usr/bin/ffmpeg", 17 | "ffmpeg_extra_codecs": { 18 | "avc1": ".mp4", 19 | "h264": ".mkv" 20 | }, 21 | "wildcards_path": 
"/src/ComfyUI/custom_nodes/was-node-suite-comfyui/wildcards", 22 | "wildcard_api": true 23 | } 24 | -------------------------------------------------------------------------------- /custom_node_helper.py: -------------------------------------------------------------------------------- 1 | class CustomNodeHelper: 2 | # This class provides helper methods for managing node weights and checking unsupported nodes. 3 | 4 | @staticmethod 5 | def prepare(**kwargs): 6 | # Placeholder method to prepare a custom node before ComfyUI starts 7 | pass 8 | 9 | @staticmethod 10 | def weights_map(base_url): 11 | # Placeholder method for mapping weights based on a base URL. 12 | return {} 13 | 14 | @staticmethod 15 | def add_weights(weights_to_download, node): 16 | # Placeholder method to add weights to download list based on node specifications. 17 | pass 18 | 19 | @staticmethod 20 | def check_for_unsupported_nodes(node): 21 | # Placeholder method to check if a node is not supported. 22 | pass 23 | -------------------------------------------------------------------------------- /custom_node_helpers/ComfyUI_BRIA_AI_RMBG.py: -------------------------------------------------------------------------------- 1 | from custom_node_helper import CustomNodeHelper 2 | 3 | MODELS = [ 4 | "RMBG-1.4/model.pth", 5 | ] 6 | 7 | class ComfyUI_BRIA_AI_RMBG(CustomNodeHelper): 8 | @staticmethod 9 | def models(): 10 | return MODELS 11 | 12 | @staticmethod 13 | def add_weights(weights_to_download, node): 14 | if node.is_type("BRIA_RMBG_ModelLoader_Zho"): 15 | weights_to_download.extend(MODELS) 16 | 17 | @staticmethod 18 | def weights_map(base_url): 19 | return { 20 | model: { 21 | "url": f"{base_url}/custom_nodes/ComfyUI-BRIA_AI-RMBG/{model}.tar", 22 | "dest": "ComfyUI/custom_nodes/ComfyUI-BRIA_AI-RMBG/", 23 | } 24 | for model in MODELS 25 | } 26 | -------------------------------------------------------------------------------- /custom_node_helpers/ComfyUI_Controlnet_Aux.py: -------------------------------------------------------------------------------- 1 | from custom_node_helper import CustomNodeHelper 2 | from weights_manifest import WeightsManifest 3 | 4 | MODELS = { 5 | "UNet.pth": "bdsqlsz/qinglong_controlnet-lllite/Annotators", 6 | "mobile_sam.pt": "dhkim2810/MobileSAM", 7 | "hrnetv2_w64_imagenet_pretrained.pth": "hr16/ControlNet-HandRefiner-pruned", 8 | "graphormer_hand_state_dict.bin": "hr16/ControlNet-HandRefiner-pruned", 9 | "rtmpose-m_ap10k_256_bs5.torchscript.pt": "hr16/DWPose-TorchScript-BatchSize5", 10 | "dw-ll_ucoco_384_bs5.torchscript.pt": "hr16/DWPose-TorchScript-BatchSize5", 11 | "rtmpose-m_ap10k_256.onnx": "hr16/UnJIT-DWPose", 12 | "yolo_nas_s_fp16.onnx": "hr16/yolo-nas-fp16", 13 | "yolo_nas_m_fp16.onnx": "hr16/yolo-nas-fp16", 14 | "yolox_l.torchscript.pt": "hr16/yolox-onnx", 15 | "densepose_r101_fpn_dl.torchscript": "LayerNorm/DensePose-TorchScript-with-hint-image", 16 | "densepose_r50_fpn_dl.torchscript": "LayerNorm/DensePose-TorchScript-with-hint-image", 17 | "mlsd_large_512_fp32.pth": "lllyasviel/Annotators", 18 | "150_16_swin_l_oneformer_coco_100ep.pth": "lllyasviel/Annotators", 19 | "ControlNetHED.pth": "lllyasviel/Annotators", 20 | "ZoeD_M12_N.pt": "lllyasviel/Annotators", 21 | "scannet.pt": "lllyasviel/Annotators", 22 | "hand_pose_model.pth": "lllyasviel/Annotators", 23 | "upernet_global_small.pth": "lllyasviel/Annotators", 24 | "latest_net_G.pth": "lllyasviel/Annotators", 25 | "netG.pth": "lllyasviel/Annotators", 26 | "sk_model2.pth": "lllyasviel/Annotators", 27 | "dpt_hybrid-midas-501f0c75.pt": 
"lllyasviel/Annotators", 28 | "table5_pidinet.pth": "lllyasviel/Annotators", 29 | "erika.pth": "lllyasviel/Annotators", 30 | "250_16_swin_l_oneformer_ade20k_160k.pth": "lllyasviel/Annotators", 31 | "sk_model.pth": "lllyasviel/Annotators", 32 | "body_pose_model.pth": "lllyasviel/Annotators", 33 | "res101.pth": "lllyasviel/Annotators", 34 | "facenet.pth": "lllyasviel/Annotators", 35 | "isnetis.ckpt": "skytnt/anime-seg", 36 | "yolox_l.onnx": "yzd-v/DWPose", 37 | "dw-ll_ucoco_384.onnx": "yzd-v/DWPose", 38 | "7_model.pth": "bdsqlsz/qinglong_controlnet-lllite/Annotators", 39 | "gmflow-scale1-mixdata.pth": "hr16/Unimatch", 40 | "gmflow-scale2-mixdata.pth": "hr16/Unimatch", 41 | "gmflow-scale2-regrefine6-mixdata.pth": "hr16/Unimatch", 42 | "depth_anything_vitl14.pth": "LiheYoung/Depth-Anything/checkpoints", 43 | "depth_anything_vitb14.pth": "LiheYoung/Depth-Anything/checkpoints", 44 | "depth_anything_vits14.pth": "LiheYoung/Depth-Anything/checkpoints", 45 | "diffusion_edge_indoor.pt": "hr16/Diffusion-Edge", 46 | "diffusion_edge_natrual.pt": "hr16/Diffusion-Edge", # (model has a typo) 47 | "diffusion_edge_urban.pt": "hr16/Diffusion-Edge", 48 | "dsine.pt": "hr16/Diffusion-Edge", 49 | "swin_b-68c6b09e.pth": "torch", 50 | "vgg16-397923af.pth": "torch", 51 | "depth_anything_metric_depth_indoor.pt": "LiheYoung/Depth-Anything/checkpoints_metric_depth", 52 | "depth_anything_metric_depth_outdoor.pt": "LiheYoung/Depth-Anything/checkpoints_metric_depth", 53 | "MTEED.pth": "TheMistoAI/MistoLine/Anyline", 54 | "metric_depth_vit_small_800k.pth": "JUGGHM/Metric3D", 55 | "metric_depth_vit_large_800k.pth": "JUGGHM/Metric3D", 56 | "metric_depth_vit_giant2_800k.pth": "JUGGHM/Metric3D", 57 | } 58 | 59 | 60 | class ComfyUI_Controlnet_Aux(CustomNodeHelper): 61 | @staticmethod 62 | def prepare(**kwargs): 63 | kwargs["weights_downloader"].download_if_not_exists( 64 | "mobilenet_v2-b0353104.pth", 65 | f"{WeightsManifest.base_url()}/custom_nodes/comfyui_controlnet_aux/mobilenet_v2-b0353104.pth.tar", 66 | "/root/.cache/torch/hub/checkpoints/", 67 | ) 68 | 69 | @staticmethod 70 | def models(): 71 | return MODELS 72 | 73 | @staticmethod 74 | def weights_map(base_url): 75 | return { 76 | key: { 77 | "url": f"{base_url}/custom_nodes/comfyui_controlnet_aux/{key}.tar", 78 | "dest": f"ComfyUI/custom_nodes/comfyui_controlnet_aux/ckpts/{MODELS[key]}", 79 | } 80 | for key in MODELS 81 | } 82 | 83 | # Controlnet preprocessor models are not included in the API JSON 84 | # We need to add them manually based on the nodes being used to 85 | # avoid them being downloaded automatically from elsewhere 86 | @staticmethod 87 | def node_class_mapping(): 88 | return { 89 | # Depth 90 | "MiDaS-NormalMapPreprocessor": "dpt_hybrid-midas-501f0c75.pt", 91 | "MiDaS-DepthMapPreprocessor": "dpt_hybrid-midas-501f0c75.pt", 92 | "Zoe-DepthMapPreprocessor": "ZoeD_M12_N.pt", 93 | "LeReS-DepthMapPreprocessor": ["res101.pth", "latest_net_G.pth"], 94 | "MeshGraphormer-DepthMapPreprocessor": [ 95 | "hrnetv2_w64_imagenet_pretrained.pth", 96 | "graphormer_hand_state_dict.bin", 97 | ], 98 | "DepthAnythingPreprocessor": [ 99 | "depth_anything_vitl14.pth", 100 | "depth_anything_vitb14.pth", 101 | "depth_anything_vits14.pth", 102 | ], 103 | "Zoe_DepthAnythingPreprocessor": [ 104 | "depth_anything_metric_depth_indoor.pt", 105 | "depth_anything_metric_depth_outdoor.pt", 106 | ], 107 | "Metric3DPreprocessor": [ 108 | "metric_depth_vit_small_800k.pth", 109 | "metric_depth_vit_large_800k.pth", 110 | "metric_depth_vit_giant2_800k.pth", 111 | ], 112 | 
"Metric3D-NormalMapPreprocessor": [ 113 | "metric_depth_vit_small_800k.pth", 114 | "metric_depth_vit_large_800k.pth", 115 | "metric_depth_vit_giant2_800k.pth", 116 | ], 117 | # Segmentation 118 | "BAE-NormalMapPreprocessor": "scannet.pt", 119 | "OneFormer-COCO-SemSegPreprocessor": "150_16_swin_l_oneformer_coco_100ep.pth", 120 | "OneFormer-ADE20K-SemSegPreprocessor": "250_16_swin_l_oneformer_ade20k_160k.pth", 121 | "UniFormer-SemSegPreprocessor": "upernet_global_small.pth", 122 | "SemSegPreprocessor": "upernet_global_small.pth", 123 | "AnimeFace_SemSegPreprocessor": ["UNet.pth", "isnetis.ckpt"], 124 | "SAMPreprocessor": "mobile_sam.pt", 125 | "DSINE-NormalMapPreprocessor": "dsine.pt", 126 | # Line extractors 127 | "AnimeLineArtPreprocessor": "netG.pth", 128 | "HEDPreprocessor": "ControlNetHED.pth", 129 | "FakeScribblePreprocessor": "ControlNetHED.pth", 130 | "M-LSDPreprocessor": "mlsd_large_512_fp32.pth", 131 | "PiDiNetPreprocessor": "table5_pidinet.pth", 132 | "LineArtPreprocessor": ["sk_model.pth", "sk_model2.pth"], 133 | "Manga2Anime_LineArt_Preprocessor": "erika.pth", 134 | "TEEDPreprocessor": "7_model.pth", 135 | "DiffusionEdge_Preprocessor": [ 136 | "diffusion_edge_indoor.pt", 137 | "diffusion_edge_natrual.pt", # model has a typo 138 | "diffusion_edge_urban.pt", 139 | "vgg16-397923af.pth", 140 | "swin_b-68c6b09e.pth", 141 | ], 142 | "AnyLineArtPreprocessor_aux": [ 143 | "MTEED.pth", 144 | "erika.pth", 145 | "netG.pth", 146 | "sk_model2.pth", 147 | ], 148 | # Pose 149 | "OpenposePreprocessor": [ 150 | "body_pose_model.pth", 151 | "hand_pose_model.pth", 152 | "facenet.pth", 153 | ], 154 | # Optical flow 155 | "Unimatch_OptFlowPreprocessor": [ 156 | "gmflow-scale1-mixdata.pth", 157 | "gmflow-scale2-mixdata.pth", 158 | "gmflow-scale2-regrefine6-mixdata.pth", 159 | ], 160 | } 161 | 162 | @staticmethod 163 | def add_weights(weights_to_download, node): 164 | node_mapping = ComfyUI_Controlnet_Aux.node_class_mapping() 165 | 166 | if node.is_type_in(node_mapping.keys()): 167 | class_weights = node_mapping[node.type()] 168 | weights_to_download.extend( 169 | class_weights if isinstance(class_weights, list) else [class_weights] 170 | ) 171 | 172 | # Additional check for AIO_Preprocessor and its preprocessor input value 173 | if node.is_type("AIO_Preprocessor"): 174 | preprocessor = node.input("preprocessor") 175 | if preprocessor in node_mapping: 176 | preprocessor_weights = node_mapping[preprocessor] 177 | weights_to_download.extend( 178 | preprocessor_weights 179 | if isinstance(preprocessor_weights, list) 180 | else [preprocessor_weights] 181 | ) 182 | -------------------------------------------------------------------------------- /custom_node_helpers/ComfyUI_Essentials.py: -------------------------------------------------------------------------------- 1 | from custom_node_helper import CustomNodeHelper 2 | 3 | 4 | class ComfyUI_Essentials(CustomNodeHelper): 5 | @staticmethod 6 | def add_weights(weights_to_download, node): 7 | if node.is_type("LoadCLIPSegModels"): 8 | weights_to_download.extend(["models--CIDAS--clipseg-rd64-refined"]) 9 | -------------------------------------------------------------------------------- /custom_node_helpers/ComfyUI_IPAdapter_plus.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from custom_node_helper import CustomNodeHelper 4 | 5 | # List of presets 6 | PRESETS = [ 7 | # IPAdapterUnifiedLoader 8 | "LIGHT - SD1.5 only (low strength)", 9 | "STANDARD (medium strength)", 10 | "VIT-G (medium strength)", 11 
| "PLUS (high strength)", 12 | "PLUS FACE (portraits)", 13 | "FULL FACE - SD1.5 only (portraits stronger)", 14 | # IPAdapterUnifiedLoaderFaceID 15 | "FACEID", 16 | "FACEID PLUS - SD1.5 only", 17 | "FACEID PLUS V2", 18 | "FACEID PORTRAIT (style transfer)", 19 | "FACEID PORTRAIT UNNORM - SDXL only (strong)", 20 | # IPAdapterUnifiedLoaderCommunity 21 | "Composition", 22 | ] 23 | 24 | 25 | class ComfyUI_IPAdapter_plus(CustomNodeHelper): 26 | @staticmethod 27 | def prepare(**kwargs): 28 | # create the ipadapter folder in ComfyUI/models/ipadapter 29 | # if it doesn't exist at setup time then the plugin defers to the base directory 30 | # and won't look for our ipadaters that are downloaded on demand 31 | if not os.path.exists("ComfyUI/models/ipadapter"): 32 | os.makedirs("ComfyUI/models/ipadapter") 33 | 34 | @staticmethod 35 | def get_preset_weights(preset): 36 | is_insightface = False 37 | weights_to_add = [] 38 | 39 | # clipvision 40 | if preset.startswith("VIT-G"): 41 | weights_to_add.append("CLIP-ViT-bigG-14-laion2B-39B-b160k.safetensors") 42 | else: 43 | weights_to_add.append("CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors") 44 | 45 | # ipadapters 46 | if preset.startswith("LIGHT"): 47 | weights_to_add.append("ip-adapter_sd15_light_v11.bin") 48 | 49 | if preset.startswith("STANDARD"): 50 | weights_to_add.extend( 51 | ["ip-adapter_sd15.safetensors", "ip-adapter_sdxl_vit-h.safetensors"] 52 | ) 53 | 54 | if preset.startswith("VIT-G"): 55 | weights_to_add.extend( 56 | ["ip-adapter_sd15_vit-G.safetensors", "ip-adapter_sdxl.safetensors"] 57 | ) 58 | 59 | if preset.startswith("PLUS ("): 60 | weights_to_add.extend( 61 | [ 62 | "ip-adapter-plus_sd15.safetensors", 63 | "ip-adapter-plus_sdxl_vit-h.safetensors", 64 | ] 65 | ) 66 | 67 | if preset.startswith("PLUS FACE"): 68 | weights_to_add.extend( 69 | [ 70 | "ip-adapter-plus-face_sd15.safetensors", 71 | "ip-adapter-plus-face_sdxl_vit-h.safetensors", 72 | ] 73 | ) 74 | 75 | if preset.startswith("FULL FACE"): 76 | weights_to_add.append("ip-adapter-full-face_sd15.safetensors") 77 | 78 | if preset == "FACEID": 79 | is_insightface = True 80 | weights_to_add.extend( 81 | [ 82 | "ip-adapter-faceid_sd15.bin", 83 | "ip-adapter-faceid_sdxl.bin", 84 | "ip-adapter-faceid_sd15_lora.safetensors", 85 | "ip-adapter-faceid_sdxl_lora.safetensors", 86 | ] 87 | ) 88 | 89 | if preset.startswith("FACEID PORTRAIT UNNORM"): 90 | is_insightface = True 91 | weights_to_add.extend( 92 | [ 93 | "ip-adapter-faceid-portrait-unnorm_sdxl.bin", 94 | ] 95 | ) 96 | 97 | if preset.startswith("FACEID PORTRAIT ("): 98 | is_insightface = True 99 | weights_to_add.extend( 100 | [ 101 | "ip-adapter-faceid-portrait-v11_sd15.bin", 102 | "ip-adapter-faceid-portrait_sdxl.bin", 103 | ] 104 | ) 105 | 106 | if preset.startswith("FACEID PLUS - "): 107 | is_insightface = True 108 | weights_to_add.extend( 109 | [ 110 | "ip-adapter-faceid-plus_sd15.bin", 111 | "ip-adapter-faceid-plus_sd15_lora.safetensors", 112 | ] 113 | ) 114 | 115 | if preset.startswith("FACEID PLUS V2"): 116 | is_insightface = True 117 | weights_to_add.extend( 118 | [ 119 | "ip-adapter-faceid-plusv2_sd15.bin", 120 | "ip-adapter-faceid-plusv2_sdxl.bin", 121 | "ip-adapter-faceid-plusv2_sd15_lora.safetensors", 122 | "ip-adapter-faceid-plusv2_sdxl_lora.safetensors", 123 | ] 124 | ) 125 | 126 | if preset.startswith("Composition"): 127 | weights_to_add.extend( 128 | [ 129 | "ip_plus_composition_sd15.safetensors", 130 | "ip_plus_composition_sdxl.safetensors", 131 | ] 132 | ) 133 | 134 | if is_insightface: 135 | 
weights_to_add.append("models/buffalo_l") 136 | 137 | return weights_to_add 138 | 139 | @staticmethod 140 | def add_weights(weights_to_download, node): 141 | if node.is_type_in( 142 | [ 143 | "IPAdapterUnifiedLoader", 144 | "IPAdapterUnifiedLoaderFaceID", 145 | "IPAdapterUnifiedLoaderCommunity", 146 | ] 147 | ): 148 | preset = node.input("preset") 149 | print(f"Including weights for IPAdapter preset: {preset}") 150 | if preset: 151 | weights_to_download.extend( 152 | ComfyUI_IPAdapter_plus.get_preset_weights(preset) 153 | ) 154 | elif node.is_type("IPAdapterInsightFaceLoader"): 155 | weights_to_download.append("models/buffalo_l") 156 | -------------------------------------------------------------------------------- /custom_node_helpers/ComfyUI_InstantID.py: -------------------------------------------------------------------------------- 1 | from custom_node_helper import CustomNodeHelper 2 | 3 | 4 | class ComfyUI_InstantID(CustomNodeHelper): 5 | @staticmethod 6 | def add_weights(weights_to_download, node): 7 | if node.is_type("InstantIDFaceAnalysis"): 8 | weights_to_download.append("models/antelopev2") 9 | elif ( 10 | node.is_type("InstantIDModelLoader") 11 | and node.input("instantid_file") == "ipadapter.bin" 12 | ): 13 | node.set_input("instantid_file", "instantid-ip-adapter.bin") 14 | weights_to_download.append("instantid-ip-adapter.bin") 15 | elif node.is_type("ControlNetLoader"): 16 | if ( 17 | node.input("control_net_name") 18 | == "instantid/diffusion_pytorch_model.safetensors" 19 | ): 20 | node.set_input("control_net_name", "instantid-controlnet.safetensors") 21 | weights_to_download.append("instantid-controlnet.safetensors") 22 | -------------------------------------------------------------------------------- /custom_node_helpers/ComfyUI_KJNodes.py: -------------------------------------------------------------------------------- 1 | from custom_node_helper import CustomNodeHelper 2 | 3 | class ComfyUI_KJNodes(CustomNodeHelper): 4 | @staticmethod 5 | def add_weights(weights_to_download, node): 6 | if node.is_type("BatchCLIPSeg"): 7 | weights_to_download.extend(["models--CIDAS--clipseg-rd64-refined"]) 8 | 9 | @staticmethod 10 | def check_for_unsupported_nodes(node): 11 | unsupported_nodes = { 12 | "StabilityAPI_SD3": "Calling an external API and passing your key is not supported and is unsafe", 13 | "Superprompt": "Superprompt is not supported as it needs to download T5 weights", 14 | } 15 | node.raise_if_unsupported(unsupported_nodes) 16 | -------------------------------------------------------------------------------- /custom_node_helpers/ComfyUI_LayerDiffuse.py: -------------------------------------------------------------------------------- 1 | from custom_node_helper import CustomNodeHelper 2 | 3 | 4 | class ComfyUI_LayerDiffuse(CustomNodeHelper): 5 | @staticmethod 6 | def get_config_weights(config): 7 | config_weights_map = { 8 | "SDXL, Attention Injection": ["layer_xl_transparent_attn.safetensors"], 9 | "SDXL, Conv Injection": ["layer_xl_transparent_conv.safetensors"], 10 | "SD15, Attention Injection, attn_sharing": [ 11 | "layer_sd15_transparent_attn.safetensors" 12 | ], 13 | "SDXL, Foreground": ["layer_xl_fg2ble.safetensors"], 14 | "SDXL, Background": ["layer_xl_bg2ble.safetensors"], 15 | "Diff, SDXL, Foreground": ["layer_xl_fgble2bg.safetensors"], 16 | "Diff, SDXL, Background": ["layer_xl_bgble2fg.safetensors"], 17 | "SD15, attn_sharing, Batch size (3N)": ["layer_sd15_joint.safetensors"], 18 | "SD15, Foreground, attn_sharing, Batch size (2N)": [ 19 | 
"layer_sd15_fg2bg.safetensors" 20 | ], 21 | "SD15, Background, attn_sharing, Batch size (2N)": [ 22 | "layer_sd15_bg2fg.safetensors" 23 | ], 24 | } 25 | return config_weights_map.get(config, []) 26 | 27 | @staticmethod 28 | def get_vae_weights(config): 29 | vae_weights_map = { 30 | "SD15": ["layer_sd15_vae_transparent_decoder.safetensors"], 31 | "SDXL": ["vae_transparent_decoder.safetensors"], 32 | } 33 | 34 | return vae_weights_map.get(config, []) 35 | 36 | @staticmethod 37 | def add_weights(weights_to_download, node): 38 | if node.is_type_in( 39 | [ 40 | "LayeredDiffusionApply", 41 | "LayeredDiffusionJointApply", 42 | "LayeredDiffusionCondApply", 43 | "LayeredDiffusionCondJointApply", 44 | ] 45 | ): 46 | config = node.input("config") 47 | weights_to_download.extend(ComfyUI_LayerDiffuse.get_config_weights(config)) 48 | elif node.is_type( 49 | "LayeredDiffusionDiffApply", 50 | ): 51 | config = f"Diff, {node.input('config')}" 52 | weights_to_download.extend(ComfyUI_LayerDiffuse.get_config_weights(config)) 53 | elif node.is_type_in( 54 | [ 55 | "LayeredDiffusionDecode", 56 | "LayeredDiffusionDecodeRGBA", 57 | "LayeredDiffusionDecodeSplit", 58 | ] 59 | ): 60 | sd_version = node.input("sd_version") 61 | weights_to_download.extend(ComfyUI_LayerDiffuse.get_vae_weights(sd_version)) 62 | -------------------------------------------------------------------------------- /custom_node_helpers/WAS_Node_Suite.py: -------------------------------------------------------------------------------- 1 | from custom_node_helper import CustomNodeHelper 2 | 3 | 4 | class WAS_Node_Suite(CustomNodeHelper): 5 | @staticmethod 6 | def add_weights(weights_to_download, node): 7 | if ( 8 | node.is_type("CLIPSeg Model Loader") 9 | and node.input("model") == "CIDAS/clipseg-rd64-refined" 10 | ): 11 | weights_to_download.extend(["models--CIDAS--clipseg-rd64-refined"]) 12 | 13 | @staticmethod 14 | def check_for_unsupported_nodes(node): 15 | unsupported_nodes = { 16 | "BLIP Model Loader": "BLIP version 1 not supported by Transformers", 17 | "BLIP Analyze Image": "BLIP version 1 not supported by Transformers", 18 | "CLIPTextEncode (NSP)": "Makes an HTTP request out to a Github file", 19 | "Diffusers Model Loader": "Diffusers is not going to be included as a requirement for this custom node", 20 | "Diffusers Hub Model Down-Loader": "Diffusers is not going to be included as a requirement for this custom node", 21 | "SAM Model Loader": "There are better SAM Loader modules to use. 
This implementation is not supported", 22 | "Text Parse Noodle Soup Prompts": "Makes an HTTP request out to a Github file", 23 | "Text Random Prompt": "Makes an HTTP request out to Lexica, which is unsupported", 24 | "True Random.org Number Generator": "Needs an API key which cannot be supplied", 25 | "Image Seamless Texture": "img2texture dependency has not been added", 26 | "MiDaS Model Loader": "WAS MiDaS nodes are not currently supported", 27 | "MiDaS Mask Image": "WAS MiDaS nodes are not currently supported", 28 | "MiDaS Depth Approximation": "WAS MiDaS nodes are not currently supported", 29 | "Text File History Loader": "History is not persisted", 30 | } 31 | node.raise_if_unsupported(unsupported_nodes) 32 | -------------------------------------------------------------------------------- /custom_node_helpers/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import importlib 4 | 5 | current_dir = os.path.dirname(os.path.abspath(__file__)) 6 | for file in os.listdir(current_dir): 7 | if file.endswith(".py") and not file.startswith("__"): 8 | module_name = file[:-3] 9 | module = importlib.import_module(f".{module_name}", package=__name__) 10 | class_name = module_name 11 | setattr(sys.modules[__name__], class_name, getattr(module, class_name)) 12 | -------------------------------------------------------------------------------- /custom_nodes.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "repo": "https://github.com/cubiq/ComfyUI_IPAdapter_plus", 4 | "commit": "d33265a" 5 | }, 6 | { 7 | "repo": "https://github.com/Fannovel16/comfyui_controlnet_aux", 8 | "commit": "8e51eb3" 9 | }, 10 | { 11 | "repo": "https://github.com/cubiq/ComfyUI_essentials", 12 | "commit": "94b7450" 13 | }, 14 | { 15 | "repo": "https://github.com/cubiq/ComfyUI_InstantID", 16 | "commit": "d8c70a0" 17 | }, 18 | { 19 | "repo": "https://github.com/ZHO-ZHO-ZHO/ComfyUI-BRIA_AI-RMBG", 20 | "commit": "44a3f8f" 21 | }, 22 | { 23 | "repo": "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes", 24 | "commit": "d78b780" 25 | }, 26 | { 27 | "repo": "https://github.com/huchenlei/ComfyUI-layerdiffuse", 28 | "commit": "151f746" 29 | }, 30 | { 31 | "repo": "https://github.com/kijai/ComfyUI-KJNodes", 32 | "commit": "48d5a18" 33 | }, 34 | { 35 | "repo": "https://github.com/huchenlei/ComfyUI-IC-Light-Native", 36 | "commit": "5c065e1" 37 | }, 38 | { 39 | "repo": "https://github.com/fofr/ComfyUI-Impact-Pack", 40 | "commit": "07a18e7" 41 | }, 42 | { 43 | "repo": "https://github.com/WASasquatch/was-node-suite-comfyui", 44 | "commit": "33534f2" 45 | } 46 | ] 47 | -------------------------------------------------------------------------------- /feature-extractor/preprocessor_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "crop_size": 224, 3 | "do_center_crop": true, 4 | "do_convert_rgb": true, 5 | "do_normalize": true, 6 | "do_resize": true, 7 | "feature_extractor_type": "CLIPFeatureExtractor", 8 | "image_mean": [ 9 | 0.48145466, 10 | 0.4578275, 11 | 0.40821073 12 | ], 13 | "image_std": [ 14 | 0.26862954, 15 | 0.26130258, 16 | 0.27577711 17 | ], 18 | "resample": 3, 19 | "size": 224 20 | } 21 | -------------------------------------------------------------------------------- /node.py: -------------------------------------------------------------------------------- 1 | class Node: 2 | def __init__(self, node): 3 | self.node = node 4 | 5 | def type(self): 
6 | return self.node["class_type"] 7 | 8 | def is_type(self, type): 9 | return "class_type" in self.node and self.node["class_type"] == type 10 | 11 | def is_type_in(self, types): 12 | return "class_type" in self.node and self.node["class_type"] in types 13 | 14 | def has_input(self, key): 15 | return key in self.node["inputs"] 16 | 17 | def input(self, key, default_value=None): 18 | return self.node["inputs"][key] if key in self.node["inputs"] else default_value 19 | 20 | def set_input(self, key, value): 21 | self.node["inputs"][key] = value 22 | 23 | def raise_if_unsupported(self, unsupported_nodes={}): 24 | if self.is_type_in(unsupported_nodes): 25 | raise ValueError(f"{self.type()} node is not supported: {unsupported_nodes[self.type()]}") 26 | -------------------------------------------------------------------------------- /predict.py: -------------------------------------------------------------------------------- 1 | # An example of how to convert a given API workflow into its own Replicate model 2 | # Replace predict.py with this file when building your own workflow 3 | 4 | import os 5 | import mimetypes 6 | import json 7 | import random 8 | from PIL import Image, ExifTags 9 | from typing import List, Iterator 10 | from cog import BasePredictor, Input, Path 11 | from comfyui import ComfyUI 12 | from safety_checker import SafetyChecker 13 | from cog_model_helpers import optimise_images 14 | from cog_model_helpers import seed as seed_helper 15 | 16 | OUTPUT_DIR = "/tmp/outputs" 17 | INPUT_DIR = "/tmp/inputs" 18 | COMFYUI_TEMP_OUTPUT_DIR = "ComfyUI/temp" 19 | ALL_DIRECTORIES = [OUTPUT_DIR, INPUT_DIR, COMFYUI_TEMP_OUTPUT_DIR] 20 | 21 | MAX_HEADSHOTS = 14 22 | MAX_POSES = 30 23 | POSE_PATH = f"{INPUT_DIR}/poses" 24 | 25 | mimetypes.add_type("image/webp", ".webp") 26 | 27 | # Save your example JSON to the same directory as predict.py 28 | api_json_file = "workflow_api.json" 29 | 30 | 31 | class Predictor(BasePredictor): 32 | def setup(self): 33 | self.comfyUI = ComfyUI("127.0.0.1:8188") 34 | self.comfyUI.start_server(OUTPUT_DIR, INPUT_DIR) 35 | self.safetyChecker = SafetyChecker() 36 | 37 | # Give a list of weights filenames to download during setup 38 | with open(api_json_file, "r") as file: 39 | workflow = json.loads(file.read()) 40 | self.comfyUI.handle_weights( 41 | workflow, 42 | weights_to_download=[], 43 | ) 44 | 45 | # Download pose images 46 | self.comfyUI.weights_downloader.download( 47 | "pose_images.tar", 48 | "https://weights.replicate.delivery/default/fofr/character/pose_images.tar", 49 | f"{INPUT_DIR}/poses", 50 | ) 51 | 52 | self.headshots = self.list_pose_filenames(type="headshot") 53 | self.poses = self.list_pose_filenames(type="pose") 54 | self.all = self.headshots + self.poses 55 | 56 | def get_filenames( 57 | self, filenames: List[str], length: int, use_random: bool = True 58 | ) -> List[str]: 59 | if length > len(filenames): 60 | length = len(filenames) 61 | print(f"Using {length} as the max number of files.") 62 | 63 | if use_random: 64 | random.shuffle(filenames) 65 | 66 | return filenames[:length] 67 | 68 | def list_pose_filenames(self, type="headshot"): 69 | if type == "headshot": 70 | max_value = MAX_HEADSHOTS 71 | prefix = "headshot" 72 | elif type == "pose": 73 | max_value = MAX_POSES 74 | prefix = "pose" 75 | else: 76 | raise ValueError("Invalid type specified. 
Use 'headshot' or 'pose'.") 77 | 78 | return [ 79 | { 80 | "kps": f"{POSE_PATH}/{prefix}_kps_{str(i).zfill(5)}_.png", 81 | "openpose": f"{POSE_PATH}/{prefix}_open_pose_{str(i).zfill(5)}_.png", 82 | "dwpose": f"{POSE_PATH}/{prefix}_dw_pose_{str(i).zfill(5)}_.png", 83 | } 84 | for i in range(1, max_value + 1) 85 | ] 86 | 87 | def get_poses(self, number_of_outputs, is_random, type): 88 | if type == "Headshot poses": 89 | return self.get_filenames(self.headshots, number_of_outputs, is_random) 90 | elif type == "Half-body poses": 91 | return self.get_filenames(self.poses, number_of_outputs, is_random) 92 | else: 93 | return self.get_filenames(self.all, number_of_outputs, is_random) 94 | 95 | def handle_input_file( 96 | self, 97 | input_file: Path, 98 | filename: str = "image.png", 99 | check_orientation: bool = True, 100 | ): 101 | image = Image.open(input_file) 102 | 103 | if check_orientation: 104 | try: 105 | for orientation in ExifTags.TAGS.keys(): 106 | if ExifTags.TAGS[orientation] == "Orientation": 107 | break 108 | exif = dict(image._getexif().items()) 109 | 110 | if exif[orientation] == 3: 111 | image = image.rotate(180, expand=True) 112 | elif exif[orientation] == 6: 113 | image = image.rotate(270, expand=True) 114 | elif exif[orientation] == 8: 115 | image = image.rotate(90, expand=True) 116 | except (KeyError, AttributeError): 117 | # EXIF data does not have orientation 118 | # Do not rotate 119 | pass 120 | 121 | image.save(os.path.join(INPUT_DIR, filename)) 122 | 123 | # Update nodes in the JSON workflow to modify your workflow based on the given inputs 124 | def update_workflow(self, workflow, **kwargs): 125 | positive_prompt = workflow["9"]["inputs"] 126 | positive_prompt["text"] = kwargs["prompt"] 127 | 128 | negative_prompt = workflow["10"]["inputs"] 129 | negative_prompt["text"] = ( 130 | f"(nsfw:2), nipple, nude, naked, {kwargs['negative_prompt']}, lowres, two people, child, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, multiple view, reference sheet, mutated, poorly drawn, mutation, deformed, ugly, bad proportions, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, amateur drawing, odd eyes, uneven eyes, unnatural face, uneven nostrils, crooked mouth, bad teeth, crooked teeth, gross, ugly, very long body, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn eyes" 131 | ) 132 | 133 | sampler = workflow["11"]["inputs"] 134 | sampler["seed"] = kwargs["seed"] 135 | 136 | empty_latent_image = workflow["29"]["inputs"] 137 | empty_latent_image["batch_size"] = kwargs["number_of_images_per_pose"] 138 | 139 | kps_input_image = workflow["94"]["inputs"] 140 | kps_input_image["image"] = kwargs["pose"]["kps"] 141 | 142 | dwpose_input_image = workflow["95"]["inputs"] 143 | dwpose_input_image["image"] = kwargs["pose"]["dwpose"] 144 | 145 | def predict( 146 | self, 147 | prompt: str = Input( 148 | default="A headshot photo", 149 | description="Describe the subject. Include clothes and hairstyle for more consistency.", 150 | ), 151 | negative_prompt: str = Input( 152 | description="Things you do not want to see in your image", 153 | default="", 154 | ), 155 | subject: Path = Input( 156 | description="An image of a person. 
Best images are square close ups of a face, but they do not have to be.", 157 | default=None, 158 | ), 159 | # type: str = Input( 160 | # description="The type of images to generate, headshots, half-body poses or both.", 161 | # choices=[ 162 | # "Both headshots and half-body poses", 163 | # "Headshot poses", 164 | # "Half-body poses", 165 | # ], 166 | # default="Both headshots and half-body poses", 167 | # ), 168 | number_of_outputs: int = Input( 169 | description="The number of images to generate.", default=3, ge=1, le=20 170 | ), 171 | number_of_images_per_pose: int = Input( 172 | description="The number of images to generate for each pose.", 173 | default=1, 174 | ge=1, 175 | le=4, 176 | ), 177 | randomise_poses: bool = Input( 178 | description="Randomise the poses used.", default=True 179 | ), 180 | output_format: str = optimise_images.predict_output_format(), 181 | output_quality: int = optimise_images.predict_output_quality(), 182 | seed: int = seed_helper.predict_seed(), 183 | disable_safety_checker: bool = Input( 184 | description="Disable safety checker for generated images.", default=False 185 | ), 186 | ) -> Iterator[Path]: 187 | """Run a single prediction on the model""" 188 | self.comfyUI.cleanup(ALL_DIRECTORIES) 189 | 190 | # Headshot poses are not coming out consistently good 191 | type = "Half-body poses" 192 | 193 | using_fixed_seed = bool(seed) 194 | seed = seed_helper.generate(seed) 195 | 196 | self.handle_input_file(subject, filename="subject.png") 197 | poses = self.get_poses(number_of_outputs, randomise_poses, type) 198 | 199 | with open(api_json_file, "r") as file: 200 | workflow = json.loads(file.read()) 201 | 202 | self.comfyUI.connect() 203 | 204 | if using_fixed_seed: 205 | self.comfyUI.reset_execution_cache() 206 | 207 | returned_files = [] 208 | has_any_nsfw_content = False 209 | has_yielded_safe_content = False 210 | 211 | for pose in poses: 212 | self.update_workflow( 213 | workflow, 214 | prompt=prompt, 215 | negative_prompt=negative_prompt, 216 | seed=seed, 217 | type=type, 218 | number_of_outputs=number_of_outputs, 219 | number_of_images_per_pose=number_of_images_per_pose, 220 | randomise_poses=randomise_poses, 221 | pose=pose, 222 | ) 223 | self.comfyUI.run_workflow(workflow) 224 | all_output_files = self.comfyUI.get_files(OUTPUT_DIR) 225 | new_files = [ 226 | file 227 | for file in all_output_files 228 | if file.name.rsplit(".", 1)[0] not in returned_files 229 | ] 230 | optimised_images = optimise_images.optimise_image_files( 231 | output_format, output_quality, new_files 232 | ) 233 | 234 | for image in optimised_images: 235 | if not disable_safety_checker: 236 | has_nsfw_content = self.safetyChecker.run( 237 | [image], error_on_all_nsfw=False 238 | ) 239 | if any(has_nsfw_content): 240 | has_any_nsfw_content = True 241 | print(f"Not returning image {image} as it has NSFW content.") 242 | else: 243 | yield Path(image) 244 | has_yielded_safe_content = True 245 | else: 246 | yield Path(image) 247 | has_yielded_safe_content = True 248 | 249 | returned_files.extend( 250 | [file.name.rsplit(".", 1)[0] for file in all_output_files] 251 | ) 252 | 253 | if has_any_nsfw_content and not has_yielded_safe_content: 254 | raise Exception( 255 | "NSFW content detected in all outputs. Try running it again, or try a different prompt." 
256 | ) 257 | -------------------------------------------------------------------------------- /reset.json: -------------------------------------------------------------------------------- 1 | { 2 | "3": { 3 | "inputs": { 4 | "width": 512, 5 | "height": 512, 6 | "batch_size": 1, 7 | "color": 0 8 | }, 9 | "class_type": "EmptyImage", 10 | "_meta": { 11 | "title": "EmptyImage" 12 | } 13 | }, 14 | "4": { 15 | "inputs": { 16 | "images": [ 17 | "3", 18 | 0 19 | ] 20 | }, 21 | "class_type": "PreviewImage", 22 | "_meta": { 23 | "title": "Preview Image" 24 | } 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /safety_checker.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import os 3 | import subprocess 4 | import numpy as np 5 | import PIL 6 | from diffusers.pipelines.stable_diffusion.safety_checker import ( 7 | StableDiffusionSafetyChecker, 8 | ) 9 | from transformers import CLIPImageProcessor 10 | 11 | FEATURE_EXTRACTOR = "./feature-extractor" 12 | SAFETY_CACHE = "./safety-cache" 13 | SAFETY_URL = "https://weights.replicate.delivery/default/sdxl/safety-1.0.tar" 14 | 15 | 16 | class SafetyChecker: 17 | def __init__(self): 18 | if not os.path.exists(SAFETY_CACHE): 19 | subprocess.check_call( 20 | ["pget", "-xf", SAFETY_URL, SAFETY_CACHE], 21 | close_fds=False, 22 | ) 23 | 24 | self.safety_checker = StableDiffusionSafetyChecker.from_pretrained( 25 | SAFETY_CACHE, torch_dtype=torch.float16 26 | ).to("cuda") 27 | self.feature_extractor = CLIPImageProcessor.from_pretrained(FEATURE_EXTRACTOR) 28 | 29 | def load_image(self, image_path): 30 | return PIL.Image.open(image_path).convert("RGB") 31 | 32 | def run(self, image_paths, error_on_all_nsfw=True): 33 | images = [self.load_image(image_path) for image_path in image_paths] 34 | safety_checker_input = self.feature_extractor(images, return_tensors="pt").to( 35 | "cuda" 36 | ) 37 | np_images = [np.array(val) for val in images] 38 | _, is_nsfw = self.safety_checker( 39 | images=np_images, 40 | clip_input=safety_checker_input.pixel_values.to(torch.float16), 41 | ) 42 | 43 | for i, nsfw in enumerate(is_nsfw): 44 | if nsfw: 45 | print(f"NSFW content detected in image {image_paths[i]}") 46 | 47 | if error_on_all_nsfw and all(is_nsfw): 48 | raise Exception( 49 | "NSFW content detected in all outputs. Try running it again, or try a different prompt." 50 | ) 51 | 52 | return is_nsfw 53 | -------------------------------------------------------------------------------- /scripts/get_weights.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | """ 4 | This script is used to download weight files specified in various input formats. 5 | It supports reading weight file names from plain text files, extracting them from JSON workflows, 6 | or directly from command-line arguments. The script utilizes the WeightsDownloader class 7 | to handle the actual downloading of the weight files. 
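For example, the three input forms can be mixed on one command line (the filenames below are illustrative picks from this repository and its weights.json):

    python scripts/get_weights.py sd_xl_base_1.0.safetensors
    python scripts/get_weights.py workflow_api.json
    python scripts/get_weights.py weights.txt   # one weight filename per line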
8 | """ 9 | 10 | import sys 11 | import os 12 | import json 13 | sys.path.append(os.path.join(os.path.dirname(__file__), '..')) 14 | from weights_downloader import WeightsDownloader 15 | 16 | def download_weight_files(weight_files): 17 | wd = WeightsDownloader() 18 | for weight_file in weight_files: 19 | try: 20 | wd.download_weights(weight_file) 21 | except Exception as e: 22 | print(f"Failed to download {weight_file}: {str(e)}") 23 | continue 24 | 25 | def extract_weights_from_workflow(workflow_path): 26 | with open(workflow_path, 'r') as f: 27 | workflow = json.load(f) 28 | weights_to_download = [] 29 | for node in workflow.values(): 30 | if "inputs" in node: 31 | for input in node["inputs"].values(): 32 | if isinstance(input, str) and input.endswith(tuple(WeightsDownloader.supported_filetypes)): 33 | weights_to_download.append(input) 34 | return list(set(weights_to_download)) 35 | 36 | def main(filenames): 37 | weight_files = [] 38 | for filename in filenames: 39 | if filename.endswith('.txt'): 40 | with open(filename, 'r') as f: 41 | weight_files.extend(f.read().splitlines()) 42 | elif filename.endswith('.json'): 43 | weight_files.extend(extract_weights_from_workflow(filename)) 44 | else: 45 | weight_files.append(filename) 46 | download_weight_files(weight_files) 47 | 48 | if __name__ == "__main__": 49 | if len(sys.argv) < 2: 50 | print("Usage: python get_weights.py [ ...] or python get_weights.py or python get_weights.py ") 51 | sys.exit(1) 52 | filenames = sys.argv[1:] 53 | main(filenames) 54 | -------------------------------------------------------------------------------- /scripts/install_custom_nodes.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import json 4 | import os 5 | import subprocess 6 | 7 | """ 8 | This script is used to clone specific versions of repositories. 9 | It reads a JSON file containing repositories and their commit hashes, clones them into a specific directory, 10 | and then checks out to the specified commit. 
11 | """ 12 | 13 | json_file = "custom_nodes.json" 14 | comfy_dir = "ComfyUI" 15 | custom_nodes_dir = f"{comfy_dir}/custom_nodes/" 16 | 17 | with open(json_file, "r") as file: 18 | repos = json.load(file) 19 | 20 | # Loop over each repository in the list 21 | for repo in repos: 22 | repo_url = repo["repo"] 23 | commit_hash = repo["commit"] 24 | repo_name = os.path.basename(repo_url.replace(".git", "")) 25 | 26 | # Check if the repository directory already exists 27 | repo_path = os.path.join(custom_nodes_dir, repo_name) 28 | if not os.path.isdir(repo_path): 29 | # Clone the repository into the destination directory 30 | print( 31 | f"Cloning {repo_url} into {repo_path} and checking out to commit {commit_hash}" 32 | ) 33 | subprocess.run(["git", "clone", "--recursive", repo_url, repo_path]) 34 | 35 | # Store the current directory and change to the repository's directory 36 | current_dir = os.getcwd() 37 | os.chdir(repo_path) 38 | subprocess.run(["git", "checkout", commit_hash]) 39 | 40 | # Change back to the original directory after operations 41 | os.chdir(current_dir) 42 | else: 43 | print(f"Skipping clone for {repo_name}, directory already exists") 44 | 45 | # Copy custom node config files to the correct directory 46 | config_files = { 47 | "was_suite_config": { 48 | "src": "custom_node_configs/was_suite_config.json", 49 | "dest": os.path.join(custom_nodes_dir, "was-node-suite-comfyui/"), 50 | }, 51 | "rgthree_config": { 52 | "src": "custom_node_configs/rgthree_config.json", 53 | "dest": os.path.join(custom_nodes_dir, "rgthree-comfy/"), 54 | }, 55 | "comfy_settings": { 56 | "src": "custom_node_configs/comfy.settings.json", 57 | "dest": os.path.join(comfy_dir, "user", "default"), 58 | }, 59 | } 60 | 61 | if "comfy_settings" in config_files: 62 | paths = config_files["comfy_settings"] 63 | if not os.path.exists(paths["dest"]): 64 | os.makedirs(paths["dest"]) 65 | 66 | for config_file, paths in config_files.items(): 67 | if ( 68 | os.path.isfile(paths["src"]) 69 | and os.path.isdir(paths["dest"]) 70 | and not os.path.exists( 71 | os.path.join(paths["dest"], os.path.basename(paths["src"])) 72 | ) 73 | ): 74 | print(f"Copying {config_file} to {paths['dest']}") 75 | subprocess.run(["cp", paths["src"], paths["dest"]]) 76 | -------------------------------------------------------------------------------- /scripts/prepare_template.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | import shutil 4 | 5 | FILES_TO_DELETE = [ 6 | "examples", 7 | "test", 8 | "CHANGELOG.md", 9 | "supported_weights.md", 10 | "weights_licenses.md", 11 | "scripts/push_comfyui_manager_weights.py", 12 | "scripts/push_weights_from_hf.py", 13 | "scripts/push_weights.py", 14 | "scripts/sort_weights.py", 15 | ] 16 | 17 | def prepare_template(): 18 | """ 19 | This script is used to prepare the template for a new model. 20 | It deletes unnecessary files and directories. 21 | It also overwrites the README.md with a blank file and header. 22 | Finally, it replaces predict.py with example_predict.py. 23 | """ 24 | print("Preparing to clean up this template for a new model") 25 | print( 26 | "This will clear the README and delete the following files and directories:", 27 | "\n".join(FILES_TO_DELETE), 28 | ) 29 | print("Are you sure you want to continue? 
(y/n)") 30 | 31 | if input() != "y": 32 | print("Aborting...") 33 | exit(1) 34 | 35 | print("Deleting unnecessary files and directories...") 36 | for file in FILES_TO_DELETE: 37 | if os.path.exists(file): 38 | if os.path.isdir(file): 39 | shutil.rmtree(file) 40 | else: 41 | os.remove(file) 42 | 43 | # Overwrite the README.md with a blank file and header "# Your repo" 44 | print("Overwriting README.md with a blank file and header") 45 | with open("README.md", "w") as f: 46 | f.write("# Your repo\n") 47 | 48 | print("Replacing predict.py with example_predict.py") 49 | shutil.move("example_predict.py", "predict.py") 50 | 51 | prepare_template() 52 | -------------------------------------------------------------------------------- /scripts/reset.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import subprocess 3 | 4 | """ 5 | This script is used to reset the ComfyUI environment. 6 | It deletes the ComfyUI directory before reinstalling ComfyUI and every custom node. 7 | """ 8 | 9 | print("Preparing to reset the ComfyUI environment...") 10 | print( 11 | "This will delete the ComfyUI directory before reinstalling ComfyUI and every custom node." 12 | ) 13 | print("Are you sure you want to continue? (y/n)") 14 | 15 | if input() != "y": 16 | print("Aborting...") 17 | exit(1) 18 | 19 | print("Deleting the ComfyUI directory...") 20 | subprocess.run(["sudo", "rm", "-rf", "ComfyUI"]) 21 | 22 | print("Installing ComfyUI...") 23 | subprocess.run(["git", "submodule", "update", "--init", "--recursive"]) 24 | 25 | print("Installing custom nodes...") 26 | subprocess.run(["./scripts/install_custom_nodes.py"]) 27 | print("Custom nodes installed successfully.") 28 | -------------------------------------------------------------------------------- /scripts/start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cd ComfyUI 3 | python main.py --listen 0.0.0.0 --gpu-only 4 | -------------------------------------------------------------------------------- /weights.json: -------------------------------------------------------------------------------- 1 | { 2 | "CHECKPOINTS": [ 3 | "512-inpainting-ema.safetensors", 4 | "absolutereality_v181.safetensors", 5 | "albedobaseXL_v13.safetensors", 6 | "albedobaseXL_v21.safetensors", 7 | "anything-v3-fp16-pruned.safetensors", 8 | "CinematicRedmond.safetensors", 9 | "copaxCuteXLSDXL10_v4.safetensors", 10 | "copaxTimelessxlSDXL1_v8.safetensors", 11 | "crystalClearXL_ccxl.safetensors", 12 | "Deliberate_v2.safetensors", 13 | "dreamlabsoil_V2_v2.safetensors", 14 | "DreamShaper_6.2_BakedVae_pruned.safetensors", 15 | "DreamShaper_6.31_BakedVae.safetensors", 16 | "DreamShaper_6.31_BakedVae_pruned.safetensors", 17 | "DreamShaper_6.31_INPAINTING.inpainting.safetensors", 18 | "DreamShaper_6_BakedVae.safetensors", 19 | "dreamshaper_8.safetensors", 20 | "dreamshaper_8LCM.safetensors", 21 | "dreamshaperXL_alpha2Xl10.safetensors", 22 | "dreamshaperXL_lightningDPMSDE.safetensors", 23 | "dynavision_v20Bakedvae.safetensors", 24 | "epicrealism_naturalSinRC1VAE.safetensors", 25 | "epicrealismXL_v10.safetensors", 26 | "Hyper-SDXL-1step-Unet-Comfyui.fp16.safetensors", 27 | "Hyper-SDXL-1step-Unet.safetensors", 28 | "imp_v10.safetensors", 29 | "jibMixRealisticXL_v10Lightning46Step.safetensors", 30 | "Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors", 31 | "juggernaut_reborn.safetensors", 32 | "Juggernaut_RunDiffusionPhoto2_Lightning_4Steps.safetensors", 33 | 
"juggernautXL_juggernautX.safetensors", 34 | "juggernautXL_v8Rundiffusion.safetensors", 35 | "LCM_Dreamshaper_v7_4k.safetensors", 36 | "leosamsHelloworldXL_helloworldXL60.safetensors", 37 | "magicmixReverie_v10.safetensors", 38 | "majicmixRealistic_v7.safetensors", 39 | "motionctrl.pth", 40 | "motionctrl_svd.ckpt", 41 | "photonLCM_v10.safetensors", 42 | "pixlAnimeCartoonComic_v10.safetensors", 43 | "playground-v2.5-1024px-aesthetic.fp16.safetensors", 44 | "proteus_v02.safetensors", 45 | "ProteusV0.4-Lighting.safetensors", 46 | "ProteusV0.4.safetensors", 47 | "rcnzCartoon3d_v20.safetensors", 48 | "Realistic_Vision_V5.1-inpainting.ckpt", 49 | "Realistic_Vision_V5.1-inpainting.safetensors", 50 | "Realistic_Vision_V5.1.ckpt", 51 | "Realistic_Vision_V5.1.safetensors", 52 | "Realistic_Vision_V5.1_fp16-no-ema-inpainting.ckpt", 53 | "Realistic_Vision_V5.1_fp16-no-ema-inpainting.safetensors", 54 | "Realistic_Vision_V5.1_fp16-no-ema.ckpt", 55 | "Realistic_Vision_V5.1_fp16-no-ema.safetensors", 56 | "Realistic_Vision_V6.0_NV_B1.safetensors", 57 | "Realistic_Vision_V6.0_NV_B1_fp16.safetensors", 58 | "Realistic_Vision_V6.0_NV_B1_inpainting.safetensors", 59 | "Realistic_Vision_V6.0_NV_B1_inpainting_fp16.safetensors", 60 | "realisticLCMBYStable_v10.safetensors", 61 | "RealVisXL_V2.0.safetensors", 62 | "RealVisXL_V3.0.safetensors", 63 | "RealVisXL_V3.0_Turbo.safetensors", 64 | "RealVisXL_V4.0.safetensors", 65 | "RealVisXL_V4.0_Lightning.safetensors", 66 | "rundiffusionXL_beta.safetensors", 67 | "sd_xl_base_1.0.safetensors", 68 | "sd_xl_base_1.0_0.9vae.safetensors", 69 | "sd_xl_refiner_1.0.safetensors", 70 | "sd_xl_refiner_1.0_0.9vae.safetensors", 71 | "sd_xl_turbo_1.0.safetensors", 72 | "sd_xl_turbo_1.0_fp16.safetensors", 73 | "SDXL-Flash.safetensors", 74 | "SDXL-Flash_Mini.safetensors", 75 | "sdxl_lightning_1step_x0.safetensors", 76 | "sdxl_lightning_2step.safetensors", 77 | "sdxl_lightning_4step.safetensors", 78 | "sdxl_lightning_8step.safetensors", 79 | "sdxlUnstableDiffusers_nihilmania.safetensors", 80 | "sdxlUnstableDiffusers_v11Rundiffusion.safetensors", 81 | "segmind-vega.safetensors", 82 | "SSD-1B.safetensors", 83 | "starlightXLAnimated_v3.safetensors", 84 | "svd.safetensors", 85 | "svd_xt.safetensors", 86 | "toonyou_beta6.safetensors", 87 | "turbovisionxlSuperFastXLBasedOnNew_tvxlV32Bakedvae.safetensors", 88 | "v1-5-pruned-emaonly.ckpt", 89 | "v2-1_512-ema-pruned.safetensors", 90 | "v2-1_768-ema-pruned.ckpt", 91 | "v2-1_768-ema-pruned.safetensors", 92 | "v2-1_768-nonema-pruned.ckpt", 93 | "v2-1_768-nonema-pruned.safetensors", 94 | "wd-illusion-fp16.safetensors", 95 | "x4-upscaler-ema.safetensors" 96 | ], 97 | "UPSCALE_MODELS": [ 98 | "4x-AnimeSharp.pth", 99 | "4x-UltraMix_Balanced.pth", 100 | "4x-UltraMix_Smooth.pth", 101 | "4x-UltraSharp.pth", 102 | "4x_foolhardy_Remacri.pth", 103 | "4x_NMKD-Siax_200k.pth", 104 | "8x_NMKD-Superscale_150000_G.pth", 105 | "ESRGAN_4x.pth", 106 | "RealESRGAN_x2.pth", 107 | "RealESRGAN_x4.pth", 108 | "RealESRGAN_x4plus.pth", 109 | "RealESRGAN_x4plus_anime_6B.pth", 110 | "RealESRGAN_x8.pth" 111 | ], 112 | "CLIP": [ 113 | "models--QuanSun--EVA-CLIP" 114 | ], 115 | "CLIP_VISION": [ 116 | "CLIP-ViT-bigG-14-laion2B-39B-b160k.safetensors", 117 | "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors", 118 | "clip-vit-large-patch14.bin", 119 | "clip_vision_g.safetensors", 120 | "IPAdapter_image_encoder_sd15.safetensors", 121 | "model.15.safetensors", 122 | "model.sdxl.safetensors" 123 | ], 124 | "LORAS": [ 125 | "3d_render_style_xl.safetensors", 126 | "add-detail-xl.safetensors", 127 
| "aesthetic_anime_v1s.safetensors", 128 | "age_slider-LECO-v1.safetensors", 129 | "AnimateLCM_sd15_t2v_lora.safetensors", 130 | "artificialguybr/3DRedmond-3DRenderStyle-3DRenderAF.safetensors", 131 | "artificialguybr/AnalogRedmond-AnalogRedmAF.safetensors", 132 | "artificialguybr/AnalogRedmondV2-Analog-AnalogRedmAF.safetensors", 133 | "artificialguybr/BetterTextRedmond.safetensors", 134 | "artificialguybr/ClayAnimationRedm.safetensors", 135 | "artificialguybr/ClayAnimationRedmond15-ClayAnimation-Clay.safetensors", 136 | "artificialguybr/ColoringBookRedmond-ColoringBook-ColoringBookAF.safetensors", 137 | "artificialguybr/ColoringBookRedmond-ColoringBookAF.safetensors", 138 | "artificialguybr/ColoringBookRedmond21V-FreedomRedmond-ColoringBook-ColoringBookAF.safetensors", 139 | "artificialguybr/CuteCartoon15V-LiberteRedmodModel-Cartoon-CuteCartoonAF.safetensors", 140 | "artificialguybr/CuteCartoonRedmond-CuteCartoon-CuteCartoonAF.safetensors", 141 | "artificialguybr/CuteFruitsRedmond-CtFruitsRedmAF.safetensors", 142 | "artificialguybr/FilmGrainRedmond-FilmGrain-FilmGrainAF.safetensors", 143 | "artificialguybr/IconsRedmond.safetensors", 144 | "artificialguybr/IconsRedmond15V-Icons.safetensors", 145 | "artificialguybr/IconsRedmondV2-Icons.safetensors", 146 | "artificialguybr/LineAniRedmond-LineAniAF.safetensors", 147 | "artificialguybr/LineAniRedmondV2-Lineart-LineAniAF.safetensors", 148 | "artificialguybr/LogoRedmond15V-LogoRedmAF-Logo.safetensors", 149 | "artificialguybr/LogoRedmond_LogoRedAF.safetensors", 150 | "artificialguybr/LogoRedmondV2-Logo-LogoRedmAF.safetensors", 151 | "artificialguybr/MoviePosterRedmond-MoviePoster-MoviePosterRedAF.safetensors", 152 | "artificialguybr/PixelArtRedmond-Lite64.safetensors", 153 | "artificialguybr/PixelArtRedmond15V-PixelArt-PIXARFK.safetensors", 154 | "artificialguybr/PomologicalWatercolorRedmond.safetensors", 155 | "artificialguybr/PS1Redmond-PS1Game-Playstation1Graphics.safetensors", 156 | "artificialguybr/StickersRedmond.safetensors", 157 | "artificialguybr/StickersRedmond15Version-Stickers-Sticker.safetensors", 158 | "artificialguybr/StickersRedmond21V-FreedomRedmond-Sticker-Stickers.safetensors", 159 | "artificialguybr/StoryBookRedmond-KidsRedmAF.safetensors", 160 | "artificialguybr/StoryBookRedmond15-KidsRedmAF-KidsBook.safetensors", 161 | "artificialguybr/StorybookRedmondUnbound-KidsRedmAF.safetensors", 162 | "artificialguybr/StorybookRedmondV2-KidsBook-KidsRedmAF.safetensors", 163 | "artificialguybr/StudioGhibli.Redmond-StdGBRRedmAF-StudioGhibli.safetensors", 164 | "artificialguybr/StudioGhibliRedmond-StdGBRedmAF.safetensors", 165 | "artificialguybr/ToyRedmond-FnkRedmAF.safetensors", 166 | "artificialguybr/TshirtDesignRedmond-TshirtDesignAF.safetensors", 167 | "artificialguybr/TShirtDesignRedmondV2-Tshirtdesign-TshirtDesignAF.safetensors", 168 | "artificialguybr/View360.safetensors", 169 | "COOLKIDS_MERGE_V2.5.safetensors", 170 | "fofr/emoji.safetensors", 171 | "glowneon_xl_v1.safetensors", 172 | "Harrlogos_v2.0.safetensors", 173 | "Hyper-SD15-1step-lora.safetensors", 174 | "Hyper-SD15-2steps-lora.safetensors", 175 | "Hyper-SD15-4steps-lora.safetensors", 176 | "Hyper-SD15-8steps-lora.safetensors", 177 | "Hyper-SDXL-1step-lora.safetensors", 178 | "Hyper-SDXL-2steps-lora.safetensors", 179 | "Hyper-SDXL-4steps-lora.safetensors", 180 | "Hyper-SDXL-8steps-lora.safetensors", 181 | "ip-adapter-faceid-plus_sd15_lora.safetensors", 182 | "ip-adapter-faceid-plusv2_sd15_lora.safetensors", 183 | "ip-adapter-faceid-plusv2_sdxl_lora.safetensors", 184 | 
"ip-adapter-faceid_sd15_lora.safetensors", 185 | "ip-adapter-faceid_sdxl_lora.safetensors", 186 | "lcm-lora-sdv1-5.safetensors", 187 | "lcm-lora-ssd-1b.safetensors", 188 | "lcm_lora_sdxl.safetensors", 189 | "MODILL_XL_0.27_RC.safetensors", 190 | "more_details.safetensors", 191 | "PerfectEyesXL.safetensors", 192 | "sd_xl_offset_example-lora_1.0.safetensors", 193 | "sdxl_lightning_2step_lora.pth", 194 | "sdxl_lightning_2step_lora.safetensors", 195 | "sdxl_lightning_4step_lora.pth", 196 | "sdxl_lightning_4step_lora.safetensors", 197 | "sdxl_lightning_8step_lora.pth", 198 | "sdxl_lightning_8step_lora.safetensors", 199 | "SDXLrender_v2.0.safetensors", 200 | "Segmind-VegaRT.safetensors", 201 | "theovercomer8sContrastFix_sd15.safetensors", 202 | "theovercomer8sContrastFix_sd21768.safetensors", 203 | "weight_slider-LECO-v1.safetensors" 204 | ], 205 | "EMBEDDINGS": [ 206 | "bad_prompt_version2-neg.pt", 207 | "easynegative.safetensors", 208 | "epiCNegative.pt", 209 | "epiCRealism.pt", 210 | "FastNegativeV2.pt", 211 | "JuggernautNegative-neg.pt", 212 | "negative_hand-neg.pt", 213 | "ng_deepnegative_v1_75t.pt", 214 | "verybadimagenegative_v1.3.pt" 215 | ], 216 | "CONTROLNET": [ 217 | "control-lora-canny-rank128.safetensors", 218 | "control-lora-canny-rank256.safetensors", 219 | "control-lora-depth-rank128.safetensors", 220 | "control-lora-depth-rank256.safetensors", 221 | "control-lora-recolor-rank128.safetensors", 222 | "control-lora-recolor-rank256.safetensors", 223 | "control-lora-sketch-rank128-metadata.safetensors", 224 | "control-lora-sketch-rank256.safetensors", 225 | "control_boxdepth_LooseControlfp16.safetensors", 226 | "control_lora_rank128_v11e_sd15_ip2p_fp16.safetensors", 227 | "control_lora_rank128_v11e_sd15_shuffle_fp16.safetensors", 228 | "control_lora_rank128_v11f1e_sd15_tile_fp16.safetensors", 229 | "control_lora_rank128_v11f1p_sd15_depth_fp16.safetensors", 230 | "control_lora_rank128_v11p_sd15_canny_fp16.safetensors", 231 | "control_lora_rank128_v11p_sd15_inpaint_fp16.safetensors", 232 | "control_lora_rank128_v11p_sd15_lineart_fp16.safetensors", 233 | "control_lora_rank128_v11p_sd15_mlsd_fp16.safetensors", 234 | "control_lora_rank128_v11p_sd15_normalbae_fp16.safetensors", 235 | "control_lora_rank128_v11p_sd15_openpose_fp16.safetensors", 236 | "control_lora_rank128_v11p_sd15_scribble_fp16.safetensors", 237 | "control_lora_rank128_v11p_sd15_seg_fp16.safetensors", 238 | "control_lora_rank128_v11p_sd15_softedge_fp16.safetensors", 239 | "control_lora_rank128_v11p_sd15s2_lineart_anime_fp16.safetensors", 240 | "control_sd15_inpaint_depth_hand_fp16.safetensors", 241 | "control_v11e_sd15_ip2p.pth", 242 | "control_v11e_sd15_ip2p_fp16.safetensors", 243 | "control_v11e_sd15_shuffle.pth", 244 | "control_v11e_sd15_shuffle_fp16.safetensors", 245 | "control_v11f1e_sd15_tile.pth", 246 | "control_v11f1e_sd15_tile_fp16.safetensors", 247 | "control_v11f1p_sd15_depth.pth", 248 | "control_v11f1p_sd15_depth_fp16.safetensors", 249 | "control_v11p_sd15_canny.pth", 250 | "control_v11p_sd15_canny_fp16.safetensors", 251 | "control_v11p_sd15_inpaint.pth", 252 | "control_v11p_sd15_inpaint_fp16.safetensors", 253 | "control_v11p_sd15_lineart.pth", 254 | "control_v11p_sd15_lineart_fp16.safetensors", 255 | "control_v11p_sd15_mlsd.pth", 256 | "control_v11p_sd15_mlsd_fp16.safetensors", 257 | "control_v11p_sd15_normalbae.pth", 258 | "control_v11p_sd15_normalbae_fp16.safetensors", 259 | "control_v11p_sd15_openpose.pth", 260 | "control_v11p_sd15_openpose_fp16.safetensors", 261 | "control_v11p_sd15_scribble.pth", 262 | 
"control_v11p_sd15_scribble_fp16.safetensors", 263 | "control_v11p_sd15_seg.pth", 264 | "control_v11p_sd15_seg_fp16.safetensors", 265 | "control_v11p_sd15_softedge.pth", 266 | "control_v11p_sd15_softedge_fp16.safetensors", 267 | "control_v11p_sd15s2_lineart_anime.pth", 268 | "control_v11p_sd15s2_lineart_anime_fp16.safetensors", 269 | "control_v11u_sd15_tile_fp16.safetensors", 270 | "control_v1p_sd15_qrcode_monster.safetensors", 271 | "control_v1p_sdxl_qrcode_monster.safetensors", 272 | "controllllite_v01032064e_sdxl_canny_anime.safetensors", 273 | "controlnet-canny-sdxl-1.0.fp16.safetensors", 274 | "controlnet-depth-sdxl-1.0.fp16.safetensors", 275 | "controlnet-sd-xl-1.0-softedge-dexined.safetensors", 276 | "controlnet-temporalnet-sdxl-1.0.safetensors", 277 | "depth-anything.safetensors", 278 | "depth-zoe-xl-v1.0-controlnet.safetensors", 279 | "diffusers_xl_canny_full.safetensors", 280 | "diffusers_xl_canny_mid.safetensors", 281 | "diffusers_xl_canny_small.safetensors", 282 | "diffusers_xl_depth_full.safetensors", 283 | "diffusers_xl_depth_mid.safetensors", 284 | "diffusers_xl_depth_small.safetensors", 285 | "instantid-controlnet.safetensors", 286 | "ioclab_sd15_recolor.safetensors", 287 | "ip-adapter_sd15.pth", 288 | "ip-adapter_sd15_plus.pth", 289 | "ip-adapter_xl.pth", 290 | "kohya_controllllite_xl_blur.safetensors", 291 | "kohya_controllllite_xl_blur_anime.safetensors", 292 | "kohya_controllllite_xl_blur_anime_beta.safetensors", 293 | "kohya_controllllite_xl_canny.safetensors", 294 | "kohya_controllllite_xl_canny_anime.safetensors", 295 | "kohya_controllllite_xl_depth.safetensors", 296 | "kohya_controllllite_xl_depth_anime.safetensors", 297 | "kohya_controllllite_xl_openpose_anime.safetensors", 298 | "kohya_controllllite_xl_openpose_anime_v2.safetensors", 299 | "kohya_controllllite_xl_scribble_anime.safetensors", 300 | "mistoLine_fp16.safetensors", 301 | "mistoLine_rank256.safetensors", 302 | "OpenPoseXL2.safetensors", 303 | "sai_xl_canny_128lora.safetensors", 304 | "sai_xl_canny_256lora.safetensors", 305 | "sai_xl_depth_128lora.safetensors", 306 | "sai_xl_depth_256lora.safetensors", 307 | "sai_xl_recolor_128lora.safetensors", 308 | "sai_xl_recolor_256lora.safetensors", 309 | "sai_xl_sketch_128lora.safetensors", 310 | "sai_xl_sketch_256lora.safetensors", 311 | "sargezt_xl_depth.safetensors", 312 | "sargezt_xl_depth_faid_vidit.safetensors", 313 | "sargezt_xl_depth_zeed.safetensors", 314 | "sargezt_xl_softedge.safetensors", 315 | "t2i-adapter_diffusers_xl_canny.safetensors", 316 | "t2i-adapter_diffusers_xl_depth_midas.safetensors", 317 | "t2i-adapter_diffusers_xl_depth_zoe.safetensors", 318 | "t2i-adapter_diffusers_xl_lineart.safetensors", 319 | "t2i-adapter_diffusers_xl_openpose.safetensors", 320 | "t2i-adapter_diffusers_xl_sketch.safetensors", 321 | "t2i-adapter_xl_canny.safetensors", 322 | "t2i-adapter_xl_openpose.safetensors", 323 | "t2i-adapter_xl_sketch.safetensors", 324 | "t2iadapter_canny_sd14v1.pth", 325 | "t2iadapter_color_sd14v1.pth", 326 | "t2iadapter_depth_sd14v1.pth", 327 | "t2iadapter_keypose_sd14v1.pth", 328 | "t2iadapter_openpose_sd14v1.pth", 329 | "t2iadapter_seg_sd14v1.pth", 330 | "t2iadapter_sketch_sd14v1.pth", 331 | "t2iadapter_style_sd14v1.pth", 332 | "temporalnetversion2.ckpt", 333 | "thibaud_xl_openpose.safetensors", 334 | "thibaud_xl_openpose_256lora.safetensors" 335 | ], 336 | "IPADAPTER": [ 337 | "ip-adapter-faceid-plus_sd15.bin", 338 | "ip-adapter-faceid-plusv2_sd15.bin", 339 | "ip-adapter-faceid-plusv2_sdxl.bin", 340 | 
"ip-adapter-faceid-portrait-v11_sd15.bin", 341 | "ip-adapter-faceid-portrait_sd15.bin", 342 | "ip-adapter-faceid-portrait_sdxl.bin", 343 | "ip-adapter-faceid-portrait_sdxl_unnorm.bin", 344 | "ip-adapter-faceid_sd15.bin", 345 | "ip-adapter-faceid_sdxl.bin", 346 | "ip-adapter-full-face_sd15.bin", 347 | "ip-adapter-full-face_sd15.safetensors", 348 | "ip-adapter-plus-face_sd15.bin", 349 | "ip-adapter-plus-face_sd15.safetensors", 350 | "ip-adapter-plus-face_sdxl_vit-h.bin", 351 | "ip-adapter-plus-face_sdxl_vit-h.safetensors", 352 | "ip-adapter-plus_sd15.bin", 353 | "ip-adapter-plus_sd15.safetensors", 354 | "ip-adapter-plus_sdxl_vit-h.bin", 355 | "ip-adapter-plus_sdxl_vit-h.safetensors", 356 | "ip-adapter_sd15.bin", 357 | "ip-adapter_sd15.safetensors", 358 | "ip-adapter_sd15_light.bin", 359 | "ip-adapter_sd15_light.safetensors", 360 | "ip-adapter_sd15_light_v11.bin", 361 | "ip-adapter_sd15_vit-G.bin", 362 | "ip-adapter_sd15_vit-G.safetensors", 363 | "ip-adapter_sdxl.safetensors", 364 | "ip-adapter_sdxl_vit-h.safetensors", 365 | "ip_plus_composition_sd15.safetensors", 366 | "ip_plus_composition_sdxl.safetensors" 367 | ], 368 | "VAE": [ 369 | "sdxl_vae.safetensors", 370 | "vae-ft-mse-840000-ema-pruned.safetensors" 371 | ], 372 | "UNET": [ 373 | "iclight_sd15_fbc_unet_ldm.safetensors", 374 | "iclight_sd15_fc_unet_ldm.safetensors", 375 | "sdxl_lightning_1step_unet_x0.pth", 376 | "sdxl_lightning_1step_unet_x0.safetensors", 377 | "sdxl_lightning_2step_unet.pth", 378 | "sdxl_lightning_2step_unet.safetensors", 379 | "sdxl_lightning_4step_unet.pth", 380 | "sdxl_lightning_4step_unet.safetensors", 381 | "sdxl_lightning_8step_unet.pth", 382 | "sdxl_lightning_8step_unet.safetensors" 383 | ], 384 | "PHOTOMAKER": [ 385 | "photomaker-v1.bin" 386 | ], 387 | "INSTANTID": [ 388 | "instantid-ip-adapter.bin" 389 | ], 390 | "INSIGHTFACE": [ 391 | "antelopev2", 392 | "buffalo_l", 393 | "inswapper_128.onnx", 394 | "inswapper_128_fp16.onnx", 395 | "models/antelopev2", 396 | "models/buffalo_l" 397 | ], 398 | "FACEDETECTION": [ 399 | "detection_mobilenet0.25_Final.pth", 400 | "detection_Resnet50_Final.pth", 401 | "parsing_bisenet.pth", 402 | "parsing_parsenet.pth", 403 | "yolov5l-face.pth", 404 | "yolov5n-face.pth" 405 | ], 406 | "FACERESTORE_MODELS": [ 407 | "codeformer.pth", 408 | "GFPGANv1.3.pth", 409 | "GFPGANv1.4.pth", 410 | "RestoreFormer.pth" 411 | ], 412 | "MMDETS": [ 413 | "bbox/mmdet_anime-face_yolov3.pth" 414 | ], 415 | "SAMS": [ 416 | "mobile_sam.pt", 417 | "sam_hq_vit_b.pth", 418 | "sam_hq_vit_h.pth", 419 | "sam_hq_vit_l.pth", 420 | "sam_vit_b_01ec64.pth", 421 | "sam_vit_h_4b8939.pth", 422 | "sam_vit_l_0b3195.pth" 423 | ], 424 | "GROUNDING-DINO": [ 425 | "groundingdino_swinb_cogcoor.pth", 426 | "groundingdino_swint_ogc.pth" 427 | ], 428 | "BERT-BASE-UNCASED": [ 429 | "bert-base-uncased" 430 | ], 431 | "ULTRALYTICS": [ 432 | "bbox/face_yolov8m.pt", 433 | "bbox/hand_yolov8s.pt", 434 | "bbox/Eyes.pt", 435 | "segm/hair_yolov8n-seg_60.pt", 436 | "segm/person_yolov8m-seg.pt" 437 | ], 438 | "LAYER_MODEL": [ 439 | "layer_sd15_bg2fg.safetensors", 440 | "layer_sd15_fg2bg.safetensors", 441 | "layer_sd15_joint.safetensors", 442 | "layer_sd15_transparent_attn.safetensors", 443 | "layer_sd15_vae_transparent_decoder.safetensors", 444 | "layer_xl_bg2ble.safetensors", 445 | "layer_xl_bgble2fg.safetensors", 446 | "layer_xl_fg2ble.safetensors", 447 | "layer_xl_fgble2bg.safetensors", 448 | "layer_xl_transparent_attn.safetensors", 449 | "layer_xl_transparent_conv.safetensors", 450 | "vae_transparent_decoder.safetensors" 451 | ], 
452 | "CLIPSEG": [ 453 | "models--CIDAS--clipseg-rd64-refined" 454 | ], 455 | "REMBG": [ 456 | "isnet-anime.onnx", 457 | "isnet-general-use.onnx", 458 | "silueta.onnx", 459 | "u2net.onnx", 460 | "u2net_cloth_seg.onnx", 461 | "u2net_human_seg.onnx", 462 | "u2netp.onnx", 463 | "vit_b-decoder-quant.onnx", 464 | "vit_b-encoder-quant.onnx" 465 | ], 466 | "PULID": [ 467 | "ip-adapter_pulid_sdxl_fp16.safetensors" 468 | ], 469 | "GLIGEN": [ 470 | "gligen_sd14_textbox_pruned.safetensors", 471 | "gligen_sd14_textbox_pruned_fp16.safetensors" 472 | ] 473 | } 474 | -------------------------------------------------------------------------------- /weights_downloader.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import time 3 | import os 4 | 5 | from weights_manifest import WeightsManifest 6 | 7 | BASE_URL = "https://weights.replicate.delivery/default/comfy-ui" 8 | 9 | 10 | class WeightsDownloader: 11 | supported_filetypes = [ 12 | ".ckpt", 13 | ".safetensors", 14 | ".pt", 15 | ".pth", 16 | ".bin", 17 | ".onnx", 18 | ".torchscript", 19 | ] 20 | 21 | def __init__(self): 22 | self.weights_manifest = WeightsManifest() 23 | self.weights_map = self.weights_manifest.weights_map 24 | 25 | def get_weights_by_type(self, type): 26 | return self.weights_manifest.get_weights_by_type(type) 27 | 28 | def download_weights(self, weight_str): 29 | if weight_str in self.weights_map: 30 | if self.weights_manifest.is_non_commercial_only(weight_str): 31 | print( 32 | f"⚠️ {weight_str} is for non-commercial use only. Unless you have obtained a commercial license.\nDetails: https://github.com/fofr/cog-comfyui/blob/main/weights_licenses.md" 33 | ) 34 | self.download_if_not_exists( 35 | weight_str, 36 | self.weights_map[weight_str]["url"], 37 | self.weights_map[weight_str]["dest"], 38 | ) 39 | else: 40 | raise ValueError( 41 | f"{weight_str} unavailable. 
View the list of available weights: https://github.com/fofr/cog-comfyui/blob/main/supported_weights.md" 42 | ) 43 | 44 | def download_if_not_exists(self, weight_str, url, dest): 45 | if dest.endswith(weight_str): 46 | path_string = dest 47 | else: 48 | path_string = os.path.join(dest, weight_str) 49 | 50 | if not os.path.exists(path_string): 51 | self.download(weight_str, url, dest) 52 | 53 | def download(self, weight_str, url, dest): 54 | if "/" in weight_str: 55 | subfolder = weight_str.rsplit("/", 1)[0] 56 | dest = os.path.join(dest, subfolder) 57 | os.makedirs(dest, exist_ok=True) 58 | 59 | print(f"⏳ Downloading {weight_str} to {dest}") 60 | start = time.time() 61 | subprocess.check_call( 62 | ["pget", "--log-level", "warn", "-xf", url, dest], close_fds=False 63 | ) 64 | elapsed_time = time.time() - start 65 | try: 66 | file_size_bytes = os.path.getsize( 67 | os.path.join(dest, os.path.basename(weight_str)) 68 | ) 69 | file_size_megabytes = file_size_bytes / (1024 * 1024) 70 | print( 71 | f"⌛️ Downloaded {weight_str} in {elapsed_time:.2f}s, size: {file_size_megabytes:.2f}MB" 72 | ) 73 | except FileNotFoundError: 74 | print(f"⌛️ Downloaded {weight_str} in {elapsed_time:.2f}s") 75 | -------------------------------------------------------------------------------- /weights_manifest.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import time 3 | import os 4 | import json 5 | import custom_node_helpers as helpers 6 | 7 | UPDATED_WEIGHTS_MANIFEST_URL = ( 8 | "https://raw.githubusercontent.com/fofr/cog-comfyui/main/weights.json" 9 | ) 10 | 11 | UPDATED_WEIGHTS_MANIFEST_PATH = "updated_weights.json" 12 | WEIGHTS_MANIFEST_PATH = "weights.json" 13 | 14 | BASE_URL = "https://weights.replicate.delivery/default/comfy-ui" 15 | BASE_PATH = "ComfyUI/models" 16 | 17 | 18 | class WeightsManifest: 19 | @staticmethod 20 | def base_url(): 21 | return BASE_URL 22 | 23 | def __init__(self): 24 | self.download_latest_weights_manifest = ( 25 | os.getenv("DOWNLOAD_LATEST_WEIGHTS_MANIFEST", "false").lower() == "true" 26 | ) 27 | self.weights_manifest = self._load_weights_manifest() 28 | self.weights_map = self._initialize_weights_map() 29 | 30 | def _load_weights_manifest(self): 31 | if self.download_latest_weights_manifest: 32 | self._download_updated_weights_manifest() 33 | return self._merge_manifests() 34 | 35 | def _download_updated_weights_manifest(self): 36 | if not os.path.exists(UPDATED_WEIGHTS_MANIFEST_PATH): 37 | print( 38 | f"Downloading updated weights manifest from {UPDATED_WEIGHTS_MANIFEST_URL}" 39 | ) 40 | start = time.time() 41 | try: 42 | subprocess.check_call( 43 | [ 44 | "pget", 45 | "--log-level", 46 | "warn", 47 | "-f", 48 | UPDATED_WEIGHTS_MANIFEST_URL, 49 | UPDATED_WEIGHTS_MANIFEST_PATH, 50 | ], 51 | close_fds=False, 52 | timeout=5, 53 | ) 54 | print( 55 | f"Downloading {UPDATED_WEIGHTS_MANIFEST_URL} took: {(time.time() - start):.2f}s" 56 | ) 57 | except subprocess.CalledProcessError: 58 | print(f"Failed to download {UPDATED_WEIGHTS_MANIFEST_URL}") 59 | pass 60 | except subprocess.TimeoutExpired: 61 | print(f"Download from {UPDATED_WEIGHTS_MANIFEST_URL} timed out") 62 | pass 63 | 64 | def _merge_manifests(self): 65 | if os.path.exists(WEIGHTS_MANIFEST_PATH): 66 | with open(WEIGHTS_MANIFEST_PATH, "r") as f: 67 | original_manifest = json.load(f) 68 | else: 69 | original_manifest = {} 70 | 71 | if not os.path.exists(UPDATED_WEIGHTS_MANIFEST_PATH): 72 | return original_manifest 73 | 74 | with open(UPDATED_WEIGHTS_MANIFEST_PATH, 
"r") as f: 75 | updated_manifest = json.load(f) 76 | 77 | for key in updated_manifest: 78 | if key in original_manifest: 79 | for item in updated_manifest[key]: 80 | if item not in original_manifest[key]: 81 | print(f"Adding {item} to {key}") 82 | original_manifest[key].append(item) 83 | else: 84 | original_manifest[key] = updated_manifest[key] 85 | 86 | return original_manifest 87 | 88 | def _generate_weights_map(self, keys, dest): 89 | return { 90 | key: { 91 | "url": f"{BASE_URL}/{dest}/{key}.tar", 92 | "dest": f"{BASE_PATH}/{dest}", 93 | } 94 | for key in keys 95 | } 96 | 97 | def _initialize_weights_map(self): 98 | weights_map = {} 99 | for key in self.weights_manifest.keys(): 100 | if key.isupper(): 101 | weights_map.update( 102 | self._generate_weights_map(self.weights_manifest[key], key.lower()) 103 | ) 104 | 105 | for module_name in dir(helpers): 106 | module = getattr(helpers, module_name) 107 | if hasattr(module, "weights_map"): 108 | weights_map.update(module.weights_map(BASE_URL)) 109 | 110 | return weights_map 111 | 112 | def non_commercial_weights(self): 113 | return [ 114 | "inswapper_128.onnx", 115 | "inswapper_128_fp16.onnx", 116 | "proteus_v02.safetensors", 117 | "RealVisXL_V3.0_Turbo.safetensors", 118 | "sd_xl_turbo_1.0.safetensors", 119 | "sd_xl_turbo_1.0_fp16.safetensors", 120 | "svd.safetensors", 121 | "svd_xt.safetensors", 122 | "turbovisionxlSuperFastXLBasedOnNew_tvxlV32Bakedvae", 123 | "copaxTimelessxlSDXL1_v8.safetensors", 124 | "MODILL_XL_0.27_RC.safetensors", 125 | "epicrealismXL_v10.safetensors", 126 | "RMBG-1.4/model.pth", 127 | ] 128 | 129 | def is_non_commercial_only(self, weight_str): 130 | return weight_str in self.non_commercial_weights() 131 | 132 | def get_weights_by_type(self, weight_type): 133 | return self.weights_manifest.get(weight_type, []) 134 | -------------------------------------------------------------------------------- /workflow_api.json: -------------------------------------------------------------------------------- 1 | { 2 | "1": { 3 | "inputs": { 4 | "instantid_file": "instantid-ip-adapter.bin" 5 | }, 6 | "class_type": "InstantIDModelLoader", 7 | "_meta": { 8 | "title": "Load InstantID Model" 9 | } 10 | }, 11 | "2": { 12 | "inputs": { 13 | "weight": 0.8, 14 | "start_at": 0, 15 | "end_at": 1, 16 | "instantid": [ 17 | "1", 18 | 0 19 | ], 20 | "insightface": [ 21 | "4", 22 | 0 23 | ], 24 | "control_net": [ 25 | "5", 26 | 0 27 | ], 28 | "image": [ 29 | "7", 30 | 0 31 | ], 32 | "model": [ 33 | "52", 34 | 0 35 | ], 36 | "positive": [ 37 | "9", 38 | 0 39 | ], 40 | "negative": [ 41 | "10", 42 | 0 43 | ], 44 | "image_kps": [ 45 | "94", 46 | 0 47 | ] 48 | }, 49 | "class_type": "ApplyInstantID", 50 | "_meta": { 51 | "title": "Apply InstantID" 52 | } 53 | }, 54 | "4": { 55 | "inputs": { 56 | "provider": "CUDA" 57 | }, 58 | "class_type": "InstantIDFaceAnalysis", 59 | "_meta": { 60 | "title": "InstantID Face Analysis" 61 | } 62 | }, 63 | "5": { 64 | "inputs": { 65 | "control_net_name": "instantid-controlnet.safetensors" 66 | }, 67 | "class_type": "ControlNetLoader", 68 | "_meta": { 69 | "title": "Load ControlNet Model" 70 | } 71 | }, 72 | "7": { 73 | "inputs": { 74 | "image": "subject.png", 75 | "upload": "image" 76 | }, 77 | "class_type": "LoadImage", 78 | "_meta": { 79 | "title": "Load Image" 80 | } 81 | }, 82 | "8": { 83 | "inputs": { 84 | "ckpt_name": "dreamshaperXL_lightningDPMSDE.safetensors" 85 | }, 86 | "class_type": "CheckpointLoaderSimple", 87 | "_meta": { 88 | "title": "Load Checkpoint" 89 | } 90 | }, 91 | "9": { 92 | "inputs": { 93 | 
"text": "a portrait photo of a woman", 94 | "clip": [ 95 | "8", 96 | 1 97 | ] 98 | }, 99 | "class_type": "CLIPTextEncode", 100 | "_meta": { 101 | "title": "CLIP Text Encode (Prompt)" 102 | } 103 | }, 104 | "10": { 105 | "inputs": { 106 | "text": "(nsfw:1.5), nipple, nude, naked, lowres, child, getty, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name, trademark, watermark, title, multiple view, reference sheet, mutated hands and fingers, poorly drawn face, mutation, deformed, ugly, bad proportions, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, tatoo, amateur drawing, odd eyes, uneven eyes, unnatural face, uneven nostrils, crooked mouth, bad teeth, crooked teeth, photoshop, video game, censor, censored, ghost, b&w, weird colors, gradient background, spotty background, blurry background, ugly background, simple background, realistic, out of frame, extra objects, gross, ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of focus, blurry, very long body, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn eyes, cloned face, disfigured, deformed, cross-eye, extra limbs, missing limb, malformed hands, mutated, morbid, mutilated, disfigured, extra arms, extra hands, mangled fingers, contorted, conjoined, mismatched limbs, mismatched parts, bad perspective, black and white, oversaturated, undersaturated, bad shadow, cropped image, draft, grainy, pixelated", 107 | "clip": [ 108 | "8", 109 | 1 110 | ] 111 | }, 112 | "class_type": "CLIPTextEncode", 113 | "_meta": { 114 | "title": "CLIP Text Encode (Prompt)" 115 | } 116 | }, 117 | "11": { 118 | "inputs": { 119 | "seed": 760055130669872, 120 | "steps": 4, 121 | "cfg": 1.3, 122 | "sampler_name": "dpmpp_sde_gpu", 123 | "scheduler": "karras", 124 | "denoise": 0.9500000000000001, 125 | "model": [ 126 | "2", 127 | 0 128 | ], 129 | "positive": [ 130 | "74", 131 | 0 132 | ], 133 | "negative": [ 134 | "2", 135 | 2 136 | ], 137 | "latent_image": [ 138 | "29", 139 | 0 140 | ] 141 | }, 142 | "class_type": "KSampler", 143 | "_meta": { 144 | "title": "KSampler" 145 | } 146 | }, 147 | "13": { 148 | "inputs": { 149 | "samples": [ 150 | "11", 151 | 0 152 | ], 153 | "vae": [ 154 | "8", 155 | 2 156 | ] 157 | }, 158 | "class_type": "VAEDecode", 159 | "_meta": { 160 | "title": "VAE Decode" 161 | } 162 | }, 163 | "29": { 164 | "inputs": { 165 | "width": 1024, 166 | "height": 1024, 167 | "batch_size": 1 168 | }, 169 | "class_type": "EmptyLatentImage", 170 | "_meta": { 171 | "title": "Empty Latent Image" 172 | } 173 | }, 174 | "39": { 175 | "inputs": { 176 | "preset": "PLUS FACE (portraits)", 177 | "model": [ 178 | "8", 179 | 0 180 | ] 181 | }, 182 | "class_type": "IPAdapterUnifiedLoader", 183 | "_meta": { 184 | "title": "IPAdapter Unified Loader" 185 | } 186 | }, 187 | "52": { 188 | "inputs": { 189 | "weight": 1, 190 | "weight_type": "style transfer", 191 | "combine_embeds": "concat", 192 | "start_at": 0, 193 | "end_at": 1, 194 | "embeds_scaling": "V only", 195 | "model": [ 196 | "39", 197 | 0 198 | ], 199 | "ipadapter": [ 200 | "39", 201 | 1 202 | ], 203 | "image": [ 204 | "7", 205 | 0 206 | ] 207 | }, 208 | "class_type": "IPAdapterAdvanced", 209 | "_meta": { 210 | "title": "IPAdapter Advanced" 211 | } 212 | }, 213 | "73": { 214 | "inputs": { 215 | "control_net_name": 
"thibaud_xl_openpose.safetensors" 216 | }, 217 | "class_type": "ControlNetLoader", 218 | "_meta": { 219 | "title": "Load ControlNet Model" 220 | } 221 | }, 222 | "74": { 223 | "inputs": { 224 | "strength": 0.9, 225 | "conditioning": [ 226 | "2", 227 | 1 228 | ], 229 | "control_net": [ 230 | "73", 231 | 0 232 | ], 233 | "image": [ 234 | "95", 235 | 0 236 | ] 237 | }, 238 | "class_type": "ControlNetApply", 239 | "_meta": { 240 | "title": "Apply ControlNet" 241 | } 242 | }, 243 | "94": { 244 | "inputs": { 245 | "image": "headshot_kps_00006_.png", 246 | "upload": "image" 247 | }, 248 | "class_type": "LoadImage", 249 | "_meta": { 250 | "title": "Load Image" 251 | } 252 | }, 253 | "95": { 254 | "inputs": { 255 | "image": "headshot_dw_pose_00006_.png", 256 | "upload": "image" 257 | }, 258 | "class_type": "LoadImage", 259 | "_meta": { 260 | "title": "Load Image" 261 | } 262 | }, 263 | "98": { 264 | "inputs": { 265 | "guide_size": 384, 266 | "guide_size_for": true, 267 | "max_size": 1024, 268 | "seed": 1073953721129240, 269 | "steps": 4, 270 | "cfg": 1, 271 | "sampler_name": "dpmpp_sde", 272 | "scheduler": "karras", 273 | "denoise": 0.35000000000000003, 274 | "feather": 5, 275 | "noise_mask": true, 276 | "force_inpaint": true, 277 | "bbox_threshold": 0.5, 278 | "bbox_dilation": 10, 279 | "bbox_crop_factor": 3, 280 | "sam_detection_hint": "center-1", 281 | "sam_dilation": 0, 282 | "sam_threshold": 0.93, 283 | "sam_bbox_expansion": 0, 284 | "sam_mask_hint_threshold": 0.7, 285 | "sam_mask_hint_use_negative": "False", 286 | "drop_size": 10, 287 | "wildcard": "", 288 | "cycle": 1, 289 | "inpaint_model": false, 290 | "noise_mask_feather": 20, 291 | "image": [ 292 | "13", 293 | 0 294 | ], 295 | "model": [ 296 | "2", 297 | 0 298 | ], 299 | "clip": [ 300 | "8", 301 | 1 302 | ], 303 | "vae": [ 304 | "8", 305 | 2 306 | ], 307 | "positive": [ 308 | "113", 309 | 0 310 | ], 311 | "negative": [ 312 | "114", 313 | 0 314 | ], 315 | "bbox_detector": [ 316 | "99", 317 | 0 318 | ] 319 | }, 320 | "class_type": "FaceDetailer", 321 | "_meta": { 322 | "title": "FaceDetailer" 323 | } 324 | }, 325 | "99": { 326 | "inputs": { 327 | "model_name": "bbox/Eyes.pt" 328 | }, 329 | "class_type": "UltralyticsDetectorProvider", 330 | "_meta": { 331 | "title": "UltralyticsDetectorProvider" 332 | } 333 | }, 334 | "103": { 335 | "inputs": { 336 | "upscale_method": "lanczos", 337 | "scale_by": 1.5, 338 | "image": [ 339 | "98", 340 | 0 341 | ] 342 | }, 343 | "class_type": "ImageScaleBy", 344 | "_meta": { 345 | "title": "Upscale Image By" 346 | } 347 | }, 348 | "105": { 349 | "inputs": { 350 | "seed": 760055130669869, 351 | "steps": 4, 352 | "cfg": 1, 353 | "sampler_name": "dpmpp_sde_gpu", 354 | "scheduler": "karras", 355 | "denoise": 0.2, 356 | "model": [ 357 | "2", 358 | 0 359 | ], 360 | "positive": [ 361 | "2", 362 | 1 363 | ], 364 | "negative": [ 365 | "2", 366 | 2 367 | ], 368 | "latent_image": [ 369 | "106", 370 | 0 371 | ] 372 | }, 373 | "class_type": "KSampler", 374 | "_meta": { 375 | "title": "KSampler" 376 | } 377 | }, 378 | "106": { 379 | "inputs": { 380 | "pixels": [ 381 | "109", 382 | 0 383 | ], 384 | "vae": [ 385 | "8", 386 | 2 387 | ] 388 | }, 389 | "class_type": "VAEEncode", 390 | "_meta": { 391 | "title": "VAE Encode" 392 | } 393 | }, 394 | "107": { 395 | "inputs": { 396 | "samples": [ 397 | "105", 398 | 0 399 | ], 400 | "vae": [ 401 | "8", 402 | 2 403 | ] 404 | }, 405 | "class_type": "VAEDecode", 406 | "_meta": { 407 | "title": "VAE Decode" 408 | } 409 | }, 410 | "109": { 411 | "inputs": { 412 | "brightness": 0, 413 | 
"contrast": 1, 414 | "saturation": 1, 415 | "sharpness": 1.1, 416 | "blur": 0, 417 | "gaussian_blur": 0, 418 | "edge_enhance": 0.1, 419 | "detail_enhance": "true", 420 | "image": [ 421 | "103", 422 | 0 423 | ] 424 | }, 425 | "class_type": "Image Filter Adjustments", 426 | "_meta": { 427 | "title": "Image Filter Adjustments" 428 | } 429 | }, 430 | "111": { 431 | "inputs": { 432 | "brightness": 0, 433 | "contrast": 1, 434 | "saturation": 1, 435 | "sharpness": 1.1, 436 | "blur": 0, 437 | "gaussian_blur": 0, 438 | "edge_enhance": 0.1, 439 | "detail_enhance": "true", 440 | "image": [ 441 | "115", 442 | 0 443 | ] 444 | }, 445 | "class_type": "Image Filter Adjustments", 446 | "_meta": { 447 | "title": "Image Filter Adjustments" 448 | } 449 | }, 450 | "113": { 451 | "inputs": { 452 | "text": "perfect eyes", 453 | "clip": [ 454 | "8", 455 | 1 456 | ] 457 | }, 458 | "class_type": "CLIPTextEncode", 459 | "_meta": { 460 | "title": "CLIP Text Encode (Prompt)" 461 | } 462 | }, 463 | "114": { 464 | "inputs": { 465 | "text": "deformed pupils, deformed eyes, ugly eyes", 466 | "clip": [ 467 | "8", 468 | 1 469 | ] 470 | }, 471 | "class_type": "CLIPTextEncode", 472 | "_meta": { 473 | "title": "CLIP Text Encode (Prompt)" 474 | } 475 | }, 476 | "115": { 477 | "inputs": { 478 | "guide_size": 384, 479 | "guide_size_for": true, 480 | "max_size": 1024, 481 | "seed": 1073953721129240, 482 | "steps": 4, 483 | "cfg": 1, 484 | "sampler_name": "dpmpp_sde", 485 | "scheduler": "karras", 486 | "denoise": 0.35000000000000003, 487 | "feather": 5, 488 | "noise_mask": true, 489 | "force_inpaint": true, 490 | "bbox_threshold": 0.5, 491 | "bbox_dilation": 10, 492 | "bbox_crop_factor": 3, 493 | "sam_detection_hint": "center-1", 494 | "sam_dilation": 0, 495 | "sam_threshold": 0.93, 496 | "sam_bbox_expansion": 0, 497 | "sam_mask_hint_threshold": 0.7, 498 | "sam_mask_hint_use_negative": "False", 499 | "drop_size": 10, 500 | "wildcard": "", 501 | "cycle": 1, 502 | "inpaint_model": false, 503 | "noise_mask_feather": 20, 504 | "image": [ 505 | "107", 506 | 0 507 | ], 508 | "model": [ 509 | "8", 510 | 0 511 | ], 512 | "clip": [ 513 | "8", 514 | 1 515 | ], 516 | "vae": [ 517 | "8", 518 | 2 519 | ], 520 | "positive": [ 521 | "114", 522 | 0 523 | ], 524 | "negative": [ 525 | "113", 526 | 0 527 | ], 528 | "bbox_detector": [ 529 | "99", 530 | 0 531 | ] 532 | }, 533 | "class_type": "FaceDetailer", 534 | "_meta": { 535 | "title": "FaceDetailer" 536 | } 537 | }, 538 | "116": { 539 | "inputs": { 540 | "filename_prefix": "ComfyUI", 541 | "images": [ 542 | "111", 543 | 0 544 | ] 545 | }, 546 | "class_type": "SaveImage", 547 | "_meta": { 548 | "title": "Save Image" 549 | } 550 | } 551 | } 552 | -------------------------------------------------------------------------------- /workflow_ui.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 115, 3 | "last_link_id": 233, 4 | "nodes": [ 5 | { 6 | "id": 1, 7 | "type": "InstantIDModelLoader", 8 | "pos": [ 9 | 2089, 10 | -1325 11 | ], 12 | "size": { 13 | "0": 315, 14 | "1": 58 15 | }, 16 | "flags": {}, 17 | "order": 0, 18 | "mode": 0, 19 | "outputs": [ 20 | { 21 | "name": "INSTANTID", 22 | "type": "INSTANTID", 23 | "links": [ 24 | 1 25 | ], 26 | "shape": 3, 27 | "slot_index": 0 28 | } 29 | ], 30 | "properties": { 31 | "Node name for S&R": "InstantIDModelLoader" 32 | }, 33 | "widgets_values": [ 34 | "instantid-ip-adapter.bin" 35 | ] 36 | }, 37 | { 38 | "id": 39, 39 | "type": "IPAdapterUnifiedLoader", 40 | "pos": [ 41 | 2686, 42 | -1428 43 | ], 44 
| "size": { 45 | "0": 315, 46 | "1": 78 47 | }, 48 | "flags": {}, 49 | "order": 10, 50 | "mode": 0, 51 | "inputs": [ 52 | { 53 | "name": "model", 54 | "type": "MODEL", 55 | "link": 106 56 | }, 57 | { 58 | "name": "ipadapter", 59 | "type": "IPADAPTER", 60 | "link": null 61 | } 62 | ], 63 | "outputs": [ 64 | { 65 | "name": "model", 66 | "type": "MODEL", 67 | "links": [ 68 | 119 69 | ], 70 | "shape": 3, 71 | "slot_index": 0 72 | }, 73 | { 74 | "name": "ipadapter", 75 | "type": "IPADAPTER", 76 | "links": [ 77 | 121 78 | ], 79 | "shape": 3, 80 | "slot_index": 1 81 | } 82 | ], 83 | "properties": { 84 | "Node name for S&R": "IPAdapterUnifiedLoader" 85 | }, 86 | "widgets_values": [ 87 | "PLUS FACE (portraits)" 88 | ] 89 | }, 90 | { 91 | "id": 5, 92 | "type": "ControlNetLoader", 93 | "pos": [ 94 | 2348, 95 | -421 96 | ], 97 | "size": { 98 | "0": 315, 99 | "1": 58 100 | }, 101 | "flags": {}, 102 | "order": 1, 103 | "mode": 0, 104 | "outputs": [ 105 | { 106 | "name": "CONTROL_NET", 107 | "type": "CONTROL_NET", 108 | "links": [ 109 | 3 110 | ], 111 | "shape": 3, 112 | "slot_index": 0 113 | } 114 | ], 115 | "properties": { 116 | "Node name for S&R": "ControlNetLoader" 117 | }, 118 | "widgets_values": [ 119 | "instantid-controlnet.safetensors" 120 | ] 121 | }, 122 | { 123 | "id": 4, 124 | "type": "InstantIDFaceAnalysis", 125 | "pos": [ 126 | 2093, 127 | -1227 128 | ], 129 | "size": { 130 | "0": 315, 131 | "1": 58 132 | }, 133 | "flags": {}, 134 | "order": 2, 135 | "mode": 0, 136 | "outputs": [ 137 | { 138 | "name": "FACEANALYSIS", 139 | "type": "FACEANALYSIS", 140 | "links": [ 141 | 2 142 | ], 143 | "shape": 3, 144 | "slot_index": 0 145 | } 146 | ], 147 | "properties": { 148 | "Node name for S&R": "InstantIDFaceAnalysis" 149 | }, 150 | "widgets_values": [ 151 | "CUDA" 152 | ] 153 | }, 154 | { 155 | "id": 73, 156 | "type": "ControlNetLoader", 157 | "pos": [ 158 | 2797, 159 | -194 160 | ], 161 | "size": { 162 | "0": 315, 163 | "1": 58 164 | }, 165 | "flags": {}, 166 | "order": 3, 167 | "mode": 0, 168 | "outputs": [ 169 | { 170 | "name": "CONTROL_NET", 171 | "type": "CONTROL_NET", 172 | "links": [ 173 | 152 174 | ], 175 | "shape": 3, 176 | "slot_index": 0 177 | } 178 | ], 179 | "properties": { 180 | "Node name for S&R": "ControlNetLoader" 181 | }, 182 | "widgets_values": [ 183 | "thibaud_xl_openpose.safetensors" 184 | ] 185 | }, 186 | { 187 | "id": 74, 188 | "type": "ControlNetApply", 189 | "pos": [ 190 | 3186, 191 | -352 192 | ], 193 | "size": { 194 | "0": 317.4000244140625, 195 | "1": 98 196 | }, 197 | "flags": {}, 198 | "order": 17, 199 | "mode": 0, 200 | "inputs": [ 201 | { 202 | "name": "conditioning", 203 | "type": "CONDITIONING", 204 | "link": 149 205 | }, 206 | { 207 | "name": "control_net", 208 | "type": "CONTROL_NET", 209 | "link": 152 210 | }, 211 | { 212 | "name": "image", 213 | "type": "IMAGE", 214 | "link": 189 215 | } 216 | ], 217 | "outputs": [ 218 | { 219 | "name": "CONDITIONING", 220 | "type": "CONDITIONING", 221 | "links": [ 222 | 150 223 | ], 224 | "shape": 3, 225 | "slot_index": 0 226 | } 227 | ], 228 | "properties": { 229 | "Node name for S&R": "ControlNetApply" 230 | }, 231 | "widgets_values": [ 232 | 0.9 233 | ] 234 | }, 235 | { 236 | "id": 13, 237 | "type": "VAEDecode", 238 | "pos": [ 239 | 3506, 240 | -783 241 | ], 242 | "size": { 243 | "0": 210, 244 | "1": 46 245 | }, 246 | "flags": {}, 247 | "order": 19, 248 | "mode": 0, 249 | "inputs": [ 250 | { 251 | "name": "samples", 252 | "type": "LATENT", 253 | "link": 16 254 | }, 255 | { 256 | "name": "vae", 257 | "type": "VAE", 258 | 
"link": 18 259 | } 260 | ], 261 | "outputs": [ 262 | { 263 | "name": "IMAGE", 264 | "type": "IMAGE", 265 | "links": [ 266 | 17, 267 | 195 268 | ], 269 | "shape": 3, 270 | "slot_index": 0 271 | } 272 | ], 273 | "properties": { 274 | "Node name for S&R": "VAEDecode" 275 | } 276 | }, 277 | { 278 | "id": 100, 279 | "type": "PreviewImage", 280 | "pos": [ 281 | 5828, 282 | -871 283 | ], 284 | "size": [ 285 | 210, 286 | 246 287 | ], 288 | "flags": {}, 289 | "order": 22, 290 | "mode": 0, 291 | "inputs": [ 292 | { 293 | "name": "images", 294 | "type": "IMAGE", 295 | "link": 201 296 | } 297 | ], 298 | "properties": { 299 | "Node name for S&R": "PreviewImage" 300 | } 301 | }, 302 | { 303 | "id": 106, 304 | "type": "VAEEncode", 305 | "pos": [ 306 | 6211.0974220169455, 307 | -321.2619906454818 308 | ], 309 | "size": { 310 | "0": 210, 311 | "1": 46 312 | }, 313 | "flags": {}, 314 | "order": 26, 315 | "mode": 0, 316 | "inputs": [ 317 | { 318 | "name": "pixels", 319 | "type": "IMAGE", 320 | "link": 215 321 | }, 322 | { 323 | "name": "vae", 324 | "type": "VAE", 325 | "link": 209 326 | } 327 | ], 328 | "outputs": [ 329 | { 330 | "name": "LATENT", 331 | "type": "LATENT", 332 | "links": [ 333 | 207 334 | ], 335 | "shape": 3, 336 | "slot_index": 0 337 | } 338 | ], 339 | "properties": { 340 | "Node name for S&R": "VAEEncode" 341 | } 342 | }, 343 | { 344 | "id": 2, 345 | "type": "ApplyInstantID", 346 | "pos": [ 347 | 2740, 348 | -801 349 | ], 350 | "size": { 351 | "0": 315, 352 | "1": 266 353 | }, 354 | "flags": {}, 355 | "order": 16, 356 | "mode": 0, 357 | "inputs": [ 358 | { 359 | "name": "instantid", 360 | "type": "INSTANTID", 361 | "link": 1 362 | }, 363 | { 364 | "name": "insightface", 365 | "type": "FACEANALYSIS", 366 | "link": 2 367 | }, 368 | { 369 | "name": "control_net", 370 | "type": "CONTROL_NET", 371 | "link": 3 372 | }, 373 | { 374 | "name": "image", 375 | "type": "IMAGE", 376 | "link": 153 377 | }, 378 | { 379 | "name": "model", 380 | "type": "MODEL", 381 | "link": 120 382 | }, 383 | { 384 | "name": "positive", 385 | "type": "CONDITIONING", 386 | "link": 131 387 | }, 388 | { 389 | "name": "negative", 390 | "type": "CONDITIONING", 391 | "link": 12 392 | }, 393 | { 394 | "name": "image_kps", 395 | "type": "IMAGE", 396 | "link": 188 397 | }, 398 | { 399 | "name": "mask", 400 | "type": "MASK", 401 | "link": null 402 | } 403 | ], 404 | "outputs": [ 405 | { 406 | "name": "MODEL", 407 | "type": "MODEL", 408 | "links": [ 409 | 72, 410 | 196, 411 | 213 412 | ], 413 | "shape": 3, 414 | "slot_index": 0 415 | }, 416 | { 417 | "name": "positive", 418 | "type": "CONDITIONING", 419 | "links": [ 420 | 149, 421 | 205 422 | ], 423 | "shape": 3, 424 | "slot_index": 1 425 | }, 426 | { 427 | "name": "negative", 428 | "type": "CONDITIONING", 429 | "links": [ 430 | 15, 431 | 206 432 | ], 433 | "shape": 3, 434 | "slot_index": 2 435 | } 436 | ], 437 | "properties": { 438 | "Node name for S&R": "ApplyInstantID" 439 | }, 440 | "widgets_values": [ 441 | 0.8, 442 | 0, 443 | 1 444 | ] 445 | }, 446 | { 447 | "id": 103, 448 | "type": "ImageScaleBy", 449 | "pos": [ 450 | 6195, 451 | -909 452 | ], 453 | "size": { 454 | "0": 315, 455 | "1": 82 456 | }, 457 | "flags": {}, 458 | "order": 23, 459 | "mode": 0, 460 | "inputs": [ 461 | { 462 | "name": "image", 463 | "type": "IMAGE", 464 | "link": 203 465 | } 466 | ], 467 | "outputs": [ 468 | { 469 | "name": "IMAGE", 470 | "type": "IMAGE", 471 | "links": [ 472 | 204, 473 | 214 474 | ], 475 | "shape": 3, 476 | "slot_index": 0 477 | } 478 | ], 479 | "properties": { 480 | "Node name for 
S&R": "ImageScaleBy" 481 | }, 482 | "widgets_values": [ 483 | "lanczos", 484 | 1.5 485 | ] 486 | }, 487 | { 488 | "id": 104, 489 | "type": "PreviewImage", 490 | "pos": [ 491 | 6148, 492 | -770 493 | ], 494 | "size": { 495 | "0": 210, 496 | "1": 246 497 | }, 498 | "flags": {}, 499 | "order": 24, 500 | "mode": 0, 501 | "inputs": [ 502 | { 503 | "name": "images", 504 | "type": "IMAGE", 505 | "link": 204 506 | } 507 | ], 508 | "properties": { 509 | "Node name for S&R": "PreviewImage" 510 | } 511 | }, 512 | { 513 | "id": 12, 514 | "type": "SaveImage", 515 | "pos": [ 516 | 3148, 517 | 196 518 | ], 519 | "size": [ 520 | 790.3585945600566, 521 | 727.7048805651185 522 | ], 523 | "flags": {}, 524 | "order": 20, 525 | "mode": 0, 526 | "inputs": [ 527 | { 528 | "name": "images", 529 | "type": "IMAGE", 530 | "link": 17 531 | } 532 | ], 533 | "properties": {}, 534 | "widgets_values": [ 535 | "ComfyUI" 536 | ] 537 | }, 538 | { 539 | "id": 110, 540 | "type": "PreviewImage", 541 | "pos": [ 542 | 4007, 543 | 202 544 | ], 545 | "size": [ 546 | 723.5699935037755, 547 | 724.7476222709581 548 | ], 549 | "flags": {}, 550 | "order": 27, 551 | "mode": 0, 552 | "inputs": [ 553 | { 554 | "name": "images", 555 | "type": "IMAGE", 556 | "link": 216 557 | } 558 | ], 559 | "properties": { 560 | "Node name for S&R": "PreviewImage" 561 | } 562 | }, 563 | { 564 | "id": 109, 565 | "type": "Image Filter Adjustments", 566 | "pos": [ 567 | 6695, 568 | -1278 569 | ], 570 | "size": { 571 | "0": 315, 572 | "1": 226 573 | }, 574 | "flags": {}, 575 | "order": 25, 576 | "mode": 0, 577 | "inputs": [ 578 | { 579 | "name": "image", 580 | "type": "IMAGE", 581 | "link": 214 582 | } 583 | ], 584 | "outputs": [ 585 | { 586 | "name": "IMAGE", 587 | "type": "IMAGE", 588 | "links": [ 589 | 215, 590 | 216 591 | ], 592 | "shape": 3, 593 | "slot_index": 0 594 | } 595 | ], 596 | "properties": { 597 | "Node name for S&R": "Image Filter Adjustments" 598 | }, 599 | "widgets_values": [ 600 | 0, 601 | 1, 602 | 1, 603 | 1.1, 604 | 0, 605 | 0, 606 | 0.1, 607 | "true" 608 | ] 609 | }, 610 | { 611 | "id": 112, 612 | "type": "PreviewImage", 613 | "pos": [ 614 | 8054.3463387737265, 615 | -703.7322512684176 616 | ], 617 | "size": [ 618 | 210, 619 | 246 620 | ], 621 | "flags": {}, 622 | "order": 33, 623 | "mode": 0, 624 | "inputs": [ 625 | { 626 | "name": "images", 627 | "type": "IMAGE", 628 | "link": 218 629 | } 630 | ], 631 | "properties": { 632 | "Node name for S&R": "PreviewImage" 633 | } 634 | }, 635 | { 636 | "id": 111, 637 | "type": "Image Filter Adjustments", 638 | "pos": [ 639 | 7580, 640 | -479 641 | ], 642 | "size": { 643 | "0": 315, 644 | "1": 226 645 | }, 646 | "flags": {}, 647 | "order": 32, 648 | "mode": 0, 649 | "inputs": [ 650 | { 651 | "name": "image", 652 | "type": "IMAGE", 653 | "link": 233 654 | } 655 | ], 656 | "outputs": [ 657 | { 658 | "name": "IMAGE", 659 | "type": "IMAGE", 660 | "links": [ 661 | 218 662 | ], 663 | "shape": 3, 664 | "slot_index": 0 665 | } 666 | ], 667 | "properties": { 668 | "Node name for S&R": "Image Filter Adjustments" 669 | }, 670 | "widgets_values": [ 671 | 0, 672 | 1, 673 | 1, 674 | 1.1, 675 | 0, 676 | 0, 677 | 0.1, 678 | "true" 679 | ] 680 | }, 681 | { 682 | "id": 105, 683 | "type": "KSampler", 684 | "pos": [ 685 | 6717, 686 | -810 687 | ], 688 | "size": { 689 | "0": 315, 690 | "1": 262 691 | }, 692 | "flags": {}, 693 | "order": 28, 694 | "mode": 0, 695 | "inputs": [ 696 | { 697 | "name": "model", 698 | "type": "MODEL", 699 | "link": 213 700 | }, 701 | { 702 | "name": "positive", 703 | "type": "CONDITIONING", 
704 | "link": 205 705 | }, 706 | { 707 | "name": "negative", 708 | "type": "CONDITIONING", 709 | "link": 206 710 | }, 711 | { 712 | "name": "latent_image", 713 | "type": "LATENT", 714 | "link": 207 715 | } 716 | ], 717 | "outputs": [ 718 | { 719 | "name": "LATENT", 720 | "type": "LATENT", 721 | "links": [ 722 | 210 723 | ], 724 | "shape": 3, 725 | "slot_index": 0 726 | } 727 | ], 728 | "properties": { 729 | "Node name for S&R": "KSampler" 730 | }, 731 | "widgets_values": [ 732 | 760055130669869, 733 | "fixed", 734 | 4, 735 | 1, 736 | "dpmpp_sde_gpu", 737 | "karras", 738 | 0.2 739 | ] 740 | }, 741 | { 742 | "id": 94, 743 | "type": "LoadImage", 744 | "pos": [ 745 | 1991, 746 | -625 747 | ], 748 | "size": { 749 | "0": 315, 750 | "1": 314 751 | }, 752 | "flags": {}, 753 | "order": 4, 754 | "mode": 0, 755 | "outputs": [ 756 | { 757 | "name": "IMAGE", 758 | "type": "IMAGE", 759 | "links": [ 760 | 188 761 | ], 762 | "shape": 3, 763 | "slot_index": 0 764 | }, 765 | { 766 | "name": "MASK", 767 | "type": "MASK", 768 | "links": null, 769 | "shape": 3 770 | } 771 | ], 772 | "properties": { 773 | "Node name for S&R": "LoadImage" 774 | }, 775 | "widgets_values": [ 776 | "headshot_kps_00006_.png", 777 | "image" 778 | ] 779 | }, 780 | { 781 | "id": 95, 782 | "type": "LoadImage", 783 | "pos": [ 784 | 2351, 785 | -262 786 | ], 787 | "size": { 788 | "0": 396.93450927734375, 789 | "1": 348.37152099609375 790 | }, 791 | "flags": {}, 792 | "order": 5, 793 | "mode": 0, 794 | "outputs": [ 795 | { 796 | "name": "IMAGE", 797 | "type": "IMAGE", 798 | "links": [ 799 | 189 800 | ], 801 | "shape": 3, 802 | "slot_index": 0 803 | }, 804 | { 805 | "name": "MASK", 806 | "type": "MASK", 807 | "links": null, 808 | "shape": 3 809 | } 810 | ], 811 | "properties": { 812 | "Node name for S&R": "LoadImage" 813 | }, 814 | "widgets_values": [ 815 | "headshot_dw_pose_00006_.png", 816 | "image" 817 | ] 818 | }, 819 | { 820 | "id": 29, 821 | "type": "EmptyLatentImage", 822 | "pos": [ 823 | 2729, 824 | -461 825 | ], 826 | "size": { 827 | "0": 315, 828 | "1": 106 829 | }, 830 | "flags": {}, 831 | "order": 6, 832 | "mode": 0, 833 | "outputs": [ 834 | { 835 | "name": "LATENT", 836 | "type": "LATENT", 837 | "links": [ 838 | 76 839 | ], 840 | "shape": 3, 841 | "slot_index": 0 842 | } 843 | ], 844 | "properties": { 845 | "Node name for S&R": "EmptyLatentImage" 846 | }, 847 | "widgets_values": [ 848 | 1024, 849 | 1024, 850 | 1 851 | ] 852 | }, 853 | { 854 | "id": 11, 855 | "type": "KSampler", 856 | "pos": [ 857 | 3150, 858 | -792 859 | ], 860 | "size": { 861 | "0": 315, 862 | "1": 262 863 | }, 864 | "flags": {}, 865 | "order": 18, 866 | "mode": 0, 867 | "inputs": [ 868 | { 869 | "name": "model", 870 | "type": "MODEL", 871 | "link": 72 872 | }, 873 | { 874 | "name": "positive", 875 | "type": "CONDITIONING", 876 | "link": 150 877 | }, 878 | { 879 | "name": "negative", 880 | "type": "CONDITIONING", 881 | "link": 15 882 | }, 883 | { 884 | "name": "latent_image", 885 | "type": "LATENT", 886 | "link": 76 887 | } 888 | ], 889 | "outputs": [ 890 | { 891 | "name": "LATENT", 892 | "type": "LATENT", 893 | "links": [ 894 | 16 895 | ], 896 | "shape": 3, 897 | "slot_index": 0 898 | } 899 | ], 900 | "properties": { 901 | "Node name for S&R": "KSampler" 902 | }, 903 | "widgets_values": [ 904 | 760055130669872, 905 | "fixed", 906 | 5, 907 | 1.2, 908 | "dpmpp_sde_gpu", 909 | "karras", 910 | 0.9500000000000001 911 | ] 912 | }, 913 | { 914 | "id": 10, 915 | "type": "CLIPTextEncode", 916 | "pos": [ 917 | 2022, 918 | -911 919 | ], 920 | "size": { 921 | "0": 400, 
922 | "1": 200 923 | }, 924 | "flags": {}, 925 | "order": 12, 926 | "mode": 0, 927 | "inputs": [ 928 | { 929 | "name": "clip", 930 | "type": "CLIP", 931 | "link": 11, 932 | "slot_index": 0 933 | } 934 | ], 935 | "outputs": [ 936 | { 937 | "name": "CONDITIONING", 938 | "type": "CONDITIONING", 939 | "links": [ 940 | 12 941 | ], 942 | "shape": 3, 943 | "slot_index": 0 944 | } 945 | ], 946 | "properties": { 947 | "Node name for S&R": "CLIPTextEncode" 948 | }, 949 | "widgets_values": [ 950 | "(nsfw:1.5), naked, lowres, child, getty, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name, trademark, watermark, title, multiple view, reference sheet, mutated hands and fingers, poorly drawn face, mutation, deformed, ugly, bad proportions, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, tattoo, amateur drawing, odd eyes, uneven eyes, unnatural face, uneven nostrils, crooked mouth, bad teeth, crooked teeth, photoshop, video game, censor, censored, ghost, b&w, weird colors, gradient background, spotty background, blurry background, ugly background, simple background, realistic, out of frame, extra objects, gross, ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of focus, blurry, very long body, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn eyes, cloned face, disfigured, deformed, cross-eye, extra limbs, missing limb, malformed hands, mutated, morbid, mutilated, disfigured, extra arms, extra hands, mangled fingers, contorted, conjoined, mismatched limbs, mismatched parts, bad perspective, black and white, oversaturated, undersaturated, bad shadow, cropped image, draft, grainy, pixelated" 951 | ] 952 | }, 953 | { 954 | "id": 7, 955 | "type": "LoadImage", 956 | "pos": [ 957 | 1618, 958 | -750 959 | ], 960 | "size": { 961 | "0": 315, 962 | "1": 314 963 | }, 964 | "flags": {}, 965 | "order": 7, 966 | "mode": 0, 967 | "outputs": [ 968 | { 969 | "name": "IMAGE", 970 | "type": "IMAGE", 971 | "links": [ 972 | 133, 973 | 153 974 | ], 975 | "shape": 3, 976 | "slot_index": 0 977 | }, 978 | { 979 | "name": "MASK", 980 | "type": "MASK", 981 | "links": null, 982 | "shape": 3 983 | } 984 | ], 985 | "properties": { 986 | "Node name for S&R": "LoadImage" 987 | }, 988 | "widgets_values": [ 989 | "subject.png", 990 | "image" 991 | ] 992 | }, 993 | { 994 | "id": 9, 995 | "type": "CLIPTextEncode", 996 | "pos": [ 997 | 2032, 998 | -1143 999 | ], 1000 | "size": { 1001 | "0": 400, 1002 | "1": 200 1003 | }, 1004 | "flags": {}, 1005 | "order": 11, 1006 | "mode": 0, 1007 | "inputs": [ 1008 | { 1009 | "name": "clip", 1010 | "type": "CLIP", 1011 | "link": 10, 1012 | "slot_index": 0 1013 | } 1014 | ], 1015 | "outputs": [ 1016 | { 1017 | "name": "CONDITIONING", 1018 | "type": "CONDITIONING", 1019 | "links": [ 1020 | 131 1021 | ], 1022 | "shape": 3, 1023 | "slot_index": 0 1024 | } 1025 | ], 1026 | "properties": { 1027 | "Node name for S&R": "CLIPTextEncode" 1028 | }, 1029 | "widgets_values": [ 1030 | "a portrait photo" 1031 | ] 1032 | }, 1033 | { 1034 | "id": 52, 1035 | "type": "IPAdapterAdvanced", 1036 | "pos": [ 1037 | 3128, 1038 | -1429 1039 | ], 1040 | "size": { 1041 | "0": 315, 1042 | "1": 278 1043 | }, 1044 | "flags": {}, 1045 | "order": 15, 1046 | "mode": 0, 1047 | "inputs": [ 1048 | { 1049 | "name": "model", 1050 | "type": "MODEL", 1051 | "link": 
119 1052 | }, 1053 | { 1054 | "name": "ipadapter", 1055 | "type": "IPADAPTER", 1056 | "link": 121 1057 | }, 1058 | { 1059 | "name": "image", 1060 | "type": "IMAGE", 1061 | "link": 133 1062 | }, 1063 | { 1064 | "name": "image_negative", 1065 | "type": "IMAGE", 1066 | "link": null 1067 | }, 1068 | { 1069 | "name": "attn_mask", 1070 | "type": "MASK", 1071 | "link": null 1072 | }, 1073 | { 1074 | "name": "clip_vision", 1075 | "type": "CLIP_VISION", 1076 | "link": null 1077 | } 1078 | ], 1079 | "outputs": [ 1080 | { 1081 | "name": "MODEL", 1082 | "type": "MODEL", 1083 | "links": [ 1084 | 120 1085 | ], 1086 | "shape": 3, 1087 | "slot_index": 0 1088 | } 1089 | ], 1090 | "properties": { 1091 | "Node name for S&R": "IPAdapterAdvanced" 1092 | }, 1093 | "widgets_values": [ 1094 | 1, 1095 | "style transfer", 1096 | "concat", 1097 | 0, 1098 | 1, 1099 | "V only" 1100 | ] 1101 | }, 1102 | { 1103 | "id": 107, 1104 | "type": "VAEDecode", 1105 | "pos": [ 1106 | 7167, 1107 | -817 1108 | ], 1109 | "size": { 1110 | "0": 210, 1111 | "1": 46 1112 | }, 1113 | "flags": {}, 1114 | "order": 29, 1115 | "mode": 0, 1116 | "inputs": [ 1117 | { 1118 | "name": "samples", 1119 | "type": "LATENT", 1120 | "link": 210 1121 | }, 1122 | { 1123 | "name": "vae", 1124 | "type": "VAE", 1125 | "link": 211 1126 | } 1127 | ], 1128 | "outputs": [ 1129 | { 1130 | "name": "IMAGE", 1131 | "type": "IMAGE", 1132 | "links": [ 1133 | 212, 1134 | 223 1135 | ], 1136 | "shape": 3, 1137 | "slot_index": 0 1138 | } 1139 | ], 1140 | "properties": { 1141 | "Node name for S&R": "VAEDecode" 1142 | } 1143 | }, 1144 | { 1145 | "id": 8, 1146 | "type": "CheckpointLoaderSimple", 1147 | "pos": [ 1148 | 1609, 1149 | -909 1150 | ], 1151 | "size": { 1152 | "0": 315, 1153 | "1": 98 1154 | }, 1155 | "flags": {}, 1156 | "order": 8, 1157 | "mode": 0, 1158 | "outputs": [ 1159 | { 1160 | "name": "MODEL", 1161 | "type": "MODEL", 1162 | "links": [ 1163 | 106, 1164 | 224 1165 | ], 1166 | "shape": 3, 1167 | "slot_index": 0 1168 | }, 1169 | { 1170 | "name": "CLIP", 1171 | "type": "CLIP", 1172 | "links": [ 1173 | 10, 1174 | 11, 1175 | 200, 1176 | 221, 1177 | 222, 1178 | 225 1179 | ], 1180 | "shape": 3, 1181 | "slot_index": 1 1182 | }, 1183 | { 1184 | "name": "VAE", 1185 | "type": "VAE", 1186 | "links": [ 1187 | 18, 1188 | 197, 1189 | 209, 1190 | 211, 1191 | 226 1192 | ], 1193 | "shape": 3, 1194 | "slot_index": 2 1195 | } 1196 | ], 1197 | "properties": { 1198 | "Node name for S&R": "CheckpointLoaderSimple" 1199 | }, 1200 | "widgets_values": [ 1201 | "dreamshaperXL_lightningDPMSDE.safetensors" 1202 | ] 1203 | }, 1204 | { 1205 | "id": 99, 1206 | "type": "UltralyticsDetectorProvider", 1207 | "pos": [ 1208 | 5166, 1209 | -1113 1210 | ], 1211 | "size": { 1212 | "0": 315, 1213 | "1": 78 1214 | }, 1215 | "flags": {}, 1216 | "order": 9, 1217 | "mode": 0, 1218 | "outputs": [ 1219 | { 1220 | "name": "BBOX_DETECTOR", 1221 | "type": "BBOX_DETECTOR", 1222 | "links": [ 1223 | 194, 1224 | 227 1225 | ], 1226 | "shape": 3, 1227 | "slot_index": 0 1228 | }, 1229 | { 1230 | "name": "SEGM_DETECTOR", 1231 | "type": "SEGM_DETECTOR", 1232 | "links": null, 1233 | "shape": 3 1234 | } 1235 | ], 1236 | "properties": { 1237 | "Node name for S&R": "UltralyticsDetectorProvider" 1238 | }, 1239 | "widgets_values": [ 1240 | "bbox/Eyes.pt" 1241 | ] 1242 | }, 1243 | { 1244 | "id": 113, 1245 | "type": "CLIPTextEncode", 1246 | "pos": [ 1247 | 4656.5143615163615, 1248 | -1171.5705874544674 1249 | ], 1250 | "size": { 1251 | "0": 400, 1252 | "1": 200 1253 | }, 1254 | "flags": {}, 1255 | "order": 14, 1256 | "mode": 
0, 1257 | "inputs": [ 1258 | { 1259 | "name": "clip", 1260 | "type": "CLIP", 1261 | "link": 222 1262 | } 1263 | ], 1264 | "outputs": [ 1265 | { 1266 | "name": "CONDITIONING", 1267 | "type": "CONDITIONING", 1268 | "links": [ 1269 | 219, 1270 | 231 1271 | ], 1272 | "shape": 3, 1273 | "slot_index": 0 1274 | } 1275 | ], 1276 | "properties": { 1277 | "Node name for S&R": "CLIPTextEncode" 1278 | }, 1279 | "widgets_values": [ 1280 | "perfect eyes" 1281 | ] 1282 | }, 1283 | { 1284 | "id": 114, 1285 | "type": "CLIPTextEncode", 1286 | "pos": [ 1287 | 4673, 1288 | -1425 1289 | ], 1290 | "size": { 1291 | "0": 400, 1292 | "1": 200 1293 | }, 1294 | "flags": {}, 1295 | "order": 13, 1296 | "mode": 0, 1297 | "inputs": [ 1298 | { 1299 | "name": "clip", 1300 | "type": "CLIP", 1301 | "link": 221 1302 | } 1303 | ], 1304 | "outputs": [ 1305 | { 1306 | "name": "CONDITIONING", 1307 | "type": "CONDITIONING", 1308 | "links": [ 1309 | 220, 1310 | 232 1311 | ], 1312 | "shape": 3, 1313 | "slot_index": 0 1314 | } 1315 | ], 1316 | "properties": { 1317 | "Node name for S&R": "CLIPTextEncode" 1318 | }, 1319 | "widgets_values": [ 1320 | "deformed pupils, deformed eyes, ugly eyes" 1321 | ] 1322 | }, 1323 | { 1324 | "id": 115, 1325 | "type": "FaceDetailer", 1326 | "pos": [ 1327 | 6917, 1328 | -282 1329 | ], 1330 | "size": { 1331 | "0": 506.4000244140625, 1332 | "1": 880 1333 | }, 1334 | "flags": {}, 1335 | "order": 31, 1336 | "mode": 0, 1337 | "inputs": [ 1338 | { 1339 | "name": "image", 1340 | "type": "IMAGE", 1341 | "link": 223 1342 | }, 1343 | { 1344 | "name": "model", 1345 | "type": "MODEL", 1346 | "link": 224 1347 | }, 1348 | { 1349 | "name": "clip", 1350 | "type": "CLIP", 1351 | "link": 225 1352 | }, 1353 | { 1354 | "name": "vae", 1355 | "type": "VAE", 1356 | "link": 226 1357 | }, 1358 | { 1359 | "name": "positive", 1360 | "type": "CONDITIONING", 1361 | "link": 232 1362 | }, 1363 | { 1364 | "name": "negative", 1365 | "type": "CONDITIONING", 1366 | "link": 231 1367 | }, 1368 | { 1369 | "name": "bbox_detector", 1370 | "type": "BBOX_DETECTOR", 1371 | "link": 227 1372 | }, 1373 | { 1374 | "name": "sam_model_opt", 1375 | "type": "SAM_MODEL", 1376 | "link": null 1377 | }, 1378 | { 1379 | "name": "segm_detector_opt", 1380 | "type": "SEGM_DETECTOR", 1381 | "link": null 1382 | }, 1383 | { 1384 | "name": "detailer_hook", 1385 | "type": "DETAILER_HOOK", 1386 | "link": null 1387 | } 1388 | ], 1389 | "outputs": [ 1390 | { 1391 | "name": "image", 1392 | "type": "IMAGE", 1393 | "links": [ 1394 | 233 1395 | ], 1396 | "shape": 3, 1397 | "slot_index": 0 1398 | }, 1399 | { 1400 | "name": "cropped_refined", 1401 | "type": "IMAGE", 1402 | "links": null, 1403 | "shape": 6 1404 | }, 1405 | { 1406 | "name": "cropped_enhanced_alpha", 1407 | "type": "IMAGE", 1408 | "links": null, 1409 | "shape": 6 1410 | }, 1411 | { 1412 | "name": "mask", 1413 | "type": "MASK", 1414 | "links": null, 1415 | "shape": 3 1416 | }, 1417 | { 1418 | "name": "detailer_pipe", 1419 | "type": "DETAILER_PIPE", 1420 | "links": null, 1421 | "shape": 3 1422 | }, 1423 | { 1424 | "name": "cnet_images", 1425 | "type": "IMAGE", 1426 | "links": null, 1427 | "shape": 6 1428 | } 1429 | ], 1430 | "properties": { 1431 | "Node name for S&R": "FaceDetailer" 1432 | }, 1433 | "widgets_values": [ 1434 | 384, 1435 | true, 1436 | 1024, 1437 | 1073953721129240, 1438 | "fixed", 1439 | 4, 1440 | 1, 1441 | "dpmpp_sde", 1442 | "karras", 1443 | 0.35000000000000003, 1444 | 5, 1445 | true, 1446 | true, 1447 | 0.5, 1448 | 10, 1449 | 3, 1450 | "center-1", 1451 | 0, 1452 | 0.93, 1453 | 0, 1454 | 0.7, 
1455 | "False", 1456 | 10, 1457 | "", 1458 | 1, 1459 | false, 1460 | 20 1461 | ] 1462 | }, 1463 | { 1464 | "id": 108, 1465 | "type": "SaveImage", 1466 | "pos": [ 1467 | 7528, 1468 | -880 1469 | ], 1470 | "size": [ 1471 | 315, 1472 | 270 1473 | ], 1474 | "flags": {}, 1475 | "order": 30, 1476 | "mode": 0, 1477 | "inputs": [ 1478 | { 1479 | "name": "images", 1480 | "type": "IMAGE", 1481 | "link": 212 1482 | } 1483 | ], 1484 | "properties": {}, 1485 | "widgets_values": [ 1486 | "ComfyUI" 1487 | ] 1488 | }, 1489 | { 1490 | "id": 98, 1491 | "type": "FaceDetailer", 1492 | "pos": [ 1493 | 5117, 1494 | -881 1495 | ], 1496 | "size": { 1497 | "0": 506.4000244140625, 1498 | "1": 880 1499 | }, 1500 | "flags": {}, 1501 | "order": 21, 1502 | "mode": 0, 1503 | "inputs": [ 1504 | { 1505 | "name": "image", 1506 | "type": "IMAGE", 1507 | "link": 195 1508 | }, 1509 | { 1510 | "name": "model", 1511 | "type": "MODEL", 1512 | "link": 196 1513 | }, 1514 | { 1515 | "name": "clip", 1516 | "type": "CLIP", 1517 | "link": 200 1518 | }, 1519 | { 1520 | "name": "vae", 1521 | "type": "VAE", 1522 | "link": 197 1523 | }, 1524 | { 1525 | "name": "positive", 1526 | "type": "CONDITIONING", 1527 | "link": 219 1528 | }, 1529 | { 1530 | "name": "negative", 1531 | "type": "CONDITIONING", 1532 | "link": 220 1533 | }, 1534 | { 1535 | "name": "bbox_detector", 1536 | "type": "BBOX_DETECTOR", 1537 | "link": 194 1538 | }, 1539 | { 1540 | "name": "sam_model_opt", 1541 | "type": "SAM_MODEL", 1542 | "link": null 1543 | }, 1544 | { 1545 | "name": "segm_detector_opt", 1546 | "type": "SEGM_DETECTOR", 1547 | "link": null 1548 | }, 1549 | { 1550 | "name": "detailer_hook", 1551 | "type": "DETAILER_HOOK", 1552 | "link": null 1553 | } 1554 | ], 1555 | "outputs": [ 1556 | { 1557 | "name": "image", 1558 | "type": "IMAGE", 1559 | "links": [ 1560 | 201, 1561 | 203 1562 | ], 1563 | "shape": 3, 1564 | "slot_index": 0 1565 | }, 1566 | { 1567 | "name": "cropped_refined", 1568 | "type": "IMAGE", 1569 | "links": null, 1570 | "shape": 6 1571 | }, 1572 | { 1573 | "name": "cropped_enhanced_alpha", 1574 | "type": "IMAGE", 1575 | "links": null, 1576 | "shape": 6 1577 | }, 1578 | { 1579 | "name": "mask", 1580 | "type": "MASK", 1581 | "links": null, 1582 | "shape": 3 1583 | }, 1584 | { 1585 | "name": "detailer_pipe", 1586 | "type": "DETAILER_PIPE", 1587 | "links": null, 1588 | "shape": 3 1589 | }, 1590 | { 1591 | "name": "cnet_images", 1592 | "type": "IMAGE", 1593 | "links": null, 1594 | "shape": 6 1595 | } 1596 | ], 1597 | "properties": { 1598 | "Node name for S&R": "FaceDetailer" 1599 | }, 1600 | "widgets_values": [ 1601 | 384, 1602 | true, 1603 | 1024, 1604 | 1073953721129240, 1605 | "fixed", 1606 | 4, 1607 | 1, 1608 | "dpmpp_sde", 1609 | "karras", 1610 | 0.35000000000000003, 1611 | 5, 1612 | true, 1613 | true, 1614 | 0.5, 1615 | 10, 1616 | 3, 1617 | "center-1", 1618 | 0, 1619 | 0.93, 1620 | 0, 1621 | 0.7, 1622 | "False", 1623 | 10, 1624 | "", 1625 | 1, 1626 | false, 1627 | 20 1628 | ] 1629 | } 1630 | ], 1631 | "links": [ 1632 | [ 1633 | 1, 1634 | 1, 1635 | 0, 1636 | 2, 1637 | 0, 1638 | "INSTANTID" 1639 | ], 1640 | [ 1641 | 2, 1642 | 4, 1643 | 0, 1644 | 2, 1645 | 1, 1646 | "FACEANALYSIS" 1647 | ], 1648 | [ 1649 | 3, 1650 | 5, 1651 | 0, 1652 | 2, 1653 | 2, 1654 | "CONTROL_NET" 1655 | ], 1656 | [ 1657 | 10, 1658 | 8, 1659 | 1, 1660 | 9, 1661 | 0, 1662 | "CLIP" 1663 | ], 1664 | [ 1665 | 11, 1666 | 8, 1667 | 1, 1668 | 10, 1669 | 0, 1670 | "CLIP" 1671 | ], 1672 | [ 1673 | 12, 1674 | 10, 1675 | 0, 1676 | 2, 1677 | 6, 1678 | "CONDITIONING" 1679 | ], 1680 | [ 1681 | 
15, 1682 | 2, 1683 | 2, 1684 | 11, 1685 | 2, 1686 | "CONDITIONING" 1687 | ], 1688 | [ 1689 | 16, 1690 | 11, 1691 | 0, 1692 | 13, 1693 | 0, 1694 | "LATENT" 1695 | ], 1696 | [ 1697 | 17, 1698 | 13, 1699 | 0, 1700 | 12, 1701 | 0, 1702 | "IMAGE" 1703 | ], 1704 | [ 1705 | 18, 1706 | 8, 1707 | 2, 1708 | 13, 1709 | 1, 1710 | "VAE" 1711 | ], 1712 | [ 1713 | 72, 1714 | 2, 1715 | 0, 1716 | 11, 1717 | 0, 1718 | "MODEL" 1719 | ], 1720 | [ 1721 | 76, 1722 | 29, 1723 | 0, 1724 | 11, 1725 | 3, 1726 | "LATENT" 1727 | ], 1728 | [ 1729 | 106, 1730 | 8, 1731 | 0, 1732 | 39, 1733 | 0, 1734 | "MODEL" 1735 | ], 1736 | [ 1737 | 119, 1738 | 39, 1739 | 0, 1740 | 52, 1741 | 0, 1742 | "MODEL" 1743 | ], 1744 | [ 1745 | 120, 1746 | 52, 1747 | 0, 1748 | 2, 1749 | 4, 1750 | "MODEL" 1751 | ], 1752 | [ 1753 | 121, 1754 | 39, 1755 | 1, 1756 | 52, 1757 | 1, 1758 | "IPADAPTER" 1759 | ], 1760 | [ 1761 | 131, 1762 | 9, 1763 | 0, 1764 | 2, 1765 | 5, 1766 | "CONDITIONING" 1767 | ], 1768 | [ 1769 | 133, 1770 | 7, 1771 | 0, 1772 | 52, 1773 | 2, 1774 | "IMAGE" 1775 | ], 1776 | [ 1777 | 149, 1778 | 2, 1779 | 1, 1780 | 74, 1781 | 0, 1782 | "CONDITIONING" 1783 | ], 1784 | [ 1785 | 150, 1786 | 74, 1787 | 0, 1788 | 11, 1789 | 1, 1790 | "CONDITIONING" 1791 | ], 1792 | [ 1793 | 152, 1794 | 73, 1795 | 0, 1796 | 74, 1797 | 1, 1798 | "CONTROL_NET" 1799 | ], 1800 | [ 1801 | 153, 1802 | 7, 1803 | 0, 1804 | 2, 1805 | 3, 1806 | "IMAGE" 1807 | ], 1808 | [ 1809 | 188, 1810 | 94, 1811 | 0, 1812 | 2, 1813 | 7, 1814 | "IMAGE" 1815 | ], 1816 | [ 1817 | 189, 1818 | 95, 1819 | 0, 1820 | 74, 1821 | 2, 1822 | "IMAGE" 1823 | ], 1824 | [ 1825 | 194, 1826 | 99, 1827 | 0, 1828 | 98, 1829 | 6, 1830 | "BBOX_DETECTOR" 1831 | ], 1832 | [ 1833 | 195, 1834 | 13, 1835 | 0, 1836 | 98, 1837 | 0, 1838 | "IMAGE" 1839 | ], 1840 | [ 1841 | 196, 1842 | 2, 1843 | 0, 1844 | 98, 1845 | 1, 1846 | "MODEL" 1847 | ], 1848 | [ 1849 | 197, 1850 | 8, 1851 | 2, 1852 | 98, 1853 | 3, 1854 | "VAE" 1855 | ], 1856 | [ 1857 | 200, 1858 | 8, 1859 | 1, 1860 | 98, 1861 | 2, 1862 | "CLIP" 1863 | ], 1864 | [ 1865 | 201, 1866 | 98, 1867 | 0, 1868 | 100, 1869 | 0, 1870 | "IMAGE" 1871 | ], 1872 | [ 1873 | 203, 1874 | 98, 1875 | 0, 1876 | 103, 1877 | 0, 1878 | "IMAGE" 1879 | ], 1880 | [ 1881 | 204, 1882 | 103, 1883 | 0, 1884 | 104, 1885 | 0, 1886 | "IMAGE" 1887 | ], 1888 | [ 1889 | 205, 1890 | 2, 1891 | 1, 1892 | 105, 1893 | 1, 1894 | "CONDITIONING" 1895 | ], 1896 | [ 1897 | 206, 1898 | 2, 1899 | 2, 1900 | 105, 1901 | 2, 1902 | "CONDITIONING" 1903 | ], 1904 | [ 1905 | 207, 1906 | 106, 1907 | 0, 1908 | 105, 1909 | 3, 1910 | "LATENT" 1911 | ], 1912 | [ 1913 | 209, 1914 | 8, 1915 | 2, 1916 | 106, 1917 | 1, 1918 | "VAE" 1919 | ], 1920 | [ 1921 | 210, 1922 | 105, 1923 | 0, 1924 | 107, 1925 | 0, 1926 | "LATENT" 1927 | ], 1928 | [ 1929 | 211, 1930 | 8, 1931 | 2, 1932 | 107, 1933 | 1, 1934 | "VAE" 1935 | ], 1936 | [ 1937 | 212, 1938 | 107, 1939 | 0, 1940 | 108, 1941 | 0, 1942 | "IMAGE" 1943 | ], 1944 | [ 1945 | 213, 1946 | 2, 1947 | 0, 1948 | 105, 1949 | 0, 1950 | "MODEL" 1951 | ], 1952 | [ 1953 | 214, 1954 | 103, 1955 | 0, 1956 | 109, 1957 | 0, 1958 | "IMAGE" 1959 | ], 1960 | [ 1961 | 215, 1962 | 109, 1963 | 0, 1964 | 106, 1965 | 0, 1966 | "IMAGE" 1967 | ], 1968 | [ 1969 | 216, 1970 | 109, 1971 | 0, 1972 | 110, 1973 | 0, 1974 | "IMAGE" 1975 | ], 1976 | [ 1977 | 218, 1978 | 111, 1979 | 0, 1980 | 112, 1981 | 0, 1982 | "IMAGE" 1983 | ], 1984 | [ 1985 | 219, 1986 | 113, 1987 | 0, 1988 | 98, 1989 | 4, 1990 | "CONDITIONING" 1991 | ], 1992 | [ 1993 | 220, 1994 | 114, 1995 | 0, 1996 | 98, 1997 | 5, 1998 | 
"CONDITIONING" 1999 | ], 2000 | [ 2001 | 221, 2002 | 8, 2003 | 1, 2004 | 114, 2005 | 0, 2006 | "CLIP" 2007 | ], 2008 | [ 2009 | 222, 2010 | 8, 2011 | 1, 2012 | 113, 2013 | 0, 2014 | "CLIP" 2015 | ], 2016 | [ 2017 | 223, 2018 | 107, 2019 | 0, 2020 | 115, 2021 | 0, 2022 | "IMAGE" 2023 | ], 2024 | [ 2025 | 224, 2026 | 8, 2027 | 0, 2028 | 115, 2029 | 1, 2030 | "MODEL" 2031 | ], 2032 | [ 2033 | 225, 2034 | 8, 2035 | 1, 2036 | 115, 2037 | 2, 2038 | "CLIP" 2039 | ], 2040 | [ 2041 | 226, 2042 | 8, 2043 | 2, 2044 | 115, 2045 | 3, 2046 | "VAE" 2047 | ], 2048 | [ 2049 | 227, 2050 | 99, 2051 | 0, 2052 | 115, 2053 | 6, 2054 | "BBOX_DETECTOR" 2055 | ], 2056 | [ 2057 | 231, 2058 | 113, 2059 | 0, 2060 | 115, 2061 | 5, 2062 | "CONDITIONING" 2063 | ], 2064 | [ 2065 | 232, 2066 | 114, 2067 | 0, 2068 | 115, 2069 | 4, 2070 | "CONDITIONING" 2071 | ], 2072 | [ 2073 | 233, 2074 | 115, 2075 | 0, 2076 | 111, 2077 | 0, 2078 | "IMAGE" 2079 | ] 2080 | ], 2081 | "groups": [], 2082 | "config": {}, 2083 | "extra": {}, 2084 | "version": 0.4 2085 | } 2086 | --------------------------------------------------------------------------------