├── Examples
│   ├── imgs
│   │   ├── blur.jpg
│   │   ├── cs.jpg
│   │   ├── m1.jpg
│   │   ├── m1a.jpg
│   │   ├── wm1.jpg
│   │   ├── Thumbs.db
│   │   ├── anime.jpg
│   │   ├── cs_oc.jpg
│   │   ├── deblur.jpg
│   │   ├── wm1mt.jpg
│   │   ├── wm1op.jpg
│   │   ├── deblur_wf.jpg
│   │   ├── PoseToImage.jpg
│   │   ├── m1_depthmap.jpg
│   │   ├── m1_skeleton.jpg
│   │   ├── PoseToImage_wf.jpg
│   │   ├── omnigen_combine.jpg
│   │   ├── omnigen_t2i_i2i.jpg
│   │   └── deptnmapToImage_wf.jpg
│   ├── depthmap2image.json
│   ├── omnigen_t2i_i2i.json
│   ├── omnigen_combine.json
│   └── OmniGen_wf.json
├── .gitignore
├── requirements.txt
├── pyproject.toml
├── .github
│   └── workflows
│       └── publish.yml
├── LICENSE
├── __init__.py
├── UPDATE.md
├── data.json
├── README.md
└── AILab_OmniGen.py
/Examples/imgs/blur.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/1038lab/ComfyUI-OmniGen/HEAD/Examples/imgs/blur.jpg
--------------------------------------------------------------------------------
/Examples/imgs/cs.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/1038lab/ComfyUI-OmniGen/HEAD/Examples/imgs/cs.jpg
--------------------------------------------------------------------------------
/Examples/imgs/m1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/1038lab/ComfyUI-OmniGen/HEAD/Examples/imgs/m1.jpg
--------------------------------------------------------------------------------
/Examples/imgs/m1a.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/1038lab/ComfyUI-OmniGen/HEAD/Examples/imgs/m1a.jpg
--------------------------------------------------------------------------------
/Examples/imgs/wm1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/1038lab/ComfyUI-OmniGen/HEAD/Examples/imgs/wm1.jpg
--------------------------------------------------------------------------------
/Examples/imgs/Thumbs.db:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/1038lab/ComfyUI-OmniGen/HEAD/Examples/imgs/Thumbs.db
--------------------------------------------------------------------------------
/Examples/imgs/anime.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/1038lab/ComfyUI-OmniGen/HEAD/Examples/imgs/anime.jpg
--------------------------------------------------------------------------------
/Examples/imgs/cs_oc.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/1038lab/ComfyUI-OmniGen/HEAD/Examples/imgs/cs_oc.jpg
--------------------------------------------------------------------------------
/Examples/imgs/deblur.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/1038lab/ComfyUI-OmniGen/HEAD/Examples/imgs/deblur.jpg
--------------------------------------------------------------------------------
/Examples/imgs/wm1mt.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/1038lab/ComfyUI-OmniGen/HEAD/Examples/imgs/wm1mt.jpg
--------------------------------------------------------------------------------
/Examples/imgs/wm1op.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/1038lab/ComfyUI-OmniGen/HEAD/Examples/imgs/wm1op.jpg
--------------------------------------------------------------------------------
/Examples/imgs/deblur_wf.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/1038lab/ComfyUI-OmniGen/HEAD/Examples/imgs/deblur_wf.jpg
--------------------------------------------------------------------------------
/Examples/imgs/PoseToImage.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/1038lab/ComfyUI-OmniGen/HEAD/Examples/imgs/PoseToImage.jpg
--------------------------------------------------------------------------------
/Examples/imgs/m1_depthmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/1038lab/ComfyUI-OmniGen/HEAD/Examples/imgs/m1_depthmap.jpg
--------------------------------------------------------------------------------
/Examples/imgs/m1_skeleton.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/1038lab/ComfyUI-OmniGen/HEAD/Examples/imgs/m1_skeleton.jpg
--------------------------------------------------------------------------------
/Examples/imgs/PoseToImage_wf.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/1038lab/ComfyUI-OmniGen/HEAD/Examples/imgs/PoseToImage_wf.jpg
--------------------------------------------------------------------------------
/Examples/imgs/omnigen_combine.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/1038lab/ComfyUI-OmniGen/HEAD/Examples/imgs/omnigen_combine.jpg
--------------------------------------------------------------------------------
/Examples/imgs/omnigen_t2i_i2i.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/1038lab/ComfyUI-OmniGen/HEAD/Examples/imgs/omnigen_t2i_i2i.jpg
--------------------------------------------------------------------------------
/Examples/imgs/deptnmapToImage_wf.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/1038lab/ComfyUI-OmniGen/HEAD/Examples/imgs/deptnmapToImage_wf.jpg
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore Python cache files
2 | __pycache__
3 |
4 | # Ignore OmniGen directory
5 | OmniGen
6 |
7 | # Ignore temporary files
8 | tmp
9 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | torch>=2.0.0
2 | numpy>=1.26.0
3 | pillow>=10.0.0
4 | huggingface_hub
5 | requests>=2.31.0
6 | transformers>=4.30.0
7 | accelerate>=0.26.0
8 | einops>=0.6.0
9 | safetensors>=0.3.0
10 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "comfyui-omnigen"
3 | version = "1.2.2"
4 | description = "OmniGen node for AI image generation"
5 | license = {file = "LICENSE"}
6 |
7 | dependencies = [
8 | "torch>=2.0.0",
9 | "numpy>=1.26.0",
10 | "pillow>=10.0.0",
11 | "huggingface_hub",
12 | "requests>=2.31.0",
13 | "transformers>=4.30.0",
14 | "accelerate>=0.26.0",
15 | "einops>=0.6.0",
16 | "safetensors>=0.3.0",
17 | ]
18 |
19 | [project.urls]
20 | Repository = "https://github.com/1038lab/ComfyUI-OmniGen"
21 |
22 | [tool.comfy]
23 | PublisherId = "ailab"
24 | DisplayName = "ComfyUI-OmniGen"
25 | Icon = ""
26 |
--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: Publish to Comfy registry
2 | on:
3 | workflow_dispatch:
4 | push:
5 | branches:
6 | - main
7 | paths:
8 | - "pyproject.toml"
9 |
10 | permissions:
11 | issues: write
12 |
13 | jobs:
14 | publish-node:
15 | name: Publish Custom Node to registry
16 | runs-on: ubuntu-latest
17 | if: ${{ github.repository_owner == '1038lab' }}
18 | steps:
19 | - name: Check out code
20 | uses: actions/checkout@v4
21 | - name: Publish Custom Node
22 | uses: Comfy-Org/publish-node-action@v1
23 | with:
24 |         ## Add your own personal access token to your GitHub repository secrets and reference it here.
25 | personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }}
26 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 AI Lab
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | import sys
3 | import os
4 | import importlib.util
5 |
6 | # Add module directory to Python path
7 | current_dir = Path(__file__).parent
8 | if str(current_dir) not in sys.path:
9 |     # Prepend so the bundled modules take import precedence
10 |     sys.path.insert(0, str(current_dir))
11 |
12 | # Initialize mappings
13 | NODE_CLASS_MAPPINGS = {}
14 | NODE_DISPLAY_NAME_MAPPINGS = {}
15 |
16 | def load_nodes():
17 | """Automatically discover and load node definitions"""
18 | for file in current_dir.glob("*.py"):
19 | if file.stem == "__init__":
20 | continue
21 |
22 | try:
23 | # Import module
24 | spec = importlib.util.spec_from_file_location(file.stem, file)
25 | if spec and spec.loader:
26 | module = importlib.util.module_from_spec(spec)
27 | spec.loader.exec_module(module)
28 |
29 | # Update mappings
30 | if hasattr(module, "NODE_CLASS_MAPPINGS"):
31 | NODE_CLASS_MAPPINGS.update(module.NODE_CLASS_MAPPINGS)
32 | if hasattr(module, "NODE_DISPLAY_NAME_MAPPINGS"):
33 | NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS)
34 |
35 | # Initialize paths if available
36 | if hasattr(module, "Paths") and hasattr(module.Paths, "LLM_DIR"):
37 | os.makedirs(module.Paths.LLM_DIR, exist_ok=True)
38 |
39 | except Exception as e:
40 | print(f"Error loading {file.name}: {str(e)}")
41 |
42 | # Load all nodes
43 | load_nodes()
44 |
45 | __all__ = [
46 | "NODE_CLASS_MAPPINGS",
47 | "NODE_DISPLAY_NAME_MAPPINGS"
48 | ]
49 |
50 |
--------------------------------------------------------------------------------
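
`load_nodes()` above registers any sibling `*.py` module that exposes the two mapping dicts. A minimal sketch of such a module (hypothetical `ExampleNode` name, following the same ComfyUI node conventions used by `AILab_OmniGen.py`):

```python
# example_node.py - a hypothetical module that load_nodes() would auto-register
class ExampleNode:
    @classmethod
    def INPUT_TYPES(cls):
        # ComfyUI reads this to build the node's input widgets
        return {"required": {"text": ("STRING", {"default": ""})}}

    RETURN_TYPES = ("STRING",)
    FUNCTION = "run"        # name of the method ComfyUI invokes
    CATEGORY = "🧪AILab"

    def run(self, text):
        return (text.upper(),)

# load_nodes() merges these into the package-level mappings
NODE_CLASS_MAPPINGS = {"ExampleNode": ExampleNode}
NODE_DISPLAY_NAME_MAPPINGS = {"ExampleNode": "Example Node"}
```
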
/UPDATE.md:
--------------------------------------------------------------------------------
1 | # OmniGen Updates
2 |
3 | ## 2025-01-22 (v1.2.2)
4 | ### Improved
5 | - Significant performance improvements in pipeline execution
6 | - Enhanced device management stability
7 | - Optimized memory handling during model operations
8 | - Better error recovery mechanisms
9 |
10 | ### Fixed
11 | - Pipeline device movement reliability
12 | - Memory management in edge cases
13 | - Error handling during device transitions
14 |
15 | ## 2024-11-11
16 | ### Added
17 | - Preset Prompts Support
18 | - Added preset prompt selection from data.json
19 | - Custom prompts take precedence over presets
20 | - Easy to extend with new presets
21 | - Default to empty if no preset selected
22 |
23 | - Model Precision Selection
24 | - Added three precision options:
25 | - Auto: Automatically selects based on VRAM
26 |     - FP16: Standard precision (15.5GB VRAM)
27 | - FP8: Reduced precision (3.4GB VRAM)
28 | - Auto mode selects FP8 for systems with <8GB VRAM
29 | - Shows available VRAM in selection message
30 | - Smart switching between models with proper cleanup
31 |
32 | - Memory Management Improvements
33 | - Three modes available:
34 | - Balanced (Default): Standard operation mode
35 | - Speed Priority: Keeps model in VRAM for faster consecutive generations
36 | - Memory Priority: Aggressive memory saving with model offloading
37 | - Smart model instance caching
38 | - Automatic VRAM cleanup when switching models
39 | - Recommended settings:
40 | - FP8 model (3.4GB VRAM): Speed Priority mode is safe
41 | - FP16 model (15.5GB VRAM): Use Memory Priority mode if VRAM limited
42 |
43 | - Pipeline Improvements
44 | - Better device movement handling
45 | - Original pipeline backup for device movement failures
46 | - Improved error handling and recovery
47 | - Component-wise device movement for better stability
48 |
49 | ### Fixed
50 | - Pipeline device movement issues
51 | - Memory leaks in consecutive generations
52 | - Temporary file cleanup
53 | - Model precision switching issues
54 |
55 | ### Improved
56 | - Error handling and logging
57 | - VRAM usage monitoring
58 | - Temporary file management with UUID
59 | - Code organization and documentation
--------------------------------------------------------------------------------
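
The Auto precision rule above (<8GB VRAM selects FP8) is a single VRAM check. A minimal standalone sketch of the selection logic, mirroring `_auto_select_precision` in `AILab_OmniGen.py`:

```python
import torch

def auto_select_precision(threshold_gb: float = 8.0) -> str:
    """Return "FP8" on GPUs with less than threshold_gb of VRAM, else "FP16"."""
    if torch.cuda.is_available():
        vram_gb = torch.cuda.get_device_properties(0).total_memory / 1024**3
        print(f"Detected {vram_gb:.1f}GB VRAM")
        if vram_gb < threshold_gb:
            return "FP8"
    # No CUDA device: fall back to the FP16 weights
    return "FP16"
```
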
/Examples/depthmap2image.json:
--------------------------------------------------------------------------------
1 | {
2 | "last_node_id": 6,
3 | "last_link_id": 4,
4 | "nodes": [
5 | {
6 | "id": 2,
7 | "type": "LoadImage",
8 | "pos": {
9 | "0": 70,
10 | "1": 80
11 | },
12 | "size": {
13 | "0": 606.6453857421875,
14 | "1": 552.7633666992188
15 | },
16 | "flags": {},
17 | "order": 0,
18 | "mode": 0,
19 | "inputs": [],
20 | "outputs": [
21 | {
22 | "name": "IMAGE",
23 | "type": "IMAGE",
24 | "links": [
25 | 4
26 | ],
27 | "label": "IMAGE"
28 | },
29 | {
30 | "name": "MASK",
31 | "type": "MASK",
32 | "links": null,
33 | "label": "MASK"
34 | }
35 | ],
36 | "properties": {
37 | "Node name for S&R": "LoadImage"
38 | },
39 | "widgets_values": [
40 | "cs.png",
41 | "image"
42 | ]
43 | },
44 | {
45 | "id": 6,
46 | "type": "ailab_OmniGen",
47 | "pos": {
48 | "0": 690,
49 | "1": 80
50 | },
51 | "size": {
52 | "0": 400,
53 | "1": 428
54 | },
55 | "flags": {},
56 | "order": 1,
57 | "mode": 0,
58 | "inputs": [
59 | {
60 | "name": "image_1",
61 | "type": "IMAGE",
62 | "link": 4,
63 | "shape": 7,
64 | "label": "image_1"
65 | },
66 | {
67 | "name": "image_2",
68 | "type": "IMAGE",
69 | "link": null,
70 | "shape": 7,
71 | "label": "image_2"
72 | },
73 | {
74 | "name": "image_3",
75 | "type": "IMAGE",
76 | "link": null,
77 | "shape": 7,
78 | "label": "image_3"
79 | }
80 | ],
81 | "outputs": [
82 | {
83 | "name": "IMAGE",
84 | "type": "IMAGE",
85 | "links": [
86 | 3
87 | ],
88 | "label": "IMAGE",
89 | "slot_index": 0
90 | }
91 | ],
92 | "properties": {
93 | "Node name for S&R": "ailab_OmniGen"
94 | },
95 | "widgets_values": [
96 | "Depth map to image (image_1)",
97 | "",
98 | "Auto",
99 | "Balanced",
100 | 3.5,
101 | 1.8,
102 | 50,
103 | true,
104 | false,
105 | 768,
106 | 512,
107 | 0,
108 | "randomize",
109 | 1024
110 | ]
111 | },
112 | {
113 | "id": 3,
114 | "type": "PreviewImage",
115 | "pos": {
116 | "0": 1100,
117 | "1": 80
118 | },
119 | "size": {
120 | "0": 705.9578247070312,
121 | "1": 556.9784545898438
122 | },
123 | "flags": {},
124 | "order": 2,
125 | "mode": 0,
126 | "inputs": [
127 | {
128 | "name": "images",
129 | "type": "IMAGE",
130 | "link": 3,
131 | "label": "images"
132 | }
133 | ],
134 | "outputs": [],
135 | "properties": {
136 | "Node name for S&R": "PreviewImage"
137 | },
138 | "widgets_values": []
139 | }
140 | ],
141 | "links": [
142 | [
143 | 3,
144 | 6,
145 | 0,
146 | 3,
147 | 0,
148 | "IMAGE"
149 | ],
150 | [
151 | 4,
152 | 2,
153 | 0,
154 | 6,
155 | 0,
156 | "IMAGE"
157 | ]
158 | ],
159 | "groups": [],
160 | "config": {},
161 | "extra": {
162 | "ds": {
163 | "scale": 1,
164 | "offset": [
165 | 0,
166 | 0
167 | ]
168 | }
169 | },
170 | "version": 0.4
171 | }
--------------------------------------------------------------------------------
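
To read the `widgets_values` array on the OmniGen node above: the values follow the widget order declared in `ailab_OmniGen.INPUT_TYPES` (see `AILab_OmniGen.py`), with the extra `"randomize"` entry apparently being ComfyUI's seed-control widget rather than an `INPUT_TYPES` entry. An annotated sketch of the mapping:

```python
# Positional meaning of the ailab_OmniGen "widgets_values" array above
widgets_values = [
    "Depth map to image (image_1)",  # preset_prompt
    "",           # prompt (empty, so the preset text is used)
    "Auto",       # model_precision
    "Balanced",   # memory_management
    3.5,          # guidance_scale
    1.8,          # img_guidance_scale
    50,           # num_inference_steps
    True,         # separate_cfg_infer
    False,        # use_input_image_size_as_output
    768,          # width
    512,          # height
    0,            # seed
    "randomize",  # seed control (added by ComfyUI, not an INPUT_TYPES entry)
    1024,         # max_input_image_size
]
```
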
/Examples/omnigen_t2i_i2i.json:
--------------------------------------------------------------------------------
1 | {
2 | "last_node_id": 11,
3 | "last_link_id": 13,
4 | "nodes": [
5 | {
6 | "id": 3,
7 | "type": "PreviewImage",
8 | "pos": {
9 | "0": 450,
10 | "1": 60
11 | },
12 | "size": {
13 | "0": 553.608642578125,
14 | "1": 826.3672485351562
15 | },
16 | "flags": {},
17 | "order": 1,
18 | "mode": 0,
19 | "inputs": [
20 | {
21 | "name": "images",
22 | "type": "IMAGE",
23 | "link": 11,
24 | "label": "images"
25 | }
26 | ],
27 | "outputs": [],
28 | "properties": {
29 | "Node name for S&R": "PreviewImage"
30 | },
31 | "widgets_values": []
32 | },
33 | {
34 | "id": 9,
35 | "type": "PreviewImage",
36 | "pos": {
37 | "0": 1430,
38 | "1": 60
39 | },
40 | "size": {
41 | "0": 516.1412353515625,
42 | "1": 824.7374267578125
43 | },
44 | "flags": {},
45 | "order": 3,
46 | "mode": 0,
47 | "inputs": [
48 | {
49 | "name": "images",
50 | "type": "IMAGE",
51 | "link": 12,
52 | "label": "images"
53 | }
54 | ],
55 | "outputs": [],
56 | "properties": {
57 | "Node name for S&R": "PreviewImage"
58 | },
59 | "widgets_values": []
60 | },
61 | {
62 | "id": 10,
63 | "type": "ailab_OmniGen",
64 | "pos": {
65 | "0": 33,
66 | "1": 60
67 | },
68 | "size": {
69 | "0": 400,
70 | "1": 428
71 | },
72 | "flags": {},
73 | "order": 0,
74 | "mode": 0,
75 | "inputs": [
76 | {
77 | "name": "image_1",
78 | "type": "IMAGE",
79 | "link": null,
80 | "shape": 7,
81 | "label": "image_1"
82 | },
83 | {
84 | "name": "image_2",
85 | "type": "IMAGE",
86 | "link": null,
87 | "shape": 7,
88 | "label": "image_2"
89 | },
90 | {
91 | "name": "image_3",
92 | "type": "IMAGE",
93 | "link": null,
94 | "shape": 7,
95 | "label": "image_3"
96 | }
97 | ],
98 | "outputs": [
99 | {
100 | "name": "IMAGE",
101 | "type": "IMAGE",
102 | "links": [
103 | 11,
104 | 13
105 | ],
106 | "label": "IMAGE",
107 | "slot_index": 0
108 | }
109 | ],
110 | "properties": {
111 | "Node name for S&R": "ailab_OmniGen"
112 | },
113 | "widgets_values": [
114 | "20yo woman looking at viewer",
115 | "",
116 | "Auto",
117 | "Balanced",
118 | 3.5,
119 | 1.8,
120 | 50,
121 | true,
122 | false,
123 | 512,
124 | 512,
125 | 0,
126 | "randomize",
127 | 1024
128 | ]
129 | },
130 | {
131 | "id": 11,
132 | "type": "ailab_OmniGen",
133 | "pos": {
134 | "0": 1013,
135 | "1": 60
136 | },
137 | "size": {
138 | "0": 400,
139 | "1": 428
140 | },
141 | "flags": {},
142 | "order": 2,
143 | "mode": 0,
144 | "inputs": [
145 | {
146 | "name": "image_1",
147 | "type": "IMAGE",
148 | "link": 13,
149 | "shape": 7,
150 | "label": "image_1"
151 | },
152 | {
153 | "name": "image_2",
154 | "type": "IMAGE",
155 | "link": null,
156 | "shape": 7,
157 | "label": "image_2"
158 | },
159 | {
160 | "name": "image_3",
161 | "type": "IMAGE",
162 | "link": null,
163 | "shape": 7,
164 | "label": "image_3"
165 | }
166 | ],
167 | "outputs": [
168 | {
169 | "name": "IMAGE",
170 | "type": "IMAGE",
171 | "links": [
172 | 12
173 | ],
174 | "label": "IMAGE",
175 | "slot_index": 0
176 | }
177 | ],
178 | "properties": {
179 | "Node name for S&R": "ailab_OmniGen"
180 | },
181 | "widgets_values": [
182 | "The girl in image_1 sitting on rock on top of the mountain (image_1)",
183 | "",
184 | "Auto",
185 | "Balanced",
186 | 3.5,
187 | 1.8,
188 | 50,
189 | true,
190 | false,
191 | 512,
192 | 512,
193 | 0,
194 | "randomize",
195 | 1024
196 | ]
197 | }
198 | ],
199 | "links": [
200 | [
201 | 11,
202 | 10,
203 | 0,
204 | 3,
205 | 0,
206 | "IMAGE"
207 | ],
208 | [
209 | 12,
210 | 11,
211 | 0,
212 | 9,
213 | 0,
214 | "IMAGE"
215 | ],
216 | [
217 | 13,
218 | 10,
219 | 0,
220 | 11,
221 | 0,
222 | "IMAGE"
223 | ]
224 | ],
225 | "groups": [],
226 | "config": {},
227 | "extra": {
228 | "ds": {
229 | "scale": 1.0834705943388678,
230 | "offset": [
231 | 454.5646998745825,
232 | 206.62789963508953
233 | ]
234 | }
235 | },
236 | "version": 0.4
237 | }
--------------------------------------------------------------------------------
/data.json:
--------------------------------------------------------------------------------
1 | {
2 | "PRESET_PROMPTS": {
3 | "None": "",
4 | "20yo woman looking at viewer": "Create an image of a 20-year-old woman looking directly at the viewer, with a neutral or friendly expression.",
5 | "Transform image_1 into an oil painting (image_1)": "Transform image_1 into an oil painting, giving it a textured, classic style with visible brushstrokes and rich color.",
6 | "Transform image_1 into an Anime (image_1)": "Transform image_1 into an anime-style illustration, with large expressive eyes, vibrant colors, and exaggerated features.",
7 | "The girl in image_1 sitting on rock on top of the mountain (image_1)": "Depict the girl from image_1 sitting on a rock at the top of a mountain, gazing out over a breathtaking landscape.",
8 | "Combine 2 People in anime style (image_1, image_2)": "Combine the characters from image_1 and image_2 in anime style, blending their features and surroundings into one cohesive scene.",
9 | "2 people at the coffee shop (image_1, image_2)": "A woman from image_1 and a man from image_2 are sitting across from each other at a cozy coffee shop, each holding a cup of coffee and engaging in conversation.",
10 | "Depth map to image (image_1)": "Following the depth mapping of image_1, generate a new photo: an elderly couple sitting at a cozy coffee shop, with layers of depth and focus.",
11 | "Image to pose skeleton (image_1)": "Detect the skeleton of a human in image_1, creating a skeletal overlay for analysis or artistic interpretation.",
12 | "Pose skeleton to image (image_1)": "Following the pose of the human skeleton detected in image_1, generate a new photo of the subject in the same pose with realistic anatomy.",
13 | "Deblur image (image_1)": "Deblur this image: image_1, removing any motion blur or focus issues to create a clearer, more defined image.",
14 |     "Make an object come to life (image_1)": "Turn an inanimate object in image_1, like a teapot, into a lively character with eyes, a smile, and moving limbs.",
15 | "Transform a landscape (image_1)": "Transform the serene mountain landscape in image_1 into a glowing, magical world with floating islands and sparkling rivers.",
16 | "Mix people and background (image_1, image_2)": "Use image_1 as the background and place the girl wearing a red dress from image_2 in the foreground, making her appear as if she’s walking through a foggy forest.",
17 | "Combine creatures (image_1, image_2)": "Combine the fierce lion from image_1 with the majestic eagle in image_2 to create a mythical creature with the body of a lion and wings of an eagle.",
18 | "Create a futuristic city (image_1)": "Use image_1, a city skyline, and transform it into a futuristic metropolis with flying cars, neon lights, and holograms.",
19 | "Fantasy world building (image_1, image_2, image_3)": "Mix the icy landscape from image_1, the mystical castle from image_2, and the dark forest from image_3 to build a fantasy world full of adventure.",
20 | "Create a weather transformation (image_1)": "Take the sunny day in image_1 and transform it into a dramatic thunderstorm with dark clouds, lightning strikes, and strong winds.",
21 | "Surreal composition (image_1)": "Create a surreal scene by blending image_1, where the sky is full of floating clocks, melting trees, and a river made of clouds.",
22 | "Turn a person into a mythical creature (image_1)": "Take the portrait of the person in image_1 and transform them into a beautiful, ethereal creature with wings, glowing eyes, and a radiant aura.",
23 | "Underwater scene (image_1)": "Place the subject of image_1 underwater, surrounded by schools of fish, vibrant coral reefs, and beams of sunlight filtering through the water.",
24 | "Create a time-lapse effect (image_1)": "Turn image_1 into a time-lapse scene where flowers bloom, the sun rises and sets, and a bustling city grows in the background.",
25 | "Epic battle scene (image_1, image_2)": "Combine the knight in image_1 with the dragon in image_2 to create an epic battle scene in a fiery wasteland.",
26 | "Create a dreamlike atmosphere (image_1)": "Use image_1, a forest, and turn it into a dreamlike scene where the trees glow, the ground sparkles, and the sky is filled with colorful swirls of light.",
27 | "Mix old and new (image_1, image_2)": "Combine the vintage car from image_1 with the futuristic cityscape from image_2, making the car drive through a neon-lit street full of towering skyscrapers.",
28 | "Turn a mundane object into art (image_1)": "Take the everyday coffee mug from image_1 and turn it into a masterpiece, where the mug is surrounded by swirling colors, abstract shapes, and vibrant patterns.",
29 | "Create a superhero scene (image_1, image_2)": "Use image_1 as the setting, and place the superhero from image_2 in the center of the city, with lightning and energy blasts emanating from their hands.",
30 | "Make a monster (image_1)": "Transform the image of a simple animal in image_1 into a terrifying, mythical monster with glowing eyes, sharp claws, and smoke billowing from its mouth.",
31 | "Historical reimagining (image_1)": "Take the old photo of a historical figure in image_1 and reimagine them as a futuristic leader, wearing high-tech armor and standing in front of a modern city.",
32 | "Abstract art from a photograph (image_1)": "Turn the photograph in image_1 into an abstract work of art, where shapes and colors blend and distort, creating a completely new visual interpretation.",
33 | "Create an alien landscape (image_1)": "Use image_1, a barren desert, and transform it into an alien world with purple skies, strange plants, and glowing rocks scattered across the land."
34 | }
35 | }
--------------------------------------------------------------------------------
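
These presets are loaded once at class-definition time in `AILab_OmniGen.py`. A minimal sketch of the same lookup, assuming `data.json` sits next to the calling module:

```python
import json
import os.path as osp

def load_preset_prompts(path=None) -> dict:
    """Load PRESET_PROMPTS, falling back to the empty "None" preset."""
    path = path or osp.join(osp.dirname(__file__), "data.json")
    try:
        with open(path, "r", encoding="utf-8") as f:
            return json.load(f).get("PRESET_PROMPTS", {"None": ""})
    except (OSError, json.JSONDecodeError):
        return {"None": ""}

# A non-empty custom prompt takes precedence over the selected preset:
# final_prompt = prompt.strip() or load_preset_prompts()[preset_prompt]
```
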
/README.md:
--------------------------------------------------------------------------------
1 | # ComfyUI-OmniGen
2 |
3 | A ComfyUI custom node implementation of OmniGen, a powerful all-in-one text-to-image generation and editing model.
4 |
5 | $${\color{red}If\ this\ custom\ node\ helps\ you\ or\ you\ like\ my\ work,\ please\ give\ me\ a\ ⭐\ on\ this\ repo!}$$
6 | $${\color{red}It's\ the\ greatest\ encouragement\ for\ my\ efforts!}$$
7 |
8 | ## Updates
9 | - 2025/01/22: Added new features and bug fixes. Refer to [update.md](UPDATE.md) for details.
10 | - 2024/11/11: Added new features and bug fixes. Refer to [update.md](UPDATE.md) for details.
11 |
12 |
13 | ## Features
14 |
15 | - Text-to-Image Generation
16 | - Image Editing
17 | - Support for Multiple Input Images
18 | - Memory Optimization Options
19 | - Flexible Image Size Control
20 |
21 |
22 | ## Installation
23 | #### 1. ComfyUI-Manager
24 | 1. Search for `ComfyUI-OmniGen` on [ComfyUI Manager](https://github.com/ltdrdata/ComfyUI-Manager) and install it.
25 | 2. Restart ComfyUI
26 | #### 2. Git Clone
27 | 1. Open a terminal in the `ComfyUI/custom_nodes` folder
28 | 2. Run `git clone https://github.com/1038lab/ComfyUI-OmniGen.git`
29 | 3. Restart ComfyUI
30 |
31 | ## Install the required Python packages
32 | 1. Open a terminal in the `ComfyUI/custom_nodes/ComfyUI-OmniGen` folder
33 | 2. Run `..\..\..\python_embeded\python.exe -m pip install -r requirements.txt` (path shown for the Windows portable build)
34 |
35 |
36 | ## Auto-Download Feature
37 |
38 | The node automatically downloads:
39 | 1. The OmniGen code from its GitHub repository
40 | 2. The model weights from Hugging Face
41 |
42 | No manual downloading is required; the node handles everything on first use.
43 | >[!IMPORTANT]
44 | >The first time you use this custom node, it will automatically download the model from Hugging Face. Please be patient, as the download size is approximately 15.5 GB, and it may take some time to complete.
45 | >
46 | >Alternatively, you can manually download the model from Hugging Face at the following link:
47 | >Download OmniGen-v1 from [Hugging Face](https://huggingface.co/Shitao/OmniGen-v1/tree/main)
48 | >After downloading, place the model in the following directory: `comfyui/models/LLM/OmniGen-v1`
49 |
50 | ### Example workflows
51 | Simple usage for text-to-image & image-to-image. [Workflow](/Examples/omnigen_t2i_i2i.json)
52 | 
53 |
54 | Generate an image that combines 2 images. [Workflow](/Examples/omnigen_combine.json)
55 | 
56 |
57 | Following the pose of this image `image_1`, generate a new photo: A Viking old man standing.
58 | 
59 |
60 | Generate a depth map from the input image and create a new image based on the depth map. [Workflow](/Examples/depthmap2image.json)
61 | 
62 |
63 | Deblur this image: `image_1`
64 | 
65 |
66 | ### Example prompts:
67 |
68 | | Prompt | Image_1 | Image_2 | Image_3 | Output |
69 | | ------ | ------ | ------ | ------ | ------ |
70 | | 20yo woman looking at viewer | | | |  |
71 | | Transform `image_1` into an oil painting |  | | |  |
72 | | Transform `image_1` into an Anime |  | | |  |
73 | | The girl in `image_1` sitting on rock on top of the mountain. |  | | |  |
74 | | Combine `image_1` and `image_2` in anime style. |  |  | |  |
75 | | A woman from `image_1` and a man from `image_2` are sitting across from each other at a cozy coffee shop, each holding a cup of coffee and engaging in conversation. |  |  | |  |
76 | | Following the depth mapping of this image `image_1`, generate a new photo: an elderly couple sitting at a cozy coffee shop. The scene should feature the couple enjoying a peaceful moment together at a table. The elderly man, with a gray beard and glasses, and the woman, with short silver hair and a soft cardigan, are sipping coffee and smiling at each other. The atmosphere is warm, with soft, ambient lighting and a blurred background of other patrons, shelves of books, and cafe decorations. The depth and textures from `image_1` should be integrated to emphasize the intimate, serene setting. |  | | |  |
77 | | Detect the skeleton of a human in this image: `image_1` |  | | |  |
78 | | Following the pose of this image `image_1`, generate a new photo: A Viking old man standing |  | | |  |
79 | | Detect the depth map of a human in this image: `image_1` |  | | |  |
80 | | Deblur this image: `image_1` |  | | |  |
81 |
82 |
83 | ## Using Images in Prompts and Settings
84 |
85 | You can reference input images in your prompt using any of these formats:
86 | - `<|image_1|>`, `<|image_2|>`, `<|image_3|>`
87 | - `image_1`, `image_2`, `image_3`
88 | - `image1`, `image2`, `image3`
89 |
90 | ## Usage
91 | The node will automatically download required files on first use:
92 | - OmniGen code from GitHub
93 | - Model weights from Hugging Face (Shitao/OmniGen-v1)
94 |
95 | ### Input Parameters
96 | - `prompt`: Text description of the desired image
97 | - `num_inference_steps`: Number of denoising steps (default: 50)
98 | - `guidance_scale`: Text guidance scale (default: 3.5)
99 | - `img_guidance_scale`: Image guidance scale (default: 1.8)
100 | - `max_input_image_size`: Maximum size for input images (default: 1024)
101 | - `width/height`: Output image dimensions (default: 512x512)
102 | - `seed`: Random seed for reproducibility
103 |
104 | ### Memory Optimization Options
105 | - `separate_cfg_infer`: Separate inference process for different guidance (default: True)
106 | - `offload_model`: Offload model to CPU to reduce memory usage (default: False; enabled by the Memory Priority mode)
107 | - `use_input_image_size_as_output`: Match output size to input image (default: False)
108 |
109 |
110 | ## Credit
111 |
112 | - Model Weights: [Shitao/OmniGen-v1](https://huggingface.co/Shitao/OmniGen-v1)
113 |
114 |
--------------------------------------------------------------------------------
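
All three prompt spellings listed under "Using Images in Prompts and Settings" are rewritten to OmniGen's `<|image_i|>` tags before generation. A minimal standalone sketch of that substitution (the node's `_process_prompt_and_images` additionally saves each image tensor to a temp file):

```python
def normalize_image_tags(prompt: str, n_images: int) -> str:
    """Rewrite image_i / imagei references to <|image_i|> tags and append
    a tag for any connected image the prompt never mentions."""
    for i in range(1, n_images + 1):
        tag = f"<|image_{i}|>"
        if tag in prompt:
            continue  # already canonical
        if f"image_{i}" in prompt:
            prompt = prompt.replace(f"image_{i}", tag)
        elif f"image{i}" in prompt:
            prompt = prompt.replace(f"image{i}", tag)
        else:
            prompt += f" {tag}"
    return prompt.strip()

print(normalize_image_tags("Deblur this image: image_1", 1))
# Deblur this image: <|image_1|>
```
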
/Examples/omnigen_combine.json:
--------------------------------------------------------------------------------
1 | {
2 | "last_node_id": 14,
3 | "last_link_id": 18,
4 | "nodes": [
5 | {
6 | "id": 10,
7 | "type": "PreviewImage",
8 | "pos": {
9 | "0": 460,
10 | "1": 928
11 | },
12 | "size": {
13 | "0": 553.608642578125,
14 | "1": 826.3672485351562
15 | },
16 | "flags": {},
17 | "order": 2,
18 | "mode": 0,
19 | "inputs": [
20 | {
21 | "name": "images",
22 | "type": "IMAGE",
23 | "link": 15,
24 | "label": "images"
25 | }
26 | ],
27 | "outputs": [],
28 | "properties": {
29 | "Node name for S&R": "PreviewImage"
30 | },
31 | "widgets_values": []
32 | },
33 | {
34 | "id": 9,
35 | "type": "PreviewImage",
36 | "pos": {
37 | "0": 1025,
38 | "1": 558
39 | },
40 | "size": {
41 | "0": 1037.9891357421875,
42 | "1": 808.0861206054688
43 | },
44 | "flags": {},
45 | "order": 5,
46 | "mode": 0,
47 | "inputs": [
48 | {
49 | "name": "images",
50 | "type": "IMAGE",
51 | "link": 16,
52 | "label": "images"
53 | }
54 | ],
55 | "outputs": [],
56 | "properties": {
57 | "Node name for S&R": "PreviewImage"
58 | },
59 | "widgets_values": []
60 | },
61 | {
62 | "id": 3,
63 | "type": "PreviewImage",
64 | "pos": {
65 | "0": 460,
66 | "1": 50
67 | },
68 | "size": {
69 | "0": 553.608642578125,
70 | "1": 826.3672485351562
71 | },
72 | "flags": {},
73 | "order": 3,
74 | "mode": 0,
75 | "inputs": [
76 | {
77 | "name": "images",
78 | "type": "IMAGE",
79 | "link": 13,
80 | "label": "images"
81 | }
82 | ],
83 | "outputs": [],
84 | "properties": {
85 | "Node name for S&R": "PreviewImage"
86 | },
87 | "widgets_values": []
88 | },
89 | {
90 | "id": 14,
91 | "type": "ailab_OmniGen",
92 | "pos": {
93 | "0": 46,
94 | "1": 929
95 | },
96 | "size": {
97 | "0": 400,
98 | "1": 428
99 | },
100 | "flags": {},
101 | "order": 0,
102 | "mode": 0,
103 | "inputs": [
104 | {
105 | "name": "image_1",
106 | "type": "IMAGE",
107 | "link": null,
108 | "shape": 7,
109 | "label": "image_1"
110 | },
111 | {
112 | "name": "image_2",
113 | "type": "IMAGE",
114 | "link": null,
115 | "shape": 7,
116 | "label": "image_2"
117 | },
118 | {
119 | "name": "image_3",
120 | "type": "IMAGE",
121 | "link": null,
122 | "shape": 7,
123 | "label": "image_3"
124 | }
125 | ],
126 | "outputs": [
127 | {
128 | "name": "IMAGE",
129 | "type": "IMAGE",
130 | "links": [
131 | 15,
132 | 18
133 | ],
134 | "label": "IMAGE",
135 | "slot_index": 0
136 | }
137 | ],
138 | "properties": {
139 | "Node name for S&R": "ailab_OmniGen"
140 | },
141 | "widgets_values": [
142 | "None",
143 | "30yo man wear black suit looking at the viewer",
144 | "Auto",
145 | "Balanced",
146 | 3.5,
147 | 1.8,
148 | 50,
149 | true,
150 | false,
151 | 512,
152 | 768,
153 | 0,
154 | "randomize",
155 | 1024
156 | ]
157 | },
158 | {
159 | "id": 12,
160 | "type": "ailab_OmniGen",
161 | "pos": {
162 | "0": 46,
163 | "1": 50
164 | },
165 | "size": {
166 | "0": 400,
167 | "1": 428
168 | },
169 | "flags": {},
170 | "order": 1,
171 | "mode": 0,
172 | "inputs": [
173 | {
174 | "name": "image_1",
175 | "type": "IMAGE",
176 | "link": null,
177 | "shape": 7,
178 | "label": "image_1"
179 | },
180 | {
181 | "name": "image_2",
182 | "type": "IMAGE",
183 | "link": null,
184 | "shape": 7,
185 | "label": "image_2"
186 | },
187 | {
188 | "name": "image_3",
189 | "type": "IMAGE",
190 | "link": null,
191 | "shape": 7,
192 | "label": "image_3"
193 | }
194 | ],
195 | "outputs": [
196 | {
197 | "name": "IMAGE",
198 | "type": "IMAGE",
199 | "links": [
200 | 13,
201 | 17
202 | ],
203 | "label": "IMAGE",
204 | "slot_index": 0
205 | }
206 | ],
207 | "properties": {
208 | "Node name for S&R": "ailab_OmniGen"
209 | },
210 | "widgets_values": [
211 | "20yo woman looking at viewer",
212 | "",
213 | "Auto",
214 | "Balanced",
215 | 3.5,
216 | 1.8,
217 | 50,
218 | true,
219 | false,
220 | 512,
221 | 768,
222 | 0,
223 | "randomize",
224 | 1024
225 | ]
226 | },
227 | {
228 | "id": 13,
229 | "type": "ailab_OmniGen",
230 | "pos": {
231 | "0": 1025,
232 | "1": 50
233 | },
234 | "size": {
235 | "0": 400,
236 | "1": 428
237 | },
238 | "flags": {},
239 | "order": 4,
240 | "mode": 0,
241 | "inputs": [
242 | {
243 | "name": "image_1",
244 | "type": "IMAGE",
245 | "link": 17,
246 | "shape": 7,
247 | "label": "image_1"
248 | },
249 | {
250 | "name": "image_2",
251 | "type": "IMAGE",
252 | "link": 18,
253 | "shape": 7,
254 | "label": "image_2"
255 | },
256 | {
257 | "name": "image_3",
258 | "type": "IMAGE",
259 | "link": null,
260 | "shape": 7,
261 | "label": "image_3"
262 | }
263 | ],
264 | "outputs": [
265 | {
266 | "name": "IMAGE",
267 | "type": "IMAGE",
268 | "links": [
269 | 16
270 | ],
271 | "label": "IMAGE",
272 | "slot_index": 0
273 | }
274 | ],
275 | "properties": {
276 | "Node name for S&R": "ailab_OmniGen"
277 | },
278 | "widgets_values": [
279 | "2 people at the coffee shop (image_1, image_2)",
280 | "",
281 | "Auto",
282 | "Balanced",
283 | 3.5,
284 | 1.8,
285 | 50,
286 | true,
287 | false,
288 | 1024,
289 | 512,
290 | 0,
291 | "randomize",
292 | 1024
293 | ]
294 | }
295 | ],
296 | "links": [
297 | [
298 | 13,
299 | 12,
300 | 0,
301 | 3,
302 | 0,
303 | "IMAGE"
304 | ],
305 | [
306 | 15,
307 | 14,
308 | 0,
309 | 10,
310 | 0,
311 | "IMAGE"
312 | ],
313 | [
314 | 16,
315 | 13,
316 | 0,
317 | 9,
318 | 0,
319 | "IMAGE"
320 | ],
321 | [
322 | 17,
323 | 12,
324 | 0,
325 | 13,
326 | 0,
327 | "IMAGE"
328 | ],
329 | [
330 | 18,
331 | 14,
332 | 0,
333 | 13,
334 | 1,
335 | "IMAGE"
336 | ]
337 | ],
338 | "groups": [],
339 | "config": {},
340 | "extra": {
341 | "ds": {
342 | "scale": 0.5054470284993083,
343 | "offset": [
344 | 2609.438866225438,
345 | 676.0223517880113
346 | ]
347 | }
348 | },
349 | "version": 0.4
350 | }
--------------------------------------------------------------------------------
/AILab_OmniGen.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os.path as osp
3 | import os
4 | import torch
5 | import numpy as np
6 | from PIL import Image
7 | from huggingface_hub import snapshot_download
8 | import requests
9 | import folder_paths
10 | import tempfile
11 | import shutil
12 | import json
13 | import uuid
14 |
15 | # Define all path constants
16 | class Paths:
17 | ROOT_DIR = osp.dirname(__file__)
18 | MODELS_DIR = folder_paths.models_dir
19 | LLM_DIR = osp.join(MODELS_DIR, "LLM")
20 | OMNIGEN_DIR = osp.join(LLM_DIR, "OmniGen-v1")
21 | OMNIGEN_CODE_DIR = osp.join(ROOT_DIR, "OmniGen")
22 | TMP_DIR = osp.join(ROOT_DIR, "tmp")
23 | MODEL_FILE_FP16 = osp.join(OMNIGEN_DIR, "model.safetensors")
24 | MODEL_FILE_FP8 = osp.join(OMNIGEN_DIR, "model-fp8_e4m3fn.safetensors")
25 |
26 | # Ensure necessary directories exist
27 | os.makedirs(Paths.LLM_DIR, exist_ok=True)
28 | sys.path.append(Paths.ROOT_DIR)
29 |
30 | class ailab_OmniGen:
31 | VERSION = "1.2.2"
32 | _model_instance = None
33 | _current_precision = None
34 |
35 | # Load preset prompts
36 | try:
37 | json_path = osp.join(osp.dirname(__file__), "data.json")
38 | if osp.exists(json_path):
39 | with open(json_path, 'r', encoding='utf-8') as f:
40 | data = json.load(f)
41 | PRESET_PROMPTS = data.get("PRESET_PROMPTS", {"None": ""})
42 | else:
43 | PRESET_PROMPTS = {"None": ""}
44 | except Exception as e:
45 | print(f"Error loading preset prompts: {e}")
46 | PRESET_PROMPTS = {"None": ""}
47 |
48 | def __init__(self):
49 | self._ensure_code_exists()
50 | self._ensure_model_exists()
51 |
52 | try:
53 | from OmniGen import OmniGenPipeline
54 | self.OmniGenPipeline = OmniGenPipeline
55 | except ImportError as e:
56 | print(f"Error importing OmniGen: {e}")
57 | raise RuntimeError("Failed to import OmniGen. Please check if the code was downloaded correctly.")
58 |
59 | def _ensure_code_exists(self):
60 | """Ensure OmniGen code exists, download from GitHub if not"""
61 | try:
62 | if not osp.exists(Paths.OMNIGEN_CODE_DIR):
63 | print("Downloading OmniGen code from GitHub...")
64 |
65 | files = [
66 | "model.py",
67 | "pipeline.py",
68 | "processor.py",
69 | "scheduler.py",
70 | "transformer.py",
71 | "utils.py",
72 | "__init__.py"
73 | ]
74 |
75 | os.makedirs(Paths.OMNIGEN_CODE_DIR, exist_ok=True)
76 | base_url = "https://raw.githubusercontent.com/VectorSpaceLab/OmniGen/main/OmniGen/"
77 |
78 | for file in files:
79 | url = base_url + file
80 | response = requests.get(url)
81 | if response.status_code == 200:
82 | with open(osp.join(Paths.OMNIGEN_CODE_DIR, file), 'wb') as f:
83 | f.write(response.content)
84 | print(f"Downloaded {file}")
85 | else:
86 | raise RuntimeError(f"Failed to download {file}: {response.status_code}")
87 |
88 | print("OmniGen code setup completed")
89 |
90 | if Paths.OMNIGEN_CODE_DIR not in sys.path:
91 | sys.path.append(Paths.OMNIGEN_CODE_DIR)
92 |
93 | else:
94 | print("OmniGen code already exists")
95 |
96 | except Exception as e:
97 | print(f"Error downloading OmniGen code: {e}")
98 | raise RuntimeError(f"Failed to download OmniGen code: {str(e)}")
99 |
100 | def _ensure_model_exists(self, model_precision=None):
101 | """Ensure model file exists, download if not"""
102 | try:
103 | os.makedirs(Paths.OMNIGEN_DIR, exist_ok=True)
104 |
105 | # Download FP8 model if specified and not exists
106 | if model_precision == "FP8" and not osp.exists(Paths.MODEL_FILE_FP8):
107 | print("FP8 model not found, downloading from Hugging Face...")
108 | url = "https://huggingface.co/silveroxides/OmniGen-V1/resolve/main/model-fp8_e4m3fn.safetensors"
109 | response = requests.get(url, stream=True)
110 | if response.status_code == 200:
111 | with open(Paths.MODEL_FILE_FP8, 'wb') as f:
112 | for chunk in response.iter_content(chunk_size=8192):
113 | if chunk:
114 | f.write(chunk)
115 | print("FP8 model downloaded successfully")
116 | else:
117 | raise RuntimeError(f"Failed to download FP8 model: {response.status_code}")
118 |
119 | # Check if FP16 model exists
120 | if not osp.exists(Paths.MODEL_FILE_FP16):
121 | print("FP16 model not found, starting download from Hugging Face...")
122 | snapshot_download(
123 | repo_id="silveroxides/OmniGen-V1",
124 | local_dir=Paths.OMNIGEN_DIR,
125 | local_dir_use_symlinks=False,
126 | resume_download=True,
127 | token=None,
128 | tqdm_class=None,
129 | )
130 | print("FP16 model downloaded successfully")
131 |
132 | # Verify model files exist after download
133 | if model_precision == "FP8" and not osp.exists(Paths.MODEL_FILE_FP8):
134 | raise RuntimeError("FP8 model download failed")
135 | if not osp.exists(Paths.MODEL_FILE_FP16):
136 | raise RuntimeError("FP16 model download failed")
137 |
138 | print("OmniGen models verified successfully")
139 |
140 | except Exception as e:
141 | print(f"Error during model initialization: {e}")
142 | raise RuntimeError(f"Failed to initialize OmniGen model: {str(e)}")
143 |
144 | def _setup_temp_dir(self):
145 | """Set up temporary directory with unique name"""
146 | self._temp_dir = osp.join(Paths.TMP_DIR, str(uuid.uuid4()))
147 | os.makedirs(self._temp_dir, exist_ok=True)
148 |
149 | def _cleanup_temp_dir(self):
150 | """Clean up temporary directory"""
151 | if hasattr(self, '_temp_dir') and osp.exists(self._temp_dir):
152 | shutil.rmtree(self._temp_dir)
153 |
154 | def _auto_select_precision(self):
155 | """Automatically select precision based on available VRAM"""
156 | if torch.cuda.is_available():
157 | vram_size = torch.cuda.get_device_properties(0).total_memory / 1024**3
158 | if vram_size < 8:
159 |                 print(f"Auto selecting FP8 (Total VRAM: {vram_size:.1f}GB)")
160 |                 return "FP8"
161 |             print(f"Auto selecting FP16 (Total VRAM: {vram_size:.1f}GB)")
162 | return "FP16"
163 |
164 | @classmethod
165 | def INPUT_TYPES(s):
166 | return {
167 | "required": {
168 | "preset_prompt": (list(s.PRESET_PROMPTS.keys()), {"default": "None"}),
169 | "prompt": ("STRING", {"multiline": True, "forceInput": False, "default": ""}),
170 | "model_precision": (["Auto", "FP16", "FP8"], {"default": "Auto"}),
171 | "memory_management": (["Balanced", "Speed Priority", "Memory Priority"], {"default": "Balanced"}),
172 | "guidance_scale": ("FLOAT", {"default": 3.5, "min": 1.0, "max": 5.0, "step": 0.1, "round": 0.01}),
173 | "img_guidance_scale": ("FLOAT", {"default": 1.8, "min": 1.0, "max": 2.0, "step": 0.1, "round": 0.01}),
174 | "num_inference_steps": ("INT", {"default": 50, "min": 1, "max": 100, "step": 1}),
175 | "separate_cfg_infer": ("BOOLEAN", {"default": True}),
176 | "use_input_image_size_as_output": ("BOOLEAN", {"default": False}),
177 | "width": ("INT", {"default": 512, "min": 128, "max": 2048, "step": 8}),
178 | "height": ("INT", {"default": 512, "min": 128, "max": 2048, "step": 8}),
179 | "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
180 | "max_input_image_size": ("INT", {"default": 1024, "min": 128, "max": 2048, "step": 16}),
181 | },
182 | "optional": {
183 | "image_1": ("IMAGE",),
184 | "image_2": ("IMAGE",),
185 | "image_3": ("IMAGE",),
186 | }
187 | }
188 |
189 | RETURN_TYPES = ("IMAGE",)
190 | FUNCTION = "generation"
191 | CATEGORY = "🧪AILab/OmniGen"
192 |
193 | def save_input_img(self, image):
194 | try:
195 |             with tempfile.NamedTemporaryFile(suffix=".png", delete=False, dir=self._temp_dir) as f:  # per-run dir so _cleanup_temp_dir removes these files
196 | img_np = image.numpy()[0] * 255
197 | img_pil = Image.fromarray(img_np.astype(np.uint8))
198 | img_pil.save(f.name)
199 | return f.name
200 | except Exception as e:
201 | print(f"Error saving input image: {e}")
202 | return None
203 |
204 | def _process_prompt_and_images(self, prompt, images):
205 | """Process prompt and images, return updated prompt and image paths"""
206 | input_images = []
207 |
208 | # Auto-generate prompt if empty but images provided
209 | if not prompt and any(images):
210 |             prompt = " ".join(f"<|image_{i+1}|>" for i, img in enumerate(images) if img is not None)
211 |
212 | # Process each image
213 | for i, img in enumerate(images, 1):
214 | if img is not None:
215 | img_path = self.save_input_img(img)
216 | if img_path:
217 | input_images.append(img_path)
218 |                 img_tag = f"<|image_{i}|>"
219 | if f"image_{i}" in prompt:
220 | prompt = prompt.replace(f"image_{i}", img_tag)
221 | elif f"image{i}" in prompt:
222 | prompt = prompt.replace(f"image{i}", img_tag)
223 | elif img_tag not in prompt:
224 | prompt += f" {img_tag}"
225 |
226 | return prompt.strip(), input_images
227 |
228 | def _check_sdpa_support(self):
229 | """Check if system supports Scaled Dot Product Attention"""
230 | try:
231 | import torch
232 | if hasattr(torch.nn.functional, 'scaled_dot_product_attention'):
233 | return True
234 | return False
235 | except Exception as e:
236 | print(f"Error checking SDPA support: {e}")
237 | return False
238 |
239 | def _get_pipeline(self, model_precision, keep_in_vram):
240 | try:
241 | # Reuse existing instance if available
242 | if keep_in_vram and self._model_instance and self._current_precision == model_precision:
243 | print("Reusing existing pipeline instance")
244 | return self._model_instance
245 |
246 | # Check model file
247 | model_file = Paths.MODEL_FILE_FP8 if model_precision == "FP8" else Paths.MODEL_FILE_FP16
248 | if not os.path.exists(model_file):
249 | raise RuntimeError(f"Model file not found: {model_file}")
250 |
251 | device = "cuda" if torch.cuda.is_available() else "cpu"
252 | try:
253 | # Initialize pipeline
254 | pipe = self.OmniGenPipeline.from_pretrained(Paths.OMNIGEN_DIR)
255 |
256 | if pipe is None:
257 | raise RuntimeError("Initial pipeline creation failed")
258 |
259 | # Move to device safely
260 | try:
261 | original_pipe = pipe
262 | pipe = pipe.to(device)
263 | if pipe is None:
264 | print("Warning: Pipeline.to(device) returned None, using original pipeline")
265 | pipe = original_pipe
266 | if hasattr(pipe, 'text_encoder'):
267 | pipe.text_encoder = pipe.text_encoder.to(device)
268 | if hasattr(pipe, 'unet'):
269 | pipe.unet = pipe.unet.to(device)
270 | if hasattr(pipe, 'vae'):
271 | pipe.vae = pipe.vae.to(device)
272 | except Exception as device_error:
273 | print(f"Warning: Error moving pipeline to device: {device_error}")
274 | pipe = original_pipe
275 |
276 | # Save instance if needed
277 | if keep_in_vram:
278 | self._model_instance = pipe
279 | self._current_precision = model_precision
280 |
281 | return pipe
282 |
283 | except Exception as pipe_error:
284 | print(f"Pipeline creation error: {pipe_error}")
285 | raise
286 |
287 | except Exception as e:
288 | print(f"Fatal error in pipeline creation: {str(e)}")
289 | raise RuntimeError(f"Failed to create pipeline: {str(e)}")
290 |
291 | def generation(self, preset_prompt, model_precision, prompt, memory_management, num_inference_steps, guidance_scale,
292 | img_guidance_scale, max_input_image_size, separate_cfg_infer,
293 | use_input_image_size_as_output, width, height, seed,
294 | image_1=None, image_2=None, image_3=None):
295 |         keep_in_vram = False  # defined before try so the finally block cannot hit a NameError
296 |         try:
297 |             if model_precision == "Auto":
298 | model_precision = self._auto_select_precision()
299 |
300 | self._setup_temp_dir()
301 |
302 | # Clear existing instance if precision doesn't match
303 | if self._model_instance and self._current_precision != model_precision:
304 | print(f"Precision changed from {self._current_precision} to {model_precision}, clearing instance")
305 | self._model_instance = None
306 | self._current_precision = None
307 | if torch.cuda.is_available():
308 | torch.cuda.empty_cache()
309 |
310 | # Memory management strategy
311 | keep_in_vram = (memory_management == "Speed Priority")
312 | offload_model = (memory_management == "Memory Priority")
313 |
314 | if memory_management == "Memory Priority":
315 | print("Memory Priority mode: Forcing pipeline recreation")
316 | self._model_instance = None
317 | self._current_precision = None
318 |                 if torch.cuda.is_available(): torch.cuda.empty_cache()
319 |
320 | # Print VRAM status before pipeline creation
321 |             if torch.cuda.is_available(): print(f"Current VRAM usage: {torch.cuda.memory_allocated()/1024**2:.2f}MB")
322 |
323 | final_prompt = prompt.strip() if prompt.strip() else self.PRESET_PROMPTS[preset_prompt]
324 | pipe = self._get_pipeline(model_precision, keep_in_vram)
325 |
326 | # Additional safety checks for position embeddings
327 | if hasattr(pipe, 'text_encoder') and hasattr(pipe.text_encoder, 'position_embedding'):
328 | try:
329 | import transformers
330 | if isinstance(pipe.text_encoder, transformers.models.phi.modeling_phi.PhiModel):
331 | print("Checking position embeddings initialization...")
332 | if not hasattr(pipe.text_encoder, 'rotary_emb'):
333 | print("Warning: Initializing rotary embeddings")
334 | pipe.text_encoder._init_rope()
335 | except ImportError:
336 | print("Warning: Could not check transformers model type")
337 |
338 | # Monitor VRAM usage
339 | if torch.cuda.is_available():
340 | print(f"VRAM usage after pipeline creation: {torch.cuda.memory_allocated()/1024**2:.2f}MB")
341 |
342 | # Process prompt and images
343 | final_prompt, input_images = self._process_prompt_and_images(final_prompt, [image_1, image_2, image_3])
344 | input_images = input_images if input_images else None
345 |
346 | print(f"Processing with prompt: {final_prompt}")
347 | print(f"Model will be {'offloaded' if offload_model else 'kept'} during generation")
348 |
349 | output = pipe(
350 | prompt=final_prompt,
351 | input_images=input_images,
352 | guidance_scale=guidance_scale,
353 | img_guidance_scale=img_guidance_scale,
354 | num_inference_steps=num_inference_steps,
355 | separate_cfg_infer=separate_cfg_infer,
356 | use_kv_cache=True,
357 | offload_kv_cache=True,
358 | offload_model=offload_model,
359 | use_input_image_size_as_output=use_input_image_size_as_output,
360 | width=width,
361 | height=height,
362 | seed=seed,
363 | max_input_image_size=max_input_image_size,
364 | )
365 |
366 | # Print VRAM usage after generation
367 | if torch.cuda.is_available():
368 | print(f"VRAM usage after generation: {torch.cuda.memory_allocated()/1024**2:.2f}MB")
369 |
370 | img = np.array(output[0]) / 255.0
371 | img = torch.from_numpy(img).unsqueeze(0)
372 |
373 | # Clean up if not keeping in VRAM
374 | if not keep_in_vram:
375 | del pipe
376 | if torch.cuda.is_available():
377 | torch.cuda.empty_cache()
378 |
379 | return (img,)
380 |
381 | except Exception as e:
382 | print(f"Error during generation: {e}")
383 |             raise
384 | finally:
385 | self._cleanup_temp_dir()
386 | if not keep_in_vram and torch.cuda.is_available():
387 | torch.cuda.empty_cache()
388 |
389 |
390 | NODE_CLASS_MAPPINGS = {
391 | "ailab_OmniGen": ailab_OmniGen
392 | }
393 |
394 | NODE_DISPLAY_NAME_MAPPINGS = {
395 | "ailab_OmniGen": "OmniGen 🖼️"
396 | }
--------------------------------------------------------------------------------
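
The node's image handoff relies on ComfyUI's IMAGE convention (float tensors in [0,1], shape [B,H,W,C]). A minimal sketch of the two conversions used by `save_input_img` and at the end of `generation`:

```python
import numpy as np
import torch
from PIL import Image

def tensor_to_pil(image: torch.Tensor) -> Image.Image:
    """ComfyUI IMAGE -> PIL: take the first batch entry, scale to 0-255."""
    arr = (image.cpu().numpy()[0] * 255).astype(np.uint8)
    return Image.fromarray(arr)

def pil_to_tensor(img: Image.Image) -> torch.Tensor:
    """PIL -> ComfyUI IMAGE: [1, H, W, C] float tensor in [0, 1]."""
    arr = np.array(img).astype(np.float32) / 255.0
    return torch.from_numpy(arr).unsqueeze(0)
```
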
/Examples/OmniGen_wf.json:
--------------------------------------------------------------------------------
1 | {
2 | "last_node_id": 136,
3 | "last_link_id": 192,
4 | "nodes": [
5 | {
6 | "id": 99,
7 | "type": "JWImageResizeByLongerSide",
8 | "pos": {
9 | "0": -37.84663009643555,
10 | "1": -799.6002197265625
11 | },
12 | "size": {
13 | "0": 226.8000030517578,
14 | "1": 78
15 | },
16 | "flags": {
17 | "collapsed": false
18 | },
19 | "order": 9,
20 | "mode": 0,
21 | "inputs": [
22 | {
23 | "name": "image",
24 | "type": "IMAGE",
25 | "link": 161,
26 | "label": "image"
27 | },
28 | {
29 | "name": "size",
30 | "type": "INT",
31 | "link": 151,
32 | "widget": {
33 | "name": "size"
34 | },
35 | "label": "size"
36 | }
37 | ],
38 | "outputs": [
39 | {
40 | "name": "IMAGE",
41 | "type": "IMAGE",
42 | "links": [
43 | 189
44 | ],
45 | "slot_index": 0,
46 | "label": "IMAGE"
47 | }
48 | ],
49 | "properties": {
50 | "Node name for S&R": "JWImageResizeByLongerSide"
51 | },
52 | "widgets_values": [
53 | 512,
54 | "nearest exact"
55 | ]
56 | },
57 | {
58 | "id": 102,
59 | "type": "JWImageResizeByLongerSide",
60 | "pos": {
61 | "0": -41.614620208740234,
62 | "1": 256.2696533203125
63 | },
64 | "size": {
65 | "0": 226.8000030517578,
66 | "1": 78
67 | },
68 | "flags": {
69 | "collapsed": false
70 | },
71 | "order": 11,
72 | "mode": 4,
73 | "inputs": [
74 | {
75 | "name": "image",
76 | "type": "IMAGE",
77 | "link": 178,
78 | "label": "image"
79 | },
80 | {
81 | "name": "size",
82 | "type": "INT",
83 | "link": 156,
84 | "widget": {
85 | "name": "size"
86 | },
87 | "label": "size"
88 | }
89 | ],
90 | "outputs": [
91 | {
92 | "name": "IMAGE",
93 | "type": "IMAGE",
94 | "links": [
95 | 191
96 | ],
97 | "slot_index": 0,
98 | "label": "IMAGE"
99 | }
100 | ],
101 | "properties": {
102 | "Node name for S&R": "JWImageResizeByLongerSide"
103 | },
104 | "widgets_values": [
105 | 512,
106 | "nearest exact"
107 | ]
108 | },
109 | {
110 | "id": 101,
111 | "type": "JWImageResizeByLongerSide",
112 | "pos": {
113 | "0": -39,
114 | "1": -269.6000061035156
115 | },
116 | "size": {
117 | "0": 226.8000030517578,
118 | "1": 78
119 | },
120 | "flags": {
121 | "collapsed": false
122 | },
123 | "order": 10,
124 | "mode": 4,
125 | "inputs": [
126 | {
127 | "name": "image",
128 | "type": "IMAGE",
129 | "link": 177,
130 | "label": "image"
131 | },
132 | {
133 | "name": "size",
134 | "type": "INT",
135 | "link": 155,
136 | "widget": {
137 | "name": "size"
138 | },
139 | "label": "size"
140 | }
141 | ],
142 | "outputs": [
143 | {
144 | "name": "IMAGE",
145 | "type": "IMAGE",
146 | "links": [
147 | 190
148 | ],
149 | "slot_index": 0,
150 | "label": "IMAGE"
151 | }
152 | ],
153 | "properties": {
154 | "Node name for S&R": "JWImageResizeByLongerSide"
155 | },
156 | "widgets_values": [
157 | 512,
158 | "nearest exact"
159 | ]
160 | },
161 | {
162 | "id": 104,
163 | "type": "Reroute",
164 | "pos": {
165 | "0": 480,
166 | "1": -250
167 | },
168 | "size": [
169 | 75,
170 | 26
171 | ],
172 | "flags": {},
173 | "order": 12,
174 | "mode": 0,
175 | "inputs": [
176 | {
177 | "name": "",
178 | "type": "*",
179 | "link": 160,
180 | "label": "",
181 | "widget": {
182 | "name": "value"
183 | }
184 | }
185 | ],
186 | "outputs": [
187 | {
188 | "name": "",
189 | "type": "INT",
190 | "links": [
191 | 184
192 | ],
193 | "slot_index": 0,
194 | "label": ""
195 | }
196 | ],
197 | "properties": {
198 | "showOutputText": false,
199 | "horizontal": false
200 | }
201 | },
202 | {
203 | "id": 130,
204 | "type": "ShowText|pysssss",
205 | "pos": {
206 | "0": 590,
207 | "1": -850
208 | },
209 | "size": {
210 | "0": 326.7864990234375,
211 | "1": 163.01734924316406
212 | },
213 | "flags": {},
214 | "order": 8,
215 | "mode": 0,
216 | "inputs": [
217 | {
218 | "name": "text",
219 | "type": "STRING",
220 | "link": 175,
221 | "widget": {
222 | "name": "text"
223 | },
224 | "label": "text"
225 | }
226 | ],
227 | "outputs": [
228 | {
229 | "name": "STRING",
230 | "type": "STRING",
231 | "links": [],
232 | "slot_index": 0,
233 | "shape": 6,
234 | "label": "STRING"
235 | }
236 | ],
237 | "properties": {
238 | "Node name for S&R": "ShowText|pysssss"
239 | },
240 | "widgets_values": [
241 | ""
242 | ],
243 | "color": "#232",
244 | "bgcolor": "#353"
245 | },
246 | {
247 | "id": 129,
248 | "type": "SDXLPromptStyler",
249 | "pos": {
250 | "0": 255,
251 | "1": -850
252 | },
253 | "size": {
254 | "0": 299.58685302734375,
255 | "1": 336.3129577636719
256 | },
257 | "flags": {},
258 | "order": 0,
259 | "mode": 0,
260 | "inputs": [],
261 | "outputs": [
262 | {
263 | "name": "text_positive",
264 | "type": "STRING",
265 | "links": [
266 | 175,
267 | 192
268 | ],
269 | "slot_index": 0,
270 | "label": "text_positive"
271 | },
272 | {
273 | "name": "text_negative",
274 | "type": "STRING",
275 | "links": null,
276 | "label": "text_negative"
277 | }
278 | ],
279 | "properties": {
280 | "Node name for S&R": "SDXLPromptStyler"
281 | },
282 | "widgets_values": [
283 | "",
284 | "",
285 | "base",
286 | false,
287 | true,
288 | false
289 | ],
290 | "color": "#232",
291 | "bgcolor": "#353"
292 | },
293 | {
294 | "id": 8,
295 | "type": "LoadImage",
296 | "pos": {
297 | "0": -405.84661865234375,
298 | "1": -796.6002197265625
299 | },
300 | "size": {
301 | "0": 353.0822448730469,
302 | "1": 440.29144287109375
303 | },
304 | "flags": {},
305 | "order": 1,
306 | "mode": 0,
307 | "inputs": [],
308 | "outputs": [
309 | {
310 | "name": "IMAGE",
311 | "type": "IMAGE",
312 | "links": [
313 | 161
314 | ],
315 | "slot_index": 0,
316 | "label": "IMAGE"
317 | },
318 | {
319 | "name": "MASK",
320 | "type": "MASK",
321 | "links": null,
322 | "label": "MASK"
323 | }
324 | ],
325 | "properties": {
326 | "Node name for S&R": "LoadImage"
327 | },
328 | "widgets_values": [
329 | "test_empty.png",
330 | "image"
331 | ]
332 | },
333 | {
334 | "id": 132,
335 | "type": "LoadImage",
336 | "pos": {
337 | "0": -407,
338 | "1": -269.6000061035156
339 | },
340 | "size": {
341 | "0": 354.59674072265625,
342 | "1": 436.3330383300781
343 | },
344 | "flags": {},
345 | "order": 2,
346 | "mode": 4,
347 | "inputs": [],
348 | "outputs": [
349 | {
350 | "name": "IMAGE",
351 | "type": "IMAGE",
352 | "links": [
353 | 177
354 | ],
355 | "slot_index": 0,
356 | "label": "IMAGE"
357 | },
358 | {
359 | "name": "MASK",
360 | "type": "MASK",
361 | "links": null,
362 | "label": "MASK"
363 | }
364 | ],
365 | "properties": {
366 | "Node name for S&R": "LoadImage"
367 | },
368 | "widgets_values": [
369 | "test_empty.png",
370 | "image"
371 | ]
372 | },
373 | {
374 | "id": 133,
375 | "type": "LoadImage",
376 | "pos": {
377 | "0": -409.73052978515625,
378 | "1": 256.40277099609375
379 | },
380 | "size": {
381 | "0": 354.59674072265625,
382 | "1": 436.3330383300781
383 | },
384 | "flags": {},
385 | "order": 3,
386 | "mode": 4,
387 | "inputs": [],
388 | "outputs": [
389 | {
390 | "name": "IMAGE",
391 | "type": "IMAGE",
392 | "links": [
393 | 178
394 | ],
395 | "slot_index": 0,
396 | "label": "IMAGE"
397 | },
398 | {
399 | "name": "MASK",
400 | "type": "MASK",
401 | "links": null,
402 | "label": "MASK"
403 | }
404 | ],
405 | "properties": {
406 | "Node name for S&R": "LoadImage"
407 | },
408 | "widgets_values": [
409 | "test_empty.png",
410 | "image"
411 | ]
412 | },
413 | {
414 | "id": 135,
415 | "type": "ailab_OmniGen",
416 | "pos": {
417 | "0": 590,
418 | "1": -640
419 | },
420 | "size": {
421 | "0": 326.4344787597656,
422 | "1": 394
423 | },
424 | "flags": {},
425 | "order": 13,
426 | "mode": 0,
427 | "inputs": [
428 | {
429 | "name": "image_1",
430 | "type": "IMAGE",
431 | "link": 189,
432 | "shape": 7,
433 | "label": "image_1"
434 | },
435 | {
436 | "name": "image_2",
437 | "type": "IMAGE",
438 | "link": 190,
439 | "shape": 7,
440 | "label": "image_2"
441 | },
442 | {
443 | "name": "image_3",
444 | "type": "IMAGE",
445 | "link": 191,
446 | "shape": 7,
447 | "label": "image_3"
448 | },
449 | {
450 | "name": "prompt",
451 | "type": "STRING",
452 | "link": 192,
453 | "widget": {
454 | "name": "prompt"
455 | },
456 | "label": "prompt"
457 | },
458 | {
459 | "name": "width",
460 | "type": "INT",
461 | "link": 186,
462 | "widget": {
463 | "name": "width"
464 | },
465 | "label": "width"
466 | },
467 | {
468 | "name": "height",
469 | "type": "INT",
470 | "link": 185,
471 | "widget": {
472 | "name": "height"
473 | },
474 | "label": "height"
475 | },
476 | {
477 | "name": "max_input_image_size",
478 | "type": "INT",
479 | "link": 184,
480 | "widget": {
481 | "name": "max_input_image_size"
482 | },
483 | "label": "max_input_image_size"
484 | }
485 | ],
486 | "outputs": [
487 | {
488 | "name": "IMAGE",
489 | "type": "IMAGE",
490 | "links": [
491 | 187
492 | ],
493 | "slot_index": 0,
494 | "label": "IMAGE"
495 | }
496 | ],
497 | "properties": {
498 | "Node name for S&R": "ailab_OmniGen"
499 | },
500 | "widgets_values": [
501 | "20yo woman looking at viewer",
502 | "",
503 | "Auto",
504 | "Balanced",
505 | 3.5,
506 | 1.8,
507 | 50,
508 | true,
509 | false,
510 | 512,
511 | 512,
512 | 794984809397616,
513 | "randomize",
514 | 512
515 | ]
516 | },
517 | {
518 | "id": 66,
519 | "type": "SDXLEmptyLatentSizePicker+",
520 | "pos": {
521 | "0": 260,
522 | "1": -440
523 | },
524 | "size": {
525 | "0": 300.3179626464844,
526 | "1": 170
527 | },
528 | "flags": {},
529 | "order": 4,
530 | "mode": 0,
531 | "inputs": [],
532 | "outputs": [
533 | {
534 | "name": "LATENT",
535 | "type": "LATENT",
536 | "links": [],
537 | "slot_index": 0,
538 | "label": "LATENT"
539 | },
540 | {
541 | "name": "width",
542 | "type": "INT",
543 | "links": [
544 | 186
545 | ],
546 | "slot_index": 1,
547 | "label": "width"
548 | },
549 | {
550 | "name": "height",
551 | "type": "INT",
552 | "links": [
553 | 185
554 | ],
555 | "slot_index": 2,
556 | "label": "height"
557 | }
558 | ],
559 | "properties": {
560 | "Node name for S&R": "SDXLEmptyLatentSizePicker+"
561 | },
562 | "widgets_values": [
563 | "1024x1024 (1.0)",
564 | 1,
565 | 512,
566 | 512
567 | ],
568 | "color": "#223",
569 | "bgcolor": "#335"
570 | },
571 | {
572 | "id": 90,
573 | "type": "Note",
574 | "pos": {
575 | "0": 949,
576 | "1": -848
577 | },
578 | "size": {
579 | "0": 501.8983154296875,
580 | "1": 598.0471801757812
581 | },
582 | "flags": {},
583 | "order": 5,
584 | "mode": 0,
585 | "inputs": [],
586 | "outputs": [],
587 | "properties": {},
588 | "widgets_values": [
589 |         "Custom Node\nhttps://github.com/1038lab/ComfyUI-OmniGen\n\nUpdates\nhttps://github.com/1038lab/ComfyUI-OmniGen/blob/main/UPDATE.md\n\nSample Prompts:\n\n1. Create an image of a 20-year-old woman looking directly at the viewer, with a neutral or friendly expression.\n\n2. Transform image_1 into an oil painting, giving it a textured, classic style with visible brushstrokes and rich color.\n\n3. Transform image_1 into an anime-style illustration, with large expressive eyes, vibrant colors, and exaggerated features.\n\n4. Depict the girl from image_1 sitting on a rock at the top of a mountain, gazing out over a breathtaking landscape.\n\n5. Combine the characters from image_1 and image_2 in anime style, blending their features and surroundings into one cohesive scene.\n\n6. A woman from image_1 and a man from image_2 are sitting across from each other at a cozy coffee shop, each holding a cup of coffee and engaging in conversation.\n\n7. Following the depth mapping of image_1, generate a new photo: an elderly couple sitting at a cozy coffee shop, with layers of depth and focus.\n\n8. Detect the skeleton of a human in image_1, creating a skeletal overlay for analysis or artistic interpretation.\n\n9. Following the pose of the human skeleton detected in image_1, generate a new photo of the subject in the same pose with realistic anatomy."
590 | ],
591 | "color": "#432",
592 | "bgcolor": "#653"
593 | },
594 | {
595 | "id": 3,
596 | "type": "PreviewImage",
597 | "pos": {
598 | "0": 249,
599 | "1": -170
600 | },
601 | "size": [
602 | 1209.980138054469,
603 | 871.4285871205018
604 | ],
605 | "flags": {},
606 | "order": 14,
607 | "mode": 0,
608 | "inputs": [
609 | {
610 | "name": "images",
611 | "type": "IMAGE",
612 | "link": 187,
613 | "label": "images"
614 | }
615 | ],
616 | "outputs": [],
617 | "properties": {
618 | "Node name for S&R": "PreviewImage"
619 | },
620 | "widgets_values": []
621 | },
622 | {
623 | "id": 134,
624 | "type": "Fast Groups Bypasser (rgthree)",
625 | "pos": {
626 | "0": -305,
627 | "1": -1024
628 | },
629 | "size": [
630 | 252,
631 | 130
632 | ],
633 | "flags": {},
634 | "order": 6,
635 | "mode": 0,
636 | "inputs": [],
637 | "outputs": [
638 | {
639 | "name": "OPT_CONNECTION",
640 | "type": "*",
641 | "links": null,
642 | "label": "OPT_CONNECTION"
643 | }
644 | ],
645 | "properties": {
646 | "matchColors": "",
647 | "matchTitle": "",
648 | "showNav": true,
649 | "sort": "position",
650 | "customSortAlphabet": "",
651 | "toggleRestriction": "default"
652 | },
653 | "color": "#323",
654 | "bgcolor": "#535"
655 | },
656 | {
657 | "id": 100,
658 | "type": "PrimitiveNode",
659 | "pos": {
660 | "0": -23,
661 | "1": -1027
662 | },
663 | "size": [
664 | 220,
665 | 82
666 | ],
667 | "flags": {},
668 | "order": 7,
669 | "mode": 0,
670 | "inputs": [],
671 | "outputs": [
672 | {
673 | "name": "INT",
674 | "type": "INT",
675 | "links": [
676 | 151,
677 | 155,
678 | 156,
679 | 160
680 | ],
681 | "slot_index": 0,
682 | "widget": {
683 | "name": "size"
684 | },
685 | "label": "INT"
686 | }
687 | ],
688 | "title": "Set input image resolution",
689 | "properties": {
690 | "Run widget replace on values": false
691 | },
692 | "widgets_values": [
693 | 512,
694 | "fixed"
695 | ],
696 | "color": "#223",
697 | "bgcolor": "#335"
698 | }
699 | ],
700 | "links": [
701 | [
702 | 151,
703 | 100,
704 | 0,
705 | 99,
706 | 1,
707 | "INT"
708 | ],
709 | [
710 | 155,
711 | 100,
712 | 0,
713 | 101,
714 | 1,
715 | "INT"
716 | ],
717 | [
718 | 156,
719 | 100,
720 | 0,
721 | 102,
722 | 1,
723 | "INT"
724 | ],
725 | [
726 | 160,
727 | 100,
728 | 0,
729 | 104,
730 | 0,
731 | "*"
732 | ],
733 | [
734 | 161,
735 | 8,
736 | 0,
737 | 99,
738 | 0,
739 | "IMAGE"
740 | ],
741 | [
742 | 175,
743 | 129,
744 | 0,
745 | 130,
746 | 0,
747 | "STRING"
748 | ],
749 | [
750 | 177,
751 | 132,
752 | 0,
753 | 101,
754 | 0,
755 | "IMAGE"
756 | ],
757 | [
758 | 178,
759 | 133,
760 | 0,
761 | 102,
762 | 0,
763 | "IMAGE"
764 | ],
765 | [
766 | 184,
767 | 104,
768 | 0,
769 | 135,
770 | 6,
771 | "INT"
772 | ],
773 | [
774 | 185,
775 | 66,
776 | 2,
777 | 135,
778 | 5,
779 | "INT"
780 | ],
781 | [
782 | 186,
783 | 66,
784 | 1,
785 | 135,
786 | 4,
787 | "INT"
788 | ],
789 | [
790 | 187,
791 | 135,
792 | 0,
793 | 3,
794 | 0,
795 | "IMAGE"
796 | ],
797 | [
798 | 189,
799 | 99,
800 | 0,
801 | 135,
802 | 0,
803 | "IMAGE"
804 | ],
805 | [
806 | 190,
807 | 101,
808 | 0,
809 | 135,
810 | 1,
811 | "IMAGE"
812 | ],
813 | [
814 | 191,
815 | 102,
816 | 0,
817 | 135,
818 | 2,
819 | "IMAGE"
820 | ],
821 | [
822 | 192,
823 | 129,
824 | 0,
825 | 135,
826 | 3,
827 | "STRING"
828 | ]
829 | ],
830 | "groups": [
831 | {
832 | "title": "ComfyUI-OmniGen",
833 | "bounding": [
834 | 269,
835 | -1059,
836 | 1173,
837 | 142
838 | ],
839 | "color": "#0964f6",
840 | "font_size": 100,
841 | "flags": {}
842 | },
843 | {
844 | "title": "Image_3",
845 | "bounding": [
846 | -419,
847 | 184,
848 | 615,
849 | 520
850 | ],
851 | "color": "#3f789e",
852 | "font_size": 24,
853 | "flags": {}
854 | },
855 | {
856 |       "title": "Image_2",
857 | "bounding": [
858 | -417,
859 | -343,
860 | 615,
861 | 520
862 | ],
863 | "color": "#3f789e",
864 | "font_size": 24,
865 | "flags": {}
866 | },
867 | {
868 | "title": "Image_1",
869 | "bounding": [
870 | -416,
871 | -873,
872 | 615,
873 | 527
874 | ],
875 | "color": "#3f789e",
876 | "font_size": 24,
877 | "flags": {}
878 | }
879 | ],
880 | "config": {},
881 | "extra": {
882 | "ds": {
883 | "scale": 0.6209213230591556,
884 | "offset": [
885 | 1275.2500693850584,
886 | 1201.641418761238
887 | ]
888 | }
889 | },
890 | "version": 0.4
891 | }
--------------------------------------------------------------------------------
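Note: the JSON above is a ComfyUI UI-format graph export ("nodes"/"links"/"groups", "version": 0.4), meant to be dragged onto the ComfyUI canvas rather than POSTed to the server's /prompt endpoint (which expects the separate API-format export). As a minimal, stdlib-only sketch of how such a file can be inspected programmatically, the Python below loads the graph and prints its node types and wiring; the file path is an assumption, so substitute whichever Examples/*.json this graph corresponds to.

import json
from collections import Counter

# Assumed path -- adjust to the actual Examples/*.json file for this graph.
WORKFLOW_PATH = "Examples/OmniGen_wf.json"

with open(WORKFLOW_PATH, "r", encoding="utf-8") as f:
    graph = json.load(f)

# UI-format exports carry the full editor state: nodes, links, groups, view.
print("format version:", graph["version"])
print("node types:", Counter(node["type"] for node in graph["nodes"]))

# Each link is [link_id, src_node_id, src_slot, dst_node_id, dst_slot, type];
# e.g. link 187 above wires ailab_OmniGen (id 135) output 0 into PreviewImage (id 3).
for link_id, src, src_slot, dst, dst_slot, ltype in graph["links"]:
    print(f"link {link_id}: {src}[{src_slot}] -> {dst}[{dst_slot}] ({ltype})")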