├── .github
├── ISSUE_TEMPLATE
│ ├── bug_report.md
│ └── feature_request.md
└── workflows
│ └── publish.yml
├── .gitignore
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── __init__.py
├── docs
└── assets
│ ├── Continue.png
│ ├── Input.png
│ ├── allnodes.png
│ ├── continue1.png
│ ├── continue2.png
│ ├── continue3.png
│ ├── continue4.png
│ ├── continue5.png
│ ├── crystools.png
│ ├── demo.gif
│ ├── devmode.png
│ ├── input2.png
│ ├── lipsync.png
│ ├── lipsync1.png
│ ├── lipsync2.png
│ ├── lipsync3.png
│ ├── menu.png
│ ├── output.png
│ ├── output1.png
│ ├── output2.png
│ ├── save_as_api.png
│ ├── switch.png
│ ├── switch2.png
│ ├── workflow.png
│ ├── workflow2.png
│ ├── workflow3.png
│ ├── workflow4.png
│ └── workflow5.png
├── lipsync_studio.py
├── py
├── __init__.py
└── endpoints.py
├── pyproject.toml
├── requirements.txt
├── utils
├── __init__.py
└── utils.py
├── web
└── js
│ ├── constants.js
│ ├── inputs.js
│ ├── jsnodes.js
│ ├── jszip.min.js
│ ├── nodetype_continue.js
│ ├── nodetype_input.js
│ ├── nodetype_lipSync.js
│ ├── nodetype_output.js
│ ├── nodetype_workflow.js
│ ├── utils.js
│ ├── videoPreview.js
│ ├── widgets.js
│ └── workflows.js
├── workflow.py
└── workflow_nodes.py
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 | 1. Go to '...'
16 | 2. Click on '....'
17 | 3. Scroll down to '....'
18 | 4. See error
19 |
20 | ** Local or Remote**
21 | - [ ] Local
22 | - [ ] Remote
23 |
24 | **Expected behavior**
25 | A clear and concise description of what you expected to happen.
26 |
27 | **Reproductible Workflows**
28 | If applicable, a short workflow that reproduces the bug.
29 |
30 | **console log**
31 | on comfyui page, open the developper tools (F12) and copy all the console output.
32 |
33 | **cmd output log**
34 | copy the output of the window cmd where you run comfyui.
35 |
36 | **Desktop (please complete the following information):**
37 | - Browser [e.g. chrome, safari]
38 |
39 | **Additional context**
40 | Add any other context about the problem here.
41 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: Publish to Comfy registry
2 | on:
3 | workflow_dispatch:
4 | push:
5 | branches:
6 | - master
7 | paths:
8 | - "pyproject.toml"
9 |
10 | jobs:
11 | publish-node:
12 | name: Publish Custom Node to registry
13 | runs-on: ubuntu-latest
14 | # if this is a forked repository. Skipping the workflow.
15 | if: github.event.repository.fork == false
16 | steps:
17 | - name: Check out code
18 | uses: actions/checkout@v4
19 | - name: Publish Custom Node
20 | uses: Comfy-Org/publish-node-action@main
21 | with:
22 | ## Add your own personal access token to your Github Repository secrets and reference it here.
23 | personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }}
24 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .git/*
2 | **/__pycache__/
3 | tests/
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to Comfyui-FlowChain
2 |
3 | Thank you for your interest in contributing to sd-wav2lip-uhq! We appreciate your effort and to help us incorporate your contribution in the best way possible, please follow the following contribution guidelines.
4 |
5 | ## Reporting Bugs
6 |
7 | If you find a bug in the project, we encourage you to report it. Here's how:
8 |
9 | 1. First, check the [existing Issues](url_of_issues) to see if the issue has already been reported. If it has, please add a comment to the existing issue rather than creating a new one.
10 | 2. If you can't find an existing issue that matches your bug, create a new issue. Make sure to include as many details as possible so we can understand and reproduce the problem.
11 |
12 | ## Proposing Changes
13 |
14 | We welcome code contributions from the community. Here's how to propose changes:
15 |
16 | 1. Fork this repository to your own GitHub account.
17 | 2. Create a new branch on your fork for your changes.
18 | 3. Make your changes in this branch.
19 | 4. When you are ready, submit a pull request to the `main` branch of this repository.
20 |
21 | Please note that we use the GitHub Flow workflow, so all pull requests should be made to the `main` branch.
22 |
23 | Before submitting a pull request, please make sure your code adheres to the project's coding conventions and it has passed all tests. If you are adding features, please also add appropriate tests.
24 |
25 | ## Contact
26 |
27 | If you have any questions or need help, please ping the developer via discord NumZ#7184 to make sure your addition will fit well into such a large project and to get help if needed.
28 |
29 | Thank you again for your contribution !
30 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 the comfyui-FlowChain
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ⛓️ Comfyui-FlowChain
2 |
3 | ## 💡 Description
4 |
5 | This repository includes a set of custom nodes for ComfyUI that allow you to:
6 |
7 | - Convert your workflows into nodes
8 | - Chain your workflows together
9 | - Bonus: a node to integrate [LipSync Studio v0.6](https://www.patreon.com/Wav2LipStudio) via API (third-party application)
10 |
11 |
12 |
13 | ## 🚀 All Nodes
14 |
15 |
16 |
17 | ## 📖 Quick Index
18 |
19 | - [🤔 Why, when the 'convert to group node' option already exists?](#-why-when-the-convert-to-group-node-option-already-exists)
20 | - [🚀 Updates](#-updates)
21 | - [🔗 requirements](#-requirements)
22 | - [💻 Installation](#-installation)
23 | - [🕸️ Nodes](#-nodes)
24 | - [🐍 Usage](#-usage)
25 | - [📂 Export Nested Projects](#-Export-Nested-Projects)
26 | - [💪 Special things to know](#-special-things-to-know)
27 | - [📺 Examples](#-examples)
28 | - [😎 Contributing](#-contributing)
29 | - [🙏 Appreciation](#-appreciation)
30 | - [📜 License](#-license)
31 | - [☕ Support](#-support)
32 |
33 | ## 🤔 Why, when the 'convert to group node' option already exists?
34 |
35 | The idea came from the frustration users experienced with the many limitations and bugs in ComfyUI’s group nodes. While group nodes can seem useful for organizing nodes hierarchically, their actual use often leads to issues. Here are some of the major flaws I aimed to address:
36 |
37 | - **Instability with primitive nodes**: Primitive nodes are often dropped when grouped, making them ineffective in complex workflows.
38 | - **Issues with dynamic nodes**: Dynamic nodes, such as those with switches or conditional behaviors, tend to lose connections when grouped.
39 | - **Incompatibility with ControlNet**: When integrating ControlNet nodes into group nodes, the order of inputs and outputs is crucial to avoid reversing results.
40 | - **Complex node maintenance**: Managing nodes in ComfyUI can quickly become overwhelming, especially when you need to modify the same function multiple times.
41 |
42 | By solving these problems, my tool makes node management more intuitive, stable, and efficient, allowing you to focus on what matters: creating.
43 |
44 | ## 🚀 Updates
45 |
46 | **2025.05.17**
47 |
48 | - 💪 Now compatible with all node types: the output connection auto-detects WorkflowInput, and the default input detects WorkflowOutput.
49 | - 📂 New menu option **Workflow › Export Flowchain (ZIP)** bundles all nested workflows into a single ZIP archive for easier project sharing.
50 | - 🛠️ Retrocompatible with previous workflows version (I hope ^^).
51 |
52 | **2025.05.14**
53 |
54 | - 🛠️ Minimum Comfyui version is now **0.3.33**
55 |
56 | **2025.05.12**
57 |
58 | - 🛠️ Fixed instabilities for comfyui version > 0.3.27 (thanks to [o0oradaro0o](https://github.com/o0oradaro0o))
59 | - 🛠️ Fixed order output (thanks to [springjk](https://github.com/springjk))
60 |
61 | **2025.03.26**
62 |
63 | - 💪 Live update of workflow when modifying a subworkflow, just 1 second after saving the subworkflow. But you need to back and forth to see the changes. if you work on one screen no problem, but if you work on two screens you need to back and forth to see the changes.
64 | - ↕️ Capability to order inputs and outputs in the workflow node. Order depends on the order of the input/output from top to bottom in the subworkflow.
65 | - 🛠️ Fixed some instabilities.
66 | - 🆙 Must work with ComfyUI 1.18.9 and above.
67 |
68 | **2025.03.23**
69 | First of all, I apologize for not maintaining the code regularly. ComfyUI has evolved significantly since my last version, so I've completely revised the code to ensure better integration with ComfyUI and avoid future issues.
70 |
71 | Features in this version:
72 |
73 | - 💪 No need to convert your workflows to API format anymore - you can use original workflows directly.
74 | - 💪 Better cache management
75 | - 🗑️ remove "import workflow", no longer usefull since working on standart workflow.
76 | -
77 |
78 | Warning and know issue:
79 |
80 | - 🛑 old version will not be compatible, sorry about that
81 | - ⚠️ The word "seed" in input as "INT" will break the node so if you wan't use "seed" as input prefer to give a name like "seed_input" or whatever...
82 |
83 | **2024.11.01 Initial version features :**
84 | 1 Convert your workflows into nodes
85 |
86 | - ⛓️ Chain your workflow
87 | - 👄 Extra Node that use [LipSync Studio v0.6](https://www.patreon.com/Wav2LipStudio)
88 |
89 | ## 🔗 requirements
90 |
91 | - **comfyui**: Be sure to have **comfyUI 0.3.33** and a **ComfyUI front-end** version of at least **1.18.9**. If not, update ComfyUI to the latest version. To find the front-end version, go to ComfyUI settings (the gear icon), click "About," and check the version at the top of the page.
92 |
93 | ## 💻 Installation
94 |
95 | ### Method 1: Automatic Installation
96 |
97 | 1. Go to the **ComfyUI Manager** and click **Custom Nodes Manager**.
98 | 2. search for **FlowChain** and click **Install**.
99 |
100 | ### Method 2: Manual Installation
101 |
102 | 1. Install [Git](https://git-scm.com/)
103 | 2. Go to folder ..\ComfyUI\custom_nodes
104 | 3. Run cmd.exe
105 | > **Windows**:
106 | >
107 | > > **Variant 1:** In folder click panel current path and input **cmd** and press **Enter** on keyboard
108 | > >
109 | > > **Variant 2:** Press on keyboard Windows+R, and enter cmd.exe open window cmd, enter **cd /d your_path_to_custom_nodes**, **Enter** on keyboard
110 | 4. Then do :
111 |
112 | `git clone https://github.com/numz/Comfyui-FlowChain.git`
113 |
114 | After this command be created folder Comfyui-FlowChain
115 |
116 | 8. Go to the folder:
117 |
118 | `cd Comfyui-FlowChain`
119 |
120 | 8. Then do:
121 |
122 | `pip install -r requirements.txt`
123 |
124 | 7. Run Comfyui...
125 |
126 | ## 🕸️ Nodes:
127 |
128 | | | Name | Description | ComfyUI category |
129 | | :-----------------------------------------------: | :------------------ | :-------------------------------------------------------------------------------------------: | :--------------: |
130 | |
| _Workflow_ | Node that allows loading workflows. It will show Inputs and Outputs into the loaded Workflows | FlowChain ⛓️ |
131 | |
| _Workflow Input_ | Node used to declare the inputs of your workflows. | FlowChain ⛓️ |
132 | |
| _Workflow Output_ | Node used to declare the outputs of your workflows. | FlowChain ⛓️ |
133 | |
| _Workflow Continue_ | Node to stop/Continue the workflow process. | FlowChain ⛓️ |
134 | |
| _Workflow Lipsync_ | Extra Node to use LipSync Studio via API | FlowChain ⛓️ |
135 |
136 | # 🐍 Usage
137 |
138 | [Quick Video Tutorial](https://www.youtube.com/watch?v=7C8-vX0sTAc)
139 |
140 | ## ⛓️ Workflow Node
141 |
142 | 
143 |
144 | Select a workflow from the **workflows** dropdown menu. This list displays all compatible workflows saved in your ComfyUI **user directory**:
145 |
146 | `ComfyUI\user\default\workflows`
147 |
148 | For a workflow to be considered **compatible**, it must contain at least one "Workflow Input" or "Workflow Output" node. This ensures the workflow can properly interface with the FlowChain system.
149 |
150 | **Note:** After adding new workflows to this directory, you'll need to refresh the ComfyUI interface (press F5) to see them appear in the dropdown list. But once loaded, it will refresh the parent node automatically
151 |
152 | ## ⛓️ Input Node
153 |
154 | 
155 |
156 | - Allow to declare inputs in your workflow.
157 | - drag the **output** socket on another input socket node, type detects automatically
158 | - Give a Name.
159 | - **Default** value is used when debugging your workflow or if you don't plug an input into the **Workflow** node.
160 |
161 | - 
162 |
163 | ## ⛓️ Output Node
164 |
165 | 
166 |
167 | - Allow to declare outputs in your workflow.
168 | - drag the **default** input socket on another output socket node, type detects automatically
169 | - Give a Name.
170 | - **Default** value is used to connect the output.
171 |
172 | 
173 |
174 | ## ⛓️ Continue Node
175 |
176 | 
177 |
178 | - Usually associated with a **boolean** input plugged on **"continue_workflow"**, allow to "Stop" a workflow if **"continue_workflow"** is False.
179 | - Types available : **"IMAGE", "LATENT"**
180 | - Give a Name and select the type.
181 | - During development of your workflow, If \*\*continue_worflow" is False it will let pass only 1 image/latent, and if True it will let pass all images/latents.
182 |
183 | 
184 |
185 | - But When a workflow is loaded into the **"workflow"** Node, which contain a **"Workflow Continue"** node, it will be delete if **continue_workflow** is False. That allow to create conditional situation where you want to prevent computation of some parts.
186 |
187 | 
188 |
189 | ## 📂 Export Nested Projects
190 |
191 | A new Menu is available in **workflow > Export Flowchain (Zip)**
192 |
193 | 
194 |
195 | Bundles all nested workflows into a single ZIP archive for easier project sharing.
196 |
197 | ## 🔉👄 Workflow LipSync Node
198 |
199 | 
200 |
201 | - Extra Node that allow to use third-party app **[Lipsync Studio v0.6](https://www.patreon.com/Wav2LipStudio)** Via it's API
202 | - Inputs:
203 | - **frames**: Images to compute.
204 | - **audio**: Audio to add.
205 | - **faceswap_image**: An image with a face to swap.
206 | - **lipsync_studio_url**: usually http://127.0.0.1:7860
207 | - **project_name**: name of your project.
208 | - **face_id**: id of the face you want to lipsync and faceswap.
209 | - **fps**: frame per second.
210 | - **avatar**: Will be used create a driving video, 10 avatars are available, each give different output result.
211 | - **close mouth before Lip sync**: Allow to close the mouth before create the lip sync.
212 | - **quality**: Can be **Low, Medium, High**, in High gfpgan will be used to enhance quality output.
213 | - **skip_first_frame**: number of frames to remove at the beginning of the video.
214 | - **load_cap**: number of frames to load.
215 | - **low vram**: allow to decrease VRAM consumption for low pc configuration.
216 |
217 | Project will be automatically created into your Lipsync Studio **projects** folder. You can then load it into studio and work directly from studio if the output not good enough for you.
218 |
219 | 
220 |
221 | ## 💪 Special things to know
222 |
223 | the **"🪛 Switch"** nodes from [Crystools](https://github.com/crystian/ComfyUI-Crystools) have a particular place in **workflow Node**
224 |
225 | 
226 |
227 | Let's illustrate this with an example:
228 |
229 | 
230 |
231 | Here we want to choose between video1 or video2. It depends on the **boolean** value in **Switch Image Node**. The issue here is that both videos will be loaded before Switch. To prevent both videos from being loaded, the **"workflow node"** will check the boolean value, remove the unused node, and directly connect the correct value to the preview image.
232 |
233 | 
234 |
235 | This gives you the ability to create truly conditional cases in your workflows, without computing irrelevant nodes.
236 |
237 | # 📺 Examples
238 |
239 | Coming soon
240 |
241 | # 😎 Contributing
242 |
243 | We welcome contributions to this project. When submitting pull requests, please provide a detailed description of the changes. see [CONTRIBUTING](CONTRIBUTING.md) for more information.
244 |
245 | # 🙏 Appreciation
246 |
247 | - [Jedrzej Kosinski](https://github.com/Kosinkadink/ComfyUI-VideoHelperSuite) : For the code quality that really inspired me during development.
248 |
249 | # ☕ Support
250 |
251 | this project is open-source effort that is free to use and modify. I rely on the support of users to keep this project going and help improve it. If you'd like to support me, you can make a donation on my [buy me a coffe](https://buymeacoffee.com/numzzz5) or [Patreon page](https://www.patreon.com/Wav2LipStudio). Any contribution, large or small, is greatly appreciated!
252 |
253 | Your support helps me cover the costs of development and maintenance, and allows me to allocate more time and resources to enhancing this project. Thank you for your support!
254 |
255 | [](https://buymeacoffee.com/numzzz5)
256 |
257 | [patreon page](https://www.patreon.com/Wav2LipStudio)
258 |
259 | # 📜 License
260 |
261 | - The code in this repository is released under the MIT license as found in the [LICENSE file](LICENSE).
262 |
263 | ```
264 |
265 | ```
266 |
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 | import importlib.util
3 | import sys
4 | import traceback
5 | from .lipsync_studio import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS
6 | from .workflow_nodes import NODE_CLASS_MAPPINGS_NODES, NODE_DISPLAY_NAME_MAPPINGS_NODES
7 | from .workflow import NODE_CLASS_MAPPINGS_WORKFLOW, NODE_DISPLAY_NAME_MAPPINGS_WORKFLOW
8 | from pathlib import Path
9 |
10 | NODE_CLASS_MAPPINGS.update(NODE_CLASS_MAPPINGS_NODES)
11 | NODE_CLASS_MAPPINGS.update(NODE_CLASS_MAPPINGS_WORKFLOW)
12 | NODE_DISPLAY_NAME_MAPPINGS.update(NODE_DISPLAY_NAME_MAPPINGS_NODES)
13 | NODE_DISPLAY_NAME_MAPPINGS.update(NODE_DISPLAY_NAME_MAPPINGS_WORKFLOW)
14 |
15 |
16 | def get_ext_dir(subpath=None, mkdir=False):
17 | dir = os.path.dirname(__file__)
18 | if subpath is not None:
19 | dir = os.path.join(dir, subpath)
20 |
21 | dir = os.path.abspath(dir)
22 |
23 | if mkdir and not os.path.exists(dir):
24 | os.makedirs(dir)
25 | return dir
26 |
27 |
28 | py = Path(get_ext_dir("py"))
29 | files = list(py.glob("*.py"))
30 | for file in files:
31 | try:
32 | name = os.path.splitext(file)[0]
33 | spec = importlib.util.spec_from_file_location(name, os.path.join(py, file))
34 | module = importlib.util.module_from_spec(spec)
35 | sys.modules[name] = module
36 | spec.loader.exec_module(module)
37 | if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
38 | NODE_CLASS_MAPPINGS.update(module.NODE_CLASS_MAPPINGS)
39 | if hasattr(module, "NODE_DISPLAY_NAME_MAPPINGS") and getattr(module,
40 | "NODE_DISPLAY_NAME_MAPPINGS") is not None:
41 | NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS)
42 | except Exception as e:
43 | traceback.print_exc()
44 | __all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS", "WEB_DIRECTORY"]
45 |
46 | WEB_DIRECTORY = "./web"
47 |
--------------------------------------------------------------------------------
/docs/assets/Continue.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/docs/assets/Continue.png
--------------------------------------------------------------------------------
/docs/assets/Input.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/docs/assets/Input.png
--------------------------------------------------------------------------------
/docs/assets/allnodes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/docs/assets/allnodes.png
--------------------------------------------------------------------------------
/docs/assets/continue1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/docs/assets/continue1.png
--------------------------------------------------------------------------------
/docs/assets/continue2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/docs/assets/continue2.png
--------------------------------------------------------------------------------
/docs/assets/continue3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/docs/assets/continue3.png
--------------------------------------------------------------------------------
/docs/assets/continue4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/docs/assets/continue4.png
--------------------------------------------------------------------------------
/docs/assets/continue5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/docs/assets/continue5.png
--------------------------------------------------------------------------------
/docs/assets/crystools.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/docs/assets/crystools.png
--------------------------------------------------------------------------------
/docs/assets/demo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/docs/assets/demo.gif
--------------------------------------------------------------------------------
/docs/assets/devmode.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/docs/assets/devmode.png
--------------------------------------------------------------------------------
/docs/assets/input2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/docs/assets/input2.png
--------------------------------------------------------------------------------
/docs/assets/lipsync.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/docs/assets/lipsync.png
--------------------------------------------------------------------------------
/docs/assets/lipsync1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/docs/assets/lipsync1.png
--------------------------------------------------------------------------------
/docs/assets/lipsync2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/docs/assets/lipsync2.png
--------------------------------------------------------------------------------
/docs/assets/lipsync3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/docs/assets/lipsync3.png
--------------------------------------------------------------------------------
/docs/assets/menu.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/docs/assets/menu.png
--------------------------------------------------------------------------------
/docs/assets/output.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/docs/assets/output.png
--------------------------------------------------------------------------------
/docs/assets/output1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/docs/assets/output1.png
--------------------------------------------------------------------------------
/docs/assets/output2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/docs/assets/output2.png
--------------------------------------------------------------------------------
/docs/assets/save_as_api.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/docs/assets/save_as_api.png
--------------------------------------------------------------------------------
/docs/assets/switch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/docs/assets/switch.png
--------------------------------------------------------------------------------
/docs/assets/switch2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/docs/assets/switch2.png
--------------------------------------------------------------------------------
/docs/assets/workflow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/docs/assets/workflow.png
--------------------------------------------------------------------------------
/docs/assets/workflow2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/docs/assets/workflow2.png
--------------------------------------------------------------------------------
/docs/assets/workflow3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/docs/assets/workflow3.png
--------------------------------------------------------------------------------
/docs/assets/workflow4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/docs/assets/workflow4.png
--------------------------------------------------------------------------------
/docs/assets/workflow5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/docs/assets/workflow5.png
--------------------------------------------------------------------------------
/lipsync_studio.py:
--------------------------------------------------------------------------------
1 | import shutil
2 | from gradio_client import Client
3 | import os
4 | import subprocess
5 | import folder_paths
6 | import numpy as np
7 | import hashlib
8 | from .utils.utils import get_ffmpeg_path
9 | import sys
10 | from PIL import Image
11 |
12 |
13 | class WorkflowLipSync:
14 | def __init__(self):
15 | self.ws = None
16 |
17 | @classmethod
18 | def INPUT_TYPES(cls):
19 | return {"required": {
20 | "lipsync_studio_url": ("STRING", {"default": "http://127.0.0.1:7860/"}),
21 | "project_name": ("STRING", {"default": "project1"}),
22 | "frames": ("IMAGE",),
23 | "face_id": ("INT", {"default": 0, "min": 0, "max": 10, "step": 1}),
24 | "fps": ("FLOAT", {"default": 25., "min": 0., "max": 60., "step": 1}),
25 | "audio": ("AUDIO",),
26 | "avatar": (["Avatar 1", "Avatar 2", "Avatar 3", "Avatar 4", "Avatar 5", "Avatar 6", "Avatar 7", "Avatar 8", "Avatar 9", "Avatar 10"],),
27 | "close_mouth_before_lipsync": ("BOOLEAN", {"default": True}),
28 | "quality": (["Low", "Medium", "High"],),
29 | "skip_first_frames": ("INT", {"default": 0, "min": 0, "max": 10000, "step": 1}),
30 | "load_cap": ("INT", {"default": 0, "min": 0, "max": 10000, "step": 1}),
31 | "low_vram": ("BOOLEAN", {"default": False}),
32 |
33 | },
34 | "optional": {
35 | "faceswap_image": ("IMAGE",),
36 | }}
37 |
38 | # RETURN_TYPES = ("STRING", "STRING")
39 | RETURN_TYPES = ()
40 | # RETURN_NAMES = ("faceswap_video_path", "lipsync_video_path")
41 | RETURN_NAMES = ()
42 | FUNCTION = "generate"
43 | CATEGORY = "FlowChain ⛓️"
44 |
45 | OUTPUT_NODE = True
46 |
47 | @classmethod
48 | def IS_CHANGED(s, project_name, **kworgs):
49 | m = hashlib.sha256()
50 | m.update(project_name.encode())
51 | return m.digest().hex()
52 |
53 | def generate(self, lipsync_studio_url, project_name, frames, fps, face_id, audio, avatar, close_mouth_before_lipsync, quality, skip_first_frames,
54 | load_cap, low_vram, faceswap_image=None, **kwargs):
55 | client = Client(lipsync_studio_url, verbose=False)
56 | full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(
57 | project_name, folder_paths.get_output_directory(), frames[0].shape[1], frames[0].shape[0])
58 | # Set project name
59 | client.predict(project_name, api_name="/set_project_name")
60 | frame_list = []
61 | counter = 0
62 | if not os.path.exists(os.path.join(full_output_folder, project_name)):
63 | os.makedirs(os.path.join(full_output_folder, project_name))
64 | for (batch_number, image) in enumerate(frames):
65 | i = 255. * image.cpu().numpy()
66 | filename_with_batch_num = filename.replace("%batch_num%", str(batch_number))
67 | file = f"{filename_with_batch_num}_{counter:05}_.png"
68 | img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
69 | img.save(os.path.join(full_output_folder, project_name, file), compress_level=4)
70 | img_info = {
71 | 'path': os.path.join(full_output_folder, project_name, file)
72 | }
73 | frame_list.append(img_info)
74 | counter += 1
75 |
76 | client.predict(
77 | frame_list,
78 | fps,
79 | api_name="/new_frames"
80 | )
81 | if load_cap == 0:
82 | load_cap = len(frames)
83 |
84 | client.predict(
85 | skip_first_frames + 1, # float (numeric value between 1 and 1) in 'Trim Video Start' Slider component
86 | api_name="/video_start_frame"
87 | )
88 | client.predict(
89 | load_cap + 1, # float (numeric value between 1 and 1) in 'Trim Video Start' Slider component
90 | api_name="/video_stop_frame"
91 | )
92 |
93 | if faceswap_image is not None:
94 | i = 255. * faceswap_image[0].cpu().numpy()
95 | file = f"faceswap_{counter:05}_.png"
96 | img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
97 | img.save(os.path.join(full_output_folder, project_name, file), compress_level=4)
98 | client.predict(
99 | os.path.join(full_output_folder, project_name, file),
100 | # filepath in 'Face Swap' Image component
101 | api_name="/new_face_swap_img"
102 | )
103 | else:
104 | client.predict(
105 | None,
106 | # filepath in 'Face Swap' Image component
107 | api_name="/new_face_swap_img"
108 | )
109 |
110 | client.predict(
111 | 1,
112 | # float (numeric value between 1 and 4) in 'Resolution Divide Factor' Slider component
113 | 30, # float (numeric value between 0 and 100) in 'Min Face Width Detection' Slider component
114 | True, # bool in 'Keyframes On Speaker Change' Checkbox component
115 | True, # bool in 'Keyframes On Scene Change' Checkbox component
116 | skip_first_frames + 1, # int 'Trim Video Start' Slider component
117 | load_cap, # float (numeric value between 1 and 1) in 'Trim Video Stop' Slider component
118 | 4, # float (numeric value between 1 and 64) in 'Number of CPU' Slider component
119 | 1000,
120 | api_name="/analyse_video"
121 | )
122 | # Set Audio Type
123 | client.predict(
124 | # config["audio_path"] if config["audio_path"] else "Input Video",# Literal['File', 'Generate', 'Input Video'] in 'Audio Input' Radio component
125 | "File", # Literal['File', 'Generate', 'Input Video'] in 'Audio Input' Radio component
126 | api_name="/set_audio_type"
127 | )
128 | output_file_audio = f"{filename}_{counter:05}.wav"
129 | output_file_audio_path = os.path.join(full_output_folder, project_name, output_file_audio)
130 |
131 | # FFmpeg command to save audio in WAV format
132 | channels = audio['waveform'].size(1)
133 |
134 | wav_args = [ffmpeg_path(), "-v", "error", "-n",
135 | "-ar", str(audio['sample_rate']), # Sample rate
136 | "-ac", str(channels), # Number of channels
137 | "-f", "f32le", "-i", "-", # Audio format and input from stdin
138 | "-c:a", "pcm_s16le", # Encode as 16-bit PCM WAV
139 | output_file_audio_path]
140 | env = os.environ.copy()
141 | audio_data = audio['waveform'].squeeze(0).transpose(0, 1) \
142 | .numpy().tobytes()
143 |
144 | try:
145 | res = subprocess.run(wav_args, input=audio_data,
146 | env=env, capture_output=True, check=True)
147 | except subprocess.CalledProcessError as e:
148 | raise Exception("An error occurred in the ffmpeg subprocess:\n" \
149 | + e.stderr.decode("utf-8"))
150 |
151 | if res.stderr:
152 | print(res.stderr.decode("utf-8"), end="", file=sys.stderr)
153 |
154 | client.predict(
155 | output_file_audio_path,
156 | # filepath in 'Speech' Audio component
157 | api_name="/set_audio_file"
158 | )
159 | client.predict(
160 | avatar,
161 | # Literal['None', 'Avatar 1', 'Avatar 2', 'Avatar 3', 'Avatar 4', 'Avatar 5', 'Avatar 6', 'Avatar 7', 'Avatar 8', 'Avatar 9', 'Avatar 10'] in 'Avatar' Dropdown component
162 | api_name="/change_avatar"
163 | )
164 | client.predict(
165 | low_vram, # bool in 'Low VRAM' Checkbox component
166 | api_name="/set_low_vram"
167 | )
168 | client.predict(
169 | avatar,
170 | api_name="/generate_driving_video"
171 | )
172 | client.predict(
173 | quality, # Literal['Low', 'Medium', 'High', 'Best'] in 'Video Quality' Radio component
174 | api_name="/set_video_quality"
175 | )
176 | for id_speaker in range(face_id):
177 | client.predict(
178 | str(id_speaker), # Literal[] in 'Face Id' Dropdown component
179 | False, # bool in 'Show wav2lip Output' Checkbox component
180 | api_name="/set_face_id"
181 | )
182 | client.predict(
183 | False, # bool in 'Speaker' Checkbox component
184 | api_name="/set_speaker"
185 | )
186 | if faceswap_image is not None:
187 | client.predict(
188 | "None", # Literal[] in 'Face swap id' Radio component
189 | api_name="/set_faceswap"
190 | )
191 |
192 | client.predict(
193 | str(face_id), # Literal[] in 'Face Id' Dropdown component
194 | False, # bool in 'Show wav2lip Output' Checkbox component
195 | api_name="/set_face_id"
196 | )
197 |
198 | client.predict(
199 | True, # bool in 'Speaker' Checkbox component
200 | api_name="/set_speaker"
201 | )
202 |
203 | if faceswap_image is not None:
204 | client.predict(
205 | "0", # Literal[] in 'Face swap id' Radio component
206 | api_name="/set_faceswap"
207 | )
208 | client.predict(
209 | api_name="/generate_faceswap"
210 | )
211 |
212 | client.predict(
213 | True, # bool in 'Stop video' Checkbox component
214 | api_name="/set_stop_video"
215 | )
216 | client.predict(
217 | close_mouth_before_lipsync, # bool in 'Stop video' Checkbox component
218 | api_name="/set_face_zero"
219 | )
220 |
221 | # Generate Wav2lip
222 | result = client.predict(
223 | 1, # float (numeric value between 1 and 100) in 'Volume Amplifier' Slider component
224 | api_name="/generate_w2l"
225 | )
226 | output_dir = folder_paths.get_output_directory()
227 | video_path = result["value"]["video"]
228 | new_path = os.path.join(output_dir, project_name, os.path.split(video_path)[-1])
229 | if not os.path.exists(new_path):
230 | shutil.copy(video_path, new_path)
231 | return {"ui": {"video_path": [new_path, project_name]}}
232 | # return (video_path, faceswap_video)
233 |
234 |
235 | # A dictionary that contains all nodes you want to export with their names
236 | # NOTE: names should be globally unique
237 | NODE_CLASS_MAPPINGS = {
238 | "WorkflowLipSync": WorkflowLipSync,
239 | }
240 |
241 | # A dictionary that contains the friendly/humanly readable titles for the nodes
242 | NODE_DISPLAY_NAME_MAPPINGS = {
243 | "WorkflowLipSync": "Workflow LipSync (FlowChain ⛓️)",
244 | }
245 |
--------------------------------------------------------------------------------
/py/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/py/__init__.py
--------------------------------------------------------------------------------
/py/endpoints.py:
--------------------------------------------------------------------------------
1 | import server
2 | from aiohttp import web
3 | import os
4 | import json
5 | import folder_paths
6 | from app.user_manager import UserManager
7 |
8 |
9 | def get_workflow_data(workflow_file):
10 | nodes = {"WorkflowInput": {}, "WorkflowOutput": {}}
11 | # Extraire les nœuds WorkflowInput et WorkflowOutput
12 | # print(workflow_file["nodes"]);
13 | for node in workflow_file["nodes"]:
14 |
15 | node_type = node.get("type")
16 | if node_type in nodes.keys():
17 | # Convertir au format compatible pour le client
18 | node_id = str(node.get("id", "unknown"))
19 | w_values = node.get("widgets_values", [])
20 | inp = []
21 | if node_type == "WorkflowInput":
22 | if len(w_values) < 3:
23 | inp = [w_values[0], node.get("inputs",[])[0].get("type", "")]
24 | if len(w_values) == 2:
25 | inp.append(w_values[1])
26 | else:
27 | inp = w_values
28 |
29 | elif node_type == "WorkflowOutput":
30 | #print(node.get("outputs",[]))
31 | #print(w_values)
32 | if ('Name' in w_values):
33 | inp = [w_values['Name']['value'], node.get("outputs",[])[0].get("type", "*")]
34 | elif len(w_values) < 3:
35 | inp = [w_values[0], node.get("outputs",[])[0].get("type", "*")]
36 | if len(w_values) == 2:
37 | inp.append(w_values[1])
38 | else:
39 | inp = w_values
40 | #print(inp)
41 | nodes[node_type][node_id] = {
42 | "class_type": node_type,
43 | "inputs": inp
44 | }
45 | if type(node.get("pos")) is list:
46 | nodes[node_type][node_id]["position"] = node["pos"][1]
47 | else:
48 | nodes[node_type][node_id]["position"] = node["pos"]['1']
49 | # sort by position
50 | nodes_input = dict(sorted(nodes["WorkflowInput"].items(), key=lambda item: item[1]["position"]))
51 | nodes_output = dict(sorted(nodes["WorkflowOutput"].items(), key=lambda item: item[1]["position"]))
52 | return {"inputs": nodes_input,
53 | "outputs": nodes_output,
54 | 'workflow': workflow_file}
55 |
56 |
57 | @server.PromptServer.instance.routes.get("/flowchain/workflows")
58 | async def workflows(request):
59 | user = UserManager().get_request_user_id(request)
60 | json_path = os.path.join(folder_paths.user_directory, user, "workflows")
61 | result = {}
62 |
63 | # Vérifier si le répertoire principal existe
64 | if os.path.exists(json_path):
65 | # Utiliser os.walk pour parcourir récursivement tous les sous-répertoires
66 | for root, dirs, files in os.walk(str(json_path)):
67 | for file in files:
68 | # Ne traiter que les fichiers JSON
69 | if file.lower().endswith('.json'):
70 | file_path = os.path.join(root, file)
71 |
72 | try:
73 | with open(file_path, "r", encoding="utf-8") as f:
74 | json_content = json.load(f)
75 | # print(file_path);
76 | relative_path = os.path.relpath(file_path, str(json_path))
77 | if "nodes" in json_content:
78 | file_conf = get_workflow_data(json_content)
79 | if file_conf["inputs"] or file_conf["outputs"]:
80 | result[relative_path] = file_conf
81 |
82 | except json.JSONDecodeError:
83 | # Ignorer les fichiers JSON mal formés
84 | print(f"Ignoring malformed JSON file: {file_path}")
85 | except Exception as e:
86 | print(f"Error processing, probably old format: {str(e)}")
87 | else:
88 | # Créer le répertoire s'il n'existe pas
89 | os.makedirs(json_path)
90 | result["No file in worflows folder"] = {"inputs": {}, "outputs": {}}
91 |
92 | # Si aucun fichier valide n'a été trouvé
93 | if not result:
94 | result["No compatible workflow files found"] = {"inputs": {}, "outputs": {}}
95 |
96 | return web.json_response(result, content_type='application/json')
97 |
98 |
99 | @server.PromptServer.instance.routes.get("/flowchain/workflow")
100 | async def workflow(request):
101 | user = UserManager().get_request_user_id(request)
102 | result = {}
103 | original_path = request.query.get("workflow_path")
104 | unversal_path = original_path.replace("\\", "/")
105 | json_path = unversal_path.split("/")
106 | if ".json" in json_path[-1]:
107 | # file_name = json_path[-1]
108 | json_path = folder_paths.user_directory + "/" + user + "/workflows/" + unversal_path
109 |
110 | if os.path.exists(json_path):
111 | with open(json_path, "r", encoding="utf-8") as f:
112 | json_content = json.load(f)
113 | if "nodes" in json_content:
114 | result = get_workflow_data(json_content)
115 | else:
116 | result = {"error": "File not found"}
117 | else:
118 | result = {"error": "File not found"}
119 |
120 | return web.json_response(result, content_type='application/json')
121 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "comfyui-flowchain"
3 | description = "Convert your workflows into node and chain them."
4 | version = "1.0.0"
5 | license = {file = "LICENSE"}
6 | dependencies = ["gradio_client==0.8.0"]
7 |
8 | [project.urls]
9 | Repository = "https://github.com/numz/Comfyui-FlowChain"
10 | # Used by Comfy Registry https://comfyregistry.org
11 |
12 | [tool.comfy]
13 | PublisherId = "numz"
14 | DisplayName = "Comfyui-FlowChain"
15 | Icon = ""
16 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | gradio_client==0.8.0
--------------------------------------------------------------------------------
/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/numz/Comfyui-FlowChain/2b8a852708ac4cf8506c79bc9af4d0c0b617e597/utils/__init__.py
--------------------------------------------------------------------------------
/utils/utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import shutil
3 | import subprocess
4 |
5 |
6 | def ffmpeg_suitability(path):
7 | try:
8 | version = subprocess.run([path, "-version"], check=True,
9 | capture_output=True).stdout.decode("utf-8")
10 | except:
11 | return 0
12 | score = 0
13 | # rough layout of the importance of various features
14 | simple_criterion = [("libvpx", 20), ("264", 10), ("265", 3),
15 | ("svtav1", 5), ("libopus", 1)]
16 | for criterion in simple_criterion:
17 | if version.find(criterion[0]) >= 0:
18 | score += criterion[1]
19 | # obtain rough compile year from copyright information
20 | copyright_index = version.find('2000-2')
21 | if copyright_index >= 0:
22 | copyright_year = version[copyright_index + 6:copyright_index + 9]
23 | if copyright_year.isnumeric():
24 | score += int(copyright_year)
25 | return score
26 |
27 | def get_ffmpeg_path():
28 | if "VHS_FORCE_FFMPEG_PATH" in os.environ:
29 | ffmpeg_path = os.environ.get("VHS_FORCE_FFMPEG_PATH")
30 | else:
31 | ffmpeg_paths = []
32 | try:
33 | from imageio_ffmpeg import get_ffmpeg_exe
34 |
35 | imageio_ffmpeg_path = get_ffmpeg_exe()
36 | ffmpeg_paths.append(imageio_ffmpeg_path)
37 | except:
38 | if "VHS_USE_IMAGEIO_FFMPEG" in os.environ:
39 | raise
40 |
41 | if "VHS_USE_IMAGEIO_FFMPEG" in os.environ:
42 | ffmpeg_path = imageio_ffmpeg_path
43 | else:
44 | system_ffmpeg = shutil.which("ffmpeg")
45 | if system_ffmpeg is not None:
46 | ffmpeg_paths.append(system_ffmpeg)
47 | if os.path.isfile("ffmpeg"):
48 | ffmpeg_paths.append(os.path.abspath("ffmpeg"))
49 | if os.path.isfile("ffmpeg.exe"):
50 | ffmpeg_paths.append(os.path.abspath("ffmpeg.exe"))
51 | if len(ffmpeg_paths) == 0:
52 |
53 | ffmpeg_path = None
54 | elif len(ffmpeg_paths) == 1:
55 | # Evaluation of suitability isn't required, can take sole option
56 | # to reduce startup time
57 | ffmpeg_path = ffmpeg_paths[0]
58 | else:
59 | ffmpeg_path = max(ffmpeg_paths, key=ffmpeg_suitability)
60 |
61 | return ffmpeg_path
62 |
63 |
--------------------------------------------------------------------------------
/web/js/constants.js:
--------------------------------------------------------------------------------
1 | export const colors = [
2 | "#222222",
3 | "#5940bb",
4 | "#AFFFFF",
5 | "#7cbb1a",
6 | "#29699c",
7 | "#777788",
8 | "#268bd2",
9 | "#2ab7ca",
10 | "#d33682",
11 | "#dc322f",
12 | "#facfad",
13 | "#77ff77",
14 | "#6940bb",
15 | "#165481",
16 | "#176974",
17 | "#851f50",
18 | "#911e1c",
19 | "#9f826b",
20 | "#499f49",
21 | "#392978",
22 | ];
23 | export const bg_colors = [
24 | "#000000",
25 | "#392978",
26 | "#59888d",
27 | "#496c12",
28 | "#19466a",
29 | "#4b4b56",
30 | "#165481",
31 | "#176974",
32 | "#851f50",
33 | "#911e1c",
34 | "#9f826b",
35 | "#499f49",
36 | "#192978",
37 | "#065481",
38 | "#076974",
39 | "#551f50",
40 | "#611e1c",
41 | "#7f826b",
42 | "#199f49",
43 | "#092978",
44 | ];
45 | export const node_type_list = [
46 | "none",
47 | "IMAGE",
48 | "MASK",
49 | "STRING",
50 | "INT",
51 | "FLOAT",
52 | "LATENT",
53 | "CLIP",
54 | "CONDITIONING",
55 | "MODEL",
56 | "VAE",
57 | "BOOLEAN",
58 | "SWITCH",
59 | "AUDIO",
60 | "DICT",
61 | "IMAGE_PATH",
62 | "AUDIO_PATH",
63 | "VIDEO_PATH",
64 | "AUDIO/VIDEO_PATH",
65 | "DOC_PATH",
66 | "PROMPT",
67 | ];
68 | export const string_widget = [
69 | "STRING",
70 | "text",
71 | "IMAGE_PATH",
72 | "AUDIO_PATH",
73 | "VIDEO_PATH",
74 | "AUDIO/VIDEO_PATH",
75 | "DOC_PATH",
76 | "PROMPT",
77 | ];
78 |
--------------------------------------------------------------------------------
/web/js/inputs.js:
--------------------------------------------------------------------------------
1 | import { addWidgets, convertToInput, getDefaultOptions } from "./widgets.js";
2 | import { string_widget } from "./constants.js";
3 |
4 | export function createColor(type, bg = false) {
5 | // Generate a consistent color based on the type string
6 | const hash = type.split("").reduce((acc, char) => {
7 | return char.charCodeAt(0) + ((acc << 5) - acc);
8 | }, 0);
9 |
10 | // Convert hash to RGB values
11 | let r = (hash & 0xff0000) >> 16;
12 | let g = (hash & 0x00ff00) >> 8;
13 | let b = hash & 0x0000ff;
14 |
15 | // For background colors, make them lighter
16 | // For background colors, make them darker by reducing RGB values
17 |
18 | if (bg) {
19 | const darkenFactor = 0.7; // Adjust this value to control darkness
20 | r = Math.floor(r * darkenFactor);
21 | g = Math.floor(g * darkenFactor);
22 | b = Math.floor(b * darkenFactor);
23 | return `#${r.toString(16).padStart(2, "0")}${g
24 | .toString(16)
25 | .padStart(2, "0")}${b.toString(16).padStart(2, "0")}`;
26 | } else {
27 | return `#${r.toString(16).padStart(2, "0")}${g
28 | .toString(16)
29 | .padStart(2, "0")}${b.toString(16).padStart(2, "0")}`;
30 | }
31 | }
32 |
33 | export function cleanInputs(root_obj, start_index = 2, reset_value = true) {
34 | for (let i = 0; i < root_obj.outputs.length; i++) {
35 | const output = root_obj.outputs[i];
36 | if (output.links && output.links.length) {
37 | // Make a copy of the links array because it will be modified during disconnection
38 | const links = output.links.slice();
39 | for (const linkId of links) {
40 | root_obj.graph.removeLink(linkId);
41 | }
42 | }
43 | }
44 |
45 | // Same for inputs
46 | for (let i = 0; i < root_obj.inputs.length; i++) {
47 | const input = root_obj.inputs[i];
48 | if (input.link) {
49 | root_obj.graph.removeLink(input.link);
50 | }
51 | }
52 |
53 | root_obj.widgets = root_obj.widgets.splice(0, start_index);
54 | if (reset_value) {
55 | root_obj.widgets_values = [];
56 | const max_node_output = root_obj.outputs.length;
57 | for (let i = 0; i < max_node_output; i++) root_obj.removeOutput(0);
58 | }
59 |
60 | const max_node_input = root_obj.inputs.length;
61 | for (let i = 0; i < max_node_input; i++) root_obj.removeInput(0);
62 | }
63 |
64 | export function clearInputs(root_obj, reset_value = true, splice = 2) {
65 | //nodeData.input.required = {};
66 |
67 | if (!root_obj.widgets_values) root_obj.widgets_values = [];
68 |
69 | // Déconnecter tous les liens d'abord pour les entrées
70 | for (let i = 0; i < root_obj.outputs.length; i++) {
71 | const output = root_obj.outputs[i];
72 | if (output.links && output.links.length) {
73 | const links = output.links.slice();
74 | for (const linkId of links) {
75 | root_obj.graph.removeLink(linkId);
76 | }
77 | }
78 | }
79 |
80 | // Parcourir les entrées en sens inverse pour éviter les problèmes d'indice
81 | for (let i = root_obj.inputs.length - 1; i >= 0; i--) {
82 | const input = root_obj.inputs[i];
83 | if ((input.name === "default" || input.name === "input") && input.link) {
84 | root_obj.graph.removeLink(input.link);
85 | }
86 | }
87 |
88 | for (let i = 0; i < root_obj.inputs.length; i++) {
89 | if (
90 | root_obj.inputs[i].name == "default" ||
91 | root_obj.inputs[i].name == "input"
92 | ) {
93 | root_obj.removeInput(i);
94 | }
95 | }
96 |
97 | root_obj.widgets = root_obj.widgets.splice(0, splice);
98 | if (reset_value) {
99 | for (let key in root_obj.widgets_values) {
100 | if (key != "Name" && key != "type") {
101 | delete root_obj.widgets_values[key];
102 | }
103 | }
104 | }
105 |
106 | const max_node_input = root_obj.outputs.length;
107 | for (let i = root_obj.outputs.length - 1; i >= 0; i--)
108 | root_obj.removeOutput(i);
109 | /*
110 | if (root_obj.graph) {
111 | root_obj.graph.setDirtyCanvas(true);
112 | root_obj.graph.change();
113 | }*/
114 | }
115 |
116 | export function addOutputs(root_obj, workflow_name) {
117 | const outputs = app.lipsync_studio[workflow_name].outputs;
118 | for (let [key, value] of Object.entries(outputs)) {
119 | let name = "";
120 | let type = "";
121 | if (value.inputs.Name) {
122 | name = value.inputs.Name.value;
123 | type = value.inputs.type.value;
124 | } else {
125 | name = value.inputs[0];
126 | type = value.inputs[1];
127 | }
128 |
129 | const isoutput = root_obj.outputs.find((i) => i.name == name);
130 | if (!isoutput) {
131 | root_obj.addOutput(name, type);
132 | }
133 | }
134 | // remove out not in outputs
135 | const all_outputs = Object.entries(outputs).map(([key, value]) => value);
136 | for (let i = 0; i < root_obj.outputs.length; i++) {
137 | if (!all_outputs.find((out) => out.inputs[0] == root_obj.outputs[i].name)) {
138 | root_obj.removeOutput(i);
139 | }
140 | }
141 | organizeOutputs(root_obj, workflow_name);
142 | }
143 |
144 | export function addInputs(node, inputs, widgets_values) {
145 | // Phase 1: Préparer les données triées et simplifiées
146 | const mapped_input = Object.entries(inputs)
147 | .sort(([, a], [, b]) => a.position - b.position)
148 | .map(([, input], index) => {
149 | let val = "";
150 | let type = input.inputs[1];
151 | if (input.inputs[2]?.values?.length > 0) {
152 | type = "COMBO";
153 | val = input.inputs[1];
154 | } else {
155 | val = input.inputs.length > 2 ? input.inputs[2] : undefined;
156 | }
157 | return {
158 | name: input.inputs[0],
159 | type: type,
160 | value: val,
161 | orderIndex: index, // Ajouter un index d'ordre basé sur la position
162 | options: type == "COMBO" ? input.inputs[2] : undefined,
163 | };
164 | });
165 |
166 | // Séparer les inputs pour un traitement différencié
167 | const widget_inputs = mapped_input.filter(
168 | (input) => input.value !== undefined && input.value !== null
169 | );
170 | // replace value in widget_inputs with the value in widgets_values
171 | if (widgets_values) {
172 | for (let i = 0; i < widget_inputs.length; i++) {
173 | const widget_input = widget_inputs[i];
174 | const widget_value = widgets_values[2 + i];
175 | //find name in mapped_input and replace value
176 | if (widget_value)
177 | for (let j = 0; j < mapped_input.length; j++)
178 | if (mapped_input[j].name == widget_input.name) {
179 | mapped_input[j].value = widget_value;
180 | break;
181 | }
182 | }
183 | }
184 |
185 | const pure_inputs = mapped_input.filter(
186 | (input) => input.value === undefined || input.value === null
187 | );
188 |
189 | // S'assurer que le nœud a un objet local_input_defs
190 | if (!node.local_input_defs) {
191 | node.local_input_defs = {
192 | required: {},
193 | optional: {},
194 | };
195 | }
196 |
197 | // Phase 2: Ajouter les nouveaux widgets/inputs avec leur index d'ordre
198 | for (const input of mapped_input) {
199 | const { name, type, value, orderIndex, options } = input;
200 | let isWidget = node.widgets.find((w) => w.name === name);
201 | let isinput = node.inputs.filter((i) => i.name === name);
202 |
203 | const seen = {};
204 | const indexesToRemove = [];
205 | node.inputs.forEach((input, idx) => {
206 | if (seen[input.name] !== undefined) {
207 | indexesToRemove.push(idx);
208 | } else {
209 | seen[input.name] = idx;
210 | }
211 | });
212 | for (let i = indexesToRemove.length - 1; i >= 0; i--) {
213 | node.removeInput(indexesToRemove[i]);
214 | }
215 | isinput = node.inputs.find((i) => i.name === name);
216 |
217 | if (!isWidget && (!isinput || isinput.widget)) {
218 | if (value !== undefined) {
219 |         // Add a widget with its value and its order index
220 | let widget_param = { value, type, options };
221 | if (string_widget.includes(type))
222 | widget_param = { value, type: "STRING" };
223 | addWidgets(node, name, widget_param, app);
224 | isWidget = node.widgets.find((w) => w.name === name);
225 | if (isWidget) {
226 |           if (orderIndex !== undefined) isWidget.orderIndex = orderIndex; // store the order index
227 | }
228 |
229 | if (isinput && !isinput.widget) {
230 | convertToInput(node, isWidget);
231 |           // Preserve the order index on the converted input
232 |           const newInput = node.inputs.find((i) => i.name === name);
233 |           if (newInput) {
234 |             if (orderIndex !== undefined) newInput.orderIndex = orderIndex;
235 |             newInput.pos = isWidget.pos;
236 |           }
236 | }
237 | } else {
238 |         // This is a pure input (no widget value)
239 | if (!isinput) {
240 | node.addInput(name, type);
241 | const newInput = node.inputs[node.inputs.length - 1];
242 |           newInput.orderIndex = orderIndex; // store the order index
243 | }
244 | }
245 | } else {
246 |       // Update the order index of the existing elements
247 | if (isWidget) isWidget.orderIndex = orderIndex;
248 | if (isinput) isinput.orderIndex = orderIndex;
249 | }
250 | }
251 |
252 |   // Phase 3: Update the types where needed
253 | for (const input of mapped_input) {
254 | const { name, type, value } = input;
255 | let isWidget = node.widgets.find((w) => w.name === name);
256 | let isinput = node.inputs.find((i) => i.name === name);
257 |
258 | if (isWidget || isinput) {
259 | const localType = node.local_input_defs?.required[name] || null;
260 | const typeToUse = string_widget.includes(type) ? "STRING" : type;
261 |       // Check whether the type differs and update it
262 | if (localType && localType[0] !== typeToUse) {
263 | if (isinput) {
264 | if (isinput.link) {
265 | node.graph.removeLink(isinput.link);
266 | }
267 | isinput.type = typeToUse;
268 | }
269 |
270 | if (isWidget) {
271 | isWidget.type = typeToUse;
272 | const options = getDefaultOptions(typeToUse, value);
273 | node.local_input_defs.required[name] = [typeToUse, options];
274 | isWidget.options = options;
275 | isWidget.value = value !== undefined ? value : isWidget.value;
276 | }
277 | }
278 | }
279 | }
280 |
281 |   // Phase 4: Reorder the widgets according to their order index
282 |   // Keep the two system widgets (the first two) apart from the widgets to sort
283 | const systemWidgets = node.widgets.slice(0, 2);
284 | const sortableWidgets = node.widgets.slice(2);
285 |
286 |   // Sort the widgets by order index
287 | sortableWidgets.sort((a, b) => {
288 | if (a.orderIndex === undefined) return 1;
289 | if (b.orderIndex === undefined) return -1;
290 | return a.orderIndex - b.orderIndex;
291 | });
292 |
293 |   // Rebuild the widgets array
294 | node.widgets = [...systemWidgets, ...sortableWidgets];
295 |
296 |   // Now adjust the widgets' visual positions
297 | for (let i = 2; i < node.widgets.length; i++) {
298 | node.widgets[i].y =
299 | node.widgets[i - 1].y +
300 | (node.widgets[i - 1].computedHeight || LiteGraph.NODE_WIDGET_HEIGHT);
301 | node.widgets[i].last_y = node.widgets[i].y;
302 | }
303 |
304 |   // Phase 5: Reorder the pure inputs
305 | for (let i = 0; i < pure_inputs.length; i++) {
306 | const targetInput = pure_inputs[i];
307 | const actualIndex = node.inputs.findIndex(
308 | (input) => input.name === targetInput.name
309 | );
310 |
311 | if (actualIndex !== -1) {
312 | // const actualIndex = node.inputs.indexOf(pureNodeInputs[inputIndex]);
313 | const expected_position = i;
314 |
315 | if (actualIndex !== expected_position) {
316 |       // Temporarily disconnect the link if there is one
317 | let link = null;
318 | if (node.inputs[actualIndex].link) {
319 | link = node.graph.links[node.inputs[actualIndex].link];
320 | node.graph.removeLink(node.inputs[actualIndex].link);
321 | }
322 |
323 |       // Save the important properties
324 | const inputToMove = node.inputs[actualIndex];
325 | const inputName = inputToMove.name;
326 | const inputType = inputToMove.type;
327 | const inputWidget = inputToMove.widget;
328 | const inputPos = inputToMove.pos;
329 |
330 |       // Remove the input
331 | node.removeInput(actualIndex);
332 |
333 |       // Re-add it at the expected position
334 | node.addInput(
335 | inputName,
336 | inputType,
337 | { pos: inputPos, widget: inputWidget },
338 | expected_position
339 | );
340 |
341 |       // Reconnect the link if needed
342 | if (link && link.origin_id !== null) {
343 | const newIndex = node.inputs.findIndex(
344 | (input) => input.name === targetInput.name
345 | );
346 | link.target_slot = newIndex;
347 | node.graph.links[link.id] = link;
348 | node.inputs[newIndex].link = link.id;
349 | }
350 | }
351 | }
352 | }
353 |
354 |   // Phase 6: Make sure the widget-backed inputs always sit at the end of the list
355 | const inputsWithWidgets = [];
356 | const inputsWithoutWidgets = [];
357 |
358 |   // 1. Split the inputs into two groups
359 | for (let i = 0; i < node.inputs.length; i++) {
360 | if (node.inputs[i].widget) {
361 | inputsWithWidgets.push(i);
362 | } else {
363 | inputsWithoutWidgets.push(i);
364 | }
365 | }
366 |
367 |   // 2. If some widget-backed inputs are not already at the end, reorder them
368 | if (
369 | inputsWithWidgets.length > 0 &&
370 | inputsWithWidgets[0] < inputsWithoutWidgets[inputsWithoutWidgets.length - 1]
371 | ) {
372 |     // Move every widget-backed input to the end of the list
373 |     for (let i = 0; i < inputsWithWidgets.length; i++) {
374 |       const currentIndex = inputsWithWidgets[i] - i; // adjust the index, the list shifts after each removal
375 |       const currentWidget = node.inputs[currentIndex];
376 |       // Save the input's information and its link
377 |       let link = null;
378 |       if (currentWidget.link) {
379 |         link = node.graph.links[currentWidget.link];
380 |         node.graph.removeLink(currentWidget.link);
381 |       }
385 |
386 |       // Save the important properties
387 | const inputToMove = currentWidget;
388 | const inputName = inputToMove.name;
389 | const inputType = inputToMove.type;
390 | const inputWidget = inputToMove.widget;
391 | const inputPos = inputToMove.pos;
392 |
393 | node.removeInput(currentIndex);
394 | node.addInput(inputName, inputType, {
395 | pos: inputPos,
396 | widget: inputWidget,
397 | });
398 |
399 |       // Reconnect the link if needed
400 | if (link && link.origin_id !== null) {
401 | const newIndex = node.inputs.findIndex(
402 | (input) => input.name === inputName
403 | );
404 | node.graph.links[link.id].target_slot = newIndex;
405 | node.inputs[newIndex].link = link.id;
406 | }
407 | /*if (link && link.origin_id !== null) {
408 | const newIndex = node.inputs.findIndex(input => input.name === inputName);
409 | link.target_slot = newIndex;
410 | node.graph.links[link.id] = link;
411 | node.inputs[newIndex].link = link.id;
412 | }*/
413 | }
414 |
415 |     // Refresh the canvas
416 | /*
417 | if (node.graph) {
418 | node.graph.setDirtyCanvas(true);
419 | }*/
420 | }
421 |
422 |   // Refresh the canvas if needed
423 | if (node.graph) {
424 | node.graph.setDirtyCanvas(false, true);
425 | //node.graph.afterChange();
426 | //node.graph.connectionChange(node);
427 | }
428 | }
429 |
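    | // organizeOutputs(node, workflow_name) reorders the node's output slots so they match the
    | // `position` field stored in app.lipsync_studio[workflow_name].outputs, carrying existing
    | // links along with the slot they were attached to.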
430 | function organizeOutputs(node, workflow_name) {
431 | const outputs = app.lipsync_studio[workflow_name].outputs;
432 |
433 |   // Extract the outputs and sort them by position
434 |   const sortedOutputs = Object.entries(outputs)
436 | .sort(([, a], [, b]) => a.position - b.position)
437 | .map(([, output]) => {
438 | let name = "";
439 | let type = "";
440 | if (output.inputs.length === undefined) {
441 | name = output.inputs.Name.value;
442 | type = output.inputs.type.value;
443 | } else {
444 | name = output.inputs[0];
445 | type = output.inputs[1];
446 | }
447 | return { name, type };
448 | });
449 |   // Reorder the outputs according to the computed order
450 | for (let i = 0; i < sortedOutputs.length; i++) {
451 | const targetOutput = sortedOutputs[i];
452 | const actualIndex = node.outputs.findIndex(
453 | (output) => output.name === targetOutput.name
454 | );
455 |
456 | if (actualIndex !== -1 && actualIndex !== i) {
457 |       // Save the existing links
458 | let links = [];
459 | if (
460 | node.outputs[actualIndex].links &&
461 | node.outputs[actualIndex].links.length > 0
462 | ) {
463 | for (const linkId of node.outputs[actualIndex].links) {
464 | const linkInfo = node.graph.links[linkId];
465 | if (linkInfo) {
466 | links.push({
467 | id: linkId,
468 | origin_id: linkInfo.origin_id,
469 | origin_slot: linkInfo.origin_slot,
470 | target_id: linkInfo.target_id,
471 | target_slot: linkInfo.target_slot,
472 | });
473 |             // Disconnect temporarily
474 | node.graph.removeLink(linkId);
475 | }
476 | }
477 | }
478 |
479 |       // Save the important properties
480 | const outputToMove = node.outputs[actualIndex];
481 | const outputName = outputToMove.name;
482 | const outputType = outputToMove.type;
483 |
484 |       // Remove the output
485 | node.removeOutput(actualIndex);
486 |
487 |       // Re-add it at the expected position
488 | node.addOutput(outputName, outputType, i);
489 | const newOutput = node.outputs.findIndex(
490 | (output) => output.name === outputName
491 | );
492 |
493 |       // Reconnect the saved links
494 | for (const link of links) {
495 | const targetNode = node.graph.getNodeById(link.target_id);
496 | node.connect(newOutput, targetNode, link.target_slot);
497 | }
498 | }
499 | }
500 |
501 |   // Refresh the canvas
502 | if (node.graph) {
503 | node.graph.setDirtyCanvas(true);
504 | }
505 | }
506 |
507 | export function removeInputs(node, inputs, widgets_values) {
508 |   // Set of valid input names built from the inputs definition
509 | const validInputNames = new Set();
510 | for (let [key, value] of Object.entries(inputs)) {
511 | if (value.inputs && value.inputs[0]) {
512 | validInputNames.add(value.inputs[0]);
513 | }
514 | }
515 |
516 |   // Names to preserve no matter what
517 | const preserveNames = new Set(["workflows", "workflow"]);
518 |
519 |   // Collect the current input names
520 | const currentInputNames = new Set();
521 | for (let input of node.inputs)
522 | if (!preserveNames.has(input.name)) currentInputNames.add(input.name);
523 |
524 |   // Collect the current widget names (starting at index 2)
525 | const currentWidgetNames = new Set();
526 | for (let i = 2; i < node.widgets.length; i++)
527 | if (!preserveNames.has(node.widgets[i].name))
528 | currentWidgetNames.add(node.widgets[i].name);
529 |
530 |   // Identify the elements to remove (present in current but not in valid)
531 | const inputsToRemove = [...currentInputNames].filter(
532 | (name) => !validInputNames.has(name)
533 | );
534 | const widgetsToRemove = [...currentWidgetNames].filter(
535 | (name) => !validInputNames.has(name)
536 | );
537 |
538 |   // Remove the identified inputs (iterating in reverse)
539 | if (inputsToRemove.length > 0)
540 | for (let i = node.inputs.length - 1; i >= 0; i--)
541 | if (inputsToRemove.includes(node.inputs[i].name)) {
542 |         // Disconnect the link if there is one
543 | if (node.inputs[i].link != null)
544 | node.graph.removeLink(node.inputs[i].link);
545 | node.removeInput(i);
546 | }
547 |
548 |   // Remove the identified widgets (iterating in reverse)
549 | if (widgetsToRemove.length > 0)
550 | for (let i = node.widgets.length - 1; i >= 2; i--)
551 | if (widgetsToRemove.includes(node.widgets[i].name)) {
552 |         const widgetName = node.widgets[i].name;
553 | node.widgets.splice(i, 1);
554 | widgets_values.splice(i, 1);
555 | if (
556 | node.local_input_defs &&
557 | node.local_input_defs.required[widgetName]
558 | ) {
559 | delete node.local_input_defs.required[widgetName];
560 | }
561 | // change y and last_y of widgets
562 | for (let j = i; j < node.widgets.length; j++) {
563 | node.widgets[j].y -= node.widgets[j].computedHeight;
564 | node.widgets[j].last_y -= node.widgets[j].computedHeight;
565 | }
566 | }
567 | }
568 |
--------------------------------------------------------------------------------
/web/js/jsnodes.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../../scripts/app.js";
2 | import { api } from "../../../scripts/api.js";
3 | import { setupWorkflowNode } from "./nodetype_workflow.js";
4 | import { setupInputNode } from "./nodetype_input.js";
5 | import { setupContinueNode } from "./nodetype_continue.js";
6 | import { setupOutputNode } from "./nodetype_output.js";
7 | import { setupLipSyncNode } from "./nodetype_lipSync.js";
8 | //import JSZip from "jszip";
9 |
10 | function addFileToZip(zip, file, name) {
11 | zip.file(name, file);
12 | return zip;
13 | }
14 |
15 | async function recursiveZip(nodes, zipInstance) {
16 | for (const node of nodes) {
17 |     // Make sure widgets_values exists and has at least one element.
18 | if (!node.widgets_values || node.widgets_values.length === 0) {
19 | console.warn(
20 | "Node in recursiveZip does not have widgets_values or it's empty:",
21 | node
22 | );
23 | continue;
24 | }
25 | const path = node.widgets_values[0];
26 | if (!path || typeof path !== "string") {
27 | console.warn(
28 | "Node path is invalid in recursiveZip:",
29 | path,
30 | "for node:",
31 | node
32 | );
33 | continue;
34 | }
35 |
36 | try {
37 | console.log(`Fetching workflow for path: ${path}`);
38 | const response = await api.fetchApi(
39 | "/flowchain/workflow?workflow_path=" + encodeURIComponent(path)
40 | );
41 |
42 | if (!response.ok) {
43 | const errorText = await response.text();
44 | console.error(
45 | `Failed to fetch workflow ${path}: ${response.status} ${response.statusText}. Server response: ${errorText}`
46 | );
47 |         // You may want to inform the user here, for example by adding an error entry to the ZIP
48 | zipInstance.file(
49 | `ERROR_fetching_${path.replace(/[^a-zA-Z0-9._-]/g, "_")}.txt`,
50 | `Failed to fetch: ${response.status} ${response.statusText}\n${errorText}`
51 | );
52 |         continue; // move on to the next node
53 | }
54 |
55 | const jsonFile = await response.json();
56 | console.log(`Successfully fetched workflow: ${path}`, jsonFile);
57 |
58 |       // Make sure the file name is valid and ends with .json
59 |       // Replace characters that are invalid in a file name with _
60 | //const safePath = path.replace(/[^a-zA-Z0-9/._-]/g, "_");
61 | //const fileNameInZip = path.endsWith(".json") ? path : `${safePath}.json`;
62 |
63 | zipInstance.file(path, JSON.stringify(jsonFile, null, 2));
64 | console.log(`Added ${path} to zip.`);
65 |
66 | if (jsonFile && jsonFile.workflow && jsonFile.workflow.nodes) {
67 | const subWorkflowNodes = jsonFile.workflow.nodes.filter(
68 |           (n) => n.type === "Workflow" // 'n' to avoid shadowing 'node' from the outer loop
69 | );
70 | if (subWorkflowNodes.length > 0) {
71 | console.log(
72 | `Found ${subWorkflowNodes.length} sub-workflow nodes in ${path}. Recursing...`
73 | );
74 |           await recursiveZip(subWorkflowNodes, zipInstance); // wait for the recursive call
75 | }
76 | } else {
77 | console.warn(
78 | `Workflow data for ${path} is not in the expected format or has no nodes.`,
79 | jsonFile
80 | );
81 | }
82 | } catch (error) {
83 | console.error(`Error processing node with path ${path}:`, error);
84 | zipInstance.file(
85 | `ERROR_processing_${path.replace(/[^a-zA-Z0-9._-]/g, "_")}.txt`,
86 | `Error during processing: ${error.message}\n${error.stack}`
87 | );
88 | }
89 | }
90 |   return zipInstance; // JSZip instances are mutable, but returning one is good practice.
91 | }
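    | // Usage sketch (this mirrors the "Export FlowChain (Zip)" handler below): collect the
    | // top-level "Workflow" nodes from the serialized graph and let recursiveZip pull every
    | // nested workflow JSON into the archive.
    | //   const nodes = app.graph.serialize().nodes.filter((n) => n.type === "Workflow");
    | //   let zip = new JSZip();
    | //   zip = await recursiveZip(nodes, zip);
    | //   const blob = await zip.generateAsync({ type: "blob" });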
92 | function addCustomMenu() {
93 | if (!app.graph) {
94 | requestAnimationFrame(addCustomMenu);
95 | return;
96 | }
97 | const Workflow_menu = document.getElementsByClassName("p-menubar-submenu")[0];
98 |
99 | if (!Workflow_menu) {
100 |     console.warn(
101 |       "Target menu '.p-menubar-submenu' was not found. The 'Export FlowChain (Zip)' entry will not be added."
102 |     );
103 |     // Consider a more robust way to attach this menu entry if the target is not found.
104 | return;
105 | }
106 |
107 | const div = document.createElement("div");
108 |   div.className = "p-menubar-item-content"; // Make sure this class and the following ones exist, or adapt them
109 |
110 | const icon = document.createElement("span");
111 | icon.className = "p-menubar-item-icon pi pi-download";
112 |
113 | const text = document.createElement("span");
114 | text.textContent = "Export FlowChain (Zip)";
115 | text.className = "p-menubar-item-label";
116 |
117 | const link = document.createElement("a");
118 | link.appendChild(icon);
119 | link.appendChild(text);
120 | link.className = "p-menubar-item-link";
121 | link.href = "#";
122 |   link.style.color = "white"; // Or use a CSS class
123 |
124 | link.onclick = async () => {
125 | try {
126 | if (typeof JSZip === "undefined") {
127 | alert(
128 | "JSZip library is not loaded. Please include it in your project (e.g., from cdnjs.cloudflare.com/ajax/libs/jszip/3.10.1/jszip.min.js) or ensure it is loaded by ComfyUI."
129 | );
130 | console.error("JSZip is not defined. Please load the JSZip library.");
131 | return;
132 | }
133 |
134 | const workflow = app.graph.serialize();
135 | const workflowNodes = workflow.nodes.filter(
136 | (node) => node.type === "Workflow"
137 | );
138 |
139 | let zip = new JSZip();
140 | console.log("Starting recursive zip process for main workflow...");
141 |       zip = await recursiveZip(workflowNodes, zip); // wait for the recursive function to complete
142 | console.log("Recursive zip process finished.");
143 |
144 | const workflowJson = JSON.stringify(workflow, null, 2);
145 | zip.file("main_workflow.json", workflowJson); // Ajoute le workflow principal
146 | console.log("Added main_workflow.json to zip.");
147 |
148 | const zipContent = await zip.generateAsync({ type: "blob" });
149 | console.log("Zip content generated.");
150 |
151 | const now = new Date();
152 | const timestamp = `${now.getFullYear()}${(now.getMonth() + 1)
153 | .toString()
154 | .padStart(2, "0")}${now.getDate().toString().padStart(2, "0")}_${now
155 | .getHours()
156 | .toString()
157 | .padStart(2, "0")}${now.getMinutes().toString().padStart(2, "0")}${now
158 | .getSeconds()
159 | .toString()
160 | .padStart(2, "0")}`;
161 | const zipFilename = `FlowChain_Workflow_${timestamp}.zip`;
162 |
163 | const downloadLink = document.createElement("a");
164 | downloadLink.href = URL.createObjectURL(zipContent);
165 | downloadLink.download = zipFilename;
166 | document.body.appendChild(downloadLink);
167 | downloadLink.click();
168 | document.body.removeChild(downloadLink);
169 | URL.revokeObjectURL(downloadLink.href);
170 |
171 | console.log("Workflow exporté avec succès :", zipFilename);
172 | } catch (error) {
173 | console.error("Erreur lors de l'exportation du workflow en ZIP:", error);
174 | alert(
175 | "Une erreur est survenue lors de l'exportation du workflow. Vérifiez la console pour plus de détails."
176 | );
177 | }
178 | };
179 |
180 | div.appendChild(link);
181 | const topMenuDiv = document.createElement("li");
182 | topMenuDiv.id = "pv_id_10_0_9";
183 | topMenuDiv.className = "p-menubar-item relative";
184 | topMenuDiv.role = "menuitem";
185 | topMenuDiv.ariaLabel = "Export FlowChain (Zip)";
186 |   // Maybe style `topMenuDiv` or `link` directly if the PrimeNG classes
187 |   // are not available globally
188 | topMenuDiv.appendChild(div);
189 | Workflow_menu.appendChild(topMenuDiv);
190 | }
191 |
192 | app.registerExtension({
193 | name: "FlowChain.jsnodes",
194 | async beforeRegisterNodeDef(nodeType, nodeData, app) {
195 | if (!nodeData?.category?.startsWith("FlowChain")) {
196 | return;
197 | }
198 | switch (nodeData.name) {
199 | case "Workflow":
200 | setupWorkflowNode(nodeType);
201 | break;
202 | case "WorkflowInput":
203 | setupInputNode(nodeType);
204 | break;
205 | case "WorkflowContinue":
206 | setupContinueNode(nodeType);
207 | break;
208 | case "WorkflowOutput":
209 | setupOutputNode(nodeType);
210 | break;
211 | case "WorkflowLipSync":
212 | setupLipSyncNode(nodeType, nodeData, app);
213 | break;
214 | }
215 | },
216 | async setup(app) {
217 |     // This code runs when the extension is initialized by ComfyUI
218 | addCustomMenu();
219 | },
220 | async init(app) {
221 | api
222 | .fetchApi("/flowchain/workflows")
223 | .then((response) => response.json())
224 | .then((data) => {
225 | app.lipsync_studio = data;
226 | })
227 | .catch((error) => {
228 | console.error("Error:", error);
229 | throw error;
230 | });
231 |
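    |     // Patch LGraphCanvas.removeNode so that every input/output link of a node is removed
    |     // from the graph before the node itself is deleted; this keeps FlowChain's dynamic
    |     // sockets from leaving dangling link entries behind.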
232 | const origRemoveNode = LGraphCanvas.prototype.removeNode;
233 | LGraphCanvas.prototype.removeNode = function (node) {
234 | if (node && node.inputs && node.outputs) {
235 |         // Make sure every link is disconnected before removing the node
236 | for (let i = 0; i < node.inputs.length; i++) {
237 | const input = node.inputs[i];
238 | if (input.link != null) {
239 | this.graph.removeLink(input.link);
240 | }
241 | }
242 | for (let i = 0; i < node.outputs.length; i++) {
243 | const output = node.outputs[i];
244 | if (output.links && output.links.length) {
245 |             const links = output.links.slice(); // copy to avoid issues while mutating
246 | for (const linkId of links) {
247 | this.graph.removeLink(linkId);
248 | }
249 | }
250 | }
251 | }
252 |       // Call the original method
253 | return origRemoveNode.call(this, node);
254 | };
255 | },
256 | });
257 |
--------------------------------------------------------------------------------
/web/js/nodetype_continue.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../../scripts/app.js";
2 | import { chainCallback } from "./utils.js";
3 | import { clearInputs } from "./inputs.js";
4 | import { addWidgets } from "./widgets.js";
5 | import { colors, bg_colors, node_type_list } from "./constants.js";
6 |
7 | function initialisation(node) {
8 | node.widgets[0].callback = (value) => {
9 | clearInputs(node);
10 | node.addOutput("output", value);
11 | node.addInput("input", value);
12 | node.color = colors[node_type_list.indexOf(value)];
13 | node.bgcolor = bg_colors[node_type_list.indexOf(value)];
14 | };
15 | node.color = colors[node_type_list.indexOf("none")];
16 | node.bgcolor = bg_colors[node_type_list.indexOf("none")];
17 | }
18 |
19 | function configure(info) {
20 | let widgetDict = info.widgets_values;
21 | if (info.widgets_values.length == undefined) {
22 | for (let w of this.widgets) {
23 | if (w.name in widgetDict) {
24 | w.value = widgetDict[w.name].value;
25 | }
26 | }
27 |     // make sure every entry of widgetDict has a matching widget and input on the node
28 | for (let [key, value] of Object.entries(widgetDict)) {
29 | let widget = this.widgets.find((w) => w.name === key);
30 | let isInput = this.inputs.find((i) => i.name === key);
31 | let type = this.widgets.find((w) => w.name === "type");
32 | if (!widget) {
33 | addWidgets(this, key, value, app);
34 | widget = this.widgets.find((w) => w.name === key);
35 | }
36 | if (!isInput) {
37 |         this.addInput(key, type ? type.value : "*", { widget: { name: key } });
38 | }
39 |
40 | //this.widgets.push(value);
41 | widget.options = info.widgets_values[key].options;
42 | widget.value = info.widgets_values[key].value;
43 | //if value exists in inputs
44 | /*
45 | for (let input of this.inputs)
46 | if (input.name == key) {
47 | //find if key exists in inputs array in inputs.Name
48 | if (info.widgets_values[key].type != "converted-widget") {
49 | this.removeInput(this.inputs.indexOf(input));
50 | }
51 | break;
52 | }*/
53 | }
54 | }
55 |
56 | if (info.outputs_values != undefined) {
57 | // deep copy outputs
58 | if (this.id == -1) {
59 | this.outputs[0] = {
60 | links: null,
61 | name: info.outputs_values.name,
62 | type: info.outputs_values.type,
63 | };
64 | } else {
65 | this.outputs[0] = { ...info.outputs_values };
66 | }
67 | }
68 | this.setSize(info.size);
69 | }
70 |
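    | // serialize() below stores widgets_values as an object keyed by widget name instead of the
    | // usual array, e.g. (illustrative sketch, values are not from a real graph):
    | //   { Name: { name: "Name", value: "my_signal", type: "text", ... },
    | //     type: { name: "type", value: "IMAGE", type: "combo", ... } }
    | // configure() above detects this shape via `info.widgets_values.length == undefined`.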
71 | function serialize(info) {
72 | info.widgets_values = {};
73 | if (!this.widgets) {
74 | return;
75 | }
76 |
77 | for (let w of this.widgets) {
78 | info.widgets_values[w.name] = {
79 | name: w.name,
80 | options: w.options,
81 | value: w.value,
82 | type: w.type,
83 | origType: w.origType,
84 | last_y: w.last_y,
85 | };
86 | }
87 | for (let w of this.inputs) {
88 | // if w.name exists in info.widgets_values
89 | if (info.widgets_values[w.name]) {
90 | if (info.widgets_values[w.name].type == "converted-widget") {
91 | if (info.widgets_values[w.name].origType == "toggle") {
92 | w.type = "BOOLEAN";
93 | } else if (info.widgets_values[w.name].origType == "combo") {
94 | w.type = "COMBO";
95 | }
96 | }
97 | }
98 | }
99 | for (let w of this.inputs) {
100 | if (w.name == "input") {
101 | w.type = info.widgets_values.type.value;
102 | }
103 | }
104 | if (this.outputs.length > 0) {
105 | if (this.outputs[0].links == null) {
106 | info.outputs_values = {
107 | links: null,
108 | name: this.outputs[0].name,
109 | type: this.outputs[0].type,
110 | };
111 | } else {
112 | info.outputs_values = {
113 | links: [...this.outputs[0].links],
114 | name: this.outputs[0].name,
115 | slot_index: this.outputs[0].slot_index,
116 | type: this.outputs[0].type,
117 | };
118 | }
119 | }
120 | this.setSize(info.size);
121 | }
122 |
123 | export function setupContinueNode(nodeType) {
124 | nodeType.prototype.onNodeCreated = function () {
125 | chainCallback(this, "onConfigure", configure);
126 | chainCallback(this, "onSerialize", serialize);
127 | initialisation(this);
128 | };
129 | }
130 |
--------------------------------------------------------------------------------
/web/js/nodetype_input.js:
--------------------------------------------------------------------------------
1 | import { chainCallback } from "./utils.js";
2 | import { ComfyWidgets } from "../../../scripts/widgets.js";
3 | import { createColor } from "./inputs.js";
4 | import { app } from "../../../scripts/app.js"; // needed by the ComfyWidgets.* calls below
5 | import { string_widget } from "./constants.js";
6 |
7 | // Graph-independent part of initialization
8 | function initialisation_preGraph(node) {
9 | if (!node.widgets || node.widgets.length < 1) {
10 | console.error(
11 | "Node widgets not properly initialized for callback setup.",
12 | node.type,
13 | node.id
14 | );
15 | return;
16 | }
17 |
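    |   // When the WorkflowInput's output socket gets connected, this handler reads the type of
    |   // the slot it was connected to (INT, FLOAT, BOOLEAN, COMBO, STRING-like, or anything else),
    |   // rebuilds the "default" widget accordingly, records the definition in
    |   // node.local_input_defs.required["default"], and recolors the node via createColor().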
18 | node.onConnectionsChange = function (
19 | slotType, //1 = input, 2 = output
20 | slot,
21 | isChangeConnect,
22 | link_info,
23 | output
24 | ) {
25 | if (link_info && node.graph && slotType == 2 && isChangeConnect) {
26 | const fromNode = node.graph._nodes.find(
27 | (otherNode) => otherNode.id == link_info.origin_id
28 | );
29 |
30 | if (
31 | fromNode &&
32 | fromNode.inputs &&
33 | fromNode.inputs[link_info.origin_slot]
34 | ) {
35 | /*
36 | if (node.graph) {
37 | clearInputs(node, true, 1);
38 | }
39 | */
40 |
41 | const type = link_info.type;
42 | if (type !== "*") {
43 | const other_node = node.graph._nodes.find(
44 | (otherNode) => otherNode.id == link_info.target_id
45 | );
46 |
47 | const node_input = other_node.inputs[link_info.target_slot];
48 | let widget_input = null;
49 | let options = {};
50 | if (node_input && node_input.widget) {
51 | if ("name" in node_input.widget) {
52 | widget_input = other_node.widgets.find(
53 | (w) => w.name == node_input.widget.name
54 | );
55 | if (widget_input && "options" in widget_input) {
56 | options = widget_input.options;
57 | }
58 | }
59 | }
60 | node.widgets = node.widgets.splice(0, 1);
61 | if (node.widgets_values) {
62 | if (node.widgets_values.length == 3 && type !== "COMBO") {
63 | node.widgets_values = [
64 | node.widgets_values[0],
65 | node.widgets_values[2],
66 | ];
67 | } else {
68 | if (type !== "COMBO") {
69 | node.widgets_values = node.widgets_values.splice(0, 2);
70 | }
71 | }
72 | }
73 | if (node.inputs[2]) {
74 | node.removeInput(2);
75 | }
76 | if (node.widgets[0].value === "") {
77 | if (node.widgets_values && node.widgets_values.length == 2) {
78 | node.widgets[0].value = node.widgets_values[0];
79 | } else {
80 | node.widgets[0].value = node_input ? node_input.name : "";
81 | }
82 | }
83 | switch (type) {
84 | case "INT":
85 | node.outputs[0].type = "INT";
86 | ComfyWidgets.INT(node, "default", ["INT", options], app);
87 | if (node.widgets_values && node.widgets_values.length == 2) {
88 | node.widgets[1].value = node.widgets_values[1];
89 | } else {
90 | node.widgets[1].value = widget_input ? widget_input.value : "";
91 | }
92 | node.widgets[1].options = options;
93 |
94 | node.inputs[0].type = node_input ? node_input.type : "*";
95 | if (
96 | !("widget" in node.inputs[0]) ||
97 | node.inputs[0].widget == undefined
98 | ) {
99 | node.inputs[0].widget = { name: "default" };
100 | } else {
101 | node.inputs[0].widget.name = "default";
102 | }
103 |
104 | node.local_input_defs.required["default"] = ["INT", options];
105 | break;
106 | case "FLOAT":
107 | node.outputs[0].type = "FLOAT";
108 |
109 | ComfyWidgets.FLOAT(node, "default", ["FLOAT", options], app);
110 | if (node.widgets_values && node.widgets_values.length == 2) {
111 | node.widgets[1].value = node.widgets_values[1];
112 | } else {
113 | node.widgets[1].value = widget_input ? widget_input.value : "";
114 | }
115 | node.widgets[1].options = options;
116 | node.inputs[0].type = node_input ? node_input.type : "*";
117 | if (
118 | !("widget" in node.inputs[0]) ||
119 | node.inputs[0].widget == undefined
120 | ) {
121 | node.inputs[0].widget = { name: "default" };
122 | } else {
123 | node.inputs[0].widget.name = "default";
124 | }
125 |
126 | node.local_input_defs.required["default"] = ["FLOAT", options];
127 | break;
128 | case "BOOLEAN":
129 | node.outputs[0].type = "BOOLEAN";
130 | node.addWidget("toggle", "default", false, () => {});
131 | if (node.widgets_values && node.widgets_values.length == 2) {
132 | node.widgets[1].value = node.widgets_values[1];
133 | } else {
134 | node.widgets[1].value = widget_input ? widget_input.value : "";
135 | }
136 | node.widgets[1].options = options;
137 | node.inputs[0].type = node_input ? node_input.type : "*";
138 | if (
139 | !("widget" in node.inputs[0]) ||
140 | node.inputs[0].widget == undefined
141 | ) {
142 | node.inputs[0].widget = { name: "default" };
143 | } else {
144 | node.inputs[0].widget.name = "default";
145 | }
146 |
147 | node.local_input_defs.required["default"] = ["BOOLEAN", false];
148 | break;
149 | case "COMBO":
150 | node.outputs[0].type = "COMBO";
151 | ComfyWidgets.COMBO(node, "default", ["COMBO", options], app);
152 | if (node.widgets_values && node.widgets_values.length == 3) {
153 | node.widgets[1].value = node.widgets_values[1];
154 | } else {
155 | node.widgets[1].value = widget_input ? widget_input.value : "";
156 | }
157 |             if (options.values && options.values.length > 0) {
158 | node.widgets[1].options = options;
159 | } else {
160 | if (node.widgets_values && node.widgets_values.length == 3) {
161 | options = node.widgets_values[2];
162 | node.widgets[1].options = options;
163 | }
164 | }
165 | node.inputs[0].type = node_input ? node_input.type : "*";
166 | if (
167 | !("widget" in node.inputs[0]) ||
168 | node.inputs[0].widget == undefined
169 | ) {
170 | node.inputs[0].widget = { name: "default" };
171 | } else {
172 | node.inputs[0].widget.name = "default";
173 | }
174 |
175 | node.local_input_defs.required["default"] = ["COMBO", options];
176 | break;
177 | case "none":
178 | if (
179 | node.outputs &&
180 | node.outputs.find((o) => o.name === "output")
181 | ) {
182 | const outputIndex = node.outputs.findIndex(
183 | (o) => o.name === "output"
184 | );
185 | if (outputIndex !== -1) node.removeOutput(outputIndex);
186 | }
187 | break;
188 |
189 | default:
190 | if (string_widget.includes(type)) {
191 | node.outputs[0].type = "STRING";
192 | ComfyWidgets.STRING(
193 | node,
194 | "default",
195 | ["STRING", { default: "" }],
196 | app
197 | );
198 |
199 | if (node.widgets_values && node.widgets_values.length == 2) {
200 | node.widgets[1].value = node.widgets_values[1];
201 | } else {
202 | node.widgets[1].value = widget_input
203 | ? widget_input.value
204 | : "";
205 | }
206 | node.widgets[1].options = options;
207 | node.inputs[0].type = node_input ? node_input.type : "*";
208 | if (
209 | !("widget" in node.inputs[0]) ||
210 | node.inputs[0].widget == undefined
211 | ) {
212 | node.inputs[0].widget = { name: "default" };
213 | } else {
214 | node.inputs[0].widget.name = "default";
215 | }
216 |
217 | node.local_input_defs.required["default"] = [
218 | "STRING",
219 | { default: "" },
220 | ];
221 | } else {
222 | node.outputs[0].type = type;
223 | //node.widgets = node.widgets.splice(0, 1);
224 | //node.widgets_values = node.widgets_values.splice(0, 1);
225 |
226 | node.inputs[0].type = node_input ? node_input.type : "*";
227 | if (
228 | !("widget" in node.inputs[0]) ||
229 | node.inputs[0].widget == undefined
230 | ) {
231 | node.inputs[0].widget = undefined;
232 | }
233 |
234 | node.local_input_defs.required["default"] = [type, null];
235 | }
236 | break;
237 | }
238 | node.color = createColor(type);
239 | //node.color = colors[node_type_list.indexOf(type)];
240 | node.bgcolor = createColor(type, true);
241 | }
242 | //node.bgcolor = bg_colors[node_type_list.indexOf(type)];
243 | //node.outputs[0].name = type;
244 | } else {
245 |         console.warn("node output undefined");
246 | }
247 | } else {
248 | if (!isChangeConnect) {
249 | if (this.outputs[0].links && this.outputs[0].links.length == 0) {
250 | //this.inputs[0].type = "*";
251 | this.outputs[0].type = "*";
252 | this.widgets[0].value = "";
253 | this.widgets = this.widgets.splice(0, 1);
254 | if (this.widgets_values) {
255 | this.widgets_values = this.widgets_values.splice(0, 1);
256 | }
257 | this.inputs[0].type = "*";
258 | this.inputs[0].widget = undefined;
259 | this.inputs[0].pos = undefined;
260 | }
261 | }
262 | }
263 | //Update either way
264 | //node.update();
265 | };
266 | /*
267 | node.widgets[1].callback = (value) => {
268 | if (node.graph) {
269 | clearInputs(node);
270 | }
271 |
272 | switch (value) {
273 | case "INT":
274 | node.addOutput("output", "INT");
275 | ComfyWidgets.INT(
276 | node,
277 | "default",
278 | ["INT", { default: 0, min: 0, max: 18446744073709551616, step: 1 }],
279 | app
280 | );
281 | node.addInput("default", "INT", { widget: { name: "default" } });
282 | node.local_input_defs.required["default"] = [
283 | "INT",
284 | { default: 0, min: 0, max: 18446744073709551616, step: 1 },
285 | ];
286 | break;
287 | case "FLOAT":
288 | node.addOutput("output", "FLOAT");
289 | ComfyWidgets.FLOAT(
290 | node,
291 | "default",
292 | ["FLOAT", { default: 0, min: 0.0, max: 2048.0, step: 0.01 }],
293 | app
294 | );
295 | node.addInput("default", "FLOAT", { widget: { name: "default" } });
296 | node.local_input_defs.required["default"] = [
297 | "FLOAT",
298 | { default: 0, min: 0.0, max: 2048.0, step: 0.01 },
299 | ];
300 | break;
301 | case "BOOLEAN":
302 | node.addOutput("output", "BOOLEAN");
303 | node.addWidget("toggle", "default", false, () => {});
304 | node.addInput("default", "BOOLEAN", { widget: { name: "default" } });
305 | node.local_input_defs.required["default"] = ["BOOLEAN", false];
306 | break;
307 | case "none":
308 | if (node.outputs && node.outputs.find((o) => o.name === "output")) {
309 | const outputIndex = node.outputs.findIndex(
310 | (o) => o.name === "output"
311 | );
312 | if (outputIndex !== -1) node.removeOutput(outputIndex);
313 | }
314 | break;
315 | default:
316 | if (string_widget.includes(value)) {
317 | node.addOutput("output", "STRING");
318 | ComfyWidgets.STRING(
319 | node,
320 | "default",
321 | ["STRING", { default: "" }],
322 | app
323 | );
324 | node.addInput("default", "STRING", { widget: { name: "default" } });
325 | node.local_input_defs.required["default"] = [
326 | "STRING",
327 | { default: "" },
328 | ];
329 | } else {
330 | node.addOutput("output", value);
331 | node.local_input_defs.required["default"] = [value, null];
332 | }
333 | break;
334 | }
335 | node.color = colors[node_type_list.indexOf(value)];
336 | node.bgcolor = bg_colors[node_type_list.indexOf(value)];
337 | };
338 | */
339 | node.color = createColor("none");
340 | node.bgcolor = createColor("none", true);
341 | }
342 |
343 | // Graph-dependent part of initialization
344 | function initialisation_onAdded(node) {
345 | if (!node.widgets || node.widgets.length < 1) {
346 | return;
347 | }
348 | //if (node.widgets[1].value === "none") {
349 | // clearInputs(node); // This needs node.graph, which is available in onAdded
350 | //}
351 | }
352 |
353 | function configure(info) {
354 | if (
355 | info.widgets_values.length == 3 &&
356 | (!this.widgets[1] || (this.widgets[1] && this.widgets[1].type !== "combo"))
357 | ) {
358 | info.widgets_values = [info.widgets_values[0], info.widgets_values[2]];
359 | }
360 | //info.widgets_values = [info.widgets_values[0], info.widgets_values[1]];
361 | if (info.widgets_values.length == 2 && this.widgets.length == 2) {
362 | this.widgets[1].value = info.widgets_values[1];
363 | }
364 |
365 | //const inputs = {};
366 | //inputs["default"] = {
367 | // inputs: ["default", info.widgets_values[1], info.widgets_values[2]],
368 | //};
369 |
370 | //addInputs(this, inputs);
371 | }
372 |
373 | function serialize(info) {
374 | // Add check for this.local_input_defs
375 | if (
376 | !this.inputs ||
377 | !this.local_input_defs ||
378 | !this.local_input_defs.required
379 | ) {
380 | return; // Exit early if the required structures don't exist
381 | }
382 |
383 | for (let inp of this.inputs) {
384 | if (inp.widget) {
385 | // Check that the required path exists before accessing it
386 | if (
387 | this.local_input_defs.required[inp.name] &&
388 | this.local_input_defs.required[inp.name][0] !== undefined &&
389 | inp.type !== this.local_input_defs.required[inp.name][0]
390 | ) {
391 | inp.type = this.local_input_defs.required[inp.name][0];
392 | const wid = this.widgets.find((w) => w.name === inp.name);
393 | if (
394 | wid &&
395 | wid.origType !== this.local_input_defs.required[inp.name][0]
396 | ) {
397 | wid.origType = this.local_input_defs.required[inp.name][0];
398 | }
399 | }
400 | }
401 | }
402 |
403 | if (this.inputs[0].type == "COMBO") {
404 | if (this.widgets[1].options.values.length > 0) {
405 | info.widgets_values[1] = this.widgets[1].value;
406 | info.widgets_values[2] = this.widgets[1].options;
407 | }
408 | }
409 | }
410 |
411 | export function setupInputNode(nodeType) {
412 | const originalOnAdded = nodeType.prototype.onAdded;
413 | nodeType.prototype.onAdded = function (graph) {
414 | if (originalOnAdded) {
415 | originalOnAdded.apply(this, arguments);
416 | }
417 | initialisation_onAdded(this);
418 | };
419 |
420 | nodeType.prototype.onNodeCreated = function () {
421 | this.local_input_defs = this.local_input_defs || { required: {} };
422 |
423 | initialisation_preGraph(this);
424 |
425 | chainCallback(this, "onConfigure", configure);
426 | chainCallback(this, "onSerialize", serialize);
427 | };
428 | }
429 |
--------------------------------------------------------------------------------
/web/js/nodetype_lipSync.js:
--------------------------------------------------------------------------------
1 | import { useKVState } from "./utils.js";
2 | import { chainCallback } from "./utils.js";
3 | import { addLoadVideoCommon } from "./videoPreview.js";
4 |
5 | export function setupLipSyncNode(nodeType, nodeData, app) {
6 | useKVState(nodeType);
 7 |   chainCallback(nodeType.prototype, "onNodeCreated", function () {
 8 |     let new_widgets = [];
 9 |     if (this.widgets) {
10 |       for (let w of this.widgets) {
11 |         let input = this.constructor.nodeData.input;
12 |         let config = input?.required?.[w.name] ?? input?.optional?.[w.name];
13 |         if (!config) {
14 |           continue;
15 |         }
16 |         if (w?.type == "text" && config[1].vhs_path_extensions) {
17 |           new_widgets.push(app.widgets.VHSPATH({}, w.name, ["VHSPATH", config[1]]));
18 |         } else {
19 |           new_widgets.push(w);
20 |         }
21 |       }
22 |       this.widgets = new_widgets;
23 |     }
24 |   });
25 |   addLoadVideoCommon(nodeType, nodeData);
26 |   const onGetImageSizeExecuted = nodeType.prototype.onExecuted;
27 |   nodeType.prototype.onExecuted = function (message) {
28 |     const r = onGetImageSizeExecuted ? onGetImageSizeExecuted.apply(this, arguments) : undefined;
29 |     const video = message?.["video_path"]?.[0]; // video_path is expected to be [filename, subfolder]
30 |     if (video) {
31 |       this.updateParameters({ format: "video/mp4", filename: message["video_path"][0], subfolder: message["video_path"][1], type: "output" });
32 |     }
33 |     return r;
34 |   };
35 | }
--------------------------------------------------------------------------------
/web/js/nodetype_output.js:
--------------------------------------------------------------------------------
1 | import { chainCallback } from "./utils.js";
2 | import { createColor } from "./inputs.js";
3 |
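    | // Mirrors the WorkflowInput handler: when something is connected to the WorkflowOutput's
    | // input, the handler copies the source socket's type onto outputs[0], syncs the Name
    | // widget, and recolors the node via createColor().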
4 | function initialisation(node) {
5 | node.onConnectionsChange = function (
6 | slotType, //1 = input, 2 = output
7 | slot,
8 | isChangeConnect,
9 | link_info,
10 | output
11 | ) {
12 | if (link_info && node.graph && slotType == 1 && isChangeConnect) {
13 | const fromNode = node.graph._nodes.find(
14 | (otherNode) => otherNode.id == link_info.target_id
15 | );
16 |
17 | if (fromNode) {
18 | const other_node = node.graph._nodes.find(
19 | (otherNode) => otherNode.id == link_info.origin_id
20 | );
21 |
22 | const node_output = other_node.outputs[link_info.origin_slot];
23 | if (node_output) {
24 | const type = node_output.type;
25 | node.widgets = node.widgets.splice(0, 1);
26 | if (node.widgets_values && node.widgets_values.length === undefined) {
27 | node.widgets_values = [node.widgets_values.Name.value];
28 | } else {
29 | node.widgets_values = [node_output.name];
30 | }
31 | if (node.inputs[2]) {
32 | node.removeInput(1);
33 | }
34 | if (node.widgets[0].value === "") {
35 | if (node.widgets_values && node.widgets_values.length == 1) {
36 | node.widgets[0].value = node.widgets_values[0];
37 | } else {
38 | node.widgets[0].value = node_output.name;
39 | }
40 | }
41 |
42 | node.outputs[0].type = type;
43 | //node.widgets = node.widgets.splice(0, 1);
44 | //node.widgets_values = node.widgets_values.splice(0, 1);
45 |
46 | //node.inputs[0].type = node_input.type;
47 | if (
48 | !("widget" in node.inputs[0]) ||
49 | node.inputs[0].widget == undefined
50 | ) {
51 | node.inputs[0].widget = undefined;
52 | }
53 |
54 | //node.local_input_defs.required["default"] = [type, null];
55 | node.color = createColor(type);
56 | node.bgcolor = createColor(type, true);
57 | }
58 | }
59 | } else {
60 | if (!isChangeConnect) {
61 | node.inputs[1].type = "*";
62 | node.outputs[0].type = "*";
63 | node.widgets[0].value = "";
64 |
65 | node.widgets = this.widgets.splice(0, 1);
66 | if (node.widgets_values) {
67 | node.widgets_values = [""];
68 | }
69 | }
70 | }
71 | //Update either way
72 | //node.update();
73 | };
74 |
75 | /*
76 | node.widgets[1].callback = ( value ) => {
77 |     // First, disconnect all existing links
78 | for (let i = 0; i < node.outputs.length; i++) {
79 | const output = node.outputs[i];
80 | if (output.links && output.links.length) {
81 | const links = output.links.slice();
82 | for (const linkId of links) {
83 | node.graph.removeLink(linkId);
84 | }
85 | }
86 | }
87 |
88 | for (let i = 0; i < node.inputs.length; i++) {
89 | const input = node.inputs[i];
90 | if (input.link) {
91 | node.graph.removeLink(input.link);
92 | }
93 | }
94 | clearInputs(node);
95 | switch(value){
96 | case "none":
97 | break;
98 | default:
99 | node.addOutput("output", value);
100 | node.addInput("default", value);
101 | break;
102 | }
103 | node.color = colors[node_type_list.indexOf(value)];
104 | node.bgcolor = bg_colors[node_type_list.indexOf(value)];
105 | };
106 | */
107 | //if (node.widgets[1].value == "none") clearInputs(node);
108 | node.color = createColor("none");
109 | node.bgcolor = createColor("none", true);
110 | }
111 |
112 | function configure(info) {
113 | if (info.widgets_values.length == undefined) {
114 |     info.widgets_values = [info.widgets_values.Name.value];
116 | }
117 | if (this.inputs.length > 2) {
118 | this.removeInput(1);
119 | }
120 | this.widgets = this.widgets.splice(0, 1);
121 | /*
122 | if (this.inputs[1].link) {
123 | this.onConnectionsChange(
124 | 1,
125 | 1,
126 | true,
127 | this.graph.links[this.inputs[1].link],
128 | this.graph.links[this.inputs[1].link].origin_id
129 | );
130 | }*/
131 | }
132 |
133 | function serialize(info) {
134 | /*
135 | info.widgets_values = {};
136 | if (!this.widgets) {
137 | return;
138 | }
139 |
140 | for (let w of this.widgets) {
141 | info.widgets_values[w.name] = {
142 | name: w.name,
143 | options: w.options,
144 | value: w.value,
145 | type: w.type,
146 | origType: w.origType,
147 | last_y: w.last_y,
148 | };
149 | }
150 |
151 | for (let w of this.inputs) {
152 | // if w.name exists in info.widgets_values
153 | if (info.widgets_values[w.name]) {
154 | if (info.widgets_values[w.name].type == "converted-widget") {
155 | if (info.widgets_values[w.name].origType == "toggle") {
156 | w.type = "BOOLEAN";
157 | } else if (info.widgets_values[w.name].origType == "text") {
158 | w.type = "STRING";
159 | }
160 | }
161 | }
162 | }
163 | if (this.outputs.length > 0) {
164 | if (this.outputs[0].links == null) {
165 | info.outputs_values = {
166 | links: null,
167 | name: this.outputs[0].name,
168 | type: this.outputs[0].type,
169 | };
170 | } else {
171 | info.outputs_values = {
172 | links: [...this.outputs[0].links],
173 | name: this.outputs[0].name,
174 | slot_index: this.outputs[0].slot_index,
175 | type: this.outputs[0].type,
176 | };
177 | }
178 | }
179 | this.setSize(info.size);
180 | */
181 | }
182 |
183 | export function setupOutputNode(nodeType) {
184 | nodeType.prototype.onNodeCreated = function () {
185 | chainCallback(this, "onConfigure", configure);
186 | chainCallback(this, "onSerialize", serialize);
187 | initialisation(this);
188 | };
189 | }
190 |
--------------------------------------------------------------------------------
/web/js/nodetype_workflow.js:
--------------------------------------------------------------------------------
1 | import { chainCallback, fitHeight } from "./utils.js";
2 | import { addInputs, cleanInputs, removeInputs, addOutputs } from "./inputs.js"; // Consolidated imports
3 | import { importWorkflow } from "./workflows.js";
4 | import { hideWidget } from "./widgets.js";
5 | import { app } from "../../../scripts/app.js";
6 | // Graph-independent part of initialization (called onNodeCreated)
7 | function initialisation_preGraph(node) {
8 | node.color = "#004670";
9 | node.bgcolor = "#002942";
10 |
11 | node.onConnectionsChange = function (
12 | slotType, //1 = input, 2 = output
13 | slot,
14 | isChangeConnect,
15 | link_info,
16 | output
17 | ) {
18 | if (link_info && node.graph && slotType == 1 && isChangeConnect) {
19 | const fromNode = node.graph._nodes.find(
20 | (otherNode) => otherNode.id == link_info.target_id
21 | );
22 | if (fromNode) {
23 | const other_node = node.graph._nodes.find(
24 | (otherNode) => otherNode.id == link_info.origin_id
25 | );
26 | if (other_node.type == "WorkflowInput") {
27 | const node_output = other_node.outputs[link_info.origin_slot];
28 | if (node_output) {
29 | const type = node_output.type;
30 | other_node.onConnectionsChange(2, 1, true, link_info, node);
31 | }
32 | }
33 | }
34 | } else if (link_info && node.graph && slotType == 2 && isChangeConnect) {
35 | const fromNode = node.graph._nodes.find(
36 | (otherNode) => otherNode.id == link_info.origin_id
37 | );
38 | if (fromNode) {
39 | const other_node = node.graph._nodes.find(
40 | (otherNode) => otherNode.id == link_info.target_id
41 | );
42 | if (other_node.type == "WorkflowOutput") {
43 | const node_input = other_node.inputs[link_info.target_slot];
44 | if (node_input) {
45 | const type = node_input.type;
46 | other_node.onConnectionsChange(1, 2, true, link_info, node);
47 | }
48 | }
49 | }
50 | }
51 | //Update either way
52 | //node.update();
53 | };
54 |
55 | // Basic widget setup that doesn't depend on the graph
56 | if (node.widgets && node.widgets[0] && node.widgets[0].options) {
57 | if (app && app.lipsync_studio) {
58 | node.widgets[0].options.values = [
59 | "None",
60 | ...Object.keys(app.lipsync_studio),
61 | ];
62 | } else {
63 | console.warn(
64 | "App or lipsync_studio not available during preGraph setup for workflow node."
65 | );
66 | node.widgets[0].options.values = ["None"];
67 | }
68 | }
69 |
70 | const isReloading = node.title && node.title.startsWith("Workflow: ");
71 | if (!isReloading && node.widgets && node.widgets[0]) {
72 | node.widgets[0].value = "None"; // Default to "None"
73 | if (node.widgets[1]) {
74 | node.widgets[1].value = ""; // Clear workflow JSON
75 | }
76 | }
77 |
78 | if (node.widgets && node.widgets[1]) {
79 | hideWidget(node, node.widgets[1], { holdSpace: false });
80 | }
81 |
82 | // Setup the primary widget callback.
83 | // Operations inside this callback that need the graph (like cleanInputs, addInputs, addOutputs)
84 | // will be guarded by checking node.graph.
85 | if (node.widgets && node.widgets[0]) {
86 | node.widgets[0].callback = async (value) => {
87 | if (!node.graph) {
88 | console.warn(
89 | "Workflow widget callback triggered, but node.graph is not yet available. Action might be deferred or skipped."
90 | );
91 | // If critical, you might need a flag to re-process this in onAdded
92 | return;
93 | }
94 |
95 | cleanInputs(node); // Requires node.graph
96 |
97 | if (value === "None") {
98 | node.title = "Workflow (FlowChain ⛓️)";
99 | // Further cleanup of dynamic inputs/outputs might be needed here
100 | // For example, explicitly removing all but the essential widgets/inputs/outputs.
101 | } else if (app && app.lipsync_studio && app.lipsync_studio[value]) {
102 | try {
103 | let workflowJSON = await importWorkflow(node, value, app); // importWorkflow updates node.title
104 | workflowJSON = JSON.parse(workflowJSON);
105 | // Ensure app.lipsync_studio[value] (and its .inputs) is still valid after await
106 | if (app.lipsync_studio[value] && app.lipsync_studio[value].inputs) {
107 | const inputs = app.lipsync_studio[value].inputs;
108 | const outputs = app.lipsync_studio[value].outputs;
109 | for (let [key, value] of Object.entries(inputs)) {
110 | if (
111 | value["inputs"][2]?.values &&
112 | value["inputs"][2].values.length > 0
113 | ) {
114 | workflowJSON[key]["inputs"]["type"] = "COMBO";
115 | } else {
116 | workflowJSON[key]["inputs"]["type"] = value["inputs"][1];
117 | }
118 | }
119 | for (let [key, value] of Object.entries(outputs)) {
120 | if (value["inputs"].length === undefined) {
121 | workflowJSON[key]["inputs"]["type"] =
122 | value["inputs"].type.value;
123 | } else {
124 | workflowJSON[key]["inputs"]["type"] = value["inputs"][1];
125 | }
126 | }
127 | workflowJSON = JSON.stringify(workflowJSON);
128 |
129 | if (node.widgets && node.widgets[1]) {
130 | node.widgets[1].value = workflowJSON;
131 | }
132 |
133 | addInputs(node, inputs, []); // Requires node.graph
134 | addOutputs(node, value); // Requires node.graph
135 | fitHeight(node);
136 | } else {
137 | console.error(
138 | "Workflow data or inputs became unavailable after import for:",
139 | value
140 | );
141 | node.title = "Workflow (FlowChain ⛓️)"; // Reset title on error
142 | }
143 | } catch (error) {
144 | console.error(
145 | "Error processing workflow selection in callback:",
146 | error
147 | );
148 | node.title = "Workflow (FlowChain ⛓️)"; // Reset title on error
149 | }
150 | } else {
151 | node.title = "Workflow (FlowChain ⛓️)";
152 | }
153 | };
154 | }
155 | }
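    | // Sketch of the app.lipsync_studio shape this file relies on (inferred from how it is used
    | // here and in inputs.js; keys, ids and values below are illustrative only):
    | //   app.lipsync_studio["path/to/workflow.json"] = {
    | //     title: "My workflow",
    | //     inputs:  { "12": { position: 0, inputs: ["name", "TYPE", default?, { values: [...] }?] }, ... },
    | //     outputs: { "34": { position: 1, inputs: ["name", "TYPE"] }, ... },
    | //   };
    | // (outputs may also appear in object form: { inputs: { Name: {...}, type: {...} } })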
156 |
157 | // Graph-dependent part of initialization (called onAdded)
158 | function initialisation_onAdded(node) {
159 | if (!node.graph) {
160 | console.error(
161 | "CRITICAL: initialisation_onAdded called for workflow node, but node.graph is not set."
162 | );
163 | return;
164 | }
165 |
166 | // Perform an initial cleanInputs now that we are sure the graph exists.
167 | // This is important if the node was configured (e.g. from graph load) before being added.
168 | cleanInputs(node);
169 |
170 | // If a workflow was already selected (e.g. loading a saved graph),
171 | // ensure its inputs/outputs are correctly set up.
172 | // The widget callback might have already run if `configure` set its value.
173 | // We re-evaluate or trigger necessary setup steps.
174 | if (node.widgets && node.widgets[0]) {
175 | const selectedWorkflow = node.widgets[0].value;
176 | if (selectedWorkflow && selectedWorkflow !== "None") {
177 | if (
178 | app &&
179 | app.lipsync_studio &&
180 | app.lipsync_studio[selectedWorkflow] &&
181 | app.lipsync_studio[selectedWorkflow].inputs
182 | ) {
183 | // If workflow JSON is loaded and inputs are known, ensure UI is consistent
184 | if (
185 | node.widgets[1] &&
186 | (node.widgets[1].value === "" ||
187 | typeof node.widgets[1].value !== "string" ||
188 | !node.widgets[1].value.startsWith("{"))
189 | ) {
190 | // Workflow selected, but JSON not loaded in widget[1] or inputs/outputs not added by callback yet.
191 | // Trigger the callback logic.
192 | console.log(
193 | `Workflow node ${node.id}: Re-evaluating selected workflow '${selectedWorkflow}' onAdded.`
194 | );
195 | node.widgets[0].callback(selectedWorkflow);
196 | } else {
197 | // Workflow JSON likely loaded, ensure inputs/outputs are present
198 | // This can be a fallback if the callback didn't fully setup due to timing
199 | const inputs = app.lipsync_studio[selectedWorkflow].inputs;
200 | const currentWidgetValues = node.widgets_values || []; // from node.configure
201 | addInputs(node, inputs, currentWidgetValues);
202 | addOutputs(node, selectedWorkflow);
203 | removeInputs(node, inputs, currentWidgetValues); // Ensure this logic is sound for onAdded
204 | fitHeight(node);
205 | }
206 | } else if (selectedWorkflow !== "None") {
207 | // Workflow selected, but its definition isn't in app.lipsync_studio. Try to load it.
208 | console.warn(
209 | `Workflow node ${node.id}: '${selectedWorkflow}' selected but not in lipsync_studio. Attempting import via callback.`
210 | );
211 | node.widgets[0].callback(selectedWorkflow); // This will attempt importWorkflow
212 | }
213 | } else {
214 | node.title = "Workflow (FlowChain ⛓️)";
215 | }
216 | }
217 | }
218 |
219 | function configure(info) {
220 | // `this` is the node. Called when loading graph data.
221 | // `info.widgets_values` contains the saved values for widgets.
222 | if (!app || !app.lipsync_studio) {
223 | console.error(
224 | "App or lipsync_studio not available during workflow node configure."
225 | );
226 | return;
227 | }
228 |
229 | // Update widget options first, in case lipsync_studio has changed since last save
230 | if (this.widgets && this.widgets[0] && this.widgets[0].options) {
231 | this.widgets[0].options.values = [
232 | "None",
233 | ...Object.keys(app.lipsync_studio),
234 | ];
235 | }
236 |
237 | let selectedWorkflowName = info.widgets_values
238 | ? info.widgets_values[0]
239 | : "None";
240 |
241 | if (this.widgets && this.widgets[0]) {
242 | this.widgets[0].value = selectedWorkflowName; // Set the widget value from saved data
243 | }
244 | if (
245 | this.widgets &&
246 | this.widgets[1] &&
247 | info.widgets_values &&
248 | info.widgets_values[1]
249 | ) {
250 | this.widgets[1].value = info.widgets_values[1]; // Set the hidden workflow JSON
251 | }
252 |
253 | if (selectedWorkflowName === "None") {
254 | this.title = "Workflow (FlowChain ⛓️)";
255 | if (this.graph) {
256 | // Only clean if graph is available
257 | cleanInputs(this);
258 | }
259 | return;
260 | }
261 |
262 | // If the node is already on a graph, we can proceed with fuller setup.
263 | // If not, onAdded will handle the rest.
264 | if (this.graph) {
265 | let inputs = [];
266 | if (selectedWorkflowName in app.lipsync_studio) {
267 | inputs = app.lipsync_studio[selectedWorkflowName].inputs;
268 | } else {
269 | if (selectedWorkflowName.replaceAll("\\", "/") in app.lipsync_studio) {
270 | selectedWorkflowName = selectedWorkflowName.replaceAll("\\", "/");
271 | info.widgets_values[0] = selectedWorkflowName;
272 | this.widgets[0].value = selectedWorkflowName;
273 | inputs = app.lipsync_studio[selectedWorkflowName].inputs;
274 | } else {
275 | selectedWorkflowName = selectedWorkflowName.replaceAll("/", "\\");
276 | info.widgets_values[0] = selectedWorkflowName;
277 | this.widgets[0].value = selectedWorkflowName;
278 |         inputs = app.lipsync_studio[selectedWorkflowName]?.inputs;
279 | }
280 | }
281 |
282 | if (inputs) {
283 | this.title =
284 | app.lipsync_studio[selectedWorkflowName].title ||
285 | `Workflow: ${selectedWorkflowName}`;
286 | //const inputs = app.lipsync_studio[selectedWorkflowName].inputs;
287 | addInputs(this, inputs, info.widgets_values || []);
288 | addOutputs(this, selectedWorkflowName);
289 | removeInputs(this, inputs, info.widgets_values || []);
290 | fitHeight(this);
291 | importWorkflow(this, selectedWorkflowName, app)
292 | .then((data) => {
293 | if (data) {
294 | const data_json = JSON.parse(data);
295 | const inputs = app.lipsync_studio[selectedWorkflowName].inputs;
296 | const outputs = app.lipsync_studio[selectedWorkflowName].outputs;
297 |
298 | for (let [key, value] of Object.entries(inputs)) {
299 | data_json[key]["inputs"]["type"] = value["inputs"][1];
300 | }
301 | for (let [key, value] of Object.entries(outputs)) {
302 | if (value["inputs"].length === undefined) {
303 | data_json[key]["inputs"]["type"] = value["inputs"].type.value;
304 | } else {
305 | if (
306 | value["inputs"][2]?.values &&
307 | value["inputs"][2].values.length > 0
308 | ) {
309 | data_json[key]["inputs"]["type"] = "COMBO";
310 | } else {
311 | data_json[key]["inputs"]["type"] = value["inputs"][1];
312 | }
313 | }
314 | }
315 | this.widgets[1].value = JSON.stringify(data_json);
316 |
317 | addInputs(this, inputs, info.widgets_values);
318 | addOutputs(this, selectedWorkflowName);
319 | removeInputs(this, inputs, info.widgets_values);
320 | fitHeight(this);
321 | }
322 | })
323 | .catch((error) => {
324 |           console.error("Error during import:", error);
325 | });
326 | } else {
327 | // Data not yet in lipsync_studio, try to import.
328 | // The callback of widget[0] will handle this if triggered by value change,
329 | // or onAdded will pick it up.
330 | // For configure, we might just set the title and let onAdded handle full setup.
331 | this.title = `Workflow: ${selectedWorkflowName} (loading...)`;
332 | // Avoid calling importWorkflow directly here if onAdded will robustly handle it,
333 | // to prevent multiple calls.
334 | }
335 | } else {
336 | // Graph not yet available, title will be set, onAdded will do the heavy lifting.
337 | this.title = `Workflow: ${selectedWorkflowName}`;
338 | }
339 | }
340 |
341 | function serialize(info) {
342 | // Standard serialization of widget values is usually handled by LiteGraph.
343 | // This custom serialize can ensure specific data is captured if needed.
344 | // info.widgets_values will be populated by LiteGraph based on current widget values.
345 |
346 | // If local_input_defs logic is still needed for type adjustments:
347 | if (this.inputs && this.local_input_defs && this.local_input_defs.required) {
348 | for (let inp of this.inputs) {
349 | if (
350 | inp.widget &&
351 | this.local_input_defs.required[inp.name] &&
352 | this.local_input_defs.required[inp.name][0] !== undefined &&
353 | inp.type !== this.local_input_defs.required[inp.name][0]
354 | ) {
355 | inp.type = this.local_input_defs.required[inp.name][0];
356 | }
357 | }
358 | }
359 | }
360 |
361 | export function setupWorkflowNode(nodeType) {
362 | const originalOnAdded = nodeType.prototype.onAdded;
363 | nodeType.prototype.onAdded = function (graph) {
364 | if (originalOnAdded) {
365 | originalOnAdded.apply(this, arguments);
366 | }
367 | initialisation_onAdded(this); // Our graph-dependent setup
368 | };
369 |
370 | const originalOnRemoved = nodeType.prototype.onRemoved;
371 | nodeType.prototype.onRemoved = function () {
372 | // Perform any cleanup specific to this node when removed from graph
373 | // For example, disconnecting callbacks or releasing resources
374 | if (originalOnRemoved) {
375 | originalOnRemoved.apply(this, arguments);
376 | }
377 | };
378 |
379 | nodeType.prototype.onNodeCreated = function () {
380 | // This is one of the first lifecycle hooks.
381 | // Initialize properties that don't depend on the graph.
382 | this.local_input_defs = this.local_input_defs || { required: {} };
383 |
384 | // Add essential widgets if not already present (LiteGraph usually handles this from type definition)
385 | // For a "COMBO" and a "STRING" (hidden)
386 | if (!this.widgets || this.widgets.length < 2) {
387 | this.addWidget("combo", "Workflow", "None", () => {}, {
388 | values: ["None"],
389 | });
390 | this.addWidget("string", "workflow_json", "", () => {}, {
391 | multiline: true,
392 | });
393 | }
394 |
395 | initialisation_preGraph(this); // Our graph-independent setup
396 |
397 | chainCallback(this, "onConfigure", configure);
398 | chainCallback(this, "onSerialize", serialize);
399 | };
400 | }
401 |
--------------------------------------------------------------------------------
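Note on setupWorkflowNode above: the actual extension registration likely lives elsewhere in web/js (e.g. jsnodes.js, not reproduced here). As a minimal sketch, assuming ComfyUI's standard frontend extension API, wiring it up typically looks like this; the extension name is illustrative:

import { app } from "../../../scripts/app.js";
import { setupWorkflowNode } from "./nodetype_workflow.js";

app.registerExtension({
    name: "FlowChain.WorkflowNode.example", // hypothetical name, for illustration only
    async beforeRegisterNodeDef(nodeType, nodeData, app) {
        // Only patch the prototype of the Workflow node type.
        if (nodeData?.name === "Workflow") {
            setupWorkflowNode(nodeType);
        }
    },
});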
/web/js/utils.js:
--------------------------------------------------------------------------------
1 |
2 | export function chainCallback(object, property, callback) {
3 | if (object == undefined) {
4 |         console.error("Tried to add callback to non-existent object")
5 | return;
6 | }
7 | if (property in object) {
8 | const callback_orig = object[property]
9 | object[property] = function () {
10 | const r = callback_orig.apply(this, arguments);
11 | callback.apply(this, arguments);
12 | return r
13 | };
14 | } else {
15 | object[property] = callback;
16 | }
17 | }
18 |
19 | export function useKVState(nodeType) {
20 | chainCallback(nodeType.prototype, "onNodeCreated", function () {
21 | chainCallback(this, "onConfigure", function(info) {
22 | if (!this.widgets) {
23 | //Node has no widgets, there is nothing to restore
24 | return
25 | }
26 | if (typeof(info.widgets_values) != "object") {
27 |             //widgets_values is in some unknown, unusable format
28 | return
29 | }
30 | let widgetDict = info.widgets_values
31 |
32 | if (widgetDict.length == undefined) {
33 | for (let w of this.widgets) {
34 | if (w.name in widgetDict) {
35 | w.value = widgetDict[w.name];
36 | if (w.name == "videopreview") {
37 | w.updateSource();
38 | }
39 | }
40 |
41 | }
42 | }
43 | });
44 | chainCallback(this, "onSerialize", function(info) {
45 | info.widgets_values = {};
46 | if (!this.widgets) {
47 | //object has no widgets, there is nothing to store
48 | return;
49 | }
50 | for (let w of this.widgets) {
51 | info.widgets_values[w.name] = w.value;
52 | }
53 | });
54 | })
55 | }
56 |
57 | export function fitHeight(node) {
58 | node.setSize([node.size[0], node.computeSize([node.size[0], node.size[1]])[1]])
59 | node?.graph?.setDirtyCanvas(true);
60 | }
--------------------------------------------------------------------------------
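A minimal usage sketch for chainCallback and fitHeight above (the node object here is hypothetical): chain an extra handler onto an existing lifecycle callback without clobbering whatever was already installed, then refit the node's height:

import { chainCallback, fitHeight } from "./utils.js";

function decorateNode(node) {
    chainCallback(node, "onExecuted", function (message) {
        // `this` is the node; any pre-existing onExecuted handler has already run.
        console.log("node", this.id, "executed", message);
        fitHeight(this);
    });
}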
/web/js/videoPreview.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../../scripts/app.js";
2 | import { api } from '../../../scripts/api.js'
3 | import {chainCallback, fitHeight} from "./utils.js";
4 |
5 | export function addVideoPreview(nodeType) {
6 | chainCallback(nodeType.prototype, "onNodeCreated", function() {
7 | var element = document.createElement("div");
8 | const previewNode = this;
9 | var previewWidget = this.addDOMWidget("videopreview", "preview", element, {
10 | serialize: false,
11 | hideOnZoom: false,
12 | getValue() {
13 | return element.value;
14 | },
15 | setValue(v) {
16 | element.value = v;
17 | },
18 | });
19 | previewWidget.computeSize = function(width) {
20 | if (this.aspectRatio && !this.parentEl.hidden) {
21 | let height = (previewNode.size[0]-20)/ this.aspectRatio + 10;
22 | if (!(height > 0)) {
23 | height = 0;
24 | }
25 | this.computedHeight = height + 10;
26 | return [width, height];
27 | }
28 | return [width, -4];//no loaded src, widget should not display
29 | }
30 | element.addEventListener('contextmenu', (e) => {
31 | e.preventDefault()
32 | return app.canvas._mousedown_callback(e)
33 | }, true);
34 | element.addEventListener('pointerdown', (e) => {
35 | e.preventDefault()
36 | return app.canvas._mousedown_callback(e)
37 | }, true);
38 | element.addEventListener('mousewheel', (e) => {
39 | e.preventDefault()
40 | return app.canvas._mousewheel_callback(e)
41 | }, true);
42 | previewWidget.value = {hidden: false, paused: false, params: {}}
43 | previewWidget.parentEl = document.createElement("div");
44 | previewWidget.parentEl.className = "vhs_preview";
45 | previewWidget.parentEl.style['width'] = "100%"
46 | element.appendChild(previewWidget.parentEl);
47 | previewWidget.videoEl = document.createElement("video");
48 | previewWidget.videoEl.controls = false;
49 | previewWidget.videoEl.loop = true;
50 | previewWidget.videoEl.muted = true;
51 | previewWidget.videoEl.style['width'] = "100%"
52 | previewWidget.videoEl.addEventListener("loadedmetadata", () => {
53 |
54 | previewWidget.aspectRatio = previewWidget.videoEl.videoWidth / previewWidget.videoEl.videoHeight;
55 | fitHeight(this);
56 | });
57 | previewWidget.videoEl.addEventListener("error", () => {
58 | //TODO: consider a way to properly notify the user why a preview isn't shown.
59 | previewWidget.parentEl.hidden = true;
60 | fitHeight(this);
61 | });
62 | previewWidget.videoEl.onmouseenter = () => {
63 | previewWidget.videoEl.muted = false;
64 | };
65 | previewWidget.videoEl.onmouseleave = () => {
66 | previewWidget.videoEl.muted = true;
67 | };
68 |
69 | previewWidget.imgEl = document.createElement("img");
70 | previewWidget.imgEl.style['width'] = "100%"
71 | previewWidget.imgEl.hidden = true;
72 | previewWidget.imgEl.onload = () => {
73 | previewWidget.aspectRatio = previewWidget.imgEl.naturalWidth / previewWidget.imgEl.naturalHeight;
74 | fitHeight(this);
75 | };
76 |
77 | var timeout = null;
78 | this.updateParameters = (params, force_update) => {
79 | if (!previewWidget.value.params) {
80 |                 if (typeof(previewWidget.value) != 'object') {
81 | previewWidget.value = {hidden: false, paused: false}
82 | }
83 | previewWidget.value.params = {}
84 | }
85 | Object.assign(previewWidget.value.params, params)
86 |             if (timeout) clearTimeout(timeout); timeout = setTimeout(() => previewWidget.updateSource(), 100);
87 | };
88 | previewWidget.updateSource = function () {
89 | if (this.value.params == undefined) {
90 | return;
91 | }
92 | let params = {}
93 | Object.assign(params, this.value.params);//shallow copy
94 | this.parentEl.hidden = this.value.hidden;
95 | if (params.format?.split('/')[0] == 'video' ||
96 | app.ui.settings.getSettingValue("VHS.AdvancedPreviews", false) &&
97 | (params.format?.split('/')[1] == 'gif') || params.format == 'folder') {
98 | this.videoEl.autoplay = !this.value.paused && !this.value.hidden;
99 | let target_width = 256
100 | if (element.style?.width) {
101 | //overscale to allow scrolling. Endpoint won't return higher than native
102 | target_width = element.style.width.slice(0,-2)*2;
103 | }
104 | if (!params.force_size || params.force_size.includes("?") || params.force_size == "Disabled") {
105 | params.force_size = target_width+"x?"
106 | } else {
107 | let size = params.force_size.split("x")
108 | let ar = parseInt(size[0])/parseInt(size[1])
109 | params.force_size = target_width+"x"+(target_width/ar)
110 | }
111 | if (app.ui.settings.getSettingValue("VHS.AdvancedPreviews", false)) {
112 | this.videoEl.src = api.apiURL('/viewvideo?' + new URLSearchParams(params));
113 | } else {
114 | previewWidget.videoEl.src = api.apiURL('/view?' + new URLSearchParams(params));
115 | }
116 | this.videoEl.hidden = false;
117 | this.imgEl.hidden = true;
118 | } else if (params.format?.split('/')[0] == 'image'){
119 | //Is animated image
120 | this.imgEl.src = api.apiURL('/view?' + new URLSearchParams(params));
121 | this.videoEl.hidden = true;
122 | this.imgEl.hidden = false;
123 | }
124 | }
125 | previewWidget.parentEl.appendChild(previewWidget.videoEl)
126 | previewWidget.parentEl.appendChild(previewWidget.imgEl)
127 | });
128 | }
129 |
130 | function addPreviewOptions(nodeType) {
131 | chainCallback(nodeType.prototype, "getExtraMenuOptions", function(_, options) {
132 | let optNew = []
133 | const previewWidget = this.widgets.find((w) => w.name === "videopreview");
134 |
135 | let url = null
136 | if (previewWidget.videoEl?.hidden == false && previewWidget.videoEl.src) {
137 | url = api.apiURL('/view?' + new URLSearchParams(previewWidget.value.params));
138 | url = url.replace('%2503d', '001')
139 | } else if (previewWidget.imgEl?.hidden == false && previewWidget.imgEl.src) {
140 | url = previewWidget.imgEl.src;
141 | url = new URL(url);
142 | }
143 | if (url) {
144 | optNew.push(
145 | {
146 | content: "Open preview",
147 | callback: () => {
148 | window.open(url, "_blank")
149 | },
150 | },
151 | {
152 | content: "Save preview",
153 | callback: () => {
154 | const a = document.createElement("a");
155 | a.href = url;
156 | a.setAttribute("download", new URLSearchParams(previewWidget.value.params).get("filename"));
157 | document.body.append(a);
158 | a.click();
159 | requestAnimationFrame(() => a.remove());
160 | },
161 | }
162 | );
163 | }
164 | const PauseDesc = (previewWidget.value.paused ? "Resume" : "Pause") + " preview";
165 | if(previewWidget.videoEl.hidden == false) {
166 | optNew.push({content: PauseDesc, callback: () => {
167 | if(previewWidget.value.paused) {
168 | previewWidget.videoEl?.play();
169 | } else {
170 | previewWidget.videoEl?.pause();
171 | }
172 | previewWidget.value.paused = !previewWidget.value.paused;
173 | }});
174 | }
175 | //TODO: Consider hiding elements if no video preview is available yet.
176 | //It would reduce confusion at the cost of functionality
177 | //(if a video preview lags the computer, the user should be able to hide in advance)
178 | const visDesc = (previewWidget.value.hidden ? "Show" : "Hide") + " preview";
179 | optNew.push({content: visDesc, callback: () => {
180 | if (!previewWidget.videoEl.hidden && !previewWidget.value.hidden) {
181 | previewWidget.videoEl.pause();
182 | } else if (previewWidget.value.hidden && !previewWidget.videoEl.hidden && !previewWidget.value.paused) {
183 | previewWidget.videoEl.play();
184 | }
185 | previewWidget.value.hidden = !previewWidget.value.hidden;
186 | previewWidget.parentEl.hidden = previewWidget.value.hidden;
187 | fitHeight(this);
188 |
189 | }});
190 | optNew.push({content: "Sync preview", callback: () => {
191 | //TODO: address case where videos have varying length
192 | //Consider a system of sync groups which are opt-in?
193 | for (let p of document.getElementsByClassName("vhs_preview")) {
194 | for (let child of p.children) {
195 | if (child.tagName == "VIDEO") {
196 | child.currentTime=0;
197 | } else if (child.tagName == "IMG") {
198 | child.src = child.src;
199 | }
200 | }
201 | }
202 | }});
203 | if(options.length > 0 && options[0] != null && optNew.length > 0) {
204 | optNew.push(null);
205 | }
206 | options.unshift(...optNew);
207 | });
208 | }
209 | export function addLoadVideoCommon(nodeType, nodeData) {
210 | addVideoPreview(nodeType);
211 | addPreviewOptions(nodeType);
212 | }
213 |
--------------------------------------------------------------------------------
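A sketch of how a node patched with addLoadVideoCommon could feed the preview widget. The message shape below is an assumption modelled on VHS-style video nodes, and nodeType/nodeData are assumed to come from a beforeRegisterNodeDef hook; the real wiring lives in the nodetype_* files above:

import { chainCallback } from "./utils.js";
import { addLoadVideoCommon } from "./videoPreview.js";

addLoadVideoCommon(nodeType, nodeData);
chainCallback(nodeType.prototype, "onExecuted", function (message) {
    const video = message?.gifs?.[0]; // assumed output field, as emitted by VHS-style nodes
    if (video) {
        // updateParameters merges the params and schedules updateSource(),
        // which builds the /view or /viewvideo preview URL.
        this.updateParameters({
            filename: video.filename,
            subfolder: video.subfolder,
            type: video.type,
            format: video.format, // e.g. "video/mp4"
        });
    }
});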
/web/js/widgets.js:
--------------------------------------------------------------------------------
1 | import { ComfyWidgets } from "../../../scripts/widgets.js";
2 |
3 | export function getDefaultOptions(type, value = 0) {
4 | let options = {};
5 | switch (type) {
6 | case "INT":
7 | options = { default: value, min: 0, max: 18446744073709551616, step: 1 };
8 | break;
9 | case "FLOAT":
10 | options = { default: value, min: 0.0, max: 2048.0, step: 0.01 };
11 | break;
12 | case "STRING":
13 | options = { default: value };
14 | break;
15 | case "COMBO":
16 | options = { default: value, values: ["option1", "option2", "option3"] };
17 | break;
18 | default:
19 | options = {};
20 | }
21 | return options;
22 | }
23 |
24 | export function addWidgets(root_obj, field_name, value, app) {
25 | let type = value.type;
26 | if (type == "converted-widget") {
27 | type = value.origType;
28 | }
29 |
30 |   // Make sure the node has a local storage object for its input definitions
31 | if (!root_obj.local_input_defs) {
32 | root_obj.local_input_defs = {
33 | required: {},
34 | optional: {},
35 | };
36 | }
37 |
38 | if ((type == "STRING" || type == "text") && field_name != "workflow") {
39 | ComfyWidgets.STRING(
40 | root_obj,
41 | field_name,
42 | ["STRING", getDefaultOptions("STRING", value.value)],
43 | app
44 | );
45 | root_obj.addInput(field_name, "STRING", { widget: { name: field_name } });
46 |     // Store the definition locally instead of modifying nodeData
47 | root_obj.local_input_defs.required[field_name] = [
48 | "STRING",
49 | getDefaultOptions("STRING", value.value),
50 | ];
51 | }
52 |
53 | if (type == "INT" || type == "number") {
54 | ComfyWidgets.INT(
55 | root_obj,
56 | field_name,
57 | ["INT", getDefaultOptions("INT", value.value)],
58 | app
59 | );
60 |     // Store the definition locally
61 | root_obj.addInput(field_name, "INT", { widget: { name: field_name } });
62 | root_obj.local_input_defs.required[field_name] = [
63 | "INT",
64 | getDefaultOptions("INT", value.value),
65 | ];
66 | }
67 |
68 | if (type == "FLOAT") {
69 | ComfyWidgets.FLOAT(
70 | root_obj,
71 | field_name,
72 | ["FLOAT", getDefaultOptions("FLOAT", value.value)],
73 | app
74 | );
75 | root_obj.addInput(field_name, "FLOAT", { widget: { name: field_name } });
76 |     // Store the definition locally
77 | root_obj.local_input_defs.required[field_name] = [
78 | "FLOAT",
79 | getDefaultOptions("FLOAT", value.value),
80 | ];
81 | }
82 |
83 | if (type == "BOOLEAN" || type == "toggle") {
84 | root_obj.addWidget("toggle", field_name, value.value, () => {});
85 |     // Store the definition locally
86 | root_obj.addInput(field_name, "BOOLEAN", { widget: { name: field_name } });
87 | root_obj.local_input_defs.required[field_name] = [
88 | "BOOLEAN",
89 | getDefaultOptions("BOOLEAN", value.value),
90 | ];
91 | }
92 | if (type == "COMBO") {
93 | ComfyWidgets.COMBO(
94 | root_obj,
95 | field_name,
96 | ["COMBO", getDefaultOptions("COMBO", value.value)],
97 | app
98 | );
99 | root_obj.widgets[root_obj.widgets.length - 1].options = value.options;
100 | root_obj.addInput(field_name, "COMBO", { widget: { name: field_name } });
101 | root_obj.local_input_defs.required[field_name] = ["COMBO", value.options];
102 | }
103 | /*ComfyWidgets.
104 | if (field_name == "workflow"){
105 | root_obj.addWidget("STRING", field_name, value.value, ()=>{});
106 | root_obj.widgets[root_obj.widgets.length - 1].hidden = true;
107 |     // Store the definition locally
108 | root_obj.local_input_defs.required[field_name] = ["STRING", {}];
109 | }*/
110 | if (type == "IMAGE") root_obj.addInput(field_name, "IMAGE");
111 | if (type == "LATENT") root_obj.addInput(field_name, "LATENT");
112 | if (type == "MODEL") root_obj.addInput(field_name, "MODEL");
113 | if (type == "CLIP") root_obj.addInput(field_name, "CLIP");
114 | if (type == "MASK") root_obj.addInput(field_name, "MASK");
115 | if (type == "CONDITIONING") root_obj.addInput(field_name, "CONDITIONING");
116 | if (type == "VAE") root_obj.addInput(field_name, "VAE");
117 | }
118 |
119 | export function hideWidget(node, widget, options = {}) {
120 | const { suffix = "", holdSpace = true } = options;
121 |
122 | if (widget.type?.startsWith("converted-widget")) return;
123 | widget.origType = widget.type;
124 | widget.origComputeSize = widget.computeSize;
125 | widget.origSerializeValue = widget.serializeValue;
126 | // @ts-expect-error custom widget type
127 | widget.type = "converted-widget" + suffix;
128 | if (holdSpace) {
129 | widget.computeSize = () => [0, LiteGraph.NODE_WIDGET_HEIGHT];
130 | widget.serializeValue = (node, index) => {
131 | // Prevent serializing the widget if we have no input linked
132 | if (!node.inputs) {
133 | return undefined;
134 | }
135 | let node_input = node.inputs.find((i) => i.widget?.name === widget.name);
136 |
137 | if (!node_input || !node_input.link) {
138 | return undefined;
139 | }
140 | return widget.origSerializeValue
141 | ? widget.origSerializeValue(node, index)
142 | : widget.value;
143 | };
144 | } else {
145 | // -4 is due to the gap litegraph adds between widgets automatically
146 | widget.computeSize = () => [0, -4];
147 | widget.serializeValue = (node, index) => {
148 | return widget.origSerializeValue
149 | ? widget.origSerializeValue(node, index)
150 | : widget.value;
151 | };
152 | }
153 |
154 | // Hide any linked widgets, e.g. seed+seedControl
155 | if (widget.linkedWidgets) {
156 | for (const w of widget.linkedWidgets) {
157 | hideWidget(node, w, { suffix: ":" + widget.name, holdSpace: false });
158 | }
159 | }
160 | }
161 |
162 | export function convertToInput(node, widget) {
163 | hideWidget(node, widget);
164 | // Add input and store widget config for creating on primitive node
165 | const [oldWidth, oldHeight] = node.size;
166 |
167 | for (const widget of node.widgets) {
168 | widget.last_y += LiteGraph.NODE_SLOT_HEIGHT;
169 | }
170 |
171 | // Restore original size but grow if needed
172 | node.setSize([
173 | Math.max(oldWidth, node.size[0]),
174 | Math.max(oldHeight, node.size[1]),
175 | ]);
176 | return node;
177 | }
178 |
--------------------------------------------------------------------------------
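A usage sketch for addWidgets (the field name and value are made up): it creates a widget plus a matching input slot on the node and records the definition in node.local_input_defs, which serialize() in nodetype_workflow.js later uses to restore input types:

import { addWidgets } from "./widgets.js";

// `node` and `app` are assumed to be a LiteGraph node instance and the ComfyUI app object.
addWidgets(node, "denoise", { type: "FLOAT", value: 0.75 }, app);
// node now has a "denoise" FLOAT widget, a "denoise" input slot, and
// node.local_input_defs.required["denoise"] holds ["FLOAT", { default: 0.75, min: 0.0, max: 2048.0, step: 0.01 }]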
/web/js/workflows.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../../scripts/app.js";
2 | import { api } from '../../../scripts/api.js'
3 |
4 | async function convertWorkflowToApiFormat(standardWorkflow) {
5 | try {
6 | return new Promise((resolve, reject) => {
7 |       // Save the original onConfigure callbacks for all node types
8 |       const originalCallbacks = new Map();
9 |
10 |       // Temporarily disable every onConfigure callback
11 |       for (const nodeTypeName in LiteGraph.registered_node_types) {
12 |         const nodeType = LiteGraph.registered_node_types[nodeTypeName];
13 |         if (nodeType.prototype.onConfigure) {
14 |           originalCallbacks.set(nodeTypeName, nodeType.prototype.onConfigure);
15 |           nodeType.prototype.onConfigure = function() {}; // Empty function
16 |         }
17 |       }
18 |
19 |       // Save the main graph's onConfigure callback
20 |       const originalOnConfigure = LGraph.prototype.onConfigure;
21 |       LGraph.prototype.onConfigure = function() {}; // Temporarily disabled
22 |
23 | try {
24 |         // Create an isolated temporary graph
25 |         const tempGraph = new LGraph();
26 |         // Keep a reference to the original graph
27 |         const originalGraph = app.graph;
28 |
29 |         // Use graphToPrompt in isolation
30 |         app.graph = tempGraph;
31 |         // Configure without triggering callbacks
32 | tempGraph.configure(standardWorkflow);
33 |
34 | app.graphToPrompt(tempGraph)
35 | .then(apiData => {
36 |             // Restore the original graph
37 |             app.graph = originalGraph;
38 |
39 |             // Resolve with the API format
40 | resolve(apiData.output);
41 | })
42 | .catch(error => {
43 |             console.error("Error during conversion:", error);
44 | reject(error);
45 | })
46 | .finally(() => {
47 |             // Clean up the temporary graph
48 |             tempGraph.clear();
49 |
50 |             // Make sure all references are released
51 | if (tempGraph._nodes) {
52 | while (tempGraph._nodes.length > 0) {
53 | tempGraph.remove(tempGraph._nodes[0]);
54 | }
55 | tempGraph._nodes = null;
56 | }
57 |
58 |             // Remove the event listeners
59 | tempGraph.removeAllListeners && tempGraph.removeAllListeners();
60 | tempGraph._links = null;
61 |
62 |             // Restore all the original callbacks
63 | for (const [nodeTypeName, callback] of originalCallbacks.entries()) {
64 | LiteGraph.registered_node_types[nodeTypeName].prototype.onConfigure = callback;
65 | }
66 |
67 |             // Restore the graph callback
68 |             LGraph.prototype.onConfigure = originalOnConfigure;
69 |
70 |             //console.log("Conversion finished and sandbox cleaned up");
71 | });
72 | } catch (error) {
73 |         // On error, restore the callbacks and reject
74 | for (const [nodeTypeName, callback] of originalCallbacks.entries()) {
75 | LiteGraph.registered_node_types[nodeTypeName].prototype.onConfigure = callback;
76 | }
77 | LGraph.prototype.onConfigure = originalOnConfigure;
78 |
79 | reject(error);
80 | }
81 | });
82 | } catch (error) {
83 |     console.error("Error while preparing the graph:", error);
84 | throw error;
85 | }
86 | }
87 |
88 | export async function importWorkflow(root_obj, workflow_path, app){
89 | const filename = workflow_path.replace(/\\/g, '/').split("/");
90 | root_obj.title = "Workflow: "+filename[filename.length-1].replace(".json", "").replace(/_/g, " ");
91 |
92 |
93 | return api.fetchApi("/flowchain/workflow?workflow_path="+workflow_path)
94 | .then(response => response.json())
95 | .then(async data => {
96 | //cleanInputs(root_obj,nodeData, reset_values);
97 |
98 | let workflow = data.workflow;
99 | app.lipsync_studio[workflow_path] = data;
100 |
101 |             // If it's in the standard (UI) format, convert it to the API format
102 | if ("nodes" in workflow) {
103 | try {
104 | workflow = await convertWorkflowToApiFormat(workflow);
105 | } catch (error) {
106 |                 console.error("Workflow conversion failed:", error);
107 | return false;
108 | }
109 | }
110 |
111 | if (!workflow) {
112 |                 console.error('Invalid workflow or conversion failure');
113 | return false;
114 | }
115 | return JSON.stringify(workflow);
116 |
117 | })
118 | .catch(error => {
119 |             console.error('Error during import:', error);
120 | return false;
121 | });
122 | }
--------------------------------------------------------------------------------
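A usage sketch for importWorkflow above (the path is illustrative): it fetches the workflow through the /flowchain/workflow endpoint, caches its metadata in app.lipsync_studio, converts UI-format graphs to API format, and resolves with the stringified API workflow (or false on failure):

import { importWorkflow } from "./workflows.js";

// `node` is assumed to be a Workflow (FlowChain) node whose second widget holds the hidden JSON;
// `app` is the ComfyUI app object imported from scripts/app.js.
importWorkflow(node, "subflows/upscale.json", app).then((apiJson) => {
    if (apiJson) {
        node.widgets[1].value = apiJson; // stringified API-format workflow
        // inputs/outputs metadata is now cached in app.lipsync_studio["subflows/upscale.json"]
    }
});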
/workflow.py:
--------------------------------------------------------------------------------
1 | import json
2 | import torch
3 | import uuid
4 | import copy
5 | import os
6 | from enum import Enum
7 | import numpy as np
8 | import hashlib
9 | from torchvision import transforms
10 | import comfy.model_management
11 | from PIL import Image
12 | from nodes import SaveImage
13 | import gc
14 | import folder_paths
15 | from server import PromptServer
16 | from execution import PromptExecutor
17 |
18 |
19 | class ExecutionResult(Enum):
20 | SUCCESS = 0
21 | FAILURE = 1
22 | PENDING = 2
23 |
24 |
25 | class AnyType(str):
26 |     """A special string subclass that compares equal to anything, used as a wildcard type. Credit to pythongosssss"""
27 |
28 | def __eq__(self, _) -> bool:
29 | return True
30 |
31 | def __ne__(self, __value: object) -> bool:
32 | return False
33 |
34 |
35 | script_list_path = os.path.join(folder_paths.user_directory, "default", "workflows")
36 |
37 |
38 | def recursive_delete(workflow, to_delete):
39 | # workflow_copy = copy.deepcopy(workflow)
40 | new_delete = []
41 | for node_id in to_delete:
42 | for node_id2, node in workflow.items():
43 | for input_name, input_value in node["inputs"].items():
44 | if type(input_value) == list:
45 | if len(input_value) > 0:
46 | if input_value[0] == node_id:
47 | new_delete.append(node_id2)
48 | if node_id in workflow:
49 | del workflow[node_id]
50 | if len(new_delete) > 0:
51 | workflow = recursive_delete(workflow, new_delete)
52 | return workflow
53 |
54 |
55 | class Workflow(SaveImage):
56 | def __init__(self):
57 | self.ws = None
58 |
59 | @classmethod
60 | def INPUT_TYPES(cls):
61 | return {
62 | "required": {
63 | "workflows": ("COMBO", {"values": []}),
64 | "workflow": ("STRING", {"default": ""})
65 | },
66 | "optional": {},
67 | "hidden": {
68 | "unique_id": "UNIQUE_ID",
69 | }
70 | }
71 |
72 | RETURN_TYPES = (
73 | AnyType("*"), AnyType("*"), AnyType("*"), AnyType("*"), AnyType("*"), AnyType("*"), AnyType("*"), AnyType("*"),
74 | AnyType("*"), AnyType("*"), AnyType("*"), AnyType("*"), AnyType("*"), AnyType("*"), AnyType("*"), AnyType("*"),
75 | )
76 |
77 | FUNCTION = "generate"
78 | CATEGORY = "FlowChain ⛓️"
79 |
80 | OUTPUT_NODE = True
81 |
82 | @classmethod
83 | def IS_CHANGED(s, workflows, workflow, **kwargs):
84 | m = hashlib.sha256()
85 | m.update(workflows.encode())
86 |
87 |         # Add the workflow content to the hash to detect structural changes
88 | if workflow:
89 | workflow_data = json.loads(workflow)
90 |
91 |             # Extract the output nodes with their positions/types/connections
92 | outputs = {}
93 | for k, v in workflow_data.items():
94 | if v.get('class_type') == 'WorkflowOutput':
95 |                     # Capture the name, type and data source (incoming connections)
96 | output_info = {
97 | 'name': v['inputs']['Name'],
98 | 'type': v['inputs']['type'],
99 | 'position': v.get('_meta', {}).get('position', [0, 0]),
100 | }
101 |
102 |                     # Add the input connections to track where the data comes from
103 | for input_name, input_value in v['inputs'].items():
104 | if isinstance(input_value, list) and len(input_value) > 0:
105 |                             # Store the IDs of the nodes connected to this output
106 | output_info[input_name + '_source'] = input_value
107 |
108 | outputs[k] = output_info
109 |
110 |             # Make sure the output order is preserved in the hash
111 |             # by sorting the outputs by vertical position
112 | sorted_outputs = dict(sorted(
113 | outputs.items(),
114 | key=lambda item: item[1].get('position', [0, 0])[1]
115 | ))
116 |
117 |             # Add the output information to the hash
118 | m.update(json.dumps(sorted_outputs, sort_keys=True).encode())
119 |
120 | return m.digest().hex()
121 |
122 | def generate(self, workflows, workflow, unique_id, **kwargs):
123 |
124 | def populate_inputs(workflow, inputs, kwargs_values):
125 | workflow_inputs = {k: v for k, v in workflow.items() if "class_type" in v and v["class_type"] == "WorkflowInput"}
126 | for key, value in workflow_inputs.items():
127 | if value["inputs"]["Name"] in inputs:
128 | if type(inputs[value["inputs"]["Name"]]) == list:
129 | if value["inputs"]["Name"] in kwargs_values:
130 | workflow[key]["inputs"]["default"] = kwargs_values[value["inputs"]["Name"]]
131 | else:
132 | workflow[key]["inputs"]["default"] = inputs[value["inputs"]["Name"]]
133 |
134 | workflow_inputs_images = {k: v for k, v in workflow.items() if "class_type" in v and
135 | v["class_type"] == "WorkflowInput" and v["inputs"]["type"] == "IMAGE"}
136 | for key, value in workflow_inputs_images.items():
137 | if "default" not in value["inputs"]:
138 | workflow[key]["inputs"]["default"] = torch.tensor([])
139 | else:
140 | if isinstance(value["inputs"]["default"], list):
141 |                     # If it's a list (an unresolved link), fall back to an empty tensor
142 |                     workflow[key]["inputs"]["default"] = torch.tensor([])
143 |                 else:
144 |                     # If it's a tensor, check whether it is empty
145 | if value["inputs"]["default"].numel() == 0:
146 | workflow[key]["inputs"]["default"] = torch.tensor([])
147 |
148 | return workflow
149 |
150 | def treat_switch(workflow):
151 | to_delete = []
152 | #do_net_delete = []
153 | switch_to_delete = [-1]
154 | while len(switch_to_delete) > 0:
155 | switch_nodes = {k: v for k, v in workflow.items() if "class_type" in v and
156 | v["class_type"].startswith("Switch") and v["class_type"].endswith("[Crystools]")}
157 | # order switch nodes by inputs.boolean value
158 | switch_to_delete = []
159 | switch_nodes_copy = copy.deepcopy(switch_nodes)
160 | for switch_id, switch_node in switch_nodes.items():
161 | # create list of inputs who have switch in their inputs
162 |
163 | inputs_from_switch = []
164 | for node_ids, node in workflow.items():
165 | for input_name, input_value in node["inputs"].items():
166 | if type(input_value) == list:
167 | if len(input_value) > 0:
168 | if input_value[0] == switch_id:
169 | inputs_from_switch.append({node_ids: input_name})
170 | # convert to dictionary
171 | inputs_from_switch = {k: v for d in inputs_from_switch for k, v in d.items()}
172 | switch = switch_nodes_copy[switch_id]
173 | for node_id, input_name in inputs_from_switch.items():
174 | if type(switch["inputs"]["boolean"]) == list:
175 | switch_boolean_value = workflow[switch["inputs"]["boolean"][0]]["inputs"]
176 |
177 | other_input_name = None
178 | if "default" in switch_boolean_value:
179 | other_input_name = "default"
180 | elif "boolean" in switch_boolean_value:
181 | other_input_name = "boolean"
182 |
183 | if other_input_name is not None:
184 | if switch_boolean_value[other_input_name] == True:
185 | if type(switch["inputs"]["on_true"]) == list:
186 | workflow[node_id]["inputs"][input_name] = switch["inputs"]["on_true"]
187 | if node_id in switch_nodes_copy:
188 | switch_nodes_copy[node_id]["inputs"][input_name] = switch["inputs"]["on_true"]
189 | else:
190 | to_delete.append(node_id)
191 | else:
192 | if type(switch["inputs"]["on_false"]) == list:
193 | workflow[node_id]["inputs"][input_name] = switch["inputs"]["on_false"]
194 | if node_id in switch_nodes_copy:
195 | switch_nodes_copy[node_id]["inputs"][input_name] = switch["inputs"]["on_false"]
196 | else:
197 | to_delete.append(node_id)
198 | switch_to_delete.append(switch_id)
199 | else:
200 | if switch["inputs"]["boolean"] == True:
201 | if type(switch["inputs"]["on_true"]) == list:
202 | workflow[node_id]["inputs"][input_name] = switch["inputs"]["on_true"]
203 | if node_id in switch_nodes_copy:
204 | switch_nodes_copy[node_id]["inputs"][input_name] = switch["inputs"]["on_true"]
205 | else:
206 | to_delete.append(node_id)
207 | else:
208 | if type(switch["inputs"]["on_false"]) == list:
209 | workflow[node_id]["inputs"][input_name] = switch["inputs"]["on_false"]
210 | if node_id in switch_nodes_copy:
211 | switch_nodes_copy[node_id]["inputs"][input_name] = switch["inputs"]["on_false"]
212 | else:
213 | to_delete.append(node_id)
214 | switch_to_delete.append(switch_id)
215 | print(switch_to_delete)
216 | workflow = {k: v for k, v in workflow.items() if
217 | not ("class_type" in v and v["class_type"].startswith("Switch") and v["class_type"].endswith(
218 | "[Crystools]") and k in switch_to_delete)}
219 |
220 | return workflow, to_delete
221 |
222 | def treat_continue(workflow):
223 | to_delete = []
224 | continue_nodes = {k: v for k, v in workflow.items() if "class_type" in v and
225 | v["class_type"].startswith("WorkflowContinue")}
226 | do_net_delete = []
227 | for continue_node_id, continue_node in continue_nodes.items():
228 | for node_id, node in workflow.items():
229 | for input_name, input_value in node["inputs"].items():
230 | if type(input_value) == list:
231 | if len(input_value) > 0:
232 | if input_value[0] == continue_node_id:
233 | if type(continue_node["inputs"]["continue_workflow"]) == list:
234 | input_other_node = \
235 | workflow[continue_node["inputs"]["continue_workflow"][0]][
236 | "inputs"]
237 | other_input_name = None
238 | if "default" in input_other_node:
239 | other_input_name = "default"
240 | elif "boolean" in input_other_node:
241 | other_input_name = "boolean"
242 |
243 | if other_input_name is not None:
244 | if input_other_node[other_input_name]:
245 | workflow[node_id]["inputs"][input_name] = continue_node["inputs"]["input"]
246 | else:
247 | to_delete.append(node_id)
248 | else:
249 | do_net_delete.append(continue_node_id)
250 | else:
251 | if continue_node["inputs"]["continue_workflow"]:
252 | workflow[node_id]["inputs"][input_name] = continue_node["inputs"]["input"]
253 | else:
254 | to_delete.append(node_id)
255 |
256 | workflow = {k: v for k, v in workflow.items() if
257 | not ("class_type" in v and v["class_type"].startswith("WorkflowContinue") and k not in do_net_delete)}
258 | return workflow, to_delete
259 |
260 | def redefine_id(subworkflow, max_id):
261 | new_sub_workflow = {}
262 |
263 | for k, v in subworkflow.items():
264 | max_id += 1
265 | new_sub_workflow[str(max_id)] = v
266 | # replace old id by new id items in inputs of workflow
267 | for node_id, node in subworkflow.items():
268 | for input_name, input_value in node["inputs"].items():
269 | if type(input_value) == list:
270 | if len(input_value) > 0:
271 | if input_value[0] == k:
272 | subworkflow[node_id]["inputs"][input_name][0] = str(max_id)
273 | for node_id, node in new_sub_workflow.items():
274 | for input_name, input_value in node["inputs"].items():
275 | if type(input_value) == list:
276 | if len(input_value) > 0:
277 | if input_value[0] == k:
278 | new_sub_workflow[node_id]["inputs"][input_name][0] = str(max_id)
279 | return new_sub_workflow, max_id
280 |
281 | def change_subnode(subworkflow, node_id_to_find, value):
282 | for node_id, node in subworkflow.items():
283 | for input_name, input_value in node["inputs"].items():
284 | if type(input_value) == list:
285 | if len(input_value) > 0:
286 | if input_value[0] == node_id_to_find:
287 | subworkflow[node_id]["inputs"][input_name] = value
288 |
289 | return subworkflow
290 |
291 | def merge_inputs_outputs(workflow, workflow_name, subworkflow, workflow_outputs):
292 | # get max workflow id
293 |         # convert workflow_outputs to a list
294 | workflow_outputs = list(workflow_outputs.values())
295 |             # take the first matching Workflow node
296 | workflow_node = [{"id":k, **v} for k, v in workflow.items() if "class_type" in v and v["class_type"] == "Workflow" and v["inputs"]["workflows"] == workflow_name][0]
297 | sub_input_nodes = {k: v for k, v in subworkflow.items() if "class_type" in v and v["class_type"] == "WorkflowInput"}
298 | do_not_delete = []
299 | for sub_id, sub_node in sub_input_nodes.items():
300 | if sub_node["inputs"]["Name"] in workflow_node["inputs"]:
301 | value = workflow_node["inputs"][sub_node["inputs"]["Name"]]
302 | if type(value) == list:
303 | subworkflow = change_subnode(subworkflow, sub_id, value)
304 | else:
305 | subworkflow[sub_id]["inputs"]["default"] = value
306 | do_not_delete.append(sub_id)
307 |
308 | # remove input node
309 | subworkflow = {k: v for k, v in subworkflow.items() if not ("class_type" in v and v["class_type"] == "WorkflowInput" and k not in do_not_delete)}
310 |
311 | # get sub workflow file path
312 | sub_workflow_file_path = os.path.join(folder_paths.user_directory, "default", "workflows", workflow_name)
313 | sub_original_positions = {}
314 |
315 | if os.path.exists(sub_workflow_file_path):
316 | try:
317 | with open(sub_workflow_file_path, "r", encoding="utf-8") as f:
318 | sub_original_workflow = json.load(f)
319 |
320 | if "nodes" in sub_original_workflow:
321 | for node in sub_original_workflow["nodes"]:
322 | if node.get("type") == "WorkflowOutput":
323 | node_id = str(node.get("id", "unknown"))
324 | pos_y = node.get("pos", [0, 0])[1]
325 | w_values = node.get("widgets_values", "")
326 | if "Name" in w_values:
327 | node_name = w_values["Name"]["value"]
328 | else:
329 | node_name = w_values[0]
330 | sub_original_positions[node_name] = pos_y
331 | except Exception as e:
332 | print(f"Error reading sub-workflow file: {str(e)}")
333 |
334 | sub_output_nodes = {k: v for k, v in subworkflow.items() if "class_type" in v and v["class_type"] == "WorkflowOutput"}
335 |
336 | # sort sub workflow output nodes
337 | sub_outputs_with_position = []
338 | for k, v in sub_output_nodes.items():
339 | output_name = v["inputs"]["Name"]
340 | y_position = sub_original_positions.get(output_name, 999999)
341 | sub_outputs_with_position.append((k, y_position))
342 |
343 | sub_outputs_with_position.sort(key=lambda x: x[1])
344 | sub_output_nodes = {k: sub_output_nodes[k] for k, _ in sub_outputs_with_position}
345 |
346 |
347 | workflow_copy = copy.deepcopy(workflow)
348 | for node_id, node in workflow_copy.items():
349 | for input_name, input_value in node["inputs"].items():
350 | if type(input_value) == list:
351 | if len(input_value) > 0:
352 | if input_value[0] == workflow_node["id"]:
353 | for sub_output_id, sub_output_node in sub_output_nodes.items():
354 | if sub_output_node["inputs"]["Name"] == workflow_outputs[input_value[1]]["inputs"]["Name"]:
355 | workflow[node_id]["inputs"][input_name] = sub_output_node["inputs"]["default"]
356 |
357 | # remove output node
358 | subworkflow = {k: v for k, v in subworkflow.items() if not ("class_type" in v and v["class_type"] == "WorkflowOutput")}
359 |
360 | return workflow, subworkflow
361 |
362 | def clean_workflow(workflow, inputs=None, kwargs_values=None):
363 | if kwargs_values is None:
364 | kwargs_values = {}
365 | if inputs is None:
366 | inputs = {}
367 | if inputs is not None:
368 | workflow = populate_inputs(workflow, inputs, kwargs_values)
369 |
370 | workflow_outputs = {k: v for k, v in workflow.items() if "class_type" in v and v["class_type"] == "WorkflowOutput"}
371 |
372 | for output_id, output_node in workflow_outputs.items():
373 | workflow[output_id]["inputs"]["ui"] = False
374 |
375 | workflow, switch_to_delete = treat_switch(workflow)
376 | workflow, continue_to_delete = treat_continue(workflow)
377 | workflow = recursive_delete(workflow, switch_to_delete + continue_to_delete)
378 | return workflow, workflow_outputs
379 |
380 | def get_recursive_workflow(workflow_name, workflows, max_id=0):
381 | # if workflows[-5:] == ".json":
382 | # workflow = get_workflow(workflows)
383 | # else:
384 | try:
385 | if workflows == "{}":
386 | raise ValueError("Empty workflow.")
387 | workflow = json.loads(workflows)
388 |         except Exception:
389 |             raise RuntimeError(f"Error while loading workflow: {workflow_name}. See the console log for more information.")
390 |
391 | workflow, max_id = redefine_id(workflow, max_id)
392 | sub_workflows = {k: v for k, v in workflow.items() if "class_type" in v and v["class_type"] == "Workflow"}
393 | for key, sub_workflow_node in sub_workflows.items():
394 | workflow_json = sub_workflow_node["inputs"]["workflow"]
395 | sub_workflow_name = sub_workflow_node["inputs"]["workflows"]
396 | subworkflow, max_id = get_recursive_workflow(sub_workflow_name, workflow_json, max_id)
397 |
398 | # get sub workflow file path
399 | sub_workflow_file_path = os.path.join(folder_paths.user_directory, "default", "workflows", sub_workflow_name)
400 | sub_original_positions = {}
401 |
402 | if os.path.exists(sub_workflow_file_path):
403 | try:
404 | with open(sub_workflow_file_path, "r", encoding="utf-8") as f:
405 | sub_original_workflow = json.load(f)
406 |
407 | if "nodes" in sub_original_workflow:
408 | for node in sub_original_workflow["nodes"]:
409 | if node.get("type") == "WorkflowOutput":
410 | node_id = str(node.get("id", "unknown"))
411 | pos_y = node.get("pos", [0, 0])[1]
412 | w_values = node.get("widgets_values", "")
413 | if "Name" in w_values:
414 | node_name = w_values["Name"]["value"]
415 | else:
416 | node_name = w_values[0]
417 | sub_original_positions[node_name] = pos_y
418 | except Exception as e:
419 | print(f"Error reading sub-workflow file: {str(e)}")
420 |
421 | workflow_outputs_sub = {k: v for k, v in subworkflow.items() if "class_type" in v and v["class_type"] == "WorkflowOutput"}
422 |
423 | # sort sub workflow output nodes
424 | sub_outputs_with_position = []
425 | for k, v in workflow_outputs_sub.items():
426 | output_name = v["inputs"]["Name"]
427 | y_position = sub_original_positions.get(output_name, 999999)
428 | sub_outputs_with_position.append((k, y_position))
429 |
430 | sub_outputs_with_position.sort(key=lambda x: x[1])
431 | workflow_outputs_sub = {k: workflow_outputs_sub[k] for k, _ in sub_outputs_with_position}
432 |
433 | workflow, subworkflow = merge_inputs_outputs(workflow, sub_workflow_name, subworkflow, workflow_outputs_sub)
434 | workflow = {k: v for k, v in workflow.items() if k != key}
435 | # add subworkflow to workflow
436 | workflow.update(subworkflow)
437 | return workflow, max_id
438 |
439 | server_instance = PromptServer.instance
440 | client_id = server_instance.client_id
441 | if server_instance and hasattr(server_instance, 'prompt_queue'):
442 | current_queue = server_instance.prompt_queue.get_current_queue()
443 | queue_info = {
444 | 'queue_running': current_queue[0],
445 | 'queue_pending': current_queue[1]
446 | }
447 |
448 | # Now you can access the original inputs as before
449 | queue_to_use = queue_info["queue_running"]
450 | original_inputs = [v["inputs"] for k, v in queue_to_use[0][2].items() if k == unique_id][0]
451 |
452 | else:
453 | # Fallback to empty inputs if server instance not available
454 | original_inputs = {}
455 |
456 | workflow, _ = get_recursive_workflow(workflows, workflow, 5000)
457 | workflow, workflow_outputs = clean_workflow(workflow, original_inputs, kwargs)
458 |
459 |         # Read the original JSON file to get the correct positions
460 | workflow_file_path = os.path.join(folder_paths.user_directory, "default", "workflows", workflows)
461 | original_positions = {}
462 |
463 |         # Get the output node positions from the original file
464 |
465 | if os.path.exists(workflow_file_path):
466 | try:
467 | with open(workflow_file_path, "r", encoding="utf-8") as f:
468 | original_workflow = json.load(f)
469 |
470 |                 # Build a node_id -> position mapping for the WorkflowOutput nodes
471 | if "nodes" in original_workflow:
472 | for node in original_workflow["nodes"]:
473 | if node.get("type") == "WorkflowOutput":
474 | # node_id = str(node.get("id", "unknown"))
475 | pos_y = node.get("pos", [0, 0])[1]
476 |
477 | w_values = node.get("widgets_values", "")
478 | if "Name" in w_values:
479 | node_name = w_values["Name"]["value"]
480 | else:
481 | node_name = w_values[0]
482 |
483 | original_positions[node_name] = pos_y
484 | except Exception as e:
485 |                 print(f"Error while reading the original workflow file: {str(e)}")
486 |
487 |         # Collect the output nodes and sort them by Y position
488 | workflow_outputs_with_position = []
489 | for k, v in workflow_outputs.items():
490 | output_name = v["inputs"]["Name"]
491 |             # Use the position from the original file if available, otherwise a default position
492 | y_position = original_positions.get(output_name, 999999)
493 | workflow_outputs_with_position.append((k, y_position))
494 |
495 |         # Sort by increasing Y position
496 | workflow_outputs_with_position.sort(key=lambda x: x[1])
497 |
498 |         # Keep only the IDs, in sorted order
499 | workflow_outputs_id = [k for k, _ in workflow_outputs_with_position]
500 |
501 | prompt_id = str(uuid.uuid4())
502 |
503 | class SimpleServer:
504 | def __init__(self):
505 | self.client_id = client_id
506 | self.last_node_id = None
507 | self.last_prompt_id = prompt_id
508 |
509 | def send_sync(self, *args, **kwargs):
510 | pass # No-op implementation
511 |
512 | simple_server = SimpleServer()
513 | executor = PromptExecutor(simple_server)
514 |
515 | executor.execute(workflow, prompt_id, {"client_id": client_id}, workflow_outputs_id)
516 |
517 | history_result = executor.history_result
518 | comfy.model_management.unload_all_models()
519 | gc.collect()
520 |
521 |         # Build the outputs following the sorted order of the output nodes
522 |         output = []
523 |         for id_node in workflow_outputs_id: # Use the sorted order of IDs
524 | if id_node in history_result["outputs"]:
525 | result_value = history_result["outputs"][id_node]["default"]
526 | # Apply formatting based on the expected output type
527 | output.append(result_value[0])
528 | else:
529 |                 node = workflow_outputs[id_node] # Get the node matching this ID
530 | if node["inputs"]["type"] == "IMAGE" or node["inputs"]["type"] == "MASK":
531 | black_image_np = np.zeros((255, 255, 3), dtype=np.uint8)
532 | black_image_pil = Image.fromarray(black_image_np)
533 | transform = transforms.ToTensor()
534 | image_tensor = transform(black_image_pil)
535 | image_tensor = image_tensor.permute(1, 2, 0)
536 | image_tensor = image_tensor.unsqueeze(0)
537 | output.append(image_tensor)
538 | else:
539 | output.append(None)
540 |
541 | return tuple(output)
542 | # return tuple(queue[uid]["outputs"])
543 |
544 |
545 | NODE_CLASS_MAPPINGS_WORKFLOW = {
546 | "Workflow": Workflow,
547 | }
548 |
549 | NODE_DISPLAY_NAME_MAPPINGS_WORKFLOW = {
550 | "Workflow": "Workflow (FlowChain ⛓️)",
551 | }
552 |
--------------------------------------------------------------------------------
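Workflow.generate above operates on ComfyUI's API-format prompt: a flat map of node id -> {class_type, inputs}, where a link is encoded as [source_node_id, output_index]. The fragment below (ids and values are made up, written as a JS literal to match the front-end files) sketches the structure that treat_switch and recursive_delete walk:

// Illustrative API-format fragment. treat_switch rewires consumers of a
// "Switch ... [Crystools]" node to its selected on_true/on_false link and then
// drops the switch; recursive_delete removes any node marked for deletion
// together with everything downstream of it.
const apiWorkflow = {
  "5001": { class_type: "WorkflowInput", inputs: { Name: "image", type: "IMAGE", default: [] } },
  "5002": { class_type: "Switch image [Crystools]", inputs: { boolean: true, on_true: ["5001", 0], on_false: ["5003", 0] } },
  "5003": { class_type: "LoadImage", inputs: { image: "fallback.png" } },
  "5004": { class_type: "WorkflowOutput", inputs: { Name: "result", type: "IMAGE", default: ["5002", 0] } },
};
// After clean_workflow: node "5004" reads its default directly from ["5001", 0]
// and the switch node "5002" is removed from the prompt.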
/workflow_nodes.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 | from PIL import Image
4 | import hashlib
5 | from torchvision import transforms
6 |
7 |
8 | class AnyType(str):
9 |     """A special string subclass that compares equal to anything, used as a wildcard type. Credit to pythongosssss"""
10 |
11 | def __eq__(self, _) -> bool:
12 | return True
13 |
14 | def __ne__(self, __value: object) -> bool:
15 | return False
16 |
17 |
18 | BOOLEAN = ("BOOLEAN", {"default": True})
19 | STRING = ("STRING", {"default": ""})
20 | any_input = AnyType("*")
21 | node_type_list = ["none", "IMAGE", "MASK", "STRING", "INT", "FLOAT", "LATENT", "BOOLEAN", "CLIP", "CONDITIONING",
22 | "MODEL", "VAE", "DICT", "AUDIO", "AUDIO_PATH", "VIDEO_PATH", "AUDIO/VIDEO_PATH", "DOC_PATH", "IMAGE_PATH", "PROMPT"]
23 |
24 |
25 | class WorkflowContinue:
26 | def __init__(self):
27 | pass
28 |
29 | @classmethod
30 | def INPUT_TYPES(cls):
31 | return {
32 | "required": {
33 | "input": ("IMAGE", {"default": []}),
34 | "type": (
35 | ["none", "IMAGE", "LATENT"],),
36 | "continue_workflow": BOOLEAN,
37 | }
38 | }
39 |
40 | RETURN_TYPES = (AnyType("*"),)
41 | RETURN_NAMES = ("output",)
42 | FUNCTION = "execute"
43 | CATEGORY = "FlowChain ⛓️"
44 |
45 | @classmethod
46 | def IS_CHANGED(s, input, type, continue_workflow):
47 | m = hashlib.sha256()
48 | if input is None:
49 | return "0"
50 | else:
51 |             m.update(str(input).encode() + str(continue_workflow).encode())
52 | return m.digest().hex()
53 |
54 | def execute(self, input, type, continue_workflow):
55 | print("WorkflowContinue", continue_workflow)
56 | if continue_workflow:
57 | if type == "LATENT":
58 | ret = {"samples": input["samples"][0].unsqueeze(0)}
59 | if "noise_mask" in input:
60 | ret["noise_mask"] = input["noise_mask"][0].unsqueeze(0)
61 | return (ret,)
62 | else:
63 | return (input,)
64 | else:
65 | return (input[0].unsqueeze(0),)
66 |
67 |
68 | class WorkflowInput:
69 | def __init__(self):
70 | pass
71 |
72 | @classmethod
73 | def INPUT_TYPES(cls):
74 | return {"required": {
75 | "Name": STRING,
76 | #"type": (node_type_list,),
77 | "default": ("*",)
78 | }}
79 |
80 | RETURN_TYPES = (AnyType("*"),)
81 | RETURN_NAMES = ("output",)
82 | FUNCTION = "execute"
83 | CATEGORY = "FlowChain ⛓️"
84 |
85 | # OUTPUT_NODE = True
86 |
87 | @classmethod
88 |     def IS_CHANGED(s, Name, default, **kwargs):
89 | m = hashlib.sha256()
90 | if default is not None:
91 | m.update(str(default).encode())
92 | else:
93 |             m.update(Name.encode())
94 | return m.digest().hex()
95 |
96 | @classmethod
97 | def VALIDATE_INPUTS(cls, input_types):
98 | return True
99 |
100 |
101 | def execute(self, Name, **kwargs):
102 | return (kwargs["default"],)
103 |
104 |
105 | class WorkflowOutput:
106 | def __init__(self):
107 | pass
108 |
109 | @classmethod
110 | def INPUT_TYPES(cls):
111 | return {
112 | "required": {
113 | "Name": STRING,
114 | #"type": (node_type_list,)
115 | "default": ("*",)
116 | },
117 | "hidden": {
118 | "ui": BOOLEAN
119 | }}
120 |
121 | RETURN_TYPES = (AnyType("*"),)
122 | RETURN_NAMES = ("output",)
123 | FUNCTION = "execute"
124 | CATEGORY = "FlowChain ⛓️"
125 | OUTPUT_NODE = True
126 |
127 |
128 | @classmethod
129 | def IS_CHANGED(s, Name, ui=True, **kwargs):
130 | m = hashlib.sha256()
131 | m.update(Name.encode())
132 | return m.digest().hex()
133 |
134 | @classmethod
135 | def VALIDATE_INPUTS(cls, input_types):
136 | return True
137 |
138 | def execute(self, Name, ui=True, **kwargs):
139 | if ui:
140 | if kwargs["default"] is None:
141 | return (torch.tensor([]),)
142 | return (kwargs["default"],)
143 | else:
144 | """
145 | if type in ["IMAGE", "MASK"]:
146 | if kwargs["default"] is None:
147 | black_image_np = np.zeros((255, 255, 3), dtype=np.uint8)
148 | black_image_pil = Image.fromarray(black_image_np)
149 | transform = transforms.ToTensor()
150 | image_tensor = transform(black_image_pil)
151 | image_tensor = image_tensor.permute(1, 2, 0)
152 | image_tensor = image_tensor.unsqueeze(0)
153 | return {"ui": {"default": image_tensor}}
154 | return {"ui": {"default": [kwargs["default"]]}}
155 | elif type == "LATENT":
156 | if kwargs["default"] is None:
157 | return {"ui": {"default": torch.tensor([])}}
158 | return {"ui": {"default": [kwargs["default"]]}}
159 | else:
160 | """
161 | ui = {"ui": {}}
162 | ui["ui"]["default"] = [kwargs["default"]]
163 | return ui
164 |
165 |
166 | NODE_CLASS_MAPPINGS_NODES = {
167 | "WorkflowInput": WorkflowInput,
168 | "WorkflowOutput": WorkflowOutput,
169 | "WorkflowContinue": WorkflowContinue,
170 | }
171 |
172 | # A dictionary that contains the friendly/humanly readable titles for the nodes
173 | NODE_DISPLAY_NAME_MAPPINGS_NODES = {
174 | "WorkflowInput": "Workflow Input (FlowChain ⛓️)",
175 | "WorkflowOutput": "Workflow Output (FlowChain ⛓️)",
176 | "WorkflowContinue": "Workflow Continue (FlowChain ⛓️)",
177 | }
178 |
--------------------------------------------------------------------------------