├── __init__.py ├── pyproject.toml ├── .github └── workflows │ └── publish.yml ├── LICENSE ├── nodes_images.py ├── .gitignore ├── README.md └── workflow └── Hunyuan Video + ImageMotionGuider.json /__init__.py: -------------------------------------------------------------------------------- 1 | from .nodes_images import NODE_CLASS_MAPPINGS 2 | 3 | __all__ = ['NODE_CLASS_MAPPINGS'] -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "comfyui-imagemotionguider" 3 | description = "A custom ComfyUI node designed to create seamless motion effects from single images by integrating with Hunyuan Video through latent space manipulation." 4 | version = "1.0.0" 5 | license = {file = "LICENSE"} 6 | 7 | [project.urls] 8 | Repository = "https://github.com/ShmuelRonen/ComfyUI-ImageMotionGuider" 9 | # Used by Comfy Registry https://comfyregistry.org 10 | 11 | [tool.comfy] 12 | PublisherId = "" 13 | DisplayName = "ComfyUI-ImageMotionGuider" 14 | Icon = "" 15 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish to Comfy registry 2 | on: 3 | workflow_dispatch: 4 | push: 5 | branches: 6 | - main 7 | - master 8 | paths: 9 | - "pyproject.toml" 10 | 11 | jobs: 12 | publish-node: 13 | name: Publish Custom Node to registry 14 | runs-on: ubuntu-latest 15 | # if this is a forked repository. Skipping the workflow. 16 | if: github.event.repository.fork == false 17 | steps: 18 | - name: Check out code 19 | uses: actions/checkout@v4 20 | - name: Publish Custom Node 21 | uses: Comfy-Org/publish-node-action@main 22 | with: 23 | ## Add your own personal access token to your Github Repository secrets and reference it here. 
24 | personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }} 25 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Shmuel Ronen 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
import torch
# NOTE(review): the original `import nodes` / `import folder_paths` were
# removed — nothing in this module references either name.


class ImageMotionGuider:
    """Create a seamless horizontal-pan (and optional zoom) frame sequence
    from a single image.

    The node tiles the original image with a horizontally mirrored copy so
    the pan wraps around without a visible seam.  The resulting frame batch
    is intended to be VAE-encoded and used as latent-space motion guidance
    for Hunyuan Video.
    """

    @classmethod
    def INPUT_TYPES(s):
        # ComfyUI node-input schema: source image plus pan range, frame
        # count, and maximum zoom factor.
        return {"required": {
            "image": ("IMAGE",),
            "move_range_x": ("INT", {"default": 0, "min": -150, "max": 150}),
            "frame_num": ("INT", {"default": 10, "min": 2, "max": 150}),
            "zoom": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 0.5, "step": 0.05}),
        }}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "guide_motion"
    CATEGORY = "image/animation"

    def get_size(self, image):
        """Return (width, height) of a ComfyUI IMAGE tensor laid out (B, H, W, C)."""
        image_size = image.size()
        return int(image_size[2]), int(image_size[1])

    def guide_motion(self, image, move_range_x, frame_num, zoom):
        """Build ``frame_num`` frames panning ``move_range_x`` pixels horizontally.

        Args:
            image: IMAGE tensor of shape (B, H, W, C); only batch item 0 is
                read when compositing each output frame.
            move_range_x: Total horizontal travel in pixels.  Positive pans
                right, negative pans left, 0 disables panning.
            frame_num: Number of frames to emit (>= 2 per INPUT_TYPES).
            zoom: Crop-zoom factor reached on the final frame; 0.0 disables
                zooming.

        Returns:
            One-tuple holding an IMAGE tensor of shape (frame_num, H, W, C).
        """
        img_width, img_height = self.get_size(image)

        # Per-frame travel; frame_num >= 2 keeps the division safe.
        step_size = abs(move_range_x) / (frame_num - 1) if move_range_x != 0 else 0
        start_x = 0 if move_range_x > 0 else abs(move_range_x)

        # For negative motion, adjust the starting position to begin at 0,0
        if move_range_x < 0:
            start_x -= img_width

        batch = []
        # Mirror along the width axis (dim 2 of B,H,W,C) for a seamless wrap.
        mirrored = torch.flip(image, [2])

        for i in range(frame_num):
            x_pos = start_x + (step_size * i * (-1 if move_range_x < 0 else 1))
            x_pos = int(x_pos)
            # Python's % keeps the result in [0, img_width) even for negative
            # x_pos, which the leftward pan relies on.
            x_pos = x_pos % img_width if move_range_x != 0 else 0

            current_zoom = (i / (frame_num - 1)) * zoom if zoom > 0 else 0
            if current_zoom > 0:
                # Center-crop by the current zoom factor, then scale back up
                # to full frame size (NHWC -> NCHW for interpolate, then back).
                crop_width = int(img_width * (1 - current_zoom))
                crop_height = int(img_height * (1 - current_zoom))
                x_start = (img_width - crop_width) // 2
                y_start = (img_height - crop_height) // 2

                zoomed_original = torch.nn.functional.interpolate(
                    image[:, y_start:y_start + crop_height, x_start:x_start + crop_width, :].permute(0, 3, 1, 2),
                    size=(img_height, img_width),
                    mode='bilinear'
                ).permute(0, 2, 3, 1)

                zoomed_mirror = torch.nn.functional.interpolate(
                    mirrored[:, y_start:y_start + crop_height, x_start:x_start + crop_width, :].permute(0, 3, 1, 2),
                    size=(img_height, img_width),
                    mode='bilinear'
                ).permute(0, 2, 3, 1)
            else:
                zoomed_original = image
                zoomed_mirror = mirrored

            # FIX: allocate the canvas with the input's dtype and device so a
            # CUDA or non-float32 image no longer fails on the slice
            # assignment below (the original used a CPU float32 canvas
            # unconditionally).
            canvas = torch.zeros(
                (1, img_height, img_width, image.shape[3]),
                dtype=image.dtype,
                device=image.device,
            )

            # Fill the frame left-to-right, alternating between the original
            # and the mirrored image each time the source column wraps.
            remaining_width = img_width
            current_x = x_pos
            use_flipped = False

            while remaining_width > 0:
                width = min(img_width - current_x, remaining_width)
                current_image = zoomed_mirror if use_flipped else zoomed_original

                canvas[0, :, img_width - remaining_width:img_width - remaining_width + width, :] = \
                    current_image[0, :, current_x:current_x + width, :]

                remaining_width -= width
                current_x = 0
                use_flipped = not use_flipped

            batch.append(canvas)

        return (torch.cat(batch, dim=0),)


# Registry consumed by __init__.py / ComfyUI's custom-node loader.
NODE_CLASS_MAPPINGS = {
    "ImageMotionGuider": ImageMotionGuider,
}
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # UV 98 | # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | #uv.lock 102 | 103 | # poetry 104 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 105 | # This is especially recommended for binary packages to ensure reproducibility, and is more 106 | # commonly ignored for libraries. 
107 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 108 | #poetry.lock 109 | 110 | # pdm 111 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 112 | #pdm.lock 113 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 114 | # in version control. 115 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 116 | .pdm.toml 117 | .pdm-python 118 | .pdm-build/ 119 | 120 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 121 | __pypackages__/ 122 | 123 | # Celery stuff 124 | celerybeat-schedule 125 | celerybeat.pid 126 | 127 | # SageMath parsed files 128 | *.sage.py 129 | 130 | # Environments 131 | .env 132 | .venv 133 | env/ 134 | venv/ 135 | ENV/ 136 | env.bak/ 137 | venv.bak/ 138 | 139 | # Spyder project settings 140 | .spyderproject 141 | .spyproject 142 | 143 | # Rope project settings 144 | .ropeproject 145 | 146 | # mkdocs documentation 147 | /site 148 | 149 | # mypy 150 | .mypy_cache/ 151 | .dmypy.json 152 | dmypy.json 153 | 154 | # Pyre type checker 155 | .pyre/ 156 | 157 | # pytype static type analyzer 158 | .pytype/ 159 | 160 | # Cython debug symbols 161 | cython_debug/ 162 | 163 | # PyCharm 164 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 165 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 166 | # and can be added to the global gitignore or merged into this file. For a more nuclear 167 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
168 | #.idea/ 169 | 170 | # PyPI configuration file 171 | .pypirc 172 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ComfyUI-ImageMotionGuider 2 | 3 | A custom ComfyUI node designed to create seamless motion effects from single images by integrating with Hunyuan Video through latent space manipulation. 4 | 5 | ![image](https://github.com/user-attachments/assets/c62be207-2eb3-4d85-a3b2-d8542c050c6f) 6 | 7 | 8 | https://github.com/user-attachments/assets/e6c770ed-f767-4beb-ae02-c61e9b8cfa5a 9 | 10 | 11 | ## Overview 12 | 13 | The ComfyUI-ImageMotionGuider node creates smooth motion sequences from static images through intelligent mirroring techniques. Its primary purpose is to influence AI video generation models' behavior through latent space guidance, particularly designed for Hunyuan Video integration. 14 | 15 | ## Installation 16 | 17 | Install from Custom Node Manager 18 | 19 | or 20 | 21 | ```bash 22 | cd ComfyUI/custom_nodes 23 | git clone https://github.com/ShmuelRonen/ComfyUI-ImageMotionGuider.git 24 | ``` 25 | 26 | ## Features 27 | 28 | - **Bidirectional Motion**: Horizontal movement control (-150 to 150 pixels) 29 | - **Variable Frame Generation**: Create 2 to 150 frame sequences 30 | - **Dynamic Zoom**: Optional zoom effect (0.0 to 0.5) 31 | - **Seamless Transitions**: Smart image mirroring for continuous motion 32 | - **VAE Integration**: Direct compatibility with VAE encoders 33 | - **Hunyuan Video Ready**: Designed for latent space guidance 34 | 35 | ## Parameters 36 | 37 | | Parameter | Type | Range | Default | Description | 38 | |-----------|------|--------|---------|-------------| 39 | | image | IMAGE | - | - | Input image | 40 | | move_range_x | INT | -150 to 150 | 0 | Motion range (positive = right, negative = left) | 41 | | frame_num | INT | 2 to 150 | 10 | Number of frames to generate | 42 | | zoom | FLOAT | 0.0 
to 0.5 | 0.0 | Optional zoom intensity | 43 | 44 | ## Working with Hunyuan Video 45 | 46 | This node is specifically designed to enhance Hunyuan Video generation through: 47 | 48 | 1. **Motion Generation**: 49 | - Creates frame sequences with consistent directional motion 50 | - Maintains original image orientation 51 | - Ensures smooth transitions between frames 52 | 53 | 2. **Latent Space Integration**: 54 | - Compatible with VAE encoders 55 | - Provides motion guidance through latent space 56 | - Influences video generation behavior 57 | 58 | 3. **Direction Control**: 59 | - Positive values: Right-moving sequence 60 | - Negative values: Left-moving sequence 61 | - Both maintain original image orientation 62 | 63 | ## Technical Implementation 64 | 65 | ### Motion Algorithm 66 | - Preserves original image orientation 67 | - Creates mirrored copies for seamless transitions 68 | - Handles directional changes smoothly 69 | - Combines motion with optional zoom 70 | 71 | ### Core Features 72 | - PyTorch-based tensor operations 73 | - CUDA acceleration support 74 | - Efficient memory management 75 | - Smart edge handling 76 | 77 | ## Usage Tips 78 | 79 | ### Image Selection 80 | - Use images with balanced horizontal composition 81 | - Avoid prominent features at edges 82 | - Higher resolution images recommended 83 | 84 | ### Parameter Settings 85 | 86 | 1. **Motion Range (-150 to 150)**: 87 | - Start with ±50 for testing 88 | - Larger values for more dramatic motion 89 | - Consider image content when setting 90 | 91 | 2. **Frame Count (2-150)**: 92 | - 10-20 frames good for testing 93 | - Higher counts for smoother motion 94 | - Balance smoothness vs. processing time 95 | 96 | 3. **Zoom (0.0-0.5)**: 97 | - Use sparingly 98 | - Enhances motion effect 99 | - Combine with motion for dynamic results 100 | 101 | ### Hunyuan Integration 102 | 103 | 1. 
**Workflow Setup**: 104 | ``` 105 | [Image Loader] → [ImageMotionGuider] → [VAE Encoder] → [Hunyuan Video] 106 | ``` 107 | 108 | 2. **Best Practices**: 109 | - Adjust denoise levels in Hunyuan 110 | - Consider frame count impact 111 | - Test different motion ranges 112 | 113 | ## Requirements 114 | - ComfyUI 115 | - PyTorch 116 | - VAE encoder (for latent conversion) 117 | - Hunyuan Video (for intended usage) 118 | 119 | ## Contributing 120 | 121 | We welcome contributions: 122 | - Bug reports 123 | - Feature requests 124 | - Pull requests 125 | - Usage examples 126 | 127 | ## License 128 | 129 | MIT License 130 | 131 | ## Acknowledgments 132 | 133 | - Developed for Hunyuan Video integration 134 | - Built for the ComfyUI community 135 | - Thanks to all contributors 136 | -------------------------------------------------------------------------------- /workflow/Hunyuan Video + ImageMotionGuider.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 113, 3 | "last_link_id": 287, 4 | "nodes": [ 5 | { 6 | "id": 16, 7 | "type": "KSamplerSelect", 8 | "pos": [ 9 | 484, 10 | 751 11 | ], 12 | "size": [ 13 | 315, 14 | 58 15 | ], 16 | "flags": {}, 17 | "order": 0, 18 | "mode": 0, 19 | "inputs": [], 20 | "outputs": [ 21 | { 22 | "name": "SAMPLER", 23 | "type": "SAMPLER", 24 | "links": [ 25 | 19 26 | ], 27 | "shape": 3 28 | } 29 | ], 30 | "properties": { 31 | "Node name for S&R": "KSamplerSelect" 32 | }, 33 | "widgets_values": [ 34 | "euler" 35 | ] 36 | }, 37 | { 38 | "id": 26, 39 | "type": "FluxGuidance", 40 | "pos": [ 41 | 520, 42 | 100 43 | ], 44 | "size": [ 45 | 317.4000244140625, 46 | 58 47 | ], 48 | "flags": {}, 49 | "order": 11, 50 | "mode": 0, 51 | "inputs": [ 52 | { 53 | "name": "conditioning", 54 | "type": "CONDITIONING", 55 | "link": 175 56 | } 57 | ], 58 | "outputs": [ 59 | { 60 | "name": "CONDITIONING", 61 | "type": "CONDITIONING", 62 | "links": [ 63 | 129 64 | ], 65 | "slot_index": 0, 66 | "shape": 3 67 | } 
68 | ], 69 | "properties": { 70 | "Node name for S&R": "FluxGuidance" 71 | }, 72 | "widgets_values": [ 73 | 6 74 | ], 75 | "color": "#233", 76 | "bgcolor": "#355" 77 | }, 78 | { 79 | "id": 22, 80 | "type": "BasicGuider", 81 | "pos": [ 82 | 600, 83 | 0 84 | ], 85 | "size": [ 86 | 222.3482666015625, 87 | 46 88 | ], 89 | "flags": {}, 90 | "order": 13, 91 | "mode": 0, 92 | "inputs": [ 93 | { 94 | "name": "model", 95 | "type": "MODEL", 96 | "link": 195, 97 | "slot_index": 0 98 | }, 99 | { 100 | "name": "conditioning", 101 | "type": "CONDITIONING", 102 | "link": 129, 103 | "slot_index": 1 104 | } 105 | ], 106 | "outputs": [ 107 | { 108 | "name": "GUIDER", 109 | "type": "GUIDER", 110 | "links": [ 111 | 30 112 | ], 113 | "slot_index": 0, 114 | "shape": 3 115 | } 116 | ], 117 | "properties": { 118 | "Node name for S&R": "BasicGuider" 119 | }, 120 | "widgets_values": [] 121 | }, 122 | { 123 | "id": 25, 124 | "type": "RandomNoise", 125 | "pos": [ 126 | 479, 127 | 618 128 | ], 129 | "size": [ 130 | 315, 131 | 82 132 | ], 133 | "flags": {}, 134 | "order": 1, 135 | "mode": 0, 136 | "inputs": [], 137 | "outputs": [ 138 | { 139 | "name": "NOISE", 140 | "type": "NOISE", 141 | "links": [ 142 | 37 143 | ], 144 | "shape": 3 145 | } 146 | ], 147 | "properties": { 148 | "Node name for S&R": "RandomNoise" 149 | }, 150 | "widgets_values": [ 151 | 231073068812961, 152 | "randomize" 153 | ], 154 | "color": "#2a363b", 155 | "bgcolor": "#3f5159" 156 | }, 157 | { 158 | "id": 67, 159 | "type": "ModelSamplingSD3", 160 | "pos": [ 161 | 360, 162 | 0 163 | ], 164 | "size": [ 165 | 210, 166 | 58 167 | ], 168 | "flags": {}, 169 | "order": 7, 170 | "mode": 0, 171 | "inputs": [ 172 | { 173 | "name": "model", 174 | "type": "MODEL", 175 | "link": 286 176 | } 177 | ], 178 | "outputs": [ 179 | { 180 | "name": "MODEL", 181 | "type": "MODEL", 182 | "links": [ 183 | 195 184 | ], 185 | "slot_index": 0 186 | } 187 | ], 188 | "properties": { 189 | "Node name for S&R": "ModelSamplingSD3" 190 | }, 191 | 
"widgets_values": [ 192 | 7 193 | ] 194 | }, 195 | { 196 | "id": 12, 197 | "type": "UNETLoader", 198 | "pos": [ 199 | 0, 200 | 136.59603881835938 201 | ], 202 | "size": [ 203 | 350, 204 | 82 205 | ], 206 | "flags": {}, 207 | "order": 2, 208 | "mode": 0, 209 | "inputs": [], 210 | "outputs": [ 211 | { 212 | "name": "MODEL", 213 | "type": "MODEL", 214 | "links": [ 215 | 286, 216 | 287 217 | ], 218 | "slot_index": 0, 219 | "shape": 3 220 | } 221 | ], 222 | "properties": { 223 | "Node name for S&R": "UNETLoader" 224 | }, 225 | "widgets_values": [ 226 | "hunyuan_video_720_cfgdistill_bf16.safetensors", 227 | "default" 228 | ], 229 | "color": "#223", 230 | "bgcolor": "#335" 231 | }, 232 | { 233 | "id": 73, 234 | "type": "VAEDecodeTiled", 235 | "pos": [ 236 | 1168.747802734375, 237 | 197.42025756835938 238 | ], 239 | "size": [ 240 | 210, 241 | 102 242 | ], 243 | "flags": {}, 244 | "order": 16, 245 | "mode": 0, 246 | "inputs": [ 247 | { 248 | "name": "samples", 249 | "type": "LATENT", 250 | "link": 233 251 | }, 252 | { 253 | "name": "vae", 254 | "type": "VAE", 255 | "link": 232 256 | } 257 | ], 258 | "outputs": [ 259 | { 260 | "name": "IMAGE", 261 | "type": "IMAGE", 262 | "links": [ 263 | 234 264 | ], 265 | "slot_index": 0 266 | } 267 | ], 268 | "properties": { 269 | "Node name for S&R": "VAEDecodeTiled" 270 | }, 271 | "widgets_values": [ 272 | 256, 273 | 64 274 | ] 275 | }, 276 | { 277 | "id": 11, 278 | "type": "DualCLIPLoader", 279 | "pos": [ 280 | 0, 281 | 270 282 | ], 283 | "size": [ 284 | 350, 285 | 106 286 | ], 287 | "flags": {}, 288 | "order": 3, 289 | "mode": 0, 290 | "inputs": [], 291 | "outputs": [ 292 | { 293 | "name": "CLIP", 294 | "type": "CLIP", 295 | "links": [ 296 | 205 297 | ], 298 | "slot_index": 0, 299 | "shape": 3 300 | } 301 | ], 302 | "properties": { 303 | "Node name for S&R": "DualCLIPLoader" 304 | }, 305 | "widgets_values": [ 306 | "clip_l.safetensors", 307 | "llava_llama3_fp8_scaled.safetensors", 308 | "hunyuan_video" 309 | ] 310 | }, 311 | { 312 | 
"id": 13, 313 | "type": "SamplerCustomAdvanced", 314 | "pos": [ 315 | 860, 316 | 200 317 | ], 318 | "size": [ 319 | 272.3617858886719, 320 | 326 321 | ], 322 | "flags": {}, 323 | "order": 15, 324 | "mode": 0, 325 | "inputs": [ 326 | { 327 | "name": "noise", 328 | "type": "NOISE", 329 | "link": 37, 330 | "slot_index": 0 331 | }, 332 | { 333 | "name": "guider", 334 | "type": "GUIDER", 335 | "link": 30, 336 | "slot_index": 1 337 | }, 338 | { 339 | "name": "sampler", 340 | "type": "SAMPLER", 341 | "link": 19, 342 | "slot_index": 2 343 | }, 344 | { 345 | "name": "sigmas", 346 | "type": "SIGMAS", 347 | "link": 20, 348 | "slot_index": 3 349 | }, 350 | { 351 | "name": "latent_image", 352 | "type": "LATENT", 353 | "link": 244, 354 | "slot_index": 4 355 | } 356 | ], 357 | "outputs": [ 358 | { 359 | "name": "output", 360 | "type": "LATENT", 361 | "links": [ 362 | 233 363 | ], 364 | "slot_index": 0, 365 | "shape": 3 366 | }, 367 | { 368 | "name": "denoised_output", 369 | "type": "LATENT", 370 | "links": null, 371 | "shape": 3 372 | } 373 | ], 374 | "properties": { 375 | "Node name for S&R": "SamplerCustomAdvanced" 376 | }, 377 | "widgets_values": [] 378 | }, 379 | { 380 | "id": 45, 381 | "type": "EmptyHunyuanLatentVideo", 382 | "pos": [ 383 | 475.540771484375, 384 | 432.673583984375 385 | ], 386 | "size": [ 387 | 315, 388 | 130 389 | ], 390 | "flags": {}, 391 | "order": 4, 392 | "mode": 0, 393 | "inputs": [], 394 | "outputs": [ 395 | { 396 | "name": "LATENT", 397 | "type": "LATENT", 398 | "links": [], 399 | "slot_index": 0 400 | } 401 | ], 402 | "properties": { 403 | "Node name for S&R": "EmptyHunyuanLatentVideo" 404 | }, 405 | "widgets_values": [ 406 | 512, 407 | 768, 408 | 89, 409 | 1 410 | ] 411 | }, 412 | { 413 | "id": 89, 414 | "type": "VAEEncode", 415 | "pos": [ 416 | 230.3067626953125, 417 | 694.9825439453125 418 | ], 419 | "size": [ 420 | 210, 421 | 46 422 | ], 423 | "flags": {}, 424 | "order": 14, 425 | "mode": 0, 426 | "inputs": [ 427 | { 428 | "name": "pixels", 429 
| "type": "IMAGE", 430 | "link": 282 431 | }, 432 | { 433 | "name": "vae", 434 | "type": "VAE", 435 | "link": 240 436 | } 437 | ], 438 | "outputs": [ 439 | { 440 | "name": "LATENT", 441 | "type": "LATENT", 442 | "links": [ 443 | 244 444 | ], 445 | "slot_index": 0 446 | } 447 | ], 448 | "properties": { 449 | "Node name for S&R": "VAEEncode" 450 | }, 451 | "widgets_values": [] 452 | }, 453 | { 454 | "id": 83, 455 | "type": "VHS_VideoCombine", 456 | "pos": [ 457 | 1404.773681640625, 458 | 201.5826873779297 459 | ], 460 | "size": [ 461 | 486.56744384765625, 462 | 1023.8511962890625 463 | ], 464 | "flags": {}, 465 | "order": 17, 466 | "mode": 0, 467 | "inputs": [ 468 | { 469 | "name": "images", 470 | "type": "IMAGE", 471 | "link": 234 472 | }, 473 | { 474 | "name": "audio", 475 | "type": "AUDIO", 476 | "link": null, 477 | "shape": 7 478 | }, 479 | { 480 | "name": "meta_batch", 481 | "type": "VHS_BatchManager", 482 | "link": null, 483 | "shape": 7 484 | }, 485 | { 486 | "name": "vae", 487 | "type": "VAE", 488 | "link": null, 489 | "shape": 7 490 | } 491 | ], 492 | "outputs": [ 493 | { 494 | "name": "Filenames", 495 | "type": "VHS_FILENAMES", 496 | "links": null 497 | } 498 | ], 499 | "properties": { 500 | "Node name for S&R": "VHS_VideoCombine" 501 | }, 502 | "widgets_values": { 503 | "frame_rate": 24, 504 | "loop_count": 0, 505 | "filename_prefix": "vered\\hi", 506 | "format": "video/h264-mp4", 507 | "pix_fmt": "yuv420p", 508 | "crf": 19, 509 | "save_metadata": true, 510 | "pingpong": false, 511 | "save_output": true, 512 | "videopreview": { 513 | "hidden": false, 514 | "paused": false, 515 | "params": { 516 | "filename": "hi_00102.mp4", 517 | "subfolder": "vered", 518 | "type": "output", 519 | "format": "video/h264-mp4", 520 | "frame_rate": 24, 521 | "workflow": "HunyuanVideo_00005.png", 522 | "fullpath": "/data/app/output/HunyuanVideo_00005.mp4" 523 | }, 524 | "muted": false 525 | } 526 | } 527 | }, 528 | { 529 | "id": 10, 530 | "type": "VAELoader", 531 | "pos": [ 532 
| -5.20380973815918, 533 | 457.2947082519531 534 | ], 535 | "size": [ 536 | 350, 537 | 60 538 | ], 539 | "flags": {}, 540 | "order": 5, 541 | "mode": 0, 542 | "inputs": [], 543 | "outputs": [ 544 | { 545 | "name": "VAE", 546 | "type": "VAE", 547 | "links": [ 548 | 232, 549 | 240 550 | ], 551 | "slot_index": 0, 552 | "shape": 3 553 | } 554 | ], 555 | "properties": { 556 | "Node name for S&R": "VAELoader" 557 | }, 558 | "widgets_values": [ 559 | "hunyuan_video_vae_bf16.safetensors" 560 | ] 561 | }, 562 | { 563 | "id": 91, 564 | "type": "LoadImage", 565 | "pos": [ 566 | -857.1005859375, 567 | 472.9724426269531 568 | ], 569 | "size": [ 570 | 315, 571 | 314 572 | ], 573 | "flags": {}, 574 | "order": 6, 575 | "mode": 0, 576 | "inputs": [], 577 | "outputs": [ 578 | { 579 | "name": "IMAGE", 580 | "type": "IMAGE", 581 | "links": [ 582 | 261 583 | ], 584 | "slot_index": 0 585 | }, 586 | { 587 | "name": "MASK", 588 | "type": "MASK", 589 | "links": null 590 | } 591 | ], 592 | "properties": { 593 | "Node name for S&R": "LoadImage" 594 | }, 595 | "widgets_values": [ 596 | "man dance frame 1.png", 597 | "image" 598 | ] 599 | }, 600 | { 601 | "id": 113, 602 | "type": "ImageMotionGuider", 603 | "pos": [ 604 | -434.5153503417969, 605 | 690.4210815429688 606 | ], 607 | "size": [ 608 | 315, 609 | 106 610 | ], 611 | "flags": {}, 612 | "order": 12, 613 | "mode": 0, 614 | "inputs": [ 615 | { 616 | "name": "image", 617 | "type": "IMAGE", 618 | "link": 281 619 | } 620 | ], 621 | "outputs": [ 622 | { 623 | "name": "IMAGE", 624 | "type": "IMAGE", 625 | "links": [ 626 | 282 627 | ], 628 | "slot_index": 0 629 | } 630 | ], 631 | "properties": { 632 | "Node name for S&R": "ImageMotionGuider" 633 | }, 634 | "widgets_values": [ 635 | -30, 636 | 79, 637 | 0 638 | ], 639 | "color": "#432", 640 | "bgcolor": "#653" 641 | }, 642 | { 643 | "id": 98, 644 | "type": "Image Resize", 645 | "pos": [ 646 | -433.2084045410156, 647 | 452.5023498535156 648 | ], 649 | "size": [ 650 | 315, 651 | 178 652 | ], 653 | 
"flags": {}, 654 | "order": 10, 655 | "mode": 0, 656 | "inputs": [ 657 | { 658 | "name": "image", 659 | "type": "IMAGE", 660 | "link": 261 661 | } 662 | ], 663 | "outputs": [ 664 | { 665 | "name": "IMAGE", 666 | "type": "IMAGE", 667 | "links": [ 668 | 281 669 | ], 670 | "slot_index": 0 671 | } 672 | ], 673 | "properties": { 674 | "Node name for S&R": "Image Resize" 675 | }, 676 | "widgets_values": [ 677 | "resize", 678 | "true", 679 | "lanczos", 680 | 0.5, 681 | 512, 682 | 768 683 | ] 684 | }, 685 | { 686 | "id": 44, 687 | "type": "CLIPTextEncode", 688 | "pos": [ 689 | 386.09552001953125, 690 | 223.65451049804688 691 | ], 692 | "size": [ 693 | 422.84503173828125, 694 | 164.31304931640625 695 | ], 696 | "flags": {}, 697 | "order": 9, 698 | "mode": 0, 699 | "inputs": [ 700 | { 701 | "name": "clip", 702 | "type": "CLIP", 703 | "link": 205 704 | } 705 | ], 706 | "outputs": [ 707 | { 708 | "name": "CONDITIONING", 709 | "type": "CONDITIONING", 710 | "links": [ 711 | 175 712 | ], 713 | "slot_index": 0 714 | } 715 | ], 716 | "title": "CLIP Text Encode (Positive Prompt)", 717 | "properties": { 718 | "Node name for S&R": "CLIPTextEncode" 719 | }, 720 | "widgets_values": [ 721 | "a Spanish man, dressed in a stylish vest and hat, vigorously dances in the heart of a lively outdoor celebration. His arms swing wide, feet tap rhythmically, and body sways to the infectious beat. Surrounded by an enthusiastic crowd, he effortlessly commands the stage, his every step radiating joy and passion. As he spins, the vibrant red canopy above sways gently, casting a warm glow over the festive scene." 
722 | ], 723 | "color": "#232", 724 | "bgcolor": "#353" 725 | }, 726 | { 727 | "id": 17, 728 | "type": "BasicScheduler", 729 | "pos": [ 730 | 478, 731 | 860 732 | ], 733 | "size": [ 734 | 315, 735 | 106 736 | ], 737 | "flags": {}, 738 | "order": 8, 739 | "mode": 0, 740 | "inputs": [ 741 | { 742 | "name": "model", 743 | "type": "MODEL", 744 | "link": 287, 745 | "slot_index": 0 746 | } 747 | ], 748 | "outputs": [ 749 | { 750 | "name": "SIGMAS", 751 | "type": "SIGMAS", 752 | "links": [ 753 | 20 754 | ], 755 | "shape": 3 756 | } 757 | ], 758 | "properties": { 759 | "Node name for S&R": "BasicScheduler" 760 | }, 761 | "widgets_values": [ 762 | "simple", 763 | 20, 764 | 0.85 765 | ] 766 | } 767 | ], 768 | "links": [ 769 | [ 770 | 19, 771 | 16, 772 | 0, 773 | 13, 774 | 2, 775 | "SAMPLER" 776 | ], 777 | [ 778 | 20, 779 | 17, 780 | 0, 781 | 13, 782 | 3, 783 | "SIGMAS" 784 | ], 785 | [ 786 | 30, 787 | 22, 788 | 0, 789 | 13, 790 | 1, 791 | "GUIDER" 792 | ], 793 | [ 794 | 37, 795 | 25, 796 | 0, 797 | 13, 798 | 0, 799 | "NOISE" 800 | ], 801 | [ 802 | 129, 803 | 26, 804 | 0, 805 | 22, 806 | 1, 807 | "CONDITIONING" 808 | ], 809 | [ 810 | 175, 811 | 44, 812 | 0, 813 | 26, 814 | 0, 815 | "CONDITIONING" 816 | ], 817 | [ 818 | 195, 819 | 67, 820 | 0, 821 | 22, 822 | 0, 823 | "MODEL" 824 | ], 825 | [ 826 | 205, 827 | 11, 828 | 0, 829 | 44, 830 | 0, 831 | "CLIP" 832 | ], 833 | [ 834 | 232, 835 | 10, 836 | 0, 837 | 73, 838 | 1, 839 | "VAE" 840 | ], 841 | [ 842 | 233, 843 | 13, 844 | 0, 845 | 73, 846 | 0, 847 | "LATENT" 848 | ], 849 | [ 850 | 234, 851 | 73, 852 | 0, 853 | 83, 854 | 0, 855 | "IMAGE" 856 | ], 857 | [ 858 | 240, 859 | 10, 860 | 0, 861 | 89, 862 | 1, 863 | "VAE" 864 | ], 865 | [ 866 | 244, 867 | 89, 868 | 0, 869 | 13, 870 | 4, 871 | "LATENT" 872 | ], 873 | [ 874 | 261, 875 | 91, 876 | 0, 877 | 98, 878 | 0, 879 | "IMAGE" 880 | ], 881 | [ 882 | 281, 883 | 98, 884 | 0, 885 | 113, 886 | 0, 887 | "IMAGE" 888 | ], 889 | [ 890 | 282, 891 | 113, 892 | 0, 893 | 89, 894 | 0, 895 | 
"IMAGE" 896 | ], 897 | [ 898 | 286, 899 | 12, 900 | 0, 901 | 67, 902 | 0, 903 | "MODEL" 904 | ], 905 | [ 906 | 287, 907 | 12, 908 | 0, 909 | 17, 910 | 0, 911 | "MODEL" 912 | ] 913 | ], 914 | "groups": [], 915 | "config": {}, 916 | "extra": { 917 | "ds": { 918 | "scale": 0.5989500000000009, 919 | "offset": [ 920 | 913.4013573935238, 921 | 4.3243974025415355 922 | ] 923 | }, 924 | "groupNodes": {}, 925 | "VHS_latentpreview": false, 926 | "VHS_latentpreviewrate": 0 927 | }, 928 | "version": 0.4 929 | } --------------------------------------------------------------------------------