├── .gitattributes
├── .github
│   └── workflows
│       └── publish.yml
├── ComfyUI_Live_Prompt_Interpolation.py
├── ComfyUI_Prompt_Stack_Manager.py
├── DynamicAnimatedWeights.py
├── FX_Source_Img.py
├── FlorenceTravelHelper.py
├── MaskSequenceHelper.py
├── PromptTravelHelper.py
├── README.md
├── __init__.py
├── __pycache__
│   ├── ComfyUI_Live_Prompt_Interpolation.cpython-311.pyc
│   ├── ComfyUI_Prompt_Stack_Manager.cpython-311.pyc
│   ├── DynamicAnimatedWeights.cpython-311.pyc
│   ├── FlorenceTravelHelper.cpython-311.pyc
│   ├── MaskSequenceHelper.cpython-311.pyc
│   └── PromptTravelHelper.cpython-311.pyc
├── git
├── pyproject.toml
└── workflows
    ├── AnimatedWeights 2.10 EFX.json
    ├── AnimatedWeights 2.10 EFX_tempfix.json
    ├── AnimatedWeights 2.101.json
    ├── BlipTravel_AutoWeights_1.5.json
    └── Dream Zoom 1.21.json
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Set default behavior to automatically normalize line endings
2 | * text=auto
3 |
4 | # Explicitly declare text files we want to always be normalized and converted to native line endings on checkout.
5 | *.py text
6 |
7 | # Denote all files that are truly binary and should not be modified.
8 | *.jpg binary
9 | *.png binary
10 | *.gif binary
--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: Publish to Comfy registry
2 | on:
3 |   workflow_dispatch:
4 |   push:
5 |     branches:
6 |       - main
7 |       - master
8 |     paths:
9 |       - "pyproject.toml"
10 | 
11 | jobs:
12 |   publish-node:
13 |     name: Publish Custom Node to registry
14 |     runs-on: ubuntu-latest
15 |     # If this is a forked repository, skip the workflow.
16 |     if: github.event.repository.fork == false
17 |     steps:
18 |       - name: Check out code
19 |         uses: actions/checkout@v4
20 |       - name: Publish Custom Node
21 |         uses: Comfy-Org/publish-node-action@main
22 |         with:
23 |           ## Add your own personal access token to your GitHub repository secrets and reference it here.
24 |           personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }}
25 | 
--------------------------------------------------------------------------------
/ComfyUI_Live_Prompt_Interpolation.py:
--------------------------------------------------------------------------------
1 | """
2 | @author: mgfxer
3 | @title: FrameFX
4 | @nickname: FrameFX 💫
5 | @description: This extension provides various frame and mask sequence manipulation tools for animation workflows.
6 | """
7 | import subprocess
8 | from time import gmtime, strftime
9 |
10 | # Live Prompt Interpolation
11 | # A node that executes Python with text interpolation and a character-stability check.
12 | # The resulting print() statement ends up as the output, if any.
13 | # (c) 2024 Atom, ChatGPT, and mgfxer.
14 |
15 | # Internal code to handle the interpolation and prompt logic
16 | def internal_code():
17 |     return """
18 | def lerp(a: float, b: float, t: float) -> float:
19 |     return (1 - t) * a + t * b
20 | 
21 | def generate_interpolated_prompt(prev_prompt: str, curr_prompt: str, t: float) -> str:
22 |     if t == 0:
23 |         return f"{prev_prompt}"
24 |     elif t == 1:
25 |         return f"{curr_prompt}"
26 |     else:
27 |         weighted_prev = f"({prev_prompt}:{1 - t:.2f})"
28 |         weighted_curr = f"({curr_prompt}:{t:.2f})"
29 |         return f"{weighted_prev} {weighted_curr}"
30 | 
31 | current_frame = PYTHON_CODE_BOX_SEED % (TOTAL_FRAMES + 1)
32 | 
33 | # Example previous and current prompts
34 | previous_prompt = 'PREVIOUS_PROMPT'
35 | current_prompt = 'CURRENT_PROMPT'
36 | 
37 | # Interpolation factor based on frame count
38 | if current_frame > TOTAL_FRAMES:
39 |     current_frame = TOTAL_FRAMES
40 | 
41 | t = current_frame / TOTAL_FRAMES
42 | 
43 | # Generate the interpolated prompt
44 | interpolated_prompt = generate_interpolated_prompt(previous_prompt, current_prompt, t)
45 | 
46 | # Print the interpolated prompt
47 | print(interpolated_prompt)
48 | """
49 |
50 | class LivePromptInterpolation:
51 |     RETURN_TYPES = ("STRING", "FLOAT", "FLOAT", "INT",)
52 |     RETURN_NAMES = ("text", "current_strength", "previous_strength", "toggle_state",)
53 | 
54 |     FUNCTION = "node_update_with_text_v3"
55 |     CATEGORY = "Scripting"
56 | 
57 |     def __init__(self):
58 |         self.previous_prompt = ""
59 |         self.current_prompt = ""
60 |         self.current_frame = 0
61 |         self.total_frames = 100
62 |         self.update_cycle = 10
63 |         self.char_stability_frames = 5
64 |         self.stable_char_count_frame = 0
65 |         self.last_char_count = 0
66 |         self.toggle_state = 0  # Initialize the toggle state
67 | 
68 |     @classmethod
69 |     def INPUT_TYPES(cls):
70 |         return {
71 |             "required": {
72 |                 "new_prompt": ("STRING", {"default": "Enter your prompt here..."}),
73 |                 "seed": ("INT", {"default": 311, "step": 1, "display": "number"}),
74 |                 "total_frames": ("INT", {"default": 100, "step": 1, "display": "number"}),
75 |                 "update_cycle": ("INT", {"default": 10, "step": 1, "display": "number"}),
76 |                 "min_char_count": ("INT", {"default": 30, "step": 1, "display": "number"}),
77 |                 "char_stability_frames": ("INT", {"default": 5, "step": 1, "display": "number"})
78 |             },
79 |         }
80 | 
81 |     def node_update_with_text_v3(self, new_prompt, seed, total_frames, update_cycle, min_char_count, char_stability_frames):
82 |         # Check if the new prompt is significant
83 |         def is_significant_change(prev_prompt, new_prompt, min_char_count):
84 |             char_count = len(new_prompt)
85 |             return char_count >= min_char_count
86 | 
87 |         # Debug logging
88 |         print(f"Current Frame: {self.current_frame}")
89 |         print(f"Previous Prompt: {self.previous_prompt}")
90 |         print(f"Current Prompt: {self.current_prompt}")
91 |         print(f"New Prompt: {new_prompt}")
92 |         print(f"Update Cycle: {update_cycle}")
93 |         print(f"Character Stability Frames: {char_stability_frames}")
94 | 
95 |         self.update_cycle = update_cycle  # Update cycle dynamically
96 |         self.char_stability_frames = char_stability_frames  # Update char stability frames dynamically
97 | 
98 |         # Check if character count has been stable
99 |         if len(new_prompt) == self.last_char_count:
100 |             self.stable_char_count_frame += 1
101 |         else:
102 |             self.stable_char_count_frame = 0
103 |             self.last_char_count = len(new_prompt)
104 | 
105 |         # Check if it's time to update the prompt
106 |         if (self.stable_char_count_frame >= self.char_stability_frames and
107 |                 new_prompt != self.current_prompt and
108 |                 is_significant_change(self.current_prompt, new_prompt, min_char_count)):
109 | 
110 |             self.previous_prompt = self.current_prompt
111 |             self.current_prompt = new_prompt
112 |             self.current_frame = 1  # Reset frame count for new interpolation
113 |             self.stable_char_count_frame = 0  # Reset stability counter
114 |             self.toggle_state = 1 - self.toggle_state  # Toggle the state between 0 and 1
115 |             print("New prompt detected, resetting frames and toggling state.")
116 |         elif self.current_frame < self.total_frames:
117 |             self.current_frame += 1
118 |             print("Update cycle not reached yet or prompt not stable long enough.")
119 | 
120 |         # Ensure current_frame doesn't exceed total_frames
121 |         if self.current_frame > self.total_frames:
122 |             self.current_frame = self.total_frames
123 | 
124 |         # Update total_frames dynamically
125 |         self.total_frames = total_frames
126 | 
127 |         # Interpolation calculation
128 |         t = self.current_frame / self.total_frames
129 |         current_strength = t if self.toggle_state == 0 else 1 - t
130 |         previous_strength = 1 - current_strength
131 | 
132 |         # Modify the code to include the previous and current prompts
133 |         modified_code = internal_code().replace('PREVIOUS_PROMPT', self.previous_prompt).replace('CURRENT_PROMPT', self.current_prompt).replace('TOTAL_FRAMES', str(self.total_frames))
134 |         code = f"import random; random.seed({seed}); PYTHON_CODE_BOX_SEED={self.current_frame}; {modified_code}"
135 | 
136 |         try:
137 |             proc = subprocess.Popen(["python", "-c", code], stdout=subprocess.PIPE)
138 |             code_result = proc.communicate(timeout=10)[0]  # 10-second timeout guards against runaway generated code
139 |             # Fix up result.
140 |             convert_result = code_result.decode().strip()
141 |         except subprocess.TimeoutExpired:
142 |             proc.kill()
143 |             print("Subprocess timed out and was killed.")
144 |             convert_result = ""
145 |         except Exception as e:
146 |             print(f"Error during code execution: {e}")
147 |             convert_result = ""
148 | 
149 |         t = strftime("%m-%d-%Y %H:%M:%S", gmtime())
150 |         print(f"\033[36m[{t}] PCBv3--> {convert_result}\033[0m")
151 | 
152 |         return (convert_result, current_strength, previous_strength, self.toggle_state,)
153 | 
154 |     @classmethod
155 |     def IS_CHANGED(cls, **kwargs):
156 |         return True  # Accept all node inputs and always re-evaluate
157 |
158 | # NOTE: names should be globally unique
159 | NODE_CLASS_MAPPINGS = {"LivePromptInterpolation": LivePromptInterpolation}
160 |
161 | # A dictionary that contains the friendly/humanly readable titles for the nodes
162 | NODE_DISPLAY_NAME_MAPPINGS = {"LivePromptInterpolation": "Live Prompt Interpolation"}
163 |
--------------------------------------------------------------------------------
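
Note: the embedded template above emits ComfyUI-style "(prompt:weight)" strings whose two weights always sum to 1. A minimal standalone sketch of that weighting logic, using hypothetical prompt strings:

def generate_interpolated_prompt(prev_prompt, curr_prompt, t):
    # Mirrors the template: endpoint frames pass a prompt through unweighted.
    if t == 0:
        return prev_prompt
    if t == 1:
        return curr_prompt
    return f"({prev_prompt}:{1 - t:.2f}) ({curr_prompt}:{t:.2f})"

for t in (0.0, 0.25, 0.5, 1.0):
    print(generate_interpolated_prompt("forest", "desert", t))
# forest
# (forest:0.75) (desert:0.25)
# (forest:0.50) (desert:0.50)
# desert
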
/ComfyUI_Prompt_Stack_Manager.py:
--------------------------------------------------------------------------------
1 | """
2 | @author: mgfxer
3 | @title: FrameFX
4 | @nickname: FrameFX 💫
5 | @description: This extension provides various frame and mask sequence manipulation tools for animation workflows.
6 | """
7 | class PromptStackManager:
8 |     def __init__(self):
9 |         self.prompts = []
10 |         self.current_index = 0
11 | 
12 |     @classmethod
13 |     def INPUT_TYPES(cls):
14 |         return {
15 |             "required": {
16 |                 "prompt_stack": ("STRING", {"multiline": True, "dynamicPrompts": False, "default": "Prompt 1\nPrompt 2\nPrompt 3"}),
17 |                 "frames_per_prompt": ("INT", {"default": 1, "min": 1, "max": 1000, "step": 1}),
18 |                 "seed": ("INT", {"default": 0, "min": 0, "max": 1000000, "step": 1}),
19 |             }
20 |         }
21 | 
22 |     RETURN_TYPES = ("STRING", "STRING", "INT")
23 |     RETURN_NAMES = ("current_prompt", "previous_prompt", "current_frame")
24 |     FUNCTION = "manage_prompts"
25 |     CATEGORY = "Prompt Management"
26 |     DESCRIPTION = """
27 | Manages a stack of prompts from a multiline text box and outputs the current and previous prompts.
28 | The node cycles through the prompts based on the frame count derived from the seed input.
29 | """
30 | 
31 |     def manage_prompts(self, prompt_stack, frames_per_prompt, seed):
32 |         self.prompts = prompt_stack.strip().split("\n")
33 |         num_prompts = len(self.prompts)
34 | 
35 |         if num_prompts == 0:
36 |             return ("", "", 0)
37 | 
38 |         # Calculate the current frame based on the seed
39 |         current_frame = seed % (frames_per_prompt * num_prompts)
40 | 
41 |         # Determine the current and previous prompt indices
42 |         self.current_index = current_frame // frames_per_prompt
43 |         previous_index = (self.current_index - 1) if self.current_index > 0 else (num_prompts - 1)
44 | 
45 |         current_prompt = self.prompts[self.current_index]
46 |         previous_prompt = self.prompts[previous_index] if self.current_index > 0 else current_prompt
47 | 
48 |         return (current_prompt, previous_prompt, current_frame)
49 | 
50 |     @classmethod
51 |     def IS_CHANGED(cls, prompt_stack, frames_per_prompt, seed):
52 |         return True
53 | 
54 | NODE_CLASS_MAPPINGS = {"PromptStackManager": PromptStackManager}
55 | NODE_DISPLAY_NAME_MAPPINGS = {"PromptStackManager": "Prompt Stack Manager"}
56 |
--------------------------------------------------------------------------------
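
Note: because every output is derived from `seed`, wiring an incrementing value (e.g. a frame counter) into the seed input steps through the stack deterministically. A quick sketch of the same index arithmetic, with hypothetical values:

prompts = ["Prompt 1", "Prompt 2", "Prompt 3"]
frames_per_prompt = 4

for seed in range(13):
    current_frame = seed % (frames_per_prompt * len(prompts))
    current_index = current_frame // frames_per_prompt
    print(seed, current_frame, prompts[current_index])
# seeds 0-3 -> Prompt 1, seeds 4-7 -> Prompt 2, seeds 8-11 -> Prompt 3, seed 12 wraps to Prompt 1
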
/DynamicAnimatedWeights.py:
--------------------------------------------------------------------------------
1 | """
2 | @author: mgfxer
3 | @title: FrameFX
4 | @nickname: FrameFX 💫
5 | @description: This extension provides various frame and mask sequence manipulation tools for animation workflows.
6 | """
7 | import os
8 | from PIL import Image, ImageDraw, ImageFilter, ImageOps, ImageChops, ImageEnhance
9 | import numpy as np
10 | import torch
11 | import random
12 |
13 | class DynamicAnimatedWeightsHelper:
14 |     def __init__(self):
15 |         pass
16 | 
17 |     @classmethod
18 |     def INPUT_TYPES(cls):
19 |         base_animation_types = ["LeftToRight", "RightToLeft", "TopDown", "BottomToTop", "GrowingCircle", "ShrinkingCircle",
20 |                                 "DiagonalTopLeft-BottomRight", "DiagonalBottomRight-TopLeft", "DiagonalTopRight-BottomLeft",
21 |                                 "DiagonalBottomLeft-TopRight", "Fade", "SqSpinCw", "SqSpinCCW", "VenetianBlindsHorizontal",
22 |                                 "VenetianBlindsVertical", "DiagonalVenetianBlinds1", "DiagonalVenetianBlinds2"]
23 |         animation_types = base_animation_types + ["Random", "RandomNoVenetian"]
24 |         easing_options = ["ease_in", "ease_out", "ease_in_out", "false"]
25 |         generation_options = ["Only Transitions", "Generate QR", "Generate Edge-FX", "Generate All"]
26 |         return {
27 |             "required": {
28 |                 "animation_type_1": (animation_types, {"default": cls.random_animation()}),
29 |                 "animation_type_2": (animation_types, {"default": cls.random_animation()}),
30 |                 "animation_type_3": (animation_types, {"default": cls.random_animation()}),
31 |                 "animation_type_4": (animation_types, {"default": cls.random_animation()}),
32 |                 "animation_type_5": (animation_types, {"default": cls.random_animation()}),
33 |                 "animation_type_6": (animation_types, {"default": cls.random_animation()}),
34 |                 "animation_type_7": (animation_types, {"default": cls.random_animation()}),
35 |                 "animation_type_8": (animation_types, {"default": cls.random_animation()}),
36 |                 "animation_type_9": (animation_types, {"default": cls.random_animation()}),
37 |                 "animation_type_10": (animation_types, {"default": cls.random_animation()}),
38 |                 "animation_type_11": (animation_types, {"default": cls.random_animation()}),
39 |                 "animation_type_12": (animation_types, {"default": cls.random_animation()}),
40 |                 "transition_easing": (easing_options, {"default": "false"}),
41 |                 "blur_easing": (easing_options, {"default": "false"}),
42 |                 "frame_width": ("INT", {"default": 512, "min": 1, "step": 1, "display": "number"}),
43 |                 "frame_height": ("INT", {"default": 512, "min": 1, "step": 1, "display": "number"}),
44 |                 "hold_frames": ("INT", {"default": 8, "min": 1, "step": 1, "display": "number"}),
45 |                 "transition_frames": ("INT", {"default": 20, "min": 1, "step": 1, "display": "number"}),
46 |                 "padding_frames": ("INT", {"default": 6, "min": 0, "step": 1, "display": "number"}),
47 |                 "input_frames": ("INT", {"default": 5, "min": 1, "step": 1, "display": "number"}),
48 |                 "gaussian_blur_amount": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.1, "display": "slider"}),
49 |                 "edge_fx_thickness": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1, "display": "number"}),
50 |                 "push_fx": ("INT", {"default": 0, "min": 0, "max": 30, "step": 1, "display": "number"}),
51 |                 "retract_fx": ("INT", {"default": 0, "min": 0, "max": 30, "step": 1, "display": "number"}),
52 |                 "fx_cull_white_frames": ("FLOAT", {"default": 10.0, "min": 0.0, "max": 100.0, "step": 0.1, "display": "slider"}),
53 |                 "qr_greyness": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01, "display": "slider"}),
54 |                 "random_seed": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.1, "display": "slider"}),
55 |                 "edgeFade_contrast": ("FLOAT", {"default": 1.0, "min": 0.5, "max": 6.0, "step": 0.1, "display": "slider"}),  # Increased contrast effectiveness
56 |                 "edgeFade_blur": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.1, "display": "slider"}),  # Final blur adjustment for Edge FX Fade
57 |                 "generation_mode": (generation_options, {"default": "Only Transitions"}),
58 |                 "edge_fx_fade_balance": ("FLOAT", {"default": 0.5, "min": 0.1, "max": 0.9, "step": 0.05, "display": "slider"}),
59 |                 "venetian_bars": ("INT", {"default": 4, "min": 1, "step": 1, "display": "number"})
60 |             }
61 |         }
62 | 
63 |     @classmethod
64 |     def random_animation(cls, exclude_venetian=False):
65 |         venetian_types = ["VenetianBlindsHorizontal", "VenetianBlindsVertical", "DiagonalVenetianBlinds1", "DiagonalVenetianBlinds2"]
66 |         base_animation_types = ["LeftToRight", "RightToLeft", "TopDown", "BottomToTop", "GrowingCircle", "ShrinkingCircle",
67 |                                 "DiagonalTopLeft-BottomRight", "DiagonalBottomRight-TopLeft", "DiagonalTopRight-BottomLeft",
68 |                                 "DiagonalBottomLeft-TopRight", "Fade", "SqSpinCw", "SqSpinCCW", "VenetianBlindsHorizontal",
69 |                                 "VenetianBlindsVertical", "DiagonalVenetianBlinds1", "DiagonalVenetianBlinds2"]
70 |         available_types = base_animation_types
71 |         if exclude_venetian:
72 |             available_types = [t for t in available_types if t not in venetian_types]
73 |         return random.choice(available_types)
74 | 
75 |     RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "IMAGE", "INT")
76 |     RETURN_NAMES = ("transitions", "qr_mode", "edge_fx", "edge_fx_fade", "total_frames")
77 |     FUNCTION = "run"
78 |     CATEGORY = "mgfxer"
79 | 
80 |     def run(self, **kwargs):
81 |         animation_types = [kwargs[f'animation_type_{i}'] for i in range(1, 13)]
82 |         frame_width = kwargs['frame_width']
83 |         frame_height = kwargs['frame_height']
84 |         hold_frames = kwargs['hold_frames']
85 |         transition_frames = kwargs['transition_frames']
86 |         padding_frames = kwargs['padding_frames']
87 |         input_frames = kwargs['input_frames']
88 |         gaussian_blur_amount = kwargs['gaussian_blur_amount'] * 20  # Amplify the blur amount here
89 |         edgeFade_blur = kwargs['edgeFade_blur'] * 10  # Amplify the final blur amount by 10
90 |         edgeFade_contrast = kwargs['edgeFade_contrast']
91 |         transition_easing = kwargs['transition_easing']
92 |         blur_easing = kwargs['blur_easing']
93 |         qr_greyness = kwargs['qr_greyness']
94 |         random_seed = int(kwargs['random_seed'] * 10)
95 |         edge_fx_thickness = kwargs['edge_fx_thickness']
96 |         push_fx = kwargs['push_fx']
97 |         retract_fx = kwargs['retract_fx']
98 |         fx_cull_white_frames = kwargs['fx_cull_white_frames']
99 |         generation_mode = kwargs['generation_mode']
100 |         edge_fx_fade_balance = kwargs['edge_fx_fade_balance']
101 |         venetian_bars = kwargs['venetian_bars']
102 | 
103 |         random.seed(random_seed)
104 | 
105 |         images = []
106 |         qr_images = []
107 |         edge_fx_frames = []
108 |         edge_fx_fade_frames = []
109 |         total_frames = 0
110 |         for i in range(input_frames):
111 |             animation_type = animation_types[i % 12]
112 |             if animation_type == "Random":
113 |                 animation_type = self.random_animation()
114 |             elif animation_type == "RandomNoVenetian":
115 |                 animation_type = self.random_animation(exclude_venetian=True)
116 | 
117 |             frames, frame_count = self.generate_animation(animation_type, frame_width, frame_height, hold_frames, transition_frames, gaussian_blur_amount, transition_easing, blur_easing, venetian_bars)
118 | 
119 |             if generation_mode in ["Generate QR", "Generate All"]:
120 |                 qr_frames = self.apply_qr_mode(frames, i, input_frames, hold_frames + transition_frames)
121 |                 qr_images.extend(qr_frames)
122 | 
123 |             images.extend(frames)
124 |             total_frames += frame_count
125 | 
126 |         final_padding_color_qr = 'black' if input_frames % 2 == 0 else 'white'
127 |         for _ in range(padding_frames):
128 |             frame = self.create_frame(frame_width, frame_height, is_black=False)
129 |             images.append(frame)
130 |             if generation_mode in ["Generate QR", "Generate All"]:
131 |                 qr_frame = self.create_frame(frame_width, frame_height, is_black=(final_padding_color_qr == 'black'))
132 |                 qr_images.append(qr_frame)
133 |             total_frames += 1
134 | 
135 |         if generation_mode in ["Generate QR", "Generate All"]:
136 |             qr_images = self.apply_qr_greyness(qr_images, qr_greyness, frame_width, frame_height)
137 | 
138 |         if generation_mode in ["Generate Edge-FX", "Generate All"]:
139 |             edge_fx_frames = self.generate_edge_fx(images, edge_fx_thickness, frame_width, frame_height, fx_cull_white_frames)
140 |             edge_fx_frames = self.apply_push_retract_fx(edge_fx_frames, push_fx, retract_fx, frame_width, frame_height)
141 |             edge_fx_frames = self.check_and_correct_white_frames(edge_fx_frames, frame_width, frame_height, fx_cull_white_frames)
142 | 
143 |         if generation_mode == "Generate All":
144 |             unblurred_edge_fx = edge_fx_frames.copy()
145 |             final_edge_fx = self.composite_edge_fx(unblurred_edge_fx, edge_fx_frames)
146 |             edge_fx_fade_frames = self.apply_fade_to_edge_fx(final_edge_fx, transition_frames, hold_frames, edge_fx_fade_balance)
147 |             edge_fx_fade_frames = self.apply_contrast_to_frames(edge_fx_fade_frames, edgeFade_contrast)
148 |             edge_fx_fade_frames = self.apply_blur_to_frames(edge_fx_fade_frames, edgeFade_blur)
149 | 
150 |         image_batch = torch.cat([self.process_image_for_output(frame) for frame in images], dim=0)
151 |         qr_image_batch = torch.cat([self.process_image_for_output(frame) for frame in (qr_images if qr_images else images)], dim=0)
152 |         edge_fx_batch = torch.cat([self.process_image_for_output(frame) for frame in (edge_fx_frames if edge_fx_frames else images)], dim=0)
153 |         edge_fx_fade_batch = torch.cat([self.process_image_for_output(frame) for frame in (edge_fx_fade_frames if edge_fx_fade_frames else images)], dim=0)
154 | 
155 |         return (image_batch, qr_image_batch, edge_fx_batch, edge_fx_fade_batch, total_frames)
156 | 
157 |     def generate_animation(self, animation_type, frame_width, frame_height, hold_frames, transition_frames, gaussian_blur_amount, transition_easing, blur_easing, venetian_bars):
158 |         if animation_type == "LeftToRight":
159 |             return self.generate_left_to_right_animation(frame_width, frame_height, hold_frames, transition_frames, gaussian_blur_amount, transition_easing, blur_easing)
160 |         elif animation_type == "RightToLeft":
161 |             return self.generate_rotated_animation(self.generate_left_to_right_animation, frame_width, frame_height, hold_frames, transition_frames, gaussian_blur_amount, transition_easing, blur_easing, 180)
162 |         elif animation_type == "TopDown":
163 |             return self.generate_rotated_animation(self.generate_left_to_right_animation, frame_width, frame_height, hold_frames, transition_frames, gaussian_blur_amount, transition_easing, blur_easing, 90, resize=True)
164 |         elif animation_type == "BottomToTop":
165 |             return self.generate_rotated_animation(self.generate_left_to_right_animation, frame_width, frame_height, hold_frames, transition_frames, gaussian_blur_amount, transition_easing, blur_easing, 270, resize=True)
166 |         elif animation_type == "GrowingCircle":
167 |             return self.generate_growing_circle_animation(frame_width, frame_height, hold_frames, transition_frames, gaussian_blur_amount, transition_easing, blur_easing)
168 |         elif animation_type == "ShrinkingCircle":
169 |             return self.generate_shrinking_circle_animation(frame_width, frame_height, hold_frames, transition_frames, gaussian_blur_amount, transition_easing, blur_easing)
170 |         elif animation_type in ["DiagonalTopLeft-BottomRight", "DiagonalBottomRight-TopLeft", "DiagonalTopRight-BottomLeft", "DiagonalBottomLeft-TopRight"]:
171 |             return self.generate_diagonal_animation(animation_type, frame_width, frame_height, hold_frames, transition_frames, gaussian_blur_amount, transition_easing, blur_easing)
172 |         elif animation_type == "Fade":
173 |             return self.generate_fade_animation(frame_width, frame_height, hold_frames, transition_frames, transition_easing, blur_easing)
174 |         elif animation_type == "SqSpinCw":
175 |             return self.generate_sq_spin_animation(frame_width, frame_height, hold_frames, transition_frames, gaussian_blur_amount, transition_easing, blur_easing, clockwise=True)
176 |         elif animation_type == "SqSpinCCW":
177 |             return self.generate_sq_spin_animation(frame_width, frame_height, hold_frames, transition_frames, gaussian_blur_amount, transition_easing, blur_easing, clockwise=False)
178 |         elif animation_type == "VenetianBlindsHorizontal":
179 |             return self.generate_venetian_blinds_animation(frame_width, frame_height, hold_frames, transition_frames, gaussian_blur_amount, transition_easing, blur_easing, vertical=False, venetian_bars=venetian_bars)
180 |         elif animation_type == "VenetianBlindsVertical":
181 |             return self.generate_venetian_blinds_animation(frame_width, frame_height, hold_frames, transition_frames, gaussian_blur_amount, transition_easing, blur_easing, vertical=True, venetian_bars=venetian_bars)
182 |         elif animation_type == "DiagonalVenetianBlinds1":
183 |             return self.generate_diagonal_venetian_blinds_animation(frame_width, frame_height, hold_frames, transition_frames, gaussian_blur_amount, transition_easing, blur_easing, angle=45, venetian_bars=venetian_bars)
184 |         elif animation_type == "DiagonalVenetianBlinds2":
185 |             return self.generate_diagonal_venetian_blinds_animation(frame_width, frame_height, hold_frames, transition_frames, gaussian_blur_amount, transition_easing, blur_easing, angle=135, venetian_bars=venetian_bars)
186 | 
187 |     def generate_edge_fx(self, frames, offset, frame_width, frame_height, fx_cull_white_frames):
188 |         edge_fx_frames = []
189 |         for i in range(len(frames)):
190 |             current_frame = frames[i]
191 |             if i + offset < len(frames):
192 |                 next_frame = frames[i + offset]
193 |             else:
194 |                 next_frame = Image.new('RGB', (frame_width, frame_height), color='white')
195 | 
196 |             mask = ImageChops.difference(current_frame, next_frame)
197 |             mask = mask.convert('L').point(lambda x: 255 if x > 0 else 0, mode='1')
198 | 
199 |             edge_fx_frame = Image.new('RGB', (frame_width, frame_height), color='black')
200 |             edge_fx_frame.paste(Image.new('RGB', (frame_width, frame_height), color='white'), mask=mask)
201 | 
202 |             if ImageChops.invert(edge_fx_frame).getbbox() is None:
203 |                 edge_fx_frame = Image.new('RGB', (frame_width, frame_height), color='black')
204 | 
205 |             edge_fx_frames.append(edge_fx_frame)
206 | 
207 |         edge_fx_frames = self.check_and_correct_white_frames(edge_fx_frames, frame_width, frame_height, fx_cull_white_frames)
208 | 
209 |         return edge_fx_frames
210 | 
211 |     def check_and_correct_white_frames(self, frames, frame_width, frame_height, fx_cull_white_frames):
212 |         corrected_frames = []
213 |         for frame in frames:
214 |             white_pixels = np.sum(np.array(frame) == 255)
215 |             total_pixels = frame_width * frame_height * 3
216 |             white_percentage = (white_pixels / total_pixels) * 100
217 | 
218 |             if white_percentage > fx_cull_white_frames:
219 |                 corrected_frame = Image.new('RGB', (frame_width, frame_height), color='black')
220 |             else:
221 |                 corrected_frame = frame
222 |             corrected_frames.append(corrected_frame)
223 |         return corrected_frames
224 | 
225 |     def apply_push_retract_fx(self, frames, push_fx, retract_fx, frame_width, frame_height):
226 |         black_frame = Image.new('RGB', (frame_width, frame_height), color='black')
227 | 
228 |         if push_fx > 0:
229 |             frames = [black_frame] * push_fx + frames[:-push_fx]
230 |         elif retract_fx > 0:
231 |             frames = frames[retract_fx:] + [black_frame] * retract_fx
232 | 
233 |         return frames
234 | 
235 |     def apply_qr_greyness(self, frames, qr_greyness, frame_width, frame_height):
236 |         grey_image_white = Image.new('RGB', (frame_width, frame_height), color=(255, 255, 255))
237 |         grey_image_black = Image.new('RGB', (frame_width, frame_height), color=(235, 235, 235))
238 |         grey_blend_white = lambda img: Image.blend(img, grey_image_white, qr_greyness)
239 |         grey_blend_black = lambda img: Image.blend(img, grey_image_black, qr_greyness)
240 |         frames = [grey_blend_black(grey_blend_white(frame)) for frame in frames]
241 |         return frames
242 | 
243 |     def generate_diagonal_venetian_blinds_animation(self, frame_width, frame_height, hold_frames, transition_frames, gaussian_blur_amount, transition_easing, blur_easing, angle, venetian_bars):
244 |         frames, _ = self.generate_venetian_blinds_animation(frame_width, frame_height, hold_frames, transition_frames, gaussian_blur_amount, transition_easing, blur_easing, vertical=True, venetian_bars=venetian_bars)
245 | 
246 |         enlarged_frames = []
247 |         for frame in frames:
248 |             large_canvas = Image.new('RGB', (frame_width * 2, frame_height * 2), 'black')
249 |             enlarged_frame = frame.resize((int(frame_width * 1.5), int(frame_height * 1.5)), Image.LANCZOS)
250 |             large_canvas.paste(enlarged_frame, ((large_canvas.width - enlarged_frame.width) // 2, (large_canvas.height - enlarged_frame.height) // 2))
251 | 
252 |             rotated_frame = large_canvas.rotate(angle, expand=True)
253 | 
254 |             cropped_frame = rotated_frame.crop(((rotated_frame.width - frame_width) // 2, (rotated_frame.height - frame_height) // 2,
255 |                                                 (rotated_frame.width + frame_width) // 2, (rotated_frame.height + frame_height) // 2))
256 | 
257 |             enlarged_frames.append(cropped_frame)
258 | 
259 |         total_frames = hold_frames + transition_frames
260 |         return enlarged_frames, total_frames
261 | 
262 |     def generate_venetian_blinds_animation(self, frame_width, frame_height, hold_frames, transition_frames, gaussian_blur_amount, transition_easing, blur_easing, vertical=True, venetian_bars=4):
263 |         images = []
264 |         bar_size = frame_height // venetian_bars if vertical else frame_width // venetian_bars
265 | 
266 |         for _ in range(hold_frames):
267 |             frame = self.create_frame(frame_width, frame_height, is_black=True)
268 |             images.append(frame)
269 | 
270 |         for i in range(transition_frames):
271 |             ease_factor = self.calculate_ease_factor(i, transition_frames, transition_easing)
272 |             blur_factor = self.calculate_blur_factor(i, transition_frames, blur_easing, gaussian_blur_amount)
273 | 
274 |             frame = self.create_venetian_blinds_transition_frame(frame_width, frame_height, venetian_bars, bar_size, ease_factor, blur_factor['gaussian'], vertical)
275 |             images.append(frame)
276 | 
277 |         total_frames = hold_frames + transition_frames
278 |         return images, total_frames
279 | 
280 |     def create_venetian_blinds_transition_frame(self, frame_width, frame_height, num_bars, bar_size, ease_factor, gaussian_blur_amount, vertical=True):
281 |         frame = Image.new('RGB', (frame_width, frame_height), color='black')
282 |         draw = ImageDraw.Draw(frame)
283 | 
284 |         for j in range(num_bars):
285 |             if vertical:
286 |                 left = j * bar_size
287 |                 right = left + int(bar_size * ease_factor)
288 |                 draw.rectangle([left, 0, right, frame_height], fill='white')
289 |             else:
290 |                 top = j * bar_size
291 |                 bottom = top + int(bar_size * ease_factor)
292 |                 draw.rectangle([0, top, frame_width, bottom], fill='white')
293 | 
294 |         if gaussian_blur_amount > 0:
295 |             frame = frame.filter(ImageFilter.GaussianBlur(gaussian_blur_amount))
296 | 
297 |         return frame
298 | 
299 |     def generate_sq_spin_animation(self, frame_width, frame_height, hold_frames, transition_frames, gaussian_blur_amount, transition_easing, blur_easing, clockwise=True):
300 |         images = []
301 |         initial_square_size = 2
302 |         max_square_size = int(frame_width * 1.65)
303 |         canvas_size = int(frame_width * 2.5)
304 | 
305 |         for _ in range(hold_frames):
306 |             frame = self.create_frame(frame_width, frame_height, is_black=True)
307 |             images.append(frame)
308 | 
309 |         for i in range(transition_frames):
310 |             ease_factor = self.calculate_ease_factor(i, transition_frames, transition_easing)
311 |             blur_factor = self.calculate_blur_factor(i, transition_frames, blur_easing, gaussian_blur_amount)
312 | 
313 |             square_size = initial_square_size + int((max_square_size - initial_square_size) * ease_factor)
314 |             rotation_angle = (235 * ease_factor) if clockwise else (-235 * ease_factor)
315 | 
316 |             frame = self.create_sq_spin_transition_frame(canvas_size, frame_width, frame_height, square_size, rotation_angle, blur_factor['gaussian'])
317 |             images.append(frame)
318 | 
319 |         total_frames = hold_frames + transition_frames
320 |         return images, total_frames
321 | 
322 |     def create_sq_spin_transition_frame(self, canvas_size, frame_width, frame_height, square_size, rotation_angle, gaussian_blur_amount):
323 |         frame = Image.new('RGB', (canvas_size, canvas_size), color='black')
324 |         draw = ImageDraw.Draw(frame)
325 | 
326 |         top_left = ((canvas_size - square_size) // 2, (canvas_size - square_size) // 2)
327 |         bottom_right = (top_left[0] + square_size, top_left[1] + square_size)
328 |         draw.rectangle([top_left, bottom_right], fill='white')
329 | 
330 |         frame = frame.rotate(rotation_angle, expand=True)
331 | 
332 |         left = (frame.width - frame_width) // 2
333 |         top = (frame.height - frame_height) // 2
334 |         frame = frame.crop((left, top, left + frame_width, top + frame_height))
335 | 
336 |         if gaussian_blur_amount > 0:
337 |             frame = frame.filter(ImageFilter.GaussianBlur(gaussian_blur_amount))
338 | 
339 |         return frame
340 | 
341 |     def generate_rotated_animation(self, animation_func, frame_width, frame_height, hold_frames, transition_frames, gaussian_blur_amount, transition_easing, blur_easing, rotation_angle, resize=False):
342 |         images, total_frames = animation_func(frame_width, frame_height, hold_frames, transition_frames, gaussian_blur_amount, transition_easing, blur_easing)
343 |         rotated_images = [img.rotate(rotation_angle, expand=True) for img in images]
344 |         if resize:
345 |             rotated_images = [img.resize((frame_width, frame_height), Image.LANCZOS) for img in rotated_images]
346 |         return rotated_images, total_frames
347 | 
348 |     def generate_left_to_right_animation(self, frame_width, frame_height, hold_frames, transition_frames, gaussian_blur_amount, transition_easing, blur_easing):
349 |         images = []
350 | 
351 |         for _ in range(hold_frames):
352 |             frame = self.create_frame(frame_width, frame_height, is_black=True)
353 |             images.append(frame)
354 | 
355 |         for i in range(transition_frames):
356 |             ease_factor = self.calculate_ease_factor(i, transition_frames, transition_easing)
357 |             blur_factor = self.calculate_blur_factor(i, transition_frames, blur_easing, gaussian_blur_amount)
358 |             frame = self.create_left_to_right_transition_frame(frame_width, frame_height, blur_factor['gaussian'], transition_frames, frame_index=i, ease_factor=ease_factor)
359 |             images.append(frame)
360 | 
361 |         total_frames = hold_frames + transition_frames
362 |         return images, total_frames
363 | 
364 |     def generate_growing_circle_animation(self, frame_width, frame_height, hold_frames, transition_frames, gaussian_blur_amount, transition_easing, blur_easing):
365 |         images = []
366 |         circle_size = int(frame_width * 1.5)
367 | 
368 |         for _ in range(hold_frames):
369 |             frame = self.create_frame(frame_width, frame_height, is_black=True)
370 |             images.append(frame)
371 | 
372 |         for i in range(transition_frames):
373 |             ease_factor = self.calculate_ease_factor(i, transition_frames, transition_easing)
374 |             blur_factor = self.calculate_blur_factor(i, transition_frames, blur_easing, gaussian_blur_amount)
375 |             frame = self.create_growing_circle_transition_frame(frame_width, frame_height, circle_size, blur_factor['gaussian'], transition_frames, frame_index=i, ease_factor=ease_factor)
376 |             images.append(frame)
377 | 
378 |         total_frames = hold_frames + transition_frames
379 |         return images, total_frames
380 | 
381 |     def generate_shrinking_circle_animation(self, frame_width, frame_height, hold_frames, transition_frames, gaussian_blur_amount, transition_easing, blur_easing):
382 |         images = []
383 |         circle_size = int(frame_width * 1.5)
384 | 
385 |         for _ in range(hold_frames):
386 |             frame = self.create_frame(frame_width, frame_height, is_black=True)
387 |             images.append(frame)
388 | 
389 |         for i in range(transition_frames):
390 |             ease_factor = self.calculate_ease_factor(i, transition_frames, transition_easing)
391 |             blur_factor = self.calculate_blur_factor(i, transition_frames, blur_easing, gaussian_blur_amount)
392 |             frame = self.create_shrinking_circle_transition_frame(frame_width, frame_height, circle_size, blur_factor['gaussian'], transition_frames, frame_index=i, ease_factor=ease_factor)
393 |             images.append(frame)
394 | 
395 |         total_frames = hold_frames + transition_frames
396 |         return images, total_frames
397 | 
398 |     def generate_diagonal_animation(self, animation_type, frame_width, frame_height, hold_frames, transition_frames, gaussian_blur_amount, transition_easing, blur_easing):
399 |         frames, _ = self.generate_left_to_right_animation(frame_width, frame_height, hold_frames, transition_frames, gaussian_blur_amount, transition_easing, blur_easing)
400 | 
401 |         enlarged_frames = []
402 |         for frame in frames:
403 |             large_canvas = Image.new('RGB', (frame_width * 2, frame_height * 2), 'black')
404 |             enlarged_frame = frame.resize((int(frame_width * 1.5), int(frame_height * 1.5)), Image.LANCZOS)
405 |             large_canvas.paste(enlarged_frame, ((large_canvas.width - enlarged_frame.width) // 2, (large_canvas.height - enlarged_frame.height) // 2))
406 | 
407 |             if animation_type == "DiagonalTopLeft-BottomRight":
408 |                 rotated_frame = large_canvas.rotate(45, expand=True)
409 |             elif animation_type == "DiagonalBottomRight-TopLeft":
410 |                 rotated_frame = large_canvas.rotate(225, expand=True)
411 |             elif animation_type == "DiagonalTopRight-BottomLeft":
412 |                 rotated_frame = large_canvas.rotate(135, expand=True)
413 |             elif animation_type == "DiagonalBottomLeft-TopRight":
414 |                 rotated_frame = large_canvas.rotate(315, expand=True)
415 | 
416 |             cropped_frame = rotated_frame.crop(((rotated_frame.width - frame_width) // 2, (rotated_frame.height - frame_height) // 2,
417 |                                                 (rotated_frame.width + frame_width) // 2, (rotated_frame.height + frame_height) // 2))
418 | 
419 |             enlarged_frames.append(cropped_frame)
420 | 
421 |         total_frames = hold_frames + transition_frames
422 |         return enlarged_frames, total_frames
423 | 
424 |     def generate_fade_animation(self, frame_width, frame_height, hold_frames, transition_frames, transition_easing, blur_easing):
425 |         images = []
426 | 
427 |         for _ in range(hold_frames):
428 |             frame = self.create_frame(frame_width, frame_height, is_black=True)
429 |             images.append(frame)
430 | 
431 |         for i in range(transition_frames):
432 |             ease_factor = self.calculate_ease_factor(i, transition_frames, transition_easing)
433 |             blur_factor = self.calculate_blur_factor(i, transition_frames, blur_easing, 0)
434 |             gray_value = int(255 * (i / transition_frames) * ease_factor)
435 |             frame = Image.new('RGB', (frame_width, frame_height), color=(gray_value, gray_value, gray_value))
436 |             images.append(frame)
437 | 
438 |         total_frames = hold_frames + transition_frames
439 |         return images, total_frames
440 | 
441 |     def create_frame(self, frame_width, frame_height, is_black=True):
442 |         color = 'black' if is_black else 'white'
443 |         frame = Image.new('RGB', (frame_width, frame_height), color=color)
444 |         return frame
445 | 
446 |     def create_left_to_right_transition_frame(self, frame_width, frame_height, blur_factor, transition_frames, frame_index, ease_factor):
447 |         frame = Image.new('RGB', (frame_width, frame_height), color='black')
448 |         draw = ImageDraw.Draw(frame)
449 | 
450 |         box_width = int(frame_width * ease_factor)
451 |         draw.rectangle([0, 0, box_width, frame_height], fill='white')
452 | 
453 |         if blur_factor > 0:
454 |             frame = frame.filter(ImageFilter.GaussianBlur(blur_factor))
455 | 
456 |         return frame
457 | 
458 |     def create_growing_circle_transition_frame(self, frame_width, frame_height, circle_size, blur_factor, transition_frames, frame_index, ease_factor):
459 |         frame = Image.new('RGB', (circle_size, circle_size), color='black')
460 |         draw = ImageDraw.Draw(frame)
461 | 
462 |         max_radius = circle_size // 2
463 |         radius = int(max_radius * ease_factor)
464 | 
465 |         draw.ellipse([(circle_size // 2 - radius, circle_size // 2 - radius),
466 |                       (circle_size // 2 + radius, circle_size // 2 + radius)], fill='white')
467 | 
468 |         frame = frame.crop((circle_size//2-frame_width//2, circle_size//2-frame_height//2, circle_size//2+frame_width//2, circle_size//2+frame_height//2))
469 | 
470 |         if blur_factor > 0:
471 |             frame = frame.filter(ImageFilter.GaussianBlur(blur_factor))
472 | 
473 |         return frame
474 | 
475 |     def create_shrinking_circle_transition_frame(self, frame_width, frame_height, circle_size, blur_factor, transition_frames, frame_index, ease_factor):
476 |         frame = Image.new('RGB', (circle_size, circle_size), color='white')
477 |         draw = ImageDraw.Draw(frame)
478 | 
479 |         max_radius = circle_size // 2
480 |         radius = int(max_radius * (1 - ease_factor))
481 | 
482 |         draw.ellipse([(circle_size // 2 - radius, circle_size // 2 - radius),
483 |                       (circle_size // 2 + radius, circle_size // 2 + radius)], fill='black')
484 | 
485 |         frame = frame.crop((circle_size//2-frame_width//2, circle_size//2-frame_height//2, circle_size//2+frame_width//2, circle_size//2+frame_height//2))
486 | 
487 |         if blur_factor > 0:
488 |             frame = frame.filter(ImageFilter.GaussianBlur(blur_factor))
489 | 
490 |         return frame
491 | 
492 |     def calculate_ease_factor(self, frame_index, transition_frames, transition_easing):
493 |         t = frame_index / transition_frames
494 |         if transition_easing == "ease_in":
495 |             return self.ease_in_quad(t)
496 |         elif transition_easing == "ease_out":
497 |             return self.ease_out_quad(t)
498 |         elif transition_easing == "ease_in_out":
499 |             return self.ease_in_out_quad(t)
500 |         else:  # "false" or any other value
501 |             return t  # Linear easing
502 | 
503 |     def calculate_blur_factor(self, frame_index, transition_frames, blur_easing, gaussian_blur_amount):
504 |         t = frame_index / transition_frames
505 |         if blur_easing == "ease_in":
506 |             blur_factor = self.ease_in_quad(t)
507 |         elif blur_easing == "ease_out":
508 |             blur_factor = 1 - self.ease_out_quad(1 - t)
509 |         elif blur_easing == "ease_in_out":
510 |             blur_factor = self.ease_in_out_quad_for_blur(t)
511 |         else:  # "false" or any other value
512 |             blur_factor = 1  # Constant blur amount for linear
513 | 
514 |         return {
515 |             'gaussian': gaussian_blur_amount * blur_factor
516 |         }
517 | 
518 |     def ease_in_quad(self, t):
519 |         return t * t
520 | 
521 |     def ease_out_quad(self, t):
522 |         return t * (2 - t)
523 | 
524 |     def ease_in_out_quad(self, t):
525 |         return 2 * t * t if t < 0.5 else -1 + (4 - 2 * t) * t
526 | 
527 |     def ease_in_out_quad_for_blur(self, t):
528 |         if t < 0.5:
529 |             return 2 * t * t
530 |         else:
531 |             return 1 - pow(-2 * t + 2, 2) / 2
532 | 
533 |     def apply_qr_mode(self, frames, cycle_index, input_frames, cycle_length):
534 |         if cycle_index % 2 == 1:  # Invert every other cycle (odd cycle indices)
535 |             frames = [ImageOps.invert(frame) for frame in frames]
536 |         return frames
537 | 
538 |     def process_image_for_output(self, image) -> torch.Tensor:
539 |         i = ImageOps.exif_transpose(image)
540 |         if i.mode == 'I':
541 |             i = i.point(lambda i: i * (1 / 255))
542 |         image = i.convert("RGB")
543 |         image_np = np.array(image).astype(np.float32) / 255.0
544 |         return torch.from_numpy(image_np)[None,]
545 | 
546 |     def apply_blur_to_frames(self, frames, gaussian_blur_amount):
547 |         return [frame.filter(ImageFilter.GaussianBlur(gaussian_blur_amount)) for frame in frames]
548 | 
549 |     def apply_contrast_to_frames(self, frames, contrast_factor):
550 |         return [ImageEnhance.Contrast(frame).enhance(contrast_factor * 2) for frame in frames]  # Double the slider value for a stronger contrast effect
551 |
552 |     def composite_edge_fx(self, unblurred_frames, blurred_frames):
553 |         final_frames = []
554 |         for unblurred, blurred in zip(unblurred_frames, blurred_frames):
555 |             # Create a black background
556 |             final_frame = Image.new('RGB', unblurred.size, color='black')
557 | 
558 |             # Check if the unblurred frame has an alpha channel
559 |             if unblurred.mode == 'RGBA':
560 |                 mask = unblurred.split()[3]
561 |             else:
562 |                 # If no alpha channel, use the image itself as a mask
563 |                 mask = unblurred.convert('L')
564 | 
565 |             # Use unblurred frame as mask to cut out blurred frame
566 |             final_frame.paste(blurred, mask=mask)
567 | 
568 |             final_frames.append(final_frame)
569 | 
570 |         return final_frames
571 | 
572 |     def apply_fade_to_edge_fx(self, edge_fx_frames, transition_frames, hold_frames, fade_balance):
573 |         faded_frames = []
574 |         frames_per_cycle = transition_frames + hold_frames
575 | 
576 |         # Calculate fade-up and fade-down frames, ensuring they sum to transition_frames
577 |         fade_up_frames = max(1, min(transition_frames - 1, round(transition_frames * fade_balance)))
578 |         fade_down_frames = transition_frames - fade_up_frames
579 | 
580 |         for i, frame in enumerate(edge_fx_frames):
581 |             cycle_position = i % frames_per_cycle
582 | 
583 |             if cycle_position < fade_up_frames:
584 |                 # Fade in
585 |                 fade_factor = cycle_position / fade_up_frames
586 |             elif cycle_position >= frames_per_cycle - fade_down_frames:
587 |                 # Fade out
588 |                 fade_factor = (frames_per_cycle - cycle_position) / fade_down_frames
589 |             else:
590 |                 # Hold at full opacity
591 |                 fade_factor = 1.0
592 | 
593 |             # Apply fade effect
594 |             faded_frame = Image.new('RGB', frame.size, (0, 0, 0))
595 |             faded_frame = Image.blend(faded_frame, frame, fade_factor)
596 |             faded_frames.append(faded_frame)
597 | 
598 |         return faded_frames
599 | 
600 | # Mapping the class to its name for the node system
601 | NODE_CLASS_MAPPINGS = {
602 |     "DynamicAnimatedWeightsHelper": DynamicAnimatedWeightsHelper
603 | }
604 | 
605 | # Display name mappings for the node system
606 | NODE_DISPLAY_NAME_MAPPINGS = {
607 |     "DynamicAnimatedWeightsHelper": "Dynamic Animated Weights"
608 | }
609 |
--------------------------------------------------------------------------------
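
Note: the quadratic easing helpers near the end of the class drive both the transition position and the blur schedule. A small sketch that samples the same three curves:

def ease_in_quad(t):
    return t * t  # slow start, fast finish

def ease_out_quad(t):
    return t * (2 - t)  # fast start, slow finish

def ease_in_out_quad(t):
    return 2 * t * t if t < 0.5 else -1 + (4 - 2 * t) * t  # slow at both ends

for t in (0.0, 0.25, 0.5, 0.75, 1.0):
    print(f"t={t:.2f}  in={ease_in_quad(t):.3f}  out={ease_out_quad(t):.3f}  in_out={ease_in_out_quad(t):.3f}")
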
/FX_Source_Img.py:
--------------------------------------------------------------------------------
1 | """
2 | @author: mgfxer
3 | @title: FrameFX
4 | @nickname: FrameFX 💫
5 | @description: This extension provides various frame and mask sequence manipulation tools for animation workflows.
6 | """
7 | import torch
8 |
9 | class EdgeFXSourceImages:
10 |     def __init__(self):
11 |         pass  # No instance state is needed
12 |
13 |     @classmethod
14 |     def INPUT_TYPES(cls):
15 |         # Define the input types for the node
16 |         return {
17 |             "required": {
18 |                 "image_stream": ("IMAGE",),  # Single image input for the stream of images
19 |                 "num_images": ("INT", {"default": 4, "min": 1}),  # Integer input for the number of images
20 |                 "hold_length": ("INT", {"default": 5, "min": 1}),  # Integer input for hold length with default 5 and minimum 1
21 |                 "transition_length": ("INT", {"default": 20, "min": 1}),  # Integer input for transition length with default 20 and minimum 1
22 |                 "padding_frames": ("INT", {"default": 0, "min": 0}),  # Integer input for padding frames with default 0 and minimum 0
23 |                 "push": ("INT", {"default": 0, "min": 0}),
24 |                 "retract": ("INT", {"default": 0, "min": 0}),
25 |             }
26 |         }
27 | 
28 |     # Define the return types for the function outputs
29 |     RETURN_TYPES = ("IMAGE", "IMAGE", "STRING", "STRING", "INT")  # The function will return two image tensors, two strings, and an integer
30 |     # Define the return names for the function outputs
31 |     RETURN_NAMES = ("first_timeline", "second_timeline", "first_text_output", "second_text_output", "total_frames")
32 |     # Name of the function to be executed
33 |     FUNCTION = "generate_mask_definitions_v2"
34 |     # Category of the node
35 |     CATEGORY = "advanced"
36 | 
37 |     def generate_mask_definitions_v2(self, image_stream, num_images, hold_length, transition_length, padding_frames, push, retract):
38 |         # Initialize lists to hold the timelines and mask definitions
39 |         primary_timeline = []
40 |         secondary_timeline = []
41 |         primary_text_output = ""
42 |         secondary_text_output = ""
43 | 
44 |         # Calculate the total number of frames for each image and the total number of frames for the entire animation
45 |         frame_interval = hold_length + transition_length
46 |         total_frame_count = num_images * frame_interval + padding_frames
47 | 
48 |         # Get the actual number of images provided
49 |         provided_image_count = image_stream.shape[0]
50 | 
51 |         # Adjust hold value for the first cycle
52 |         adjusted_initial_hold = hold_length - retract + push
53 | 
54 |         # Generate the primary timeline with adjusted initial hold value
55 |         for i in range(num_images):
56 |             index = i % provided_image_count
57 |             current_hold_duration = adjusted_initial_hold if i == 0 else hold_length
58 |             frame_interval = current_hold_duration + transition_length
59 |             repeated_images = image_stream[index:index+1].repeat(frame_interval, 1, 1, 1)
60 |             primary_timeline.append(repeated_images)
61 | 
62 |         # Concatenate the list of tensors into a single tensor
63 |         primary_timeline = torch.cat(primary_timeline, dim=0)
64 | 
65 |         # Adjust padding length
66 |         adjusted_padding_duration = padding_frames + retract - push
67 | 
68 |         # Generate the secondary timeline
69 |         for i in range(1, num_images):
70 |             index = i % provided_image_count
71 |             repeated_images = image_stream[index:index+1].repeat(frame_interval, 1, 1, 1)
72 |             secondary_timeline.append(repeated_images)
73 |         repeated_images = image_stream[0:1].repeat(frame_interval, 1, 1, 1)
74 |         secondary_timeline.append(repeated_images)
75 | 
76 |         # Concatenate the list of tensors into a single tensor
77 |         secondary_timeline = torch.cat(secondary_timeline, dim=0)
78 | 
79 |         # Initialize frame number and create mask text outputs
80 |         frame_counter = 0
81 |         while frame_counter < num_images * frame_interval:
82 |             hold_end_frame = frame_counter + hold_length - 1
83 |             transition_start_frame = hold_end_frame + 1
84 |             transition_end_frame = transition_start_frame + transition_length - 1
85 | 
86 |             primary_text_output += f"{frame_counter}:(1.0),\n"
87 |             primary_text_output += f"{hold_end_frame}:(1.0),\n"
88 |             primary_text_output += f"{transition_end_frame}:(0.0),\n"
89 | 
90 |             secondary_text_output += f"{frame_counter}:(0.0),\n"
91 |             secondary_text_output += f"{hold_end_frame}:(0.0),\n"
92 |             secondary_text_output += f"{transition_end_frame}:(1.0),\n"
93 | 
94 |             frame_counter = transition_end_frame + 1
95 | 
96 |         # Add padding frames if specified
97 |         if adjusted_padding_duration > 0:
98 |             padding_start_frame = frame_counter
99 |             last_image_primary_timeline = primary_timeline[-1:]
100 |             last_image_secondary_timeline = secondary_timeline[-1:]
101 | 
102 |             # Add padding frames to the timelines
103 |             padding_images_primary = last_image_primary_timeline.repeat(adjusted_padding_duration, 1, 1, 1)
104 |             primary_timeline = torch.cat((primary_timeline, padding_images_primary), dim=0)
105 | 
106 |             padding_images_secondary = last_image_secondary_timeline.repeat(adjusted_padding_duration, 1, 1, 1)
107 |             secondary_timeline = torch.cat((secondary_timeline, padding_images_secondary), dim=0)
108 | 
109 |             # Append padding frames to the text outputs
110 |             for i in range(adjusted_padding_duration):
111 |                 frame_counter = padding_start_frame + i
112 |                 primary_text_output += f"{frame_counter}:(0.0),\n"
113 |                 secondary_text_output += f"{frame_counter}:(1.0),\n"  # Set to 1.0 for the secondary timeline
114 | 
115 |         # Ensure the text output ends correctly
116 |         primary_text_output = primary_text_output.strip().rstrip(',')
117 |         secondary_text_output = secondary_text_output.strip().rstrip(',')
118 | 
119 |         # Return the generated timelines, text outputs, and total frames
120 |         return primary_timeline, secondary_timeline, primary_text_output, secondary_text_output, total_frame_count
121 | 
122 | # Mapping the class to its name for the node system
123 | NODE_CLASS_MAPPINGS = {
124 |     "EdgeFXSourceImages": EdgeFXSourceImages  # Map the class name to the node system
125 | }
126 | 
127 | # Display name mappings for the node system
128 | NODE_DISPLAY_NAME_MAPPINGS = {
129 |     "EdgeFXSourceImages": "EdgeFX Source Images"  # Map the display name to the node system
130 | }
131 |
--------------------------------------------------------------------------------
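
Note: `push` and `retract` appear to shift the image timeline against the mask schedule by lengthening or shortening the first hold, with the end padding absorbing the difference, so the total frame count is unchanged. A quick arithmetic sketch of that invariant (all values hypothetical):

hold_length, transition_length, num_images, padding_frames = 5, 20, 4, 6

for push, retract in [(0, 0), (3, 0), (0, 3)]:
    first_hold = hold_length - retract + push   # adjusted_initial_hold
    padding = padding_frames + retract - push   # adjusted_padding_duration
    total = first_hold + transition_length \
        + (num_images - 1) * (hold_length + transition_length) + padding
    print(push, retract, total)  # total is 106 in every case
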
/FlorenceTravelHelper.py:
--------------------------------------------------------------------------------
1 | """
2 | @author: mgfxer
3 | @title: FrameFX
4 | @nickname: FrameFX 💫
5 | @description: This extension provides various frame and mask sequence manipulation tools for animation workflows.
6 | """
7 | # Florence Prompt Travel Helper
8 | # Version: 2.5
9 | import re
10 | from collections import defaultdict
11 |
12 | class FlorencePromptTravelHelper:
13 |     def __init__(self):
14 |         pass
15 | 
16 |     @classmethod
17 |     def INPUT_TYPES(s):
18 |         return {
19 |             "required": {
20 |                 "bulk_text_input": ("STRING", {
21 |                     "multiline": True,
22 |                     "default": "",
23 |                 }),
24 |                 "hold_length": ("INT", {
25 |                     "default": 5,
26 |                     "min": 0
27 |                 }),
28 |                 "transition_length": ("INT", {
29 |                     "default": 5,
30 |                     "min": 0
31 |                 }),
32 |                 "end_padding_frames": ("INT", {
33 |                     "default": 10,
34 |                     "min": 0
35 |                 }),
36 |             }
37 |         }
38 | 
39 |     RETURN_TYPES = ("STRING",)
40 |     RETURN_NAMES = ("formatted_prompts",)
41 |     FUNCTION = "process_bulk_text"
42 |     CATEGORY = "advanced"
43 | 
44 |     def process_bulk_text(self, bulk_text_input="", hold_length=5, transition_length=5, end_padding_frames=10):
45 |         # Handle both string and list inputs
46 |         if isinstance(bulk_text_input, list):
47 |             bulk_text_input = ".,".join(bulk_text_input)
48 | 
49 |         if not bulk_text_input or not bulk_text_input.strip():
50 |             return ("", )
51 | 
52 |         # Split the bulk input into individual prompts
53 |         prompts = bulk_text_input.strip().split('.,')
54 |         # Strip whitespace from each prompt
55 |         prompts = [prompt.strip() for prompt in prompts]
56 |
57 |         frames = []
58 |         current_frame = 0
59 | 
60 |         # Create frames based on hold and transition lengths
61 |         for prompt in prompts:
62 |             prompt = prompt.replace('"', '\\"')  # Escape quotes
63 |             frames.append((current_frame, prompt))
64 |             current_frame += hold_length
65 |             frames.append((current_frame, prompt))
66 |             current_frame += transition_length
67 | 
68 |         # Add the first prompt again at the end to create a loop
69 |         if prompts:
70 |             first_prompt = prompts[0].replace('"', '\\"')
71 |             frames.append((current_frame, first_prompt))
72 |             current_frame += end_padding_frames
73 |             # Add the final piece of the travel schedule
74 |             frames.append((current_frame, first_prompt))
75 | 
76 |         frame_dict = defaultdict(tuple)
77 |         for frame, text in frames:
78 |             frame_dict[frame] += (text,)
79 | 
80 |         formatted_prompts = []
81 |         for frame in sorted(frame_dict.keys()):
82 |             formatted_prompts.append(f'"{frame}": "{", ".join(frame_dict[frame])}"')
83 | 
84 |         return (',\n'.join(formatted_prompts), )
85 | 
86 | NODE_CLASS_MAPPINGS = {
87 |     "FlorencePromptTravelHelper": FlorencePromptTravelHelper
88 | }
89 | 
90 | NODE_DISPLAY_NAME_MAPPINGS = {
91 |     "FlorencePromptTravelHelper": "Florence Prompt Travel Helper"
92 | }
--------------------------------------------------------------------------------
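
Note: the output is the `"frame": "prompt"` schedule format consumed by prompt-travel nodes. A worked sketch of what the logic above yields for two prompts with hold_length=2, transition_length=3, end_padding_frames=4 (prompt text is hypothetical):

prompts = ["a forest", "a desert"]
hold, transition, end_pad = 2, 3, 4

frames, cur = [], 0
for p in prompts:
    frames += [(cur, p), (cur + hold, p)]  # hold the prompt, then transition away
    cur += hold + transition
frames += [(cur, prompts[0]), (cur + end_pad, prompts[0])]  # loop back to the first prompt

print(",\n".join(f'"{f}": "{p}"' for f, p in frames))
# "0": "a forest",
# "2": "a forest",
# "5": "a desert",
# "7": "a desert",
# "10": "a forest",
# "14": "a forest"
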
/MaskSequenceHelper.py:
--------------------------------------------------------------------------------
1 | """
2 | @author: mgfxer
3 | @title: FrameFX
4 | @nickname: FrameFX 💫
5 | @description: This extension provides various frame and mask sequence manipulation tools for animation workflows.
6 | """
7 | import torch
8 |
9 | class MaskSequenceHelper:
10 |     def __init__(self):
11 |         pass  # No instance state is needed
12 |
13 |     @classmethod
14 |     def INPUT_TYPES(cls):
15 |         # Define the input types for the node
16 |         return {
17 |             "required": {
18 |                 "image_stream": ("IMAGE",),  # Single image input for the stream of images
19 |                 "num_images": ("INT", {"default": 4, "min": 1}),  # Integer input for the number of images
20 |                 "hold_length": ("INT", {"default": 5, "min": 1}),  # Integer input for hold length with default 5 and minimum 1
21 |                 "transition_length": ("INT", {"default": 20, "min": 1}),  # Integer input for transition length with default 20 and minimum 1
22 |                 "padding_frames": ("INT", {"default": 0, "min": 0}),  # Integer input for padding frames with default 0 and minimum 0
23 |             }
24 |         }
25 | 
26 |     # Define the return types for the function outputs
27 |     RETURN_TYPES = ("IMAGE", "IMAGE", "STRING", "STRING", "INT")  # The function will return two image tensors, two strings, and an integer
28 |     # Define the return names for the function outputs
29 |     RETURN_NAMES = ("first_timeline", "second_timeline", "first_text_output", "second_text_output", "total_frames")
30 |     # Name of the function to be executed
31 |     FUNCTION = "generate_mask_definitions"
32 |     # Category of the node
33 |     CATEGORY = "advanced"
34 | 
35 |     def generate_mask_definitions(self, image_stream, num_images, hold_length, transition_length, padding_frames):
36 |         # Initialize lists to hold the timelines and mask definitions
37 |         first_timeline = []
38 |         second_timeline = []
39 |         first_text_output = ""
40 |         second_text_output = ""
41 | 
42 |         # Calculate the total number of frames for each image and the total number of frames for the entire animation
43 |         frame_distance = hold_length + transition_length
44 |         total_frames = num_images * frame_distance + padding_frames
45 | 
46 |         # Get the actual number of images provided
47 |         num_images_provided = image_stream.shape[0]
48 | 
49 |         # Generate the first timeline
50 |         for i in range(num_images):
51 |             index = i % num_images_provided
52 |             repeated_images = image_stream[index:index+1].repeat(frame_distance, 1, 1, 1)
53 |             first_timeline.append(repeated_images)
54 | 
55 |         # Concatenate the list of tensors into a single tensor
56 |         first_timeline = torch.cat(first_timeline, dim=0)
57 | 
58 |         # Generate the second timeline
59 |         for i in range(1, num_images):
60 |             index = i % num_images_provided
61 |             repeated_images = image_stream[index:index+1].repeat(frame_distance, 1, 1, 1)
62 |             second_timeline.append(repeated_images)
63 |         repeated_images = image_stream[0:1].repeat(frame_distance, 1, 1, 1)
64 |         second_timeline.append(repeated_images)
65 | 
66 |         # Concatenate the list of tensors into a single tensor
67 |         second_timeline = torch.cat(second_timeline, dim=0)
68 | 
69 |         # Initialize frame number and create mask text outputs
70 |         frame = 0
71 |         while frame < num_images * frame_distance:
72 |             frame_end_hold = frame + hold_length - 1
73 |             frame_start_transition = frame_end_hold + 1
74 |             frame_end_transition = frame_start_transition + transition_length - 1
75 | 
76 |             first_text_output += f"{frame}:(1.0),\n"
77 |             first_text_output += f"{frame_end_hold}:(1.0),\n"
78 |             first_text_output += f"{frame_end_transition}:(0.0),\n"
79 | 
80 |             second_text_output += f"{frame}:(0.0),\n"
81 |             second_text_output += f"{frame_end_hold}:(0.0),\n"
82 |             second_text_output += f"{frame_end_transition}:(1.0),\n"
83 | 
84 |             frame = frame_end_transition + 1
85 | 
86 |         # Add padding frames if specified
87 |         if padding_frames > 0:
88 |             padding_start_frame = frame
89 |             last_image_first_timeline = first_timeline[-1:]
90 |             last_image_second_timeline = second_timeline[-1:]
91 | 
92 |             # Add padding frames to the timelines
93 |             padding_images_first = last_image_first_timeline.repeat(padding_frames, 1, 1, 1)
94 |             first_timeline = torch.cat((first_timeline, padding_images_first), dim=0)
95 | 
96 |             padding_images_second = last_image_second_timeline.repeat(padding_frames, 1, 1, 1)
97 |             second_timeline = torch.cat((second_timeline, padding_images_second), dim=0)
98 | 
99 |             # Append padding frames to the text outputs
100 |             for i in range(padding_frames):
101 |                 frame = padding_start_frame + i
102 |                 first_text_output += f"{frame}:(0.0),\n"
103 |                 second_text_output += f"{frame}:(1.0),\n"  # Set to 1.0 for the second timeline
104 | 
105 |         # Ensure the text output ends correctly
106 |         first_text_output = first_text_output.strip().rstrip(',')
107 |         second_text_output = second_text_output.strip().rstrip(',')
108 | 
109 |         # Return the generated timelines, text outputs, and total frames
110 |         return first_timeline, second_timeline, first_text_output, second_text_output, total_frames
111 | 
112 | # Mapping the class to its name for the node system
113 | NODE_CLASS_MAPPINGS = {
114 |     "MaskSequenceHelper": MaskSequenceHelper  # Map the class name to the node system
115 | }
116 | 
117 | # Display name mappings for the node system
118 | NODE_DISPLAY_NAME_MAPPINGS = {
119 |     "MaskSequenceHelper": "Mask Sequence Helper"  # Map the display name to the node system
120 | }
121 |
--------------------------------------------------------------------------------
/PromptTravelHelper.py:
--------------------------------------------------------------------------------
1 | """
2 | @author: mgfxer
3 | @title: FrameFX
4 | @nickname: FrameFX 💫
5 | @description: This extension provides various frame and mask sequence manipulation tools for animation workflows.
6 | """
7 | # Prompt Travel Helper
8 | # Version: 2.2
9 |
10 | import re
11 | from collections import defaultdict
12 |
13 | class promptTravelHelper:
14 | def __init__(self):
15 | pass
16 |
17 | @classmethod
18 | def INPUT_TYPES(s):
19 | return {
20 | "required": {
21 | "bulk_text_input": ("STRING", {
22 | "multiline": True,
23 | "default": "",
24 | }),
25 | "hold_length": ("INT", {
26 | "default": 5,
27 | "min": 0
28 | }),
29 | "transition_length": ("INT", {
30 | "default": 5,
31 | "min": 0
32 | }),
33 | "end_padding_frames": ("INT", {
34 | "default": 10,
35 | "min": 0
36 | }),
37 | }
38 | }
39 |
40 | RETURN_TYPES = ("STRING",)
41 | RETURN_NAMES = ("formatted_prompts",)
42 | FUNCTION = "process_bulk_text"
43 | CATEGORY = "advanced"
44 |
45 | def process_bulk_text(self, bulk_text_input="", hold_length=5, transition_length=5, end_padding_frames=10):
46 | if not bulk_text_input.strip():
47 | return ("", )
48 |
49 | # Split the bulk input into individual prompts
50 | prompts = [prompt.strip() for prompt in bulk_text_input.strip().split('\n') if prompt.strip()]
51 |
52 | frames = []
53 | current_frame = 0
54 |
55 | # Create frames based on hold and transition lengths
56 | for prompt in prompts:
57 | prompt = prompt.replace('"', '\\"') # Escape quotes
58 | frames.append((current_frame, prompt))
59 | current_frame += hold_length
60 | frames.append((current_frame, prompt))
61 | current_frame += transition_length
62 |
63 | # Add the first prompt again at the end to create a loop
64 | if prompts:
65 | first_prompt = prompts[0].replace('"', '\\"')
66 | frames.append((current_frame, first_prompt))
67 | current_frame += end_padding_frames
68 |
69 | # Add the final piece of the travel schedule
70 | frames.append((current_frame, first_prompt))
71 |
72 | frame_dict = defaultdict(tuple)
73 |
74 | for frame, text in frames:
75 | frame_dict[frame] += (text,)
76 |
77 | formatted_prompts = []
78 | for frame in sorted(frame_dict.keys()):
79 | formatted_prompts.append(f'"{frame}": "{", ".join(frame_dict[frame])}"')
80 |
81 | return (',\n'.join(formatted_prompts), )
82 |
83 | NODE_CLASS_MAPPINGS = {
84 | "PromptTravelHelper": promptTravelHelper
85 | }
86 |
87 | NODE_DISPLAY_NAME_MAPPINGS = {
88 | "PromptTravelHelper": "Prompt Travel Helper"
89 | }
90 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## Dynamic Animated Weights
2 |
3 |
4 |
5 |
6 | Outputs operate on the principle of Hold Frames + Transition Frames + End Padding Frames. Together, these values determine the total length of the animation to be created.
7 | Many of my nodes use these concepts and work in lockstep.
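As a minimal sketch of that arithmetic (mirroring the calculation in MaskSequenceHelper.py below; the variable names here are illustrative, not a node API):

```python
# Illustrative only: mirrors total_frames = num_images * (hold + transition) + padding
hold_frames = 5         # frames each image/prompt is held steady
transition_frames = 20  # frames spent blending into the next image/prompt
padding_frames = 10     # extra frames appended at the end of the sequence
num_images = 4          # images/prompts in the sequence

total_frames = num_images * (hold_frames + transition_frames) + padding_frames
print(total_frames)     # 4 * 25 + 10 = 110
```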
8 |
9 | ### Description
10 | The Dynamic Animated Weights Helper is a versatile extension for animation workflows, providing various frame and mask sequence manipulation tools. This node facilitates the creation of complex animation effects with ease and flexibility.
11 |
12 | ### 12 Transition Slots
13 | The user can choose up to 12 separate transition animations, and these are applied in list order. If only 4 transitions are needed, the first 4 are used; if more than 12 are needed, the list of transitions repeats in a loop in the order listed.
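In other words, slot selection wraps around the 12-slot list with a simple modulo; a hedged sketch (names are illustrative, not the node's actual code):

```python
# Illustrative sketch of how the 12 transition slots cycle; `animation_types`
# stands in for the values of animation_type_1 .. animation_type_12.
animation_types = [f"type_{i}" for i in range(1, 13)]
transitions_needed = 15
chosen = [animation_types[i % len(animation_types)] for i in range(transitions_needed)]
# chosen[12] == "type_1": after the 12th slot, the list loops back to the first
```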
14 |
15 | ### Features
16 | - Supports multiple animation types including directional and diagonal transitions, circle growth/shrink, fades, square spins, and venetian blinds.
17 | - Includes options for easing, blur effects, and frame transitions.
18 | - Allows for generation modes like QR, Edge-FX, and combined outputs.
19 | - Random animation type selection with options to exclude specific types.
20 | - Customizable parameters for frame dimensions, transition and hold frames, padding, Gaussian blur, edge effects, and more.
21 |
22 | ### Input Parameters
23 | - `animation_type_1` to `animation_type_12`: Select the animation type for each sequence.
24 | - `transition_easing` and `blur_easing`: Choose the easing function for transitions and blurs.
25 | - `frame_width` and `frame_height`: Set the dimensions for each frame.
26 | - `hold_frames`, `transition_frames`, `padding_frames`, and `input_frames`: Configure the number of frames for holding, transitioning, padding, and input sequences.
27 | - `gaussian_blur_amount`: Adjust the amount of Gaussian blur applied.
28 | - `edge_fx_thickness`, `push_fx`, `retract_fx`: Parameters for edge effects.
29 | - `fx_cull_white_frames`: Set the threshold for culling white frames in edge effects.
30 | - `qr_greyness`: Adjust the greyness for QR generation mode.
31 | - `random_seed`: Seed for randomization.
32 | - `edgeFade_contrast` and `edgeFade_blur`: Parameters for edge fade effects.
33 | - `generation_mode`: Select the generation mode (Only Transitions, Generate QR, Generate Edge-FX, Generate All).
34 | - `edge_fx_fade_balance`: Balance for edge fade effects.
35 | - `venetian_bars`: Number of bars for Venetian blinds animation.
36 |
37 | ## Mask Sequence Helper
38 |
39 |
40 | The Mask Sequence Helper node provides an efficient way to generate mask sequence codes across two opposing timelines, forming a looping slideshow effect. The node also repeats your images to match the hold, transition, and padding counts you set. The two timelines are then masked either by these codes or by the Animated Weights output. This makes it easy to manage frame transitions in animation workflows, and its customizable parameters allow precise control over the timing and sequence of frames.
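For example, with the default hold_length=5 and transition_length=20, the first cycle of the two opposing schedules emitted by generate_mask_definitions (see MaskSequenceHelper.py above) looks like this:

```
# first_text_output: the first timeline starts fully visible, then fades out
0:(1.0),
4:(1.0),
24:(0.0),
...

# second_text_output: the second timeline starts hidden, then fades in
0:(0.0),
4:(0.0),
24:(1.0),
...
```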
41 |
42 | ## Prompt Travel Helper
43 |
44 |
45 |
46 |
47 | ### Description
48 | The Prompt Travel Helper node assists in transforming a stream of BLIP (Bootstrapped Language-Image Pre-training) captions into a prompt travel format. This node operates on the principles of hold, transition, and padding lengths to create a structured sequence of prompts for animation workflows.
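Concretely, given the defaults (hold_length=5, transition_length=5, end_padding_frames=10), two captions on separate lines produce the following output, per the process_bulk_text logic in PromptTravelHelper.py above:

```
bulk_text_input:
a cat sitting on a windowsill
a dog running through a field

formatted_prompts:
"0": "a cat sitting on a windowsill",
"5": "a cat sitting on a windowsill",
"10": "a dog running through a field",
"15": "a dog running through a field",
"20": "a cat sitting on a windowsill",
"30": "a cat sitting on a windowsill"
```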
49 |
50 | ### Features
51 | - **Stream of BLIP Captions:** Converts a bulk input of BLIP captions into a formatted sequence.
52 | - **Customizable Hold and Transition Lengths:** Define the number of frames for holding and transitioning between prompts.
53 | - **End Padding Frames:** Add padding frames to ensure smooth transitions at the end of the sequence.
54 | - **Formatted Output:** Generates a structured prompt sequence suitable for prompt-travel animation workflows.
55 |
56 | ### Input Parameters
57 | - `bulk_text_input`: A multiline string input for the bulk text of BLIP captions.
58 | - `hold_length`: Integer input for the number of frames to hold each caption (default: 5).
59 | - `transition_length`: Integer input for the number of frames to transition between captions (default: 5).
60 | - `end_padding_frames`: Integer input for the number of padding frames at the end of the sequence (default: 10).
61 |
62 | ### Return Values
63 | - `formatted_prompts`: A single string containing the formatted sequence of prompts.
64 |
65 |
66 | ## Florence Prompt Travel Helper
67 |
68 |
69 | ### Description
70 | The Florence Prompt Travel Helper node assists in transforming a stream of Florence captions into a prompt travel format. This node operates on the principles of hold, transition, and padding lengths to create a structured sequence of prompts for animation workflows, similar to the BLIP Travel Helper but specifically designed for Florence captions.
71 |
72 | ### Features
73 | - **Stream of Florence Captions:** Converts a bulk input of Florence captions into a formatted sequence.
74 | - **Customizable Hold and Transition Lengths:** Define the number of frames for holding and transitioning between prompts.
75 | - **End Padding Frames:** Add padding frames to ensure smooth transitions at the end of the sequence.
76 | - **Formatted Output:** Generates a structured prompt sequence suitable for animation workflows.
77 |
78 | ### Input Parameters
79 | - `bulk_text_input`: A multiline string input for the bulk text of Florence captions.
80 | - `hold_length`: Integer input for the number of frames to hold each caption (default: 5).
81 | - `transition_length`: Integer input for the number of frames to transition between captions (default: 5).
82 | - `end_padding_frames`: Integer input for the number of padding frames at the end of the sequence (default: 10).
83 |
84 | ### Return Values
85 | - `formatted_prompts`: A single string containing the formatted sequence of prompts.
86 |
87 | ## EdgeFX Source Images
88 |
89 |
90 | ### Description
91 | The EdgeFX Source Images node extends the functionality of the Mask Sequence Helper by adding push and retract features. These give more dynamic control over the timing of the Edge FX animation sequence: users can push the timeline forward in time or pull it back, resyncing the effect across the transition timeline. This is primarily useful for the 'lower RAM' IPAdapter option within the workflow, aimed at users with lower-RAM cards who can barely run EFX. Otherwise the two-IPAdapter solution should be used, and these push/retract features are usually not needed.
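FX_Source_Img.py is not reproduced here, but conceptually push and retract amount to offsetting the hold of the first cycle; a hypothetical sketch (the formula and names are assumptions, not the node's actual code):

```python
# Hypothetical sketch only; the real FX_Source_Img.py implementation may differ.
hold_length = 5
push, retract = 2, 0  # push lengthens the first hold, retract shortens it

first_hold = max(0, hold_length + push - retract)  # assumed: only cycle one is offset
later_holds = hold_length                          # subsequent cycles keep the configured hold
```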
92 |
93 | ### Features
94 | - **Generate Edge FX Image Sequences:** Create detailed mask sequences with customizable hold and transition lengths.
95 | - **Timeline Generation:** Produce two separate timelines of repeated images, useful for comparison or alternating effects.
96 | - **Padding Frames:** Add padding frames to ensure smooth transitions.
97 | - **Push and Retract Features:** Adjust the hold length for the first cycle of images to create dynamic effects:
98 | - **Push:** Increase the hold length for the first frame, useful for extending the visibility of the initial image.
99 | - **Retract:** Decrease the hold length for the first frame, useful for shortening the visibility of the initial image.
100 |
101 | ### Input Parameters
102 | - `image_stream`: The stream of images to be processed.
103 | - `num_images`: Number of images in the sequence.
104 | - `hold_length`: Number of frames to hold each image.
105 | - `transition_length`: Number of frames for the transition between images.
106 | - `padding_frames`: Number of padding frames to add at the end.
107 | - `push`: Increase the hold length for the first frame.
108 | - `retract`: Decrease the hold length for the first frame.
109 |
110 | ### Return Values
111 | - `first_timeline`: The first sequence of images with transitions and holds.
112 | - `second_timeline`: The second sequence of images, offset by one image from the first.
113 | - `first_text_output`: Text output for the first mask sequence.
114 | - `second_text_output`: Text output for the second mask sequence.
115 | - `total_frames`: Total number of frames generated, including padding.
116 |
117 | ## Dream Zoom Workflow
118 | (excerpt image only) The workflow file is in the workflows folder.
119 |
120 |
121 |
122 |
123 |
124 | ## Live Prompt Interpolation
125 |
126 |
127 | The Live Prompt Interpolation node is part of the Dream Zoom Workflow with auto-queue functionality in ComfyUI. It enables live interpolation of prompts on the fly, allowing for dynamic and smooth transitions between prompts. The node takes a single prompt and interpolates from the previously typed prompt over a specified number of frames. Trigger functions ensure a prompt is only registered after a specified number of frames and a minimum character difference, so prompt changes are handled in real time, yet not too soon, for a fluid animation experience.
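The blend is expressed in standard weighted-prompt syntax; for example, 30% of the way through a transition, the interpolated prompt looks roughly like:

```
(previous prompt text:0.70) (current prompt text:0.30)
```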
128 |
129 |
130 | ## Prompt Stack Manager
131 | 
132 |
133 | The Prompt Stack Manager node is designed for the Dream Zoom Workflow with auto-queue functionality in ComfyUI. It manages a stack of prompts provided in a multiline text box and cycles through them based on the frame count derived from a seed input. This node outputs the current and previous prompts, facilitating live prompt interpolation and seamless transitions between different prompts during animation workflows.
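A hedged sketch of the seed-to-prompt cycling idea (illustrative only; the actual implementation lives in ComfyUI_Prompt_Stack_Manager.py):

```python
# Hypothetical illustration of seed-driven prompt cycling; variable names
# are assumptions, not the node's real code.
prompts = ["a misty forest", "a neon city", "a desert at dusk"]
frames_per_prompt = 25
seed = 1337  # auto-queue typically increments the seed each run

current_frame = seed % (frames_per_prompt * len(prompts))
index = current_frame // frames_per_prompt
current_prompt = prompts[index]
previous_prompt = prompts[index - 1]  # wraps to the last prompt when index == 0
```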
134 | This node was designed to work as a sister node for the following node:
135 |
136 |
137 | ## Attack Hold Weighted Prompt (not inside this repo)
138 | Atomic Perception created this node as a collaborative effort for the Dream-Zoom workflow on Discord. Props to atom.p for inspiring me to get started on custom node creation; his effort on this node led to me creating my own nodes. The node below is referenced here because it's part of the Dream-Zoom workflow.
139 |
140 |
141 |
142 | That node can be found here:
143 | https://github.com/AtomicPerception/ap_Nodes/tree/main
144 |
145 |
146 |
147 | ---
148 | `All of my nodes were created with AI assistance from ChatGPT and Claude.`
149 | ---
150 |
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
1 | from .ComfyUI_Live_Prompt_Interpolation import LivePromptInterpolation
2 | from .ComfyUI_Prompt_Stack_Manager import PromptStackManager
3 | from .DynamicAnimatedWeights import DynamicAnimatedWeightsHelper
4 | from .FlorenceTravelHelper import FlorencePromptTravelHelper
5 | from .MaskSequenceHelper import MaskSequenceHelper
6 | from .PromptTravelHelper import promptTravelHelper
7 | from .FX_Source_Img import EdgeFXSourceImages  # EdgeFXSourceImages is defined in FX_Source_Img.py
8 |
9 | # Add any other necessary imports or initialization code here
10 |
11 | # Mapping the class to its name for the node system
12 | NODE_CLASS_MAPPINGS = {
13 | "LivePromptInterpolation": LivePromptInterpolation,
14 | "PromptStackManager": PromptStackManager,
15 | "DynamicAnimatedWeightsHelper": DynamicAnimatedWeightsHelper,
16 | "FlorencePromptTravelHelper": FlorencePromptTravelHelper,
17 | "MaskSequenceHelper": MaskSequenceHelper,
18 | "PromptTravelHelper": promptTravelHelper,
19 | "EdgeFXSourceImages": EdgeFXSourceImages # Ensure this is correctly referenced
20 | }
21 |
22 | # Display name mappings for the node system
23 | NODE_DISPLAY_NAME_MAPPINGS = {
24 | "LivePromptInterpolation": "Live Prompt Interpolation",
25 | "PromptStackManager": "Prompt Stack Manager",
26 | "DynamicAnimatedWeightsHelper": "Dynamic Animated Weights",
27 | "FlorencePromptTravelHelper": "Florence Prompt Travel Helper",
28 | "MaskSequenceHelper": "Mask Sequence Helper",
29 | "PromptTravelHelper": "Prompt Travel Helper",
30 | "EdgeFXSourceImages": "Edge FX Source Images" # Ensure this is correctly referenced
31 | }
32 |
--------------------------------------------------------------------------------
/__pycache__/ComfyUI_Live_Prompt_Interpolation.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mgfxer/ComfyUI-FrameFX/88324ec508441861e3ebae0e35b58a7a57e40db1/__pycache__/ComfyUI_Live_Prompt_Interpolation.cpython-311.pyc
--------------------------------------------------------------------------------
/__pycache__/ComfyUI_Prompt_Stack_Manager.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mgfxer/ComfyUI-FrameFX/88324ec508441861e3ebae0e35b58a7a57e40db1/__pycache__/ComfyUI_Prompt_Stack_Manager.cpython-311.pyc
--------------------------------------------------------------------------------
/__pycache__/DynamicAnimatedWeights.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mgfxer/ComfyUI-FrameFX/88324ec508441861e3ebae0e35b58a7a57e40db1/__pycache__/DynamicAnimatedWeights.cpython-311.pyc
--------------------------------------------------------------------------------
/__pycache__/FlorenceTravelHelper.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mgfxer/ComfyUI-FrameFX/88324ec508441861e3ebae0e35b58a7a57e40db1/__pycache__/FlorenceTravelHelper.cpython-311.pyc
--------------------------------------------------------------------------------
/__pycache__/MaskSequenceHelper.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mgfxer/ComfyUI-FrameFX/88324ec508441861e3ebae0e35b58a7a57e40db1/__pycache__/MaskSequenceHelper.cpython-311.pyc
--------------------------------------------------------------------------------
/__pycache__/PromptTravelHelper.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mgfxer/ComfyUI-FrameFX/88324ec508441861e3ebae0e35b58a7a57e40db1/__pycache__/PromptTravelHelper.cpython-311.pyc
--------------------------------------------------------------------------------
/git:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mgfxer/ComfyUI-FrameFX/88324ec508441861e3ebae0e35b58a7a57e40db1/git
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "comfyui-framefx"
3 | description = "Nodes:Live Prompt Interpolation, Prompt Stack Manager, Dynamic Animated Weights, Florence Prompt Travel Helper, Mask Sequence Helper, Prompt Travel Helper. This extension provides various frame and mask sequence manipulation tools for animation workflows."
4 | version = "1.0.0"
5 | license = { file = "LICENSE" }
6 |
7 | [project.urls]
8 | Repository = "https://github.com/mgfxer/ComfyUI-FrameFX"
9 | # Used by Comfy Registry https://comfyregistry.org
10 |
11 | [tool.comfy]
12 | PublisherId = "mgfxer"
13 | DisplayName = "ComfyUI-FrameFX"
14 | Icon = "💫"
15 |
--------------------------------------------------------------------------------
/workflows/BlipTravel_AutoWeights_1.5.json:
--------------------------------------------------------------------------------
1 | {"last_node_id": 646, "last_link_id": 2011, "nodes": [{"id": 63, "type": "VHS_VideoCombine", "pos": [4668.206708696588, 307.9595953430793], "size": [423.09051513671875, 908.6357727050781], "flags": {}, "order": 65, "mode": 4, "inputs": [{"name": "images", "type": "IMAGE", "link": 139}, {"name": "audio", "type": "VHS_AUDIO", "link": null}, {"name": "meta_batch", "type": "VHS_BatchManager", "link": null}], "outputs": [{"name": "Filenames", "type": "VHS_FILENAMES", "links": null, "shape": 3}], "properties": {"Node name for S&R": "VHS_VideoCombine"}, "widgets_values": {"frame_rate": 20, "loop_count": 0, "filename_prefix": "BlipTravel_AW Rife", "format": "video/h264-mp4", "pix_fmt": "yuv420p", "crf": 23, "save_metadata": false, "pingpong": false, "save_output": true, "videopreview": {"hidden": false, "paused": false, "params": {"filename": "BlipTravel_AW Rife_00015.mp4", "subfolder": "", "type": "output", "format": "video/h264-mp4"}}}, "color": "#222", "bgcolor": "#000"}, {"id": 94, "type": "RIFE VFI", "pos": [4188.206708696588, 307.9595953430793], "size": {"0": 443.4000244140625, "1": 198}, "flags": {}, "order": 64, "mode": 4, "inputs": [{"name": "frames", "type": "IMAGE", "link": 138}, {"name": "optional_interpolation_states", "type": "INTERPOLATION_STATES", "link": null}], "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [139], "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "RIFE VFI"}, "widgets_values": ["rife47.pth", 10, 2, true, true, 1], "color": "#222", "bgcolor": "#000"}, {"id": 332, "type": "HighRes-Fix Script", "pos": [2316.9292460740203, 767.9595953430793], "size": [325, 250], "flags": {}, "order": 0, "mode": 0, "inputs": [{"name": "script", "type": "SCRIPT", "link": null}], "outputs": [{"name": "SCRIPT", "type": "SCRIPT", "links": [], "shape": 3}], "properties": {"Node name for S&R": "HighRes-Fix Script"}, "widgets_values": ["latent", "(use same)", "nearest-exact", "4x-AnimeSharp.pth", 2, true, 1105566911742777, null, 12, 0.56, 1, false, "SD1.5\\animatediff\\v3_sd15_sparsectrl_rgb.ckpt", 1, "none", false], "color": "#223", "bgcolor": "#335", "shape": 1}, {"id": 71, "type": "LatentUpscaleBy", "pos": [2806.9292460740203, 127.95959534307926], "size": {"0": 210, "1": 82}, "flags": {}, "order": 57, "mode": 0, "inputs": [{"name": "samples", "type": "LATENT", "link": 1028}], "outputs": [{"name": "LATENT", "type": "LATENT", "links": [1078], "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "LatentUpscaleBy"}, "widgets_values": ["bicubic", 2], "color": "#223", "bgcolor": "#335"}, {"id": 492, "type": "Seed Generator", "pos": [2566.9292460740203, 127.95959534307926], "size": {"0": 210, "1": 82}, "flags": {}, "order": 1, "mode": 0, "outputs": [{"name": "INT", "type": "INT", "links": [1578, 1579], "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "Seed Generator"}, "widgets_values": [987654321, "fixed"], "color": "#223", "bgcolor": "#335"}, {"id": 48, "type": "EmptyLatentImage", "pos": [2326.9292460740203, 137.95959534307926], "size": {"0": 210, "1": 74}, "flags": {}, "order": 46, "mode": 0, "inputs": [{"name": "batch_size", "type": "INT", "link": 1873, "widget": {"name": "batch_size"}}, {"name": "width", "type": "INT", "link": 365, "widget": {"name": "width"}}, {"name": "height", "type": "INT", "link": 366, "widget": {"name": "height"}}], "outputs": [{"name": "LATENT", "type": "LATENT", "links": [1016], "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "EmptyLatentImage"}, "widgets_values": [576, 384, 50], 
"color": "#223", "bgcolor": "#335"}, {"id": 322, "type": "ADE_MultivalDynamic", "pos": [150, 1070], "size": {"0": 210, "1": 58}, "flags": {}, "order": 2, "mode": 0, "inputs": [{"name": "mask_optional", "type": "MASK", "link": null}], "outputs": [{"name": "MULTIVAL", "type": "MULTIVAL", "links": [994], "shape": 3, "slot_index": 0}], "title": "Motion Scale \ud83c\udfad\ud83c\udd50\ud83c\udd53", "properties": {"Node name for S&R": "ADE_MultivalDynamic"}, "widgets_values": [1.21], "color": "#2a363b", "bgcolor": "#3f5159"}, {"id": 321, "type": "ADE_LoopedUniformContextOptions", "pos": [100, 540], "size": {"0": 320, "1": 250}, "flags": {}, "order": 3, "mode": 0, "inputs": [{"name": "prev_context", "type": "CONTEXT_OPTIONS", "link": null}, {"name": "view_opts", "type": "VIEW_OPTS", "link": null}], "outputs": [{"name": "CONTEXT_OPTS", "type": "CONTEXT_OPTIONS", "links": [992], "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "ADE_LoopedUniformContextOptions"}, "widgets_values": [16, 1, 4, true, "flat", false, 0, 1], "color": "#2a363b", "bgcolor": "#3f5159"}, {"id": 319, "type": "ADE_ApplyAnimateDiffModelSimple", "pos": [129, 324], "size": {"0": 260.3999938964844, "1": 106}, "flags": {}, "order": 31, "mode": 0, "inputs": [{"name": "motion_model", "type": "MOTION_MODEL_ADE", "link": 993, "slot_index": 0}, {"name": "motion_lora", "type": "MOTION_LORA", "link": 1077, "slot_index": 1}, {"name": "scale_multival", "type": "MULTIVAL", "link": 994, "slot_index": 2}, {"name": "effect_multival", "type": "MULTIVAL", "link": null, "slot_index": 3}, {"name": "ad_keyframes", "type": "AD_KEYFRAMES", "link": null}], "outputs": [{"name": "M_MODELS", "type": "M_MODELS", "links": [991], "shape": 3}], "properties": {"Node name for S&R": "ADE_ApplyAnimateDiffModelSimple"}, "color": "#2a363b", "bgcolor": "#3f5159"}, {"id": 117, "type": "LoadImage", "pos": [-2289.5089735243064, 512.3777774386936], "size": {"0": 320, "1": 314.0000305175781}, "flags": {}, "order": 4, "mode": 0, "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [1965], "shape": 3, "slot_index": 0}, {"name": "MASK", "type": "MASK", "links": null, "shape": 3}], "properties": {"Node name for S&R": "LoadImage"}, "widgets_values": ["0_3 (100).jpg", "image"], "color": "#232", "bgcolor": "#353"}, {"id": 119, "type": "LoadImage", "pos": [-1959.5089735243064, 512.3777774386936], "size": {"0": 320, "1": 314.0000305175781}, "flags": {}, "order": 5, "mode": 0, "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [1968], "shape": 3, "slot_index": 0}, {"name": "MASK", "type": "MASK", "links": null, "shape": 3}], "properties": {"Node name for S&R": "LoadImage"}, "widgets_values": ["0_3 (99).jpg", "image"], "color": "#232", "bgcolor": "#353"}, {"id": 574, "type": "JWInteger", "pos": [-2780, 1110], "size": {"0": 315, "1": 58}, "flags": {}, "order": 6, "mode": 0, "outputs": [{"name": "INT", "type": "INT", "links": [1967, 1970, 1973, 1976, 1979, 1982, 1985, 1988, 1991, 1994, 1997, 2000], "shape": 3, "slot_index": 0}], "title": "Crop Height", "properties": {"Node name for S&R": "JWInteger"}, "widgets_values": [512], "color": "#232", "bgcolor": "#353"}, {"id": 608, "type": "CreateFadeMaskAdvanced", "pos": [-339, 1167], "size": {"0": 210, "1": 280}, "flags": {"collapsed": true}, "order": 47, "mode": 0, "inputs": [{"name": "points_string", "type": "STRING", "link": 1876, "widget": {"name": "points_string"}}, {"name": "frames", "type": "INT", "link": 1883, "widget": {"name": "frames"}}], "outputs": [{"name": "MASK", "type": "MASK", "links": [1877], 
"shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "CreateFadeMaskAdvanced"}, "widgets_values": ["0:(0.0),\n44:(0.0),\n48:(1.0),\n68:(1.0),\n72:(0.0)", false, 96, 128, 128, "ease_in_out"], "color": "#2a363b", "bgcolor": "#3f5159"}, {"id": 532, "type": "CreateFadeMaskAdvanced", "pos": [-338, 1105], "size": {"0": 210, "1": 280}, "flags": {"collapsed": true}, "order": 45, "mode": 0, "inputs": [{"name": "points_string", "type": "STRING", "link": 1871, "widget": {"name": "points_string"}}, {"name": "frames", "type": "INT", "link": 1872, "widget": {"name": "frames"}}], "outputs": [{"name": "MASK", "type": "MASK", "links": [1875, 1887], "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "CreateFadeMaskAdvanced"}, "widgets_values": ["0:(0.0),\n44:(0.0),\n48:(1.0),\n68:(1.0),\n72:(0.0)", false, 96, 128, 128, "ease_in_out"], "color": "#2a363b", "bgcolor": "#3f5159"}, {"id": 607, "type": "MaskSequenceHelper", "pos": [-374, 696], "size": {"0": 304, "1": 138.37965393066406}, "flags": {}, "order": 43, "mode": 0, "inputs": [{"name": "image_stream", "type": "IMAGE", "link": 2009}, {"name": "num_images", "type": "INT", "link": 1861, "widget": {"name": "num_images"}}, {"name": "hold_length", "type": "INT", "link": 2003, "widget": {"name": "hold_length"}}, {"name": "transition_length", "type": "INT", "link": 2005, "widget": {"name": "transition_length"}}, {"name": "padding_frames", "type": "INT", "link": 2007, "widget": {"name": "padding_frames"}}], "outputs": [{"name": "first_timeline", "type": "IMAGE", "links": [1869, 1878], "shape": 3, "slot_index": 0}, {"name": "second_timeline", "type": "IMAGE", "links": [1870, 1879], "shape": 3, "slot_index": 1}, {"name": "first_text_output", "type": "STRING", "links": [1871], "shape": 3, "slot_index": 2}, {"name": "second_text_output", "type": "STRING", "links": [1876], "shape": 3, "slot_index": 3}, {"name": "total_frames", "type": "INT", "links": [1867, 1868, 1872, 1873, 1874, 1881, 1882, 1883], "shape": 3, "slot_index": 4}], "properties": {"Node name for S&R": "MaskSequenceHelper"}, "widgets_values": [4, 5, 20, 0], "color": "#223", "bgcolor": "#335"}, {"id": 4, "type": "VAELoader", "pos": [2516.9292460740203, -2.0404046569207384], "size": {"0": 315, "1": 58}, "flags": {}, "order": 7, "mode": 0, "outputs": [{"name": "VAE", "type": "VAE", "links": [1017], "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "VAELoader"}, "widgets_values": ["vaeFtMse840000EmaPruned_vae.safetensors"], "color": "#323", "bgcolor": "#535"}, {"id": 58, "type": "CLIPTextEncode", "pos": [1826, 1009], "size": {"0": 354.7473449707031, "1": 205.9083709716797}, "flags": {}, "order": 38, "mode": 0, "inputs": [{"name": "clip", "type": "CLIP", "link": 101}], "outputs": [{"name": "CONDITIONING", "type": "CONDITIONING", "links": [1015], "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "CLIPTextEncode"}, "widgets_values": ["worstquality, amateur, poorquality, poor quality."], "color": "#322", "bgcolor": "#533"}, {"id": 66, "type": "CLIPSetLastLayer", "pos": [1452, 1018], "size": {"0": 315, "1": 58}, "flags": {}, "order": 35, "mode": 0, "inputs": [{"name": "clip", "type": "CLIP", "link": 99}], "outputs": [{"name": "CLIP", "type": "CLIP", "links": [100, 101], "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "CLIPSetLastLayer"}, "widgets_values": [-1], "color": "#322", "bgcolor": "#533"}, {"id": 23, "type": "VHS_VideoCombine", "pos": [3121, 304.5443758371351], "size": [450, 940], "flags": {}, "order": 59, "mode": 0, 
"inputs": [{"name": "images", "type": "IMAGE", "link": 2001}, {"name": "audio", "type": "VHS_AUDIO", "link": null}, {"name": "meta_batch", "type": "VHS_BatchManager", "link": null}], "outputs": [{"name": "Filenames", "type": "VHS_FILENAMES", "links": null, "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "VHS_VideoCombine"}, "widgets_values": {"frame_rate": 10, "loop_count": 0, "filename_prefix": "BlipTravel_AW", "format": "video/h264-mp4", "pix_fmt": "yuv420p", "crf": 23, "save_metadata": true, "pingpong": false, "save_output": true, "videopreview": {"hidden": false, "paused": false, "params": {"filename": "BlipTravel_AW_00027.mp4", "subfolder": "", "type": "output", "format": "video/h264-mp4"}}}, "color": "#222", "bgcolor": "#000"}, {"id": 493, "type": "VHS_VideoCombine", "pos": [3612, 296.5443758371351], "size": [447.3279724121094, 944.9919586181641], "flags": {}, "order": 62, "mode": 0, "inputs": [{"name": "images", "type": "IMAGE", "link": 1899}, {"name": "audio", "type": "VHS_AUDIO", "link": null}, {"name": "meta_batch", "type": "VHS_BatchManager", "link": null}], "outputs": [{"name": "Filenames", "type": "VHS_FILENAMES", "links": null, "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "VHS_VideoCombine"}, "widgets_values": {"frame_rate": 10, "loop_count": 0, "filename_prefix": "BlipTravel_Upscale", "format": "video/h264-mp4", "pix_fmt": "yuv420p", "crf": 23, "save_metadata": false, "pingpong": false, "save_output": true, "videopreview": {"hidden": false, "paused": false, "params": {"filename": "BlipTravel_Upscale_00022.mp4", "subfolder": "", "type": "output", "format": "video/h264-mp4"}}}, "color": "#222", "bgcolor": "#000"}, {"id": 586, "type": "IPAdapterTiledBatch", "pos": [549, 130], "size": {"0": 320, "1": 300}, "flags": {}, "order": 52, "mode": 4, "inputs": [{"name": "model", "type": "MODEL", "link": 1763}, {"name": "ipadapter", "type": "IPADAPTER", "link": 1764}, {"name": "image", "type": "IMAGE", "link": 1765}, {"name": "image_negative", "type": "IMAGE", "link": null}, {"name": "attn_mask", "type": "MASK", "link": null}, {"name": "clip_vision", "type": "CLIP_VISION", "link": null}, {"name": "encode_batch_size", "type": "INT", "link": 1874, "widget": {"name": "encode_batch_size"}}], "outputs": [{"name": "MODEL", "type": "MODEL", "links": [1854], "shape": 3, "slot_index": 0}, {"name": "tiles", "type": "IMAGE", "links": null, "shape": 3}, {"name": "masks", "type": "MASK", "links": null, "shape": 3}], "properties": {"Node name for S&R": "IPAdapterTiledBatch"}, "widgets_values": [1.2, "linear", 0, 1, 0, "V only", 0], "color": "#2a363b", "bgcolor": "#3f5159"}, {"id": 8, "type": "BatchPromptSchedule", "pos": [1445.2001087723713, 2], "size": {"0": 713.6990966796875, "1": 428}, "flags": {}, "order": 51, "mode": 0, "inputs": [{"name": "clip", "type": "CLIP", "link": 100, "slot_index": 0}, {"name": "max_frames", "type": "INT", "link": 1868, "widget": {"name": "max_frames"}}, {"name": "text", "type": "STRING", "link": 1772, "widget": {"name": "text"}}, {"name": "end_frame", "type": "INT", "link": 1867, "widget": {"name": "end_frame"}}], "outputs": [{"name": "CONDITIONING", "type": "CONDITIONING", "links": [1014], "shape": 3, "slot_index": 0}, {"name": "NEG", "type": "CONDITIONING", "links": null, "shape": 3}], "properties": {"Node name for S&R": "BatchPromptSchedule"}, "widgets_values": [" \"0\" : \"In a lush, green meadow, a young girl with long, flowing hair sits by a sparkling stream, dipping her toes into the cool water and watching the fish swim 
by.\",\n \"24\" : \"The girl stands up and walks through a forest filled with towering trees, their leaves rustling softly in the breeze. Birds with vibrant plumage sing from the branches above.\",\n \"48\" : \"She arrives at a clearing where a giant, ancient tree stands. Its roots twist and turn, creating natural benches and pathways around it. The air is filled with the scent of blooming flowers.\",\n \"72\" : \"The girl finds a hidden door in the tree trunk. Opening it, she steps into a cavern illuminated by glowing crystals of various colors, casting a magical light on the walls.\",\n \"96\" : \"Inside the cavern, she encounters a wise old wizard with a long, white beard. He sits on a stone throne, surrounded by floating books and scrolls that move on their own.\",\n \"120\" : \"The girl exits the cavern and finds herself on a beach at sunset. The waves crash gently against the shore, and the sand glitters as if it were made of tiny diamonds.\",\n \"144\" : \"Walking along the beach, she discovers a hidden cove. Inside, mermaids swim gracefully in a pool of crystal-clear water, their tails shimmering in the twilight.\",\n \"168\" : \"The girl leaves the cove and enters a mystical forest where the trees glow with bioluminescent light. Strange, ethereal creatures flit between the branches, leaving trails of light behind them.\",\n \"192\" : \"She comes upon a village inhabited by clockwork gnomes and elves. They tinker with intricate machines and gadgets, their eyes sparkling with curiosity as they notice her presence.\",\n \"216\" : \"The girl follows a path leading out of the village and finds a floating island in the sky. Waterfalls cascade off the edges, and mythical creatures soar through the air around her.\",\n \"240\" : \"Returning to the meadow, she sits by the stream once more. 
The sun rises, casting a golden glow over the landscape, and she reflects on her fantastical journey, feeling a sense of peace and wonder.\"", 222, false, "", "trending on artstation, featured, award winning art, best quality, masterpiece", 0, 0, 0, 0, 0, 0], "color": "#232", "bgcolor": "#353"}, {"id": 588, "type": "ShowText|pysssss", "pos": [1402, 538], "size": {"0": 838.2015991210938, "1": 352.51507568359375}, "flags": {"collapsed": false}, "order": 50, "mode": 0, "inputs": [{"name": "text", "type": "STRING", "link": 1770, "widget": {"name": "text"}}], "outputs": [{"name": "STRING", "type": "STRING", "links": null, "shape": 6}], "properties": {"Node name for S&R": "ShowText|pysssss"}, "widgets_values": [["\"0\": \"a close up of a woman's face with red eyes and black paint all over her face, with a red fire in the center of\",\n\"5\": \"a close up of a woman's face with red eyes and black paint all over her face, with a red fire in the center of\",\n\"20\": \"a skull with headphones on it's face is shown in the middle of the image, surrounded by fire and smoke\",\n\"25\": \"a skull with headphones on it's face is shown in the middle of the image, surrounded by fire and smoke\",\n\"40\": \"an image of a rabbit wearing headphones and holding a microphone in front of a microphone with headphones around it's ears\",\n\"45\": \"an image of a rabbit wearing headphones and holding a microphone in front of a microphone with headphones around it's ears\",\n\"60\": \"a woman with red eyes and blood on her face, in the dark, in a scene from the movie'the shape of water '\",\n\"65\": \"a woman with red eyes and blood on her face, in the dark, in a scene from the movie'the shape of water '\",\n\"80\": \"a woman with long hair and fangs on her face, looking at the camera, with her mouth open, in a dark room\",\n\"85\": \"a woman with long hair and fangs on her face, looking at the camera, with her mouth open, in a dark room\",\n\"100\": \"a woman with her face painted in black and gold, holding her hands up to her face, with her mouth open\",\n\"105\": \"a woman with her face painted in black and gold, holding her hands up to her face, with her mouth open\",\n\"120\": \"a skull dj with headphones and headphones playing music in front of a red smoke filled wallpaper with a skull wearing headphones\",\n\"125\": \"a skull dj with headphones and headphones playing music in front of a red smoke filled wallpaper with a skull wearing headphones\",\n\"140\": \"a black and white photo of a man walking through a tunnel in the middle of a desert, with the sky in the background\",\n\"145\": \"a black and white photo of a man walking through a tunnel in the middle of a desert, with the sky in the background\",\n\"160\": \"an image of a man with his face covered in black and white paint, with his mouth open and his eyes closed\",\n\"165\": \"an image of a man with his face covered in black and white paint, with his mouth open and his eyes closed\",\n\"180\": \"a close up of a person's face with a mouth full of mud and a black substance on it's skin\",\n\"185\": \"a close up of a person's face with a mouth full of mud and a black substance on it's skin\",\n\"200\": \"a close up of a woman's face with red eyes and black paint all over her face, with a red fire in the center of\",\n\"205\": \"a close up of a woman's face with red eyes and black paint all over her face, with a red fire in the center of\",\n\"220\": \"a skeleton wearing sunglasses and a hoodie is standing in the middle of a crowded street in the 
background is bright lights\",\n\"225\": \"a skeleton wearing sunglasses and a hoodie is standing in the middle of a crowded street in the background is bright lights\",\n\"240\": \"a close up of a woman's face with red eyes and black paint all over her face, with a red fire in the center of\",\n\"245\": \"a close up of a woman's face with red eyes and black paint all over her face, with a red fire in the center of\""], "\"0\": \"a close up of a woman's face with red eyes and black paint all over her face, with a red fire in the center of\",\n\"5\": \"a close up of a woman's face with red eyes and black paint all over her face, with a red fire in the center of\",\n\"20\": \"a skull with headphones on it's face is shown in the middle of the image, surrounded by fire and smoke\",\n\"25\": \"a skull with headphones on it's face is shown in the middle of the image, surrounded by fire and smoke\",\n\"40\": \"an image of a rabbit wearing headphones and holding a microphone in front of a microphone with headphones around it's ears\",\n\"45\": \"an image of a rabbit wearing headphones and holding a microphone in front of a microphone with headphones around it's ears\",\n\"60\": \"a woman with red eyes and blood on her face, in the dark, in a scene from the movie'the shape of water '\",\n\"65\": \"a woman with red eyes and blood on her face, in the dark, in a scene from the movie'the shape of water '\",\n\"80\": \"a woman with long hair and fangs on her face, looking at the camera, with her mouth open, in a dark room\",\n\"85\": \"a woman with long hair and fangs on her face, looking at the camera, with her mouth open, in a dark room\",\n\"100\": \"a woman with her face painted in black and gold, holding her hands up to her face, with her mouth open\",\n\"105\": \"a woman with her face painted in black and gold, holding her hands up to her face, with her mouth open\",\n\"120\": \"a skull dj with headphones and headphones playing music in front of a red smoke filled wallpaper with a skull wearing headphones\",\n\"125\": \"a skull dj with headphones and headphones playing music in front of a red smoke filled wallpaper with a skull wearing headphones\",\n\"140\": \"a black and white photo of a man walking through a tunnel in the middle of a desert, with the sky in the background\",\n\"145\": \"a black and white photo of a man walking through a tunnel in the middle of a desert, with the sky in the background\",\n\"160\": \"an image of a man with his face covered in black and white paint, with his mouth open and his eyes closed\",\n\"165\": \"an image of a man with his face covered in black and white paint, with his mouth open and his eyes closed\",\n\"180\": \"a close up of a person's face with a mouth full of mud and a black substance on it's skin\",\n\"185\": \"a close up of a person's face with a mouth full of mud and a black substance on it's skin\",\n\"200\": \"a close up of a woman's face with red eyes and black paint all over her face, with a red fire in the center of\",\n\"205\": \"a close up of a woman's face with red eyes and black paint all over her face, with a red fire in the center of\",\n\"220\": \"a skeleton wearing sunglasses and a hoodie is standing in the middle of a crowded street in the background is bright lights\",\n\"225\": \"a skeleton wearing sunglasses and a hoodie is standing in the middle of a crowded street in the background is bright lights\",\n\"240\": \"a close up of a woman's face with red eyes and black paint all over her face, with a red fire in the center 
of\",\n\"245\": \"a close up of a woman's face with red eyes and black paint all over her face, with a red fire in the center of\"", "\"0\": \"a close up of a woman's face with red eyes and black paint all over her face, with a red fire in the center of\",\n\"5\": \"a close up of a woman's face with red eyes and black paint all over her face, with a red fire in the center of\",\n\"20\": \"a skull with headphones on it's face is shown in the middle of the image, surrounded by fire and smoke\",\n\"25\": \"a skull with headphones on it's face is shown in the middle of the image, surrounded by fire and smoke\",\n\"40\": \"an image of a rabbit wearing headphones and holding a microphone in front of a microphone with headphones around it's ears\",\n\"45\": \"an image of a rabbit wearing headphones and holding a microphone in front of a microphone with headphones around it's ears\",\n\"60\": \"a woman with red eyes and blood on her face, in the dark, in a scene from the movie'the shape of water '\",\n\"65\": \"a woman with red eyes and blood on her face, in the dark, in a scene from the movie'the shape of water '\",\n\"80\": \"a woman with long hair and fangs on her face, looking at the camera, with her mouth open, in a dark room\",\n\"85\": \"a woman with long hair and fangs on her face, looking at the camera, with her mouth open, in a dark room\",\n\"100\": \"a woman with her face painted in black and gold, holding her hands up to her face, with her mouth open\",\n\"105\": \"a woman with her face painted in black and gold, holding her hands up to her face, with her mouth open\",\n\"120\": \"a skull dj with headphones and headphones playing music in front of a red smoke filled wallpaper with a skull wearing headphones\",\n\"125\": \"a skull dj with headphones and headphones playing music in front of a red smoke filled wallpaper with a skull wearing headphones\",\n\"140\": \"a black and white photo of a man walking through a tunnel in the middle of a desert, with the sky in the background\",\n\"145\": \"a black and white photo of a man walking through a tunnel in the middle of a desert, with the sky in the background\",\n\"160\": \"an image of a man with his face covered in black and white paint, with his mouth open and his eyes closed\",\n\"165\": \"an image of a man with his face covered in black and white paint, with his mouth open and his eyes closed\",\n\"180\": \"a close up of a person's face with a mouth full of mud and a black substance on it's skin\",\n\"185\": \"a close up of a person's face with a mouth full of mud and a black substance on it's skin\",\n\"200\": \"a close up of a woman's face with red eyes and black paint all over her face, with a red fire in the center of\",\n\"205\": \"a close up of a woman's face with red eyes and black paint all over her face, with a red fire in the center of\",\n\"220\": \"a skeleton wearing sunglasses and a hoodie is standing in the middle of a crowded street in the background is bright lights\",\n\"225\": \"a skeleton wearing sunglasses and a hoodie is standing in the middle of a crowded street in the background is bright lights\",\n\"240\": \"a close up of a woman's face with red eyes and black paint all over her face, with a red fire in the center of\",\n\"245\": \"a close up of a woman's face with red eyes and black paint all over her face, with a red fire in the center of\"", "\"0\": \"a close up view of a bunch of yellow, blue, and orange tissue - like tissue like fibers that can be used as a dye dye\",\n\"8\": \"a close up view of a bunch 
of yellow, blue, and orange tissue - like tissue like fibers that can be used as a dye dye\",\n\"28\": \"a man dressed as a skeleton playing a dj's mixer in a dark room with lights on the wall behind him\",\n\"36\": \"a man dressed as a skeleton playing a dj's mixer in a dark room with lights on the wall behind him\",\n\"56\": \"a close up view of a bunch of yellow, blue, and orange tissue - like tissue like fibers that can be used as a dye dye\",\n\"66\": \"a close up view of a bunch of yellow, blue, and orange tissue - like tissue like fibers that can be used as a dye dye\""], "color": "#626962", "bgcolor": "#4e554e"}, {"id": 217, "type": "JWInteger", "pos": [-857, 1113], "size": {"0": 315, "1": 58}, "flags": {}, "order": 8, "mode": 0, "outputs": [{"name": "INT", "type": "INT", "links": [365], "shape": 3, "slot_index": 0}], "title": "Width", "properties": {"Node name for S&R": "JWInteger"}, "widgets_values": [384], "color": "#232", "bgcolor": "#353"}, {"id": 218, "type": "JWInteger", "pos": [-857, 1218], "size": {"0": 315, "1": 58}, "flags": {}, "order": 9, "mode": 0, "outputs": [{"name": "INT", "type": "INT", "links": [366], "shape": 3, "slot_index": 0}], "title": "Height", "properties": {"Node name for S&R": "JWInteger"}, "widgets_values": [576], "color": "#232", "bgcolor": "#353"}, {"id": 603, "type": "IPAdapterTiledBatch", "pos": [990.9266348039611, 130.38708345238052], "size": {"0": 320, "1": 300}, "flags": {}, "order": 54, "mode": 0, "inputs": [{"name": "model", "type": "MODEL", "link": 1854}, {"name": "ipadapter", "type": "IPADAPTER", "link": 1856}, {"name": "image", "type": "IMAGE", "link": 1878}, {"name": "image_negative", "type": "IMAGE", "link": null}, {"name": "attn_mask", "type": "MASK", "link": 1875}, {"name": "clip_vision", "type": "CLIP_VISION", "link": null}, {"name": "encode_batch_size", "type": "INT", "link": 1881, "widget": {"name": "encode_batch_size"}}], "outputs": [{"name": "MODEL", "type": "MODEL", "links": [1855], "shape": 3, "slot_index": 0}, {"name": "tiles", "type": "IMAGE", "links": null, "shape": 3}, {"name": "masks", "type": "MASK", "links": null, "shape": 3}], "properties": {"Node name for S&R": "IPAdapterTiledBatch"}, "widgets_values": [1.2, "linear", 0, 1, 0, "V only", 0], "color": "#2a363b", "bgcolor": "#3f5159"}, {"id": 610, "type": "Note", "pos": [991.9266348039611, 2.3870834523805486], "size": {"0": 305.6416015625, "1": 65.89276885986328}, "flags": {}, "order": 10, "mode": 0, "properties": {"text": ""}, "widgets_values": ["<<<<< choose one IPAdapter set or the other\n\nUses 2 image sequence + 2 mask sequences"], "color": "#432", "bgcolor": "#653"}, {"id": 604, "type": "IPAdapterTiledBatch", "pos": [991.9266348039611, 534.3870834523805], "size": {"0": 320, "1": 300}, "flags": {}, "order": 55, "mode": 0, "inputs": [{"name": "model", "type": "MODEL", "link": 1855}, {"name": "ipadapter", "type": "IPADAPTER", "link": 1857}, {"name": "image", "type": "IMAGE", "link": 1879}, {"name": "image_negative", "type": "IMAGE", "link": null}, {"name": "attn_mask", "type": "MASK", "link": 1877}, {"name": "clip_vision", "type": "CLIP_VISION", "link": null}, {"name": "encode_batch_size", "type": "INT", "link": 1882, "widget": {"name": "encode_batch_size"}}], "outputs": [{"name": "MODEL", "type": "MODEL", "links": [1880], "shape": 3, "slot_index": 0}, {"name": "tiles", "type": "IMAGE", "links": null, "shape": 3}, {"name": "masks", "type": "MASK", "links": null, "shape": 3}], "properties": {"Node name for S&R": "IPAdapterTiledBatch"}, "widgets_values": [1.2, "linear", 0, 
1, 0, "V only", 0], "color": "#2a363b", "bgcolor": "#3f5159"}, {"id": 549, "type": "ImageCompositeMasked", "pos": [-336, 1226], "size": {"0": 315, "1": 146}, "flags": {"collapsed": true}, "order": 49, "mode": 0, "inputs": [{"name": "destination", "type": "IMAGE", "link": 1870, "slot_index": 0}, {"name": "source", "type": "IMAGE", "link": 1869}, {"name": "mask", "type": "MASK", "link": 1887}], "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [1765, 1835], "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "ImageCompositeMasked"}, "widgets_values": [0, 0, false], "color": "#2a363b", "bgcolor": "#3f5159"}, {"id": 344, "type": "ADE_AnimateDiffLoRALoader", "pos": [97, 139], "size": {"0": 321.558837890625, "1": 82}, "flags": {}, "order": 11, "mode": 0, "inputs": [{"name": "prev_motion_lora", "type": "MOTION_LORA", "link": null}], "outputs": [{"name": "MOTION_LORA", "type": "MOTION_LORA", "links": [1077], "shape": 3}], "properties": {"Node name for S&R": "ADE_AnimateDiffLoRALoader"}, "widgets_values": ["liquidOilEmulsion_v10.safetensors", 1.074], "color": "#323", "bgcolor": "#535"}, {"id": 320, "type": "ADE_LoadAnimateDiffModel", "pos": [98, 2], "size": {"0": 320, "1": 60}, "flags": {}, "order": 12, "mode": 0, "inputs": [{"name": "ad_settings", "type": "AD_SETTINGS", "link": null}], "outputs": [{"name": "MOTION_MODEL", "type": "MOTION_MODEL_ADE", "links": [993], "shape": 3}], "properties": {"Node name for S&R": "ADE_LoadAnimateDiffModel"}, "widgets_values": ["AnimateLCM_sd15_t2v.ckpt"], "color": "#323", "bgcolor": "#535"}, {"id": 609, "type": "Note", "pos": [564, 2], "size": {"0": 293.87249755859375, "1": 65.5311050415039}, "flags": {}, "order": 13, "mode": 4, "properties": {"text": ""}, "widgets_values": ["choose one IPAdapter set or the other >>>>\n\nUses Just 1 image sequence"], "color": "#432", "bgcolor": "#653"}, {"id": 556, "type": "PromptTravelHelper", "pos": [-378, 911], "size": {"0": 304, "1": 94}, "flags": {}, "order": 48, "mode": 0, "inputs": [{"name": "bulk_text_input", "type": "STRING", "link": 1641, "widget": {"name": "bulk_text_input"}}, {"name": "hold_length", "type": "INT", "link": 2002, "widget": {"name": "hold_length"}, "slot_index": 1}, {"name": "transition_length", "type": "INT", "link": 2004, "widget": {"name": "transition_length"}}, {"name": "end_padding_frames", "type": "INT", "link": 2006, "widget": {"name": "end_padding_frames"}}], "outputs": [{"name": "formatted_prompts", "type": "STRING", "links": [1770, 1772], "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "PromptTravelHelper"}, "widgets_values": ["", 5, 5, 10], "color": "#223", "bgcolor": "#335"}, {"id": 630, "type": "ColorCorrect", "pos": [3212, 2.5443758371350857], "size": {"0": 320, "1": 180}, "flags": {}, "order": 58, "mode": 0, "inputs": [{"name": "image", "type": "IMAGE", "link": 1903}], "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [], "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "ColorCorrect"}, "widgets_values": [0, 0, 5, 3, 2, 1.1]}, {"id": 85, "type": "ImageSharpen", "pos": [4719, 20], "size": {"0": 315, "1": 106}, "flags": {}, "order": 63, "mode": 4, "inputs": [{"name": "image", "type": "IMAGE", "link": 1902}], "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [138], "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "ImageSharpen"}, "widgets_values": [2, 0.25, 0.25], "color": "#222", "bgcolor": "#000"}, {"id": 629, "type": "ColorCorrect", "pos": [3668, 0.5443758371350857], "size": {"0": 315, "1": 178}, 
"flags": {}, "order": 61, "mode": 0, "inputs": [{"name": "image", "type": "IMAGE", "link": 1898}], "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [1899, 1902], "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "ColorCorrect"}, "widgets_values": [0, 0, 5, 3, 2, 1.1]}, {"id": 594, "type": "VHS_VideoCombine", "pos": [542, 538], "size": [348.16021728515625, 290], "flags": {}, "order": 53, "mode": 0, "inputs": [{"name": "images", "type": "IMAGE", "link": 1835}, {"name": "audio", "type": "VHS_AUDIO", "link": null}, {"name": "meta_batch", "type": "VHS_BatchManager", "link": null}], "outputs": [{"name": "Filenames", "type": "VHS_FILENAMES", "links": null, "shape": 3}], "properties": {"Node name for S&R": "VHS_VideoCombine"}, "widgets_values": {"frame_rate": 10, "loop_count": 0, "filename_prefix": "PreDiffusion Preview", "format": "video/h264-mp4", "pix_fmt": "yuv420p", "crf": 23, "save_metadata": false, "pingpong": false, "save_output": false, "videopreview": {"hidden": false, "paused": false, "params": {"filename": "PreDiffusion Preview_00003.mp4", "subfolder": "", "type": "temp", "format": "video/h264-mp4"}}}, "color": "#2a363b", "bgcolor": "#3f5159"}, {"id": 333, "type": "KSampler (Efficient)", "pos": [2696.9292460740203, 307.9595953430793], "size": {"0": 325, "1": 350}, "flags": {}, "order": 60, "mode": 0, "inputs": [{"name": "model", "type": "MODEL", "link": 1020}, {"name": "positive", "type": "CONDITIONING", "link": 1021}, {"name": "negative", "type": "CONDITIONING", "link": 1022, "slot_index": 2}, {"name": "latent_image", "type": "LATENT", "link": 1078}, {"name": "optional_vae", "type": "VAE", "link": 1024, "slot_index": 4}, {"name": "script", "type": "SCRIPT", "link": null}, {"name": "seed", "type": "INT", "link": 1578, "widget": {"name": "seed"}, "slot_index": 6}], "outputs": [{"name": "MODEL", "type": "MODEL", "links": [], "shape": 3, "slot_index": 0}, {"name": "CONDITIONING+", "type": "CONDITIONING", "links": [], "shape": 3, "slot_index": 1}, {"name": "CONDITIONING-", "type": "CONDITIONING", "links": null, "shape": 3}, {"name": "LATENT", "type": "LATENT", "links": [], "shape": 3, "slot_index": 3}, {"name": "VAE", "type": "VAE", "links": [], "shape": 3, "slot_index": 4}, {"name": "IMAGE", "type": "IMAGE", "links": [1898], "shape": 3, "slot_index": 5}], "properties": {"Node name for S&R": "KSampler (Efficient)"}, "widgets_values": [644882533724249, null, 9, 1, "lcm", "sgm_uniform", 0.48, "none", "true"], "color": "#223", "bgcolor": "#335", "shape": 1}, {"id": 123, "type": "LoadImage", "pos": [-1287, 505], "size": {"0": 320, "1": 314.0000305175781}, "flags": {}, "order": 14, "mode": 0, "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [1974], "shape": 3, "slot_index": 0}, {"name": "MASK", "type": "MASK", "links": null, "shape": 3}], "properties": {"Node name for S&R": "LoadImage"}, "widgets_values": ["0_0 (12).jpg", "image"], "color": "#232", "bgcolor": "#353"}, {"id": 100, "type": "LoadImage", "pos": [-2960, 112], "size": {"0": 315, "1": 314}, "flags": {}, "order": 15, "mode": 0, "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [1977], "shape": 3, "slot_index": 0}, {"name": "MASK", "type": "MASK", "links": null, "shape": 3}], "properties": {"Node name for S&R": "LoadImage"}, "widgets_values": ["0_0 (10) (1).jpg", "image"], "color": "#232", "bgcolor": "#353"}, {"id": 103, "type": "LoadImage", "pos": [-2630, 112], "size": {"0": 320, "1": 314}, "flags": {}, "order": 16, "mode": 0, "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [1980], "shape": 
3, "slot_index": 0}, {"name": "MASK", "type": "MASK", "links": null, "shape": 3}], "properties": {"Node name for S&R": "LoadImage"}, "widgets_values": ["0_1 (92).jpg", "image"], "color": "#232", "bgcolor": "#353"}, {"id": 105, "type": "LoadImage", "pos": [-2290, 112], "size": {"0": 320, "1": 314}, "flags": {}, "order": 17, "mode": 0, "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [1983], "shape": 3, "slot_index": 0}, {"name": "MASK", "type": "MASK", "links": null, "shape": 3}], "properties": {"Node name for S&R": "LoadImage"}, "widgets_values": ["0_2 (86).jpg", "image"], "color": "#232", "bgcolor": "#353"}, {"id": 107, "type": "LoadImage", "pos": [-1950, 112], "size": {"0": 320, "1": 314}, "flags": {}, "order": 18, "mode": 0, "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [1986], "shape": 3, "slot_index": 0}, {"name": "MASK", "type": "MASK", "links": null, "shape": 3}], "properties": {"Node name for S&R": "LoadImage"}, "widgets_values": ["0_3 (102).jpg", "image"], "color": "#232", "bgcolor": "#353"}, {"id": 109, "type": "LoadImage", "pos": [-1620, 112], "size": {"0": 320, "1": 314}, "flags": {}, "order": 19, "mode": 0, "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [1989], "shape": 3, "slot_index": 0}, {"name": "MASK", "type": "MASK", "links": null, "shape": 3}], "properties": {"Node name for S&R": "LoadImage"}, "widgets_values": ["0_0 (114).jpg", "image"], "color": "#232", "bgcolor": "#353"}, {"id": 111, "type": "LoadImage", "pos": [-1280, 112], "size": {"0": 320, "1": 314}, "flags": {}, "order": 20, "mode": 0, "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [1992], "shape": 3, "slot_index": 0}, {"name": "MASK", "type": "MASK", "links": null, "shape": 3}], "properties": {"Node name for S&R": "LoadImage"}, "widgets_values": ["0_0 (120).jpg", "image"], "color": "#232", "bgcolor": "#353"}, {"id": 113, "type": "LoadImage", "pos": [-2969.5089735243064, 512.3777774386936], "size": {"0": 320, "1": 314.0000305175781}, "flags": {}, "order": 21, "mode": 0, "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [1995], "shape": 3, "slot_index": 0}, {"name": "MASK", "type": "MASK", "links": null, "shape": 3}], "properties": {"Node name for S&R": "LoadImage"}, "widgets_values": ["0_0 (127).jpg", "image"], "color": "#232", "bgcolor": "#353"}, {"id": 115, "type": "LoadImage", "pos": [-2629.5089735243064, 512.3777774386936], "size": {"0": 320, "1": 314.0000305175781}, "flags": {}, "order": 22, "mode": 0, "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [1998], "shape": 3, "slot_index": 0}, {"name": "MASK", "type": "MASK", "links": null, "shape": 3}], "properties": {"Node name for S&R": "LoadImage"}, "widgets_values": ["0_0 (120).jpg", "image"], "color": "#232", "bgcolor": "#353"}, {"id": 318, "type": "ADE_UseEvolvedSampling", "pos": [120, 860], "size": {"0": 288.076171875, "1": 118}, "flags": {}, "order": 40, "mode": 0, "inputs": [{"name": "model", "type": "MODEL", "link": 998, "slot_index": 0}, {"name": "m_models", "type": "M_MODELS", "link": 991, "slot_index": 1}, {"name": "context_options", "type": "CONTEXT_OPTIONS", "link": 992, "slot_index": 2}, {"name": "sample_settings", "type": "SAMPLE_SETTINGS", "link": null, "slot_index": 3}], "outputs": [{"name": "MODEL", "type": "MODEL", "links": [1767], "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "ADE_UseEvolvedSampling"}, "widgets_values": ["sqrt_linear (AnimateDiff)"], "color": "#2a363b", "bgcolor": "#3f5159"}, {"id": 331, "type": "KSampler (Efficient)", "pos": [2317, 310], "size": {"0": 
325, "1": 350}, "flags": {}, "order": 56, "mode": 0, "inputs": [{"name": "model", "type": "MODEL", "link": 1880, "slot_index": 0}, {"name": "positive", "type": "CONDITIONING", "link": 1014}, {"name": "negative", "type": "CONDITIONING", "link": 1015}, {"name": "latent_image", "type": "LATENT", "link": 1016}, {"name": "optional_vae", "type": "VAE", "link": 1017}, {"name": "script", "type": "SCRIPT", "link": null}, {"name": "seed", "type": "INT", "link": 1579, "widget": {"name": "seed"}}], "outputs": [{"name": "MODEL", "type": "MODEL", "links": [1020], "shape": 3, "slot_index": 0}, {"name": "CONDITIONING+", "type": "CONDITIONING", "links": [1021], "shape": 3, "slot_index": 1}, {"name": "CONDITIONING-", "type": "CONDITIONING", "links": [1022], "shape": 3}, {"name": "LATENT", "type": "LATENT", "links": [1028], "shape": 3, "slot_index": 3}, {"name": "VAE", "type": "VAE", "links": [1024], "shape": 3}, {"name": "IMAGE", "type": "IMAGE", "links": [1903, 2001], "shape": 3, "slot_index": 5}], "properties": {"Node name for S&R": "KSampler (Efficient)"}, "widgets_values": [644882533723929, null, 15, 2, "lcm", "sgm_uniform", 1, "none", "true"], "color": "#223", "bgcolor": "#335", "shape": 1}, {"id": 573, "type": "JWInteger", "pos": [-2780, 1000], "size": {"0": 315, "1": 58}, "flags": {}, "order": 23, "mode": 0, "outputs": [{"name": "INT", "type": "INT", "links": [1966, 1969, 1972, 1975, 1978, 1981, 1984, 1987, 1990, 1993, 1996, 1999], "shape": 3, "slot_index": 0}], "title": "Crop Width", "properties": {"Node name for S&R": "JWInteger"}, "widgets_values": [512], "color": "#232", "bgcolor": "#353"}, {"id": 554, "type": "BLIP Model Loader", "pos": [-389, 57], "size": {"0": 315, "1": 106}, "flags": {}, "order": 24, "mode": 0, "outputs": [{"name": "BLIP_MODEL", "type": "BLIP_MODEL", "links": [1639], "shape": 3}], "properties": {"Node name for S&R": "BLIP Model Loader"}, "widgets_values": ["Salesforce/blip-image-captioning-base", "Salesforce/blip-vqa-base", "cuda"], "color": "#233", "bgcolor": "#355"}, {"id": 553, "type": "BLIP Analyze Image", "pos": [-397, 260], "size": {"0": 332.42633056640625, "1": 240}, "flags": {}, "order": 44, "mode": 0, "inputs": [{"name": "images", "type": "IMAGE", "link": 2010}, {"name": "blip_model", "type": "BLIP_MODEL", "link": 1639, "slot_index": 1}], "outputs": [{"name": "STRING", "type": "STRING", "links": [1641], "shape": 3, "slot_index": 0}, {"name": "STRING", "type": "STRING", "links": null, "shape": 6}], "properties": {"Node name for S&R": "BLIP Analyze Image"}, "widgets_values": ["caption", "Describe the image foreground and background, describe the style of the image.", 24, 86, 5, 3, false], "color": "#233", "bgcolor": "#355"}, {"id": 625, "type": "IG Load Images", "pos": [-1400, 1160], "size": {"0": 325.15911865234375, "1": 170}, "flags": {"collapsed": true}, "order": 33, "mode": 0, "inputs": [{"name": "folder", "type": "STRING", "link": 1896, "widget": {"name": "folder"}}], "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [], "shape": 3, "slot_index": 0}, {"name": "MASK", "type": "MASK", "links": null, "shape": 3}, {"name": "INT", "type": "INT", "links": null, "shape": 3}], "properties": {"Node name for S&R": "IG Load Images"}, "widgets_values": ["", 2000, 524, 1], "color": "#233", "bgcolor": "#355"}, {"id": 645, "type": "Reroute", "pos": [-1125, 972], "size": [75, 26], "flags": {}, "order": 41, "mode": 0, "inputs": [{"name": "", "type": "*", "link": 2008}], "outputs": [{"name": "", "type": "IMAGE", "links": [2009, 2010], "slot_index": 0}], "properties": 
{"showOutputText": false, "horizontal": false}, "color": "#2a363b", "bgcolor": "#3f5159"}, {"id": 572, "type": "ImageBatchMulti", "pos": [-2117, 926], "size": {"0": 210, "1": 302}, "flags": {"collapsed": false}, "order": 36, "mode": 0, "inputs": [{"name": "image_1", "type": "IMAGE", "link": 1957}, {"name": "image_2", "type": "IMAGE", "link": 1958}, {"name": "image_3", "type": "IMAGE", "link": 1959}, {"name": "image_4", "type": "IMAGE", "link": 1960}, {"name": "image_5", "type": "IMAGE", "link": 1961}, {"name": "image_6", "type": "IMAGE", "link": 1962}, {"name": "image_7", "type": "IMAGE", "link": 1963}, {"name": "image_8", "type": "IMAGE", "link": 1964}, {"name": "image_9", "type": "IMAGE", "link": 1953}, {"name": "image_10", "type": "IMAGE", "link": 1954}, {"name": "image_11", "type": "IMAGE", "link": 1955}, {"name": "image_12", "type": "IMAGE", "link": 1956}], "outputs": [{"name": "images", "type": "IMAGE", "links": [2011], "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "ImageBatchMulti"}, "widgets_values": [12, null], "color": "#2a363b", "bgcolor": "#3f5159"}, {"id": 643, "type": "workflow/Crop-Convert", "pos": [-2380, 1050], "size": {"0": 493.8000183105469, "1": 1406}, "flags": {"collapsed": true}, "order": 32, "mode": 0, "inputs": [{"name": "image", "type": "IMAGE", "link": 1965}, {"name": "ImageScale image", "type": "IMAGE", "link": 1968}, {"name": "ImageScale 2 image", "type": "IMAGE", "link": 1971}, {"name": "ImageScale 3 image", "type": "IMAGE", "link": 1974}, {"name": "ImageScale 4 image", "type": "IMAGE", "link": 1977}, {"name": "ImageScale 5 image", "type": "IMAGE", "link": 1980}, {"name": "ImageScale 6 image", "type": "IMAGE", "link": 1983}, {"name": "ImageScale 7 image", "type": "IMAGE", "link": 1986}, {"name": "ImageScale 8 image", "type": "IMAGE", "link": 1989}, {"name": "ImageScale 9 image", "type": "IMAGE", "link": 1992}, {"name": "ImageScale 10 image", "type": "IMAGE", "link": 1995}, {"name": "ImageScale 11 image", "type": "IMAGE", "link": 1998}, {"name": "width", "type": "INT", "link": 1966, "widget": {"name": "width"}}, {"name": "height", "type": "INT", "link": 1967, "widget": {"name": "height"}}, {"name": "ImageScale width", "type": "INT", "link": 1969, "widget": {"name": "ImageScale width"}}, {"name": "ImageScale height", "type": "INT", "link": 1970, "widget": {"name": "ImageScale height"}}, {"name": "ImageScale 2 width", "type": "INT", "link": 1972, "widget": {"name": "ImageScale 2 width"}}, {"name": "ImageScale 2 height", "type": "INT", "link": 1973, "widget": {"name": "ImageScale 2 height"}}, {"name": "ImageScale 3 width", "type": "INT", "link": 1975, "widget": {"name": "ImageScale 3 width"}}, {"name": "ImageScale 3 height", "type": "INT", "link": 1976, "widget": {"name": "ImageScale 3 height"}}, {"name": "ImageScale 4 width", "type": "INT", "link": 1978, "widget": {"name": "ImageScale 4 width"}}, {"name": "ImageScale 4 height", "type": "INT", "link": 1979, "widget": {"name": "ImageScale 4 height"}}, {"name": "ImageScale 5 width", "type": "INT", "link": 1981, "widget": {"name": "ImageScale 5 width"}}, {"name": "ImageScale 5 height", "type": "INT", "link": 1982, "widget": {"name": "ImageScale 5 height"}}, {"name": "ImageScale 6 width", "type": "INT", "link": 1984, "widget": {"name": "ImageScale 6 width"}}, {"name": "ImageScale 6 height", "type": "INT", "link": 1985, "widget": {"name": "ImageScale 6 height"}}, {"name": "ImageScale 7 width", "type": "INT", "link": 1987, "widget": {"name": "ImageScale 7 width"}}, {"name": "ImageScale 7 height", 
"type": "INT", "link": 1988, "widget": {"name": "ImageScale 7 height"}}, {"name": "ImageScale 8 width", "type": "INT", "link": 1990, "widget": {"name": "ImageScale 8 width"}}, {"name": "ImageScale 8 height", "type": "INT", "link": 1991, "widget": {"name": "ImageScale 8 height"}}, {"name": "ImageScale 9 width", "type": "INT", "link": 1993, "widget": {"name": "ImageScale 9 width"}}, {"name": "ImageScale 9 height", "type": "INT", "link": 1994, "widget": {"name": "ImageScale 9 height"}}, {"name": "ImageScale 10 width", "type": "INT", "link": 1996, "widget": {"name": "ImageScale 10 width"}}, {"name": "ImageScale 10 height", "type": "INT", "link": 1997, "widget": {"name": "ImageScale 10 height"}}, {"name": "ImageScale 11 width", "type": "INT", "link": 1999, "widget": {"name": "ImageScale 11 width"}}, {"name": "ImageScale 11 height", "type": "INT", "link": 2000, "widget": {"name": "ImageScale 11 height"}}], "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [1953], "shape": 3}, {"name": "ImageScale IMAGE", "type": "IMAGE", "links": [1954], "shape": 3}, {"name": "ImageScale 2 IMAGE", "type": "IMAGE", "links": [1955], "shape": 3}, {"name": "ImageScale 3 IMAGE", "type": "IMAGE", "links": [1956], "shape": 3}, {"name": "ImageScale 4 IMAGE", "type": "IMAGE", "links": [1957], "shape": 3}, {"name": "ImageScale 5 IMAGE", "type": "IMAGE", "links": [1958], "shape": 3}, {"name": "ImageScale 6 IMAGE", "type": "IMAGE", "links": [1959], "shape": 3}, {"name": "ImageScale 7 IMAGE", "type": "IMAGE", "links": [1960], "shape": 3}, {"name": "ImageScale 8 IMAGE", "type": "IMAGE", "links": [1961], "shape": 3}, {"name": "ImageScale 9 IMAGE", "type": "IMAGE", "links": [1962], "shape": 3}, {"name": "ImageScale 10 IMAGE", "type": "IMAGE", "links": [1963], "shape": 3}, {"name": "ImageScale 11 IMAGE", "type": "IMAGE", "links": [1964], "shape": 3}], "properties": {"Node name for S&R": "workflow/Crop-Convert"}, "widgets_values": ["bicubic", "center", "bicubic", "center", "bicubic", "center", "bicubic", "center", "bicubic", "center", "bicubic", "center", "bicubic", "center", "bicubic", "center", "bicubic", "center", "bicubic", "center", "bicubic", "center", "bicubic", "center", 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512]}, {"id": 121, "type": "LoadImage", "pos": [-1624, 509], "size": {"0": 320, "1": 314.0000305175781}, "flags": {}, "order": 25, "mode": 0, "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [1971], "shape": 3, "slot_index": 0}, {"name": "MASK", "type": "MASK", "links": null, "shape": 3}], "properties": {"Node name for S&R": "LoadImage"}, "widgets_values": ["0_3 (93).jpg", "image"], "color": "#232", "bgcolor": "#353"}, {"id": 338, "type": "JWInteger", "pos": [-1818, 1081], "size": {"0": 315, "1": 58}, "flags": {"pinned": false}, "order": 26, "mode": 0, "outputs": [{"name": "INT", "type": "INT", "links": [1824, 1861], "shape": 3, "slot_index": 0}], "title": "# of slides to keep (override)", "properties": {"Node name for S&R": "JWInteger"}, "widgets_values": [12], "color": "#1d4714", "bgcolor": "#093300"}, {"id": 591, "type": "GetImageRangeFromBatch", "pos": [-1442, 1006], "size": {"0": 315, "1": 102}, "flags": {"collapsed": true}, "order": 39, "mode": 0, "inputs": [{"name": "images", "type": "IMAGE", "link": 2011}, {"name": "masks", "type": "MASK", "link": null}, {"name": "num_frames", "type": "INT", "link": 1824, "widget": {"name": "num_frames"}}], "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [2008], "shape": 3, 
"slot_index": 0}, {"name": "MASK", "type": "MASK", "links": null, "shape": 3}], "properties": {"Node name for S&R": "GetImageRangeFromBatch"}, "widgets_values": [0, 1], "color": "#233", "bgcolor": "#355"}, {"id": 626, "type": "IG Folder", "pos": [-1400, 1100], "size": {"0": 315, "1": 82}, "flags": {"collapsed": true}, "order": 27, "mode": 0, "inputs": [], "outputs": [{"name": "STRING", "type": "STRING", "links": [1896], "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "IG Folder"}, "widgets_values": ["input folder", ""], "color": "#233", "bgcolor": "#355"}, {"id": 646, "type": "Note", "pos": [-1160, 1059], "size": {"0": 210, "1": 103.17707824707031}, "flags": {}, "order": 28, "mode": 0, "properties": {"text": ""}, "widgets_values": ["switch to a folder or any number of image inputs here at the reroute,\n\nControl to the left over-rides the count, keeping first in list"], "color": "#432", "bgcolor": "#653"}, {"id": 644, "type": "workflow/Hold, Transition, Padding", "pos": [-856, 841], "size": {"0": 315, "1": 146}, "flags": {}, "order": 29, "mode": 0, "outputs": [{"name": "INT", "type": "INT", "links": [2002, 2003], "shape": 3}, {"name": "Transition Length INT", "type": "INT", "links": [2004, 2005], "shape": 3}, {"name": "End Padding Frames INT", "type": "INT", "links": [2006, 2007], "shape": 3}], "properties": {"Node name for S&R": "workflow/Hold, Transition, Padding"}, "widgets_values": [8, 20, 10], "color": "#232", "bgcolor": "#353"}, {"id": 3, "type": "CheckpointLoaderSimpleWithNoiseSelect", "pos": [-859, 548], "size": {"0": 319.20001220703125, "1": 170}, "flags": {}, "order": 30, "mode": 0, "outputs": [{"name": "MODEL", "type": "MODEL", "links": [996], "shape": 3, "slot_index": 0}, {"name": "CLIP", "type": "CLIP", "links": [88, 99], "shape": 3, "slot_index": 1}, {"name": "VAE", "type": "VAE", "links": [], "shape": 3, "slot_index": 2}], "properties": {"Node name for S&R": "CheckpointLoaderSimpleWithNoiseSelect"}, "widgets_values": ["photon_v1.safetensors", "lcm >> sqrt_linear", false, 0.19347], "color": "#323", "bgcolor": "#535"}, {"id": 59, "type": "LoraLoader", "pos": [-860, 288], "size": {"0": 319.2622985839844, "1": 126}, "flags": {}, "order": 34, "mode": 0, "inputs": [{"name": "model", "type": "MODEL", "link": 996, "slot_index": 0}, {"name": "clip", "type": "CLIP", "link": 88}], "outputs": [{"name": "MODEL", "type": "MODEL", "links": [997], "shape": 3, "slot_index": 0}, {"name": "CLIP", "type": "CLIP", "links": null, "shape": 3}], "properties": {"Node name for S&R": "LoraLoader"}, "widgets_values": ["lcm_lora_sd15.safetensors", 1, 1], "color": "#323", "bgcolor": "#535"}, {"id": 60, "type": "ModelSamplingDiscrete", "pos": [-860, 131], "size": {"0": 315, "1": 82}, "flags": {}, "order": 37, "mode": 0, "inputs": [{"name": "model", "type": "MODEL", "link": 997, "slot_index": 0}], "outputs": [{"name": "MODEL", "type": "MODEL", "links": [998], "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "ModelSamplingDiscrete"}, "widgets_values": ["lcm", false], "color": "#323", "bgcolor": "#535"}, {"id": 587, "type": "IPAdapterUnifiedLoader", "pos": [-854, -7], "size": {"0": 301.4450378417969, "1": 78}, "flags": {}, "order": 42, "mode": 0, "inputs": [{"name": "model", "type": "MODEL", "link": 1767}, {"name": "ipadapter", "type": "IPADAPTER", "link": null}], "outputs": [{"name": "model", "type": "MODEL", "links": [1763], "shape": 3, "slot_index": 0}, {"name": "ipadapter", "type": "IPADAPTER", "links": [1764, 1856, 1857], "shape": 3, "slot_index": 1}], "properties": 
{"Node name for S&R": "IPAdapterUnifiedLoader"}, "widgets_values": ["VIT-G (medium strength)"], "color": "#323", "bgcolor": "#535"}], "links": [[88, 3, 1, 59, 1, "CLIP"], [99, 3, 1, 66, 0, "CLIP"], [100, 66, 0, 8, 0, "CLIP"], [101, 66, 0, 58, 0, "CLIP"], [138, 85, 0, 94, 0, "IMAGE"], [139, 94, 0, 63, 0, "IMAGE"], [365, 217, 0, 48, 1, "INT"], [366, 218, 0, 48, 2, "INT"], [991, 319, 0, 318, 1, "M_MODELS"], [992, 321, 0, 318, 2, "CONTEXT_OPTIONS"], [993, 320, 0, 319, 0, "MOTION_MODEL_ADE"], [994, 322, 0, 319, 2, "MULTIVAL"], [996, 3, 0, 59, 0, "MODEL"], [997, 59, 0, 60, 0, "MODEL"], [998, 60, 0, 318, 0, "MODEL"], [1014, 8, 0, 331, 1, "CONDITIONING"], [1015, 58, 0, 331, 2, "CONDITIONING"], [1016, 48, 0, 331, 3, "LATENT"], [1017, 4, 0, 331, 4, "VAE"], [1020, 331, 0, 333, 0, "MODEL"], [1021, 331, 1, 333, 1, "CONDITIONING"], [1022, 331, 2, 333, 2, "CONDITIONING"], [1024, 331, 4, 333, 4, "VAE"], [1028, 331, 3, 71, 0, "LATENT"], [1077, 344, 0, 319, 1, "MOTION_LORA"], [1078, 71, 0, 333, 3, "LATENT"], [1578, 492, 0, 333, 6, "INT"], [1579, 492, 0, 331, 6, "INT"], [1639, 554, 0, 553, 1, "BLIP_MODEL"], [1641, 553, 0, 556, 0, "STRING"], [1763, 587, 0, 586, 0, "MODEL"], [1764, 587, 1, 586, 1, "IPADAPTER"], [1765, 549, 0, 586, 2, "IMAGE"], [1767, 318, 0, 587, 0, "MODEL"], [1770, 556, 0, 588, 0, "STRING"], [1772, 556, 0, 8, 2, "STRING"], [1824, 338, 0, 591, 2, "INT"], [1835, 549, 0, 594, 0, "IMAGE"], [1854, 586, 0, 603, 0, "MODEL"], [1855, 603, 0, 604, 0, "MODEL"], [1856, 587, 1, 603, 1, "IPADAPTER"], [1857, 587, 1, 604, 1, "IPADAPTER"], [1861, 338, 0, 607, 1, "INT"], [1867, 607, 4, 8, 3, "INT"], [1868, 607, 4, 8, 1, "INT"], [1869, 607, 0, 549, 1, "IMAGE"], [1870, 607, 1, 549, 0, "IMAGE"], [1871, 607, 2, 532, 0, "STRING"], [1872, 607, 4, 532, 1, "INT"], [1873, 607, 4, 48, 0, "INT"], [1874, 607, 4, 586, 6, "INT"], [1875, 532, 0, 603, 4, "MASK"], [1876, 607, 3, 608, 0, "STRING"], [1877, 608, 0, 604, 4, "MASK"], [1878, 607, 0, 603, 2, "IMAGE"], [1879, 607, 1, 604, 2, "IMAGE"], [1880, 604, 0, 331, 0, "MODEL"], [1881, 607, 4, 603, 6, "INT"], [1882, 607, 4, 604, 6, "INT"], [1883, 607, 4, 608, 1, "INT"], [1887, 532, 0, 549, 2, "MASK"], [1896, 626, 0, 625, 0, "STRING"], [1898, 333, 5, 629, 0, "IMAGE"], [1899, 629, 0, 493, 0, "IMAGE"], [1902, 629, 0, 85, 0, "IMAGE"], [1903, 331, 5, 630, 0, "IMAGE"], [1953, 643, 0, 572, 8, "IMAGE"], [1954, 643, 1, 572, 9, "IMAGE"], [1955, 643, 2, 572, 10, "IMAGE"], [1956, 643, 3, 572, 11, "IMAGE"], [1957, 643, 4, 572, 0, "IMAGE"], [1958, 643, 5, 572, 1, "IMAGE"], [1959, 643, 6, 572, 2, "IMAGE"], [1960, 643, 7, 572, 3, "IMAGE"], [1961, 643, 8, 572, 4, "IMAGE"], [1962, 643, 9, 572, 5, "IMAGE"], [1963, 643, 10, 572, 6, "IMAGE"], [1964, 643, 11, 572, 7, "IMAGE"], [1965, 117, 0, 643, 0, "IMAGE"], [1966, 573, 0, 643, 12, "INT"], [1967, 574, 0, 643, 13, "INT"], [1968, 119, 0, 643, 1, "IMAGE"], [1969, 573, 0, 643, 14, "INT"], [1970, 574, 0, 643, 15, "INT"], [1971, 121, 0, 643, 2, "IMAGE"], [1972, 573, 0, 643, 16, "INT"], [1973, 574, 0, 643, 17, "INT"], [1974, 123, 0, 643, 3, "IMAGE"], [1975, 573, 0, 643, 18, "INT"], [1976, 574, 0, 643, 19, "INT"], [1977, 100, 0, 643, 4, "IMAGE"], [1978, 573, 0, 643, 20, "INT"], [1979, 574, 0, 643, 21, "INT"], [1980, 103, 0, 643, 5, "IMAGE"], [1981, 573, 0, 643, 22, "INT"], [1982, 574, 0, 643, 23, "INT"], [1983, 105, 0, 643, 6, "IMAGE"], [1984, 573, 0, 643, 24, "INT"], [1985, 574, 0, 643, 25, "INT"], [1986, 107, 0, 643, 7, "IMAGE"], [1987, 573, 0, 643, 26, "INT"], [1988, 574, 0, 643, 27, "INT"], [1989, 109, 0, 643, 8, "IMAGE"], [1990, 573, 0, 643, 28, "INT"], 
[1991, 574, 0, 643, 29, "INT"], [1992, 111, 0, 643, 9, "IMAGE"], [1993, 573, 0, 643, 30, "INT"], [1994, 574, 0, 643, 31, "INT"], [1995, 113, 0, 643, 10, "IMAGE"], [1996, 573, 0, 643, 32, "INT"], [1997, 574, 0, 643, 33, "INT"], [1998, 115, 0, 643, 11, "IMAGE"], [1999, 573, 0, 643, 34, "INT"], [2000, 574, 0, 643, 35, "INT"], [2001, 331, 5, 23, 0, "IMAGE"], [2002, 644, 0, 556, 1, "INT"], [2003, 644, 0, 607, 2, "INT"], [2004, 644, 1, 556, 2, "INT"], [2005, 644, 1, 607, 3, "INT"], [2006, 644, 2, 556, 3, "INT"], [2007, 644, 2, 607, 4, "INT"], [2008, 591, 0, 645, 0, "*"], [2009, 645, 0, 607, 0, "IMAGE"], [2010, 645, 0, 553, 0, "IMAGE"], [2011, 572, 0, 591, 0, "IMAGE"]], "groups": [{"title": "Animate Diff", "bounding": [26, -104, 435, 1401], "color": "#3f789e", "font_size": 24, "locked": false}, {"title": "Prompts", "bounding": [1373, -107, 897, 1400], "color": "#486057", "font_size": 24, "locked": false}, {"title": "Loader & Settings", "bounding": [-898, -104, 393, 1406], "color": "#8A8", "font_size": 24, "locked": false}, {"title": "VIDEO OUTPUT", "bounding": [3071, -103, 1040, 1394], "color": "#3f789e", "font_size": 24, "locked": false}, {"title": "Sampling / Decoding", "bounding": [2286, -110, 771, 1400], "color": "#88A", "font_size": 24, "locked": false}, {"title": "Blip Travel Auto Weights", "bounding": [-480, -103, 472, 1405], "color": "#3f789e", "font_size": 24, "locked": false}, {"title": "Drop input images", "bounding": [-3011, -113, 2088, 1401], "color": "#8A8", "font_size": 24, "locked": false}, {"title": "RifeOutput", "bounding": [4128, -105, 999, 1395], "color": "#88A", "font_size": 24, "locked": false}, {"title": "IPAdapter Low Vram Option >>>", "bounding": [493, -105, 423, 1402], "color": "#3f789e", "font_size": 24, "locked": false}, {"title": "<< Swap for High Vram Option", "bounding": [939, -107, 418, 1402], "color": "#3f789e", "font_size": 24, "locked": false}], "config": {}, "extra": {"ds": {"scale": 0.7247295000000022, "offset": [1490.7725309362613, 226.51959065611933]}, "groupNodes": {"Crop-Convert": {"nodes": [{"type": "ImageScale", "pos": [-5010, 720], "size": {"0": 320, "1": 130}, "flags": {}, "order": 37, "mode": 0, "inputs": [{"name": "image", "type": "IMAGE", "link": null}, {"name": "width", "type": "INT", "link": null, "widget": {"name": "width"}}, {"name": "height", "type": "INT", "link": null, "widget": {"name": "height"}}], "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [], "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "ImageScale"}, "widgets_values": ["bicubic", 512, 512, "center"], "index": 0}, {"type": "ImageScale", "pos": [-4990, 900], "size": {"0": 320, "1": 130}, "flags": {}, "order": 38, "mode": 0, "inputs": [{"name": "image", "type": "IMAGE", "link": null}, {"name": "width", "type": "INT", "link": null, "widget": {"name": "width"}}, {"name": "height", "type": "INT", "link": null, "widget": {"name": "height"}}], "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [], "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "ImageScale"}, "widgets_values": ["bicubic", 512, 512, "center"], "index": 1}, {"type": "ImageScale", "pos": [-5010, 1080], "size": {"0": 320, "1": 130}, "flags": {}, "order": 39, "mode": 0, "inputs": [{"name": "image", "type": "IMAGE", "link": null}, {"name": "width", "type": "INT", "link": null, "widget": {"name": "width"}}, {"name": "height", "type": "INT", "link": null, "widget": {"name": "height"}}], "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [], "shape": 3, "slot_index": 0}], 
"properties": {"Node name for S&R": "ImageScale"}, "widgets_values": ["bicubic", 512, 512, "center"], "index": 2}, {"type": "ImageScale", "pos": [-5000, 1280], "size": {"0": 320, "1": 130}, "flags": {}, "order": 40, "mode": 0, "inputs": [{"name": "image", "type": "IMAGE", "link": null}, {"name": "width", "type": "INT", "link": null, "widget": {"name": "width"}}, {"name": "height", "type": "INT", "link": null, "widget": {"name": "height"}}], "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [], "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "ImageScale"}, "widgets_values": ["bicubic", 512, 512, "center"], "index": 3}, {"type": "ImageScale", "pos": [-5060, -740], "size": {"0": 315, "1": 130}, "flags": {}, "order": 41, "mode": 0, "inputs": [{"name": "image", "type": "IMAGE", "link": null}, {"name": "width", "type": "INT", "link": null, "widget": {"name": "width"}}, {"name": "height", "type": "INT", "link": null, "widget": {"name": "height"}}], "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [], "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "ImageScale"}, "widgets_values": ["bicubic", 512, 512, "center"], "index": 4}, {"type": "ImageScale", "pos": [-5080, -530], "size": {"0": 320, "1": 130}, "flags": {}, "order": 42, "mode": 0, "inputs": [{"name": "image", "type": "IMAGE", "link": null}, {"name": "width", "type": "INT", "link": null, "widget": {"name": "width"}}, {"name": "height", "type": "INT", "link": null, "widget": {"name": "height"}}], "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [], "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "ImageScale"}, "widgets_values": ["bicubic", 512, 512, "center"], "index": 5}, {"type": "ImageScale", "pos": [-5050, -330], "size": {"0": 320, "1": 130}, "flags": {}, "order": 43, "mode": 0, "inputs": [{"name": "image", "type": "IMAGE", "link": null}, {"name": "width", "type": "INT", "link": null, "widget": {"name": "width"}}, {"name": "height", "type": "INT", "link": null, "widget": {"name": "height"}}], "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [], "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "ImageScale"}, "widgets_values": ["bicubic", 512, 512, "center"], "index": 6}, {"type": "ImageScale", "pos": [-5030, -150], "size": {"0": 320, "1": 130}, "flags": {}, "order": 44, "mode": 0, "inputs": [{"name": "image", "type": "IMAGE", "link": null}, {"name": "width", "type": "INT", "link": null, "widget": {"name": "width"}}, {"name": "height", "type": "INT", "link": null, "widget": {"name": "height"}}], "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [], "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "ImageScale"}, "widgets_values": ["bicubic", 512, 512, "center"], "index": 7}, {"type": "ImageScale", "pos": [-5000, 40], "size": {"0": 320, "1": 130}, "flags": {}, "order": 45, "mode": 0, "inputs": [{"name": "image", "type": "IMAGE", "link": null}, {"name": "width", "type": "INT", "link": null, "widget": {"name": "width"}}, {"name": "height", "type": "INT", "link": null, "widget": {"name": "height"}}], "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [], "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "ImageScale"}, "widgets_values": ["bicubic", 512, 512, "center"], "index": 8}, {"type": "ImageScale", "pos": [-5040, 220], "size": {"0": 320, "1": 130}, "flags": {}, "order": 46, "mode": 0, "inputs": [{"name": "image", "type": "IMAGE", "link": null}, {"name": "width", "type": "INT", "link": null, 
"widget": {"name": "width"}}, {"name": "height", "type": "INT", "link": null, "widget": {"name": "height"}}], "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [], "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "ImageScale"}, "widgets_values": ["bicubic", 512, 512, "center"], "index": 9}, {"type": "ImageScale", "pos": [-5060, 390], "size": {"0": 320, "1": 130}, "flags": {}, "order": 47, "mode": 0, "inputs": [{"name": "image", "type": "IMAGE", "link": null}, {"name": "width", "type": "INT", "link": null, "widget": {"name": "width"}}, {"name": "height", "type": "INT", "link": null, "widget": {"name": "height"}}], "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [], "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "ImageScale"}, "widgets_values": ["bicubic", 512, 512, "center"], "index": 10}, {"type": "ImageScale", "pos": [-5030, 540], "size": {"0": 320, "1": 130}, "flags": {}, "order": 48, "mode": 0, "inputs": [{"name": "image", "type": "IMAGE", "link": null}, {"name": "width", "type": "INT", "link": null, "widget": {"name": "width"}}, {"name": "height", "type": "INT", "link": null, "widget": {"name": "height"}}], "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [], "shape": 3, "slot_index": 0}], "properties": {"Node name for S&R": "ImageScale"}, "widgets_values": ["bicubic", 512, 512, "center"], "index": 11}], "links": [[null, 0, 0, 0, 117, "IMAGE"], [null, 0, 0, 1, 573, "INT"], [null, 0, 0, 2, 574, "INT"], [null, 0, 1, 0, 119, "IMAGE"], [null, 0, 1, 1, 573, "INT"], [null, 0, 1, 2, 574, "INT"], [null, 0, 2, 0, 121, "IMAGE"], [null, 0, 2, 1, 573, "INT"], [null, 0, 2, 2, 574, "INT"], [null, 0, 3, 0, 123, "IMAGE"], [null, 0, 3, 1, 573, "INT"], [null, 0, 3, 2, 574, "INT"], [null, 0, 4, 0, 100, "IMAGE"], [null, 0, 4, 1, 573, "INT"], [null, 0, 4, 2, 574, "INT"], [null, 0, 5, 0, 103, "IMAGE"], [null, 0, 5, 1, 573, "INT"], [null, 0, 5, 2, 574, "INT"], [null, 0, 6, 0, 105, "IMAGE"], [null, 0, 6, 1, 573, "INT"], [null, 0, 6, 2, 574, "INT"], [null, 0, 7, 0, 107, "IMAGE"], [null, 0, 7, 1, 573, "INT"], [null, 0, 7, 2, 574, "INT"], [null, 0, 8, 0, 109, "IMAGE"], [null, 0, 8, 1, 573, "INT"], [null, 0, 8, 2, 574, "INT"], [null, 0, 9, 0, 111, "IMAGE"], [null, 0, 9, 1, 573, "INT"], [null, 0, 9, 2, 574, "INT"], [null, 0, 10, 0, 113, "IMAGE"], [null, 0, 10, 1, 573, "INT"], [null, 0, 10, 2, 574, "INT"], [null, 0, 11, 0, 115, "IMAGE"], [null, 0, 11, 1, 573, "INT"], [null, 0, 11, 2, 574, "INT"]], "external": [[0, 0, "IMAGE"], [1, 0, "IMAGE"], [2, 0, "IMAGE"], [3, 0, "IMAGE"], [4, 0, "IMAGE"], [5, 0, "IMAGE"], [6, 0, "IMAGE"], [7, 0, "IMAGE"], [8, 0, "IMAGE"], [9, 0, "IMAGE"], [10, 0, "IMAGE"], [11, 0, "IMAGE"]]}, "Hold, Transition, Padding": {"nodes": [{"type": "JWInteger", "pos": [-860, 620], "size": {"0": 315, "1": 58}, "flags": {}, "order": 30, "mode": 0, "outputs": [{"name": "INT", "type": "INT", "links": [], "shape": 3, "slot_index": 0}], "title": "Hold Length", "properties": {"Node name for S&R": "JWInteger"}, "widgets_values": [8], "color": "#1e4516", "bgcolor": "#0a3102", "index": 0}, {"type": "JWInteger", "pos": [-860, 720], "size": {"0": 315, "1": 58}, "flags": {"pinned": false}, "order": 31, "mode": 0, "outputs": [{"name": "INT", "type": "INT", "links": [], "shape": 3, "slot_index": 0}], "title": "Transition Length", "properties": {"Node name for S&R": "JWInteger"}, "widgets_values": [20], "color": "#1e4516", "bgcolor": "#0a3102", "index": 1}, {"type": "JWInteger", "pos": [-850, 820], "size": {"0": 315, "1": 58}, "flags": {}, "order": 32, 
"mode": 0, "outputs": [{"name": "INT", "type": "INT", "links": [], "shape": 3, "slot_index": 0}], "title": "End Padding Frames", "properties": {"Node name for S&R": "JWInteger"}, "widgets_values": [10], "color": "#1e4516", "bgcolor": "#0a3102", "index": 2}], "links": [], "external": [[0, 0, "INT"], [1, 0, "INT"], [2, 0, "INT"]]}}}, "version": 0.4}
--------------------------------------------------------------------------------
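The workflow above is a plain litegraph export: a single JSON object with "nodes", "links", "groups", "config", "extra", and "version" keys, where each entry in "links" is a six-element array of [link_id, source_node_id, source_slot, target_node_id, target_slot, type] (e.g. link 88 wires node 3's CLIP output into node 59). Below is a minimal sketch of how one might sanity-check such an export with only the standard library; the file path is an assumption (any of the JSONs under workflows/ would do), not something the export itself fixes.

    # Minimal sketch: inspect a ComfyUI workflow export with the standard library.
    # The path is an assumption -- point it at whichever workflow JSON you saved.
    import json
    from collections import Counter

    with open("workflows/BlipTravel_AutoWeights_1.5.json", encoding="utf-8") as f:
        wf = json.load(f)

    print("format version:", wf.get("version"))
    print("nodes:", len(wf.get("nodes", [])), "links:", len(wf.get("links", [])))

    # Tally node types, e.g. the twelve LoadImage inputs feeding workflow/Crop-Convert.
    types = Counter(node["type"] for node in wf.get("nodes", []))
    for node_type, count in types.most_common(10):
        print(f"{count:3d}  {node_type}")

    # Each link is [id, src_node, src_slot, dst_node, dst_slot, type]; check endpoints.
    node_ids = {node["id"] for node in wf.get("nodes", [])}
    dangling = [ln for ln in wf.get("links", [])
                if ln[1] not in node_ids or ln[3] not in node_ids]
    print("dangling links:", len(dangling))

Because link endpoints are stored by node id, a dangling-link check like this is a quick way to catch hand-edits that deleted a node but left its wires behind before loading the graph into ComfyUI.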