├── .github ├── FUNDING.yml └── workflows │ └── publish.yml ├── .gitignore ├── CrossAttentionPatch.py ├── IPAdapterPlusV2.py ├── LICENSE ├── README.md ├── __init__.py ├── change_node_name.py ├── exampleV2 ├── IPAdapterV1&V2.json ├── IPAdapterV1&V2.png ├── IPAdapterV2_menu.png ├── layer_weights_slider_example.jpg └── layer_weights_slider_example.json ├── examples ├── IPAdapter_FaceIDv2_Kolors.json ├── demo_workflow.jpg ├── ipadapter_advanced.json ├── ipadapter_clipvision_enhancer.json ├── ipadapter_combine_embeds.json ├── ipadapter_cosxl_edit.json ├── ipadapter_faceid.json ├── ipadapter_faceid_batch.json ├── ipadapter_ideal_faceid_config.json ├── ipadapter_kolors.json ├── ipadapter_negative_image.json ├── ipadapter_noise_injection.json ├── ipadapter_portrait.json ├── ipadapter_precise_composition.json ├── ipadapter_precise_weight_type.json ├── ipadapter_regional_conditioning.json ├── ipadapter_simple.json ├── ipadapter_style_composition.json ├── ipadapter_tiled.json ├── ipadapter_weight_types.json ├── ipadapter_weighted_embeds.json └── ipadapter_weights.json ├── image_proj_models.py ├── layer_weights_slider.py ├── models └── legacy_directory_do_not_use.txt ├── pyproject.toml └── utils.py /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: cubiq 4 | custom: ['https://www.paypal.com/paypalme/matt3o'] 5 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish to Comfy registry 2 | on: 3 | workflow_dispatch: 4 | push: 5 | branches: 6 | - main 7 | paths: 8 | - "pyproject.toml" 9 | 10 | jobs: 11 | publish-node: 12 | name: Publish Custom Node to registry 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Check out code 16 | uses: actions/checkout@v4 17 | - name: Publish Custom Node 18 | uses: Comfy-Org/publish-node-action@main 19 | with: 20 | ## Add your own personal access token to your Github Repository secrets and reference it here. 
21 | personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }} -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /__pycache__/ 2 | /models/*.bin 3 | /models/*.safetensors 4 | .directory -------------------------------------------------------------------------------- /CrossAttentionPatch.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import math 3 | import torch.nn.functional as F 4 | from comfy.ldm.modules.attention import optimized_attention 5 | from .utils import tensor_to_size 6 | 7 | class Attn2Replace: 8 | def __init__(self, callback=None, **kwargs): 9 | self.callback = [callback] 10 | self.kwargs = [kwargs] 11 | 12 | def add(self, callback, **kwargs): 13 | self.callback.append(callback) 14 | self.kwargs.append(kwargs) 15 | 16 | for key, value in kwargs.items(): 17 | setattr(self, key, value) 18 | 19 | def __call__(self, q, k, v, extra_options): 20 | dtype = q.dtype 21 | out = optimized_attention(q, k, v, extra_options["n_heads"]) 22 | sigma = extra_options["sigmas"].detach().cpu()[0].item() if 'sigmas' in extra_options else 999999999.9 23 | 24 | for i, callback in enumerate(self.callback): 25 | if sigma <= self.kwargs[i]["sigma_start"] and sigma >= self.kwargs[i]["sigma_end"]: 26 | out = out + callback(out, q, k, v, extra_options, **self.kwargs[i]) 27 | 28 | return out.to(dtype=dtype) 29 | 30 | def ipadapter_attention(out, q, k, v, extra_options, module_key='', ipadapter=None, weight=1.0, cond=None, cond_alt=None, uncond=None, weight_type="linear", mask=None, sigma_start=0.0, sigma_end=1.0, unfold_batch=False, embeds_scaling='V only', **kwargs): 31 | dtype = q.dtype 32 | cond_or_uncond = extra_options["cond_or_uncond"] 33 | block_type = extra_options["block"][0] 34 | #block_id = extra_options["block"][1] 35 | t_idx = extra_options["transformer_index"] 36 | layers = 11 if '101_to_k_ip' in ipadapter.ip_layers.to_kvs else 16 37 | k_key = module_key + "_to_k_ip" 38 | v_key = module_key + "_to_v_ip" 39 | 40 | # extra options for AnimateDiff 41 | ad_params = extra_options['ad_params'] if "ad_params" in extra_options else None 42 | 43 | b = q.shape[0] 44 | seq_len = q.shape[1] 45 | batch_prompt = b // len(cond_or_uncond) 46 | _, _, oh, ow = extra_options["original_shape"] 47 | 48 | if weight_type == 'ease in': 49 | weight = weight * (0.05 + 0.95 * (1 - t_idx / layers)) 50 | elif weight_type == 'ease out': 51 | weight = weight * (0.05 + 0.95 * (t_idx / layers)) 52 | elif weight_type == 'ease in-out': 53 | weight = weight * (0.05 + 0.95 * (1 - abs(t_idx - (layers/2)) / (layers/2))) 54 | elif weight_type == 'reverse in-out': 55 | weight = weight * (0.05 + 0.95 * (abs(t_idx - (layers/2)) / (layers/2))) 56 | elif weight_type == 'weak input' and block_type == 'input': 57 | weight = weight * 0.2 58 | elif weight_type == 'weak middle' and block_type == 'middle': 59 | weight = weight * 0.2 60 | elif weight_type == 'weak output' and block_type == 'output': 61 | weight = weight * 0.2 62 | elif weight_type == 'strong middle' and (block_type == 'input' or block_type == 'output'): 63 | weight = weight * 0.2 64 | elif isinstance(weight, dict): 65 | if t_idx not in weight: 66 | return 0 67 | 68 | if weight_type == "style transfer precise": 69 | if layers == 11 and t_idx == 3: 70 | uncond = cond 71 | cond = cond * 0 72 | elif layers == 16 and (t_idx == 4 or t_idx == 5): 73 | uncond = cond 74 | cond = cond * 0 75 | elif 
weight_type == "composition precise": 76 | if layers == 11 and t_idx != 3: 77 | uncond = cond 78 | cond = cond * 0 79 | elif layers == 16 and (t_idx != 4 and t_idx != 5): 80 | uncond = cond 81 | cond = cond * 0 82 | 83 | weight = weight[t_idx] 84 | 85 | if cond_alt is not None and t_idx in cond_alt: 86 | cond = cond_alt[t_idx] 87 | del cond_alt 88 | 89 | if unfold_batch: 90 | # Check AnimateDiff context window 91 | if ad_params is not None and ad_params["sub_idxs"] is not None: 92 | if isinstance(weight, torch.Tensor): 93 | weight = tensor_to_size(weight, ad_params["full_length"]) 94 | weight = torch.Tensor(weight[ad_params["sub_idxs"]]) 95 | if torch.all(weight == 0): 96 | return 0 97 | weight = weight.repeat(len(cond_or_uncond), 1, 1) # repeat for cond and uncond 98 | elif weight == 0: 99 | return 0 100 | 101 | # if image length matches or exceeds full_length get sub_idx images 102 | if cond.shape[0] >= ad_params["full_length"]: 103 | cond = torch.Tensor(cond[ad_params["sub_idxs"]]) 104 | uncond = torch.Tensor(uncond[ad_params["sub_idxs"]]) 105 | # otherwise get sub_idxs images 106 | else: 107 | cond = tensor_to_size(cond, ad_params["full_length"]) 108 | uncond = tensor_to_size(uncond, ad_params["full_length"]) 109 | cond = cond[ad_params["sub_idxs"]] 110 | uncond = uncond[ad_params["sub_idxs"]] 111 | else: 112 | if isinstance(weight, torch.Tensor): 113 | weight = tensor_to_size(weight, batch_prompt) 114 | if torch.all(weight == 0): 115 | return 0 116 | weight = weight.repeat(len(cond_or_uncond), 1, 1) # repeat for cond and uncond 117 | elif weight == 0: 118 | return 0 119 | 120 | cond = tensor_to_size(cond, batch_prompt) 121 | uncond = tensor_to_size(uncond, batch_prompt) 122 | 123 | k_cond = ipadapter.ip_layers.to_kvs[k_key](cond) 124 | k_uncond = ipadapter.ip_layers.to_kvs[k_key](uncond) 125 | v_cond = ipadapter.ip_layers.to_kvs[v_key](cond) 126 | v_uncond = ipadapter.ip_layers.to_kvs[v_key](uncond) 127 | else: 128 | # TODO: should we always convert the weights to a tensor? 
129 | if isinstance(weight, torch.Tensor): 130 | weight = tensor_to_size(weight, batch_prompt) 131 | if torch.all(weight == 0): 132 | return 0 133 | weight = weight.repeat(len(cond_or_uncond), 1, 1) # repeat for cond and uncond 134 | elif weight == 0: 135 | return 0 136 | 137 | k_cond = ipadapter.ip_layers.to_kvs[k_key](cond).repeat(batch_prompt, 1, 1) 138 | k_uncond = ipadapter.ip_layers.to_kvs[k_key](uncond).repeat(batch_prompt, 1, 1) 139 | v_cond = ipadapter.ip_layers.to_kvs[v_key](cond).repeat(batch_prompt, 1, 1) 140 | v_uncond = ipadapter.ip_layers.to_kvs[v_key](uncond).repeat(batch_prompt, 1, 1) 141 | 142 | if len(cond_or_uncond) == 3: # TODO: cosxl, I need to check this 143 | ip_k = torch.cat([(k_cond, k_uncond, k_cond)[i] for i in cond_or_uncond], dim=0) 144 | ip_v = torch.cat([(v_cond, v_uncond, v_cond)[i] for i in cond_or_uncond], dim=0) 145 | else: 146 | ip_k = torch.cat([(k_cond, k_uncond)[i] for i in cond_or_uncond], dim=0) 147 | ip_v = torch.cat([(v_cond, v_uncond)[i] for i in cond_or_uncond], dim=0) 148 | 149 | if embeds_scaling == 'K+mean(V) w/ C penalty': 150 | scaling = float(ip_k.shape[2]) / 1280.0 151 | weight = weight * scaling 152 | ip_k = ip_k * weight 153 | ip_v_mean = torch.mean(ip_v, dim=1, keepdim=True) 154 | ip_v = (ip_v - ip_v_mean) + ip_v_mean * weight 155 | out_ip = optimized_attention(q, ip_k, ip_v, extra_options["n_heads"]) 156 | del ip_v_mean 157 | elif embeds_scaling == 'K+V w/ C penalty': 158 | scaling = float(ip_k.shape[2]) / 1280.0 159 | weight = weight * scaling 160 | ip_k = ip_k * weight 161 | ip_v = ip_v * weight 162 | out_ip = optimized_attention(q, ip_k, ip_v, extra_options["n_heads"]) 163 | elif embeds_scaling == 'K+V': 164 | ip_k = ip_k * weight 165 | ip_v = ip_v * weight 166 | out_ip = optimized_attention(q, ip_k, ip_v, extra_options["n_heads"]) 167 | else: 168 | #ip_v = ip_v * weight 169 | out_ip = optimized_attention(q, ip_k, ip_v, extra_options["n_heads"]) 170 | out_ip = out_ip * weight # I'm doing this to get the same results as before 171 | 172 | if mask is not None: 173 | mask_h = oh / math.sqrt(oh * ow / seq_len) 174 | mask_h = int(mask_h) + int((seq_len % int(mask_h)) != 0) 175 | mask_w = seq_len // mask_h 176 | 177 | # check if using AnimateDiff and sliding context window 178 | if (mask.shape[0] > 1 and ad_params is not None and ad_params["sub_idxs"] is not None): 179 | # if mask length matches or exceeds full_length, get sub_idx masks 180 | if mask.shape[0] >= ad_params["full_length"]: 181 | mask = torch.Tensor(mask[ad_params["sub_idxs"]]) 182 | mask = F.interpolate(mask.unsqueeze(1), size=(mask_h, mask_w), mode="bilinear").squeeze(1) 183 | else: 184 | mask = F.interpolate(mask.unsqueeze(1), size=(mask_h, mask_w), mode="bilinear").squeeze(1) 185 | mask = tensor_to_size(mask, ad_params["full_length"]) 186 | mask = mask[ad_params["sub_idxs"]] 187 | else: 188 | mask = F.interpolate(mask.unsqueeze(1), size=(mask_h, mask_w), mode="bilinear").squeeze(1) 189 | mask = tensor_to_size(mask, batch_prompt) 190 | 191 | mask = mask.repeat(len(cond_or_uncond), 1, 1) 192 | mask = mask.view(mask.shape[0], -1, 1).repeat(1, 1, out.shape[2]) 193 | 194 | # covers cases where extreme aspect ratios can cause the mask to have a wrong size 195 | mask_len = mask_h * mask_w 196 | if mask_len < seq_len: 197 | pad_len = seq_len - mask_len 198 | pad1 = pad_len // 2 199 | pad2 = pad_len - pad1 200 | mask = F.pad(mask, (0, 0, pad1, pad2), value=0.0) 201 | elif mask_len > seq_len: 202 | crop_start = (mask_len - seq_len) // 2 203 | mask = mask[:, 
crop_start:crop_start+seq_len, :] 204 | 205 | out_ip = out_ip * mask 206 | 207 | #out = out + out_ip 208 | 209 | return out_ip.to(dtype=dtype) 210 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ComfyUI IPAdapter plus V2 2 | 3 | A copy of [ComfyUI_IPAdapter_plus](https://github.com/cubiq/ComfyUI_IPAdapter_plus); only the node names are changed so that it can coexist with the ComfyUI_IPAdapter_plus V1 version. 4 | 5 | 6 | Thanks to the author [Cubiq](https://github.com/cubiq) for his great work. Please support the original project. 7 | 8 | 9 | ![image](exampleV2/IPAdapterV2_menu.png) 10 | ![image](exampleV2/IPAdapterV1&V2.png) 11 | 12 | A copy of the latest ComfyUI_IPAdapter_plus V1 version is saved on [GoogleDrive](https://drive.google.com/drive/folders/1DAcDzKcypm3mKH3yjyM2c07b3XqqM07P?usp=sharing) and [BaiduNetdisk](https://pan.baidu.com/s/1x_SVJIhEzBbENGrlonvtgw?pwd=86gf). 13 | 14 | 15 | ## Add node: 16 | ### IPAdapter Layer Weights Slider 17 | ![image](exampleV2/layer_weights_slider_example.jpg) 18 | The ```IPAdapter Layer Weights Slider``` node is used in conjunction with the ```IPAdapter Mad Scientist``` node to visualize the ```layer_weights``` parameter. 19 | 20 | * Usage: 21 | Each weight slider ranges from -1 to 1. 22 | To go beyond this range, set the ```multiplier```; every slider value is multiplied by it before being output (see the parsing sketch after the layer_weights_slider_example.json workflow below). -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | ██▓ ██▓███ ▄▄▄ ▓█████▄ ▄▄▄ ██▓███ ▄▄▄█████▓▓█████ ██▀███ 3 | ▓██▒▓██░ ██▒▒████▄ ▒██▀ ██▌▒████▄ ▓██░ ██▒▓ ██▒ ▓▒▓█ ▀ ▓██ ▒ ██▒ 4 | ▒██▒▓██░ ██▓▒▒██ ▀█▄ ░██ █▌▒██ ▀█▄ ▓██░ ██▓▒▒ ▓██░ ▒░▒███ ▓██ ░▄█ ▒ 5 | ░██░▒██▄█▓▒ ▒░██▄▄▄▄██ ░▓█▄ ▌░██▄▄▄▄██ ▒██▄█▓▒ ▒░ ▓██▓ ░ ▒▓█ ▄ ▒██▀▀█▄ 6 | ░██░▒██▒ ░ ░ ▓█ ▓██▒░▒████▓ ▓█ ▓██▒▒██▒ ░ ░ ▒██▒ ░ ░▒████▒░██▓ ▒██▒ 7 | ░▓ ▒▓▒░ ░ ░ ▒▒ ▓▒█░ ▒▒▓ ▒ ▒▒ ▓▒█░▒▓▒░ ░ ░ ▒ ░░ ░░ ▒░ ░░ ▒▓ ░▒▓░ 8 | ▒ ░░▒ ░ ▒ ▒▒ ░ ░ ▒ ▒ ▒ ▒▒ ░░▒ ░ ░ ░ ░ ░ ░▒ ░ ▒░ 9 | ▒ ░░░ ░ ▒ ░ ░ ░ ░ ▒ ░░ ░ ░ ░░ ░ 10 | ░ ░ ░ ░ ░ ░ ░ ░ ░ 11 | ░ 12 | · -—+ IPAdapter Plus Extension for ComfyUI +—- · 13 | Brought to you by Matteo "Matt3o/Cubiq" Spinelli 14 | https://github.com/cubiq/ComfyUI_IPAdapter_plus/ 15 | """ 16 | 17 | from .IPAdapterPlusV2 import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS 18 | class_mappings = NODE_CLASS_MAPPINGS 19 | display_name_mappings = NODE_DISPLAY_NAME_MAPPINGS 20 | from .layer_weights_slider import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS 21 | NODE_CLASS_MAPPINGS.update(class_mappings) 22 | NODE_DISPLAY_NAME_MAPPINGS.update(display_name_mappings) 23 | 24 | __all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS'] 25 | -------------------------------------------------------------------------------- /change_node_name.py: -------------------------------------------------------------------------------- 1 | '''Change ComfyUI_IPAdapter_plus Node Name to V2 2 | by chflame@163.com''' 3 | 4 | import os 5 | import glob 6 | 7 | # py file 8 | orig_file_name = 'IPAdapterPlus.py' 9 | dist_file_name = 'IPAdapterPlusV2.py' 10 | orig_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), orig_file_name) 11 | dist_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), dist_file_name) 12 | # json files 13 | json_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "examples") 14 | json_file_list = glob.glob(json_path + '/*.json') 15 |
16 | 17 | class_name_mapping_list = [ 18 | "IPAdapter", 19 | "IPAdapterSimple", 20 | "IPAdapterAdvanced", 21 | "IPAdapterBatch", 22 | "IPAdapterFaceID", 23 | "IPAAdapterFaceIDBatch", 24 | "IPAdapterTiled", 25 | "IPAdapterTiledBatch", 26 | "IPAdapterEmbeds", 27 | "IPAdapterUnifiedLoader", 28 | "IPAdapterUnifiedLoaderFaceID", 29 | "IPAdapterModelLoader", 30 | "IPAdapterInsightFaceLoader", 31 | "IPAdapterEncoder", 32 | "IPAdapterCombineEmbeds", 33 | "IPAdapterNoise", 34 | "PrepImageForClipVision", 35 | "IPAdapterSaveEmbeds", 36 | "IPAdapterLoadEmbeds", 37 | "IPAdapterUnifiedLoaderCommunity", 38 | "IPAdapterStyleComposition", 39 | "IPAdapterStyleCompositionBatch", 40 | "IPAdapterMS", 41 | "IPAdapterWeights", 42 | "IPAdapterRegionalConditioning", 43 | "IPAdapterCombineParams", 44 | "IPAdapterFromParams", 45 | "IPAdapterWeightsFromStrategy", 46 | "IPAdapterPromptScheduleFromWeightsStrategy", 47 | "IPAdapterCombineWeights", 48 | "IPAdapterEmbedsBatch", 49 | "IPAdapterPreciseStyleTransfer", 50 | "IPAdapterPreciseStyleTransferBatch", 51 | "IPAdapterPreciseComposition", 52 | "IPAdapterPreciseCompositionBatch", 53 | "IPAdapterClipVisionEnhancer", 54 | "IPAdapterClipVisionEnhancerBatch", 55 | "IPAdapterFaceIDKolors" 56 | ] 57 | 58 | display_name_mapping_list = [ 59 | "IPAdapter", 60 | "IPAdapter Advanced", 61 | "IPAdapter Batch (Adv.)", 62 | "IPAdapter FaceID", 63 | "IPAdapter FaceID Batch", 64 | "IPAdapter Tiled", 65 | "IPAdapter Tiled Batch", 66 | "IPAdapter Embeds", 67 | "IPAdapter Unified Loader", 68 | "IPAdapter Unified Loader FaceID", 69 | "IPAdapter Model Loader", 70 | "IPAdapter InsightFace Loader", 71 | "IPAdapter Encoder", 72 | "IPAdapter Combine Embeds", 73 | "IPAdapter Noise", 74 | "Prep Image For ClipVision", 75 | "IPAdapter Save Embeds", 76 | "IPAdapter Load Embeds", 77 | "IPAdapter Unified Loader Community", 78 | "IPAdapter Style & Composition SDXL", 79 | "IPAdapter Style & Composition Batch SDXL", 80 | "IPAdapter Mad Scientist", 81 | "IPAdapter Weights", 82 | "IPAdapter Regional Conditioning", 83 | "IPAdapter Combine Params", 84 | "IPAdapter from Params", 85 | "IPAdapter Weights From Strategy", 86 | "Prompt Schedule From Weights Strategy", 87 | "IPAdapter Combine Weights", 88 | "IPAdapter Embeds Batch", 89 | "IPAdapter Precise Style Transfer", 90 | "IPAdapter Precise Style Transfer Batch", 91 | "IPAdapter Precise Composition", 92 | "IPAdapter Precise Composition Batch", 93 | "IPAdapter ClipVision Enhancer", 94 | "IPAdapter ClipVision Enhancer Batch", 95 | "IPAdapter FaceID Kolors" 96 | ] 97 | 98 | def change_node_name(): 99 | if os.path.exists(orig_file_path): 100 | with open(orig_file_path, "r") as f: 101 | content = f.read() 102 | for class_name in class_name_mapping_list: 103 | content = content.replace(f'"{class_name}":', f'"{class_name}V2":') 104 | content = content.replace(f'class {class_name}:', f'class {class_name}V2:') 105 | content = content.replace(f'class {class_name}(', f'class {class_name}V2(') 106 | content = content.replace(f'": {class_name},', f'": {class_name}V2,') 107 | content = content.replace(f'({class_name}):', f'({class_name}V2):') 108 | content = content.replace(f'class IPAdapterV2(nn.Module):', f'class IPAdapter(nn.Module):') 109 | 110 | for display_name in display_name_mapping_list: 111 | content = content.replace(f': "{display_name}",', f': "{display_name} V2",') 112 | 113 | with open(dist_file_path, 'w') as f: 114 | f.write(content) 115 | 116 | os.remove(orig_file_path) 117 | 118 | def change_json_name(): 119 | for i in json_file_list: 120 | with open(i, 
"r") as f: 121 | content = f.read() 122 | for class_name in class_name_mapping_list: 123 | content = content.replace(f'"{class_name}"', f'"{class_name}V2"') 124 | # print(f'replace "{class_name}" to "{class_name}V2"') 125 | with open(i, 'w') as f: 126 | f.write(content) 127 | print(f"Change {i} node name to V2, done.") 128 | 129 | change_node_name() 130 | change_json_name() -------------------------------------------------------------------------------- /exampleV2/IPAdapterV1&V2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_IPAdapter_plus_V2/24bda419546b0229fe290957411f74ab664fe52f/exampleV2/IPAdapterV1&V2.png -------------------------------------------------------------------------------- /exampleV2/IPAdapterV2_menu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_IPAdapter_plus_V2/24bda419546b0229fe290957411f74ab664fe52f/exampleV2/IPAdapterV2_menu.png -------------------------------------------------------------------------------- /exampleV2/layer_weights_slider_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_IPAdapter_plus_V2/24bda419546b0229fe290957411f74ab664fe52f/exampleV2/layer_weights_slider_example.jpg -------------------------------------------------------------------------------- /exampleV2/layer_weights_slider_example.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 17, 3 | "last_link_id": 20, 4 | "nodes": [ 5 | { 6 | "id": 7, 7 | "type": "CLIPTextEncode", 8 | "pos": [ 9 | 840, 10 | 850 11 | ], 12 | "size": { 13 | "0": 425.27801513671875, 14 | "1": 180.6060791015625 15 | }, 16 | "flags": {}, 17 | "order": 8, 18 | "mode": 0, 19 | "inputs": [ 20 | { 21 | "name": "clip", 22 | "type": "CLIP", 23 | "link": 5 24 | } 25 | ], 26 | "outputs": [ 27 | { 28 | "name": "CONDITIONING", 29 | "type": "CONDITIONING", 30 | "links": [ 31 | 6 32 | ], 33 | "slot_index": 0 34 | } 35 | ], 36 | "properties": { 37 | "Node name for S&R": "CLIPTextEncode" 38 | }, 39 | "widgets_values": [ 40 | "text, watermark" 41 | ] 42 | }, 43 | { 44 | "id": 5, 45 | "type": "EmptyLatentImage", 46 | "pos": [ 47 | 900, 48 | 1070 49 | ], 50 | "size": { 51 | "0": 315, 52 | "1": 106 53 | }, 54 | "flags": {}, 55 | "order": 0, 56 | "mode": 0, 57 | "outputs": [ 58 | { 59 | "name": "LATENT", 60 | "type": "LATENT", 61 | "links": [ 62 | 2 63 | ], 64 | "slot_index": 0 65 | } 66 | ], 67 | "properties": { 68 | "Node name for S&R": "EmptyLatentImage" 69 | }, 70 | "widgets_values": [ 71 | 512, 72 | 512, 73 | 1 74 | ] 75 | }, 76 | { 77 | "id": 9, 78 | "type": "SaveImage", 79 | "pos": [ 80 | 1900, 81 | 500 82 | ], 83 | "size": { 84 | "0": 492.97467041015625, 85 | "1": 420.80401611328125 86 | }, 87 | "flags": {}, 88 | "order": 13, 89 | "mode": 0, 90 | "inputs": [ 91 | { 92 | "name": "images", 93 | "type": "IMAGE", 94 | "link": 9 95 | } 96 | ], 97 | "properties": { 98 | "Node name for S&R": "SaveImage" 99 | }, 100 | "widgets_values": [ 101 | "ComfyUI" 102 | ] 103 | }, 104 | { 105 | "id": 14, 106 | "type": "LoadImage", 107 | "pos": [ 108 | 1270, 109 | 1270 110 | ], 111 | "size": { 112 | "0": 315, 113 | "1": 314 114 | }, 115 | "flags": {}, 116 | "order": 1, 117 | "mode": 0, 118 | "outputs": [ 119 | { 120 | "name": "IMAGE", 121 | "type": "IMAGE", 122 | "links": [ 123 | 18 124 | ], 125 | "shape": 3, 126 | 
"slot_index": 0 127 | }, 128 | { 129 | "name": "MASK", 130 | "type": "MASK", 131 | "links": null, 132 | "shape": 3 133 | } 134 | ], 135 | "properties": { 136 | "Node name for S&R": "LoadImage" 137 | }, 138 | "widgets_values": [ 139 | "ComfyUI_temp_rukla_00004_ (1).png", 140 | "image" 141 | ] 142 | }, 143 | { 144 | "id": 4, 145 | "type": "CheckpointLoaderSimple", 146 | "pos": [ 147 | 260, 148 | 680 149 | ], 150 | "size": { 151 | "0": 424.19866943359375, 152 | "1": 98 153 | }, 154 | "flags": {}, 155 | "order": 2, 156 | "mode": 0, 157 | "outputs": [ 158 | { 159 | "name": "MODEL", 160 | "type": "MODEL", 161 | "links": [ 162 | 13 163 | ], 164 | "slot_index": 0 165 | }, 166 | { 167 | "name": "CLIP", 168 | "type": "CLIP", 169 | "links": [ 170 | 3, 171 | 5 172 | ], 173 | "slot_index": 1 174 | }, 175 | { 176 | "name": "VAE", 177 | "type": "VAE", 178 | "links": [ 179 | 8 180 | ], 181 | "slot_index": 2 182 | } 183 | ], 184 | "properties": { 185 | "Node name for S&R": "CheckpointLoaderSimple" 186 | }, 187 | "widgets_values": [ 188 | "SDXL\\sd_xl_base_1.0_0.9vae.safetensors" 189 | ] 190 | }, 191 | { 192 | "id": 11, 193 | "type": "IPAdapterLayerWeightsSlider", 194 | "pos": [ 195 | 620, 196 | 1270 197 | ], 198 | "size": { 199 | "0": 499.1185302734375, 200 | "1": 346 201 | }, 202 | "flags": {}, 203 | "order": 3, 204 | "mode": 0, 205 | "outputs": [ 206 | { 207 | "name": "layer_weights", 208 | "type": "STRING", 209 | "links": [ 210 | 10 211 | ], 212 | "shape": 3, 213 | "slot_index": 0 214 | } 215 | ], 216 | "properties": { 217 | "Node name for S&R": "IPAdapterLayerWeightsSlider" 218 | }, 219 | "widgets_values": [ 220 | 0, 221 | 0, 222 | 0, 223 | 1, 224 | 0, 225 | 0, 226 | 0, 227 | 0, 228 | 0, 229 | 0, 230 | 0, 231 | 0, 232 | 1 233 | ] 234 | }, 235 | { 236 | "id": 12, 237 | "type": "IPAdapterUnifiedLoaderV2", 238 | "pos": [ 239 | 500, 240 | 210 241 | ], 242 | "size": { 243 | "0": 315, 244 | "1": 78 245 | }, 246 | "flags": {}, 247 | "order": 6, 248 | "mode": 0, 249 | "inputs": [ 250 | { 251 | "name": "model", 252 | "type": "MODEL", 253 | "link": 13 254 | }, 255 | { 256 | "name": "ipadapter", 257 | "type": "IPADAPTER", 258 | "link": null 259 | } 260 | ], 261 | "outputs": [ 262 | { 263 | "name": "model", 264 | "type": "MODEL", 265 | "links": [ 266 | 11 267 | ], 268 | "shape": 3, 269 | "slot_index": 0 270 | }, 271 | { 272 | "name": "ipadapter", 273 | "type": "IPADAPTER", 274 | "links": [ 275 | 12, 276 | 17 277 | ], 278 | "shape": 3, 279 | "slot_index": 1 280 | } 281 | ], 282 | "properties": { 283 | "Node name for S&R": "IPAdapterUnifiedLoaderV2" 284 | }, 285 | "widgets_values": [ 286 | "PLUS (high strength)" 287 | ] 288 | }, 289 | { 290 | "id": 6, 291 | "type": "CLIPTextEncode", 292 | "pos": [ 293 | 840, 294 | 650 295 | ], 296 | "size": { 297 | "0": 422.84503173828125, 298 | "1": 164.31304931640625 299 | }, 300 | "flags": {}, 301 | "order": 7, 302 | "mode": 0, 303 | "inputs": [ 304 | { 305 | "name": "clip", 306 | "type": "CLIP", 307 | "link": 3 308 | } 309 | ], 310 | "outputs": [ 311 | { 312 | "name": "CONDITIONING", 313 | "type": "CONDITIONING", 314 | "links": [ 315 | 4 316 | ], 317 | "slot_index": 0 318 | } 319 | ], 320 | "properties": { 321 | "Node name for S&R": "CLIPTextEncode" 322 | }, 323 | "widgets_values": [ 324 | "A monkey carrying a stick" 325 | ] 326 | }, 327 | { 328 | "id": 8, 329 | "type": "VAEDecode", 330 | "pos": [ 331 | 1640, 332 | 652 333 | ], 334 | "size": { 335 | "0": 210, 336 | "1": 46 337 | }, 338 | "flags": {}, 339 | "order": 12, 340 | "mode": 0, 341 | "inputs": [ 342 | { 343 | "name": 
"samples", 344 | "type": "LATENT", 345 | "link": 7 346 | }, 347 | { 348 | "name": "vae", 349 | "type": "VAE", 350 | "link": 8 351 | } 352 | ], 353 | "outputs": [ 354 | { 355 | "name": "IMAGE", 356 | "type": "IMAGE", 357 | "links": [ 358 | 9 359 | ], 360 | "slot_index": 0 361 | } 362 | ], 363 | "properties": { 364 | "Node name for S&R": "VAEDecode" 365 | } 366 | }, 367 | { 368 | "id": 10, 369 | "type": "IPAdapterMSV2", 370 | "pos": [ 371 | 850, 372 | 210 373 | ], 374 | "size": { 375 | "0": 400, 376 | "1": 364 377 | }, 378 | "flags": {}, 379 | "order": 9, 380 | "mode": 0, 381 | "inputs": [ 382 | { 383 | "name": "model", 384 | "type": "MODEL", 385 | "link": 11 386 | }, 387 | { 388 | "name": "ipadapter", 389 | "type": "IPADAPTER", 390 | "link": 12, 391 | "slot_index": 1 392 | }, 393 | { 394 | "name": "image", 395 | "type": "IMAGE", 396 | "link": 15, 397 | "slot_index": 2 398 | }, 399 | { 400 | "name": "image_negative", 401 | "type": "IMAGE", 402 | "link": null 403 | }, 404 | { 405 | "name": "attn_mask", 406 | "type": "MASK", 407 | "link": null 408 | }, 409 | { 410 | "name": "clip_vision", 411 | "type": "CLIP_VISION", 412 | "link": null 413 | }, 414 | { 415 | "name": "insightface", 416 | "type": "INSIGHTFACE", 417 | "link": null 418 | }, 419 | { 420 | "name": "layer_weights", 421 | "type": "STRING", 422 | "link": 10, 423 | "widget": { 424 | "name": "layer_weights" 425 | } 426 | } 427 | ], 428 | "outputs": [ 429 | { 430 | "name": "MODEL", 431 | "type": "MODEL", 432 | "links": [ 433 | 16 434 | ], 435 | "shape": 3, 436 | "slot_index": 0 437 | } 438 | ], 439 | "properties": { 440 | "Node name for S&R": "IPAdapterMSV2" 441 | }, 442 | "widgets_values": [ 443 | 1, 444 | 1, 445 | "linear", 446 | "concat", 447 | 0, 448 | 1, 449 | "V only", 450 | "" 451 | ] 452 | }, 453 | { 454 | "id": 3, 455 | "type": "KSampler", 456 | "pos": [ 457 | 1290, 458 | 650 459 | ], 460 | "size": { 461 | "0": 315, 462 | "1": 262 463 | }, 464 | "flags": {}, 465 | "order": 11, 466 | "mode": 0, 467 | "inputs": [ 468 | { 469 | "name": "model", 470 | "type": "MODEL", 471 | "link": 19 472 | }, 473 | { 474 | "name": "positive", 475 | "type": "CONDITIONING", 476 | "link": 4 477 | }, 478 | { 479 | "name": "negative", 480 | "type": "CONDITIONING", 481 | "link": 6 482 | }, 483 | { 484 | "name": "latent_image", 485 | "type": "LATENT", 486 | "link": 2 487 | } 488 | ], 489 | "outputs": [ 490 | { 491 | "name": "LATENT", 492 | "type": "LATENT", 493 | "links": [ 494 | 7 495 | ], 496 | "slot_index": 0 497 | } 498 | ], 499 | "properties": { 500 | "Node name for S&R": "KSampler" 501 | }, 502 | "widgets_values": [ 503 | 61942868550657, 504 | "fixed", 505 | 20, 506 | 8, 507 | "euler", 508 | "normal", 509 | 1 510 | ] 511 | }, 512 | { 513 | "id": 15, 514 | "type": "IPAdapterMSV2", 515 | "pos": [ 516 | 1278, 517 | 209 518 | ], 519 | "size": { 520 | "0": 400, 521 | "1": 364 522 | }, 523 | "flags": {}, 524 | "order": 10, 525 | "mode": 0, 526 | "inputs": [ 527 | { 528 | "name": "model", 529 | "type": "MODEL", 530 | "link": 16 531 | }, 532 | { 533 | "name": "ipadapter", 534 | "type": "IPADAPTER", 535 | "link": 17, 536 | "slot_index": 1 537 | }, 538 | { 539 | "name": "image", 540 | "type": "IMAGE", 541 | "link": 18, 542 | "slot_index": 2 543 | }, 544 | { 545 | "name": "image_negative", 546 | "type": "IMAGE", 547 | "link": null 548 | }, 549 | { 550 | "name": "attn_mask", 551 | "type": "MASK", 552 | "link": null 553 | }, 554 | { 555 | "name": "clip_vision", 556 | "type": "CLIP_VISION", 557 | "link": null 558 | }, 559 | { 560 | "name": "insightface", 561 | 
"type": "INSIGHTFACE", 562 | "link": null 563 | }, 564 | { 565 | "name": "layer_weights", 566 | "type": "STRING", 567 | "link": 20, 568 | "widget": { 569 | "name": "layer_weights" 570 | } 571 | } 572 | ], 573 | "outputs": [ 574 | { 575 | "name": "MODEL", 576 | "type": "MODEL", 577 | "links": [ 578 | 19 579 | ], 580 | "shape": 3, 581 | "slot_index": 0 582 | } 583 | ], 584 | "properties": { 585 | "Node name for S&R": "IPAdapterMSV2" 586 | }, 587 | "widgets_values": [ 588 | 1, 589 | 1, 590 | "linear", 591 | "concat", 592 | 0, 593 | 1, 594 | "V only", 595 | "" 596 | ] 597 | }, 598 | { 599 | "id": 16, 600 | "type": "IPAdapterLayerWeightsSlider", 601 | "pos": [ 602 | 1610, 603 | 1260 604 | ], 605 | "size": { 606 | "0": 499.1185302734375, 607 | "1": 346 608 | }, 609 | "flags": {}, 610 | "order": 4, 611 | "mode": 0, 612 | "outputs": [ 613 | { 614 | "name": "layer_weights", 615 | "type": "STRING", 616 | "links": [ 617 | 20 618 | ], 619 | "shape": 3, 620 | "slot_index": 0 621 | } 622 | ], 623 | "properties": { 624 | "Node name for S&R": "IPAdapterLayerWeightsSlider" 625 | }, 626 | "widgets_values": [ 627 | 0, 628 | 0, 629 | 0, 630 | 0, 631 | 0, 632 | 0, 633 | 1, 634 | 0, 635 | 0, 636 | 0, 637 | 0, 638 | 0, 639 | 1 640 | ] 641 | }, 642 | { 643 | "id": 13, 644 | "type": "LoadImage", 645 | "pos": [ 646 | 280, 647 | 1274 648 | ], 649 | "size": { 650 | "0": 315, 651 | "1": 314 652 | }, 653 | "flags": {}, 654 | "order": 5, 655 | "mode": 0, 656 | "outputs": [ 657 | { 658 | "name": "IMAGE", 659 | "type": "IMAGE", 660 | "links": [ 661 | 15 662 | ], 663 | "shape": 3 664 | }, 665 | { 666 | "name": "MASK", 667 | "type": "MASK", 668 | "links": null, 669 | "shape": 3 670 | } 671 | ], 672 | "properties": { 673 | "Node name for S&R": "LoadImage" 674 | }, 675 | "widgets_values": [ 676 | "swk.jpg", 677 | "image" 678 | ] 679 | } 680 | ], 681 | "links": [ 682 | [ 683 | 2, 684 | 5, 685 | 0, 686 | 3, 687 | 3, 688 | "LATENT" 689 | ], 690 | [ 691 | 3, 692 | 4, 693 | 1, 694 | 6, 695 | 0, 696 | "CLIP" 697 | ], 698 | [ 699 | 4, 700 | 6, 701 | 0, 702 | 3, 703 | 1, 704 | "CONDITIONING" 705 | ], 706 | [ 707 | 5, 708 | 4, 709 | 1, 710 | 7, 711 | 0, 712 | "CLIP" 713 | ], 714 | [ 715 | 6, 716 | 7, 717 | 0, 718 | 3, 719 | 2, 720 | "CONDITIONING" 721 | ], 722 | [ 723 | 7, 724 | 3, 725 | 0, 726 | 8, 727 | 0, 728 | "LATENT" 729 | ], 730 | [ 731 | 8, 732 | 4, 733 | 2, 734 | 8, 735 | 1, 736 | "VAE" 737 | ], 738 | [ 739 | 9, 740 | 8, 741 | 0, 742 | 9, 743 | 0, 744 | "IMAGE" 745 | ], 746 | [ 747 | 10, 748 | 11, 749 | 0, 750 | 10, 751 | 7, 752 | "STRING" 753 | ], 754 | [ 755 | 11, 756 | 12, 757 | 0, 758 | 10, 759 | 0, 760 | "MODEL" 761 | ], 762 | [ 763 | 12, 764 | 12, 765 | 1, 766 | 10, 767 | 1, 768 | "IPADAPTER" 769 | ], 770 | [ 771 | 13, 772 | 4, 773 | 0, 774 | 12, 775 | 0, 776 | "MODEL" 777 | ], 778 | [ 779 | 15, 780 | 13, 781 | 0, 782 | 10, 783 | 2, 784 | "IMAGE" 785 | ], 786 | [ 787 | 16, 788 | 10, 789 | 0, 790 | 15, 791 | 0, 792 | "MODEL" 793 | ], 794 | [ 795 | 17, 796 | 12, 797 | 1, 798 | 15, 799 | 1, 800 | "IPADAPTER" 801 | ], 802 | [ 803 | 18, 804 | 14, 805 | 0, 806 | 15, 807 | 2, 808 | "IMAGE" 809 | ], 810 | [ 811 | 19, 812 | 15, 813 | 0, 814 | 3, 815 | 0, 816 | "MODEL" 817 | ], 818 | [ 819 | 20, 820 | 16, 821 | 0, 822 | 15, 823 | 7, 824 | "STRING" 825 | ] 826 | ], 827 | "groups": [], 828 | "config": {}, 829 | "extra": { 830 | "ds": { 831 | "scale": 0.7247295000000007, 832 | "offset": { 833 | "0": 651.7206756576027, 834 | "1": 18.49569577115478 835 | } 836 | } 837 | }, 838 | "version": 0.4 839 | } 
-------------------------------------------------------------------------------- /examples/demo_workflow.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_IPAdapter_plus_V2/24bda419546b0229fe290957411f74ab664fe52f/examples/demo_workflow.jpg -------------------------------------------------------------------------------- /examples/ipadapter_advanced.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 17, 3 | "last_link_id": 26, 4 | "nodes": [ 5 | { 6 | "id": 4, 7 | "type": "CheckpointLoaderSimple", 8 | "pos": [ 9 | 50, 10 | 730 11 | ], 12 | "size": { 13 | "0": 315, 14 | "1": 98 15 | }, 16 | "flags": {}, 17 | "order": 0, 18 | "mode": 0, 19 | "outputs": [ 20 | { 21 | "name": "MODEL", 22 | "type": "MODEL", 23 | "links": [ 24 | 20 25 | ], 26 | "slot_index": 0 27 | }, 28 | { 29 | "name": "CLIP", 30 | "type": "CLIP", 31 | "links": [ 32 | 3, 33 | 5 34 | ], 35 | "slot_index": 1 36 | }, 37 | { 38 | "name": "VAE", 39 | "type": "VAE", 40 | "links": [ 41 | 8 42 | ], 43 | "slot_index": 2 44 | } 45 | ], 46 | "properties": { 47 | "Node name for S&R": "CheckpointLoaderSimple" 48 | }, 49 | "widgets_values": [ 50 | "sd15/realisticVisionV51_v51VAE.safetensors" 51 | ] 52 | }, 53 | { 54 | "id": 9, 55 | "type": "SaveImage", 56 | "pos": [ 57 | 1770, 58 | 710 59 | ], 60 | "size": { 61 | "0": 529.7760009765625, 62 | "1": 582.3048095703125 63 | }, 64 | "flags": {}, 65 | "order": 11, 66 | "mode": 0, 67 | "inputs": [ 68 | { 69 | "name": "images", 70 | "type": "IMAGE", 71 | "link": 9 72 | } 73 | ], 74 | "properties": {}, 75 | "widgets_values": [ 76 | "IPAdapterV2" 77 | ] 78 | }, 79 | { 80 | "id": 7, 81 | "type": "CLIPTextEncode", 82 | "pos": [ 83 | 690, 84 | 840 85 | ], 86 | "size": { 87 | "0": 425.27801513671875, 88 | "1": 180.6060791015625 89 | }, 90 | "flags": {}, 91 | "order": 6, 92 | "mode": 0, 93 | "inputs": [ 94 | { 95 | "name": "clip", 96 | "type": "CLIP", 97 | "link": 5 98 | } 99 | ], 100 | "outputs": [ 101 | { 102 | "name": "CONDITIONING", 103 | "type": "CONDITIONING", 104 | "links": [ 105 | 6 106 | ], 107 | "slot_index": 0 108 | } 109 | ], 110 | "properties": { 111 | "Node name for S&R": "CLIPTextEncode" 112 | }, 113 | "widgets_values": [ 114 | "blurry, noisy, messy, lowres, jpeg, artifacts, ill, distorted, malformed" 115 | ] 116 | }, 117 | { 118 | "id": 8, 119 | "type": "VAEDecode", 120 | "pos": [ 121 | 1570, 122 | 700 123 | ], 124 | "size": { 125 | "0": 140, 126 | "1": 46 127 | }, 128 | "flags": {}, 129 | "order": 10, 130 | "mode": 0, 131 | "inputs": [ 132 | { 133 | "name": "samples", 134 | "type": "LATENT", 135 | "link": 7 136 | }, 137 | { 138 | "name": "vae", 139 | "type": "VAE", 140 | "link": 8 141 | } 142 | ], 143 | "outputs": [ 144 | { 145 | "name": "IMAGE", 146 | "type": "IMAGE", 147 | "links": [ 148 | 9 149 | ], 150 | "slot_index": 0 151 | } 152 | ], 153 | "properties": { 154 | "Node name for S&R": "VAEDecode" 155 | } 156 | }, 157 | { 158 | "id": 5, 159 | "type": "EmptyLatentImage", 160 | "pos": [ 161 | 801, 162 | 1097 163 | ], 164 | "size": { 165 | "0": 315, 166 | "1": 106 167 | }, 168 | "flags": {}, 169 | "order": 1, 170 | "mode": 0, 171 | "outputs": [ 172 | { 173 | "name": "LATENT", 174 | "type": "LATENT", 175 | "links": [ 176 | 2 177 | ], 178 | "slot_index": 0 179 | } 180 | ], 181 | "properties": { 182 | "Node name for S&R": "EmptyLatentImage" 183 | }, 184 | "widgets_values": [ 185 | 512, 186 | 512, 187 | 1 188 | ] 189 | }, 190 | { 191 | "id": 6, 
192 | "type": "CLIPTextEncode", 193 | "pos": [ 194 | 690, 195 | 610 196 | ], 197 | "size": { 198 | "0": 422.84503173828125, 199 | "1": 164.31304931640625 200 | }, 201 | "flags": {}, 202 | "order": 5, 203 | "mode": 0, 204 | "inputs": [ 205 | { 206 | "name": "clip", 207 | "type": "CLIP", 208 | "link": 3 209 | } 210 | ], 211 | "outputs": [ 212 | { 213 | "name": "CONDITIONING", 214 | "type": "CONDITIONING", 215 | "links": [ 216 | 4 217 | ], 218 | "slot_index": 0 219 | } 220 | ], 221 | "properties": { 222 | "Node name for S&R": "CLIPTextEncode" 223 | }, 224 | "widgets_values": [ 225 | "in a peaceful spring morning a woman wearing a white shirt is sitting in a park on a bench\n\nhigh quality, detailed, diffuse light" 226 | ] 227 | }, 228 | { 229 | "id": 3, 230 | "type": "KSampler", 231 | "pos": [ 232 | 1210, 233 | 700 234 | ], 235 | "size": { 236 | "0": 315, 237 | "1": 262 238 | }, 239 | "flags": {}, 240 | "order": 9, 241 | "mode": 0, 242 | "inputs": [ 243 | { 244 | "name": "model", 245 | "type": "MODEL", 246 | "link": 23 247 | }, 248 | { 249 | "name": "positive", 250 | "type": "CONDITIONING", 251 | "link": 4 252 | }, 253 | { 254 | "name": "negative", 255 | "type": "CONDITIONING", 256 | "link": 6 257 | }, 258 | { 259 | "name": "latent_image", 260 | "type": "LATENT", 261 | "link": 2 262 | } 263 | ], 264 | "outputs": [ 265 | { 266 | "name": "LATENT", 267 | "type": "LATENT", 268 | "links": [ 269 | 7 270 | ], 271 | "slot_index": 0 272 | } 273 | ], 274 | "properties": { 275 | "Node name for S&R": "KSampler" 276 | }, 277 | "widgets_values": [ 278 | 0, 279 | "fixed", 280 | 30, 281 | 6.5, 282 | "ddpm", 283 | "karras", 284 | 1 285 | ] 286 | }, 287 | { 288 | "id": 14, 289 | "type": "IPAdapterAdvancedV2", 290 | "pos": [ 291 | 801, 292 | 256 293 | ], 294 | "size": { 295 | "0": 315, 296 | "1": 278 297 | }, 298 | "flags": {}, 299 | "order": 8, 300 | "mode": 0, 301 | "inputs": [ 302 | { 303 | "name": "model", 304 | "type": "MODEL", 305 | "link": 20 306 | }, 307 | { 308 | "name": "ipadapter", 309 | "type": "IPADAPTER", 310 | "link": 21, 311 | "slot_index": 1 312 | }, 313 | { 314 | "name": "image", 315 | "type": "IMAGE", 316 | "link": 26 317 | }, 318 | { 319 | "name": "image_negative", 320 | "type": "IMAGE", 321 | "link": null 322 | }, 323 | { 324 | "name": "attn_mask", 325 | "type": "MASK", 326 | "link": null 327 | }, 328 | { 329 | "name": "clip_vision", 330 | "type": "CLIP_VISION", 331 | "link": 24, 332 | "slot_index": 5 333 | } 334 | ], 335 | "outputs": [ 336 | { 337 | "name": "MODEL", 338 | "type": "MODEL", 339 | "links": [ 340 | 23 341 | ], 342 | "shape": 3, 343 | "slot_index": 0 344 | } 345 | ], 346 | "properties": { 347 | "Node name for S&R": "IPAdapterAdvancedV2" 348 | }, 349 | "widgets_values": [ 350 | 0.8, 351 | "linear", 352 | "concat", 353 | 0, 354 | 1, 355 | "V only" 356 | ] 357 | }, 358 | { 359 | "id": 17, 360 | "type": "PrepImageForClipVisionV2", 361 | "pos": [ 362 | 797, 363 | 87 364 | ], 365 | "size": { 366 | "0": 315, 367 | "1": 106 368 | }, 369 | "flags": {}, 370 | "order": 7, 371 | "mode": 0, 372 | "inputs": [ 373 | { 374 | "name": "image", 375 | "type": "IMAGE", 376 | "link": 25 377 | } 378 | ], 379 | "outputs": [ 380 | { 381 | "name": "IMAGE", 382 | "type": "IMAGE", 383 | "links": [ 384 | 26 385 | ], 386 | "shape": 3, 387 | "slot_index": 0 388 | } 389 | ], 390 | "properties": { 391 | "Node name for S&R": "PrepImageForClipVisionV2" 392 | }, 393 | "widgets_values": [ 394 | "LANCZOS", 395 | "top", 396 | 0.15 397 | ] 398 | }, 399 | { 400 | "id": 15, 401 | "type": "IPAdapterModelLoaderV2", 402 | 
"pos": [ 403 | 308, 404 | 52 405 | ], 406 | "size": { 407 | "0": 315, 408 | "1": 58 409 | }, 410 | "flags": {}, 411 | "order": 2, 412 | "mode": 0, 413 | "outputs": [ 414 | { 415 | "name": "IPADAPTER", 416 | "type": "IPADAPTER", 417 | "links": [ 418 | 21 419 | ], 420 | "shape": 3 421 | } 422 | ], 423 | "properties": { 424 | "Node name for S&R": "IPAdapterModelLoaderV2" 425 | }, 426 | "widgets_values": [ 427 | "ip-adapter-plus_sd15.safetensors" 428 | ] 429 | }, 430 | { 431 | "id": 16, 432 | "type": "CLIPVisionLoader", 433 | "pos": [ 434 | 308, 435 | 161 436 | ], 437 | "size": { 438 | "0": 315, 439 | "1": 58 440 | }, 441 | "flags": {}, 442 | "order": 3, 443 | "mode": 0, 444 | "outputs": [ 445 | { 446 | "name": "CLIP_VISION", 447 | "type": "CLIP_VISION", 448 | "links": [ 449 | 24 450 | ], 451 | "shape": 3 452 | } 453 | ], 454 | "properties": { 455 | "Node name for S&R": "CLIPVisionLoader" 456 | }, 457 | "widgets_values": [ 458 | "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors" 459 | ] 460 | }, 461 | { 462 | "id": 12, 463 | "type": "LoadImage", 464 | "pos": [ 465 | 311, 466 | 270 467 | ], 468 | "size": { 469 | "0": 315, 470 | "1": 314 471 | }, 472 | "flags": {}, 473 | "order": 4, 474 | "mode": 0, 475 | "outputs": [ 476 | { 477 | "name": "IMAGE", 478 | "type": "IMAGE", 479 | "links": [ 480 | 25 481 | ], 482 | "shape": 3, 483 | "slot_index": 0 484 | }, 485 | { 486 | "name": "MASK", 487 | "type": "MASK", 488 | "links": null, 489 | "shape": 3 490 | } 491 | ], 492 | "properties": { 493 | "Node name for S&R": "LoadImage" 494 | }, 495 | "widgets_values": [ 496 | "warrior_woman.png", 497 | "image" 498 | ] 499 | } 500 | ], 501 | "links": [ 502 | [ 503 | 2, 504 | 5, 505 | 0, 506 | 3, 507 | 3, 508 | "LATENT" 509 | ], 510 | [ 511 | 3, 512 | 4, 513 | 1, 514 | 6, 515 | 0, 516 | "CLIP" 517 | ], 518 | [ 519 | 4, 520 | 6, 521 | 0, 522 | 3, 523 | 1, 524 | "CONDITIONING" 525 | ], 526 | [ 527 | 5, 528 | 4, 529 | 1, 530 | 7, 531 | 0, 532 | "CLIP" 533 | ], 534 | [ 535 | 6, 536 | 7, 537 | 0, 538 | 3, 539 | 2, 540 | "CONDITIONING" 541 | ], 542 | [ 543 | 7, 544 | 3, 545 | 0, 546 | 8, 547 | 0, 548 | "LATENT" 549 | ], 550 | [ 551 | 8, 552 | 4, 553 | 2, 554 | 8, 555 | 1, 556 | "VAE" 557 | ], 558 | [ 559 | 9, 560 | 8, 561 | 0, 562 | 9, 563 | 0, 564 | "IMAGE" 565 | ], 566 | [ 567 | 20, 568 | 4, 569 | 0, 570 | 14, 571 | 0, 572 | "MODEL" 573 | ], 574 | [ 575 | 21, 576 | 15, 577 | 0, 578 | 14, 579 | 1, 580 | "IPADAPTER" 581 | ], 582 | [ 583 | 23, 584 | 14, 585 | 0, 586 | 3, 587 | 0, 588 | "MODEL" 589 | ], 590 | [ 591 | 24, 592 | 16, 593 | 0, 594 | 14, 595 | 5, 596 | "CLIP_VISION" 597 | ], 598 | [ 599 | 25, 600 | 12, 601 | 0, 602 | 17, 603 | 0, 604 | "IMAGE" 605 | ], 606 | [ 607 | 26, 608 | 17, 609 | 0, 610 | 14, 611 | 2, 612 | "IMAGE" 613 | ] 614 | ], 615 | "groups": [], 616 | "config": {}, 617 | "extra": {}, 618 | "version": 0.4 619 | } -------------------------------------------------------------------------------- /examples/ipadapter_faceid.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 20, 3 | "last_link_id": 36, 4 | "nodes": [ 5 | { 6 | "id": 8, 7 | "type": "VAEDecode", 8 | "pos": [ 9 | 1640, 10 | 710 11 | ], 12 | "size": { 13 | "0": 140, 14 | "1": 46 15 | }, 16 | "flags": {}, 17 | "order": 8, 18 | "mode": 0, 19 | "inputs": [ 20 | { 21 | "name": "samples", 22 | "type": "LATENT", 23 | "link": 7 24 | }, 25 | { 26 | "name": "vae", 27 | "type": "VAE", 28 | "link": 8 29 | } 30 | ], 31 | "outputs": [ 32 | { 33 | "name": "IMAGE", 34 | "type": "IMAGE", 35 | "links": [ 
36 | 9 37 | ], 38 | "slot_index": 0 39 | } 40 | ], 41 | "properties": { 42 | "Node name for S&R": "VAEDecode" 43 | } 44 | }, 45 | { 46 | "id": 5, 47 | "type": "EmptyLatentImage", 48 | "pos": [ 49 | 870, 50 | 1100 51 | ], 52 | "size": { 53 | "0": 315, 54 | "1": 106 55 | }, 56 | "flags": {}, 57 | "order": 0, 58 | "mode": 0, 59 | "outputs": [ 60 | { 61 | "name": "LATENT", 62 | "type": "LATENT", 63 | "links": [ 64 | 2 65 | ], 66 | "slot_index": 0 67 | } 68 | ], 69 | "properties": { 70 | "Node name for S&R": "EmptyLatentImage" 71 | }, 72 | "widgets_values": [ 73 | 512, 74 | 512, 75 | 1 76 | ] 77 | }, 78 | { 79 | "id": 3, 80 | "type": "KSampler", 81 | "pos": [ 82 | 1280, 83 | 710 84 | ], 85 | "size": { 86 | "0": 315, 87 | "1": 262 88 | }, 89 | "flags": {}, 90 | "order": 7, 91 | "mode": 0, 92 | "inputs": [ 93 | { 94 | "name": "model", 95 | "type": "MODEL", 96 | "link": 32 97 | }, 98 | { 99 | "name": "positive", 100 | "type": "CONDITIONING", 101 | "link": 4 102 | }, 103 | { 104 | "name": "negative", 105 | "type": "CONDITIONING", 106 | "link": 6 107 | }, 108 | { 109 | "name": "latent_image", 110 | "type": "LATENT", 111 | "link": 2 112 | } 113 | ], 114 | "outputs": [ 115 | { 116 | "name": "LATENT", 117 | "type": "LATENT", 118 | "links": [ 119 | 7 120 | ], 121 | "slot_index": 0 122 | } 123 | ], 124 | "properties": { 125 | "Node name for S&R": "KSampler" 126 | }, 127 | "widgets_values": [ 128 | 0, 129 | "fixed", 130 | 30, 131 | 6.5, 132 | "ddpm", 133 | "karras", 134 | 1 135 | ] 136 | }, 137 | { 138 | "id": 9, 139 | "type": "SaveImage", 140 | "pos": [ 141 | 1830, 142 | 700 143 | ], 144 | "size": { 145 | "0": 529.7760009765625, 146 | "1": 582.3048095703125 147 | }, 148 | "flags": {}, 149 | "order": 9, 150 | "mode": 0, 151 | "inputs": [ 152 | { 153 | "name": "images", 154 | "type": "IMAGE", 155 | "link": 9 156 | } 157 | ], 158 | "properties": {}, 159 | "widgets_values": [ 160 | "IPAdapterV2" 161 | ] 162 | }, 163 | { 164 | "id": 12, 165 | "type": "LoadImage", 166 | "pos": [ 167 | 450, 168 | 240 169 | ], 170 | "size": { 171 | "0": 315, 172 | "1": 314 173 | }, 174 | "flags": {}, 175 | "order": 1, 176 | "mode": 0, 177 | "outputs": [ 178 | { 179 | "name": "IMAGE", 180 | "type": "IMAGE", 181 | "links": [ 182 | 29 183 | ], 184 | "shape": 3, 185 | "slot_index": 0 186 | }, 187 | { 188 | "name": "MASK", 189 | "type": "MASK", 190 | "links": null, 191 | "shape": 3 192 | } 193 | ], 194 | "properties": { 195 | "Node name for S&R": "LoadImage" 196 | }, 197 | "widgets_values": [ 198 | "rosario_4.jpg", 199 | "image" 200 | ] 201 | }, 202 | { 203 | "id": 20, 204 | "type": "IPAdapterUnifiedLoaderFaceIDV2", 205 | "pos": [ 206 | 460, 207 | 60 208 | ], 209 | "size": { 210 | "0": 315, 211 | "1": 126 212 | }, 213 | "flags": {}, 214 | "order": 3, 215 | "mode": 0, 216 | "inputs": [ 217 | { 218 | "name": "model", 219 | "type": "MODEL", 220 | "link": 36 221 | }, 222 | { 223 | "name": "ipadapter", 224 | "type": "IPADAPTER", 225 | "link": null 226 | } 227 | ], 228 | "outputs": [ 229 | { 230 | "name": "MODEL", 231 | "type": "MODEL", 232 | "links": [ 233 | 35 234 | ], 235 | "shape": 3, 236 | "slot_index": 0 237 | }, 238 | { 239 | "name": "ipadapter", 240 | "type": "IPADAPTER", 241 | "links": [ 242 | 34 243 | ], 244 | "shape": 3 245 | } 246 | ], 247 | "properties": { 248 | "Node name for S&R": "IPAdapterUnifiedLoaderFaceIDV2" 249 | }, 250 | "widgets_values": [ 251 | "FACEID PLUS V2", 252 | 0.6, 253 | "CPU" 254 | ] 255 | }, 256 | { 257 | "id": 7, 258 | "type": "CLIPTextEncode", 259 | "pos": [ 260 | 760, 261 | 850 262 | ], 263 | "size": { 
264 | "0": 425.27801513671875, 265 | "1": 180.6060791015625 266 | }, 267 | "flags": {}, 268 | "order": 5, 269 | "mode": 0, 270 | "inputs": [ 271 | { 272 | "name": "clip", 273 | "type": "CLIP", 274 | "link": 5 275 | } 276 | ], 277 | "outputs": [ 278 | { 279 | "name": "CONDITIONING", 280 | "type": "CONDITIONING", 281 | "links": [ 282 | 6 283 | ], 284 | "slot_index": 0 285 | } 286 | ], 287 | "properties": { 288 | "Node name for S&R": "CLIPTextEncode" 289 | }, 290 | "widgets_values": [ 291 | "blurry, noisy, messy, lowres, jpeg, artifacts, ill, distorted, malformed, naked" 292 | ] 293 | }, 294 | { 295 | "id": 6, 296 | "type": "CLIPTextEncode", 297 | "pos": [ 298 | 760, 299 | 620 300 | ], 301 | "size": { 302 | "0": 422.84503173828125, 303 | "1": 164.31304931640625 304 | }, 305 | "flags": {}, 306 | "order": 4, 307 | "mode": 0, 308 | "inputs": [ 309 | { 310 | "name": "clip", 311 | "type": "CLIP", 312 | "link": 3 313 | } 314 | ], 315 | "outputs": [ 316 | { 317 | "name": "CONDITIONING", 318 | "type": "CONDITIONING", 319 | "links": [ 320 | 4 321 | ], 322 | "slot_index": 0 323 | } 324 | ], 325 | "properties": { 326 | "Node name for S&R": "CLIPTextEncode" 327 | }, 328 | "widgets_values": [ 329 | "closeup of a beautiful woman wearing a black dress on the seaside\n\nserene, sunset, spring, high quality, detailed, diffuse light" 330 | ] 331 | }, 332 | { 333 | "id": 4, 334 | "type": "CheckpointLoaderSimple", 335 | "pos": [ 336 | 10, 337 | 680 338 | ], 339 | "size": { 340 | "0": 315, 341 | "1": 98 342 | }, 343 | "flags": {}, 344 | "order": 2, 345 | "mode": 0, 346 | "outputs": [ 347 | { 348 | "name": "MODEL", 349 | "type": "MODEL", 350 | "links": [ 351 | 36 352 | ], 353 | "slot_index": 0 354 | }, 355 | { 356 | "name": "CLIP", 357 | "type": "CLIP", 358 | "links": [ 359 | 3, 360 | 5 361 | ], 362 | "slot_index": 1 363 | }, 364 | { 365 | "name": "VAE", 366 | "type": "VAE", 367 | "links": [ 368 | 8 369 | ], 370 | "slot_index": 2 371 | } 372 | ], 373 | "properties": { 374 | "Node name for S&R": "CheckpointLoaderSimple" 375 | }, 376 | "widgets_values": [ 377 | "sd15/realisticVisionV51_v51VAE.safetensors" 378 | ] 379 | }, 380 | { 381 | "id": 18, 382 | "type": "IPAdapterFaceIDV2", 383 | "pos": [ 384 | 850, 385 | 190 386 | ], 387 | "size": { 388 | "0": 315, 389 | "1": 298 390 | }, 391 | "flags": {}, 392 | "order": 6, 393 | "mode": 0, 394 | "inputs": [ 395 | { 396 | "name": "model", 397 | "type": "MODEL", 398 | "link": 35 399 | }, 400 | { 401 | "name": "ipadapter", 402 | "type": "IPADAPTER", 403 | "link": 34, 404 | "slot_index": 1 405 | }, 406 | { 407 | "name": "image", 408 | "type": "IMAGE", 409 | "link": 29 410 | }, 411 | { 412 | "name": "image_negative", 413 | "type": "IMAGE", 414 | "link": null 415 | }, 416 | { 417 | "name": "attn_mask", 418 | "type": "MASK", 419 | "link": null 420 | }, 421 | { 422 | "name": "clip_vision", 423 | "type": "CLIP_VISION", 424 | "link": null 425 | }, 426 | { 427 | "name": "insightface", 428 | "type": "INSIGHTFACE", 429 | "link": null 430 | } 431 | ], 432 | "outputs": [ 433 | { 434 | "name": "MODEL", 435 | "type": "MODEL", 436 | "links": [ 437 | 32 438 | ], 439 | "shape": 3, 440 | "slot_index": 0 441 | } 442 | ], 443 | "properties": { 444 | "Node name for S&R": "IPAdapterFaceIDV2" 445 | }, 446 | "widgets_values": [ 447 | 1, 448 | 2, 449 | "linear", 450 | "concat", 451 | 0, 452 | 1 453 | ] 454 | } 455 | ], 456 | "links": [ 457 | [ 458 | 2, 459 | 5, 460 | 0, 461 | 3, 462 | 3, 463 | "LATENT" 464 | ], 465 | [ 466 | 3, 467 | 4, 468 | 1, 469 | 6, 470 | 0, 471 | "CLIP" 472 | ], 473 | [ 474 | 4, 
475 | 6, 476 | 0, 477 | 3, 478 | 1, 479 | "CONDITIONING" 480 | ], 481 | [ 482 | 5, 483 | 4, 484 | 1, 485 | 7, 486 | 0, 487 | "CLIP" 488 | ], 489 | [ 490 | 6, 491 | 7, 492 | 0, 493 | 3, 494 | 2, 495 | "CONDITIONING" 496 | ], 497 | [ 498 | 7, 499 | 3, 500 | 0, 501 | 8, 502 | 0, 503 | "LATENT" 504 | ], 505 | [ 506 | 8, 507 | 4, 508 | 2, 509 | 8, 510 | 1, 511 | "VAE" 512 | ], 513 | [ 514 | 9, 515 | 8, 516 | 0, 517 | 9, 518 | 0, 519 | "IMAGE" 520 | ], 521 | [ 522 | 29, 523 | 12, 524 | 0, 525 | 18, 526 | 2, 527 | "IMAGE" 528 | ], 529 | [ 530 | 32, 531 | 18, 532 | 0, 533 | 3, 534 | 0, 535 | "MODEL" 536 | ], 537 | [ 538 | 34, 539 | 20, 540 | 1, 541 | 18, 542 | 1, 543 | "IPADAPTER" 544 | ], 545 | [ 546 | 35, 547 | 20, 548 | 0, 549 | 18, 550 | 0, 551 | "MODEL" 552 | ], 553 | [ 554 | 36, 555 | 4, 556 | 0, 557 | 20, 558 | 0, 559 | "MODEL" 560 | ] 561 | ], 562 | "groups": [], 563 | "config": {}, 564 | "extra": {}, 565 | "version": 0.4 566 | } -------------------------------------------------------------------------------- /examples/ipadapter_ideal_faceid_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 23, 3 | "last_link_id": 44, 4 | "nodes": [ 5 | { 6 | "id": 8, 7 | "type": "VAEDecode", 8 | "pos": [ 9 | 1640, 10 | 710 11 | ], 12 | "size": { 13 | "0": 140, 14 | "1": 46 15 | }, 16 | "flags": {}, 17 | "order": 10, 18 | "mode": 0, 19 | "inputs": [ 20 | { 21 | "name": "samples", 22 | "type": "LATENT", 23 | "link": 7 24 | }, 25 | { 26 | "name": "vae", 27 | "type": "VAE", 28 | "link": 8 29 | } 30 | ], 31 | "outputs": [ 32 | { 33 | "name": "IMAGE", 34 | "type": "IMAGE", 35 | "links": [ 36 | 9 37 | ], 38 | "slot_index": 0 39 | } 40 | ], 41 | "properties": { 42 | "Node name for S&R": "VAEDecode" 43 | } 44 | }, 45 | { 46 | "id": 5, 47 | "type": "EmptyLatentImage", 48 | "pos": [ 49 | 870, 50 | 1100 51 | ], 52 | "size": { 53 | "0": 315, 54 | "1": 106 55 | }, 56 | "flags": {}, 57 | "order": 0, 58 | "mode": 0, 59 | "outputs": [ 60 | { 61 | "name": "LATENT", 62 | "type": "LATENT", 63 | "links": [ 64 | 2 65 | ], 66 | "slot_index": 0 67 | } 68 | ], 69 | "properties": { 70 | "Node name for S&R": "EmptyLatentImage" 71 | }, 72 | "widgets_values": [ 73 | 512, 74 | 512, 75 | 1 76 | ] 77 | }, 78 | { 79 | "id": 3, 80 | "type": "KSampler", 81 | "pos": [ 82 | 1280, 83 | 710 84 | ], 85 | "size": { 86 | "0": 315, 87 | "1": 262 88 | }, 89 | "flags": {}, 90 | "order": 9, 91 | "mode": 0, 92 | "inputs": [ 93 | { 94 | "name": "model", 95 | "type": "MODEL", 96 | "link": 42 97 | }, 98 | { 99 | "name": "positive", 100 | "type": "CONDITIONING", 101 | "link": 4 102 | }, 103 | { 104 | "name": "negative", 105 | "type": "CONDITIONING", 106 | "link": 6 107 | }, 108 | { 109 | "name": "latent_image", 110 | "type": "LATENT", 111 | "link": 2 112 | } 113 | ], 114 | "outputs": [ 115 | { 116 | "name": "LATENT", 117 | "type": "LATENT", 118 | "links": [ 119 | 7 120 | ], 121 | "slot_index": 0 122 | } 123 | ], 124 | "properties": { 125 | "Node name for S&R": "KSampler" 126 | }, 127 | "widgets_values": [ 128 | 0, 129 | "fixed", 130 | 30, 131 | 6.5, 132 | "ddpm", 133 | "karras", 134 | 1 135 | ] 136 | }, 137 | { 138 | "id": 9, 139 | "type": "SaveImage", 140 | "pos": [ 141 | 1830, 142 | 700 143 | ], 144 | "size": { 145 | "0": 529.7760009765625, 146 | "1": 582.3048095703125 147 | }, 148 | "flags": {}, 149 | "order": 11, 150 | "mode": 0, 151 | "inputs": [ 152 | { 153 | "name": "images", 154 | "type": "IMAGE", 155 | "link": 9 156 | } 157 | ], 158 | "properties": {}, 159 | "widgets_values": [ 
160 | "IPAdapterV2" 161 | ] 162 | }, 163 | { 164 | "id": 12, 165 | "type": "LoadImage", 166 | "pos": [ 167 | 450, 168 | 240 169 | ], 170 | "size": { 171 | "0": 315, 172 | "1": 314 173 | }, 174 | "flags": {}, 175 | "order": 1, 176 | "mode": 0, 177 | "outputs": [ 178 | { 179 | "name": "IMAGE", 180 | "type": "IMAGE", 181 | "links": [ 182 | 29 183 | ], 184 | "shape": 3, 185 | "slot_index": 0 186 | }, 187 | { 188 | "name": "MASK", 189 | "type": "MASK", 190 | "links": null, 191 | "shape": 3 192 | } 193 | ], 194 | "properties": { 195 | "Node name for S&R": "LoadImage" 196 | }, 197 | "widgets_values": [ 198 | "rosario_4.jpg", 199 | "image" 200 | ] 201 | }, 202 | { 203 | "id": 7, 204 | "type": "CLIPTextEncode", 205 | "pos": [ 206 | 760, 207 | 850 208 | ], 209 | "size": { 210 | "0": 425.27801513671875, 211 | "1": 180.6060791015625 212 | }, 213 | "flags": {}, 214 | "order": 5, 215 | "mode": 0, 216 | "inputs": [ 217 | { 218 | "name": "clip", 219 | "type": "CLIP", 220 | "link": 5 221 | } 222 | ], 223 | "outputs": [ 224 | { 225 | "name": "CONDITIONING", 226 | "type": "CONDITIONING", 227 | "links": [ 228 | 6 229 | ], 230 | "slot_index": 0 231 | } 232 | ], 233 | "properties": { 234 | "Node name for S&R": "CLIPTextEncode" 235 | }, 236 | "widgets_values": [ 237 | "blurry, noisy, messy, lowres, jpeg, artifacts, ill, distorted, malformed, naked" 238 | ] 239 | }, 240 | { 241 | "id": 6, 242 | "type": "CLIPTextEncode", 243 | "pos": [ 244 | 760, 245 | 620 246 | ], 247 | "size": { 248 | "0": 422.84503173828125, 249 | "1": 164.31304931640625 250 | }, 251 | "flags": {}, 252 | "order": 4, 253 | "mode": 0, 254 | "inputs": [ 255 | { 256 | "name": "clip", 257 | "type": "CLIP", 258 | "link": 3 259 | } 260 | ], 261 | "outputs": [ 262 | { 263 | "name": "CONDITIONING", 264 | "type": "CONDITIONING", 265 | "links": [ 266 | 4 267 | ], 268 | "slot_index": 0 269 | } 270 | ], 271 | "properties": { 272 | "Node name for S&R": "CLIPTextEncode" 273 | }, 274 | "widgets_values": [ 275 | "closeup of a beautiful woman wearing a black dress on the seaside\n\nserene, sunset, spring, high quality, detailed, diffuse light" 276 | ] 277 | }, 278 | { 279 | "id": 4, 280 | "type": "CheckpointLoaderSimple", 281 | "pos": [ 282 | 10, 283 | 680 284 | ], 285 | "size": { 286 | "0": 315, 287 | "1": 98 288 | }, 289 | "flags": {}, 290 | "order": 2, 291 | "mode": 0, 292 | "outputs": [ 293 | { 294 | "name": "MODEL", 295 | "type": "MODEL", 296 | "links": [ 297 | 36 298 | ], 299 | "slot_index": 0 300 | }, 301 | { 302 | "name": "CLIP", 303 | "type": "CLIP", 304 | "links": [ 305 | 3, 306 | 5 307 | ], 308 | "slot_index": 1 309 | }, 310 | { 311 | "name": "VAE", 312 | "type": "VAE", 313 | "links": [ 314 | 8 315 | ], 316 | "slot_index": 2 317 | } 318 | ], 319 | "properties": { 320 | "Node name for S&R": "CheckpointLoaderSimple" 321 | }, 322 | "widgets_values": [ 323 | "sd15/realisticVisionV51_v51VAE.safetensors" 324 | ] 325 | }, 326 | { 327 | "id": 20, 328 | "type": "IPAdapterUnifiedLoaderFaceIDV2", 329 | "pos": [ 330 | 460, 331 | 60 332 | ], 333 | "size": { 334 | "0": 315, 335 | "1": 126 336 | }, 337 | "flags": {}, 338 | "order": 3, 339 | "mode": 0, 340 | "inputs": [ 341 | { 342 | "name": "model", 343 | "type": "MODEL", 344 | "link": 36 345 | }, 346 | { 347 | "name": "ipadapter", 348 | "type": "IPADAPTER", 349 | "link": null 350 | } 351 | ], 352 | "outputs": [ 353 | { 354 | "name": "MODEL", 355 | "type": "MODEL", 356 | "links": [ 357 | 35 358 | ], 359 | "shape": 3, 360 | "slot_index": 0 361 | }, 362 | { 363 | "name": "ipadapter", 364 | "type": "IPADAPTER", 365 | 
"links": [ 366 | 34, 367 | 38 368 | ], 369 | "shape": 3, 370 | "slot_index": 1 371 | } 372 | ], 373 | "properties": { 374 | "Node name for S&R": "IPAdapterUnifiedLoaderFaceIDV2" 375 | }, 376 | "widgets_values": [ 377 | "FACEID PLUS V2", 378 | 0.6, 379 | "CPU" 380 | ] 381 | }, 382 | { 383 | "id": 22, 384 | "type": "IPAdapterUnifiedLoaderV2", 385 | "pos": [ 386 | 855, 387 | 51 388 | ], 389 | "size": { 390 | "0": 315, 391 | "1": 78 392 | }, 393 | "flags": {}, 394 | "order": 7, 395 | "mode": 0, 396 | "inputs": [ 397 | { 398 | "name": "model", 399 | "type": "MODEL", 400 | "link": 43 401 | }, 402 | { 403 | "name": "ipadapter", 404 | "type": "IPADAPTER", 405 | "link": 38 406 | } 407 | ], 408 | "outputs": [ 409 | { 410 | "name": "model", 411 | "type": "MODEL", 412 | "links": [ 413 | 40 414 | ], 415 | "shape": 3, 416 | "slot_index": 0 417 | }, 418 | { 419 | "name": "ipadapter", 420 | "type": "IPADAPTER", 421 | "links": [ 422 | 37 423 | ], 424 | "shape": 3 425 | } 426 | ], 427 | "properties": { 428 | "Node name for S&R": "IPAdapterUnifiedLoaderV2" 429 | }, 430 | "widgets_values": [ 431 | "FULL FACE - SD1.5 only (portraits stronger)" 432 | ] 433 | }, 434 | { 435 | "id": 21, 436 | "type": "IPAdapterV2", 437 | "pos": [ 438 | 1280, 439 | 170 440 | ], 441 | "size": { 442 | "0": 315, 443 | "1": 190 444 | }, 445 | "flags": {}, 446 | "order": 8, 447 | "mode": 0, 448 | "inputs": [ 449 | { 450 | "name": "model", 451 | "type": "MODEL", 452 | "link": 40 453 | }, 454 | { 455 | "name": "ipadapter", 456 | "type": "IPADAPTER", 457 | "link": 37, 458 | "slot_index": 1 459 | }, 460 | { 461 | "name": "image", 462 | "type": "IMAGE", 463 | "link": 44 464 | }, 465 | { 466 | "name": "attn_mask", 467 | "type": "MASK", 468 | "link": null 469 | } 470 | ], 471 | "outputs": [ 472 | { 473 | "name": "MODEL", 474 | "type": "MODEL", 475 | "links": [ 476 | 42 477 | ], 478 | "shape": 3, 479 | "slot_index": 0 480 | } 481 | ], 482 | "properties": { 483 | "Node name for S&R": "IPAdapterV2" 484 | }, 485 | "widgets_values": [ 486 | 0.4, 487 | 0, 488 | 1, 489 | "standard" 490 | ] 491 | }, 492 | { 493 | "id": 18, 494 | "type": "IPAdapterFaceIDV2", 495 | "pos": [ 496 | 850, 497 | 190 498 | ], 499 | "size": { 500 | "0": 315, 501 | "1": 322 502 | }, 503 | "flags": {}, 504 | "order": 6, 505 | "mode": 0, 506 | "inputs": [ 507 | { 508 | "name": "model", 509 | "type": "MODEL", 510 | "link": 35 511 | }, 512 | { 513 | "name": "ipadapter", 514 | "type": "IPADAPTER", 515 | "link": 34, 516 | "slot_index": 1 517 | }, 518 | { 519 | "name": "image", 520 | "type": "IMAGE", 521 | "link": 29 522 | }, 523 | { 524 | "name": "image_negative", 525 | "type": "IMAGE", 526 | "link": null 527 | }, 528 | { 529 | "name": "attn_mask", 530 | "type": "MASK", 531 | "link": null 532 | }, 533 | { 534 | "name": "clip_vision", 535 | "type": "CLIP_VISION", 536 | "link": null 537 | }, 538 | { 539 | "name": "insightface", 540 | "type": "INSIGHTFACE", 541 | "link": null 542 | } 543 | ], 544 | "outputs": [ 545 | { 546 | "name": "MODEL", 547 | "type": "MODEL", 548 | "links": [ 549 | 43 550 | ], 551 | "shape": 3, 552 | "slot_index": 0 553 | }, 554 | { 555 | "name": "face_image", 556 | "type": "IMAGE", 557 | "links": [ 558 | 44 559 | ], 560 | "shape": 3, 561 | "slot_index": 1 562 | } 563 | ], 564 | "properties": { 565 | "Node name for S&R": "IPAdapterFaceIDV2" 566 | }, 567 | "widgets_values": [ 568 | 0.8, 569 | 2, 570 | "linear", 571 | "concat", 572 | 0, 573 | 1, 574 | "V only" 575 | ] 576 | } 577 | ], 578 | "links": [ 579 | [ 580 | 2, 581 | 5, 582 | 0, 583 | 3, 584 | 3, 585 | 
"LATENT" 586 | ], 587 | [ 588 | 3, 589 | 4, 590 | 1, 591 | 6, 592 | 0, 593 | "CLIP" 594 | ], 595 | [ 596 | 4, 597 | 6, 598 | 0, 599 | 3, 600 | 1, 601 | "CONDITIONING" 602 | ], 603 | [ 604 | 5, 605 | 4, 606 | 1, 607 | 7, 608 | 0, 609 | "CLIP" 610 | ], 611 | [ 612 | 6, 613 | 7, 614 | 0, 615 | 3, 616 | 2, 617 | "CONDITIONING" 618 | ], 619 | [ 620 | 7, 621 | 3, 622 | 0, 623 | 8, 624 | 0, 625 | "LATENT" 626 | ], 627 | [ 628 | 8, 629 | 4, 630 | 2, 631 | 8, 632 | 1, 633 | "VAE" 634 | ], 635 | [ 636 | 9, 637 | 8, 638 | 0, 639 | 9, 640 | 0, 641 | "IMAGE" 642 | ], 643 | [ 644 | 29, 645 | 12, 646 | 0, 647 | 18, 648 | 2, 649 | "IMAGE" 650 | ], 651 | [ 652 | 34, 653 | 20, 654 | 1, 655 | 18, 656 | 1, 657 | "IPADAPTER" 658 | ], 659 | [ 660 | 35, 661 | 20, 662 | 0, 663 | 18, 664 | 0, 665 | "MODEL" 666 | ], 667 | [ 668 | 36, 669 | 4, 670 | 0, 671 | 20, 672 | 0, 673 | "MODEL" 674 | ], 675 | [ 676 | 37, 677 | 22, 678 | 1, 679 | 21, 680 | 1, 681 | "IPADAPTER" 682 | ], 683 | [ 684 | 38, 685 | 20, 686 | 1, 687 | 22, 688 | 1, 689 | "IPADAPTER" 690 | ], 691 | [ 692 | 40, 693 | 22, 694 | 0, 695 | 21, 696 | 0, 697 | "MODEL" 698 | ], 699 | [ 700 | 42, 701 | 21, 702 | 0, 703 | 3, 704 | 0, 705 | "MODEL" 706 | ], 707 | [ 708 | 43, 709 | 18, 710 | 0, 711 | 22, 712 | 0, 713 | "MODEL" 714 | ], 715 | [ 716 | 44, 717 | 18, 718 | 1, 719 | 21, 720 | 2, 721 | "IMAGE" 722 | ] 723 | ], 724 | "groups": [], 725 | "config": {}, 726 | "extra": {}, 727 | "version": 0.4 728 | } -------------------------------------------------------------------------------- /examples/ipadapter_kolors.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 88, 3 | "last_link_id": 132, 4 | "nodes": [ 5 | { 6 | "id": 9, 7 | "type": "EmptyLatentImage", 8 | "pos": [ 9 | 1710, 10 | 620 11 | ], 12 | "size": { 13 | "0": 368.5347900390625, 14 | "1": 106 15 | }, 16 | "flags": {}, 17 | "order": 0, 18 | "mode": 0, 19 | "outputs": [ 20 | { 21 | "name": "LATENT", 22 | "type": "LATENT", 23 | "links": [ 24 | 108 25 | ], 26 | "shape": 3, 27 | "label": "Latent" 28 | } 29 | ], 30 | "properties": { 31 | "Node name for S&R": "EmptyLatentImage" 32 | }, 33 | "widgets_values": [ 34 | 968, 35 | 1152, 36 | 1 37 | ] 38 | }, 39 | { 40 | "id": 80, 41 | "type": "VAEDecode", 42 | "pos": [ 43 | 2690, 44 | 110 45 | ], 46 | "size": { 47 | "0": 210, 48 | "1": 46 49 | }, 50 | "flags": {}, 51 | "order": 11, 52 | "mode": 0, 53 | "inputs": [ 54 | { 55 | "name": "samples", 56 | "type": "LATENT", 57 | "link": 110 58 | }, 59 | { 60 | "name": "vae", 61 | "type": "VAE", 62 | "link": 111 63 | } 64 | ], 65 | "outputs": [ 66 | { 67 | "name": "IMAGE", 68 | "type": "IMAGE", 69 | "links": [ 70 | 113 71 | ], 72 | "shape": 3, 73 | "slot_index": 0 74 | } 75 | ], 76 | "properties": { 77 | "Node name for S&R": "VAEDecode" 78 | } 79 | }, 80 | { 81 | "id": 81, 82 | "type": "PreviewImage", 83 | "pos": [ 84 | 2700, 85 | 210 86 | ], 87 | "size": { 88 | "0": 1085.9268798828125, 89 | "1": 1301.6563720703125 90 | }, 91 | "flags": {}, 92 | "order": 12, 93 | "mode": 0, 94 | "inputs": [ 95 | { 96 | "name": "images", 97 | "type": "IMAGE", 98 | "link": 113 99 | } 100 | ], 101 | "properties": { 102 | "Node name for S&R": "PreviewImage" 103 | } 104 | }, 105 | { 106 | "id": 59, 107 | "type": "MZ_KolorsUNETLoader", 108 | "pos": [ 109 | 1140, 110 | 300 111 | ], 112 | "size": { 113 | "0": 310.1650695800781, 114 | "1": 78 115 | }, 116 | "flags": {}, 117 | "order": 1, 118 | "mode": 0, 119 | "outputs": [ 120 | { 121 | "name": "model", 122 | "type": "MODEL", 123 | 
"links": [ 124 | 132 125 | ], 126 | "shape": 3, 127 | "label": "model", 128 | "slot_index": 0 129 | }, 130 | { 131 | "name": "hid_proj", 132 | "type": "TorchLinear", 133 | "links": [ 134 | 79, 135 | 87 136 | ], 137 | "shape": 3, 138 | "label": "hid_proj", 139 | "slot_index": 1 140 | } 141 | ], 142 | "properties": { 143 | "Node name for S&R": "MZ_KolorsUNETLoader" 144 | }, 145 | "widgets_values": [ 146 | "diffusion_pytorch_model.fp16.safetensors" 147 | ] 148 | }, 149 | { 150 | "id": 75, 151 | "type": "IPAdapterAdvancedV2", 152 | "pos": [ 153 | 1919, 154 | -273 155 | ], 156 | "size": { 157 | "0": 291.9587097167969, 158 | "1": 278 159 | }, 160 | "flags": {}, 161 | "order": 7, 162 | "mode": 0, 163 | "inputs": [ 164 | { 165 | "name": "model", 166 | "type": "MODEL", 167 | "link": 132, 168 | "slot_index": 0 169 | }, 170 | { 171 | "name": "ipadapter", 172 | "type": "IPADAPTER", 173 | "link": 130, 174 | "slot_index": 1 175 | }, 176 | { 177 | "name": "image", 178 | "type": "IMAGE", 179 | "link": 102, 180 | "slot_index": 2 181 | }, 182 | { 183 | "name": "image_negative", 184 | "type": "IMAGE", 185 | "link": null 186 | }, 187 | { 188 | "name": "attn_mask", 189 | "type": "MASK", 190 | "link": null 191 | }, 192 | { 193 | "name": "clip_vision", 194 | "type": "CLIP_VISION", 195 | "link": 131, 196 | "slot_index": 5 197 | } 198 | ], 199 | "outputs": [ 200 | { 201 | "name": "MODEL", 202 | "type": "MODEL", 203 | "links": [ 204 | 105 205 | ], 206 | "shape": 3, 207 | "slot_index": 0 208 | } 209 | ], 210 | "properties": { 211 | "Node name for S&R": "IPAdapterAdvancedV2" 212 | }, 213 | "widgets_values": [ 214 | 1, 215 | "style transfer precise", 216 | "concat", 217 | 0, 218 | 1, 219 | "V only" 220 | ] 221 | }, 222 | { 223 | "id": 76, 224 | "type": "IPAdapterModelLoaderV2", 225 | "pos": [ 226 | 1541, 227 | -383 228 | ], 229 | "size": { 230 | "0": 315, 231 | "1": 58 232 | }, 233 | "flags": {}, 234 | "order": 2, 235 | "mode": 0, 236 | "outputs": [ 237 | { 238 | "name": "IPADAPTER", 239 | "type": "IPADAPTER", 240 | "links": [ 241 | 130 242 | ], 243 | "shape": 3, 244 | "slot_index": 0 245 | } 246 | ], 247 | "properties": { 248 | "Node name for S&R": "IPAdapterModelLoaderV2" 249 | }, 250 | "widgets_values": [ 251 | "Kolors-IP-Adapter-Plus.bin" 252 | ] 253 | }, 254 | { 255 | "id": 78, 256 | "type": "CLIPVisionLoader", 257 | "pos": [ 258 | 1511, 259 | -127 260 | ], 261 | "size": { 262 | "0": 315, 263 | "1": 58 264 | }, 265 | "flags": {}, 266 | "order": 3, 267 | "mode": 0, 268 | "outputs": [ 269 | { 270 | "name": "CLIP_VISION", 271 | "type": "CLIP_VISION", 272 | "links": [ 273 | 131 274 | ], 275 | "shape": 3, 276 | "slot_index": 0 277 | } 278 | ], 279 | "properties": { 280 | "Node name for S&R": "CLIPVisionLoader" 281 | }, 282 | "widgets_values": [ 283 | "clip-vit-large-patch14-336.bin" 284 | ] 285 | }, 286 | { 287 | "id": 77, 288 | "type": "LoadImage", 289 | "pos": [ 290 | 1137, 291 | -329 292 | ], 293 | "size": { 294 | "0": 237.2888641357422, 295 | "1": 323.4468994140625 296 | }, 297 | "flags": {}, 298 | "order": 4, 299 | "mode": 0, 300 | "outputs": [ 301 | { 302 | "name": "IMAGE", 303 | "type": "IMAGE", 304 | "links": [ 305 | 102 306 | ], 307 | "shape": 3 308 | }, 309 | { 310 | "name": "MASK", 311 | "type": "MASK", 312 | "links": null, 313 | "shape": 3 314 | } 315 | ], 316 | "properties": { 317 | "Node name for S&R": "LoadImage" 318 | }, 319 | "widgets_values": [ 320 | "cga_pixels.png", 321 | "image" 322 | ] 323 | }, 324 | { 325 | "id": 70, 326 | "type": "VAELoader", 327 | "pos": [ 328 | 1130, 329 | 450 330 | ], 331 | 
"size": { 332 | "0": 315, 333 | "1": 58 334 | }, 335 | "flags": {}, 336 | "order": 5, 337 | "mode": 0, 338 | "outputs": [ 339 | { 340 | "name": "VAE", 341 | "type": "VAE", 342 | "links": [ 343 | 111 344 | ], 345 | "shape": 3, 346 | "slot_index": 0 347 | } 348 | ], 349 | "properties": { 350 | "Node name for S&R": "VAELoader" 351 | }, 352 | "widgets_values": [ 353 | "sdxl_vae.safetensors" 354 | ] 355 | }, 356 | { 357 | "id": 79, 358 | "type": "KSampler", 359 | "pos": [ 360 | 2320, 361 | 110 362 | ], 363 | "size": { 364 | "0": 315, 365 | "1": 262 366 | }, 367 | "flags": {}, 368 | "order": 10, 369 | "mode": 0, 370 | "inputs": [ 371 | { 372 | "name": "model", 373 | "type": "MODEL", 374 | "link": 105 375 | }, 376 | { 377 | "name": "positive", 378 | "type": "CONDITIONING", 379 | "link": 107 380 | }, 381 | { 382 | "name": "negative", 383 | "type": "CONDITIONING", 384 | "link": 106 385 | }, 386 | { 387 | "name": "latent_image", 388 | "type": "LATENT", 389 | "link": 108 390 | } 391 | ], 392 | "outputs": [ 393 | { 394 | "name": "LATENT", 395 | "type": "LATENT", 396 | "links": [ 397 | 110 398 | ], 399 | "shape": 3, 400 | "slot_index": 0 401 | } 402 | ], 403 | "properties": { 404 | "Node name for S&R": "KSampler" 405 | }, 406 | "widgets_values": [ 407 | 13, 408 | "fixed", 409 | 30, 410 | 6.5, 411 | "dpmpp_2m_sde_gpu", 412 | "karras", 413 | 1 414 | ] 415 | }, 416 | { 417 | "id": 67, 418 | "type": "MZ_ChatGLM3", 419 | "pos": [ 420 | 1680, 421 | 80 422 | ], 423 | "size": { 424 | "0": 400, 425 | "1": 200 426 | }, 427 | "flags": {}, 428 | "order": 9, 429 | "mode": 0, 430 | "inputs": [ 431 | { 432 | "name": "chatglm3_model", 433 | "type": "CHATGLM3MODEL", 434 | "link": 86, 435 | "label": "chatglm3_model", 436 | "slot_index": 0 437 | }, 438 | { 439 | "name": "hid_proj", 440 | "type": "TorchLinear", 441 | "link": 87, 442 | "label": "hid_proj" 443 | } 444 | ], 445 | "outputs": [ 446 | { 447 | "name": "CONDITIONING", 448 | "type": "CONDITIONING", 449 | "links": [ 450 | 107 451 | ], 452 | "shape": 3, 453 | "label": "CONDITIONING", 454 | "slot_index": 0 455 | } 456 | ], 457 | "properties": { 458 | "Node name for S&R": "MZ_ChatGLM3" 459 | }, 460 | "widgets_values": [ 461 | "a fierce red hair warrior woman wearing a white and gold armor with purple decorations. 
Highly detailed digital illustration, high quality, detailed, intricate" 462 | ] 463 | }, 464 | { 465 | "id": 66, 466 | "type": "MZ_ChatGLM3Loader", 467 | "pos": [ 468 | 1140, 469 | 180 470 | ], 471 | "size": { 472 | "0": 315, 473 | "1": 58 474 | }, 475 | "flags": {}, 476 | "order": 6, 477 | "mode": 0, 478 | "outputs": [ 479 | { 480 | "name": "chatglm3_model", 481 | "type": "CHATGLM3MODEL", 482 | "links": [ 483 | 84, 484 | 86 485 | ], 486 | "shape": 3, 487 | "label": "chatglm3_model" 488 | } 489 | ], 490 | "properties": { 491 | "Node name for S&R": "MZ_ChatGLM3Loader" 492 | }, 493 | "widgets_values": [ 494 | "chatglm3-8bit.safetensors" 495 | ] 496 | }, 497 | { 498 | "id": 62, 499 | "type": "MZ_ChatGLM3", 500 | "pos": [ 501 | 1680, 502 | 340 503 | ], 504 | "size": { 505 | "0": 400, 506 | "1": 200 507 | }, 508 | "flags": {}, 509 | "order": 8, 510 | "mode": 0, 511 | "inputs": [ 512 | { 513 | "name": "chatglm3_model", 514 | "type": "CHATGLM3MODEL", 515 | "link": 84, 516 | "label": "chatglm3_model", 517 | "slot_index": 0 518 | }, 519 | { 520 | "name": "hid_proj", 521 | "type": "TorchLinear", 522 | "link": 79, 523 | "label": "hid_proj" 524 | } 525 | ], 526 | "outputs": [ 527 | { 528 | "name": "CONDITIONING", 529 | "type": "CONDITIONING", 530 | "links": [ 531 | 106 532 | ], 533 | "shape": 3, 534 | "label": "CONDITIONING", 535 | "slot_index": 0 536 | } 537 | ], 538 | "properties": { 539 | "Node name for S&R": "MZ_ChatGLM3" 540 | }, 541 | "widgets_values": [ 542 | "" 543 | ] 544 | } 545 | ], 546 | "links": [ 547 | [ 548 | 79, 549 | 59, 550 | 1, 551 | 62, 552 | 1, 553 | "TorchLinear" 554 | ], 555 | [ 556 | 84, 557 | 66, 558 | 0, 559 | 62, 560 | 0, 561 | "CHATGLM3MODEL" 562 | ], 563 | [ 564 | 86, 565 | 66, 566 | 0, 567 | 67, 568 | 0, 569 | "CHATGLM3MODEL" 570 | ], 571 | [ 572 | 87, 573 | 59, 574 | 1, 575 | 67, 576 | 1, 577 | "TorchLinear" 578 | ], 579 | [ 580 | 102, 581 | 77, 582 | 0, 583 | 75, 584 | 2, 585 | "IMAGE" 586 | ], 587 | [ 588 | 105, 589 | 75, 590 | 0, 591 | 79, 592 | 0, 593 | "MODEL" 594 | ], 595 | [ 596 | 106, 597 | 62, 598 | 0, 599 | 79, 600 | 2, 601 | "CONDITIONING" 602 | ], 603 | [ 604 | 107, 605 | 67, 606 | 0, 607 | 79, 608 | 1, 609 | "CONDITIONING" 610 | ], 611 | [ 612 | 108, 613 | 9, 614 | 0, 615 | 79, 616 | 3, 617 | "LATENT" 618 | ], 619 | [ 620 | 110, 621 | 79, 622 | 0, 623 | 80, 624 | 0, 625 | "LATENT" 626 | ], 627 | [ 628 | 111, 629 | 70, 630 | 0, 631 | 80, 632 | 1, 633 | "VAE" 634 | ], 635 | [ 636 | 113, 637 | 80, 638 | 0, 639 | 81, 640 | 0, 641 | "IMAGE" 642 | ], 643 | [ 644 | 130, 645 | 76, 646 | 0, 647 | 75, 648 | 1, 649 | "IPADAPTER" 650 | ], 651 | [ 652 | 131, 653 | 78, 654 | 0, 655 | 75, 656 | 5, 657 | "CLIP_VISION" 658 | ], 659 | [ 660 | 132, 661 | 59, 662 | 0, 663 | 75, 664 | 0, 665 | "MODEL" 666 | ] 667 | ], 668 | "groups": [], 669 | "config": {}, 670 | "extra": { 671 | "ds": { 672 | "scale": 0.620921323059155, 673 | "offset": [ 674 | -781.0947110324239, 675 | 731.4168331979325 676 | ] 677 | } 678 | }, 679 | "version": 0.4 680 | } -------------------------------------------------------------------------------- /examples/ipadapter_noise_injection.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 18, 3 | "last_link_id": 30, 4 | "nodes": [ 5 | { 6 | "id": 4, 7 | "type": "CheckpointLoaderSimple", 8 | "pos": [ 9 | 50, 10 | 730 11 | ], 12 | "size": { 13 | "0": 315, 14 | "1": 98 15 | }, 16 | "flags": {}, 17 | "order": 0, 18 | "mode": 0, 19 | "outputs": [ 20 | { 21 | "name": "MODEL", 22 | "type": "MODEL", 23 | 
"links": [ 24 | 20 25 | ], 26 | "slot_index": 0 27 | }, 28 | { 29 | "name": "CLIP", 30 | "type": "CLIP", 31 | "links": [ 32 | 3, 33 | 5 34 | ], 35 | "slot_index": 1 36 | }, 37 | { 38 | "name": "VAE", 39 | "type": "VAE", 40 | "links": [ 41 | 8 42 | ], 43 | "slot_index": 2 44 | } 45 | ], 46 | "properties": { 47 | "Node name for S&R": "CheckpointLoaderSimple" 48 | }, 49 | "widgets_values": [ 50 | "sd15/realisticVisionV51_v51VAE.safetensors" 51 | ] 52 | }, 53 | { 54 | "id": 9, 55 | "type": "SaveImage", 56 | "pos": [ 57 | 1770, 58 | 710 59 | ], 60 | "size": { 61 | "0": 529.7760009765625, 62 | "1": 582.3048095703125 63 | }, 64 | "flags": {}, 65 | "order": 12, 66 | "mode": 0, 67 | "inputs": [ 68 | { 69 | "name": "images", 70 | "type": "IMAGE", 71 | "link": 9 72 | } 73 | ], 74 | "properties": {}, 75 | "widgets_values": [ 76 | "IPAdapterV2" 77 | ] 78 | }, 79 | { 80 | "id": 7, 81 | "type": "CLIPTextEncode", 82 | "pos": [ 83 | 690, 84 | 840 85 | ], 86 | "size": { 87 | "0": 425.27801513671875, 88 | "1": 180.6060791015625 89 | }, 90 | "flags": {}, 91 | "order": 6, 92 | "mode": 0, 93 | "inputs": [ 94 | { 95 | "name": "clip", 96 | "type": "CLIP", 97 | "link": 5 98 | } 99 | ], 100 | "outputs": [ 101 | { 102 | "name": "CONDITIONING", 103 | "type": "CONDITIONING", 104 | "links": [ 105 | 6 106 | ], 107 | "slot_index": 0 108 | } 109 | ], 110 | "properties": { 111 | "Node name for S&R": "CLIPTextEncode" 112 | }, 113 | "widgets_values": [ 114 | "blurry, noisy, messy, lowres, jpeg, artifacts, ill, distorted, malformed" 115 | ] 116 | }, 117 | { 118 | "id": 5, 119 | "type": "EmptyLatentImage", 120 | "pos": [ 121 | 801, 122 | 1097 123 | ], 124 | "size": { 125 | "0": 315, 126 | "1": 106 127 | }, 128 | "flags": {}, 129 | "order": 1, 130 | "mode": 0, 131 | "outputs": [ 132 | { 133 | "name": "LATENT", 134 | "type": "LATENT", 135 | "links": [ 136 | 2 137 | ], 138 | "slot_index": 0 139 | } 140 | ], 141 | "properties": { 142 | "Node name for S&R": "EmptyLatentImage" 143 | }, 144 | "widgets_values": [ 145 | 512, 146 | 512, 147 | 1 148 | ] 149 | }, 150 | { 151 | "id": 16, 152 | "type": "CLIPVisionLoader", 153 | "pos": [ 154 | 308, 155 | 161 156 | ], 157 | "size": { 158 | "0": 315, 159 | "1": 58 160 | }, 161 | "flags": {}, 162 | "order": 2, 163 | "mode": 0, 164 | "outputs": [ 165 | { 166 | "name": "CLIP_VISION", 167 | "type": "CLIP_VISION", 168 | "links": [ 169 | 24 170 | ], 171 | "shape": 3 172 | } 173 | ], 174 | "properties": { 175 | "Node name for S&R": "CLIPVisionLoader" 176 | }, 177 | "widgets_values": [ 178 | "IPAdapter_image_encoder_sd15.safetensors" 179 | ] 180 | }, 181 | { 182 | "id": 15, 183 | "type": "IPAdapterModelLoaderV2", 184 | "pos": [ 185 | 308, 186 | 52 187 | ], 188 | "size": { 189 | "0": 315, 190 | "1": 58 191 | }, 192 | "flags": {}, 193 | "order": 3, 194 | "mode": 0, 195 | "outputs": [ 196 | { 197 | "name": "IPADAPTER", 198 | "type": "IPADAPTER", 199 | "links": [ 200 | 21 201 | ], 202 | "shape": 3 203 | } 204 | ], 205 | "properties": { 206 | "Node name for S&R": "IPAdapterModelLoaderV2" 207 | }, 208 | "widgets_values": [ 209 | "ip-adapter-plus_sd15.safetensors" 210 | ] 211 | }, 212 | { 213 | "id": 6, 214 | "type": "CLIPTextEncode", 215 | "pos": [ 216 | 690, 217 | 610 218 | ], 219 | "size": { 220 | "0": 422.84503173828125, 221 | "1": 164.31304931640625 222 | }, 223 | "flags": {}, 224 | "order": 5, 225 | "mode": 0, 226 | "inputs": [ 227 | { 228 | "name": "clip", 229 | "type": "CLIP", 230 | "link": 3 231 | } 232 | ], 233 | "outputs": [ 234 | { 235 | "name": "CONDITIONING", 236 | "type": "CONDITIONING", 237 | 
"links": [ 238 | 4 239 | ], 240 | "slot_index": 0 241 | } 242 | ], 243 | "properties": { 244 | "Node name for S&R": "CLIPTextEncode" 245 | }, 246 | "widgets_values": [ 247 | "in a peaceful spring morning a woman wearing a white shirt is sitting in a park on a bench\n\nhigh quality, detailed, diffuse light" 248 | ] 249 | }, 250 | { 251 | "id": 12, 252 | "type": "LoadImage", 253 | "pos": [ 254 | 311, 255 | 270 256 | ], 257 | "size": { 258 | "0": 315, 259 | "1": 314 260 | }, 261 | "flags": {}, 262 | "order": 4, 263 | "mode": 0, 264 | "outputs": [ 265 | { 266 | "name": "IMAGE", 267 | "type": "IMAGE", 268 | "links": [ 269 | 25 270 | ], 271 | "shape": 3, 272 | "slot_index": 0 273 | }, 274 | { 275 | "name": "MASK", 276 | "type": "MASK", 277 | "links": null, 278 | "shape": 3 279 | } 280 | ], 281 | "properties": { 282 | "Node name for S&R": "LoadImage" 283 | }, 284 | "widgets_values": [ 285 | "girl_sitting.png", 286 | "image" 287 | ] 288 | }, 289 | { 290 | "id": 17, 291 | "type": "PrepImageForClipVisionV2", 292 | "pos": [ 293 | 728, 294 | 290 295 | ], 296 | "size": [ 297 | 210, 298 | 106 299 | ], 300 | "flags": {}, 301 | "order": 7, 302 | "mode": 0, 303 | "inputs": [ 304 | { 305 | "name": "image", 306 | "type": "IMAGE", 307 | "link": 25 308 | } 309 | ], 310 | "outputs": [ 311 | { 312 | "name": "IMAGE", 313 | "type": "IMAGE", 314 | "links": [ 315 | 26, 316 | 29 317 | ], 318 | "shape": 3, 319 | "slot_index": 0 320 | } 321 | ], 322 | "properties": { 323 | "Node name for S&R": "PrepImageForClipVisionV2" 324 | }, 325 | "widgets_values": [ 326 | "LANCZOS", 327 | "top", 328 | 0.15 329 | ] 330 | }, 331 | { 332 | "id": 14, 333 | "type": "IPAdapterAdvancedV2", 334 | "pos": [ 335 | 1351, 336 | 214 337 | ], 338 | "size": { 339 | "0": 315, 340 | "1": 254 341 | }, 342 | "flags": {}, 343 | "order": 9, 344 | "mode": 0, 345 | "inputs": [ 346 | { 347 | "name": "model", 348 | "type": "MODEL", 349 | "link": 20 350 | }, 351 | { 352 | "name": "ipadapter", 353 | "type": "IPADAPTER", 354 | "link": 21, 355 | "slot_index": 1 356 | }, 357 | { 358 | "name": "image", 359 | "type": "IMAGE", 360 | "link": 26 361 | }, 362 | { 363 | "name": "image_negative", 364 | "type": "IMAGE", 365 | "link": 30 366 | }, 367 | { 368 | "name": "attn_mask", 369 | "type": "MASK", 370 | "link": null 371 | }, 372 | { 373 | "name": "clip_vision", 374 | "type": "CLIP_VISION", 375 | "link": 24, 376 | "slot_index": 5 377 | } 378 | ], 379 | "outputs": [ 380 | { 381 | "name": "MODEL", 382 | "type": "MODEL", 383 | "links": [ 384 | 23 385 | ], 386 | "shape": 3, 387 | "slot_index": 0 388 | } 389 | ], 390 | "properties": { 391 | "Node name for S&R": "IPAdapterAdvancedV2" 392 | }, 393 | "widgets_values": [ 394 | 0.7000000000000001, 395 | "linear", 396 | "concat", 397 | 0, 398 | 1 399 | ] 400 | }, 401 | { 402 | "id": 3, 403 | "type": "KSampler", 404 | "pos": [ 405 | 1210, 406 | 700 407 | ], 408 | "size": { 409 | "0": 315, 410 | "1": 262 411 | }, 412 | "flags": {}, 413 | "order": 10, 414 | "mode": 0, 415 | "inputs": [ 416 | { 417 | "name": "model", 418 | "type": "MODEL", 419 | "link": 23 420 | }, 421 | { 422 | "name": "positive", 423 | "type": "CONDITIONING", 424 | "link": 4 425 | }, 426 | { 427 | "name": "negative", 428 | "type": "CONDITIONING", 429 | "link": 6 430 | }, 431 | { 432 | "name": "latent_image", 433 | "type": "LATENT", 434 | "link": 2 435 | } 436 | ], 437 | "outputs": [ 438 | { 439 | "name": "LATENT", 440 | "type": "LATENT", 441 | "links": [ 442 | 7 443 | ], 444 | "slot_index": 0 445 | } 446 | ], 447 | "properties": { 448 | "Node name for S&R": 
"KSampler" 449 | }, 450 | "widgets_values": [ 451 | 0, 452 | "fixed", 453 | 30, 454 | 6.5, 455 | "dpmpp_2m_sde_gpu", 456 | "exponential", 457 | 1 458 | ] 459 | }, 460 | { 461 | "id": 18, 462 | "type": "IPAdapterNoiseV2", 463 | "pos": [ 464 | 1019, 465 | 405 466 | ], 467 | "size": [ 468 | 210, 469 | 106 470 | ], 471 | "flags": {}, 472 | "order": 8, 473 | "mode": 0, 474 | "inputs": [ 475 | { 476 | "name": "image_optional", 477 | "type": "IMAGE", 478 | "link": 29 479 | } 480 | ], 481 | "outputs": [ 482 | { 483 | "name": "IMAGE", 484 | "type": "IMAGE", 485 | "links": [ 486 | 30 487 | ], 488 | "shape": 3, 489 | "slot_index": 0 490 | } 491 | ], 492 | "properties": { 493 | "Node name for S&R": "IPAdapterNoiseV2" 494 | }, 495 | "widgets_values": [ 496 | "fade", 497 | 0.3, 498 | 5 499 | ] 500 | }, 501 | { 502 | "id": 8, 503 | "type": "VAEDecode", 504 | "pos": [ 505 | 1575, 506 | 705 507 | ], 508 | "size": { 509 | "0": 140, 510 | "1": 46 511 | }, 512 | "flags": {}, 513 | "order": 11, 514 | "mode": 0, 515 | "inputs": [ 516 | { 517 | "name": "samples", 518 | "type": "LATENT", 519 | "link": 7 520 | }, 521 | { 522 | "name": "vae", 523 | "type": "VAE", 524 | "link": 8 525 | } 526 | ], 527 | "outputs": [ 528 | { 529 | "name": "IMAGE", 530 | "type": "IMAGE", 531 | "links": [ 532 | 9 533 | ], 534 | "slot_index": 0 535 | } 536 | ], 537 | "properties": { 538 | "Node name for S&R": "VAEDecode" 539 | } 540 | } 541 | ], 542 | "links": [ 543 | [ 544 | 2, 545 | 5, 546 | 0, 547 | 3, 548 | 3, 549 | "LATENT" 550 | ], 551 | [ 552 | 3, 553 | 4, 554 | 1, 555 | 6, 556 | 0, 557 | "CLIP" 558 | ], 559 | [ 560 | 4, 561 | 6, 562 | 0, 563 | 3, 564 | 1, 565 | "CONDITIONING" 566 | ], 567 | [ 568 | 5, 569 | 4, 570 | 1, 571 | 7, 572 | 0, 573 | "CLIP" 574 | ], 575 | [ 576 | 6, 577 | 7, 578 | 0, 579 | 3, 580 | 2, 581 | "CONDITIONING" 582 | ], 583 | [ 584 | 7, 585 | 3, 586 | 0, 587 | 8, 588 | 0, 589 | "LATENT" 590 | ], 591 | [ 592 | 8, 593 | 4, 594 | 2, 595 | 8, 596 | 1, 597 | "VAE" 598 | ], 599 | [ 600 | 9, 601 | 8, 602 | 0, 603 | 9, 604 | 0, 605 | "IMAGE" 606 | ], 607 | [ 608 | 20, 609 | 4, 610 | 0, 611 | 14, 612 | 0, 613 | "MODEL" 614 | ], 615 | [ 616 | 21, 617 | 15, 618 | 0, 619 | 14, 620 | 1, 621 | "IPADAPTER" 622 | ], 623 | [ 624 | 23, 625 | 14, 626 | 0, 627 | 3, 628 | 0, 629 | "MODEL" 630 | ], 631 | [ 632 | 24, 633 | 16, 634 | 0, 635 | 14, 636 | 5, 637 | "CLIP_VISION" 638 | ], 639 | [ 640 | 25, 641 | 12, 642 | 0, 643 | 17, 644 | 0, 645 | "IMAGE" 646 | ], 647 | [ 648 | 26, 649 | 17, 650 | 0, 651 | 14, 652 | 2, 653 | "IMAGE" 654 | ], 655 | [ 656 | 29, 657 | 17, 658 | 0, 659 | 18, 660 | 0, 661 | "IMAGE" 662 | ], 663 | [ 664 | 30, 665 | 18, 666 | 0, 667 | 14, 668 | 3, 669 | "IMAGE" 670 | ] 671 | ], 672 | "groups": [], 673 | "config": {}, 674 | "extra": {}, 675 | "version": 0.4 676 | } -------------------------------------------------------------------------------- /examples/ipadapter_portrait.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 20, 3 | "last_link_id": 36, 4 | "nodes": [ 5 | { 6 | "id": 8, 7 | "type": "VAEDecode", 8 | "pos": [ 9 | 1640, 10 | 710 11 | ], 12 | "size": { 13 | "0": 140, 14 | "1": 46 15 | }, 16 | "flags": {}, 17 | "order": 8, 18 | "mode": 0, 19 | "inputs": [ 20 | { 21 | "name": "samples", 22 | "type": "LATENT", 23 | "link": 7 24 | }, 25 | { 26 | "name": "vae", 27 | "type": "VAE", 28 | "link": 8 29 | } 30 | ], 31 | "outputs": [ 32 | { 33 | "name": "IMAGE", 34 | "type": "IMAGE", 35 | "links": [ 36 | 9 37 | ], 38 | "slot_index": 0 39 | } 40 | ], 
41 | "properties": { 42 | "Node name for S&R": "VAEDecode" 43 | } 44 | }, 45 | { 46 | "id": 3, 47 | "type": "KSampler", 48 | "pos": [ 49 | 1280, 50 | 710 51 | ], 52 | "size": { 53 | "0": 315, 54 | "1": 262 55 | }, 56 | "flags": {}, 57 | "order": 7, 58 | "mode": 0, 59 | "inputs": [ 60 | { 61 | "name": "model", 62 | "type": "MODEL", 63 | "link": 32 64 | }, 65 | { 66 | "name": "positive", 67 | "type": "CONDITIONING", 68 | "link": 4 69 | }, 70 | { 71 | "name": "negative", 72 | "type": "CONDITIONING", 73 | "link": 6 74 | }, 75 | { 76 | "name": "latent_image", 77 | "type": "LATENT", 78 | "link": 2 79 | } 80 | ], 81 | "outputs": [ 82 | { 83 | "name": "LATENT", 84 | "type": "LATENT", 85 | "links": [ 86 | 7 87 | ], 88 | "slot_index": 0 89 | } 90 | ], 91 | "properties": { 92 | "Node name for S&R": "KSampler" 93 | }, 94 | "widgets_values": [ 95 | 0, 96 | "fixed", 97 | 30, 98 | 6.5, 99 | "ddpm", 100 | "karras", 101 | 1 102 | ] 103 | }, 104 | { 105 | "id": 9, 106 | "type": "SaveImage", 107 | "pos": [ 108 | 1830, 109 | 700 110 | ], 111 | "size": { 112 | "0": 529.7760009765625, 113 | "1": 582.3048095703125 114 | }, 115 | "flags": {}, 116 | "order": 9, 117 | "mode": 0, 118 | "inputs": [ 119 | { 120 | "name": "images", 121 | "type": "IMAGE", 122 | "link": 9 123 | } 124 | ], 125 | "properties": {}, 126 | "widgets_values": [ 127 | "IPAdapterV2" 128 | ] 129 | }, 130 | { 131 | "id": 20, 132 | "type": "IPAdapterUnifiedLoaderFaceIDV2", 133 | "pos": [ 134 | 460, 135 | 60 136 | ], 137 | "size": { 138 | "0": 315, 139 | "1": 126 140 | }, 141 | "flags": {}, 142 | "order": 3, 143 | "mode": 0, 144 | "inputs": [ 145 | { 146 | "name": "model", 147 | "type": "MODEL", 148 | "link": 36 149 | }, 150 | { 151 | "name": "ipadapter", 152 | "type": "IPADAPTER", 153 | "link": null 154 | } 155 | ], 156 | "outputs": [ 157 | { 158 | "name": "MODEL", 159 | "type": "MODEL", 160 | "links": [ 161 | 35 162 | ], 163 | "shape": 3, 164 | "slot_index": 0 165 | }, 166 | { 167 | "name": "ipadapter", 168 | "type": "IPADAPTER", 169 | "links": [ 170 | 34 171 | ], 172 | "shape": 3 173 | } 174 | ], 175 | "properties": { 176 | "Node name for S&R": "IPAdapterUnifiedLoaderFaceIDV2" 177 | }, 178 | "widgets_values": [ 179 | "FACEID PORTRAIT (style transfer)", 180 | 0.6, 181 | "CPU" 182 | ] 183 | }, 184 | { 185 | "id": 4, 186 | "type": "CheckpointLoaderSimple", 187 | "pos": [ 188 | 10, 189 | 680 190 | ], 191 | "size": { 192 | "0": 315, 193 | "1": 98 194 | }, 195 | "flags": {}, 196 | "order": 0, 197 | "mode": 0, 198 | "outputs": [ 199 | { 200 | "name": "MODEL", 201 | "type": "MODEL", 202 | "links": [ 203 | 36 204 | ], 205 | "slot_index": 0 206 | }, 207 | { 208 | "name": "CLIP", 209 | "type": "CLIP", 210 | "links": [ 211 | 3, 212 | 5 213 | ], 214 | "slot_index": 1 215 | }, 216 | { 217 | "name": "VAE", 218 | "type": "VAE", 219 | "links": [ 220 | 8 221 | ], 222 | "slot_index": 2 223 | } 224 | ], 225 | "properties": { 226 | "Node name for S&R": "CheckpointLoaderSimple" 227 | }, 228 | "widgets_values": [ 229 | "sdxl/juggernautXL_version8Rundiffusion.safetensors" 230 | ] 231 | }, 232 | { 233 | "id": 5, 234 | "type": "EmptyLatentImage", 235 | "pos": [ 236 | 870, 237 | 1100 238 | ], 239 | "size": { 240 | "0": 315, 241 | "1": 106 242 | }, 243 | "flags": {}, 244 | "order": 1, 245 | "mode": 0, 246 | "outputs": [ 247 | { 248 | "name": "LATENT", 249 | "type": "LATENT", 250 | "links": [ 251 | 2 252 | ], 253 | "slot_index": 0 254 | } 255 | ], 256 | "properties": { 257 | "Node name for S&R": "EmptyLatentImage" 258 | }, 259 | "widgets_values": [ 260 | 1024, 261 | 1024, 262 
| 1 263 | ] 264 | }, 265 | { 266 | "id": 12, 267 | "type": "LoadImage", 268 | "pos": [ 269 | 450, 270 | 240 271 | ], 272 | "size": { 273 | "0": 315, 274 | "1": 314 275 | }, 276 | "flags": {}, 277 | "order": 2, 278 | "mode": 0, 279 | "outputs": [ 280 | { 281 | "name": "IMAGE", 282 | "type": "IMAGE", 283 | "links": [ 284 | 29 285 | ], 286 | "shape": 3, 287 | "slot_index": 0 288 | }, 289 | { 290 | "name": "MASK", 291 | "type": "MASK", 292 | "links": null, 293 | "shape": 3 294 | } 295 | ], 296 | "properties": { 297 | "Node name for S&R": "LoadImage" 298 | }, 299 | "widgets_values": [ 300 | "face2.jpg", 301 | "image" 302 | ] 303 | }, 304 | { 305 | "id": 6, 306 | "type": "CLIPTextEncode", 307 | "pos": [ 308 | 760, 309 | 620 310 | ], 311 | "size": { 312 | "0": 422.84503173828125, 313 | "1": 164.31304931640625 314 | }, 315 | "flags": {}, 316 | "order": 4, 317 | "mode": 0, 318 | "inputs": [ 319 | { 320 | "name": "clip", 321 | "type": "CLIP", 322 | "link": 3 323 | } 324 | ], 325 | "outputs": [ 326 | { 327 | "name": "CONDITIONING", 328 | "type": "CONDITIONING", 329 | "links": [ 330 | 4 331 | ], 332 | "slot_index": 0 333 | } 334 | ], 335 | "properties": { 336 | "Node name for S&R": "CLIPTextEncode" 337 | }, 338 | "widgets_values": [ 339 | "a watercolor painting of a woman on the beach\n\nhigh quality artistry" 340 | ] 341 | }, 342 | { 343 | "id": 7, 344 | "type": "CLIPTextEncode", 345 | "pos": [ 346 | 760, 347 | 850 348 | ], 349 | "size": { 350 | "0": 425.27801513671875, 351 | "1": 180.6060791015625 352 | }, 353 | "flags": {}, 354 | "order": 5, 355 | "mode": 0, 356 | "inputs": [ 357 | { 358 | "name": "clip", 359 | "type": "CLIP", 360 | "link": 5 361 | } 362 | ], 363 | "outputs": [ 364 | { 365 | "name": "CONDITIONING", 366 | "type": "CONDITIONING", 367 | "links": [ 368 | 6 369 | ], 370 | "slot_index": 0 371 | } 372 | ], 373 | "properties": { 374 | "Node name for S&R": "CLIPTextEncode" 375 | }, 376 | "widgets_values": [ 377 | "photo, blurry, noisy, messy, lowres, jpeg, artifacts, ill, distorted, malformed, naked" 378 | ] 379 | }, 380 | { 381 | "id": 18, 382 | "type": "IPAdapterFaceIDV2", 383 | "pos": [ 384 | 850, 385 | 190 386 | ], 387 | "size": { 388 | "0": 315, 389 | "1": 322 390 | }, 391 | "flags": {}, 392 | "order": 6, 393 | "mode": 0, 394 | "inputs": [ 395 | { 396 | "name": "model", 397 | "type": "MODEL", 398 | "link": 35 399 | }, 400 | { 401 | "name": "ipadapter", 402 | "type": "IPADAPTER", 403 | "link": 34, 404 | "slot_index": 1 405 | }, 406 | { 407 | "name": "image", 408 | "type": "IMAGE", 409 | "link": 29 410 | }, 411 | { 412 | "name": "image_negative", 413 | "type": "IMAGE", 414 | "link": null 415 | }, 416 | { 417 | "name": "attn_mask", 418 | "type": "MASK", 419 | "link": null 420 | }, 421 | { 422 | "name": "clip_vision", 423 | "type": "CLIP_VISION", 424 | "link": null 425 | }, 426 | { 427 | "name": "insightface", 428 | "type": "INSIGHTFACE", 429 | "link": null 430 | } 431 | ], 432 | "outputs": [ 433 | { 434 | "name": "MODEL", 435 | "type": "MODEL", 436 | "links": [ 437 | 32 438 | ], 439 | "shape": 3, 440 | "slot_index": 0 441 | } 442 | ], 443 | "properties": { 444 | "Node name for S&R": "IPAdapterFaceIDV2" 445 | }, 446 | "widgets_values": [ 447 | 0.65, 448 | 1, 449 | "linear", 450 | "concat", 451 | 0, 452 | 1, 453 | "V only" 454 | ] 455 | } 456 | ], 457 | "links": [ 458 | [ 459 | 2, 460 | 5, 461 | 0, 462 | 3, 463 | 3, 464 | "LATENT" 465 | ], 466 | [ 467 | 3, 468 | 4, 469 | 1, 470 | 6, 471 | 0, 472 | "CLIP" 473 | ], 474 | [ 475 | 4, 476 | 6, 477 | 0, 478 | 3, 479 | 1, 480 | "CONDITIONING" 481 
| ], 482 | [ 483 | 5, 484 | 4, 485 | 1, 486 | 7, 487 | 0, 488 | "CLIP" 489 | ], 490 | [ 491 | 6, 492 | 7, 493 | 0, 494 | 3, 495 | 2, 496 | "CONDITIONING" 497 | ], 498 | [ 499 | 7, 500 | 3, 501 | 0, 502 | 8, 503 | 0, 504 | "LATENT" 505 | ], 506 | [ 507 | 8, 508 | 4, 509 | 2, 510 | 8, 511 | 1, 512 | "VAE" 513 | ], 514 | [ 515 | 9, 516 | 8, 517 | 0, 518 | 9, 519 | 0, 520 | "IMAGE" 521 | ], 522 | [ 523 | 29, 524 | 12, 525 | 0, 526 | 18, 527 | 2, 528 | "IMAGE" 529 | ], 530 | [ 531 | 32, 532 | 18, 533 | 0, 534 | 3, 535 | 0, 536 | "MODEL" 537 | ], 538 | [ 539 | 34, 540 | 20, 541 | 1, 542 | 18, 543 | 1, 544 | "IPADAPTER" 545 | ], 546 | [ 547 | 35, 548 | 20, 549 | 0, 550 | 18, 551 | 0, 552 | "MODEL" 553 | ], 554 | [ 555 | 36, 556 | 4, 557 | 0, 558 | 20, 559 | 0, 560 | "MODEL" 561 | ] 562 | ], 563 | "groups": [], 564 | "config": {}, 565 | "extra": {}, 566 | "version": 0.4 567 | } -------------------------------------------------------------------------------- /examples/ipadapter_simple.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 13, 3 | "last_link_id": 17, 4 | "nodes": [ 5 | { 6 | "id": 11, 7 | "type": "IPAdapterUnifiedLoaderV2", 8 | "pos": [ 9 | 440, 10 | 440 11 | ], 12 | "size": { 13 | "0": 315, 14 | "1": 78 15 | }, 16 | "flags": {}, 17 | "order": 3, 18 | "mode": 0, 19 | "inputs": [ 20 | { 21 | "name": "model", 22 | "type": "MODEL", 23 | "link": 10 24 | }, 25 | { 26 | "name": "ipadapter", 27 | "type": "IPADAPTER", 28 | "link": null 29 | } 30 | ], 31 | "outputs": [ 32 | { 33 | "name": "model", 34 | "type": "MODEL", 35 | "links": [ 36 | 11 37 | ], 38 | "shape": 3, 39 | "slot_index": 0 40 | }, 41 | { 42 | "name": "ipadapter", 43 | "type": "IPADAPTER", 44 | "links": [ 45 | 12 46 | ], 47 | "shape": 3, 48 | "slot_index": 1 49 | } 50 | ], 51 | "properties": { 52 | "Node name for S&R": "IPAdapterUnifiedLoaderV2" 53 | }, 54 | "widgets_values": [ 55 | "PLUS (high strength)" 56 | ] 57 | }, 58 | { 59 | "id": 4, 60 | "type": "CheckpointLoaderSimple", 61 | "pos": [ 62 | 50, 63 | 730 64 | ], 65 | "size": { 66 | "0": 315, 67 | "1": 98 68 | }, 69 | "flags": {}, 70 | "order": 0, 71 | "mode": 0, 72 | "outputs": [ 73 | { 74 | "name": "MODEL", 75 | "type": "MODEL", 76 | "links": [ 77 | 10 78 | ], 79 | "slot_index": 0 80 | }, 81 | { 82 | "name": "CLIP", 83 | "type": "CLIP", 84 | "links": [ 85 | 3, 86 | 5 87 | ], 88 | "slot_index": 1 89 | }, 90 | { 91 | "name": "VAE", 92 | "type": "VAE", 93 | "links": [ 94 | 8 95 | ], 96 | "slot_index": 2 97 | } 98 | ], 99 | "properties": { 100 | "Node name for S&R": "CheckpointLoaderSimple" 101 | }, 102 | "widgets_values": [ 103 | "sd15/realisticVisionV51_v51VAE.safetensors" 104 | ] 105 | }, 106 | { 107 | "id": 12, 108 | "type": "LoadImage", 109 | "pos": [ 110 | 440, 111 | 60 112 | ], 113 | "size": [ 114 | 315, 115 | 314 116 | ], 117 | "flags": {}, 118 | "order": 1, 119 | "mode": 0, 120 | "outputs": [ 121 | { 122 | "name": "IMAGE", 123 | "type": "IMAGE", 124 | "links": [ 125 | 17 126 | ], 127 | "shape": 3 128 | }, 129 | { 130 | "name": "MASK", 131 | "type": "MASK", 132 | "links": null, 133 | "shape": 3 134 | } 135 | ], 136 | "properties": { 137 | "Node name for S&R": "LoadImage" 138 | }, 139 | "widgets_values": [ 140 | "warrior_woman.png", 141 | "image" 142 | ] 143 | }, 144 | { 145 | "id": 3, 146 | "type": "KSampler", 147 | "pos": [ 148 | 1210, 149 | 700 150 | ], 151 | "size": { 152 | "0": 315, 153 | "1": 262 154 | }, 155 | "flags": {}, 156 | "order": 7, 157 | "mode": 0, 158 | "inputs": [ 159 | { 160 | "name": 
"model", 161 | "type": "MODEL", 162 | "link": 13 163 | }, 164 | { 165 | "name": "positive", 166 | "type": "CONDITIONING", 167 | "link": 4 168 | }, 169 | { 170 | "name": "negative", 171 | "type": "CONDITIONING", 172 | "link": 6 173 | }, 174 | { 175 | "name": "latent_image", 176 | "type": "LATENT", 177 | "link": 2 178 | } 179 | ], 180 | "outputs": [ 181 | { 182 | "name": "LATENT", 183 | "type": "LATENT", 184 | "links": [ 185 | 7 186 | ], 187 | "slot_index": 0 188 | } 189 | ], 190 | "properties": { 191 | "Node name for S&R": "KSampler" 192 | }, 193 | "widgets_values": [ 194 | 0, 195 | "fixed", 196 | 30, 197 | 6.5, 198 | "dpmpp_2m", 199 | "karras", 200 | 1 201 | ] 202 | }, 203 | { 204 | "id": 6, 205 | "type": "CLIPTextEncode", 206 | "pos": [ 207 | 690, 208 | 610 209 | ], 210 | "size": { 211 | "0": 422.84503173828125, 212 | "1": 164.31304931640625 213 | }, 214 | "flags": {}, 215 | "order": 4, 216 | "mode": 0, 217 | "inputs": [ 218 | { 219 | "name": "clip", 220 | "type": "CLIP", 221 | "link": 3 222 | } 223 | ], 224 | "outputs": [ 225 | { 226 | "name": "CONDITIONING", 227 | "type": "CONDITIONING", 228 | "links": [ 229 | 4 230 | ], 231 | "slot_index": 0 232 | } 233 | ], 234 | "properties": { 235 | "Node name for S&R": "CLIPTextEncode" 236 | }, 237 | "widgets_values": [ 238 | "closeup of a fierce warrior woman wearing a full armor at the end of a battle\n\nhigh quality, detailed" 239 | ] 240 | }, 241 | { 242 | "id": 10, 243 | "type": "IPAdapterV2", 244 | "pos": [ 245 | 820, 246 | 350 247 | ], 248 | "size": { 249 | "0": 315, 250 | "1": 166 251 | }, 252 | "flags": {}, 253 | "order": 6, 254 | "mode": 0, 255 | "inputs": [ 256 | { 257 | "name": "model", 258 | "type": "MODEL", 259 | "link": 11 260 | }, 261 | { 262 | "name": "ipadapter", 263 | "type": "IPADAPTER", 264 | "link": 12 265 | }, 266 | { 267 | "name": "image", 268 | "type": "IMAGE", 269 | "link": 17, 270 | "slot_index": 2 271 | }, 272 | { 273 | "name": "attn_mask", 274 | "type": "MASK", 275 | "link": null 276 | } 277 | ], 278 | "outputs": [ 279 | { 280 | "name": "MODEL", 281 | "type": "MODEL", 282 | "links": [ 283 | 13 284 | ], 285 | "shape": 3, 286 | "slot_index": 0 287 | } 288 | ], 289 | "properties": { 290 | "Node name for S&R": "IPAdapterV2" 291 | }, 292 | "widgets_values": [ 293 | 0.8, 294 | 0, 295 | 1 296 | ] 297 | }, 298 | { 299 | "id": 9, 300 | "type": "SaveImage", 301 | "pos": [ 302 | 1770, 303 | 710 304 | ], 305 | "size": [ 306 | 529.7760009765616, 307 | 582.3048192804504 308 | ], 309 | "flags": {}, 310 | "order": 9, 311 | "mode": 0, 312 | "inputs": [ 313 | { 314 | "name": "images", 315 | "type": "IMAGE", 316 | "link": 9 317 | } 318 | ], 319 | "properties": {}, 320 | "widgets_values": [ 321 | "IPAdapterV2" 322 | ] 323 | }, 324 | { 325 | "id": 7, 326 | "type": "CLIPTextEncode", 327 | "pos": [ 328 | 690, 329 | 840 330 | ], 331 | "size": { 332 | "0": 425.27801513671875, 333 | "1": 180.6060791015625 334 | }, 335 | "flags": {}, 336 | "order": 5, 337 | "mode": 0, 338 | "inputs": [ 339 | { 340 | "name": "clip", 341 | "type": "CLIP", 342 | "link": 5 343 | } 344 | ], 345 | "outputs": [ 346 | { 347 | "name": "CONDITIONING", 348 | "type": "CONDITIONING", 349 | "links": [ 350 | 6 351 | ], 352 | "slot_index": 0 353 | } 354 | ], 355 | "properties": { 356 | "Node name for S&R": "CLIPTextEncode" 357 | }, 358 | "widgets_values": [ 359 | "blurry, noisy, messy, lowres, jpeg, artifacts, ill, distorted, malformed" 360 | ] 361 | }, 362 | { 363 | "id": 8, 364 | "type": "VAEDecode", 365 | "pos": [ 366 | 1570, 367 | 700 368 | ], 369 | "size": [ 370 | 140, 371 
| 46 372 | ], 373 | "flags": {}, 374 | "order": 8, 375 | "mode": 0, 376 | "inputs": [ 377 | { 378 | "name": "samples", 379 | "type": "LATENT", 380 | "link": 7 381 | }, 382 | { 383 | "name": "vae", 384 | "type": "VAE", 385 | "link": 8 386 | } 387 | ], 388 | "outputs": [ 389 | { 390 | "name": "IMAGE", 391 | "type": "IMAGE", 392 | "links": [ 393 | 9 394 | ], 395 | "slot_index": 0 396 | } 397 | ], 398 | "properties": { 399 | "Node name for S&R": "VAEDecode" 400 | } 401 | }, 402 | { 403 | "id": 5, 404 | "type": "EmptyLatentImage", 405 | "pos": [ 406 | 801, 407 | 1097 408 | ], 409 | "size": { 410 | "0": 315, 411 | "1": 106 412 | }, 413 | "flags": {}, 414 | "order": 2, 415 | "mode": 0, 416 | "outputs": [ 417 | { 418 | "name": "LATENT", 419 | "type": "LATENT", 420 | "links": [ 421 | 2 422 | ], 423 | "slot_index": 0 424 | } 425 | ], 426 | "properties": { 427 | "Node name for S&R": "EmptyLatentImage" 428 | }, 429 | "widgets_values": [ 430 | 512, 431 | 512, 432 | 1 433 | ] 434 | } 435 | ], 436 | "links": [ 437 | [ 438 | 2, 439 | 5, 440 | 0, 441 | 3, 442 | 3, 443 | "LATENT" 444 | ], 445 | [ 446 | 3, 447 | 4, 448 | 1, 449 | 6, 450 | 0, 451 | "CLIP" 452 | ], 453 | [ 454 | 4, 455 | 6, 456 | 0, 457 | 3, 458 | 1, 459 | "CONDITIONING" 460 | ], 461 | [ 462 | 5, 463 | 4, 464 | 1, 465 | 7, 466 | 0, 467 | "CLIP" 468 | ], 469 | [ 470 | 6, 471 | 7, 472 | 0, 473 | 3, 474 | 2, 475 | "CONDITIONING" 476 | ], 477 | [ 478 | 7, 479 | 3, 480 | 0, 481 | 8, 482 | 0, 483 | "LATENT" 484 | ], 485 | [ 486 | 8, 487 | 4, 488 | 2, 489 | 8, 490 | 1, 491 | "VAE" 492 | ], 493 | [ 494 | 9, 495 | 8, 496 | 0, 497 | 9, 498 | 0, 499 | "IMAGE" 500 | ], 501 | [ 502 | 10, 503 | 4, 504 | 0, 505 | 11, 506 | 0, 507 | "MODEL" 508 | ], 509 | [ 510 | 11, 511 | 11, 512 | 0, 513 | 10, 514 | 0, 515 | "MODEL" 516 | ], 517 | [ 518 | 12, 519 | 11, 520 | 1, 521 | 10, 522 | 1, 523 | "IPADAPTER" 524 | ], 525 | [ 526 | 13, 527 | 10, 528 | 0, 529 | 3, 530 | 0, 531 | "MODEL" 532 | ], 533 | [ 534 | 17, 535 | 12, 536 | 0, 537 | 10, 538 | 2, 539 | "IMAGE" 540 | ] 541 | ], 542 | "groups": [], 543 | "config": {}, 544 | "extra": {}, 545 | "version": 0.4 546 | } -------------------------------------------------------------------------------- /examples/ipadapter_style_composition.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 16, 3 | "last_link_id": 25, 4 | "nodes": [ 5 | { 6 | "id": 7, 7 | "type": "CLIPTextEncode", 8 | "pos": [ 9 | 690, 10 | 840 11 | ], 12 | "size": { 13 | "0": 425.27801513671875, 14 | "1": 180.6060791015625 15 | }, 16 | "flags": {}, 17 | "order": 6, 18 | "mode": 0, 19 | "inputs": [ 20 | { 21 | "name": "clip", 22 | "type": "CLIP", 23 | "link": 5 24 | } 25 | ], 26 | "outputs": [ 27 | { 28 | "name": "CONDITIONING", 29 | "type": "CONDITIONING", 30 | "links": [ 31 | 6 32 | ], 33 | "slot_index": 0 34 | } 35 | ], 36 | "properties": { 37 | "Node name for S&R": "CLIPTextEncode" 38 | }, 39 | "widgets_values": [ 40 | "blurry, noisy, messy, lowres, jpeg, artifacts, ill, distorted, malformed" 41 | ] 42 | }, 43 | { 44 | "id": 11, 45 | "type": "IPAdapterUnifiedLoaderV2", 46 | "pos": [ 47 | 335, 48 | 430 49 | ], 50 | "size": { 51 | "0": 315, 52 | "1": 78 53 | }, 54 | "flags": {}, 55 | "order": 4, 56 | "mode": 0, 57 | "inputs": [ 58 | { 59 | "name": "model", 60 | "type": "MODEL", 61 | "link": 10 62 | }, 63 | { 64 | "name": "ipadapter", 65 | "type": "IPADAPTER", 66 | "link": null 67 | } 68 | ], 69 | "outputs": [ 70 | { 71 | "name": "model", 72 | "type": "MODEL", 73 | "links": [ 74 | 21 75 | ], 76 | 
"shape": 3, 77 | "slot_index": 0 78 | }, 79 | { 80 | "name": "ipadapter", 81 | "type": "IPADAPTER", 82 | "links": [ 83 | 22 84 | ], 85 | "shape": 3, 86 | "slot_index": 1 87 | } 88 | ], 89 | "properties": { 90 | "Node name for S&R": "IPAdapterUnifiedLoaderV2" 91 | }, 92 | "widgets_values": [ 93 | "PLUS (high strength)" 94 | ] 95 | }, 96 | { 97 | "id": 12, 98 | "type": "LoadImage", 99 | "pos": [ 100 | -102, 101 | -46 102 | ], 103 | "size": { 104 | "0": 315, 105 | "1": 314 106 | }, 107 | "flags": {}, 108 | "order": 0, 109 | "mode": 0, 110 | "outputs": [ 111 | { 112 | "name": "IMAGE", 113 | "type": "IMAGE", 114 | "links": [ 115 | 25 116 | ], 117 | "shape": 3, 118 | "slot_index": 0 119 | }, 120 | { 121 | "name": "MASK", 122 | "type": "MASK", 123 | "links": null, 124 | "shape": 3 125 | } 126 | ], 127 | "properties": { 128 | "Node name for S&R": "LoadImage" 129 | }, 130 | "widgets_values": [ 131 | "black_car.jpg", 132 | "image" 133 | ] 134 | }, 135 | { 136 | "id": 16, 137 | "type": "LoadImage", 138 | "pos": [ 139 | 310, 140 | -40 141 | ], 142 | "size": { 143 | "0": 315, 144 | "1": 314 145 | }, 146 | "flags": {}, 147 | "order": 1, 148 | "mode": 0, 149 | "outputs": [ 150 | { 151 | "name": "IMAGE", 152 | "type": "IMAGE", 153 | "links": [ 154 | 24 155 | ], 156 | "shape": 3, 157 | "slot_index": 0 158 | }, 159 | { 160 | "name": "MASK", 161 | "type": "MASK", 162 | "links": null, 163 | "shape": 3 164 | } 165 | ], 166 | "properties": { 167 | "Node name for S&R": "LoadImage" 168 | }, 169 | "widgets_values": [ 170 | "bw_texture_waves.jpg", 171 | "image" 172 | ] 173 | }, 174 | { 175 | "id": 5, 176 | "type": "EmptyLatentImage", 177 | "pos": [ 178 | 801, 179 | 1097 180 | ], 181 | "size": { 182 | "0": 315, 183 | "1": 106 184 | }, 185 | "flags": {}, 186 | "order": 2, 187 | "mode": 0, 188 | "outputs": [ 189 | { 190 | "name": "LATENT", 191 | "type": "LATENT", 192 | "links": [ 193 | 2 194 | ], 195 | "slot_index": 0 196 | } 197 | ], 198 | "properties": { 199 | "Node name for S&R": "EmptyLatentImage" 200 | }, 201 | "widgets_values": [ 202 | 1024, 203 | 1024, 204 | 1 205 | ] 206 | }, 207 | { 208 | "id": 6, 209 | "type": "CLIPTextEncode", 210 | "pos": [ 211 | 690, 212 | 610 213 | ], 214 | "size": { 215 | "0": 422.84503173828125, 216 | "1": 164.31304931640625 217 | }, 218 | "flags": {}, 219 | "order": 5, 220 | "mode": 0, 221 | "inputs": [ 222 | { 223 | "name": "clip", 224 | "type": "CLIP", 225 | "link": 3 226 | } 227 | ], 228 | "outputs": [ 229 | { 230 | "name": "CONDITIONING", 231 | "type": "CONDITIONING", 232 | "links": [ 233 | 4 234 | ], 235 | "slot_index": 0 236 | } 237 | ], 238 | "properties": { 239 | "Node name for S&R": "CLIPTextEncode" 240 | }, 241 | "widgets_values": [ 242 | "sports car running fast on the highway\n\nhigh quality, detailed" 243 | ] 244 | }, 245 | { 246 | "id": 15, 247 | "type": "IPAdapterStyleCompositionV2", 248 | "pos": [ 249 | 772, 250 | 219 251 | ], 252 | "size": { 253 | "0": 315, 254 | "1": 322 255 | }, 256 | "flags": {}, 257 | "order": 7, 258 | "mode": 0, 259 | "inputs": [ 260 | { 261 | "name": "model", 262 | "type": "MODEL", 263 | "link": 21 264 | }, 265 | { 266 | "name": "ipadapter", 267 | "type": "IPADAPTER", 268 | "link": 22 269 | }, 270 | { 271 | "name": "image_style", 272 | "type": "IMAGE", 273 | "link": 24 274 | }, 275 | { 276 | "name": "image_composition", 277 | "type": "IMAGE", 278 | "link": 25 279 | }, 280 | { 281 | "name": "image_negative", 282 | "type": "IMAGE", 283 | "link": null 284 | }, 285 | { 286 | "name": "attn_mask", 287 | "type": "MASK", 288 | "link": null 289 | }, 290 
| { 291 | "name": "clip_vision", 292 | "type": "CLIP_VISION", 293 | "link": null 294 | } 295 | ], 296 | "outputs": [ 297 | { 298 | "name": "MODEL", 299 | "type": "MODEL", 300 | "links": [ 301 | 23 302 | ], 303 | "shape": 3, 304 | "slot_index": 0 305 | } 306 | ], 307 | "properties": { 308 | "Node name for S&R": "IPAdapterStyleCompositionV2" 309 | }, 310 | "widgets_values": [ 311 | 1.2, 312 | 1, 313 | false, 314 | "average", 315 | 0, 316 | 1, 317 | "V only" 318 | ] 319 | }, 320 | { 321 | "id": 3, 322 | "type": "KSampler", 323 | "pos": [ 324 | 1247, 325 | 586 326 | ], 327 | "size": { 328 | "0": 315, 329 | "1": 262 330 | }, 331 | "flags": {}, 332 | "order": 8, 333 | "mode": 0, 334 | "inputs": [ 335 | { 336 | "name": "model", 337 | "type": "MODEL", 338 | "link": 23 339 | }, 340 | { 341 | "name": "positive", 342 | "type": "CONDITIONING", 343 | "link": 4 344 | }, 345 | { 346 | "name": "negative", 347 | "type": "CONDITIONING", 348 | "link": 6 349 | }, 350 | { 351 | "name": "latent_image", 352 | "type": "LATENT", 353 | "link": 2 354 | } 355 | ], 356 | "outputs": [ 357 | { 358 | "name": "LATENT", 359 | "type": "LATENT", 360 | "links": [ 361 | 7 362 | ], 363 | "slot_index": 0 364 | } 365 | ], 366 | "properties": { 367 | "Node name for S&R": "KSampler" 368 | }, 369 | "widgets_values": [ 370 | 0, 371 | "fixed", 372 | 30, 373 | 6.5, 374 | "dpmpp_2m", 375 | "karras", 376 | 1 377 | ] 378 | }, 379 | { 380 | "id": 8, 381 | "type": "VAEDecode", 382 | "pos": [ 383 | 1615, 384 | 586 385 | ], 386 | "size": { 387 | "0": 140, 388 | "1": 46 389 | }, 390 | "flags": {}, 391 | "order": 9, 392 | "mode": 0, 393 | "inputs": [ 394 | { 395 | "name": "samples", 396 | "type": "LATENT", 397 | "link": 7 398 | }, 399 | { 400 | "name": "vae", 401 | "type": "VAE", 402 | "link": 8 403 | } 404 | ], 405 | "outputs": [ 406 | { 407 | "name": "IMAGE", 408 | "type": "IMAGE", 409 | "links": [ 410 | 9 411 | ], 412 | "slot_index": 0 413 | } 414 | ], 415 | "properties": { 416 | "Node name for S&R": "VAEDecode" 417 | } 418 | }, 419 | { 420 | "id": 9, 421 | "type": "SaveImage", 422 | "pos": [ 423 | 1822, 424 | 588 425 | ], 426 | "size": [ 427 | 691.0159878487498, 428 | 716.6239849908982 429 | ], 430 | "flags": {}, 431 | "order": 10, 432 | "mode": 0, 433 | "inputs": [ 434 | { 435 | "name": "images", 436 | "type": "IMAGE", 437 | "link": 9 438 | } 439 | ], 440 | "properties": {}, 441 | "widgets_values": [ 442 | "IPAdapterV2" 443 | ] 444 | }, 445 | { 446 | "id": 4, 447 | "type": "CheckpointLoaderSimple", 448 | "pos": [ 449 | -72, 450 | 657 451 | ], 452 | "size": { 453 | "0": 315, 454 | "1": 98 455 | }, 456 | "flags": {}, 457 | "order": 3, 458 | "mode": 0, 459 | "outputs": [ 460 | { 461 | "name": "MODEL", 462 | "type": "MODEL", 463 | "links": [ 464 | 10 465 | ], 466 | "slot_index": 0 467 | }, 468 | { 469 | "name": "CLIP", 470 | "type": "CLIP", 471 | "links": [ 472 | 3, 473 | 5 474 | ], 475 | "slot_index": 1 476 | }, 477 | { 478 | "name": "VAE", 479 | "type": "VAE", 480 | "links": [ 481 | 8 482 | ], 483 | "slot_index": 2 484 | } 485 | ], 486 | "properties": { 487 | "Node name for S&R": "CheckpointLoaderSimple" 488 | }, 489 | "widgets_values": [ 490 | "sdxl/AlbedoBaseXL.safetensors" 491 | ] 492 | } 493 | ], 494 | "links": [ 495 | [ 496 | 2, 497 | 5, 498 | 0, 499 | 3, 500 | 3, 501 | "LATENT" 502 | ], 503 | [ 504 | 3, 505 | 4, 506 | 1, 507 | 6, 508 | 0, 509 | "CLIP" 510 | ], 511 | [ 512 | 4, 513 | 6, 514 | 0, 515 | 3, 516 | 1, 517 | "CONDITIONING" 518 | ], 519 | [ 520 | 5, 521 | 4, 522 | 1, 523 | 7, 524 | 0, 525 | "CLIP" 526 | ], 527 | [ 528 | 6, 529 
| 7, 530 | 0, 531 | 3, 532 | 2, 533 | "CONDITIONING" 534 | ], 535 | [ 536 | 7, 537 | 3, 538 | 0, 539 | 8, 540 | 0, 541 | "LATENT" 542 | ], 543 | [ 544 | 8, 545 | 4, 546 | 2, 547 | 8, 548 | 1, 549 | "VAE" 550 | ], 551 | [ 552 | 9, 553 | 8, 554 | 0, 555 | 9, 556 | 0, 557 | "IMAGE" 558 | ], 559 | [ 560 | 10, 561 | 4, 562 | 0, 563 | 11, 564 | 0, 565 | "MODEL" 566 | ], 567 | [ 568 | 21, 569 | 11, 570 | 0, 571 | 15, 572 | 0, 573 | "MODEL" 574 | ], 575 | [ 576 | 22, 577 | 11, 578 | 1, 579 | 15, 580 | 1, 581 | "IPADAPTER" 582 | ], 583 | [ 584 | 23, 585 | 15, 586 | 0, 587 | 3, 588 | 0, 589 | "MODEL" 590 | ], 591 | [ 592 | 24, 593 | 16, 594 | 0, 595 | 15, 596 | 2, 597 | "IMAGE" 598 | ], 599 | [ 600 | 25, 601 | 12, 602 | 0, 603 | 15, 604 | 3, 605 | "IMAGE" 606 | ] 607 | ], 608 | "groups": [], 609 | "config": {}, 610 | "extra": {}, 611 | "version": 0.4 612 | } -------------------------------------------------------------------------------- /examples/ipadapter_tiled.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 18, 3 | "last_link_id": 32, 4 | "nodes": [ 5 | { 6 | "id": 4, 7 | "type": "CheckpointLoaderSimple", 8 | "pos": [ 9 | 50, 10 | 730 11 | ], 12 | "size": { 13 | "0": 315, 14 | "1": 98 15 | }, 16 | "flags": {}, 17 | "order": 0, 18 | "mode": 0, 19 | "outputs": [ 20 | { 21 | "name": "MODEL", 22 | "type": "MODEL", 23 | "links": [ 24 | 29 25 | ], 26 | "slot_index": 0 27 | }, 28 | { 29 | "name": "CLIP", 30 | "type": "CLIP", 31 | "links": [ 32 | 3, 33 | 5 34 | ], 35 | "slot_index": 1 36 | }, 37 | { 38 | "name": "VAE", 39 | "type": "VAE", 40 | "links": [ 41 | 8 42 | ], 43 | "slot_index": 2 44 | } 45 | ], 46 | "properties": { 47 | "Node name for S&R": "CheckpointLoaderSimple" 48 | }, 49 | "widgets_values": [ 50 | "sd15/realisticVisionV51_v51VAE.safetensors" 51 | ] 52 | }, 53 | { 54 | "id": 7, 55 | "type": "CLIPTextEncode", 56 | "pos": [ 57 | 690, 58 | 840 59 | ], 60 | "size": { 61 | "0": 425.27801513671875, 62 | "1": 180.6060791015625 63 | }, 64 | "flags": {}, 65 | "order": 6, 66 | "mode": 0, 67 | "inputs": [ 68 | { 69 | "name": "clip", 70 | "type": "CLIP", 71 | "link": 5 72 | } 73 | ], 74 | "outputs": [ 75 | { 76 | "name": "CONDITIONING", 77 | "type": "CONDITIONING", 78 | "links": [ 79 | 6 80 | ], 81 | "slot_index": 0 82 | } 83 | ], 84 | "properties": { 85 | "Node name for S&R": "CLIPTextEncode" 86 | }, 87 | "widgets_values": [ 88 | "blurry, noisy, messy, lowres, jpeg, artifacts, ill, distorted, malformed" 89 | ] 90 | }, 91 | { 92 | "id": 8, 93 | "type": "VAEDecode", 94 | "pos": [ 95 | 1570, 96 | 700 97 | ], 98 | "size": { 99 | "0": 140, 100 | "1": 46 101 | }, 102 | "flags": {}, 103 | "order": 9, 104 | "mode": 0, 105 | "inputs": [ 106 | { 107 | "name": "samples", 108 | "type": "LATENT", 109 | "link": 7 110 | }, 111 | { 112 | "name": "vae", 113 | "type": "VAE", 114 | "link": 8 115 | } 116 | ], 117 | "outputs": [ 118 | { 119 | "name": "IMAGE", 120 | "type": "IMAGE", 121 | "links": [ 122 | 9 123 | ], 124 | "slot_index": 0 125 | } 126 | ], 127 | "properties": { 128 | "Node name for S&R": "VAEDecode" 129 | } 130 | }, 131 | { 132 | "id": 12, 133 | "type": "LoadImage", 134 | "pos": [ 135 | 250, 136 | 290 137 | ], 138 | "size": { 139 | "0": 315, 140 | "1": 314 141 | }, 142 | "flags": {}, 143 | "order": 1, 144 | "mode": 0, 145 | "outputs": [ 146 | { 147 | "name": "IMAGE", 148 | "type": "IMAGE", 149 | "links": [ 150 | 27 151 | ], 152 | "shape": 3, 153 | "slot_index": 0 154 | }, 155 | { 156 | "name": "MASK", 157 | "type": "MASK", 158 | "links": null, 
159 | "shape": 3 160 | } 161 | ], 162 | "properties": { 163 | "Node name for S&R": "LoadImage" 164 | }, 165 | "widgets_values": [ 166 | "girl_sitting.png", 167 | "image" 168 | ] 169 | }, 170 | { 171 | "id": 6, 172 | "type": "CLIPTextEncode", 173 | "pos": [ 174 | 690, 175 | 610 176 | ], 177 | "size": { 178 | "0": 422.84503173828125, 179 | "1": 164.31304931640625 180 | }, 181 | "flags": {}, 182 | "order": 5, 183 | "mode": 0, 184 | "inputs": [ 185 | { 186 | "name": "clip", 187 | "type": "CLIP", 188 | "link": 3 189 | } 190 | ], 191 | "outputs": [ 192 | { 193 | "name": "CONDITIONING", 194 | "type": "CONDITIONING", 195 | "links": [ 196 | 4 197 | ], 198 | "slot_index": 0 199 | } 200 | ], 201 | "properties": { 202 | "Node name for S&R": "CLIPTextEncode" 203 | }, 204 | "widgets_values": [ 205 | "in a peaceful spring morning a woman wearing a white shirt is sitting in a park on a bench\n\nhigh quality, detailed, diffuse light" 206 | ] 207 | }, 208 | { 209 | "id": 16, 210 | "type": "CLIPVisionLoader", 211 | "pos": [ 212 | 250, 213 | 180 214 | ], 215 | "size": { 216 | "0": 315, 217 | "1": 58 218 | }, 219 | "flags": {}, 220 | "order": 2, 221 | "mode": 0, 222 | "outputs": [ 223 | { 224 | "name": "CLIP_VISION", 225 | "type": "CLIP_VISION", 226 | "links": [ 227 | 32 228 | ], 229 | "shape": 3, 230 | "slot_index": 0 231 | } 232 | ], 233 | "properties": { 234 | "Node name for S&R": "CLIPVisionLoader" 235 | }, 236 | "widgets_values": [ 237 | "IPAdapter_image_encoder_sd15.safetensors" 238 | ] 239 | }, 240 | { 241 | "id": 5, 242 | "type": "EmptyLatentImage", 243 | "pos": [ 244 | 801, 245 | 1097 246 | ], 247 | "size": { 248 | "0": 315, 249 | "1": 106 250 | }, 251 | "flags": {}, 252 | "order": 3, 253 | "mode": 0, 254 | "outputs": [ 255 | { 256 | "name": "LATENT", 257 | "type": "LATENT", 258 | "links": [ 259 | 2 260 | ], 261 | "slot_index": 0 262 | } 263 | ], 264 | "properties": { 265 | "Node name for S&R": "EmptyLatentImage" 266 | }, 267 | "widgets_values": [ 268 | 512, 269 | 768, 270 | 1 271 | ] 272 | }, 273 | { 274 | "id": 3, 275 | "type": "KSampler", 276 | "pos": [ 277 | 1210, 278 | 700 279 | ], 280 | "size": { 281 | "0": 315, 282 | "1": 262 283 | }, 284 | "flags": {}, 285 | "order": 8, 286 | "mode": 0, 287 | "inputs": [ 288 | { 289 | "name": "model", 290 | "type": "MODEL", 291 | "link": 30 292 | }, 293 | { 294 | "name": "positive", 295 | "type": "CONDITIONING", 296 | "link": 4 297 | }, 298 | { 299 | "name": "negative", 300 | "type": "CONDITIONING", 301 | "link": 6 302 | }, 303 | { 304 | "name": "latent_image", 305 | "type": "LATENT", 306 | "link": 2 307 | } 308 | ], 309 | "outputs": [ 310 | { 311 | "name": "LATENT", 312 | "type": "LATENT", 313 | "links": [ 314 | 7 315 | ], 316 | "slot_index": 0 317 | } 318 | ], 319 | "properties": { 320 | "Node name for S&R": "KSampler" 321 | }, 322 | "widgets_values": [ 323 | 2, 324 | "fixed", 325 | 30, 326 | 6.5, 327 | "ddpm", 328 | "karras", 329 | 1 330 | ] 331 | }, 332 | { 333 | "id": 15, 334 | "type": "IPAdapterModelLoaderV2", 335 | "pos": [ 336 | 250, 337 | 70 338 | ], 339 | "size": { 340 | "0": 315, 341 | "1": 58 342 | }, 343 | "flags": {}, 344 | "order": 4, 345 | "mode": 0, 346 | "outputs": [ 347 | { 348 | "name": "IPADAPTER", 349 | "type": "IPADAPTER", 350 | "links": [ 351 | 31 352 | ], 353 | "shape": 3, 354 | "slot_index": 0 355 | } 356 | ], 357 | "properties": { 358 | "Node name for S&R": "IPAdapterModelLoaderV2" 359 | }, 360 | "widgets_values": [ 361 | "ip-adapter-plus_sd15.safetensors" 362 | ] 363 | }, 364 | { 365 | "id": 18, 366 | "type": "IPAdapterTiledV2", 367 
| "pos": [ 368 | 700, 369 | 230 370 | ], 371 | "size": { 372 | "0": 315, 373 | "1": 278 374 | }, 375 | "flags": {}, 376 | "order": 7, 377 | "mode": 0, 378 | "inputs": [ 379 | { 380 | "name": "model", 381 | "type": "MODEL", 382 | "link": 29 383 | }, 384 | { 385 | "name": "ipadapter", 386 | "type": "IPADAPTER", 387 | "link": 31 388 | }, 389 | { 390 | "name": "image", 391 | "type": "IMAGE", 392 | "link": 27 393 | }, 394 | { 395 | "name": "image_negative", 396 | "type": "IMAGE", 397 | "link": null 398 | }, 399 | { 400 | "name": "attn_mask", 401 | "type": "MASK", 402 | "link": null 403 | }, 404 | { 405 | "name": "clip_vision", 406 | "type": "CLIP_VISION", 407 | "link": 32 408 | } 409 | ], 410 | "outputs": [ 411 | { 412 | "name": "MODEL", 413 | "type": "MODEL", 414 | "links": [ 415 | 30 416 | ], 417 | "shape": 3, 418 | "slot_index": 0 419 | }, 420 | { 421 | "name": "tiles", 422 | "type": "IMAGE", 423 | "links": null, 424 | "shape": 3 425 | }, 426 | { 427 | "name": "masks", 428 | "type": "MASK", 429 | "links": null, 430 | "shape": 3 431 | } 432 | ], 433 | "properties": { 434 | "Node name for S&R": "IPAdapterTiledV2" 435 | }, 436 | "widgets_values": [ 437 | 0.7000000000000001, 438 | "ease in", 439 | "concat", 440 | 0, 441 | 1, 442 | 0 443 | ] 444 | }, 445 | { 446 | "id": 9, 447 | "type": "SaveImage", 448 | "pos": [ 449 | 1768, 450 | 700 451 | ], 452 | "size": { 453 | "0": 529.7760009765625, 454 | "1": 582.3048095703125 455 | }, 456 | "flags": {}, 457 | "order": 10, 458 | "mode": 0, 459 | "inputs": [ 460 | { 461 | "name": "images", 462 | "type": "IMAGE", 463 | "link": 9 464 | } 465 | ], 466 | "properties": {}, 467 | "widgets_values": [ 468 | "IPAdapterV2" 469 | ] 470 | } 471 | ], 472 | "links": [ 473 | [ 474 | 2, 475 | 5, 476 | 0, 477 | 3, 478 | 3, 479 | "LATENT" 480 | ], 481 | [ 482 | 3, 483 | 4, 484 | 1, 485 | 6, 486 | 0, 487 | "CLIP" 488 | ], 489 | [ 490 | 4, 491 | 6, 492 | 0, 493 | 3, 494 | 1, 495 | "CONDITIONING" 496 | ], 497 | [ 498 | 5, 499 | 4, 500 | 1, 501 | 7, 502 | 0, 503 | "CLIP" 504 | ], 505 | [ 506 | 6, 507 | 7, 508 | 0, 509 | 3, 510 | 2, 511 | "CONDITIONING" 512 | ], 513 | [ 514 | 7, 515 | 3, 516 | 0, 517 | 8, 518 | 0, 519 | "LATENT" 520 | ], 521 | [ 522 | 8, 523 | 4, 524 | 2, 525 | 8, 526 | 1, 527 | "VAE" 528 | ], 529 | [ 530 | 9, 531 | 8, 532 | 0, 533 | 9, 534 | 0, 535 | "IMAGE" 536 | ], 537 | [ 538 | 27, 539 | 12, 540 | 0, 541 | 18, 542 | 2, 543 | "IMAGE" 544 | ], 545 | [ 546 | 29, 547 | 4, 548 | 0, 549 | 18, 550 | 0, 551 | "MODEL" 552 | ], 553 | [ 554 | 30, 555 | 18, 556 | 0, 557 | 3, 558 | 0, 559 | "MODEL" 560 | ], 561 | [ 562 | 31, 563 | 15, 564 | 0, 565 | 18, 566 | 1, 567 | "IPADAPTER" 568 | ], 569 | [ 570 | 32, 571 | 16, 572 | 0, 573 | 18, 574 | 5, 575 | "CLIP_VISION" 576 | ] 577 | ], 578 | "groups": [], 579 | "config": {}, 580 | "extra": {}, 581 | "version": 0.4 582 | } -------------------------------------------------------------------------------- /examples/ipadapter_weighted_embeds.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 18, 3 | "last_link_id": 29, 4 | "nodes": [ 5 | { 6 | "id": 4, 7 | "type": "CheckpointLoaderSimple", 8 | "pos": [ 9 | 50, 10 | 730 11 | ], 12 | "size": { 13 | "0": 315, 14 | "1": 98 15 | }, 16 | "flags": {}, 17 | "order": 0, 18 | "mode": 0, 19 | "outputs": [ 20 | { 21 | "name": "MODEL", 22 | "type": "MODEL", 23 | "links": [ 24 | 10 25 | ], 26 | "slot_index": 0 27 | }, 28 | { 29 | "name": "CLIP", 30 | "type": "CLIP", 31 | "links": [ 32 | 3, 33 | 5 34 | ], 35 | "slot_index": 1 36 | }, 
37 | { 38 | "name": "VAE", 39 | "type": "VAE", 40 | "links": [ 41 | 8 42 | ], 43 | "slot_index": 2 44 | } 45 | ], 46 | "properties": { 47 | "Node name for S&R": "CheckpointLoaderSimple" 48 | }, 49 | "widgets_values": [ 50 | "sd15/realisticVisionV51_v51VAE.safetensors" 51 | ] 52 | }, 53 | { 54 | "id": 6, 55 | "type": "CLIPTextEncode", 56 | "pos": [ 57 | 690, 58 | 610 59 | ], 60 | "size": { 61 | "0": 422.84503173828125, 62 | "1": 164.31304931640625 63 | }, 64 | "flags": {}, 65 | "order": 5, 66 | "mode": 0, 67 | "inputs": [ 68 | { 69 | "name": "clip", 70 | "type": "CLIP", 71 | "link": 3 72 | } 73 | ], 74 | "outputs": [ 75 | { 76 | "name": "CONDITIONING", 77 | "type": "CONDITIONING", 78 | "links": [ 79 | 4 80 | ], 81 | "slot_index": 0 82 | } 83 | ], 84 | "properties": { 85 | "Node name for S&R": "CLIPTextEncode" 86 | }, 87 | "widgets_values": [ 88 | "closeup of a fierce warrior woman wearing a full armor at the end of a battle\n\nhigh quality, detailed" 89 | ] 90 | }, 91 | { 92 | "id": 9, 93 | "type": "SaveImage", 94 | "pos": [ 95 | 1770, 96 | 710 97 | ], 98 | "size": { 99 | "0": 529.7760009765625, 100 | "1": 582.3048095703125 101 | }, 102 | "flags": {}, 103 | "order": 13, 104 | "mode": 0, 105 | "inputs": [ 106 | { 107 | "name": "images", 108 | "type": "IMAGE", 109 | "link": 9 110 | } 111 | ], 112 | "properties": {}, 113 | "widgets_values": [ 114 | "IPAdapterV2" 115 | ] 116 | }, 117 | { 118 | "id": 8, 119 | "type": "VAEDecode", 120 | "pos": [ 121 | 1570, 122 | 700 123 | ], 124 | "size": { 125 | "0": 140, 126 | "1": 46 127 | }, 128 | "flags": {}, 129 | "order": 12, 130 | "mode": 0, 131 | "inputs": [ 132 | { 133 | "name": "samples", 134 | "type": "LATENT", 135 | "link": 7 136 | }, 137 | { 138 | "name": "vae", 139 | "type": "VAE", 140 | "link": 8 141 | } 142 | ], 143 | "outputs": [ 144 | { 145 | "name": "IMAGE", 146 | "type": "IMAGE", 147 | "links": [ 148 | 9 149 | ], 150 | "slot_index": 0 151 | } 152 | ], 153 | "properties": { 154 | "Node name for S&R": "VAEDecode" 155 | } 156 | }, 157 | { 158 | "id": 5, 159 | "type": "EmptyLatentImage", 160 | "pos": [ 161 | 801, 162 | 1097 163 | ], 164 | "size": { 165 | "0": 315, 166 | "1": 106 167 | }, 168 | "flags": {}, 169 | "order": 1, 170 | "mode": 0, 171 | "outputs": [ 172 | { 173 | "name": "LATENT", 174 | "type": "LATENT", 175 | "links": [ 176 | 2 177 | ], 178 | "slot_index": 0 179 | } 180 | ], 181 | "properties": { 182 | "Node name for S&R": "EmptyLatentImage" 183 | }, 184 | "widgets_values": [ 185 | 512, 186 | 512, 187 | 1 188 | ] 189 | }, 190 | { 191 | "id": 12, 192 | "type": "LoadImage", 193 | "pos": [ 194 | 453, 195 | -296 196 | ], 197 | "size": { 198 | "0": 315, 199 | "1": 314 200 | }, 201 | "flags": {}, 202 | "order": 2, 203 | "mode": 0, 204 | "outputs": [ 205 | { 206 | "name": "IMAGE", 207 | "type": "IMAGE", 208 | "links": [ 209 | 21 210 | ], 211 | "shape": 3, 212 | "slot_index": 0 213 | }, 214 | { 215 | "name": "MASK", 216 | "type": "MASK", 217 | "links": null, 218 | "shape": 3 219 | } 220 | ], 221 | "properties": { 222 | "Node name for S&R": "LoadImage" 223 | }, 224 | "widgets_values": [ 225 | "warrior_woman.png", 226 | "image" 227 | ] 228 | }, 229 | { 230 | "id": 15, 231 | "type": "LoadImage", 232 | "pos": [ 233 | 458, 234 | 70 235 | ], 236 | "size": { 237 | "0": 315, 238 | "1": 314 239 | }, 240 | "flags": {}, 241 | "order": 3, 242 | "mode": 0, 243 | "outputs": [ 244 | { 245 | "name": "IMAGE", 246 | "type": "IMAGE", 247 | "links": [ 248 | 23 249 | ], 250 | "shape": 3, 251 | "slot_index": 0 252 | }, 253 | { 254 | "name": "MASK", 255 | "type": 
"MASK", 256 | "links": null, 257 | "shape": 3 258 | } 259 | ], 260 | "properties": { 261 | "Node name for S&R": "LoadImage" 262 | }, 263 | "widgets_values": [ 264 | "anime_illustration.png", 265 | "image" 266 | ] 267 | }, 268 | { 269 | "id": 11, 270 | "type": "IPAdapterUnifiedLoaderV2", 271 | "pos": [ 272 | 440, 273 | 440 274 | ], 275 | "size": { 276 | "0": 315, 277 | "1": 78 278 | }, 279 | "flags": {}, 280 | "order": 4, 281 | "mode": 0, 282 | "inputs": [ 283 | { 284 | "name": "model", 285 | "type": "MODEL", 286 | "link": 10 287 | }, 288 | { 289 | "name": "ipadapter", 290 | "type": "IPADAPTER", 291 | "link": null 292 | } 293 | ], 294 | "outputs": [ 295 | { 296 | "name": "model", 297 | "type": "MODEL", 298 | "links": [ 299 | 19 300 | ], 301 | "shape": 3, 302 | "slot_index": 0 303 | }, 304 | { 305 | "name": "ipadapter", 306 | "type": "IPADAPTER", 307 | "links": [ 308 | 20, 309 | 22, 310 | 27 311 | ], 312 | "shape": 3, 313 | "slot_index": 1 314 | } 315 | ], 316 | "properties": { 317 | "Node name for S&R": "IPAdapterUnifiedLoaderV2" 318 | }, 319 | "widgets_values": [ 320 | "PLUS (high strength)" 321 | ] 322 | }, 323 | { 324 | "id": 3, 325 | "type": "KSampler", 326 | "pos": [ 327 | 1210, 328 | 700 329 | ], 330 | "size": { 331 | "0": 315, 332 | "1": 262 333 | }, 334 | "flags": {}, 335 | "order": 11, 336 | "mode": 0, 337 | "inputs": [ 338 | { 339 | "name": "model", 340 | "type": "MODEL", 341 | "link": 28 342 | }, 343 | { 344 | "name": "positive", 345 | "type": "CONDITIONING", 346 | "link": 4 347 | }, 348 | { 349 | "name": "negative", 350 | "type": "CONDITIONING", 351 | "link": 6 352 | }, 353 | { 354 | "name": "latent_image", 355 | "type": "LATENT", 356 | "link": 2 357 | } 358 | ], 359 | "outputs": [ 360 | { 361 | "name": "LATENT", 362 | "type": "LATENT", 363 | "links": [ 364 | 7 365 | ], 366 | "slot_index": 0 367 | } 368 | ], 369 | "properties": { 370 | "Node name for S&R": "KSampler" 371 | }, 372 | "widgets_values": [ 373 | 0, 374 | "fixed", 375 | 30, 376 | 6.5, 377 | "dpmpp_2m", 378 | "karras", 379 | 1 380 | ] 381 | }, 382 | { 383 | "id": 7, 384 | "type": "CLIPTextEncode", 385 | "pos": [ 386 | 690, 387 | 840 388 | ], 389 | "size": { 390 | "0": 425.27801513671875, 391 | "1": 180.6060791015625 392 | }, 393 | "flags": {}, 394 | "order": 6, 395 | "mode": 0, 396 | "inputs": [ 397 | { 398 | "name": "clip", 399 | "type": "CLIP", 400 | "link": 5 401 | } 402 | ], 403 | "outputs": [ 404 | { 405 | "name": "CONDITIONING", 406 | "type": "CONDITIONING", 407 | "links": [ 408 | 6 409 | ], 410 | "slot_index": 0 411 | } 412 | ], 413 | "properties": { 414 | "Node name for S&R": "CLIPTextEncode" 415 | }, 416 | "widgets_values": [ 417 | "blurry, noisy, messy, lowres, jpeg, artifacts, ill, distorted, malformed, hat, hood, scars, blood" 418 | ] 419 | }, 420 | { 421 | "id": 17, 422 | "type": "IPAdapterEncoderV2", 423 | "pos": [ 424 | 859, 425 | 69 426 | ], 427 | "size": [ 428 | 210, 429 | 118 430 | ], 431 | "flags": {}, 432 | "order": 8, 433 | "mode": 0, 434 | "inputs": [ 435 | { 436 | "name": "ipadapter", 437 | "type": "IPADAPTER", 438 | "link": 22 439 | }, 440 | { 441 | "name": "image", 442 | "type": "IMAGE", 443 | "link": 23 444 | }, 445 | { 446 | "name": "mask", 447 | "type": "MASK", 448 | "link": null 449 | }, 450 | { 451 | "name": "clip_vision", 452 | "type": "CLIP_VISION", 453 | "link": null 454 | } 455 | ], 456 | "outputs": [ 457 | { 458 | "name": "pos_embed", 459 | "type": "EMBEDS", 460 | "links": [ 461 | 25 462 | ], 463 | "shape": 3, 464 | "slot_index": 0 465 | }, 466 | { 467 | "name": "neg_embed", 468 | 
"type": "EMBEDS", 469 | "links": null, 470 | "shape": 3 471 | } 472 | ], 473 | "properties": { 474 | "Node name for S&R": "IPAdapterEncoderV2" 475 | }, 476 | "widgets_values": [ 477 | 1.5 478 | ] 479 | }, 480 | { 481 | "id": 18, 482 | "type": "IPAdapterCombineEmbedsV2", 483 | "pos": [ 484 | 1136, 485 | -95 486 | ], 487 | "size": [ 488 | 210, 489 | 138 490 | ], 491 | "flags": {}, 492 | "order": 9, 493 | "mode": 0, 494 | "inputs": [ 495 | { 496 | "name": "embed1", 497 | "type": "EMBEDS", 498 | "link": 24 499 | }, 500 | { 501 | "name": "embed2", 502 | "type": "EMBEDS", 503 | "link": 25 504 | }, 505 | { 506 | "name": "embed3", 507 | "type": "EMBEDS", 508 | "link": null 509 | }, 510 | { 511 | "name": "embed4", 512 | "type": "EMBEDS", 513 | "link": null 514 | }, 515 | { 516 | "name": "embed5", 517 | "type": "EMBEDS", 518 | "link": null 519 | } 520 | ], 521 | "outputs": [ 522 | { 523 | "name": "EMBEDS", 524 | "type": "EMBEDS", 525 | "links": [ 526 | 26 527 | ], 528 | "shape": 3, 529 | "slot_index": 0 530 | } 531 | ], 532 | "properties": { 533 | "Node name for S&R": "IPAdapterCombineEmbedsV2" 534 | }, 535 | "widgets_values": [ 536 | "average" 537 | ] 538 | }, 539 | { 540 | "id": 14, 541 | "type": "IPAdapterEmbedsV2", 542 | "pos": [ 543 | 1143, 544 | 160 545 | ], 546 | "size": { 547 | "0": 315, 548 | "1": 230 549 | }, 550 | "flags": {}, 551 | "order": 10, 552 | "mode": 0, 553 | "inputs": [ 554 | { 555 | "name": "model", 556 | "type": "MODEL", 557 | "link": 19 558 | }, 559 | { 560 | "name": "ipadapter", 561 | "type": "IPADAPTER", 562 | "link": 27 563 | }, 564 | { 565 | "name": "pos_embed", 566 | "type": "EMBEDS", 567 | "link": 26 568 | }, 569 | { 570 | "name": "neg_embed", 571 | "type": "EMBEDS", 572 | "link": 29 573 | }, 574 | { 575 | "name": "attn_mask", 576 | "type": "MASK", 577 | "link": null 578 | }, 579 | { 580 | "name": "clip_vision", 581 | "type": "CLIP_VISION", 582 | "link": null 583 | } 584 | ], 585 | "outputs": [ 586 | { 587 | "name": "MODEL", 588 | "type": "MODEL", 589 | "links": [ 590 | 28 591 | ], 592 | "shape": 3, 593 | "slot_index": 0 594 | } 595 | ], 596 | "properties": { 597 | "Node name for S&R": "IPAdapterEmbedsV2" 598 | }, 599 | "widgets_values": [ 600 | 0.8, 601 | "linear", 602 | 0, 603 | 1 604 | ] 605 | }, 606 | { 607 | "id": 16, 608 | "type": "IPAdapterEncoderV2", 609 | "pos": [ 610 | 863, 611 | -285 612 | ], 613 | "size": [ 614 | 210, 615 | 118 616 | ], 617 | "flags": {}, 618 | "order": 7, 619 | "mode": 0, 620 | "inputs": [ 621 | { 622 | "name": "ipadapter", 623 | "type": "IPADAPTER", 624 | "link": 20 625 | }, 626 | { 627 | "name": "image", 628 | "type": "IMAGE", 629 | "link": 21 630 | }, 631 | { 632 | "name": "mask", 633 | "type": "MASK", 634 | "link": null 635 | }, 636 | { 637 | "name": "clip_vision", 638 | "type": "CLIP_VISION", 639 | "link": null 640 | } 641 | ], 642 | "outputs": [ 643 | { 644 | "name": "pos_embed", 645 | "type": "EMBEDS", 646 | "links": [ 647 | 24 648 | ], 649 | "shape": 3, 650 | "slot_index": 0 651 | }, 652 | { 653 | "name": "neg_embed", 654 | "type": "EMBEDS", 655 | "links": [ 656 | 29 657 | ], 658 | "shape": 3, 659 | "slot_index": 1 660 | } 661 | ], 662 | "properties": { 663 | "Node name for S&R": "IPAdapterEncoderV2" 664 | }, 665 | "widgets_values": [ 666 | 0.6 667 | ] 668 | } 669 | ], 670 | "links": [ 671 | [ 672 | 2, 673 | 5, 674 | 0, 675 | 3, 676 | 3, 677 | "LATENT" 678 | ], 679 | [ 680 | 3, 681 | 4, 682 | 1, 683 | 6, 684 | 0, 685 | "CLIP" 686 | ], 687 | [ 688 | 4, 689 | 6, 690 | 0, 691 | 3, 692 | 1, 693 | "CONDITIONING" 694 | ], 695 | [ 696 | 5, 
697 | 4, 698 | 1, 699 | 7, 700 | 0, 701 | "CLIP" 702 | ], 703 | [ 704 | 6, 705 | 7, 706 | 0, 707 | 3, 708 | 2, 709 | "CONDITIONING" 710 | ], 711 | [ 712 | 7, 713 | 3, 714 | 0, 715 | 8, 716 | 0, 717 | "LATENT" 718 | ], 719 | [ 720 | 8, 721 | 4, 722 | 2, 723 | 8, 724 | 1, 725 | "VAE" 726 | ], 727 | [ 728 | 9, 729 | 8, 730 | 0, 731 | 9, 732 | 0, 733 | "IMAGE" 734 | ], 735 | [ 736 | 10, 737 | 4, 738 | 0, 739 | 11, 740 | 0, 741 | "MODEL" 742 | ], 743 | [ 744 | 19, 745 | 11, 746 | 0, 747 | 14, 748 | 0, 749 | "MODEL" 750 | ], 751 | [ 752 | 20, 753 | 11, 754 | 1, 755 | 16, 756 | 0, 757 | "IPADAPTER" 758 | ], 759 | [ 760 | 21, 761 | 12, 762 | 0, 763 | 16, 764 | 1, 765 | "IMAGE" 766 | ], 767 | [ 768 | 22, 769 | 11, 770 | 1, 771 | 17, 772 | 0, 773 | "IPADAPTER" 774 | ], 775 | [ 776 | 23, 777 | 15, 778 | 0, 779 | 17, 780 | 1, 781 | "IMAGE" 782 | ], 783 | [ 784 | 24, 785 | 16, 786 | 0, 787 | 18, 788 | 0, 789 | "EMBEDS" 790 | ], 791 | [ 792 | 25, 793 | 17, 794 | 0, 795 | 18, 796 | 1, 797 | "EMBEDS" 798 | ], 799 | [ 800 | 26, 801 | 18, 802 | 0, 803 | 14, 804 | 2, 805 | "EMBEDS" 806 | ], 807 | [ 808 | 27, 809 | 11, 810 | 1, 811 | 14, 812 | 1, 813 | "IPADAPTER" 814 | ], 815 | [ 816 | 28, 817 | 14, 818 | 0, 819 | 3, 820 | 0, 821 | "MODEL" 822 | ], 823 | [ 824 | 29, 825 | 16, 826 | 1, 827 | 14, 828 | 3, 829 | "EMBEDS" 830 | ] 831 | ], 832 | "groups": [], 833 | "config": {}, 834 | "extra": {}, 835 | "version": 0.4 836 | } -------------------------------------------------------------------------------- /examples/ipadapter_weights.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 22, 3 | "last_link_id": 40, 4 | "nodes": [ 5 | { 6 | "id": 7, 7 | "type": "CLIPTextEncode", 8 | "pos": [ 9 | 690, 10 | 840 11 | ], 12 | "size": { 13 | "0": 425.27801513671875, 14 | "1": 180.6060791015625 15 | }, 16 | "flags": {}, 17 | "order": 5, 18 | "mode": 0, 19 | "inputs": [ 20 | { 21 | "name": "clip", 22 | "type": "CLIP", 23 | "link": 5 24 | } 25 | ], 26 | "outputs": [ 27 | { 28 | "name": "CONDITIONING", 29 | "type": "CONDITIONING", 30 | "links": [ 31 | 6 32 | ], 33 | "slot_index": 0 34 | } 35 | ], 36 | "properties": { 37 | "Node name for S&R": "CLIPTextEncode" 38 | }, 39 | "widgets_values": [ 40 | "blurry, noisy, messy, lowres, jpeg, artifacts, ill, distorted, malformed" 41 | ] 42 | }, 43 | { 44 | "id": 8, 45 | "type": "VAEDecode", 46 | "pos": [ 47 | 1570, 48 | 700 49 | ], 50 | "size": { 51 | "0": 140, 52 | "1": 46 53 | }, 54 | "flags": {}, 55 | "order": 11, 56 | "mode": 0, 57 | "inputs": [ 58 | { 59 | "name": "samples", 60 | "type": "LATENT", 61 | "link": 7 62 | }, 63 | { 64 | "name": "vae", 65 | "type": "VAE", 66 | "link": 8 67 | } 68 | ], 69 | "outputs": [ 70 | { 71 | "name": "IMAGE", 72 | "type": "IMAGE", 73 | "links": [ 74 | 9 75 | ], 76 | "slot_index": 0 77 | } 78 | ], 79 | "properties": { 80 | "Node name for S&R": "VAEDecode" 81 | } 82 | }, 83 | { 84 | "id": 6, 85 | "type": "CLIPTextEncode", 86 | "pos": [ 87 | 690, 88 | 610 89 | ], 90 | "size": { 91 | "0": 422.84503173828125, 92 | "1": 164.31304931640625 93 | }, 94 | "flags": {}, 95 | "order": 4, 96 | "mode": 0, 97 | "inputs": [ 98 | { 99 | "name": "clip", 100 | "type": "CLIP", 101 | "link": 3 102 | } 103 | ], 104 | "outputs": [ 105 | { 106 | "name": "CONDITIONING", 107 | "type": "CONDITIONING", 108 | "links": [ 109 | 4 110 | ], 111 | "slot_index": 0 112 | } 113 | ], 114 | "properties": { 115 | "Node name for S&R": "CLIPTextEncode" 116 | }, 117 | "widgets_values": [ 118 | "in a peaceful spring morning a woman 
wearing a white shirt is sitting in a park on a bench\n\nhigh quality, detailed, diffuse light" 119 | ] 120 | }, 121 | { 122 | "id": 3, 123 | "type": "KSampler", 124 | "pos": [ 125 | 1210, 126 | 700 127 | ], 128 | "size": { 129 | "0": 315, 130 | "1": 262 131 | }, 132 | "flags": {}, 133 | "order": 10, 134 | "mode": 0, 135 | "inputs": [ 136 | { 137 | "name": "model", 138 | "type": "MODEL", 139 | "link": 31 140 | }, 141 | { 142 | "name": "positive", 143 | "type": "CONDITIONING", 144 | "link": 4 145 | }, 146 | { 147 | "name": "negative", 148 | "type": "CONDITIONING", 149 | "link": 6 150 | }, 151 | { 152 | "name": "latent_image", 153 | "type": "LATENT", 154 | "link": 2 155 | } 156 | ], 157 | "outputs": [ 158 | { 159 | "name": "LATENT", 160 | "type": "LATENT", 161 | "links": [ 162 | 7 163 | ], 164 | "slot_index": 0 165 | } 166 | ], 167 | "properties": { 168 | "Node name for S&R": "KSampler" 169 | }, 170 | "widgets_values": [ 171 | 0, 172 | "fixed", 173 | 30, 174 | 6.5, 175 | "ddpm", 176 | "karras", 177 | 1 178 | ] 179 | }, 180 | { 181 | "id": 19, 182 | "type": "IPAdapterBatchV2", 183 | "pos": [ 184 | 1173, 185 | 251 186 | ], 187 | "size": { 188 | "0": 315, 189 | "1": 254 190 | }, 191 | "flags": {}, 192 | "order": 9, 193 | "mode": 0, 194 | "inputs": [ 195 | { 196 | "name": "model", 197 | "type": "MODEL", 198 | "link": 37 199 | }, 200 | { 201 | "name": "ipadapter", 202 | "type": "IPADAPTER", 203 | "link": 29 204 | }, 205 | { 206 | "name": "image", 207 | "type": "IMAGE", 208 | "link": 30 209 | }, 210 | { 211 | "name": "image_negative", 212 | "type": "IMAGE", 213 | "link": null 214 | }, 215 | { 216 | "name": "attn_mask", 217 | "type": "MASK", 218 | "link": null 219 | }, 220 | { 221 | "name": "clip_vision", 222 | "type": "CLIP_VISION", 223 | "link": null 224 | }, 225 | { 226 | "name": "weight", 227 | "type": "FLOAT", 228 | "link": 38, 229 | "widget": { 230 | "name": "weight" 231 | }, 232 | "slot_index": 6 233 | } 234 | ], 235 | "outputs": [ 236 | { 237 | "name": "MODEL", 238 | "type": "MODEL", 239 | "links": [ 240 | 31 241 | ], 242 | "shape": 3, 243 | "slot_index": 0 244 | } 245 | ], 246 | "properties": { 247 | "Node name for S&R": "IPAdapterBatchV2" 248 | }, 249 | "widgets_values": [ 250 | 1, 251 | "linear", 252 | 0, 253 | 1, 254 | "V only" 255 | ] 256 | }, 257 | { 258 | "id": 18, 259 | "type": "IPAdapterUnifiedLoaderV2", 260 | "pos": [ 261 | 303, 262 | 132 263 | ], 264 | "size": { 265 | "0": 315, 266 | "1": 78 267 | }, 268 | "flags": {}, 269 | "order": 3, 270 | "mode": 0, 271 | "inputs": [ 272 | { 273 | "name": "model", 274 | "type": "MODEL", 275 | "link": 36 276 | }, 277 | { 278 | "name": "ipadapter", 279 | "type": "IPADAPTER", 280 | "link": null 281 | } 282 | ], 283 | "outputs": [ 284 | { 285 | "name": "model", 286 | "type": "MODEL", 287 | "links": [ 288 | 37 289 | ], 290 | "shape": 3, 291 | "slot_index": 0 292 | }, 293 | { 294 | "name": "ipadapter", 295 | "type": "IPADAPTER", 296 | "links": [ 297 | 29 298 | ], 299 | "shape": 3 300 | } 301 | ], 302 | "properties": { 303 | "Node name for S&R": "IPAdapterUnifiedLoaderV2" 304 | }, 305 | "widgets_values": [ 306 | "PLUS (high strength)" 307 | ] 308 | }, 309 | { 310 | "id": 4, 311 | "type": "CheckpointLoaderSimple", 312 | "pos": [ 313 | -79, 314 | 712 315 | ], 316 | "size": { 317 | "0": 315, 318 | "1": 98 319 | }, 320 | "flags": {}, 321 | "order": 0, 322 | "mode": 0, 323 | "outputs": [ 324 | { 325 | "name": "MODEL", 326 | "type": "MODEL", 327 | "links": [ 328 | 36 329 | ], 330 | "slot_index": 0 331 | }, 332 | { 333 | "name": "CLIP", 334 | "type": 
"CLIP", 335 | "links": [ 336 | 3, 337 | 5 338 | ], 339 | "slot_index": 1 340 | }, 341 | { 342 | "name": "VAE", 343 | "type": "VAE", 344 | "links": [ 345 | 8 346 | ], 347 | "slot_index": 2 348 | } 349 | ], 350 | "properties": { 351 | "Node name for S&R": "CheckpointLoaderSimple" 352 | }, 353 | "widgets_values": [ 354 | "sd15/realisticVisionV51_v51VAE.safetensors" 355 | ] 356 | }, 357 | { 358 | "id": 17, 359 | "type": "PrepImageForClipVisionV2", 360 | "pos": [ 361 | 788, 362 | 43 363 | ], 364 | "size": { 365 | "0": 315, 366 | "1": 106 367 | }, 368 | "flags": {}, 369 | "order": 6, 370 | "mode": 0, 371 | "inputs": [ 372 | { 373 | "name": "image", 374 | "type": "IMAGE", 375 | "link": 25 376 | } 377 | ], 378 | "outputs": [ 379 | { 380 | "name": "IMAGE", 381 | "type": "IMAGE", 382 | "links": [ 383 | 30 384 | ], 385 | "shape": 3, 386 | "slot_index": 0 387 | } 388 | ], 389 | "properties": { 390 | "Node name for S&R": "PrepImageForClipVisionV2" 391 | }, 392 | "widgets_values": [ 393 | "LANCZOS", 394 | "top", 395 | 0.15 396 | ] 397 | }, 398 | { 399 | "id": 9, 400 | "type": "SaveImage", 401 | "pos": [ 402 | 1770, 403 | 710 404 | ], 405 | "size": { 406 | "0": 556.2374267578125, 407 | "1": 892.1895751953125 408 | }, 409 | "flags": {}, 410 | "order": 12, 411 | "mode": 0, 412 | "inputs": [ 413 | { 414 | "name": "images", 415 | "type": "IMAGE", 416 | "link": 9 417 | } 418 | ], 419 | "properties": {}, 420 | "widgets_values": [ 421 | "IPAdapterV2" 422 | ] 423 | }, 424 | { 425 | "id": 12, 426 | "type": "LoadImage", 427 | "pos": [ 428 | 311, 429 | 270 430 | ], 431 | "size": { 432 | "0": 315, 433 | "1": 314 434 | }, 435 | "flags": {}, 436 | "order": 1, 437 | "mode": 0, 438 | "outputs": [ 439 | { 440 | "name": "IMAGE", 441 | "type": "IMAGE", 442 | "links": [ 443 | 25 444 | ], 445 | "shape": 3, 446 | "slot_index": 0 447 | }, 448 | { 449 | "name": "MASK", 450 | "type": "MASK", 451 | "links": null, 452 | "shape": 3 453 | } 454 | ], 455 | "properties": { 456 | "Node name for S&R": "LoadImage" 457 | }, 458 | "widgets_values": [ 459 | "warrior_woman.png", 460 | "image" 461 | ] 462 | }, 463 | { 464 | "id": 5, 465 | "type": "EmptyLatentImage", 466 | "pos": [ 467 | 801, 468 | 1097 469 | ], 470 | "size": [ 471 | 309.1109879864148, 472 | 82 473 | ], 474 | "flags": {}, 475 | "order": 7, 476 | "mode": 0, 477 | "inputs": [ 478 | { 479 | "name": "batch_size", 480 | "type": "INT", 481 | "link": 35, 482 | "widget": { 483 | "name": "batch_size" 484 | } 485 | } 486 | ], 487 | "outputs": [ 488 | { 489 | "name": "LATENT", 490 | "type": "LATENT", 491 | "links": [ 492 | 2 493 | ], 494 | "slot_index": 0 495 | } 496 | ], 497 | "properties": { 498 | "Node name for S&R": "EmptyLatentImage" 499 | }, 500 | "widgets_values": [ 501 | 512, 502 | 512, 503 | 6 504 | ] 505 | }, 506 | { 507 | "id": 21, 508 | "type": "PrimitiveNode", 509 | "pos": [ 510 | 340, 511 | 1093 512 | ], 513 | "size": { 514 | "0": 210, 515 | "1": 82 516 | }, 517 | "flags": {}, 518 | "order": 2, 519 | "mode": 0, 520 | "outputs": [ 521 | { 522 | "name": "INT", 523 | "type": "INT", 524 | "links": [ 525 | 35, 526 | 40 527 | ], 528 | "widget": { 529 | "name": "batch_size" 530 | }, 531 | "slot_index": 0 532 | } 533 | ], 534 | "title": "frames", 535 | "properties": { 536 | "Run widget replace on values": false 537 | }, 538 | "widgets_values": [ 539 | 6, 540 | "fixed" 541 | ] 542 | }, 543 | { 544 | "id": 22, 545 | "type": "IPAdapterWeightsV2", 546 | "pos": [ 547 | 761, 548 | 208 549 | ], 550 | "size": [ 551 | 299.9049990375719, 552 | 324.00000762939453 553 | ], 554 | "flags": {}, 
555 | "order": 8, 556 | "mode": 0, 557 | "inputs": [ 558 | { 559 | "name": "image", 560 | "type": "IMAGE", 561 | "link": null 562 | }, 563 | { 564 | "name": "frames", 565 | "type": "INT", 566 | "link": 40, 567 | "widget": { 568 | "name": "frames" 569 | } 570 | } 571 | ], 572 | "outputs": [ 573 | { 574 | "name": "weights", 575 | "type": "FLOAT", 576 | "links": [ 577 | 38 578 | ], 579 | "shape": 3, 580 | "slot_index": 0 581 | }, 582 | { 583 | "name": "weights_invert", 584 | "type": "FLOAT", 585 | "links": null, 586 | "shape": 3 587 | }, 588 | { 589 | "name": "total_frames", 590 | "type": "INT", 591 | "links": null, 592 | "shape": 3 593 | }, 594 | { 595 | "name": "image_1", 596 | "type": "IMAGE", 597 | "links": null, 598 | "shape": 3 599 | }, 600 | { 601 | "name": "image_2", 602 | "type": "IMAGE", 603 | "links": null, 604 | "shape": 3 605 | } 606 | ], 607 | "properties": { 608 | "Node name for S&R": "IPAdapterWeightsV2" 609 | }, 610 | "widgets_values": [ 611 | "1.0, 0.0", 612 | "linear", 613 | 6, 614 | 0, 615 | 9999, 616 | 0, 617 | 0, 618 | "full batch" 619 | ] 620 | } 621 | ], 622 | "links": [ 623 | [ 624 | 2, 625 | 5, 626 | 0, 627 | 3, 628 | 3, 629 | "LATENT" 630 | ], 631 | [ 632 | 3, 633 | 4, 634 | 1, 635 | 6, 636 | 0, 637 | "CLIP" 638 | ], 639 | [ 640 | 4, 641 | 6, 642 | 0, 643 | 3, 644 | 1, 645 | "CONDITIONING" 646 | ], 647 | [ 648 | 5, 649 | 4, 650 | 1, 651 | 7, 652 | 0, 653 | "CLIP" 654 | ], 655 | [ 656 | 6, 657 | 7, 658 | 0, 659 | 3, 660 | 2, 661 | "CONDITIONING" 662 | ], 663 | [ 664 | 7, 665 | 3, 666 | 0, 667 | 8, 668 | 0, 669 | "LATENT" 670 | ], 671 | [ 672 | 8, 673 | 4, 674 | 2, 675 | 8, 676 | 1, 677 | "VAE" 678 | ], 679 | [ 680 | 9, 681 | 8, 682 | 0, 683 | 9, 684 | 0, 685 | "IMAGE" 686 | ], 687 | [ 688 | 25, 689 | 12, 690 | 0, 691 | 17, 692 | 0, 693 | "IMAGE" 694 | ], 695 | [ 696 | 29, 697 | 18, 698 | 1, 699 | 19, 700 | 1, 701 | "IPADAPTER" 702 | ], 703 | [ 704 | 30, 705 | 17, 706 | 0, 707 | 19, 708 | 2, 709 | "IMAGE" 710 | ], 711 | [ 712 | 31, 713 | 19, 714 | 0, 715 | 3, 716 | 0, 717 | "MODEL" 718 | ], 719 | [ 720 | 35, 721 | 21, 722 | 0, 723 | 5, 724 | 0, 725 | "INT" 726 | ], 727 | [ 728 | 36, 729 | 4, 730 | 0, 731 | 18, 732 | 0, 733 | "MODEL" 734 | ], 735 | [ 736 | 37, 737 | 18, 738 | 0, 739 | 19, 740 | 0, 741 | "MODEL" 742 | ], 743 | [ 744 | 38, 745 | 22, 746 | 0, 747 | 19, 748 | 6, 749 | "FLOAT" 750 | ], 751 | [ 752 | 40, 753 | 21, 754 | 0, 755 | 22, 756 | 1, 757 | "INT" 758 | ] 759 | ], 760 | "groups": [], 761 | "config": {}, 762 | "extra": {}, 763 | "version": 0.4 764 | } -------------------------------------------------------------------------------- /image_proj_models.py: -------------------------------------------------------------------------------- 1 | import math 2 | import torch 3 | import torch.nn as nn 4 | from einops import rearrange 5 | from einops.layers.torch import Rearrange 6 | 7 | 8 | # FFN 9 | def FeedForward(dim, mult=4): 10 | inner_dim = int(dim * mult) 11 | return nn.Sequential( 12 | nn.LayerNorm(dim), 13 | nn.Linear(dim, inner_dim, bias=False), 14 | nn.GELU(), 15 | nn.Linear(inner_dim, dim, bias=False), 16 | ) 17 | 18 | 19 | def reshape_tensor(x, heads): 20 | bs, length, width = x.shape 21 | # (bs, length, width) --> (bs, length, n_heads, dim_per_head) 22 | x = x.view(bs, length, heads, -1) 23 | # (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head) 24 | x = x.transpose(1, 2) 25 | # (bs, n_heads, length, dim_per_head) --> (bs*n_heads, length, dim_per_head) 26 | x = x.reshape(bs, heads, length, -1) 27 | return x 28 | 29 | 30 | 
class PerceiverAttention(nn.Module): 31 | def __init__(self, *, dim, dim_head=64, heads=8): 32 | super().__init__() 33 | self.scale = dim_head**-0.5 34 | self.dim_head = dim_head 35 | self.heads = heads 36 | inner_dim = dim_head * heads 37 | 38 | self.norm1 = nn.LayerNorm(dim) 39 | self.norm2 = nn.LayerNorm(dim) 40 | 41 | self.to_q = nn.Linear(dim, inner_dim, bias=False) 42 | self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False) 43 | self.to_out = nn.Linear(inner_dim, dim, bias=False) 44 | 45 | def forward(self, x, latents): 46 | """ 47 | Args: 48 | x (torch.Tensor): image features 49 | shape (b, n1, D) 50 | latent (torch.Tensor): latent features 51 | shape (b, n2, D) 52 | """ 53 | x = self.norm1(x) 54 | latents = self.norm2(latents) 55 | 56 | b, l, _ = latents.shape 57 | 58 | q = self.to_q(latents) 59 | kv_input = torch.cat((x, latents), dim=-2) 60 | k, v = self.to_kv(kv_input).chunk(2, dim=-1) 61 | 62 | q = reshape_tensor(q, self.heads) 63 | k = reshape_tensor(k, self.heads) 64 | v = reshape_tensor(v, self.heads) 65 | 66 | # attention 67 | scale = 1 / math.sqrt(math.sqrt(self.dim_head)) 68 | weight = (q * scale) @ (k * scale).transpose(-2, -1) # More stable with f16 than dividing afterwards 69 | weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype) 70 | out = weight @ v 71 | 72 | out = out.permute(0, 2, 1, 3).reshape(b, l, -1) 73 | 74 | return self.to_out(out) 75 | 76 | 77 | class Resampler(nn.Module): 78 | def __init__( 79 | self, 80 | dim=1024, 81 | depth=8, 82 | dim_head=64, 83 | heads=16, 84 | num_queries=8, 85 | embedding_dim=768, 86 | output_dim=1024, 87 | ff_mult=4, 88 | max_seq_len: int = 257, # CLIP tokens + CLS token 89 | apply_pos_emb: bool = False, 90 | num_latents_mean_pooled: int = 0, # number of latents derived from mean pooled representation of the sequence 91 | ): 92 | super().__init__() 93 | self.pos_emb = nn.Embedding(max_seq_len, embedding_dim) if apply_pos_emb else None 94 | 95 | self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5) 96 | 97 | self.proj_in = nn.Linear(embedding_dim, dim) 98 | 99 | self.proj_out = nn.Linear(dim, output_dim) 100 | self.norm_out = nn.LayerNorm(output_dim) 101 | 102 | self.to_latents_from_mean_pooled_seq = ( 103 | nn.Sequential( 104 | nn.LayerNorm(dim), 105 | nn.Linear(dim, dim * num_latents_mean_pooled), 106 | Rearrange("b (n d) -> b n d", n=num_latents_mean_pooled), 107 | ) 108 | if num_latents_mean_pooled > 0 109 | else None 110 | ) 111 | 112 | self.layers = nn.ModuleList([]) 113 | for _ in range(depth): 114 | self.layers.append( 115 | nn.ModuleList( 116 | [ 117 | PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads), 118 | FeedForward(dim=dim, mult=ff_mult), 119 | ] 120 | ) 121 | ) 122 | 123 | def forward(self, x): 124 | if self.pos_emb is not None: 125 | n, device = x.shape[1], x.device 126 | pos_emb = self.pos_emb(torch.arange(n, device=device)) 127 | x = x + pos_emb 128 | 129 | latents = self.latents.repeat(x.size(0), 1, 1) 130 | 131 | x = self.proj_in(x) 132 | 133 | if self.to_latents_from_mean_pooled_seq: 134 | meanpooled_seq = masked_mean(x, dim=1, mask=torch.ones(x.shape[:2], device=x.device, dtype=torch.bool)) 135 | meanpooled_latents = self.to_latents_from_mean_pooled_seq(meanpooled_seq) 136 | latents = torch.cat((meanpooled_latents, latents), dim=-2) 137 | 138 | for attn, ff in self.layers: 139 | latents = attn(x, latents) + latents 140 | latents = ff(latents) + latents 141 | 142 | latents = self.proj_out(latents) 143 | return self.norm_out(latents) 144 | 145 | 146 | def masked_mean(t, 
*, dim, mask=None): 147 | if mask is None: 148 | return t.mean(dim=dim) 149 | 150 | denom = mask.sum(dim=dim, keepdim=True) 151 | mask = rearrange(mask, "b n -> b n 1") 152 | masked_t = t.masked_fill(~mask, 0.0) 153 | 154 | return masked_t.sum(dim=dim) / denom.clamp(min=1e-5) 155 | 156 | 157 | class FacePerceiverResampler(nn.Module): 158 | def __init__( 159 | self, 160 | *, 161 | dim=768, 162 | depth=4, 163 | dim_head=64, 164 | heads=16, 165 | embedding_dim=1280, 166 | output_dim=768, 167 | ff_mult=4, 168 | ): 169 | super().__init__() 170 | 171 | self.proj_in = nn.Linear(embedding_dim, dim) 172 | self.proj_out = nn.Linear(dim, output_dim) 173 | self.norm_out = nn.LayerNorm(output_dim) 174 | self.layers = nn.ModuleList([]) 175 | for _ in range(depth): 176 | self.layers.append( 177 | nn.ModuleList( 178 | [ 179 | PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads), 180 | FeedForward(dim=dim, mult=ff_mult), 181 | ] 182 | ) 183 | ) 184 | 185 | def forward(self, latents, x): 186 | x = self.proj_in(x) 187 | for attn, ff in self.layers: 188 | latents = attn(x, latents) + latents 189 | latents = ff(latents) + latents 190 | latents = self.proj_out(latents) 191 | return self.norm_out(latents) 192 | 193 | 194 | class MLPProjModel(nn.Module): 195 | def __init__(self, cross_attention_dim=1024, clip_embeddings_dim=1024): 196 | super().__init__() 197 | 198 | self.proj = nn.Sequential( 199 | nn.Linear(clip_embeddings_dim, clip_embeddings_dim), 200 | nn.GELU(), 201 | nn.Linear(clip_embeddings_dim, cross_attention_dim), 202 | nn.LayerNorm(cross_attention_dim) 203 | ) 204 | 205 | def forward(self, image_embeds): 206 | clip_extra_context_tokens = self.proj(image_embeds) 207 | return clip_extra_context_tokens 208 | 209 | class MLPProjModelFaceId(nn.Module): 210 | def __init__(self, cross_attention_dim=768, id_embeddings_dim=512, num_tokens=4): 211 | super().__init__() 212 | 213 | self.cross_attention_dim = cross_attention_dim 214 | self.num_tokens = num_tokens 215 | 216 | self.proj = nn.Sequential( 217 | nn.Linear(id_embeddings_dim, id_embeddings_dim*2), 218 | nn.GELU(), 219 | nn.Linear(id_embeddings_dim*2, cross_attention_dim*num_tokens), 220 | ) 221 | self.norm = nn.LayerNorm(cross_attention_dim) 222 | 223 | def forward(self, id_embeds): 224 | x = self.proj(id_embeds) 225 | x = x.reshape(-1, self.num_tokens, self.cross_attention_dim) 226 | x = self.norm(x) 227 | return x 228 | 229 | class ProjModelFaceIdPlus(nn.Module): 230 | def __init__(self, cross_attention_dim=768, id_embeddings_dim=512, clip_embeddings_dim=1280, num_tokens=4): 231 | super().__init__() 232 | 233 | self.cross_attention_dim = cross_attention_dim 234 | self.num_tokens = num_tokens 235 | 236 | self.proj = nn.Sequential( 237 | nn.Linear(id_embeddings_dim, id_embeddings_dim*2), 238 | nn.GELU(), 239 | nn.Linear(id_embeddings_dim*2, cross_attention_dim*num_tokens), 240 | ) 241 | self.norm = nn.LayerNorm(cross_attention_dim) 242 | 243 | self.perceiver_resampler = FacePerceiverResampler( 244 | dim=cross_attention_dim, 245 | depth=4, 246 | dim_head=64, 247 | heads=cross_attention_dim // 64, 248 | embedding_dim=clip_embeddings_dim, 249 | output_dim=cross_attention_dim, 250 | ff_mult=4, 251 | ) 252 | 253 | def forward(self, id_embeds, clip_embeds, scale=1.0, shortcut=False): 254 | x = self.proj(id_embeds) 255 | x = x.reshape(-1, self.num_tokens, self.cross_attention_dim) 256 | x = self.norm(x) 257 | out = self.perceiver_resampler(x, clip_embeds) 258 | if shortcut: 259 | out = x + scale * out 260 | return out 261 | 262 | class 
ImageProjModel(nn.Module): 263 | def __init__(self, cross_attention_dim=1024, clip_embeddings_dim=1024, clip_extra_context_tokens=4): 264 | super().__init__() 265 | 266 | self.cross_attention_dim = cross_attention_dim 267 | self.clip_extra_context_tokens = clip_extra_context_tokens 268 | self.proj = nn.Linear(clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim) 269 | self.norm = nn.LayerNorm(cross_attention_dim) 270 | 271 | def forward(self, image_embeds): 272 | embeds = image_embeds 273 | x = self.proj(embeds).reshape(-1, self.clip_extra_context_tokens, self.cross_attention_dim) 274 | x = self.norm(x) 275 | return x 276 | -------------------------------------------------------------------------------- /layer_weights_slider.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | class IPA_LayerWeightsSlider: 4 | 5 | def __init__(self): 6 | pass 7 | 8 | @classmethod 9 | def INPUT_TYPES(self): 10 | 11 | return { 12 | "required": { 13 | "layer0": ("FLOAT", {"default": 0, "min": -1, "max": 1, "step": 0.01, "display": "slider"}), 14 | "layer1": ("FLOAT", {"default": 0, "min": -1, "max": 1, "step": 0.01, "display": "slider"}), 15 | "layer2": ("FLOAT", {"default": 0, "min": -1, "max": 1, "step": 0.01, "display": "slider"}), 16 | "layer3": ("FLOAT", {"default": 0, "min": -1, "max": 1, "step": 0.01, "display": "slider"}), 17 | "layer4": ("FLOAT", {"default": 0, "min": -1, "max": 1, "step": 0.01, "display": "slider"}), 18 | "layer5": ("FLOAT", {"default": 0, "min": -1, "max": 1, "step": 0.01, "display": "slider"}), 19 | "layer6": ("FLOAT", {"default": 0, "min": -1, "max": 1, "step": 0.01, "display": "slider"}), 20 | "layer7": ("FLOAT", {"default": 0, "min": -1, "max": 1, "step": 0.01, "display": "slider"}), 21 | "layer8": ("FLOAT", {"default": 0, "min": -1, "max": 1, "step": 0.01, "display": "slider"}), 22 | "layer9": ("FLOAT", {"default": 0, "min": -1, "max": 1, "step": 0.01, "display": "slider"}), 23 | "layer10": ("FLOAT", {"default": 0, "min": -1, "max": 1, "step": 0.01, "display": "slider"}), 24 | "layer11": ("FLOAT", {"default": 0, "min": -1, "max": 1, "step": 0.01, "display": "slider"}), 25 | "multiplier": ("FLOAT", {"default": 1, "min": 0, "max": 999, "step": 0.01}), 26 | }, 27 | "optional": { 28 | } 29 | } 30 | 31 | RETURN_TYPES = ("STRING",) 32 | RETURN_NAMES = ("layer_weights",) 33 | FUNCTION = 'ipa_layer_weights_slider' 34 | CATEGORY = 'ipadapter/dev' 35 | 36 | def ipa_layer_weights_slider(self, layer0, layer1, layer2, layer3, layer4, layer5, layer6, 37 | layer7, layer8, layer9, layer10, layer11, multiplier): 38 | ret_str = (f"0:{layer0 * multiplier}, 1:{layer1 * multiplier}, 2:{layer2 * multiplier}, " 39 | f"3:{layer3 * multiplier}, 4:{layer4 * multiplier}, 5:{layer5 * multiplier}, " 40 | f"6:{layer6 * multiplier}, 7:{layer7 * multiplier}, 8:{layer8 * multiplier}, " 41 | f"9:{layer9 * multiplier}, 10:{layer10 * multiplier}, 11:{layer11 * multiplier}") 42 | 43 | return (ret_str,) 44 | 45 | NODE_CLASS_MAPPINGS = { 46 | "IPAdapterLayerWeightsSlider": IPA_LayerWeightsSlider 47 | } 48 | 49 | NODE_DISPLAY_NAME_MAPPINGS = { 50 | "IPAdapterLayerWeightsSlider": "IPAdapter Layer Weights Slider" 51 | } -------------------------------------------------------------------------------- /models/legacy_directory_do_not_use.txt: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/chflame163/ComfyUI_IPAdapter_plus_V2/24bda419546b0229fe290957411f74ab664fe52f/models/legacy_directory_do_not_use.txt -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "comfyui_ipadapter_plus_v2" 3 | description = "A copy of ComfyUI_IPAdapter_plus, Only changed node name to coexist with ComfyUI_IPAdapter_plus v1 version." 4 | version = "1.0.12" 5 | license = "GPL-3.0 license" 6 | 7 | [project.urls] 8 | Repository = "https://github.com/chflame163/ComfyUI_IPAdapter_plus_V2" 9 | # Used by Comfy Registry https://comfyregistry.org 10 | 11 | [tool.comfy] 12 | PublisherId = "chflame163" 13 | DisplayName = "ComfyUI_IPAdapter_plus_V2" 14 | Icon = "" 15 | -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | import re 2 | import torch 3 | import os 4 | import folder_paths 5 | from comfy.clip_vision import clip_preprocess, Output 6 | import comfy.utils 7 | import comfy.model_management as model_management 8 | try: 9 | import torchvision.transforms.v2 as T 10 | except ImportError: 11 | import torchvision.transforms as T 12 | 13 | def get_clipvision_file(preset): 14 | preset = preset.lower() 15 | clipvision_list = folder_paths.get_filename_list("clip_vision") 16 | 17 | if preset.startswith("vit-g"): 18 | pattern = r'(ViT.bigG.14.*39B.b160k|ipadapter.*sdxl|sdxl.*model)\.(bin|safetensors)' 19 | elif preset.startswith("kolors"): 20 | pattern = r'clip.vit.large.patch14.336\.(bin|safetensors)' 21 | else: 22 | pattern = r'(ViT.H.14.*s32B.b79K|ipadapter.*sd15|sd1.?5.*model)\.(bin|safetensors)' 23 | clipvision_file = [e for e in clipvision_list if re.search(pattern, e, re.IGNORECASE)] 24 | 25 | clipvision_file = folder_paths.get_full_path("clip_vision", clipvision_file[0]) if clipvision_file else None 26 | 27 | return clipvision_file 28 | 29 | def get_ipadapter_file(preset, is_sdxl): 30 | preset = preset.lower() 31 | ipadapter_list = folder_paths.get_filename_list("ipadapter") 32 | is_insightface = False 33 | lora_pattern = None 34 | 35 | if preset.startswith("light"): 36 | if is_sdxl: 37 | raise Exception("light model is not supported for SDXL") 38 | pattern = r'sd15.light.v11\.(safetensors|bin)$' 39 | # if v11 is not found, try with the old version 40 | if not [e for e in ipadapter_list if re.search(pattern, e, re.IGNORECASE)]: 41 | pattern = r'sd15.light\.(safetensors|bin)$' 42 | elif preset.startswith("standard"): 43 | if is_sdxl: 44 | pattern = r'ip.adapter.sdxl.vit.h\.(safetensors|bin)$' 45 | else: 46 | pattern = r'ip.adapter.sd15\.(safetensors|bin)$' 47 | elif preset.startswith("vit-g"): 48 | if is_sdxl: 49 | pattern = r'ip.adapter.sdxl\.(safetensors|bin)$' 50 | else: 51 | pattern = r'sd15.vit.g\.(safetensors|bin)$' 52 | elif preset.startswith("plus ("): 53 | if is_sdxl: 54 | pattern = r'plus.sdxl.vit.h\.(safetensors|bin)$' 55 | else: 56 | pattern = r'ip.adapter.plus.sd15\.(safetensors|bin)$' 57 | elif preset.startswith("plus face"): 58 | if is_sdxl: 59 | pattern = r'plus.face.sdxl.vit.h\.(safetensors|bin)$' 60 | else: 61 | pattern = r'plus.face.sd15\.(safetensors|bin)$' 62 | elif preset.startswith("full"): 63 | if is_sdxl: 64 | raise Exception("full face model is not supported for SDXL") 65 | pattern = r'full.face.sd15\.(safetensors|bin)$' 66 | elif preset.startswith("faceid portrait ("): 67 | if 
is_sdxl: 68 | pattern = r'portrait.sdxl\.(safetensors|bin)$' 69 | else: 70 | pattern = r'portrait.v11.sd15\.(safetensors|bin)$' 71 | # if v11 is not found, try with the old version 72 | if not [e for e in ipadapter_list if re.search(pattern, e, re.IGNORECASE)]: 73 | pattern = r'portrait.sd15\.(safetensors|bin)$' 74 | is_insightface = True 75 | elif preset.startswith("faceid portrait unnorm"): 76 | if is_sdxl: 77 | pattern = r'portrait.sdxl.unnorm\.(safetensors|bin)$' 78 | else: 79 | raise Exception("portrait unnorm model is not supported for SD1.5") 80 | is_insightface = True 81 | elif preset == "faceid": 82 | if is_sdxl: 83 | pattern = r'faceid.sdxl\.(safetensors|bin)$' 84 | lora_pattern = r'faceid.sdxl.lora\.safetensors$' 85 | else: 86 | pattern = r'faceid.sd15\.(safetensors|bin)$' 87 | lora_pattern = r'faceid.sd15.lora\.safetensors$' 88 | is_insightface = True 89 | elif preset.startswith("faceid plus -"): 90 | if is_sdxl: 91 | raise Exception("faceid plus model is not supported for SDXL") 92 | pattern = r'faceid.plus.sd15\.(safetensors|bin)$' 93 | lora_pattern = r'faceid.plus.sd15.lora\.safetensors$' 94 | is_insightface = True 95 | elif preset.startswith("faceid plus v2"): 96 | if is_sdxl: 97 | pattern = r'faceid.plusv2.sdxl\.(safetensors|bin)$' 98 | lora_pattern = r'faceid.plusv2.sdxl.lora\.safetensors$' 99 | else: 100 | pattern = r'faceid.plusv2.sd15\.(safetensors|bin)$' 101 | lora_pattern = r'faceid.plusv2.sd15.lora\.safetensors$' 102 | is_insightface = True 103 | # Community's models 104 | elif preset.startswith("composition"): 105 | if is_sdxl: 106 | pattern = r'plus.composition.sdxl\.safetensors$' 107 | else: 108 | pattern = r'plus.composition.sd15\.safetensors$' 109 | elif preset.startswith("kolors"): 110 | if is_sdxl: 111 | pattern = r'(ip_adapter_plus_general|kolors.ip.adapter.plus)\.(safetensors|bin)$' 112 | else: 113 | raise Exception("Only supported for Kolors model") 114 | else: 115 | raise Exception(f"invalid type '{preset}'") 116 | 117 | ipadapter_file = [e for e in ipadapter_list if re.search(pattern, e, re.IGNORECASE)] 118 | ipadapter_file = folder_paths.get_full_path("ipadapter", ipadapter_file[0]) if ipadapter_file else None 119 | 120 | return ipadapter_file, is_insightface, lora_pattern 121 | 122 | def get_lora_file(pattern): 123 | lora_list = folder_paths.get_filename_list("loras") 124 | lora_file = [e for e in lora_list if re.search(pattern, e, re.IGNORECASE)] 125 | lora_file = folder_paths.get_full_path("loras", lora_file[0]) if lora_file else None 126 | 127 | return lora_file 128 | 129 | def ipadapter_model_loader(file): 130 | model = comfy.utils.load_torch_file(file, safe_load=True) 131 | 132 | if file.lower().endswith(".safetensors"): 133 | st_model = {"image_proj": {}, "ip_adapter": {}} 134 | for key in model.keys(): 135 | if key.startswith("image_proj."): 136 | st_model["image_proj"][key.replace("image_proj.", "")] = model[key] 137 | elif key.startswith("ip_adapter."): 138 | st_model["ip_adapter"][key.replace("ip_adapter.", "")] = model[key] 139 | elif key.startswith("adapter_modules."): 140 | st_model["ip_adapter"][key.replace("adapter_modules.", "")] = model[key] 141 | model = st_model 142 | del st_model 143 | elif "adapter_modules" in model.keys(): 144 | model["ip_adapter"] = model.pop("adapter_modules") 145 | 146 | if not "ip_adapter" in model.keys() or not model["ip_adapter"]: 147 | raise Exception("invalid IPAdapter model {}".format(file)) 148 | 149 | if 'plusv2' in file.lower(): 150 | model["faceidplusv2"] = True 151 | 152 | if 'unnorm' in 
file.lower(): 153 | model["portraitunnorm"] = True 154 | 155 | return model 156 | 157 | def insightface_loader(provider, model_name='buffalo_l'): 158 | try: 159 | from insightface.app import FaceAnalysis 160 | except ImportError as e: 161 | raise Exception(e) 162 | 163 | path = os.path.join(folder_paths.models_dir, "insightface") 164 | model = FaceAnalysis(name=model_name, root=path, providers=[provider + 'ExecutionProvider',]) 165 | model.prepare(ctx_id=0, det_size=(640, 640)) 166 | return model 167 | 168 | def split_tiles(embeds, num_split): 169 | _, H, W, _ = embeds.shape 170 | out = [] 171 | for x in embeds: 172 | x = x.unsqueeze(0) 173 | h, w = H // num_split, W // num_split 174 | x_split = torch.cat([x[:, i*h:(i+1)*h, j*w:(j+1)*w, :] for i in range(num_split) for j in range(num_split)], dim=0) 175 | out.append(x_split) 176 | 177 | x_split = torch.stack(out, dim=0) 178 | 179 | return x_split 180 | 181 | def merge_hiddenstates(x, tiles): 182 | chunk_size = tiles*tiles 183 | x = x.split(chunk_size) 184 | 185 | out = [] 186 | for embeds in x: 187 | num_tiles = embeds.shape[0] 188 | tile_size = int((embeds.shape[1]-1) ** 0.5) 189 | grid_size = int(num_tiles ** 0.5) 190 | 191 | # Extract class tokens 192 | class_tokens = embeds[:, 0, :] # Save class tokens: [num_tiles, embeds[-1]] 193 | avg_class_token = class_tokens.mean(dim=0, keepdim=True).unsqueeze(0) # Average token, shape: [1, 1, embeds[-1]] 194 | 195 | patch_embeds = embeds[:, 1:, :] # Shape: [num_tiles, tile_size^2, embeds[-1]] 196 | reshaped = patch_embeds.reshape(grid_size, grid_size, tile_size, tile_size, embeds.shape[-1]) 197 | 198 | merged = torch.cat([torch.cat([reshaped[i, j] for j in range(grid_size)], dim=1) 199 | for i in range(grid_size)], dim=0) 200 | 201 | merged = merged.unsqueeze(0) # Shape: [1, grid_size*tile_size, grid_size*tile_size, embeds[-1]] 202 | 203 | # Pool to original size 204 | pooled = torch.nn.functional.adaptive_avg_pool2d(merged.permute(0, 3, 1, 2), (tile_size, tile_size)).permute(0, 2, 3, 1) 205 | flattened = pooled.reshape(1, tile_size*tile_size, embeds.shape[-1]) 206 | 207 | # Add back the class token 208 | with_class = torch.cat([avg_class_token, flattened], dim=1) # Shape: original shape 209 | out.append(with_class) 210 | 211 | out = torch.cat(out, dim=0) 212 | 213 | return out 214 | 215 | def merge_embeddings(x, tiles): # TODO: this needs so much testing that I don't even 216 | chunk_size = tiles*tiles 217 | x = x.split(chunk_size) 218 | 219 | out = [] 220 | for embeds in x: 221 | num_tiles = embeds.shape[0] 222 | grid_size = int(num_tiles ** 0.5) 223 | tile_size = int(embeds.shape[1] ** 0.5) 224 | reshaped = embeds.reshape(grid_size, grid_size, tile_size, tile_size) 225 | 226 | # Merge the tiles 227 | merged = torch.cat([torch.cat([reshaped[i, j] for j in range(grid_size)], dim=1) 228 | for i in range(grid_size)], dim=0) 229 | 230 | merged = merged.unsqueeze(0) # Shape: [1, grid_size*tile_size, grid_size*tile_size] 231 | 232 | # Pool to original size 233 | pooled = torch.nn.functional.adaptive_avg_pool2d(merged, (tile_size, tile_size)) # pool to [1, tile_size, tile_size] 234 | pooled = pooled.flatten(1) # flatten to [1, tile_size^2] 235 | out.append(pooled) 236 | out = torch.cat(out, dim=0) 237 | 238 | return out 239 | 240 | def encode_image_masked(clip_vision, image, mask=None, batch_size=0, tiles=1, ratio=1.0, clipvision_size=224): 241 | # full image embeds 242 | embeds = encode_image_masked_(clip_vision, image, mask, batch_size, clipvision_size=clipvision_size) 243 | tiles = min(tiles, 16) 
244 | 245 | if tiles > 1: 246 | # split in tiles 247 | image_split = split_tiles(image, tiles) 248 | 249 | # get the embeds for each tile 250 | embeds_split = Output() 251 | for i in image_split: 252 | encoded = encode_image_masked_(clip_vision, i, mask, batch_size, clipvision_size=clipvision_size) 253 | if not hasattr(embeds_split, "image_embeds"): 254 | #embeds_split["last_hidden_state"] = encoded["last_hidden_state"] 255 | embeds_split["image_embeds"] = encoded["image_embeds"] 256 | embeds_split["penultimate_hidden_states"] = encoded["penultimate_hidden_states"] 257 | else: 258 | #embeds_split["last_hidden_state"] = torch.cat((embeds_split["last_hidden_state"], encoded["last_hidden_state"]), dim=0) 259 | embeds_split["image_embeds"] = torch.cat((embeds_split["image_embeds"], encoded["image_embeds"]), dim=0) 260 | embeds_split["penultimate_hidden_states"] = torch.cat((embeds_split["penultimate_hidden_states"], encoded["penultimate_hidden_states"]), dim=0) 261 | 262 | #embeds_split['last_hidden_state'] = merge_hiddenstates(embeds_split['last_hidden_state']) 263 | embeds_split["image_embeds"] = merge_embeddings(embeds_split["image_embeds"], tiles) 264 | embeds_split["penultimate_hidden_states"] = merge_hiddenstates(embeds_split["penultimate_hidden_states"], tiles) 265 | 266 | #embeds['last_hidden_state'] = torch.cat([embeds_split['last_hidden_state'], embeds['last_hidden_state']]) 267 | if embeds['image_embeds'].shape[0] > 1: # if we have more than one image we need to average the embeddings for consistency 268 | embeds['image_embeds'] = embeds['image_embeds']*ratio + embeds_split['image_embeds']*(1-ratio) 269 | embeds['penultimate_hidden_states'] = embeds['penultimate_hidden_states']*ratio + embeds_split['penultimate_hidden_states']*(1-ratio) 270 | #embeds['image_embeds'] = (embeds['image_embeds']*ratio + embeds_split['image_embeds']) / 2 271 | #embeds['penultimate_hidden_states'] = (embeds['penultimate_hidden_states']*ratio + embeds_split['penultimate_hidden_states']) / 2 272 | else: # otherwise we can concatenate them, they can be averaged later 273 | embeds['image_embeds'] = torch.cat([embeds['image_embeds']*ratio, embeds_split['image_embeds']]) 274 | embeds['penultimate_hidden_states'] = torch.cat([embeds['penultimate_hidden_states']*ratio, embeds_split['penultimate_hidden_states']]) 275 | 276 | #del embeds_split 277 | 278 | return embeds 279 | 280 | def encode_image_masked_(clip_vision, image, mask=None, batch_size=0, clipvision_size=224): 281 | model_management.load_model_gpu(clip_vision.patcher) 282 | outputs = Output() 283 | 284 | if batch_size == 0: 285 | batch_size = image.shape[0] 286 | elif batch_size > image.shape[0]: 287 | batch_size = image.shape[0] 288 | 289 | image_batch = torch.split(image, batch_size, dim=0) 290 | 291 | for img in image_batch: 292 | img = img.to(clip_vision.load_device) 293 | pixel_values = clip_preprocess(img, size=clipvision_size).float() 294 | 295 | # TODO: support for multiple masks 296 | if mask is not None: 297 | pixel_values = pixel_values * mask.to(clip_vision.load_device) 298 | 299 | out = clip_vision.model(pixel_values=pixel_values, intermediate_output=-2) 300 | 301 | if not hasattr(outputs, "last_hidden_state"): 302 | outputs["last_hidden_state"] = out[0].to(model_management.intermediate_device()) 303 | outputs["image_embeds"] = out[2].to(model_management.intermediate_device()) 304 | outputs["penultimate_hidden_states"] = out[1].to(model_management.intermediate_device()) 305 | else: 306 | outputs["last_hidden_state"] = 
torch.cat((outputs["last_hidden_state"], out[0].to(model_management.intermediate_device())), dim=0) 307 | outputs["image_embeds"] = torch.cat((outputs["image_embeds"], out[2].to(model_management.intermediate_device())), dim=0) 308 | outputs["penultimate_hidden_states"] = torch.cat((outputs["penultimate_hidden_states"], out[1].to(model_management.intermediate_device())), dim=0) 309 | 310 | del img, pixel_values, out 311 | torch.cuda.empty_cache() 312 | 313 | return outputs 314 | 315 | def tensor_to_size(source, dest_size): 316 | if isinstance(dest_size, torch.Tensor): 317 | dest_size = dest_size.shape[0] 318 | source_size = source.shape[0] 319 | 320 | if source_size < dest_size: 321 | shape = [dest_size - source_size] + [1]*(source.dim()-1) 322 | source = torch.cat((source, source[-1:].repeat(shape)), dim=0) 323 | elif source_size > dest_size: 324 | source = source[:dest_size] 325 | 326 | return source 327 | 328 | def min_(tensor_list): 329 | # return the element-wise min of the tensor list. 330 | x = torch.stack(tensor_list) 331 | mn = x.min(axis=0)[0] 332 | return torch.clamp(mn, min=0) 333 | 334 | def max_(tensor_list): 335 | # return the element-wise max of the tensor list. 336 | x = torch.stack(tensor_list) 337 | mx = x.max(axis=0)[0] 338 | return torch.clamp(mx, max=1) 339 | 340 | # From https://github.com/Jamy-L/Pytorch-Contrast-Adaptive-Sharpening/ 341 | def contrast_adaptive_sharpening(image, amount): 342 | img = T.functional.pad(image, (1, 1, 1, 1)).cpu() 343 | 344 | a = img[..., :-2, :-2] 345 | b = img[..., :-2, 1:-1] 346 | c = img[..., :-2, 2:] 347 | d = img[..., 1:-1, :-2] 348 | e = img[..., 1:-1, 1:-1] 349 | f = img[..., 1:-1, 2:] 350 | g = img[..., 2:, :-2] 351 | h = img[..., 2:, 1:-1] 352 | i = img[..., 2:, 2:] 353 | 354 | # Computing contrast 355 | cross = (b, d, e, f, h) 356 | mn = min_(cross) 357 | mx = max_(cross) 358 | 359 | diag = (a, c, g, i) 360 | mn2 = min_(diag) 361 | mx2 = max_(diag) 362 | mx = mx + mx2 363 | mn = mn + mn2 364 | 365 | # Computing local weight 366 | inv_mx = torch.reciprocal(mx) 367 | amp = inv_mx * torch.minimum(mn, (2 - mx)) 368 | 369 | # scaling 370 | amp = torch.sqrt(amp) 371 | w = - amp * (amount * (1/5 - 1/8) + 1/8) 372 | div = torch.reciprocal(1 + 4*w) 373 | 374 | output = ((b + d + f + h)*w + e) * div 375 | output = torch.nan_to_num(output) 376 | output = output.clamp(0, 1) 377 | 378 | return output 379 | 380 | def tensor_to_image(tensor): 381 | image = tensor.mul(255).clamp(0, 255).byte().cpu() 382 | image = image[..., [2, 1, 0]].numpy() 383 | return image 384 | 385 | def image_to_tensor(image): 386 | tensor = torch.clamp(torch.from_numpy(image).float() / 255., 0, 1) 387 | tensor = tensor[..., [2, 1, 0]] 388 | return tensor 389 | --------------------------------------------------------------------------------
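A minimal usage sketch (not part of the repository) for the tensor_to_size() helper defined in utils.py above. The batch sizes and weight values are hypothetical examples, and the direct "from utils import tensor_to_size" assumes the file is importable as a standalone module; inside the package it would be a relative import.

import torch

from utils import tensor_to_size  # assumed direct import for illustration only

# Hypothetical per-frame weights that must match a latent batch of 5:
# a shorter source is padded by repeating its last element.
weights = torch.tensor([0.2, 0.8])
print(tensor_to_size(weights, 5))            # tensor([0.2000, 0.8000, 0.8000, 0.8000, 0.8000])

# A source longer than the destination is simply truncated.
print(tensor_to_size(torch.arange(6.0), 4))  # tensor([0., 1., 2., 3.])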