├── LICENSE ├── README.md ├── __init__.py ├── convert_to_safetensors.py └── example.json /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ComfyUI NPNet (Golden Noise)
2 | 
3 | A very barebones, mostly copy-pasted implementation of https://github.com/xie-lab-ml/Golden-Noise-for-Diffusion-Models
4 | 
5 | ## Requirements
6 | You need the pre-trained weights for your model. Download and place them under `models/npnet` in your ComfyUI folder, or add an extra path in `extra_model_paths.yaml` for the `npnet` type (an example entry is shown below, under Notes).
7 | 
8 | You can find safetensors-converted weights at https://huggingface.co/asagi4/NPNet
9 | 
10 | The original pickle-format checkpoints can be found at https://drive.google.com/drive/folders/1Z0wg4HADhpgrztyT3eWijPbJJN5Y2jQt?usp=drive_link
11 | 
12 | ## Usage
13 | Use this node with custom sampling: pass in initial noise from e.g. `RandomNoise` and a prompt as conditioning. See the tooltips on the node for an explanation of the options.
14 | 
15 | You can also run it on the CPU, though that appears to change the output for some reason.
16 | 
17 | ## Notes
18 | The model apparently only works with 128x128 latents. If you pass in latents of any other shape, the node will reshape the noise into a square before running the noise model, then reshape the result back to the original resolution. You can control how the reshaping happens with the `reshape` and `reshape_method` parameters.
19 | 
20 | If you get an error from the timm module when running this, update your timm package; it may be too old.
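For the `extra_model_paths.yaml` route mentioned under Requirements, an entry along these lines should work (a sketch only; the section name and `base_path` are placeholders for your own setup):

```yaml
# extra_model_paths.yaml in your ComfyUI directory
my_models:
    base_path: /path/to/your/models
    npnet: npnet
```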
21 | 
22 | You can use `convert_to_safetensors.py` to convert the pre-trained models into safetensors files (with fixed keys).
23 | 
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import safetensors.torch
3 | import torch.nn as nn
4 | from torch.nn import functional as F
5 | from timm import create_model
6 | import einops
7 | 
8 | from diffusers.models.normalization import AdaGroupNorm
9 | 
10 | from timm.layers import use_fused_attn
11 | 
12 | from comfy.utils import common_upscale
13 | 
14 | import folder_paths
15 | import os.path
16 | 
17 | 
18 | class Attention(nn.Module):
19 |     fused_attn = True
20 | 
21 |     def __init__(
22 |         self,
23 |         dim: int,
24 |         num_heads: int = 8,
25 |         qkv_bias: bool = False,
26 |         qk_norm: bool = False,
27 |         attn_drop: float = 0.0,
28 |         proj_drop: float = 0.0,
29 |         norm_layer: nn.Module = nn.LayerNorm,
30 |     ) -> None:
31 |         super().__init__()
32 |         assert dim % num_heads == 0, "dim should be divisible by num_heads"
33 |         self.num_heads = num_heads
34 |         self.head_dim = dim // num_heads
35 |         self.scale = self.head_dim**-0.5
36 |         self.fused_attn = use_fused_attn()
37 | 
38 |         self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
39 |         self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
40 |         self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
41 |         self.attn_drop = nn.Dropout(attn_drop)
42 |         self.proj = nn.Linear(dim, dim)
43 |         self.proj_drop = nn.Dropout(proj_drop)
44 | 
45 |     def forward(self, x: torch.Tensor) -> torch.Tensor:
46 |         B, N, C = x.shape
47 |         qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
48 |         q, k, v = qkv.unbind(0)
49 |         q, k = self.q_norm(q), self.k_norm(k)
50 | 
51 |         if self.fused_attn:
52 |             x = F.scaled_dot_product_attention(
53 |                 q,
54 |                 k,
55 |                 v,
56 |                 dropout_p=self.attn_drop.p if self.training else 0.0,
57 |             )
58 |         else:
59 |             q = q * self.scale
60 |             attn = q @ k.transpose(-2, -1)
61 |             attn = attn.softmax(dim=-1)
62 |             attn = self.attn_drop(attn)
63 |             x = attn @ v
64 | 
65 |         x = x.transpose(1, 2).reshape(B, N, C)
66 |         x = self.proj(x)
67 |         x = self.proj_drop(x)
68 |         return x
69 | 
70 | 
71 | class SVDNoiseUnet(nn.Module):
72 |     def __init__(self, in_channels=4, out_channels=4, resolution=128):  # resolution = size // 8
73 |         super(SVDNoiseUnet, self).__init__()
74 | 
75 |         _in = int(resolution * in_channels // 2)
76 |         _out = int(resolution * out_channels // 2)
77 |         self.mlp1 = nn.Sequential(
78 |             nn.Linear(_in, 64),
79 |             nn.ReLU(inplace=True),
80 |             nn.Linear(64, _out),
81 |         )
82 |         self.mlp2 = nn.Sequential(
83 |             nn.Linear(_in, 64),
84 |             nn.ReLU(inplace=True),
85 |             nn.Linear(64, _out),
86 |         )
87 | 
88 |         self.mlp3 = nn.Sequential(
89 |             nn.Linear(_in, _out),
90 |         )
91 | 
92 |         self.attention = Attention(_out)
93 | 
94 |         self.bn = nn.BatchNorm2d(_out)
95 | 
96 |         self.mlp4 = nn.Sequential(
97 |             nn.Linear(_out, 1024),
98 |             nn.ReLU(inplace=True),
99 |             nn.Linear(1024, _out),
100 |         )
101 | 
102 |     def forward(self, x, residual=False):
103 |         b, c, h, w = x.shape
104 |         x = einops.rearrange(x, "b (a c) h w -> b (a h) (c w)", a=2, c=2)  # [b, 4, 128, 128] -> [b, 256, 256]
105 |         U, s, V = torch.linalg.svd(x)  # U -> [b, 256, 256], s -> [b, 256], V -> [b, 256, 256]
106 |         U_T = U.permute(0, 2, 1)
107 |         out = self.mlp1(U_T) + self.mlp2(V) + self.mlp3(s).unsqueeze(1)  # s -> [b, 1, 256] => [b, 256, 256]
108 |         out = self.attention(out).mean(1)
109 |         out = self.mlp4(out) + s
110 |         pred = U @ torch.diag_embed(out) @ V
111 |         return einops.rearrange(pred, "b (a h) (c w) -> b (a c) h w", a=2, c=2)
112 | 
113 | 
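# --- Editor's illustration (not part of the original file) ---
# A minimal sketch of the SVD round trip that SVDNoiseUnet.forward performs,
# assuming the default 4-channel 128x128 latent; the helper name below is
# made up and nothing in it is called by the node itself.
def _svd_shape_walkthrough():
    x = torch.randn(1, 4, 128, 128)  # latent-shaped noise
    m = einops.rearrange(x, "b (a c) h w -> b (a h) (c w)", a=2, c=2)
    assert m.shape == (1, 256, 256)  # 4 channels folded into a 2x2 tiling
    U, s, V = torch.linalg.svd(m)  # U: [1, 256, 256], s: [1, 256], V: [1, 256, 256]
    # The model's MLPs predict corrected singular values from U, V and s;
    # reconstruction multiplies them back together, and the inverse
    # rearrange restores the original latent layout.
    back = einops.rearrange(U @ torch.diag_embed(s) @ V, "b (a h) (c w) -> b (a c) h w", a=2, c=2)
    assert back.shape == x.shape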
114 | class SVDNoiseUnet_Concise(nn.Module):  # apparently unused stub kept from the upstream implementation
115 |     def __init__(self, in_channels=4, out_channels=4, resolution=128):
116 |         super(SVDNoiseUnet_Concise, self).__init__()
117 | 
118 | 
119 | class NoiseTransformer(nn.Module):
120 |     def __init__(self, resolution=128):
121 |         super().__init__()
122 |         self.upsample = lambda x: F.interpolate(x, [224, 224])
123 |         self.downsample = lambda x: F.interpolate(x, [resolution, resolution])
124 |         self.upconv = nn.Conv2d(7, 4, (1, 1), (1, 1), (0, 0))
125 |         self.downconv = nn.Conv2d(4, 3, (1, 1), (1, 1), (0, 0))
126 | 
127 |         self.swin = create_model("swin_tiny_patch4_window7_224", pretrained=True)  # Swin backbone; the 1x1 convs adapt channel counts on the way in and out
128 | 
129 |     def forward(self, x, residual=False):
130 |         if residual:
131 |             x = self.upconv(self.downsample(self.swin.forward_features(self.downconv(self.upsample(x))))) + x
132 |         else:
133 |             x = self.upconv(self.downsample(self.swin.forward_features(self.downconv(self.upsample(x)))))
134 | 
135 |         return x
136 | 
137 | 
138 | class NPNet(nn.Module):
139 |     def __init__(self, pretrained_path, device="cuda") -> None:
140 |         super().__init__()
141 | 
142 |         self.device = device
143 |         self.pretrained_path = pretrained_path
144 |         self.unet_embedding = NoiseTransformer(resolution=128)
145 |         self.unet_svd = SVDNoiseUnet(resolution=128)
146 |         self.alpha = torch.nn.Parameter(torch.empty(1))
147 |         self.beta = torch.nn.Parameter(torch.empty(1))
148 | 
149 |         if ".pth" in pretrained_path:
150 |             sd = torch.load(self.pretrained_path, weights_only=True, map_location=device)
151 |         else:
152 |             sd = safetensors.torch.load_file(self.pretrained_path)
153 | 
154 |         if "embeeding" in sd:
155 |             # fix key format ("embeeding" is a typo in the original checkpoints)
156 |             self._convert(sd)
157 | 
158 |         te_shape = sd["text_embedding.linear.weight"].shape[1]
159 |         if te_shape == 77 * 1024:
160 |             print("Model looks like NPNet DiT")
161 |         elif te_shape == 77 * 2048:
162 |             print("Model looks like NPNet SDXL or DreamShaper")
163 |         else:
164 |             print("Unrecognized TE shape:", te_shape, te_shape // 77)
165 |         self.text_embedding = AdaGroupNorm(te_shape, 4, 1, eps=1e-6)
166 |         self.load_state_dict(sd)
167 |         self.to(dtype=torch.float32, device=device)
168 | 
169 |     def _convert(self, sd):
170 |         for k in "unet_embedding", "unet_svd", "embeeding":
171 |             subdict = sd.pop(k)
172 |             if k == "embeeding":
173 |                 k = "text_embedding"
174 |             for sk in subdict:
175 |                 sd[f"{k}.{sk}"] = subdict[sk]
176 | 
177 |     def to(self, *args, **kwargs):
178 |         super().to(*args, **kwargs)
179 |         self.device = self.alpha.device
180 |         return self
181 | 
182 |     def forward(self, initial_noise, prompt_embeds):
183 |         prompt_embeds = prompt_embeds.float().view(prompt_embeds.shape[0], -1)
184 |         text_emb = self.text_embedding(initial_noise.float(), prompt_embeds)
185 | 
186 |         encoder_hidden_states_svd = initial_noise
187 |         encoder_hidden_states_embedding = initial_noise + text_emb
188 | 
189 |         golden_embedding = self.unet_embedding(encoder_hidden_states_embedding.float())
190 | 
191 |         golden_noise = (
192 |             self.unet_svd(encoder_hidden_states_svd.float())
193 |             + (2 * torch.sigmoid(self.alpha) - 1) * text_emb
194 |             + self.beta * golden_embedding
195 |         )
196 | 
197 |         return golden_noise
198 | 
199 | 
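# --- Editor's illustration (not part of the original file) ---
# A minimal sketch of driving NPNet directly, outside ComfyUI. The checkpoint
# filename is hypothetical, and the random prompt embeddings merely stand in
# for real [1, 77, 2048] SDXL text-encoder output.
def _npnet_standalone_demo():
    npnet = NPNet("models/npnet/sdxl.safetensors", device="cpu")
    noise = torch.randn(1, 4, 128, 128)
    prompt_embeds = torch.randn(1, 77, 2048)  # flattened internally to [1, 77 * 2048]
    golden = npnet(noise, prompt_embeds)
    assert golden.shape == noise.shape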
200 | class NPNetGoldenNoise:
201 |     npnet = None
202 |     noise = None
203 |     cond = None
204 |     seed = None
205 |     method = "nearest-exact"
206 |     strategy = "resize"
207 |     olp = "truncate"
208 | 
209 |     @classmethod
210 |     def INPUT_TYPES(s):
211 |         if "npnet" not in folder_paths.folder_names_and_paths:
212 |             folder_paths.folder_names_and_paths["npnet"] = (
213 |                 [os.path.join(folder_paths.models_dir, "npnet")],
214 |                 {".pth", ".safetensors"},
215 |             )
216 | 
217 |         return {
218 |             "required": {
219 |                 "noise": ("NOISE", {"tooltip": "Connect the output of e.g. RandomNoise to this node"}),
220 |                 "prompt": ("CONDITIONING", {"tooltip": "This is the prompt you want the golden noise for"}),
221 |                 "model": (
222 |                     folder_paths.get_filename_list("npnet"),
223 |                     {"tooltip": "Put your models under models/npnet in your ComfyUI directory"},
224 |                 ),
225 |                 "device": (["cuda", "cpu"],),
226 |             },
227 |             "optional": {
228 |                 "reshape": (["resize", "crop"], {"tooltip": "What to do with latents that aren't 128x128 (NPNet can't handle them directly)"}),
229 |                 "reshape_method": (["nearest-exact", "bilinear", "area", "bicubic", "bislerp"],),
230 |                 "on_long_prompt": (
231 |                     ["truncate", "average", "recurse"],
232 |                     {
233 |                         "tooltip": "What to do when the prompt is >77 tokens. 'truncate' simply cuts the prompt to 77 tokens, 'average' applies the model to each 77-token chunk and averages the results, and 'recurse' applies NPNet recursively, one 77-token chunk at a time"
234 |                     },
235 |                 ),
236 |             },
237 |         }
238 | 
239 |     RETURN_TYPES = ("NOISE",)
240 |     CATEGORY = "latent/noise"
241 | 
242 |     FUNCTION = "doit"
243 | 
244 |     def reshape(self, noise, shape):
245 |         if shape[-1] == noise.shape[-1] and shape[-2] == noise.shape[-2]:
246 |             return noise
247 |         crop = "disabled" if self.strategy == "resize" else "center"
248 |         return common_upscale(noise, shape[-1], shape[-2], self.method, crop)
249 | 
250 |     def generate_noise(self, input_latent):
251 |         self.seed = self.noise.seed
252 |         orig_shape = input_latent["samples"].shape
253 |         input_latent = input_latent.copy()
254 |         input_latent["samples"] = self.reshape(input_latent["samples"], (128, 128))
255 |         init_noise = self.noise.generate_noise(input_latent).to(self.npnet.device)
256 |         cond = self.cond[0].clone().to(self.npnet.device)
257 |         if cond.shape[1] != 77:
258 |             print(f"Prompt has {cond.shape[1]} tokens. NPNet can't handle prompts >77, a workaround will be applied")
259 |             if self.olp == "truncate":
260 |                 print("Truncating prompt to 77 tokens")
261 |                 cond = cond[:, :77, :]
262 |                 r = self.npnet(init_noise, cond)
263 |             elif self.olp == "recurse":
264 |                 print("Applying NPNet recursively to all prompt chunks")
265 |                 r = init_noise
266 |                 for chunk in torch.split(cond, 77, 1):
267 |                     r = self.npnet(r, chunk)
268 |             else:
269 |                 print("Averaging NPNet output for each chunk")
270 |                 r = torch.stack([self.npnet(init_noise, c) for c in torch.split(cond, 77, 1)]).mean(dim=0)
271 |         else:
272 |             r = self.npnet(init_noise, cond)
273 | 
274 |         return self.reshape(r.to("cpu"), orig_shape)
275 | 
276 |     def doit(
277 |         self, noise, prompt, model, device, reshape="resize", reshape_method="nearest-exact", on_long_prompt="truncate"
278 |     ):
279 |         model_path = folder_paths.get_full_path("npnet", model)
280 |         if self.npnet is None or self.npnet.pretrained_path != model_path:
281 |             print("Loading NPNet from", model_path)
282 |             self.npnet = NPNet(model_path, device=device)
283 |         self.npnet.to(device)
284 |         self.method = reshape_method
285 |         self.strategy = reshape
286 |         self.noise = noise
287 |         self.olp = on_long_prompt
288 |         self.cond = prompt[0]
289 | 
290 |         return (self,)
291 | 
292 | 
293 | NODE_CLASS_MAPPINGS = {"NPNetGoldenNoise": NPNetGoldenNoise}
294 | 
--------------------------------------------------------------------------------
/convert_to_safetensors.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import sys
3 | import torch
4 | from safetensors.torch import save_file
5 | from pathlib import Path
6 | 
7 | files = sys.argv[1:]
8 | for f in files:
9 |     f = Path(f)
10 |     if f.suffix == ".pth":
11 |         print("Converting", f)
12 |         fn = f.with_suffix(".safetensors")
13 |         if fn.exists():
14 |             print(f"{fn} exists, skipping...")
15 |             continue
16 |         print(f"Loading {f}...")
17 |         try:
18 |             model = torch.load(f, weights_only=True, map_location="cpu")
19 |             weights = {}
20 |             for k in "unet_embedding", "unet_svd", "embeeding":
21 |                 subdict = model.pop(k)
22 |                 if k == "embeeding":
23 |                     k = "text_embedding"
24 |                 for sk in subdict:
25 |                     weights[f"{k}.{sk}"] = subdict[sk]
26 |             weights["alpha"] = model["alpha"]
27 |             weights["beta"] = model["beta"]
28 |             print(f"Saving {fn}...")
29 |             save_file(weights, fn)
30 |             del model
31 |             del weights
32 |         except Exception as ex:
33 |             print(f"ERROR converting {f}: {ex}")
34 | 
35 | print("Done!")
36 | 
--------------------------------------------------------------------------------
/example.json:
--------------------------------------------------------------------------------
 1 | { 2 | "last_node_id": 20, 3 | "last_link_id": 33, 4 | "nodes": [ 5 | { 6 | "id": 4, 7 | "type": "CheckpointLoaderSimple", 8 | "pos": [ 9 | 60, 10 | 195 11 | ], 12 | "size": [ 13 | 315, 14 | 98 15 | ], 16 | "flags": {}, 17 | "order": 3, 18 | "mode": 0, 19 | "inputs": [], 20 | "outputs": [ 21 | { 22 | "name": "MODEL", 23 | "type": "MODEL", 24 | "links": [ 25 | 18, 26 | 26 27 | ], 28 | "slot_index": 0 29 | }, 30 | { 31 | "name": "CLIP", 32 | "type": "CLIP", 33 | "links": [ 34 | 3, 35 | 5 36 | ], 37 | "slot_index": 1 38 | }, 39 | { 40 | "name": "VAE", 41 | "type": "VAE", 42 | "links": [ 43 | 8 44 | ], 45 | "slot_index": 2 46 | } 47 | ], 48 | "properties": { 49 | "Node name for S&R": "CheckpointLoaderSimple" 50 | }, 51 | "widgets_values": [ 52 | "XL/sdxl_v10vaefix.safetensors" 53 | ] 54 | }, 55 | { 56 | "id": 5, 57 | "type": "EmptyLatentImage", 58 | "pos":
[ 59 | 885, 60 | 330 61 | ], 62 | "size": [ 63 | 315, 64 | 106 65 | ], 66 | "flags": {}, 67 | "order": 1, 68 | "mode": 0, 69 | "inputs": [], 70 | "outputs": [ 71 | { 72 | "name": "LATENT", 73 | "type": "LATENT", 74 | "links": [ 75 | 22 76 | ], 77 | "slot_index": 0 78 | } 79 | ], 80 | "properties": { 81 | "Node name for S&R": "EmptyLatentImage" 82 | }, 83 | "widgets_values": [ 84 | 1024, 85 | 1024, 86 | 1 87 | ] 88 | }, 89 | { 90 | "id": 6, 91 | "type": "CLIPTextEncode", 92 | "pos": [ 93 | 415, 94 | 186 95 | ], 96 | "size": [ 97 | 422.84503173828125, 98 | 164.31304931640625 99 | ], 100 | "flags": {}, 101 | "order": 6, 102 | "mode": 0, 103 | "inputs": [ 104 | { 105 | "name": "clip", 106 | "type": "CLIP", 107 | "link": 3 108 | } 109 | ], 110 | "outputs": [ 111 | { 112 | "name": "CONDITIONING", 113 | "type": "CONDITIONING", 114 | "links": [ 115 | 16, 116 | 32 117 | ], 118 | "slot_index": 0 119 | } 120 | ], 121 | "properties": { 122 | "Node name for S&R": "CLIPTextEncode" 123 | }, 124 | "widgets_values": [ 125 | "beautiful scenery nature glass bottle landscape, purple galaxy bottle," 126 | ] 127 | }, 128 | { 129 | "id": 7, 130 | "type": "CLIPTextEncode", 131 | "pos": [ 132 | 413, 133 | 389 134 | ], 135 | "size": [ 136 | 425.27801513671875, 137 | 180.6060791015625 138 | ], 139 | "flags": {}, 140 | "order": 7, 141 | "mode": 0, 142 | "inputs": [ 143 | { 144 | "name": "clip", 145 | "type": "CLIP", 146 | "link": 5 147 | } 148 | ], 149 | "outputs": [ 150 | { 151 | "name": "CONDITIONING", 152 | "type": "CONDITIONING", 153 | "links": [ 154 | 17 155 | ], 156 | "slot_index": 0 157 | } 158 | ], 159 | "properties": { 160 | "Node name for S&R": "CLIPTextEncode" 161 | }, 162 | "widgets_values": [ 163 | "text, watermark" 164 | ] 165 | }, 166 | { 167 | "id": 8, 168 | "type": "VAEDecode", 169 | "pos": [ 170 | 1230, 171 | -30 172 | ], 173 | "size": [ 174 | 210, 175 | 46 176 | ], 177 | "flags": {}, 178 | "order": 11, 179 | "mode": 0, 180 | "inputs": [ 181 | { 182 | "name": "samples", 183 | "type": "LATENT", 184 | "link": 24 185 | }, 186 | { 187 | "name": "vae", 188 | "type": "VAE", 189 | "link": 8 190 | } 191 | ], 192 | "outputs": [ 193 | { 194 | "name": "IMAGE", 195 | "type": "IMAGE", 196 | "links": [ 197 | 25 198 | ], 199 | "slot_index": 0 200 | } 201 | ], 202 | "properties": { 203 | "Node name for S&R": "VAEDecode" 204 | }, 205 | "widgets_values": [] 206 | }, 207 | { 208 | "id": 11, 209 | "type": "RandomNoise", 210 | "pos": [ 211 | 135, 212 | -210 213 | ], 214 | "size": [ 215 | 315, 216 | 82 217 | ], 218 | "flags": {}, 219 | "order": 2, 220 | "mode": 0, 221 | "inputs": [], 222 | "outputs": [ 223 | { 224 | "name": "NOISE", 225 | "type": "NOISE", 226 | "links": [ 227 | 31 228 | ] 229 | } 230 | ], 231 | "properties": { 232 | "Node name for S&R": "RandomNoise" 233 | }, 234 | "widgets_values": [ 235 | 0, 236 | "fixed" 237 | ] 238 | }, 239 | { 240 | "id": 13, 241 | "type": "SamplerCustomAdvanced", 242 | "pos": [ 243 | 915, 244 | -45 245 | ], 246 | "size": [ 247 | 236.8000030517578, 248 | 326 249 | ], 250 | "flags": {}, 251 | "order": 10, 252 | "mode": 0, 253 | "inputs": [ 254 | { 255 | "name": "noise", 256 | "type": "NOISE", 257 | "link": 33 258 | }, 259 | { 260 | "name": "guider", 261 | "type": "GUIDER", 262 | "link": 19 263 | }, 264 | { 265 | "name": "sampler", 266 | "type": "SAMPLER", 267 | "link": 23 268 | }, 269 | { 270 | "name": "sigmas", 271 | "type": "SIGMAS", 272 | "link": 21 273 | }, 274 | { 275 | "name": "latent_image", 276 | "type": "LATENT", 277 | "link": 22 278 | } 279 | ], 280 | "outputs": [ 281 | { 
282 | "name": "output", 283 | "type": "LATENT", 284 | "links": [ 285 | 24 286 | ], 287 | "slot_index": 0 288 | }, 289 | { 290 | "name": "denoised_output", 291 | "type": "LATENT", 292 | "links": null 293 | } 294 | ], 295 | "properties": { 296 | "Node name for S&R": "SamplerCustomAdvanced" 297 | } 298 | }, 299 | { 300 | "id": 14, 301 | "type": "CFGGuider", 302 | "pos": [ 303 | 885, 304 | 495 305 | ], 306 | "size": [ 307 | 315, 308 | 98 309 | ], 310 | "flags": {}, 311 | "order": 9, 312 | "mode": 0, 313 | "inputs": [ 314 | { 315 | "name": "model", 316 | "type": "MODEL", 317 | "link": 18 318 | }, 319 | { 320 | "name": "positive", 321 | "type": "CONDITIONING", 322 | "link": 16 323 | }, 324 | { 325 | "name": "negative", 326 | "type": "CONDITIONING", 327 | "link": 17 328 | } 329 | ], 330 | "outputs": [ 331 | { 332 | "name": "GUIDER", 333 | "type": "GUIDER", 334 | "links": [ 335 | 19 336 | ], 337 | "slot_index": 0 338 | } 339 | ], 340 | "properties": { 341 | "Node name for S&R": "CFGGuider" 342 | }, 343 | "widgets_values": [ 344 | 8 345 | ] 346 | }, 347 | { 348 | "id": 15, 349 | "type": "KSamplerSelect", 350 | "pos": [ 351 | 825, 352 | -195 353 | ], 354 | "size": [ 355 | 315, 356 | 58 357 | ], 358 | "flags": {}, 359 | "order": 4, 360 | "mode": 0, 361 | "inputs": [], 362 | "outputs": [ 363 | { 364 | "name": "SAMPLER", 365 | "type": "SAMPLER", 366 | "links": [ 367 | 23 368 | ], 369 | "slot_index": 0 370 | } 371 | ], 372 | "properties": { 373 | "Node name for S&R": "KSamplerSelect" 374 | }, 375 | "widgets_values": [ 376 | "euler" 377 | ] 378 | }, 379 | { 380 | "id": 16, 381 | "type": "BasicScheduler", 382 | "pos": [ 383 | 480, 384 | -210 385 | ], 386 | "size": [ 387 | 315, 388 | 106 389 | ], 390 | "flags": {}, 391 | "order": 5, 392 | "mode": 0, 393 | "inputs": [ 394 | { 395 | "name": "model", 396 | "type": "MODEL", 397 | "link": 26 398 | } 399 | ], 400 | "outputs": [ 401 | { 402 | "name": "SIGMAS", 403 | "type": "SIGMAS", 404 | "links": [ 405 | 21 406 | ] 407 | } 408 | ], 409 | "properties": { 410 | "Node name for S&R": "BasicScheduler" 411 | }, 412 | "widgets_values": [ 413 | "normal", 414 | 20, 415 | 1 416 | ] 417 | }, 418 | { 419 | "id": 17, 420 | "type": "PreviewImage", 421 | "pos": [ 422 | 1470, 423 | -30 424 | ], 425 | "size": [ 426 | 330, 427 | 375 428 | ], 429 | "flags": {}, 430 | "order": 12, 431 | "mode": 0, 432 | "inputs": [ 433 | { 434 | "name": "images", 435 | "type": "IMAGE", 436 | "link": 25 437 | } 438 | ], 439 | "outputs": [], 440 | "properties": { 441 | "Node name for S&R": "PreviewImage" 442 | } 443 | }, 444 | { 445 | "id": 18, 446 | "type": "Note", 447 | "pos": [ 448 | 135, 449 | -60 450 | ], 451 | "size": [ 452 | 315, 453 | 150 454 | ], 455 | "flags": {}, 456 | "order": 0, 457 | "mode": 0, 458 | "inputs": [], 459 | "outputs": [], 460 | "properties": {}, 461 | "widgets_values": [ 462 | "You need to have the npnet models installed in models/npnet/ for the model to be selectable" 463 | ], 464 | "color": "#432", 465 | "bgcolor": "#653" 466 | }, 467 | { 468 | "id": 20, 469 | "type": "NPNetGoldenNoise", 470 | "pos": [ 471 | 480, 472 | -45 473 | ], 474 | "size": [ 475 | 315, 476 | 174 477 | ], 478 | "flags": {}, 479 | "order": 8, 480 | "mode": 0, 481 | "inputs": [ 482 | { 483 | "name": "noise", 484 | "type": "NOISE", 485 | "link": 31 486 | }, 487 | { 488 | "name": "prompt", 489 | "type": "CONDITIONING", 490 | "link": 32 491 | } 492 | ], 493 | "outputs": [ 494 | { 495 | "name": "NOISE", 496 | "type": "NOISE", 497 | "links": [ 498 | 33 499 | ] 500 | } 501 | ], 502 | "properties": { 503 | 
"Node name for S&R": "NPNetGoldenNoise" 504 | }, 505 | "widgets_values": [ 506 | "npnet-sdxl.safetensors", 507 | "cuda", 508 | "resize", 509 | "nearest-exact", 510 | "truncate" 511 | ] 512 | } 513 | ], 514 | "links": [ 515 | [ 516 | 3, 517 | 4, 518 | 1, 519 | 6, 520 | 0, 521 | "CLIP" 522 | ], 523 | [ 524 | 5, 525 | 4, 526 | 1, 527 | 7, 528 | 0, 529 | "CLIP" 530 | ], 531 | [ 532 | 8, 533 | 4, 534 | 2, 535 | 8, 536 | 1, 537 | "VAE" 538 | ], 539 | [ 540 | 16, 541 | 6, 542 | 0, 543 | 14, 544 | 1, 545 | "CONDITIONING" 546 | ], 547 | [ 548 | 17, 549 | 7, 550 | 0, 551 | 14, 552 | 2, 553 | "CONDITIONING" 554 | ], 555 | [ 556 | 18, 557 | 4, 558 | 0, 559 | 14, 560 | 0, 561 | "MODEL" 562 | ], 563 | [ 564 | 19, 565 | 14, 566 | 0, 567 | 13, 568 | 1, 569 | "GUIDER" 570 | ], 571 | [ 572 | 21, 573 | 16, 574 | 0, 575 | 13, 576 | 3, 577 | "SIGMAS" 578 | ], 579 | [ 580 | 22, 581 | 5, 582 | 0, 583 | 13, 584 | 4, 585 | "LATENT" 586 | ], 587 | [ 588 | 23, 589 | 15, 590 | 0, 591 | 13, 592 | 2, 593 | "SAMPLER" 594 | ], 595 | [ 596 | 24, 597 | 13, 598 | 0, 599 | 8, 600 | 0, 601 | "LATENT" 602 | ], 603 | [ 604 | 25, 605 | 8, 606 | 0, 607 | 17, 608 | 0, 609 | "IMAGE" 610 | ], 611 | [ 612 | 26, 613 | 4, 614 | 0, 615 | 16, 616 | 0, 617 | "MODEL" 618 | ], 619 | [ 620 | 31, 621 | 11, 622 | 0, 623 | 20, 624 | 0, 625 | "NOISE" 626 | ], 627 | [ 628 | 32, 629 | 6, 630 | 0, 631 | 20, 632 | 1, 633 | "CONDITIONING" 634 | ], 635 | [ 636 | 33, 637 | 20, 638 | 0, 639 | 13, 640 | 0, 641 | "NOISE" 642 | ] 643 | ], 644 | "groups": [], 645 | "config": {}, 646 | "extra": { 647 | "ds": { 648 | "scale": 0.9229599817706623, 649 | "offset": [ 650 | 249.41173457307823, 651 | 409.79156200044184 652 | ] 653 | }, 654 | "ue_links": [], 655 | "controller_panel": { 656 | "controllers": {}, 657 | "hidden": true, 658 | "highlight": true, 659 | "version": 2, 660 | "default_order": [] 661 | }, 662 | "groupNodes": {} 663 | }, 664 | "version": 0.4 665 | } --------------------------------------------------------------------------------