├── .gitignore
├── pyproject.toml
├── __init__.py
├── scripts
└── smea.py
├── LICENSE
├── smea_sampling.py
└── README.md
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "euler-smea-dyn-sampler"
3 | description = "ComfyUI version of [a/Euler Smea Dyn Sampler](https://github.com/Koishi-Star/Euler-Smea-Dyn-Sampler). It adds samplers directly to KSampler nodes."
4 | version = "1.0.0"
5 | license = {file = "LICENSE"}
6 |
7 | [project.urls]
8 | Repository = "https://github.com/Koishi-Star/Euler-Smea-Dyn-Sampler"
9 | # Used by Comfy Registry https://comfyregistry.org
10 |
11 | [tool.comfy]
12 | PublisherId = ""
13 | DisplayName = "Euler-Smea-Dyn-Sampler"
14 | Icon = ""
15 |
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
1 | from . import smea_sampling
2 | from .smea_sampling import sample_euler_dy, sample_euler_smea_dy, sample_euler_negative, sample_euler_dy_negative, sample_Kohaku_LoNyu_Yog
3 |
4 | if smea_sampling.BACKEND == "ComfyUI":
5 | if not smea_sampling.INITIALIZED:
6 | from comfy.k_diffusion import sampling as k_diffusion_sampling
7 | from comfy.samplers import SAMPLER_NAMES
8 |
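        # ComfyUI's KSampler resolves each entry in SAMPLER_NAMES by looking up
        # "sample_" + name on k_diffusion.sampling, so attach the functions there
        # and register the short names below.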
9 | setattr(k_diffusion_sampling, "sample_euler_dy", sample_euler_dy)
10 | setattr(k_diffusion_sampling, "sample_euler_smea_dy", sample_euler_smea_dy)
11 | setattr(k_diffusion_sampling, "sample_euler_negative", sample_euler_negative)
12 | setattr(k_diffusion_sampling, "sample_euler_dy_negative", sample_euler_dy_negative)
13 | setattr(k_diffusion_sampling, "sample_Kohaku_LoNyu_Yog", sample_Kohaku_LoNyu_Yog)
14 |
15 | SAMPLER_NAMES.append("euler_dy")
16 | SAMPLER_NAMES.append("euler_smea_dy")
17 | SAMPLER_NAMES.append("euler_negative")
18 | SAMPLER_NAMES.append("euler_dy_negative")
19 |         SAMPLER_NAMES.append("Kohaku_LoNyu_Yog")
20 |
21 | smea_sampling.INITIALIZED = True
22 |
23 | NODE_CLASS_MAPPINGS = {}
24 |
--------------------------------------------------------------------------------
/scripts/smea.py:
--------------------------------------------------------------------------------
1 | try:
2 | import smea_sampling
3 | from smea_sampling import sample_euler_dy, sample_euler_smea_dy, sample_euler_negative, sample_euler_dy_negative, sample_Kohaku_LoNyu_Yog
4 |
5 | if smea_sampling.BACKEND == "WebUI":
6 | from modules import scripts, sd_samplers_common, sd_samplers
7 | from modules.sd_samplers_kdiffusion import sampler_extra_params, KDiffusionSampler
8 |
9 | class SMEA(scripts.Script):
10 | def title(self):
11 |                 return "SMEA Samplers"
12 |
13 | def show(self, is_img2img):
14 | return False
15 |
16 | def __init__(self):
17 | if not smea_sampling.INITIALIZED:
18 | samplers_smea = [
19 | ("Euler Dy", sample_euler_dy, ["k_euler_dy"], {}),
20 | ("Euler SMEA Dy", sample_euler_smea_dy, ["k_euler_smea_dy"], {}),
21 | ("Euler Negative", sample_euler_negative, ["k_euler_negative"], {}),
22 | ("Euler Negative Dy", sample_euler_dy_negative, ["k_euler_negative_dy"], {}),
23 | ("Kohaku_LoNyu_Yog", sample_Kohaku_LoNyu_Yog, ["k_euler_Kohaku_LoNyu_Yog"], {}),
24 | ]
25 | samplers_data_smea = [
26 | sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options)
27 | for label, funcname, aliases, options in samplers_smea
28 | if callable(funcname)
29 | ]
30 | sampler_extra_params["sample_euler_dy"] = ["s_churn", "s_tmin", "s_tmax", "s_noise"]
31 | sampler_extra_params["sample_euler_smea_dy"] = ["s_churn", "s_tmin", "s_tmax", "s_noise"]
32 | sampler_extra_params["sample_euler_negative"] = ["s_churn", "s_tmin", "s_tmax", "s_noise"]
33 | sampler_extra_params["sample_euler_dy_negative"] = ["s_churn", "s_tmin", "s_tmax", "s_noise"]
34 | sampler_extra_params["sample_Kohaku_LoNyu_Yog"] = ["s_churn", "s_tmin", "s_tmax", "s_noise"]
35 | sd_samplers.all_samplers.extend(samplers_data_smea)
36 | sd_samplers.all_samplers_map = {x.name: x for x in sd_samplers.all_samplers}
37 | sd_samplers.set_samplers()
38 | smea_sampling.INITIALIZED = True
39 |
40 | except ImportError as _:
41 | pass
42 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2024 KBlueLeaf
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/smea_sampling.py:
--------------------------------------------------------------------------------
1 | from importlib import import_module
2 | from tqdm.auto import trange
3 | import torch
4 |
5 | sampling = None
6 | BACKEND = None
7 | INITIALIZED = False
8 |
9 | if not BACKEND:
10 | try:
11 | _ = import_module("modules.sd_samplers_kdiffusion")
12 | sampling = import_module("k_diffusion.sampling")
13 | BACKEND = "WebUI"
14 | except ImportError as _:
15 | pass
16 |
17 | if not BACKEND:
18 | try:
19 | sampling = import_module("comfy.k_diffusion.sampling")
20 | BACKEND = "ComfyUI"
21 | except ImportError as _:
22 | pass
23 |
24 |
25 | class _Rescaler:
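    # Temporarily resizes the latents/masks the backend caches on the model so they
    # match a rescaled x; the originals are restored on __exit__.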
26 | def __init__(self, model, x, mode, **extra_args):
27 | self.model = model
28 | self.x = x
29 | self.mode = mode
30 | self.extra_args = extra_args
31 | if BACKEND == "WebUI":
32 | self.init_latent, self.mask, self.nmask = model.init_latent, model.mask, model.nmask
33 | if BACKEND == "ComfyUI":
34 | self.latent_image, self.noise = model.latent_image, model.noise
35 | self.denoise_mask = self.extra_args.get("denoise_mask", None)
36 |
37 | def __enter__(self):
38 | if BACKEND == "WebUI":
39 | if self.init_latent is not None:
40 | self.model.init_latent = torch.nn.functional.interpolate(input=self.init_latent, size=self.x.shape[2:4], mode=self.mode)
41 | if self.mask is not None:
42 | self.model.mask = torch.nn.functional.interpolate(input=self.mask.unsqueeze(0), size=self.x.shape[2:4], mode=self.mode).squeeze(0)
43 | if self.nmask is not None:
44 | self.model.nmask = torch.nn.functional.interpolate(input=self.nmask.unsqueeze(0), size=self.x.shape[2:4], mode=self.mode).squeeze(0)
45 | if BACKEND == "ComfyUI":
46 | if self.latent_image is not None:
47 | self.model.latent_image = torch.nn.functional.interpolate(input=self.latent_image, size=self.x.shape[2:4], mode=self.mode)
48 | if self.noise is not None:
49 |                 self.model.noise = torch.nn.functional.interpolate(input=self.noise, size=self.x.shape[2:4], mode=self.mode)
50 | if self.denoise_mask is not None:
51 | self.extra_args["denoise_mask"] = torch.nn.functional.interpolate(input=self.denoise_mask, size=self.x.shape[2:4], mode=self.mode)
52 |
53 | return self
54 |
55 | def __exit__(self, type, value, traceback):
56 | if BACKEND == "WebUI":
57 | del self.model.init_latent, self.model.mask, self.model.nmask
58 | self.model.init_latent, self.model.mask, self.model.nmask = self.init_latent, self.mask, self.nmask
59 | if BACKEND == "ComfyUI":
60 | del self.model.latent_image, self.model.noise
61 | self.model.latent_image, self.model.noise = self.latent_image, self.noise
62 |
63 |
64 | def default_noise_sampler(x):
65 | return lambda sigma, sigma_next: torch.randn_like(x)
66 |
67 |
68 | def get_ancestral_step(sigma_from, sigma_to, eta=1.):
69 | """Calculates the noise level (sigma_down) to step down to and the amount
70 | of noise to add (sigma_up) when doing an ancestral sampling step."""
71 | if not eta:
72 | return sigma_to, 0.
73 | sigma_up = min(sigma_to, eta * (sigma_to ** 2 * (sigma_from ** 2 - sigma_to ** 2) / sigma_from ** 2) ** 0.5)
74 | sigma_down = (sigma_to ** 2 - sigma_up ** 2) ** 0.5
75 | return sigma_down, sigma_up
76 |
77 |
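# One "dy" step: take the bottom-right pixel of every 2x2 block, run a single Euler
# step on that half-resolution latent, then write the updated pixels back in place.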
78 | @torch.no_grad()
79 | def dy_sampling_step(x, model, dt, sigma_hat, **extra_args):
80 | original_shape = x.shape
81 | batch_size, channels, m, n = original_shape[0], original_shape[1], original_shape[2] // 2, original_shape[3] // 2
82 | extra_row = x.shape[2] % 2 == 1
83 | extra_col = x.shape[3] % 2 == 1
84 |
85 | if extra_row:
86 | extra_row_content = x[:, :, -1:, :]
87 | x = x[:, :, :-1, :]
88 | if extra_col:
89 | extra_col_content = x[:, :, :, -1:]
90 | x = x[:, :, :, :-1]
91 |
92 | a_list = x.unfold(2, 2, 2).unfold(3, 2, 2).contiguous().view(batch_size, channels, m * n, 2, 2)
93 | c = a_list[:, :, :, 1, 1].view(batch_size, channels, m, n)
94 |
95 | with _Rescaler(model, c, 'nearest-exact', **extra_args) as rescaler:
96 | denoised = model(c, sigma_hat * c.new_ones([c.shape[0]]), **rescaler.extra_args)
97 | d = sampling.to_d(c, sigma_hat, denoised)
98 | c = c + d * dt
99 |
100 | d_list = c.view(batch_size, channels, m * n, 1, 1)
101 | a_list[:, :, :, 1, 1] = d_list[:, :, :, 0, 0]
102 | x = a_list.view(batch_size, channels, m, n, 2, 2).permute(0, 1, 2, 4, 3, 5).reshape(batch_size, channels, 2 * m, 2 * n)
103 |
104 | if extra_row or extra_col:
105 | x_expanded = torch.zeros(original_shape, dtype=x.dtype, device=x.device)
106 | x_expanded[:, :, :2 * m, :2 * n] = x
107 | if extra_row:
108 | x_expanded[:, :, -1:, :2 * n + 1] = extra_row_content
109 | if extra_col:
110 | x_expanded[:, :, :2 * m, -1:] = extra_col_content
111 | if extra_row and extra_col:
112 | x_expanded[:, :, -1:, -1:] = extra_col_content[:, :, -1:, :]
113 | x = x_expanded
114 |
115 | return x
116 |
117 |
118 | @torch.no_grad()
119 | def sample_euler_dy(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0.,
120 | s_tmax=float('inf'), s_noise=1.):
121 | extra_args = {} if extra_args is None else extra_args
122 | s_in = x.new_ones([x.shape[0]])
123 | for i in trange(len(sigmas) - 1, disable=disable):
124 | # print(i)
125 |         # i is 0 on the first step
126 | gamma = max(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
127 | eps = torch.randn_like(x) * s_noise
128 | sigma_hat = sigmas[i] * (gamma + 1)
129 | # print(sigma_hat)
130 | dt = sigmas[i + 1] - sigma_hat
131 | if gamma > 0:
132 | x = x - eps * (sigma_hat ** 2 - sigmas[i] ** 2) ** 0.5
133 | denoised = model(x, sigma_hat * s_in, **extra_args)
134 | d = sampling.to_d(x, sigma_hat, denoised)
135 | if sigmas[i + 1] > 0:
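            # integer division: i // 2 == 1 only when i is 2 or 3, so the dy step runs on those two steps only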
136 | if i // 2 == 1:
137 | x = dy_sampling_step(x, model, dt, sigma_hat, **extra_args)
138 | if callback is not None:
139 | callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
140 | # Euler method
141 | x = x + d * dt
142 | return x
143 |
144 |
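# One SMEA step: upscale the latent by 1.25x, run a single Euler step at the larger
# size, then downscale back to the original resolution.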
145 | @torch.no_grad()
146 | def smea_sampling_step(x, model, dt, sigma_hat, **extra_args):
147 | m, n = x.shape[2], x.shape[3]
148 | x = torch.nn.functional.interpolate(input=x, scale_factor=(1.25, 1.25), mode='nearest-exact')
149 | with _Rescaler(model, x, 'nearest-exact', **extra_args) as rescaler:
150 | denoised = model(x, sigma_hat * x.new_ones([x.shape[0]]), **rescaler.extra_args)
151 | d = sampling.to_d(x, sigma_hat, denoised)
152 | x = x + d * dt
153 |     x = torch.nn.functional.interpolate(input=x, size=(m, n), mode='nearest-exact')
154 | return x
155 |
156 |
157 | @torch.no_grad()
158 | def sample_euler_smea_dy(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0.,
159 | s_tmax=float('inf'), s_noise=1.):
160 | extra_args = {} if extra_args is None else extra_args
161 | s_in = x.new_ones([x.shape[0]])
162 | for i in trange(len(sigmas) - 1, disable=disable):
163 | gamma = max(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
164 | eps = torch.randn_like(x) * s_noise
165 | sigma_hat = sigmas[i] * (gamma + 1)
166 | dt = sigmas[i + 1] - sigma_hat
167 | if gamma > 0:
168 | x = x - eps * (sigma_hat ** 2 - sigmas[i] ** 2) ** 0.5
169 | denoised = model(x, sigma_hat * s_in, **extra_args)
170 | d = sampling.to_d(x, sigma_hat, denoised)
171 | # Euler method
172 | x = x + d * dt
173 | if sigmas[i + 1] > 0:
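            # note: "i + 1 // 2" parses as i + (1 // 2) == i, so the dy step fires only at i == 1
            # and the smea step only at i == 0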
174 | if i + 1 // 2 == 1:
175 | x = dy_sampling_step(x, model, dt, sigma_hat, **extra_args)
176 | if i + 1 // 2 == 0:
177 | x = smea_sampling_step(x, model, dt, sigma_hat, **extra_args)
178 | if callback is not None:
179 | callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
180 | return x
181 |
182 | @torch.no_grad()
183 | def sample_euler_negative(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0.,
184 | s_tmax=float('inf'), s_noise=1.):
185 | extra_args = {} if extra_args is None else extra_args
186 | s_in = x.new_ones([x.shape[0]])
187 | for i in trange(len(sigmas) - 1, disable=disable):
188 | # print(i)
189 |         # i is 0 on the first step
190 | gamma = max(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
191 | eps = torch.randn_like(x) * s_noise
192 | sigma_hat = sigmas[i] * (gamma + 1)
193 | # print(sigma_hat)
194 | dt = sigmas[i + 1] - sigma_hat
195 | if gamma > 0:
196 | x = x - eps * (sigma_hat ** 2 - sigmas[i] ** 2) ** 0.5
197 | denoised = model(x, sigma_hat * s_in, **extra_args)
198 | d = sampling.to_d(x, sigma_hat, denoised)
199 | if callback is not None:
200 | callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
201 | # Euler method
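        # "negative" variant: on steps 2 and 3 (i // 2 == 1), the Euler update x + d*dt is replaced with -x - d*dt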
202 | if sigmas[i + 1] > 0 and i // 2 == 1:
203 | x = - x - d * dt
204 | else:
205 | x = x + d * dt
206 | return x
207 |
208 |
209 | @torch.no_grad()
210 | def sample_euler_dy_negative(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0.,
211 | s_tmax=float('inf'), s_noise=1.):
212 | extra_args = {} if extra_args is None else extra_args
213 | s_in = x.new_ones([x.shape[0]])
214 | for i in trange(len(sigmas) - 1, disable=disable):
215 | # print(i)
216 |         # i is 0 on the first step
217 | gamma = max(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
218 | eps = torch.randn_like(x) * s_noise
219 | sigma_hat = sigmas[i] * (gamma + 1)
220 | # print(sigma_hat)
221 | dt = sigmas[i + 1] - sigma_hat
222 | if gamma > 0:
223 | x = x - eps * (sigma_hat ** 2 - sigmas[i] ** 2) ** 0.5
224 | denoised = model(x, sigma_hat * s_in, **extra_args)
225 | d = sampling.to_d(x, sigma_hat, denoised)
226 | if callback is not None:
227 | callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
228 | # Euler method
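        # on steps 2 and 3 (i // 2 == 1), take a dy step first, then apply the negated Euler update -x - d*dt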
229 | if sigmas[i + 1] > 0 and i // 2 == 1:
230 | x = dy_sampling_step(x, model, dt, sigma_hat, **extra_args)
231 | x = - x - d * dt
232 | else:
233 | x = x + d * dt
234 | return x
235 |
236 |
237 | @torch.no_grad()
238 | def sample_Kohaku_LoNyu_Yog(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0.,
239 | s_tmax=float('inf'), s_noise=1., noise_sampler=None, eta=1.):
240 | """Kohaku_LoNyu_Yog"""
241 | extra_args = {} if extra_args is None else extra_args
242 | s_in = x.new_ones([x.shape[0]])
243 | noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
244 | for i in trange(len(sigmas) - 1, disable=disable):
245 | gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
246 | eps = torch.randn_like(x) * s_noise
247 | sigma_hat = sigmas[i] * (gamma + 1)
248 | if gamma > 0:
249 | x = x + eps * (sigma_hat ** 2 - sigmas[i] ** 2) ** 0.5
250 | denoised = model(x, sigma_hat * s_in, **extra_args)
251 | d = sampling.to_d(x, sigma_hat, denoised)
252 | sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
253 | if callback is not None:
254 | callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
255 | dt = sigma_down - sigmas[i]
256 |
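        # first half of the steps: second-order correction using the mirrored point -x; second half: plain Euler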
257 | if i <= (len(sigmas) - 1) / 2:
258 | x2 = - x
259 | denoised2 = model(x2, sigma_hat * s_in, **extra_args)
260 | d2 = sampling.to_d(x2, sigma_hat, denoised2)
261 |
262 | x3 = x + ((d + d2) / 2) * dt
263 | denoised3 = model(x3, sigma_hat * s_in, **extra_args)
264 | d3 = sampling.to_d(x3, sigma_hat, denoised3)
265 |
266 | real_d = (d + d3) / 2
267 | x = x + real_d * dt
268 |
269 | x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * sigma_up
270 | else:
271 | x = x + d * dt
272 | return x
273 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | | Catalog |
2 | | ---- |
3 | | [2024.08.31 Ready To Add Kohaku_LoNyu_Yog Sampler](#section14) |
4 | | [2024.05.09 Add Euler Negative And Euler Dy Negative](#section1) |
5 | | [2024.04.24](#section2) |
6 | | [2024.04.18 Stage Technical Report](#section3) |
7 | | [2024.04.15 Compatible with Stable Cascade Models](#section4) |
8 | | [2024.04.13 Fix Bug In ComfyUI](#section5) |
9 | | [2024.04.11 Important! This repo can be used as an extension!](#section6) |
10 | | [2024.04.10](#section7) |
11 | | [2024.04.09](#section8) |
12 | | [Euler Smea Dyn Sampler](#section9) |
13 | | [Effect](#section10) |
14 | | [How to use (outdated, retained for reference)](#section11) |
15 | | [The technical principles](#section12) |
16 | | [Contact the author](#section13) |
17 |
18 |
19 | ## 2024.08.31 Ready To Add Kohaku_LoNyu_Yog Sampler
20 |
21 | Add a new sampler named Kohaku_LoNyu_Yog. Recommended number of steps: 10. Since it is a second-order method, it is slower than the other methods.
24 |
25 | Principle: please refer to the two images below. Since three-dimensional space is a subspace of higher-dimensional space, any operation that works in three dimensions must also be feasible in the higher-dimensional space. I therefore use some geometric tricks (as shown in Figure 1), under the assumption that the tensor and the target image can each be simplified to a moving point. All statements in this section refer to this simplified, three-dimensional form of the tensor.
26 |
27 | First, we take -x and compute the gradients d (at x) and d2 (at -x). From Figure 2, we can easily deduce geometrically that (d+d2)/2 must be a vector pointing down toward the target, so x+(d+d2)/2 must be a point closer to the target region A. Denoising at that point yields the velocity vector d3, and as the figure shows, (d+d3)/2 is closer to the true target region.
28 |
29 | In the last few steps of sampling, you will find that this method actually drifts away from the true region; you can verify this by plotting the image. So we only apply it during the first half of the steps.
30 |
31 | You may object that if the trajectory of x is concave, this theory does not hold at all. That is completely correct. However, this sampling method works consistently in practice, which suggests that the projection of x onto two or three dimensions behaves like a convex function.
32 |
33 | This sampler does not bring a significant improvement in quality or speed (quality may improve slightly), but I believe it demonstrates several things, such as the feasibility of analyzing the process with geometric methods in three-dimensional space.
34 |
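For reference, this is the core of the per-step update as implemented in `smea_sampling.py` (condensed; `d` is the gradient at `x`, and `dt` and `sigma_up` come from the ancestral step):

```python
if i <= (len(sigmas) - 1) / 2:                 # only the first half of the steps
    x2 = -x                                    # the mirrored point
    d2 = to_d(x2, sigma_hat, model(x2, sigma_hat * s_in, **extra_args))

    x3 = x + ((d + d2) / 2) * dt               # probe point closer to the target region A
    d3 = to_d(x3, sigma_hat, model(x3, sigma_hat * s_in, **extra_args))

    x = x + ((d + d3) / 2) * dt                # averaged gradient, second-order style
    x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * sigma_up
else:
    x = x + d * dt                             # plain Euler for the remaining steps
```
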
35 | **I am currently goofing off at the company, so test cases and plugins will be submitted later.**
36 |
37 | 
38 |
39 | 
61 |
62 | ## 2024.05.09 Add Euler Negative And Euler Dy Negative
63 |
72 | Two new samplers have been added: Euler Negative and Euler Dy Negative. I won't claim they perform better than the others, because there's no theoretical basis for it. However, in practice I quite like them.
73 |
74 | They perform slightly better in SDXL, but their performance in SD1.5 is also decent.
75 |
76 | I need to brush up on my professional knowledge of AI. Currently, relying solely on intuition and practice feels too unrestricted.
77 |
78 | Below are the test results for them:
79 |
80 | **768x768, model meinaMixV11**
81 | 
82 |
83 | **832x1216, model kohaku-xl-epsilon**
84 | 
85 | 
86 | 
87 |
88 | **832x1216, model animegineV30**
89 | 
90 |
91 | **Please Note**
92 |
95 | The current plugin has a few minor bugs that cause the characters to shrink in the frame, like this:
96 |
97 | 
98 | 
99 |
102 | So you can consider alternative workarounds, such as adding these two samplers by modifying the source code. Please refer to: [How to use](#section11)
103 |
104 |
105 | ## 2024.04.24
106 |
115 | A brief analysis of how Dy Step works. Recently I tested Euler Dy on https://civitai.com/models/399873/kohaku-xl-epsilon, and the results were unsatisfactory (although it performs well on ang3 and the Pony series). So I consulted the author and received the response that "the model did not use any low-resolution images for training." I believe this is the reason: Euler Dy places the image on a small scale, which brings the denoising work into the model's comfort zone and gives it a reference. Especially on SD1.5, Euler Dy ensures the image always stays within the model's comfort zone.
116 |
117 | However, in the current SDXL model, it has almost forgotten how to generate images on a small scale. Therefore, the direction for improving Dy Step has become apparent: to find the comfort zone of the SDXL model and enable the sampler to work within the comfort zone of SDXL.
118 |
119 | I've also written a few other samplers, but their performance is mediocre and doesn't match up to Dy Step's effectiveness. If anyone wants to try them out, please leave a comment in the discussions.
120 |
121 |
122 | ## 2024.04.18 Stage Technical Report
123 |
130 | Over the past few days, I have tried more than twenty strategies, but the resulting sampler quality always lands above Euler a yet below Euler Dy, so I cannot release Euler Dy a for the time being. I must acknowledge that this is a different thing from NAI3's Dyn. I will keep maintaining this project over the long term and keep working toward new sampling methods, while trying to reduce AI compute requirements as much as possible.
131 |
132 | 
133 |
134 |
135 |
136 | ## 2024.04.15 Compatible with Stable Cascade Models
137 |
138 | Makes dy_step respect the original channel count, which makes it compatible with Stable Cascade models.
141 |
142 |
143 | ## 2024.04.13 Fix Bug In ComfyUI
144 |
145 | Changed code for the ComfyUI import. This fixes the overwrite error that occurs in ComfyUI when another extension uses `scripts` as its import folder (I really hope ComfyUI will standardize its interfaces and version dependencies).
146 |
147 | P.S. You may notice some meaningless commits; that is because I am not familiar with GitHub and needed a few tries. Please don't mind them.
152 |
153 |
154 | ## 2024.04.11 Important! This repo can be used as an extension!
155 |
156 | Thanks to @pamparamm; his selfless work has been a great help.
157 |
158 | Now this sampler can be used as an extension for **ComfyUI** and **Automatic1111's WebUI**.
159 |
160 | The inpainting bug has been fixed. (**At least it no longer throws any exceptions.**)
161 |
162 | Thanks again.
163 |
164 | Another extension, from @licyk: https://github.com/licyk/advanced_euler_sampler_extension, **suitable for WebUI 1.8**.
165 |
166 | It's also useful; thanks to licyk for the hard work as well.
167 |
168 | In the future, I will work on making dy step compatible with more samplers (such as the DPM series).
169 |
183 |
184 |
185 | ## 2024.04.10
186 |
187 | 
188 |
189 | Found a way to avoid errors during inpainting and when using extensions.
190 |
191 | **Please note that this is just a temporary workaround and doesn't actually resolve the issue. It will fall back to the plain Euler method if an error occurs.**
192 |
193 | P.S. I keep trying to fix it... but nothing seems to work. I've spent more than 36 hours on it.
194 |
195 | **Suggestions from anyone are welcome**.
196 |
197 | I need to take a short break and prepare for my other project. *<== A mobile app based on Flutter, for TRPG. (No worries, I'm not giving up this project, and I'm not trying to divert traffic either. LOL.)*
198 |
206 |
207 |
208 | ## 2024.04.09
209 |
210 | Added `__init__.py` for ComfyUI. Thanks to CapsAdmin. I don't use ComfyUI, so I can't tell you how to set it up, sorry.
211 |
213 |
214 |
215 | ## Euler Smea Dyn Sampler
216 |
217 | A sampling method based on Euler's approach, designed to generate superior imagery.
218 |
219 | The Dyn sampler can significantly mitigate the structural and limb collapse that occurs when generating large images, and to a great extent it can produce better hand depictions (not perfect, but better than existing sampling methods).

In theory, the SMEA sampler adds detail to the image (**though it cannot reproduce NAI3's effect of making images sparkle**).
220 |
221 | The SMEA sampler is designed to accommodate the majority of image sizes, with particularly outstanding performance on larger images. It also supports the generation of images in unconventional sizes that lack sufficient training data (for example, running 512x512 in SDXL, 823x1216 in SD1.5, as well as 640x960, etc.).
222 |
223 | The SMEA sampler performs very well in SD1.5, but the effects are not as pronounced in SDXL.
224 |
225 | In terms of computational resource consumption, Euler Dy is approximately equivalent to Euler a, while the Euler SMEA Dy sampler consumes more compute, roughly 1.25 times as much.
226 |
238 |
239 |
240 | ## Effect
241 | **SD1.5, test model AnythingV5-Prt-RE, test pose Heart Hand (a pose that easily produces malformed hands)**
244 |
245 | 768x768, without Lora:
246 | 
247 | 768x768, with Lora:
248 | 
249 | 832x1216, without Lora:
250 | 
251 | 832x1216, with Lora:
252 | 
253 |
254 | **SDXL, test model animagineXLV31, also tested with hand poses**
257 |
258 | 768x768:
259 | 
260 | 832x1216:
261 | 
262 | 
263 |
264 |
265 | ## How to use (This has become outdated, but it is retained for reference)
266 |
269 | **Step 1:** Navigate to the `k_diffusion` folder within the `sd-webui-aki-v4.6\repositories\k-diffusion` directory and open the `sampling.py` file within it (this can be done using a text editor like Notepad, which will be referred to as File 1).
270 | 
271 |
274 | **Step 2:** Copy the entire content from the `sampling.py` file in the current repository and paste it at the end of File 1.
275 | 
276 | (To present the complete picture, I have utilized PyTorch's abbreviation feature.)
277 |
278 | **Step 3:** Open the `sd_samplers_kdiffusion.py` file located in the `sd-webui-aki-v4.6\modules` directory (refer to this as File 2).
279 | 
280 |
281 | **Step 4:** Copy the following two lines from this repository:
282 | 
283 |
284 | Paste them into File 2:
285 | 
286 |
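The two lines in the screenshots follow the same pattern as the tuples this repository registers in `scripts/smea.py`. For reference, they should look roughly like the following additions to the `samplers_k_diffusion` list (a reconstruction based on that script, not a verbatim copy of the screenshot):

```python
('Euler Dy', 'sample_euler_dy', ['k_euler_dy'], {}),
('Euler SMEA Dy', 'sample_euler_smea_dy', ['k_euler_smea_dy'], {}),
```
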
287 | **Step 5:** Restart the webui, and you will see:
288 | 
289 |
290 | Now you can start using them. There may be some bugs in img2img, and I welcome reports (please include screenshots or error messages).
293 |
294 |
295 | ## The technical principles
296 |
303 | In simple terms, the dyn method extracts a regular subsample of the image, denoises it, and then merges it back into the original image. In theory this should be equivalent to Euler a, but the noise-injection step is replaced with guided noise.
304 |
305 | The SMEA method enlarges the image's latent and then compresses it back to its original size, thereby increasing the range of possible image variations. I apologize that I was unable to reproduce the subtle glow that SMEA gives images in NAI3.
306 |
307 | A piece of advice: Do not trust PyTorch's interpolation methods for enlarging and shrinking images; they will not contribute to improving image quality. Additionally, replacing random noise with conditional guidance is also a promising path forward.
308 |
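Concretely, the two steps boil down to the following sketch (simplified from `dy_sampling_step` and `smea_sampling_step` in `smea_sampling.py`, omitting the `_Rescaler` bookkeeping and the odd-size handling):

```python
import torch.nn.functional as F
from k_diffusion.sampling import to_d

def dyn_step_sketch(x, model, dt, sigma_hat, **extra_args):
    # pull the bottom-right pixel out of every 2x2 block and denoise that
    # half-size latent with one Euler step (the real code writes it back in place)
    c = x.unfold(2, 2, 2).unfold(3, 2, 2).contiguous()[..., 1, 1]
    s_in = c.new_ones([c.shape[0]])
    return c + to_d(c, sigma_hat, model(c, sigma_hat * s_in, **extra_args)) * dt

def smea_step_sketch(x, model, dt, sigma_hat, **extra_args):
    # enlarge the latent, take one Euler step at the larger size, then shrink back
    m, n = x.shape[2], x.shape[3]
    x = F.interpolate(x, scale_factor=(1.25, 1.25), mode='nearest-exact')
    s_in = x.new_ones([x.shape[0]])
    x = x + to_d(x, sigma_hat, model(x, sigma_hat * s_in, **extra_args)) * dt
    return F.interpolate(x, size=(m, n), mode='nearest-exact')
```
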
309 |
310 | ## Contact the author
311 |
312 | Email: 872324454@qq.com
313 |
314 | Bilibili: 星河主炮发射
315 |
--------------------------------------------------------------------------------