├── .github └── FUNDING.yml ├── .gitignore ├── CHANGELOG.md ├── LICENSE ├── README.md ├── javascript └── vec_cc.js ├── lib_cc ├── __init__.py ├── callback.py ├── colorpicker.py ├── const.py ├── scaling.py ├── settings.py ├── style.py └── xyz.py ├── samples ├── 00.jpg ├── 01.jpg ├── 02.jpg ├── 03.jpg ├── api_example.json ├── method.jpg └── scaling.jpg ├── scripts ├── cc.py ├── cc_hdr.py ├── dot.png └── vectorscope.png └── style.css /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | ko_fi: haoming 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | styles.json 3 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ### v2.3.2 - 2024 Nov.06 2 | - Linting *(`internal`)* 3 | 4 | ### v2.3.1 - 2024 Nov.04 5 | - Implement **Range** Settings 6 | 7 | ### v2.3.0 - 2024 Sep.20 8 | - Refactor 9 | 10 | ### v2.2.6 - 2024 Sep.18 11 | - Allow disabling `do_not_save_to_config` to use **Defaults** 12 | 13 | ### v2.2.5 - 2024 Aug.30 14 | - Correct Y'CbCr **Conversion**? 15 | 16 | ### v2.2.4 - 2024 Aug.28 17 | - Optimization *(`internal`)* 18 | - Improve Color **Accuracy** ~~Slightly~~ 19 | 20 | ### v2.2.3 - 2024 Aug.27 21 | - Lib *(`internal`)* 22 | 23 | ### v2.2.2 - 2024 Aug.27 24 | - Fix **Color Picker** for Gradio **4** 25 | 26 | ### v2.2.1 - 2024 Aug.02 27 | - `@torch.inference_mode()` 28 | 29 | ### v2.2.0 - 2024 Jul.03 30 | - Add `Adv.` in **Styles Presets** 31 | - Improve **Consts** Logics *(`internal`)* 32 | 33 | ### v2.1.0 - 2024 Jul.03 34 | - Support **Randomize** Forever 35 | 36 | ### v2.0.3 - 2024 Jun.25 37 | - Format 38 | 39 | ### v2.0.2 - 2024 Jun.24 40 | - Bug Fix 41 | 42 | ### v2.0.1 - 2024 Mar.07 43 | - Support **ADetailer** 44 | 45 | ### v2.0.0 - 2024 Mar.05 46 | - Improved Logics 47 | 48 | ### v2.0.epsilon - 2024 Mar.04 49 | - Improved Logics 50 | 51 | ### v2.0.delta - 2024 Mar.04 52 | - Support **SDXL** 53 | 54 | ### v2.0.gamma - 2024 Mar.01 55 | - Major **Rewrite** & **Optimization** 56 | 57 | ### v2.0.beta - 2024 Feb.29 58 | - Revert Sampler **Hook** *(`internal`)* 59 | 60 | ### v2.0.alpha - 2024 Feb.29 61 | - Changed Sampler **Hook** *(`internal`)* 62 | - Removed **LFS** *(`GitHub`)* 63 | 64 | ### v1.5.1 - 2023 Dec.03 65 | - Bug Fix by. **catboxanon** 66 | 67 | ### v1.5.0 - 2023 Nov.08 68 | - Rewrote **Callback** logic *(`internal`)* 69 | 70 | ### v1.4.10 - 2023 Nov.01 71 | - Better **Hires. fix** logic 72 | 73 | ### v1.4.9 - 2023 Nov.01 74 | - Improve Sliders values refresh 75 | 76 | ### v1.4.8 - 2023 Nov.01 77 | - Removed "**magic numbers**" 78 | 79 | ### v1.4.7 - 2023 Nov.01 80 | - Removed **Skip** parameter 81 | 82 | ### v1.4.6 - 2023 Sep.19 83 | - Add **HDR** Script 84 | 85 | ### v1.4.5 - 2023 Sep.13 86 | - Bug Fix for Color Wheel in `img2img` 87 | - Minor Formatting 88 | 89 | ### v1.4.4 - 2023 Sep.13 90 | - Add **Infotext** Support by. **catboxanon** 91 | 92 | ### v1.4.3 - 2023 Sep.13 93 | - Improve **Color Wheel** Functionality by. 
**catboxanon** 94 | 95 | ### v1.4.2 - 2023 Sep.11 96 | - Fix the Reset and Randomize buttons for the new Contrast algorithm 97 | 98 | ### v1.4.1 - 2023 Sep.11 99 | - New **Contrast** algorithm 100 | 101 | ### v1.4.0 - 2023 Jul.11 102 | - Implement **Scaling** algorithm 103 | 104 | ### v1.3.5 - 2023 Jul.11 105 | - Implement **Color Picker** 106 | 107 | ### v1.3.4 - 2023 Jul.08 108 | - Implement **Metadata** 109 | 110 | ### v1.3.3 - 2023 Jul.07 111 | - Color Wheel now works at (0, 0, 0) 112 | - Style/Randomize/Reset now updates Color Wheel 113 | 114 | ### v1.3.2 - 2023 Jul.07 115 | - Implement **Color Wheel** 116 | - Finally added `changelog` 117 | 118 | ### v1.3.1 - 2023 Jul.06 119 | - Bug Fix 120 | 121 | ### v1.3.0 - 2023 Jul.06 122 | - Implement **Style Presets** 123 | - Separate logics into different scripts 124 | 125 | ### v1.2.1 - 2023 Jul.05 126 | - Add **Randomize** function 127 | - Add **Reset** function 128 | 129 | ### v1.2.0 - 2023 Jul.03 130 | - Implement multiple **Noise** algorithms 131 | 132 | ### v1.1.3 - 2023 Jun.26 133 | - Automatically refresh outdated Sliders values 134 | 135 | ### v1.1.2 - 2023 Jun.23 136 | - Bug Fix 137 | 138 | ### v1.1.1 - 2023 Jun.22 139 | - **Batch** Support 140 | 141 | ### v1.1.0 - 2023 Jun.21 142 | - **X/Y/Z Plot** Support 143 | 144 | ### v1.0.0 - 2023 Jun.20 145 | - Extension **Released**! -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Haoming 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # SD Webui Vectorscope CC 2 | This is an Extension for the [Automatic1111 Webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui), which performs a kind of **Offset Noise** natively during inference, allowing you to adjust the brightness, contrast, and color of the generations. 3 | 4 | > Also supports both old & new [Forge](https://github.com/lllyasviel/stable-diffusion-webui-forge) 5 | 6 | ## Example Images 7 | 8 |

9 |
10 | Base Image w/o Extension 11 |

12 | 13 |
14 | Infotext 15 | 16 | - **Checkpoint:** [realisticVisionV51](https://civitai.com/models/4201?modelVersionId=130072) 17 | - **Positive Prompt:** `(high quality, best quality), a 4k cinematic photo of a gentleman in suit, street in a city at night, (depth of field, bokeh)` 18 | - **Negative Prompt:** `(low quality, worst quality:1.2), [EasyNegative, EasyNegativeV2]` 19 | 20 | ```cpp 21 | Steps: 32, Sampler: DPM++ 2M Karras, CFG scale: 7.5, Seed: 3709157017, Size: 512x512, Denoising strength: 0.5 22 | Clip skip: 2, Token merging ratio: 0.2, Token merging ratio hr: 0.2, RNG: CPU, NGMS: 4 23 | Hires upscale: 2, Hires steps: 16, Hires upscaler: 2xNomosUni_esrgan_multijpg 24 | ``` 25 | 26 |
27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 51 | 62 | 74 | 75 | 76 |
- **Vibrant**
  - Alt: True
  - Saturation: 1.75
  - Noise: Ones
  - Scaling: 1 - Cos
- **Cold**
  - Brightness: -5.0
  - Contrast: 2.5
  - Saturation: 0.75
  - R: -3.0
  - B: 3.0
  - Noise: Ones
  - Scaling: 1 - Sin
- **"Movie when Mexico"**
  - Brightness: 2.5
  - Contrast: -2.5
  - Saturation: 1.25
  - R: 1.5
  - G: 3.0
  - B: -4.0
  - Noise: Ones
  - Scaling: 1 - Sin
77 | 78 | ## How to Use 79 | 80 | > **Note:** Since this Extension modifies the underlying latent tensor, the composition may change drastically depending on the parameters 81 | 82 | ### Basic Parameters 83 | 84 | - **Enable:** Enable the Extension 💀 85 | - **Alt:** Causes the Extension's effects to be stronger 86 | 87 |
88 | Technical Detail 89 | 90 | - This parameter makes the Extension modify the `denoised` Tensor instead of the `x` Tensor 91 | 92 |
93 | 94 | - **Brightness**, **Contrast**, **Saturation**: Adjust the overall `brightness` / `contrast` / `saturation` of the image 95 | 96 | #### Color Channels 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 |
| Channel | Lower | Higher |
|---------|-------|--------|
| R | Cyan | Red |
| G | Magenta | Green |
| B | Yellow | Blue |
124 | 125 | - The Extension also comes with a Color Wheel for visualization, which you can also click on to pick a color directly 126 | 127 | > The color picker isn't 100% accurate due to multiple layers of conversions... 128 | 129 | #### Style Presets 130 | - To apply a Style, select from the `Dropdown` then click **Apply Style** 131 | - To save a Style, enter a name in the `Textbox` then click **Save Style** 132 | - To delete a Style, enter the name in the `Textbox` then click **Delete Style** 133 | - *Style that was deleted is still in the `styles.json` in case you wish to retrieve it* 134 | - Click **Refresh Style** to update the `Dropdown` if you edited the `styles.json` manually 135 | 136 |
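For reference when editing the file by hand, `styles.json` is a flat JSON object whose layout mirrors what `lib_cc/style.py` (included further down in this file) reads and writes; the entry below is a hand-written example, not one of the shipped presets:

```json
{
    "styles": {
        "My Style": {
            "alt": true,
            "brightness": -2.5,
            "contrast": 1.5,
            "saturation": 0.85,
            "rgb": [0.0, 0.0, 1.0],
            "hr": false,
            "ad": false,
            "rn": false,
            "noise": "Straight Abs.",
            "scaling": "Flat"
        }
    },
    "deleted": {}
}
```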
137 | You can also find pre-made Styles by the community available online
138 | 145 |
146 | 147 | ### Advanced Parameters 148 | 149 | - **Process Hires. fix:** Enable this option to process during the **Hires. fix** phase too 150 | - By default, this Extension only functions during the regular phase of the `txt2img` mode 151 | - **Process ADetailer:** Enable this option to process during the **[ADetailer](https://github.com/Bing-su/adetailer)** phase too 152 | - Will usually cause a square of inconsistent colors 153 | - **Randomize using Seed:** Enable this option to use the current generation `seed` to randomize the basic parameters 154 | - Randomized results will be printed in the console 155 | 156 | #### Noise Settings 157 | > let **`x`** denote the latent Tensor ; let **`y`** denote the operations 158 | 159 | - **Straight:** All operations are calculated on the same Tensor 160 | - `x += x * y` 161 | - **Cross:** All operations are calculated on the Tensor opposite of the `Alt.` setting 162 | - `x += x' * y` 163 | - **Ones:** All operations are calculated on a Tensor filled with ones 164 | - `x += 1 * y` 165 | - **N.Random:** All operations are calculated on a Tensor filled with random values in normal distribution 166 | - `x += randn() * y` 167 | - **U.Random:** All operations are calculated on a Tensor filled with random values in uniform distribution 168 | - `x += rand() * y` 169 | - **Multi-Res:** All operations are calculated on a Tensor generated with multi-res noise algorithm 170 | - `x += multires() * y` 171 | - **Abs:** Calculate using the absolute values of the chosen Tensors instead 172 | - `x += abs(F) * y` 173 | 174 |
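As a rough illustration of the options above (a minimal sketch only; the actual hook lives in `lib_cc/callback.py`, reproduced later in this file), the offset target can be built and applied like this:

```python
import torch

def build_target(method: str, x: torch.Tensor) -> torch.Tensor:
    """Pick the tensor that `y` gets multiplied with before being added to `x`."""
    if method == "Straight":
        return x.detach().clone()      # the same latent tensor
    if method == "Ones":
        return torch.ones_like(x)      # constant offset
    if method == "N.Random":
        return torch.randn_like(x)     # normal distribution
    if method == "U.Random":
        return torch.rand_like(x)      # uniform distribution
    raise ValueError(method)

# x += target * y, e.g. a small brightness offset on channel 0 of an SD1 latent
x = torch.randn(1, 4, 64, 64)          # (batch, channels, H / 8, W / 8)
y = 0.1
x[:, 0] += build_target("Ones", x)[:, 0] * y
```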

175 | 176 |

177 | 178 |
179 | Infotext 180 | 181 | - **Checkpoint:** [realisticVisionV51](https://civitai.com/models/4201?modelVersionId=130072) 182 | - **Positive Prompt:** `(high quality, best quality), a 4k photo of a cute dog running in the snow, mountains, day, (depth of field, bokeh)` 183 | - **Negative Prompt:** `(low quality, worst quality:1.2), [EasyNegative, EasyNegativeV2]` 184 | - **Brightness:** `2.5` 185 | - **Contrast:** `2.5` 186 | - **Alt:** `True` 187 | - **Scaling:** `1 - Cos` 188 | 189 | ```cpp 190 | Steps: 24, Sampler: DPM++ 2M Karras, CFG scale: 7.5, Seed: 1257068736, Size: 512x512, Denoising strength: 0.5 191 | Clip skip: 2, Token merging ratio: 0.2, Token merging ratio hr: 0.2, RNG: CPU, NGMS: 4 192 | Hires upscale: 1.5, Hires steps: 16, Hires upscaler: 2xNomosUni_esrgan_multijpg 193 | ``` 194 | 195 |
196 | 197 | #### Scaling Settings 198 | By default, this Extension offsets the noise by the same amount every step. But depending on the `Sampler` and `Scheduler` used, and whether `Alt.` was enabled or not, the effects might be too strong during the early or the later phase of the process, which in turn causes artifacts. 199 | 200 | - **Flat:** Default behavior 201 | - **Cos:** Cosine scaling `(High -> Low)` 202 | - **Sin:** Sine scaling `(Low -> High)` 203 | - **1 - Cos:** `(Low -> High)` 204 | - **1 - Sin:** `(High -> Low)` 205 | 206 |
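In code, the per-step multiplier looks like the sketch below (it mirrors `apply_scaling` in `lib_cc/scaling.py`, included later in this file):

```python
from math import cos, sin, pi

def step_multiplier(alg: str, current_step: int, total_steps: int) -> float:
    """Factor applied to every offset at a given sampling step."""
    if alg == "Flat":
        return 1.0
    rad = (current_step / total_steps) * pi / 2
    return {
        "Cos": cos(rad),          # strong early, weak late
        "Sin": sin(rad),          # weak early, strong late
        "1 - Cos": 1 - cos(rad),  # weak early, strong late
        "1 - Sin": 1 - sin(rad),  # strong early, weak late
    }[alg]

# e.g. with 20 steps, "Cos" starts at 1.0 and falls to roughly 0.08 by step 19
print([round(step_multiplier("Cos", i, 20), 2) for i in (0, 10, 19)])
```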

207 | 208 |

209 | 210 |
211 | Infotext 212 | 213 | - **Checkpoint:** [realisticVisionV51](https://civitai.com/models/4201?modelVersionId=130072) 214 | - **Positive Prompt:** `(high quality, best quality), a 4k photo of a cute cat standing at a flower field in a park, day, (depth of field, bokeh)` 215 | - **Negative Prompt:** `(low quality, worst quality:1.2), [EasyNegative, EasyNegativeV2]` 216 | - **Alt:** `True` 217 | - **Noise:** `Straight Abs.` 218 | 219 | ```cpp 220 | Steps: 24, Sampler: DPM++ 2M Karras, CFG scale: 7.5, Seed: 3515074713, Size: 512x512, Denoising strength: 0.5 221 | Clip skip: 2, Token merging ratio: 0.2, Token merging ratio hr: 0.2, RNG: CPU, NGMS: 4 222 | Hires upscale: 1.5, Hires steps: 12, Hires upscaler: 2xNomosUni_esrgan_multijpg 223 | ``` 224 | 225 |
226 | 227 | ### Buttons 228 | - **Reset:** Reset all `Basic` and `Advanced` parameters to the default values 229 | - **Randomize:** Randomize the `Brightness`, `Contrast`, `Saturation`, `R`, `G`, `B` parameters 230 | 231 | ## Settings 232 | > The following settings are in the **Vectorscope CC** section under the **Stable Diffusion** category of the **Settings** tab 233 | 234 | - Append the parameters to the infotext 235 | - Disable `do_not_save_to_config` to use the Webui **Defaults** functionality 236 | - Set the `minimum` and `maximum` range for each parameter 237 | 238 | ## Roadmap 239 | - [X] Extension Released! 240 | - [X] Add Support for **X/Y/Z Plot** 241 | - [X] Implement different **Noise** functions 242 | - [X] Implement **Randomize** button 243 | - [X] Implement **Style** Presets 244 | - [X] Implement **Color Wheel** & **Color Picker** 245 | - [X] Implement better scaling algorithms 246 | - [X] Add API Docs 247 | - [X] Append Parameters to Infotext 248 | - [X] Improved Infotext Support *(by. [catboxanon](https://github.com/catboxanon))* 249 | - [X] Add **HDR** Script 250 | - [X] Add Support for **SDXL** 251 | - [ ] Implement Gradient features 252 | 253 | ## API 254 | You can use this Extension via [API](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/API) by adding an entry to the `alwayson_scripts` of your payload. An [example](samples/api_example.json) is provided. The `args` are sent in the following order in an `array`: 255 | 256 | 257 | 258 | 259 | 260 | 261 | 262 | 263 | 264 | 265 | 266 | 267 | 268 | 269 | 270 | 271 | 272 | 273 | 274 | 275 | 276 | 277 | 278 | 279 | 280 | 281 | 282 | 283 | 284 | 285 | 286 | 287 | 288 | 289 | 290 | 291 | 292 | 293 | 294 | 295 | 296 | 297 | 298 | 299 | 300 | 301 | 302 | 303 | 304 | 305 | 306 | 307 | 308 | 309 | 310 | 311 | 312 | 313 | 314 | 315 | 316 | 317 |
| Parameter | Type |
|-----------|------|
| Enable | bool |
| Alt. | bool |
| Brightness | float |
| Contrast | float |
| Saturation | float |
| R | float |
| G | float |
| B | float |
| Hires. fix | bool |
| ADetailer | bool |
| Randomize | bool |
| Noise Method | str |
| Scaling | str |
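As a minimal sketch (it assumes a local Webui launched with `--api` on the default `127.0.0.1:7860`), a payload based on `samples/api_example.json` can be sent like this:

```python
import requests

payload = {
    "prompt": "a photo of a dog",
    "steps": 24,
    "width": 512,
    "height": 512,
    "alwayson_scripts": {
        "vectorscope cc": {
            # Enable, Alt., Brightness, Contrast, Saturation, R, G, B,
            # Hires. fix, ADetailer, Randomize, Noise Method, Scaling
            "args": [True, True, -2.5, 1.5, 0.85, 0.0, 0.0, 1.0,
                     False, False, False, "Straight Abs.", "Flat"]
        }
    },
}

response = requests.post("http://127.0.0.1:7860/sdapi/v1/txt2img", json=payload)
images = response.json()["images"]  # list of base64-encoded images
```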
318 | 319 | ## Known Issues 320 | - On rare occasions, this Extension has little effect when used with certain **LoRA**s 321 | - Works better / worse with certain `Samplers` 322 | 323 | 324 | ## HDR 325 | 326 | > [Discussion Thread](https://github.com/Haoming02/sd-webui-vectorscope-cc/issues/16) 327 | 328 | In the **Script** `Dropdown` at the bottom, there is now a new **`High Dynamic Range`** option: 329 | 330 | - This script will generate multiple images *("Brackets")* of varying brightness, then merge them into 1 HDR image 331 | - **(Recommended)** Use a deterministic sampler and high enough steps. `Euler` *(**not** `Euler a`)* works well in my experience 332 | 333 | #### Options 334 | - **Brackets:** The number of images to generate 335 | - **Gaps:** The brightness difference between each image *(see the sketch below)* 336 | - **Automatically Merge:** When enabled, this will merge the images using an `OpenCV` algorithm and save to the `HDR` folder in the `outputs` folder 337 | - Disable this if you want to merge them yourself using a better external program 338 |
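To make those two options concrete: the brightness offsets are spaced evenly around the unmodified image, as in this sketch of `brightness_brackets` from `scripts/cc_hdr.py` (included later in this file):

```python
def brightness_brackets(count: int, gap: float) -> list[float]:
    """Brightness offset assigned to each bracket, centered on the base image."""
    half = count // 2
    return [gap * (i - half) for i in range(count)]

print(brightness_brackets(5, 1.25))  # [-2.5, -1.25, 0.0, 1.25, 2.5]
```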
340 | 341 |
342 | Offset Noise TL;DR 343 | 344 | The most common *version* of **Offset Noise** you may have heard of is from this [blog post](https://www.crosslabs.org/blog/diffusion-with-offset-noise), where it was discovered that the noise functions used during **training** were flawed, causing `Stable Diffusion` to always generate images with an average of `0.5` *(**ie.** grey)*. 345 | 346 | > **ie.** Even if you prompt for dark/night or bright/snow, the average of the image is still "grey" 347 | 348 | > [Technical Explanations](https://youtu.be/cVxQmbf3q7Q) 349 | 350 | However, this Extension instead tries to offset the latent noise during the **inference** phase. Therefore, you do not need to use models that were specially trained, as this can work on any model. 351 |
352 | 353 |
353 | 354 | How does this work? 355 | 356 | After reading through and messing around with the code, I found out that it is possible to directly modify the Tensors representing the latent noise used by the Stable Diffusion process. 357 | 358 | The dimensions of the Tensors are `(X, 4, H / 8, W / 8)`, which represents **X** batches of noise images, with **4** channels, each with **(W / 8) x (H / 8)** values 359 | 360 | > **eg.** Generating a single 512x768 image will create a Tensor of size (1, 4, 96, 64) 361 | 362 | Then, I tried to play around with the values of each channel and ended up discovering these relationships. Essentially, the 4 channels correspond to the **CMYK** color format for `SD1` *(**Y'CbCr** for `SDXL`)*, hence why you can control the brightness as well as the colors. 363 | 364 |
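A minimal sketch of that idea for an `SD1` latent (channel indices and signs follow `lib_cc/callback.py`, reproduced later in this file; the offset values here are arbitrary examples):

```python
import torch

latent = torch.randn(1, 4, 96, 64)  # a single 512x768 image -> (1, 4, 96, 64)

brightness, r, g, b = 0.1, 0.05, 0.0, -0.05

latent[:, 0] += brightness  # channel 0: overall brightness
latent[:, 2] -= r           # channel 2: R slider (Cyan <-> Red)
latent[:, 1] += g           # channel 1: G slider (Magenta <-> Green)
latent[:, 3] -= b           # channel 3: B slider (Yellow <-> Blue)
```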
365 | 366 |
367 | 368 | #### Vectorscope? 369 | The Extension is named this way because the color interactions remind me of the `Vectorscope` found in **Premiere Pro**'s **Lumetri Color**. Those who are experienced in Color Correction should be rather familiar with this Extension. 370 | 371 |

372 | 373 | ~~Yes. I'm aware that it's just how digital colors work in general.~~ 374 | 375 | ~~We've come full **circle** *(\*ba dum tss)* now that a Color Wheel is actually added.~~ 376 | -------------------------------------------------------------------------------- /javascript/vec_cc.js: -------------------------------------------------------------------------------- 1 | class VectorscopeCC { 2 | 3 | static dot = { 'txt': undefined, 'img': undefined }; 4 | 5 | /** 6 | * @param {number} r @param {number} g @param {number} b 7 | * @param {string} mode "txt" | "img" 8 | */ 9 | static updateCursor(r, g, b, mode) { 10 | const mag = Math.abs(r) + Math.abs(g) + Math.abs(b); 11 | let condX, condY; 12 | 13 | if (mag < Number.EPSILON) { 14 | condX = 0.0; 15 | condY = 0.0; 16 | } else { 17 | condX = 25 * Math.sqrt(r * r + g * g + b * b) * (r * -0.5 + g * -0.5 + b * 1.0) / mag; 18 | condY = 25 * Math.sqrt(r * r + g * g + b * b) * (r * -0.866 + g * 0.866 + b * 0.0) / mag; 19 | } 20 | 21 | this.dot[mode].style.left = `calc(50% + ${condX - 12}px)`; 22 | this.dot[mode].style.top = `calc(50% + ${condY - 12}px)`; 23 | } 24 | 25 | /** 26 | * @param {HTMLImageElement} wheel 27 | * @param {HTMLInputElement[]} sliders 28 | * @param {HTMLImageElement} dot 29 | */ 30 | static registerPicker(wheel, sliders, dot) { 31 | ['mousemove', 'click'].forEach((event) => { 32 | wheel.addEventListener(event, (e) => { 33 | e.preventDefault(); 34 | if (e.type === 'mousemove' && e.buttons != 1) 35 | return; 36 | 37 | const rect = e.target.getBoundingClientRect(); 38 | const p_rect = e.target.parentNode.getBoundingClientRect(); 39 | 40 | const shift = (p_rect.width - rect.width) / 2.0; 41 | dot.style.left = `calc(${e.clientX - rect.left}px - 12px + ${shift}px)`; 42 | dot.style.top = `calc(${e.clientY - rect.top}px - 12px)`; 43 | 44 | const x = ((e.clientX - rect.left) - 100.0) / 25; 45 | const y = ((e.clientY - rect.top) - 100.0) / 25; 46 | 47 | let r = -0.077 * (4.33 * x + 7.5 * y); 48 | let g = y / 0.866 + r; 49 | let b = x + 0.5 * r + 0.5 * g; 50 | 51 | const mag = Math.sqrt(r * r + g * g + b * b); 52 | const len = Math.abs(r) + Math.abs(g) + Math.abs(b); 53 | 54 | r = (r / mag * len).toFixed(2); 55 | g = (g / mag * len).toFixed(2); 56 | b = (b / mag * len).toFixed(2); 57 | 58 | sliders[0][0].value = r; 59 | sliders[0][1].value = r; 60 | sliders[1][0].value = g; 61 | sliders[1][1].value = g; 62 | sliders[2][0].value = b; 63 | sliders[2][1].value = b; 64 | }); 65 | }); 66 | 67 | ['mouseleave', 'mouseup'].forEach((event) => { 68 | wheel.addEventListener(event, () => { 69 | updateInput(sliders[0][0]); 70 | updateInput(sliders[1][0]); 71 | updateInput(sliders[2][0]); 72 | }); 73 | }); 74 | } 75 | 76 | } 77 | 78 | onUiLoaded(() => { 79 | 80 | ['txt', 'img'].forEach((mode) => { 81 | const container = document.getElementById(`cc-colorwheel-${mode}`); 82 | container.style.height = '200px'; 83 | container.style.width = '200px'; 84 | 85 | const wheel = container.querySelector('img'); 86 | container.insertBefore(wheel, container.firstChild); 87 | 88 | while (container.firstChild !== container.lastChild) 89 | container.lastChild.remove(); 90 | 91 | wheel.ondragstart = (e) => { e.preventDefault(); return false; }; 92 | wheel.id = `cc-img-${mode}`; 93 | 94 | const sliders = [ 95 | document.getElementById(`cc-r-${mode}`).querySelectorAll('input'), 96 | document.getElementById(`cc-g-${mode}`).querySelectorAll('input'), 97 | document.getElementById(`cc-b-${mode}`).querySelectorAll('input'), 98 | ]; 99 | 100 | const temp = 
document.getElementById(`cc-temp-${mode}`); 101 | 102 | const dot = temp.querySelector('img'); 103 | dot.style.left = 'calc(50% - 12px)'; 104 | dot.style.top = 'calc(50% - 12px)'; 105 | dot.id = `cc-dot-${mode}`; 106 | 107 | container.appendChild(dot); 108 | temp.remove(); 109 | 110 | VectorscopeCC.dot[mode] = dot; 111 | VectorscopeCC.registerPicker(wheel, sliders, dot); 112 | }); 113 | 114 | const config = document.getElementById("setting_cc_no_defaults").querySelector('input[type=checkbox]'); 115 | if (config.checked) 116 | return; 117 | 118 | setTimeout(() => { 119 | ['txt', 'img'].forEach((mode) => { 120 | const r = document.getElementById(`cc-r-${mode}`).querySelector("input").value; 121 | const g = document.getElementById(`cc-g-${mode}`).querySelector("input").value; 122 | const b = document.getElementById(`cc-b-${mode}`).querySelector("input").value; 123 | 124 | VectorscopeCC.updateCursor(r, g, b, mode); 125 | }); 126 | }, 100); 127 | 128 | }); 129 | -------------------------------------------------------------------------------- /lib_cc/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Haoming02 3 | License: MIT 4 | """ 5 | -------------------------------------------------------------------------------- /lib_cc/callback.py: -------------------------------------------------------------------------------- 1 | from modules.sd_samplers_kdiffusion import KDiffusionSampler 2 | from modules.script_callbacks import on_script_unloaded, on_ui_settings 3 | from functools import wraps 4 | from random import random 5 | import torch 6 | 7 | from .scaling import apply_scaling 8 | from .settings import settings 9 | 10 | 11 | class NoiseMethods: 12 | 13 | @staticmethod 14 | def get_delta(latent: torch.Tensor) -> torch.Tensor: 15 | mean = torch.mean(latent) 16 | return torch.sub(latent, mean) 17 | 18 | @staticmethod 19 | def to_abs(latent: torch.Tensor) -> torch.Tensor: 20 | return torch.abs(latent) 21 | 22 | @staticmethod 23 | def zeros(latent: torch.Tensor) -> torch.Tensor: 24 | return torch.zeros_like(latent) 25 | 26 | @staticmethod 27 | def ones(latent: torch.Tensor) -> torch.Tensor: 28 | return torch.ones_like(latent) 29 | 30 | @staticmethod 31 | def gaussian_noise(latent: torch.Tensor) -> torch.Tensor: 32 | return torch.rand_like(latent) 33 | 34 | @staticmethod 35 | def normal_noise(latent: torch.Tensor) -> torch.Tensor: 36 | return torch.randn_like(latent) 37 | 38 | @staticmethod 39 | @torch.inference_mode() 40 | def multires_noise( 41 | latent: torch.Tensor, 42 | use_zero: bool, 43 | iterations: int = 10, 44 | discount: float = 0.8, 45 | ): 46 | """ 47 | Credit: Kohya_SS 48 | https://github.com/kohya-ss/sd-scripts/blob/v0.8.5/library/custom_train_functions.py#L448 49 | """ 50 | 51 | noise = NoiseMethods.zeros(latent) if use_zero else NoiseMethods.ones(latent) 52 | device = latent.device 53 | 54 | b, c, w, h = noise.shape 55 | upsampler = torch.nn.Upsample(size=(w, h), mode="bilinear").to(device) 56 | 57 | for i in range(iterations): 58 | r = random() * 2 + 2 59 | 60 | wn = max(1, int(w / (r**i))) 61 | hn = max(1, int(h / (r**i))) 62 | 63 | noise += upsampler(torch.randn(b, c, wn, hn).to(device)) * discount**i 64 | 65 | if wn == 1 or hn == 1: 66 | break 67 | 68 | return noise / noise.std() 69 | 70 | 71 | def RGB2CbCr(r: float, g: float, b: float) -> tuple[float, float]: 72 | """Convert RGB channels into YCbCr for SDXL""" 73 | cb = -0.17 * r - 0.33 * g + 0.5 * b 74 | cr = 0.5 * r - 0.42 * g - 0.08 * b 75 | 76 | return cb, cr 77 | 
78 | 79 | original_callback = KDiffusionSampler.callback_state 80 | 81 | 82 | @torch.no_grad() 83 | @torch.inference_mode() 84 | @wraps(original_callback) 85 | def cc_callback(self, d): 86 | if not self.vec_cc["enable"]: 87 | return original_callback(self, d) 88 | 89 | if getattr(self.p, "is_hr_pass", False) and not self.vec_cc["doHR"]: 90 | return original_callback(self, d) 91 | 92 | if getattr(self.p, "_ad_inner", False) and not self.vec_cc["doAD"]: 93 | return original_callback(self, d) 94 | 95 | is_xl: bool = self.p.sd_model.is_sdxl 96 | 97 | mode = str(self.vec_cc["mode"]) 98 | method = str(self.vec_cc["method"]) 99 | source: torch.Tensor = d[mode] 100 | target = None 101 | 102 | if "Straight" in method: 103 | target = d[mode].detach().clone() 104 | elif "Cross" in method: 105 | target = d["x" if mode == "denoised" else "denoised"].detach().clone() 106 | elif "Multi-Res" in method: 107 | target = NoiseMethods.multires_noise(d[mode], "Abs" in method) 108 | elif method == "Ones": 109 | target = NoiseMethods.ones(d[mode]) 110 | elif method == "N.Random": 111 | target = NoiseMethods.normal_noise(d[mode]) 112 | elif method == "U.Random": 113 | target = NoiseMethods.gaussian_noise(d[mode]) 114 | else: 115 | raise ValueError 116 | 117 | if "Abs" in method: 118 | target = NoiseMethods.to_abs(target) 119 | 120 | batchSize = int(d[mode].size(0)) 121 | 122 | bri, con, sat, r, g, b = apply_scaling( 123 | self.vec_cc["scaling"], 124 | d["i"], 125 | self.vec_cc["step"], 126 | self.vec_cc["bri"], 127 | self.vec_cc["con"], 128 | self.vec_cc["sat"], 129 | self.vec_cc["r"], 130 | self.vec_cc["g"], 131 | self.vec_cc["b"], 132 | ) 133 | 134 | if not is_xl: 135 | for i in range(batchSize): 136 | # Brightness 137 | source[i][0] += target[i][0] * bri 138 | # Contrast 139 | source[i][0] += NoiseMethods.get_delta(source[i][0]) * con 140 | 141 | # R 142 | source[i][2] -= target[i][2] * r 143 | # G 144 | source[i][1] += target[i][1] * g 145 | # B 146 | source[i][3] -= target[i][3] * b 147 | 148 | # Saturation 149 | source[i][2] *= sat 150 | source[i][1] *= sat 151 | source[i][3] *= sat 152 | 153 | else: 154 | cb, cr = RGB2CbCr(r, g, b) 155 | 156 | for i in range(batchSize): 157 | # Brightness 158 | source[i][0] += target[i][0] * bri 159 | # Contrast 160 | source[i][0] += NoiseMethods.get_delta(source[i][0]) * con 161 | 162 | # CbCr 163 | source[i][1] -= target[i][1] * cr 164 | source[i][2] -= target[i][2] * cb 165 | 166 | # Saturation 167 | source[i][1] *= sat 168 | source[i][2] *= sat 169 | 170 | return original_callback(self, d) 171 | 172 | 173 | def restore_callback(): 174 | KDiffusionSampler.callback_state = original_callback 175 | 176 | 177 | def hook_callbacks(): 178 | KDiffusionSampler.callback_state = cc_callback 179 | on_script_unloaded(restore_callback) 180 | on_ui_settings(settings) 181 | -------------------------------------------------------------------------------- /lib_cc/colorpicker.py: -------------------------------------------------------------------------------- 1 | from modules import scripts 2 | import gradio as gr 3 | import os 4 | 5 | WHEEL = os.path.join(scripts.basedir(), "scripts", "vectorscope.png") 6 | DOT = os.path.join(scripts.basedir(), "scripts", "dot.png") 7 | 8 | 9 | def create_colorpicker(is_img: bool): 10 | m: str = "img" if is_img else "txt" 11 | 12 | whl = gr.Image( 13 | value=WHEEL, 14 | interactive=False, 15 | container=False, 16 | elem_id=f"cc-colorwheel-{m}", 17 | ) 18 | 19 | dot = gr.Image( 20 | value=DOT, 21 | interactive=False, 22 | container=False, 23 | 
elem_id=f"cc-temp-{m}", 24 | ) 25 | 26 | whl.do_not_save_to_config = True 27 | dot.do_not_save_to_config = True 28 | -------------------------------------------------------------------------------- /lib_cc/const.py: -------------------------------------------------------------------------------- 1 | from modules.shared import opts 2 | import random 3 | 4 | 5 | class Param: 6 | 7 | def __init__(self, minimum: float, maximum: float, default: float): 8 | self.minimum = minimum 9 | self.maximum = maximum 10 | self.default = default 11 | 12 | def rand(self) -> float: 13 | return round(random.uniform(self.minimum, self.maximum), 2) 14 | 15 | 16 | Brightness: Param 17 | Contrast: Param 18 | Saturation: Param 19 | Color: Param 20 | 21 | 22 | def init(): 23 | global Brightness 24 | Brightness = Param( 25 | getattr(opts, "cc_brightness_min", -5.0), 26 | getattr(opts, "cc_brightness_max", 5.0), 27 | 0.0, 28 | ) 29 | 30 | global Contrast 31 | Contrast = Param( 32 | getattr(opts, "cc_contrast_min", -5.0), 33 | getattr(opts, "cc_contrast_max", 5.0), 34 | 0.0, 35 | ) 36 | 37 | global Saturation 38 | Saturation = Param( 39 | getattr(opts, "cc_saturation_min", 0.25), 40 | getattr(opts, "cc_saturation_max", 1.75), 41 | 1.0, 42 | ) 43 | 44 | global Color 45 | Color = Param( 46 | getattr(opts, "cc_color_min", -4.0), 47 | getattr(opts, "cc_color_max", 4.0), 48 | 0.0, 49 | ) 50 | -------------------------------------------------------------------------------- /lib_cc/scaling.py: -------------------------------------------------------------------------------- 1 | from math import cos, sin, pi 2 | 3 | 4 | def apply_scaling( 5 | alg: str, 6 | current_step: int, 7 | total_steps: int, 8 | bri: float, 9 | con: float, 10 | sat: float, 11 | r: float, 12 | g: float, 13 | b: float, 14 | ) -> list: 15 | 16 | mod = 1.0 17 | 18 | if alg != "Flat": 19 | ratio = float(current_step / total_steps) 20 | rad = ratio * pi / 2 21 | 22 | match alg: 23 | case "Cos": 24 | mod = cos(rad) 25 | case "Sin": 26 | mod = sin(rad) 27 | case "1 - Cos": 28 | mod = 1 - cos(rad) 29 | case "1 - Sin": 30 | mod = 1 - sin(rad) 31 | 32 | return [bri * mod, con * mod, (sat - 1.0) * mod + 1.0, r * mod, g * mod, b * mod] 33 | -------------------------------------------------------------------------------- /lib_cc/settings.py: -------------------------------------------------------------------------------- 1 | from modules.shared import OptionInfo, opts 2 | from modules import scripts 3 | from json import load, dump 4 | from gradio import Slider 5 | import os 6 | 7 | section = ("cc", "Vectorscope CC") 8 | 9 | 10 | def settings(): 11 | opts.add_option( 12 | "cc_metadata", 13 | OptionInfo( 14 | True, 15 | "Append Vectorscope CC parameters to generation infotext", 16 | section=section, 17 | category_id="sd", 18 | ), 19 | ) 20 | 21 | opts.add_option( 22 | "cc_no_defaults", 23 | OptionInfo( 24 | True, 25 | 'Add the "do_not_save_to_config" flag to all components', 26 | section=section, 27 | category_id="sd", 28 | onchange=reset_ui_config, 29 | ) 30 | .info("uncheck this option if you wish to use the built-in Defaults function") 31 | .info("enable again if the extension is not working correctly after an update") 32 | .needs_reload_ui(), 33 | ) 34 | 35 | for lbl, minVal, maxVal in [ 36 | ("Brightness", (-5.0, 0.0), (0.0, 5.0)), 37 | ("Contrast", (-5.0, 0.0), (0.0, 5.0)), 38 | ("Saturation", (0.25, 1.0), (1.0, 1.75)), 39 | ("Color", (-4.0, 0.0), (0.0, 4.0)), 40 | ]: 41 | 42 | opts.add_option( 43 | f"cc_{lbl.lower()}_min", 44 | OptionInfo( 45 | minVal[0], 46 | 
f"{lbl} - Min", 47 | Slider, 48 | {"step": 0.05, "minimum": minVal[0], "maximum": minVal[1]}, 49 | section=section, 50 | category_id="sd", 51 | ).needs_reload_ui(), 52 | ) 53 | 54 | opts.add_option( 55 | f"cc_{lbl.lower()}_max", 56 | OptionInfo( 57 | maxVal[1], 58 | f"{lbl} - Max", 59 | Slider, 60 | {"step": 0.05, "minimum": maxVal[0], "maximum": maxVal[1]}, 61 | section=section, 62 | category_id="sd", 63 | ).needs_reload_ui(), 64 | ) 65 | 66 | 67 | def reset_ui_config(): 68 | extension = "cc.py" 69 | ui_config = os.path.join(scripts.basedir(), "ui-config.json") 70 | 71 | with open(ui_config, "r", encoding="utf-8") as json_file: 72 | configs = load(json_file) 73 | 74 | cleaned_configs = { 75 | key: value for key, value in configs.items() if extension not in key 76 | } 77 | 78 | with open(ui_config, "w", encoding="utf-8") as json_file: 79 | dump(cleaned_configs, json_file) 80 | -------------------------------------------------------------------------------- /lib_cc/style.py: -------------------------------------------------------------------------------- 1 | from modules import scripts 2 | import gradio as gr 3 | import json 4 | import os 5 | 6 | 7 | STYLE_FILE = os.path.join(scripts.basedir(), "styles.json") 8 | EMPTY_STYLE = {"styles": {}, "deleted": {}} 9 | 10 | 11 | class StyleManager: 12 | 13 | def __init__(self): 14 | self.STYLE_SHEET: dict = {} 15 | 16 | def load_styles(self): 17 | if os.path.isfile(STYLE_FILE): 18 | with open(STYLE_FILE, "r", encoding="utf-8") as json_file: 19 | self.STYLE_SHEET = json.load(json_file) 20 | print("[Vec. CC] Style Sheet Loaded...") 21 | 22 | else: 23 | with open(STYLE_FILE, "w+", encoding="utf-8") as json_file: 24 | self.STYLE_SHEET = EMPTY_STYLE 25 | json.dump(self.STYLE_SHEET, json_file) 26 | print("[Vec. CC] Creating Empty Style Sheet...") 27 | 28 | return self.list_style() 29 | 30 | def list_style(self) -> list[str]: 31 | return list(self.STYLE_SHEET["styles"].keys()) 32 | 33 | def get_style(self, style_name: str) -> tuple[bool | str | float]: 34 | style: dict = self.STYLE_SHEET["styles"].get(style_name, None) 35 | 36 | if not style: 37 | print(f'\n[Error] No Style of name "{style_name}" was found!\n') 38 | return [gr.update()] * 12 39 | 40 | return ( 41 | style.get("alt", gr.update()), 42 | style.get("brightness", gr.update()), 43 | style.get("contrast", gr.update()), 44 | style.get("saturation", gr.update()), 45 | *style.get("rgb", (gr.update(), gr.update(), gr.update())), 46 | style.get("hr", gr.update()), 47 | style.get("ad", gr.update()), 48 | style.get("rn", gr.update()), 49 | style.get("noise", gr.update()), 50 | style.get("scaling", gr.update()), 51 | ) 52 | 53 | def save_style( 54 | self, 55 | style_name: str, 56 | latent: bool, 57 | bri: float, 58 | con: float, 59 | sat: float, 60 | r: float, 61 | g: float, 62 | b: float, 63 | hr: bool, 64 | ad: bool, 65 | rn: bool, 66 | noise: str, 67 | scaling: str, 68 | ): 69 | if style_name in self.STYLE_SHEET["styles"]: 70 | print(f'\n[Error] Duplicated Style Name: "{style_name}" Detected!') 71 | print("Values were not saved!\n") 72 | return self.list_style() 73 | 74 | new_style = { 75 | "alt": latent, 76 | "brightness": bri, 77 | "contrast": con, 78 | "saturation": sat, 79 | "rgb": [r, g, b], 80 | "hr": hr, 81 | "ad": ad, 82 | "rn": rn, 83 | "noise": noise, 84 | "scaling": scaling, 85 | } 86 | 87 | self.STYLE_SHEET["styles"].update({style_name: new_style}) 88 | 89 | with open(STYLE_FILE, "w+") as json_file: 90 | json.dump(self.STYLE_SHEET, json_file) 91 | 92 | print(f'\nStyle of Name "{style_name}" 
Saved!\n') 93 | return self.list_style() 94 | 95 | def delete_style(self, style_name: str): 96 | if style_name not in self.STYLE_SHEET["styles"]: 97 | print(f'\n[Error] No Style of name "{style_name}" was found!\n') 98 | return self.list_style() 99 | 100 | style: dict = self.STYLE_SHEET["styles"].get(style_name) 101 | self.STYLE_SHEET["deleted"].update({style_name: style}) 102 | del self.STYLE_SHEET["styles"][style_name] 103 | 104 | with open(STYLE_FILE, "w+") as json_file: 105 | json.dump(self.STYLE_SHEET, json_file) 106 | 107 | print(f'\nStyle of name "{style_name}" was deleted!\n') 108 | return self.list_style() 109 | -------------------------------------------------------------------------------- /lib_cc/xyz.py: -------------------------------------------------------------------------------- 1 | from modules import scripts 2 | 3 | 4 | def _grid_reference(): 5 | for data in scripts.scripts_data: 6 | if data.script_class.__module__ in ( 7 | "scripts.xyz_grid", 8 | "xyz_grid.py", 9 | ) and hasattr(data, "module"): 10 | return data.module 11 | 12 | raise SystemError("Could not find X/Y/Z Plot...") 13 | 14 | 15 | def xyz_support(cache: dict): 16 | 17 | def apply_field(field): 18 | def _(p, x, xs): 19 | cache.update({field: x}) 20 | 21 | return _ 22 | 23 | def choices_bool(): 24 | return ["False", "True"] 25 | 26 | def choices_method(): 27 | return [ 28 | "Disabled", 29 | "Straight", 30 | "Straight Abs.", 31 | "Cross", 32 | "Cross Abs.", 33 | "Ones", 34 | "N.Random", 35 | "U.Random", 36 | "Multi-Res", 37 | "Multi-Res Abs.", 38 | ] 39 | 40 | def choices_scaling(): 41 | return ["Flat", "Cos", "Sin", "1 - Cos", "1 - Sin"] 42 | 43 | xyz_grid = _grid_reference() 44 | 45 | extra_axis_options = [ 46 | xyz_grid.AxisOption( 47 | "[Vec.CC] Enable", str, apply_field("Enable"), choices=choices_bool 48 | ), 49 | xyz_grid.AxisOption( 50 | "[Vec.CC] Alt.", str, apply_field("Alt"), choices=choices_bool 51 | ), 52 | xyz_grid.AxisOption("[Vec.CC] Brightness", float, apply_field("Brightness")), 53 | xyz_grid.AxisOption("[Vec.CC] Contrast", float, apply_field("Contrast")), 54 | xyz_grid.AxisOption("[Vec.CC] Saturation", float, apply_field("Saturation")), 55 | xyz_grid.AxisOption("[Vec.CC] R", float, apply_field("R")), 56 | xyz_grid.AxisOption("[Vec.CC] G", float, apply_field("G")), 57 | xyz_grid.AxisOption("[Vec.CC] B", float, apply_field("B")), 58 | xyz_grid.AxisOption( 59 | "[Adv.CC] Proc. 
H.Fix", str, apply_field("DoHR"), choices=choices_bool 60 | ), 61 | xyz_grid.AxisOption( 62 | "[Adv.CC] Method", str, apply_field("Method"), choices=choices_method 63 | ), 64 | xyz_grid.AxisOption( 65 | "[Adv.CC] Scaling", str, apply_field("Scaling"), choices=choices_scaling 66 | ), 67 | xyz_grid.AxisOption("[Adv.CC] Randomize", int, apply_field("Random")), 68 | ] 69 | 70 | xyz_grid.axis_options.extend(extra_axis_options) 71 | -------------------------------------------------------------------------------- /samples/00.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Haoming02/sd-webui-vectorscope-cc/39a4aeba45864967ff590ba57532cae600c0331e/samples/00.jpg -------------------------------------------------------------------------------- /samples/01.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Haoming02/sd-webui-vectorscope-cc/39a4aeba45864967ff590ba57532cae600c0331e/samples/01.jpg -------------------------------------------------------------------------------- /samples/02.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Haoming02/sd-webui-vectorscope-cc/39a4aeba45864967ff590ba57532cae600c0331e/samples/02.jpg -------------------------------------------------------------------------------- /samples/03.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Haoming02/sd-webui-vectorscope-cc/39a4aeba45864967ff590ba57532cae600c0331e/samples/03.jpg -------------------------------------------------------------------------------- /samples/api_example.json: -------------------------------------------------------------------------------- 1 | { 2 | "prompt": "a photo of a dog", 3 | "negative_prompt": "(low quality, worst quality)", 4 | "sampler_name": "Euler a", 5 | "sampler_index": "euler", 6 | "steps": 24, 7 | "cfg_scale": 6.0, 8 | "batch_size": 1, 9 | "seed": -1, 10 | "width": 512, 11 | "height": 512, 12 | "alwayson_scripts": { 13 | "vectorscope cc": { 14 | "args": [ 15 | true, 16 | true, 17 | -2.5, 18 | 1.5, 19 | 0.85, 20 | 0.0, 21 | 0.0, 22 | 1.0, 23 | false, 24 | false, 25 | false, 26 | "Straight Abs.", 27 | "Flat" 28 | ] 29 | } 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /samples/method.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Haoming02/sd-webui-vectorscope-cc/39a4aeba45864967ff590ba57532cae600c0331e/samples/method.jpg -------------------------------------------------------------------------------- /samples/scaling.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Haoming02/sd-webui-vectorscope-cc/39a4aeba45864967ff590ba57532cae600c0331e/samples/scaling.jpg -------------------------------------------------------------------------------- /scripts/cc.py: -------------------------------------------------------------------------------- 1 | from modules.sd_samplers_kdiffusion import KDiffusionSampler 2 | from modules.shared import opts 3 | from modules import scripts 4 | 5 | from lib_cc.colorpicker import create_colorpicker 6 | from lib_cc.callback import hook_callbacks 7 | from lib_cc.style import StyleManager 8 | from lib_cc.xyz import xyz_support 9 | from lib_cc import const 10 | 11 | from random import seed 12 | import 
gradio as gr 13 | 14 | 15 | VERSION = "2.3.2" 16 | 17 | 18 | style_manager = StyleManager() 19 | style_manager.load_styles() 20 | hook_callbacks() 21 | const.init() 22 | 23 | 24 | class VectorscopeCC(scripts.Script): 25 | 26 | def __init__(self): 27 | self.xyzCache = {} 28 | xyz_support(self.xyzCache) 29 | 30 | def title(self): 31 | return "Vectorscope CC" 32 | 33 | def show(self, is_img2img): 34 | return scripts.AlwaysVisible 35 | 36 | def ui(self, is_img2img): 37 | mode: str = "img" if is_img2img else "txt" 38 | m: str = f'"{mode}"' 39 | 40 | with gr.Accordion( 41 | f"Vectorscope CC v{VERSION}", elem_id=f"vec-cc-{mode}", open=False 42 | ): 43 | 44 | with gr.Row(): 45 | enable = gr.Checkbox(label="Enable") 46 | latent = gr.Checkbox(label="Alt. (Stronger Effects)") 47 | 48 | with gr.Row(): 49 | bri = gr.Slider( 50 | label="Brightness", 51 | value=const.Brightness.default, 52 | minimum=const.Brightness.minimum, 53 | maximum=const.Brightness.maximum, 54 | step=0.05, 55 | ) 56 | con = gr.Slider( 57 | label="Contrast", 58 | value=const.Contrast.default, 59 | minimum=const.Contrast.minimum, 60 | maximum=const.Contrast.maximum, 61 | step=0.05, 62 | ) 63 | sat = gr.Slider( 64 | label="Saturation", 65 | value=const.Saturation.default, 66 | minimum=const.Saturation.minimum, 67 | maximum=const.Saturation.maximum, 68 | step=0.05, 69 | ) 70 | 71 | with gr.Row(): 72 | with gr.Column(): 73 | r = gr.Slider( 74 | label="R", 75 | info="Cyan | Red", 76 | value=const.Color.default, 77 | minimum=const.Color.minimum, 78 | maximum=const.Color.maximum, 79 | step=0.05, 80 | elem_id=f"cc-r-{mode}", 81 | ) 82 | g = gr.Slider( 83 | label="G", 84 | info="Magenta | Green", 85 | value=const.Color.default, 86 | minimum=const.Color.minimum, 87 | maximum=const.Color.maximum, 88 | step=0.05, 89 | elem_id=f"cc-g-{mode}", 90 | ) 91 | b = gr.Slider( 92 | label="B", 93 | info="Yellow | Blue", 94 | value=const.Color.default, 95 | minimum=const.Color.minimum, 96 | maximum=const.Color.maximum, 97 | step=0.05, 98 | elem_id=f"cc-b-{mode}", 99 | ) 100 | 101 | for c in (r, g, b): 102 | c.input( 103 | None, 104 | inputs=[r, g, b], 105 | _js=f"(r, g, b) => {{ VectorscopeCC.updateCursor(r, g, b, {m}); }}", 106 | ) 107 | 108 | create_colorpicker(is_img2img) 109 | 110 | with gr.Accordion("Styles", open=False): 111 | 112 | with gr.Row(elem_classes="style-rows"): 113 | style_choice = gr.Dropdown( 114 | label="CC Styles", choices=style_manager.list_style(), scale=3 115 | ) 116 | apply_btn = gr.Button( 117 | value="Apply Style", elem_id=f"cc-apply-{mode}", scale=2 118 | ) 119 | refresh_btn = gr.Button(value="Refresh Style", scale=2) 120 | 121 | with gr.Row(elem_classes="style-rows"): 122 | style_name = gr.Textbox( 123 | label="Style Name", lines=1, max_lines=1, scale=3 124 | ) 125 | save_btn = gr.Button( 126 | value="Save Style", elem_id=f"cc-save-{mode}", scale=2 127 | ) 128 | delete_btn = gr.Button(value="Delete Style", scale=2) 129 | 130 | if getattr(opts, "cc_no_defaults", True): 131 | style_choice.do_not_save_to_config = True 132 | 133 | [ 134 | setattr(comp, "do_not_save_to_config", True) 135 | for comp in ( 136 | apply_btn, 137 | refresh_btn, 138 | style_name, 139 | save_btn, 140 | delete_btn, 141 | ) 142 | ] 143 | 144 | with gr.Accordion("Advanced Settings", open=False): 145 | with gr.Row(): 146 | doHR = gr.Checkbox( 147 | label="Process Hires. 
fix", 148 | visible=(not is_img2img), 149 | ) 150 | doAD = gr.Checkbox(label="Process Adetailer") 151 | doRN = gr.Checkbox(label="Randomize using Seed") 152 | 153 | method = gr.Radio( 154 | choices=( 155 | "Straight", 156 | "Straight Abs.", 157 | "Cross", 158 | "Cross Abs.", 159 | "Ones", 160 | "N.Random", 161 | "U.Random", 162 | "Multi-Res", 163 | "Multi-Res Abs.", 164 | ), 165 | label="Noise Settings", 166 | value="Straight Abs.", 167 | ) 168 | 169 | scaling = gr.Radio( 170 | choices=("Flat", "Cos", "Sin", "1 - Cos", "1 - Sin"), 171 | label="Scaling Settings", 172 | value="Flat", 173 | ) 174 | 175 | comps: tuple[gr.components.Component] = ( 176 | latent, 177 | bri, 178 | con, 179 | sat, 180 | r, 181 | g, 182 | b, 183 | doHR, 184 | doAD, 185 | doRN, 186 | method, 187 | scaling, 188 | ) 189 | 190 | apply_btn.click( 191 | fn=style_manager.get_style, 192 | inputs=[style_choice], 193 | outputs=[*comps], 194 | ).then( 195 | None, 196 | inputs=[r, g, b], 197 | _js=f"(r, g, b) => {{ VectorscopeCC.updateCursor(r, g, b, {m}); }}", 198 | ) 199 | 200 | save_btn.click( 201 | fn=lambda *args: gr.update(choices=style_manager.save_style(*args)), 202 | inputs=[style_name, *comps], 203 | outputs=[style_choice], 204 | ) 205 | 206 | delete_btn.click( 207 | fn=lambda name: gr.update(choices=style_manager.delete_style(name)), 208 | inputs=[style_name], 209 | outputs=[style_choice], 210 | ) 211 | 212 | refresh_btn.click( 213 | fn=lambda: gr.update(choices=style_manager.load_styles()), 214 | outputs=[style_choice], 215 | ) 216 | 217 | with gr.Row(): 218 | reset_btn = gr.Button(value="Reset") 219 | random_btn = gr.Button(value="Randomize") 220 | 221 | def on_reset(): 222 | return [ 223 | gr.update(value=False), 224 | gr.update(value=const.Brightness.default), 225 | gr.update(value=const.Contrast.default), 226 | gr.update(value=const.Saturation.default), 227 | gr.update(value=const.Color.default), 228 | gr.update(value=const.Color.default), 229 | gr.update(value=const.Color.default), 230 | gr.update(value=False), 231 | gr.update(value=False), 232 | gr.update(value=False), 233 | gr.update(value="Straight Abs."), 234 | gr.update(value="Flat"), 235 | ] 236 | 237 | def on_random(): 238 | return [ 239 | gr.update(value=const.Brightness.rand()), 240 | gr.update(value=const.Contrast.rand()), 241 | gr.update(value=const.Saturation.rand()), 242 | gr.update(value=const.Color.rand()), 243 | gr.update(value=const.Color.rand()), 244 | gr.update(value=const.Color.rand()), 245 | ] 246 | 247 | reset_btn.click( 248 | fn=on_reset, 249 | outputs=[*comps], 250 | show_progress="hidden", 251 | ).then( 252 | fn=None, 253 | inputs=[r, g, b], 254 | _js=f"(r, g, b) => {{ VectorscopeCC.updateCursor(r, g, b, {m}); }}", 255 | ) 256 | 257 | random_btn.click( 258 | fn=on_random, 259 | outputs=[bri, con, sat, r, g, b], 260 | show_progress="hidden", 261 | ).then( 262 | fn=None, 263 | inputs=[r, g, b], 264 | _js=f"(r, g, b) => {{ VectorscopeCC.updateCursor(r, g, b, {m}); }}", 265 | ) 266 | 267 | self.paste_field_names = [] 268 | self.infotext_fields = [ 269 | (enable, "Vec CC Enabled"), 270 | (latent, "Vec CC Alt"), 271 | (bri, "Vec CC Brightness"), 272 | (con, "Vec CC Contrast"), 273 | (sat, "Vec CC Saturation"), 274 | (r, "Vec CC R"), 275 | (g, "Vec CC G"), 276 | (b, "Vec CC B"), 277 | (method, "Vec CC Noise"), 278 | (doHR, "Vec CC Proc HrF"), 279 | (doAD, "Vec CC Proc Ade"), 280 | (doRN, "Vec CC Seed Randomize"), 281 | (scaling, "Vec CC Scaling"), 282 | ] 283 | 284 | for comp, name in self.infotext_fields: 285 | if getattr(opts, 
"cc_no_defaults", True): 286 | comp.do_not_save_to_config = True 287 | self.paste_field_names.append(name) 288 | 289 | return [enable, *comps] 290 | 291 | def process_batch( 292 | self, 293 | p, 294 | enable: bool, 295 | latent: bool, 296 | bri: float, 297 | con: float, 298 | sat: float, 299 | r: float, 300 | g: float, 301 | b: float, 302 | doHR: bool, 303 | doAD: bool, 304 | doRN: bool, 305 | method: str, 306 | scaling: str, 307 | batch_number: int, 308 | prompts: list[str], 309 | seeds: list[int], 310 | subseeds: list[int], 311 | ): 312 | 313 | enable = self.xyzCache.pop("Enable", str(enable)).lower().strip() == "true" 314 | 315 | if not enable: 316 | if len(self.xyzCache) > 0: 317 | print("\n[Vec.CC] x [X/Y/Z Plot] Extension is not Enabled!\n") 318 | self.xyzCache.clear() 319 | 320 | setattr(KDiffusionSampler, "vec_cc", {"enable": False}) 321 | return p 322 | 323 | method = str(self.xyzCache.pop("Method", method)) 324 | 325 | if method == "Disabled": 326 | setattr(KDiffusionSampler, "vec_cc", {"enable": False}) 327 | return p 328 | 329 | if "Random" in self.xyzCache.keys(): 330 | print("[X/Y/Z Plot] x [Vec.CC] Randomize is Enabled.") 331 | if len(self.xyzCache) > 1: 332 | print("Some parameters will be overridden!") 333 | 334 | cc_seed = int(self.xyzCache.pop("Random")) 335 | else: 336 | cc_seed = int(seeds[0]) if doRN else None 337 | 338 | latent = self.xyzCache.pop("Alt", str(latent)).lower().strip() == "true" 339 | doHR = self.xyzCache.pop("DoHR", str(doHR)).lower().strip() == "true" 340 | scaling = str(self.xyzCache.pop("Scaling", scaling)) 341 | 342 | bri = float(self.xyzCache.pop("Brightness", bri)) 343 | con = float(self.xyzCache.pop("Contrast", con)) 344 | sat = float(self.xyzCache.pop("Saturation", sat)) 345 | 346 | r = float(self.xyzCache.pop("R", r)) 347 | g = float(self.xyzCache.pop("G", g)) 348 | b = float(self.xyzCache.pop("B", b)) 349 | 350 | assert len(self.xyzCache) == 0 351 | 352 | if cc_seed: 353 | seed(cc_seed) 354 | 355 | bri = const.Brightness.rand() 356 | con = const.Contrast.rand() 357 | sat = const.Saturation.rand() 358 | 359 | r = const.Color.rand() 360 | g = const.Color.rand() 361 | b = const.Color.rand() 362 | 363 | print(f"\n[Seed: {cc_seed}]") 364 | print(f"> Brightness: {bri}") 365 | print(f"> Contrast: {con}") 366 | print(f"> Saturation: {sat}") 367 | print(f"> R: {r}") 368 | print(f"> G: {g}") 369 | print(f"> B: {b}\n") 370 | 371 | if getattr(opts, "cc_metadata", True): 372 | p.extra_generation_params.update( 373 | { 374 | "Vec CC Enabled": enable, 375 | "Vec CC Alt": latent, 376 | "Vec CC Brightness": bri, 377 | "Vec CC Contrast": con, 378 | "Vec CC Saturation": sat, 379 | "Vec CC R": r, 380 | "Vec CC G": g, 381 | "Vec CC B": b, 382 | "Vec CC Noise": method, 383 | "Vec CC Proc HrF": doHR, 384 | "Vec CC Proc Ade": doAD, 385 | "Vec CC Seed Randomize": doRN, 386 | "Vec CC Scaling": scaling, 387 | "Vec CC Version": VERSION, 388 | } 389 | ) 390 | 391 | steps: int = getattr(p, "firstpass_steps", None) or p.steps 392 | 393 | bri /= steps 394 | con /= steps 395 | sat = pow(sat, 1.0 / steps) 396 | r /= steps 397 | g /= steps 398 | b /= steps 399 | 400 | mode: str = "x" if latent else "denoised" 401 | 402 | setattr( 403 | KDiffusionSampler, 404 | "vec_cc", 405 | { 406 | "enable": True, 407 | "mode": mode, 408 | "bri": bri, 409 | "con": con, 410 | "sat": sat, 411 | "r": r, 412 | "g": g, 413 | "b": b, 414 | "method": method, 415 | "doHR": doHR, 416 | "doAD": doAD, 417 | "scaling": scaling, 418 | "step": steps, 419 | }, 420 | ) 421 | 422 | return p 423 | 424 | def 
before_hr(self, p, enable: bool, *args, **kwargs): 425 | 426 | if enable: 427 | steps: int = getattr(p, "hr_second_pass_steps", None) or p.steps 428 | KDiffusionSampler.vec_cc["step"] = steps 429 | 430 | return p 431 | -------------------------------------------------------------------------------- /scripts/cc_hdr.py: -------------------------------------------------------------------------------- 1 | from modules.processing import process_images, get_fixed_seed 2 | from modules.shared import state 3 | from modules import scripts 4 | from copy import copy 5 | import gradio as gr 6 | import numpy as np 7 | import cv2 as cv 8 | 9 | 10 | def mergeHDR(imgs: list, path: str, depth: str, fmt: str, gamma: float): 11 | """https://docs.opencv.org/4.8.0/d2/df0/tutorial_py_hdr.html""" 12 | 13 | import datetime 14 | import math 15 | import os 16 | 17 | out_dir = os.path.join(os.path.dirname(path), "hdr") 18 | os.makedirs(out_dir, exist_ok=True) 19 | print(f'\nSaving HDR Outputs to "{out_dir}"\n') 20 | 21 | imgs_np = [np.asarray(img, dtype=np.uint8) for img in imgs] 22 | 23 | merge = cv.createMergeMertens() 24 | hdr = merge.process(imgs_np) 25 | 26 | # shift min to 0.0 27 | hdr += math.ceil(0.0 - np.min(hdr) * 1000) / 1000 28 | # print(f"({np.min(hdr)}, {np.max(hdr)}") 29 | 30 | target = 65535 if depth == "16bpc" else 255 31 | precision = np.uint16 if depth == "16bpc" else np.uint8 32 | 33 | hdr = np.power(hdr, (1 / gamma)) 34 | 35 | ldr = np.clip(hdr * target, 0, target).astype(precision) 36 | rgb = cv.cvtColor(ldr, cv.COLOR_BGR2RGB) 37 | 38 | time = datetime.datetime.now().strftime("%H-%M-%S") 39 | cv.imwrite(os.path.join(out_dir, f"{time}{fmt}"), rgb) 40 | 41 | 42 | class VectorHDR(scripts.Script): 43 | 44 | def title(self): 45 | return "High Dynamic Range" 46 | 47 | def show(self, is_img2img): 48 | return True 49 | 50 | def ui(self, is_img2img): 51 | 52 | with gr.Row(): 53 | count = gr.Slider( 54 | label="Brackets", 55 | minimum=3, 56 | maximum=9, 57 | step=2, 58 | value=5, 59 | ) 60 | 61 | gap = gr.Slider( 62 | label="Gaps", 63 | minimum=0.50, 64 | maximum=2.50, 65 | step=0.25, 66 | value=1.25, 67 | ) 68 | 69 | with gr.Accordion( 70 | "Merge Options", 71 | elem_id=f'vec-hdr-{"img" if is_img2img else "txt"}', 72 | open=False, 73 | ): 74 | 75 | auto = gr.Checkbox(label="Automatically Merge", value=True) 76 | 77 | with gr.Row(): 78 | depth = gr.Radio(["16bpc", "8bpc"], label="Bit Depth", value="16bpc") 79 | fmt = gr.Radio([".tiff", ".png"], label="Image Format", value=".tiff") 80 | 81 | gamma = gr.Slider( 82 | label="Gamma", 83 | info="Lower: Darker | Higher: Brighter", 84 | minimum=0.2, 85 | maximum=2.2, 86 | step=0.2, 87 | value=1.2, 88 | ) 89 | 90 | for comp in (count, gap, auto, depth, fmt, gamma): 91 | comp.do_not_save_to_config = True 92 | 93 | return [count, gap, auto, depth, fmt, gamma] 94 | 95 | def run( 96 | self, p, count: int, gap: float, auto: bool, depth: str, fmt: str, gamma: float 97 | ): 98 | center: int = count // 2 99 | brackets = brightness_brackets(count, gap) 100 | 101 | p.seed = get_fixed_seed(p.seed) 102 | p.scripts.script("vectorscope cc").xyzCache.update({"Enable": "False"}) 103 | 104 | baseline = process_images(p) 105 | pc = copy(p) 106 | 107 | imgs = [None] * count 108 | imgs[center] = baseline.images[0] 109 | 110 | for it in range(count): 111 | 112 | if state.skipped or state.interrupted or state.stopping_generation: 113 | print("HDR Process Skipped...") 114 | return baseline 115 | 116 | if it == center: 117 | continue 118 | 119 | pc.scripts.script("vectorscope 
cc").xyzCache.update( 120 | { 121 | "Enable": "True", 122 | "Alt": "True", 123 | "Brightness": brackets[it], 124 | "DoHR": "False", 125 | "Method": "Ones", 126 | "Scaling": "1 - Cos", 127 | } 128 | ) 129 | 130 | proc = process_images(pc) 131 | imgs[it] = proc.images[0] 132 | 133 | if auto: 134 | mergeHDR(imgs, p.outpath_samples, depth, fmt, gamma) 135 | 136 | baseline.images = imgs 137 | return baseline 138 | 139 | 140 | def brightness_brackets(count: int, gap: float) -> list[float]: 141 | half = count // 2 142 | return [gap * (i - half) for i in range(count)] 143 | -------------------------------------------------------------------------------- /scripts/dot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Haoming02/sd-webui-vectorscope-cc/39a4aeba45864967ff590ba57532cae600c0331e/scripts/dot.png -------------------------------------------------------------------------------- /scripts/vectorscope.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Haoming02/sd-webui-vectorscope-cc/39a4aeba45864967ff590ba57532cae600c0331e/scripts/vectorscope.png -------------------------------------------------------------------------------- /style.css: -------------------------------------------------------------------------------- 1 | #cc-dot-txt, #cc-dot-img { 2 | position: absolute; 3 | width: 24px; 4 | height: 24px; 5 | pointer-events: none; 6 | } 7 | 8 | #cc-img-txt, #cc-img-img { 9 | cursor: pointer; 10 | height: 100%; 11 | width: auto; 12 | margin: auto; 13 | } 14 | 15 | #vec-cc-txt, #vec-cc-img { 16 | user-select: none; 17 | } 18 | 19 | #vec-cc-txt button, #vec-cc-txt label { 20 | border-radius: 0.5em; 21 | } 22 | 23 | #vec-cc-img button, #vec-cc-img label { 24 | border-radius: 0.5em; 25 | } 26 | 27 | #vec-cc-txt .style-rows { 28 | align-items: end; 29 | gap: 1em; 30 | } 31 | 32 | #vec-cc-img .style-rows { 33 | align-items: end; 34 | gap: 1em; 35 | } 36 | 37 | #vec-hdr-txt label, #vec-hdr-img label { 38 | border-radius: 0.5em; 39 | } 40 | --------------------------------------------------------------------------------