├── .github
└── workflows
│ └── publish.yml
├── .gitignore
├── LICENSE
├── README.md
├── __init__.py
├── blend_modes
├── arithmetic.py
├── binary.py
├── darken.py
├── hsi.py
├── hsi_helpers.py
├── hsl.py
├── hsl_helpers.py
├── hsv.py
├── hsv_helpers.py
├── hsy.py
├── hsy_helpers.py
├── index.py
├── lighten.py
├── lighten_helpers.py
├── mix.py
├── modulo.py
└── negative.py
├── blend_modes_enum.py
├── helpers.py
├── pyproject.toml
└── workfow.example.json
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: Publish to Comfy registry
2 | on:
3 | workflow_dispatch:
4 |
5 | permissions:
6 | issues: write
7 |
8 | jobs:
9 | publish-node:
10 | name: Publish Custom Node to registry
11 | runs-on: ubuntu-latest
12 | if: ${{ github.repository_owner == 'vault-developer' }}
13 | steps:
14 | - name: Check out code
15 | uses: actions/checkout@v4
16 | - name: Publish Custom Node
17 | uses: Comfy-Org/publish-node-action@v1
18 | with:
19 | personal_access_token: ${{ secrets.COMFY_REGISTRY_ACCESS_TOKEN }}
20 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea
2 | */__pycache__/*
3 | *.py[cod]
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Trott Albert
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## COMFYUI-IMAGE-BLENDER
2 |
3 | ## About
4 | ```ComfyuiImageBlender``` is a custom node for ComfyUI.
5 | You can use it to blend two images together using various modes.
6 | Currently, 88 blending modes are supported and 45 more are planned to be added.
7 | The blend mode logic was borrowed from / inspired by [Krita](https://github.com/KDE/krita) blending modes.
8 |
9 | Features:
10 | - 88 blending modes
11 | - support strength parameter
12 | - support mask parameter
13 |
14 |
15 | Supported blending modes:
16 |
17 | - Arithmetic group
18 |
19 | - addition
20 | - divide
21 | - inverse subtract
22 | - multiply
23 | - subtract
24 |
25 |
26 | - Binary group
27 |
28 | - AND
29 | - CONVERSE
30 | - IMPLICATION
31 | - NAND
32 | - NOR
33 | - NOT CONVERSE
34 | - NOT IMPLICATION
35 | - OR
36 | - XNOR
37 | - XOR
38 |
39 |
40 | - Darken group
41 |
42 | - burn
43 | - darken
44 | - darker color
45 | - easy burn
46 | - fog darken
47 | - gamma dark
48 | - linear burn
49 | - shade
50 |
51 |
52 | - HSI group
53 |
54 | - color hsi
55 | - hue hsi
56 | - saturation hsi
57 | - intensity
58 | - decrease saturation hsi
59 | - increase saturation hsi
60 | - decrease intensity
61 | - increase intensity
62 |
63 |
64 | - HSL group
65 |
66 | - color hsl
67 | - hue hsl
68 | - saturation hsl
69 | - lightness
70 | - decrease saturation hsl
71 | - increase saturation hsl
72 | - decrease lightness
73 | - increase lightness
74 |
75 |
76 | - HSV group
77 |
78 | - color hsv
79 | - hue hsv
80 | - saturation hsv
81 | - value
82 | - decrease saturation hsv
83 | - increase saturation hsv
84 | - decrease value
85 | - increase value
86 |
87 |
88 | - HSY group
89 |
90 | - color
91 | - hue
92 | - saturation
93 | - luminosity
94 | - decrease saturation
95 | - increase saturation
96 | - decrease luminosity
97 | - increase luminosity
98 |
99 |
100 | - Lighten group
101 |
102 | - color dodge
103 | - linear dodge
104 | - lighten
105 | - linear light
106 | - screen
107 | - pin light
108 | - vivid light
109 | - flat light
110 | - hard light
111 | - soft light (ifs illusions)
112 | - soft light (pegtop-delphi)
113 | - soft light (ps)
114 | - soft light (svg)
115 | - gamma light
116 | - gamma illumination
117 | - lighter color
118 | - p-norm a
119 | - p-norm b
120 | - super light
121 | - tint (ifs illusions)
122 | - fog lighten (ifs illusions)
123 | - easy dodge
124 | - luminosity/shine (sai)
125 |
126 |
127 | - Mix group
128 |
129 | - normal
130 | - overlay
131 |
132 |
133 | - Modulo group
134 |
135 | - modulo
136 | - divisive modulo
137 |
138 |
139 | - Negative group
140 |
141 | - difference
142 | - equivalence
143 | - additive subtractive
144 | - exclusion
145 | - arcus tangent
146 | - negation
147 |
148 |
149 |
150 |
151 |
152 |
153 | ## Examples:
154 |
155 | Addition
156 |
157 |
158 |
159 |
160 | Darken
161 |
162 |
163 |
164 |
165 | Saturation HSV
166 |
167 |
168 |
169 |
170 | ## Comfyui workflow
171 | Feel free to check the example workflow [here](https://github.com/vault-developer/comfyui-image-blender/blob/master/workfow.example.json).
172 |
173 | https://github.com/user-attachments/assets/4b503e6a-cdff-4a3d-ac2b-a482ab0d7d8c
174 |
175 |
176 | ## Installation
177 | You need [comfyui](https://github.com/comfyanonymous/ComfyUI) installed first.
178 | Then several options are available:
179 | 1. You can download or git clone this repository inside ComfyUI/custom_nodes/ directory.
180 | 2. If you use comfy-cli, node can be also downloaded from [comfy registry](https://registry.comfy.org/publishers/vault-developer/nodes/comfyui-image-blender):
181 | ```comfy node registry-install comfyui-image-blender```
182 | 3. Comfy-ui manager support will be added when [this pull request](https://github.com/ltdrdata/ComfyUI-Manager/pull/925) is merged.
183 |
184 | ## Contribution and troubleshooting
185 | This is a rough implementation; I would appreciate any feedback.
186 | Feel free to raise an issue if you spot any mistake or have a suggestion.
187 |
188 | If you want to contribute, feel free to fork this repository and create a pull request.
189 | There are still 45 blending modes to be added, so you can help with that.
190 |
191 |
192 | ## Future plans
193 | There are still some things to be done:
194 | - [ ] clean up the code
195 | - [ ] add more blending modes
196 | - [ ] test with PNG images
197 | - [ ] enhance error handling
198 | - [ ] add comfyui manager support
199 |
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from .blend_modes.index import blend_functions
4 | from .blend_modes_enum import BlendModes
5 |
class ImageBlender:
    """ComfyUI node that blends two images with a selectable blend mode.

    Supports an opacity-style `strength` parameter and an optional mask that
    restricts where the blended result replaces the base image.
    """

    def __init__(self):
        # Dispatch table: BlendModes enum member -> blend implementation.
        self.blend_functions = blend_functions

    @classmethod
    def INPUT_TYPES(cls):
        """Describe the node's sockets/widgets for the ComfyUI front-end."""
        strength_options = {"default": 1, "min": 0.0, "max": 1.0, "step": 0.01}
        return {
            "required": {
                "base_image": ("IMAGE",),
                "blend_image": ("IMAGE",),
                "strength": ("FLOAT", strength_options),
                "blend_mode": (
                    [mode.value for mode in BlendModes],
                    {"default": BlendModes.MIX_NORMAL.value},
                ),
            },
            "optional": {
                "mask": ("MASK",),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "blend"
    CATEGORY = "ImageBlender"

    def blend(self, base_image: torch.Tensor, blend_image: torch.Tensor, strength: float, blend_mode: str, mask: torch.Tensor = None) -> tuple:
        """Blend `blend_image` over `base_image` and return a 1-tuple result."""
        assert base_image.shape == blend_image.shape, "Base and blend images must have the same shape"
        assert base_image.shape[-1] == 3, "Input images must have 3 channels (RGB)"

        # Unknown modes fall back to returning the base image unchanged.
        blend_function = self.blend_functions.get(BlendModes(blend_mode), lambda x, y: x)
        blended = blend_function(base_image, blend_image)

        if mask is not None:
            # A (B, H, W) mask is broadcast across the channel dimension.
            if mask.dim() == 3:
                mask = mask.unsqueeze(-1).expand(-1, -1, -1, base_image.shape[-1])

            if mask.size() != base_image.size():
                print(f"WARN: Mask size {mask.size()} is different from image size {base_image.size()}, mask is ignored")
            else:
                blended = blended * mask + base_image * (1 - mask)

        # Apply opacity, then normalize into the valid [0, 1] range.
        blended = blended * strength + base_image * (1 - strength)
        return (torch.clamp(blended, 0, 1),)
58 |
# Registration tables read by ComfyUI when the custom node package is loaded.
NODE_CLASS_MAPPINGS = {"ImageBlender": ImageBlender}

NODE_DISPLAY_NAME_MAPPINGS = {"ImageBlender": "ImageBlender"}
--------------------------------------------------------------------------------
/blend_modes/arithmetic.py:
--------------------------------------------------------------------------------
1 | from ..helpers import replace_zeros
2 | from ..blend_modes_enum import BlendModes
3 | import torch
4 |
def arithmetic_addition(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Channel-wise sum of both images, clamped to [0, 1]."""
    return (base_image + blend_image).clamp(0.0, 1.0)

def arithmetic_divide(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Base divided by blend; zeros in the divisor are replaced to avoid inf."""
    denominator = replace_zeros(blend_image)
    return (base_image / denominator).clamp(0.0, 1.0)

def arithmetic_inverse_subtract(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Blend minus the inverted base, clamped to [0, 1]."""
    return (blend_image - (1 - base_image)).clamp(0.0, 1.0)

def arithmetic_multiply(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Channel-wise product of both images, clamped to [0, 1]."""
    return (base_image * blend_image).clamp(0.0, 1.0)

def arithmetic_subtract(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Base minus blend, clamped to [0, 1]."""
    return (base_image - blend_image).clamp(0.0, 1.0)
26 |
# Dispatch table: blend-mode enum member -> arithmetic implementation.
arithmetic_blend_functions = {
    BlendModes.ARITHMETIC_ADDITION: arithmetic_addition,
    BlendModes.ARITHMETIC_DIVIDE: arithmetic_divide,
    BlendModes.ARITHMETIC_INVERSE_SUBTRACT: arithmetic_inverse_subtract,
    BlendModes.ARITHMETIC_MULTIPLY: arithmetic_multiply,
    BlendModes.ARITHMETIC_SUBTRACT: arithmetic_subtract,
}
--------------------------------------------------------------------------------
/blend_modes/binary.py:
--------------------------------------------------------------------------------
1 | from ..helpers import replace_zeros, float_to_uint8, uint8_to_float
2 | from ..blend_modes_enum import BlendModes
3 | import torch
4 |
def binary_and(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Bitwise AND of the 8-bit quantized channel values."""
    a = float_to_uint8(base_image)
    b = float_to_uint8(blend_image)
    return uint8_to_float(a & b)

def binary_converse(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Bitwise converse implication: NOT base OR blend."""
    a = float_to_uint8(base_image)
    b = float_to_uint8(blend_image)
    return uint8_to_float(~a | b)

def binary_implication(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Bitwise implication: base OR NOT blend."""
    a = float_to_uint8(base_image)
    b = float_to_uint8(blend_image)
    return uint8_to_float(a | ~b)

def binary_nand(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Bitwise NOT of (base AND blend)."""
    a = float_to_uint8(base_image)
    b = float_to_uint8(blend_image)
    return uint8_to_float(~(a & b))

def binary_nor(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Bitwise NOT of (base OR blend)."""
    a = float_to_uint8(base_image)
    b = float_to_uint8(blend_image)
    return uint8_to_float(~(a | b))

def binary_not_converse(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Negated converse implication: base AND NOT blend."""
    a = float_to_uint8(base_image)
    b = float_to_uint8(blend_image)
    return uint8_to_float(a & ~b)

def binary_not_implication(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Negated implication: NOT base AND blend."""
    a = float_to_uint8(base_image)
    b = float_to_uint8(blend_image)
    return uint8_to_float(~a & b)

def binary_or(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Bitwise OR of the 8-bit quantized channel values."""
    a = float_to_uint8(base_image)
    b = float_to_uint8(blend_image)
    return uint8_to_float(a | b)

def binary_xnor(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Bitwise NOT of (base XOR blend)."""
    a = float_to_uint8(base_image)
    b = float_to_uint8(blend_image)
    return uint8_to_float(~(a ^ b))

def binary_xor(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Bitwise XOR of the 8-bit quantized channel values."""
    a = float_to_uint8(base_image)
    b = float_to_uint8(blend_image)
    return uint8_to_float(a ^ b)
44 |
# Dispatch table: blend-mode enum member -> bitwise implementation.
binary_blend_functions = {
    BlendModes.BINARY_AND: binary_and,
    BlendModes.BINARY_CONVERSE: binary_converse,
    BlendModes.BINARY_IMPLICATION: binary_implication,
    BlendModes.BINARY_NAND: binary_nand,
    BlendModes.BINARY_NOR: binary_nor,
    BlendModes.BINARY_NOT_CONVERSE: binary_not_converse,
    BlendModes.BINARY_NOT_IMPLICATION: binary_not_implication,
    BlendModes.BINARY_OR: binary_or,
    BlendModes.BINARY_XNOR: binary_xnor,
    BlendModes.BINARY_XOR: binary_xor,
}
--------------------------------------------------------------------------------
/blend_modes/darken.py:
--------------------------------------------------------------------------------
1 | from ..helpers import replace_zeros
2 | from ..blend_modes_enum import BlendModes
3 | import torch
4 |
def darken_burn(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Color burn: darken the blend image by the (zero-safe) base image."""
    denominator = replace_zeros(base_image)
    burned = 1 - (1 - blend_image) / denominator
    return torch.clamp(burned, 0.0, 1.0)

def darken_darken(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Per-channel minimum of the two images."""
    return torch.min(base_image, blend_image).clamp(0.0, 1.0)

def darken_darker_color(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Pick, per pixel, whichever whole color has the smaller channel sum."""
    base_sum = base_image.sum(dim=-1, keepdim=True)
    blend_sum = blend_image.sum(dim=-1, keepdim=True)
    return torch.where(base_sum < blend_sum, base_image, blend_image)

def darken_easy_burn(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Easy burn: power-curve darkening of the inverted base (15/13 factor as in Krita)."""
    safe_inverted_base = replace_zeros(1.0 - base_image)
    result = 1.0 - torch.pow(safe_inverted_base, blend_image * (15 / 13))
    return torch.clamp(result, 0.0, 1.0)

def darken_fog_darken(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Fog darken; branches on base < 0.5 (note: both branches are algebraically equal)."""
    below = (1 - base_image) * base_image + base_image * blend_image
    above = base_image * blend_image + base_image - torch.pow(base_image, 2)
    return torch.where(base_image < 0.5, below, above).clamp(0.0, 1.0)

def darken_gamma_dark(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Gamma adjustment with exponent 1/base; a zero base maps to black."""
    safe_base = replace_zeros(base_image)
    result = torch.pow(blend_image, 1.0 / safe_base)
    # pow() was computed with a substituted base, so force true zeros to black.
    result = torch.where(base_image == 0.0, torch.zeros_like(result), result)
    return torch.clamp(result, 0.0, 1.0)

def darken_linear_burn(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Linear burn: base + blend - 1, clamped to [0, 1]."""
    return (base_image + blend_image - 1).clamp(0.0, 1.0)

def darken_shade(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Shade: inverted mix of the blend image with the sqrt of the inverted base."""
    shaded = 1.0 - ((1.0 - blend_image) * base_image + torch.sqrt(1.0 - base_image))
    return torch.clamp(shaded, 0.0, 1.0)
44 |
# Dispatch table: blend-mode enum member -> darken implementation.
darken_blend_functions = {
    BlendModes.DARKEN_BURN: darken_burn,
    BlendModes.DARKEN_DARKEN: darken_darken,
    BlendModes.DARKEN_DARKER_COLOR: darken_darker_color,
    BlendModes.DARKEN_EASY_BURN: darken_easy_burn,
    BlendModes.DARKEN_FOG_DARKEN: darken_fog_darken,
    BlendModes.DARKEN_GAMMA_DARK: darken_gamma_dark,
    BlendModes.DARKEN_LINEAR_BURN: darken_linear_burn,
    BlendModes.DARKEN_SHADE: darken_shade,
}
--------------------------------------------------------------------------------
/blend_modes/hsi.py:
--------------------------------------------------------------------------------
1 | from .hsi_helpers import get_intensity, add_intensity, get_saturation_hsi, set_intensity
2 | from ..blend_modes_enum import BlendModes
3 | from ..helpers import set_saturation
4 | import torch
5 |
def hsi_intensity(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Keep base hue/saturation, take intensity from the blend image."""
    return set_intensity(base_image, get_intensity(blend_image)).clamp(0, 1)

def hsi_increase_intensity(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Raise the base intensity by the blend image's intensity."""
    return add_intensity(base_image, get_intensity(blend_image)).clamp(0, 1)

def hsi_decrease_intensity(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Lower the base intensity by (1 - blend intensity)."""
    delta = get_intensity(blend_image) - 1
    return add_intensity(base_image, delta).clamp(0, 1)

def hsi_saturation(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Take saturation from the blend image, keep base hue and intensity."""
    base_intensity = get_intensity(base_image)
    blend_saturation = get_saturation_hsi(blend_image)
    result = set_saturation(base_image, blend_saturation)
    return set_intensity(result, base_intensity).clamp(0, 1)

def hsi_increase_saturation(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Push the base saturation toward 1, weighted by the blend saturation."""
    base_saturation = get_saturation_hsi(base_image)
    blend_saturation = get_saturation_hsi(blend_image)
    boosted = torch.lerp(base_saturation, torch.ones_like(base_saturation), blend_saturation)
    base_intensity = get_intensity(base_image)
    result = set_saturation(base_image, boosted)
    return set_intensity(result, base_intensity).clamp(0, 1)

def hsi_decrease_saturation(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Pull the base saturation toward 0, weighted by the blend saturation."""
    base_saturation = get_saturation_hsi(base_image)
    blend_saturation = get_saturation_hsi(blend_image)
    reduced = torch.lerp(torch.zeros_like(base_saturation), base_saturation, blend_saturation)
    base_intensity = get_intensity(base_image)
    result = set_saturation(base_image, reduced)
    return set_intensity(result, base_intensity).clamp(0, 1)

def hsi_color(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Take hue and saturation from the blend image, keep base intensity."""
    base_intensity = get_intensity(base_image)
    return set_intensity(blend_image, base_intensity).clamp(0, 1)

def hsi_hue(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Take hue from the blend image, keep base saturation and intensity."""
    base_saturation = get_saturation_hsi(base_image)
    base_intensity = get_intensity(base_image)
    result = set_saturation(blend_image.clone(), base_saturation)
    return set_intensity(result, base_intensity).clamp(0, 1)
67 |
# Dispatch table: blend-mode enum member -> HSI implementation.
hsi_blend_functions = {
    BlendModes.HSI_COLOR: hsi_color,
    BlendModes.HSI_HUE: hsi_hue,
    BlendModes.HSI_SATURATION: hsi_saturation,
    BlendModes.HSI_INTENSITY: hsi_intensity,
    BlendModes.HSI_DECREASE_SATURATION: hsi_decrease_saturation,
    BlendModes.HSI_INCREASE_SATURATION: hsi_increase_saturation,
    BlendModes.HSI_DECREASE_INTENSITY: hsi_decrease_intensity,
    BlendModes.HSI_INCREASE_INTENSITY: hsi_increase_intensity,
}
--------------------------------------------------------------------------------
/blend_modes/hsi_helpers.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from ..helpers import set_ilvy, add_ilvy
3 |
def get_intensity(image: torch.Tensor) -> torch.Tensor:
    """HSI intensity: the per-pixel mean of the channel values."""
    return image.mean(dim=-1)

def set_intensity(image: torch.Tensor, new_intensity: torch.Tensor) -> torch.Tensor:
    """Return *image* with its intensity replaced by *new_intensity*."""
    return set_ilvy(image, new_intensity, get_intensity).clamp(0, 1)

def add_intensity(image: torch.Tensor, new_intensity: torch.Tensor) -> torch.Tensor:
    """Return *image* with *new_intensity* added onto its intensity."""
    return add_ilvy(image, new_intensity, get_intensity).clamp(0, 1)

def get_saturation_hsi(image: torch.Tensor) -> torch.Tensor:
    """HSI saturation: 1 - min/intensity, with 0 for achromatic pixels."""
    max_vals = image.amax(dim=-1, keepdim=True)
    min_vals = image.amin(dim=-1, keepdim=True)
    chroma = max_vals - min_vals
    intensity = get_intensity(image).unsqueeze(-1)
    saturation = torch.where(
        chroma > 1e-8,
        1.0 - min_vals / intensity,
        torch.zeros_like(chroma),
    )
    return saturation.squeeze(-1).clamp(0, 1)
29 |
--------------------------------------------------------------------------------
/blend_modes/hsl.py:
--------------------------------------------------------------------------------
1 | from .hsl_helpers import get_lightness, set_lightness, get_saturation_hsl, add_lightness
2 | from ..blend_modes_enum import BlendModes
3 | from ..helpers import set_saturation
4 | import torch
5 |
def hsl_lightness(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Keep base hue/saturation, take lightness from the blend image."""
    return set_lightness(base_image, get_lightness(blend_image)).clamp(0, 1)

def hsl_increase_lightness(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Raise the base lightness by the blend image's lightness."""
    return add_lightness(base_image, get_lightness(blend_image)).clamp(0, 1)

def hsl_decrease_lightness(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Lower the base lightness by (1 - blend lightness)."""
    delta = get_lightness(blend_image) - 1
    return add_lightness(base_image, delta).clamp(0, 1)

def hsl_saturation(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Take saturation from the blend image, keep base hue and lightness."""
    base_lightness = get_lightness(base_image)
    blend_saturation = get_saturation_hsl(blend_image)
    result = set_saturation(base_image, blend_saturation)
    return set_lightness(result, base_lightness).clamp(0, 1)

def hsl_increase_saturation(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Push the base saturation toward 1, weighted by the blend saturation."""
    base_saturation = get_saturation_hsl(base_image)
    blend_saturation = get_saturation_hsl(blend_image)
    boosted = torch.lerp(base_saturation, torch.ones_like(base_saturation), blend_saturation)
    base_lightness = get_lightness(base_image)
    result = set_saturation(base_image, boosted)
    return set_lightness(result, base_lightness).clamp(0, 1)

def hsl_decrease_saturation(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Pull the base saturation toward 0, weighted by the blend saturation."""
    base_saturation = get_saturation_hsl(base_image)
    blend_saturation = get_saturation_hsl(blend_image)
    reduced = torch.lerp(torch.zeros_like(base_saturation), base_saturation, blend_saturation)
    base_lightness = get_lightness(base_image)
    result = set_saturation(base_image, reduced)
    return set_lightness(result, base_lightness).clamp(0, 1)

def hsl_color(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Take hue and saturation from the blend image, keep base lightness."""
    base_lightness = get_lightness(base_image)
    return set_lightness(blend_image, base_lightness).clamp(0, 1)

def hsl_hue(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Take hue from the blend image, keep base saturation and lightness."""
    base_saturation = get_saturation_hsl(base_image)
    base_lightness = get_lightness(base_image)
    result = set_saturation(blend_image.clone(), base_saturation)
    return set_lightness(result, base_lightness).clamp(0, 1)
67 |
# Dispatch table: blend-mode enum member -> HSL implementation.
hsl_blend_functions = {
    BlendModes.HSL_COLOR: hsl_color,
    BlendModes.HSL_HUE: hsl_hue,
    BlendModes.HSL_SATURATION: hsl_saturation,
    BlendModes.HSL_LIGHTNESS: hsl_lightness,
    BlendModes.HSL_DECREASE_SATURATION: hsl_decrease_saturation,
    BlendModes.HSL_INCREASE_SATURATION: hsl_increase_saturation,
    BlendModes.HSL_DECREASE_LIGHTNESS: hsl_decrease_lightness,
    BlendModes.HSL_INCREASE_LIGHTNESS: hsl_increase_lightness,
}
--------------------------------------------------------------------------------
/blend_modes/hsl_helpers.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from ..helpers import set_ilvy, add_ilvy
3 |
def get_lightness(image: torch.Tensor) -> torch.Tensor:
    """HSL lightness: midpoint of the per-pixel max and min channel values."""
    max_vals = image.amax(dim=-1)
    min_vals = image.amin(dim=-1)
    return (max_vals + min_vals) * 0.5

def set_lightness(image: torch.Tensor, new_lightness: torch.Tensor) -> torch.Tensor:
    """Return *image* with its lightness replaced by *new_lightness*."""
    return set_ilvy(image, new_lightness, get_lightness).clamp(0, 1)

def add_lightness(image: torch.Tensor, new_lightness: torch.Tensor) -> torch.Tensor:
    """Return *image* with *new_lightness* added onto its lightness."""
    return add_ilvy(image, new_lightness, get_lightness).clamp(0, 1)

def get_saturation_hsl(image: torch.Tensor) -> torch.Tensor:
    """HSL saturation: chroma / (1 - |2L - 1|), with 0 where the divisor vanishes."""
    max_vals = image.amax(dim=-1, keepdim=True)
    min_vals = image.amin(dim=-1, keepdim=True)
    chroma = max_vals - min_vals
    divisor = 1.0 - torch.abs(2.0 * get_lightness(image).unsqueeze(-1) - 1.0)
    saturation = torch.where(
        divisor > 1e-8,
        chroma / divisor,
        torch.zeros_like(chroma),
    )
    return saturation.squeeze(-1).clamp(0, 1)
--------------------------------------------------------------------------------
/blend_modes/hsv.py:
--------------------------------------------------------------------------------
1 | from .hsv_helpers import get_value, set_value, get_saturation_hsv, add_value
2 | from ..blend_modes_enum import BlendModes
3 | from ..helpers import set_saturation
4 | import torch
5 |
def hsv_value(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Keep base hue/saturation, take value from the blend image."""
    return set_value(base_image, get_value(blend_image)).clamp(0, 1)

def hsv_increase_value(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Raise the base value by the blend image's value."""
    return add_value(base_image, get_value(blend_image)).clamp(0, 1)

def hsv_decrease_value(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Lower the base value by (1 - blend value)."""
    delta = get_value(blend_image) - 1
    return add_value(base_image, delta).clamp(0, 1)

def hsv_saturation(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Take saturation from the blend image, keep base hue and value."""
    base_value = get_value(base_image)
    blend_saturation = get_saturation_hsv(blend_image)
    result = set_saturation(base_image, blend_saturation)
    return set_value(result, base_value).clamp(0, 1)

def hsv_increase_saturation(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Push the base saturation toward 1, weighted by the blend saturation."""
    base_saturation = get_saturation_hsv(base_image)
    blend_saturation = get_saturation_hsv(blend_image)
    boosted = torch.lerp(base_saturation, torch.ones_like(base_saturation), blend_saturation)
    base_value = get_value(base_image)
    result = set_saturation(base_image, boosted)
    return set_value(result, base_value).clamp(0, 1)

def hsv_decrease_saturation(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Pull the base saturation toward 0, weighted by the blend saturation."""
    base_saturation = get_saturation_hsv(base_image)
    blend_saturation = get_saturation_hsv(blend_image)
    reduced = torch.lerp(torch.zeros_like(base_saturation), base_saturation, blend_saturation)
    base_value = get_value(base_image)
    result = set_saturation(base_image, reduced)
    return set_value(result, base_value).clamp(0, 1)

def hsv_color(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Take hue and saturation from the blend image, keep base value."""
    base_value = get_value(base_image)
    return set_value(blend_image, base_value).clamp(0, 1)

def hsv_hue(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Take hue from the blend image, keep base saturation and value."""
    base_saturation = get_saturation_hsv(base_image)
    base_value = get_value(base_image)
    result = set_saturation(blend_image.clone(), base_saturation)
    return set_value(result, base_value).clamp(0, 1)
67 |
# Dispatch table: blend-mode enum member -> HSV implementation.
hsv_blend_functions = {
    BlendModes.HSV_COLOR: hsv_color,
    BlendModes.HSV_HUE: hsv_hue,
    BlendModes.HSV_SATURATION: hsv_saturation,
    BlendModes.HSV_VALUE: hsv_value,
    BlendModes.HSV_DECREASE_SATURATION: hsv_decrease_saturation,
    BlendModes.HSV_INCREASE_SATURATION: hsv_increase_saturation,
    BlendModes.HSV_DECREASE_VALUE: hsv_decrease_value,
    BlendModes.HSV_INCREASE_VALUE: hsv_increase_value,
}
--------------------------------------------------------------------------------
/blend_modes/hsv_helpers.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from ..helpers import set_ilvy, add_ilvy
3 |
def get_value(image: torch.Tensor) -> torch.Tensor:
    """HSV value: the per-pixel channel maximum."""
    return image.amax(dim=-1)

def set_value(image: torch.Tensor, new_value: torch.Tensor) -> torch.Tensor:
    """Return *image* with its value replaced by *new_value*."""
    return set_ilvy(image, new_value, get_value).clamp(0, 1)

def add_value(image: torch.Tensor, new_value: torch.Tensor) -> torch.Tensor:
    """Return *image* with *new_value* added onto its value."""
    return add_ilvy(image, new_value, get_value).clamp(0, 1)

def get_saturation_hsv(image: torch.Tensor) -> torch.Tensor:
    """HSV saturation: chroma / value, with 0 for achromatic pixels."""
    max_vals = image.amax(dim=-1, keepdim=True)
    min_vals = image.amin(dim=-1, keepdim=True)
    chroma = max_vals - min_vals
    saturation = torch.where(
        chroma > 1e-8,
        chroma / max_vals,
        torch.zeros_like(chroma),
    )
    return saturation.squeeze(-1).clamp(0, 1)
--------------------------------------------------------------------------------
/blend_modes/hsy.py:
--------------------------------------------------------------------------------
1 | from .hsy_helpers import get_luminosity, set_luminosity, get_saturation_hsy, add_luminosity
2 | from ..helpers import set_saturation
3 | from ..blend_modes_enum import BlendModes
4 | import torch
5 |
def hsy_luminosity(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Keep base hue/saturation, take luminosity from the blend image."""
    return set_luminosity(base_image, get_luminosity(blend_image)).clamp(0, 1)

def hsy_increase_luminosity(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Raise the base luminosity by the blend image's luminosity."""
    return add_luminosity(base_image, get_luminosity(blend_image)).clamp(0, 1)

def hsy_decrease_luminosity(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Lower the base luminosity by (1 - blend luminosity)."""
    delta = get_luminosity(blend_image) - 1
    return add_luminosity(base_image, delta).clamp(0, 1)

def hsy_saturation(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Take saturation from the blend image, keep base hue and luminosity."""
    base_luminosity = get_luminosity(base_image)
    blend_saturation = get_saturation_hsy(blend_image)
    result = set_saturation(base_image, blend_saturation)
    return set_luminosity(result, base_luminosity).clamp(0, 1)

def hsy_increase_saturation(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Push the base saturation toward 1, weighted by the blend saturation."""
    base_saturation = get_saturation_hsy(base_image)
    blend_saturation = get_saturation_hsy(blend_image)
    boosted = torch.lerp(base_saturation, torch.ones_like(base_saturation), blend_saturation)
    base_luminosity = get_luminosity(base_image)
    result = set_saturation(base_image, boosted)
    return set_luminosity(result, base_luminosity).clamp(0, 1)
40 |
41 | def hsy_decrease_saturation(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
42 | base_image_saturation = get_saturation_hsy(base_image)
43 | blend_image_saturation = get_saturation_hsy(blend_image)
44 |
45 | new_saturation = torch.lerp(torch.zeros_like(base_image_saturation), base_image_saturation, blend_image_saturation)
46 |
47 | base_image_luminosity = get_luminosity(base_image)
48 |
49 | result = set_saturation(base_image, new_saturation)
50 | result = set_luminosity(result, base_image_luminosity)
51 |
52 | return result.clamp(0, 1)
53 |
54 | def hsy_color(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
55 | base_image_luminosity = get_luminosity(base_image)
56 | result = set_luminosity(blend_image, base_image_luminosity)
57 | return result.clamp(0, 1)
58 |
59 | def hsy_hue(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
60 | base_image_saturation = get_saturation_hsy(base_image)
61 | base_image_luminosity = get_luminosity(base_image)
62 |
63 | result = blend_image.clone()
64 | result = set_saturation(result, base_image_saturation)
65 | result = set_luminosity(result, base_image_luminosity)
66 | return result.clamp(0, 1)
67 |
68 | hsy_blend_functions = {
69 | BlendModes.HSY_COLOR: hsy_color,
70 | BlendModes.HSY_HUE: hsy_hue,
71 | BlendModes.HSY_SATURATION: hsy_saturation,
72 | BlendModes.HSY_LUMINOSITY: hsy_luminosity,
73 | BlendModes.HSY_DECREASE_SATURATION: hsy_decrease_saturation,
74 | BlendModes.HSY_INCREASE_SATURATION: hsy_increase_saturation,
75 | BlendModes.HSY_DECREASE_LUMINOSITY: hsy_decrease_luminosity,
76 | BlendModes.HSY_INCREASE_LUMINOSITY: hsy_increase_luminosity,
77 | }
--------------------------------------------------------------------------------
/blend_modes/hsy_helpers.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from ..helpers import set_ilvy, add_ilvy
3 |
def get_luminosity(image: torch.Tensor) -> torch.Tensor:
    """Per-pixel luma using BT.601 weights (0.299 R + 0.587 G + 0.114 B)."""
    return (
        0.299 * image[..., 0]
        + 0.587 * image[..., 1]
        + 0.114 * image[..., 2]
    )
8 |
def set_luminosity(image: torch.Tensor, new_luminosity: torch.Tensor) -> torch.Tensor:
    """Force each pixel's luma to *new_luminosity* via the shared set_ilvy helper."""
    result = set_ilvy(image, new_luminosity, get_luminosity)
    return result.clamp(0, 1)

def add_luminosity(image: torch.Tensor, new_luminosity: torch.Tensor) -> torch.Tensor:
    """Add *new_luminosity* to each pixel's luma via the shared add_ilvy helper."""
    result = add_ilvy(image, new_luminosity, get_luminosity)
    return result.clamp(0, 1)
16 |
def get_saturation_hsy(image: torch.Tensor) -> torch.Tensor:
    """HSY 'saturation' = chroma (max channel minus min channel), clamped to [0, 1]."""
    chroma = image.max(dim=-1).values - image.min(dim=-1).values
    return chroma.clamp(0, 1)
--------------------------------------------------------------------------------
/blend_modes/index.py:
--------------------------------------------------------------------------------
1 | from .arithmetic import arithmetic_blend_functions
2 | from .binary import binary_blend_functions
3 | from .darken import darken_blend_functions
4 | from .hsi import hsi_blend_functions
5 | from .hsl import hsl_blend_functions
6 | from .hsv import hsv_blend_functions
7 | from .hsy import hsy_blend_functions
8 | from .lighten import lighten_blend_functions
9 | from .mix import mix_blend_functions
10 | from .negative import negative_blend_functions
11 | from .modulo import modulo_blend_functions
12 |
# Aggregate registry: one dict mapping every implemented BlendModes member to
# its implementation. Later entries would win on key collisions, but each
# family registers a disjoint set of enum keys.
blend_functions = {
    **arithmetic_blend_functions,
    **binary_blend_functions,
    **darken_blend_functions,
    **hsi_blend_functions,
    **hsl_blend_functions,
    **hsv_blend_functions,
    **hsy_blend_functions,
    **lighten_blend_functions,
    **mix_blend_functions,
    **negative_blend_functions,
    **modulo_blend_functions,
}
--------------------------------------------------------------------------------
/blend_modes/lighten.py:
--------------------------------------------------------------------------------
1 | from ..helpers import inv
2 | from .darken import darken_gamma_dark
3 | from .hsy_helpers import get_luminosity
4 | from .lighten_helpers import penumbra
5 | from ..blend_modes_enum import BlendModes
6 | import torch
7 |
def lighten_color_dodge(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Color dodge: base / (1 - blend); the blend == 1 singularity maps to 1 (0 when base == 0)."""
    one = torch.tensor(1.0, device=blend_image.device, dtype=blend_image.dtype)
    zero = torch.tensor(0.0, device=blend_image.device, dtype=blend_image.dtype)

    # Division by zero where blend == 1 yields inf, but those elements are
    # replaced by the explicit singularity handling below.
    dodged = torch.clamp(base_image / (one - blend_image), 0, 1)
    at_singularity = torch.where(base_image == zero, zero, one)

    return torch.where(blend_image == one, at_singularity, dodged)
19 |
def lighten_linear_dodge(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Linear dodge (addition), clamped to [0, 1]."""
    return torch.clamp(base_image + blend_image, 0, 1)

def lighten_lighten(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Lighten: per-channel maximum of the two layers."""
    return torch.maximum(base_image, blend_image)
26 |
def lighten_linear_light(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Linear light: base + 2*blend - 1, clamped to [0, 1]."""
    shifted = blend_image + blend_image + base_image
    return torch.clamp(shifted - 1.0, 0, 1)

def lighten_screen(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Screen: invert, multiply, invert — always at least as bright as either layer."""
    inverted_product = (1 - blend_image) * (1 - base_image)
    return torch.clamp(1 - inverted_product, 0, 1)
34 |
def lighten_pin_light(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Pin light: clamp base into the window [2*blend - 1, 2*blend]."""
    doubled = blend_image * 2
    capped = torch.min(base_image, doubled)
    return torch.max(doubled - 1.0, capped).clamp(0, 1)
40 |
def lighten_vivid_light(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Vivid light: color burn for blend < 0.5, color dodge for blend >= 0.5.

    The blend == 0 and blend == 1 singularities are resolved explicitly so the
    divisions below never decide those pixels.
    """
    half_val = 0.5
    one_val = 1.0
    zero_val = 0.0

    # NOTE: torch.where evaluates both branches; the divisions produce inf/nan
    # on the singular elements, but those elements are always selected from
    # the guarded (non-dividing) branch, so the output stays finite.
    result = torch.where(
        blend_image < half_val,
        torch.where(
            blend_image == zero_val,
            torch.where(base_image == one_val, one_val, zero_val),
            torch.clamp(one_val - (1 - base_image) / (2 * blend_image), 0, 1)
        ),
        torch.where(
            blend_image == one_val,
            torch.where(base_image == zero_val, zero_val, one_val),
            torch.clamp(base_image / (2 * (1 - blend_image)), 0, 1)
        )
    )

    return result
61 |
def lighten_flat_light(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Flat light: hard-mix-gated combination of penumbra A and penumbra B.

    Pixels where the blend layer is (approximately) 0 stay 0; elsewhere a
    hard-mix test on (1 - blend) + base decides which penumbra variant
    supplies the result.
    """
    mask_src_zero = torch.isclose(blend_image, torch.tensor(0.0))
    result = torch.zeros_like(base_image)

    # Boolean-mask indexing flattens the selected pixels to 1-D; everything
    # below operates on that flat selection and is written back at the end.
    mask_src_nonzero = ~mask_src_zero
    inv_src = 1 - blend_image[mask_src_nonzero]

    # Hard mix of inverted blend and base: 1 where their sum exceeds 1, else 0.
    sum_tensor = inv_src + base_image[mask_src_nonzero]
    hard_mix_result = torch.where(sum_tensor > 1.0, torch.ones_like(sum_tensor), torch.zeros_like(sum_tensor))
    penumbra_a_result = penumbra(base_image[mask_src_nonzero], blend_image[mask_src_nonzero], lighten_color_dodge)
    penumbra_b_result = penumbra(blend_image[mask_src_nonzero], base_image[mask_src_nonzero], lighten_color_dodge)

    # Where the hard mix fired, take penumbra B (layers swapped), otherwise A.
    result[mask_src_nonzero] = torch.where(
        torch.isclose(hard_mix_result, torch.tensor(1.0)),
        penumbra_b_result,
        penumbra_a_result
    )

    return result
81 |
def lighten_hard_light(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Hard light: multiply where blend <= 0.5, screen where blend > 0.5.

    The two masked partial results of the original formulation are exactly
    complementary, so a single torch.where selects between them.
    """
    doubled = blend_image + blend_image

    screen_branch = base_image + doubled - 1.0 - base_image * (doubled - 1.0)
    multiply_branch = base_image * doubled

    result = torch.where(blend_image > 0.5, screen_branch, multiply_branch)
    return torch.clamp(result, 0.0, 1.0)
99 |
def lighten_soft_light_ifs_illusions(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Soft light (IFS Illusions): base ** 2^(1 - 2*blend); blend == 0.5 is the identity."""
    gamma = torch.pow(2.0, 2.0 * (0.5 - blend_image))
    return torch.pow(base_image, gamma).clamp(0.0, 1.0)
105 |
def lighten_soft_light_pegtop_delphi(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Soft light (pegtop/Delphi variant): base*screen(blend, base) + blend*base*(1 - base)."""
    term1 = base_image * lighten_screen(blend_image, base_image)
    term2 = blend_image * base_image * (1 - base_image)

    # lighten_linear_dodge is a clamped addition of the two terms.
    result = lighten_linear_dodge(term1, term2)
    return torch.clamp(result, 0.0, 1.0)
112 |
def lighten_soft_light_ps(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Photoshop soft light: darken below blend 0.5, dodge toward sqrt(base) above."""
    bright_branch = base_image + (2 * blend_image - 1) * (torch.sqrt(base_image) - base_image)
    dark_branch = base_image - (1 - 2 * blend_image) * base_image * (1 - base_image)

    blended = torch.where(blend_image > 0.5, bright_branch, dark_branch)
    return blended.clamp(0.0, 1.0)
119 |
def lighten_soft_light_svg(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Soft light per the SVG compositing formula.

    For blend > 0.5 the result moves base toward D(base); for blend <= 0.5 it
    darkens base proportionally to (1 - 2*blend).
    """
    condition_src = blend_image > 0.5
    condition_dst = base_image > 0.25

    # D(base): sqrt above 0.25, a cubic polynomial below.
    D_high = torch.sqrt(base_image)
    D_low = ((16.0 * base_image - 12.0) * base_image + 4.0) * base_image
    D = torch.where(condition_dst, D_high, D_low)

    high_result = base_image + (2.0 * blend_image - 1.0) * (D - base_image)
    low_result = base_image - (1.0 - 2.0 * blend_image) * base_image * (1.0 - base_image)

    result = torch.where(condition_src, high_result, low_result)
    return torch.clamp(result, 0.0, 1.0)
133 |
def lighten_gamma_light(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Gamma light: base ** blend, clamped to [0, 1]."""
    return base_image.pow(blend_image).clamp(0.0, 1.0)
137 |
def lighten_gamma_illumination(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Gamma illumination: the dual of gamma dark, evaluated on inverted layers."""
    inv_base = 1.0 - base_image
    inv_blend = 1.0 - blend_image

    # 1 - gamma_dark(1 - blend, 1 - base); note the layer order is swapped too.
    result = 1 - darken_gamma_dark(inv_blend, inv_base)
    return torch.clamp(result, 0.0, 1.0)
144 |
def lighten_lighter_color(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Lighter color: keep whichever whole pixel has the higher luma.

    Fixes:
    - parameter order is now (base, blend) like every other blend function in
      this module; the original declared (blend_image, base_image), which
      silently swapped the layer roles for the positional dispatch-table call
      and inverted the tie-breaking (ties should keep the base layer).
    - the channel mask is broadcast instead of expanded to a hard-coded
      4-D (B, H, W, 3) shape, so other channel-last layouts also work.
    """
    lum_base = get_luminosity(base_image)
    lum_blend = get_luminosity(blend_image)

    # Whole-pixel decision: add a trailing channel axis and let torch.where
    # broadcast it across however many channels the inputs carry.
    mask = (lum_blend > lum_base).unsqueeze(-1)

    result = torch.where(mask, blend_image, base_image)
    return torch.clamp(result, 0.0, 1.0)
154 |
def _lighten_pnorm(base_image: torch.Tensor, blend_image: torch.Tensor, power: float, inverse_power: float) -> torch.Tensor:
    """Shared p-norm blend: (blend^p + base^p)^(1/p), clamped to [0, 1]."""
    src_power = torch.pow(blend_image, power)
    dst_power = torch.pow(base_image, power)
    return torch.pow(src_power + dst_power, inverse_power).clamp(0, 1)

def lighten_pnorm_a(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """P-norm A blend (p = 7/3); the constants mirror Krita's implementation,
    including its slightly truncated reciprocal exponent."""
    return _lighten_pnorm(base_image, blend_image, 2.3333333333333333, 0.428571428571434)

def lighten_pnorm_b(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """P-norm B blend (p = 4)."""
    return _lighten_pnorm(base_image, blend_image, 4.0, 0.25)
178 |
179 |
def lighten_super_light(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Super light: p-norm blend (p = 2.875), mirrored through inversion below blend = 0.5."""
    p = 2.875

    # Both branches are evaluated element-wise; the branch not selected by the
    # mask may contain nan (negative base to a fractional power), as in the
    # original formulation.
    low = 1.0 - torch.pow(
        torch.pow(1.0 - base_image, p) + torch.pow(1.0 - 2.0 * blend_image, p),
        1.0 / p,
    )
    high = torch.pow(
        torch.pow(base_image, p) + torch.pow(2.0 * blend_image - 1.0, p),
        1.0 / p,
    )

    return torch.where(blend_image < 0.5, low, high).clamp(0, 1)
196 |
def lighten_tint_ifs_illusions(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Tint (IFS Illusions): fade base by (1 - blend), then add sqrt(blend)."""
    faded_base = base_image * (1 - blend_image)
    return (faded_base + torch.sqrt(blend_image)).clamp(0, 1)
200 |
def lighten_fog_lighten_ifs_illusions(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Fog lighten (IFS Illusions): piecewise formula split at blend = 0.5; inv(x) = 1 - x."""
    result = torch.where(
        blend_image < 0.5,
        inv(inv(blend_image) * blend_image) - inv(base_image) * inv(blend_image),
        blend_image - inv(base_image) * inv(blend_image) + torch.pow(inv(blend_image), 2)
    )

    return result.clamp(0, 1)

def lighten_easy_dodge(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Easy dodge: base ** ((1 - blend) * 1.039999999), with blend == 1 pinned to 1.

    The inner where substitutes 0.999999999999 for blend == 1 only so the pow
    argument stays finite; those elements are then overwritten by the outer
    where's explicit 1.0.
    """
    result = torch.where(
        blend_image == 1.0,
        torch.tensor(1.0, device=blend_image.device, dtype=blend_image.dtype),
        torch.pow(base_image, inv(torch.where(blend_image != 1.0, blend_image, torch.tensor(0.999999999999, device=blend_image.device, dtype=blend_image.dtype))) * 1.039999999)
    )

    return result.clamp(0, 1)
218 |
def lighten_luminosity_sai(src: torch.Tensor, dst: torch.Tensor) -> torch.Tensor:
    """SAI luminosity/shine: additive blend weighted by src alpha; dst alpha is preserved.

    A trailing dimension of 4 is treated as RGBA; anything else is blended as
    plain color with an implicit alpha of 1 (which makes src * alpha == src).
    """
    if src.shape[-1] == 4:
        src_rgb, src_a = src[..., :3], src[..., 3:]
        dst_rgb, dst_a = dst[..., :3], dst[..., 3:]
        blended_rgb = torch.clamp(src_rgb * src_a + dst_rgb, 0, 1)
        return torch.cat([blended_rgb, dst_a], dim=-1)

    # No alpha channel: multiplying by an all-ones alpha is a no-op.
    return torch.clamp(src + dst, 0, 1)
238 |
# Registry: maps each lighten-family BlendModes member to its implementation.
# Consumed by blend_modes/index.py when building the global dispatch table.
lighten_blend_functions = {
    BlendModes.LIGHTEN_COLOR_DODGE: lighten_color_dodge,
    BlendModes.LIGHTEN_LINEAR_DODGE: lighten_linear_dodge,
    BlendModes.LIGHTEN_LIGHTEN: lighten_lighten,
    BlendModes.LIGHTEN_LINEAR_LIGHT: lighten_linear_light,
    BlendModes.LIGHTEN_SCREEN: lighten_screen,
    BlendModes.LIGHTEN_PIN_LIGHT: lighten_pin_light,
    BlendModes.LIGHTEN_VIVID_LIGHT: lighten_vivid_light,
    BlendModes.LIGHTEN_FLAT_LIGHT: lighten_flat_light,
    BlendModes.LIGHTEN_HARD_LIGHT: lighten_hard_light,
    BlendModes.LIGHTEN_SOFT_LIGHT_IFS_ILLUSIONS: lighten_soft_light_ifs_illusions,
    BlendModes.LIGHTEN_SOFT_LIGHT_PEGTOP_DELPHI: lighten_soft_light_pegtop_delphi,
    BlendModes.LIGHTEN_SOFT_LIGHT_PS: lighten_soft_light_ps,
    BlendModes.LIGHTEN_SOFT_LIGHT_SVG: lighten_soft_light_svg,
    BlendModes.LIGHTEN_GAMMA_LIGHT: lighten_gamma_light,
    BlendModes.LIGHTEN_GAMMA_ILLUMINATION: lighten_gamma_illumination,
    BlendModes.LIGHTEN_LIGHTER_COLOR: lighten_lighter_color,
    BlendModes.LIGHTEN_PNORM_A: lighten_pnorm_a,
    BlendModes.LIGHTEN_PNORM_B: lighten_pnorm_b,
    BlendModes.LIGHTEN_SUPER_LIGHT: lighten_super_light,
    BlendModes.LIGHTEN_TINT_IFS_ILLUSIONS: lighten_tint_ifs_illusions,
    BlendModes.LIGHTEN_FOG_LIGHTEN_IFS_ILLUSIONS: lighten_fog_lighten_ifs_illusions,
    BlendModes.LIGHTEN_EASY_DODGE: lighten_easy_dodge,
    BlendModes.LIGHTEN_LUMINOSITY_SAI: lighten_luminosity_sai,
}
--------------------------------------------------------------------------------
/blend_modes/lighten_helpers.py:
--------------------------------------------------------------------------------
1 | from typing import Callable
2 | import torch
3 |
def penumbra(base_image: torch.Tensor, blend_image: torch.Tensor, lighten_color_dodge: Callable[[torch.Tensor], torch.Tensor]) -> torch.Tensor:
    """Penumbra helper used by flat light; the dodge function is passed in by the caller.

    The masked assignments are order-dependent and deliberately overwrite
    earlier ones: start at 0, set blend == 1 pixels to 1, then blend + base < 1
    pixels to half the color dodge, then the remaining pixels with base > 0.
    """
    result = torch.zeros_like(base_image)
    mask_dst_one = torch.isclose(blend_image, torch.tensor(1.0))
    result[mask_dst_one] = 1.0
    mask_sum_less_than_one = (blend_image + base_image) < 1.0
    result[mask_sum_less_than_one] = lighten_color_dodge(base_image[mask_sum_less_than_one], blend_image[mask_sum_less_than_one]) / 2
    mask_remaining = ~(mask_dst_one | mask_sum_less_than_one) & ~torch.isclose(base_image, torch.tensor(0.0))
    result[mask_remaining] = 1 - torch.clamp((1 - blend_image[mask_remaining]) / base_image[mask_remaining] / 2, 0, 1)

    return result
14 |
--------------------------------------------------------------------------------
/blend_modes/mix.py:
--------------------------------------------------------------------------------
1 | from .lighten import lighten_hard_light
2 | from ..blend_modes_enum import BlendModes
3 | import torch
4 |
def mix_normal(_, blend_image: torch.Tensor) -> torch.Tensor:
    """Normal: the blend layer fully replaces the base layer (base is ignored)."""
    return blend_image

def mix_overlay(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Overlay: hard light with the two layers swapped."""
    return lighten_hard_light(blend_image, base_image)

# Registry consumed by blend_modes/index.py.
mix_blend_functions = {
    BlendModes.MIX_NORMAL: mix_normal,
    BlendModes.MIX_OVERLAY: mix_overlay,
}
--------------------------------------------------------------------------------
/blend_modes/modulo.py:
--------------------------------------------------------------------------------
1 | from ..blend_modes_enum import BlendModes
2 | import torch
3 |
def modulo_modulo(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Remainder of blend divided by base (fmod: the sign follows blend)."""
    return torch.fmod(blend_image, base_image)
7 |
def modulo_divisive_modulo(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Divisive modulo: fmod(blend / base, 1), approximating base == 0 with 1e-10.

    torch.where evaluates both branches, so the base == 0 elements produce
    inf/nan in the unguarded branch; those values are discarded by the mask.
    """
    result = torch.where(
        base_image == 0,
        torch.fmod((1.0 / 1e-10) * blend_image, 1.0),
        torch.fmod((1.0 / base_image) * blend_image, 1.0)
    )
    return result

# Registry consumed by blend_modes/index.py.
modulo_blend_functions = {
    BlendModes.MODULO_MODULO: modulo_modulo,
    BlendModes.MODULO_DIVISIVE_MODULO: modulo_divisive_modulo,
}
--------------------------------------------------------------------------------
/blend_modes/negative.py:
--------------------------------------------------------------------------------
1 | from ..blend_modes_enum import BlendModes
2 | import torch
3 |
def negative_difference(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Difference: absolute per-channel distance between the two layers."""
    return torch.abs(base_image - blend_image).clamp(0, 1)
9 |
# NOTE: behaves differently from Krita's implementation of this mode.
def negative_equivalence(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Equivalence: 1 - |blend - base| (identical inputs map to 1)."""
    return (1 - torch.abs(blend_image - base_image)).clamp(0, 1)
16 |
def negative_additive_subtractive(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Additive subtractive: |sqrt(blend) - sqrt(base)|."""
    return torch.abs(torch.sqrt(blend_image) - torch.sqrt(base_image)).clamp(0, 1)
23 |
def negative_exclusion(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Exclusion: a + b - 2ab — a softer, commutative cousin of difference."""
    return (blend_image + base_image - 2 * base_image * blend_image).clamp(0, 1)
28 |
def negative_arcus_tangent(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Arcus tangent: 2/pi * atan(base / blend); blend == 0 maps to 0 (base 0) or 1.

    torch.where evaluates both branches, so base/blend yields inf/nan where
    blend == 0, but those elements are taken from the guarded branch instead.
    """
    zero_tensor = torch.zeros_like(base_image)
    unit_tensor = torch.ones_like(base_image)

    result = torch.where(
        blend_image == 0,
        torch.where(base_image == 0, zero_tensor, unit_tensor),
        2.0 * torch.atan(base_image / blend_image) / torch.pi
    )

    return result.clamp(0, 1)
40 |
def negative_negation(base_image: torch.Tensor, blend_image: torch.Tensor) -> torch.Tensor:
    """Negation: 1 - |1 - base - blend|.

    The expression is symmetric in its two arguments, so normalizing the
    parameter order to (base, blend) — the convention used by every other
    function in this module — cannot change any result; the original declared
    (blend_image, base_image).
    """
    unit_tensor = torch.ones_like(base_image)
    difference = unit_tensor - base_image - blend_image
    result = unit_tensor - torch.abs(difference)
    return result.clamp(0, 1)
47 |
# Registry consumed by blend_modes/index.py when building the global dispatch table.
negative_blend_functions = {
    BlendModes.NEGATIVE_DIFFERENCE: negative_difference,
    BlendModes.NEGATIVE_EQUIVALENCE: negative_equivalence,
    BlendModes.NEGATIVE_ADDITIVE_SUBTRACTIVE: negative_additive_subtractive,
    BlendModes.NEGATIVE_EXCLUSION: negative_exclusion,
    BlendModes.NEGATIVE_ARCUS_TANGENT: negative_arcus_tangent,
    BlendModes.NEGATIVE_NEGATION: negative_negation,
}
--------------------------------------------------------------------------------
/blend_modes_enum.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 |
# Commented out blend modes are not implemented yet
class BlendModes(Enum):
    """Every supported blend mode; the value is the human-readable label shown in the UI.

    Members are grouped by family; each family has its own dispatch dict in
    blend_modes/, aggregated by blend_modes/index.py.
    """

    ARITHMETIC_ADDITION = "arithmetic: addition"
    ARITHMETIC_DIVIDE = "arithmetic: divide"
    ARITHMETIC_INVERSE_SUBTRACT = "arithmetic: inverse subtract"
    ARITHMETIC_MULTIPLY = "arithmetic: multiply"
    ARITHMETIC_SUBTRACT = "arithmetic: subtract"

    BINARY_AND = "binary: AND"
    BINARY_CONVERSE = "binary: CONVERSE"
    BINARY_IMPLICATION = "binary: IMPLICATION"
    BINARY_NAND = "binary: NAND"
    BINARY_NOR = "binary: NOR"
    BINARY_NOT_CONVERSE = "binary: NOT CONVERSE"
    BINARY_NOT_IMPLICATION = "binary: NOT IMPLICATION"
    BINARY_OR = "binary: OR"
    BINARY_XNOR = "binary: XNOR"
    BINARY_XOR = "binary: XOR"

    DARKEN_BURN = "darken: burn"
    DARKEN_DARKEN = "darken: darken"
    DARKEN_DARKER_COLOR = "darken: darker color"
    DARKEN_EASY_BURN = "darken: easy burn"
    DARKEN_FOG_DARKEN = "darken: fog darken"
    DARKEN_GAMMA_DARK = "darken: gamma dark"
    DARKEN_LINEAR_BURN = "darken: linear burn"
    DARKEN_SHADE = "darken: shade"

    HSI_COLOR = "hsi: color hsi"
    HSI_HUE = "hsi: hue hsi"
    HSI_SATURATION = "hsi: saturation hsi"
    HSI_INTENSITY = "hsi: intensity"
    HSI_DECREASE_SATURATION = "hsi: decrease saturation hsi"
    HSI_INCREASE_SATURATION = "hsi: increase saturation hsi"
    HSI_DECREASE_INTENSITY = "hsi: decrease intensity"
    HSI_INCREASE_INTENSITY = "hsi: increase intensity"

    HSL_COLOR = "hsl: color hsl"
    HSL_HUE = "hsl: hue hsl"
    HSL_SATURATION = "hsl: saturation hsl"
    HSL_LIGHTNESS = "hsl: lightness"
    HSL_DECREASE_SATURATION = "hsl: decrease saturation hsl"
    HSL_INCREASE_SATURATION = "hsl: increase saturation hsl"
    HSL_DECREASE_LIGHTNESS = "hsl: decrease lightness"
    HSL_INCREASE_LIGHTNESS = "hsl: increase lightness"

    HSV_COLOR = "hsv: color hsv"
    HSV_HUE = "hsv: hue hsv"
    HSV_SATURATION = "hsv: saturation hsv"
    HSV_VALUE = "hsv: value"
    HSV_DECREASE_SATURATION = "hsv: decrease saturation hsv"
    HSV_INCREASE_SATURATION = "hsv: increase saturation hsv"
    HSV_DECREASE_VALUE = "hsv: decrease value"
    HSV_INCREASE_VALUE = "hsv: increase value"

    HSY_COLOR = "hsy: color"
    HSY_HUE = "hsy: hue"
    HSY_SATURATION = "hsy: saturation"
    HSY_LUMINOSITY = "hsy: luminosity"
    HSY_DECREASE_SATURATION = "hsy: decrease saturation"
    HSY_INCREASE_SATURATION = "hsy: increase saturation"
    HSY_DECREASE_LUMINOSITY = "hsy: decrease luminosity"
    HSY_INCREASE_LUMINOSITY = "hsy: increase luminosity"

    LIGHTEN_COLOR_DODGE = "lighten: color dodge"
    LIGHTEN_LINEAR_DODGE = "lighten: linear dodge"
    LIGHTEN_LIGHTEN = "lighten: lighten"
    LIGHTEN_LINEAR_LIGHT = "lighten: linear light"
    LIGHTEN_SCREEN = "lighten: screen"
    LIGHTEN_PIN_LIGHT = "lighten: pin light"
    LIGHTEN_VIVID_LIGHT = "lighten: vivid light"
    LIGHTEN_FLAT_LIGHT = "lighten: flat light"
    LIGHTEN_HARD_LIGHT = "lighten: hard light"
    LIGHTEN_SOFT_LIGHT_IFS_ILLUSIONS = "lighten: soft light (ifs illusions)"
    LIGHTEN_SOFT_LIGHT_PEGTOP_DELPHI = "lighten: soft light (pegtop-delphi)"
    LIGHTEN_SOFT_LIGHT_PS = "lighten: soft light (ps)"
    LIGHTEN_SOFT_LIGHT_SVG = "lighten: soft light (svg)"
    LIGHTEN_GAMMA_LIGHT = "lighten: gamma light"
    LIGHTEN_GAMMA_ILLUMINATION = "lighten: gamma illumination"
    LIGHTEN_LIGHTER_COLOR = "lighten: lighter color"
    LIGHTEN_PNORM_A = "lighten: p-norm a"
    LIGHTEN_PNORM_B = "lighten: p-norm b"
    LIGHTEN_SUPER_LIGHT = "lighten: super light"
    LIGHTEN_TINT_IFS_ILLUSIONS = "lighten: tint (ifs illusions)"
    LIGHTEN_FOG_LIGHTEN_IFS_ILLUSIONS = "lighten: fog lighten (ifs illusions)"
    LIGHTEN_EASY_DODGE = "lighten: easy dodge"
    LIGHTEN_LUMINOSITY_SAI = "lighten: luminosity/shine (sai)"

    # MISC_BUMPMAP = "misc: bumpmap"
    # MISC_COMBINE_NORMAL_MAP = "misc: combine normal map"
    # MISC_DISSOLVE = "misc: dissolve"
    # MISC_COPY_RED = "misc: copy red"
    # MISC_COPY_GREEN = "misc: copy green"
    # MISC_COPY_BLUE = "misc: copy blue"
    # MISC_COPY = "misc: copy"
    # MISC_TANGENT_NORMALMAP = "misc: tangent normalmap"

    MIX_NORMAL = "mix: normal"
    MIX_OVERLAY = "mix: overlay"
    # MIX_OVER = "mix: over"
    # MIX_BEHIND = "mix: behind"
    # MIX_GREATER = "mix: greater"
    # MIX_LAMBERT_LIGHTING_LINEAR = "mix: lambert lighting (linear)"
    # MIX_LAMBERT_LIGHTING_GAMMA_2_2 = "mix: lambert lighting (gamma 2.2)"
    # MIX_ERASE = "mix: erase"
    # MIX_ALPHA_DARKEN = "mix: alpha darken"
    # MIX_HARD_MIX = "mix: hard mix"
    # MIX_HARD_MIX_PS = "mix: hard mix (ps)"
    # MIX_HARD_MIX_SOFTER_PS = "mix: hard mix softer (ps)"
    # MIX_GRAIN_MERGE = "mix: grain merge"
    # MIX_GRAIN_EXTRACT = "mix: grain extract"
    # MIX_PARALLEL = "mix: parallel"
    # MIX_ALLANON = "mix: allanon"
    # MIX_GEOMETRIC_MEAN = "mix: geometric mean"
    # MIX_DESTINATION_ATOP = "mix: destination atop"
    # MIX_DESTINATION_IN = "mix: destination in"
    # MIX_HARD_OVERLAY = "mix: hard overlay"
    # MIX_INTERPOLATION = "mix: interpolation"
    # MIX_INTERPOLATION_2X = "mix: interpolation - 2x"
    # MIX_PENUMBRA_A = "mix: penumbra a"
    # MIX_PENUMBRA_B = "mix: penumbra b"
    # MIX_PENUMBRA_C = "mix: penumbra c"
    # MIX_PENUMBRA_D = "mix: penumbra d"

    MODULO_MODULO = "modulo: modulo"
    MODULO_DIVISIVE_MODULO = "modulo: divisive modulo"
    # MODULO_MODULO_CONTINUOUS = "modulo: modulo - continuous"
    # MODULO_DIVISIVE_MODULO_CONTINUOUS = "modulo: divisive modulo - continuous"
    # MODULO_MODULO_SHIFT = "modulo: modulo shift"
    # MODULO_MODULO_SHIFT_CONTINUOUS = "modulo: modulo shift - continuous"

    NEGATIVE_DIFFERENCE = "negative: difference"
    NEGATIVE_EQUIVALENCE = "negative: equivalence"
    NEGATIVE_ADDITIVE_SUBTRACTIVE = "negative: additive subtractive"
    NEGATIVE_EXCLUSION = "negative: exclusion"
    NEGATIVE_ARCUS_TANGENT = "negative: arcus tangent"
    NEGATIVE_NEGATION = "negative: negation"

    # QUADRATIC_REFLECT = "quadratic: reflect"
    # QUADRATIC_GLOW = "quadratic: glow"
    # QUADRATIC_FREEZE = "quadratic: freeze"
    # QUADRATIC_HEAT = "quadratic: heat"
    # QUADRATIC_GLOW_HEAT = "quadratic: glow-heat"
    # QUADRATIC_HEAT_GLOW = "quadratic: heat-glow"
    # QUADRATIC_REFLECT_FREEZE = "quadratic: reflect-freeze"
    # QUADRATIC_FREEZE_REFLECT = "quadratic: freeze-reflect"
    # QUADRATIC_HEAT_GLOW_FREEZE_REFLECT_HYBRID = "quadratic: heat-glow & freeze-reflect hybrid"
150 |
--------------------------------------------------------------------------------
/helpers.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from typing import Callable
3 |
def replace_zeros(tensor: torch.Tensor, epsilon: float = 1e-7) -> torch.Tensor:
    """Return *tensor* with exact zeros replaced by *epsilon* (division guard).

    Uses ``torch.full_like`` so the replacement scalar matches the input's
    dtype AND device; the original built a default-dtype (float32) scalar,
    which silently promoted e.g. float16/bfloat16 inputs to float32.
    """
    return torch.where(tensor == 0, torch.full_like(tensor, epsilon), tensor)
6 |
def float_to_uint8(tensor: torch.Tensor) -> torch.Tensor:
    """Convert floats in [0, 1] to uint8 in [0, 255].

    Clamps before the cast: the original ``(t * 256).to(uint8)`` mapped 1.0 to
    256, which wraps around to 0 under the uint8 conversion (pure white became
    black). The 256 scale is kept so it stays the inverse of uint8_to_float.
    """
    return (tensor * 256).clamp(0, 255).to(torch.uint8)
9 |
def uint8_to_float(tensor: torch.Tensor) -> torch.Tensor:
    """Map uint8 values in [0, 255] back into float [0, 1] (pairs with float_to_uint8)."""
    return (tensor / 256).clamp(0.0, 1.0)
12 |
def set_saturation(image: torch.Tensor, new_saturation: torch.Tensor) -> torch.Tensor:
    """Rescale each pixel so its chroma (max - min channel) equals *new_saturation*, preserving hue.

    Works on the sorted channel triple: the min channel is forced to 0, the max
    to the new saturation, and the middle channel is placed proportionally
    between them. The `3 - min - max` index trick requires the last dim to be
    exactly 3 (RGB). `new_saturation` has one fewer dimension than `image`.
    """
    result = image.clone()

    # Per-pixel channel ranking. When all channels are equal (min == max) the
    # middle index falls back to the min index so the gather stays valid.
    min_idx = torch.argmin(result, dim=-1, keepdim=True)
    max_idx = torch.argmax(result, dim=-1, keepdim=True)
    mid_idx = torch.where(min_idx == max_idx, min_idx, 3 - min_idx - max_idx)

    min_val = torch.gather(result, -1, min_idx)
    mid_val = torch.gather(result, -1, mid_idx)
    max_val = torch.gather(result, -1, max_idx)

    # min -> 0, mid -> proportional position, max -> new saturation.
    # replace_zeros guards the division for grey pixels (max == min).
    result.scatter_(-1, min_idx, torch.zeros_like(min_val))
    result.scatter_(-1, mid_idx, ((mid_val - min_val) * new_saturation.unsqueeze(-1)) / replace_zeros(max_val - min_val))
    result.scatter_(-1, max_idx, new_saturation.unsqueeze(-1))

    # manage zeros
    result = torch.where(
        (max_val - min_val) > 0,
        result,
        torch.zeros_like(result)
    )

    return result.clamp(0, 1)
36 |
def set_ilvy(image: torch.Tensor, new_ilvy: torch.Tensor, get_ilvy: Callable[[torch.Tensor], torch.Tensor]) -> torch.Tensor:
    """Force the image's intensity/lightness/value/luma to *new_ilvy* by adding the delta."""
    delta = new_ilvy - get_ilvy(image)
    return add_ilvy(image, delta, get_ilvy).clamp(0, 1)
40 |
def add_ilvy(image: torch.Tensor, new_intensity: torch.Tensor, get_ilvy: Callable[[torch.Tensor], torch.Tensor]) -> torch.Tensor:
    """Add a per-pixel offset to the image's intensity/lightness/value/luma.

    After the uniform channel shift, individual channels can leave [0, 1]; the
    two blocks below compress each offending pixel toward its (shifted)
    intensity so that min >= 0 and max <= 1 while the intensity itself is
    preserved.
    """
    image = image + new_intensity.unsqueeze(-1)

    intensity = get_ilvy(image)
    min = torch.min(image, dim=-1).values
    max = torch.max(image, dim=-1).values

    # adjust overflows
    # Underflow (min < 0): scale each channel's offset from the intensity by
    # intensity / (intensity - min); the 1e-8 guards the division.
    mask_min = min < 0.0
    iln = torch.where(mask_min, 1.0 / (intensity - min + 1e-8), torch.zeros_like(intensity))
    image = torch.where(mask_min.unsqueeze(-1), intensity.unsqueeze(-1) + ((image - intensity.unsqueeze(-1)) * intensity.unsqueeze(-1)) * iln.unsqueeze(-1), image)

    # Overflow (max > 1): scale offsets by (1 - intensity) / (max - intensity);
    # the eps term in the mask skips pixels where max ~ intensity.
    mask_max = (max > 1.0) & ((max - intensity) > torch.finfo(max.dtype).eps)
    il = torch.where(mask_max, 1.0 - intensity, torch.zeros_like(intensity))
    ixl = torch.where(mask_max, 1.0 / (max - intensity + 1e-8), torch.zeros_like(intensity))
    image = torch.where(mask_max.unsqueeze(-1), intensity.unsqueeze(-1) + ((image - intensity.unsqueeze(-1)) * il.unsqueeze(-1)) * ixl.unsqueeze(-1), image)

    return image.clamp(0, 1)
59 |
def inv(image: torch.Tensor) -> torch.Tensor:
    """Photometric inverse: 1.0 - image."""
    return 1.0 - image
62 |
def rgb2hsv_torch1(rgb: torch.Tensor) -> torch.Tensor:
    """Convert an RGB tensor (BHWC layout) to HSV, working internally in BCHW.

    Hue is normalised to [0, 1); saturation and value are in [0, 1].
    Assumes input values lie in [0, 1] — TODO confirm with callers.
    NOTE(review): appears superseded by the channels-last rgb2hsv_torch below.
    """
    # Convert BHWC to BCHW
    rgb = rgb.permute(0, 3, 1, 2)

    cmax, cmax_idx = torch.max(rgb, dim=1, keepdim=True)
    cmin = torch.min(rgb, dim=1, keepdim=True)[0]
    delta = cmax - cmin
    hsv_h = torch.empty_like(rgb[:, 0:1, :, :])
    # Sentinel index 3 marks achromatic pixels (delta == 0) so none of the
    # three hue branches selects them; their hue is then set to 0 explicitly.
    # The divisions below produce NaN/Inf at those positions, but the masks
    # never pick them up.
    cmax_idx[delta == 0] = 3
    hsv_h[cmax_idx == 0] = (((rgb[:, 1:2] - rgb[:, 2:3]) / delta) % 6)[cmax_idx == 0]
    hsv_h[cmax_idx == 1] = (((rgb[:, 2:3] - rgb[:, 0:1]) / delta) + 2)[cmax_idx == 1]
    hsv_h[cmax_idx == 2] = (((rgb[:, 0:1] - rgb[:, 1:2]) / delta) + 4)[cmax_idx == 2]
    hsv_h[cmax_idx == 3] = 0.
    hsv_h /= 6.
    # Saturation is forced to 0 for black pixels to avoid 0/0.
    hsv_s = torch.where(cmax == 0, torch.tensor(0.).type_as(rgb), delta / cmax)
    hsv_v = cmax
    hsv = torch.cat([hsv_h, hsv_s, hsv_v], dim=1)

    # Convert back to BHWC
    return hsv.permute(0, 2, 3, 1)
83 |
def rgb2hsv_torch(rgb: torch.Tensor) -> torch.Tensor:
    """Convert a channels-last RGB tensor to HSV.

    Hue is normalised to [0, 1); saturation and value are in [0, 1].
    The result has the same shape as the input, with H, S, V stacked in
    the last dimension.
    """
    red, green, blue = rgb[..., 0:1], rgb[..., 1:2], rgb[..., 2:3]

    c_max, argmax = torch.max(rgb, dim=-1, keepdim=True)
    c_min = torch.min(rgb, dim=-1, keepdim=True).values
    chroma = c_max - c_min

    # Sentinel index 3 marks achromatic pixels (chroma == 0) so none of the
    # hue branches below selects them; their hue is set to 0 explicitly.
    argmax[chroma == 0] = 3

    hue = torch.empty_like(red)
    hue[argmax == 0] = (((green - blue) / chroma) % 6)[argmax == 0]
    hue[argmax == 1] = (((blue - red) / chroma) + 2)[argmax == 1]
    hue[argmax == 2] = (((red - green) / chroma) + 4)[argmax == 2]
    hue[argmax == 3] = 0.
    hue = hue / 6.

    # Saturation is forced to 0 for black pixels to avoid 0/0.
    saturation = torch.where(c_max == 0, torch.tensor(0.).type_as(rgb), chroma / c_max)

    return torch.cat([hue, saturation, c_max], dim=-1)
103 |
def hsv2rgb_torch(hsv: torch.Tensor) -> torch.Tensor:
    """Convert an HSV tensor (channels-last, 4-D BHWC) to RGB.

    Uses the standard chroma construction: each pixel falls into one of six
    hue sectors, which determines the ordering of the chroma, intermediate
    and zero components before the match-value offset is added back.
    """
    hue, sat, val = hsv[..., 0:1], hsv[..., 1:2], hsv[..., 2:3]

    chroma = val * sat
    second = chroma * (1. - torch.abs(hue * 6. % 2. - 1))
    offset = val - chroma  # "match" value added to every channel at the end
    zero = torch.zeros_like(chroma)

    # Hue sector 0..5; truncation toward zero, then mod 6 to fold hue == 1.
    sector = (hue * 6.).type(torch.uint8)
    sector = (sector % 6).expand(-1, -1, -1, 3)

    rgb = torch.empty_like(hsv)
    sector_channels = (
        (chroma, second, zero),
        (second, chroma, zero),
        (zero, chroma, second),
        (zero, second, chroma),
        (second, zero, chroma),
        (chroma, zero, second),
    )
    for k, channels in enumerate(sector_channels):
        pick = sector == k
        rgb[pick] = torch.cat(channels, dim=-1)[pick]

    return rgb + offset
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "comfyui-image-blender"
3 | description = "A custom node to blend two images together using a specified blending mode."
4 | version = "1.1.0"
5 | license = { file = "LICENSE" }
6 |
7 | [project.urls]
8 | Repository = "https://github.com/vault-developer/comfyui-image-blender"
9 |
10 | [tool.comfy]
11 | PublisherId = "vault-developer"
12 | DisplayName = "Image-blender"
13 | Icon = ""
14 |
--------------------------------------------------------------------------------
/workfow.example.json:
--------------------------------------------------------------------------------
1 | {
2 | "last_node_id": 20,
3 | "last_link_id": 47,
4 | "nodes": [
5 | {
6 | "id": 18,
7 | "type": "PreviewImage",
8 | "pos": [
9 | 81,
10 | -167
11 | ],
12 | "size": {
13 | "0": 269.4472351074219,
14 | "1": 416.9296569824219
15 | },
16 | "flags": {},
17 | "order": 2,
18 | "mode": 0,
19 | "inputs": [
20 | {
21 | "name": "images",
22 | "type": "IMAGE",
23 | "link": 35
24 | }
25 | ],
26 | "title": "Preview Base Image",
27 | "properties": {
28 | "Node name for S&R": "PreviewImage"
29 | }
30 | },
31 | {
32 | "id": 9,
33 | "type": "LoadImage",
34 | "pos": [
35 | 358,
36 | -236
37 | ],
38 | "size": {
39 | "0": 270.3406982421875,
40 | "1": 476.7440490722656
41 | },
42 | "flags": {},
43 | "order": 0,
44 | "mode": 0,
45 | "outputs": [
46 | {
47 | "name": "IMAGE",
48 | "type": "IMAGE",
49 | "links": [
50 | 41
51 | ],
52 | "shape": 3,
53 | "slot_index": 0
54 | },
55 | {
56 | "name": "MASK",
57 | "type": "MASK",
58 | "links": [],
59 | "shape": 3,
60 | "slot_index": 1
61 | }
62 | ],
63 | "title": "Load Blend Image",
64 | "properties": {
65 | "Node name for S&R": "LoadImage"
66 | },
67 | "widgets_values": [
68 | "1.png",
69 | "image"
70 | ]
71 | },
72 | {
73 | "id": 16,
74 | "type": "MaskToImage",
75 | "pos": [
76 | 356,
77 | 284
78 | ],
79 | "size": {
80 | "0": 270.4665832519531,
81 | "1": 26
82 | },
83 | "flags": {},
84 | "order": 4,
85 | "mode": 0,
86 | "inputs": [
87 | {
88 | "name": "mask",
89 | "type": "MASK",
90 | "link": 47
91 | }
92 | ],
93 | "outputs": [
94 | {
95 | "name": "IMAGE",
96 | "type": "IMAGE",
97 | "links": [
98 | 31
99 | ],
100 | "shape": 3,
101 | "slot_index": 0
102 | }
103 | ],
104 | "properties": {
105 | "Node name for S&R": "MaskToImage"
106 | }
107 | },
108 | {
109 | "id": 17,
110 | "type": "PreviewImage",
111 | "pos": [
112 | 351,
113 | 349
114 | ],
115 | "size": {
116 | "0": 275.3780517578125,
117 | "1": 386.188720703125
118 | },
119 | "flags": {},
120 | "order": 6,
121 | "mode": 0,
122 | "inputs": [
123 | {
124 | "name": "images",
125 | "type": "IMAGE",
126 | "link": 31
127 | }
128 | ],
129 | "properties": {
130 | "Node name for S&R": "PreviewImage"
131 | }
132 | },
133 | {
134 | "id": 2,
135 | "type": "LoadImage",
136 | "pos": [
137 | 81,
138 | 281
139 | ],
140 | "size": {
141 | "0": 259.80902099609375,
142 | "1": 451.9725036621094
143 | },
144 | "flags": {},
145 | "order": 1,
146 | "mode": 0,
147 | "outputs": [
148 | {
149 | "name": "IMAGE",
150 | "type": "IMAGE",
151 | "links": [
152 | 35,
153 | 45
154 | ],
155 | "shape": 3,
156 | "slot_index": 0
157 | },
158 | {
159 | "name": "MASK",
160 | "type": "MASK",
161 | "links": [
162 | 46,
163 | 47
164 | ],
165 | "shape": 3,
166 | "slot_index": 1
167 | }
168 | ],
169 | "title": "Load Base Image",
170 | "properties": {
171 | "Node name for S&R": "LoadImage"
172 | },
173 | "widgets_values": [
174 | "clipspace/clipspace-mask-181041.70000000298.png [input]",
175 | "image"
176 | ]
177 | },
178 | {
179 | "id": 20,
180 | "type": "ImageBlender",
181 | "pos": [
182 | 628,
183 | 610
184 | ],
185 | "size": {
186 | "0": 487.8569030761719,
187 | "1": 122
188 | },
189 | "flags": {},
190 | "order": 3,
191 | "mode": 0,
192 | "inputs": [
193 | {
194 | "name": "base_image",
195 | "type": "IMAGE",
196 | "link": 45
197 | },
198 | {
199 | "name": "blend_image",
200 | "type": "IMAGE",
201 | "link": 41
202 | },
203 | {
204 | "name": "mask",
205 | "type": "MASK",
206 | "link": 46
207 | }
208 | ],
209 | "outputs": [
210 | {
211 | "name": "IMAGE",
212 | "type": "IMAGE",
213 | "links": [
214 | 44
215 | ],
216 | "shape": 3,
217 | "slot_index": 0
218 | }
219 | ],
220 | "properties": {
221 | "Node name for S&R": "ImageBlender"
222 | },
223 | "widgets_values": [
224 | 1,
225 | "hsi: intensity"
226 | ]
227 | },
228 | {
229 | "id": 5,
230 | "type": "PreviewImage",
231 | "pos": [
232 | 636,
233 | -172
234 | ],
235 | "size": {
236 | "0": 487.2989501953125,
237 | "1": 743.5930786132812
238 | },
239 | "flags": {
240 | "collapsed": false
241 | },
242 | "order": 5,
243 | "mode": 0,
244 | "inputs": [
245 | {
246 | "name": "images",
247 | "type": "IMAGE",
248 | "link": 44
249 | }
250 | ],
251 | "title": "Preview Result",
252 | "properties": {
253 | "Node name for S&R": "PreviewImage"
254 | }
255 | }
256 | ],
257 | "links": [
258 | [
259 | 31,
260 | 16,
261 | 0,
262 | 17,
263 | 0,
264 | "IMAGE"
265 | ],
266 | [
267 | 35,
268 | 2,
269 | 0,
270 | 18,
271 | 0,
272 | "IMAGE"
273 | ],
274 | [
275 | 41,
276 | 9,
277 | 0,
278 | 20,
279 | 1,
280 | "IMAGE"
281 | ],
282 | [
283 | 44,
284 | 20,
285 | 0,
286 | 5,
287 | 0,
288 | "IMAGE"
289 | ],
290 | [
291 | 45,
292 | 2,
293 | 0,
294 | 20,
295 | 0,
296 | "IMAGE"
297 | ],
298 | [
299 | 46,
300 | 2,
301 | 1,
302 | 20,
303 | 2,
304 | "MASK"
305 | ],
306 | [
307 | 47,
308 | 2,
309 | 1,
310 | 16,
311 | 0,
312 | "MASK"
313 | ]
314 | ],
315 | "groups": [],
316 | "config": {},
317 | "extra": {
318 | "ds": {
319 | "scale": 0.7513148009015794,
320 | "offset": [
321 | 228.1861978759655,
322 | 275.64841254054596
323 | ]
324 | }
325 | },
326 | "version": 0.4
327 | }
--------------------------------------------------------------------------------