├── .gitignore ├── LICENSE ├── README.md ├── __init__.py ├── nodes ├── colorspace.py ├── hdr.py ├── mod.py ├── save_hdr.py ├── save_png.py └── tonemap.py ├── pyproject.toml ├── requirements.txt └── web └── ColorMod.js /.gitignore: -------------------------------------------------------------------------------- 1 | research/ 2 | other/ 3 | test.py 4 | *.png 5 | 6 | # default github .gitignore follows 7 | 8 | # Byte-compiled / optimized / DLL files 9 | __pycache__/ 10 | *.py[cod] 11 | *$py.class 12 | 13 | # C extensions 14 | *.so 15 | 16 | # Distribution / packaging 17 | .Python 18 | build/ 19 | develop-eggs/ 20 | dist/ 21 | downloads/ 22 | eggs/ 23 | .eggs/ 24 | lib/ 25 | lib64/ 26 | parts/ 27 | sdist/ 28 | var/ 29 | wheels/ 30 | share/python-wheels/ 31 | *.egg-info/ 32 | .installed.cfg 33 | *.egg 34 | MANIFEST 35 | 36 | # PyInstaller 37 | # Usually these files are written by a python script from a template 38 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 39 | *.manifest 40 | *.spec 41 | 42 | # Installer logs 43 | pip-log.txt 44 | pip-delete-this-directory.txt 45 | 46 | # Unit test / coverage reports 47 | htmlcov/ 48 | .tox/ 49 | .nox/ 50 | .coverage 51 | .coverage.* 52 | .cache 53 | nosetests.xml 54 | coverage.xml 55 | *.cover 56 | *.py,cover 57 | .hypothesis/ 58 | .pytest_cache/ 59 | cover/ 60 | 61 | # Translations 62 | *.mo 63 | *.pot 64 | 65 | # Django stuff: 66 | *.log 67 | local_settings.py 68 | db.sqlite3 69 | db.sqlite3-journal 70 | 71 | # Flask stuff: 72 | instance/ 73 | .webassets-cache 74 | 75 | # Scrapy stuff: 76 | .scrapy 77 | 78 | # Sphinx documentation 79 | docs/_build/ 80 | 81 | # PyBuilder 82 | .pybuilder/ 83 | target/ 84 | 85 | # Jupyter Notebook 86 | .ipynb_checkpoints 87 | 88 | # IPython 89 | profile_default/ 90 | ipython_config.py 91 | 92 | # pyenv 93 | # For a library or package, you might want to ignore these files since the code is 94 | # intended to run in multiple environments; otherwise, check them in: 95 | # .python-version 96 | 97 | # pipenv 98 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 99 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 100 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 101 | # install all needed dependencies. 102 | #Pipfile.lock 103 | 104 | # poetry 105 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 106 | # This is especially recommended for binary packages to ensure reproducibility, and is more 107 | # commonly ignored for libraries. 108 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 109 | #poetry.lock 110 | 111 | # pdm 112 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 113 | #pdm.lock 114 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 115 | # in version control. 116 | # https://pdm.fming.dev/#use-with-ide 117 | .pdm.toml 118 | 119 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 120 | __pypackages__/ 121 | 122 | # Celery stuff 123 | celerybeat-schedule 124 | celerybeat.pid 125 | 126 | # SageMath parsed files 127 | *.sage.py 128 | 129 | # Environments 130 | .env 131 | .venv 132 | env/ 133 | venv/ 134 | ENV/ 135 | env.bak/ 136 | venv.bak/ 137 | 138 | # Spyder project settings 139 | .spyderproject 140 | .spyproject 141 | 142 | # Rope project settings 143 | .ropeproject 144 | 145 | # mkdocs documentation 146 | /site 147 | 148 | # mypy 149 | .mypy_cache/ 150 | .dmypy.json 151 | dmypy.json 152 | 153 | # Pyre type checker 154 | .pyre/ 155 | 156 | # pytype static type analyzer 157 | .pytype/ 158 | 159 | # Cython debug symbols 160 | cython_debug/ 161 | 162 | # PyCharm 163 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 164 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 165 | # and can be added to the global gitignore or merged into this file. For a more nuclear 166 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 167 | #.idea/ 168 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![ColorModExample](https://github.com/city96/ComfyUI_ColorMod/assets/125218114/3be66a59-46df-46a6-bbcf-ac05142442ec) 2 | 3 | This repo contains nodes around image color manipulation, as well as HDR and tonemapping operations. 4 | 5 | ## Installation 6 | 7 | As with most node packs, you can install it by git cloning it to your custom nodes folder or by installing it through the manager. 
8 | ``` 9 | git clone https://github.com/city96/ComfyUI_ColorMod ./ComfyUI/custom_nodes/ComfyUI_ColorMod 10 | ``` 11 | 12 | > [!IMPORTANT] 13 | > Installing the requirements isn't strictly necessary, but most of the core nodes will be missing without them. 14 | 15 | For regular installs, this can be done using the usual `pip install -r requirements.txt` in the node folder with the correct env/venv active. 16 | 17 | For standalone ComfyUI installs on Windows, open a command line in the folder where your `run_nvidia_gpu.bat` file is located and run the following: 18 | ``` 19 | .\python_embeded\python.exe -s -m pip install -r .\ComfyUI\custom_nodes\ComfyUI_ColorMod\requirements.txt 20 | ``` 21 | 22 | # Usage 23 | 24 | The ColorMod nodes all change the image color values in some way. In the most recent version, they all come with a small visualization tool to show how the values will affect the image. 25 | 26 | The graph, without any changes, is a straight line from the bottom left to the top right corner. The horizontal axis represents the input values while the vertical axis represents the remapped ones. As an example, moving the left side up will result in darker areas being brighter. 27 | 28 | Clipping should be enabled (unless HDR images are being manipulated), as passing values outside the expected range to the VAE/UNET can cause some odd behavior. 29 | 30 | 31 | For the HDR workflow in the image above, you can use this [Sample workflow](https://github.com/city96/ComfyUI_ColorMod/files/14913017/ColorModNarrowWF.json). 32 | 33 | Most of the HDR nodes require a bit of trial-and-error, especially the one for creating HDR images. Realistically, there is no "exposure" with generated images, so these values will have to be guessed. 34 | 35 | Another issue is that diffusion passes at different brightness levels can diverge, resulting in artifacts when recombining them. ControlNet and similar techniques to keep the inputs and outputs similar are recommended. 36 | 37 | The tonemapping nodes also require some testing to get right, and **behave slightly differently for HDR/SDR** images. The "multiplier" value is largely non-standard, and is an idea adapted from [this great article](https://learnopencv.com/high-dynamic-range-hdr-imaging-using-opencv-cpp-python/) by Satya Mallick, which I referenced while figuring out the proper implementation. For HDR tonemapping, setting a multiplier of 2-3 might result in better image quality (see the standalone sketch at the end of this README). 38 | 39 | Using HDR images directly without tonemapping is probably useless, and has a chance to cause errors since the values are no longer in the expected `[0.0,1.0]` range. 40 | 41 | ## Precision 42 | 43 | (needs retesting on newer versions) 44 | 45 |
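As a rough intuition for why 16 bit precision matters, the short sketch below (plain NumPy, not part of the node pack) counts how many distinct levels survive when a 2048-column gradient like the `N16` test image described below is rounded to 8 bits:

```
import numpy as np

# 2048-column gradient, matching the synthetic test image described below
g16 = np.linspace(0.0, 1.0, 2048, dtype=np.float32)

# round-trip through 8 bit quantization, as a plain 8 bit PNG save would do
g8 = np.round(g16 * 255.0) / 255.0

print(np.unique(g16).size)  # 2048 distinct levels
print(np.unique(g8).size)   # 256 distinct levels -> visible banding
```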
46 | <details><summary>Click to expand.</summary> 47 | 48 | ## VAE 49 | 50 | After I added the node to load images in 16 bit precision, I could test how much gets lost when doing a single VAE encode -> VAE decode pass. The added noise makes it hard to see on a histogram, so I just ran a very aggressive edge-detect to highlight any banding. 51 | 52 | From top to bottom: 53 | - `N16` = Native 16 bit gradient, 2048 wide, every column a different color. Not encoded. 54 | - `FP32` = `N16` as the input image, `--fp32-vae` launch arg 55 | - `FP16` = `N16` as the input image, `--fp16-vae` launch arg 56 | - `BF16` = `N16` as the input image, `--bf16-vae` launch arg ([default on 20XX cards and up](https://github.com/comfyanonymous/ComfyUI/commit/b8c7c770d3259543af35acfc45608449b3bc6caa)) 57 | - `N8` = Native 8 bit gradient, 2048 wide with 256 different colors. Not encoded. 58 | 59 | ### ft-mse-840000.ckpt 60 | I accidentally cropped the bottom edge off of `FP32`, hence the lack of noise there. 61 | 62 | ![vae840k](https://github.com/city96/ComfyUI_ColorMod/assets/125218114/f1a0a14b-eb49-4636-b176-a1613f3734ce) 63 | 64 | ### sdxl_v0.9.safetensors 65 | Had to use the [FP16 VAE](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix) for the FP16 test. 66 | 67 | ![vaeXL](https://github.com/city96/ComfyUI_ColorMod/assets/125218114/8ce9e157-681a-4054-ab4b-48468dfde984) 68 | 69 | ## UNET 70 | 71 | (I need to re-test this part to rule out the VAE messing with the results - i.e. run an fp32 VAE, pass a 16 bit image into the UNET to begin with, etc.) 72 | 73 | I ran an 8 bit gradient through the UNET at 99% denoise, then decoded it using `ft-mse-840000`. After this, I saved the output as a 16 bit PNG using the node in this pack. 74 | 75 | The graph below shows the first two digits after the decimal point, after converting the image to [0-255]. There is no point in charting INT8 images, since they all end in zero. Here's what I think these results mean: 76 | 77 | - The synthetic gradient was 2048 wide, with each column being `(column+1)/2048` gray. The grouping mostly makes sense here. 78 | - The FP32 VAE adds a bunch of noise, so the distribution ends up pretty even, though it does lean towards values ending in zero (probably due to being trained on 8bit images). 79 | - The FP16 VAE seems to be similar to the synthetic one, maybe due to the lack of precision? 80 | - The BF16 VAE apparently only has 7 bits of precision for the mantissa, so it's a pretty bad format to store the [0-1] VAE output in. 81 | 82 | ![graph_rem](https://github.com/city96/ComfyUI_ColorMod/assets/125218114/955fc9cc-943d-44a1-93a2-0c1c821f3d63) 83 | 84 | *Or I might just be graphing weird float rounding errors. Who knows.* 85 | 86 | </details>
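## Tonemapping example

For reference, the multiplier behaviour described in the usage section boils down to a tonemap-multiply-clip sequence, which the `Tonemap` nodes run per image internally. Here is a minimal standalone sketch of the same idea (OpenCV + NumPy; the file names are placeholders):

```
import cv2
import numpy as np

# flags=-1 keeps the file's native depth: .hdr loads as a float32 BGR array
hdr = cv2.imread("input.hdr", -1)

# tonemap to roughly [0,1], then apply the non-standard multiplier and clip
ldr = cv2.createTonemapReinhard(gamma=1.0).process(hdr)
ldr = np.clip(ldr * 2.5, 0.0, 1.0)  # a mult of 2-3 often helps for HDR inputs

# write the result as a 16 bit PNG
cv2.imwrite("output.png", (ldr * 65535.0).astype(np.uint16))
```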
87 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | # only import if running as a custom node 2 | try: 3 | import comfy.utils 4 | except ImportError: 5 | pass 6 | else: 7 | WEB_DIRECTORY = "./web" 8 | NODE_CLASS_MAPPINGS = {} 9 | 10 | # main nodes (no deps) 11 | from .nodes.mod import NODE_CLASS_MAPPINGS as mod_nodes 12 | NODE_CLASS_MAPPINGS.update(mod_nodes) 13 | 14 | # pypng dep 15 | try: 16 | import png 17 | except ImportError: 18 | print("ColorMod: Can't find pypng! Please install to enable 16bit image support.") 19 | else: 20 | # 16bit PNG nodes 21 | from .nodes.save_png import NODE_CLASS_MAPPINGS as save_png_nodes 22 | NODE_CLASS_MAPPINGS.update(save_png_nodes) 23 | 24 | # cv2 dep 25 | try: 26 | import cv2 27 | except ImportError: 28 | print("ColorMod: Can't find opencv! Please install to enable HDR/tonemapping support.") 29 | else: 30 | # HDR creation/etc nodes 31 | from .nodes.hdr import NODE_CLASS_MAPPINGS as hdr_nodes 32 | NODE_CLASS_MAPPINGS.update(hdr_nodes) 33 | 34 | # HDR save/load nodes 35 | from .nodes.save_hdr import NODE_CLASS_MAPPINGS as save_hdr_nodes 36 | NODE_CLASS_MAPPINGS.update(save_hdr_nodes) 37 | 38 | # HDR tonemap nodes 39 | from .nodes.tonemap import NODE_CLASS_MAPPINGS as tonemap_nodes 40 | NODE_CLASS_MAPPINGS.update(tonemap_nodes) 41 | 42 | # color space nodes 43 | from .nodes.colorspace import NODE_CLASS_MAPPINGS as colorspace_nodes 44 | NODE_CLASS_MAPPINGS.update(colorspace_nodes) 45 | 46 | # export 47 | NODE_DISPLAY_NAME_MAPPINGS = {k:v.TITLE for k,v in NODE_CLASS_MAPPINGS.items()} 48 | __all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS", "WEB_DIRECTORY"] 49 | -------------------------------------------------------------------------------- /nodes/colorspace.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import torch 3 | import numpy as np 4 | 5 | # You can add any color spaces supported by cv2 6 | # [x[6:] for x in dir(cv2) if x.startswith("COLOR_")] 7 | common = [ 8 | "RGB", "BGR", "HLS", "HSV", "YCrCb", "YUV" 9 | ] 10 | 11 | class ColorspaceConvert: 12 | def __init__(self): 13 | pass 14 | 15 | @classmethod 16 | def INPUT_TYPES(s): 17 | return { 18 | "required": { 19 | "image": ("IMAGE",), 20 | "src": (common, {"default": "RGB"}), 21 | "dst": (common, {"default": "RGB"}), 22 | } 23 | } 24 | 25 | RETURN_TYPES = ("IMAGE",) 26 | FUNCTION = "convert" 27 | CATEGORY = "ColorMod" 28 | TITLE = "Convert color space" 29 | 30 | def convert(self, image, src, dst): 31 | if src == dst: 32 | return (image, ) 33 | 34 | atr = getattr(cv2, f"COLOR_{src}2{dst}", None) 35 | assert atr, f"Color conversion failed! Missing cv2 op 'COLOR_{src}2{dst}'"
36 | 37 | out = [] 38 | for batch in image: 39 | img = (batch.cpu().numpy() * 255.0).astype(np.uint8) 40 | mod = cv2.cvtColor(img, atr) 41 | out.append(torch.from_numpy(mod.copy()) / 255.0) 42 | out = torch.stack(out, dim=0) 43 | print(f"ColorspaceConvert: output min={torch.min(out):.4f} mean={torch.mean(out):.4f} max={torch.max(out):.4f}") 44 | return (out,) 45 | 46 | NODE_CLASS_MAPPINGS = { 47 | "ColorspaceConvert": ColorspaceConvert, 48 | } 49 | -------------------------------------------------------------------------------- /nodes/hdr.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import torch 3 | import numpy as np 4 | 5 | class HDRExposureFusion: 6 | def __init__(self): 7 | pass 8 | 9 | @classmethod 10 | def INPUT_TYPES(s): 11 | return { 12 | "required": { 13 | "image_a": ("IMAGE",), 14 | }, 15 | "optional": { 16 | "image_b": ("IMAGE",), 17 | "image_c": ("IMAGE",), 18 | "image_d": ("IMAGE",), 19 | } 20 | } 21 | 22 | RETURN_TYPES = ("IMAGE",) 23 | FUNCTION = "create_hdr" 24 | CATEGORY = "ColorMod/hdr" 25 | TITLE = "Exposure Fusion" 26 | 27 | def create_hdr(self, image_a, image_b=None, image_c=None, image_d=None): 28 | def img_to_cv2(img): 29 | img = img.cpu().numpy() 30 | img = img[:, :, ::-1] # RGB (ComfyUI) to BGR (OpenCV) 31 | img = (img * 255.0).astype(np.uint8) 32 | return img 33 | 34 | images = [x.clone() for x in [image_a, image_b, image_c, image_d] if x is not None] 35 | assert all([x.shape[0] == images[0].shape[0] for x in images[1:]]), "Batch size mismatch!" 36 | images = torch.stack(images, dim=1) 37 | 38 | out = [] 39 | for batch in images: 40 | batch = [img_to_cv2(x) for x in batch] 41 | hdr = cv2.createMergeMertens().process(batch) 42 | out.append( 43 | torch.from_numpy(hdr[:, :, ::-1].copy()) 44 | ) 45 | out = torch.stack(out, dim=0) 46 | return (out,) 47 | 48 | class HDRCreate: 49 | def __init__(self): 50 | pass 51 | 52 | @classmethod 53 | def INPUT_TYPES(s): 54 | return { 55 | "required": { 56 | "image_a": ("IMAGE",), 57 | "image_b": ("IMAGE",), 58 | "image_c": ("IMAGE",), 59 | "exposure_a": ("FLOAT", {"default": 1.0, "min": 0.001, "max": 1024.0, "step": 0.1}), 60 | "exposure_b": ("FLOAT", {"default": 2.5, "min": 0.001, "max": 1024.0, "step": 0.1}), 61 | "exposure_c": ("FLOAT", {"default": 8.0, "min": 0.001, "max": 1024.0, "step": 0.1}), 62 | } 63 | } 64 | 65 | RETURN_TYPES = ("IMAGE",) 66 | FUNCTION = "create_hdr" 67 | CATEGORY = "ColorMod/hdr" 68 | TITLE = "Create HDR image" 69 | 70 | def create_hdr(self, image_a, image_b, image_c, exposure_a, exposure_b, exposure_c): 71 | def img_to_cv2(img): 72 | img = img.cpu().numpy() 73 | img = img[:, :, ::-1] # RGB (ComfyUI) to BGR (OpenCV) 74 | img = (img * 255.0).astype(np.uint8) 75 | return img 76 | 77 | images = [x.clone() for x in [image_a, image_b, image_c]] 78 | assert all([x.shape[0] == images[0].shape[0] for x in images[1:]]), "Batch size mismatch!"
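        # The Debevec calibrate/merge steps below treat exposure_a/b/c as exposure
        # times; with generated images there is no real exposure (see the README),
        # so the absolute values are guesses and mostly their ratios matter.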
79 | images = torch.stack(images, dim=1) 80 | times = [exposure_a, exposure_b, exposure_c] 81 | times = np.array(times, dtype=np.float32) 82 | 83 | out = [] 84 | for batch in images: 85 | batch = [img_to_cv2(x) for x in batch] 86 | cal = cv2.createCalibrateDebevec().process(batch, times) 87 | hdr = cv2.createMergeDebevec().process(batch, times, cal) 88 | out.append( 89 | torch.from_numpy(hdr[:, :, ::-1].copy()) 90 | ) 91 | out = torch.stack(out, dim=0) 92 | return (out,) 93 | 94 | NODE_CLASS_MAPPINGS = { 95 | "HDRExposureFusion": HDRExposureFusion, 96 | "HDRCreate": HDRCreate, 97 | } 98 | -------------------------------------------------------------------------------- /nodes/mod.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | class ColorModCompress: 4 | def __init__(self): 5 | pass 6 | 7 | @classmethod 8 | def INPUT_TYPES(s): 9 | return { 10 | "required": { 11 | "image": ("IMAGE",), 12 | "mode": (["clip", "normalize", "compress"],) 13 | } 14 | } 15 | 16 | RETURN_TYPES = ("IMAGE",) 17 | FUNCTION = "mod_compress" 18 | CATEGORY = "ColorMod" 19 | TITLE = "ColorMod (compress)" 20 | 21 | def mod_compress(self, image, mode): 22 | image = image.clone() 23 | if mode == "clip": 24 | out = torch.clip(image, 0.0, 1.0) 25 | elif mode == "normalize": 26 | out = [] 27 | for img in image: 28 | img_min = torch.min(img) 29 | img_max = torch.max(img) 30 | print(f"Normalizing [{img_min:6.4f}:{img_max:6.4f}] => [0.0;1.0]") 31 | img = (img - img_min) / (img_max - img_min) 32 | out.append(img) 33 | out = torch.stack(out, dim=0) 34 | elif mode == "compress": 35 | out = [] 36 | for img in image: 37 | ll = torch.minimum(img, torch.zeros(img.shape)) 38 | ll = torch.clip(torch.abs(ll), 0.0, 1.0) 39 | hh = torch.maximum(img, torch.ones(img.shape)) 40 | hh = torch.clip((torch.abs(hh)-1.0), 0.0, 1.0) 41 | out.append(ll + hh) 42 | out = torch.stack(out, dim=0) 43 | else: 44 | raise ValueError(f"Unknown mode '{mode}'") 45 | 46 | # self.check_range(image) # debug 47 | out = torch.clip(out, 0.0, 1.0) # sanity 48 | return (out,) 49 | 50 | class ColorModMove: 51 | def __init__(self): 52 | pass 53 | 54 | @classmethod 55 | def INPUT_TYPES(s): 56 | return { 57 | "required": { 58 | "image": ("IMAGE",), 59 | "move": ("FLOAT", {"default": 0.0, "min": -1.000, "max": 1.000, "step": 0.01}), 60 | "clip": ([True, False], {"default": True}), 61 | } 62 | } 63 | 64 | RETURN_TYPES = ("IMAGE",) 65 | FUNCTION = "mod_move" 66 | CATEGORY = "ColorMod" 67 | TITLE = "ColorMod (move)" 68 | 69 | def mod_move(self, image, move, clip=True): 70 | image = image.clone() 71 | 72 | move_map = torch.ones(image.shape) * move 73 | out = torch.clip((image + move_map), 0.0, 1.0) if clip else (image + move_map) 74 | return (out,) 75 | 76 | class ColorModPivot: 77 | def __init__(self): 78 | pass 79 | 80 | @classmethod 81 | def INPUT_TYPES(s): 82 | return { 83 | "required": { 84 | "image": ("IMAGE",), 85 | "pivot": ("FLOAT", {"default": 0.5, "min": 0.001, "max": 0.999, "step": 0.01}), 86 | "move": ("FLOAT", {"default": 0.0, "min": -2.000, "max": 2.000, "step": 0.01}), 87 | "clip": ([True, False], {"default": True}), 88 | } 89 | } 90 | 91 | RETURN_TYPES = ("IMAGE",) 92 | FUNCTION = "mod_pivot" 93 | CATEGORY = "ColorMod" 94 | TITLE = "ColorMod (move pivot)" 95 | 96 | def mod_pivot(self, image, pivot, move, clip=True): 97 | image = image.clone() 98 | 99 | pivot_map = torch.ones(image.shape) * pivot 100 | image_high = torch.maximum(image, pivot_map) - pivot 101 | image_low = torch.minimum(image, pivot_map)
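        # Split the image at the pivot, then rescale both halves so the pivot
        # itself maps to (pivot + move): values below the pivot are scaled by
        # (pivot + move) / pivot, values above it by (1 - pivot - move) / (1 - pivot).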
102 | 103 | image_high = image_high * (1/(1-pivot)) * (1-(pivot + move)) 104 | image_low = image_low * (1/pivot) * (pivot + move) 105 | out = torch.clip((image_high + image_low), 0.0, 1.0) if clip else (image_high + image_low) 106 | return (out,) 107 | 108 | class ColorModEdges: 109 | def __init__(self): 110 | pass 111 | 112 | @classmethod 113 | def INPUT_TYPES(s): 114 | return { 115 | "required": { 116 | "image": ("IMAGE",), 117 | "low": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 2.0, "step": 0.01}), 118 | "pivot": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), 119 | "high": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 2.0, "step": 0.01}), 120 | "clip": ([True, False], {"default": True}) 121 | } 122 | } 123 | 124 | RETURN_TYPES = ("IMAGE",) 125 | FUNCTION = "mod_edges" 126 | CATEGORY = "ColorMod" 127 | TITLE = "ColorMod (edges)" 128 | 129 | def mod_edges(self, image, low, pivot, high, clip=True): 130 | image = image.clone() 131 | 132 | pivot_map = torch.ones(image.shape) * pivot 133 | image_high = torch.maximum(image, pivot_map) - pivot 134 | image_low = torch.minimum(image, pivot_map) 135 | 136 | image_low = image_low * low + pivot * (1-low) 137 | image_high = image_high * high 138 | out = torch.clip((image_high + image_low), 0.0, 1.0) if clip else (image_high + image_low) 139 | return (out,) 140 | 141 | NODE_CLASS_MAPPINGS = { 142 | "ColorModCompress": ColorModCompress, 143 | "ColorModMove" : ColorModMove, 144 | "ColorModPivot": ColorModPivot, 145 | "ColorModEdges": ColorModEdges, 146 | } 147 | -------------------------------------------------------------------------------- /nodes/save_hdr.py: -------------------------------------------------------------------------------- 1 | import os 2 | import cv2 3 | import torch 4 | import hashlib 5 | import numpy as np 6 | 7 | import folder_paths 8 | 9 | class SaveImageHDR: 10 | def __init__(self): 11 | self.output_dir = folder_paths.get_output_directory() 12 | 13 | @classmethod 14 | def INPUT_TYPES(s): 15 | return { 16 | "required": { 17 | "images": ("IMAGE", ), 18 | "filename_prefix": ("STRING", {"default": "HDR/ComfyUI"}) 19 | } 20 | } 21 | 22 | OUTPUT_NODE = True 23 | RETURN_TYPES = () 24 | FUNCTION = "save_image" 25 | CATEGORY = "ColorMod/hdr" 26 | TITLE = "Save Image (HDR)" 27 | 28 | def save_image(self, images, filename_prefix): 29 | full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path( 30 | filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0] 31 | ) 32 | for image in images: 33 | file = f"{filename}_{counter:05}_.hdr" 34 | path = os.path.join(full_output_folder, file) 35 | image = image.cpu().numpy() 36 | image = image[:, :, ::-1] # RGB -> BGR for cv2.imwrite 37 | cv2.imwrite(path, image) 38 | counter += 1 39 | return () 40 | class LoadImageHDR: 41 | def __init__(self): 42 | pass 43 | 44 | @classmethod 45 | def INPUT_TYPES(s): 46 | exts = [".hdr"] 47 | input_dir = folder_paths.get_input_directory() 48 | files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))] 49 | files = [f for f in files if any([f.endswith(x) for x in exts])] 50 | return { 51 | "required" : { 52 | "image": (sorted(files),) 53 | } 54 | } 55 | 56 | RETURN_TYPES = ("IMAGE",) 57 | FUNCTION = "load_image" 58 | CATEGORY = "ColorMod/hdr" 59 | TITLE = "Load Image (HDR)" 60 | 61 | def load_image(self, image): 62 | path = folder_paths.get_annotated_filepath(image) 63 | img = cv2.imread(path, -1) 64 | assert img is not None, "Failed to read image!"
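        # cv2.imread with flags=-1 keeps the file's native format, so Radiance
        # .hdr files load as float32 BGR arrays with values that can exceed 1.0;
        # flip to RGB below and add a batch dimension.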
65 | out = torch.from_numpy(img[:, :, ::-1].copy()).unsqueeze(0) 66 | print(f"Loaded HDR image [{torch.min(out)},{torch.max(out)}]") 67 | return (out,) 68 | 69 | @classmethod 70 | def IS_CHANGED(s, image): 71 | image_path = folder_paths.get_annotated_filepath(image) 72 | m = hashlib.sha256() 73 | with open(image_path, 'rb') as f: 74 | m.update(f.read()) 75 | return m.digest().hex() 76 | 77 | @classmethod 78 | def VALIDATE_INPUTS(s, image): 79 | if not folder_paths.exists_annotated_filepath(image): 80 | return "Invalid image file: {}".format(image) 81 | return True 82 | 83 | NODE_CLASS_MAPPINGS = { 84 | "SaveImageHDR": SaveImageHDR, 85 | "LoadImageHDR": LoadImageHDR, 86 | } 87 | -------------------------------------------------------------------------------- /nodes/save_png.py: -------------------------------------------------------------------------------- 1 | import os 2 | import png 3 | import json 4 | import torch 5 | import random 6 | import numpy as np 7 | from io import BytesIO 8 | from PIL import Image 9 | from PIL.PngImagePlugin import PngInfo 10 | 11 | import folder_paths 12 | from comfy.cli_args import args 13 | from nodes import SaveImage, PreviewImage, LoadImage 14 | 15 | def get_PIL_tEXt(image, prompt, extra_pnginfo): 16 | """Round-trip through PIL to generate the metadata tEXt chunks. (This is extremely stupid.)""" 17 | # prepare PIL image as normal 18 | # (the 8 bit conversion here is only used to generate the metadata chunks) 19 | i = image.cpu().numpy() 20 | img = np.clip(255.0*i, 0, 255).astype(np.uint8) 21 | img = Image.fromarray(img) 22 | 23 | metadata = None 24 | if not args.disable_metadata: 25 | metadata = PngInfo() 26 | if prompt is not None: 27 | metadata.add_text("prompt", json.dumps(prompt)) 28 | if extra_pnginfo is not None: 29 | for x in extra_pnginfo: 30 | metadata.add_text(x, json.dumps(extra_pnginfo[x])) 31 | 32 | # write temp PIL image 33 | tmp = BytesIO() 34 | img.save(tmp, "png", pnginfo=metadata, compress_level=0) 35 | tmp.seek(0) 36 | 37 | # read it back as PNG and get the tEXt chunks 38 | img = png.Reader(tmp) 39 | metadata = [x for x in img.chunks() if x[0] == b"tEXt"] 40 | return metadata 41 | 42 | def save_png(image, extra_chunks, path): 43 | i = image.cpu().numpy() 44 | img = np.clip(65535.0*i, 0, 65535).astype(np.uint16) 45 | 46 | writer = png.Writer( 47 | size = (img.shape[1],img.shape[0]), 48 | bitdepth = 16, 49 | greyscale = False, 50 | compression = 9, 51 | ) 52 | data = img.reshape(-1, img.shape[1]*img.shape[2]).tolist() 53 | # default writer without metadata 54 | if not extra_chunks: 55 | with open(path, "wb") as f: 56 | writer.write(f, data) 57 | return 58 | # jank in the tEXt chunks as well 59 | tmp = BytesIO() 60 | writer.write(tmp, data) 61 | tmp.seek(0) 62 | chunks = list(png.Reader(tmp).chunks()) 63 | for k in extra_chunks: 64 | chunks.insert(1, k) 65 | with open(path, "wb") as f: 66 | png.write_chunks(f, chunks) 67 | 68 | class SaveImageHighPrec(SaveImage): 69 | TITLE = "Save Image (16 bit)" 70 | CATEGORY = "ColorMod/16bit" 71 | 72 | def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None): 73 | filename_prefix += self.prefix_append 74 | full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0]) 75 | results = list() 76 | for image in images: 77 | metadata = get_PIL_tEXt(image, prompt, extra_pnginfo) 78 | 79 | file = f"{filename}_{counter:05}_.png" 80 | path = os.path.join(full_output_folder, file) 81 | save_png(image, metadata, path) 82 | 83 | results.append({ 84 | "filename": file, 85 | "subfolder": subfolder,
86 | "type": self.type 87 | }) 88 | counter += 1 89 | 90 | return { "ui": { "images": results } } 91 | 92 | # Directly copied from nodes.py 93 | class PreviewImageHighPrec(SaveImageHighPrec): 94 | TITLE = "Preview Image (16 bit)" 95 | CATEGORY = "ColorMod/16bit" 96 | 97 | def __init__(self): 98 | self.output_dir = folder_paths.get_temp_directory() 99 | self.type = "temp" 100 | self.prefix_append = "_temp_" + ''.join( 101 | random.choice("abcdefghijklmnopqrstupvxyz") for x in range(5) 102 | ) 103 | 104 | @classmethod 105 | def INPUT_TYPES(s): 106 | return { 107 | "required": { 108 | "images": ("IMAGE",) 109 | }, 110 | "hidden": { 111 | "prompt": "PROMPT", 112 | "extra_pnginfo": "EXTRA_PNGINFO" 113 | } 114 | } 115 | 116 | class LoadImageHighPrec(LoadImage): 117 | TITLE = "Load Image (16 bit)" 118 | FUNCTION = "load_image_high_precision" 119 | CATEGORY = "ColorMod/16bit" 120 | 121 | def load_image_high_precision(self, image): 122 | if not image.endswith(".png"): 123 | print("ColorMod: Only PNG files can be loaded in 16 bit color.") 124 | return (self.load_image(image)) 125 | 126 | image_path = folder_paths.get_annotated_filepath(image) 127 | reader = png.Reader(image_path) 128 | 129 | raw = reader.read() 130 | image = np.vstack(list(map(np.uint16, raw[2]))) 131 | 132 | dim_rgb = image.shape[1] // raw[0] 133 | bitdepth = raw[3].get("bitdepth", 8) # bit depth from the PNG header 134 | 135 | image = np.reshape(image,(raw[1], raw[0], dim_rgb)) 136 | image = image.astype(np.float32) / float(2**bitdepth - 1) 137 | image = torch.from_numpy(np.clip(image, 0.0, 1.0))[None,] 138 | 139 | if image.shape[3] == 4: 140 | mask = 1.0 - image[:,:,:,3] 141 | image = torch.stack([image[:,:,:,x] for x in range(3)], dim = 3) 142 | else: 143 | mask = torch.zeros((64,64), dtype=torch.float32, device="cpu").unsqueeze(0) 144 | return (image, mask) 145 | 146 | NODE_CLASS_MAPPINGS = { 147 | "SaveImageHighPrec": SaveImageHighPrec, 148 | "PreviewImageHighPrec": PreviewImageHighPrec, 149 | "LoadImageHighPrec": LoadImageHighPrec, 150 | } 151 | -------------------------------------------------------------------------------- /nodes/tonemap.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import torch 3 | import numpy as np 4 | 5 | class CV2Tonemap: 6 | def __init__(self): 7 | pass 8 | 9 | @classmethod 10 | def INPUT_TYPES(s): 11 | return { 12 | "required": { 13 | "image": ("IMAGE",), 14 | "gamma": ("FLOAT", {"default": 1.0, "min": 0.00, "max": 8.0, "step": 0.01}), 15 | "mult": ("FLOAT", {"default": 1.0, "min": 0.00, "max": 8.0, "step": 0.01}), 16 | } 17 | } 18 | 19 | RETURN_TYPES = ("IMAGE",) 20 | FUNCTION = "apply_tonemap" 21 | CATEGORY = "ColorMod/tonemap" 22 | TITLE = "Tonemap (simple)" 23 | tonemap_op = getattr(cv2, "createTonemap", None) 24 | 25 | def tonemap(self, raw, mult, **kwargs): 26 | img = self.tonemap_op(**kwargs).process(raw) 27 | return np.clip(img * mult, 0.0, 1.0) 28 | 29 | def apply_tonemap(self, image, **kwargs): 30 | out = [] 31 | for raw in image: 32 | raw = raw.cpu().numpy()[:, :, ::-1] 33 | img = self.tonemap(raw, **kwargs) 34 | out.append( 35 | torch.from_numpy(img[:, :, ::-1].copy()) 36 | ) 37 | out = torch.stack(out, dim=0) 38 | return (out,) 39 | 40 | class CV2TonemapDrago(CV2Tonemap): 41 | def __init__(self): 42 | super().__init__() 43 | 44 | @classmethod 45 | def INPUT_TYPES(s): 46 | return { 47 | "required": { 48 | "image": ("IMAGE",), 49 | "gamma": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 8.0, "step": 0.01}), 50 | "saturation": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 8.0, "step": 0.01}),
{"default": 1.0, "min": 0.0, "max": 8.0, "step": 0.01}), 51 | "bias": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), 52 | "mult": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 8.0, "step": 0.01}), 53 | } 54 | } 55 | TITLE = "Tonemap (Drago)" 56 | tonemap_op = getattr(cv2, "createTonemapDrago", None) 57 | 58 | class CV2TonemapDurand(CV2Tonemap): 59 | def __init__(self): 60 | super().__init__() 61 | 62 | @classmethod 63 | def INPUT_TYPES(s): 64 | return { 65 | "required": { 66 | "image": ("IMAGE",), 67 | "gamma": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 8.0, "step": 0.01}), 68 | "contrast": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 8.0, "step": 0.01}), 69 | "saturation": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 8.0, "step": 0.01}), 70 | "sigma_space": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 8.0, "step": 0.01}), 71 | "sigma_color": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 8.0, "step": 0.01}), 72 | "mult": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 8.0, "step": 0.01}), 73 | } 74 | } 75 | 76 | TITLE = "Tonemap (Durand)" 77 | tonemap_op = getattr(cv2, "createTonemapDurand", None) 78 | 79 | class CV2TonemapMantiuk(CV2Tonemap): 80 | def __init__(self): 81 | super().__init__() 82 | 83 | @classmethod 84 | def INPUT_TYPES(s): 85 | return { 86 | "required": { 87 | "image": ("IMAGE",), 88 | "gamma": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 8.0, "step": 0.01}), 89 | "scale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 8.0, "step": 0.01}), 90 | "saturation": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 8.0, "step": 0.01}), 91 | "mult": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 8.0, "step": 0.01}), 92 | } 93 | } 94 | 95 | TITLE = "Tonemap (Mantiuk)" 96 | tonemap_op = getattr(cv2, "createTonemapMantiuk", None) 97 | 98 | class CV2TonemapReinhard(CV2Tonemap): 99 | def __init__(self): 100 | super().__init__() 101 | 102 | @classmethod 103 | def INPUT_TYPES(s): 104 | return { 105 | "required": { 106 | "image": ("IMAGE",), 107 | "gamma": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 8.0, "step": 0.01}), 108 | "intensity": ("FLOAT", {"default": 0.0, "min":-8.0, "max": 8.0, "step": 0.01}), 109 | "light_adapt": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.6, "step": 0.01}), 110 | "color_adapt": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.6, "step": 0.01}), 111 | "mult": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 8.0, "step": 0.01}), 112 | } 113 | } 114 | 115 | TITLE = "Tonemap (Reinhard)" 116 | tonemap_op = getattr(cv2, "createTonemapReinhard", None) 117 | 118 | NODE_CLASS_MAPPINGS = { 119 | "CV2Tonemap": CV2Tonemap, 120 | "CV2TonemapDrago": CV2TonemapDrago, 121 | "CV2TonemapDurand": CV2TonemapDurand, 122 | "CV2TonemapMantiuk": CV2TonemapMantiuk, 123 | "CV2TonemapReinhard": CV2TonemapReinhard, 124 | } 125 | 126 | # not all cv2 versions support all tonemap nodes (e.g Durand require nonfree) 127 | for name in list(NODE_CLASS_MAPPINGS.keys()): 128 | if NODE_CLASS_MAPPINGS[name].tonemap_op is None: 129 | print(f"ColorMod: Ignoring node '{name}' due to cv2 edition/version") 130 | del NODE_CLASS_MAPPINGS[name] 131 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "comfyui_colormod" 3 | description = "Nodes for contrast/color editing as well as 16bit/HDR/tonemapping support." 
4 | version = "1.2.0" 5 | license = { file = "LICENSE" } 6 | dependencies = ["pypng", "opencv-contrib-python"] 7 | 8 | [project.urls] 9 | Repository = "https://github.com/city96/ComfyUI_ColorMod" 10 | 11 | [tool.comfy] 12 | PublisherId = "city96" 13 | DisplayName = "ComfyUI ColorMod" 14 | Icon = "" 15 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | pypng 2 | opencv-contrib-python 3 | -------------------------------------------------------------------------------- /web/ColorMod.js: -------------------------------------------------------------------------------- 1 | import { app } from "/scripts/app.js"; 2 | 3 | function getNamedWidget(node, name) { 4 | return node.widgets.find(function(w){return w.name == name}) 5 | } 6 | 7 | function addCanvasWidget(node) { 8 | const canvas = document.createElement("canvas"); 9 | canvas.width = 1000 10 | canvas.height = 400 11 | 12 | canvas.style.pointerEvents = "none"; 13 | canvas.style.border = "1px solid " + LiteGraph.WIDGET_OUTLINE_COLOR 14 | canvas.style.backgroundColor = LiteGraph.WIDGET_BGCOLOR 15 | canvas.stcolor = app.canvas.default_connection_color_byType.IMAGE 16 | 17 | let opts = { 18 | getMinHeight() { return 100 }, 19 | selectOn: [], 20 | } 21 | const widget = node.addDOMWidget("canvas", "CTYCanvas", canvas, opts); 22 | widget.canvas = canvas; 23 | return widget; 24 | } 25 | 26 | function drawCanvasInitial(ctx, width, height, pv_x=null, pv_y=null) { 27 | // clear 28 | ctx.clearRect(0, 0, width, height) 29 | if (pv_x && pv_y) { 30 | // set style 31 | ctx.beginPath() 32 | ctx.lineWidth = 5 33 | ctx.strokeStyle = "#444" 34 | // draw cross 35 | ctx.moveTo(0, pv_y) 36 | ctx.lineTo(width, pv_y) 37 | ctx.moveTo(pv_x, 0) 38 | ctx.lineTo(pv_x, height) 39 | ctx.stroke() 40 | } 41 | } 42 | 43 | function drawCanvasCMMove(node) { 44 | let move = getNamedWidget(node, "move").value 45 | let canvas = getNamedWidget(node, "canvas").canvas 46 | 47 | let ctx = canvas.getContext("2d") 48 | 49 | // Calc coords 50 | let width = canvas.width 51 | let height = canvas.height 52 | let offset = height * -move 53 | 54 | // clear 55 | drawCanvasInitial(ctx, width, height) 56 | // set style 57 | ctx.beginPath() 58 | ctx.lineWidth = 15 59 | ctx.strokeStyle = canvas.stcolor 60 | // draw lines 61 | ctx.moveTo(0, height + offset) // start 62 | ctx.lineTo(width, offset) // start -> end 63 | ctx.stroke() 64 | } 65 | 66 | function drawCanvasCMMovePivot(node) { 67 | let move = getNamedWidget(node, "move").value 68 | let pivot = getNamedWidget(node, "pivot").value 69 | let canvas = getNamedWidget(node, "canvas").canvas 70 | 71 | let ctx = canvas.getContext("2d") 72 | 73 | // Calc pivot coords 74 | let width = canvas.width 75 | let height = canvas.height 76 | let pv_x = width * pivot 77 | let pv_y = height * (1.0 - pivot) - (height * move) 78 | 79 | // clear 80 | drawCanvasInitial(ctx, width, height, pv_x, pv_y) 81 | // set style 82 | ctx.beginPath() 83 | ctx.lineWidth = 15 84 | ctx.strokeStyle = canvas.stcolor 85 | // draw lines 86 | ctx.moveTo(0, height) // start 87 | ctx.lineTo(pv_x, pv_y) // start -> pivot 88 | ctx.lineTo(width, 0) // pivot -> end 89 | ctx.stroke() 90 | } 91 | 92 | function drawCanvasCMEdges(node) { 93 | let low = getNamedWidget(node, "low").value 94 | let high = getNamedWidget(node, "high").value 95 | let pivot = getNamedWidget(node, "pivot").value 96 | let canvas = getNamedWidget(node, "canvas").canvas 97 | 98 | let ctx = 
canvas.getContext("2d") 99 | ctx.lineWidth = 15 100 | ctx.strokeStyle = canvas.stcolor 101 | 102 | // Calc pivot coords 103 | let width = canvas.width 104 | let height = canvas.height 105 | let pv_x = width * pivot 106 | let pv_y = height * (1.0 - pivot) 107 | 108 | // clear 109 | drawCanvasInitial(ctx, width, height, pv_x, pv_y) 110 | // set style 111 | ctx.beginPath() 112 | ctx.lineWidth = 15 113 | ctx.strokeStyle = canvas.stcolor 114 | // draw lines 115 | ctx.moveTo(0, height * low) // start 116 | ctx.lineTo(pv_x, pv_y) // start -> pivot 117 | ctx.lineTo(width, height * (1.0-high)) // pivot -> end 118 | ctx.stroke() 119 | } 120 | 121 | app.registerExtension({ 122 | name: "City96.ColorMod", 123 | nodeCreated(node, app) { 124 | if (node.__proto__.comfyClass == "ColorModMove") { 125 | var widget = addCanvasWidget(node) 126 | var refresh = function(v=null) { drawCanvasCMMove(node) } 127 | getNamedWidget(node, "move").callback = refresh 128 | setTimeout(refresh, 100); 129 | } 130 | if (node.__proto__.comfyClass == "ColorModPivot") { 131 | var widget = addCanvasWidget(node) 132 | var refresh = function(v=null) { drawCanvasCMMovePivot(node) } 133 | getNamedWidget(node, "move").callback = refresh 134 | getNamedWidget(node, "pivot").callback = refresh 135 | setTimeout(refresh, 100); 136 | } 137 | if (node.__proto__.comfyClass == "ColorModEdges") { 138 | var widget = addCanvasWidget(node) 139 | var refresh = function(v) { drawCanvasCMEdges(node) } 140 | getNamedWidget(node, "low").callback = refresh 141 | getNamedWidget(node, "high").callback = refresh 142 | getNamedWidget(node, "pivot").callback = refresh 143 | setTimeout(refresh, 100); 144 | } 145 | if (node.__proto__.comfyClass == "ColorModExposureFusion") { 146 | console.log(node) 147 | } 148 | } 149 | }) 150 | --------------------------------------------------------------------------------