├── .gitignore
├── LICENSE
├── README.md
├── __init__.py
├── fonts
│   ├── blackrumbleregular.ttf
│   ├── fadeaway.ttf
│   ├── millionastter.ttf
│   ├── showdex.ttf
│   ├── xangdashiny.ttf
│   ├── 华光布兜体.ttf
│   ├── 微软雅黑.ttf
│   ├── 文道现代篆书.ttf
│   ├── 汉呈王世李行书.ttf
│   ├── 沙孟海书法字体.ttf
│   └── 迷你简综艺.ttf
├── images
│   ├── HSL_color.png
│   ├── baidu_translate.png
│   ├── color_balance.png
│   ├── highlight_shadow.png
│   ├── imitation1.png
│   ├── imitation2.png
│   ├── imitation3.png
│   ├── light_source.png
│   ├── remove_watermark.png
│   └── watermark.png
├── nodes
│   ├── add_watermark.py
│   ├── baidu_translate.py
│   ├── brightness_contrast_saturation.py
│   ├── color_balance.py
│   ├── gray_channel.py
│   ├── highlight_shadow_brightness.py
│   ├── hsl_color.py
│   ├── imitation_hue.py
│   ├── light_shape.py
│   ├── remove_watermark.py
│   └── upload_image_path.py
├── pyproject.toml
├── requirements.txt
└── workflow
    ├── 光源_lightSource.json
    ├── 加水印_addWatermark.json
    └── 追色_imitationHue.json

/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | 
6 | # C extensions
7 | *.so
8 | 
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 | 
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 | 
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 | 
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 | 
54 | # Translations
55 | *.mo
56 | *.pot
57 | 
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 | 
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 | 
68 | # Scrapy stuff:
69 | .scrapy
70 | 
71 | # Sphinx documentation
72 | docs/_build/
73 | 
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 | 
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 | 
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 | 
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 | 
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 | 
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 | 
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 110 | .pdm.toml 111 | .pdm-python 112 | .pdm-build/ 113 | 114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 115 | __pypackages__/ 116 | 117 | # Celery stuff 118 | celerybeat-schedule 119 | celerybeat.pid 120 | 121 | # SageMath parsed files 122 | *.sage.py 123 | 124 | # Environments 125 | .env 126 | .venv 127 | env/ 128 | venv/ 129 | ENV/ 130 | env.bak/ 131 | venv.bak/ 132 | 133 | # Spyder project settings 134 | .spyderproject 135 | .spyproject 136 | 137 | # Rope project settings 138 | .ropeproject 139 | 140 | # mkdocs documentation 141 | /site 142 | 143 | # mypy 144 | .mypy_cache/ 145 | .dmypy.json 146 | dmypy.json 147 | 148 | # Pyre type checker 149 | .pyre/ 150 | 151 | # pytype static type analyzer 152 | .pytype/ 153 | 154 | # Cython debug symbols 155 | cython_debug/ 156 | 157 | # PyCharm 158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 160 | # and can be added to the global gitignore or merged into this file. For a more nuclear 161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 162 | #.idea/ 163 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ComfyUI-MingNodes
2 | 
3 | 开发的一些有用的功能,具体工作流在workflow文件夹
4 | 
5 | Some useful functions; the corresponding workflows are in the workflow folder.
6 | 
7 | 1: 模仿像素蛋糕追色的功能,参数强度是色彩迁移的强度,皮肤保护参数值皮肤色彩越大越接近原图色彩,支持区域色彩迁移,加入SAM语义分割制作蒙板即可。
8 | 可以打开是否调节自动亮度,对比度,和饱和度选项,各项值的参数就是自动调节的范围0.5就是50%范围内自动调节。新增影调开关,可以模仿原图的影调。
9 | 还有需要注意的是,如果是相机的原片JPG或PNG等图片,需要到PS里面转下相应格式再传入,因为相机的色彩空间可能不同。
10 | 
11 | Imitates the color-matching ("追色") feature of Pixel Cake. The strength parameter sets the intensity of the color transfer;
12 | the larger the skin protection value, the closer skin tones stay to the original image.
13 | Regional color transfer is supported: just add a SAM semantic-segmentation mask.
14 | You can optionally enable automatic adjustment of brightness, contrast, and saturation;
15 | each range value limits the automatic adjustment, e.g. 0.5 keeps the adjustment within 50%.
16 | A tone switch has also been added that can mimic the tonal character of the reference image.
17 | Also note that if you feed in an original JPG or PNG straight from a camera,
18 | convert it to the appropriate format in Photoshop first, because the camera's color space may differ.
19 | 
20 | ![Image text](https://github.com/mingsky-ai/ComfyUI-MingNodes/blob/main/images/imitation1.png?raw=true)
21 | 
22 | 2: IC-Light的自定义光源,可以定义光源各种形状,位置偏移(位置可调整为负数),缩放,亮度,旋转,背景和光源颜色,图片高斯模糊,
23 | 可以多个光源组合在一起,绘制任意图像的光源。
24 | 
25 | IC-Light's custom light source can define various light-source shapes, position offsets (positions can be negative),
26 | scale, brightness, rotation, background and light colors, and Gaussian blur of the image. Multiple light sources can be combined to draw the lighting for any image.
27 | 
28 | ![Image text](https://github.com/mingsky-ai/ComfyUI-MingNodes/blob/main/images/light_source.png?raw=true)
29 | 
30 | 3: 图片水印功能,可以添加图片水印和文字水印,可以调整水印的位置,大小,透明度,文字水印还可以自定义字体颜色和字体,可以自己增加字体,
31 | 放在插件目录的fonts文件夹下。
32 | 
33 | The watermark node can add image watermarks and text watermarks. You can adjust the watermark's position, size,
34 | and opacity; a text watermark also supports a custom font and font color. You can add your own fonts by placing them in the plugin's fonts folder.
35 | 
36 | ![Image text](https://github.com/mingsky-ai/ComfyUI-MingNodes/blob/main/images/watermark.png?raw=true)
37 | 
38 | 4: AI去水印功能,基于开源的lama模型开发,在水印地方涂抹上蒙板即可去水印
39 | 
40 | 模型下载地址:[模型下载](https://huggingface.co/anyisalin/big-lama/resolve/main/big-lama.safetensors) 下载模型放到models/ming文件夹
41 | 
42 | AI watermark removal, built on the open-source LaMa model: paint a mask over the watermark area and the watermark is removed.
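If you prefer to fetch the model from a script rather than the browser, here is a minimal sketch (the URL is the same download link given below; the destination assumes a default ComfyUI layout, so `comfyui_root` is an assumption to adjust for your install):

```python
# Sketch: download big-lama.safetensors into ComfyUI/models/ming
# ("comfyui_root" is an assumption -- point it at your ComfyUI install)
import os
import urllib.request

comfyui_root = "ComfyUI"
dest_dir = os.path.join(comfyui_root, "models", "ming")
os.makedirs(dest_dir, exist_ok=True)

url = "https://huggingface.co/anyisalin/big-lama/resolve/main/big-lama.safetensors"
urllib.request.urlretrieve(url, os.path.join(dest_dir, "big-lama.safetensors"))
```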
43 | 
44 | Model download address: [model download](https://huggingface.co/anyisalin/big-lama/resolve/main/big-lama.safetensors). Download the model and put it in the models/ming folder.
45 | 
46 | ![Image text](https://github.com/mingsky-ai/ComfyUI-MingNodes/blob/main/images/remove_watermark.png?raw=true)
47 | 
48 | 5: HSL调色, 调整范围-30到30
49 | 
50 | HSL color adjustment, adjustment range -30 to 30
51 | 
52 | ![Image text](https://github.com/mingsky-ai/ComfyUI-MingNodes/blob/main/images/HSL_color.png?raw=true)
53 | 
54 | 6: 色彩平衡,包含中间调,高光,阴影的调整,调整范围-100到100
55 | 
56 | Color balance, including midtone, highlight, and shadow adjustments. Adjustment range: -100 to 100
57 | 
58 | ![Image text](https://github.com/mingsky-ai/ComfyUI-MingNodes/blob/main/images/color_balance.png?raw=true)
59 | 
60 | 7: 阴影高光调节,调整范围-10到10
61 | 
62 | Shadow/highlight adjustment, adjustment range -10 to 10
63 | 
64 | ![Image text](https://github.com/mingsky-ai/ComfyUI-MingNodes/blob/main/images/highlight_shadow.png?raw=true)
65 | 
66 | 8: 百度翻译,可以中英繁之间的翻译,注意要填入百度的AppID和Appkey,可以到百度翻译申请,免费的。
67 | 
68 | Baidu Translate, which can translate between Simplified Chinese, English, and Traditional Chinese.
69 | Note that you need to fill in your Baidu AppID and AppKey; you can apply for them at Baidu Translate for free.
70 | 
71 | 注册地址 Registration address:
72 | 
73 | 
74 | 
75 | ![Image text](https://github.com/mingsky-ai/ComfyUI-MingNodes/blob/main/images/baidu_translate.png?raw=true)
76 | 
77 | ## Star History
78 | 
79 | [![Star History Chart](https://api.star-history.com/svg?repos=mingsky-ai/ComfyUI-MingNodes&type=Date)](https://star-history.com/#mingsky-ai/ComfyUI-MingNodes&Date)
80 | 
81 | 
82 | 
83 | 
84 | 
85 | 
86 | 
87 | 
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
1 | from .nodes.gray_channel import ConvertGrayChannelNode
2 | from .nodes.brightness_contrast_saturation import AdjustBrightnessContrastSaturationNode
3 | from .nodes.baidu_translate import BaiduTranslateNode
4 | from .nodes.add_watermark import AddWaterMarkNode
5 | from .nodes.imitation_hue import ImitationHueNode
6 | from .nodes.light_shape import LightShapeNode
7 | from .nodes.highlight_shadow_brightness import HighlightShadowBrightnessNode
8 | from .nodes.hsl_color import HSLColorNode
9 | from .nodes.color_balance import ColorBalanceNode
10 | from .nodes.remove_watermark import RemoveWatermarkNode
11 | 
12 | NODE_CLASS_MAPPINGS = {
13 |     "ConvertGrayChannelNode": ConvertGrayChannelNode,
14 |     "AdjustBrightnessContrastSaturationNode": AdjustBrightnessContrastSaturationNode,
15 |     "BaiduTranslateNode": BaiduTranslateNode,
16 |     "AddWaterMarkNode": AddWaterMarkNode,
17 |     "LightShapeNode": LightShapeNode,
18 |     "ImitationHueNode": ImitationHueNode,
19 |     "HighlightShadowBrightnessNode": HighlightShadowBrightnessNode,
20 |     "HSLColorNode": HSLColorNode,
21 |     "ColorBalanceNode": ColorBalanceNode,
22 |     "RemoveWatermarkNode": RemoveWatermarkNode,
23 | }
24 | 
25 | NODE_DISPLAY_NAME_MAPPINGS = {
26 |     "ConvertGrayChannelNode": "Grayscale Channels",
27 |     "AdjustBrightnessContrastSaturationNode": "Brightness Contrast Saturation",
28 |     "BaiduTranslateNode": "Baidu Translate",
29 |     "AddWaterMarkNode": "Add Watermark",
30 |     "LightShapeNode": "IC-Light Light Shape",
31 |     "ImitationHueNode": "Imitation Hue",
32 |     "HighlightShadowBrightnessNode": "Highlight Shadow Brightness",
33 |     "HSLColorNode": "HSL Color",
34 |     "ColorBalanceNode": "Color Balance",
35 |     "RemoveWatermarkNode": "AI 
Remove Watermark", 36 | } 37 | 38 | __all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS"] 39 | -------------------------------------------------------------------------------- /fonts/blackrumbleregular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mingsky-ai/ComfyUI-MingNodes/afaee2c3edb40bc0d585f0597ba234dcd1041657/fonts/blackrumbleregular.ttf -------------------------------------------------------------------------------- /fonts/fadeaway.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mingsky-ai/ComfyUI-MingNodes/afaee2c3edb40bc0d585f0597ba234dcd1041657/fonts/fadeaway.ttf -------------------------------------------------------------------------------- /fonts/millionastter.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mingsky-ai/ComfyUI-MingNodes/afaee2c3edb40bc0d585f0597ba234dcd1041657/fonts/millionastter.ttf -------------------------------------------------------------------------------- /fonts/showdex.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mingsky-ai/ComfyUI-MingNodes/afaee2c3edb40bc0d585f0597ba234dcd1041657/fonts/showdex.ttf -------------------------------------------------------------------------------- /fonts/xangdashiny.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mingsky-ai/ComfyUI-MingNodes/afaee2c3edb40bc0d585f0597ba234dcd1041657/fonts/xangdashiny.ttf -------------------------------------------------------------------------------- /fonts/华光布兜体.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mingsky-ai/ComfyUI-MingNodes/afaee2c3edb40bc0d585f0597ba234dcd1041657/fonts/华光布兜体.ttf -------------------------------------------------------------------------------- /fonts/微软雅黑.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mingsky-ai/ComfyUI-MingNodes/afaee2c3edb40bc0d585f0597ba234dcd1041657/fonts/微软雅黑.ttf -------------------------------------------------------------------------------- /fonts/文道现代篆书.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mingsky-ai/ComfyUI-MingNodes/afaee2c3edb40bc0d585f0597ba234dcd1041657/fonts/文道现代篆书.ttf -------------------------------------------------------------------------------- /fonts/汉呈王世李行书.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mingsky-ai/ComfyUI-MingNodes/afaee2c3edb40bc0d585f0597ba234dcd1041657/fonts/汉呈王世李行书.ttf -------------------------------------------------------------------------------- /fonts/沙孟海书法字体.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mingsky-ai/ComfyUI-MingNodes/afaee2c3edb40bc0d585f0597ba234dcd1041657/fonts/沙孟海书法字体.ttf -------------------------------------------------------------------------------- /fonts/迷你简综艺.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mingsky-ai/ComfyUI-MingNodes/afaee2c3edb40bc0d585f0597ba234dcd1041657/fonts/迷你简综艺.ttf 
-------------------------------------------------------------------------------- /images/HSL_color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mingsky-ai/ComfyUI-MingNodes/afaee2c3edb40bc0d585f0597ba234dcd1041657/images/HSL_color.png -------------------------------------------------------------------------------- /images/baidu_translate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mingsky-ai/ComfyUI-MingNodes/afaee2c3edb40bc0d585f0597ba234dcd1041657/images/baidu_translate.png -------------------------------------------------------------------------------- /images/color_balance.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mingsky-ai/ComfyUI-MingNodes/afaee2c3edb40bc0d585f0597ba234dcd1041657/images/color_balance.png -------------------------------------------------------------------------------- /images/highlight_shadow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mingsky-ai/ComfyUI-MingNodes/afaee2c3edb40bc0d585f0597ba234dcd1041657/images/highlight_shadow.png -------------------------------------------------------------------------------- /images/imitation1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mingsky-ai/ComfyUI-MingNodes/afaee2c3edb40bc0d585f0597ba234dcd1041657/images/imitation1.png -------------------------------------------------------------------------------- /images/imitation2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mingsky-ai/ComfyUI-MingNodes/afaee2c3edb40bc0d585f0597ba234dcd1041657/images/imitation2.png -------------------------------------------------------------------------------- /images/imitation3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mingsky-ai/ComfyUI-MingNodes/afaee2c3edb40bc0d585f0597ba234dcd1041657/images/imitation3.png -------------------------------------------------------------------------------- /images/light_source.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mingsky-ai/ComfyUI-MingNodes/afaee2c3edb40bc0d585f0597ba234dcd1041657/images/light_source.png -------------------------------------------------------------------------------- /images/remove_watermark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mingsky-ai/ComfyUI-MingNodes/afaee2c3edb40bc0d585f0597ba234dcd1041657/images/remove_watermark.png -------------------------------------------------------------------------------- /images/watermark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mingsky-ai/ComfyUI-MingNodes/afaee2c3edb40bc0d585f0597ba234dcd1041657/images/watermark.png -------------------------------------------------------------------------------- /nodes/add_watermark.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | from PIL import Image, ImageEnhance, ImageDraw, ImageFont 4 | import folder_paths 5 | import os 6 | 7 | 8 | def tensor2pil(t_image: torch.Tensor) -> Image: 9 | 
return Image.fromarray(np.clip(255.0 * t_image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
10 | 
11 | 
12 | def pil2tensor(image: Image) -> torch.Tensor:
13 |     return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)
14 | 
15 | 
16 | def add_image_watermark(original, watermark, x, y, opacity, scale):
17 |     width, height = original.size
18 |     watermark = watermark.resize((int(watermark.width * scale), int(watermark.height * scale)), Image.LANCZOS)
19 |     alpha = watermark.split()[3]
20 |     alpha = ImageEnhance.Brightness(alpha).enhance(opacity)  # scale the alpha channel by the requested opacity
21 |     watermark.putalpha(alpha)
22 |     transparent = Image.new('RGBA', (width, height), (0, 0, 0, 0))
23 |     transparent.paste(watermark, (x, y), watermark)
24 |     watermarked = Image.alpha_composite(original, transparent)
25 |     return watermarked
26 | 
27 | 
28 | def add_text_watermark(original, text, x, y, scale, opacity, color, fonts):
29 |     txt = Image.new('RGBA', original.size, (255, 255, 255, 0))
30 |     font_path = os.path.join(folder_paths.get_output_directory(), 'ComfyUI-MingNodes', 'fonts')
31 |     font_path = font_path.replace("output", "custom_nodes")  # remap to custom_nodes/ComfyUI-MingNodes/fonts
32 |     font_path = os.path.join(font_path, fonts)
33 |     font_size = int(40 * scale)
34 |     font = ImageFont.truetype(font_path, font_size)
35 |     d = ImageDraw.Draw(txt)
36 |     d.text((x, y), text, font=font, fill=color + (int(255 * opacity),))
37 |     watermarked = Image.alpha_composite(original, txt)
38 |     return watermarked
39 | 
40 | 
41 | def hex_to_rgb(hex_color):
42 |     hex_color = hex_color.lstrip('#')
43 |     r = int(hex_color[0:2], 16)
44 |     g = int(hex_color[2:4], 16)
45 |     b = int(hex_color[4:6], 16)
46 |     return (r, g, b)
47 | 
48 | 
49 | class AddWaterMarkNode:
50 |     @classmethod
51 |     def INPUT_TYPES(s):
52 |         font_path = os.path.join(folder_paths.get_output_directory(), 'ComfyUI-MingNodes', 'fonts')
53 |         font_path = font_path.replace("output", "custom_nodes")
54 |         files = [f for f in os.listdir(font_path) if os.path.isfile(os.path.join(font_path, f))]
55 | 
56 |         return {
57 |             "required": {
58 |                 "image": ("IMAGE",),
59 |                 "image_watermark": ("BOOLEAN", {"default": True}),
60 |                 "position_X": ("INT", {"default": 0, "min": 0, "max": 10000, "step": 1}),
61 |                 "position_Y": ("INT", {"default": 0, "min": 0, "max": 10000, "step": 1}),
62 |                 "opacity": ("FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.1}),
63 |                 "scale": ("FLOAT", {"default": 1.0, "min": 0.1, "max": 10.0, "step": 0.1}),
64 |             },
65 |             "optional": {
66 |                 "watermark": ("IMAGE",),
67 |                 "watermark_mask": ("MASK",),
68 |                 "text": ("STRING", {"default": "enter text"}),
69 |                 "text_color": ("STRING", {"default": "#FFFFFF"}),
70 |                 "fonts": (sorted(files),),
71 |             },
72 |         }
73 | 
74 |     CATEGORY = "MingNodes/Image Process"
75 | 
76 |     RETURN_TYPES = ("IMAGE",)
77 |     RETURN_NAMES = ("image",)
78 |     FUNCTION = "add_watermark"
79 | 
80 |     def add_watermark(self, image, image_watermark, position_X, position_Y, opacity, scale,
81 |                       text, text_color, fonts, watermark=None, watermark_mask=None,):
82 | 
83 |         if image_watermark:
84 |             result = []
85 |             for img1 in image:
86 |                 img_cv1 = Image.fromarray((img1.squeeze().cpu().numpy() * 255).astype(np.uint8)).convert("RGBA")
87 |                 for img2 in watermark:
88 |                     rgb_image = Image.fromarray((img2.squeeze().cpu().numpy() * 255).astype(np.uint8)).convert("RGB")
89 |                     for img3 in watermark_mask:
90 |                         mask_image = Image.fromarray((img3.squeeze().cpu().numpy() * 255).astype(np.uint8)).convert("L")
91 |                         if rgb_image.size != mask_image.size:
92 |                             raise ValueError("The RGB image and the mask must be the same size")
93 |                         rgba_image = Image.new('RGBA', rgb_image.size)
94 | rgb_data = rgb_image.getdata() 95 | mask_data = mask_image.getdata() 96 | rgba_data = [] 97 | for rgb, alpha in zip(rgb_data, mask_data): 98 | inverted_alpha = 255 - alpha 99 | rgba_data.append(rgb + (inverted_alpha,)) 100 | rgba_image.putdata(rgba_data) 101 | adjusted_image = add_image_watermark(img_cv1, rgba_image, position_X, position_Y, opacity, scale) 102 | rst = torch.from_numpy(np.array(adjusted_image).astype(np.float32) / 255.0).to(image.device) 103 | result.append(rst) 104 | final_tensor = torch.stack(result) 105 | return (final_tensor,) 106 | else: 107 | result2 = [] 108 | for img in image: 109 | img_cv2 = Image.fromarray((img.squeeze().cpu().numpy() * 255).astype(np.uint8)).convert("RGBA") 110 | adjusted_image2 = add_text_watermark(img_cv2, str(text), position_X, position_Y, scale, 111 | opacity, hex_to_rgb(text_color), fonts) 112 | rst2 = torch.from_numpy(np.array(adjusted_image2).astype(np.float32) / 255.0).to(image.device) 113 | result2.append(rst2) 114 | final_tensor2 = torch.stack(result2) 115 | return (final_tensor2,) 116 | -------------------------------------------------------------------------------- /nodes/baidu_translate.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import random 3 | from hashlib import md5 4 | 5 | 6 | def make_md5(s, encoding='utf-8'): 7 | return md5(s.encode(encoding)).hexdigest() 8 | 9 | 10 | class BaiduTranslateNode: 11 | 12 | @classmethod 13 | def INPUT_TYPES(s): 14 | return { 15 | "required": { 16 | "from_translate": ( 17 | [ 18 | 'auto', 19 | 'zh', 20 | 'cht', 21 | 'en' 22 | ], 23 | {"default": "auto"}, 24 | ), 25 | "to_translate": ([ 26 | 'zh', 27 | 'en', 28 | 'cht' 29 | ], {"default": "en"}), 30 | "baidu_appid": ("STRING", {"multiline": False, "placeholder": "Input AppId"}), 31 | "baidu_appkey": ("STRING", {"multiline": False, "placeholder": "Input AppKey"}), 32 | "text": ("STRING", {"multiline": True, "placeholder": "Input prompt"}), 33 | } 34 | } 35 | 36 | CATEGORY = "MingNodes/Translate" 37 | 38 | RETURN_TYPES = ("STRING",) 39 | RETURN_NAMES = ("text",) 40 | FUNCTION = "baidu_translate" 41 | 42 | def baidu_translate(self, from_translate, to_translate, text, baidu_appid, baidu_appkey): 43 | appid = baidu_appid 44 | appkey = baidu_appkey 45 | from_lang = from_translate 46 | to_lang = to_translate 47 | endpoint = 'https://api.fanyi.baidu.com' 48 | path = '/api/trans/vip/translate' 49 | url = endpoint + path 50 | query = text 51 | salt = random.randint(32768, 65536) 52 | sign = make_md5(appid + query + str(salt) + appkey) 53 | 54 | headers = {'Content-Type': 'application/x-www-form-urlencoded'} 55 | payload = {'appid': appid, 'q': query, 'from': from_lang, 'to': to_lang, 'salt': salt, 'sign': sign} 56 | 57 | r = requests.post(url, params=payload, headers=headers) 58 | result = r.json() 59 | txt = result['trans_result'][0]['dst'] 60 | 61 | return (txt,) 62 | -------------------------------------------------------------------------------- /nodes/brightness_contrast_saturation.py: -------------------------------------------------------------------------------- 1 | from PIL import Image, ImageEnhance 2 | import numpy as np 3 | import torch 4 | 5 | 6 | def adjust_image(img, brightness=0, contrast=0, saturation=0): 7 | 8 | if brightness != 0: 9 | enhancer = ImageEnhance.Brightness(img) 10 | img = enhancer.enhance(1 + brightness / 10) 11 | 12 | if contrast != 0: 13 | enhancer = ImageEnhance.Contrast(img) 14 | img = enhancer.enhance(1 + contrast / 10) 15 | 16 | if saturation != 0: 17 
| enhancer = ImageEnhance.Color(img) 18 | img = enhancer.enhance(1 + saturation / 10) 19 | 20 | return img 21 | 22 | 23 | class AdjustBrightnessContrastSaturationNode: 24 | 25 | @classmethod 26 | def INPUT_TYPES(s): 27 | return { 28 | "required": { 29 | "image": ("IMAGE",), 30 | "brightness": ("FLOAT", {"default": 0, "min": -10.0, "max": 10.0, "step": 0.1}), 31 | "contrast": ("FLOAT", {"default": 0, "min": -10.0, "max": 10.0, "step": 0.1}), 32 | "saturation": ("FLOAT", {"default": 0, "min": -10.0, "max": 10.0, "step": 0.1}), 33 | } 34 | } 35 | 36 | CATEGORY = "MingNodes/Image Process" 37 | 38 | RETURN_TYPES = ("IMAGE",) 39 | RETURN_NAMES = ("image",) 40 | FUNCTION = "brightness_contrast_saturation" 41 | 42 | def brightness_contrast_saturation(self, image, brightness, contrast, saturation): 43 | for img in image: 44 | rgb_image = Image.fromarray((img.squeeze().cpu().numpy() * 255).astype(np.uint8)).convert("RGB") 45 | adjusted_image = adjust_image(rgb_image, brightness, contrast, saturation) 46 | rst = torch.from_numpy(np.array(adjusted_image).astype(np.float32) / 255.0).unsqueeze(0) 47 | 48 | return (rst,) 49 | -------------------------------------------------------------------------------- /nodes/color_balance.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | import numpy as np 3 | import torch 4 | 5 | 6 | def calculate_thresholds(luminance, shadow_percentile=45, highlight_percentile=55): 7 | hist, _ = np.histogram(luminance, bins=256, range=(0, 255)) 8 | cdf = hist.cumsum() 9 | cdf_normalized = cdf * hist.max() / cdf.max() 10 | shadow_threshold = np.argmin(np.abs(cdf_normalized - (shadow_percentile / 100 * cdf_normalized.max()))) 11 | highlight_threshold = np.argmin(np.abs(cdf_normalized - (highlight_percentile / 100 * cdf_normalized.max()))) 12 | return shadow_threshold, highlight_threshold 13 | 14 | 15 | def sigmoid(x, k=1): 16 | return 1 / (1 + np.exp(-k * x)) 17 | 18 | 19 | def adjust_color_balance(image, cyan_red, magenta_green, yellow_blue, adjustment_type='midtones'): 20 | img_array = np.array(image).astype(float) 21 | original_luminance = 0.299 * img_array[..., 0] + 0.587 * img_array[..., 1] + 0.114 * img_array[..., 2] 22 | shadow_threshold, highlight_threshold = calculate_thresholds(original_luminance) 23 | 24 | if adjustment_type == 'shadows': 25 | mask = sigmoid((shadow_threshold - original_luminance) / 20) 26 | elif adjustment_type == 'highlights': 27 | mask = sigmoid((original_luminance - highlight_threshold) / 20) 28 | else: 29 | min_luminance = np.min(original_luminance) 30 | max_luminance = np.max(original_luminance) 31 | mid_luminance = (min_luminance + max_luminance) / 2 32 | mask = 1 - np.abs(original_luminance - mid_luminance) / (max_luminance - min_luminance) 33 | mask = np.power(mask, 2) 34 | 35 | mask = np.stack([mask] * 3, axis=-1) 36 | adjustments = np.array([cyan_red, magenta_green, yellow_blue]) 37 | adjusted_array = img_array + adjustments * mask 38 | adjusted_array = np.clip(adjusted_array, 0, 255) 39 | adjusted_luminance = 0.299 * adjusted_array[..., 0] + 0.587 * adjusted_array[..., 1] + 0.114 * adjusted_array[..., 2] 40 | luminance_ratio = original_luminance / (adjusted_luminance + 1e-8) 41 | 42 | for i in range(3): 43 | adjusted_array[..., i] *= luminance_ratio 44 | 45 | adjusted_array = np.clip(adjusted_array, 0, 255).astype(np.uint8) 46 | return adjusted_array 47 | 48 | 49 | class ColorBalanceNode: 50 | 51 | @classmethod 52 | def INPUT_TYPES(s): 53 | return { 54 | "required": { 55 | 
"image": ("IMAGE",), 56 | "adjust_type": ( 57 | [ 58 | 'midtones', 59 | 'highlights', 60 | 'shadows', 61 | ], 62 | {"default": "midtones"}, 63 | ), 64 | "cyan_red": ("INT", {"default": 0, "min": -100, "max": 100, "step": 1}), 65 | "magenta_green": ("INT", {"default": 0, "min": -100, "max": 100, "step": 1}), 66 | "yellow_blue": ("INT", {"default": 0, "min": -100, "max": 100, "step": 1}), 67 | } 68 | } 69 | 70 | CATEGORY = "MingNodes/Image Process" 71 | 72 | RETURN_TYPES = ("IMAGE",) 73 | RETURN_NAMES = ("image",) 74 | FUNCTION = "color_balance" 75 | 76 | def color_balance(self, image, adjust_type, cyan_red, magenta_green, yellow_blue): 77 | for img in image: 78 | rgb_image = Image.fromarray((img.squeeze().cpu().numpy() * 255).astype(np.uint8)).convert("RGB") 79 | balanced_image = adjust_color_balance(rgb_image, cyan_red*1.5, magenta_green*1.5, yellow_blue*1.5, adjust_type) 80 | rst = torch.from_numpy(np.array(balanced_image).astype(np.float32) / 255.0).unsqueeze(0) 81 | return (rst,) 82 | -------------------------------------------------------------------------------- /nodes/gray_channel.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | import torch 4 | 5 | 6 | class ConvertGrayChannelNode: 7 | 8 | @classmethod 9 | def INPUT_TYPES(s): 10 | return { 11 | "required": { 12 | "image": ("IMAGE",), 13 | } 14 | } 15 | 16 | CATEGORY = "MingNodes/Image Process" 17 | 18 | RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "IMAGE",) 19 | RETURN_NAMES = ("grayscale", "blue channel", "green channel", "red channel",) 20 | FUNCTION = "convert_gray_channel" 21 | 22 | def convert_gray_channel(self, image): 23 | gray_images = [] 24 | bb_images = [] 25 | gg_images = [] 26 | rr_images = [] 27 | for img in image: 28 | img_cv2 = img.cpu().numpy() 29 | img_cv2 = (img_cv2 * 255).astype(np.uint8) 30 | gray = cv2.cvtColor(img_cv2, cv2.COLOR_RGB2GRAY) 31 | gy = torch.from_numpy(gray.astype(np.float32) / 255.0).to(image.device) 32 | gray_images.append(gy) 33 | 34 | sp = cv2.split(img_cv2) 35 | b = sp[0][:, :] 36 | bb = torch.from_numpy(b.astype(np.float32) / 255.0).to(image.device) 37 | bb_images.append(bb) 38 | 39 | g = sp[1][:, :] 40 | gg = torch.from_numpy(g.astype(np.float32) / 255.0).to(image.device) 41 | gg_images.append(gg) 42 | 43 | r = sp[2][:, :] 44 | rr = torch.from_numpy(r.astype(np.float32) / 255.0).to(image.device) 45 | rr_images.append(rr) 46 | 47 | final_tensor1 = torch.stack(gray_images) 48 | final_tensor2 = torch.stack(bb_images) 49 | final_tensor3 = torch.stack(gg_images) 50 | final_tensor4 = torch.stack(rr_images) 51 | 52 | return (final_tensor1, final_tensor2, final_tensor3, final_tensor4, ) 53 | 54 | 55 | -------------------------------------------------------------------------------- /nodes/highlight_shadow_brightness.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | import numpy as np 3 | from scipy.signal import convolve2d 4 | import torch 5 | 6 | 7 | def rgb_to_hsv(rgb): 8 | rgb = rgb.astype('float') 9 | hsv = np.zeros_like(rgb) 10 | r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] 11 | maxc = np.max(rgb, axis=-1) 12 | minc = np.min(rgb, axis=-1) 13 | hsv[:, :, 2] = maxc 14 | mask = maxc != minc 15 | hsv[mask, 1] = (maxc - minc)[mask] / maxc[mask] 16 | rc = np.zeros_like(r) 17 | gc = np.zeros_like(g) 18 | bc = np.zeros_like(b) 19 | rc[mask] = (maxc - r)[mask] / (maxc - minc)[mask] 20 | gc[mask] = (maxc - g)[mask] / (maxc - minc)[mask] 21 | bc[mask] = (maxc - 
b)[mask] / (maxc - minc)[mask] 22 | hsv[:, :, 0] = np.select([r == maxc, g == maxc], [bc - gc, 2.0 + rc - bc], default=4.0 + gc - rc) 23 | hsv[:, :, 0] = (hsv[:, :, 0] / 6.0) % 1.0 24 | return hsv 25 | 26 | 27 | def hsv_to_rgb(hsv): 28 | rgb = np.zeros_like(hsv) 29 | h, s, v = hsv[:, :, 0], hsv[:, :, 1], hsv[:, :, 2] 30 | i = (h * 6.0).astype('uint8') 31 | f = (h * 6.0) - i 32 | p = v * (1.0 - s) 33 | q = v * (1.0 - s * f) 34 | t = v * (1.0 - s * (1.0 - f)) 35 | i = i % 6 36 | conditions = [s == 0.0, i == 1, i == 2, i == 3, i == 4, i == 5] 37 | rgb[:, :, 0] = np.select(conditions, [v, q, p, p, t, v], default=v) 38 | rgb[:, :, 1] = np.select(conditions, [v, v, v, q, p, p], default=t) 39 | rgb[:, :, 2] = np.select(conditions, [v, p, t, v, v, q], default=p) 40 | return rgb 41 | 42 | 43 | def adjust_image(img, highlight=0, shadow=0, brightness=0): 44 | img_array = np.array(img, dtype=float) / 255.0 45 | hsv = rgb_to_hsv(img_array) 46 | v = hsv[:, :, 2] 47 | if shadow != 0: 48 | shadow_mask = np.clip(1 - (v / 0.8), 0, 1) 49 | adjustment = shadow / 100 50 | delta = v * adjustment * shadow_mask 51 | v_adjusted = v + delta 52 | v = np.clip(v_adjusted, 0, 1) 53 | if highlight != 0: 54 | adjustment = highlight / 100 55 | highlight_threshold = 0.6 56 | highlight_mask = np.clip((v - highlight_threshold) / (1 - highlight_threshold), 0, 1) 57 | 58 | if adjustment < 0: 59 | v_compressed = v ** (1 - adjustment) 60 | v = v * (1 - highlight_mask) + v_compressed * highlight_mask 61 | kernel = np.ones((5, 5)) / 25 62 | local_mean = convolve2d(v, kernel, mode='same', boundary='symm') 63 | v_detail = v - local_mean 64 | v += v_detail * (-adjustment * 0.5) 65 | v = (v - v.min()) / (v.max() - v.min()) 66 | v = np.power(v, 1 + (-adjustment * 0.2)) 67 | else: 68 | v_adjusted = v + (1 - v) * adjustment * highlight_mask 69 | v = v_adjusted 70 | 71 | if brightness != 0: 72 | v *= (1 + brightness / 100) 73 | 74 | hsv[:, :, 2] = np.clip(v, 0, 1) 75 | adjusted_array = hsv_to_rgb(hsv) 76 | adjusted_array = (adjusted_array * 255).astype(np.uint8) 77 | 78 | return adjusted_array 79 | 80 | 81 | class HighlightShadowBrightnessNode: 82 | 83 | @classmethod 84 | def INPUT_TYPES(s): 85 | return { 86 | "required": { 87 | "image": ("IMAGE",), 88 | "highlight": ("FLOAT", {"default": 0, "min": -10.0, "max": 10.0, "step": 0.1}), 89 | "shadow": ("FLOAT", {"default": 0, "min": -10.0, "max": 10.0, "step": 0.1}), 90 | "brightness": ("FLOAT", {"default": 0, "min": -10.0, "max": 10.0, "step": 0.1}), 91 | } 92 | } 93 | 94 | CATEGORY = "MingNodes/Image Process" 95 | 96 | RETURN_TYPES = ("IMAGE",) 97 | RETURN_NAMES = ("image",) 98 | FUNCTION = "highlight_shadow_brightness" 99 | 100 | def highlight_shadow_brightness(self, image, highlight, shadow, brightness): 101 | for img in image: 102 | rgb_image = Image.fromarray((img.squeeze().cpu().numpy() * 255).astype(np.uint8)).convert("RGB") 103 | result_image = adjust_image(rgb_image, highlight*10, shadow*10, brightness*10) 104 | rst = torch.from_numpy(result_image.astype(np.float32) / 255.0).unsqueeze(0) 105 | return (rst,) 106 | -------------------------------------------------------------------------------- /nodes/hsl_color.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | import torch 4 | 5 | 6 | def create_color_mask(hsv_image, color_range): 7 | mask = np.zeros(hsv_image.shape[:2], dtype=np.float32) 8 | h, s, v = cv2.split(hsv_image) 9 | 10 | for lower, upper in color_range: 11 | lower_h, lower_s, lower_v = lower 12 | 
upper_h, upper_s, upper_v = upper 13 | if lower_h > upper_h: 14 | h_mask = ((h >= lower_h) | (h <= upper_h)).astype(np.float32) 15 | else: 16 | h_mask = ((h >= lower_h) & (h <= upper_h)).astype(np.float32) 17 | s_mask = ((s >= lower_s) & (s <= upper_s)).astype(np.float32) 18 | v_mask = ((v >= lower_v) & (v <= upper_v)).astype(np.float32) 19 | combined_mask = (h_mask * 0.8 + s_mask * 0.15 + v_mask * 0.05) 20 | mask = np.maximum(mask, combined_mask) 21 | 22 | mask = cv2.GaussianBlur(mask, (0, 0), sigmaX=5, sigmaY=5) 23 | 24 | return mask 25 | 26 | 27 | def adjust_hsl(image, color_adjustments): 28 | hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32) 29 | 30 | color_ranges = { 31 | 'red': [([0, 70, 70], [10, 255, 255]), ([170, 50, 50], [180, 255, 255])], 32 | 'orange': [([11, 70, 70], [20, 255, 255])], 33 | 'yellow': [([21, 70, 70], [30, 255, 255])], 34 | 'green': [([31, 70, 70], [75, 255, 255])], 35 | 'lightGreen': [([76, 70, 70], [90, 255, 255])], 36 | 'blue': [([96, 50, 50], [130, 255, 255])], 37 | 'purple': [([131, 70, 70], [155, 255, 255])], 38 | 'magenta': [([156, 70, 70], [169, 255, 255])] 39 | } 40 | 41 | all_masks = {} 42 | for color, ranges in color_ranges.items(): 43 | all_masks[color] = create_color_mask(hsv_image, ranges) 44 | 45 | adjustment_mask = np.zeros_like(hsv_image) 46 | 47 | for color, (h_shift, s_shift, l_shift) in color_adjustments.items(): 48 | color_mask = all_masks[color] 49 | if np.max(color_mask) < 1: 50 | continue 51 | if h_shift != 0: 52 | hue_multiplier = 2 if color == 'blue' else 1 53 | adjustment_mask[:, :, 0] += h_shift * color_mask * hue_multiplier 54 | if s_shift != 0: 55 | adjustment_mask[:, :, 1] += s_shift * color_mask * 3 56 | if l_shift != 0: 57 | adjustment_mask[:, :, 2] += l_shift * color_mask * 2 58 | 59 | for i in range(3): 60 | adjustment_mask[:, :, i] = cv2.GaussianBlur(adjustment_mask[:, :, i], (0, 0), sigmaX=3, sigmaY=3) 61 | 62 | hsv_image[:, :, 0] = np.mod(hsv_image[:, :, 0] + adjustment_mask[:, :, 0], 180) 63 | hsv_image[:, :, 1] = np.clip(hsv_image[:, :, 1] + adjustment_mask[:, :, 1], 0, 255) 64 | hsv_image[:, :, 2] = np.clip(hsv_image[:, :, 2] + adjustment_mask[:, :, 2], 0, 255) 65 | 66 | adjusted_image = cv2.cvtColor(hsv_image.astype(np.uint8), cv2.COLOR_HSV2BGR) 67 | return adjusted_image 68 | 69 | 70 | def create_color_adjustments(red, orange, yellow, green, lightGreen, blue, purple, magenta): 71 | return { 72 | 'red': red, 73 | 'orange': orange, 74 | 'yellow': yellow, 75 | 'green': green, 76 | 'lightGreen': lightGreen, 77 | 'blue': blue, 78 | 'purple': purple, 79 | 'magenta': magenta 80 | } 81 | 82 | 83 | def tensor2cv2(image: torch.Tensor) -> np.array: 84 | if image.dim() == 4: 85 | image = image.squeeze() 86 | npimage = image.numpy() 87 | cv2image = np.uint8(npimage * 255 / npimage.max()) 88 | return cv2.cvtColor(cv2image, cv2.COLOR_RGB2BGR) 89 | 90 | 91 | class HSLColorNode: 92 | 93 | @classmethod 94 | def INPUT_TYPES(s): 95 | return { 96 | "required": { 97 | "image": ("IMAGE",), 98 | "red_hue": ("INT", {"default": 0, "min": -30, "max": 30, "step": 1}), 99 | "red_saturation": ("INT", {"default": 0, "min": -30, "max": 30, "step": 1}), 100 | "red_brightness": ("INT", {"default": 0, "min": -30, "max": 30, "step": 1}), 101 | "orange_hue": ("INT", {"default": 0, "min": -30, "max": 30, "step": 1}), 102 | "orange_saturation": ("INT", {"default": 0, "min": -30, "max": 30, "step": 1}), 103 | "orange_brightness": ("INT", {"default": 0, "min": -30, "max": 30, "step": 1}), 104 | "yellow_hue": ("INT", {"default": 0, "min": 
-30, "max": 30, "step": 1}), 105 | "yellow_saturation": ("INT", {"default": 0, "min": -30, "max": 30, "step": 1}), 106 | "yellow_brightness": ("INT", {"default": 0, "min": -30, "max": 30, "step": 1}), 107 | "green_hue": ("INT", {"default": 0, "min": -30, "max": 30, "step": 1}), 108 | "green_saturation": ("INT", {"default": 0, "min": -30, "max": 30, "step": 1}), 109 | "green_brightness": ("INT", {"default": 0, "min": -30, "max": 30, "step": 1}), 110 | "lightGreen_hue": ("INT", {"default": 0, "min": -30, "max": 30, "step": 1}), 111 | "lightGreen_saturation": ("INT", {"default": 0, "min": -30, "max": 30, "step": 1}), 112 | "lightGreen_brightness": ("INT", {"default": 0, "min": -30, "max": 30, "step": 1}), 113 | "blue_hue": ("INT", {"default": 0, "min": -30, "max": 30, "step": 1}), 114 | "blue_saturation": ("INT", {"default": 0, "min": -30, "max": 30, "step": 1}), 115 | "blue_brightness": ("INT", {"default": 0, "min": -30, "max": 30, "step": 1}), 116 | "purple_hue": ("INT", {"default": 0, "min": -30, "max": 30, "step": 1}), 117 | "purple_saturation": ("INT", {"default": 0, "min": -30, "max": 30, "step": 1}), 118 | "purple_brightness": ("INT", {"default": 0, "min": -30, "max": 30, "step": 1}), 119 | "magenta_hue": ("INT", {"default": 0, "min": -30, "max": 30, "step": 1}), 120 | "magenta_saturation": ("INT", {"default": 0, "min": -30, "max": 30, "step": 1}), 121 | "magenta_brightness": ("INT", {"default": 0, "min": -30, "max": 30, "step": 1}), 122 | } 123 | } 124 | 125 | CATEGORY = "MingNodes/Image Process" 126 | 127 | RETURN_TYPES = ("IMAGE",) 128 | RETURN_NAMES = ("image",) 129 | FUNCTION = "hsl_color" 130 | 131 | def hsl_color(self, image, red_hue, red_saturation, red_brightness, orange_hue, orange_saturation, orange_brightness 132 | , yellow_hue, yellow_saturation, yellow_brightness, green_hue, green_saturation, green_brightness 133 | , lightGreen_hue, lightGreen_saturation, lightGreen_brightness, blue_hue, blue_saturation, 134 | blue_brightness, purple_hue, purple_saturation, purple_brightness, magenta_hue, magenta_saturation, 135 | magenta_brightness): 136 | for img in image: 137 | img_cv1 = tensor2cv2(img) 138 | color_adjustments = create_color_adjustments( 139 | (red_hue, red_saturation, red_brightness), 140 | (orange_hue, orange_saturation, orange_brightness), 141 | (yellow_hue, yellow_saturation, yellow_brightness), 142 | (green_hue, green_saturation, green_brightness), 143 | (lightGreen_hue, lightGreen_saturation, lightGreen_brightness), 144 | (blue_hue, blue_saturation, blue_brightness), 145 | (purple_hue, purple_saturation, purple_brightness), 146 | (magenta_hue, magenta_saturation, magenta_brightness), 147 | ) 148 | adjusted_image = adjust_hsl(img_cv1, color_adjustments) 149 | result_img = cv2.cvtColor(adjusted_image, cv2.COLOR_BGR2RGB) 150 | rst = torch.from_numpy(result_img.astype(np.float32) / 255.0).unsqueeze(0) 151 | return (rst,) 152 | -------------------------------------------------------------------------------- /nodes/imitation_hue.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cv2 3 | import torch 4 | 5 | 6 | def image_stats(image): 7 | return np.mean(image[:, :, 1:], axis=(0, 1)), np.std(image[:, :, 1:], axis=(0, 1)) 8 | 9 | 10 | def is_skin_or_lips(lab_image): 11 | l, a, b = lab_image[:, :, 0], lab_image[:, :, 1], lab_image[:, :, 2] 12 | skin = (l > 20) & (l < 250) & (a > 120) & (a < 180) & (b > 120) & (b < 190) 13 | lips = (l > 20) & (l < 200) & (a > 150) & (b > 140) 14 | return (skin | 
lips).astype(np.float32) 15 | 16 | 17 | def adjust_brightness(image, factor, mask=None): 18 | hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) 19 | v = hsv[:, :, 2].astype(np.float32) 20 | if mask is not None: 21 | mask = mask.squeeze() 22 | v = np.where(mask > 0, np.clip(v * factor, 0, 255), v) 23 | else: 24 | v = np.clip(v * factor, 0, 255) 25 | hsv[:, :, 2] = v.astype(np.uint8) 26 | return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR) 27 | 28 | 29 | def adjust_saturation(image, factor, mask=None): 30 | hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) 31 | s = hsv[:, :, 1].astype(np.float32) 32 | if mask is not None: 33 | mask = mask.squeeze() 34 | s = np.where(mask > 0, np.clip(s * factor, 0, 255), s) 35 | else: 36 | s = np.clip(s * factor, 0, 255) 37 | hsv[:, :, 1] = s.astype(np.uint8) 38 | return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR) 39 | 40 | 41 | def adjust_contrast(image, factor, mask=None): 42 | mean = np.mean(image) 43 | adjusted = image.astype(np.float32) 44 | if mask is not None: 45 | mask = mask.squeeze() 46 | mask = np.repeat(mask[:, :, np.newaxis], 3, axis=2) 47 | adjusted = np.where(mask > 0, np.clip((adjusted - mean) * factor + mean, 0, 255), adjusted) 48 | else: 49 | adjusted = np.clip((adjusted - mean) * factor + mean, 0, 255) 50 | return adjusted.astype(np.uint8) 51 | 52 | 53 | def adjust_tone(source, target, tone_strength=0.7, mask=None): 54 | h, w = target.shape[:2] 55 | source = cv2.resize(source, (w, h)) 56 | lab_image = cv2.cvtColor(target, cv2.COLOR_BGR2LAB).astype(np.float32) 57 | lab_source = cv2.cvtColor(source, cv2.COLOR_BGR2LAB).astype(np.float32) 58 | l_image = lab_image[:,:,0] 59 | l_source = lab_source[:,:,0] 60 | 61 | if mask is not None: 62 | mask = cv2.resize(mask, (w, h)) 63 | mask = mask.astype(np.float32) / 255.0 64 | l_adjusted = np.copy(l_image) 65 | mean_source = np.mean(l_source[mask > 0]) 66 | std_source = np.std(l_source[mask > 0]) 67 | mean_target = np.mean(l_image[mask > 0]) 68 | std_target = np.std(l_image[mask > 0]) 69 | l_adjusted[mask > 0] = (l_image[mask > 0] - mean_target) * (std_source / (std_target + 1e-6)) * 0.7 + mean_source 70 | l_adjusted[mask > 0] = np.clip(l_adjusted[mask > 0], 0, 255) 71 | clahe = cv2.createCLAHE(clipLimit=2.5, tileGridSize=(8,8)) 72 | l_enhanced = clahe.apply(l_adjusted.astype(np.uint8)) 73 | l_final = cv2.addWeighted(l_adjusted, 0.7, l_enhanced.astype(np.float32), 0.3, 0) 74 | l_final = np.clip(l_final, 0, 255) 75 | l_contrast = cv2.addWeighted(l_final, 1.3, l_final, 0, -20) 76 | l_contrast = np.clip(l_contrast, 0, 255) 77 | l_image[mask > 0] = l_image[mask > 0] * (1 - tone_strength) + l_contrast[mask > 0] * tone_strength 78 | else: 79 | mean_source = np.mean(l_source) 80 | std_source = np.std(l_source) 81 | l_mean = np.mean(l_image) 82 | l_std = np.std(l_image) 83 | l_adjusted = (l_image - l_mean) * (std_source / (l_std + 1e-6)) * 0.7 + mean_source 84 | l_adjusted = np.clip(l_adjusted, 0, 255) 85 | clahe = cv2.createCLAHE(clipLimit=2.5, tileGridSize=(8,8)) 86 | l_enhanced = clahe.apply(l_adjusted.astype(np.uint8)) 87 | l_final = cv2.addWeighted(l_adjusted, 0.7, l_enhanced.astype(np.float32), 0.3, 0) 88 | l_final = np.clip(l_final, 0, 255) 89 | l_contrast = cv2.addWeighted(l_final, 1.3, l_final, 0, -20) 90 | l_contrast = np.clip(l_contrast, 0, 255) 91 | l_image = l_image * (1 - tone_strength) + l_contrast * tone_strength 92 | 93 | lab_image[:,:,0] = l_image 94 | return cv2.cvtColor(lab_image.astype(np.uint8), cv2.COLOR_LAB2BGR) 95 | 96 | 97 | def tensor2cv2(image: torch.Tensor) -> np.array: 98 | if image.dim() == 4: 99 | image 
= image.squeeze()
100 |     npimage = image.numpy()
101 |     cv2image = np.clip(npimage * 255.0, 0, 255).astype(np.uint8)  # scale [0, 1] floats directly; dividing by max() would shift exposure (and divide by zero on a black frame)
102 |     return cv2.cvtColor(cv2image, cv2.COLOR_RGB2BGR)
103 | 
104 | 
105 | def color_transfer(source, target, mask=None, strength=1.0, skin_protection=0.2, auto_brightness=True,
106 |                    brightness_range=0.5, auto_contrast=False, contrast_range=0.5,
107 |                    auto_saturation=False, saturation_range=0.5, auto_tone=False, tone_strength=0.7):
108 |     source_lab = cv2.cvtColor(source, cv2.COLOR_BGR2LAB).astype(np.float32)
109 |     target_lab = cv2.cvtColor(target, cv2.COLOR_BGR2LAB).astype(np.float32)
110 | 
111 |     src_means, src_stds = image_stats(source_lab)
112 |     tar_means, tar_stds = image_stats(target_lab)
113 | 
114 |     skin_lips_mask = is_skin_or_lips(target_lab.astype(np.uint8))
115 |     skin_lips_mask = cv2.GaussianBlur(skin_lips_mask, (5, 5), 0)
116 | 
117 |     if mask is not None:
118 |         mask = cv2.resize(mask, (target.shape[1], target.shape[0]))
119 |         mask = mask.astype(np.float32) / 255.0
120 | 
121 |     result_lab = target_lab.copy()
122 |     for i in range(1, 3):  # LAB channels 1 and 2 (a/b chroma); L is handled by the auto_* options below
123 |         adjusted_channel = (target_lab[:, :, i] - tar_means[i - 1]) * (src_stds[i - 1] / (tar_stds[i - 1] + 1e-6)) + \
124 |                            src_means[i - 1]
125 |         adjusted_channel = np.clip(adjusted_channel, 0, 255)
126 | 
127 |         if mask is not None:
128 |             result_lab[:, :, i] = target_lab[:, :, i] * (1 - mask) + \
129 |                                   (target_lab[:, :, i] * skin_lips_mask * skin_protection + \
130 |                                   adjusted_channel * skin_lips_mask * (1 - skin_protection) + \
131 |                                   adjusted_channel * (1 - skin_lips_mask)) * mask
132 |         else:
133 |             result_lab[:, :, i] = target_lab[:, :, i] * skin_lips_mask * skin_protection + \
134 |                                   adjusted_channel * skin_lips_mask * (1 - skin_protection) + \
135 |                                   adjusted_channel * (1 - skin_lips_mask)
136 | 
137 |     result_bgr = cv2.cvtColor(result_lab.astype(np.uint8), cv2.COLOR_LAB2BGR)
138 |     final_result = cv2.addWeighted(target, 1 - strength, result_bgr, strength, 0)
139 | 
140 |     if mask is not None:
141 |         # mask was already resized and scaled to [0, 1] for the LAB blend above;
142 |         # resizing and re-dividing by 255 here would shrink it to ~1/255 of its value.
143 |         if auto_brightness:
144 |             source_brightness = np.mean(cv2.cvtColor(source, cv2.COLOR_BGR2GRAY))
145 |             target_brightness = np.mean(cv2.cvtColor(target, cv2.COLOR_BGR2GRAY))
146 |             brightness_difference = source_brightness - target_brightness
147 |             brightness_factor = 1.0 + np.clip(brightness_difference / 255 * brightness_range, brightness_range*-1, brightness_range)
148 |             final_result = adjust_brightness(final_result, brightness_factor, mask)
149 |         if auto_contrast:
150 |             source_gray = cv2.cvtColor(source, cv2.COLOR_BGR2GRAY)
151 |             target_gray = cv2.cvtColor(target, cv2.COLOR_BGR2GRAY)
152 |             source_contrast = np.std(source_gray)
153 |             target_contrast = np.std(target_gray)
154 |             contrast_difference = source_contrast - target_contrast
155 |             contrast_factor = 1.0 + np.clip(contrast_difference / 255, contrast_range*-1, contrast_range)
156 |             final_result = adjust_contrast(final_result, contrast_factor, mask)
157 |         if auto_saturation:
158 |             source_hsv = cv2.cvtColor(source, cv2.COLOR_BGR2HSV)
159 |             target_hsv = cv2.cvtColor(target, cv2.COLOR_BGR2HSV)
160 |             source_saturation = np.mean(source_hsv[:, :, 1])
161 |             target_saturation = np.mean(target_hsv[:, :, 1])
162 |             saturation_difference = source_saturation - target_saturation
163 |             saturation_factor = 1.0 + np.clip(saturation_difference / 255, saturation_range*-1, saturation_range)
164 |             final_result = adjust_saturation(final_result, saturation_factor, mask)
165 |         if auto_tone:
166 |             final_result = 
adjust_tone(source, final_result, tone_strength, mask) 167 | else: 168 | if auto_brightness: 169 | source_brightness = np.mean(cv2.cvtColor(source, cv2.COLOR_BGR2GRAY)) 170 | target_brightness = np.mean(cv2.cvtColor(target, cv2.COLOR_BGR2GRAY)) 171 | brightness_difference = source_brightness - target_brightness 172 | brightness_factor = 1.0 + np.clip(brightness_difference / 255 * brightness_range, brightness_range*-1, brightness_range) 173 | final_result = adjust_brightness(final_result, brightness_factor) 174 | if auto_contrast: 175 | source_gray = cv2.cvtColor(source, cv2.COLOR_BGR2GRAY) 176 | target_gray = cv2.cvtColor(target, cv2.COLOR_BGR2GRAY) 177 | source_contrast = np.std(source_gray) 178 | target_contrast = np.std(target_gray) 179 | contrast_difference = source_contrast - target_contrast 180 | contrast_factor = 1.0 + np.clip(contrast_difference / 255, contrast_range*-1, contrast_range) 181 | final_result = adjust_contrast(final_result, contrast_factor) 182 | if auto_saturation: 183 | source_hsv = cv2.cvtColor(source, cv2.COLOR_BGR2HSV) 184 | target_hsv = cv2.cvtColor(target, cv2.COLOR_BGR2HSV) 185 | source_saturation = np.mean(source_hsv[:, :, 1]) 186 | target_saturation = np.mean(target_hsv[:, :, 1]) 187 | saturation_difference = source_saturation - target_saturation 188 | saturation_factor = 1.0 + np.clip(saturation_difference / 255, saturation_range*-1, saturation_range) 189 | final_result = adjust_saturation(final_result, saturation_factor) 190 | if auto_tone: 191 | final_result = adjust_tone(source, final_result, tone_strength) 192 | 193 | return final_result 194 | 195 | 196 | class ImitationHueNode: 197 | @classmethod 198 | def INPUT_TYPES(s): 199 | return { 200 | "required": { 201 | "imitation_image": ("IMAGE",), 202 | "target_image": ("IMAGE",), 203 | "strength": ("FLOAT", {"default": 1.0, "min": 0.1, "max": 1.0, "step": 0.1}), 204 | "skin_protection": ("FLOAT", {"default": 0.2, "min": 0, "max": 1.0, "step": 0.1}), 205 | "auto_brightness": ("BOOLEAN", {"default": True}), 206 | "brightness_range": ("FLOAT", {"default": 0.5, "min": 0.1, "max": 1.0, "step": 0.1}), 207 | "auto_contrast": ("BOOLEAN", {"default": False}), 208 | "contrast_range": ("FLOAT", {"default": 0.5, "min": 0.1, "max": 1.0, "step": 0.1}), 209 | "auto_saturation": ("BOOLEAN", {"default": False}), 210 | "saturation_range": ("FLOAT", {"default": 0.5, "min": 0.1, "max": 1.0, "step": 0.1}), 211 | "auto_tone": ("BOOLEAN", {"default": False}), 212 | "tone_strength": ("FLOAT", {"default": 0.5, "min": 0.1, "max": 1.0, "step": 0.1}), 213 | }, 214 | "optional": { 215 | "mask": ("MASK", {"default": None}), 216 | }, 217 | } 218 | 219 | CATEGORY = "MingNodes/Image Process" 220 | 221 | RETURN_TYPES = ("IMAGE",) 222 | RETURN_NAMES = ("image",) 223 | FUNCTION = "imitation_hue" 224 | 225 | def imitation_hue(self, imitation_image, target_image, strength, skin_protection, auto_brightness, brightness_range, 226 | auto_contrast, contrast_range, auto_saturation, saturation_range, auto_tone, tone_strength, 227 | mask=None): 228 | for img in imitation_image: 229 | img_cv1 = tensor2cv2(img) 230 | 231 | for img in target_image: 232 | img_cv2 = tensor2cv2(img) 233 | 234 | img_cv3 = None 235 | if mask is not None: 236 | for img3 in mask: 237 | img_cv3 = img3.cpu().numpy() 238 | img_cv3 = (img_cv3 * 255).astype(np.uint8) 239 | 240 | result_img = color_transfer(img_cv1, img_cv2, img_cv3, strength, skin_protection, auto_brightness, 241 | brightness_range,auto_contrast, contrast_range, auto_saturation, 242 | saturation_range, auto_tone, 
tone_strength) 243 | result_img = cv2.cvtColor(result_img, cv2.COLOR_BGR2RGB) 244 | rst = torch.from_numpy(result_img.astype(np.float32) / 255.0).unsqueeze(0) 245 | 246 | return (rst,) 247 | -------------------------------------------------------------------------------- /nodes/light_shape.py: -------------------------------------------------------------------------------- 1 | from PIL import Image, ImageDraw, ImageFilter 2 | import math 3 | import numpy as np 4 | import torch 5 | 6 | 7 | def draw_shape(shape, size=(200, 200), offset=(0, 0), scale=1.0, rotation=0, bg_color=(255, 255, 255), 8 | shape_color=(0, 0, 0), opacity=1.0, blur_radius=0, base_image=None): 9 | width, height = size 10 | offset_x, offset_y = offset 11 | center_x, center_y = width // 2 + offset_x, height // 2 + offset_y 12 | max_dim = min(width, height) * scale 13 | 14 | diagonal = int(math.sqrt(width ** 2 + height ** 2)) 15 | img_tmp = Image.new('RGBA', (diagonal, diagonal), (0, 0, 0, 0)) 16 | draw_tmp = ImageDraw.Draw(img_tmp) 17 | 18 | tmp_center = diagonal // 2 19 | 20 | alpha = int(opacity * 255) 21 | shape_color = shape_color + (alpha,) 22 | 23 | if shape == 'circle': 24 | bbox = (tmp_center - max_dim / 2, tmp_center - max_dim / 2, tmp_center + max_dim / 2, tmp_center + max_dim / 2) 25 | draw_tmp.ellipse(bbox, fill=shape_color) 26 | 27 | elif shape == 'semicircle': 28 | bbox = (tmp_center - max_dim / 2, tmp_center - max_dim / 2, tmp_center + max_dim / 2, tmp_center + max_dim / 2) 29 | draw_tmp.pieslice(bbox, start=0, end=180, fill=shape_color) 30 | 31 | elif shape == 'quarter_circle': 32 | bbox = (tmp_center - max_dim / 2, tmp_center - max_dim / 2, tmp_center + max_dim / 2, tmp_center + max_dim / 2) 33 | draw_tmp.pieslice(bbox, start=0, end=90, fill=shape_color) 34 | 35 | elif shape == 'ellipse': 36 | bbox = (tmp_center - max_dim / 2, tmp_center - max_dim / 4, tmp_center + max_dim / 2, tmp_center + max_dim / 4) 37 | draw_tmp.ellipse(bbox, fill=shape_color) 38 | 39 | elif shape == 'square': 40 | bbox = (tmp_center - max_dim / 2, tmp_center - max_dim / 2, tmp_center + max_dim / 2, tmp_center + max_dim / 2) 41 | draw_tmp.rectangle(bbox, fill=shape_color) 42 | 43 | elif shape == 'triangle': 44 | points = [ 45 | (tmp_center, tmp_center - max_dim / 2), 46 | (tmp_center - max_dim / 2, tmp_center + max_dim / 2), 47 | (tmp_center + max_dim / 2, tmp_center + max_dim / 2) 48 | ] 49 | draw_tmp.polygon(points, fill=shape_color) 50 | 51 | elif shape == 'cross': 52 | vertical = [(tmp_center - max_dim / 6, tmp_center - max_dim / 2), 53 | (tmp_center + max_dim / 6, tmp_center - max_dim / 2), 54 | (tmp_center + max_dim / 6, tmp_center + max_dim / 2), 55 | (tmp_center - max_dim / 6, tmp_center + max_dim / 2)] 56 | horizontal = [(tmp_center - max_dim / 2, tmp_center - max_dim / 6), 57 | (tmp_center + max_dim / 2, tmp_center - max_dim / 6), 58 | (tmp_center + max_dim / 2, tmp_center + max_dim / 6), 59 | (tmp_center - max_dim / 2, tmp_center + max_dim / 6)] 60 | draw_tmp.polygon(vertical, fill=shape_color) 61 | draw_tmp.polygon(horizontal, fill=shape_color) 62 | 63 | elif shape == 'star': 64 | points = [] 65 | for i in range(10): 66 | angle = i * 36 * math.pi / 180 67 | radius = max_dim / 2 if i % 2 == 0 else max_dim / 4 68 | points.append((tmp_center + radius * math.sin(angle), tmp_center - radius * math.cos(angle))) 69 | draw_tmp.polygon(points, fill=shape_color) 70 | 71 | elif shape == 'radial': 72 | num_rays = 12 73 | for i in range(num_rays): 74 | angle = i * (360 / num_rays) * math.pi / 180 75 | x1 = tmp_center + max_dim / 4 * 
math.cos(angle)
76 |             y1 = tmp_center + max_dim / 4 * math.sin(angle)
77 |             x2 = tmp_center + max_dim / 2 * math.cos(angle)
78 |             y2 = tmp_center + max_dim / 2 * math.sin(angle)
79 |             draw_tmp.line([(x1, y1), (x2, y2)], fill=shape_color, width=int(max_dim / 20))
80 | 
81 |     img_tmp = img_tmp.rotate(rotation, resample=Image.BICUBIC, expand=True)
82 |     if base_image is None:
83 |         img = Image.new('RGBA', size, bg_color + (255,))
84 |     else:
85 |         img = base_image.copy()
86 |         if img.mode != 'RGBA':
87 |             img = img.convert('RGBA')
88 | 
89 |     paste_x = center_x - img_tmp.width // 2
90 |     paste_y = center_y - img_tmp.height // 2
91 | 
92 |     img.alpha_composite(img_tmp, (paste_x, paste_y))
93 | 
94 |     if blur_radius > 0:
95 |         img = img.filter(ImageFilter.GaussianBlur(radius=blur_radius))
96 | 
97 |     return img
98 | 
99 | 
100 | def hex_to_rgb(hex_color):
101 |     hex_color = hex_color.lstrip('#')
102 |     r = int(hex_color[0:2], 16)
103 |     g = int(hex_color[2:4], 16)
104 |     b = int(hex_color[4:6], 16)
105 |     return (r, g, b)
106 | 
107 | 
108 | class LightShapeNode:
109 | 
110 |     @classmethod
111 |     def INPUT_TYPES(s):
112 |         return {
113 |             "required": {
114 |                 "wide": ("INT", {"default": 512, "min": 0, "max": 5000, "step": 1}),
115 |                 "height": ("INT", {"default": 512, "min": 0, "max": 5000, "step": 1}),
116 |                 "shape": (
117 |                     [
118 |                         'circle',
119 |                         'square',
120 |                         'semicircle',
121 |                         'quarter_circle',
122 |                         'ellipse',
123 |                         'triangle',
124 |                         'cross',
125 |                         'star',
126 |                         'radial',
127 |                     ],
128 |                     {"default": "circle"},
129 |                 ),
130 |                 "X_offset": ("INT", {"default": 0, "min": -10000, "max": 10000, "step": 1}),
131 |                 "Y_offset": ("INT", {"default": 0, "min": -10000, "max": 10000, "step": 1}),
132 |                 "scale": ("FLOAT", {"default": 1.0, "min": 0.1, "max": 10.0, "step": 0.1}),
133 |                 "rotation": ("INT", {"default": 0, "min": 0, "max": 360, "step": 1}),
134 |                 "opacity": ("FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.1}),
135 |                 "blur_radius": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}),
136 |                 "background_color": ("STRING", {"default": "#000000"}),
137 |                 "shape_color": ("STRING", {"default": "#FFFFFF"}),
138 |             },
139 |             "optional": {
140 |                 "base_image": ("IMAGE", {"default": None}),
141 |             },
142 |         }
143 | 
144 |     CATEGORY = "MingNodes/Image Process"
145 | 
146 |     RETURN_TYPES = ("IMAGE",)
147 |     RETURN_NAMES = ("image",)
148 |     FUNCTION = "draw_light_shape"
149 | 
150 |     def draw_light_shape(self, wide, height, shape, X_offset, Y_offset, scale, rotation, opacity, blur_radius, background_color,
151 |                          shape_color, base_image=None):
152 | 
153 |         if base_image is None:
154 |             img = draw_shape(shape, size=(wide, height), offset=(X_offset, Y_offset), scale=scale,
155 |                              rotation=rotation,
156 |                              bg_color=hex_to_rgb(background_color), shape_color=hex_to_rgb(shape_color),
157 |                              opacity=opacity, blur_radius=blur_radius)
158 |         else:
159 |             img_cv = Image.fromarray((base_image.squeeze().cpu().numpy() * 255).astype(np.uint8)).convert("RGBA")
160 |             img = draw_shape(shape, size=(wide, height), offset=(X_offset, Y_offset), scale=scale,
161 |                              rotation=rotation,
162 |                              bg_color=hex_to_rgb(background_color), shape_color=hex_to_rgb(shape_color),
163 |                              opacity=opacity, blur_radius=blur_radius, base_image=img_cv)
164 | 
165 |         rst = torch.from_numpy(np.array(img).astype(np.float32) / 255.0).unsqueeze(0)
166 |         return (rst,)
167 | 
--------------------------------------------------------------------------------
/nodes/remove_watermark.py:
--------------------------------------------------------------------------------
1 | from litelama import LiteLama
2 | 
from litelama.model import download_file
3 | import os
4 | from PIL import Image
5 | import numpy as np
6 | import folder_paths
7 | import torch
8 | 
9 | 
10 | def remove(image, mask):
11 |     Lama = LiteLama2()
12 |     device = "cuda:0" if torch.cuda.is_available() else "cpu"  # fall back to CPU when no GPU is present
13 |     result = None
14 |     try:
15 |         Lama.to(device)
16 |         result = Lama.predict(image, mask)
17 |     except Exception as e:
18 |         print(f"[RemoveWatermark] inpainting failed: {e}")  # don't swallow errors silently
19 |     finally:
20 |         Lama.to("cpu")
21 | 
22 |     return result
23 | 
24 | 
25 | class LiteLama2(LiteLama):
26 |     _instance = None
27 | 
28 |     def __new__(cls, *args, **kw):
29 |         if cls._instance is None:
30 |             cls._instance = object.__new__(cls)  # object.__new__ accepts no extra arguments
31 |         return cls._instance
32 | 
33 |     def __init__(self, checkpoint_path=None, config_path=None):
34 |         if getattr(self, "_model", None) is not None:
35 |             return  # the singleton is already initialized; skip re-downloading and re-loading the checkpoint
36 |         self._checkpoint_path = checkpoint_path
37 |         self._config_path = config_path
38 |         self._model = None
39 | 
40 |         if self._checkpoint_path is None:
41 |             MODEL_PATH = "ming/"
42 |             checkpoint_path = os.path.join(folder_paths.models_dir, MODEL_PATH, "big-lama.safetensors")
43 |             if not os.path.isfile(checkpoint_path):
44 |                 os.makedirs(os.path.dirname(checkpoint_path), exist_ok=True)  # models/ming/ may not exist yet
45 |                 download_file("https://huggingface.co/anyisalin/big-lama/resolve/main/big-lama.safetensors",
46 |                               checkpoint_path)
47 | 
48 |             self._checkpoint_path = checkpoint_path
49 | 
50 |         self.load(location="cuda:0" if torch.cuda.is_available() else "cpu")
51 | 
52 | 
53 | def pil2tensor(image: Image.Image) -> torch.Tensor:
54 |     return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)
55 | 
56 | 
57 | class RemoveWatermarkNode:
58 | 
59 |     @classmethod
60 |     def INPUT_TYPES(s):
61 |         return {
62 |             "required": {
63 |                 "image": ("IMAGE",),
64 |                 "mask": ("MASK",),
65 |             }
66 |         }
67 | 
68 |     CATEGORY = "MingNodes/Image Process"
69 | 
70 |     RETURN_TYPES = ("IMAGE",)
71 |     RETURN_NAMES = ("image",)
72 |     FUNCTION = "remove_watermark"
73 | 
74 |     def remove_watermark(self, image, mask):
75 |         for img in image:
76 |             o_img = Image.fromarray((img.squeeze().cpu().numpy() * 255).astype(np.uint8)).convert("RGB")
77 |         for ms in mask:
78 |             m_img = Image.fromarray((ms.squeeze().cpu().numpy() * 255).astype(np.uint8)).convert("RGB")
79 |         rm_img = remove(o_img, m_img)
80 |         rst = pil2tensor(rm_img if rm_img is not None else o_img)  # fall back to the input if inpainting failed
81 |         return (rst,)
--------------------------------------------------------------------------------
/nodes/upload_image_path.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import os
3 | import folder_paths
4 | import node_helpers
5 | import numpy as np
6 | from PIL import Image, ImageOps, ImageSequence
7 | import hashlib
8 | 
9 | 
10 | class LoadImagePathNode:
11 |     @classmethod
12 |     def INPUT_TYPES(s):
13 |         input_dir = folder_paths.get_input_directory()
14 |         files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
15 |         return {"required":
16 |                     {"image": (sorted(files), {"image_upload": True})},
17 |                 }
18 | 
19 |     CATEGORY = "MingNodes/Loader"
20 | 
21 |     RETURN_TYPES = ("IMAGE", "MASK", "STRING")
22 |     RETURN_NAMES = ("image", "mask", "image path")
23 |     FUNCTION = "load_image"
24 | 
25 |     def load_image(self, image):
26 |         image_path = folder_paths.get_annotated_filepath(image)
27 | 
28 |         img = node_helpers.pillow(Image.open, image_path)
29 | 
30 |         output_images = []
31 |         output_masks = []
32 |         w, h = None, None
33 | 
34 |         excluded_formats = ['MPO']
35 | 
36 |         for i in ImageSequence.Iterator(img):
37 |             i = node_helpers.pillow(ImageOps.exif_transpose, i)
38 | 
39 |             if i.mode == 'I':
40 |                 i = i.point(lambda i: i * (1 / 255))
41 | 
42 |             if i.mode == 'RGBA':
43 |                 image = i.convert("RGBA")
44 |             else:
45 |                 image = i.convert("RGB")  # covers 'P', 'L', 'CMYK', etc.; the original if/elif left `image` unbound for those modes
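46 |             # the first frame locks (w, h); frames of a different size are skipped below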
47 | if len(output_images) == 0: 48 | w = image.size[0] 49 | h = image.size[1] 50 | 51 | if image.size[0] != w or image.size[1] != h: 52 | continue 53 | 54 | image = np.array(image).astype(np.float32) / 255.0 55 | image = torch.from_numpy(image)[None,] 56 | if 'A' in i.getbands(): 57 | mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0 58 | mask = 1. - torch.from_numpy(mask) 59 | else: 60 | mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") 61 | output_images.append(image) 62 | output_masks.append(mask.unsqueeze(0)) 63 | 64 | if len(output_images) > 1 and img.format not in excluded_formats: 65 | output_image = torch.cat(output_images, dim=0) 66 | output_mask = torch.cat(output_masks, dim=0) 67 | else: 68 | output_image = output_images[0] 69 | output_mask = output_masks[0] 70 | 71 | return (output_image, output_mask, image_path) 72 | 73 | @classmethod 74 | def IS_CHANGED(s, image): 75 | image_path = folder_paths.get_annotated_filepath(image) 76 | m = hashlib.sha256() 77 | with open(image_path, 'rb') as f: 78 | m.update(f.read()) 79 | return m.digest().hex() 80 | 81 | @classmethod 82 | def VALIDATE_INPUTS(s, image): 83 | if not folder_paths.exists_annotated_filepath(image): 84 | return "Invalid image file: {}".format(image) 85 | 86 | return True 87 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "ComfyUI-MingNodes" 3 | description = "Nodes: imitation hue,IC-light light shape, watermark, AI remove watermark, HSL color, color balance, brightness contrast saturation, highlight shadow brightness, gray channel, baidu translate" 4 | version = "1.0" 5 | license = { file = "LICENSE" } 6 | 7 | [project.urls] 8 | Repository = "https://github.com/mingsky-ai/ComfyUI-MingNodes" 9 | 10 | [tool.comfy] 11 | PublisherId = "mingsky" 12 | DisplayName = "ComfyUI-MingNodes" 13 | Icon = "" 14 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | opencv-python>=4.5.5.62 2 | numpy 3 | pillow>=10.1.0 4 | scipy>=1.12.0 5 | litelama>=0.1.7 6 | -------------------------------------------------------------------------------- /workflow/光源_lightSource.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 14, 3 | "last_link_id": 28, 4 | "nodes": [ 5 | { 6 | "id": 11, 7 | "type": "LightShapeNode", 8 | "pos": { 9 | "0": 4350, 10 | "1": 2410 11 | }, 12 | "size": { 13 | "0": 320, 14 | "1": 290 15 | }, 16 | "flags": {}, 17 | "order": 2, 18 | "mode": 0, 19 | "inputs": [ 20 | { 21 | "name": "base_image", 22 | "type": "IMAGE", 23 | "link": null, 24 | "label": "base_image" 25 | }, 26 | { 27 | "name": "background_color", 28 | "type": "STRING", 29 | "link": 15, 30 | "widget": { 31 | "name": "background_color" 32 | }, 33 | "label": "background_color" 34 | }, 35 | { 36 | "name": "shape_color", 37 | "type": "STRING", 38 | "link": 14, 39 | "widget": { 40 | "name": "shape_color" 41 | }, 42 | "label": "shape_color" 43 | } 44 | ], 45 | "outputs": [ 46 | { 47 | "name": "image", 48 | "type": "IMAGE", 49 | "links": [ 50 | 16, 51 | 21 52 | ], 53 | "slot_index": 0, 54 | "shape": 3, 55 | "label": "image" 56 | } 57 | ], 58 | "properties": { 59 | "Node name for S&R": "LightShapeNode" 60 | }, 61 | "widgets_values": [ 62 | 1024, 63 | 1024, 64 | "triangle", 65 | 0, 66 | -600, 67 | 1, 
68 | 180, 69 | 1, 70 | 0, 71 | "#FFFFFF", 72 | "#FFFFFF" 73 | ] 74 | }, 75 | { 76 | "id": 14, 77 | "type": "AdjustBrightnessContrastSaturationNode", 78 | "pos": { 79 | "0": 5110, 80 | "1": 2480 81 | }, 82 | "size": { 83 | "0": 378, 84 | "1": 106 85 | }, 86 | "flags": {}, 87 | "order": 6, 88 | "mode": 0, 89 | "inputs": [ 90 | { 91 | "name": "image", 92 | "type": "IMAGE", 93 | "link": 27, 94 | "label": "image" 95 | } 96 | ], 97 | "outputs": [ 98 | { 99 | "name": "image", 100 | "type": "IMAGE", 101 | "links": [ 102 | 28 103 | ], 104 | "shape": 3, 105 | "label": "image", 106 | "slot_index": 0 107 | } 108 | ], 109 | "properties": { 110 | "Node name for S&R": "AdjustBrightnessContrastSaturationNode" 111 | }, 112 | "widgets_values": [ 113 | 1.5, 114 | 1, 115 | 1 116 | ] 117 | }, 118 | { 119 | "id": 12, 120 | "type": "LightShapeNode", 121 | "pos": { 122 | "0": 4720, 123 | "1": 2410 124 | }, 125 | "size": { 126 | "0": 320, 127 | "1": 290 128 | }, 129 | "flags": {}, 130 | "order": 4, 131 | "mode": 0, 132 | "inputs": [ 133 | { 134 | "name": "base_image", 135 | "type": "IMAGE", 136 | "link": 21, 137 | "label": "base_image" 138 | }, 139 | { 140 | "name": "background_color", 141 | "type": "STRING", 142 | "link": 23, 143 | "widget": { 144 | "name": "background_color" 145 | }, 146 | "label": "background_color" 147 | }, 148 | { 149 | "name": "shape_color", 150 | "type": "STRING", 151 | "link": 22, 152 | "widget": { 153 | "name": "shape_color" 154 | }, 155 | "label": "shape_color" 156 | } 157 | ], 158 | "outputs": [ 159 | { 160 | "name": "image", 161 | "type": "IMAGE", 162 | "links": [ 163 | 25, 164 | 27 165 | ], 166 | "slot_index": 0, 167 | "shape": 3, 168 | "label": "image" 169 | } 170 | ], 171 | "properties": { 172 | "Node name for S&R": "LightShapeNode" 173 | }, 174 | "widgets_values": [ 175 | 1024, 176 | 1024, 177 | "radial", 178 | 0, 179 | 600, 180 | 1, 181 | 0, 182 | 1, 183 | 100, 184 | "#FFFFFF", 185 | "#FFFFFF" 186 | ] 187 | }, 188 | { 189 | "id": 7, 190 | "type": "LayerUtility: ColorPicker", 191 | "pos": { 192 | "0": 3990, 193 | "1": 2420 194 | }, 195 | "size": { 196 | "0": 290, 197 | "1": 100 198 | }, 199 | "flags": {}, 200 | "order": 0, 201 | "mode": 0, 202 | "inputs": [], 203 | "outputs": [ 204 | { 205 | "name": "value", 206 | "type": "STRING", 207 | "links": [ 208 | 15, 209 | 23 210 | ], 211 | "slot_index": 0, 212 | "shape": 3, 213 | "label": "值" 214 | } 215 | ], 216 | "properties": { 217 | "Node name for S&R": "LayerUtility: ColorPicker" 218 | }, 219 | "widgets_values": [ 220 | "#000000", 221 | "HEX" 222 | ], 223 | "color": "#3d3e4c", 224 | "bgcolor": "#292a38" 225 | }, 226 | { 227 | "id": 6, 228 | "type": "LayerUtility: ColorPicker", 229 | "pos": { 230 | "0": 3990, 231 | "1": 2600 232 | }, 233 | "size": { 234 | "0": 290, 235 | "1": 100 236 | }, 237 | "flags": {}, 238 | "order": 1, 239 | "mode": 0, 240 | "inputs": [], 241 | "outputs": [ 242 | { 243 | "name": "value", 244 | "type": "STRING", 245 | "links": [ 246 | 14, 247 | 22 248 | ], 249 | "slot_index": 0, 250 | "shape": 3, 251 | "label": "值" 252 | } 253 | ], 254 | "properties": { 255 | "Node name for S&R": "LayerUtility: ColorPicker" 256 | }, 257 | "widgets_values": [ 258 | "#ffffff", 259 | "HEX" 260 | ], 261 | "color": "#3d3e4c", 262 | "bgcolor": "#292a38" 263 | }, 264 | { 265 | "id": 2, 266 | "type": "PreviewImage", 267 | "pos": { 268 | "0": 4200, 269 | "1": 2880 270 | }, 271 | "size": [ 272 | 370, 273 | 370 274 | ], 275 | "flags": {}, 276 | "order": 3, 277 | "mode": 0, 278 | "inputs": [ 279 | { 280 | "name": "images", 281 | "type": 
"IMAGE", 282 | "link": 16, 283 | "label": "图像" 284 | } 285 | ], 286 | "outputs": [], 287 | "properties": { 288 | "Node name for S&R": "PreviewImage" 289 | }, 290 | "widgets_values": [] 291 | }, 292 | { 293 | "id": 13, 294 | "type": "PreviewImage", 295 | "pos": { 296 | "0": 4630, 297 | "1": 2870 298 | }, 299 | "size": [ 300 | 370, 301 | 390 302 | ], 303 | "flags": {}, 304 | "order": 5, 305 | "mode": 0, 306 | "inputs": [ 307 | { 308 | "name": "images", 309 | "type": "IMAGE", 310 | "link": 25, 311 | "label": "图像" 312 | } 313 | ], 314 | "outputs": [], 315 | "properties": { 316 | "Node name for S&R": "PreviewImage" 317 | }, 318 | "widgets_values": [] 319 | }, 320 | { 321 | "id": 5, 322 | "type": "PreviewImage", 323 | "pos": { 324 | "0": 5040, 325 | "1": 2870 326 | }, 327 | "size": [ 328 | 390, 329 | 390 330 | ], 331 | "flags": {}, 332 | "order": 7, 333 | "mode": 0, 334 | "inputs": [ 335 | { 336 | "name": "images", 337 | "type": "IMAGE", 338 | "link": 28, 339 | "label": "图像" 340 | } 341 | ], 342 | "outputs": [], 343 | "properties": { 344 | "Node name for S&R": "PreviewImage" 345 | }, 346 | "widgets_values": [] 347 | } 348 | ], 349 | "links": [ 350 | [ 351 | 14, 352 | 6, 353 | 0, 354 | 11, 355 | 2, 356 | "STRING" 357 | ], 358 | [ 359 | 15, 360 | 7, 361 | 0, 362 | 11, 363 | 1, 364 | "STRING" 365 | ], 366 | [ 367 | 16, 368 | 11, 369 | 0, 370 | 2, 371 | 0, 372 | "IMAGE" 373 | ], 374 | [ 375 | 21, 376 | 11, 377 | 0, 378 | 12, 379 | 0, 380 | "IMAGE" 381 | ], 382 | [ 383 | 22, 384 | 6, 385 | 0, 386 | 12, 387 | 2, 388 | "STRING" 389 | ], 390 | [ 391 | 23, 392 | 7, 393 | 0, 394 | 12, 395 | 1, 396 | "STRING" 397 | ], 398 | [ 399 | 25, 400 | 12, 401 | 0, 402 | 13, 403 | 0, 404 | "IMAGE" 405 | ], 406 | [ 407 | 27, 408 | 12, 409 | 0, 410 | 14, 411 | 0, 412 | "IMAGE" 413 | ], 414 | [ 415 | 28, 416 | 14, 417 | 0, 418 | 5, 419 | 0, 420 | "IMAGE" 421 | ] 422 | ], 423 | "groups": [], 424 | "config": {}, 425 | "extra": { 426 | "ds": { 427 | "scale": 0.8769226950000005, 428 | "offset": [ 429 | -2760.805393018744, 430 | -2246.384612444121 431 | ] 432 | } 433 | }, 434 | "version": 0.4 435 | } -------------------------------------------------------------------------------- /workflow/加水印_addWatermark.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 13, 3 | "last_link_id": 21, 4 | "nodes": [ 5 | { 6 | "id": 8, 7 | "type": "AddWaterMarkNode", 8 | "pos": { 9 | "0": 4630, 10 | "1": 3260 11 | }, 12 | "size": { 13 | "0": 315, 14 | "1": 266 15 | }, 16 | "flags": {}, 17 | "order": 4, 18 | "mode": 0, 19 | "inputs": [ 20 | { 21 | "name": "image", 22 | "type": "IMAGE", 23 | "link": 10, 24 | "label": "image" 25 | }, 26 | { 27 | "name": "watermark", 28 | "type": "IMAGE", 29 | "link": 11, 30 | "label": "watermark" 31 | }, 32 | { 33 | "name": "watermark_mask", 34 | "type": "MASK", 35 | "link": 17, 36 | "label": "watermark_mask" 37 | } 38 | ], 39 | "outputs": [ 40 | { 41 | "name": "image", 42 | "type": "IMAGE", 43 | "links": [ 44 | 18 45 | ], 46 | "slot_index": 0, 47 | "shape": 3, 48 | "label": "image" 49 | } 50 | ], 51 | "properties": { 52 | "Node name for S&R": "AddWaterMarkNode" 53 | }, 54 | "widgets_values": [ 55 | true, 56 | 0, 57 | 0, 58 | 1, 59 | 1, 60 | "", 61 | "#FFFFFF", 62 | "blackrumbleregular.ttf" 63 | ] 64 | }, 65 | { 66 | "id": 10, 67 | "type": "AddWaterMarkNode", 68 | "pos": { 69 | "0": 5000, 70 | "1": 3260 71 | }, 72 | "size": [ 73 | 310, 74 | 260 75 | ], 76 | "flags": {}, 77 | "order": 5, 78 | "mode": 0, 79 | "inputs": [ 80 | { 81 | "name": "image", 82 | 
"type": "IMAGE", 83 | "link": 18, 84 | "label": "image" 85 | }, 86 | { 87 | "name": "watermark", 88 | "type": "IMAGE", 89 | "link": null, 90 | "label": "watermark" 91 | }, 92 | { 93 | "name": "watermark_mask", 94 | "type": "MASK", 95 | "link": null, 96 | "label": "watermark_mask" 97 | }, 98 | { 99 | "name": "text_color", 100 | "type": "STRING", 101 | "link": 19, 102 | "widget": { 103 | "name": "text_color" 104 | }, 105 | "label": "text_color" 106 | }, 107 | { 108 | "name": "text", 109 | "type": "STRING", 110 | "link": 21, 111 | "widget": { 112 | "name": "text" 113 | }, 114 | "label": "text" 115 | } 116 | ], 117 | "outputs": [ 118 | { 119 | "name": "image", 120 | "type": "IMAGE", 121 | "links": [ 122 | 20 123 | ], 124 | "slot_index": 0, 125 | "shape": 3, 126 | "label": "image" 127 | } 128 | ], 129 | "properties": { 130 | "Node name for S&R": "AddWaterMarkNode" 131 | }, 132 | "widgets_values": [ 133 | false, 134 | 200, 135 | 1600, 136 | 1, 137 | 7, 138 | "", 139 | "#FFFFFF", 140 | "沙孟海书法字体.ttf" 141 | ] 142 | }, 143 | { 144 | "id": 2, 145 | "type": "LoadImage", 146 | "pos": { 147 | "0": 4200, 148 | "1": 3210 149 | }, 150 | "size": [ 151 | 320, 152 | 310 153 | ], 154 | "flags": {}, 155 | "order": 0, 156 | "mode": 0, 157 | "inputs": [], 158 | "outputs": [ 159 | { 160 | "name": "IMAGE", 161 | "type": "IMAGE", 162 | "links": [ 163 | 10 164 | ], 165 | "slot_index": 0, 166 | "shape": 3, 167 | "label": "图像" 168 | }, 169 | { 170 | "name": "MASK", 171 | "type": "MASK", 172 | "links": null, 173 | "shape": 3, 174 | "label": "遮罩" 175 | } 176 | ], 177 | "properties": { 178 | "Node name for S&R": "LoadImage" 179 | }, 180 | "widgets_values": [ 181 | "fenghuang.png", 182 | "image" 183 | ] 184 | }, 185 | { 186 | "id": 3, 187 | "type": "LoadImage", 188 | "pos": { 189 | "0": 4190, 190 | "1": 3590 191 | }, 192 | "size": [ 193 | 320, 194 | 310 195 | ], 196 | "flags": {}, 197 | "order": 1, 198 | "mode": 0, 199 | "inputs": [], 200 | "outputs": [ 201 | { 202 | "name": "IMAGE", 203 | "type": "IMAGE", 204 | "links": [ 205 | 11 206 | ], 207 | "slot_index": 0, 208 | "shape": 3, 209 | "label": "图像" 210 | }, 211 | { 212 | "name": "MASK", 213 | "type": "MASK", 214 | "links": [ 215 | 17 216 | ], 217 | "slot_index": 1, 218 | "shape": 3, 219 | "label": "遮罩" 220 | } 221 | ], 222 | "properties": { 223 | "Node name for S&R": "LoadImage" 224 | }, 225 | "widgets_values": [ 226 | "watermark.png", 227 | "image" 228 | ] 229 | }, 230 | { 231 | "id": 11, 232 | "type": "LayerUtility: ColorPicker", 233 | "pos": { 234 | "0": 4640, 235 | "1": 3580 236 | }, 237 | "size": { 238 | "0": 290, 239 | "1": 100 240 | }, 241 | "flags": {}, 242 | "order": 2, 243 | "mode": 0, 244 | "inputs": [], 245 | "outputs": [ 246 | { 247 | "name": "value", 248 | "type": "STRING", 249 | "links": [ 250 | 19 251 | ], 252 | "slot_index": 0, 253 | "shape": 3, 254 | "label": "值" 255 | } 256 | ], 257 | "properties": { 258 | "Node name for S&R": "LayerUtility: ColorPicker" 259 | }, 260 | "widgets_values": [ 261 | "#fac81d", 262 | "HEX" 263 | ], 264 | "color": "#3d3e4c", 265 | "bgcolor": "#292a38" 266 | }, 267 | { 268 | "id": 13, 269 | "type": "easy positive", 270 | "pos": { 271 | "0": 4630, 272 | "1": 3740 273 | }, 274 | "size": { 275 | "0": 330, 276 | "1": 130 277 | }, 278 | "flags": {}, 279 | "order": 3, 280 | "mode": 0, 281 | "inputs": [], 282 | "outputs": [ 283 | { 284 | "name": "positive", 285 | "type": "STRING", 286 | "links": [ 287 | 21 288 | ], 289 | "slot_index": 0, 290 | "shape": 3, 291 | "label": "正面提示词" 292 | } 293 | ], 294 | "properties": { 295 | "Node name 
for S&R": "easy positive" 296 | }, 297 | "widgets_values": [ 298 | "龙飞凤舞", 299 | true 300 | ], 301 | "color": "#232", 302 | "bgcolor": "#353" 303 | }, 304 | { 305 | "id": 12, 306 | "type": "PreviewImage", 307 | "pos": { 308 | "0": 5360, 309 | "1": 3250 310 | }, 311 | "size": [ 312 | 470, 313 | 600 314 | ], 315 | "flags": {}, 316 | "order": 6, 317 | "mode": 0, 318 | "inputs": [ 319 | { 320 | "name": "images", 321 | "type": "IMAGE", 322 | "link": 20, 323 | "label": "图像" 324 | } 325 | ], 326 | "outputs": [], 327 | "properties": { 328 | "Node name for S&R": "PreviewImage" 329 | }, 330 | "widgets_values": [] 331 | } 332 | ], 333 | "links": [ 334 | [ 335 | 10, 336 | 2, 337 | 0, 338 | 8, 339 | 0, 340 | "IMAGE" 341 | ], 342 | [ 343 | 11, 344 | 3, 345 | 0, 346 | 8, 347 | 1, 348 | "IMAGE" 349 | ], 350 | [ 351 | 17, 352 | 3, 353 | 1, 354 | 8, 355 | 2, 356 | "MASK" 357 | ], 358 | [ 359 | 18, 360 | 8, 361 | 0, 362 | 10, 363 | 0, 364 | "IMAGE" 365 | ], 366 | [ 367 | 19, 368 | 11, 369 | 0, 370 | 10, 371 | 3, 372 | "STRING" 373 | ], 374 | [ 375 | 20, 376 | 10, 377 | 0, 378 | 12, 379 | 0, 380 | "IMAGE" 381 | ], 382 | [ 383 | 21, 384 | 13, 385 | 0, 386 | 10, 387 | 4, 388 | "STRING" 389 | ] 390 | ], 391 | "groups": [], 392 | "config": {}, 393 | "extra": { 394 | "ds": { 395 | "scale": 1.1671841070450009, 396 | "offset": [ 397 | -3510.0632324420226, 398 | -3107.638839763504 399 | ] 400 | } 401 | }, 402 | "version": 0.4 403 | } -------------------------------------------------------------------------------- /workflow/追色_imitationHue.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 8, 3 | "last_link_id": 35, 4 | "nodes": [ 5 | { 6 | "id": 4, 7 | "type": "PreviewImage", 8 | "pos": { 9 | "0": 1500, 10 | "1": 620 11 | }, 12 | "size": { 13 | "0": 540, 14 | "1": 750 15 | }, 16 | "flags": {}, 17 | "order": 4, 18 | "mode": 0, 19 | "inputs": [ 20 | { 21 | "name": "images", 22 | "type": "IMAGE", 23 | "link": 3, 24 | "label": "图像" 25 | } 26 | ], 27 | "outputs": [], 28 | "properties": { 29 | "Node name for S&R": "PreviewImage" 30 | } 31 | }, 32 | { 33 | "id": 6, 34 | "type": "PreviewImage", 35 | "pos": { 36 | "0": 2050, 37 | "1": 620 38 | }, 39 | "size": { 40 | "0": 560, 41 | "1": 750 42 | }, 43 | "flags": {}, 44 | "order": 5, 45 | "mode": 0, 46 | "inputs": [ 47 | { 48 | "name": "images", 49 | "type": "IMAGE", 50 | "link": 7, 51 | "label": "图像" 52 | } 53 | ], 54 | "outputs": [], 55 | "properties": { 56 | "Node name for S&R": "PreviewImage" 57 | } 58 | }, 59 | { 60 | "id": 3, 61 | "type": "LoadImage", 62 | "pos": { 63 | "0": 730, 64 | "1": 650 65 | }, 66 | "size": [ 67 | 320, 68 | 310 69 | ], 70 | "flags": {}, 71 | "order": 0, 72 | "mode": 0, 73 | "inputs": [], 74 | "outputs": [ 75 | { 76 | "name": "IMAGE", 77 | "type": "IMAGE", 78 | "links": [ 79 | 2, 80 | 6 81 | ], 82 | "slot_index": 0, 83 | "shape": 3, 84 | "label": "图像" 85 | }, 86 | { 87 | "name": "MASK", 88 | "type": "MASK", 89 | "links": null, 90 | "shape": 3, 91 | "label": "遮罩" 92 | } 93 | ], 94 | "properties": { 95 | "Node name for S&R": "LoadImage" 96 | }, 97 | "widgets_values": [ 98 | "09.jpeg", 99 | "image" 100 | ] 101 | }, 102 | { 103 | "id": 2, 104 | "type": "LoadImage", 105 | "pos": { 106 | "0": 730, 107 | "1": 1020 108 | }, 109 | "size": [ 110 | 320, 111 | 310 112 | ], 113 | "flags": {}, 114 | "order": 1, 115 | "mode": 0, 116 | "inputs": [], 117 | "outputs": [ 118 | { 119 | "name": "IMAGE", 120 | "type": "IMAGE", 121 | "links": [ 122 | 1, 123 | 5 124 | ], 125 | "slot_index": 0, 126 | "shape": 3, 
127 | "label": "图像" 128 | }, 129 | { 130 | "name": "MASK", 131 | "type": "MASK", 132 | "links": [], 133 | "slot_index": 1, 134 | "shape": 3, 135 | "label": "遮罩" 136 | } 137 | ], 138 | "properties": { 139 | "Node name for S&R": "LoadImage" 140 | }, 141 | "widgets_values": [ 142 | "10.jpg", 143 | "image" 144 | ] 145 | }, 146 | { 147 | "id": 1, 148 | "type": "ImitationHueNode", 149 | "pos": { 150 | "0": 1140, 151 | "1": 650 152 | }, 153 | "size": { 154 | "0": 315, 155 | "1": 314 156 | }, 157 | "flags": {}, 158 | "order": 2, 159 | "mode": 0, 160 | "inputs": [ 161 | { 162 | "name": "imitation_image", 163 | "type": "IMAGE", 164 | "link": 2, 165 | "label": "imitation_image" 166 | }, 167 | { 168 | "name": "target_image", 169 | "type": "IMAGE", 170 | "link": 1, 171 | "label": "target_image" 172 | }, 173 | { 174 | "name": "mask", 175 | "type": "MASK", 176 | "link": null, 177 | "label": "mask" 178 | } 179 | ], 180 | "outputs": [ 181 | { 182 | "name": "image", 183 | "type": "IMAGE", 184 | "links": [ 185 | 3 186 | ], 187 | "slot_index": 0, 188 | "shape": 3, 189 | "label": "image" 190 | } 191 | ], 192 | "properties": { 193 | "Node name for S&R": "ImitationHueNode" 194 | }, 195 | "widgets_values": [ 196 | 1, 197 | 0.2, 198 | true, 199 | 0.5, 200 | false, 201 | 0.5, 202 | false, 203 | 0.5, 204 | true, 205 | 0.7000000000000001 206 | ] 207 | }, 208 | { 209 | "id": 5, 210 | "type": "ImitationHueNode", 211 | "pos": { 212 | "0": 1140, 213 | "1": 1050 214 | }, 215 | "size": { 216 | "0": 315, 217 | "1": 314 218 | }, 219 | "flags": {}, 220 | "order": 3, 221 | "mode": 0, 222 | "inputs": [ 223 | { 224 | "name": "imitation_image", 225 | "type": "IMAGE", 226 | "link": 6, 227 | "label": "imitation_image" 228 | }, 229 | { 230 | "name": "target_image", 231 | "type": "IMAGE", 232 | "link": 5, 233 | "label": "target_image" 234 | }, 235 | { 236 | "name": "mask", 237 | "type": "MASK", 238 | "link": null, 239 | "label": "mask" 240 | } 241 | ], 242 | "outputs": [ 243 | { 244 | "name": "image", 245 | "type": "IMAGE", 246 | "links": [ 247 | 7 248 | ], 249 | "slot_index": 0, 250 | "shape": 3, 251 | "label": "image" 252 | } 253 | ], 254 | "properties": { 255 | "Node name for S&R": "ImitationHueNode" 256 | }, 257 | "widgets_values": [ 258 | 1, 259 | 0.2, 260 | true, 261 | 0.5, 262 | false, 263 | 0.5, 264 | false, 265 | 0.5, 266 | false, 267 | 1 268 | ] 269 | } 270 | ], 271 | "links": [ 272 | [ 273 | 1, 274 | 2, 275 | 0, 276 | 1, 277 | 1, 278 | "IMAGE" 279 | ], 280 | [ 281 | 2, 282 | 3, 283 | 0, 284 | 1, 285 | 0, 286 | "IMAGE" 287 | ], 288 | [ 289 | 3, 290 | 1, 291 | 0, 292 | 4, 293 | 0, 294 | "IMAGE" 295 | ], 296 | [ 297 | 5, 298 | 2, 299 | 0, 300 | 5, 301 | 1, 302 | "IMAGE" 303 | ], 304 | [ 305 | 6, 306 | 3, 307 | 0, 308 | 5, 309 | 0, 310 | "IMAGE" 311 | ], 312 | [ 313 | 7, 314 | 5, 315 | 0, 316 | 6, 317 | 0, 318 | "IMAGE" 319 | ] 320 | ], 321 | "groups": [], 322 | "config": {}, 323 | "extra": { 324 | "ds": { 325 | "scale": 0.7972024500000005, 326 | "offset": [ 327 | -644.2031763044106, 328 | -488.57514534667666 329 | ] 330 | } 331 | }, 332 | "version": 0.4 333 | } --------------------------------------------------------------------------------
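An editorial note for readers of this dump: the heart of nodes/imitation_hue.py above is a Reinhard-style statistics match on the LAB chroma channels. The sketch below strips away the ComfyUI plumbing (tensors, batching, the skin-protection mask, and the auto_* brightness/contrast/saturation/tone options) to show just that core idea. It is a minimal sketch, not the node's exact output, and the input file names reference.jpg and photo.jpg are illustrative assumptions, not files from this repository:

import cv2
import numpy as np


def match_chroma(source_bgr, target_bgr, strength=1.0):
    # Work in LAB: channel 0 is lightness, channels 1 and 2 (a, b) carry the chroma.
    src = cv2.cvtColor(source_bgr, cv2.COLOR_BGR2LAB).astype(np.float32)
    tgt = cv2.cvtColor(target_bgr, cv2.COLOR_BGR2LAB).astype(np.float32)
    out = tgt.copy()
    for i in (1, 2):
        s_mean, s_std = src[:, :, i].mean(), src[:, :, i].std()
        t_mean, t_std = tgt[:, :, i].mean(), tgt[:, :, i].std()
        # Shift and scale the target channel so its mean/std match the source's.
        out[:, :, i] = np.clip((tgt[:, :, i] - t_mean) * (s_std / (t_std + 1e-6)) + s_mean, 0, 255)
    graded = cv2.cvtColor(out.astype(np.uint8), cv2.COLOR_LAB2BGR)
    # Blend with the untouched target, mirroring the node's "strength" widget.
    return cv2.addWeighted(target_bgr, 1.0 - strength, graded, strength, 0)


if __name__ == "__main__":
    reference = cv2.imread("reference.jpg")  # hypothetical color reference
    photo = cv2.imread("photo.jpg")          # hypothetical image to regrade
    if reference is None or photo is None:
        raise SystemExit("test images not found; the file names here are placeholders")
    cv2.imwrite("graded.jpg", match_chroma(reference, photo, strength=0.8))

Unlike a naive RGB mean shift, matching statistics only on the a/b channels leaves luminance untouched, which is why the node handles brightness, contrast, and tone separately through its auto_* switches.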