├── .gitignore ├── LICENSE ├── README.md ├── hakuimg ├── __init__.py ├── blend.py ├── blur.py ├── chromatic │ ├── __init__.py │ └── kromo.py ├── color │ ├── __init__.py │ ├── exposure_offset.py │ ├── hdr.py │ ├── noise.py │ ├── sharpness.py │ └── vignette.py ├── curve.py ├── custom_exif.py ├── dither.py ├── flip.py ├── image_preprocess.py ├── lens_distortion.py ├── neon.py ├── outline_expansion.py ├── pixel.py ├── pixeloe.py ├── sketch.py └── tilt_shift │ ├── __init__.py │ └── utils.py ├── inoutpaint ├── __init__.py ├── main.py └── utils.py ├── install.py ├── javascript ├── swicth.js └── utils.js ├── requirements.txt ├── scripts └── main.py └── style.css /.gitignore: -------------------------------------------------------------------------------- 1 | *.png 2 | *.DS_Store 3 | 4 | # Byte-compiled / optimized / DLL files 5 | __pycache__/ 6 | *.py[cod] 7 | *$py.class 8 | 9 | # C extensions 10 | *.so 11 | 12 | # Distribution / packaging 13 | .Python 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | wheels/ 26 | share/python-wheels/ 27 | *.egg-info/ 28 | .installed.cfg 29 | *.egg 30 | MANIFEST 31 | 32 | # PyInstaller 33 | # Usually these files are written by a python script from a template 34 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 35 | *.manifest 36 | *.spec 37 | 38 | # Installer logs 39 | pip-log.txt 40 | pip-delete-this-directory.txt 41 | 42 | # Unit test / coverage reports 43 | htmlcov/ 44 | .tox/ 45 | .nox/ 46 | .coverage 47 | .coverage.* 48 | .cache 49 | nosetests.xml 50 | coverage.xml 51 | *.cover 52 | *.py,cover 53 | .hypothesis/ 54 | .pytest_cache/ 55 | cover/ 56 | 57 | # Translations 58 | *.mo 59 | *.pot 60 | 61 | # Django stuff: 62 | *.log 63 | local_settings.py 64 | db.sqlite3 65 | db.sqlite3-journal 66 | 67 | # Flask stuff: 68 | instance/ 69 | .webassets-cache 70 | 71 | # Scrapy stuff: 72 | .scrapy 73 | 74 | # Sphinx documentation 75 | docs/_build/ 76 | 77 | # PyBuilder 78 | .pybuilder/ 79 | target/ 80 | 81 | # Jupyter Notebook 82 | .ipynb_checkpoints 83 | 84 | # IPython 85 | profile_default/ 86 | ipython_config.py 87 | 88 | # pyenv 89 | # For a library or package, you might want to ignore these files since the code is 90 | # intended to run in multiple environments; otherwise, check them in: 91 | # .python-version 92 | 93 | # pipenv 94 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 95 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 96 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 97 | # install all needed dependencies. 98 | #Pipfile.lock 99 | 100 | # poetry 101 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 102 | # This is especially recommended for binary packages to ensure reproducibility, and is more 103 | # commonly ignored for libraries. 104 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 105 | #poetry.lock 106 | 107 | # pdm 108 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 109 | #pdm.lock 110 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 111 | # in version control. 112 | # https://pdm.fming.dev/#use-with-ide 113 | .pdm.toml 114 | 115 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 116 | __pypackages__/ 117 | 118 | # Celery stuff 119 | celerybeat-schedule 120 | celerybeat.pid 121 | 122 | # SageMath parsed files 123 | *.sage.py 124 | 125 | # Environments 126 | .env 127 | .venv 128 | env/ 129 | venv/ 130 | ENV/ 131 | env.bak/ 132 | venv.bak/ 133 | 134 | # Spyder project settings 135 | .spyderproject 136 | .spyproject 137 | 138 | # Rope project settings 139 | .ropeproject 140 | 141 | # mkdocs documentation 142 | /site 143 | 144 | # mypy 145 | .mypy_cache/ 146 | .dmypy.json 147 | dmypy.json 148 | 149 | # Pyre type checker 150 | .pyre/ 151 | 152 | # pytype static type analyzer 153 | .pytype/ 154 | 155 | # Cython debug symbols 156 | cython_debug/ 157 | 158 | # PyCharm 159 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 160 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 161 | # and can be added to the global gitignore or merged into this file. For a more nuclear 162 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 163 | #.idea/ 164 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2023 KBlueLeaf 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # a1111-sd-webui-haku-img
2 | An image utilities extension for A1111's sd-webui
3 | 
4 | Features:
5 | * blending (supports 17 modes)
6 | * color adjustment
7 | * tone curve
8 | * blur
9 | * img2sketch (through XDoG)
10 | * pixelize (K-means or dithering)
11 | * glow (blur and blend)
12 | * inpaint/outpaint support tool
13 | 
14 | ## Example:
15 | https://user-images.githubusercontent.com/59680068/212915298-92157d15-e7e0-40b0-9b89-74da3eb4ea18.mp4
16 | 
17 | ## blend
18 | ![image](https://user-images.githubusercontent.com/59680068/215345290-dd9e11b8-f717-4c15-bd32-19259691122d.png)
19 | ![image](https://user-images.githubusercontent.com/59680068/215345295-0393cb8c-2bd1-48c2-809b-c25c1554197b.png)
20 | 
21 | 
22 | ## color
23 | ![image](https://user-images.githubusercontent.com/59680068/215345319-799e006d-6aa3-4c8b-90b6-87817eb0eeab.png)
24 | 
25 | ## tone curve
26 | https://user-images.githubusercontent.com/59680068/215345100-4f0c74bb-f7e6-4f62-ba98-269ced40e246.mp4
27 | 
28 | ## blur
29 | ![image](https://user-images.githubusercontent.com/59680068/212818343-4754764a-cd3d-4591-a823-a065bdc7b934.png)
30 | 
31 | 
32 | ## sketch
33 | ![image](https://user-images.githubusercontent.com/59680068/215345372-019e796e-04e7-48ac-81bd-f56eff2a338d.png)
34 | 
35 | 
36 | ## pixelize
37 | ![image](https://user-images.githubusercontent.com/59680068/215345447-b528655b-7f5a-411f-95aa-61e02fc41d09.png)
38 | 
--------------------------------------------------------------------------------
/hakuimg/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KohakuBlueleaf/a1111-sd-webui-haku-img/ac80e1e9415e25109ae1e6193d4a3a33aaa86b13/hakuimg/__init__.py
--------------------------------------------------------------------------------
/hakuimg/blend.py:
--------------------------------------------------------------------------------
1 | from PIL import Image, ImageFilter, ImageColor
2 | import numpy as np
3 | import cv2
4 | 
5 | 
6 | def basic(target, blend, opacity):
7 |     return target * opacity + blend * (1 - opacity)
8 | 
9 | 
10 | def blender(func):
11 |     def blend(target, blend, opacity=1, *args):
12 |         res = func(target, blend, *args)
13 |         res = basic(res, blend, opacity)
14 |         return np.clip(res, 0, 1)
15 | 
16 |     return blend
17 | 
18 | 
19 | class Blend:
20 |     @classmethod
21 |     def method(cls, name):
22 |         return getattr(cls, name)
23 | 
24 |     normal = basic
25 | 
26 |     @staticmethod
27 |     @blender
28 |     def darken(target, blend, *args):
29 |         return np.minimum(target, blend)
30 | 
31 |     @staticmethod
32 |     @blender
33 |     def multiply(target, blend, *args):
34 |         return target * blend
35 | 
36 |     @staticmethod
37 |     @blender
38 |     def color_burn(target, blend, *args):
39 |         return 1 - (1 - target) / blend
40 | 
41 |     @staticmethod
42 |     @blender
43 |     def linear_burn(target, blend, *args):
44 |         return target + blend - 1
45 | 
46 |     @staticmethod
47 |     @blender
48 |     def lighten(target, blend, *args):
49 |         return np.maximum(target, blend)
50 | 
51 |     @staticmethod
52 |     @blender
53 |     def screen(target, blend, *args):
54 |         return 1 - (1 - target) * (1 - blend)
55 | 
56 |     @staticmethod
57 |     @blender
58 |     def color_dodge(target, blend, *args):
59 |         return target / (1 - blend)
60 | 
61 |     @staticmethod
62 |     @blender
63 |     def linear_dodge(target, blend, *args):
64 |         return target + blend
65 | 
66 |     @staticmethod
67 |     @blender
68 |     def 
overlay(target, blend, *args): 69 | return (target > 0.5) * (1 - (2 - 2 * target) * (1 - blend)) + ( 70 | target <= 0.5 71 | ) * (2 * target * blend) 72 | 73 | @staticmethod 74 | @blender 75 | def soft_light(target, blend, *args): 76 | return (blend > 0.5) * (1 - (1 - target) * (1 - (blend - 0.5))) + ( 77 | blend <= 0.5 78 | ) * (target * (blend + 0.5)) 79 | 80 | @staticmethod 81 | @blender 82 | def hard_light(target, blend, *args): 83 | return (blend > 0.5) * (1 - (1 - target) * (2 - 2 * blend)) + (blend <= 0.5) * ( 84 | 2 * target * blend 85 | ) 86 | 87 | @staticmethod 88 | @blender 89 | def vivid_light(target, blend, *args): 90 | return (blend > 0.5) * (1 - (1 - target) / (2 * blend - 1)) + (blend <= 0.5) * ( 91 | target / (1 - 2 * blend) 92 | ) 93 | 94 | @staticmethod 95 | @blender 96 | def linear_light(target, blend, *args): 97 | return (blend > 0.5) * (target + 2 * (blend - 0.5)) + (blend <= 0.5) * ( 98 | target + 2 * blend 99 | ) 100 | 101 | @staticmethod 102 | @blender 103 | def pin_light(target, blend, *args): 104 | return (blend > 0.5) * np.maximum(target, 2 * (blend - 0.5)) + ( 105 | blend <= 0.5 106 | ) * np.minimum(target, 2 * blend) 107 | 108 | @staticmethod 109 | @blender 110 | def difference(target, blend, *args): 111 | return np.abs(target - blend) 112 | 113 | @staticmethod 114 | @blender 115 | def exclusion(target, blend, *args): 116 | return 0.5 - 2 * (target - 0.5) * (blend - 0.5) 117 | 118 | 119 | blend_methods = [i for i in Blend.__dict__.keys() if i[0] != "_" and i != "method"] 120 | 121 | 122 | def run(layers): 123 | def blend(bg, *args): 124 | assert len(args) % 5 == 0 125 | chunks = [args[i * layers : i * layers + layers] for i in range(5)] 126 | h, w, c = np.array([i["image"] for i in chunks[-1] if i is not None][0]).shape 127 | base_img = np.array( 128 | Image.new(mode="RGB", size=(w, h), color=ImageColor.getcolor(bg, "RGB")) 129 | ) 130 | base_img = base_img.astype(np.float64) / 255 131 | 132 | for alpha, mask_blur, mask_str, mode, img in reversed(list(zip(*chunks))): 133 | if img is None or img["image"] is None: 134 | continue 135 | img_now = img["image"].convert('RGB').resize((w, h)) 136 | mask = img["mask"].convert('L') 137 | 138 | img_now = np.array(img_now).astype(np.float64) / 255 139 | mask = mask.resize((w, h)).filter(ImageFilter.GaussianBlur(mask_blur)) 140 | mask = np.expand_dims(np.array(mask) * mask_str / 255, 2) 141 | 142 | img_now = Blend.normal(base_img, img_now, mask) 143 | base_img = Blend.method(mode)(img_now, base_img, alpha) 144 | base_img *= 255 145 | base_img = np.clip(base_img, 0, 255) 146 | 147 | return Image.fromarray(base_img.astype(np.uint8), mode="RGB") 148 | 149 | return blend 150 | -------------------------------------------------------------------------------- /hakuimg/blur.py: -------------------------------------------------------------------------------- 1 | from PIL import Image, ImageFilter 2 | 3 | 4 | def run(img, img_blur): 5 | blur = ImageFilter.GaussianBlur(img_blur) 6 | return img.filter(blur) 7 | -------------------------------------------------------------------------------- /hakuimg/chromatic/__init__.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | 3 | from .kromo import add_chromatic 4 | 5 | 6 | def run(pil_img, strength, blur=False): 7 | if strength <= 0: 8 | return pil_img 9 | 10 | img = pil_img 11 | 12 | if img.size[0] % 2 == 0 or img.size[1] % 2 == 0: 13 | if img.size[0] % 2 == 0: 14 | img = img.crop((0, 0, img.size[0] - 1, img.size[1])) 15 | 
img.load()
16 |         if img.size[1] % 2 == 0:
17 |             img = img.crop((0, 0, img.size[0], img.size[1] - 1))
18 |             img.load()
19 | 
20 |     img = add_chromatic(img, strength + 0.12, not blur)
21 |     return img
22 | 
--------------------------------------------------------------------------------
/hakuimg/chromatic/kromo.py:
--------------------------------------------------------------------------------
1 | """Kromo V0.3
2 | === Author ===
3 | Yoonsik Park
4 | park.yoonsik@icloud.com
5 | === Description ===
6 | Use the command line interface to add chromatic aberration and
7 | lens blur to your images, or import some of the functions below.
8 | """
9 | 
10 | from PIL import Image
11 | import numpy as np
12 | import math
13 | import time
14 | from typing import List
15 | import os
16 | 
17 | 
18 | def cartesian_to_polar(data: np.ndarray) -> np.ndarray:
19 |     """Returns the polar form of <data>"""
20 |     width = data.shape[1]
21 |     height = data.shape[0]
22 |     assert width > 2
23 |     assert height > 2
24 |     assert width % 2 == 1
25 |     assert height % 2 == 1
26 |     perimeter = 2 * (width + height - 2)
27 |     halfdiag = math.ceil(((width**2 + height**2) ** 0.5) / 2)
28 |     halfw = width // 2
29 |     halfh = height // 2
30 |     ret = np.zeros((halfdiag, perimeter, 3))
31 | 
32 |     # Don't want to deal with divide by zero errors...
33 |     ret[0 : (halfw + 1), halfh] = data[halfh, halfw::-1]
34 |     ret[0 : (halfw + 1), height + width - 2 + halfh] = data[
35 |         halfh, halfw : (halfw * 2 + 1)
36 |     ]
37 |     ret[0 : (halfh + 1), height - 1 + halfw] = data[halfh : (halfh * 2 + 1), halfw]
38 |     ret[0 : (halfh + 1), perimeter - halfw] = data[halfh::-1, halfw]
39 | 
40 |     # Divide the image into 8 triangles, and use the same calculation on
41 |     # 4 triangles at a time. This is possible due to symmetry.
42 |     # This section is also responsible for the corner pixels
43 |     for i in range(0, halfh):
44 |         slope = (halfh - i) / (halfw)
45 |         diagx = ((halfdiag**2) / (slope**2 + 1)) ** 0.5
46 |         unit_xstep = diagx / (halfdiag - 1)
47 |         unit_ystep = diagx * slope / (halfdiag - 1)
48 |         for row in range(halfdiag):
49 |             ystep = round(row * unit_ystep)
50 |             xstep = round(row * unit_xstep)
51 |             if (halfh >= ystep) and halfw >= xstep:
52 |                 ret[row, i] = data[halfh - ystep, halfw - xstep]
53 |                 ret[row, height - 1 - i] = data[halfh + ystep, halfw - xstep]
54 |                 ret[row, height + width - 2 + i] = data[halfh + ystep, halfw + xstep]
55 |                 ret[row, height + width + height - 3 - i] = data[
56 |                     halfh - ystep, halfw + xstep
57 |                 ]
58 |             else:
59 |                 break
60 | 
61 |     # Remaining 4 triangles
62 |     for j in range(1, halfw):
63 |         slope = (halfh) / (halfw - j)
64 |         diagx = ((halfdiag**2) / (slope**2 + 1)) ** 0.5
65 |         unit_xstep = diagx / (halfdiag - 1)
66 |         unit_ystep = diagx * slope / (halfdiag - 1)
67 |         for row in range(halfdiag):
68 |             ystep = round(row * unit_ystep)
69 |             xstep = round(row * unit_xstep)
70 |             if halfw >= xstep and halfh >= ystep:
71 |                 ret[row, height - 1 + j] = data[halfh + ystep, halfw - xstep]
72 |                 ret[row, height + width - 2 - j] = data[halfh + ystep, halfw + xstep]
73 |                 ret[row, height + width + height - 3 + j] = data[
74 |                     halfh - ystep, halfw + xstep
75 |                 ]
76 |                 ret[row, perimeter - j] = data[halfh - ystep, halfw - xstep]
77 |             else:
78 |                 break
79 |     return ret
80 | 
81 | 
82 | def polar_to_cartesian(data: np.ndarray, width: int, height: int) -> np.ndarray:
83 |     """Returns the cartesian form of <data>.
84 | 
85 |     <width> is the original width of the cartesian image
86 |     <height> is the original height of the cartesian image
87 |     """
88 |     assert width > 2
89 |     assert height > 2
90 |     assert width % 2 == 1
91 |     assert height % 2 == 1
92 |     perimeter = 2 * (width + height - 2)
93 |     halfdiag = math.ceil(((width**2 + height**2) ** 0.5) / 2)
94 |     halfw = width // 2
95 |     halfh = height // 2
96 |     ret = np.zeros((height, width, 3))
97 | 
98 |     def div0():
99 |         # Don't want to deal with divide by zero errors...
100 |         ret[halfh, halfw::-1] = data[0 : (halfw + 1), halfh]
101 |         ret[halfh, halfw : (halfw * 2 + 1)] = data[
102 |             0 : (halfw + 1), height + width - 2 + halfh
103 |         ]
104 |         ret[halfh : (halfh * 2 + 1), halfw] = data[0 : (halfh + 1), height - 1 + halfw]
105 |         ret[halfh::-1, halfw] = data[0 : (halfh + 1), perimeter - halfw]
106 | 
107 |     div0()
108 | 
109 |     # Same code as above, except the order of the assignments is switched
110 |     # Code blocks are split up for easier profiling
111 |     def part1():
112 |         for i in range(0, halfh):
113 |             slope = (halfh - i) / (halfw)
114 |             diagx = ((halfdiag**2) / (slope**2 + 1)) ** 0.5
115 |             unit_xstep = diagx / (halfdiag - 1)
116 |             unit_ystep = diagx * slope / (halfdiag - 1)
117 |             for row in range(halfdiag):
118 |                 ystep = round(row * unit_ystep)
119 |                 xstep = round(row * unit_xstep)
120 |                 if (halfh >= ystep) and halfw >= xstep:
121 |                     ret[halfh - ystep, halfw - xstep] = data[row, i]
122 |                     ret[halfh + ystep, halfw - xstep] = data[row, height - 1 - i]
123 |                     ret[halfh + ystep, halfw + xstep] = data[
124 |                         row, height + width - 2 + i
125 |                     ]
126 |                     ret[halfh - ystep, halfw + xstep] = data[
127 |                         row, height + width + height - 3 - i
128 |                     ]
129 |                 else:
130 |                     break
131 | 
132 |     part1()
133 | 
134 |     def part2():
135 |         for j in range(1, halfw):
136 |             slope = (halfh) / (halfw - j)
137 |             diagx = ((halfdiag**2) / (slope**2 + 1)) ** 0.5
138 |             unit_xstep = diagx / (halfdiag - 1)
139 |             unit_ystep = diagx * slope / (halfdiag - 1)
140 |             for row in range(halfdiag):
141 |                 ystep = round(row * unit_ystep)
142 |                 xstep = round(row * unit_xstep)
143 |                 if halfw >= xstep and halfh >= ystep:
144 |                     ret[halfh + ystep, halfw - xstep] = data[row, height - 1 + j]
145 |                     ret[halfh + ystep, halfw + xstep] = data[
146 |                         row, height + width - 2 - j
147 |                     ]
148 |                     ret[halfh - ystep, halfw + xstep] = data[
149 |                         row, height + width + height - 3 + j
150 |                     ]
151 |                     ret[halfh - ystep, halfw - xstep] = data[row, perimeter - j]
152 |                 else:
153 |                     break
154 | 
155 |     part2()
156 | 
157 |     # Repairs black/missing pixels in the transformed image
158 |     def set_zeros():
159 |         zero_mask = ret[1:-1, 1:-1] == 0
160 |         ret[1:-1, 1:-1] = np.where(
161 |             zero_mask, (ret[:-2, 1:-1] + ret[2:, 1:-1]) / 2, ret[1:-1, 1:-1]
162 |         )
163 | 
164 |     set_zeros()
165 | 
166 |     return ret
167 | 
168 | 
169 | def get_gauss(n: int) -> List[float]:
170 |     """Return the Gaussian 1D kernel for a diameter of <n>
171 |     Referenced from: https://stackoverflow.com/questions/11209115/
172 |     """
173 |     sigma = 0.3 * (n / 2 - 1) + 0.8
174 |     r = range(-int(n / 2), int(n / 2) + 1)
175 |     new_sum = sum(
176 |         [
177 |             1
178 |             / (sigma * math.sqrt(2 * math.pi))
179 |             * math.exp(-float(x) ** 2 / (2 * sigma**2))
180 |             for x in r
181 |         ]
182 |     )
183 |     # Ensure that the gaussian array adds up to one
184 |     return [
185 |         (
186 |             1
187 |             / (sigma * math.sqrt(2 * math.pi))
188 |             * math.exp(-float(x) ** 2 / (2 * sigma**2))
189 |         )
190 |         / new_sum
191 |         for x in r
192 |     ]
193 | 
194 | 
195 | def vertical_gaussian(data: np.ndarray, n: int) -> np.ndarray:
196 |     """Performs a Gaussian blur in the vertical direction on <data>. Returns
197 |     the resulting numpy array.
198 | 
199 |     <n> is the radius, where 1 pixel radius indicates no blur
200 |     """
201 |     padding = n - 1
202 |     width = data.shape[1]
203 |     height = data.shape[0]
204 |     padded_data = np.zeros((height + padding * 2, width))
205 |     padded_data[padding:-padding, :] = data
206 |     ret = np.zeros((height, width))
207 |     kernel = None
208 |     old_radius = -1
209 |     for i in range(height):
210 |         radius = round(i * padding / (height - 1)) + 1
211 |         # Recreate new kernel only if we have to
212 |         if radius != old_radius:
213 |             old_radius = radius
214 |             kernel = np.tile(get_gauss(1 + 2 * (radius - 1)), (width, 1)).transpose()
215 |         ret[i, :] = np.sum(
216 |             np.multiply(
217 |                 padded_data[padding + i - radius + 1 : padding + i + radius, :], kernel
218 |             ),
219 |             axis=0,
220 |         )
221 |     return ret
222 | 
223 | 
224 | def add_chromatic(im, strength: float = 1, no_blur: bool = False):
225 |     """Splits <im> into red, green, and blue channels, then performs a
226 |     1D Vertical Gaussian blur through a polar representation. Finally,
227 |     it expands the green and blue channels slightly.
228 | 
229 |     <strength> determines the amount of expansion and blurring.
230 |     <no_blur> disables the radial blur
231 |     """
232 |     r, g, b = im.split()
233 |     rdata = np.asarray(r)
234 |     gdata = np.asarray(g)
235 |     bdata = np.asarray(b)
236 |     if no_blur:
237 |         # channels remain unchanged
238 |         rfinal = r
239 |         gfinal = g
240 |         bfinal = b
241 |     else:
242 |         poles = cartesian_to_polar(np.stack([rdata, gdata, bdata], axis=-1))
243 |         rpolar, gpolar, bpolar = (
244 |             poles[:, :, 0],
245 |             poles[:, :, 1],
246 |             poles[:, :, 2],
247 |         )
248 | 
249 |         bluramount = (im.size[0] + im.size[1] - 2) / 100 * strength
250 |         if round(bluramount) > 0:
251 |             rpolar = vertical_gaussian(rpolar, round(bluramount))
252 |             gpolar = vertical_gaussian(gpolar, round(bluramount * 1.2))
253 |             bpolar = vertical_gaussian(bpolar, round(bluramount * 1.4))
254 | 
255 |         rgbpolar = np.stack([rpolar, gpolar, bpolar], axis=-1)
256 |         cartes = polar_to_cartesian(
257 |             rgbpolar, width=rdata.shape[1], height=rdata.shape[0]
258 |         )
259 |         rcartes, gcartes, bcartes = (
260 |             cartes[:, :, 0],
261 |             cartes[:, :, 1],
262 |             cartes[:, :, 2],
263 |         )
264 | 
265 |         rfinal = Image.fromarray(np.uint8(rcartes), "L")
266 |         gfinal = Image.fromarray(np.uint8(gcartes), "L")
267 |         bfinal = Image.fromarray(np.uint8(bcartes), "L")
268 | 
269 |     # enlarge the green and blue channels slightly, blue being the most enlarged
270 |     gfinal = gfinal.resize(
271 |         (
272 |             round((1 + 0.018 * strength) * rdata.shape[1]),
273 |             round((1 + 0.018 * strength) * rdata.shape[0]),
274 |         ),
275 |         Image.LANCZOS,
276 |     )
277 |     bfinal = bfinal.resize(
278 |         (
279 |             round((1 + 0.044 * strength) * rdata.shape[1]),
280 |             round((1 + 0.044 * strength) * rdata.shape[0]),
281 |         ),
282 |         Image.LANCZOS,
283 |     )
284 | 
285 |     rwidth, rheight = rfinal.size
286 |     gwidth, gheight = gfinal.size
287 |     bwidth, bheight = bfinal.size
288 |     rhdiff = (bheight - rheight) // 2
289 |     rwdiff = (bwidth - rwidth) // 2
290 |     ghdiff = (bheight - gheight) // 2
291 |     gwdiff = (bwidth - gwidth) // 2
292 | 
293 |     # Centre the channels
294 |     im = Image.merge(
295 |         "RGB",
296 |         (
297 |             rfinal.crop((-rwdiff, -rhdiff, bwidth - rwdiff, bheight - rhdiff)),
298 |             gfinal.crop((-gwdiff, -ghdiff, bwidth - gwdiff, bheight - ghdiff)),
299 |             bfinal,
300 |         ),
301 |     )
302 | 
303 |     # Crop the image to the original image dimensions
304 |     return im.crop((rwdiff, rhdiff, rwidth + rwdiff, rheight + rhdiff))
305 | 
306 | 
307 | def add_jitter(im, pixels: int = 1):
308 |     """Adds a small pixel offset to the Red and Blue channels of <im>,
309 |     resulting in a classic chromatic fringe effect. Very cheap computationally.
310 | 
311 |     <pixels> how many pixels to offset the Red and Blue channels
312 |     """
313 |     if pixels == 0:
314 |         return im.copy()
315 |     r, g, b = im.split()
316 |     rwidth, rheight = r.size
317 |     gwidth, gheight = g.size
318 |     bwidth, bheight = b.size
319 |     im = Image.merge(
320 |         "RGB",
321 |         (
322 |             r.crop((pixels, 0, rwidth + pixels, rheight)),
323 |             g.crop((0, 0, gwidth, gheight)),
324 |             b.crop((-pixels, 0, bwidth - pixels, bheight)),
325 |         ),
326 |     )
327 |     return im
328 | 
329 | 
330 | def blend_images(im, og_im, alpha: float = 1, strength: float = 1):
331 |     """Blends the original image <og_im> as an overlay over <im>, with
332 |     an alpha value of <alpha>. Resizes <og_im> with respect to <strength>,
333 |     before adding it as an overlay.
334 |     """
335 |     og_im.putalpha(int(255 * alpha))
336 |     og_im = og_im.resize(
337 |         (
338 |             round((1 + 0.018 * strength) * og_im.size[0]),
339 |             round((1 + 0.018 * strength) * og_im.size[1]),
340 |         ),
341 |         Image.LANCZOS,
342 |     )
343 | 
344 |     hdiff = (og_im.size[1] - im.size[1]) // 2
345 |     wdiff = (og_im.size[0] - im.size[0]) // 2
346 |     og_im = og_im.crop((wdiff, hdiff, wdiff + im.size[0], hdiff + im.size[1]))
347 |     im = im.convert("RGBA")
348 | 
349 |     final_im = Image.new("RGBA", im.size)
350 |     final_im = Image.alpha_composite(final_im, im)
351 |     final_im = Image.alpha_composite(final_im, og_im)
352 |     final_im = final_im.convert("RGB")
353 |     return final_im
354 | 
355 | 
356 | if __name__ == "__main__":
357 |     import argparse
358 | 
359 |     parser = argparse.ArgumentParser(
360 |         description="Apply chromatic aberration and lens blur to images"
361 |     )
362 |     parser.add_argument("filename", help="input filename")
363 |     parser.add_argument(
364 |         "-s",
365 |         "--strength",
366 |         type=float,
367 |         default=1.0,
368 |         help="set blur/aberration strength, defaults to 1.0",
369 |     )
370 |     parser.add_argument(
371 |         "-j",
372 |         "--jitter",
373 |         type=int,
374 |         default=0,
375 |         help="set color channel offset pixels, defaults to 0",
376 |     )
377 |     parser.add_argument(
378 |         "-y",
379 |         "--overlay",
380 |         type=float,
381 |         default=0.0,
382 |         help="alpha of original image overlay, defaults to 0.0",
383 |     )
384 |     parser.add_argument(
385 |         "-n", "--noblur", help="disable radial blur", action="store_true"
386 |     )
387 |     parser.add_argument(
388 |         "-o", "--out", help="write to OUTPUT (supports multiple formats)"
389 |     )
390 |     parser.add_argument(
391 |         "-v", "--verbose", help="print status messages", action="store_true"
392 |     )
393 |     args = parser.parse_args()
394 |     # Get Start Time
395 |     start = time.time()
396 |     ifile = args.filename
397 |     im = Image.open(ifile)
398 |     if args.verbose:
399 |         print("Original Image:", im.format, im.size, im.mode)
400 | 
401 |     if im.mode != "RGB":
402 |         if args.verbose:
403 |             print("Converting to RGB...")
404 |         im = im.convert("RGB")
405 | 
406 |     # Ensure width and height are odd numbers
407 |     if im.size[0] % 2 == 0 or im.size[1] % 2 == 0:
408 |         if args.verbose:
409 |             print("Dimensions must be odd numbers, cropping...")
410 |         if im.size[0] % 2 == 0:
411 |             im = im.crop((0, 0, im.size[0] - 1, im.size[1]))
412 |             im.load()
413 |         if im.size[1] % 2 == 0:
414 |             im = im.crop((0, 0, im.size[0], im.size[1] - 1))
415 |             im.load()
416 |         if args.verbose:
417 |             print("New Dimensions:", im.size)
418 | 
419 |     og_im = im.copy()
420 | 
421 |     im = add_chromatic(im, strength=args.strength, no_blur=args.noblur)
422 | 
423 |     # Add Jitter Effect
424 |     im = add_jitter(im, 
pixels=args.jitter) 425 | 426 | im = blend_images(im, og_im, alpha=args.overlay, strength=args.strength) 427 | 428 | # Save Final Image 429 | if args.out == None: 430 | im.save(os.path.splitext(ifile)[0] + "_chromatic.jpg", quality=99) 431 | else: 432 | im.save(args.out, quality=99) 433 | # Get Finish Time 434 | end = time.time() 435 | if args.verbose: 436 | print("Completed in: " + "% 6.2f" % (end - start) + "s") 437 | -------------------------------------------------------------------------------- /hakuimg/color/__init__.py: -------------------------------------------------------------------------------- 1 | from PIL import Image, ImageEnhance 2 | import cv2 3 | import numpy as np 4 | 5 | from .exposure_offset import get_exposure_offset 6 | from .hdr import get_hdr 7 | from .noise import get_noise 8 | from .sharpness import get_sharpness 9 | from .vignette import get_vignette 10 | 11 | 12 | def run( 13 | img1, 14 | bright, 15 | contrast, 16 | sat, 17 | temp, 18 | hue, 19 | gamma, 20 | exposure_offset, 21 | hdr, 22 | noise, 23 | sharpness, 24 | vignette, 25 | ): 26 | bright /= 100 27 | contrast /= 100 28 | temp /= 100 29 | sat /= 100 30 | 31 | res = img1 32 | 33 | res = get_exposure_offset(res, exposure_offset, bright) 34 | res = get_hdr(res, hdr, img1) 35 | res = get_sharpness(res, sharpness) 36 | res = get_noise(res, noise) 37 | res = get_vignette(res, vignette) 38 | 39 | # brigtness 40 | # res = Image.fromarray(res) 41 | brightness = ImageEnhance.Brightness(res) 42 | res = brightness.enhance(1 + bright) 43 | 44 | # contrast 45 | cont = ImageEnhance.Contrast(res) 46 | res = cont.enhance(1 + contrast) 47 | res = np.array(res).astype(np.float32) 48 | 49 | # temp 50 | if temp > 0: 51 | res[:, :, 0] *= 1 + temp 52 | res[:, :, 1] *= 1 + temp * 0.4 53 | elif temp < 0: 54 | res[:, :, 2] *= 1 - temp 55 | res = np.clip(res, 0, 255) / 255 56 | 57 | res = np.clip(np.power(res, gamma), 0, 1) 58 | 59 | # saturation 60 | print(res.shape) 61 | sat_real = 1 + sat 62 | hls_img = cv2.cvtColor(res, cv2.COLOR_RGB2HLS) 63 | hls_img[:, :, 2] = np.clip(sat_real * hls_img[:, :, 2], 0, 1) 64 | res = cv2.cvtColor(hls_img, cv2.COLOR_HLS2RGB) * 255 65 | 66 | # hue 67 | hsv_img = cv2.cvtColor(res, cv2.COLOR_RGB2HSV) 68 | print(np.max(hsv_img[:, :, 0]), np.max(hsv_img[:, :, 1]), np.max(hsv_img[:, :, 2])) 69 | hsv_img[:, :, 0] = (hsv_img[:, :, 0] + hue) % 360 70 | 71 | res = cv2.cvtColor(hsv_img, cv2.COLOR_HSV2RGB) 72 | 73 | res = res.astype(np.uint8) 74 | res = Image.fromarray(res, mode="RGB") 75 | return res 76 | -------------------------------------------------------------------------------- /hakuimg/color/exposure_offset.py: -------------------------------------------------------------------------------- 1 | from PIL import ImageEnhance, Image 2 | import numpy as np 3 | 4 | 5 | def get_exposure_offset(img, value, brightness_value): 6 | if value <= 0: 7 | return img 8 | 9 | np_img = np.array(img).astype(float) + value * 75 10 | np_img = np.clip(np_img, 0, 255).astype(np.uint8) 11 | img = Image.fromarray(np_img) 12 | return ImageEnhance.Brightness(img).enhance((brightness_value + 1) - value / 4) 13 | -------------------------------------------------------------------------------- /hakuimg/color/hdr.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | from PIL import ImageFilter, ImageChops, Image, ImageOps, ImageEnhance 4 | 5 | from blendmodes.blend import blendLayers, BlendType 6 | 7 | 8 | def get_hdr(img, value, original_img): 9 | if value 
<= 0: 10 | return img 11 | 12 | blurred = img.filter(ImageFilter.GaussianBlur(radius=2.8)) 13 | difference = ImageChops.difference(img, blurred) 14 | sharp_edges = Image.blend(img, difference, 1) 15 | 16 | converted_original_img = ( 17 | np.array(original_img)[:, :, ::-1].copy().astype("float32") / 255.0 18 | ) 19 | converted_sharped = ( 20 | np.array(sharp_edges)[:, :, ::-1].copy().astype("float32") / 255.0 21 | ) 22 | 23 | color_dodge = converted_original_img / (1 - converted_sharped) 24 | converted_color_dodge = (255 * color_dodge).clip(0, 255).astype(np.uint8) 25 | 26 | temp_img = Image.fromarray(cv2.cvtColor(converted_color_dodge, cv2.COLOR_BGR2RGB)) 27 | inverted_color_dodge = ImageOps.invert(temp_img) 28 | black_white_color_dodge = ImageEnhance.Color(inverted_color_dodge).enhance(0) 29 | hue = blendLayers(temp_img, black_white_color_dodge, BlendType.HUE) 30 | hdr_image = blendLayers(hue, temp_img, BlendType.NORMAL, 0.7) 31 | 32 | return blendLayers(img, hdr_image, BlendType.NORMAL, value * 2).convert("RGB") 33 | -------------------------------------------------------------------------------- /hakuimg/color/noise.py: -------------------------------------------------------------------------------- 1 | from PIL import ImageChops, Image 2 | import numpy as np 3 | 4 | 5 | def get_noise(img, value): 6 | if value <= 0: 7 | return img 8 | 9 | noise = np.random.randint(0, value * 100, img.size, np.uint8) 10 | noise_img = Image.fromarray(noise, "L").resize(img.size).convert(img.mode) 11 | return ImageChops.add(img, noise_img) 12 | -------------------------------------------------------------------------------- /hakuimg/color/sharpness.py: -------------------------------------------------------------------------------- 1 | from PIL import ImageEnhance 2 | 3 | 4 | def get_sharpness(img, value): 5 | if value <= 0: 6 | return img 7 | 8 | return ImageEnhance.Sharpness(img).enhance((value + 1) * 1.5) 9 | -------------------------------------------------------------------------------- /hakuimg/color/vignette.py: -------------------------------------------------------------------------------- 1 | from PIL import ImageDraw, Image, ImageFilter 2 | 3 | 4 | def get_vignette(img, value): 5 | if value <= 0: 6 | return img 7 | 8 | width, height = img.size 9 | mask = Image.new("L", (width, height), 0) 10 | draw = ImageDraw.Draw(mask) 11 | padding = 100 - value * 100 12 | draw.ellipse((-padding, -padding, width + padding, height + padding), fill=255) 13 | mask = mask.filter(ImageFilter.GaussianBlur(radius=100)) 14 | return Image.composite(img, Image.new("RGB", img.size, "black"), mask) 15 | -------------------------------------------------------------------------------- /hakuimg/curve.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from typing import Any, Tuple, List, Union 4 | from numpy.typing import NDArray 5 | 6 | import cv2 7 | from PIL import Image 8 | import numpy as np 9 | from scipy import interpolate 10 | 11 | import matplotlib 12 | 13 | matplotlib.use("agg") 14 | from matplotlib import pyplot as plt 15 | 16 | plt.style.use("dark_background") 17 | 18 | 19 | def make_curve(x_in, y_in): 20 | assert len(x_in) == len(y_in) 21 | his = set([0, 255]) 22 | 23 | xs = [] 24 | ys = [] 25 | for x, y in sorted(zip(x_in, y_in)): 26 | if x not in his: 27 | xs.append(x) 28 | ys.append(y) 29 | his.add(x) 30 | 31 | if len(xs): 32 | spline = interpolate.make_interp_spline( 33 | [0, *xs, 255], [0, *ys, 255], 2 + (len(xs) > 1) 34 | ) 35 
|         return lambda x: np.clip(spline(x), 0, 255)
36 |     else:
37 |         return lambda x: x
38 | 
39 | 
40 | def make_plot(points):
41 |     xs, ys = points
42 |     curve = make_curve(xs, ys)
43 |     fig, ax = plt.subplots(1, 1)
44 | 
45 |     x = np.arange(0, 255, 1)
46 |     y = np.clip(curve(x), 0, 255)
47 | 
48 |     ax.set_xlim(0, 255)
49 |     ax.set_ylim(0, 255)
50 |     ax.plot([0, 255], [0, 255], "white")
51 |     ax.plot(x, y)
52 |     ax.plot([0, *sorted(xs), 255], [0, *sorted(ys), 255], "ro")
53 | 
54 |     fig.canvas.draw()
55 |     img = Image.frombytes(
56 |         "RGB", fig.canvas.get_width_height(), fig.canvas.tostring_rgb()
57 |     )
58 |     plt.close("all")
59 |     del fig, ax
60 |     return img
61 | 
62 | 
63 | def run(points: int):
64 |     def curve(img: Image, *args: List[int]):
65 |         nonlocal points
66 |         # all, r, g, b
67 |         point = points * 2
68 |         all, r, g, b = (
69 |             (k[::2], k[1::2])
70 |             for i in range(4)
71 |             for k in [args[i * point : i * point + point]]
72 |         )
73 | 
74 |         img = np.array(img)
75 |         img[:, :, 0] = make_curve(*r)(img[:, :, 0])
76 |         img[:, :, 1] = make_curve(*g)(img[:, :, 1])
77 |         img[:, :, 2] = make_curve(*b)(img[:, :, 2])
78 |         img = make_curve(*all)(img)
79 |         return img.astype(np.uint8)
80 | 
81 |     return curve
82 | 
83 | 
84 | def curve_img(*all_points):
85 |     return make_plot((all_points[::2], all_points[1::2]))
86 | 
87 | 
88 | if __name__ == "__main__":
89 |     from matplotlib import pyplot as plt
90 |     from time import time_ns
91 | 
92 |     plt.style.use("dark_background")
93 | 
94 |     t0 = time_ns()
95 |     fig, ax = plt.subplots(1, 1)
96 | 
97 |     xs = [50, 125, 200]
98 |     ys = [40, 150, 180]
99 | 
100 |     t2 = time_ns()
101 |     curve = make_curve(xs, ys)
102 |     x = np.arange(0, 255, 0.00001)
103 |     y = np.clip(curve(x), 0, 255)
104 |     t3 = time_ns()
105 | 
106 |     ax.set_xlim(0, 255)
107 |     ax.set_ylim(0, 255)
108 |     ax.plot([0, 255], [0, 255], "white")
109 |     ax.plot(x[::10000], y[::10000])
110 |     ax.plot([0, *xs, 255], [0, *ys, 255], "ro")
111 | 
112 |     fig.canvas.draw()
113 |     img = Image.frombytes(
114 |         "RGB", fig.canvas.get_width_height(), fig.canvas.tostring_rgb()
115 |     )
116 |     plt.close(fig)
117 | 
118 |     t1 = time_ns()
119 |     print((t1 - t0) / 1e6)
120 |     print((t3 - t2) / 1e6, x.size)
121 | 
122 |     img.show()
123 | 
--------------------------------------------------------------------------------
/hakuimg/custom_exif.py:
--------------------------------------------------------------------------------
1 | from PIL import Image
2 | 
3 | 
4 | def run(img, text):
5 |     if not text:
6 |         return img
7 | 
8 |     img.info["parameters"] = text
9 |     return img
10 | 
--------------------------------------------------------------------------------
/hakuimg/dither.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | 
3 | from itertools import product
4 | 
5 | from PIL import Image
6 | import numpy as np
7 | 
8 | 
9 | def dithering(img: Image, find_new_color):
10 |     img = np.array(img)
11 |     d_h, d_w, c = img.shape
12 |     new_res = np.array(img, dtype=np.float32) / 255
13 |     for i, j in product(range(d_h), range(d_w)):
14 |         old_val = new_res[i, j].copy()
15 |         new_val = find_new_color(old_val)
16 |         new_res[i, j] = new_val
17 |         err = old_val - new_val
18 | 
19 |         if j < d_w - 1:
20 |             new_res[i, j + 1] += err * 7 / 16
21 |         if i < d_h - 1:
22 |             new_res[i + 1, j] += err * 5 / 16
23 |             if j > 0:
24 |                 new_res[i + 1, j - 1] += err * 3 / 16
25 |             if j < d_w - 1:
26 |                 new_res[i + 1, j + 1] += err * 1 / 16
27 |     return np.clip(new_res / np.max(new_res, axis=(0, 1)) * 255, 0, 255)
28 | 
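A minimal usage sketch for the dithering helper above (an editorial example, not part of the original file; the input path and the 1-bit-per-channel palette function are assumptions):

    from PIL import Image
    import numpy as np
    from hakuimg.dither import dithering

    src = Image.open("input.png").convert("RGB")  # placeholder path
    # Quantize each normalized channel to {0, 1}; dithering() diffuses the
    # rounding error to neighboring pixels with Floyd-Steinberg weights.
    out = dithering(src, lambda px: np.round(px))
    Image.fromarray(out.astype(np.uint8)).save("dithered.png")

Any callable that maps a float RGB pixel in [0, 1] to a nearby representable color works as find_new_color; pixel.py below uses a KDTree over K-means centers for the same purpose.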
-------------------------------------------------------------------------------- /hakuimg/flip.py: -------------------------------------------------------------------------------- 1 | import enum 2 | 3 | import numpy as np 4 | from PIL import Image 5 | 6 | 7 | class Axis(str, enum.Enum): 8 | VERTICAL = "vertical" 9 | HORIZONTAL = "horizontal" 10 | 11 | 12 | def run(pil_img, axis): 13 | np_img = np.array(pil_img) 14 | if axis == Axis.VERTICAL: 15 | np_img = np.flipud(np_img) 16 | elif axis == Axis.HORIZONTAL: 17 | np_img = np.fliplr(np_img) 18 | 19 | return Image.fromarray(np_img) 20 | -------------------------------------------------------------------------------- /hakuimg/image_preprocess.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | from PIL import Image 4 | 5 | 6 | def tensor_to_pil(image: torch.Tensor): 7 | return Image.fromarray( 8 | np.clip(255.0 * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) 9 | ) 10 | 11 | 12 | def pil_to_tensor(image: Image): 13 | return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0) 14 | 15 | 16 | def image_preprocess(img: torch.Tensor, device: str): 17 | use_channel_last = False 18 | if img.ndim == 3: 19 | img = img.unsqueeze(0) 20 | if img.size(3) <= 4: 21 | img = img.permute(0, 3, 1, 2) 22 | use_channel_last = True 23 | if img.size(1) == 4: 24 | img = img[:, :3] 25 | org_device = img.device 26 | if device != "default": 27 | img = img.to(device) 28 | return img, use_channel_last, org_device 29 | -------------------------------------------------------------------------------- /hakuimg/lens_distortion.py: -------------------------------------------------------------------------------- 1 | import enum 2 | 3 | import cv2 4 | import numpy as np 5 | from PIL import Image 6 | 7 | 8 | def run(pil_img, k1, k2): 9 | np_img = np.array(pil_img) 10 | height, width = np_img.shape[:2] 11 | focal_length = width 12 | center_x = width / 2 13 | center_y = height / 2 14 | 15 | K = np.array( 16 | [[focal_length, 0, center_x], [0, focal_length, center_y], [0, 0, 1]], 17 | dtype=np.float64, 18 | ) 19 | D = np.array([k1, k2, 0, 0], dtype=np.float64) 20 | img = cv2.fisheye.undistortImage(np_img, K, D, Knew=K) 21 | 22 | return Image.fromarray(img) 23 | -------------------------------------------------------------------------------- /hakuimg/neon.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | from PIL import Image 3 | import numpy as np 4 | import scipy as sp 5 | 6 | from hakuimg.blend import Blend 7 | 8 | 9 | def run(pil_img, blur, strength, mode="BS"): 10 | img = np.array(pil_img) 11 | img = img / 255 12 | 13 | if mode == "BS": 14 | img_blur = cv2.GaussianBlur(img, (0, 0), blur) 15 | img_glow = Blend.screen(img_blur, img, strength) 16 | elif mode == "BMBL": 17 | img_blur = cv2.GaussianBlur(img, (0, 0), blur) 18 | img_mul = Blend.multiply(img_blur, img) 19 | img_mul_blur = cv2.GaussianBlur(img_mul, (0, 0), blur) 20 | img_glow = Blend.lighten(img_mul_blur, img, strength) 21 | else: 22 | raise NotImplementedError 23 | 24 | return (img_glow * 255).astype(np.uint8) 25 | -------------------------------------------------------------------------------- /hakuimg/outline_expansion.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | from .image_preprocess import image_preprocess, pil_to_tensor, tensor_to_pil 3 | from pixeloe.torch.outline import 
outline_expansion 4 | 5 | 6 | def run( 7 | img: Image, 8 | pixel_size: int, 9 | thickness: int, 10 | device: str, 11 | ): 12 | img = pil_to_tensor(img) 13 | img, use_channel_last, org_device = image_preprocess(img, device) 14 | oe_image, _ = outline_expansion(img, thickness, thickness, pixel_size) 15 | oe_image = oe_image.to(org_device) 16 | if use_channel_last: 17 | oe_image = oe_image.permute(0, 2, 3, 1) 18 | oe_image = tensor_to_pil(oe_image) 19 | return oe_image 20 | -------------------------------------------------------------------------------- /hakuimg/pixel.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from typing import Any, Tuple, List, Union 4 | from numpy.typing import NDArray 5 | 6 | import cv2 7 | from PIL import Image 8 | import numpy as np 9 | import scipy as sp 10 | 11 | from hakuimg.dither import dithering 12 | 13 | 14 | INFLATE_FILTER = [ 15 | None, 16 | np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], np.uint8), 17 | np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]], np.uint8), 18 | np.array( 19 | [ 20 | [0, 0, 1, 0, 0], 21 | [0, 1, 1, 1, 0], 22 | [1, 1, 1, 1, 1], 23 | [0, 1, 1, 1, 0], 24 | [0, 0, 1, 0, 0], 25 | ], 26 | np.uint8, 27 | ), 28 | np.array( 29 | [ 30 | [1, 1, 1, 1, 1], 31 | [1, 1, 1, 1, 1], 32 | [1, 1, 1, 1, 1], 33 | [1, 1, 1, 1, 1], 34 | [1, 1, 1, 1, 1], 35 | ], 36 | np.uint8, 37 | ), 38 | np.ones((7, 7), np.uint8), 39 | np.ones((9, 9), np.uint8), 40 | np.ones((11, 11), np.uint8), 41 | np.ones((13, 13), np.uint8), 42 | np.ones((15, 15), np.uint8), 43 | np.ones((17, 17), np.uint8), 44 | ] 45 | 46 | 47 | def pil_imgread_img_as_array(img) -> NDArray[Any]: 48 | """Convert image to RGBA and read to ndarray""" 49 | img = Image.fromarray(img) 50 | img = img.convert("RGBA") 51 | img_arr = np.asarray(img) 52 | return img_arr 53 | 54 | 55 | def preprocess( 56 | img: NDArray[Any], 57 | blur: int = 0, 58 | erode: int = 0, 59 | ) -> NDArray[Any]: 60 | """ 61 | Process for 62 | * outline inflation (erode) 63 | * smoothing (blur) 64 | * saturation 65 | * contrast 66 | """ 67 | # outline process 68 | if erode: 69 | img = cv2.erode( 70 | img, 71 | INFLATE_FILTER[erode], 72 | iterations=1, 73 | ) 74 | 75 | # blur process 76 | if blur: 77 | img = cv2.bilateralFilter(img, 15, blur * 20, 20) 78 | img = img.astype(np.float32) 79 | return img 80 | 81 | 82 | def pixelize( 83 | img: Image, 84 | k: int, 85 | c: int, 86 | d_w: int, 87 | d_h: int, 88 | o_w: int, 89 | o_h: int, 90 | precise: int, 91 | mode: str = "dithering", 92 | resize: bool = True, 93 | ) -> Tuple[NDArray[Any], NDArray[Any]]: 94 | """ 95 | Use down scale and up scale to make pixel image. 96 | 97 | And use k-means to confine the num of colors. 
98 | """ 99 | img = cv2.resize(img, (d_w, d_h), interpolation=cv2.INTER_NEAREST) 100 | 101 | # reshape to 1-dim array(for every color) for k-means 102 | # use k-means to abstract the colors to use 103 | if "kmeans" in mode: 104 | criteria = ( 105 | cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 106 | precise * 5, 107 | 0.01, 108 | ) 109 | img_cp = img.reshape(-1, c) 110 | _, label, center = cv2.kmeans( 111 | img_cp, k, None, criteria, 1, cv2.KMEANS_PP_CENTERS 112 | ) 113 | 114 | if "dithering" in mode: 115 | center /= 255 116 | kdt = sp.spatial.KDTree(center) 117 | 118 | def find_center(px): 119 | return center[kdt.query(px)[1]] 120 | 121 | result = dithering(img, find_center) 122 | else: 123 | result = center[label.flatten()].reshape(*img.shape) 124 | 125 | elif mode == "dithering": 126 | result = dithering(img, lambda px: np.round(px * (k - 1)) / (k - 1)) 127 | else: 128 | raise NotImplementedError("Unknown Method") 129 | 130 | if resize: 131 | result = cv2.resize(result, (o_w, o_h), interpolation=cv2.INTER_NEAREST) 132 | return result.astype(np.uint8) 133 | 134 | 135 | def run( 136 | src: Image.Image, 137 | k: int = 3, 138 | scale: int = 2, 139 | blur: int = 0, 140 | erode: int = 0, 141 | mode: str = "kmeans", 142 | precise: int = 10, 143 | resize: bool = True, 144 | ) -> Tuple[Image.Image, List[List[Union[str, float]]]]: 145 | # print('Start process.') 146 | # print('Read raw image... ', end='', flush=True) 147 | img = np.asarray(src.convert('RGBA')) 148 | 149 | # convert color space 150 | alpha_channel = img[:, :, 3] 151 | img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGB) 152 | h, w, c = img.shape 153 | d_h = h // scale 154 | d_w = w // scale 155 | o_h = h 156 | o_w = w 157 | # print('done!') 158 | 159 | # print('Image preprocess... ', end='', flush=True) 160 | # preprocess(erode, blur, saturation, contrast) 161 | img = preprocess(img, blur, erode) 162 | # print('done!') 163 | 164 | # print('Pixelize... ', end='', flush=True) 165 | # pixelize(using k-means) 166 | result = pixelize(img, k, c, d_w, d_h, o_w, o_h, precise, mode, resize) 167 | # print('done!') 168 | 169 | # print('Process output image... 
', end='', flush=True) 170 | # add alpha channel 171 | a = cv2.resize(alpha_channel, (d_w, d_h), interpolation=cv2.INTER_NEAREST) 172 | if resize: 173 | a = cv2.resize(a, (o_w, o_h), interpolation=cv2.INTER_NEAREST) 174 | a[a != 0] = 255 175 | if 0 not in a: 176 | a[0, 0] = 0 177 | r, g, b = cv2.split(result) 178 | result = cv2.merge((r, g, b, a)) 179 | 180 | # for saving to png 181 | result = cv2.cvtColor(result, cv2.COLOR_RGBA2BGRA) 182 | # print('done!') 183 | 184 | return Image.fromarray(result) 185 | -------------------------------------------------------------------------------- /hakuimg/pixeloe.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | from pixeloe.torch.pixelize import pixelize 3 | from .image_preprocess import image_preprocess, pil_to_tensor, tensor_to_pil 4 | 5 | 6 | def run( 7 | img: Image, 8 | pixel_size: int, 9 | thickness: int, 10 | num_colors: int, 11 | mode: str, 12 | quant_mode: str, 13 | dither_mode: str, 14 | device: str, 15 | color_quant: bool, 16 | no_post_upscale: bool, 17 | ): 18 | img = pil_to_tensor(img) 19 | img, use_channel_last, org_device = image_preprocess(img, device) 20 | result, _, _ = pixelize( 21 | img, 22 | pixel_size, 23 | thickness, 24 | mode, 25 | do_color_match=True, 26 | do_quant=color_quant, 27 | num_colors=num_colors, 28 | quant_mode=quant_mode, 29 | dither_mode=dither_mode, 30 | no_post_upscale=no_post_upscale, 31 | return_intermediate=True, 32 | ) 33 | result = result.to(org_device) 34 | if use_channel_last: 35 | result = result.permute(0, 2, 3, 1) 36 | result = tensor_to_pil(result) 37 | return result 38 | -------------------------------------------------------------------------------- /hakuimg/sketch.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | import cv2 3 | import numpy as np 4 | 5 | 6 | def fix_float(val, eps=1e-3): 7 | return float(val) - eps 8 | 9 | 10 | def gaussian(img, kernel, sigma): 11 | return cv2.GaussianBlur(img, (kernel, kernel), sigma) 12 | 13 | 14 | def dog_filter(img, kernel=0, sigma=1.4, k_sigma=1.6, gamma=1): 15 | g1 = gaussian(img, kernel, sigma) 16 | g2 = gaussian(img, kernel, sigma * k_sigma) 17 | return g1 - fix_float(gamma) * g2 18 | 19 | 20 | def xdog(img, kernel, sigma, k_sigma, eps, phi, gamma, color, scale=True): 21 | img = np.array(img) 22 | if color == "gray": 23 | img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) 24 | 25 | dog = dog_filter(img, kernel, sigma, k_sigma, gamma) 26 | dog = dog / dog.max() 27 | e = 1 + np.tanh(fix_float(phi) * (dog - fix_float(eps))) 28 | e[e >= 1] = 1 29 | 30 | if color == "gray": 31 | img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) 32 | 33 | if not scale: 34 | e[e < 1] = 0 35 | return Image.fromarray((e * 255).astype("uint8")) 36 | 37 | 38 | def run(*args): 39 | return xdog(*args) 40 | -------------------------------------------------------------------------------- /hakuimg/tilt_shift/__init__.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | from PIL import Image 4 | 5 | from .utils import tilt_shift 6 | 7 | 8 | def run(pil_img, focus_ratio: float, dof: int): 9 | focus_ratio += 5 10 | 11 | np_img = np.array(pil_img) 12 | height = np_img.shape[0] 13 | 14 | focus_height = round(height * (focus_ratio / 10)) 15 | np_img = tilt_shift(np_img, dof=dof, focus_height=focus_height) 16 | 17 | return Image.fromarray(np_img) 18 | 
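A minimal usage sketch for the tilt-shift wrapper above (an editorial example, not part of the original file; the input path is an assumption):

    from PIL import Image
    from hakuimg.tilt_shift import run

    img = Image.open("input.png").convert("RGB")  # placeholder path
    # focus_ratio=0.0 centers the in-focus band: run() shifts the value by
    # +5 and divides by 10 to pick the focus height. dof sets the height
    # (in pixels) of each progressively blurred band.
    out = run(img, focus_ratio=0.0, dof=60)
    out.save("tilt_shift.png")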
-------------------------------------------------------------------------------- /hakuimg/tilt_shift/utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Original: https://github.com/andrewdcampbell/tilt-shift/ 3 | """ 4 | import cv2 5 | import numpy as np 6 | 7 | 8 | def tilt_shift(im, focus_height: int, dof: int = 60): 9 | if focus_height < 2 * dof: 10 | focus_height = 2 * dof 11 | if focus_height > im.shape[0] - 2 * dof: 12 | focus_height = im.shape[0] - 2 * dof 13 | 14 | above_focus, below_focus = im[:focus_height, :], im[focus_height:, :] 15 | above_focus = increasing_blur(above_focus[::-1, ...], dof)[::-1, ...] 16 | below_focus = increasing_blur(below_focus, dof) 17 | out = np.vstack((above_focus, below_focus)) 18 | 19 | return out 20 | 21 | 22 | def increasing_blur(im, dof=60): 23 | BLEND_WIDTH = dof 24 | blur_region = cv2.GaussianBlur(im[dof:, :], ksize=(15, 15), sigmaX=0) 25 | if blur_region.shape[0] > dof * 2: 26 | blur_region = increasing_blur(blur_region, dof) 27 | blend_col = np.linspace(1.0, 0, num=BLEND_WIDTH) 28 | blend_mask = np.tile(blend_col, (im.shape[1], 1)).T 29 | res = np.zeros_like(im) 30 | res[:dof, :] = im[:dof, :] 31 | # alpha blend region of width BLEND_WIDTH to hide seams between blur layers 32 | res[dof : dof + BLEND_WIDTH, :] = im[dof : dof + BLEND_WIDTH, :] * blend_mask[ 33 | :, :, None 34 | ] + blur_region[:BLEND_WIDTH, :] * (1 - blend_mask[:, :, None]) 35 | res[dof + BLEND_WIDTH :, :] = blur_region[BLEND_WIDTH:] 36 | return res 37 | -------------------------------------------------------------------------------- /inoutpaint/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KohakuBlueleaf/a1111-sd-webui-haku-img/ac80e1e9415e25109ae1e6193d4a3a33aaa86b13/inoutpaint/__init__.py -------------------------------------------------------------------------------- /inoutpaint/main.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import numpy as np 4 | import cv2 5 | from PIL import Image 6 | 7 | try: 8 | from .utils import * 9 | except: 10 | from utils import * 11 | 12 | 13 | HTML_TEMPLATES = {"resolution": """