├── .gitignore ├── LICENSE.md ├── README.md ├── material_color_utilities_python ├── __init__.py ├── blend │ ├── __init__.py │ └── blend.py ├── hct │ ├── __init__.py │ ├── cam16.py │ ├── hct.py │ └── viewing_conditions.py ├── palettes │ ├── __init__.py │ ├── core_palette.py │ └── tonal_palette.py ├── quantize │ ├── __init__.py │ ├── lab_point_provider.py │ ├── quantizer_celebi.py │ ├── quantizer_map.py │ ├── quantizer_wsmeans.py │ └── quantizer_wu.py ├── scheme │ ├── __init__.py │ └── scheme.py ├── score │ ├── __init__.py │ └── score.py └── utils │ ├── __init__.py │ ├── color_utils.py │ ├── image_utils.py │ ├── math_utils.py │ ├── string_utils.py │ └── theme_utils.py └── pyproject.toml /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | poetry.lock -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2022 Avanish Subbiah 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # material-color-utilities-python 2 | 3 | [![Chat on Matrix](https://matrix.to/img/matrix-badge.svg)](https://matrix.to/#/#AdwCustomizer:matrix.org) 4 | 5 | Python port of material-color-utilities used for Material You colors 6 | 7 | Original source code: https://github.com/material-foundation/material-color-utilities 8 | 9 | NOTE: This is an **unofficial** port of material-color-utilities from JavaScript to Python 10 | 11 | ## Build and install 12 | 13 | You need to have [Poetry](https://python-poetry.org) installed 14 | 15 | ```shell 16 | poetry build 17 | poetry install 18 | ``` 19 | 20 | ## Usage examples for Themeing 21 | 22 | Theme from color: 23 | 24 | ``` python 25 | from material_color_utilities_python import * 26 | 27 | theme = themeFromSourceColor(argbFromHex('#4285f4')) 28 | 29 | print(theme) 30 | ``` 31 | 32 | Color from image: 33 | 34 | ``` python 35 | from material_color_utilities_python import * 36 | 37 | img = Image.open('path/to/image.png') 38 | argb = sourceColorFromImage(img) 39 | 40 | print(hexFromArgb(argb)) 41 | ``` 42 | 43 | Theme from image: 44 | 45 | ``` python 46 | from material_color_utilities_python import * 47 | 48 | img = Image.open('/path/to/image') 49 | basewidth = 64 50 | wpercent = (basewidth/float(img.size[0])) 51 | hsize = int((float(img.size[1])*float(wpercent))) 52 | img = img.resize((basewidth,hsize),Image.Resampling.LANCZOS) 53 | print(themeFromImage(img)) 54 | 55 | print(theme) 56 | ``` 57 | -------------------------------------------------------------------------------- /material_color_utilities_python/__init__.py: -------------------------------------------------------------------------------- 1 | from .utils.theme_utils import * 2 | from .utils.image_utils import * 3 | from .utils.string_utils import * -------------------------------------------------------------------------------- /material_color_utilities_python/blend/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/avanisubbiah/material-color-utilities-python/66158f7efa69f1dc751a6a79c3bb4e6ae257c3bf/material_color_utilities_python/blend/__init__.py -------------------------------------------------------------------------------- /material_color_utilities_python/blend/blend.py: -------------------------------------------------------------------------------- 1 | from ..hct.cam16 import * 2 | from ..hct.hct import * 3 | from ..utils.color_utils import * 4 | from ..utils.math_utils import * 5 | 6 | # // libmonet is designed to have a consistent API across platforms 7 | # // and modular components that can be moved around easily. Using a class as a 8 | # // namespace facilitates this. 9 | # // 10 | # // tslint:disable:class-as-namespace 11 | # /** 12 | # * Functions for blending in HCT and CAM16. 13 | # */ 14 | class Blend: 15 | # /** 16 | # * Blend the design color's HCT hue towards the key color's HCT 17 | # * hue, in a way that leaves the original color recognizable and 18 | # * recognizably shifted towards the key color. 19 | # * 20 | # * @param designColor ARGB representation of an arbitrary color. 21 | # * @param sourceColor ARGB representation of the main theme color. 22 | # * @return The design color with a hue shifted towards the 23 | # * system's color, a slightly warmer/cooler variant of the design 24 | # * color's hue. 25 | # */ 26 | # Changed var differenceDegrees to differenceDegrees_v to avoid overwrite 27 | @staticmethod 28 | def harmonize(designColor, sourceColor): 29 | fromHct = Hct.fromInt(designColor) 30 | toHct = Hct.fromInt(sourceColor) 31 | differenceDegrees_v = differenceDegrees(fromHct.hue, toHct.hue) 32 | rotationDegrees = min(differenceDegrees_v * 0.5, 15.0) 33 | outputHue = sanitizeDegreesDouble(fromHct.hue + rotationDegrees * Blend.rotationDirection(fromHct.hue, toHct.hue)) 34 | return Hct.fromHct(outputHue, fromHct.chroma, fromHct.tone).toInt() 35 | 36 | # /** 37 | # * Blends hue from one color into another. The chroma and tone of 38 | # * the original color are maintained. 39 | # * 40 | # * @param from ARGB representation of color 41 | # * @param to ARGB representation of color 42 | # * @param amount how much blending to perform; 0.0 >= and <= 1.0 43 | # * @return from, with a hue blended towards to. Chroma and tone 44 | # * are constant. 45 | # */ 46 | # Changed "from" arg to "from_v", from is reserved in Python 47 | @staticmethod 48 | def hctHue(from_v, to, amount): 49 | ucs = Blend.cam16Ucs(from_v, to, amount) 50 | ucsCam = Cam16.fromInt(ucs) 51 | fromCam = Cam16.fromInt(from_v) 52 | blended = Hct.fromHct(ucsCam.hue, fromCam.chroma, lstarFromArgb(from_v)) 53 | return blended.toInt() 54 | 55 | # /** 56 | # * Blend in CAM16-UCS space. 57 | # * 58 | # * @param from ARGB representation of color 59 | # * @param to ARGB representation of color 60 | # * @param amount how much blending to perform; 0.0 >= and <= 1.0 61 | # * @return from, blended towards to. Hue, chroma, and tone will 62 | # * change. 63 | # */ 64 | # Changed "from" arg to "from_v", from is reserved in Python 65 | @staticmethod 66 | def cam16Ucs(from_v, to, amount): 67 | fromCam = Cam16.fromInt(from_v) 68 | toCam = Cam16.fromInt(to) 69 | fromJ = fromCam.jstar 70 | fromA = fromCam.astar 71 | fromB = fromCam.bstar 72 | toJ = toCam.jstar 73 | toA = toCam.astar 74 | toB = toCam.bstar 75 | jstar = fromJ + (toJ - fromJ) * amount 76 | astar = fromA + (toA - fromA) * amount 77 | bstar = fromB + (toB - fromB) * amount 78 | return Cam16.fromUcs(jstar, astar, bstar).toInt() 79 | 80 | # /** 81 | # * Sign of direction change needed to travel from one angle to 82 | # * another. 83 | # * 84 | # * @param from The angle travel starts from, in degrees. 85 | # * @param to The angle travel ends at, in degrees. 86 | # * @return -1 if decreasing from leads to the shortest travel 87 | # * distance, 1 if increasing from leads to the shortest travel 88 | # * distance. 89 | # */ 90 | # Changed "from" arg to "from_v", from is reserved in Python 91 | @staticmethod 92 | def rotationDirection(from_v, to): 93 | a = to - from_v 94 | b = to - from_v + 360.0 95 | c = to - from_v - 360.0 96 | aAbs = abs(a) 97 | bAbs = abs(b) 98 | cAbs = abs(c) 99 | if (aAbs <= bAbs and aAbs <= cAbs): 100 | return 1.0 if a >= 0.0 else -1.0 101 | elif (bAbs <= aAbs and bAbs <= cAbs): 102 | return 1.0 if b >= 0.0 else -1.0 103 | else: 104 | return 1.0 if c >= 0.0 else -1.0 105 | -------------------------------------------------------------------------------- /material_color_utilities_python/hct/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/avanisubbiah/material-color-utilities-python/66158f7efa69f1dc751a6a79c3bb4e6ae257c3bf/material_color_utilities_python/hct/__init__.py -------------------------------------------------------------------------------- /material_color_utilities_python/hct/cam16.py: -------------------------------------------------------------------------------- 1 | from ..utils.color_utils import * 2 | from ..utils.math_utils import * 3 | from ..hct.viewing_conditions import * 4 | import math 5 | 6 | # /** 7 | # * CAM16, a color appearance model. Colors are not just defined by their hex 8 | # * code, but rather, a hex code and viewing conditions. 9 | # * 10 | # * CAM16 instances also have coordinates in the CAM16-UCS space, called J*, a*, 11 | # * b*, or jstar, astar, bstar in code. CAM16-UCS is included in the CAM16 12 | # * specification, and should be used when measuring distances between colors. 13 | # * 14 | # * In traditional color spaces, a color can be identified solely by the 15 | # * observer's measurement of the color. Color appearance models such as CAM16 16 | # * also use information about the environment where the color was 17 | # * observed, known as the viewing conditions. 18 | # * 19 | # * For example, white under the traditional assumption of a midday sun white 20 | # * point is accurately measured as a slightly chromatic blue by CAM16. (roughly, 21 | # * hue 203, chroma 3, lightness 100) 22 | # */ 23 | class Cam16: 24 | # /** 25 | # * All of the CAM16 dimensions can be calculated from 3 of the dimensions, in 26 | # * the following combinations: 27 | # * - {j or q} and {c, m, or s} and hue 28 | # * - jstar, astar, bstar 29 | # * Prefer using a static method that constructs from 3 of those dimensions. 30 | # * This constructor is intended for those methods to use to return all 31 | # * possible dimensions. 32 | # * 33 | # * @param hue 34 | # * @param chroma informally, colorfulness / color intensity. like saturation 35 | # * in HSL, except perceptually accurate. 36 | # * @param j lightness 37 | # * @param q brightness ratio of lightness to white point's lightness 38 | # * @param m colorfulness 39 | # * @param s saturation ratio of chroma to white point's chroma 40 | # * @param jstar CAM16-UCS J coordinate 41 | # * @param astar CAM16-UCS a coordinate 42 | # * @param bstar CAM16-UCS b coordinate 43 | # */ 44 | def __init__(self, hue, chroma, j, q, m, s, jstar, astar, bstar): 45 | self.hue = hue 46 | self.chroma = chroma 47 | self.j = j 48 | self.q = q 49 | self.m = m 50 | self.s = s 51 | self.jstar = jstar 52 | self.astar = astar 53 | self.bstar = bstar 54 | 55 | # /** 56 | # * CAM16 instances also have coordinates in the CAM16-UCS space, called J*, 57 | # * a*, b*, or jstar, astar, bstar in code. CAM16-UCS is included in the CAM16 58 | # * specification, and is used to measure distances between colors. 59 | # */ 60 | def distance(self, other): 61 | dJ = self.jstar - other.jstar 62 | dA = self.astar - other.astar 63 | dB = self.bstar - other.bstar 64 | dEPrime = math.sqrt(dJ * dJ + dA * dA + dB * dB) 65 | dE = 1.41 * pow(dEPrime, 0.63) 66 | return dE 67 | 68 | # /** 69 | # * @param argb ARGB representation of a color. 70 | # * @return CAM16 color, assuming the color was viewed in default viewing 71 | # * conditions. 72 | # */ 73 | @staticmethod 74 | def fromInt(argb): 75 | return Cam16.fromIntInViewingConditions(argb, ViewingConditions.DEFAULT) 76 | 77 | # /** 78 | # * @param argb ARGB representation of a color. 79 | # * @param viewingConditions Information about the environment where the color 80 | # * was observed. 81 | # * @return CAM16 color. 82 | # */ 83 | @staticmethod 84 | def fromIntInViewingConditions(argb, viewingConditions): 85 | red = (argb & 0x00ff0000) >> 16 86 | green = (argb & 0x0000ff00) >> 8 87 | blue = (argb & 0x000000ff) 88 | redL = linearized(red) 89 | greenL = linearized(green) 90 | blueL = linearized(blue) 91 | x = 0.41233895 * redL + 0.35762064 * greenL + 0.18051042 * blueL 92 | y = 0.2126 * redL + 0.7152 * greenL + 0.0722 * blueL 93 | z = 0.01932141 * redL + 0.11916382 * greenL + 0.95034478 * blueL 94 | rC = 0.401288 * x + 0.650173 * y - 0.051461 * z 95 | gC = -0.250268 * x + 1.204414 * y + 0.045854 * z 96 | bC = -0.002079 * x + 0.048952 * y + 0.953127 * z 97 | rD = viewingConditions.rgbD[0] * rC 98 | gD = viewingConditions.rgbD[1] * gC 99 | bD = viewingConditions.rgbD[2] * bC 100 | rAF = pow((viewingConditions.fl * abs(rD)) / 100.0, 0.42) 101 | gAF = pow((viewingConditions.fl * abs(gD)) / 100.0, 0.42) 102 | bAF = pow((viewingConditions.fl * abs(bD)) / 100.0, 0.42) 103 | rA = (signum(rD) * 400.0 * rAF) / (rAF + 27.13) 104 | gA = (signum(gD) * 400.0 * gAF) / (gAF + 27.13) 105 | bA = (signum(bD) * 400.0 * bAF) / (bAF + 27.13) 106 | a = (11.0 * rA + -12.0 * gA + bA) / 11.0 107 | b = (rA + gA - 2.0 * bA) / 9.0 108 | u = (20.0 * rA + 20.0 * gA + 21.0 * bA) / 20.0 109 | p2 = (40.0 * rA + 20.0 * gA + bA) / 20.0 110 | atan2 = math.atan2(b, a) 111 | atanDegrees = (atan2 * 180.0) / math.pi 112 | hue = atanDegrees + 360.0 if atanDegrees < 0 else atanDegrees - 360.0 if atanDegrees >= 360 else atanDegrees 113 | hueRadians = (hue * math.pi) / 180.0 114 | ac = p2 * viewingConditions.nbb 115 | j = 100.0 * pow(ac / viewingConditions.aw, viewingConditions.c * viewingConditions.z) 116 | q = (4.0 / viewingConditions.c) * math.sqrt(j / 100.0) * (viewingConditions.aw + 4.0) * viewingConditions.fLRoot 117 | huePrime = hue + 360 if hue < 20.14 else hue 118 | eHue = 0.25 * (math.cos((huePrime * math.pi) / 180.0 + 2.0) + 3.8) 119 | p1 = (50000.0 / 13.0) * eHue * viewingConditions.nc * viewingConditions.ncb 120 | t = (p1 * math.sqrt(a * a + b * b)) / (u + 0.305) 121 | alpha = pow(t, 0.9) * pow(1.64 - pow(0.29, viewingConditions.n), 0.73) 122 | c = alpha * math.sqrt(j / 100.0) 123 | m = c * viewingConditions.fLRoot 124 | s = 50.0 * math.sqrt((alpha * viewingConditions.c) / (viewingConditions.aw + 4.0)) 125 | jstar = ((1.0 + 100.0 * 0.007) * j) / (1.0 + 0.007 * j) 126 | mstar = (1.0 / 0.0228) * math.log(1.0 + 0.0228 * m) 127 | astar = mstar * math.cos(hueRadians) 128 | bstar = mstar * math.sin(hueRadians) 129 | return Cam16(hue, c, j, q, m, s, jstar, astar, bstar) 130 | 131 | # /** 132 | # * @param j CAM16 lightness 133 | # * @param c CAM16 chroma 134 | # * @param h CAM16 hue 135 | # */ 136 | @staticmethod 137 | def fromJch(j, c, h): 138 | return Cam16.fromJchInViewingConditions(j, c, h, ViewingConditions.DEFAULT) 139 | 140 | # /** 141 | # * @param j CAM16 lightness 142 | # * @param c CAM16 chroma 143 | # * @param h CAM16 hue 144 | # * @param viewingConditions Information about the environment where the color 145 | # * was observed. 146 | # */ 147 | @staticmethod 148 | def fromJchInViewingConditions(j, c, h, viewingConditions): 149 | q = (4.0 / viewingConditions.c) * math.sqrt(j / 100.0) * (viewingConditions.aw + 4.0) * viewingConditions.fLRoot 150 | m = c * viewingConditions.fLRoot 151 | alpha = c / math.sqrt(j / 100.0) 152 | s = 50.0 * math.sqrt((alpha * viewingConditions.c) / (viewingConditions.aw + 4.0)) 153 | hueRadians = (h * math.pi) / 180.0 154 | jstar = ((1.0 + 100.0 * 0.007) * j) / (1.0 + 0.007 * j) 155 | mstar = (1.0 / 0.0228) * math.log(1.0 + 0.0228 * m) 156 | astar = mstar * math.cos(hueRadians) 157 | bstar = mstar * math.sin(hueRadians) 158 | return Cam16(h, c, j, q, m, s, jstar, astar, bstar) 159 | 160 | # /** 161 | # * @param jstar CAM16-UCS lightness. 162 | # * @param astar CAM16-UCS a dimension. Like a* in L*a*b*, it is a Cartesian 163 | # * coordinate on the Y axis. 164 | # * @param bstar CAM16-UCS b dimension. Like a* in L*a*b*, it is a Cartesian 165 | # * coordinate on the X axis. 166 | # */ 167 | @staticmethod 168 | def fromUcs(jstar, astar, bstar): 169 | return Cam16.fromUcsInViewingConditions(jstar, astar, bstar, ViewingConditions.DEFAULT) 170 | 171 | # /** 172 | # * @param jstar CAM16-UCS lightness. 173 | # * @param astar CAM16-UCS a dimension. Like a* in L*a*b*, it is a Cartesian 174 | # * coordinate on the Y axis. 175 | # * @param bstar CAM16-UCS b dimension. Like a* in L*a*b*, it is a Cartesian 176 | # * coordinate on the X axis. 177 | # * @param viewingConditions Information about the environment where the color 178 | # * was observed. 179 | # */ 180 | @staticmethod 181 | def fromUcsInViewingConditions(jstar, astar, bstar, viewingConditions): 182 | a = astar 183 | b = bstar 184 | m = math.sqrt(a * a + b * b) 185 | M = (math.exp(m * 0.0228) - 1.0) / 0.0228 186 | c = M / viewingConditions.fLRoot 187 | h = math.atan2(b, a) * (180.0 / math.pi) 188 | if (h < 0.0): 189 | h += 360.0 190 | j = jstar / (1 - (jstar - 100) * 0.007) 191 | return Cam16.fromJchInViewingConditions(j, c, h, viewingConditions) 192 | 193 | # /** 194 | # * @return ARGB representation of color, assuming the color was viewed in 195 | # * default viewing conditions, which are near-identical to the default 196 | # * viewing conditions for sRGB. 197 | # */ 198 | def toInt(self): 199 | return self.viewed(ViewingConditions.DEFAULT) 200 | 201 | # /** 202 | # * @param viewingConditions Information about the environment where the color 203 | # * will be viewed. 204 | # * @return ARGB representation of color 205 | # */ 206 | def viewed(self, viewingConditions): 207 | alpha = 0.0 if self.chroma == 0.0 or self.j == 0.0 else self.chroma / math.sqrt(self.j / 100.0) 208 | t = pow(alpha / pow(1.64 - pow(0.29, viewingConditions.n), 0.73), 1.0 / 0.9) 209 | hRad = (self.hue * math.pi) / 180.0 210 | eHue = 0.25 * (math.cos(hRad + 2.0) + 3.8) 211 | ac = viewingConditions.aw * pow(self.j / 100.0, 1.0 / viewingConditions.c / viewingConditions.z) 212 | p1 = eHue * (50000.0 / 13.0) * viewingConditions.nc * viewingConditions.ncb 213 | p2 = ac / viewingConditions.nbb 214 | hSin = math.sin(hRad) 215 | hCos = math.cos(hRad) 216 | gamma = (23.0 * (p2 + 0.305) * t) / (23.0 * p1 + 11.0 * t * hCos + 108.0 * t * hSin) 217 | a = gamma * hCos 218 | b = gamma * hSin 219 | rA = (460.0 * p2 + 451.0 * a + 288.0 * b) / 1403.0 220 | gA = (460.0 * p2 - 891.0 * a - 261.0 * b) / 1403.0 221 | bA = (460.0 * p2 - 220.0 * a - 6300.0 * b) / 1403.0 222 | rCBase = max(0, (27.13 * abs(rA)) / (400.0 - abs(rA))) 223 | rC = signum(rA) * (100.0 / viewingConditions.fl) * pow(rCBase, 1.0 / 0.42) 224 | gCBase = max(0, (27.13 * abs(gA)) / (400.0 - abs(gA))) 225 | gC = signum(gA) * (100.0 / viewingConditions.fl) * pow(gCBase, 1.0 / 0.42) 226 | bCBase = max(0, (27.13 * abs(bA)) / (400.0 - abs(bA))) 227 | bC = signum(bA) * (100.0 / viewingConditions.fl) * pow(bCBase, 1.0 / 0.42) 228 | rF = rC / viewingConditions.rgbD[0] 229 | gF = gC / viewingConditions.rgbD[1] 230 | bF = bC / viewingConditions.rgbD[2] 231 | x = 1.86206786 * rF - 1.01125463 * gF + 0.14918677 * bF 232 | y = 0.38752654 * rF + 0.62144744 * gF - 0.00897398 * bF 233 | z = -0.01584150 * rF - 0.03412294 * gF + 1.04996444 * bF 234 | argb = argbFromXyz(x, y, z) 235 | return argb 236 | -------------------------------------------------------------------------------- /material_color_utilities_python/hct/hct.py: -------------------------------------------------------------------------------- 1 | # /** 2 | # * A color system built using CAM16 hue and chroma, and L* from 3 | # * L*a*b*. 4 | # * 5 | # * Using L* creates a link between the color system, contrast, and thus 6 | # * accessibility. Contrast ratio depends on relative luminance, or Y in the XYZ 7 | # * color space. L*, or perceptual luminance can be calculated from Y. 8 | # * 9 | # * Unlike Y, L* is linear to human perception, allowing trivial creation of 10 | # * accurate color tones. 11 | # * 12 | # * Unlike contrast ratio, measuring contrast in L* is linear, and simple to 13 | # * calculate. A difference of 40 in HCT tone guarantees a contrast ratio >= 3.0, 14 | # * and a difference of 50 guarantees a contrast ratio >= 4.5. 15 | # */ 16 | from ..utils.color_utils import * 17 | from ..utils.math_utils import * 18 | from ..hct.cam16 import * 19 | from ..hct.viewing_conditions import * 20 | 21 | # /** 22 | # * When the delta between the floor & ceiling of a binary search for maximum 23 | # * chroma at a hue and tone is less than this, the binary search terminates. 24 | # */ 25 | CHROMA_SEARCH_ENDPOINT = 0.4 26 | 27 | # /** 28 | # * The maximum color distance, in CAM16-UCS, between a requested color and the 29 | # * color returned. 30 | # */ 31 | DE_MAX = 1.0 32 | 33 | # /** The maximum difference between the requested L* and the L* returned. */ 34 | DL_MAX = 0.2 35 | 36 | # /** 37 | # * When the delta between the floor & ceiling of a binary search for J, 38 | # * lightness in CAM16, is less than this, the binary search terminates. 39 | # */ 40 | LIGHTNESS_SEARCH_ENDPOINT = 0.01 41 | 42 | # /** 43 | # * @param hue CAM16 hue 44 | # * @param chroma CAM16 chroma 45 | # * @param tone L*a*b* lightness 46 | # * @return CAM16 instance within error tolerance of the provided dimensions, 47 | # * or null. 48 | # */ 49 | def findCamByJ(hue, chroma, tone): 50 | low = 0.0 51 | high = 100.0 52 | mid = 0.0 53 | bestdL = 1000.0 54 | bestdE = 1000.0 55 | bestCam = None 56 | while (abs(low - high) > LIGHTNESS_SEARCH_ENDPOINT): 57 | mid = low + (high - low) / 2 58 | camBeforeClip = Cam16.fromJch(mid, chroma, hue) 59 | clipped = camBeforeClip.toInt() 60 | clippedLstar = lstarFromArgb(clipped) 61 | dL = abs(tone - clippedLstar) 62 | if (dL < DL_MAX): 63 | camClipped = Cam16.fromInt(clipped) 64 | dE = camClipped.distance(Cam16.fromJch(camClipped.j, camClipped.chroma, hue)) 65 | if (dE <= DE_MAX and dE <= bestdE): 66 | bestdL = dL 67 | bestdE = dE 68 | bestCam = camClipped 69 | if (bestdL == 0 and bestdE == 0): 70 | break 71 | if (clippedLstar < tone): 72 | low = mid 73 | else: 74 | high = mid 75 | return bestCam 76 | 77 | # /** 78 | # * @param hue CAM16 hue. 79 | # * @param chroma CAM16 chroma. 80 | # * @param tone L*a*b* lightness. 81 | # * @param viewingConditions Information about the environment where the color 82 | # * was observed. 83 | # */ 84 | def getIntInViewingConditions(hue, chroma, tone, viewingConditions): 85 | if (chroma < 1.0 or round(tone) <= 0.0 or round(tone) >= 100.0): 86 | return argbFromLstar(tone) 87 | 88 | hue = sanitizeDegreesDouble(hue) 89 | high = chroma 90 | mid = chroma 91 | low = 0.0 92 | isFirstLoop = True 93 | answer = None 94 | while (abs(low - high) >= CHROMA_SEARCH_ENDPOINT): 95 | possibleAnswer = findCamByJ(hue, mid, tone) 96 | if (isFirstLoop): 97 | if (possibleAnswer != None): 98 | return possibleAnswer.viewed(viewingConditions) 99 | else: 100 | isFirstLoop = False 101 | mid = low + (high - low) / 2.0 102 | continue 103 | if (possibleAnswer == None): 104 | high = mid 105 | else: 106 | answer = possibleAnswer 107 | low = mid 108 | mid = low + (high - low) / 2.0 109 | if (answer == None): 110 | return argbFromLstar(tone) 111 | return answer.viewed(viewingConditions) 112 | 113 | # /** 114 | # * @param hue a number, in degrees, representing ex. red, orange, yellow, etc. 115 | # * Ranges from 0 <= hue < 360. 116 | # * @param chroma Informally, colorfulness. Ranges from 0 to roughly 150. 117 | # * Like all perceptually accurate color systems, chroma has a different 118 | # * maximum for any given hue and tone, so the color returned may be lower 119 | # * than the requested chroma. 120 | # * @param tone Lightness. Ranges from 0 to 100. 121 | # * @return ARGB representation of a color in default viewing conditions 122 | # */ 123 | def getInt(hue, chroma, tone): 124 | return getIntInViewingConditions(sanitizeDegreesDouble(hue), chroma, clampDouble(0.0, 100.0, tone), ViewingConditions.DEFAULT) 125 | 126 | # /** 127 | # * HCT, hue, chroma, and tone. A color system that provides a perceptually 128 | # * accurate color measurement system that can also accurately render what colors 129 | # * will appear as in different lighting environments. 130 | # */ 131 | class Hct: 132 | def __init__(self, internalHue, internalChroma, internalTone): 133 | self.internalHue = internalHue 134 | self.internalChroma = internalChroma 135 | self.internalTone = internalTone 136 | self.setInternalState(self.toInt()) 137 | 138 | # /** 139 | # * @param hue 0 <= hue < 360; invalid values are corrected. 140 | # * @param chroma 0 <= chroma < ?; Informally, colorfulness. The color 141 | # * returned may be lower than the requested chroma. Chroma has a different 142 | # * maximum for any given hue and tone. 143 | # * @param tone 0 <= tone <= 100; invalid values are corrected. 144 | # * @return HCT representation of a color in default viewing conditions. 145 | # */ 146 | # Function renamed from "from" to "fromHct", from is reserved in Python 147 | @staticmethod 148 | def fromHct(hue, chroma, tone): 149 | return Hct(hue, chroma, tone) 150 | 151 | # /** 152 | # * @param argb ARGB representation of a color. 153 | # * @return HCT representation of a color in default viewing conditions 154 | # */ 155 | @staticmethod 156 | def fromInt(argb): 157 | cam = Cam16.fromInt(argb) 158 | tone = lstarFromArgb(argb) 159 | return Hct(cam.hue, cam.chroma, tone) 160 | 161 | def toInt(self): 162 | return getInt(self.internalHue, self.internalChroma, self.internalTone) 163 | 164 | # /** 165 | # * A number, in degrees, representing ex. red, orange, yellow, etc. 166 | # * Ranges from 0 <= hue < 360. 167 | # */ 168 | def get_hue(self): 169 | return self.internalHue 170 | 171 | # /** 172 | # * @param newHue 0 <= newHue < 360; invalid values are corrected. 173 | # * Chroma may decrease because chroma has a different maximum for any given 174 | # * hue and tone. 175 | # */ 176 | def set_hue(self, newHue): 177 | self.setInternalState(getInt(sanitizeDegreesDouble(newHue), self.internalChroma, self.internalTone)) 178 | 179 | def get_chroma(self): 180 | return self.internalChroma 181 | 182 | # /** 183 | # * @param newChroma 0 <= newChroma < ? 184 | # * Chroma may decrease because chroma has a different maximum for any given 185 | # * hue and tone. 186 | # */ 187 | def set_chroma(self, newChroma): 188 | self.setInternalState(getInt(self.internalHue, newChroma, self.internalTone)) 189 | 190 | # /** Lightness. Ranges from 0 to 100. */ 191 | def get_tone(self): 192 | return self.internalTone 193 | 194 | # /** 195 | # * @param newTone 0 <= newTone <= 100; invalid valids are corrected. 196 | # * Chroma may decrease because chroma has a different maximum for any given 197 | # * hue and tone. 198 | # */ 199 | def set_tone(self, newTone): 200 | self.setInternalState(getInt(self.internalHue, self.internalChroma, newTone)) 201 | 202 | def setInternalState(self, argb): 203 | cam = Cam16.fromInt(argb) 204 | tone = lstarFromArgb(argb) 205 | self.internalHue = cam.hue 206 | self.internalChroma = cam.chroma 207 | self.internalTone = tone 208 | 209 | # Adding properties for getters and setters 210 | hue = property(get_hue, set_hue) 211 | chroma = property(get_chroma, set_chroma) 212 | tone = property(get_tone, set_tone) 213 | -------------------------------------------------------------------------------- /material_color_utilities_python/hct/viewing_conditions.py: -------------------------------------------------------------------------------- 1 | from ..utils.color_utils import * 2 | from ..utils.math_utils import * 3 | import math 4 | 5 | # /** 6 | # * In traditional color spaces, a color can be identified solely by the 7 | # * observer's measurement of the color. Color appearance models such as CAM16 8 | # * also use information about the environment where the color was 9 | # * observed, known as the viewing conditions. 10 | # * 11 | # * For example, white under the traditional assumption of a midday sun white 12 | # * point is accurately measured as a slightly chromatic blue by CAM16. (roughly, 13 | # * hue 203, chroma 3, lightness 100) 14 | # * 15 | # * This class caches intermediate values of the CAM16 conversion process that 16 | # * depend only on viewing conditions, enabling speed ups. 17 | # */ 18 | class ViewingConditions: 19 | # /** 20 | # * Parameters are intermediate values of the CAM16 conversion process. Their 21 | # * names are shorthand for technical color science terminology, this class 22 | # * would not benefit from documenting them individually. A brief overview 23 | # * is available in the CAM16 specification, and a complete overview requires 24 | # * a color science textbook, such as Fairchild's Color Appearance Models. 25 | # */ 26 | def __init__(self, n, aw, nbb, ncb, c, nc, rgbD, fl, fLRoot, z): 27 | self.n = n 28 | self.aw = aw 29 | self.nbb = nbb 30 | self.ncb = ncb 31 | self.c = c 32 | self.nc = nc 33 | self.rgbD = rgbD 34 | self.fl = fl 35 | self.fLRoot = fLRoot 36 | self.z = z 37 | 38 | # /** 39 | # * Create ViewingConditions from a simple, physically relevant, set of 40 | # * parameters. 41 | # * 42 | # * @param whitePoint White point, measured in the XYZ color space. 43 | # * default = D65, or sunny day afternoon 44 | # * @param adaptingLuminance The luminance of the adapting field. Informally, 45 | # * how bright it is in the room where the color is viewed. Can be 46 | # * calculated from lux by multiplying lux by 0.0586. default = 11.72, 47 | # * or 200 lux. 48 | # * @param backgroundLstar The lightness of the area surrounding the color. 49 | # * measured by L* in L*a*b*. default = 50.0 50 | # * @param surround A general description of the lighting surrounding the 51 | # * color. 0 is pitch dark, like watching a movie in a theater. 1.0 is a 52 | # * dimly light room, like watching TV at home at night. 2.0 means there 53 | # * is no difference between the lighting on the color and around it. 54 | # * default = 2.0 55 | # * @param discountingIlluminant Whether the eye accounts for the tint of the 56 | # * ambient lighting, such as knowing an apple is still red in green light. 57 | # * default = false, the eye does not perform this process on 58 | # * self-luminous objects like displays. 59 | # */ 60 | @staticmethod 61 | def make(whitePoint = whitePointD65(), adaptingLuminance = (200.0 / math.pi) * yFromLstar(50.0) / 100.0, backgroundLstar = 50.0, surround = 2.0, discountingIlluminant = False): 62 | xyz = whitePoint 63 | rW = xyz[0] * 0.401288 + xyz[1] * 0.650173 + xyz[2] * -0.051461 64 | gW = xyz[0] * -0.250268 + xyz[1] * 1.204414 + xyz[2] * 0.045854 65 | bW = xyz[0] * -0.002079 + xyz[1] * 0.048952 + xyz[2] * 0.953127 66 | f = 0.8 + surround / 10.0 67 | c = lerp(0.59, 0.69, (f - 0.9) * 10.0) if f >= 0.9 else lerp(0.525, 0.59, (f - 0.8) * 10.0) 68 | d = 1.0 if discountingIlluminant else f * (1.0 - (1.0 / 3.6) * math.exp((-adaptingLuminance - 42.0) / 92.0)) 69 | d = 1.0 if d > 1.0 else 0.0 if d < 0.0 else d 70 | nc = f 71 | rgbD = [ 72 | d * (100.0 / rW) + 1.0 - d, 73 | d * (100.0 / gW) + 1.0 - d, 74 | d * (100.0 / bW) + 1.0 - d, 75 | ] 76 | k = 1.0 / (5.0 * adaptingLuminance + 1.0) 77 | k4 = k * k * k * k 78 | k4F = 1.0 - k4 79 | fl = k4 * adaptingLuminance + 0.1 * k4F * k4F * ((5.0 * adaptingLuminance)**(1. / 3)) 80 | n = yFromLstar(backgroundLstar) / whitePoint[1] 81 | z = 1.48 + math.sqrt(n) 82 | nbb = 0.725 / pow(n, 0.2) 83 | ncb = nbb 84 | rgbAFactors = [ 85 | pow((fl * rgbD[0] * rW) / 100.0, 0.42), 86 | pow((fl * rgbD[1] * gW) / 100.0, 0.42), 87 | pow((fl * rgbD[2] * bW) / 100.0, 0.42), 88 | ] 89 | rgbA = [ 90 | (400.0 * rgbAFactors[0]) / (rgbAFactors[0] + 27.13), 91 | (400.0 * rgbAFactors[1]) / (rgbAFactors[1] + 27.13), 92 | (400.0 * rgbAFactors[2]) / (rgbAFactors[2] + 27.13), 93 | ] 94 | aw = (2.0 * rgbA[0] + rgbA[1] + 0.05 * rgbA[2]) * nbb 95 | return ViewingConditions(n, aw, nbb, ncb, c, nc, rgbD, fl, pow(fl, 0.25), z) 96 | # /** sRGB-like viewing conditions. */ 97 | ViewingConditions.DEFAULT = ViewingConditions.make() -------------------------------------------------------------------------------- /material_color_utilities_python/palettes/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/avanisubbiah/material-color-utilities-python/66158f7efa69f1dc751a6a79c3bb4e6ae257c3bf/material_color_utilities_python/palettes/__init__.py -------------------------------------------------------------------------------- /material_color_utilities_python/palettes/core_palette.py: -------------------------------------------------------------------------------- 1 | from ..hct.hct import * 2 | from ..palettes.tonal_palette import * 3 | 4 | # /** 5 | # * An intermediate concept between the key color for a UI theme, and a full 6 | # * color scheme. 5 sets of tones are generated, all except one use the same hue 7 | # * as the key color, and all vary in chroma. 8 | # */ 9 | class CorePalette: 10 | def __init__(self, argb): 11 | hct = Hct.fromInt(argb) 12 | hue = hct.hue 13 | self.a1 = TonalPalette.fromHueAndChroma(hue, max(48, hct.chroma)) 14 | self.a2 = TonalPalette.fromHueAndChroma(hue, 16) 15 | self.a3 = TonalPalette.fromHueAndChroma(hue + 60, 24) 16 | self.n1 = TonalPalette.fromHueAndChroma(hue, 4) 17 | self.n2 = TonalPalette.fromHueAndChroma(hue, 8) 18 | self.error = TonalPalette.fromHueAndChroma(25, 84) 19 | 20 | # /** 21 | # * @param argb ARGB representation of a color 22 | # */ 23 | @staticmethod 24 | def of(argb): 25 | return CorePalette(argb); 26 | -------------------------------------------------------------------------------- /material_color_utilities_python/palettes/tonal_palette.py: -------------------------------------------------------------------------------- 1 | from ..hct.hct import * 2 | from collections import OrderedDict 3 | 4 | # /** 5 | # * A convenience class for retrieving colors that are constant in hue and 6 | # * chroma, but vary in tone. 7 | # */ 8 | class TonalPalette: 9 | # Using OrderedDict() as replacement for Map() 10 | def __init__(self, hue, chroma): 11 | self.hue = hue 12 | self.chroma = chroma 13 | self.cache = OrderedDict() 14 | 15 | # /** 16 | # * @param argb ARGB representation of a color 17 | # * @return Tones matching that color's hue and chroma. 18 | # */ 19 | @staticmethod 20 | def fromInt(argb): 21 | hct = Hct.fromInt(argb) 22 | return TonalPalette.fromHueAndChroma(hct.hue, hct.chroma) 23 | 24 | # /** 25 | # * @param hue HCT hue 26 | # * @param chroma HCT chroma 27 | # * @return Tones matching hue and chroma. 28 | # */ 29 | @staticmethod 30 | def fromHueAndChroma(hue, chroma): 31 | return TonalPalette(hue, chroma) 32 | 33 | # /** 34 | # * @param tone HCT tone, measured from 0 to 100. 35 | # * @return ARGB representation of a color with that tone. 36 | # */ 37 | def tone(self, tone): 38 | argb = None 39 | if (tone not in self.cache.keys()): 40 | argb = Hct.fromHct(self.hue, self.chroma, tone).toInt() 41 | self.cache[tone] = argb 42 | else: 43 | argb = self.cache[tone] 44 | return argb 45 | -------------------------------------------------------------------------------- /material_color_utilities_python/quantize/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/avanisubbiah/material-color-utilities-python/66158f7efa69f1dc751a6a79c3bb4e6ae257c3bf/material_color_utilities_python/quantize/__init__.py -------------------------------------------------------------------------------- /material_color_utilities_python/quantize/lab_point_provider.py: -------------------------------------------------------------------------------- 1 | from ..utils.color_utils import * 2 | 3 | # /** 4 | # * Provides conversions needed for K-Means quantization. Converting input to 5 | # * points, and converting the final state of the K-Means algorithm to colors. 6 | # */ 7 | class LabPointProvider: 8 | # /** 9 | # * Convert a color represented in ARGB to a 3-element array of L*a*b* 10 | # * coordinates of the color. 11 | # */ 12 | def fromInt(self, argb): 13 | return labFromArgb(argb) 14 | 15 | # /** 16 | # * Convert a 3-element array to a color represented in ARGB. 17 | # */ 18 | def toInt(self, point): 19 | return argbFromLab(point[0], point[1], point[2]) 20 | 21 | # /** 22 | # * Standard CIE 1976 delta E formula also takes the square root, unneeded 23 | # * here. This method is used by quantization algorithms to compare distance, 24 | # * and the relative ordering is the same, with or without a square root. 25 | # * 26 | # * This relatively minor optimization is helpful because this method is 27 | # * called at least once for each pixel in an image. 28 | # */ 29 | # Renamed "from" to "from_v", from is reserved in Python 30 | def distance(self, from_v, to): 31 | dL = from_v[0] - to[0] 32 | dA = from_v[1] - to[1] 33 | dB = from_v[2] - to[2] 34 | return dL * dL + dA * dA + dB * dB 35 | -------------------------------------------------------------------------------- /material_color_utilities_python/quantize/quantizer_celebi.py: -------------------------------------------------------------------------------- 1 | from ..quantize.quantizer_wsmeans import * 2 | from ..quantize.quantizer_wu import * 3 | 4 | # /** 5 | # * An image quantizer that improves on the quality of a standard K-Means 6 | # * algorithm by setting the K-Means initial state to the output of a Wu 7 | # * quantizer, instead of random centroids. Improves on speed by several 8 | # * optimizations, as implemented in Wsmeans, or Weighted Square Means, K-Means 9 | # * with those optimizations. 10 | # * 11 | # * This algorithm was designed by M. Emre Celebi, and was found in their 2011 12 | # * paper, Improving the Performance of K-Means for Color Quantization. 13 | # * https://arxiv.org/abs/1101.0395 14 | # */ 15 | # // libmonet is designed to have a consistent API across platforms 16 | # // and modular components that can be moved around easily. Using a class as a 17 | # // namespace facilitates this. 18 | # // 19 | # // tslint:disable-next-line:class-as-namespace 20 | class QuantizerCelebi: 21 | # /** 22 | # * @param pixels Colors in ARGB format. 23 | # * @param maxColors The number of colors to divide the image into. A lower 24 | # * number of colors may be returned. 25 | # * @return Map with keys of colors in ARGB format, and values of number of 26 | # * pixels in the original image that correspond to the color in the 27 | # * quantized image. 28 | # */ 29 | @staticmethod 30 | def quantize(pixels, maxColors): 31 | wu = QuantizerWu() 32 | wuResult = wu.quantize(pixels, maxColors) 33 | return QuantizerWsmeans.quantize(pixels, wuResult, maxColors) 34 | -------------------------------------------------------------------------------- /material_color_utilities_python/quantize/quantizer_map.py: -------------------------------------------------------------------------------- 1 | from ..utils.color_utils import * 2 | from collections import OrderedDict 3 | 4 | # /** 5 | # * Quantizes an image into a map, with keys of ARGB colors, and values of the 6 | # * number of times that color appears in the image. 7 | # */ 8 | # // libmonet is designed to have a consistent API across platforms 9 | # // and modular components that can be moved around easily. Using a class as a 10 | # // namespace facilitates this. 11 | # // 12 | # // tslint:disable-next-line:class-as-namespace 13 | class QuantizerMap: 14 | # /** 15 | # * @param pixels Colors in ARGB format. 16 | # * @return A Map with keys of ARGB colors, and values of the number of times 17 | # * the color appears in the image. 18 | # */ 19 | @staticmethod 20 | def quantize(pixels): 21 | countByColor = OrderedDict() 22 | for i in range(len(pixels)): 23 | pixel = pixels[i] 24 | alpha = alphaFromArgb(pixel) 25 | if (alpha < 255): 26 | continue 27 | countByColor[pixel] = (countByColor[pixel] if pixel in countByColor.keys() else 0) + 1 28 | return countByColor 29 | -------------------------------------------------------------------------------- /material_color_utilities_python/quantize/quantizer_wsmeans.py: -------------------------------------------------------------------------------- 1 | from ..quantize.lab_point_provider import * 2 | from collections import OrderedDict 3 | import random 4 | import math 5 | 6 | MAX_ITERATIONS = 10 7 | MIN_MOVEMENT_DISTANCE = 3.0 8 | 9 | # /** 10 | # * An image quantizer that improves on the speed of a standard K-Means algorithm 11 | # * by implementing several optimizations, including deduping identical pixels 12 | # * and a triangle inequality rule that reduces the number of comparisons needed 13 | # * to identify which cluster a point should be moved to. 14 | # * 15 | # * Wsmeans stands for Weighted Square Means. 16 | # * 17 | # * This algorithm was designed by M. Emre Celebi, and was found in their 2011 18 | # * paper, Improving the Performance of K-Means for Color Quantization. 19 | # * https://arxiv.org/abs/1101.0395 20 | # */ 21 | # // libmonet is designed to have a consistent API across platforms 22 | # // and modular components that can be moved around easily. Using a class as a 23 | # // namespace facilitates this. 24 | # // 25 | # // tslint:disable-next-line:class-as-namespace 26 | class QuantizerWsmeans: 27 | # /** 28 | # * @param inputPixels Colors in ARGB format. 29 | # * @param startingClusters Defines the initial state of the quantizer. Passing 30 | # * an empty array is fine, the implementation will create its own initial 31 | # * state that leads to reproducible results for the same inputs. 32 | # * Passing an array that is the result of Wu quantization leads to higher 33 | # * quality results. 34 | # * @param maxColors The number of colors to divide the image into. A lower 35 | # * number of colors may be returned. 36 | # * @return Colors in ARGB format. 37 | # */ 38 | # Replacing Map() with OrderedDict() 39 | @staticmethod 40 | def quantize(inputPixels, startingClusters, maxColors): 41 | random.seed(69) 42 | pixelToCount = OrderedDict() 43 | points = [] 44 | pixels = [] 45 | pointProvider = LabPointProvider() 46 | pointCount = 0 47 | for i in range(len(inputPixels)): 48 | inputPixel = inputPixels[i] 49 | if (inputPixel not in pixelToCount.keys()): 50 | pointCount += 1 51 | points.append(pointProvider.fromInt(inputPixel)) 52 | pixels.append(inputPixel) 53 | pixelToCount[inputPixel] = 1 54 | else: 55 | pixelToCount[inputPixel] = pixelToCount[inputPixel] + 1 56 | counts = [] 57 | for i in range(pointCount): 58 | pixel = pixels[i] 59 | if (pixel in pixelToCount.keys()): 60 | # counts[i] = pixelToCount[pixel] 61 | counts.append(pixelToCount[pixel]) 62 | clusterCount = min(maxColors, pointCount) 63 | if (len(startingClusters) > 0): 64 | clusterCount = min(clusterCount, len(startingClusters)) 65 | clusters = [] 66 | for i in range(len(startingClusters)): 67 | clusters.append(pointProvider.fromInt(startingClusters[i])) 68 | additionalClustersNeeded = clusterCount - len(clusters) 69 | if (len(startingClusters) == 0 and additionalClustersNeeded > 0): 70 | for i in range(additionalClustersNeeded): 71 | l = random.uniform(0, 1) * 100.0 72 | a = random.uniform(0, 1) * (100.0 - (-100.0) + 1) + -100 73 | b = random.uniform(0, 1) * (100.0 - (-100.0) + 1) + -100 74 | clusters.append([l, a, b]) 75 | clusterIndices = [] 76 | for i in range(pointCount): 77 | clusterIndices.append(math.floor(random.uniform(0, 1) * clusterCount)) 78 | indexMatrix = [] 79 | for i in range(clusterCount): 80 | indexMatrix.append([]) 81 | for j in range(clusterCount): 82 | indexMatrix[i].append(0) 83 | distanceToIndexMatrix = [] 84 | for i in range(clusterCount): 85 | distanceToIndexMatrix.append([]) 86 | for j in range(clusterCount): 87 | distanceToIndexMatrix[i].append(DistanceAndIndex()) 88 | pixelCountSums = [] 89 | for i in range(clusterCount): 90 | pixelCountSums.append(0) 91 | for iteration in range(MAX_ITERATIONS): 92 | for i in range(clusterCount): 93 | for j in range(i + 1, clusterCount): 94 | distance = pointProvider.distance(clusters[i], clusters[j]) 95 | distanceToIndexMatrix[j][i].distance = distance 96 | distanceToIndexMatrix[j][i].index = i 97 | distanceToIndexMatrix[i][j].distance = distance 98 | distanceToIndexMatrix[i][j].index = j 99 | # This sort here doesn't seem to do anything because arr of objects 100 | # leaving just in case though 101 | # distanceToIndexMatrix[i].sort() 102 | for j in range(clusterCount): 103 | indexMatrix[i][j] = distanceToIndexMatrix[i][j].index 104 | pointsMoved = 0 105 | for i in range(pointCount): 106 | point = points[i] 107 | previousClusterIndex = clusterIndices[i] 108 | previousCluster = clusters[previousClusterIndex] 109 | previousDistance = pointProvider.distance(point, previousCluster) 110 | minimumDistance = previousDistance 111 | newClusterIndex = -1 112 | for j in range(clusterCount): 113 | if (distanceToIndexMatrix[previousClusterIndex][j].distance >= 4 * previousDistance): 114 | continue 115 | distance = pointProvider.distance(point, clusters[j]) 116 | if (distance < minimumDistance): 117 | minimumDistance = distance 118 | newClusterIndex = j 119 | if (newClusterIndex != -1): 120 | distanceChange = abs((math.sqrt(minimumDistance) - math.sqrt(previousDistance))) 121 | if (distanceChange > MIN_MOVEMENT_DISTANCE): 122 | pointsMoved += 1 123 | clusterIndices[i] = newClusterIndex 124 | if (pointsMoved == 0 and iteration != 0): 125 | break 126 | componentASums = [0] * clusterCount 127 | componentBSums = [0] * clusterCount 128 | componentCSums = [0] * clusterCount 129 | for i in range(clusterCount): 130 | pixelCountSums[i] = 0 131 | for i in range(pointCount): 132 | clusterIndex = clusterIndices[i] 133 | point = points[i] 134 | count = counts[i] 135 | pixelCountSums[clusterIndex] += count 136 | componentASums[clusterIndex] += (point[0] * count) 137 | componentBSums[clusterIndex] += (point[1] * count) 138 | componentCSums[clusterIndex] += (point[2] * count) 139 | for i in range(clusterCount): 140 | count = pixelCountSums[i] 141 | if (count == 0): 142 | clusters[i] = [0.0, 0.0, 0.0] 143 | continue 144 | a = componentASums[i] / count 145 | b = componentBSums[i] / count 146 | c = componentCSums[i] / count 147 | clusters[i] = [a, b, c] 148 | argbToPopulation = OrderedDict() 149 | for i in range(clusterCount): 150 | count = pixelCountSums[i] 151 | if (count == 0): 152 | continue 153 | possibleNewCluster = pointProvider.toInt(clusters[i]) 154 | if (possibleNewCluster in argbToPopulation.keys()): 155 | continue 156 | argbToPopulation[possibleNewCluster] = count 157 | return argbToPopulation 158 | 159 | # /** 160 | # * A wrapper for maintaining a table of distances between K-Means clusters. 161 | # */ 162 | class DistanceAndIndex: 163 | def __init__(self): 164 | self.distance = -1 165 | self.index = -1 166 | -------------------------------------------------------------------------------- /material_color_utilities_python/quantize/quantizer_wu.py: -------------------------------------------------------------------------------- 1 | from ..utils.color_utils import * 2 | from ..quantize.quantizer_map import * 3 | 4 | INDEX_BITS = 5 5 | SIDE_LENGTH = 33 # ((1 << INDEX_INDEX_BITS) + 1) 6 | TOTAL_SIZE = 35937 # SIDE_LENGTH * SIDE_LENGTH * SIDE_LENGTH 7 | directions = { 8 | "RED" : 'red', 9 | "GREEN" : 'green', 10 | "BLUE": 'blue', 11 | } 12 | 13 | # /** 14 | # * An image quantizer that divides the image's pixels into clusters by 15 | # * recursively cutting an RGB cube, based on the weight of pixels in each area 16 | # * of the cube. 17 | # * 18 | # * The algorithm was described by Xiaolin Wu in Graphic Gems II, published in 19 | # * 1991. 20 | # */ 21 | class QuantizerWu: 22 | def __init__(self, weights = [], momentsR = [], momentsG = [], momentsB = [], moments = [], cubes = []): 23 | self.weights = weights 24 | self.momentsR = momentsR 25 | self.momentsG = momentsG 26 | self.momentsB = momentsB 27 | self.moments = moments 28 | self.cubes = cubes 29 | 30 | # /** 31 | # * @param pixels Colors in ARGB format. 32 | # * @param maxColors The number of colors to divide the image into. A lower 33 | # * number of colors may be returned. 34 | # * @return Colors in ARGB format. 35 | # */ 36 | def quantize(self, pixels, maxColors): 37 | self.constructHistogram(pixels) 38 | self.computeMoments() 39 | createBoxesResult = self.createBoxes(maxColors) 40 | results = self.createResult(createBoxesResult.resultCount) 41 | return results 42 | 43 | def constructHistogram(self, pixels): 44 | _a = None 45 | self.weights = [0] * TOTAL_SIZE 46 | self.momentsR = [0] * TOTAL_SIZE 47 | self.momentsG = [0] * TOTAL_SIZE 48 | self.momentsB = [0] * TOTAL_SIZE 49 | self.moments = [0] * TOTAL_SIZE 50 | countByColor = QuantizerMap.quantize(pixels) 51 | for (pixel, count) in countByColor.items(): 52 | red = redFromArgb(pixel) 53 | green = greenFromArgb(pixel) 54 | blue = blueFromArgb(pixel) 55 | bitsToRemove = 8 - INDEX_BITS 56 | iR = (red >> bitsToRemove) + 1 57 | iG = (green >> bitsToRemove) + 1 58 | iB = (blue >> bitsToRemove) + 1 59 | index = self.getIndex(iR, iG, iB) 60 | self.weights[index] = (self.weights[index] if len(self.weights) > index else 0) + count 61 | self.momentsR[index] += count * red 62 | self.momentsG[index] += count * green 63 | self.momentsB[index] += count * blue 64 | self.moments[index] += count * (red * red + green * green + blue * blue) 65 | 66 | def computeMoments(self): 67 | for r in range(1, SIDE_LENGTH): 68 | area = [0] * SIDE_LENGTH 69 | areaR = [0] * SIDE_LENGTH 70 | areaG = [0] * SIDE_LENGTH 71 | areaB = [0] * SIDE_LENGTH 72 | area2 = [0.0] * SIDE_LENGTH 73 | for g in range(1, SIDE_LENGTH): 74 | line = 0 75 | lineR = 0 76 | lineG = 0 77 | lineB = 0 78 | line2 = 0.0 79 | for b in range(1, SIDE_LENGTH): 80 | index = self.getIndex(r, g, b) 81 | line += self.weights[index] 82 | lineR += self.momentsR[index] 83 | lineG += self.momentsG[index] 84 | lineB += self.momentsB[index] 85 | line2 += self.moments[index] 86 | area[b] += line 87 | areaR[b] += lineR 88 | areaG[b] += lineG 89 | areaB[b] += lineB 90 | area2[b] += line2 91 | previousIndex = self.getIndex(r - 1, g, b) 92 | self.weights[index] = self.weights[previousIndex] + area[b] 93 | self.momentsR[index] = self.momentsR[previousIndex] + areaR[b] 94 | self.momentsG[index] = self.momentsG[previousIndex] + areaG[b] 95 | self.momentsB[index] = self.momentsB[previousIndex] + areaB[b] 96 | self.moments[index] = self.moments[previousIndex] + area2[b] 97 | 98 | def createBoxes(self, maxColors): 99 | self.cubes = [Box() for x in [0] * maxColors] 100 | volumeVariance = [0.0] * maxColors 101 | self.cubes[0].r0 = 0 102 | self.cubes[0].g0 = 0 103 | self.cubes[0].b0 = 0 104 | self.cubes[0].r1 = SIDE_LENGTH - 1 105 | self.cubes[0].g1 = SIDE_LENGTH - 1 106 | self.cubes[0].b1 = SIDE_LENGTH - 1 107 | generatedColorCount = maxColors 108 | next = 0 109 | for i in range(1, maxColors): 110 | if (self.cut(self.cubes[next], self.cubes[i])): 111 | volumeVariance[next] = self.variance(self.cubes[next]) if self.cubes[next].vol > 1 else 0.0 112 | volumeVariance[i] = self.variance(self.cubes[i]) if self.cubes[i].vol > 1 else 0.0 113 | else: 114 | volumeVariance[next] = 0.0 115 | i -= 1 116 | next = 0 117 | temp = volumeVariance[0] 118 | for j in range(1, i): 119 | if (volumeVariance[j] > temp): 120 | temp = volumeVariance[j] 121 | next = j 122 | if (temp <= 0.0): 123 | generatedColorCount = i + 1 124 | break 125 | return CreateBoxesResult(maxColors, generatedColorCount) 126 | 127 | def createResult(self, colorCount): 128 | colors = [] 129 | for i in range(colorCount): 130 | cube = self.cubes[i] 131 | weight = self.volume(cube, self.weights) 132 | if (weight > 0): 133 | r = round(self.volume(cube, self.momentsR) / weight) 134 | g = round(self.volume(cube, self.momentsG) / weight) 135 | b = round(self.volume(cube, self.momentsB) / weight) 136 | color = (255 << 24) | ((r & 0x0ff) << 16) | ((g & 0x0ff) << 8) | (b & 0x0ff) 137 | colors.append(color) 138 | return colors 139 | 140 | def variance(self, cube): 141 | dr = self.volume(cube, self.momentsR) 142 | dg = self.volume(cube, self.momentsG) 143 | db = self.volume(cube, self.momentsB) 144 | xx = self.moments[self.getIndex(cube.r1, cube.g1, cube.b1)] - self.moments[self.getIndex(cube.r1, cube.g1, cube.b0)] - self.moments[self.getIndex(cube.r1, cube.g0, cube.b1)] + self.moments[self.getIndex(cube.r1, cube.g0, cube.b0)] - self.moments[self.getIndex(cube.r0, cube.g1, cube.b1)] + self.moments[self.getIndex(cube.r0, cube.g1, cube.b0)] + self.moments[self.getIndex(cube.r0, cube.g0, cube.b1)] - self.moments[self.getIndex(cube.r0, cube.g0, cube.b0)] 145 | hypotenuse = dr * dr + dg * dg + db * db 146 | volume = self.volume(cube, self.weights) 147 | return xx - hypotenuse / volume 148 | 149 | def cut(self, one, two): 150 | wholeR = self.volume(one, self.momentsR) 151 | wholeG = self.volume(one, self.momentsG) 152 | wholeB = self.volume(one, self.momentsB) 153 | wholeW = self.volume(one, self.weights) 154 | maxRResult = self.maximize(one, directions["RED"], one.r0 + 1, one.r1, wholeR, wholeG, wholeB, wholeW) 155 | maxGResult = self.maximize(one, directions["GREEN"], one.g0 + 1, one.g1, wholeR, wholeG, wholeB, wholeW) 156 | maxBResult = self.maximize(one, directions["BLUE"], one.b0 + 1, one.b1, wholeR, wholeG, wholeB, wholeW) 157 | direction = None 158 | maxR = maxRResult.maximum 159 | maxG = maxGResult.maximum 160 | maxB = maxBResult.maximum 161 | if (maxR >= maxG and maxR >= maxB): 162 | if (maxRResult.cutLocation < 0): 163 | return False 164 | direction = directions["RED"] 165 | elif (maxG >= maxR and maxG >= maxB): 166 | direction = directions["GREEN"] 167 | else: 168 | direction = directions["BLUE"] 169 | two.r1 = one.r1 170 | two.g1 = one.g1 171 | two.b1 = one.b1 172 | 173 | if (direction == directions["RED"]): 174 | one.r1 = maxRResult.cutLocation 175 | two.r0 = one.r1 176 | two.g0 = one.g0 177 | two.b0 = one.b0 178 | elif (direction == directions["GREEN"]): 179 | one.g1 = maxGResult.cutLocation 180 | two.r0 = one.r0 181 | two.g0 = one.g1 182 | two.b0 = one.b0 183 | elif (direction == directions["BLUE"]): 184 | one.b1 = maxBResult.cutLocation 185 | two.r0 = one.r0 186 | two.g0 = one.g0 187 | two.b0 = one.b1 188 | else: 189 | raise Exception('unexpected direction ' + direction) 190 | 191 | one.vol = (one.r1 - one.r0) * (one.g1 - one.g0) * (one.b1 - one.b0) 192 | two.vol = (two.r1 - two.r0) * (two.g1 - two.g0) * (two.b1 - two.b0) 193 | return True 194 | 195 | def maximize(self, cube, direction, first, last, wholeR, wholeG, wholeB, wholeW): 196 | bottomR = self.bottom(cube, direction, self.momentsR) 197 | bottomG = self.bottom(cube, direction, self.momentsG) 198 | bottomB = self.bottom(cube, direction, self.momentsB) 199 | bottomW = self.bottom(cube, direction, self.weights) 200 | max = 0.0 201 | cut = -1 202 | halfR = 0 203 | halfG = 0 204 | halfB = 0 205 | halfW = 0 206 | for i in range(first, last): 207 | halfR = bottomR + self.top(cube, direction, i, self.momentsR) 208 | halfG = bottomG + self.top(cube, direction, i, self.momentsG) 209 | halfB = bottomB + self.top(cube, direction, i, self.momentsB) 210 | halfW = bottomW + self.top(cube, direction, i, self.weights) 211 | if (halfW == 0): 212 | continue 213 | tempNumerator = (halfR * halfR + halfG * halfG + halfB * halfB) * 1.0 214 | tempDenominator = halfW * 1.0 215 | temp = tempNumerator / tempDenominator 216 | halfR = wholeR - halfR 217 | halfG = wholeG - halfG 218 | halfB = wholeB - halfB 219 | halfW = wholeW - halfW 220 | if (halfW == 0): 221 | continue 222 | tempNumerator = (halfR * halfR + halfG * halfG + halfB * halfB) * 1.0 223 | tempDenominator = halfW * 1.0 224 | temp += tempNumerator / tempDenominator 225 | if (temp > max): 226 | max = temp 227 | cut = i 228 | return MaximizeResult(cut, max) 229 | 230 | def volume(self, cube, moment): 231 | return (moment[self.getIndex(cube.r1, cube.g1, cube.b1)] - moment[self.getIndex(cube.r1, cube.g1, cube.b0)] - moment[self.getIndex(cube.r1, cube.g0, cube.b1)] + moment[self.getIndex(cube.r1, cube.g0, cube.b0)] - moment[self.getIndex(cube.r0, cube.g1, cube.b1)] + moment[self.getIndex(cube.r0, cube.g1, cube.b0)] + moment[self.getIndex(cube.r0, cube.g0, cube.b1)] - moment[self.getIndex(cube.r0, cube.g0, cube.b0)]) 232 | 233 | def bottom(self, cube, direction, moment): 234 | if (direction == directions["RED"]): 235 | return (-moment[self.getIndex(cube.r0, cube.g1, cube.b1)] + moment[self.getIndex(cube.r0, cube.g1, cube.b0)] + moment[self.getIndex(cube.r0, cube.g0, cube.b1)] - moment[self.getIndex(cube.r0, cube.g0, cube.b0)]) 236 | elif (direction == directions["GREEN"]): 237 | return (-moment[self.getIndex(cube.r1, cube.g0, cube.b1)] + moment[self.getIndex(cube.r1, cube.g0, cube.b0)] + moment[self.getIndex(cube.r0, cube.g0, cube.b1)] - moment[self.getIndex(cube.r0, cube.g0, cube.b0)]) 238 | elif (direction == directions["BLUE"]): 239 | return (-moment[self.getIndex(cube.r1, cube.g1, cube.b0)] + moment[self.getIndex(cube.r1, cube.g0, cube.b0)] + moment[self.getIndex(cube.r0, cube.g1, cube.b0)] - moment[self.getIndex(cube.r0, cube.g0, cube.b0)]) 240 | else: 241 | raise Exception('unexpected direction ' + direction) 242 | 243 | def top(self, cube, direction, position, moment): 244 | if (direction == directions["RED"]): 245 | return (moment[self.getIndex(position, cube.g1, cube.b1)] - moment[self.getIndex(position, cube.g1, cube.b0)] - moment[self.getIndex(position, cube.g0, cube.b1)] + moment[self.getIndex(position, cube.g0, cube.b0)]) 246 | elif (direction == directions["GREEN"]): 247 | return (moment[self.getIndex(cube.r1, position, cube.b1)] - moment[self.getIndex(cube.r1, position, cube.b0)] - moment[self.getIndex(cube.r0, position, cube.b1)] + moment[self.getIndex(cube.r0, position, cube.b0)]) 248 | elif (direction == directions["BLUE"]): 249 | return (moment[self.getIndex(cube.r1, cube.g1, position)] - moment[self.getIndex(cube.r1, cube.g0, position)] - moment[self.getIndex(cube.r0, cube.g1, position)] + moment[self.getIndex(cube.r0, cube.g0, position)]) 250 | else: 251 | raise Exception('unexpected direction ' + direction) 252 | 253 | def getIndex(self, r, g, b): 254 | return (r << (INDEX_BITS * 2)) + (r << (INDEX_BITS + 1)) + r + (g << INDEX_BITS) + g + b 255 | 256 | # /** 257 | # * Keeps track of the state of each box created as the Wu quantization 258 | # * algorithm progresses through dividing the image's pixels as plotted in RGB. 259 | # */ 260 | class Box: 261 | def __init__(self, r0 = 0, r1 = 0, g0 = 0, g1 = 0, b0 = 0, b1 = 0, vol = 0): 262 | self.r0 = r0 263 | self.r1 = r1 264 | self.g0 = g0 265 | self.g1 = g1 266 | self.b0 = b0 267 | self.b1 = b1 268 | self.vol = vol 269 | 270 | # /** 271 | # * Represents final result of Wu algorithm. 272 | # */ 273 | class CreateBoxesResult: 274 | # /** 275 | # * @param requestedCount how many colors the caller asked to be returned from 276 | # * quantization. 277 | # * @param resultCount the actual number of colors achieved from quantization. 278 | # * May be lower than the requested count. 279 | # */ 280 | def __init__(self, requestedCount, resultCount): 281 | self.requestedCount = requestedCount 282 | self.resultCount = resultCount 283 | 284 | # /** 285 | # * Represents the result of calculating where to cut an existing box in such 286 | # * a way to maximize variance between the two new boxes created by a cut. 287 | # */ 288 | class MaximizeResult: 289 | def __init__(self, cutLocation, maximum): 290 | self.cutLocation = cutLocation 291 | self.maximum = maximum 292 | -------------------------------------------------------------------------------- /material_color_utilities_python/scheme/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/avanisubbiah/material-color-utilities-python/66158f7efa69f1dc751a6a79c3bb4e6ae257c3bf/material_color_utilities_python/scheme/__init__.py -------------------------------------------------------------------------------- /material_color_utilities_python/scheme/scheme.py: -------------------------------------------------------------------------------- 1 | from curses import termattrs 2 | 3 | from regex import P 4 | from ..palettes.core_palette import * 5 | import json 6 | 7 | # /** 8 | # * Represents a Material color scheme, a mapping of color roles to colors. 9 | # */ 10 | # Using dictionary instead of JavaScript Object 11 | class Scheme: 12 | def __init__(self, props): 13 | self.props = props 14 | 15 | def get_primary(self): 16 | return self.props["primary"] 17 | 18 | def get_primaryContainer(self): 19 | return self.props["primaryContainer"] 20 | 21 | def get_onPrimary(self): 22 | return self.props["onPrimary"] 23 | 24 | def get_onPrimaryContainer(self): 25 | return self.props["onPrimaryContainer"] 26 | 27 | def get_secondary(self): 28 | return self.props["secondary"] 29 | 30 | def get_secondaryContainer(self): 31 | return self.props["secondaryContainer"] 32 | 33 | def get_onSecondary(self): 34 | return self.props["onSecondary"] 35 | 36 | def get_onSecondaryContainer(self): 37 | return self.props["onSecondaryContainer"] 38 | 39 | def get_tertiary(self): 40 | return self.props["tertiary"] 41 | 42 | def get_onTertiary(self): 43 | return self.props["onTertiary"] 44 | 45 | def get_tertiaryContainer(self): 46 | return self.props["tertiaryContainer"] 47 | 48 | def get_onTertiaryContainer(self): 49 | return self.props["onTertiaryContainer"] 50 | 51 | def get_error(self): 52 | return self.props["error"] 53 | 54 | def get_onError(self): 55 | return self.props["onError"] 56 | 57 | def get_errorContainer(self): 58 | return self.props["errorContainer"] 59 | 60 | def get_onErrorContainer(self): 61 | return self.props["onErrorContainer"] 62 | 63 | def get_background(self): 64 | return self.props["background"] 65 | 66 | def get_onBackground(self): 67 | return self.props["onBackground"] 68 | 69 | def get_surface(self): 70 | return self.props["surface"] 71 | 72 | def get_onSurface(self): 73 | return self.props["onSurface"] 74 | 75 | def get_surfaceVariant(self): 76 | return self.props["surfaceVariant"] 77 | 78 | def get_onSurfaceVariant(self): 79 | return self.props["onSurfaceVariant"] 80 | 81 | def get_outline(self): 82 | return self.props["outline"] 83 | 84 | def get_shadow(self): 85 | return self.props["shadow"] 86 | 87 | def get_inverseSurface(self): 88 | return self.props["inverseSurface"] 89 | 90 | def get_inverseOnSurface(self): 91 | return self.props["inverseOnSurface"] 92 | 93 | def get_inversePrimary(self): 94 | return self.props["inversePrimary"] 95 | 96 | primary = property(get_primary) 97 | primaryContainer = property(get_primaryContainer) 98 | onPrimary = property(get_onPrimary) 99 | onPrimaryContainer = property(get_onPrimaryContainer) 100 | secondary = property(get_secondary) 101 | secondaryContainer = property(get_secondaryContainer) 102 | onSecondary = property(get_onSecondary) 103 | onSecondaryContainer = property(get_onSecondaryContainer) 104 | tertiary = property(get_tertiary) 105 | onTertiary = property(get_onTertiary) 106 | tertiaryContainer = property(get_tertiaryContainer) 107 | onTertiaryContainer = property(get_onTertiaryContainer) 108 | error = property(get_error) 109 | onError = property(get_onError) 110 | errorContainer = property(get_errorContainer) 111 | onErrorContainer = property(get_onErrorContainer) 112 | background = property(get_background) 113 | onBackground = property(get_onBackground) 114 | surface = property(get_surface) 115 | onSurface = property(get_onSurface) 116 | surfaceVariant = property(get_surfaceVariant) 117 | onSurfaceVariant = property(get_onSurfaceVariant) 118 | outline = property(get_outline) 119 | shadow = property(get_shadow) 120 | inverseSurface = property(get_inverseSurface) 121 | inverseOnSurface = property(get_inverseOnSurface) 122 | inversePrimary = property(get_inversePrimary) 123 | 124 | # /** 125 | # * @param argb ARGB representation of a color. 126 | # * @return Light Material color scheme, based on the color's hue. 127 | # */ 128 | @staticmethod 129 | def light(argb): 130 | core = CorePalette.of(argb) 131 | return Scheme({ 132 | "primary" : core.a1.tone(40), 133 | "onPrimary" : core.a1.tone(100), 134 | "primaryContainer" : core.a1.tone(90), 135 | "onPrimaryContainer" : core.a1.tone(10), 136 | "secondary" : core.a2.tone(40), 137 | "onSecondary" : core.a2.tone(100), 138 | "secondaryContainer" : core.a2.tone(90), 139 | "onSecondaryContainer" : core.a2.tone(10), 140 | "tertiary" : core.a3.tone(40), 141 | "onTertiary" : core.a3.tone(100), 142 | "tertiaryContainer" : core.a3.tone(90), 143 | "onTertiaryContainer" : core.a3.tone(10), 144 | "error" : core.error.tone(40), 145 | "onError" : core.error.tone(100), 146 | "errorContainer" : core.error.tone(90), 147 | "onErrorContainer" : core.error.tone(10), 148 | "background" : core.n1.tone(99), 149 | "onBackground" : core.n1.tone(10), 150 | "surface" : core.n1.tone(99), 151 | "onSurface" : core.n1.tone(10), 152 | "surfaceVariant" : core.n2.tone(90), 153 | "onSurfaceVariant" : core.n2.tone(30), 154 | "outline" : core.n2.tone(50), 155 | "shadow" : core.n1.tone(0), 156 | "inverseSurface" : core.n1.tone(20), 157 | "inverseOnSurface" : core.n1.tone(95), 158 | "inversePrimary" : core.a1.tone(80) 159 | }) 160 | 161 | # /** 162 | # * @param argb ARGB representation of a color. 163 | # * @return Dark Material color scheme, based on the color's hue. 164 | # */ 165 | @staticmethod 166 | def dark(argb): 167 | core = CorePalette.of(argb); 168 | return Scheme({ 169 | "primary" : core.a1.tone(80), 170 | "onPrimary" : core.a1.tone(20), 171 | "primaryContainer" : core.a1.tone(30), 172 | "onPrimaryContainer" : core.a1.tone(90), 173 | "secondary" : core.a2.tone(80), 174 | "onSecondary" : core.a2.tone(20), 175 | "secondaryContainer" : core.a2.tone(30), 176 | "onSecondaryContainer" : core.a2.tone(90), 177 | "tertiary" : core.a3.tone(80), 178 | "onTertiary" : core.a3.tone(20), 179 | "tertiaryContainer" : core.a3.tone(30), 180 | "onTertiaryContainer" : core.a3.tone(90), 181 | "error" : core.error.tone(80), 182 | "onError" : core.error.tone(20), 183 | "errorContainer" : core.error.tone(30), 184 | "onErrorContainer" : core.error.tone(80), 185 | "background" : core.n1.tone(10), 186 | "onBackground" : core.n1.tone(90), 187 | "surface" : core.n1.tone(10), 188 | "onSurface" : core.n1.tone(90), 189 | "surfaceVariant" : core.n2.tone(30), 190 | "onSurfaceVariant" : core.n2.tone(80), 191 | "outline" : core.n2.tone(60), 192 | "shadow" : core.n1.tone(0), 193 | "inverseSurface" : core.n1.tone(90), 194 | "inverseOnSurface" : core.n1.tone(20), 195 | "inversePrimary" : core.a1.tone(40) 196 | }) 197 | 198 | def toJSON(self): 199 | return json.dumps(self.props) 200 | -------------------------------------------------------------------------------- /material_color_utilities_python/score/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/avanisubbiah/material-color-utilities-python/66158f7efa69f1dc751a6a79c3bb4e6ae257c3bf/material_color_utilities_python/score/__init__.py -------------------------------------------------------------------------------- /material_color_utilities_python/score/score.py: -------------------------------------------------------------------------------- 1 | from ..hct.cam16 import * 2 | from ..utils.color_utils import * 3 | from ..utils.math_utils import * 4 | from collections import OrderedDict 5 | 6 | # /** 7 | # * Given a large set of colors, remove colors that are unsuitable for a UI 8 | # * theme, and rank the rest based on suitability. 9 | # * 10 | # * Enables use of a high cluster count for image quantization, thus ensuring 11 | # * colors aren't muddied, while curating the high cluster count to a much 12 | # * smaller number of appropriate choices. 13 | # */ 14 | class Score: 15 | def __init__(self): 16 | pass 17 | 18 | # /** 19 | # * Given a map with keys of colors and values of how often the color appears, 20 | # * rank the colors based on suitability for being used for a UI theme. 21 | # * 22 | # * @param colorsToPopulation map with keys of colors and values of how often 23 | # * the color appears, usually from a source image. 24 | # * @return Colors sorted by suitability for a UI theme. The most suitable 25 | # * color is the first item, the least suitable is the last. There will 26 | # * always be at least one color returned. If all the input colors 27 | # * were not suitable for a theme, a default fallback color will be 28 | # * provided, Google Blue. 29 | # */ 30 | # Using OrderedDict for JavaScript Map 31 | @staticmethod 32 | def score(colorsToPopulation): 33 | # // Determine the total count of all colors. 34 | populationSum = 0 35 | for population in colorsToPopulation.values(): 36 | populationSum += population 37 | # // Turn the count of each color into a proportion by dividing by the total 38 | # // count. Also, fill a cache of CAM16 colors representing each color, and 39 | # // record the proportion of colors for each CAM16 hue. 40 | colorsToProportion = OrderedDict() 41 | colorsToCam = OrderedDict() 42 | hueProportions = [0] * 361 43 | for (color, population) in colorsToPopulation.items(): 44 | proportion = population / populationSum 45 | colorsToProportion[color] = proportion 46 | cam = Cam16.fromInt(color) 47 | colorsToCam[color] = cam 48 | hue = round(cam.hue) 49 | hueProportions[hue] += proportion 50 | # // Determine the proportion of the colors around each color, by summing the 51 | # // proportions around each color's hue. 52 | colorsToExcitedProportion = OrderedDict() 53 | for (color, cam) in colorsToCam.items(): 54 | hue = round(cam.hue) 55 | excitedProportion = 0 56 | for i in range((hue - 15), (hue + 15)): 57 | neighborHue = sanitizeDegreesInt(i) 58 | excitedProportion += hueProportions[neighborHue] 59 | colorsToExcitedProportion[color] = excitedProportion 60 | # // Score the colors by their proportion, as well as how chromatic they are. 61 | colorsToScore = OrderedDict() 62 | for (color, cam) in colorsToCam.items(): 63 | proportion = colorsToExcitedProportion[color] 64 | proportionScore = proportion * 100.0 * Score.WEIGHT_PROPORTION 65 | chromaWeight = Score.WEIGHT_CHROMA_BELOW if cam.chroma < Score.TARGET_CHROMA else Score.WEIGHT_CHROMA_ABOVE 66 | chromaScore = (cam.chroma - Score.TARGET_CHROMA) * chromaWeight 67 | score = proportionScore + chromaScore 68 | colorsToScore[color] = score 69 | # // Remove colors that are unsuitable, ex. very dark or unchromatic colors. 70 | # // Also, remove colors that are very similar in hue. 71 | filteredColors = Score.filter(colorsToExcitedProportion, colorsToCam) 72 | dedupedColorsToScore = OrderedDict() 73 | for color in filteredColors: 74 | duplicateHue = False 75 | hue = colorsToCam[color].hue 76 | for alreadyChosenColor in dedupedColorsToScore: 77 | alreadyChosenHue = colorsToCam[alreadyChosenColor].hue 78 | if (differenceDegrees(hue, alreadyChosenHue) < 15): 79 | duplicateHue = True 80 | break 81 | if (duplicateHue): 82 | continue 83 | dedupedColorsToScore[color] = colorsToScore[color] 84 | # // Ensure the list of colors returned is sorted such that the first in the 85 | # // list is the most suitable, and the last is the least suitable. 86 | colorsByScoreDescending = list(dedupedColorsToScore.items()) 87 | colorsByScoreDescending.sort(reverse = True, key = lambda x: x[1]) 88 | answer = list(map(lambda x: x[0], colorsByScoreDescending)) 89 | # // Ensure that at least one color is returned. 90 | if (len(answer) == 0): 91 | answer.append(0xff4285F4) # // Google Blue 92 | return answer 93 | 94 | @staticmethod 95 | def filter(colorsToExcitedProportion, colorsToCam): 96 | filtered = [] 97 | for (color, cam) in colorsToCam.items(): 98 | proportion = colorsToExcitedProportion[color] 99 | if (cam.chroma >= Score.CUTOFF_CHROMA and 100 | lstarFromArgb(color) >= Score.CUTOFF_TONE and 101 | proportion >= Score.CUTOFF_EXCITED_PROPORTION): 102 | filtered.append(color) 103 | return filtered 104 | 105 | Score.TARGET_CHROMA = 48.0 106 | Score.WEIGHT_PROPORTION = 0.7 107 | Score.WEIGHT_CHROMA_ABOVE = 0.3 108 | Score.WEIGHT_CHROMA_BELOW = 0.1 109 | Score.CUTOFF_CHROMA = 15.0 110 | Score.CUTOFF_TONE = 10.0 111 | Score.CUTOFF_EXCITED_PROPORTION = 0.01 112 | -------------------------------------------------------------------------------- /material_color_utilities_python/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/avanisubbiah/material-color-utilities-python/66158f7efa69f1dc751a6a79c3bb4e6ae257c3bf/material_color_utilities_python/utils/__init__.py -------------------------------------------------------------------------------- /material_color_utilities_python/utils/color_utils.py: -------------------------------------------------------------------------------- 1 | from .math_utils import * 2 | import math 3 | 4 | # /** 5 | # * Color science utilities. 6 | # * 7 | # * Utility methods for color science constants and color space 8 | # * conversions that aren't HCT or CAM16. 9 | # */ 10 | SRGB_TO_XYZ = [ 11 | [0.41233895, 0.35762064, 0.18051042], 12 | [0.2126, 0.7152, 0.0722], 13 | [0.01932141, 0.11916382, 0.95034478], 14 | ] 15 | XYZ_TO_SRGB = [ 16 | [ 17 | 3.2413774792388685, 18 | -1.5376652402851851, 19 | -0.49885366846268053, 20 | ], 21 | [ 22 | -0.9691452513005321, 23 | 1.8758853451067872, 24 | 0.04156585616912061, 25 | ], 26 | [ 27 | 0.05562093689691305, 28 | -0.20395524564742123, 29 | 1.0571799111220335, 30 | ], 31 | ] 32 | 33 | WHITE_POINT_D65 = [95.047, 100.0, 108.883] 34 | 35 | # /** 36 | # * Converts a color from RGB components to ARGB format. 37 | # */ 38 | def rshift(val, n): return val>>n if val >= 0 else (val+0x100000000)>>n 39 | def argbFromRgb(red, green, blue): 40 | return rshift((255 << 24 | (red & 255) << 16 | (green & 255) << 8 | blue & 255), 0) 41 | 42 | # /** 43 | # * Returns the alpha component of a color in ARGB format. 44 | # */ 45 | def alphaFromArgb(argb): 46 | return argb >> 24 & 255 47 | 48 | # /** 49 | # * Returns the red component of a color in ARGB format. 50 | # */ 51 | def redFromArgb(argb): 52 | return argb >> 16 & 255 53 | 54 | # /** 55 | # * Returns the green component of a color in ARGB format. 56 | # */ 57 | def greenFromArgb(argb): 58 | return argb >> 8 & 255 59 | 60 | # /** 61 | # * Returns the blue component of a color in ARGB format. 62 | # */ 63 | def blueFromArgb(argb): 64 | return argb & 255 65 | 66 | # /** 67 | # * Returns whether a color in ARGB format is opaque. 68 | # */ 69 | def isOpaque(argb): 70 | return alphaFromArgb(argb) >= 255 71 | 72 | # /** 73 | # * Converts a color from ARGB to XYZ. 74 | # */ 75 | def argbFromXyz(x, y, z): 76 | matrix = XYZ_TO_SRGB 77 | linearR = matrix[0][0] * x + matrix[0][1] * y + matrix[0][2] * z 78 | linearG = matrix[1][0] * x + matrix[1][1] * y + matrix[1][2] * z 79 | linearB = matrix[2][0] * x + matrix[2][1] * y + matrix[2][2] * z 80 | r = delinearized(linearR) 81 | g = delinearized(linearG) 82 | b = delinearized(linearB) 83 | return argbFromRgb(r, g, b) 84 | 85 | # /** 86 | # * Converts a color from XYZ to ARGB. 87 | # */ 88 | def xyzFromArgb(argb): 89 | r = linearized(redFromArgb(argb)) 90 | g = linearized(greenFromArgb(argb)) 91 | b = linearized(blueFromArgb(argb)) 92 | return matrixMultiply([r, g, b], SRGB_TO_XYZ) 93 | 94 | # /** 95 | # * Converts a color represented in Lab color space into an ARGB 96 | # * integer. 97 | # */ 98 | def labInvf(ft): 99 | e = 216.0 / 24389.0 100 | kappa = 24389.0 / 27.0 101 | ft3 = ft * ft * ft 102 | if (ft3 > e): 103 | return ft3 104 | else: 105 | return (116 * ft - 16) / kappa 106 | 107 | def argbFromLab(l, a, b): 108 | whitePoint = WHITE_POINT_D65 109 | fy = (l + 16.0) / 116.0 110 | fx = a / 500.0 + fy 111 | fz = fy - b / 200.0 112 | xNormalized = labInvf(fx) 113 | yNormalized = labInvf(fy) 114 | zNormalized = labInvf(fz) 115 | x = xNormalized * whitePoint[0] 116 | y = yNormalized * whitePoint[1] 117 | z = zNormalized * whitePoint[2] 118 | return argbFromXyz(x, y, z) 119 | 120 | # /** 121 | # * Converts a color from ARGB representation to L*a*b* 122 | # * representation. 123 | # * 124 | # * @param argb the ARGB representation of a color 125 | # * @return a Lab object representing the color 126 | # */ 127 | def labF(t): 128 | e = 216.0 / 24389.0 129 | kappa = 24389.0 / 27.0 130 | if (t > e): 131 | return math.pow(t, 1.0 / 3.0) 132 | else: 133 | return (kappa * t + 16) / 116 134 | 135 | def labFromArgb(argb): 136 | linearR = linearized(redFromArgb(argb)) 137 | linearG = linearized(greenFromArgb(argb)) 138 | linearB = linearized(blueFromArgb(argb)) 139 | matrix = SRGB_TO_XYZ 140 | x = matrix[0][0] * linearR + matrix[0][1] * linearG + matrix[0][2] * linearB 141 | y = matrix[1][0] * linearR + matrix[1][1] * linearG + matrix[1][2] * linearB 142 | z = matrix[2][0] * linearR + matrix[2][1] * linearG + matrix[2][2] * linearB 143 | whitePoint = WHITE_POINT_D65 144 | xNormalized = x / whitePoint[0] 145 | yNormalized = y / whitePoint[1] 146 | zNormalized = z / whitePoint[2] 147 | fx = labF(xNormalized) 148 | fy = labF(yNormalized) 149 | fz = labF(zNormalized) 150 | l = 116.0 * fy - 16 151 | a = 500.0 * (fx - fy) 152 | b = 200.0 * (fy - fz) 153 | return [l, a, b] 154 | 155 | # /** 156 | # * Converts an L* value to an ARGB representation. 157 | # * 158 | # * @param lstar L* in L*a*b* 159 | # * @return ARGB representation of grayscale color with lightness 160 | # * matching L* 161 | # */ 162 | def argbFromLstar(lstar): 163 | fy = (lstar + 16.0) / 116.0 164 | fz = fy 165 | fx = fy 166 | kappa = 24389.0 / 27.0 167 | epsilon = 216.0 / 24389.0 168 | lExceedsEpsilonKappa = lstar > 8.0 169 | y = fy * fy * fy if lExceedsEpsilonKappa else lstar / kappa 170 | cubeExceedEpsilon = fy * fy * fy > epsilon 171 | x = fx * fx * fx if cubeExceedEpsilon else lstar / kappa 172 | z = fz * fz * fz if cubeExceedEpsilon else lstar / kappa 173 | whitePoint = WHITE_POINT_D65 174 | return argbFromXyz(x * whitePoint[0], y * whitePoint[1], z * whitePoint[2]) 175 | 176 | # /** 177 | # * Computes the L* value of a color in ARGB representation. 178 | # * 179 | # * @param argb ARGB representation of a color 180 | # * @return L*, from L*a*b*, coordinate of the color 181 | # */ 182 | def lstarFromArgb(argb): 183 | y = xyzFromArgb(argb)[1] / 100.0 184 | e = 216.0 / 24389.0 185 | if (y <= e): 186 | return 24389.0 / 27.0 * y 187 | else: 188 | yIntermediate = math.pow(y, 1.0 / 3.0) 189 | return 116.0 * yIntermediate - 16.0 190 | 191 | # /** 192 | # * Converts an L* value to a Y value. 193 | # * 194 | # * L* in L*a*b* and Y in XYZ measure the same quantity, luminance. 195 | # * 196 | # * L* measures perceptual luminance, a linear scale. Y in XYZ 197 | # * measures relative luminance, a logarithmic scale. 198 | # * 199 | # * @param lstar L* in L*a*b* 200 | # * @return Y in XYZ 201 | # */ 202 | def yFromLstar(lstar): 203 | ke = 8.0 204 | if (lstar > ke): 205 | return math.pow((lstar + 16.0) / 116.0, 3.0) * 100.0 206 | else: 207 | return lstar / (24389.0 / 27.0) * 100.0 208 | 209 | # /** 210 | # * Linearizes an RGB component. 211 | # * 212 | # * @param rgbComponent 0 <= rgb_component <= 255, represents R/G/B 213 | # * channel 214 | # * @return 0.0 <= output <= 100.0, color channel converted to 215 | # * linear RGB space 216 | # */ 217 | def linearized(rgbComponent): 218 | normalized = rgbComponent / 255.0 219 | if (normalized <= 0.040449936): 220 | return normalized / 12.92 * 100.0 221 | else: 222 | return math.pow((normalized + 0.055) / 1.055, 2.4) * 100.0 223 | 224 | 225 | # /** 226 | # * Delinearizes an RGB component. 227 | # * 228 | # * @param rgbComponent 0.0 <= rgb_component <= 100.0, represents 229 | # * linear R/G/B channel 230 | # * @return 0 <= output <= 255, color channel converted to regular 231 | # * RGB space 232 | # */ 233 | def delinearized(rgbComponent): 234 | normalized = rgbComponent / 100.0 235 | delinearized = 0.0 236 | if (normalized <= 0.0031308): 237 | delinearized = normalized * 12.92 238 | else: 239 | delinearized = 1.055 * math.pow(normalized, 1.0 / 2.4) - 0.055 240 | return clampInt(0, 255, round(delinearized * 255.0)) 241 | 242 | # /** 243 | # * Returns the standard white point white on a sunny day. 244 | # * 245 | # * @return The white point 246 | # */ 247 | def whitePointD65(): 248 | return WHITE_POINT_D65 249 | 250 | 251 | 252 | 253 | -------------------------------------------------------------------------------- /material_color_utilities_python/utils/image_utils.py: -------------------------------------------------------------------------------- 1 | from ..quantize.quantizer_celebi import * 2 | from ..score.score import * 3 | from .color_utils import * 4 | from PIL import Image 5 | 6 | # /** 7 | # * Get the source color from an image. 8 | # * 9 | # * @param image The image element 10 | # * @return Source color - the color most suitable for creating a UI theme 11 | # */ 12 | def sourceColorFromImage(image): 13 | # // Convert Image data to Pixel Array 14 | # const imageBytes = await new Promise((resolve, reject) => { 15 | # const canvas = document.createElement('canvas'); 16 | # const context = canvas.getContext('2d'); 17 | # if (!context) { 18 | # return reject(new Error('Could not get canvas context')); 19 | # } 20 | # image.onload = () => { 21 | # canvas.width = image.width; 22 | # canvas.height = image.height; 23 | # context.drawImage(image, 0, 0); 24 | # resolve(context.getImageData(0, 0, image.width, image.height).data); 25 | # }; 26 | # }); 27 | # // Convert Image data to Pixel Array 28 | # const pixels = []; 29 | # for (let i = 0; i < imageBytes.length; i += 4) { 30 | # const r = imageBytes[i]; 31 | # const g = imageBytes[i + 1]; 32 | # const b = imageBytes[i + 2]; 33 | # const a = imageBytes[i + 3]; 34 | # if (a < 255) { 35 | # continue; 36 | # } 37 | # const argb = argbFromRgb(r, g, b); 38 | # pixels.push(argb); 39 | # } 40 | if (image.mode == 'RGB'): 41 | image = image.convert('RGBA') 42 | if (image.mode != 'RGBA'): 43 | print("Warning: Image not in RGB|RGBA format - Converting...") 44 | image = image.convert('RGBA') 45 | 46 | pixels = [] 47 | for x in range(image.width): 48 | for y in range(image.height): 49 | # for the given pixel at w,h, lets check its value against the threshold 50 | pixel = image.getpixel((x, y)) 51 | r = pixel[0] 52 | g = pixel[1] 53 | b = pixel[2] 54 | a = pixel[3] 55 | if (a < 255): 56 | continue 57 | argb = argbFromRgb(r, g, b) 58 | pixels.append(argb) 59 | 60 | # // Convert Pixels to Material Colors 61 | result = QuantizerCelebi.quantize(pixels, 128) 62 | ranked = Score.score(result) 63 | top = ranked[0] 64 | return top 65 | -------------------------------------------------------------------------------- /material_color_utilities_python/utils/math_utils.py: -------------------------------------------------------------------------------- 1 | # 2 | # Utility methods for mathematical operations. 3 | # 4 | # 5 | # The signum function. 6 | # 7 | # @return 1 if num > 0, -1 if num < 0, and 0 if num = 0 8 | # 9 | def signum(num): 10 | if (num < 0): 11 | return -1 12 | elif (num == 0): 13 | return 0 14 | else: 15 | return 1 16 | 17 | # /** 18 | # * The linear interpolation function. 19 | # * 20 | # * @return start if amount = 0 and stop if amount = 1 21 | # */ 22 | def lerp(start, stop, amount): 23 | return (1.0 - amount) * start + amount * stop 24 | 25 | # /** 26 | # * Clamps an integer between two integers. 27 | # * 28 | # * @return input when min <= input <= max, and either min or max 29 | # * otherwise. 30 | # */ 31 | def clampInt(min, max, input): 32 | if (input < min): 33 | return min 34 | elif (input > max): 35 | return max 36 | return input 37 | 38 | # /** 39 | # * Clamps an integer between two floating-point numbers. 40 | # * 41 | # * @return input when min <= input <= max, and either min or max 42 | # * otherwise. 43 | # */ 44 | def clampDouble(min, max, input): 45 | if (input < min): 46 | return min 47 | elif (input > max): 48 | return max 49 | return input 50 | 51 | # /** 52 | # * Sanitizes a degree measure as an integer. 53 | # * 54 | # * @return a degree measure between 0 (inclusive) and 360 55 | # * (exclusive). 56 | # */ 57 | def sanitizeDegreesInt(degrees): 58 | degrees = degrees % 360 59 | if (degrees < 0): 60 | degrees = degrees + 360 61 | return degrees 62 | 63 | # /** 64 | # * Sanitizes a degree measure as a floating-point number. 65 | # * 66 | # * @return a degree measure between 0.0 (inclusive) and 360.0 67 | # * (exclusive). 68 | # */ 69 | def sanitizeDegreesDouble(degrees): 70 | degrees = degrees % 360.0 71 | if (degrees < 0): 72 | degrees = degrees + 360.0 73 | return degrees 74 | 75 | # /** 76 | # * Distance of two points on a circle, represented using degrees. 77 | # */ 78 | def differenceDegrees(a, b): 79 | return 180.0 - abs(abs(a - b) - 180.0) 80 | 81 | # /** 82 | # * Multiplies a 1x3 row vector with a 3x3 matrix. 83 | # */ 84 | def matrixMultiply(row, matrix): 85 | a = row[0] * matrix[0][0] + row[1] * matrix[0][1] + row[2] * matrix[0][2] 86 | b = row[0] * matrix[1][0] + row[1] * matrix[1][1] + row[2] * matrix[1][2] 87 | c = row[0] * matrix[2][0] + row[1] * matrix[2][1] + row[2] * matrix[2][2] 88 | return [a, b, c]; -------------------------------------------------------------------------------- /material_color_utilities_python/utils/string_utils.py: -------------------------------------------------------------------------------- 1 | from .color_utils import * 2 | 3 | # /** 4 | # * Utility methods for hexadecimal representations of colors. 5 | # */ 6 | # /** 7 | # * @param argb ARGB representation of a color. 8 | # * @return Hex string representing color, ex. #ff0000 for red. 9 | # */ 10 | def hexFromArgb(argb): 11 | r = redFromArgb(argb) 12 | g = greenFromArgb(argb) 13 | b = blueFromArgb(argb) 14 | outParts = [f'{r:x}', f'{g:x}', f'{b:x}'] 15 | # Pad single-digit output values 16 | for i, part in enumerate(outParts): 17 | if (len(part) == 1): 18 | outParts[i] = '0' + part 19 | return '#' + ''.join(outParts) 20 | 21 | # /** 22 | # * @param hex String representing color as hex code. Accepts strings with or 23 | # * without leading #, and string representing the color using 3, 6, or 8 24 | # * hex characters. 25 | # * @return ARGB representation of color. 26 | # */ 27 | def parseIntHex(value): 28 | # tslint:disable-next-line:ban 29 | return int(value, 16) 30 | 31 | def argbFromHex(hex): 32 | hex = hex.replace('#', '') 33 | isThree = len(hex) == 3 34 | isSix = len(hex) == 6 35 | isEight = len(hex) == 8 36 | if (not isThree and not isSix and not isEight): 37 | raise Exception('unexpected hex ' + hex) 38 | 39 | r = 0 40 | g = 0 41 | b = 0 42 | if (isThree): 43 | r = parseIntHex(hex[0:1]*2) 44 | g = parseIntHex(hex[1:2]*2) 45 | b = parseIntHex(hex[2:3]*2) 46 | elif (isSix): 47 | r = parseIntHex(hex[0:2]) 48 | g = parseIntHex(hex[2:4]) 49 | b = parseIntHex(hex[4:6]) 50 | elif (isEight): 51 | r = parseIntHex(hex[2:4]) 52 | g = parseIntHex(hex[4:6]) 53 | b = parseIntHex(hex[6:8]) 54 | 55 | return rshift(((255 << 24) | ((r & 0x0ff) << 16) | ((g & 0x0ff) << 8) | (b & 0x0ff)), 0) 56 | -------------------------------------------------------------------------------- /material_color_utilities_python/utils/theme_utils.py: -------------------------------------------------------------------------------- 1 | from ..blend.blend import * 2 | from ..palettes.core_palette import * 3 | from ..scheme.scheme import * 4 | from .image_utils import * 5 | from .string_utils import * 6 | 7 | # /** 8 | # * Generate custom color group from source and target color 9 | # * 10 | # * @param source Source color 11 | # * @param color Custom color 12 | # * @return Custom color group 13 | # * 14 | # * @link https://m3.material.io/styles/color/the-color-system/color-roles 15 | # */ 16 | # NOTE: Changes made to output format to be Dictionary 17 | def customColor(source, color): 18 | value = color["value"] 19 | from_v = value 20 | to = source 21 | if (color["blend"]): 22 | value = Blend.harmonize(from_v, to) 23 | palette = CorePalette.of(value) 24 | tones = palette.a1 25 | return { 26 | "color": color, 27 | "value": value, 28 | "light": { 29 | "color": tones.tone(40), 30 | "onColor": tones.tone(100), 31 | "colorContainer": tones.tone(90), 32 | "onColorContainer": tones.tone(10), 33 | }, 34 | "dark": { 35 | "color": tones.tone(80), 36 | "onColor": tones.tone(20), 37 | "colorContainer": tones.tone(30), 38 | "onColorContainer": tones.tone(90), 39 | }, 40 | } 41 | 42 | # /** 43 | # * Generate a theme from a source color 44 | # * 45 | # * @param source Source color 46 | # * @param customColors Array of custom colors 47 | # * @return Theme object 48 | # */ 49 | # NOTE: Changes made to output format to be Dictionary 50 | def themeFromSourceColor(source, customColors = []): 51 | palette = CorePalette.of(source) 52 | return { 53 | "source": source, 54 | "schemes": { 55 | "light": Scheme.light(source), 56 | "dark": Scheme.dark(source), 57 | }, 58 | "palettes": { 59 | "primary": palette.a1, 60 | "secondary": palette.a2, 61 | "tertiary": palette.a3, 62 | "neutral": palette.n1, 63 | "neutralVariant": palette.n2, 64 | "error": palette.error, 65 | }, 66 | "customColors": [customColor(source, c) for c in customColors] 67 | } 68 | 69 | # /** 70 | # * Generate a theme from an image source 71 | # * 72 | # * @param image Image element 73 | # * @param customColors Array of custom colors 74 | # * @return Theme object 75 | # */ 76 | def themeFromImage(image, customColors = []): 77 | source = sourceColorFromImage(image) 78 | return themeFromSourceColor(source, customColors) 79 | 80 | 81 | # Not really applicable to python CLI 82 | # # /** 83 | # # * Apply a theme to an element 84 | # # * 85 | # # * @param theme Theme object 86 | # # * @param options Options 87 | # # */ 88 | # export function applyTheme(theme, options) { 89 | # var _a; 90 | # const target = (options === null || options === void 0 ? void 0 : options.target) || document.body; 91 | # const isDark = (_a = options === null || options === void 0 ? void 0 : options.dark) !== null && _a !== void 0 ? _a : false; 92 | # const scheme = isDark ? theme.schemes.dark : theme.schemes.light; 93 | # for (const [key, value] of Object.entries(scheme.toJSON())) { 94 | # const token = key.replace(/([a-z])([A-Z])/g, "$1-$2").toLowerCase(); 95 | # const color = hexFromArgb(value); 96 | # target.style.setProperty(`--md-sys-color-${token}`, color); 97 | # } 98 | # } 99 | # //# sourceMappingURL=theme_utils.js.map -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "material-color-utilities-python" 3 | version = "0.1.3" 4 | description = "Python port of material-color-utilities used for Material You colors" 5 | authors = ["Avanish Subbiah ", "0xMRTT", "David Lapshin"] 6 | license = "Apache 2.0" 7 | 8 | [tool.poetry.dependencies] 9 | Pillow = "^9.2.0" 10 | regex = "*" 11 | 12 | [tool.poetry.dev-dependencies] 13 | 14 | [build-system] 15 | requires = ["poetry-core>=1.0.0"] 16 | build-backend = "poetry.core.masonry.api" 17 | --------------------------------------------------------------------------------