├── .gitignore ├── .travis.yml ├── LICENSE ├── Pipfile ├── Pipfile.lock ├── README.md ├── docs ├── blurry_ground2x2_bad.jpg ├── blurry_ground2x2_good.jpg ├── blurry_ground_bad.jpg ├── blurry_ground_good.jpg ├── composed.png ├── cropped-sunset.png ├── diptych-parrot.png ├── diptych.png ├── docs.svg ├── dropshadow.png ├── generate.py ├── gibbons.jpg ├── gradient.png ├── ground.jpg ├── ground2x2.jpg ├── index.html ├── index.md ├── island.png ├── island_strip.png ├── isles.png ├── noise.png ├── parrot.png ├── quad.png ├── reference.html ├── reference.md ├── sdf.png ├── small.exr ├── snowflake.png ├── snowflake64.png ├── snowy.jpg ├── snowy2.png ├── tiled_noise_bad.png ├── tiled_noise_good.png ├── tiled_sdf_bad.png ├── tiled_sdf_good.png └── xforms.png ├── setup.py ├── snowy ├── __init__.py ├── color.py ├── distance.py ├── draw.py ├── filtering.py ├── io.py ├── lighting.py ├── noise.py └── ops.py └── tests ├── demo.py ├── gamma_dalai_lama_gray.jpg ├── islands.png ├── overlay.png ├── performance.py ├── small_dalai_lama.png ├── sobel_input.png ├── terrain.png ├── test.html ├── test_color.py ├── test_dist.py ├── test_draw.py ├── test_io.py ├── test_lighting.py ├── test_snowy.py └── texture.png /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .env 3 | .flaskenv 4 | *.pyc 5 | *.pyo 6 | env/ 7 | env* 8 | dist/ 9 | build/ 10 | *.egg 11 | *.egg-info/ 12 | _mailinglist 13 | .tox/ 14 | .cache/ 15 | .pytest_cache/ 16 | .idea/ 17 | docs/_build/ 18 | __pycache__ 19 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: "3.6" 3 | install: 4 | - pip install numpy imageio numba pytest 5 | script: 6 | - python setup.py sdist bdist_wheel 7 | - python setup.py install 8 | # - python setup.py test 9 | 10 | #- pip install beautifulsoup4 Pygments CommonMark scipy 11 | #- 
python docs/generate.py 12 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Philip Rideout 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Pipfile: -------------------------------------------------------------------------------- 1 | [[source]] 2 | url = "https://pypi.org/simple" 3 | verify_ssl = true 4 | name = "pypi" 5 | 6 | [packages] 7 | "bs4" = "*" 8 | pygments = "*" 9 | commonmark = "*" 10 | numpy = "*" 11 | imageio = "*" 12 | numba = "*" 13 | scipy = "*" 14 | pytest = "*" 15 | scikit-image = "*" 16 | twine = "*" 17 | 18 | [dev-packages] 19 | 20 | [requires] 21 | python_version = "3.7" 22 | -------------------------------------------------------------------------------- /Pipfile.lock: -------------------------------------------------------------------------------- 1 | { 2 | "_meta": { 3 | "hash": { 4 | "sha256": "21f5396beea359073eec389faec7af1d38d34f4ac29bfb34ffc322d09e2c2d43" 5 | }, 6 | "pipfile-spec": 6, 7 | "requires": { 8 | "python_version": "3.7" 9 | }, 10 | "sources": [ 11 | { 12 | "name": "pypi", 13 | "url": "https://pypi.org/simple", 14 | "verify_ssl": true 15 | } 16 | ] 17 | }, 18 | "default": { 19 | "atomicwrites": { 20 | "hashes": [ 21 | "sha256:03472c30eb2c5d1ba9227e4c2ca66ab8287fbfbbda3888aa93dc2e28fc6811b4", 22 | "sha256:75a9445bac02d8d058d5e1fe689654ba5a6556a1dfd8ce6ec55a0ed79866cfa6" 23 | ], 24 | "version": "==1.3.0" 25 | }, 26 | "attrs": { 27 | "hashes": [ 28 | "sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c", 29 | "sha256:f7b7ce16570fe9965acd6d30101a28f62fb4a7f9e926b3bbc9b61f8b04247e72" 30 | ], 31 | "version": "==19.3.0" 32 | }, 33 | "beautifulsoup4": { 34 | "hashes": [ 35 | "sha256:05fd825eb01c290877657a56df4c6e4c311b3965bda790c613a3d6fb01a5462a", 36 | "sha256:9fbb4d6e48ecd30bcacc5b63b94088192dcda178513b2ae3c394229f8911b887", 37 | "sha256:e1505eeed31b0f4ce2dbb3bc8eb256c04cc2b3b72af7d551a4ab6efd5cbe5dae" 38 | ], 39 | "version": "==4.8.2" 40 | }, 41 | "bleach": { 42 | "hashes": [ 43 | "sha256:44f69771e2ac81ff30d929d485b7f9919f3ad6d019b6b20c74f3b8687c3f70df", 44 | 
"sha256:aa8b870d0f46965bac2c073a93444636b0e1ca74e9777e34f03dd494b8a59d48" 45 | ], 46 | "index": "pypi", 47 | "version": "==3.1.1" 48 | }, 49 | "bs4": { 50 | "hashes": [ 51 | "sha256:36ecea1fd7cc5c0c6e4a1ff075df26d50da647b75376626cc186e2212886dd3a" 52 | ], 53 | "index": "pypi", 54 | "version": "==0.0.1" 55 | }, 56 | "certifi": { 57 | "hashes": [ 58 | "sha256:017c25db2a153ce562900032d5bc68e9f191e44e9a0f762f373977de9df1fbb3", 59 | "sha256:25b64c7da4cd7479594d035c08c2d809eb4aab3a26e5a990ea98cc450c320f1f" 60 | ], 61 | "version": "==2019.11.28" 62 | }, 63 | "chardet": { 64 | "hashes": [ 65 | "sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae", 66 | "sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691" 67 | ], 68 | "version": "==3.0.4" 69 | }, 70 | "commonmark": { 71 | "hashes": [ 72 | "sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60", 73 | "sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9" 74 | ], 75 | "index": "pypi", 76 | "version": "==0.9.1" 77 | }, 78 | "cycler": { 79 | "hashes": [ 80 | "sha256:1d8a5ae1ff6c5cf9b93e8811e581232ad8920aeec647c37316ceac982b08cb2d", 81 | "sha256:cd7b2d1018258d7247a71425e9f26463dfb444d411c39569972f4ce586b0c9d8" 82 | ], 83 | "version": "==0.10.0" 84 | }, 85 | "decorator": { 86 | "hashes": [ 87 | "sha256:54c38050039232e1db4ad7375cfce6748d7b41c29e95a081c8a6d2c30364a2ce", 88 | "sha256:5d19b92a3c8f7f101c8dd86afd86b0f061a8ce4540ab8cd401fa2542756bce6d" 89 | ], 90 | "version": "==4.4.1" 91 | }, 92 | "docutils": { 93 | "hashes": [ 94 | "sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af", 95 | "sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc" 96 | ], 97 | "version": "==0.16" 98 | }, 99 | "idna": { 100 | "hashes": [ 101 | "sha256:7588d1c14ae4c77d74036e8c22ff447b26d0fde8f007354fd48a7814db15b7cb", 102 | "sha256:a068a21ceac8a4d63dbfd964670474107f541babbd2250d61922f029858365fa" 103 | ], 104 | "version": "==2.9" 105 | }, 
106 | "imageio": { 107 | "hashes": [ 108 | "sha256:c9763e5c187ecf74091c845626b0bdcc6130a20a0de7a86ae0108e2b5335ed3f", 109 | "sha256:f44eb231b9df485874f2ffd22dfd0c3c711e7de076516b9374edea5c65bc67ae" 110 | ], 111 | "index": "pypi", 112 | "version": "==2.6.1" 113 | }, 114 | "importlib-metadata": { 115 | "hashes": [ 116 | "sha256:06f5b3a99029c7134207dd882428a66992a9de2bef7c2b699b5641f9886c3302", 117 | "sha256:b97607a1a18a5100839aec1dc26a1ea17ee0d93b20b0f008d80a5a050afb200b" 118 | ], 119 | "markers": "python_version < '3.8'", 120 | "version": "==1.5.0" 121 | }, 122 | "kiwisolver": { 123 | "hashes": [ 124 | "sha256:05b5b061e09f60f56244adc885c4a7867da25ca387376b02c1efc29cc16bcd0f", 125 | "sha256:210d8c39d01758d76c2b9a693567e1657ec661229bc32eac30761fa79b2474b0", 126 | "sha256:26f4fbd6f5e1dabff70a9ba0d2c4bd30761086454aa30dddc5b52764ee4852b7", 127 | "sha256:3b15d56a9cd40c52d7ab763ff0bc700edbb4e1a298dc43715ecccd605002cf11", 128 | "sha256:3b2378ad387f49cbb328205bda569b9f87288d6bc1bf4cd683c34523a2341efe", 129 | "sha256:400599c0fe58d21522cae0e8b22318e09d9729451b17ee61ba8e1e7c0346565c", 130 | "sha256:47b8cb81a7d18dbaf4fed6a61c3cecdb5adec7b4ac292bddb0d016d57e8507d5", 131 | "sha256:53eaed412477c836e1b9522c19858a8557d6e595077830146182225613b11a75", 132 | "sha256:58e626e1f7dfbb620d08d457325a4cdac65d1809680009f46bf41eaf74ad0187", 133 | "sha256:5a52e1b006bfa5be04fe4debbcdd2688432a9af4b207a3f429c74ad625022641", 134 | "sha256:5c7ca4e449ac9f99b3b9d4693debb1d6d237d1542dd6a56b3305fe8a9620f883", 135 | "sha256:682e54f0ce8f45981878756d7203fd01e188cc6c8b2c5e2cf03675390b4534d5", 136 | "sha256:76275ee077772c8dde04fb6c5bc24b91af1bb3e7f4816fd1852f1495a64dad93", 137 | "sha256:79bfb2f0bd7cbf9ea256612c9523367e5ec51d7cd616ae20ca2c90f575d839a2", 138 | "sha256:7f4dd50874177d2bb060d74769210f3bce1af87a8c7cf5b37d032ebf94f0aca3", 139 | "sha256:8944a16020c07b682df861207b7e0efcd2f46c7488619cb55f65882279119389", 140 | "sha256:8aa7009437640beb2768bfd06da049bad0df85f47ff18426261acecd1cf00897", 141 | 
"sha256:9105ce82dcc32c73eb53a04c869b6a4bc756b43e4385f76ea7943e827f529e4d", 142 | "sha256:933df612c453928f1c6faa9236161a1d999a26cd40abf1dc5d7ebbc6dbfb8fca", 143 | "sha256:939f36f21a8c571686eb491acfffa9c7f1ac345087281b412d63ea39ca14ec4a", 144 | "sha256:9491578147849b93e70d7c1d23cb1229458f71fc79c51d52dce0809b2ca44eea", 145 | "sha256:9733b7f64bd9f807832d673355f79703f81f0b3e52bfce420fc00d8cb28c6a6c", 146 | "sha256:a02f6c3e229d0b7220bd74600e9351e18bc0c361b05f29adae0d10599ae0e326", 147 | "sha256:a0c0a9f06872330d0dd31b45607197caab3c22777600e88031bfe66799e70bb0", 148 | "sha256:aa716b9122307c50686356cfb47bfbc66541868078d0c801341df31dca1232a9", 149 | "sha256:acc4df99308111585121db217681f1ce0eecb48d3a828a2f9bbf9773f4937e9e", 150 | "sha256:b64916959e4ae0ac78af7c3e8cef4becee0c0e9694ad477b4c6b3a536de6a544", 151 | "sha256:d22702cadb86b6fcba0e6b907d9f84a312db9cd6934ee728144ce3018e715ee1", 152 | "sha256:d3fcf0819dc3fea58be1fd1ca390851bdb719a549850e708ed858503ff25d995", 153 | "sha256:d52e3b1868a4e8fd18b5cb15055c76820df514e26aa84cc02f593d99fef6707f", 154 | "sha256:db1a5d3cc4ae943d674718d6c47d2d82488ddd94b93b9e12d24aabdbfe48caee", 155 | "sha256:e3a21a720791712ed721c7b95d433e036134de6f18c77dbe96119eaf7aa08004", 156 | "sha256:e8bf074363ce2babeb4764d94f8e65efd22e6a7c74860a4f05a6947afc020ff2", 157 | "sha256:f16814a4a96dc04bf1da7d53ee8d5b1d6decfc1a92a63349bb15d37b6a263dd9", 158 | "sha256:f2b22153870ca5cf2ab9c940d7bc38e8e9089fa0f7e5856ea195e1cf4ff43d5a", 159 | "sha256:f790f8b3dff3d53453de6a7b7ddd173d2e020fb160baff578d578065b108a05f", 160 | "sha256:fe51b79da0062f8e9d49ed0182a626a7dc7a0cbca0328f612c6ee5e4711c81e4" 161 | ], 162 | "version": "==1.1.0" 163 | }, 164 | "llvmlite": { 165 | "hashes": [ 166 | "sha256:00fdf20d2f4972a00bcfaa9ce62ee55208ae5df38906a198650ddf91ba6a5fba", 167 | "sha256:1a917b1b27895d0707078028ae842b3184e617ac04014965b197212e00d8e057", 168 | "sha256:22ab2b9d7ec79fab66ac8b3d2133347de86addc2e2df1b3793e523ac84baa3c8", 169 | 
"sha256:312cca3af9f539a81432cd47f06a621d895c21e8cb1db2f9cfb22acd7fb69fa5", 170 | "sha256:3601a869da83e5fb1abbe55ecfcecb957f9d899179a4ef66fc4b3b82b461fc5a", 171 | "sha256:363738f3eb3c6bed65cac38f295ff81a19a74e5aeab3d02e4e3b820279d8e36a", 172 | "sha256:4e8154d1496a58157652fad1b13d817f73db80eb85b5915d49a9573a26655e38", 173 | "sha256:5593acefd5f01765ee403ea4b0288a59aff2276eb8f5241deb2e52018219a66a", 174 | "sha256:5a771ae6398c24117797e9fa0fa70def5334eeab918beaafd40718e80e5f936a", 175 | "sha256:5bcbb90807a42cd7f47d32e25e270a6886f89ae6783059ee3e1e4aa13d13f2a0", 176 | "sha256:5c676a549f44e559d7406824de83f659995b13b80c18aca4760981c9049a1a2d", 177 | "sha256:607fb3738cdc803ffcc69ef3732595f7c66c203c5a7eea35b26f89822fb2baaa", 178 | "sha256:6699b0e6637f4f8624c2dd069b7b427da582f7ca3900f080bfebb5ff556272bd", 179 | "sha256:6e4129f2d33a76c1415d2808bae58a15d5480b1cc09fd3e80fc8d0e25bf6c27e", 180 | "sha256:7e8baa6d5b4383ca7b0abe0e8968b16d4e6b1c0691cbcef18eab298e9c840b5e", 181 | "sha256:7f4bffdf6cb4f5bab4060ea57cdc11344d9b406227efb8ae1c5e4823e8d146d1", 182 | "sha256:8189c8ad167ef8a65f4bbf9cbff19efbec7f0392996eec18f5e6b274462111b7", 183 | "sha256:81fc9ac6682a41f53bca113a8f97959b92728112f91714e850a72dc6d9dd8ced", 184 | "sha256:947b81539aa751ff627626172a3bdb0ba0a92bfd1de1847f4c1ad7928ea2ea70", 185 | "sha256:94c5c625088a9ddc0fcd2953f1e7cd94038e3c90b24522a5ba10559b2dd7f563", 186 | "sha256:c842b722a38370ef5836e16cf73981da8f2f3765cead55b2d51fc87c65840f70", 187 | "sha256:c999bbf838b7b29b81e070517ec2dad7f408428e4651f779028e0a35ed6f2dea", 188 | "sha256:d2a4e62b9f703bb669bb78ca45202dfedf6a1df000866d3ed694f29a85a73f4f", 189 | "sha256:d45b733f5ac76838a20c56d19d6a3032b856c2cedf7a65ce2a4e8a45f4062f93", 190 | "sha256:ebc86e3bb85fd2ad7f88ce80507350814719195d7555a802cd628713d3c08883", 191 | "sha256:fdba22341cefbdd7b8d57cc23256b5d5ab16faa64d744d66ecae6a0af86b1f9b" 192 | ], 193 | "version": "==0.31.0" 194 | }, 195 | "matplotlib": { 196 | "hashes": [ 197 | 
"sha256:23b71560c721109954c0215ffc81f4c80ce8528749d534a01a61e8ab737c5bce", 198 | "sha256:4164265ca573481ce61c83322e6b33628203afeabeb3e22c50376f5d3ee0f9be", 199 | "sha256:470eed601ff5132364e0121a20d7c3d43fab969c8c333422c1b6b72fde2ed3c1", 200 | "sha256:635ded7834f43c8d999076236f7e90074d77f7b8345e5e82cd95af053cc29df1", 201 | "sha256:6a0031774c6c68298183438edf2e738856d63a4c4797876fa81d0ee337f5361c", 202 | "sha256:78d0772412c0653aa3e860c52ff08d1f5ba64334e2b86b09dc2d502657d8ca73", 203 | "sha256:8efff896c49676700dc6adace6137a854ff64a4d44ca057ff726960ffdaa47bf", 204 | "sha256:97f04d29a358826f205320fbc88d46ce5c5ff6fb54ae050042ff396beda52ca4", 205 | "sha256:b4c0010eff09ab65c77ad1a0eec6c7cccb9f6838c3c77dc5b4002fe0cf2912fd", 206 | "sha256:b5ace0531255932ad19fe64c116ada2713f7b38381db8f68df0fa694409e67d1", 207 | "sha256:c7bb7ed3e011324b56462391ec3f4bbb7c8c6af5892ebfb45d312b15b4cdfc8d", 208 | "sha256:db3121f12fb9b99f105d1413aebaeb3d943f269f3d262b45586d12765866f0c6", 209 | "sha256:db8bbba9284845034a2f0e1add91dc5e89db8c996359bdcf677a8d6f88875cf1", 210 | "sha256:f0023322c99328c40ce22678ab0ab5adfc27e338419966539398239996f63e8d" 211 | ], 212 | "version": "==3.1.3" 213 | }, 214 | "more-itertools": { 215 | "hashes": [ 216 | "sha256:5dd8bcf33e5f9513ffa06d5ad33d78f31e1931ac9a18f33d37e77a180d393a7c", 217 | "sha256:b1ddb932186d8a6ac451e1d95844b382f55e12686d51ca0c68b6f61f2ab7a507" 218 | ], 219 | "version": "==8.2.0" 220 | }, 221 | "networkx": { 222 | "hashes": [ 223 | "sha256:cdfbf698749a5014bf2ed9db4a07a5295df1d3a53bf80bf3cbd61edf9df05fa1", 224 | "sha256:f8f4ff0b6f96e4f9b16af6b84622597b5334bf9cae8cf9b2e42e7985d5c95c64" 225 | ], 226 | "version": "==2.4" 227 | }, 228 | "numba": { 229 | "hashes": [ 230 | "sha256:03944e9d267ab9e9ee9f528e97b74104d0219d775a14ef6358e1c0ef1eebe11a", 231 | "sha256:05e8f2ebb672201736c24292a7e911baf09c88bd040e634573ac110934d9e3d0", 232 | "sha256:0af9a5d32a00468942d5f2d14ec2a6ae6a8bf7b39153d9bc9813b047e106ad8d", 233 | 
"sha256:35ad551b84747449e9e14fece95d743ae54457effadb84db599d2caf21c69faf", 234 | "sha256:42a8105fe509970fa66c7a3c7379ef9253703db767a1411646f1d77736bde841", 235 | "sha256:47db8f276609fdf9e27b5ea0d43ee081e087fb22466f8b334a86f2dc595bf712", 236 | "sha256:581c03d740ee5706a322ee1bcd259bb86a947253435b67de3b8e216f57e846cb", 237 | "sha256:5ee714f98ae21dd20827079744b711aa0ee6cdbaaed910effe07ad69d4f6a7a5", 238 | "sha256:691fb4a96b9f66dd4c1514e20c9063607dd58873e634542eae66c804d8dc9703", 239 | "sha256:6adbe61b2756d20bd029fde665859330a39c1647ddd442e2ef2b785be3005e4b", 240 | "sha256:7d3d541652fd75f7e2cdc5184297d6d56a095e07ebdaaaac98f157de258fe8cb", 241 | "sha256:8484c60e8c96322391f3ca23646bd0db018de17966ef75e93c2f6c13e24780a2", 242 | "sha256:87126540fb1bb9ba5cf4be19893d457a97510be6c0bff5c20288f6b029824900", 243 | "sha256:930dcd2660f24000f99b4b5be632534bf7b4cf9c161bd268f35d3666aadf8a9f", 244 | "sha256:ab74bf22e4c6cc81b0289e6a450d0e33bff64cf747ad65a2a3218977fca3211d", 245 | "sha256:c046cdaaa40265af6b3f66322e89bc9898a741915919fd96c24b2773cc3d125a", 246 | "sha256:c2cbaeae60f80805290fff50175028726fae12692404a36babd3326730fbceee", 247 | "sha256:cd4afb8b146f35ad8b9f2b186ba8e0420a76022ea5cf141a5efe41dbb2aca875", 248 | "sha256:d10d4fd28f972248a97e4e69a517d74bcbec5f2ee15760545ea353ea809358f4", 249 | "sha256:d2487d38ee2ef36e5c0d2f0565e8f942451546a2aec6ce4f93c9c3f47381de51", 250 | "sha256:ff5914fa1f5558896c25dd50c155e2cc0ec3db279090964bda4f8dc9c8f1bd36" 251 | ], 252 | "index": "pypi", 253 | "version": "==0.46.0" 254 | }, 255 | "numpy": { 256 | "hashes": [ 257 | "sha256:0b0dd8f47fb177d00fa6ef2d58783c4f41ad3126b139c91dd2f7c4b3fdf5e9a5", 258 | "sha256:25ffe71f96878e1da7e014467e19e7db90ae7d4e12affbc73101bcf61785214e", 259 | "sha256:26efd7f7d755e6ca966a5c0ac5a930a87dbbaab1c51716ac26a38f42ecc9bc4b", 260 | "sha256:28b1180c758abf34a5c3fea76fcee66a87def1656724c42bb14a6f9717a5bdf7", 261 | "sha256:2e418f0a59473dac424f888dd57e85f77502a593b207809211c76e5396ae4f5c", 262 | 
"sha256:30c84e3a62cfcb9e3066f25226e131451312a044f1fe2040e69ce792cb7de418", 263 | "sha256:4650d94bb9c947151737ee022b934b7d9a845a7c76e476f3e460f09a0c8c6f39", 264 | "sha256:4dd830a11e8724c9c9379feed1d1be43113f8bcce55f47ea7186d3946769ce26", 265 | "sha256:4f2a2b279efde194877aff1f76cf61c68e840db242a5c7169f1ff0fd59a2b1e2", 266 | "sha256:62d22566b3e3428dfc9ec972014c38ed9a4db4f8969c78f5414012ccd80a149e", 267 | "sha256:669795516d62f38845c7033679c648903200980d68935baaa17ac5c7ae03ae0c", 268 | "sha256:75fcd60d682db3e1f8fbe2b8b0c6761937ad56d01c1dc73edf4ef2748d5b6bc4", 269 | "sha256:9395b0a41e8b7e9a284e3be7060db9d14ad80273841c952c83a5afc241d2bd98", 270 | "sha256:9e37c35fc4e9410093b04a77d11a34c64bf658565e30df7cbe882056088a91c1", 271 | "sha256:a0678793096205a4d784bd99f32803ba8100f639cf3b932dc63b21621390ea7e", 272 | "sha256:b46554ad4dafb2927f88de5a1d207398c5385edbb5c84d30b3ef187c4a3894d8", 273 | "sha256:c867eeccd934920a800f65c6068acdd6b87e80d45cd8c8beefff783b23cdc462", 274 | "sha256:dd0667f5be56fb1b570154c2c0516a528e02d50da121bbbb2cbb0b6f87f59bc2", 275 | "sha256:de2b1c20494bdf47f0160bd88ed05f5e48ae5dc336b8de7cfade71abcc95c0b9", 276 | "sha256:f1df7b2b7740dd777571c732f98adb5aad5450aee32772f1b39249c8a50386f6", 277 | "sha256:ffca69e29079f7880c5392bf675eb8b4146479d976ae1924d01cd92b04cccbcc" 278 | ], 279 | "index": "pypi", 280 | "version": "==1.17.3" 281 | }, 282 | "packaging": { 283 | "hashes": [ 284 | "sha256:170748228214b70b672c581a3dd610ee51f733018650740e98c7df862a583f73", 285 | "sha256:e665345f9eef0c621aa0bf2f8d78cf6d21904eef16a93f020240b704a57f1334" 286 | ], 287 | "version": "==20.1" 288 | }, 289 | "pillow": { 290 | "hashes": [ 291 | "sha256:0a628977ac2e01ca96aaae247ec2bd38e729631ddf2221b4b715446fd45505be", 292 | "sha256:4d9ed9a64095e031435af120d3c910148067087541131e82b3e8db302f4c8946", 293 | "sha256:54ebae163e8412aff0b9df1e88adab65788f5f5b58e625dc5c7f51eaf14a6837", 294 | "sha256:5bfef0b1cdde9f33881c913af14e43db69815c7e8df429ceda4c70a5e529210f", 295 | 
"sha256:5f3546ceb08089cedb9e8ff7e3f6a7042bb5b37c2a95d392fb027c3e53a2da00", 296 | "sha256:5f7ae9126d16194f114435ebb79cc536b5682002a4fa57fa7bb2cbcde65f2f4d", 297 | "sha256:62a889aeb0a79e50ecf5af272e9e3c164148f4bd9636cc6bcfa182a52c8b0533", 298 | "sha256:7406f5a9b2fd966e79e6abdaf700585a4522e98d6559ce37fc52e5c955fade0a", 299 | "sha256:8453f914f4e5a3d828281a6628cf517832abfa13ff50679a4848926dac7c0358", 300 | "sha256:87269cc6ce1e3dee11f23fa515e4249ae678dbbe2704598a51cee76c52e19cda", 301 | "sha256:875358310ed7abd5320f21dd97351d62de4929b0426cdb1eaa904b64ac36b435", 302 | "sha256:8ac6ce7ff3892e5deaab7abaec763538ffd011f74dc1801d93d3c5fc541feee2", 303 | "sha256:91b710e3353aea6fc758cdb7136d9bbdcb26b53cefe43e2cba953ac3ee1d3313", 304 | "sha256:9d2ba4ed13af381233e2d810ff3bab84ef9f18430a9b336ab69eaf3cd24299ff", 305 | "sha256:a62ec5e13e227399be73303ff301f2865bf68657d15ea50b038d25fc41097317", 306 | "sha256:ab76e5580b0ed647a8d8d2d2daee170e8e9f8aad225ede314f684e297e3643c2", 307 | "sha256:bf4003aa538af3f4205c5fac56eacaa67a6dd81e454ffd9e9f055fff9f1bc614", 308 | "sha256:bf598d2e37cf8edb1a2f26ed3fb255191f5232badea4003c16301cb94ac5bdd0", 309 | "sha256:c18f70dc27cc5d236f10e7834236aff60aadc71346a5bc1f4f83a4b3abee6386", 310 | "sha256:c5ed816632204a2fc9486d784d8e0d0ae754347aba99c811458d69fcdfd2a2f9", 311 | "sha256:dc058b7833184970d1248135b8b0ab702e6daa833be14035179f2acb78ff5636", 312 | "sha256:ff3797f2f16bf9d17d53257612da84dd0758db33935777149b3334c01ff68865" 313 | ], 314 | "version": "==7.0.0" 315 | }, 316 | "pkginfo": { 317 | "hashes": [ 318 | "sha256:7424f2c8511c186cd5424bbf31045b77435b37a8d604990b79d4e70d741148bb", 319 | "sha256:a6d9e40ca61ad3ebd0b72fbadd4fba16e4c0e4df0428c041e01e06eb6ee71f32" 320 | ], 321 | "version": "==1.5.0.1" 322 | }, 323 | "pluggy": { 324 | "hashes": [ 325 | "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0", 326 | "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d" 327 | ], 328 | "version": "==0.13.1" 329 | }, 330 | "py": { 
331 | "hashes": [ 332 | "sha256:5e27081401262157467ad6e7f851b7aa402c5852dbcb3dae06768434de5752aa", 333 | "sha256:c20fdd83a5dbc0af9efd622bee9a5564e278f6380fffcacc43ba6f43db2813b0" 334 | ], 335 | "version": "==1.8.1" 336 | }, 337 | "pygments": { 338 | "hashes": [ 339 | "sha256:71e430bc85c88a430f000ac1d9b331d2407f681d6f6aec95e8bcfbc3df5b0127", 340 | "sha256:881c4c157e45f30af185c1ffe8d549d48ac9127433f2c380c24b84572ad66297" 341 | ], 342 | "index": "pypi", 343 | "version": "==2.4.2" 344 | }, 345 | "pyparsing": { 346 | "hashes": [ 347 | "sha256:4c830582a84fb022400b85429791bc551f1f4871c33f23e44f353119e92f969f", 348 | "sha256:c342dccb5250c08d45fd6f8b4a559613ca603b57498511740e65cd11a2e7dcec" 349 | ], 350 | "version": "==2.4.6" 351 | }, 352 | "pytest": { 353 | "hashes": [ 354 | "sha256:7e4800063ccfc306a53c461442526c5571e1462f61583506ce97e4da6a1d88c8", 355 | "sha256:ca563435f4941d0cb34767301c27bc65c510cb82e90b9ecf9cb52dc2c63caaa0" 356 | ], 357 | "index": "pypi", 358 | "version": "==5.2.1" 359 | }, 360 | "python-dateutil": { 361 | "hashes": [ 362 | "sha256:73ebfe9dbf22e832286dafa60473e4cd239f8592f699aa5adaf10050e6e1823c", 363 | "sha256:75bb3f31ea686f1197762692a9ee6a7550b59fc6ca3a1f4b5d7e32fb98e2da2a" 364 | ], 365 | "version": "==2.8.1" 366 | }, 367 | "pywavelets": { 368 | "hashes": [ 369 | "sha256:076ca8907001fdfe4205484f719d12b4a0262dfe6652fa1cfc3c5c362d14dc84", 370 | "sha256:18a51b3f9416a2ae6e9a35c4af32cf520dd7895f2b69714f4aa2f4342fca47f9", 371 | "sha256:1a64b40f6acb4ffbaccce0545d7fc641744f95351f62e4c6aaa40549326008c9", 372 | "sha256:35959c041ec014648575085a97b498eafbbaa824f86f6e4a59bfdef8a3fe6308", 373 | "sha256:55e39ec848ceec13c9fa1598253ae9dd5c31d09dfd48059462860d2b908fb224", 374 | "sha256:6162dc0ae04669ea04b4b51420777b9ea2d30b0a9d02901b2a3b4d61d159c2e9", 375 | "sha256:68b5c33741d26c827074b3d8f0251de1c3019bb9567b8d303eb093c822ce28f1", 376 | "sha256:720dbcdd3d91c6dfead79c80bf8b00a1d8aa4e5d551dc528c6d5151e4efc3403", 377 | 
"sha256:7947e51ca05489b85928af52a34fe67022ab5b81d4ae32a4109a99e883a0635e", 378 | "sha256:79f5b54f9dc353e5ee47f0c3f02bebd2c899d49780633aa771fed43fa20b3149", 379 | "sha256:80b924edbc012ded8aa8b91cb2fd6207fb1a9a3a377beb4049b8a07445cec6f0", 380 | "sha256:889d4c5c5205a9c90118c1980df526857929841df33e4cd1ff1eff77c6817a65", 381 | "sha256:935ff247b8b78bdf77647fee962b1cc208c51a7b229db30b9ba5f6da3e675178", 382 | "sha256:98b2669c5af842a70cfab33a7043fcb5e7535a690a00cd251b44c9be0be418e5", 383 | "sha256:9e2528823ccf5a0a1d23262dfefe5034dce89cd84e4e124dc553dfcdf63ebb92", 384 | "sha256:bc5e87b72371da87c9bebc68e54882aada9c3114e640de180f62d5da95749cd3", 385 | "sha256:be105382961745f88d8196bba5a69ee2c4455d87ad2a2e5d1eed6bd7fda4d3fd", 386 | "sha256:c06d2e340c7bf8b9ec71da2284beab8519a3908eab031f4ea126e8ccfc3fd567", 387 | "sha256:cfe79844526dd92e3ecc9490b5031fca5f8ab607e1e858feba232b1b788ff0ea", 388 | "sha256:d510aef84d9852653d079c84f2f81a82d5d09815e625f35c95714e7364570ad4", 389 | "sha256:e02a0558e0c2ac8b8bbe6a6ac18c136767ec56b96a321e0dfde2173adfa5a504" 390 | ], 391 | "version": "==1.1.1" 392 | }, 393 | "readme-renderer": { 394 | "hashes": [ 395 | "sha256:bb16f55b259f27f75f640acf5e00cf897845a8b3e4731b5c1a436e4b8529202f", 396 | "sha256:c8532b79afc0375a85f10433eca157d6b50f7d6990f337fa498c96cd4bfc203d" 397 | ], 398 | "version": "==24.0" 399 | }, 400 | "requests": { 401 | "hashes": [ 402 | "sha256:43999036bfa82904b6af1d99e4882b560e5e2c68e5c4b0aa03b655f3d7d73fee", 403 | "sha256:b3f43d496c6daba4493e7c431722aeb7dbc6288f52a6e04e7b6023b0247817e6" 404 | ], 405 | "version": "==2.23.0" 406 | }, 407 | "requests-toolbelt": { 408 | "hashes": [ 409 | "sha256:380606e1d10dc85c3bd47bf5a6095f815ec007be7a8b69c878507068df059e6f", 410 | "sha256:968089d4584ad4ad7c171454f0a5c6dac23971e9472521ea3b6d49d610aa6fc0" 411 | ], 412 | "version": "==0.9.1" 413 | }, 414 | "scikit-image": { 415 | "hashes": [ 416 | "sha256:063d1c20fcd53762f82ee58c29783ae4e8f6fbed445b41b704fa33b6f355729d", 417 | 
"sha256:0715b7940778ba5d73da3908d60ddf2eb93863f7c394493a522fe56d3859295c", 418 | "sha256:0808ab5f8218d91a1c008036993636535a37efd67a52ab0f2e6e3f4b7e75aeda", 419 | "sha256:2a54bea469eb1b611bee1ce36e60710f5f94f29205bc5bd67a51793909b1e62b", 420 | "sha256:2aa962aa82d815606d7dad7f045f5d7ca55c65b4320d47e15a98fc92612c2d6c", 421 | "sha256:2d346d49b6852cffb47cbde995e2696d5b07f688d8c057a0a4548abf3a98f920", 422 | "sha256:3ad2efa792ab8de5fcefe6f4f5bc1ab64c411cdb5c829ce1526ab3a5a7729627", 423 | "sha256:3af3d781ce085573ced37b2b5b9abfd32ce3d4723bd17f37e829025d189b0421", 424 | "sha256:41e28db0136f29ecd305bef0408fdfc64be9d415e54f5099a95555c65f5c1865", 425 | "sha256:6786b127f33470fd843e644435522fbf43bce05c9f5527946c390ccb9e1cac27", 426 | "sha256:8b2b768b02c6b7476f2e16ddd91f827d3817aef73f82cf28bff7a8dcdfd8c55c", 427 | "sha256:a48fb0d34a090b578b87ffebab0fe035295c1945dbc2b28e1a55ea2cf6031751", 428 | "sha256:dd7fbd32da74d4e9967dc15845f731f16e7966cee61f5dc0e12e2abb1305068c", 429 | "sha256:e18d73cc8893e2268b172c29f9aab530faf8cd3b7c11ae0bee3e763d719d35c5", 430 | "sha256:e774377876cb258e8f4d63f7809863f961c98aa02263b3ff54a39483bc6f7d26" 431 | ], 432 | "index": "pypi", 433 | "version": "==0.16.2" 434 | }, 435 | "scipy": { 436 | "hashes": [ 437 | "sha256:0baa64bf42592032f6f6445a07144e355ca876b177f47ad8d0612901c9375bef", 438 | "sha256:243b04730d7223d2b844bda9500310eecc9eda0cba9ceaf0cde1839f8287dfa8", 439 | "sha256:2643cfb46d97b7797d1dbdb6f3c23fe3402904e3c90e6facfe6a9b98d808c1b5", 440 | "sha256:396eb4cdad421f846a1498299474f0a3752921229388f91f60dc3eda55a00488", 441 | "sha256:3ae3692616975d3c10aca6d574d6b4ff95568768d4525f76222fb60f142075b9", 442 | "sha256:435d19f80b4dcf67dc090cc04fde2c5c8a70b3372e64f6a9c58c5b806abfa5a8", 443 | "sha256:46a5e55850cfe02332998b3aef481d33f1efee1960fe6cfee0202c7dd6fc21ab", 444 | "sha256:75b513c462e58eeca82b22fc00f0d1875a37b12913eee9d979233349fce5c8b2", 445 | "sha256:7ccfa44a08226825126c4ef0027aa46a38c928a10f0a8a8483c80dd9f9a0ad44", 446 | 
"sha256:89dd6a6d329e3f693d1204d5562dd63af0fd7a17854ced17f9cbc37d5b853c8d", 447 | "sha256:a81da2fe32f4eab8b60d56ad43e44d93d392da228a77e229e59b51508a00299c", 448 | "sha256:a9d606d11eb2eec7ef893eb825017fbb6eef1e1d0b98a5b7fc11446ebeb2b9b1", 449 | "sha256:ac37eb652248e2d7cbbfd89619dce5ecfd27d657e714ed049d82f19b162e8d45", 450 | "sha256:cbc0611699e420774e945f6a4e2830f7ca2b3ee3483fca1aa659100049487dd5", 451 | "sha256:d02d813ec9958ed63b390ded463163685af6025cb2e9a226ec2c477df90c6957", 452 | "sha256:dd3b52e00f93fd1c86f2d78243dfb0d02743c94dd1d34ffea10055438e63b99d" 453 | ], 454 | "index": "pypi", 455 | "version": "==1.3.1" 456 | }, 457 | "six": { 458 | "hashes": [ 459 | "sha256:236bdbdce46e6e6a3d61a337c0f8b763ca1e8717c03b369e87a7ec7ce1319c0a", 460 | "sha256:8f3cd2e254d8f793e7f3d6d9df77b92252b52637291d0f0da013c76ea2724b6c" 461 | ], 462 | "version": "==1.14.0" 463 | }, 464 | "soupsieve": { 465 | "hashes": [ 466 | "sha256:e914534802d7ffd233242b785229d5ba0766a7f487385e3f714446a07bf540ae", 467 | "sha256:fcd71e08c0aee99aca1b73f45478549ee7e7fc006d51b37bec9e9def7dc22b69" 468 | ], 469 | "version": "==2.0" 470 | }, 471 | "tqdm": { 472 | "hashes": [ 473 | "sha256:0d8b5afb66e23d80433102e9bd8b5c8b65d34c2a2255b2de58d97bd2ea8170fd", 474 | "sha256:f35fb121bafa030bd94e74fcfd44f3c2830039a2ddef7fc87ef1c2d205237b24" 475 | ], 476 | "version": "==4.43.0" 477 | }, 478 | "twine": { 479 | "hashes": [ 480 | "sha256:5319dd3e02ac73fcddcd94f035b9631589ab5d23e1f4699d57365199d85261e1", 481 | "sha256:9fe7091715c7576df166df8ef6654e61bada39571783f2fd415bdcba867c6993" 482 | ], 483 | "index": "pypi", 484 | "version": "==2.0.0" 485 | }, 486 | "urllib3": { 487 | "hashes": [ 488 | "sha256:2f3db8b19923a873b3e5256dc9c2dedfa883e33d87c690d9c7913e1f40673cdc", 489 | "sha256:87716c2d2a7121198ebcb7ce7cccf6ce5e9ba539041cfbaeecfb641dc0bf6acc" 490 | ], 491 | "version": "==1.25.8" 492 | }, 493 | "wcwidth": { 494 | "hashes": [ 495 | "sha256:8fd29383f539be45b20bd4df0dc29c20ba48654a41e661925e612311e9f3c603", 496 | 
"sha256:f28b3e8a6483e5d49e7f8949ac1a78314e740333ae305b4ba5defd3e74fb37a8" 497 | ], 498 | "version": "==0.1.8" 499 | }, 500 | "webencodings": { 501 | "hashes": [ 502 | "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78", 503 | "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923" 504 | ], 505 | "version": "==0.5.1" 506 | }, 507 | "zipp": { 508 | "hashes": [ 509 | "sha256:12248a63bbdf7548f89cb4c7cda4681e537031eda29c02ea29674bc6854460c2", 510 | "sha256:7c0f8e91abc0dc07a5068f315c52cb30c66bfbc581e5b50704c8a2f6ebae794a" 511 | ], 512 | "version": "==3.0.0" 513 | } 514 | }, 515 | "develop": {} 516 | } 517 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Build Status](https://travis-ci.org/prideout/snowy.svg?branch=master)](https://travis-ci.org/prideout/snowy) 2 | 3 | ## Snowy 4 | 5 | This is a tiny Python 3 module for manipulating and generating images. 6 | - Simple and flat API. See the [documentation](https://prideout.net/snowy/). 7 | - Supports resize and blur with a variety of filters. 8 | - Honors a specified wrap mode for proper boundary behavior. 9 | - Supports simple OpenEXR images (never clamps colors). 10 | - Written purely in Python 3 and accelerated using [numba](https://numba.pydata.org/). 11 | - Efficiently generates gradient noise and distance fields. 12 | 13 | Snowy is somewhat similar to [pillow](https://python-pillow.org/) and 14 | [scikit-image](https://scikit-image.org/), but has a small feature set with some unique abilities. 15 | Painting and 2D paths are outside the scope of the library. See 16 | [pycairo](https://pycairo.readthedocs.io/en/latest/) or 17 | [skia-pathops](https://github.com/fonttools/skia-pathops). 
18 | 19 | 95 | -------------------------------------------------------------------------------- /docs/blurry_ground2x2_bad.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/blurry_ground2x2_bad.jpg -------------------------------------------------------------------------------- /docs/blurry_ground2x2_good.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/blurry_ground2x2_good.jpg -------------------------------------------------------------------------------- /docs/blurry_ground_bad.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/blurry_ground_bad.jpg -------------------------------------------------------------------------------- /docs/blurry_ground_good.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/blurry_ground_good.jpg -------------------------------------------------------------------------------- /docs/composed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/composed.png -------------------------------------------------------------------------------- /docs/cropped-sunset.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/cropped-sunset.png -------------------------------------------------------------------------------- /docs/diptych-parrot.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/diptych-parrot.png -------------------------------------------------------------------------------- /docs/diptych.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/diptych.png -------------------------------------------------------------------------------- /docs/docs.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | documentation 16 | documentation 17 | 18 | -------------------------------------------------------------------------------- /docs/dropshadow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/dropshadow.png -------------------------------------------------------------------------------- /docs/generate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | from bs4 import BeautifulSoup, Comment 4 | 5 | from pygments import highlight 6 | from pygments.formatters import HtmlFormatter 7 | from pygments.lexers import PythonLexer 8 | 9 | import commonmark 10 | import inspect 11 | import numpy as np 12 | import os 13 | import pygments.styles 14 | import subprocess 15 | import sys 16 | 17 | sys.path.append('../snowy') 18 | import snowy 19 | 20 | GRAY_ISLAND = True 21 | 22 | def optimize(filename): 23 | os.system('optipng ' + filename + ' >/dev/null 2>&1') 24 | 25 | def smoothstep(edge0, edge1, x): 26 | t = np.clip((x - edge0) / (edge1 - edge0), 0.0, 1.0) 27 | return t * t * (3.0 - 2.0 * t) 28 | 29 | def create_circle(w, h, radius=0.4, cx=0.5, cy=0.5): 30 | hw, hh = 0.5 
/ w, 0.5 / h 31 | dp = max(hw, hh) 32 | x = np.linspace(hw, 1 - hw, w) 33 | y = np.linspace(hh, 1 - hh, h) 34 | u, v = np.meshgrid(x, y, sparse=True) 35 | d2, r2 = (u-cx)**2 + (v-cy)**2, radius**2 36 | result = 1 - smoothstep(radius-dp, radius+dp, np.sqrt(d2)) 37 | return snowy.reshape(result) 38 | 39 | def qualify(filename: str): 40 | scriptdir = os.path.dirname(os.path.realpath(__file__)) 41 | return os.path.join(scriptdir, filename) 42 | 43 | def create_wrap_figures(): 44 | ground = snowy.load(qualify('ground.jpg')) 45 | hground = np.hstack([ground, ground]) 46 | ground2x2 = np.vstack([hground, hground]) 47 | snowy.export(ground2x2, qualify('ground2x2.jpg')) 48 | 49 | ground = snowy.blur(ground, radius=14, filter=snowy.LANCZOS) 50 | snowy.export(ground, qualify('blurry_ground_bad.jpg')) 51 | hground = np.hstack([ground, ground]) 52 | ground2x2 = np.vstack([hground, hground]) 53 | snowy.export(ground2x2, qualify('blurry_ground2x2_bad.jpg')) 54 | 55 | ground = snowy.load(qualify('ground.jpg')) 56 | 57 | ground = snowy.blur(ground, radius=14, wrapx=True, wrapy=True, 58 | filter=snowy.LANCZOS) 59 | snowy.export(ground, qualify('blurry_ground_good.jpg')) 60 | hground = np.hstack([ground, ground]) 61 | ground2x2 = np.vstack([hground, hground]) 62 | snowy.export(ground2x2, qualify('blurry_ground2x2_good.jpg')) 63 | 64 | n = snowy.generate_noise(256, 512, frequency=4, seed=42, wrapx=False) 65 | n = 0.5 + 0.5 * np.sign(n) - n 66 | n = np.hstack([n, n]) 67 | n = snowy.add_border(n, width=4) 68 | snowy.export(n, qualify('tiled_noise_bad.png')) 69 | 70 | n = snowy.generate_noise(256, 512, frequency=4, seed=42, wrapx=True) 71 | n = 0.5 + 0.5 * np.sign(n) - n 72 | n = np.hstack([n, n]) 73 | n = snowy.add_border(n, width=4) 74 | snowy.export(n, qualify('tiled_noise_good.png')) 75 | 76 | c0 = create_circle(400, 200, 0.3) 77 | c1 = create_circle(400, 200, 0.08, 0.8, 0.8) 78 | circles = np.clip(c0 + c1, 0, 1) 79 | mask = circles != 0.0 80 | sdf = 
snowy.unitize(snowy.generate_sdf(mask, wrapx=True, wrapy=True)) 81 | sdf = np.hstack([sdf, sdf, sdf, sdf]) 82 | sdf = snowy.resize(np.vstack([sdf, sdf]), width=512) 83 | sdf = snowy.add_border(sdf) 84 | snowy.export(sdf, qualify('tiled_sdf_good.png')) 85 | 86 | sdf = snowy.unitize(snowy.generate_sdf(mask, wrapx=False, wrapy=False)) 87 | sdf = np.hstack([sdf, sdf, sdf, sdf]) 88 | sdf = snowy.resize(np.vstack([sdf, sdf]), width=512) 89 | sdf = snowy.add_border(sdf) 90 | snowy.export(sdf, qualify('tiled_sdf_bad.png')) 91 | 92 | create_wrap_figures() 93 | 94 | result = subprocess.run('git rev-parse HEAD'.split(), stdout=subprocess.PIPE) 95 | sha = result.stdout.strip().decode("utf-8")[:7] 96 | sha = f'{sha}' 97 | version = f'v0.0.9 ~ {sha}' 98 | 99 | header = ''' 100 | 101 | 102 | 105 | 111 | Snowy 112 | 113 | 114 | 115 | 116 | 118 | 119 | 120 | 121 | 122 | 124 | 125 | 127 | 129 | 202 | ''' 203 | 204 | forkme = ''' 205 | 206 | 208 | 225 | 226 | 227 | ''' 228 | 229 | def generate_page(sourcefile, resultfile, genref): 230 | 231 | # Generate html DOM from markdown. 232 | markdown = open(sourcefile).read() 233 | htmldoc = commonmark.commonmark(markdown) 234 | soup = BeautifulSoup(htmldoc, 'html.parser') 235 | 236 | # Remove comments. 237 | comments = soup.find_all(string=lambda text:isinstance(text,Comment)) 238 | for comment in comments: 239 | comment.extract() 240 | 241 | # All h4 sections are actually asides. 242 | admonitions = soup.findAll("h4") 243 | for admonition in admonitions: 244 | p = admonition.find_next_sibling("p") 245 | p['class'] = 'aside' 246 | admonition.extract() 247 | 248 | # Colorize the code blocks. 
249 | formatter = HtmlFormatter(style='tango') 250 | snippets = soup.findAll("code", {"class": "language-python"}) 251 | for snippet in snippets: 252 | code = snippet.contents[0] 253 | highlighted = highlight(code, PythonLexer(), formatter) 254 | newcode = BeautifulSoup(highlighted, 'html.parser') 255 | snippet.parent.replace_with(newcode) 256 | 257 | # Generate the HTML in its initial form, including ') 267 | htmlfile.write('
\n') 268 | htmlfile.write(forkme) 269 | htmlfile.write(str(soup)) 270 | 271 | # Generate quickref. 272 | quickref = '' 273 | if genref: 274 | for member in inspect.getmembers(snowy): 275 | name, value = member 276 | if name.startswith('__'): 277 | continue 278 | if not inspect.isfunction(value): 279 | continue 280 | module = inspect.getmodule(value) 281 | if not module.__name__.startswith('snowy'): 282 | continue 283 | deprecated = name == 'save' 284 | if deprecated: 285 | continue 286 | lname = name.lower() 287 | doc = inspect.getdoc(value) 288 | src = inspect.getsource(value) 289 | dsbegin = src.find(r'"""') 290 | dsend = src.rfind(r'"""') + 4 291 | dsbegin = src[:dsbegin].rfind('\n') + 1 292 | src = src[:dsbegin] + src[dsend:] 293 | nlines = len(src.split('\n')) 294 | highlighted_src = highlight(src, PythonLexer(), formatter) 295 | if doc: 296 | doclines = doc.split('\n') 297 | quickref += '\n' 298 | quickref += f'{name}\n' 299 | quickref += f'{doclines[0]}\n' 300 | quickref += '\n' 301 | htmlfile.write(f'

{name}

\n

\n') 302 | htmlfile.write(' '.join(doclines)) 303 | htmlfile.write('\n

\n') 304 | htmlfile.write(highlighted_src) 305 | htmlfile.write('
\n') 306 | htmlfile.close() 307 | 308 | # Post process HTML by adding anchors, etc. 309 | htmldoc = open(resultfile).read() 310 | htmldoc = htmldoc.replace('$quickref$', quickref) 311 | htmldoc = htmldoc.replace('

', version + '\n

') 312 | soup = BeautifulSoup(htmldoc, 'html.parser') 313 | for tag in 'h2 h3 h4'.split(): 314 | headings = soup.find_all(tag) 315 | for heading in headings: 316 | content = heading.contents[0].strip() 317 | id = content.replace(' ', '_').lower() 318 | heading["id"] = id 319 | anchor = soup.new_tag('a', href='#' + id) 320 | anchor.string = content 321 | heading.contents[0].replace_with(anchor) 322 | open(resultfile, 'w').write(str(soup)) 323 | 324 | generate_page(qualify('index.md'), qualify('index.html'), False) 325 | generate_page(qualify('reference.md'), qualify('reference.html'), True) 326 | 327 | # Test rotations and flips 328 | 329 | gibbons = snowy.load(qualify('gibbons.jpg')) 330 | gibbons = snowy.resize(gibbons, width=gibbons.shape[1] // 5) 331 | gibbons90 = snowy.rotate(gibbons, 90) 332 | gibbons180 = snowy.rotate(gibbons, 180) 333 | gibbons270 = snowy.rotate(gibbons, 270) 334 | hflipped = snowy.hflip(gibbons) 335 | vflipped = snowy.vflip(gibbons) 336 | snowy.export(snowy.hstack([gibbons, gibbons180, vflipped], 337 | border_width=4, border_value=[0.5,0,0]), qualify("xforms.png")) 338 | 339 | # Test noise generation 340 | 341 | n = snowy.generate_noise(100, 100, frequency=4, seed=42, wrapx=True) 342 | n = np.hstack([n, n]) 343 | n = 0.5 + 0.5 * n 344 | snowy.show(n) 345 | snowy.export(n, qualify('noise.png')) 346 | 347 | # First try minifying grayscale 348 | 349 | gibbons = snowy.load(qualify('snowy.jpg')) 350 | gibbons = np.swapaxes(gibbons, 0, 2) 351 | gibbons = np.swapaxes(gibbons[0], 0, 1) 352 | gibbons = snowy.reshape(gibbons) 353 | source = snowy.resize(gibbons, height=200) 354 | blurry = snowy.blur(source, radius=4.0) 355 | diptych_filename = qualify('diptych.png') 356 | snowy.export(snowy.hstack([source, blurry]), diptych_filename) 357 | optimize(diptych_filename) 358 | snowy.show(diptych_filename) 359 | 360 | # Next try color 361 | 362 | gibbons = snowy.load(qualify('snowy.jpg')) 363 | source = snowy.resize(gibbons, height=200) 364 | blurry = 
snowy.blur(source, radius=4.0) 365 | diptych_filename = qualify('diptych.png') 366 | snowy.export(snowy.hstack([source, blurry]), diptych_filename) 367 | optimize(diptych_filename) 368 | snowy.show(diptych_filename) 369 | 370 | # Moving on to magnification... 371 | 372 | parrot = snowy.load(qualify('parrot.png')) 373 | scale = 6 374 | nearest = snowy.resize(parrot, width=32*scale, filter=snowy.NEAREST) 375 | mitchell = snowy.resize(parrot, height=26*scale) 376 | diptych_filename = qualify('diptych-parrot.png') 377 | parrot = snowy.hstack([nearest, mitchell]) 378 | parrot = snowy.extract_rgb(parrot) 379 | snowy.export(parrot, diptych_filename) 380 | optimize(diptych_filename) 381 | snowy.show(diptych_filename) 382 | 383 | # EXR cropping 384 | 385 | sunset = snowy.load(qualify('small.exr'), False) 386 | sunset = sunset[:100,:,:] / 50.0 387 | cropped_filename = qualify('cropped-sunset.png') 388 | snowy.export(sunset, cropped_filename) 389 | optimize(cropped_filename) 390 | snowy.show(cropped_filename) 391 | 392 | # Alpha composition 393 | 394 | icon = snowy.load(qualify('snowflake.png')) 395 | icon = snowy.resize(icon, height=100) 396 | sunset[:100,200:300] = snowy.compose(sunset[:100,200:300], icon) 397 | snowy.export(sunset, qualify('composed.png')) 398 | optimize(qualify('composed.png')) 399 | snowy.show(sunset) 400 | 401 | # Drop shadows 402 | 403 | shadow = np.zeros([150, 150, 4]) 404 | shadow[25:-25,25:-25,:] = icon 405 | 406 | white = shadow.copy() 407 | white[:,:,:3] = 1.0 - white[:,:,:3] 408 | 409 | shadow = snowy.blur(shadow, radius=10.0) 410 | shadow = snowy.compose(shadow, shadow) 411 | shadow = snowy.compose(shadow, shadow) 412 | shadow = snowy.compose(shadow, shadow) 413 | 414 | dropshadow = snowy.compose(shadow, white) 415 | snowy.export(dropshadow, qualify('dropshadow.png')) 416 | optimize(qualify('dropshadow.png')) 417 | 418 | STEPPED_PALETTE = [ 419 | 000, 0x203060 , 420 | 64, 0x2C316F , 421 | 125, 0x2C316F , 422 | 125, 0x46769D , 423 | 126, 0x46769D 
, 424 | 127, 0x324060 , 425 | 131, 0x324060 , 426 | 132, 0x9C907D , 427 | 137, 0x9C907D , 428 | 137, 0x719457 , 429 | 170, 0x719457 , # Light green 430 | 170, 0x50735A , 431 | 180, 0x50735A , 432 | 180, 0x9FA881 , 433 | 200, 0x9FA881 , 434 | 250, 0xFFFFFF , 435 | 255, 0xFFFFFF 436 | ] 437 | 438 | SMOOTH_PALETTE = [ 439 | 000, 0x203060 , # Dark Blue 440 | 126, 0x2C316F , # Light Blue 441 | 127, 0xE0F0A0 , # Yellow 442 | 128, 0x719457 , # Dark Green 443 | 200, 0xFFFFFF , # White 444 | 255, 0xFFFFFF ] # White 445 | 446 | from scipy import interpolate 447 | 448 | def applyColorGradient(elevation_image, gradient_image): 449 | xvals = np.arange(256) 450 | yvals = gradient_image[0] 451 | apply_lut = interpolate.interp1d(xvals, yvals, axis=0) 452 | return apply_lut(snowy.unshape(np.clip(elevation_image, 0, 255))) 453 | 454 | def create_falloff(w, h, radius=0.4, cx=0.5, cy=0.5): 455 | hw, hh = 0.5 / w, 0.5 / h 456 | x = np.linspace(hw, 1 - hw, w) 457 | y = np.linspace(hh, 1 - hh, h) 458 | u, v = np.meshgrid(x, y, sparse=True) 459 | d2 = (u-cx)**2 + (v-cy)**2 460 | return 1-snowy.unitize(snowy.reshape(d2)) 461 | 462 | c0 = create_circle(200, 200, 0.3) 463 | c1 = create_circle(200, 200, 0.08, 0.8, 0.8) 464 | c0 = np.clip(c0 + c1, 0, 1) 465 | circles = snowy.add_border(c0, value=1) 466 | sdf = snowy.unitize(snowy.generate_sdf(circles != 0.0)) 467 | stack = snowy.hstack([circles, sdf]) 468 | snowy.export(stack, qualify('sdf.png')) 469 | snowy.show(stack) 470 | 471 | # Islands 472 | def create_island(seed, gradient, freq=3.5): 473 | w, h = 750, 512 474 | falloff = create_falloff(w, h) 475 | n1 = 1.000 * snowy.generate_noise(w, h, freq*1, seed+0) 476 | n2 = 0.500 * snowy.generate_noise(w, h, freq*2, seed+1) 477 | n3 = 0.250 * snowy.generate_noise(w, h, freq*4, seed+2) 478 | n4 = 0.125 * snowy.generate_noise(w, h, freq*8, seed+3) 479 | elevation = falloff * (falloff / 2 + n1 + n2 + n3 + n4) 480 | mask = elevation < 0.4 481 | elevation = snowy.unitize(snowy.generate_sdf(mask)) 482 
| if GRAY_ISLAND: 483 | return (1 - mask) * np.power(elevation, 3.0) 484 | elevation = snowy.generate_sdf(mask) - 100 * n4 485 | mask = np.where(elevation < 0, 1, 0) 486 | el = 128 + 127 * elevation / np.amax(elevation) 487 | return applyColorGradient(el, gradient) 488 | 489 | def createColorGradient(pal): 490 | inds = pal[0::2] 491 | cols = np.array(pal[1::2]) 492 | red, grn, blu = cols >> 16, cols >> 8, cols 493 | cols = [c & 0xff for c in [red, grn, blu]] 494 | cols = [interpolate.interp1d(inds, c) for c in cols] 495 | img = np.arange(0, 255) 496 | img = np.dstack([fn(img) for fn in cols]) 497 | return snowy.resize(img, 256, 32) 498 | 499 | gradient = createColorGradient(STEPPED_PALETTE) 500 | snowy.export(snowy.add_border(gradient), qualify('gradient.png')) 501 | isles = [] 502 | for i in range(6): 503 | isle = create_island(i * 5, gradient) 504 | isle = snowy.resize(isle, width=isle.shape[1] // 3) 505 | isles.append(isle) 506 | snowy.export(isles[2], qualify('island.png')) 507 | optimize(qualify('island.png')) 508 | isles = snowy.hstack(isles) 509 | snowy.export(isles, qualify('isles.png')) 510 | 511 | def draw_quad(): 512 | verts = np.array([[-0.67608007, 0.38439575, 3.70544936, 0., 0. ], 513 | [-0.10726266, 0.38439575, 2.57742041, 1., 0. ], 514 | [-0.10726266, -0.96069041, 2.57742041, 1., 1. ], 515 | [-0.67608007, -0.96069041, 3.70544936, 0., 1. 
]]) 516 | texture = snowy.load(qualify('../tests/texture.png')) 517 | target = np.full((1080, 1920, 4), (0.54, 0.54, 0.78, 1.00), 518 | dtype=np.float32) 519 | snowy.draw_polygon(target, texture, verts) 520 | target = snowy.resize(target[400:770, 700:1000], height = 256) 521 | texture = snowy.resize(texture, height = 256) 522 | quad = snowy.hstack([texture, target]) 523 | snowy.export(quad, qualify('quad.png')) 524 | snowy.show(quad) 525 | 526 | draw_quad() 527 | -------------------------------------------------------------------------------- /docs/gibbons.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/gibbons.jpg -------------------------------------------------------------------------------- /docs/gradient.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/gradient.png -------------------------------------------------------------------------------- /docs/ground.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/ground.jpg -------------------------------------------------------------------------------- /docs/ground2x2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/ground2x2.jpg -------------------------------------------------------------------------------- /docs/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 7 | 13 | Snowy 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 98 |
178 | 179 | 180 | 194 | 195 | v0.0.9 ~ f0a51f7 196 |

Snowy

197 |

User's Guide | API Reference

198 |

Snowy is a tiny module for manipulating and generating floating-point images.

199 | 205 |

Snowy does not define a special class for images. Instead, images are always three-dimensional 206 | numpy arrays in row-major order.

207 |

For example, RGB images have shape [height,width,3] and grayscale images have shape 208 | [height,width,1]. Snowy provides some utility functions that make it easy to work with other 209 | modules (see interop).

210 |

Snowy is not an Image IO library, but for convenience it provides load and 211 | export functions that have limited support for PNG, EXR, and JPEG.

212 |

If you're interested in tone mapping and other HDR operations, check out the 213 | hydra module. If you wish to simply load / store raw 214 | floating-point data, consider using npy files instead of image files. The relevant functions are 215 | numpy.load and 216 | numpy.save.

217 |

Installing

218 |

To install and update snowy, do this:

219 |

pip3 install -U snowy

220 |

Examples

221 |

Resize and blur

222 |

This snippet does a resize, then a blur, then horizontally concatenates the two 223 | images.

224 |
import snowy
225 | 
226 | source = snowy.load('poodle.png')
227 | source = snowy.resize(source, height=200)
228 | blurry = snowy.blur(source, radius=4.0)
229 | snowy.export(snowy.hstack([source, blurry]), 'diptych.png')
230 | 
231 | 232 |

The next snippet first magnifies an image using a nearest-neighbor filter, then using the default 233 | Mitchell filter.

234 |
parrot = snowy.load('parrot.png')
235 | height, width = parrot.shape[:2]
236 | nearest = snowy.resize(parrot, width * 6, filter=snowy.NEAREST) 
237 | mitchell = snowy.resize(parrot, width * 6)
238 | snowy.show(snowy.hstack([nearest, mitchell]))
239 | 
240 | 241 |

Rotate and flip

242 |
gibbons = snowy.load('gibbons.jpg')
243 | rotated = snowy.rotate(gibbons, 180)
244 | flipped = snowy.vflip(gibbons)
245 | triptych = snowy.hstack([gibbons, rotated, flipped],
246 |     border_width=4, border_value=[0.5,0,0])
247 | 
248 | 249 |

Cropping

250 |

If you need to crop an image, just use numpy slicing.

251 |

For example, this loads an OpenEXR image then crops out the top half:

252 |
sunrise = snowy.load('sunrise.exr')
253 | cropped_sunrise = sunrise[:100,:,:]
254 | snowy.show(cropped_sunrise / 50.0) # darken the image
255 | 
256 | 257 |

Alpha composition

258 |

To copy a section of one image into another, simply use numpy slicing.

259 |

However, to achieve "source-over" style alpha blending, using raw numpy math would be cumbersome. 260 | Snowy provides compose to make this easier:

261 |
icon = snowy.load('snowflake.png')
262 | icon = snowy.resize(icon, height=100)
263 | sunset[:100,200:300] = snowy.compose(sunset[:100,200:300], icon)
264 | snowy.show(sunset)
265 | 
266 | 267 |

Drop shadows

268 |

Combining operations like blur and compose can be 269 | used to create a drop shadow:

270 |
# Extend the 100x100 snowflake image on 4 sides to give room for blur.
271 | shadow = np.zeros([150, 150, 4])
272 | shadow[25:-25,25:-25,:] = icon
273 | 
274 | # Invert the colors but not the alpha.
275 | white = shadow.copy()
276 | white[:,:,:3] = 1.0 - white[:,:,:3]
277 | 
278 | # Blur the shadow, then "strengthen" it.
279 | shadow = snowy.blur(shadow, radius=10.0)
280 | shadow = snowy.compose(shadow, shadow)
281 | shadow = snowy.compose(shadow, shadow)
282 | shadow = snowy.compose(shadow, shadow)
283 | 
284 | # Compose the white flake onto its shadow.
285 | dropshadow = snowy.compose(shadow, white)
286 | 
287 | 288 |

Gradient noise

289 |

Snowy's generate_noise function generates a single-channel image whose values are 290 | in [-1, +1]. Here we create a square noise image that can be tiled horizontally:

291 |
n = snowy.generate_noise(100, 100, frequency=4, seed=42, wrapx=True)
292 | n = np.hstack([n, n])
293 | snowy.show(0.5 + 0.5 * n)
294 | 
295 | 296 |

If you're interested in other types of noise, or if you need a super-fast noise generator, you might 297 | want to try pyfastnoisesimd.

298 |

Distance fields

299 |

This example uses generate_sdf to create a signed distance field from a monochrome picture of two circles 300 | enclosed by a square. Note the usage of unitize to adjust the values into the [0,1] range.

301 |
circles = snowy.load('circles.png')
302 | sdf = snowy.unitize(snowy.generate_sdf(circles != 0.0))
303 | snowy.show(snowy.hstack([circles, sdf]))
304 | 
305 | 306 |

Image generation

307 |

Combining Snowy's unique features with numpy can be used to create interesting procedural images. 308 | The following example creates an elevation map for an imaginary island.

309 |
def create_falloff(w, h, radius=0.4, cx=0.5, cy=0.5):
310 |     hw, hh = 0.5 / w, 0.5 / h
311 |     x = np.linspace(hw, 1 - hw, w)
312 |     y = np.linspace(hh, 1 - hh, h)
313 |     u, v = np.meshgrid(x, y, sparse=True)
314 |     d2 = (u-cx)**2 + (v-cy)**2
315 |     return 1-snowy.unitize(snowy.reshape(d2))
316 | 
317 | def create_island(seed, freq=3.5):
318 |     w, h = 750, 512
319 |     falloff = create_falloff(w, h)
320 |     n1 = 1.000 * snowy.generate_noise(w, h, freq*1, seed+0)
321 |     n2 = 0.500 * snowy.generate_noise(w, h, freq*2, seed+1)
322 |     n3 = 0.250 * snowy.generate_noise(w, h, freq*4, seed+2)
323 |     n4 = 0.125 * snowy.generate_noise(w, h, freq*8, seed+3)
324 |     elevation = falloff * (falloff / 2 + n1 + n2 + n3 + n4)
325 |     mask = elevation < 0.4
326 |     elevation = snowy.unitize(snowy.generate_sdf(mask))
327 |     return (1 - mask) * np.power(elevation, 3.0)
328 | 
329 | snowy.export(create_island(10), 'island.png')
330 | 
331 |

332 |

Snowy also offers the compute_skylight and 333 | compute_normals functions to help with 3D rendering. These 334 | functions were used to create the following images.

335 |

336 |

The first panel shows ambient occlusion generated by compute_skylight, the second panel shows 337 | the normal map generated by compute_normals, the right two panels use numpy to add diffuse 338 | lighting and a color gradient. The code for this is in 339 | test_lighting.py.

340 |

Wrap modes

341 |

Snowy's blur, resize, 342 | generate_noise, and generate_sdf 343 | functions all take wrapx and wrapy arguments, both of which default to False. These arguments 344 | tell Snowy how to sample from outside the boundaries of the source image or noise function.

345 |

To help understand these arguments, consider this tileable image and its 2x2 tiling:

346 | 347 |
348 | 349 |

Next, let's try blurring the tile naively:

350 | 351 |

See the seams? Now let's blur it with wrapx and wrapy set to True when we call 352 | blur:

353 | 354 |

Wrappable gradient noise

355 |

The wrap arguments are also useful for 2D noise. One way of making tileable gradient noise is to 356 | sample 3D noise on the surface of a cylinder, torus, or cube. However Snowy can do this more 357 | efficiently by generating 2D noise with modulus arithmetic.

358 |

Here we created a 128x256 tile using generate_noise without the 359 | wrapx argument, then horizontally tiled it twice:

360 | 361 |

Here's another tiling of gradient noise, but this time the tile was generated with wrapx set to 362 | True:

363 | 364 |

Wrappable distance fields

365 |

Snowy's generate_sdf function also takes wrap arguments. For example 366 | here's a distance field in a 4x2 tiling:

367 | 368 |

Here's the same distance field, this time with wrapx and wrapy set to True:

369 | 370 |

Drawing quads

371 |

Snowy can also rasterize convex textured polygons. For example:

372 |
verts = np.array([
373 |     [-0.67,  0.38,  3.70,  0., 0. ],
374 |     [-0.10,  0.38,  2.57,  1., 0. ],
375 |     [-0.10, -0.96,  2.57,  1., 1. ],
376 |     [-0.67, -0.96,  3.70,  0., 1. ]])
377 | texture = snowy.load('texture.png')
378 | background = np.full((1080, 1920, 4), (0.54, 0.54, 0.78, 1.00))
379 | snowy.draw_polygon(background, texture, verts)
380 | snowy.show(background)
381 | 
382 | 383 |

Each vertex is specified as a 5-tuple to enable perspective-correct 384 | interpolation. For more information see 385 | draw_polygon.

386 |

Interop

387 |

Snowy's algorithms require images to be row-major three-dimensional float64 numpy arrays, with 388 | color channels living in the trailing dimension. If you're working with another module that does not 389 | follow this convention, consider using one of the following interop functions.

390 | 400 |
401 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | # Snowy 2 | 3 | **User's Guide** | [API Reference](reference.html) 4 | 5 | Snowy is a tiny module for manipulating and generating floating-point images. 6 | 7 | - Small and flat API (free functions only). 8 | - Written purely in Python 3. 9 | - Accelerated with [numba](https://numba.pydata.org/). 10 | - Configurable boundaries ([wrap modes](#wrap_modes)). 11 | 12 | Snowy does not define a special class for images. Instead, **images are always three-dimensional 13 | numpy arrays** in row-major order. 14 | 15 | #### aside 16 | 17 | For example, RGB images have shape `[height,width,3]` and grayscale images have shape 18 | `[height,width,1]`. Snowy provides some utility functions that make it easy to work with other 19 | modules (see [interop](#interop)). 20 | 21 | Snowy is not an Image IO library, but for convenience it provides [load](reference.html#load) and 22 | [export](reference.html#export) functions that have limited support for PNG, EXR, and JPEG. 23 | 24 | #### aside 25 | 26 | If you're interested in tone mapping and other HDR operations, check out the 27 | [hydra](https://github.com/tatsy/hydra) module. If you wish to simply load / store raw 28 | floating-point data, consider using npy files instead of image files. The relevant functions are 29 | [numpy.load](https://docs.scipy.org/doc/numpy/reference/generated/numpy.load.html) and 30 | [numpy.save](https://docs.scipy.org/doc/numpy/reference/generated/numpy.save.html). 31 | 32 | ## Installing 33 | 34 | To install and update snowy, do this: 35 | 36 | `pip3 install -U snowy` 37 | 38 | ## Examples 39 | 40 | ### Resize and blur 41 | 42 | This snippet does a resize, then a blur, then horizontally concatenates the two 43 | images. 
44 | 45 | ```python 46 | import snowy 47 | 48 | source = snowy.load('poodle.png') 49 | source = snowy.resize(source, height=200) 50 | blurry = snowy.blur(source, radius=4.0) 51 | snowy.export(snowy.hstack([source, blurry]), 'diptych.png') 52 | ``` 53 | 54 | 55 | 56 | The next snippet first magnifies an image using a nearest-neighbor filter, then using the default 57 | Mitchell filter. 58 | 59 | ```python 60 | parrot = snowy.load('parrot.png') 61 | height, width = parrot.shape[:2] 62 | nearest = snowy.resize(parrot, width * 6, filter=snowy.NEAREST) 63 | mitchell = snowy.resize(parrot, width * 6) 64 | snowy.show(snowy.hstack([nearest, mitchell])) 65 | ``` 66 | 67 | 68 | 69 | ### Rotate and flip 70 | 71 | ```python 72 | gibbons = snowy.load('gibbons.jpg') 73 | rotated = snowy.rotate(gibbons, 180) 74 | flipped = snowy.vflip(gibbons) 75 | triptych = snowy.hstack([gibbons, rotated, flipped], 76 | border_width=4, border_value=[0.5,0,0]) 77 | ``` 78 | 79 | 80 | 81 | ### Cropping 82 | 83 | If you need to crop an image, just use numpy slicing. 84 | 85 | For example, this loads an OpenEXR image then crops out the top half: 86 | 87 | ```python 88 | sunrise = snowy.load('sunrise.exr') 89 | cropped_sunrise = sunrise[:100,:,:] 90 | snowy.show(cropped_sunrise / 50.0) # darken the image 91 | ``` 92 | 93 | 94 | 95 | ### Alpha composition 96 | 97 | To copy a section of one image into another, simply use numpy slicing. 98 | 99 | However, to achieve "source-over" style alpha blending, using raw numpy math would be cumbersome. 
100 | Snowy provides [compose](reference.html#compose) to make this easier: 101 | 102 | ```python 103 | icon = snowy.load('snowflake.png') 104 | icon = snowy.resize(snowflake, height=100) 105 | sunset[:100,200:300] = snowy.compose(sunset[:100,200:300], icon) 106 | snowy.show(sunset) 107 | ``` 108 | 109 | 110 | 111 | ### Drop shadows 112 | 113 | Combining operations like [blur](reference.html#blur) and [compose](reference.html#compose) can be 114 | used to create a drop shadow: 115 | 116 | ```python 117 | # Extend the 100x100 snowflake image on 4 sides to give room for blur. 118 | shadow = np.zeros([150, 150, 4]) 119 | shadow[25:-25,25:-25,:] = icon 120 | 121 | # Invert the colors but not the alpha. 122 | white = shadow.copy() 123 | white[:,:,:3] = 1.0 - white[:,:,:3] 124 | 125 | # Blur the shadow, then "strengthen" it. 126 | shadow = snowy.blur(shadow, radius=10.0) 127 | shadow = snowy.compose(shadow, shadow) 128 | shadow = snowy.compose(shadow, shadow) 129 | shadow = snowy.compose(shadow, shadow) 130 | 131 | # Compose the white flake onto its shadow. 132 | dropshadow = snowy.compose(shadow, white) 133 | ``` 134 | 135 | 136 | 137 | ### Gradient noise 138 | 139 | Snowy's `generate_noise` function generates a single-channel image whose values are 140 | in [-1, +1]. Here we create a square noise image that can be tiled horizontally: 141 | 142 | ```python 143 | n = snowy.generate_noise(100, 100, frequency=4, seed=42, wrapx=True) 144 | n = np.hstack([n, n]) 145 | snowy.show(0.5 + 0.5 * n) 146 | ``` 147 | 148 | 149 | 150 | #### aside 151 | 152 | If you're interested in other types of noise, or if you need a super-fast noise generator, you might 153 | want to try [pyfastnoisesimd](https://github.com/robbmcleod/pyfastnoisesimd). 154 | 155 | ### Distance fields 156 | 157 | This example uses `generate_sdf` to create a signed distance field from a monochrome picture of two circles 158 | enclosed by a square. 
Note the usage of `unitize` to adjust the values into the `[0,1]` range. 159 | 160 | ```python 161 | circles = snowy.load('circles.png') 162 | sdf = snowy.unitize(snowy.generate_sdf(circles != 0.0)) 163 | snowy.show(snowy.hstack([circles, sdf])) 164 | ``` 165 | 166 | 167 | 168 | ### Image generation 169 | 170 | Combining Snowy's unique features with numpy can be used to create interesting procedural images. 171 | The following example creates an elevation map for an imaginary island. 172 | 173 | ```python 174 | def create_falloff(w, h, radius=0.4, cx=0.5, cy=0.5): 175 | hw, hh = 0.5 / w, 0.5 / h 176 | x = np.linspace(hw, 1 - hw, w) 177 | y = np.linspace(hh, 1 - hh, h) 178 | u, v = np.meshgrid(x, y, sparse=True) 179 | d2 = (u-cx)**2 + (v-cy)**2 180 | return 1-snowy.unitize(snowy.reshape(d2)) 181 | 182 | def create_island(seed, freq=3.5): 183 | w, h = 750, 512 184 | falloff = create_falloff(w, h) 185 | n1 = 1.000 * snowy.generate_noise(w, h, freq*1, seed+0) 186 | n2 = 0.500 * snowy.generate_noise(w, h, freq*2, seed+1) 187 | n3 = 0.250 * snowy.generate_noise(w, h, freq*4, seed+2) 188 | n4 = 0.125 * snowy.generate_noise(w, h, freq*8, seed+3) 189 | elevation = falloff * (falloff / 2 + n1 + n2 + n3 + n4) 190 | mask = elevation < 0.4 191 | elevation = snowy.unitize(snowy.generate_sdf(mask)) 192 | return (1 - mask) * np.power(elevation, 3.0) 193 | 194 | snowy.export(create_island(10), 'island.png') 195 | ``` 196 | 197 | ![](island.png) 198 | 199 | Snowy also offers the [compute_skylight](reference.html#compute_skylight) and 200 | [compute_normals](reference.html#compute_normals) functions to help with 3D rendering. These 201 | functions were used to create the following images. 202 | 203 | ![](island_strip.png) 204 | 205 | The first panel shows ambient occlusion generated by `compute_skylight`, the second panel shows 206 | the normal map generated by `compute_normals`, the right two panels use numpy to add diffuse 207 | lighting and a color gradient. 
The code for this is in 208 | [test_lighting.py](https://github.com/prideout/snowy/blob/master/tests/test_lighting.py). 209 | 210 | ## Wrap modes 211 | 212 | Snowy's [blur](reference.html#blur), [resize](reference.html#resize), 213 | [generate_noise](reference.html#generate_noise), and [generate_sdf](reference.html#generate_sdf) 214 | functions all take `wrapx` and `wrapy` arguments, both of which default to `False`. These arguments 215 | tell Snowy how to sample from outside the boundaries of the source image or noise function. 216 | 217 | To help understand these arguments, consider this tileable image and its 2x2 tiling: 218 | 219 | 220 |
221 | 222 | 223 | Next, let's try blurring the tile naively: 224 | 225 | 226 | 227 | See the seams? Now let's blur it with `wrapx` and `wrapy` set to `True` when we call 228 | [blur](reference.html#blur): 229 | 230 | 231 | 232 | ### Wrappable gradient noise 233 | 234 | The wrap arguments are also useful for 2D noise. One way of making tileable gradient noise is to 235 | sample 3D noise on the surface of a cylinder, torus, or cube. However Snowy can do this more 236 | efficiently by generating 2D noise with modulus arithmetic. 237 | 238 | Here we created a 128x256 tile using [generate_noise](reference.html#generate_noise) without the 239 | `wrapx` argument, then horizontally tiled it twice: 240 | 241 | 242 | 243 | Here's another tiling of gradient noise, but this time the tile was generated with `wrapx` set to 244 | `True`: 245 | 246 | 247 | 248 | ### Wrappable distance fields 249 | 250 | Snowy's [generate_sdf](reference.html#generate_sdf) function also takes wrap arguments. For example 251 | here's a distance field in a 4x2 tiling: 252 | 253 | 254 | 255 | Here's the same distance field, this time with wrapx and wrapy set to `True`: 256 | 257 | 258 | 259 | ## Drawing quads 260 | 261 | Snowy can also rasterize convex textured polygons. For example: 262 | 263 | ```python 264 | verts = np.array([ 265 | [-0.67, 0.38, 3.70, 0., 0. ], 266 | [-0.10, 0.38, 2.57, 1., 0. ], 267 | [-0.10, -0.96, 2.57, 1., 1. ], 268 | [-0.67, -0.96, 3.70, 0., 1. ]]) 269 | texture = snowy.load('texture.png') 270 | background = np.full((1080, 1920, 4), (0.54, 0.54, 0.78, 1.00)) 271 | snowy.draw_polygon(background, texture, verts) 272 | snowy.show(background) 273 | ``` 274 | 275 | 276 | 277 | Each vertex is specified as a 5-tuple to enable perspective-correct 278 | interpolation. For more information see 279 | [draw_polygon](reference.html#draw_polygon). 
280 | 281 | ## Interop 282 | 283 | Snowy's algorithms require images to be row-major three-dimensional `float64` numpy arrays, with 284 | color channels living in the trailing dimension. If you're working with another module that does not 285 | follow this convention, consider using one of the following interop functions. 286 | 287 | - To add or remove the trailing 1 from the shape of grayscale images, use 288 | [reshape](reference.html#reshape) and [unshape](reference.html#unshape). 289 | - To swap color channels in or out of the leading dimension, use 290 | [to_planar](reference.html#to_planar) and [from_planar](reference.html#from_planar). 291 | - To cast between `float64` and other types, just use numpy. For example, 292 | `np.uint8(myimg * 255)` or `np.float64(myimg) / 255`. 293 | - To swap rows with columns, use numpy's 294 | [swapaxes function](https://docs.scipy.org/doc/numpy/reference/generated/numpy.swapaxes.html). 295 | -------------------------------------------------------------------------------- /docs/island.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/island.png -------------------------------------------------------------------------------- /docs/island_strip.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/island_strip.png -------------------------------------------------------------------------------- /docs/isles.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/isles.png -------------------------------------------------------------------------------- /docs/noise.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/noise.png -------------------------------------------------------------------------------- /docs/parrot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/parrot.png -------------------------------------------------------------------------------- /docs/quad.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/quad.png -------------------------------------------------------------------------------- /docs/reference.md: -------------------------------------------------------------------------------- 1 | # Snowy 2 | 3 | [User's Guide](index.html) | **API Reference** 4 | 5 | --- 6 | 7 | 8 | $quickref$ 9 |
10 | 11 | --- 12 | -------------------------------------------------------------------------------- /docs/sdf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/sdf.png -------------------------------------------------------------------------------- /docs/small.exr: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/small.exr -------------------------------------------------------------------------------- /docs/snowflake.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/snowflake.png -------------------------------------------------------------------------------- /docs/snowflake64.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/snowflake64.png -------------------------------------------------------------------------------- /docs/snowy.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/snowy.jpg -------------------------------------------------------------------------------- /docs/snowy2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/snowy2.png -------------------------------------------------------------------------------- /docs/tiled_noise_bad.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/tiled_noise_bad.png -------------------------------------------------------------------------------- /docs/tiled_noise_good.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/tiled_noise_good.png -------------------------------------------------------------------------------- /docs/tiled_sdf_bad.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/tiled_sdf_bad.png -------------------------------------------------------------------------------- /docs/tiled_sdf_good.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/tiled_sdf_good.png -------------------------------------------------------------------------------- /docs/xforms.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/docs/xforms.png -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | 3 | with open("README.md", "r") as fh: 4 | long_description = fh.read() 5 | 6 | setuptools.setup( 7 | name="snowy", 8 | version="0.0.9", 9 | author="Philip Rideout", 10 | description="Small Image Library for Python 3", 11 | long_description=long_description, 12 | long_description_content_type="text/markdown", 13 | python_requires='>=3.6', 14 | install_requires=[ 15 | 'imageio>=2.3', 16 | 'numpy>=1.14', 17 | 'numba>=0.39', 18 | 'scipy>=0.16', 19 | ], 20 | 
url="https://github.com/prideout/snowy", 21 | packages=setuptools.find_packages(), 22 | classifiers=( 23 | "Programming Language :: Python :: 3", 24 | "License :: OSI Approved :: MIT License", 25 | "Operating System :: OS Independent", 26 | ), 27 | ) 28 | -------------------------------------------------------------------------------- /snowy/__init__.py: -------------------------------------------------------------------------------- 1 | """Tiny module for manipulating and generating floating-point images. 2 | 3 | The Snowy API is intentionally flat and only defines functions, no 4 | classes. Users should simply import the top-level package, not any of 5 | its submodules. 6 | """ 7 | 8 | from .filtering import * 9 | from .io import * 10 | from .ops import * 11 | from .distance import * 12 | from .noise import * 13 | from .lighting import * 14 | from .color import * 15 | from .draw import * 16 | 17 | __all__ = ''' 18 | GAUSSIAN HERMITE LANCZOS MITCHELL NEAREST TRIANGLE 19 | blur resize 20 | 21 | LINEAR SRGB GAMMA 22 | delinearize 23 | ensure_alpha 24 | extract_alpha 25 | extract_rgb 26 | from_planar 27 | linearize 28 | load 29 | reshape 30 | export 31 | show 32 | to_planar 33 | unshape 34 | ColorSpace 35 | 36 | add_border 37 | compose 38 | compose_premultiplied 39 | gradient 40 | hflip 41 | hstack 42 | rotate 43 | unitize 44 | vflip 45 | vstack 46 | 47 | generate_gdf 48 | generate_sdf 49 | generate_udf 50 | generate_cpcf 51 | dereference_coords 52 | 53 | generate_noise 54 | generate_fBm 55 | 56 | compute_skylight 57 | compute_normals 58 | 59 | rgb_to_luminance 60 | compute_sobel 61 | 62 | draw_triangle 63 | draw_polygon 64 | '''.split() 65 | 66 | # deprecated functions: 67 | save = export 68 | dereference_cpcf = dereference_coords 69 | -------------------------------------------------------------------------------- /snowy/color.py: -------------------------------------------------------------------------------- 1 | from numba import jit 2 | import numpy as np 3 | 
from . import io 4 | 5 | def rgb_to_luminance(image: np.ndarray): 6 | "Read the first three color planes and return a grayscale image." 7 | assert image.shape[2] >= 3 8 | r, g, b = np.dsplit(image[:,:,:3], image.shape[2]) 9 | return io.reshape(0.2125 * r + 0.7154 * g + 0.0721 * b) 10 | 11 | def compute_sobel(image: np.ndarray): 12 | "Apply Sobel operator for edge detection." 13 | assert len(image.shape) == 3, 'Shape is not rows x cols x channels' 14 | assert image.shape[2] == 1, 'Image must be grayscale' 15 | result = np.empty(image.shape) 16 | _compute_sobel(result, image) 17 | return result 18 | 19 | @jit(nopython=True, fastmath=True, cache=True) 20 | def _compute_sobel(target, source): 21 | height, width = source.shape[:2] 22 | for row in range(height): 23 | for col in range(width): 24 | xm1 = max(0, col - 1) 25 | ym1 = max(0, row - 1) 26 | xp1 = min(width - 1, col + 1) 27 | yp1 = min(height - 1, row + 1) 28 | t00 = source[ym1][xm1] 29 | t10 = source[ym1][col] 30 | t20 = source[ym1][xp1] 31 | t01 = source[row][xm1] 32 | t21 = source[row][xp1] 33 | t02 = source[yp1][xm1] 34 | t12 = source[yp1][col] 35 | t22 = source[yp1][xp1] 36 | gx = t00 + 2.0 * t01 + t02 - t20 - 2.0 * t21 - t22 37 | gy = t00 + 2.0 * t10 + t20 - t02 - 2.0 * t12 - t22 38 | target[row][col] = np.sqrt(gx * gx + gy * gy) 39 | -------------------------------------------------------------------------------- /snowy/distance.py: -------------------------------------------------------------------------------- 1 | """ 2 | This implements the paper 'Distance Transforms of Sampled Functions' 3 | by Felzenszwalb and Huttenlocher. 4 | 5 | Distance fields are useful in a variety of applications, including 6 | image segmentation, antialiasing algorithms, and texture synthesis. 7 | """ 8 | 9 | from numba import jit 10 | import numpy as np 11 | from . 
import io 12 | 13 | INF = 1e20 14 | 15 | def generate_sdf(image: np.ndarray, wrapx=False, wrapy=False): 16 | """Create a signed distance field from a boolean field.""" 17 | a = generate_udf(image, wrapx, wrapy) 18 | b = generate_udf(image == 0.0, wrapx, wrapy) 19 | return a - b 20 | 21 | def generate_udf(image: np.ndarray, wrapx=False, wrapy=False): 22 | """Create an unsigned distance field from a boolean field.""" 23 | assert image.dtype == 'bool', 'Pixel values must be boolean' 24 | assert len(image.shape) == 3, 'Shape is not rows x cols x channels' 25 | assert image.shape[2] == 1, 'Image must be grayscale' 26 | return _generate_edt(image, wrapx, wrapy) 27 | 28 | def generate_gdf(image: np.ndarray, wrapx=False, wrapy=False): 29 | "Create an generalized squared distance field from a scalar field." 30 | assert image.dtype == 'float64', 'Pixel values must be real' 31 | assert len(image.shape) == 3, 'Shape is not rows x cols x channels' 32 | assert image.shape[2] == 1, 'Image must be grayscale' 33 | return _generate_gdt(image, wrapx, wrapy) 34 | 35 | def generate_cpcf(image: np.ndarray): 36 | """Create closest point coordinate field from a boolean field.""" 37 | assert image.dtype == 'bool', 'Pixel values must be boolean' 38 | assert len(image.shape) == 3, 'Shape is not rows x cols x channels' 39 | assert image.shape[2] == 1, 'Image must be grayscale' 40 | return _generate_cpcf(image) 41 | 42 | def dereference_coords(source: np.ndarray, coords: np.ndarray): 43 | """ 44 | For each 2D value in the coord field, make a lookup in the source. 45 | This is useful for creating generalized voronoi diagrams. 
46 | """ 47 | assert len(coords.shape) == 3, 'Shape is not rows x cols x channels' 48 | assert len(source.shape) == 3, 'Shape is not rows x cols x channels' 49 | assert coords.shape[2] == 2, 'Coordinate must be 2-tuples' 50 | voronoi = source.copy() 51 | height, width = source.shape[:2] 52 | coords = coords.copy() 53 | coords[:,:,0] = np.clip(coords[:,:,0], 0, width - 1) 54 | coords[:,:,1] = np.clip(coords[:,:,1], 0, height - 1) 55 | _deref_coords(voronoi, source, coords) 56 | return voronoi 57 | 58 | @jit(nopython=True, fastmath=True, cache=True) 59 | def _deref_coords(voronoi, source, coords): 60 | for y in range(coords.shape[0]): 61 | for x in range(coords.shape[1]): 62 | i, j = coords[y][x] 63 | voronoi[y][x] = source[j][i] 64 | 65 | def _generate_gdt(image, wrapx, wrapy): 66 | image = io.unshape(image) 67 | result = image.copy() 68 | _generate_udf(result, wrapx, wrapy) 69 | return io.reshape(result) 70 | 71 | def _generate_edt(image, wrapx, wrapy): 72 | image = io.unshape(image) 73 | result = np.where(image, 0.0, INF) 74 | _generate_udf(result, wrapx, wrapy) 75 | return np.sqrt(io.reshape(result)) 76 | 77 | def _generate_cpcf(image): 78 | image = io.unshape(image) 79 | result = np.where(image, 0.0, INF) 80 | i, j = _generate_udf(result, False, False) 81 | cpcf = np.dstack([i, j]) 82 | result = cpcf.copy() 83 | _process_cpcf(cpcf, result) 84 | np.copyto(cpcf, result) 85 | return cpcf 86 | 87 | @jit(nopython=True, fastmath=True, cache=True) 88 | def _process_cpcf(cpcf, result): 89 | for y in range(cpcf.shape[0]): 90 | for x in range(cpcf.shape[1]): 91 | i, j = cpcf[y][x] 92 | result[y][x][0] = i 93 | result[y][x][1] = cpcf[y][i][1] 94 | 95 | def _generate_udf(result, wrapx, wrapy): 96 | 97 | scratch = result 98 | if wrapx: scratch = np.hstack([scratch, scratch, scratch]) 99 | if wrapy: scratch = np.vstack([scratch, scratch, scratch]) 100 | 101 | height, width = scratch.shape 102 | capacity = max(width, height) 103 | i = np.empty(scratch.shape, dtype='u2') 104 | 
j = np.empty(scratch.shape, dtype='u2') 105 | d = np.zeros([capacity]) 106 | z = np.zeros([capacity + 1]) 107 | v = np.zeros([capacity], dtype='u2') 108 | _generate_udf_native(width, height, d, z, v, i, j, scratch) 109 | 110 | x0, x1 = width // 3, 2 * width // 3 111 | y0, y1 = height // 3, 2 * height // 3 112 | if wrapx: scratch = scratch[:,x0:x1] 113 | if wrapy: scratch = scratch[y0:y1,:] 114 | if wrapx or wrapy: np.copyto(result, scratch) 115 | 116 | return i, j 117 | 118 | @jit(nopython=True, fastmath=True, cache=True) 119 | def _generate_udf_native(width, height, d, z, v, i, j, result): 120 | for x in range(width): 121 | f = result[:,x] 122 | edt(f, d, z, v, j[:,x], height) 123 | result[:,x] = d[:height] 124 | for y in range(height): 125 | f = result[y,:] 126 | edt(f, d, z, v, i[y,:], width) 127 | result[y,:] = d[:width] 128 | 129 | @jit(nopython=True, fastmath=True, cache=True) 130 | def edt(f, d, z, v, i, n): 131 | # Find the lower envelope of a sequence of parabolas. 132 | # f...source data (returns the Y of the parabola vertex at X) 133 | # d...destination data (final distance values are written here) 134 | # z...temporary used to store X coords of parabola intersections 135 | # v...temporary used to store X coords of parabola vertices 136 | # i...resulting X coords of parabola vertices 137 | # n...number of pixels in "f" to process 138 | 139 | # Always add the first pixel to the enveloping set since it is 140 | # obviously lower than all parabolas processed so far. 141 | k: int = 0 142 | v[0] = 0 143 | z[0] = -INF 144 | z[1] = +INF 145 | 146 | for q in range(1, n): 147 | 148 | # If the new parabola is lower than the right-most parabola in 149 | # the envelope, remove it from the envelope. To make this 150 | # determination, find the X coordinate of the intersection (s) 151 | # between the parabolas with vertices at (q,f[q]) and (p,f[p]). 
152 | p = v[k] 153 | s = ((f[q] + q*q) - (f[p] + p*p)) / (2.0*q - 2.0*p) 154 | while s <= z[k]: 155 | k = k - 1 156 | p = v[k] 157 | s = ((f[q] + q*q) - (f[p] + p*p)) / (2.0*q - 2.0*p) 158 | 159 | # Add the new parabola to the envelope. 160 | k = k + 1 161 | v[k] = q 162 | z[k] = s 163 | z[k + 1] = +INF 164 | 165 | # Go back through the parabolas in the envelope and evaluate them 166 | # in order to populate the distance values at each X coordinate. 167 | k = 0 168 | for q in range(n): 169 | while z[k + 1] < float(q): 170 | k = k + 1 171 | dx = q - v[k] 172 | d[q] = dx * dx + f[v[k]] 173 | i[q] = v[k] 174 | -------------------------------------------------------------------------------- /snowy/draw.py: -------------------------------------------------------------------------------- 1 | from numba import guvectorize 2 | 3 | import numpy as np 4 | import math 5 | 6 | def draw_polygon(target: np.ndarray, source: np.ndarray, 7 | vertices: np.ndarray): 8 | """Draw a textured convex polygon into the target image. 9 | 10 | The vertices are specified with a nx5 array where each row is XYWUV. 11 | The UV coordinates address the source image in [0,+1] with +V going 12 | downward. The XY coordinates are in the range [-1,+1] and their 13 | domain is the entire target image with +Y going upward. The W 14 | coordinate is to allow for perspective-correct interpolation. If you 15 | don't know what that means, then set W to 1. 16 | """ 17 | assert len(target.shape) == 3, 'Target shape must be 3D.' 18 | assert target.shape[2] == 4, 'Target must be RGBA.' 19 | assert len(source.shape) == 3, 'Source shape must be 3D.' 20 | assert source.shape[2] == 4, 'Source must be RGBA.' 21 | assert vertices.shape[1] == 5, 'Vertices must be nx5.' 
    n = vertices.shape[0]
    # Fan-triangulate the convex polygon around vertex 0.
    for tri in range(2, n):
        indices = np.array([0, tri - 1, tri])
        triangle = vertices[indices]
        draw_triangle(target, source, triangle)

def draw_triangle(target: np.ndarray, source: np.ndarray,
                  vertices: np.ndarray):
    """Draw a textured triangle into the target image.

    The vertices are specified with a 3x5 array where each row is XYWUV.
    The UV coordinates address the source image in [0,+1] with +V going
    downward. The XY coordinates are in the range [-1,+1] and their
    domain is the entire target image with +Y going upward. The W
    coordinate is to allow for perspective-correct interpolation. If you
    don't know what that means, then set W to 1.
    """
    assert len(target.shape) == 3, 'Target shape must be 3D.'
    assert target.shape[2] == 4, 'Target must be RGBA.'
    assert len(source.shape) == 3, 'Source shape must be 3D.'
    assert source.shape[2] == 4, 'Source must be RGBA.'
    assert vertices.shape == (3, 5), 'Vertices must be 3x5.'

    # Work on a copy so the caller's vertex array is not mutated.
    vertices = np.copy(vertices)
    xy = vertices[:, :2]
    w = vertices[:, 2:3]
    uv = vertices[:, 3:]

    # Perspective divide: positions and texcoords are interpolated in
    # 1/w space for perspective correctness.
    w = 1.0 / w
    xy *= w
    uv *= w
    w = w[:, 0]

    # Map clip-space XY ([-1,+1], +Y up) to pixel coordinates.
    height, width, _ = target.shape
    xy[:, 0] = (xy[:, 0] + 1.0) * 0.5 * width
    xy[:, 1] = height - 1 -(xy[:, 1] + 1.0) * 0.5 * height

    v0, v1, v2 = xy
    # Reciprocal of twice the signed triangle area, used to normalize
    # the barycentric weights. NOTE(review): degenerate (zero-area)
    # triangles divide by zero here — confirm callers avoid them.
    area = 1 / edge(v0, v1, v2)

    source = source.astype(target.dtype, copy=False)
    v0 = v0.astype(np.float32, copy=False)
    v1 = v1.astype(np.float32, copy=False)
    v2 = v2.astype(np.float32, copy=False)
    uv = uv.astype(np.float32, copy=False)
    w = w.astype(np.float32, copy=False)
    _rasterize(target, source, area, v0, v1, v2, uv, w)

SIG0 = "void(f4[:,:,:],f4[:,:,:],f8,f4[:],f4[:],f4[:],f4[:,:],f4[:])"
SIG1 = "(r0,c0,D4),(r1,c1,D4),(),(D2),(D2),(D2),(D3,D2),(D3)"
@guvectorize([SIG0], SIG1, target='parallel', cache=True)
def _rasterize(target, source, area, v0, v1, v2, uv, w):
    # Barycentric rasterizer: for each pixel inside the triangle's
    # bounding box, compute edge functions, reject outside pixels, and
    # sample the source texture with perspective-corrected UVs.
    height, width, _ = target.shape
    sheight, swidth, _ = source.shape
    ya0 = v2[1] - v1[1]
    ya1 = v0[1] - v2[1]
    ya2 = v1[1] - v0[1]
    yb0 = v2[0] - v1[0]
    yb1 = v0[0] - v2[0]
    yb2 = v1[0] - v0[0]

    # Bounding box of the triangle, clamped to the target image.
    maxx = max(max(int(v0[0]), int(v1[0])), int(v2[0]))
    maxy = max(max(int(v0[1]), int(v1[1])), int(v2[1]))
    minx = min(min(int(v0[0]), int(v1[0])), int(v2[0]))
    miny = min(min(int(v0[1]), int(v1[1])), int(v2[1]))

    minx = max(0, minx)
    miny = max(0, miny)
    maxx = min(width - 1, maxx)
    maxy = min(height - 1, maxy)

    for row in range(miny, maxy + 1):
        for col in range(minx, maxx + 1):
            # Sample at the pixel center.
            px = col + .5
            py = row + .5
            w0 = (py - v1[1]) * yb0 - (px - v1[0]) * ya0
            w1 = (py - v2[1]) * yb1 - (px - v2[0]) * ya1
            w2 = (py - v0[1]) * yb2 - (px - v0[0]) * ya2
            if w0 < 0 or w1 < 0 or w2 < 0:
                continue
            w0 *= area
            w1 *= area
            w2 *= area
            # Interpolate UV in 1/w space, then undo the perspective
            # divide.
            s = w0 * uv[0][0] + w1 * uv[1][0] + w2 * uv[2][0]
            t = w0 * uv[0][1] + w1 * uv[1][1] + w2 * uv[2][1]
            s /= w0 * w[0] + w1 * w[1] + w2 * w[2]
            t /= w0 * w[0] + w1 * w[1] + w2 * w[2]
            # Wrap (tile) the texture lookup.
            scol = int(s * swidth) % swidth
            srow = int(t * sheight) % sheight
            target[row][col] = source[srow][scol]

def edge(a, b, c):
    # 2D cross product: twice the signed area of triangle abc.
    return (c[0] - a[0]) * (b[1] - a[1]) - (c[1] - a[1]) * (b[0] - a[0])
--------------------------------------------------------------------------------
/snowy/filtering.py:
--------------------------------------------------------------------------------
"""Define resize, blur, and related constants."""

from . import io
from collections import namedtuple
from numba import guvectorize
import math
import numpy as np

RowOps = namedtuple('RowOps', 'tindices sindices fweights'.split())

GAUSSIAN_SCALE = 1.0 / np.sqrt(0.5 * np.pi)

def hermite(x):
    x = np.clip(x, 0, 1)
    return 2 * x * x * x - 3 * x * x + 1

def triangle(x):
    x = np.clip(x, 0, 1)
    return 1.0 - x

def gaussian(x):
    x = np.clip(x, 0, 2)
    return np.exp(-2 * x * x) * GAUSSIAN_SCALE

def nearest(x):
    return np.less_equal(x, 0.5) * 1.0

def sinc(x):
    if x <= 0.00001: return 1.0
    return np.sin(np.pi * x) / (np.pi * x)

def lanczos(x):
    x = np.clip(x, 0, 1)
    return sinc(x) * sinc(x)

def mitchell(x):
    # Mitchell-Netravali with B = C = 1/3.
    B = 1.0 / 3.0
    C = 1.0 / 3.0
    P0 = (6 - 2*B) / 6.0
    P1 = 0
    P2 = (-18 +12*B + 6*C) / 6.0
    P3 = (12 - 9*B - 6*C) / 6.0
    Q0 = (8*B +24*C) / 6.0
    Q1 = (-12*B -48*C) / 6.0
    Q2 = (6*B +30*C) / 6.0
    Q3 = (-1*B - 6*C) / 6.0
    if x >= 2.0: return 0.0
    if x >= 1.0: return Q0 + Q1*x + Q2*x*x + Q3*x*x*x
    return P0 + P1*x + P2*x*x + P3*x*x*x

class Filter:
    # Pairs a kernel function with its support radius.
    def __init__(self, fn, radius):
        self.radius = radius
        self.function = fn

HERMITE = Filter(hermite, 1)
TRIANGLE = 
Filter(triangle, 1) 58 | GAUSSIAN = Filter(gaussian, 2) 59 | NEAREST = Filter(nearest, 0) 60 | LANCZOS = Filter(lanczos, 1) 61 | MITCHELL = Filter(mitchell, 2) 62 | 63 | def resize(source, width=None, height=None, filter=None, radius=1, 64 | wrapx=False, wrapy=False): 65 | """Create a new numpy image with the desired size. 66 | 67 | Either width or height can be null, in which case its value 68 | is inferred from the aspect ratio of the source image. 69 | 70 | Filter can be HERMITE, TRIANGLE, GAUSSIAN, NEAREST, LANCZOS, or 71 | MITCHELL. 72 | """ 73 | assert len(source.shape) == 3, 'Shape is not rows x cols x channels' 74 | assert width != None or height != None, 'Missing target size' 75 | aspect = source.shape[1] / source.shape[0] 76 | if width == None: width = height * aspect 77 | if height == None: height = width / aspect 78 | magnifying = width > source.shape[1] 79 | if filter == None: filter = MITCHELL if magnifying else LANCZOS 80 | return resample(source, width, height, filter, radius, wrapx, wrapy) 81 | 82 | def resample(source, width, height, filter, radius, wrapx, wrapy): 83 | nchans = source.shape[2] 84 | def fn(t): return filter.function(t / radius) 85 | scaled_filter = Filter(fn, radius * filter.radius) 86 | srows, scols = source.shape[0], source.shape[1] 87 | trows, tcols = int(height), int(width) 88 | vresult = np.zeros([srows, tcols, nchans]) 89 | rowops = create_ops(tcols, scols, scaled_filter, wrapx) 90 | convolve(vresult, source, rowops) 91 | vresult = transpose(vresult) 92 | hresult = np.zeros([tcols, trows, nchans]) 93 | rowops = create_ops(trows, srows, scaled_filter, wrapy) 94 | convolve(hresult, vresult, rowops) 95 | return transpose(hresult) 96 | 97 | def blur(image, filter=GAUSSIAN, radius=4, wrapx=False, wrapy=False): 98 | """Resample an image and produce a new image with the same size. 99 | 100 | For a list of available filters, see resize. 
101 | """ 102 | width, height = image.shape[1], image.shape[0] 103 | return resize(image, width, height, filter, radius, wrapx, wrapy) 104 | 105 | def transpose(source: np.ndarray): 106 | return np.swapaxes(source, 0, 1) 107 | 108 | def create_ops(ntarget, nsource, filter: Filter, wrap) -> RowOps: 109 | # Generate a sequence of operations to perform a 1D convolution 110 | # where each operation is represented by 3-tuple of: target index, 111 | # source index, weight. 112 | tindices, sindices, fweights = [], [], [] 113 | dtarget = 1.0 / ntarget 114 | dsource = 1.0 / nsource 115 | minifying = ntarget < nsource 116 | fextent = dtarget if minifying else dsource 117 | fdomain = float(ntarget if minifying else nsource) 118 | x = dtarget / 2 119 | for tindex in range(ntarget): 120 | minx = x - filter.radius * fextent 121 | maxx = x + filter.radius * fextent 122 | minsi = int(minx * float(nsource)) 123 | maxsi = int(math.ceil(maxx * float(nsource))) 124 | localops = [] 125 | weightsum = 0.0 126 | for sindex in range(minsi, maxsi+1): 127 | wrapped = sindex 128 | if sindex < 0 or sindex >= nsource: 129 | if wrap: 130 | wrapped = sindex % nsource 131 | else: 132 | continue 133 | sx = (0.5 + sindex) * dsource 134 | t = fdomain * abs(sx - x) 135 | weight = filter.function(t) 136 | if weight != 0: 137 | localops.append((tindex, wrapped, weight)) 138 | weightsum += weight 139 | if weightsum > 0.0: 140 | for op in localops: 141 | tindices.append(op[0]) 142 | sindices.append(op[1]) 143 | fweights.append(op[2] / weightsum) 144 | x += dtarget 145 | return RowOps(tindices, sindices, fweights) 146 | 147 | SIG0 = "void(f8[:,:,:], f8[:,:,:], i4[:], i4[:], f8[:])" 148 | SIG1 = "(r0,c0,d),(r0,c1,d),(i),(i),(i)" 149 | @guvectorize([SIG0], SIG1, target='parallel', cache=True) 150 | def jit_convolve(target, source, tinds, sinds, weights): 151 | nrows, nchan, nops = target.shape[0], target.shape[2], len(tinds) 152 | for c in range(nchan): 153 | for row in range(nrows): 154 | for op in 
range(nops): 155 | tind, sind, weight = tinds[op], sinds[op], weights[op] 156 | target[row][tind][c] += source[row][sind][c] * weight 157 | 158 | def convolve(target, source, rowops: RowOps): 159 | # Perform highly generalized 1D convolution. This is almost 160 | # equivalent to: 161 | # 162 | # for row in range(len(target)): 163 | # target[row][tindices] += source[row][sindices] * fweights 164 | # 165 | # ...but with the crucial feature of allowing the same index to 166 | # appear multiple times in tindices. 167 | # 168 | # Note that standard numpy convolution assumes a stationary kernel, 169 | # whereas this function could possibly be used to apply a varying 170 | # kernel. 171 | tindices, sindices, fweights = rowops 172 | assert len(tindices) == len(sindices) == len(fweights) 173 | assert len(target) == len(source) 174 | jit_convolve(target, source, 175 | np.int32(tindices), np.int32(sindices), 176 | np.double(fweights)) 177 | -------------------------------------------------------------------------------- /snowy/io.py: -------------------------------------------------------------------------------- 1 | """Define export, load, show, reshape and unshape.""" 2 | 3 | import imageio 4 | import numpy as np 5 | import os 6 | import platform 7 | import sys 8 | import tempfile 9 | 10 | LINEAR = 0 11 | SRGB = 1 12 | GAMMA = 2 13 | 14 | def sRGB_to_linear(s): 15 | a = 0.055 16 | return np.where(s <= 0.04045, s / 12.92, ((s+a) / (1+a)) ** 2.4) 17 | 18 | def linear_to_sRGB(s): 19 | a = 0.055 20 | return np.where(s <= 0.0031308, 12.92 * s, (1+a) * s**(1/2.4) - a) 21 | 22 | def gamma_to_linear(s): 23 | return s ** 2.2 24 | 25 | def linear_to_gamma(s): 26 | return s ** (1/2.2) 27 | 28 | def linearize(image, target_space=SRGB): 29 | """Transform colors from perceptually linear to physically linear. 30 | 31 | This is automatically performed when using load 32 | on a PNG or JPEG. See also delinearize. 
33 | """ 34 | if target_space == SRGB: 35 | return sRGB_to_linear(image) 36 | return gamma_to_linear(image) 37 | 38 | def delinearize(image, source_space=SRGB): 39 | """Transform colors from physically linear to perceptually linear. 40 | 41 | This is automatically performed when using export 42 | to a PNG or JPEG. See also linearize. 43 | """ 44 | if source_space == SRGB: 45 | return linear_to_sRGB(image) 46 | return linear_to_gamma(image) 47 | 48 | def show(image, delinearize=True): 49 | """Display an image in a platform-specific way.""" 50 | if isinstance(image, np.ndarray): 51 | show_array(image, delinearize) 52 | elif isinstance(image, str): 53 | show_filename(image) 54 | else: 55 | raise ValueError('Unsupported type') 56 | 57 | def reshape(image): 58 | """Add a trailing dimension to single-channel 2D images. 59 | 60 | See also unshape. 61 | """ 62 | if len(image.shape) == 2: 63 | image = np.reshape(image, image.shape + (1,)) 64 | return image 65 | 66 | def unshape(image): 67 | """Remove the trailing dimension from single-channel 3D images. 68 | 69 | See also reshape. 70 | """ 71 | if len(image.shape) == 3 and image.shape[2] == 1: 72 | return np.reshape(image, image.shape[:2]) 73 | return image 74 | 75 | def _load(filename: str, extension: str, linear: bool): 76 | if extension == '.png': 77 | img = imageio.imread(filename, 'PNG-PIL', pilmode='RGBA') 78 | img = np.clip(np.float64(img) / 255, 0, None) 79 | elif extension == '.jpg' or extension == '.jpeg': 80 | img = imageio.imread(filename) 81 | img = np.clip(np.float64(img) / 255, 0, None) 82 | elif extension == '.exr': 83 | imageio.plugins.freeimage.download() 84 | img = np.float64(imageio.imread(filename)) 85 | return linearize(img) if not linear else img 86 | 87 | def load(filename: str, linearize=True) -> np.ndarray: 88 | """Create a numpy array from the given PNG, JPEG, or EXR image file. 
89 | 90 | Regardless of the pixel format on disk, PNG / JPEG images are always 91 | divided by 255, and PNG images are extended to 4 color channels. 92 | 93 | See also reshape and 94 | linearize (which this calls). 95 | """ 96 | 97 | ext = filename[filename.rfind('.'):] 98 | assert ext == '.png' or ext == '.jpeg' or ext == '.jpg' or ext == '.exr' 99 | return reshape(np.float64(_load(filename, ext, not linearize))) 100 | 101 | def _export(image: np.ndarray, filename: str, linear): 102 | image_format = None 103 | if linear: 104 | image = delinearize(np.clip(image, 0, None)) 105 | if filename.endswith('.exr'): 106 | imageio.plugins.freeimage.download() 107 | image_format = 'EXR-FI' 108 | image = np.float32(image) 109 | else: 110 | image = np.uint8(np.clip(image * 255, 0, 255)) 111 | imageio.imwrite(filename, unshape(image), image_format) 112 | 113 | def export(image: np.ndarray, filename: str, delinearize=True): 114 | """Export a numpy array to a PNG, JPEG, or EXR image file. 115 | 116 | This function automatically multiplies PNG / JPEG images by 255. 117 | 118 | See also unshape and 119 | delinearize (which this calls). 
120 | """ 121 | assert filename.endswith('.png') or filename.endswith('.jpeg') or \ 122 | filename.endswith('.jpg') or filename.endswith('.exr') 123 | _export(image, filename, delinearize) 124 | 125 | def show_array(image: np.ndarray, delinearize): 126 | with tempfile.NamedTemporaryFile() as fp: 127 | filename = fp.name + '.png' 128 | export(image, filename, delinearize) 129 | show_filename(filename) 130 | 131 | def show_filename(image: str): 132 | if 0 == os.system('which imgcat >/dev/null 2>&1'): 133 | print("\n") 134 | os.system('imgcat ' + image) 135 | elif 'pytest' in sys.modules: 136 | return 137 | elif platform.system() == 'Darwin': 138 | os.system('open ' + image) 139 | elif platform.system() == 'Linux' and \ 140 | os.environ.get('DESKTOP_SESSION'): 141 | os.system('xdg-open ' + image) 142 | else: 143 | print('Generated ' + image) 144 | 145 | def ensure_alpha(src: np.ndarray) -> np.ndarray: 146 | """If the incoming image is 3-channel, adds a 4th channel.""" 147 | assert len(src.shape) == 3 148 | if src.shape[2] != 3: 149 | return src 150 | alpha = np.ones(src.shape[:2]) 151 | r, g, b = to_planar(src) 152 | return from_planar(np.array([r, g, b, alpha])) 153 | 154 | def extract_alpha(image: np.ndarray) -> np.ndarray: 155 | """Extract the alpha plane from an RGBA image. 156 | 157 | Note that this returns a copy, not a view. To manipulate the pixels 158 | in a view of the alpha plane, simply make a numpy slice, as 159 | in: alpha_view = myimage[:,:,3]. 160 | """ 161 | assert len(image.shape) == 3 and image.shape[2] == 4 162 | return np.dsplit(image, 4)[3].copy() 163 | 164 | def extract_rgb(image: np.ndarray) -> np.ndarray: 165 | """Extract the RGB planes from an RGBA image. 166 | 167 | Note that this returns a copy. If you wish to obtain a view that 168 | allows mutating pixels, simply use slicing instead. For 169 | example, to invert the colors of an image while leaving alpha 170 | intact, you can do: 171 | myimage[:,:,:3] = 1.0 - myimage[:,:,:3]. 
    """
    assert len(image.shape) == 3 and image.shape[2] >= 3
    planes = np.dsplit(image, image.shape[2])
    return np.dstack(planes[:3])

def to_planar(image: np.ndarray) -> np.ndarray:
    """Convert a row-major image into a channel-major image.

    This creates a copy, not a view.
    """
    assert len(image.shape) == 3
    result = np.array(np.dsplit(image, image.shape[2]))
    return np.reshape(result, result.shape[:-1])

def from_planar(image: np.ndarray) -> np.ndarray:
    """Convert a channel-major image into a row-major image.

    This creates a copy, not a view.
    """
    assert len(image.shape) == 3
    return np.dstack(image)
--------------------------------------------------------------------------------
/snowy/lighting.py:
--------------------------------------------------------------------------------
from . import io
from numba import prange, jit
import math
import numpy as np

# Step directions for the horizon sweeps, named after chess moves.
SWEEP_DIRECTIONS = np.int16([
    (1, 0), (0, 1), (-1, 0), (0, -1),   # Rook
    (1, 1), (-1, -1), (1, -1), (-1, 1), # Bishop
    (2, 1), (2, -1), (-2, 1), (-2, -1), # Knight
    (1, 2), (1, -2), (-1, 2), (-1, -2)  # Knight
])

def compute_skylight(elevation, verbose=False):
    """Compute ambient occlusion from a height map."""
    height, width, nchan = elevation.shape
    assert nchan == 1
    result = np.zeros([height, width])
    _compute_skylight(result, elevation[:,:,0], verbose)
    return io.reshape(np.clip(1.0 - result, 0, 1))

def compute_normals(elevation):
    """Generate a 3-channel normal map from a height map.

    The normal components are in the range [-1,+1] and the size of the
    normal map is (width-1, height-1) due to forward differencing.
    """
    height, width, nchan = elevation.shape
    assert nchan == 1
    normals = np.empty([height - 1, width - 1, 3])
    _compute_normals(elevation[:,:,0], normals)
    return normals

@jit(nopython=True, fastmath=True, cache=True)
def _compute_normals(el, normals):
    # Cross product of the forward-difference tangent vectors, then
    # normalize to unit length.
    h, w = normals.shape[:2]
    for row in range(h):
        for col in range(w):
            p = np.float64((col / w, row / h, el[row][col]))
            dx = np.float64(((col+1) / w, row / h, el[row][col+1]))
            dy = np.float64((col / w, (row+1) / h, el[row+1][col]))
            v1 = dx - p
            v2 = dy - p
            n = np.float64((
                (v1[1] * v2[2]) - (v1[2] * v2[1]),
                (v1[2] * v2[0]) - (v1[0] * v2[2]),
                (v1[0] * v2[1]) - (v1[1] * v2[0])))
            isq = 1 / np.linalg.norm(n)
            normals[row][col] = n * isq

def _compute_skylight(dst, src, verbose):
    # Accumulates occlusion over all sweep directions, then averages.
    height, width = src.shape
    cnt = np.zeros(dst.shape, dtype='u8')

    # TODO Fix allocation or explain the "3"
    seedpoints = np.empty([3 * max(width, height), 2], dtype='i2')
    maxpathlen = max(width, height) + 1

    for direction in SWEEP_DIRECTIONS:
        nsweeps = _generate_seedpoints(src, direction, seedpoints)
        if verbose:
            print('Horizon direction: ', direction)
        sweeps = np.empty([nsweeps, maxpathlen, 3])
        pts = np.empty([nsweeps, 3])
        _horizon_scan(src, dst, cnt, direction, seedpoints, sweeps, pts)

    dst /= cnt
    dst *= 4 / np.pi

# TODO This function needs to be rewritten or documented.
def _generate_seedpoints(field, direction, seedpoints):
    # Emits the off-image starting points for every sweep line along
    # the given direction; returns the number of sweeps generated.
    h, w = field.shape[:2]
    s = 0
    sx, sy = np.sign(direction)
    ax, ay = np.abs(direction)
    nsweeps = ay * w + ax * h - (ax + ay - 1)
    for x in range(-ax, w - ax):
        for y in range(-ay, h - ay):
            if x >= 0 and x < w and y >= 0 and y < h: continue
            px, py = x, y
            if sx < 0: px = w - x - 1
            if sy < 0: py = h - y - 1
            seedpoints[s][0] = px
            seedpoints[s][1] = py
            s += 1
    assert nsweeps == s
    return nsweeps

SIG0 = "void(f8[:,:],f8[:,:],u8[:,:],i2[:],i2[:,:],f8[:,:,:],f8[:,:])"
@jit([SIG0], nopython=True, fastmath=True, parallel=True)
def _horizon_scan(heights, occlusion, counts, direction, seedpoints,
                  sweeps, pts):
    # For each sweep line, maintain a convex-hull stack of previously
    # visited points and use it to find the horizon point for each
    # pixel along the line.
    h, w = heights.shape[:2]
    cellw = 1 / max(w, h)
    cellh = 1 / max(w, h)
    nsweeps = len(sweeps)
    for sweep in prange(nsweeps):
        thispt = pts[sweep]
        stack = sweeps[sweep]
        startpt = seedpoints[sweep]
        pathlen = 0
        i, j = startpt
        ii, jj = min(max(0, i), w-1), min(max(0, j), h-1)

        thispt[0] = i * cellw
        thispt[1] = j * cellh
        thispt[2] = heights[jj][ii]

        stack_top = 0

        stack[stack_top] = thispt

        i += direction[0]
        j += direction[1]
        while i >= 0 and i < w and j >= 0 and j < h:

            thispt[0] = i * cellw
            thispt[1] = j * cellh
            thispt[2] = heights[j][i]

            # Pop stack entries that are no longer on the convex hull
            # as seen from the current point.
            while stack_top > 0:

                a, b = thispt, stack[stack_top]
                dx = b[0] - a[0]
                dy = b[1] - a[1]
                y = b[2] - a[2]
                x = math.sqrt(dx * dx + dy * dy)
                s1 = y / x

                a, b = thispt, stack[stack_top - 1]
                dx = b[0] - a[0]
                dy = b[1] - a[1]
                y = b[2] - a[2]
                x = math.sqrt(dx * dx + dy * dy)
                s2 = y / x

                if s1 >= s2: break
                stack_top -= 1

            horizonpt = stack[stack_top]
            stack_top += 1
            stack[stack_top] = thispt

            d = horizonpt - 
thispt 144 | dx = d[2] / np.linalg.norm(d) 145 | occlusion[j][i] += math.atan(max(dx, 0)) 146 | counts[j][i] += 1 147 | 148 | i += direction[0] 149 | j += direction[1] 150 | -------------------------------------------------------------------------------- /snowy/noise.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import math 3 | from . import io 4 | 5 | def generate_noise(width, height, frequency, seed=1, wrapx=False, 6 | wrapy=False, offset=[0,0]): 7 | """Generate a single-channel gradient noise image. 8 | 9 | A frequency of 1.0 creates a single surflet across the width of the 10 | image, while a frequency of 4.0 creates a 4x4 grid such that the 11 | (2,2) surflet is centered. Noise values live within the [-1,+1] 12 | range. 13 | """ 14 | return _noise(width, height, frequency, seed, wrapx, wrapy, offset) 15 | 16 | def generate_fBm(width, height, freq, layers, seed, lacunarity=2, 17 | persistence=2, wrapx=False, wrapy=False): 18 | """Generate 2D fractional brownian motion by adding layers of noise. 19 | 20 | See also generate_noise. 21 | """ 22 | noise = generate_noise 23 | n = np.zeros([height, width, 1]) 24 | amplitude = 1 25 | for f in range(layers): 26 | lseed = seed + int(f) 27 | n += amplitude * noise(width, height, freq, lseed, wrapx, wrapy) 28 | freq *= lacunarity 29 | amplitude /= persistence 30 | return n 31 | 32 | def _noise(width, height, frequency, seed, wrapx, wrapy, offset): 33 | nrows, ncols = int(height), int(width) 34 | table = Noise(seed) 35 | 36 | # Compute the span of U texcoords in [0,+1] such that 0 is at the 37 | # left edge of the left-most texel, and +1 is at the right edge of 38 | # the right-most pixel. 39 | maxx = frequency 40 | hw = 0.5 * maxx / width 41 | u = np.linspace(hw, maxx - hw, ncols) + offset[0] 42 | 43 | # Compute the span of V texcoords according to the aspect ratio. 
44 | maxy = frequency * float(height) / width 45 | hh = 0.5 * maxy / height 46 | v = np.linspace(hh, maxy - hh, nrows) + offset[1] 47 | 48 | # Generate floating point texture coordinates, then split them into 49 | # integer and fractional components. 50 | u, v = np.meshgrid(u, v, sparse=True) 51 | i0, j0 = np.floor(u).astype(int), np.floor(v).astype(int) 52 | i1, j1 = i0 + 1, j0 + 1 53 | x0, y0 = u - i0, v - j0 54 | x1, y1 = x0 - 1, y0 - 1 55 | 56 | # Find the 2D vectors at the nearest grid cell corners. 57 | if wrapx: 58 | assert math.modf(frequency)[0] == 0.0, \ 59 | "wrapx requires an integer frequency" 60 | i0 = i0 % int(frequency) 61 | i1 = i1 % int(frequency) 62 | if wrapy: 63 | assert math.modf(maxy)[0] == 0.0, \ 64 | "wrapy requires frequency*width/height to be an integer" 65 | j0 = j0 % int(maxy) 66 | j1 = j1 % int(maxy) 67 | grad00 = _gradient(table, i0, j0) 68 | grad01 = _gradient(table, i0, j1) 69 | grad10 = _gradient(table, i1, j0) 70 | grad11 = _gradient(table, i1, j1) 71 | 72 | va = dot(x0, y0, grad00[0], grad00[1]) 73 | vb = dot(x1, y0, grad10[0], grad10[1]) 74 | vc = dot(x0, y1, grad01[0], grad01[1]) 75 | vd = dot(x1, y1, grad11[0], grad11[1]) 76 | 77 | # Lerp the neighboring 4 surflets 78 | t0 = x0*x0*x0*(x0*(x0*6.0 - 15.0) + 10.0) 79 | t1 = y0*y0*y0*(y0*(y0*6.0 - 15.0) + 10.0) 80 | result = va + t0 * (vb-va) + t1 * (vc-va) + t0 * t1 * (va-vb-vc+vd) 81 | return io.reshape(result) 82 | 83 | class Noise: 84 | def __init__(self, seed): 85 | self.rnd = np.random.RandomState(seed) 86 | self.size = 256 87 | self.mask = int(self.size - 1) 88 | self.indices = np.arange(self.size, dtype = np.int16) 89 | self.rnd.shuffle(self.indices) 90 | theta = np.linspace(0, math.tau, self.size, endpoint=False) 91 | self.gradients = [np.cos(theta), np.sin(theta)] 92 | 93 | def _gradient(table: Noise, i, j): 94 | perm, mask = table.indices, table.mask 95 | u, v = table.gradients 96 | hash = perm[np.bitwise_and(perm[np.bitwise_and(i, mask)] + j, mask)] 97 | return 
u[hash], v[hash] 98 | 99 | def dot(x, y, gradx, grady): 100 | return gradx * x + grady * y 101 | -------------------------------------------------------------------------------- /snowy/ops.py: -------------------------------------------------------------------------------- 1 | """Define add_border etc.""" 2 | 3 | from snowy.io import * 4 | from numba import guvectorize 5 | import numpy as np 6 | 7 | def add_left(image: np.ndarray, T=2, V=0) -> np.ndarray: 8 | height, width, nchan = image.shape 9 | newshape = height, width + T, nchan 10 | result = np.full(newshape, np.float64(V)) 11 | np.copyto(result[:,T:], image) 12 | return result 13 | 14 | def add_right(image: np.ndarray, T=2, V=0) -> np.ndarray: 15 | height, width, nchan = image.shape 16 | newshape = height, width + T, nchan 17 | result = np.full(newshape, np.float64(V)) 18 | np.copyto(result[:,:-T], image) 19 | return result 20 | 21 | def add_top(image: np.ndarray, T=2, V=0) -> np.ndarray: 22 | height, width, nchan = image.shape 23 | newshape = height + T, width, nchan 24 | result = np.full(newshape, np.float64(V)) 25 | np.copyto(result[T:,:], image) 26 | return result 27 | 28 | def add_bottom(image: np.ndarray, T=2, V=0) -> np.ndarray: 29 | height, width, nchan = image.shape 30 | newshape = height + T, width, nchan 31 | result = np.full(newshape, np.float64(V)) 32 | np.copyto(result[:-T,:], image) 33 | return result 34 | 35 | def add_border(image: np.ndarray, width=2, value=0, sides='ltrb'): 36 | """Extend the size of an image by adding borders. 37 | 38 |

39 | The sides argument defaults to 40 | "LTRB", which enables borders for all four sides: Left, 41 | Top, Right, and Bottom. This can be used to select which borders you 42 | wish to add. 43 |

44 | 45 | """ 46 | result = image 47 | sides = sides.upper() 48 | if 'L' in sides: result = add_left(result, width, value) 49 | if 'T' in sides: result = add_top(result, width, value) 50 | if 'R' in sides: result = add_right(result, width, value) 51 | if 'B' in sides: result = add_bottom(result, width, value) 52 | return result 53 | 54 | def hstack(images, border_width=2, border_value=0): 55 | """Horizontally concatenate a list of images with a border. 56 | 57 | This is similar to numpy's hstack except that it adds 58 | a border around each image. The borders can be controlled 59 | with the optional border_width and 60 | border_value arguments. See also 61 | vstack. 62 | """ 63 | if border_width == 0: return np.hstack(images) 64 | T, V = border_width, border_value 65 | result = [] 66 | for image in images[:-1]: 67 | result.append(add_border(image, T, V, 'LTB')) 68 | result.append(add_border(images[-1], T, V)) 69 | return np.hstack(result) 70 | 71 | def vstack(images, border_width=2, border_value=0): 72 | """Vertically concatenate a list of images with a border. 73 | 74 | This is similar to numpy's vstack except that it adds 75 | a border around each image. The borders can be controlled 76 | with the optional border_width and 77 | border_value arguments. See also 78 | hstack. 
79 | """ 80 | if border_width == 0: return np.vstack(images) 81 | T, V = border_width, border_value 82 | result = [] 83 | for image in images[:-1]: 84 | result.append(add_border(image, T, V, 'LTR')) 85 | result.append(add_border(images[-1], T, V)) 86 | return np.vstack(result) 87 | 88 | def unitize(img): 89 | """Remap the values so that they span the range from 0 to +1.""" 90 | return (img - np.amin(img)) / (np.amax(img) - np.amin(img)) 91 | 92 | def gradient(img): 93 | """Compute X derivatives and Y derivatives.""" 94 | nx, ny = np.gradient(unshape(img)) 95 | return reshape(nx), reshape(ny) 96 | 97 | def rotate(source: np.ndarray, degrees) -> np.ndarray: 98 | """Rotate image counter-clockwise by a multiple of 90 degrees.""" 99 | assert len(source.shape) == 3, 'Shape is not rows x cols x channels' 100 | assert source.dtype == np.float, 'Images must be doubles.' 101 | h, w, c = source.shape 102 | degrees %= 360 103 | if degrees == 90: 104 | result = np.empty([w, h, c]) 105 | rotate90(result, source) 106 | elif degrees == 180: 107 | result = np.empty([h, w, c]) 108 | rotate180(result, source) 109 | elif degrees == 270: 110 | result = np.empty([w, h, c]) 111 | rotate270(result, source) 112 | else: 113 | assert False, 'Angle must be a multiple of 90.' 114 | return result 115 | 116 | def hflip(source: np.ndarray) -> np.ndarray: 117 | """Horizontally mirror the given image.""" 118 | assert len(source.shape) == 3, 'Shape is not rows x cols x channels' 119 | assert source.dtype == np.float, 'Images must be doubles.' 120 | h, w, c = source.shape 121 | result = np.empty([h, w, c]) 122 | jit_hflip(result, source) 123 | return result 124 | 125 | def vflip(source: np.ndarray) -> np.ndarray: 126 | """Vertically mirror the given image.""" 127 | assert len(source.shape) == 3, 'Shape is not rows x cols x channels' 128 | assert source.dtype == np.float, 'Images must be doubles.' 
129 | h, w, c = source.shape 130 | result = np.empty([h, w, c]) 131 | jit_vflip(result, source) 132 | return result 133 | 134 | def compose(dst: np.ndarray, src: np.ndarray) -> np.ndarray: 135 | """Compose a source image with alpha onto a destination image.""" 136 | a, b = ensure_alpha(src), ensure_alpha(dst) 137 | alpha = extract_alpha(a) 138 | result = b * (1.0 - alpha) + a * alpha 139 | if dst.shape[2] == 3: 140 | return extract_rgb(result) 141 | return result 142 | 143 | def compose_premultiplied(dst: np.ndarray, src: np.ndarray): 144 | """Draw an image with premultiplied alpha over the destination.""" 145 | a, b = ensure_alpha(src), ensure_alpha(dst) 146 | alpha = extract_alpha(a) 147 | result = b * (1.0 - alpha) + a 148 | if dst.shape[2] == 3: 149 | return extract_rgb(result) 150 | return result 151 | 152 | SIG0 = "void(f8[:,:,:], f8[:,:,:])" 153 | SIG1 = "(r,c,d),(c,r,d)" 154 | @guvectorize([SIG0], SIG1, target='parallel', cache=True) 155 | def rotate90(result, source): 156 | nrows, ncols, nchan = source.shape 157 | for row in range(nrows): 158 | for col in range(ncols): 159 | for chan in range(nchan): 160 | v = source[row][col][chan] 161 | result[-col-1][row][chan] = v 162 | 163 | SIG0 = "void(f8[:,:,:], f8[:,:,:])" 164 | SIG1 = "(r,c,d),(r,c,d)" 165 | @guvectorize([SIG0], SIG1, target='parallel', cache=True) 166 | def rotate180(result, source): 167 | nrows, ncols, nchan = source.shape 168 | for row in range(nrows): 169 | for col in range(ncols): 170 | for chan in range(nchan): 171 | v = source[row][col][chan] 172 | result[-row-1][-col-1][chan] = v 173 | 174 | SIG0 = "void(f8[:,:,:], f8[:,:,:])" 175 | SIG1 = "(r,c,d),(c,r,d)" 176 | @guvectorize([SIG0], SIG1, target='parallel', cache=True) 177 | def rotate270(result, source): 178 | nrows, ncols, nchan = source.shape 179 | for row in range(nrows): 180 | for col in range(ncols): 181 | for chan in range(nchan): 182 | v = source[row][col][chan] 183 | result[col][-row-1][chan] = v 184 | 185 | SIG0 = 
"void(f8[:,:,:], f8[:,:,:])" 186 | SIG1 = "(r,c,d),(r,c,d)" 187 | @guvectorize([SIG0], SIG1, target='parallel', cache=True) 188 | def jit_hflip(result, source): 189 | nrows, ncols, nchan = source.shape 190 | for row in range(nrows): 191 | for col in range(ncols): 192 | for chan in range(nchan): 193 | v = source[row][col][chan] 194 | result[row][-col-1][chan] = v 195 | 196 | SIG0 = "void(f8[:,:,:], f8[:,:,:])" 197 | SIG1 = "(r,c,d),(r,c,d)" 198 | @guvectorize([SIG0], SIG1, target='parallel', cache=True) 199 | def jit_vflip(result, source): 200 | nrows, ncols, nchan = source.shape 201 | for row in range(nrows): 202 | for col in range(ncols): 203 | for chan in range(nchan): 204 | v = source[row][col][chan] 205 | result[-row-1][col][chan] = v 206 | -------------------------------------------------------------------------------- /tests/demo.py: -------------------------------------------------------------------------------- 1 | # 1. Create falloff shape. 2 | 3 | import snowy 4 | import numpy as np 5 | from functools import reduce 6 | from scipy import interpolate 7 | 8 | width, height = 768, 256 9 | x, y = np.linspace(-1, 1, width), np.linspace(-1, 1, height) 10 | u, v = np.meshgrid(x, y, sparse=True) 11 | falloff = np.clip(1 - (u * u + v * v), 0, 1) 12 | falloff = snowy.reshape(falloff / 2) 13 | snowy.show(falloff) 14 | 15 | # 2. Add layers of gradient noise and scale with falloff. 16 | 17 | noise = snowy.generate_noise 18 | noise = [noise(width, height, 6 * 2**f, int(f)) * 1/2**f for f in range(4)] 19 | noise = reduce(lambda x, y: x+y, noise) 20 | elevation = falloff * (falloff / 2 + noise) 21 | elevation = snowy.generate_udf(elevation < 0.1) 22 | elevation /= np.amax(elevation) 23 | snowy.show(elevation) 24 | 25 | # 3. Compute ambient occlusion. 26 | 27 | occlusion = snowy.compute_skylight(elevation) 28 | snowy.show(occlusion) 29 | 30 | # 4. Generate normal map. 
31 | 32 | normals = snowy.resize(snowy.compute_normals(elevation), width, height) 33 | snowy.show(0.5 + 0.5 * normals) 34 | 35 | # 5. Apply harsh diffuse lighting. 36 | 37 | lightdir = np.float64([0.2, -0.2, 1]) 38 | lightdir /= np.linalg.norm(lightdir) 39 | lambert = np.sum(normals * lightdir, 2) 40 | snowy.show(snowy.reshape(lambert) * occlusion) 41 | 42 | # 6. Lighten the occlusion, flatten the normals, and re-light. 43 | 44 | occlusion = 0.5 + 0.5 * occlusion 45 | normals += np.float64([0,0,0.5]) 46 | normals /= snowy.reshape(np.sqrt(np.sum(normals * normals, 2))) 47 | lambert = np.sum(normals * lightdir, 2) 48 | lighting = snowy.reshape(lambert) * occlusion 49 | snowy.show(lighting) 50 | 51 | # 7. Apply color gradient. 52 | 53 | xvals = np.arange(256) 54 | yvals = snowy.load('tests/terrain.png')[0,:,:3] 55 | apply_lut = interpolate.interp1d(xvals, yvals, axis=0) 56 | el = elevation * 0.2 + 0.49 57 | el = np.clip(255 * el, 0, 255) 58 | albedo = apply_lut(snowy.unshape(el)) 59 | snowy.show(albedo * lighting) 60 | -------------------------------------------------------------------------------- /tests/gamma_dalai_lama_gray.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/tests/gamma_dalai_lama_gray.jpg -------------------------------------------------------------------------------- /tests/islands.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/tests/islands.png -------------------------------------------------------------------------------- /tests/overlay.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/tests/overlay.png 
-------------------------------------------------------------------------------- /tests/performance.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | """ 4 | Snowy implements high-quality filtering and is written purely in Python. 5 | We do not expect it to be quite as fast as PIL or vips. However we do 6 | ensure that it performs reasonably with large images, which it achieves 7 | through careful usage of numba. 8 | """ 9 | 10 | import sys 11 | sys.path.append('../snowy') 12 | 13 | import timeit 14 | import snowy 15 | from PIL import Image 16 | import numpy as np 17 | 18 | global imgarray 19 | global pilimage 20 | 21 | ZOOM = 16 22 | 23 | def minify_with_pil(): 24 | global imgarray 25 | global pilimage 26 | height, width = imgarray.shape[:2] 27 | print(imgarray.shape, imgarray.dtype) 28 | pilimage = pilimage.resize((width//ZOOM, height//ZOOM)) 29 | # pilimage.show() 30 | 31 | def minify_with_snowy(): 32 | global imgarray 33 | global pilimage 34 | print(imgarray.shape, imgarray.dtype) 35 | height, width = imgarray.shape[:2] 36 | imgarray = snowy.resize(imgarray, width//ZOOM, height//ZOOM) 37 | # snowy.show(imgarray) 38 | 39 | def setup(grayscale=False, imgfile='~/Desktop/SaltLakes.jpg'): 40 | print('Loading image...') 41 | global imgarray 42 | global pilimage 43 | imgarray = snowy.load(imgfile) 44 | if grayscale: 45 | assert imgarray.shape[2] == 3, "Not an RGB image." 
46 | r,g,b = np.split(imgarray, 3, axis=2) 47 | imgarray = r 48 | pilimage = Image.fromarray(np.uint8(snowy.unshape(imgarray))) 49 | 50 | seconds = timeit.timeit('minify_with_pil()', setup='setup()', 51 | globals=globals(), number=1) 52 | print(f"PIL minification took {seconds:6.3} seconds") 53 | 54 | seconds = timeit.timeit('minify_with_snowy()', setup='setup()', 55 | globals=globals(), number=1) 56 | print(f"Snowy minification took {seconds:6.3} seconds") 57 | -------------------------------------------------------------------------------- /tests/small_dalai_lama.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/tests/small_dalai_lama.png -------------------------------------------------------------------------------- /tests/sobel_input.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/tests/sobel_input.png -------------------------------------------------------------------------------- /tests/terrain.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/tests/terrain.png -------------------------------------------------------------------------------- /tests/test.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | snowy test 5 | 16 | 17 | 18 | 19 |

20 | 21 | 22 | 23 |

24 | 25 | 26 | 27 | -------------------------------------------------------------------------------- /tests/test_color.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 -m pytest -s 2 | 3 | # The shebang runs the test with stdout enabled and must be invoked from 4 | # the repo root. 5 | 6 | import snowy as sn 7 | import numpy as np 8 | import math 9 | import pytest 10 | import timeit 11 | 12 | from skimage.color.adapt_rgb import adapt_rgb 13 | from skimage import filters 14 | from skimage.color import rgb2gray 15 | 16 | def as_gray(image_filter, image, *args, **kwargs): 17 | gray_image = rgb2gray(image) 18 | return image_filter(gray_image, *args, **kwargs) 19 | 20 | @adapt_rgb(as_gray) 21 | def skimage_sobel(image): 22 | return filters.sobel(image) 23 | 24 | def create_circle(w, h, radius, center=[0,5.0,5]): 25 | cx, cy = center 26 | hw, hh = 0.5 / w, 0.5 / h 27 | dp = max(hw, hh) 28 | x = np.linspace(hw, 1 - hw, w) 29 | y = np.linspace(hh, 1 - hh, h) 30 | u, v = np.meshgrid(x, y, sparse=True) 31 | d2, r2 = (u-cx)**2 + (v-cy)**2, radius**2 32 | result = np.where(d2 < r2, 1.0, 0.0) 33 | return sn.reshape(result) 34 | 35 | def test_luminance(): 36 | source = sn.load('tests/sobel_input.png')[:,:,:3] 37 | 38 | L = rgb2gray(source) 39 | skresult = np.dstack([L, L, L]) 40 | small_skresult = sn.resize(skresult, width=256) 41 | 42 | L = sn.rgb_to_luminance(source) 43 | snresult = np.dstack([L, L, L]) 44 | small_snresult = sn.resize(snresult, width=256) 45 | 46 | L = skimage_sobel(source) 47 | sksobel = np.dstack([L, L, L]) 48 | small_sksobel = sn.resize(sksobel, width=256) 49 | 50 | L = sn.rgb_to_luminance(source) 51 | L = sn.compute_sobel(L) 52 | snsobel = np.dstack([L, L, L]) 53 | small_snsobel = sn.resize(snsobel, width=256) 54 | 55 | sn.show(np.hstack([ 56 | small_skresult, 57 | small_snresult, 58 | small_sksobel, 59 | small_snsobel])) 60 | 61 | def test_thick(): 62 | source = 
sn.load('tests/sobel_input.png')[:,:,:3] 63 | small_source = sn.resize(source, width=256) 64 | blurred = sn.blur(source, radius=2) 65 | small_blurred = sn.resize(blurred, width=256) 66 | 67 | L = skimage_sobel(blurred) 68 | sksobel = np.dstack([L, L, L]) 69 | small_sksobel = sn.resize(sksobel, width=256) 70 | 71 | L = sn.rgb_to_luminance(blurred) 72 | L = sn.compute_sobel(L) 73 | snsobel = np.dstack([L, L, L]) 74 | small_snsobel = sn.resize(snsobel, width=256) 75 | 76 | small_sksobel = np.clip(1 - small_sksobel * 40, 0, 1) 77 | small_snsobel = np.clip(1 - small_snsobel * 40, 0, 1) 78 | 79 | strip = np.hstack([ 80 | small_blurred, 81 | small_source * small_sksobel, 82 | small_source * small_snsobel]) 83 | sn.show(strip) 84 | 85 | def test_coords(): 86 | 87 | h, w = 800, 800 88 | height, width = h, w 89 | 90 | # Draw seed image 91 | 92 | cyan = np.full([h, w, 3], np.float64([(27, 56, 80)]) / 200) 93 | pink = np.full([h, w, 3], np.float64([175, 111, 127]) / 255) 94 | orange = np.full([h, w, 3], np.float64([239, 159, 95]) / 255) 95 | yellow = np.full([h, w, 3], np.float64([239, 207, 95]) / 255) 96 | 97 | colors = np.zeros([h, w, 3]) 98 | def max_color(v): return np.maximum(colors, v) 99 | def sub_color(v): return colors * (1 - v) 100 | 101 | colors = max_color(create_circle(w, h, 0.37, [0.4, 0.5]) * cyan) 102 | colors = max_color(create_circle(w, h, 0.37, [0.6, 0.4]) * cyan) 103 | colors = max_color(create_circle(w, h, 0.27, [0.7, 0.6]) * cyan) 104 | colors = sub_color(create_circle(w, h, 0.35, [0.4, 0.5])) 105 | colors = sub_color(create_circle(w, h, 0.35, [0.6, 0.4])) 106 | colors = sub_color(create_circle(w, h, 0.25, [0.7, 0.6])) 107 | colors = max_color(create_circle(w, h, 0.01, [0.4, 0.5]) * orange) 108 | colors = max_color(create_circle(w, h, 0.01, [0.6, 0.4]) * pink) 109 | colors = max_color(create_circle(w, h, 0.01, [0.7, 0.6]) * yellow) 110 | 111 | colors = sn.linearize(colors) 112 | 113 | # Create generalized voronoi 114 | 115 | luma = 
sn.reshape(np.sum(colors, 2)) 116 | coords = sn.generate_cpcf(luma != 0) 117 | voronoi = sn.dereference_cpcf(colors, coords) 118 | 119 | # Warp the voronoi 120 | 121 | warpx, warpy = width / 15, height / 15 122 | noise = sn.generate_fBm(width, height, 4, 4, 3) 123 | 124 | i, j = np.arange(width, dtype='i2'), np.arange(height, dtype='i2') 125 | coords = np.dstack(np.meshgrid(i, j, sparse=False)) 126 | 127 | warpx = warpx * np.cos(noise * math.pi * 2) 128 | warpy = warpy * np.sin(noise * math.pi * 2) 129 | coords += np.int16(np.dstack([warpx, warpy])) 130 | 131 | coords[:,:,0] = np.clip(coords[:,:,0], 0, width - 1) 132 | coords[:,:,1] = np.clip(coords[:,:,1], 0, height - 1) 133 | warped = sn.dereference_cpcf(voronoi, coords) 134 | 135 | strip = [sn.resize(i, height=256) for i in (colors, voronoi, warped)] 136 | sn.show(sn.hstack(strip)) 137 | -------------------------------------------------------------------------------- /tests/test_dist.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 -m pytest -s 2 | 3 | # The shebang runs the test with stdout enabled and must be invoked from 4 | # the repo root. 
5 | 6 | import snowy 7 | import numpy as np 8 | import pytest 9 | 10 | w, h = 1920 / 4, 1080 / 4 11 | 12 | def smoothstep(edge0, edge1, x): 13 | t = np.clip((x - edge0) / (edge1 - edge0), 0.0, 1.0) 14 | return t * t * (3.0 - 2.0 * t) 15 | 16 | def create_circle(w, h, radius=0.4, cx=0.5, cy=0.5): 17 | hw, hh = 0.5 / w, 0.5 / h 18 | dp = max(hw, hh) 19 | x = np.linspace(hw, 1 - hw, w) 20 | y = np.linspace(hh, 1 - hh, h) 21 | u, v = np.meshgrid(x, y, sparse=True) 22 | d2, r2 = (u-cx)**2 + (v-cy)**2, radius**2 23 | result = 1 - smoothstep(radius-dp, radius+dp, np.sqrt(d2)) 24 | return snowy.reshape(result) 25 | 26 | def test_cpcf(): 27 | 28 | w, h = 500, 500 29 | def show(im): 30 | snowy.show(snowy.resize(im, height=100, filter=None)) 31 | 32 | yellow = np.full((w, h, 3), (1, 1, 0)) 33 | red = np.full((w, h, 3), (1, 0, 0)) 34 | 35 | blue_border = np.full((w, h, 3), (0, 0, 1)) 36 | t = 5; blue_border[t:h-t,t:w-t] *= 0 37 | 38 | c0 = create_circle(w, h, 0.3) * yellow * 100000 39 | c1 = create_circle(w, h, 0.07, 0.8, 0.8) * red * 10000 40 | circles = np.clip(c0 + c1 + blue_border, 0, 1) 41 | 42 | r, g, b = circles.swapaxes(0, 2) 43 | luma = snowy.reshape(r + g + b) 44 | 45 | mask = luma != 0.0 46 | sdf = snowy.unitize(np.abs(snowy.generate_sdf(mask))) 47 | cpcf = snowy.generate_cpcf(mask) 48 | 49 | voronoi = np.empty(circles.shape) 50 | np.copyto(voronoi, snowy.dereference_coords(circles, cpcf)) 51 | 52 | luma = np.dstack([luma, luma, luma]) 53 | sdf = np.dstack([sdf, sdf, sdf]) 54 | final = np.hstack([circles, luma, sdf, voronoi]) 55 | final = snowy.resize(final, height=400) 56 | show(final) 57 | 58 | def test_sdf(): 59 | c0 = create_circle(200, 200, 0.3) 60 | c1 = create_circle(200, 200, 0.08, 0.8, 0.8) 61 | c0 = np.clip(c0 + c1, 0, 1) 62 | circles = snowy.add_border(c0, value=1) 63 | mask = circles != 0.0 64 | sdf = snowy.unitize(snowy.generate_sdf(mask)) 65 | nx, ny = snowy.gradient(sdf) 66 | grad = snowy.unitize(nx + ny) 67 | snowy.show(snowy.hstack([circles, sdf, 
grad])) 68 | 69 | def test_udf(): 70 | c0 = create_circle(200, 200, 0.3) 71 | c1 = create_circle(200, 200, 0.08, 0.8, 0.8) 72 | c0 = np.clip(c0 + c1, 0, 1) 73 | circles = snowy.add_border(c0, value=1) 74 | mask = circles != 0.0 75 | udf = snowy.unitize(snowy.generate_udf(mask)) 76 | nx, ny = snowy.gradient(udf) 77 | grad = snowy.unitize(nx + ny) 78 | snowy.show(snowy.hstack([circles, udf, grad])) 79 | 80 | def test_gdf(): 81 | "This is a (failed) effort to create a smoother distance field." 82 | c0 = create_circle(200, 200, 0.3) 83 | c1 = create_circle(200, 200, 0.08, 0.8, 0.8) 84 | c0 = np.clip(c0 + c1, 0, 1) 85 | circles = snowy.add_border(c0, value=1) 86 | circles = np.clip(snowy.blur(circles, radius=2), 0, 1) 87 | circles = np.clip(snowy.blur(circles, radius=2), 0, 1) 88 | source = (1.0 - circles) * 2000.0 89 | gdf = np.sqrt(snowy.generate_gdf(source)) 90 | gdf = snowy.unitize(gdf) 91 | nx, ny = snowy.gradient(gdf) 92 | grad = snowy.unitize(nx + ny) 93 | snowy.show(snowy.hstack([circles, gdf, grad])) 94 | 95 | def test_tweet(): 96 | import snowy as sn, numpy as np 97 | im = sn.generate_noise(2000, 500, 5, seed=2, wrapx=True) 98 | df = sn.generate_sdf(im < 0.0, wrapx=True) 99 | im = 0.5 + 0.5 * np.sign(im) - im 100 | cl = lambda L, U: np.where(np.logical_and(df>L, df= 0 and np.amax(ground) <= 1 61 | 62 | with tempfile.NamedTemporaryFile() as fp: 63 | target = fp.name + '.png' 64 | snowy.export(ground, target) 65 | show_filename(target) 66 | 67 | show_filename(source) 68 | show_array(ground, True) 69 | 70 | blurred = snowy.blur(ground, radius=10) 71 | snowy.show(blurred) 72 | 73 | def test_solid(): 74 | gray = np.ones([100, 100, 4]) / 2 75 | snowy.show(gray) 76 | 77 | def test_gamma(): 78 | 79 | source = path('gamma_dalai_lama_gray.jpg') 80 | dalai_lama = snowy.load(source) 81 | snowy.show(dalai_lama) 82 | 83 | small = snowy.resize(dalai_lama, height=32) 84 | snowy.export(small, path('small_dalai_lama.png')) 85 | snowy.show(small) 86 | 
-------------------------------------------------------------------------------- /tests/test_lighting.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 -m pytest -s 2 | 3 | # The shebang runs the test with stdout enabled and must be invoked from 4 | # the repo root. 5 | 6 | import os 7 | import snowy as sn 8 | import numpy as np 9 | from scipy import interpolate 10 | import timeit 11 | 12 | def path(filename: str): 13 | scriptdir = os.path.dirname(os.path.realpath(__file__)) 14 | return os.path.join(scriptdir, filename) 15 | 16 | def create_falloff(w, h, radius=0.4, cx=0.5, cy=0.5): 17 | hw, hh = 0.5 / w, 0.5 / h 18 | x = np.linspace(hw, 1 - hw, w) 19 | y = np.linspace(hh, 1 - hh, h) 20 | u, v = np.meshgrid(x, y, sparse=True) 21 | d2 = (u-cx)**2 + (v-cy)**2 22 | return 1-sn.unitize(sn.reshape(d2)) 23 | 24 | def create_island(seed, freq=3.5): 25 | w, h = 750, 512 26 | falloff = create_falloff(w, h) 27 | n1 = 1.000 * sn.generate_noise(w, h, freq*1, seed+0) 28 | n2 = 0.500 * sn.generate_noise(w, h, freq*2, seed+1) 29 | n3 = 0.250 * sn.generate_noise(w, h, freq*4, seed+2) 30 | n4 = 0.125 * sn.generate_noise(w, h, freq*8, seed+3) 31 | elevation = falloff * (falloff / 2 + n1 + n2 + n3 + n4) 32 | elevation = sn.generate_sdf(elevation < 0.4) 33 | elmax = max(abs(np.amin(elevation)), abs(np.amax(elevation))) 34 | return elevation / elmax 35 | 36 | def test_normals(): 37 | isle = create_island(10) 38 | height, width, nchan = isle.shape 39 | 40 | occlusion = np.empty([height, width, 1]) 41 | seconds = timeit.timeit(lambda: np.copyto(occlusion, 42 | sn.compute_skylight(isle)), number=1) 43 | print(f'\ncompute_skylight took {seconds} seconds') 44 | 45 | normals = np.empty([height - 1, width - 1, 3]) 46 | seconds = timeit.timeit(lambda: np.copyto(normals, 47 | sn.compute_normals(isle)), number=1) 48 | print(f'\ncompute_normals took {seconds} seconds') 49 | 50 | normals = sn.resize(normals, 750, 512) 51 | 52 | # 
Flatten the normals according to landmass versus sea. 53 | normals += np.float64([0,0,100]) * np.where(isle < 0.0, 1.0, 0.005) 54 | normals /= sn.reshape(np.sqrt(np.sum(normals * normals, 2))) 55 | 56 | # Compute the lambertian diffuse factor 57 | lightdir = np.float64([0.2, -0.2, 1]) 58 | lightdir /= np.linalg.norm(lightdir) 59 | df = np.clip(np.sum(normals * lightdir, 2), 0, 1) 60 | df = sn.reshape(df) 61 | df *= occlusion 62 | 63 | # Apply color LUT 64 | gradient_image = sn.resize(sn.load(path('terrain.png')), width=1024)[:,:,:3] 65 | def applyColorGradient(elevation): 66 | xvals = np.arange(1024) 67 | yvals = gradient_image[0] 68 | apply_lut = interpolate.interp1d(xvals, yvals, axis=0) 69 | el = np.clip(1023 * elevation, 0, 1023) 70 | return apply_lut(sn.unshape(el)) 71 | albedo = applyColorGradient(isle * 0.5 + 0.5) 72 | albedo *= df 73 | 74 | # Visualize the lighting layers 75 | normals = 0.5 * (normals + 1.0) 76 | isle = np.dstack([isle, isle, isle]) 77 | occlusion = np.dstack([occlusion, occlusion, occlusion]) 78 | df = np.dstack([df, df, df]) 79 | island_strip = sn.resize(sn.hstack([occlusion, normals, df, albedo]), height=256) 80 | sn.save(island_strip, 'docs/island_strip.png') 81 | sn.show(island_strip) 82 | -------------------------------------------------------------------------------- /tests/test_snowy.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 -m pytest -s 2 | 3 | # The shebang runs the test with stdout enabled and must be invoked from 4 | # the repo root. In addition to this script, we use our custom doc 5 | # generator for regression testing, because it exercises quite a bit 6 | # of functionality to generate the images seen in the documentation. 
7 | 8 | import snowy 9 | import numpy as np 10 | import pytest 11 | 12 | w, h = 1920 / 4, 1080 / 4 13 | 14 | def smoothstep(edge0, edge1, x): 15 | t = np.clip((x - edge0) / (edge1 - edge0), 0.0, 1.0) 16 | return t * t * (3.0 - 2.0 * t) 17 | 18 | def create_circle(w, h, radius=0.4, cx=0.5, cy=0.5): 19 | hw, hh = 0.5 / w, 0.5 / h 20 | dp = max(hw, hh) 21 | x = np.linspace(hw, 1 - hw, w) 22 | y = np.linspace(hh, 1 - hh, h) 23 | u, v = np.meshgrid(x, y, sparse=True) 24 | d2, r2 = (u-cx)**2 + (v-cy)**2, radius**2 25 | result = 1 - smoothstep(radius-dp, radius+dp, np.sqrt(d2)) 26 | return snowy.reshape(result) 27 | 28 | def test_minification(): 29 | n = snowy.generate_noise(1000, 1000, frequency=5, seed=42) 30 | n = 0.5 + 0.5 * np.sign(n) 31 | a = snowy.resize(n, 100, 100) 32 | b = snowy.resize(n, 100, 100, snowy.MITCHELL) 33 | c = snowy.resize(n, 100, 100, snowy.GAUSSIAN) 34 | d = snowy.resize(n, 100, 100, snowy.NEAREST) 35 | x = [a,b,c,d] + [create_circle(100, 100)] 36 | snowy.show(np.hstack(x)) 37 | 38 | def test_magnification(): 39 | i = create_circle(8, 8) 40 | a = snowy.resize(i, 100, 100, snowy.NEAREST) 41 | b = snowy.resize(i, 100, 100, snowy.TRIANGLE) 42 | c = snowy.resize(i, 100, 100, snowy.GAUSSIAN) 43 | e = snowy.resize(i, 100, 100, snowy.MITCHELL) 44 | d = snowy.resize(i, 100, 100, snowy.LANCZOS) 45 | f = snowy.resize(i, 100, 100) 46 | snowy.show(np.hstack([a, b, c, d, e, f])) 47 | 48 | def test_noise_smoothness(): 49 | noise = 0.5 + 0.5 * snowy.generate_noise(300, 150, 4, seed=42) 50 | grad = snowy.gradient(noise) 51 | grad = grad[0] + grad[1] 52 | grad = snowy.unitize(grad) 53 | snowy.show(grad) 54 | 55 | def test_tileable(): 56 | n = snowy.generate_noise(200, 400, frequency=4, seed=42, wrapx=True) 57 | n = 0.5 + 0.5 * np.sign(n) - n 58 | n = np.hstack([n, n]) 59 | gold = snowy.resize(n, 200, 200) 60 | 61 | n = snowy.generate_noise(20, 40, frequency=4, seed=42, wrapx=True) 62 | n = 0.5 + 0.5 * np.sign(n) - n 63 | n = snowy.resize(n, 100, 200) 64 | bad = 
np.hstack([n, n]) 65 | 66 | n = snowy.generate_noise(20, 40, frequency=4, seed=42, wrapx=True) 67 | n = 0.5 + 0.5 * np.sign(n) - n 68 | n = snowy.resize(n, 100, 200, wrapx=True) 69 | good = np.hstack([n, n]) 70 | 71 | snowy.show(snowy.hstack([gold, bad, good], 2, .7)) 72 | 73 | def test_tileable_distance(): 74 | c0 = create_circle(400, 200, 0.3) 75 | c1 = create_circle(400, 200, 0.08, 0.8, 0.8) 76 | circles = np.clip(c0 + c1, 0, 1) 77 | mask = circles != 0.0 78 | 79 | sdf = snowy.unitize(snowy.generate_sdf(mask, wrapx=True, wrapy=True)) 80 | nx, ny = snowy.gradient(sdf) 81 | grad = snowy.unitize(nx + ny) 82 | stack2 = np.hstack([sdf, sdf, grad, grad]) 83 | 84 | snowy.show(snowy.resize(np.vstack([stack2, stack2]), 600, 200)) 85 | 86 | get_mask = lambda L, U: np.logical_and(sdf > L, sdf < U) 87 | get_contour = lambda L, U: np.where(get_mask(L, U), sdf, 0) 88 | sdf -= get_contour(.20, .25) 89 | sdf -= get_contour(.60, .65) 90 | sdf -= get_contour(.90, .95) 91 | 92 | snowy.show(snowy.resize(np.hstack([sdf, sdf, sdf, sdf]), height=300)) 93 | -------------------------------------------------------------------------------- /tests/texture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/prideout/snowy/995c373bd751daf35d8b9a851de7a744329552d7/tests/texture.png --------------------------------------------------------------------------------