├── .dockerignore ├── .gitignore ├── Dockerfile ├── LICENSE ├── Pipfile ├── Pipfile.lock ├── README.md ├── data └── .gitkeep ├── datasets ├── __init__.py ├── basic_dataset.py ├── cufed5_dataset.py ├── reference_dataset.py └── swapping_dataset.py ├── download_dataset.py ├── download_pretrained_model.py ├── losses ├── __init__.py ├── adversarial_loss.py ├── back_projection_loss.py ├── metrics.py ├── perceptual_loss.py └── texture_loss.py ├── models ├── __init__.py ├── discriminator.py ├── srntt.py ├── swapper.py └── vgg.py ├── offline_texture_swapping.py ├── online_inference.py ├── scripts └── train.sh ├── src ├── comparison_002.png ├── comparison_065.png └── comparison_078.png ├── test.py ├── train.py ├── utils └── __init__.py └── weights └── .gitkeep /.dockerignore: -------------------------------------------------------------------------------- 1 | # Common 2 | LICENSE 3 | README.md 4 | CHANGELOG.md 5 | docker-compose.yml 6 | Dockerfile 7 | .dockerignore 8 | 9 | # git 10 | .git 11 | .gitattributes 12 | .gitignore 13 | 14 | # Byte-compiled / optimized / DLL files 15 | __pycache__/ 16 | *.py[cod] 17 | *$py.class 18 | 19 | # C extensions 20 | *.so 21 | 22 | # Distribution / packaging 23 | .Python 24 | build/ 25 | develop-eggs/ 26 | dist/ 27 | downloads/ 28 | eggs/ 29 | .eggs/ 30 | lib/ 31 | lib64/ 32 | parts/ 33 | sdist/ 34 | var/ 35 | wheels/ 36 | pip-wheel-metadata/ 37 | share/python-wheels/ 38 | *.egg-info/ 39 | .installed.cfg 40 | *.egg 41 | MANIFEST 42 | 43 | # PyInstaller 44 | # Usually these files are written by a python script from a template 45 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 46 | *.manifest 47 | *.spec 48 | 49 | # Installer logs 50 | pip-log.txt 51 | pip-delete-this-directory.txt 52 | 53 | # Unit test / coverage reports 54 | htmlcov/ 55 | .tox/ 56 | .nox/ 57 | .coverage 58 | .coverage.* 59 | .cache 60 | nosetests.xml 61 | coverage.xml 62 | *.cover 63 | *.py,cover 64 | .hypothesis/ 65 | .pytest_cache/ 66 | 67 | # Translations 68 | *.mo 69 | *.pot 70 | 71 | # Django stuff: 72 | *.log 73 | local_settings.py 74 | db.sqlite3 75 | db.sqlite3-journal 76 | 77 | # Flask stuff: 78 | instance/ 79 | .webassets-cache 80 | 81 | # Scrapy stuff: 82 | .scrapy 83 | 84 | # Sphinx documentation 85 | docs/_build/ 86 | 87 | # PyBuilder 88 | target/ 89 | 90 | # Jupyter Notebook 91 | .ipynb_checkpoints 92 | 93 | # IPython 94 | profile_default/ 95 | ipython_config.py 96 | 97 | # pyenv 98 | .python-version 99 | 100 | # pipenv 101 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 102 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 103 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 104 | # install all needed dependencies. 105 | #Pipfile.lock 106 | 107 | # celery beat schedule file 108 | celerybeat-schedule 109 | 110 | # SageMath parsed files 111 | *.sage.py 112 | 113 | # Environments 114 | .env 115 | .venv 116 | env/ 117 | venv/ 118 | ENV/ 119 | env.bak/ 120 | venv.bak/ 121 | 122 | # Spyder project settings 123 | .spyderproject 124 | .spyproject 125 | 126 | # Rope project settings 127 | .ropeproject 128 | 129 | # mkdocs documentation 130 | /site 131 | 132 | # mypy 133 | .mypy_cache/ 134 | .dmypy.json 135 | dmypy.json 136 | 137 | # Pyre type checker 138 | .pyre/ 139 | 140 | # my 141 | data 142 | runs 143 | notebooks 144 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # celery beat schedule file 95 | celerybeat-schedule 96 | 97 | # SageMath parsed files 98 | *.sage.py 99 | 100 | # Environments 101 | .env 102 | .venv 103 | env/ 104 | venv/ 105 | ENV/ 106 | env.bak/ 107 | venv.bak/ 108 | 109 | # Spyder project settings 110 | .spyderproject 111 | .spyproject 112 | 113 | # Rope project settings 114 | .ropeproject 115 | 116 | # mkdocs documentation 117 | /site 118 | 119 | # mypy 120 | .mypy_cache/ 121 | .dmypy.json 122 | dmypy.json 123 | 124 | # Pyre type checker 125 | .pyre/ 126 | 127 | # additional ignore 128 | .vscode/ 129 | data/* 130 | weights/* 131 | runs/ 132 | archive/ 133 | notebooks/ 134 | !.gitkeep 135 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nvidia/cuda:10.0-cudnn7-devel-ubuntu16.04 2 | 3 | ENV PYTHON_VERSION 3.7.4 4 | ENV HOME /root 5 | ENV PYTHON_ROOT $HOME/local/python-$PYTHON_VERSION 6 | ENV PATH $PYTHON_ROOT/bin:$PATH 7 | ENV PYENV_ROOT $HOME/.pyenv 8 | 9 | RUN apt-get update && apt-get upgrade -y && apt-get install -y \ 10 | make build-essential libssl-dev zlib1g-dev libbz2-dev \ 11 | libreadline-dev libsqlite3-dev llvm libncurses5-dev libncursesw5-dev \ 12 | xz-utils tk-dev libffi-dev liblzma-dev python-openssl git wget curl 13 | 14 | RUN git clone https://github.com/pyenv/pyenv.git $PYENV_ROOT \ 15 | && $PYENV_ROOT/plugins/python-build/install.sh \ 16 | && /usr/local/bin/python-build -v $PYTHON_VERSION $PYTHON_ROOT \ 17 | && rm -rf $PYENV_ROOT 18 | 19 | ADD . /src 20 | WORKDIR /src 21 | RUN pip install pipenv && pipenv install 22 | 23 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /Pipfile: -------------------------------------------------------------------------------- 1 | [[source]] 2 | name = "pypi" 3 | url = "https://pypi.org/simple" 4 | verify_ssl = true 5 | 6 | [dev-packages] 7 | ipython = "*" 8 | pylint = "*" 9 | autopep8 = "*" 10 | flake8 = "*" 11 | jupyterlab = "*" 12 | 13 | [packages] 14 | torch = "*" 15 | torchvision = "*" 16 | tqdm = "*" 17 | future = "*" 18 | tensorboard = "*" 19 | scikit-learn = "*" 20 | kornia = "*" 21 | pandas = "*" 22 | googledrivedownloader = "*" 23 | opencv-python = "*" 24 | 25 | [requires] 26 | python_version = "3.7" 27 | -------------------------------------------------------------------------------- /Pipfile.lock: -------------------------------------------------------------------------------- 1 | { 2 | "_meta": { 3 | "hash": { 4 | "sha256": "1635159ff4c836423418663edd4fee17584c1c56c9f1d7a40f1e516549e93ed9" 5 | }, 6 | "pipfile-spec": 6, 7 | "requires": { 8 | "python_version": "3.7" 9 | }, 10 | "sources": [ 11 | { 12 | "name": "pypi", 13 | "url": "https://pypi.org/simple", 14 | "verify_ssl": true 15 | } 16 | ] 17 | }, 18 | "default": { 19 | "absl-py": { 20 | "hashes": [ 21 | "sha256:5d15f85b8cc859c6245bc9886ba664460ed96a6fee895416caa37d669ee74a9a", 22 | "sha256:f568809938c49abbda89826223c992b630afd23c638160ad7840cfe347710d97" 23 | ], 24 | "markers": "python_version >= '3.6'", 25 | "version": "==1.2.0" 26 | }, 27 | "cachetools": { 28 | "hashes": [ 29 | "sha256:89ea6f1b638d5a73a4f9226be57ac5e4f399d22770b92355f92dcb0f7f001693", 30 | "sha256:92971d3cb7d2a97efff7c7bb1657f21a8f5fb309a37530537c71b1774189f2d1" 31 | ], 32 | "markers": "python_version ~= '3.5'", 33 | "version": "==4.2.4" 34 | }, 35 | "certifi": { 36 | "hashes": [ 37 | "sha256:84c85a9078b11105f04f3036a9482ae10e4621616db313fe045dd24743a0820d", 38 | "sha256:fe86415d55e84719d75f8b69414f6438ac3547d2078ab91b67e779ef69378412" 39 | ], 40 | "markers": "python_version >= '3.6'", 41 | "version": "==2022.6.15" 42 | }, 43 | "charset-normalizer": { 44 | "hashes": [ 45 | "sha256:5a3d016c7c547f69d6f81fb0db9449ce888b418b5b9952cc5e6e66843e9dd845", 46 | "sha256:83e9a75d1911279afd89352c68b45348559d1fc0506b054b346651b5e7fee29f" 47 | ], 48 | "markers": "python_version >= '3.6'", 49 | "version": "==2.1.1" 50 | }, 51 | "future": { 52 | "hashes": [ 53 | "sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d" 54 | ], 55 | "index": "pypi", 56 | "version": "==0.18.2" 57 | }, 58 | "google-auth": { 59 | "hashes": [ 60 | "sha256:997516b42ecb5b63e8d80f5632c1a61dddf41d2a4c2748057837e06e00014258", 61 | "sha256:b7033be9028c188ee30200b204ea00ed82ea1162e8ac1df4aa6ded19a191d88e" 62 | ], 63 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'", 64 | "version": "==1.35.0" 65 | }, 66 | "google-auth-oauthlib": { 67 | "hashes": [ 68 | "sha256:3f2a6e802eebbb6fb736a370fbf3b055edcb6b52878bf2f26330b5e041316c73", 69 | "sha256:a90a072f6993f2c327067bf65270046384cda5a8ecb20b94ea9a687f1f233a7a" 70 | ], 71 | "markers": "python_version >= '3.6'", 72 | "version": "==0.4.6" 73 | }, 74 | "googledrivedownloader": { 75 | "hashes": [ 76 | "sha256:26ef906c4a038de6fb36f375b0cb0af6f0b6d7ea9ce019a3a08abc50fd6a3b73", 77 | "sha256:4b34c1337b2ff3bf2bd7581818efbdcaea7d50ffd484ccf80809688f5ca0e204" 78 | ], 79 | "index": "pypi", 80 | "version": "==0.4" 81 | }, 82 | "grpcio": { 83 | "hashes": [ 84 | "sha256:0425b5577be202d0a4024536bbccb1b052c47e0766096e6c3a5789ddfd5f400d", 85 | "sha256:06c0739dff9e723bca28ec22301f3711d85c2e652d1c8ae938aa0f7ad632ef9a", 86 | "sha256:08307dc5a6ac4da03146d6c00f62319e0665b01c6ffe805cfcaa955c17253f9c", 87 | "sha256:090dfa19f41efcbe760ae59b34da4304d4be9a59960c9682b7eab7e0b6748a79", 88 | "sha256:0a24b50810aae90c74bbd901c3f175b9645802d2fbf03eadaf418ddee4c26668", 89 | "sha256:0cd44d78f302ff67f11a8c49b786c7ccbed2cfef6f4fd7bb0c3dc9255415f8f7", 90 | "sha256:0d8a7f3eb6f290189f48223a5f4464c99619a9de34200ce80d5092fb268323d2", 91 | "sha256:14d2bc74218986e5edf5527e870b0969d63601911994ebf0dce96288548cf0ef", 92 | "sha256:1bb9afa85e797a646bfcd785309e869e80a375c959b11a17c9680abebacc0cb0", 93 | "sha256:1ec63bbd09586e5cda1bdc832ae6975d2526d04433a764a1cc866caa399e50d4", 94 | "sha256:2061dbe41e43b0a5e1fd423e8a7fb3a0cf11d69ce22d0fac21f1a8c704640b12", 95 | "sha256:324e363bad4d89a8ec7124013371f268d43afd0ac0fdeec1b21c1a101eb7dafb", 96 | "sha256:35dfd981b03a3ec842671d1694fe437ee9f7b9e6a02792157a2793b0eba4f478", 97 | "sha256:43857d06b2473b640467467f8f553319b5e819e54be14c86324dad83a0547818", 98 | "sha256:4706c78b0c183dca815bbb4ef3e8dd2136ccc8d1699f62c585e75e211ad388f6", 99 | "sha256:4d9ad7122f60157454f74a850d1337ba135146cef6fb7956d78c7194d52db0fe", 100 | "sha256:544da3458d1d249bb8aed5504adf3e194a931e212017934bf7bfa774dad37fb3", 101 | "sha256:55782a31ec539f15b34ee56f19131fe1430f38a4be022eb30c85e0b0dcf57f11", 102 | "sha256:55cd8b13c5ef22003889f599b8f2930836c6f71cd7cf3fc0196633813dc4f928", 103 | "sha256:5dbba95fab9b35957b4977b8904fc1fa56b302f9051eff4d7716ebb0c087f801", 104 | "sha256:5f57b9b61c22537623a5577bf5f2f970dc4e50fac5391090114c6eb3ab5a129f", 105 | "sha256:64e097dd08bb408afeeaee9a56f75311c9ca5b27b8b0278279dc8eef85fa1051", 106 | "sha256:664a270d3eac68183ad049665b0f4d0262ec387d5c08c0108dbcfe5b351a8b4d", 107 | "sha256:668350ea02af018ca945bd629754d47126b366d981ab88e0369b53bc781ffb14", 108 | "sha256:67cd275a651532d28620eef677b97164a5438c5afcfd44b15e8992afa9eb598c", 109 | "sha256:68b5e47fcca8481f36ef444842801928e60e30a5b3852c9f4a95f2582d10dcb2", 110 | "sha256:7191ffc8bcf8a630c547287ab103e1fdf72b2e0c119e634d8a36055c1d988ad0", 111 | "sha256:815089435d0f113719eabf105832e4c4fa1726b39ae3fb2ca7861752b0f70570", 112 | "sha256:8dbef03853a0dbe457417c5469cb0f9d5bf47401b49d50c7dad3c495663b699b", 113 | "sha256:91cd292373e85a52c897fa5b4768c895e20a7dc3423449c64f0f96388dd1812e", 114 | "sha256:9298d6f2a81f132f72a7e79cbc90a511fffacc75045c2b10050bb87b86c8353d", 115 | "sha256:96cff5a2081db82fb710db6a19dd8f904bdebb927727aaf4d9c427984b79a4c1", 116 | "sha256:9e63e0619a5627edb7a5eb3e9568b9f97e604856ba228cc1d8a9f83ce3d0466e", 117 | "sha256:a278d02272214ec33f046864a24b5f5aab7f60f855de38c525e5b4ef61ec5b48", 118 | "sha256:a6b2432ac2353c80a56d9015dfc5c4af60245c719628d4193ecd75ddf9cd248c", 119 | "sha256:b821403907e865e8377af3eee62f0cb233ea2369ba0fcdce9505ca5bfaf4eeb3", 120 | "sha256:b88bec3f94a16411a1e0336eb69f335f58229e45d4082b12d8e554cedea97586", 121 | "sha256:bfdb8af4801d1c31a18d54b37f4e49bb268d1f485ecf47f70e78d56e04ff37a7", 122 | "sha256:c79996ae64dc4d8730782dff0d1daacc8ce7d4c2ba9cef83b6f469f73c0655ce", 123 | "sha256:cc34d182c4fd64b6ff8304a606b95e814e4f8ed4b245b6d6cc9607690e3ef201", 124 | "sha256:d0d481ff55ea6cc49dab2c8276597bd4f1a84a8745fedb4bc23e12e9fb9d0e45", 125 | "sha256:e9723784cf264697024778dcf4b7542c851fe14b14681d6268fb984a53f76df1", 126 | "sha256:f4508e8abd67ebcccd0fbde6e2b1917ba5d153f3f20c1de385abd8722545e05f", 127 | "sha256:f515782b168a4ec6ea241add845ccfebe187fc7b09adf892b3ad9e2592c60af1", 128 | "sha256:f89de64d9eb3478b188859214752db50c91a749479011abd99e248550371375f", 129 | "sha256:fcd5d932842df503eb0bf60f9cc35e6fe732b51f499e78b45234e0be41b0018d" 130 | ], 131 | "markers": "python_version >= '3.6'", 132 | "version": "==1.47.0" 133 | }, 134 | "idna": { 135 | "hashes": [ 136 | "sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff", 137 | "sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d" 138 | ], 139 | "markers": "python_version >= '3.5'", 140 | "version": "==3.3" 141 | }, 142 | "importlib-metadata": { 143 | "hashes": [ 144 | "sha256:637245b8bab2b6502fcbc752cc4b7a6f6243bb02b31c5c26156ad103d3d45670", 145 | "sha256:7401a975809ea1fdc658c3aa4f78cc2195a0e019c5cbc4c06122884e9ae80c23" 146 | ], 147 | "markers": "python_version < '3.10'", 148 | "version": "==4.12.0" 149 | }, 150 | "joblib": { 151 | "hashes": [ 152 | "sha256:4158fcecd13733f8be669be0683b96ebdbbd38d23559f54dca7205aea1bf1e35", 153 | "sha256:f21f109b3c7ff9d95f8387f752d0d9c34a02aa2f7060c2135f465da0e5160ff6" 154 | ], 155 | "markers": "python_version >= '3.6'", 156 | "version": "==1.1.0" 157 | }, 158 | "kornia": { 159 | "hashes": [ 160 | "sha256:0012bec4226e2f0b041f51049953775b23ed71b6e760ee03a1fb89e97e06b174", 161 | "sha256:caf74d656417e96cd0f6d1eea0101ad3d2dfa1211df9b5d1314ce564a4496d55" 162 | ], 163 | "index": "pypi", 164 | "version": "==0.1.4.post2" 165 | }, 166 | "markdown": { 167 | "hashes": [ 168 | "sha256:08fb8465cffd03d10b9dd34a5c3fea908e20391a2a90b88d66362cb05beed186", 169 | "sha256:3b809086bb6efad416156e00a0da66fe47618a5d6918dd688f53f40c8e4cfeff" 170 | ], 171 | "markers": "python_version >= '3.7'", 172 | "version": "==3.4.1" 173 | }, 174 | "markupsafe": { 175 | "hashes": [ 176 | "sha256:0212a68688482dc52b2d45013df70d169f542b7394fc744c02a57374a4207003", 177 | "sha256:089cf3dbf0cd6c100f02945abeb18484bd1ee57a079aefd52cffd17fba910b88", 178 | "sha256:10c1bfff05d95783da83491be968e8fe789263689c02724e0c691933c52994f5", 179 | "sha256:33b74d289bd2f5e527beadcaa3f401e0df0a89927c1559c8566c066fa4248ab7", 180 | "sha256:3799351e2336dc91ea70b034983ee71cf2f9533cdff7c14c90ea126bfd95d65a", 181 | "sha256:3ce11ee3f23f79dbd06fb3d63e2f6af7b12db1d46932fe7bd8afa259a5996603", 182 | "sha256:421be9fbf0ffe9ffd7a378aafebbf6f4602d564d34be190fc19a193232fd12b1", 183 | "sha256:43093fb83d8343aac0b1baa75516da6092f58f41200907ef92448ecab8825135", 184 | "sha256:46d00d6cfecdde84d40e572d63735ef81423ad31184100411e6e3388d405e247", 185 | "sha256:4a33dea2b688b3190ee12bd7cfa29d39c9ed176bda40bfa11099a3ce5d3a7ac6", 186 | "sha256:4b9fe39a2ccc108a4accc2676e77da025ce383c108593d65cc909add5c3bd601", 187 | "sha256:56442863ed2b06d19c37f94d999035e15ee982988920e12a5b4ba29b62ad1f77", 188 | "sha256:671cd1187ed5e62818414afe79ed29da836dde67166a9fac6d435873c44fdd02", 189 | "sha256:694deca8d702d5db21ec83983ce0bb4b26a578e71fbdbd4fdcd387daa90e4d5e", 190 | "sha256:6a074d34ee7a5ce3effbc526b7083ec9731bb3cbf921bbe1d3005d4d2bdb3a63", 191 | "sha256:6d0072fea50feec76a4c418096652f2c3238eaa014b2f94aeb1d56a66b41403f", 192 | "sha256:6fbf47b5d3728c6aea2abb0589b5d30459e369baa772e0f37a0320185e87c980", 193 | "sha256:7f91197cc9e48f989d12e4e6fbc46495c446636dfc81b9ccf50bb0ec74b91d4b", 194 | "sha256:86b1f75c4e7c2ac2ccdaec2b9022845dbb81880ca318bb7a0a01fbf7813e3812", 195 | "sha256:8dc1c72a69aa7e082593c4a203dcf94ddb74bb5c8a731e4e1eb68d031e8498ff", 196 | "sha256:8e3dcf21f367459434c18e71b2a9532d96547aef8a871872a5bd69a715c15f96", 197 | "sha256:8e576a51ad59e4bfaac456023a78f6b5e6e7651dcd383bcc3e18d06f9b55d6d1", 198 | "sha256:96e37a3dc86e80bf81758c152fe66dbf60ed5eca3d26305edf01892257049925", 199 | "sha256:97a68e6ada378df82bc9f16b800ab77cbf4b2fada0081794318520138c088e4a", 200 | "sha256:99a2a507ed3ac881b975a2976d59f38c19386d128e7a9a18b7df6fff1fd4c1d6", 201 | "sha256:a49907dd8420c5685cfa064a1335b6754b74541bbb3706c259c02ed65b644b3e", 202 | "sha256:b09bf97215625a311f669476f44b8b318b075847b49316d3e28c08e41a7a573f", 203 | "sha256:b7bd98b796e2b6553da7225aeb61f447f80a1ca64f41d83612e6139ca5213aa4", 204 | "sha256:b87db4360013327109564f0e591bd2a3b318547bcef31b468a92ee504d07ae4f", 205 | "sha256:bcb3ed405ed3222f9904899563d6fc492ff75cce56cba05e32eff40e6acbeaa3", 206 | "sha256:d4306c36ca495956b6d568d276ac11fdd9c30a36f1b6eb928070dc5360b22e1c", 207 | "sha256:d5ee4f386140395a2c818d149221149c54849dfcfcb9f1debfe07a8b8bd63f9a", 208 | "sha256:dda30ba7e87fbbb7eab1ec9f58678558fd9a6b8b853530e176eabd064da81417", 209 | "sha256:e04e26803c9c3851c931eac40c695602c6295b8d432cbe78609649ad9bd2da8a", 210 | "sha256:e1c0b87e09fa55a220f058d1d49d3fb8df88fbfab58558f1198e08c1e1de842a", 211 | "sha256:e72591e9ecd94d7feb70c1cbd7be7b3ebea3f548870aa91e2732960fa4d57a37", 212 | "sha256:e8c843bbcda3a2f1e3c2ab25913c80a3c5376cd00c6e8c4a86a89a28c8dc5452", 213 | "sha256:efc1913fd2ca4f334418481c7e595c00aad186563bbc1ec76067848c7ca0a933", 214 | "sha256:f121a1420d4e173a5d96e47e9a0c0dcff965afdf1626d28de1460815f7c4ee7a", 215 | "sha256:fc7b548b17d238737688817ab67deebb30e8073c95749d55538ed473130ec0c7" 216 | ], 217 | "markers": "python_version >= '3.7'", 218 | "version": "==2.1.1" 219 | }, 220 | "numpy": { 221 | "hashes": [ 222 | "sha256:1dbe1c91269f880e364526649a52eff93ac30035507ae980d2fed33aaee633ac", 223 | "sha256:357768c2e4451ac241465157a3e929b265dfac85d9214074985b1786244f2ef3", 224 | "sha256:3820724272f9913b597ccd13a467cc492a0da6b05df26ea09e78b171a0bb9da6", 225 | "sha256:4391bd07606be175aafd267ef9bea87cf1b8210c787666ce82073b05f202add1", 226 | "sha256:4aa48afdce4660b0076a00d80afa54e8a97cd49f457d68a4342d188a09451c1a", 227 | "sha256:58459d3bad03343ac4b1b42ed14d571b8743dc80ccbf27444f266729df1d6f5b", 228 | "sha256:5c3c8def4230e1b959671eb959083661b4a0d2e9af93ee339c7dada6759a9470", 229 | "sha256:5f30427731561ce75d7048ac254dbe47a2ba576229250fb60f0fb74db96501a1", 230 | "sha256:643843bcc1c50526b3a71cd2ee561cf0d8773f062c8cbaf9ffac9fdf573f83ab", 231 | "sha256:67c261d6c0a9981820c3a149d255a76918278a6b03b6a036800359aba1256d46", 232 | "sha256:67f21981ba2f9d7ba9ade60c9e8cbaa8cf8e9ae51673934480e45cf55e953673", 233 | "sha256:6aaf96c7f8cebc220cdfc03f1d5a31952f027dda050e5a703a0d1c396075e3e7", 234 | "sha256:7c4068a8c44014b2d55f3c3f574c376b2494ca9cc73d2f1bd692382b6dffe3db", 235 | "sha256:7c7e5fa88d9ff656e067876e4736379cc962d185d5cd808014a8a928d529ef4e", 236 | "sha256:7f5ae4f304257569ef3b948810816bc87c9146e8c446053539947eedeaa32786", 237 | "sha256:82691fda7c3f77c90e62da69ae60b5ac08e87e775b09813559f8901a88266552", 238 | "sha256:8737609c3bbdd48e380d463134a35ffad3b22dc56295eff6f79fd85bd0eeeb25", 239 | "sha256:9f411b2c3f3d76bba0865b35a425157c5dcf54937f82bbeb3d3c180789dd66a6", 240 | "sha256:a6be4cb0ef3b8c9250c19cc122267263093eee7edd4e3fa75395dfda8c17a8e2", 241 | "sha256:bcb238c9c96c00d3085b264e5c1a1207672577b93fa666c3b14a45240b14123a", 242 | "sha256:bf2ec4b75d0e9356edea834d1de42b31fe11f726a81dfb2c2112bc1eaa508fcf", 243 | "sha256:d136337ae3cc69aa5e447e78d8e1514be8c3ec9b54264e680cf0b4bd9011574f", 244 | "sha256:d4bf4d43077db55589ffc9009c0ba0a94fa4908b9586d6ccce2e0b164c86303c", 245 | "sha256:d6a96eef20f639e6a97d23e57dd0c1b1069a7b4fd7027482a4c5c451cd7732f4", 246 | "sha256:d9caa9d5e682102453d96a0ee10c7241b72859b01a941a397fd965f23b3e016b", 247 | "sha256:dd1c8f6bd65d07d3810b90d02eba7997e32abbdf1277a481d698969e921a3be0", 248 | "sha256:e31f0bb5928b793169b87e3d1e070f2342b22d5245c755e2b81caa29756246c3", 249 | "sha256:ecb55251139706669fdec2ff073c98ef8e9a84473e51e716211b41aa0f18e656", 250 | "sha256:ee5ec40fdd06d62fe5d4084bef4fd50fd4bb6bfd2bf519365f569dc470163ab0", 251 | "sha256:f17e562de9edf691a42ddb1eb4a5541c20dd3f9e65b09ded2beb0799c0cf29bb", 252 | "sha256:fdffbfb6832cd0b300995a2b08b8f6fa9f6e856d562800fea9182316d99c4e8e" 253 | ], 254 | "markers": "python_version < '3.11' and python_version >= '3.7'", 255 | "version": "==1.21.6" 256 | }, 257 | "oauthlib": { 258 | "hashes": [ 259 | "sha256:23a8208d75b902797ea29fd31fa80a15ed9dc2c6c16fe73f5d346f83f6fa27a2", 260 | "sha256:6db33440354787f9b7f3a6dbd4febf5d0f93758354060e802f6c06cb493022fe" 261 | ], 262 | "markers": "python_version >= '3.6'", 263 | "version": "==3.2.0" 264 | }, 265 | "opencv-python": { 266 | "hashes": [ 267 | "sha256:01505b131dc35f60e99a5da98b77156e37f872ae0ff5596e5e68d526bb572d3c", 268 | "sha256:0478a1037505ddde312806c960a5e8958d2cf7a2885e8f2f5dde74c4028e0b04", 269 | "sha256:17810b89f9ef8e8537e75332acf533e619e26ccadbf1b73f24bf338f2d327ddd", 270 | "sha256:19ad2ea9fb32946761b47b9d6eed51876a8329da127f27788263fecd66651ba0", 271 | "sha256:1a250edb739baf3e7c25d99a2ee252aac4f59a97e0bee39237eaa490fd0281d3", 272 | "sha256:3505468970448f66cd776cb9e179570c87988f94b5cf9bcbc4c2d88bd88bbdf1", 273 | "sha256:4e04a91da157885359f487534433340b2d709927559c80acf62c28167e59be02", 274 | "sha256:5a49cffcdec5e37217672579c3343565926d999642844efa9c6a031ed5f32318", 275 | "sha256:604b2ce3d4a86480ced0813da7fba269b4605ad9fea26cd2144d8077928d4b49", 276 | "sha256:61cbb8fa9565a0480c46028599431ad8f19181a7fac8070a700515fd54cd7377", 277 | "sha256:62d7c6e511c9454f099616315c695d02a584048e1affe034b39160db7a2ae34d", 278 | "sha256:6555272dd9efd412d17cdc1a4f4c2da5753c099d95d9ff01aca54bb9782fb5cf", 279 | "sha256:67d994c6b2b14cb9239e85dc7dfa6c08ef7cf6eb4def80c0af6141dfacc8cbb9", 280 | "sha256:68c9cbe538666c4667523821cc56caee49389bea06bae4c0fc2cd68bd264226a", 281 | "sha256:822ad8f628a9498f569c57d30865f5ef9ee17824cee0a1d456211f742028c135", 282 | "sha256:82d972429eb4fee22c1dc4204af2a2e981f010e5e4f66daea2a6c68381b79184", 283 | "sha256:9128924f5b58269ee221b8cf2d736f31bd3bb0391b92ee8504caadd68c8176a2", 284 | "sha256:9172cf8270572c494d8b2ae12ef87c0f6eed9d132927e614099f76843b0c91d7", 285 | "sha256:952bce4d30a8287b17721ddaad7f115dab268efee8576249ddfede80ec2ce404", 286 | "sha256:a8147718e70b1f170a3d26518e992160137365a4db0ed82a9efd3040f9f660d4", 287 | "sha256:bfdb636a3796ff223460ea0fcfda906b3b54f4bef22ae433a5b67e66fab00b25", 288 | "sha256:c9c3f27867153634e1083390920067008ebaaab78aeb09c4e0274e69746cb2c8", 289 | "sha256:d69be21973d450a4662ae6bd1b3df6b1af030e448d7276380b0d1adf7c8c2ae6", 290 | "sha256:db1479636812a6579a3753b72a6fefaa73190f32bf7b19e483f8bc750cebe1a5", 291 | "sha256:db8313d755962a7dd61e5c22a651e0743208adfdb255c6ec8904ce9cb02940c6", 292 | "sha256:e4625a6b032e7797958aeb630d6e3e91e3896d285020aae612e6d7b342d6dfea", 293 | "sha256:e8397a26966a1290836a52c34b362aabc65a422b9ffabcbbdec1862f023ccab8" 294 | ], 295 | "index": "pypi", 296 | "version": "==4.1.1.26" 297 | }, 298 | "pandas": { 299 | "hashes": [ 300 | "sha256:00dff3a8e337f5ed7ad295d98a31821d3d0fe7792da82d78d7fd79b89c03ea9d", 301 | "sha256:22361b1597c8c2ffd697aa9bf85423afa9e1fcfa6b1ea821054a244d5f24d75e", 302 | "sha256:255920e63850dc512ce356233081098554d641ba99c3767dde9e9f35630f994b", 303 | "sha256:26382aab9c119735908d94d2c5c08020a4a0a82969b7e5eefb92f902b3b30ad7", 304 | "sha256:33970f4cacdd9a0ddb8f21e151bfb9f178afb7c36eb7c25b9094c02876f385c2", 305 | "sha256:4545467a637e0e1393f7d05d61dace89689ad6d6f66f267f86fff737b702cce9", 306 | "sha256:52da74df8a9c9a103af0a72c9d5fdc8e0183a90884278db7f386b5692a2220a4", 307 | "sha256:61741f5aeb252f39c3031d11405305b6d10ce663c53bc3112705d7ad66c013d0", 308 | "sha256:6a3ac2c87e4e32a969921d1428525f09462770c349147aa8e9ab95f88c71ec71", 309 | "sha256:7458c48e3d15b8aaa7d575be60e1e4dd70348efcd9376656b72fecd55c59a4c3", 310 | "sha256:78bf638993219311377ce9836b3dc05f627a666d0dbc8cec37c0ff3c9ada673b", 311 | "sha256:8153705d6545fd9eb6dd2bc79301bff08825d2e2f716d5dced48daafc2d0b81f", 312 | "sha256:975c461accd14e89d71772e89108a050fa824c0b87a67d34cedf245f6681fc17", 313 | "sha256:9962957a27bfb70ab64103d0a7b42fa59c642fb4ed4cb75d0227b7bb9228535d", 314 | "sha256:adc3d3a3f9e59a38d923e90e20c4922fc62d1e5a03d083440468c6d8f3f1ae0a", 315 | "sha256:bbe3eb765a0b1e578833d243e2814b60c825b7fdbf4cdfe8e8aae8a08ed56ecf", 316 | "sha256:df8864824b1fe488cf778c3650ee59c3a0d8f42e53707de167ba6b4f7d35f133", 317 | "sha256:e45055c30a608076e31a9fcd780a956ed3b1fa20db61561b8d88b79259f526f7", 318 | "sha256:ee50c2142cdcf41995655d499a157d0a812fce55c97d9aad13bc1eef837ed36c" 319 | ], 320 | "index": "pypi", 321 | "version": "==0.25.3" 322 | }, 323 | "pillow": { 324 | "hashes": [ 325 | "sha256:0030fdbd926fb85844b8b92e2f9449ba89607231d3dd597a21ae72dc7fe26927", 326 | "sha256:030e3460861488e249731c3e7ab59b07c7853838ff3b8e16aac9561bb345da14", 327 | "sha256:0ed2c4ef2451de908c90436d6e8092e13a43992f1860275b4d8082667fbb2ffc", 328 | "sha256:136659638f61a251e8ed3b331fc6ccd124590eeff539de57c5f80ef3a9594e58", 329 | "sha256:13b725463f32df1bfeacbf3dd197fb358ae8ebcd8c5548faa75126ea425ccb60", 330 | "sha256:1536ad017a9f789430fb6b8be8bf99d2f214c76502becc196c6f2d9a75b01b76", 331 | "sha256:15928f824870535c85dbf949c09d6ae7d3d6ac2d6efec80f3227f73eefba741c", 332 | "sha256:17d4cafe22f050b46d983b71c707162d63d796a1235cdf8b9d7a112e97b15bac", 333 | "sha256:1802f34298f5ba11d55e5bb09c31997dc0c6aed919658dfdf0198a2fe75d5490", 334 | "sha256:1cc1d2451e8a3b4bfdb9caf745b58e6c7a77d2e469159b0d527a4554d73694d1", 335 | "sha256:1fd6f5e3c0e4697fa7eb45b6e93996299f3feee73a3175fa451f49a74d092b9f", 336 | "sha256:254164c57bab4b459f14c64e93df11eff5ded575192c294a0c49270f22c5d93d", 337 | "sha256:2ad0d4df0f5ef2247e27fc790d5c9b5a0af8ade9ba340db4a73bb1a4a3e5fb4f", 338 | "sha256:2c58b24e3a63efd22554c676d81b0e57f80e0a7d3a5874a7e14ce90ec40d3069", 339 | "sha256:2d33a11f601213dcd5718109c09a52c2a1c893e7461f0be2d6febc2879ec2402", 340 | "sha256:337a74fd2f291c607d220c793a8135273c4c2ab001b03e601c36766005f36885", 341 | "sha256:37ff6b522a26d0538b753f0b4e8e164fdada12db6c6f00f62145d732d8a3152e", 342 | "sha256:3d1f14f5f691f55e1b47f824ca4fdcb4b19b4323fe43cc7bb105988cad7496be", 343 | "sha256:408673ed75594933714482501fe97e055a42996087eeca7e5d06e33218d05aa8", 344 | "sha256:4134d3f1ba5f15027ff5c04296f13328fecd46921424084516bdb1b2548e66ff", 345 | "sha256:4ad2f835e0ad81d1689f1b7e3fbac7b01bb8777d5a985c8962bedee0cc6d43da", 346 | "sha256:50dff9cc21826d2977ef2d2a205504034e3a4563ca6f5db739b0d1026658e004", 347 | "sha256:510cef4a3f401c246cfd8227b300828715dd055463cdca6176c2e4036df8bd4f", 348 | "sha256:5aed7dde98403cd91d86a1115c78d8145c83078e864c1de1064f52e6feb61b20", 349 | "sha256:69bd1a15d7ba3694631e00df8de65a8cb031911ca11f44929c97fe05eb9b6c1d", 350 | "sha256:6bf088c1ce160f50ea40764f825ec9b72ed9da25346216b91361eef8ad1b8f8c", 351 | "sha256:6e8c66f70fb539301e064f6478d7453e820d8a2c631da948a23384865cd95544", 352 | "sha256:727dd1389bc5cb9827cbd1f9d40d2c2a1a0c9b32dd2261db522d22a604a6eec9", 353 | "sha256:74a04183e6e64930b667d321524e3c5361094bb4af9083db5c301db64cd341f3", 354 | "sha256:75e636fd3e0fb872693f23ccb8a5ff2cd578801251f3a4f6854c6a5d437d3c04", 355 | "sha256:7761afe0126d046974a01e030ae7529ed0ca6a196de3ec6937c11df0df1bc91c", 356 | "sha256:7888310f6214f19ab2b6df90f3f06afa3df7ef7355fc025e78a3044737fab1f5", 357 | "sha256:7b0554af24df2bf96618dac71ddada02420f946be943b181108cac55a7a2dcd4", 358 | "sha256:7c7b502bc34f6e32ba022b4a209638f9e097d7a9098104ae420eb8186217ebbb", 359 | "sha256:808add66ea764ed97d44dda1ac4f2cfec4c1867d9efb16a33d158be79f32b8a4", 360 | "sha256:831e648102c82f152e14c1a0938689dbb22480c548c8d4b8b248b3e50967b88c", 361 | "sha256:93689632949aff41199090eff5474f3990b6823404e45d66a5d44304e9cdc467", 362 | "sha256:96b5e6874431df16aee0c1ba237574cb6dff1dcb173798faa6a9d8b399a05d0e", 363 | "sha256:9a54614049a18a2d6fe156e68e188da02a046a4a93cf24f373bffd977e943421", 364 | "sha256:a138441e95562b3c078746a22f8fca8ff1c22c014f856278bdbdd89ca36cff1b", 365 | "sha256:a647c0d4478b995c5e54615a2e5360ccedd2f85e70ab57fbe817ca613d5e63b8", 366 | "sha256:a9c9bc489f8ab30906d7a85afac4b4944a572a7432e00698a7239f44a44e6efb", 367 | "sha256:ad2277b185ebce47a63f4dc6302e30f05762b688f8dc3de55dbae4651872cdf3", 368 | "sha256:b6d5e92df2b77665e07ddb2e4dbd6d644b78e4c0d2e9272a852627cdba0d75cf", 369 | "sha256:bc431b065722a5ad1dfb4df354fb9333b7a582a5ee39a90e6ffff688d72f27a1", 370 | "sha256:bdd0de2d64688ecae88dd8935012c4a72681e5df632af903a1dca8c5e7aa871a", 371 | "sha256:c79698d4cd9318d9481d89a77e2d3fcaeff5486be641e60a4b49f3d2ecca4e28", 372 | "sha256:cb6259196a589123d755380b65127ddc60f4c64b21fc3bb46ce3a6ea663659b0", 373 | "sha256:d5b87da55a08acb586bad5c3aa3b86505f559b84f39035b233d5bf844b0834b1", 374 | "sha256:dcd7b9c7139dc8258d164b55696ecd16c04607f1cc33ba7af86613881ffe4ac8", 375 | "sha256:dfe4c1fedfde4e2fbc009d5ad420647f7730d719786388b7de0999bf32c0d9fd", 376 | "sha256:ea98f633d45f7e815db648fd7ff0f19e328302ac36427343e4432c84432e7ff4", 377 | "sha256:ec52c351b35ca269cb1f8069d610fc45c5bd38c3e91f9ab4cbbf0aebc136d9c8", 378 | "sha256:eef7592281f7c174d3d6cbfbb7ee5984a671fcd77e3fc78e973d492e9bf0eb3f", 379 | "sha256:f07f1f00e22b231dd3d9b9208692042e29792d6bd4f6639415d2f23158a80013", 380 | "sha256:f3fac744f9b540148fa7715a435d2283b71f68bfb6d4aae24482a890aed18b59", 381 | "sha256:fa768eff5f9f958270b081bb33581b4b569faabf8774726b283edb06617101dc", 382 | "sha256:fac2d65901fb0fdf20363fbd345c01958a742f2dc62a8dd4495af66e3ff502a4" 383 | ], 384 | "markers": "python_version >= '3.7'", 385 | "version": "==9.2.0" 386 | }, 387 | "protobuf": { 388 | "hashes": [ 389 | "sha256:011c0f267e85f5d73750b6c25f0155d5db1e9443cd3590ab669a6221dd8fcdb0", 390 | "sha256:3ec6f5b37935406bb9df9b277e79f8ed81d697146e07ef2ba8a5a272fb24b2c9", 391 | "sha256:5310cbe761e87f0c1decce019d23f2101521d4dfff46034f8a12a53546036ec7", 392 | "sha256:5e0b272217aad8971763960238c1a1e6a65d50ef7824e23300da97569a251c55", 393 | "sha256:5e0ce02418ef03d7657a420ae8fd6fec4995ac713a3cb09164e95f694dbcf085", 394 | "sha256:5eb0724615e90075f1d763983e708e1cef08e66b1891d8b8b6c33bc3b2f1a02b", 395 | "sha256:7b6f22463e2d1053d03058b7b4ceca6e4ed4c14f8c286c32824df751137bf8e7", 396 | "sha256:a7faa62b183d6a928e3daffd06af843b4287d16ef6e40f331575ecd236a7974d", 397 | "sha256:b04484d6f42f48c57dd2737a72692f4c6987529cdd148fb5b8e5f616862a2e37", 398 | "sha256:b52e7a522911a40445a5f588bd5b5e584291bfc5545e09b7060685e4b2ff814f", 399 | "sha256:bf711b451212dc5b0fa45ae7dada07d8e71a4b0ff0bc8e4783ee145f47ac4f82", 400 | "sha256:e5c5a2886ae48d22a9d32fbb9b6636a089af3cd26b706750258ce1ca96cc0116", 401 | "sha256:eb1106e87e095628e96884a877a51cdb90087106ee693925ec0a300468a9be3a", 402 | "sha256:ee04f5823ed98bb9a8c3b1dc503c49515e0172650875c3f76e225b223793a1f2" 403 | ], 404 | "markers": "python_version >= '3.7'", 405 | "version": "==4.21.5" 406 | }, 407 | "pyasn1": { 408 | "hashes": [ 409 | "sha256:014c0e9976956a08139dc0712ae195324a75e142284d5f87f1a87ee1b068a359", 410 | "sha256:03840c999ba71680a131cfaee6fab142e1ed9bbd9c693e285cc6aca0d555e576", 411 | "sha256:0458773cfe65b153891ac249bcf1b5f8f320b7c2ce462151f8fa74de8934becf", 412 | "sha256:08c3c53b75eaa48d71cf8c710312316392ed40899cb34710d092e96745a358b7", 413 | "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d", 414 | "sha256:5c9414dcfede6e441f7e8f81b43b34e834731003427e5b09e4e00e3172a10f00", 415 | "sha256:6e7545f1a61025a4e58bb336952c5061697da694db1cae97b116e9c46abcf7c8", 416 | "sha256:78fa6da68ed2727915c4767bb386ab32cdba863caa7dbe473eaae45f9959da86", 417 | "sha256:7ab8a544af125fb704feadb008c99a88805126fb525280b2270bb25cc1d78a12", 418 | "sha256:99fcc3c8d804d1bc6d9a099921e39d827026409a58f2a720dcdb89374ea0c776", 419 | "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba", 420 | "sha256:e89bf84b5437b532b0803ba5c9a5e054d21fec423a89952a74f87fa2c9b7bce2", 421 | "sha256:fec3e9d8e36808a28efb59b489e4528c10ad0f480e57dcc32b4de5c9d8c9fdf3" 422 | ], 423 | "version": "==0.4.8" 424 | }, 425 | "pyasn1-modules": { 426 | "hashes": [ 427 | "sha256:0845a5582f6a02bb3e1bde9ecfc4bfcae6ec3210dd270522fee602365430c3f8", 428 | "sha256:0fe1b68d1e486a1ed5473f1302bd991c1611d319bba158e98b106ff86e1d7199", 429 | "sha256:15b7c67fabc7fc240d87fb9aabf999cf82311a6d6fb2c70d00d3d0604878c811", 430 | "sha256:426edb7a5e8879f1ec54a1864f16b882c2837bfd06eee62f2c982315ee2473ed", 431 | "sha256:65cebbaffc913f4fe9e4808735c95ea22d7a7775646ab690518c056784bc21b4", 432 | "sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e", 433 | "sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74", 434 | "sha256:a99324196732f53093a84c4369c996713eb8c89d360a496b599fb1a9c47fc3eb", 435 | "sha256:b80486a6c77252ea3a3e9b1e360bc9cf28eaac41263d173c032581ad2f20fe45", 436 | "sha256:c29a5e5cc7a3f05926aff34e097e84f8589cd790ce0ed41b67aed6857b26aafd", 437 | "sha256:cbac4bc38d117f2a49aeedec4407d23e8866ea4ac27ff2cf7fb3e5b570df19e0", 438 | "sha256:f39edd8c4ecaa4556e989147ebf219227e2cd2e8a43c7e7fcb1f1c18c5fd6a3d", 439 | "sha256:fe0644d9ab041506b62782e92b06b8c68cca799e1a9636ec398675459e031405" 440 | ], 441 | "version": "==0.2.8" 442 | }, 443 | "python-dateutil": { 444 | "hashes": [ 445 | "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86", 446 | "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9" 447 | ], 448 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", 449 | "version": "==2.8.2" 450 | }, 451 | "pytz": { 452 | "hashes": [ 453 | "sha256:220f481bdafa09c3955dfbdddb7b57780e9a94f5127e35456a48589b9e0c0197", 454 | "sha256:cea221417204f2d1a2aa03ddae3e867921971d0d76f14d87abb4414415bbdcf5" 455 | ], 456 | "version": "==2022.2.1" 457 | }, 458 | "requests": { 459 | "hashes": [ 460 | "sha256:7c5599b102feddaa661c826c56ab4fee28bfd17f5abca1ebbe3e7f19d7c97983", 461 | "sha256:8fefa2a1a1365bf5520aac41836fbee479da67864514bdb821f31ce07ce65349" 462 | ], 463 | "markers": "python_version >= '3.7' and python_version < '4'", 464 | "version": "==2.28.1" 465 | }, 466 | "requests-oauthlib": { 467 | "hashes": [ 468 | "sha256:2577c501a2fb8d05a304c09d090d6e47c306fef15809d102b327cf8364bddab5", 469 | "sha256:75beac4a47881eeb94d5ea5d6ad31ef88856affe2332b9aafb52c6452ccf0d7a" 470 | ], 471 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", 472 | "version": "==1.3.1" 473 | }, 474 | "rsa": { 475 | "hashes": [ 476 | "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7", 477 | "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21" 478 | ], 479 | "markers": "python_version >= '3.6'", 480 | "version": "==4.9" 481 | }, 482 | "scikit-learn": { 483 | "hashes": [ 484 | "sha256:1ac81293d261747c25ea5a0ee8cd2bb1f3b5ba9ec05421a7f9f0feb4eb7c4116", 485 | "sha256:289361cf003d90b007f5066b27fcddc2d71324c82f1c88e316fedacb0dfdd516", 486 | "sha256:3a14d0abd4281fc3fd2149c486c3ec7cedad848b8d5f7b6f61522029d65a29f8", 487 | "sha256:5083a5e50d9d54548e4ada829598ae63a05651dd2bb319f821ffd9e8388384a6", 488 | "sha256:777cdd5c077b7ca9cb381396c81990cf41d2fa8350760d3cad3b4c460a7db644", 489 | "sha256:8bf2ff63da820d09b96b18e88f9625228457bff8df4618f6b087e12442ef9e15", 490 | "sha256:8d319b71c449627d178f21c57614e21747e54bb3fc9602b6f42906c3931aa320", 491 | "sha256:928050b65781fea9542dfe9bfe02d8c4f5530baa8472ec60782ea77347d2c836", 492 | "sha256:92c903613ff50e22aa95d589f9fff5deb6f34e79f7f21f609680087f137bb524", 493 | "sha256:ae322235def5ce8fae645b439e332e6f25d34bb90d6a6c8e261f17eb476457b7", 494 | "sha256:c1cd6b29eb1fd1cc672ac5e4a8be5f6ea936d094a3dc659ada0746d6fac750b1", 495 | "sha256:c41a6e2685d06bcdb0d26533af2540f54884d40db7e48baed6a5bcbf1a7cc642", 496 | "sha256:d07fcb0c0acbc043faa0e7cf4d2037f71193de3fb04fb8ed5c259b089af1cf5c", 497 | "sha256:d146d5443cda0a41f74276e42faf8c7f283fef49e8a853b832885239ef544e05", 498 | "sha256:eb2b7bed0a26ba5ce3700e15938b28a4f4513578d3e54a2156c29df19ac5fd01", 499 | "sha256:eb9b8ebf59eddd8b96366428238ab27d05a19e89c5516ce294abc35cea75d003" 500 | ], 501 | "index": "pypi", 502 | "version": "==0.21.3" 503 | }, 504 | "scipy": { 505 | "hashes": [ 506 | "sha256:033ce76ed4e9f62923e1f8124f7e2b0800db533828c853b402c7eec6e9465d80", 507 | "sha256:173308efba2270dcd61cd45a30dfded6ec0085b4b6eb33b5eb11ab443005e088", 508 | "sha256:21b66200cf44b1c3e86495e3a436fc7a26608f92b8d43d344457c54f1c024cbc", 509 | "sha256:2c56b820d304dffcadbbb6cbfbc2e2c79ee46ea291db17e288e73cd3c64fefa9", 510 | "sha256:304dfaa7146cffdb75fbf6bb7c190fd7688795389ad060b970269c8576d038e9", 511 | "sha256:3f78181a153fa21c018d346f595edd648344751d7f03ab94b398be2ad083ed3e", 512 | "sha256:4d242d13206ca4302d83d8a6388c9dfce49fc48fdd3c20efad89ba12f785bf9e", 513 | "sha256:5d1cc2c19afe3b5a546ede7e6a44ce1ff52e443d12b231823268019f608b9b12", 514 | "sha256:5f2cfc359379c56b3a41b17ebd024109b2049f878badc1e454f31418c3a18436", 515 | "sha256:65bd52bf55f9a1071398557394203d881384d27b9c2cad7df9a027170aeaef93", 516 | "sha256:7edd9a311299a61e9919ea4192dd477395b50c014cdc1a1ac572d7c27e2207fa", 517 | "sha256:8499d9dd1459dc0d0fe68db0832c3d5fc1361ae8e13d05e6849b358dc3f2c279", 518 | "sha256:866ada14a95b083dd727a845a764cf95dd13ba3dc69a16b99038001b05439709", 519 | "sha256:87069cf875f0262a6e3187ab0f419f5b4280d3dcf4811ef9613c605f6e4dca95", 520 | "sha256:93378f3d14fff07572392ce6a6a2ceb3a1f237733bd6dcb9eb6a2b29b0d19085", 521 | "sha256:95c2d250074cfa76715d58830579c64dff7354484b284c2b8b87e5a38321672c", 522 | "sha256:ab5875facfdef77e0a47d5fd39ea178b58e60e454a4c85aa1e52fcb80db7babf", 523 | "sha256:b0e0aeb061a1d7dcd2ed59ea57ee56c9b23dd60100825f98238c06ee5cc4467e", 524 | "sha256:b78a35c5c74d336f42f44106174b9851c783184a85a3fe3e68857259b37b9ffb", 525 | "sha256:c9e04d7e9b03a8a6ac2045f7c5ef741be86727d8f49c45db45f244bdd2bcff17", 526 | "sha256:ca36e7d9430f7481fc7d11e015ae16fbd5575615a8e9060538104778be84addf", 527 | "sha256:ceebc3c4f6a109777c0053dfa0282fddb8893eddfb0d598574acfb734a926168", 528 | "sha256:e2c036492e673aad1b7b0d0ccdc0cb30a968353d2c4bf92ac8e73509e1bf212c", 529 | "sha256:eb326658f9b73c07081300daba90a8746543b5ea177184daed26528273157294", 530 | "sha256:eb7ae2c4dbdb3c9247e07acc532f91077ae6dbc40ad5bd5dca0bb5a176ee9bda", 531 | "sha256:edad1cf5b2ce1912c4d8ddad20e11d333165552aba262c882e28c78bbc09dbf6", 532 | "sha256:eef93a446114ac0193a7b714ce67659db80caf940f3232bad63f4c7a81bc18df", 533 | "sha256:f7eaea089345a35130bc9a39b89ec1ff69c208efa97b3f8b25ea5d4c41d88094", 534 | "sha256:f99d206db1f1ae735a8192ab93bd6028f3a42f6fa08467d37a14eb96c9dd34a3" 535 | ], 536 | "markers": "python_version < '3.11' and python_version >= '3.7'", 537 | "version": "==1.7.3" 538 | }, 539 | "setuptools": { 540 | "hashes": [ 541 | "sha256:7f4bc85450898a09f76ebf28b72fa25bc7111f6c7d665d514a60bba9c75ef2a9", 542 | "sha256:a3ca5857c89f82f5c9410e8508cb32f4872a3bafd4aa7ae122a24ca33bccc750" 543 | ], 544 | "markers": "python_version >= '3.7'", 545 | "version": "==65.2.0" 546 | }, 547 | "six": { 548 | "hashes": [ 549 | "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926", 550 | "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254" 551 | ], 552 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", 553 | "version": "==1.16.0" 554 | }, 555 | "tensorboard": { 556 | "hashes": [ 557 | "sha256:203bd0c2fa33e18c009fa21253b67b67b78ef9624c4df3f70d3ef1b4f0ca3f9c", 558 | "sha256:bf66fc182fcbfff6fc2e770754a100ef5c6bdc8601fece92375f31da60733fdc" 559 | ], 560 | "index": "pypi", 561 | "version": "==2.0.1" 562 | }, 563 | "torch": { 564 | "hashes": [ 565 | "sha256:0cec2e13a2e95c24c34f17d437f354ee2a40902e8d515a524556b350e12555dd", 566 | "sha256:134e8291a97151b1ffeea09cb9ddde5238beb4e6d9dfb66657143d6990bfb865", 567 | "sha256:31062923ac2e60eac676f6a0ae14702b051c158bbcf7f440eaba266b0defa197", 568 | "sha256:3b05233481b51bb636cee63dc761bb7f602e198178782ff4159d385d1759608b", 569 | "sha256:458f1d87e5b7064b2c39e36675d84e163be3143dd2fc806057b7878880c461bc", 570 | "sha256:72a1c85bffd2154f085bc0a1d378d8a54e55a57d49664b874fe7c949022bf071", 571 | "sha256:77fd8866c0bf529861ffd850a5dada2190a8d9c5167719fb0cfa89163e23b143", 572 | "sha256:b6f01d851d1c5989d4a99b50ae0187762b15b7718dcd1a33704b665daa2402f9", 573 | "sha256:d8e1d904a6193ed14a4fed220b00503b2baa576e71471286d1ebba899c851fae" 574 | ], 575 | "index": "pypi", 576 | "version": "==1.3.1" 577 | }, 578 | "torchvision": { 579 | "hashes": [ 580 | "sha256:0f8245d6378acc86917f58492675f93df5279abae8bc5f832e3510722191f6c9", 581 | "sha256:1ad7593d94f6612ccb84a59467f0d10cdc213fb3e2bb91f1e773eb844787fa4c", 582 | "sha256:2553405b9afe3cedb410873b9877eb18b1526f8b01cb7c2747e51b69a936e0b5", 583 | "sha256:276a385f2f5fe484bf08467b5d081d9144b97eb458ba5b4a11e4640389e53149", 584 | "sha256:66deba9c577e36f4f071decdd894bf7ba794ac133dae64b3fd02fc3f0c6b989d", 585 | "sha256:7a458330e4efcd66f9f70127ab21fcf8cfea84acda8e707322fd2843aa6dd396", 586 | "sha256:8ff715c2323d9eca89126824ebfa74b282a95d6f64a4743fbe9b738d2de21c77", 587 | "sha256:dca4aadc12a123730957b501f9c5c2870d2f6727a2c28552cb7907b68b0ea10c", 588 | "sha256:dda25ce304978bba19e6543f7dcfee4f37d2f128ec83d4ab0c7e8f991d64865f" 589 | ], 590 | "index": "pypi", 591 | "version": "==0.4.2" 592 | }, 593 | "tqdm": { 594 | "hashes": [ 595 | "sha256:9de4722323451eb7818deb0161d9d5523465353a6707a9f500d97ee42919b902", 596 | "sha256:c1d677f3a85fa291b34bdf8f770f877119b9754b32673699653556f85e2c2f13" 597 | ], 598 | "index": "pypi", 599 | "version": "==4.38.0" 600 | }, 601 | "typing-extensions": { 602 | "hashes": [ 603 | "sha256:25642c956049920a5aa49edcdd6ab1e06d7e5d467fc00e0506c44ac86fbfca02", 604 | "sha256:e6d2677a32f47fc7eb2795db1dd15c1f34eff616bcaf2cfb5e997f854fa1c4a6" 605 | ], 606 | "markers": "python_version < '3.8'", 607 | "version": "==4.3.0" 608 | }, 609 | "urllib3": { 610 | "hashes": [ 611 | "sha256:3fa96cf423e6987997fc326ae8df396db2a8b7c667747d47ddd8ecba91f4a74e", 612 | "sha256:b930dd878d5a8afb066a637fbb35144fe7901e3b209d1cd4f524bd0e9deee997" 613 | ], 614 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5' and python_version < '4'", 615 | "version": "==1.26.12" 616 | }, 617 | "werkzeug": { 618 | "hashes": [ 619 | "sha256:7ea2d48322cc7c0f8b3a215ed73eabd7b5d75d0b50e31ab006286ccff9e00b8f", 620 | "sha256:f979ab81f58d7318e064e99c4506445d60135ac5cd2e177a2de0089bfd4c9bd5" 621 | ], 622 | "markers": "python_version >= '3.7'", 623 | "version": "==2.2.2" 624 | }, 625 | "wheel": { 626 | "hashes": [ 627 | "sha256:4bdcd7d840138086126cd09254dc6195fb4fc6f01c050a1d7236f2630db1d22a", 628 | "sha256:e9a504e793efbca1b8e0e9cb979a249cf4a0a7b5b8c9e8b65a5e39d49529c1c4" 629 | ], 630 | "markers": "python_version >= '3'", 631 | "version": "==0.37.1" 632 | }, 633 | "zipp": { 634 | "hashes": [ 635 | "sha256:05b45f1ee8f807d0cc928485ca40a07cb491cf092ff587c0df9cb1fd154848d2", 636 | "sha256:47c40d7fe183a6f21403a199b3e4192cca5774656965b0a4988ad2f8feb5f009" 637 | ], 638 | "markers": "python_version >= '3.7'", 639 | "version": "==3.8.1" 640 | } 641 | }, 642 | "develop": { 643 | "argon2-cffi": { 644 | "hashes": [ 645 | "sha256:8c976986f2c5c0e5000919e6de187906cfd81fb1c72bf9d88c01177e77da7f80", 646 | "sha256:d384164d944190a7dd7ef22c6aa3ff197da12962bd04b17f64d4e93d934dba5b" 647 | ], 648 | "markers": "python_version >= '3.6'", 649 | "version": "==21.3.0" 650 | }, 651 | "argon2-cffi-bindings": { 652 | "hashes": [ 653 | "sha256:20ef543a89dee4db46a1a6e206cd015360e5a75822f76df533845c3cbaf72670", 654 | "sha256:2c3e3cc67fdb7d82c4718f19b4e7a87123caf8a93fde7e23cf66ac0337d3cb3f", 655 | "sha256:3b9ef65804859d335dc6b31582cad2c5166f0c3e7975f324d9ffaa34ee7e6583", 656 | "sha256:3e385d1c39c520c08b53d63300c3ecc28622f076f4c2b0e6d7e796e9f6502194", 657 | "sha256:58ed19212051f49a523abb1dbe954337dc82d947fb6e5a0da60f7c8471a8476c", 658 | "sha256:5e00316dabdaea0b2dd82d141cc66889ced0cdcbfa599e8b471cf22c620c329a", 659 | "sha256:603ca0aba86b1349b147cab91ae970c63118a0f30444d4bc80355937c950c082", 660 | "sha256:6a22ad9800121b71099d0fb0a65323810a15f2e292f2ba450810a7316e128ee5", 661 | "sha256:8cd69c07dd875537a824deec19f978e0f2078fdda07fd5c42ac29668dda5f40f", 662 | "sha256:93f9bf70084f97245ba10ee36575f0c3f1e7d7724d67d8e5b08e61787c320ed7", 663 | "sha256:9524464572e12979364b7d600abf96181d3541da11e23ddf565a32e70bd4dc0d", 664 | "sha256:b2ef1c30440dbbcba7a5dc3e319408b59676e2e039e2ae11a8775ecf482b192f", 665 | "sha256:b746dba803a79238e925d9046a63aa26bf86ab2a2fe74ce6b009a1c3f5c8f2ae", 666 | "sha256:bb89ceffa6c791807d1305ceb77dbfacc5aa499891d2c55661c6459651fc39e3", 667 | "sha256:bd46088725ef7f58b5a1ef7ca06647ebaf0eb4baff7d1d0d177c6cc8744abd86", 668 | "sha256:ccb949252cb2ab3a08c02024acb77cfb179492d5701c7cbdbfd776124d4d2367", 669 | "sha256:d4966ef5848d820776f5f562a7d45fdd70c2f330c961d0d745b784034bd9f48d", 670 | "sha256:e415e3f62c8d124ee16018e491a009937f8cf7ebf5eb430ffc5de21b900dad93", 671 | "sha256:ed2937d286e2ad0cc79a7087d3c272832865f779430e0cc2b4f3718d3159b0cb", 672 | "sha256:f1152ac548bd5b8bcecfb0b0371f082037e47128653df2e8ba6e914d384f3c3e", 673 | "sha256:f9f8b450ed0547e3d473fdc8612083fd08dd2120d6ac8f73828df9b7d45bb351" 674 | ], 675 | "markers": "python_version >= '3.6'", 676 | "version": "==21.2.0" 677 | }, 678 | "astroid": { 679 | "hashes": [ 680 | "sha256:71ea07f44df9568a75d0f354c49143a4575d90645e9fead6dfb52c26a85ed13a", 681 | "sha256:840947ebfa8b58f318d42301cf8c0a20fd794a33b61cc4638e28e9e61ba32f42" 682 | ], 683 | "markers": "python_version >= '3.5'", 684 | "version": "==2.3.3" 685 | }, 686 | "attrs": { 687 | "hashes": [ 688 | "sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6", 689 | "sha256:86efa402f67bf2df34f51a335487cf46b1ec130d02b8d39fd248abfd30da551c" 690 | ], 691 | "markers": "python_version >= '3.5'", 692 | "version": "==22.1.0" 693 | }, 694 | "autopep8": { 695 | "hashes": [ 696 | "sha256:4d8eec30cc81bc5617dbf1218201d770dc35629363547f17577c61683ccfb3ee" 697 | ], 698 | "index": "pypi", 699 | "version": "==1.4.4" 700 | }, 701 | "backcall": { 702 | "hashes": [ 703 | "sha256:5cbdbf27be5e7cfadb448baf0aa95508f91f2bbc6c6437cd9cd06e2a4c215e1e", 704 | "sha256:fbbce6a29f263178a1f7915c1940bde0ec2b2a967566fe1c65c1dfb7422bd255" 705 | ], 706 | "version": "==0.2.0" 707 | }, 708 | "beautifulsoup4": { 709 | "hashes": [ 710 | "sha256:58d5c3d29f5a36ffeb94f02f0d786cd53014cf9b3b3951d42e0080d8a9498d30", 711 | "sha256:ad9aa55b65ef2808eb405f46cf74df7fcb7044d5cbc26487f96eb2ef2e436693" 712 | ], 713 | "markers": "python_version >= '3.6'", 714 | "version": "==4.11.1" 715 | }, 716 | "bleach": { 717 | "hashes": [ 718 | "sha256:085f7f33c15bd408dd9b17a4ad77c577db66d76203e5984b1bd59baeee948b2a", 719 | "sha256:0d03255c47eb9bd2f26aa9bb7f2107732e7e8fe195ca2f64709fcf3b0a4a085c" 720 | ], 721 | "markers": "python_version >= '3.7'", 722 | "version": "==5.0.1" 723 | }, 724 | "cffi": { 725 | "hashes": [ 726 | "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5", 727 | "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef", 728 | "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104", 729 | "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426", 730 | "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405", 731 | "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375", 732 | "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a", 733 | "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e", 734 | "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc", 735 | "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf", 736 | "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185", 737 | "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497", 738 | "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3", 739 | "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35", 740 | "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c", 741 | "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83", 742 | "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21", 743 | "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca", 744 | "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984", 745 | "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac", 746 | "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd", 747 | "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee", 748 | "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a", 749 | "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2", 750 | "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192", 751 | "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7", 752 | "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585", 753 | "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f", 754 | "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e", 755 | "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27", 756 | "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b", 757 | "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e", 758 | "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e", 759 | "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d", 760 | "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c", 761 | "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415", 762 | "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82", 763 | "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02", 764 | "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314", 765 | "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325", 766 | "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c", 767 | "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3", 768 | "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914", 769 | "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045", 770 | "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d", 771 | "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9", 772 | "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5", 773 | "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2", 774 | "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c", 775 | "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3", 776 | "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2", 777 | "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8", 778 | "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d", 779 | "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d", 780 | "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9", 781 | "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162", 782 | "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76", 783 | "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4", 784 | "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e", 785 | "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9", 786 | "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6", 787 | "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b", 788 | "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01", 789 | "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0" 790 | ], 791 | "version": "==1.15.1" 792 | }, 793 | "decorator": { 794 | "hashes": [ 795 | "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330", 796 | "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186" 797 | ], 798 | "markers": "python_version >= '3.5'", 799 | "version": "==5.1.1" 800 | }, 801 | "defusedxml": { 802 | "hashes": [ 803 | "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69", 804 | "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61" 805 | ], 806 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", 807 | "version": "==0.7.1" 808 | }, 809 | "entrypoints": { 810 | "hashes": [ 811 | "sha256:589f874b313739ad35be6e0cd7efde2a4e9b6fea91edcc34e58ecbb8dbe56d19", 812 | "sha256:c70dd71abe5a8c85e55e12c19bd91ccfeec11a6e99044204511f9ed547d48451" 813 | ], 814 | "markers": "python_version >= '2.7'", 815 | "version": "==0.3" 816 | }, 817 | "fastjsonschema": { 818 | "hashes": [ 819 | "sha256:2f7158c4de792555753d6c2277d6a2af2d406dfd97aeca21d17173561ede4fe6", 820 | "sha256:d6fa3ffbe719768d70e298b9fb847484e2bdfdb7241ed052b8d57a9294a8c334" 821 | ], 822 | "version": "==2.16.1" 823 | }, 824 | "flake8": { 825 | "hashes": [ 826 | "sha256:45681a117ecc81e870cbf1262835ae4af5e7a8b08e40b944a8a6e6b895914cfb", 827 | "sha256:49356e766643ad15072a789a20915d3c91dc89fd313ccd71802303fd67e4deca" 828 | ], 829 | "index": "pypi", 830 | "version": "==3.7.9" 831 | }, 832 | "importlib-metadata": { 833 | "hashes": [ 834 | "sha256:637245b8bab2b6502fcbc752cc4b7a6f6243bb02b31c5c26156ad103d3d45670", 835 | "sha256:7401a975809ea1fdc658c3aa4f78cc2195a0e019c5cbc4c06122884e9ae80c23" 836 | ], 837 | "markers": "python_version < '3.10'", 838 | "version": "==4.12.0" 839 | }, 840 | "importlib-resources": { 841 | "hashes": [ 842 | "sha256:5481e97fb45af8dcf2f798952625591c58fe599d0735d86b10f54de086a61681", 843 | "sha256:f78a8df21a79bcc30cfd400bdc38f314333de7c0fb619763f6b9dabab8268bb7" 844 | ], 845 | "markers": "python_version < '3.9'", 846 | "version": "==5.9.0" 847 | }, 848 | "ipykernel": { 849 | "hashes": [ 850 | "sha256:4ea44b90ae1f7c38987ad58ea0809562a17c2695a0499644326f334aecd369ec", 851 | "sha256:66f824af1ef4650e1e2f6c42e1423074321440ef79ca3651a6cfd06a4e25e42f" 852 | ], 853 | "markers": "python_version >= '3.5'", 854 | "version": "==5.5.6" 855 | }, 856 | "ipython": { 857 | "hashes": [ 858 | "sha256:dfd303b270b7b5232b3d08bd30ec6fd685d8a58cabd54055e3d69d8f029f7280", 859 | "sha256:ed7ebe1cba899c1c3ccad6f7f1c2d2369464cc77dba8eebc65e2043e19cda995" 860 | ], 861 | "index": "pypi", 862 | "version": "==7.9.0" 863 | }, 864 | "ipython-genutils": { 865 | "hashes": [ 866 | "sha256:72dd37233799e619666c9f639a9da83c34013a73e8bbc79a7a6348d93c61fab8", 867 | "sha256:eb2e116e75ecef9d4d228fdc66af54269afa26ab4463042e33785b887c628ba8" 868 | ], 869 | "version": "==0.2.0" 870 | }, 871 | "isort": { 872 | "hashes": [ 873 | "sha256:54da7e92468955c4fceacd0c86bd0ec997b0e1ee80d97f67c35a78b719dccab1", 874 | "sha256:6e811fcb295968434526407adb8796944f1988c5b65e8139058f2014cbe100fd" 875 | ], 876 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", 877 | "version": "==4.3.21" 878 | }, 879 | "jedi": { 880 | "hashes": [ 881 | "sha256:637c9635fcf47945ceb91cd7f320234a7be540ded6f3e99a50cb6febdfd1ba8d", 882 | "sha256:74137626a64a99c8eb6ae5832d99b3bdd7d29a3850fe2aa80a4126b2a7d949ab" 883 | ], 884 | "markers": "python_version >= '3.6'", 885 | "version": "==0.18.1" 886 | }, 887 | "jinja2": { 888 | "hashes": [ 889 | "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852", 890 | "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61" 891 | ], 892 | "markers": "python_version >= '3.7'", 893 | "version": "==3.1.2" 894 | }, 895 | "json5": { 896 | "hashes": [ 897 | "sha256:993189671e7412e9cdd8be8dc61cf402e8e579b35f1d1bb20ae6b09baa78bbce", 898 | "sha256:ad9f048c5b5a4c3802524474ce40a622fae789860a86f10cc4f7e5f9cf9b46ab" 899 | ], 900 | "version": "==0.9.10" 901 | }, 902 | "jsonschema": { 903 | "hashes": [ 904 | "sha256:15062f4cc6f591400cd528d2c355f2cfa6a57e44c820dc783aee5e23d36a831f", 905 | "sha256:9892b8d630a82990521a9ca630d3446bd316b5ad54dbe981338802787f3e0d2d" 906 | ], 907 | "markers": "python_version >= '3.7'", 908 | "version": "==4.14.0" 909 | }, 910 | "jupyter-client": { 911 | "hashes": [ 912 | "sha256:17d74b0d0a7b24f1c8c527b24fcf4607c56bee542ffe8e3418e50b21e514b621", 913 | "sha256:aa9a6c32054b290374f95f73bb0cae91455c58dfb84f65c8591912b8f65e6d56" 914 | ], 915 | "markers": "python_version >= '3.7'", 916 | "version": "==7.3.4" 917 | }, 918 | "jupyter-core": { 919 | "hashes": [ 920 | "sha256:2e5f244d44894c4154d06aeae3419dd7f1b0ef4494dc5584929b398c61cfd314", 921 | "sha256:715e22bb6cc7db3718fddfac1f69f1c7e899ca00e42bdfd4bf3705452b9fd84a" 922 | ], 923 | "markers": "python_version >= '3.7'", 924 | "version": "==4.11.1" 925 | }, 926 | "jupyterlab": { 927 | "hashes": [ 928 | "sha256:2188a9bcaaf0b6a68ff9098a481f37ece8231634b862fd3c9adedc466aac79f2", 929 | "sha256:e458312c9afe0386399be1cc808178ef7014ef88c562084856b6830e9602afa1" 930 | ], 931 | "index": "pypi", 932 | "version": "==1.2.3" 933 | }, 934 | "jupyterlab-pygments": { 935 | "hashes": [ 936 | "sha256:2405800db07c9f770863bcf8049a529c3dd4d3e28536638bd7c1c01d2748309f", 937 | "sha256:7405d7fde60819d905a9fa8ce89e4cd830e318cdad22a0030f7a901da705585d" 938 | ], 939 | "markers": "python_version >= '3.7'", 940 | "version": "==0.2.2" 941 | }, 942 | "jupyterlab-server": { 943 | "hashes": [ 944 | "sha256:13dc66acd6aee04907af015e840d36dc51380af2c03bdaccc3d4de525c29b9e6", 945 | "sha256:2096f7a7797997727176c599779ab41f0f10ec8ad50070ca33ae4b3e109294ff" 946 | ], 947 | "markers": "python_version >= '3.5'", 948 | "version": "==1.0.9" 949 | }, 950 | "lazy-object-proxy": { 951 | "hashes": [ 952 | "sha256:0c4b206227a8097f05c4dbdd323c50edf81f15db3b8dc064d08c62d37e1a504d", 953 | "sha256:194d092e6f246b906e8f70884e620e459fc54db3259e60cf69a4d66c3fda3449", 954 | "sha256:1be7e4c9f96948003609aa6c974ae59830a6baecc5376c25c92d7d697e684c08", 955 | "sha256:4677f594e474c91da97f489fea5b7daa17b5517190899cf213697e48d3902f5a", 956 | "sha256:48dab84ebd4831077b150572aec802f303117c8cc5c871e182447281ebf3ac50", 957 | "sha256:5541cada25cd173702dbd99f8e22434105456314462326f06dba3e180f203dfd", 958 | "sha256:59f79fef100b09564bc2df42ea2d8d21a64fdcda64979c0fa3db7bdaabaf6239", 959 | "sha256:8d859b89baf8ef7f8bc6b00aa20316483d67f0b1cbf422f5b4dc56701c8f2ffb", 960 | "sha256:9254f4358b9b541e3441b007a0ea0764b9d056afdeafc1a5569eee1cc6c1b9ea", 961 | "sha256:9651375199045a358eb6741df3e02a651e0330be090b3bc79f6d0de31a80ec3e", 962 | "sha256:97bb5884f6f1cdce0099f86b907aa41c970c3c672ac8b9c8352789e103cf3156", 963 | "sha256:9b15f3f4c0f35727d3a0fba4b770b3c4ebbb1fa907dbcc046a1d2799f3edd142", 964 | "sha256:a2238e9d1bb71a56cd710611a1614d1194dc10a175c1e08d75e1a7bcc250d442", 965 | "sha256:a6ae12d08c0bf9909ce12385803a543bfe99b95fe01e752536a60af2b7797c62", 966 | "sha256:ca0a928a3ddbc5725be2dd1cf895ec0a254798915fb3a36af0964a0a4149e3db", 967 | "sha256:cb2c7c57005a6804ab66f106ceb8482da55f5314b7fcb06551db1edae4ad1531", 968 | "sha256:d74bb8693bf9cf75ac3b47a54d716bbb1a92648d5f781fc799347cfc95952383", 969 | "sha256:d945239a5639b3ff35b70a88c5f2f491913eb94871780ebfabb2568bd58afc5a", 970 | "sha256:eba7011090323c1dadf18b3b689845fd96a61ba0a1dfbd7f24b921398affc357", 971 | "sha256:efa1909120ce98bbb3777e8b6f92237f5d5c8ea6758efea36a473e1d38f7d3e4", 972 | "sha256:f3900e8a5de27447acbf900b4750b0ddfd7ec1ea7fbaf11dfa911141bc522af0" 973 | ], 974 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", 975 | "version": "==1.4.3" 976 | }, 977 | "lxml": { 978 | "hashes": [ 979 | "sha256:04da965dfebb5dac2619cb90fcf93efdb35b3c6994fea58a157a834f2f94b318", 980 | "sha256:0538747a9d7827ce3e16a8fdd201a99e661c7dee3c96c885d8ecba3c35d1032c", 981 | "sha256:0645e934e940107e2fdbe7c5b6fb8ec6232444260752598bc4d09511bd056c0b", 982 | "sha256:079b68f197c796e42aa80b1f739f058dcee796dc725cc9a1be0cdb08fc45b000", 983 | "sha256:0f3f0059891d3254c7b5fb935330d6db38d6519ecd238ca4fce93c234b4a0f73", 984 | "sha256:10d2017f9150248563bb579cd0d07c61c58da85c922b780060dcc9a3aa9f432d", 985 | "sha256:1355755b62c28950f9ce123c7a41460ed9743c699905cbe664a5bcc5c9c7c7fb", 986 | "sha256:13c90064b224e10c14dcdf8086688d3f0e612db53766e7478d7754703295c7c8", 987 | "sha256:1423631e3d51008871299525b541413c9b6c6423593e89f9c4cfbe8460afc0a2", 988 | "sha256:1436cf0063bba7888e43f1ba8d58824f085410ea2025befe81150aceb123e345", 989 | "sha256:1a7c59c6ffd6ef5db362b798f350e24ab2cfa5700d53ac6681918f314a4d3b94", 990 | "sha256:1e1cf47774373777936c5aabad489fef7b1c087dcd1f426b621fda9dcc12994e", 991 | "sha256:206a51077773c6c5d2ce1991327cda719063a47adc02bd703c56a662cdb6c58b", 992 | "sha256:21fb3d24ab430fc538a96e9fbb9b150029914805d551deeac7d7822f64631dfc", 993 | "sha256:27e590352c76156f50f538dbcebd1925317a0f70540f7dc8c97d2931c595783a", 994 | "sha256:287605bede6bd36e930577c5925fcea17cb30453d96a7b4c63c14a257118dbb9", 995 | "sha256:2aaf6a0a6465d39b5ca69688fce82d20088c1838534982996ec46633dc7ad6cc", 996 | "sha256:32a73c53783becdb7eaf75a2a1525ea8e49379fb7248c3eeefb9412123536387", 997 | "sha256:41fb58868b816c202e8881fd0f179a4644ce6e7cbbb248ef0283a34b73ec73bb", 998 | "sha256:4780677767dd52b99f0af1f123bc2c22873d30b474aa0e2fc3fe5e02217687c7", 999 | "sha256:4878e667ebabe9b65e785ac8da4d48886fe81193a84bbe49f12acff8f7a383a4", 1000 | "sha256:487c8e61d7acc50b8be82bda8c8d21d20e133c3cbf41bd8ad7eb1aaeb3f07c97", 1001 | "sha256:4beea0f31491bc086991b97517b9683e5cfb369205dac0148ef685ac12a20a67", 1002 | "sha256:4cfbe42c686f33944e12f45a27d25a492cc0e43e1dc1da5d6a87cbcaf2e95627", 1003 | "sha256:4d5bae0a37af799207140652a700f21a85946f107a199bcb06720b13a4f1f0b7", 1004 | "sha256:4e285b5f2bf321fc0857b491b5028c5f276ec0c873b985d58d7748ece1d770dd", 1005 | "sha256:57e4d637258703d14171b54203fd6822fda218c6c2658a7d30816b10995f29f3", 1006 | "sha256:5974895115737a74a00b321e339b9c3f45c20275d226398ae79ac008d908bff7", 1007 | "sha256:5ef87fca280fb15342726bd5f980f6faf8b84a5287fcc2d4962ea8af88b35130", 1008 | "sha256:603a464c2e67d8a546ddaa206d98e3246e5db05594b97db844c2f0a1af37cf5b", 1009 | "sha256:6653071f4f9bac46fbc30f3c7838b0e9063ee335908c5d61fb7a4a86c8fd2036", 1010 | "sha256:6ca2264f341dd81e41f3fffecec6e446aa2121e0b8d026fb5130e02de1402785", 1011 | "sha256:6d279033bf614953c3fc4a0aa9ac33a21e8044ca72d4fa8b9273fe75359d5cca", 1012 | "sha256:6d949f53ad4fc7cf02c44d6678e7ff05ec5f5552b235b9e136bd52e9bf730b91", 1013 | "sha256:6daa662aba22ef3258934105be2dd9afa5bb45748f4f702a3b39a5bf53a1f4dc", 1014 | "sha256:6eafc048ea3f1b3c136c71a86db393be36b5b3d9c87b1c25204e7d397cee9536", 1015 | "sha256:830c88747dce8a3e7525defa68afd742b4580df6aa2fdd6f0855481e3994d391", 1016 | "sha256:86e92728ef3fc842c50a5cb1d5ba2bc66db7da08a7af53fb3da79e202d1b2cd3", 1017 | "sha256:8caf4d16b31961e964c62194ea3e26a0e9561cdf72eecb1781458b67ec83423d", 1018 | "sha256:8d1a92d8e90b286d491e5626af53afef2ba04da33e82e30744795c71880eaa21", 1019 | "sha256:8f0a4d179c9a941eb80c3a63cdb495e539e064f8054230844dcf2fcb812b71d3", 1020 | "sha256:9232b09f5efee6a495a99ae6824881940d6447debe272ea400c02e3b68aad85d", 1021 | "sha256:927a9dd016d6033bc12e0bf5dee1dde140235fc8d0d51099353c76081c03dc29", 1022 | "sha256:93e414e3206779ef41e5ff2448067213febf260ba747fc65389a3ddaa3fb8715", 1023 | "sha256:98cafc618614d72b02185ac583c6f7796202062c41d2eeecdf07820bad3295ed", 1024 | "sha256:9c3a88d20e4fe4a2a4a84bf439a5ac9c9aba400b85244c63a1ab7088f85d9d25", 1025 | "sha256:9f36de4cd0c262dd9927886cc2305aa3f2210db437aa4fed3fb4940b8bf4592c", 1026 | "sha256:a60f90bba4c37962cbf210f0188ecca87daafdf60271f4c6948606e4dabf8785", 1027 | "sha256:a614e4afed58c14254e67862456d212c4dcceebab2eaa44d627c2ca04bf86837", 1028 | "sha256:ae06c1e4bc60ee076292e582a7512f304abdf6c70db59b56745cca1684f875a4", 1029 | "sha256:b122a188cd292c4d2fcd78d04f863b789ef43aa129b233d7c9004de08693728b", 1030 | "sha256:b570da8cd0012f4af9fa76a5635cd31f707473e65a5a335b186069d5c7121ff2", 1031 | "sha256:bcaa1c495ce623966d9fc8a187da80082334236a2a1c7e141763ffaf7a405067", 1032 | "sha256:bd34f6d1810d9354dc7e35158aa6cc33456be7706df4420819af6ed966e85448", 1033 | "sha256:be9eb06489bc975c38706902cbc6888f39e946b81383abc2838d186f0e8b6a9d", 1034 | "sha256:c4b2e0559b68455c085fb0f6178e9752c4be3bba104d6e881eb5573b399d1eb2", 1035 | "sha256:c62e8dd9754b7debda0c5ba59d34509c4688f853588d75b53c3791983faa96fc", 1036 | "sha256:c852b1530083a620cb0de5f3cd6826f19862bafeaf77586f1aef326e49d95f0c", 1037 | "sha256:d9fc0bf3ff86c17348dfc5d322f627d78273eba545db865c3cd14b3f19e57fa5", 1038 | "sha256:dad7b164905d3e534883281c050180afcf1e230c3d4a54e8038aa5cfcf312b84", 1039 | "sha256:e5f66bdf0976ec667fc4594d2812a00b07ed14d1b44259d19a41ae3fff99f2b8", 1040 | "sha256:e8f0c9d65da595cfe91713bc1222af9ecabd37971762cb830dea2fc3b3bb2acf", 1041 | "sha256:edffbe3c510d8f4bf8640e02ca019e48a9b72357318383ca60e3330c23aaffc7", 1042 | "sha256:eea5d6443b093e1545ad0210e6cf27f920482bfcf5c77cdc8596aec73523bb7e", 1043 | "sha256:ef72013e20dd5ba86a8ae1aed7f56f31d3374189aa8b433e7b12ad182c0d2dfb", 1044 | "sha256:f05251bbc2145349b8d0b77c0d4e5f3b228418807b1ee27cefb11f69ed3d233b", 1045 | "sha256:f1be258c4d3dc609e654a1dc59d37b17d7fef05df912c01fc2e15eb43a9735f3", 1046 | "sha256:f9ced82717c7ec65a67667bb05865ffe38af0e835cdd78728f1209c8fffe0cad", 1047 | "sha256:fe17d10b97fdf58155f858606bddb4e037b805a60ae023c009f760d8361a4eb8", 1048 | "sha256:fe749b052bb7233fe5d072fcb549221a8cb1a16725c47c37e42b0b9cb3ff2c3f" 1049 | ], 1050 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", 1051 | "version": "==4.9.1" 1052 | }, 1053 | "markupsafe": { 1054 | "hashes": [ 1055 | "sha256:0212a68688482dc52b2d45013df70d169f542b7394fc744c02a57374a4207003", 1056 | "sha256:089cf3dbf0cd6c100f02945abeb18484bd1ee57a079aefd52cffd17fba910b88", 1057 | "sha256:10c1bfff05d95783da83491be968e8fe789263689c02724e0c691933c52994f5", 1058 | "sha256:33b74d289bd2f5e527beadcaa3f401e0df0a89927c1559c8566c066fa4248ab7", 1059 | "sha256:3799351e2336dc91ea70b034983ee71cf2f9533cdff7c14c90ea126bfd95d65a", 1060 | "sha256:3ce11ee3f23f79dbd06fb3d63e2f6af7b12db1d46932fe7bd8afa259a5996603", 1061 | "sha256:421be9fbf0ffe9ffd7a378aafebbf6f4602d564d34be190fc19a193232fd12b1", 1062 | "sha256:43093fb83d8343aac0b1baa75516da6092f58f41200907ef92448ecab8825135", 1063 | "sha256:46d00d6cfecdde84d40e572d63735ef81423ad31184100411e6e3388d405e247", 1064 | "sha256:4a33dea2b688b3190ee12bd7cfa29d39c9ed176bda40bfa11099a3ce5d3a7ac6", 1065 | "sha256:4b9fe39a2ccc108a4accc2676e77da025ce383c108593d65cc909add5c3bd601", 1066 | "sha256:56442863ed2b06d19c37f94d999035e15ee982988920e12a5b4ba29b62ad1f77", 1067 | "sha256:671cd1187ed5e62818414afe79ed29da836dde67166a9fac6d435873c44fdd02", 1068 | "sha256:694deca8d702d5db21ec83983ce0bb4b26a578e71fbdbd4fdcd387daa90e4d5e", 1069 | "sha256:6a074d34ee7a5ce3effbc526b7083ec9731bb3cbf921bbe1d3005d4d2bdb3a63", 1070 | "sha256:6d0072fea50feec76a4c418096652f2c3238eaa014b2f94aeb1d56a66b41403f", 1071 | "sha256:6fbf47b5d3728c6aea2abb0589b5d30459e369baa772e0f37a0320185e87c980", 1072 | "sha256:7f91197cc9e48f989d12e4e6fbc46495c446636dfc81b9ccf50bb0ec74b91d4b", 1073 | "sha256:86b1f75c4e7c2ac2ccdaec2b9022845dbb81880ca318bb7a0a01fbf7813e3812", 1074 | "sha256:8dc1c72a69aa7e082593c4a203dcf94ddb74bb5c8a731e4e1eb68d031e8498ff", 1075 | "sha256:8e3dcf21f367459434c18e71b2a9532d96547aef8a871872a5bd69a715c15f96", 1076 | "sha256:8e576a51ad59e4bfaac456023a78f6b5e6e7651dcd383bcc3e18d06f9b55d6d1", 1077 | "sha256:96e37a3dc86e80bf81758c152fe66dbf60ed5eca3d26305edf01892257049925", 1078 | "sha256:97a68e6ada378df82bc9f16b800ab77cbf4b2fada0081794318520138c088e4a", 1079 | "sha256:99a2a507ed3ac881b975a2976d59f38c19386d128e7a9a18b7df6fff1fd4c1d6", 1080 | "sha256:a49907dd8420c5685cfa064a1335b6754b74541bbb3706c259c02ed65b644b3e", 1081 | "sha256:b09bf97215625a311f669476f44b8b318b075847b49316d3e28c08e41a7a573f", 1082 | "sha256:b7bd98b796e2b6553da7225aeb61f447f80a1ca64f41d83612e6139ca5213aa4", 1083 | "sha256:b87db4360013327109564f0e591bd2a3b318547bcef31b468a92ee504d07ae4f", 1084 | "sha256:bcb3ed405ed3222f9904899563d6fc492ff75cce56cba05e32eff40e6acbeaa3", 1085 | "sha256:d4306c36ca495956b6d568d276ac11fdd9c30a36f1b6eb928070dc5360b22e1c", 1086 | "sha256:d5ee4f386140395a2c818d149221149c54849dfcfcb9f1debfe07a8b8bd63f9a", 1087 | "sha256:dda30ba7e87fbbb7eab1ec9f58678558fd9a6b8b853530e176eabd064da81417", 1088 | "sha256:e04e26803c9c3851c931eac40c695602c6295b8d432cbe78609649ad9bd2da8a", 1089 | "sha256:e1c0b87e09fa55a220f058d1d49d3fb8df88fbfab58558f1198e08c1e1de842a", 1090 | "sha256:e72591e9ecd94d7feb70c1cbd7be7b3ebea3f548870aa91e2732960fa4d57a37", 1091 | "sha256:e8c843bbcda3a2f1e3c2ab25913c80a3c5376cd00c6e8c4a86a89a28c8dc5452", 1092 | "sha256:efc1913fd2ca4f334418481c7e595c00aad186563bbc1ec76067848c7ca0a933", 1093 | "sha256:f121a1420d4e173a5d96e47e9a0c0dcff965afdf1626d28de1460815f7c4ee7a", 1094 | "sha256:fc7b548b17d238737688817ab67deebb30e8073c95749d55538ed473130ec0c7" 1095 | ], 1096 | "markers": "python_version >= '3.7'", 1097 | "version": "==2.1.1" 1098 | }, 1099 | "mccabe": { 1100 | "hashes": [ 1101 | "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42", 1102 | "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f" 1103 | ], 1104 | "version": "==0.6.1" 1105 | }, 1106 | "mistune": { 1107 | "hashes": [ 1108 | "sha256:59a3429db53c50b5c6bcc8a07f8848cb00d7dc8bdb431a4ab41920d201d4756e", 1109 | "sha256:88a1051873018da288eee8538d476dffe1262495144b33ecb586c4ab266bb8d4" 1110 | ], 1111 | "version": "==0.8.4" 1112 | }, 1113 | "nbclient": { 1114 | "hashes": [ 1115 | "sha256:3c5a7fc6bb74be7d31edf2817b44501a65caa99e5e56363bc359649b97cd24b9", 1116 | "sha256:d4e32459e7e96783285d1daac92dc2c60ee7b8a82b7cf7d2e55be9d89d7ac463" 1117 | ], 1118 | "markers": "python_version >= '3.7'", 1119 | "version": "==0.6.7" 1120 | }, 1121 | "nbconvert": { 1122 | "hashes": [ 1123 | "sha256:0a3e224ee753ac4dceeb0257c4a315c069dcc6f9f4ae0ad15c5ea84713d15e28", 1124 | "sha256:2c01f3f518fee736c3d3f999dd20e0a16febba17a0d60a3b0fd28fbdec14115d" 1125 | ], 1126 | "index": "pypi", 1127 | "version": "==6.5.1" 1128 | }, 1129 | "nbformat": { 1130 | "hashes": [ 1131 | "sha256:0d6072aaec95dddc39735c144ee8bbc6589c383fb462e4058abc855348152dad", 1132 | "sha256:44ba5ca6acb80c5d5a500f1e5b83ede8cbe364d5a495c4c8cf60aaf1ba656501" 1133 | ], 1134 | "markers": "python_version >= '3.7'", 1135 | "version": "==5.4.0" 1136 | }, 1137 | "nest-asyncio": { 1138 | "hashes": [ 1139 | "sha256:b98e3ec1b246135e4642eceffa5a6c23a3ab12c82ff816a92c612d68205813b2", 1140 | "sha256:e442291cd942698be619823a17a86a5759eabe1f8613084790de189fe9e16d65" 1141 | ], 1142 | "markers": "python_version >= '3.5'", 1143 | "version": "==1.5.5" 1144 | }, 1145 | "notebook": { 1146 | "hashes": [ 1147 | "sha256:6268c9ec9048cff7a45405c990c29ac9ca40b0bc3ec29263d218c5e01f2b4e86", 1148 | "sha256:8c07a3bb7640e371f8a609bdbb2366a1976c6a2589da8ef917f761a61e3ad8b1" 1149 | ], 1150 | "markers": "python_version >= '3.7'", 1151 | "version": "==6.4.12" 1152 | }, 1153 | "packaging": { 1154 | "hashes": [ 1155 | "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb", 1156 | "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522" 1157 | ], 1158 | "markers": "python_version >= '3.6'", 1159 | "version": "==21.3" 1160 | }, 1161 | "pandocfilters": { 1162 | "hashes": [ 1163 | "sha256:0b679503337d233b4339a817bfc8c50064e2eff681314376a47cb582305a7a38", 1164 | "sha256:33aae3f25fd1a026079f5d27bdd52496f0e0803b3469282162bafdcbdf6ef14f" 1165 | ], 1166 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", 1167 | "version": "==1.5.0" 1168 | }, 1169 | "parso": { 1170 | "hashes": [ 1171 | "sha256:8c07be290bb59f03588915921e29e8a50002acaf2cdc5fa0e0114f91709fafa0", 1172 | "sha256:c001d4636cd3aecdaf33cbb40aebb59b094be2a74c556778ef5576c175e19e75" 1173 | ], 1174 | "markers": "python_version >= '3.6'", 1175 | "version": "==0.8.3" 1176 | }, 1177 | "pexpect": { 1178 | "hashes": [ 1179 | "sha256:0b48a55dcb3c05f3329815901ea4fc1537514d6ba867a152b581d69ae3710937", 1180 | "sha256:fc65a43959d153d0114afe13997d439c22823a27cefceb5ff35c2178c6784c0c" 1181 | ], 1182 | "markers": "sys_platform != 'win32'", 1183 | "version": "==4.8.0" 1184 | }, 1185 | "pickleshare": { 1186 | "hashes": [ 1187 | "sha256:87683d47965c1da65cdacaf31c8441d12b8044cdec9aca500cd78fc2c683afca", 1188 | "sha256:9649af414d74d4df115d5d718f82acb59c9d418196b7b4290ed47a12ce62df56" 1189 | ], 1190 | "version": "==0.7.5" 1191 | }, 1192 | "pkgutil-resolve-name": { 1193 | "hashes": [ 1194 | "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174", 1195 | "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e" 1196 | ], 1197 | "markers": "python_version < '3.9'", 1198 | "version": "==1.3.10" 1199 | }, 1200 | "prometheus-client": { 1201 | "hashes": [ 1202 | "sha256:522fded625282822a89e2773452f42df14b5a8e84a86433e3f8a189c1d54dc01", 1203 | "sha256:5459c427624961076277fdc6dc50540e2bacb98eebde99886e59ec55ed92093a" 1204 | ], 1205 | "markers": "python_version >= '3.6'", 1206 | "version": "==0.14.1" 1207 | }, 1208 | "prompt-toolkit": { 1209 | "hashes": [ 1210 | "sha256:46642344ce457641f28fc9d1c9ca939b63dadf8df128b86f1b9860e59c73a5e4", 1211 | "sha256:e7f8af9e3d70f514373bf41aa51bc33af12a6db3f71461ea47fea985defb2c31", 1212 | "sha256:f15af68f66e664eaa559d4ac8a928111eebd5feda0c11738b5998045224829db" 1213 | ], 1214 | "version": "==2.0.10" 1215 | }, 1216 | "ptyprocess": { 1217 | "hashes": [ 1218 | "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", 1219 | "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220" 1220 | ], 1221 | "markers": "os_name != 'nt'", 1222 | "version": "==0.7.0" 1223 | }, 1224 | "pycodestyle": { 1225 | "hashes": [ 1226 | "sha256:95a2219d12372f05704562a14ec30bc76b05a5b297b21a5dfe3f6fac3491ae56", 1227 | "sha256:e40a936c9a450ad81df37f549d676d127b1b66000a6c500caa2b085bc0ca976c" 1228 | ], 1229 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", 1230 | "version": "==2.5.0" 1231 | }, 1232 | "pycparser": { 1233 | "hashes": [ 1234 | "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9", 1235 | "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206" 1236 | ], 1237 | "version": "==2.21" 1238 | }, 1239 | "pyflakes": { 1240 | "hashes": [ 1241 | "sha256:17dbeb2e3f4d772725c777fabc446d5634d1038f234e77343108ce445ea69ce0", 1242 | "sha256:d976835886f8c5b31d47970ed689944a0262b5f3afa00a5a7b4dc81e5449f8a2" 1243 | ], 1244 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", 1245 | "version": "==2.1.1" 1246 | }, 1247 | "pygments": { 1248 | "hashes": [ 1249 | "sha256:56a8508ae95f98e2b9bdf93a6be5ae3f7d8af858b43e02c5a2ff083726be40c1", 1250 | "sha256:f643f331ab57ba3c9d89212ee4a2dabc6e94f117cf4eefde99a0574720d14c42" 1251 | ], 1252 | "markers": "python_version >= '3.6'", 1253 | "version": "==2.13.0" 1254 | }, 1255 | "pylint": { 1256 | "hashes": [ 1257 | "sha256:3db5468ad013380e987410a8d6956226963aed94ecb5f9d3a28acca6d9ac36cd", 1258 | "sha256:886e6afc935ea2590b462664b161ca9a5e40168ea99e5300935f6591ad467df4" 1259 | ], 1260 | "index": "pypi", 1261 | "version": "==2.4.4" 1262 | }, 1263 | "pyparsing": { 1264 | "hashes": [ 1265 | "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb", 1266 | "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc" 1267 | ], 1268 | "markers": "python_full_version >= '3.6.8'", 1269 | "version": "==3.0.9" 1270 | }, 1271 | "pyrsistent": { 1272 | "hashes": [ 1273 | "sha256:0e3e1fcc45199df76053026a51cc59ab2ea3fc7c094c6627e93b7b44cdae2c8c", 1274 | "sha256:1b34eedd6812bf4d33814fca1b66005805d3640ce53140ab8bbb1e2651b0d9bc", 1275 | "sha256:4ed6784ceac462a7d6fcb7e9b663e93b9a6fb373b7f43594f9ff68875788e01e", 1276 | "sha256:5d45866ececf4a5fff8742c25722da6d4c9e180daa7b405dc0a2a2790d668c26", 1277 | "sha256:636ce2dc235046ccd3d8c56a7ad54e99d5c1cd0ef07d9ae847306c91d11b5fec", 1278 | "sha256:6455fc599df93d1f60e1c5c4fe471499f08d190d57eca040c0ea182301321286", 1279 | "sha256:6bc66318fb7ee012071b2792024564973ecc80e9522842eb4e17743604b5e045", 1280 | "sha256:7bfe2388663fd18bd8ce7db2c91c7400bf3e1a9e8bd7d63bf7e77d39051b85ec", 1281 | "sha256:7ec335fc998faa4febe75cc5268a9eac0478b3f681602c1f27befaf2a1abe1d8", 1282 | "sha256:914474c9f1d93080338ace89cb2acee74f4f666fb0424896fcfb8d86058bf17c", 1283 | "sha256:b568f35ad53a7b07ed9b1b2bae09eb15cdd671a5ba5d2c66caee40dbf91c68ca", 1284 | "sha256:cdfd2c361b8a8e5d9499b9082b501c452ade8bbf42aef97ea04854f4a3f43b22", 1285 | "sha256:d1b96547410f76078eaf66d282ddca2e4baae8964364abb4f4dcdde855cd123a", 1286 | "sha256:d4d61f8b993a7255ba714df3aca52700f8125289f84f704cf80916517c46eb96", 1287 | "sha256:d7a096646eab884bf8bed965bad63ea327e0d0c38989fc83c5ea7b8a87037bfc", 1288 | "sha256:df46c854f490f81210870e509818b729db4488e1f30f2a1ce1698b2295a878d1", 1289 | "sha256:e24a828f57e0c337c8d8bb9f6b12f09dfdf0273da25fda9e314f0b684b415a07", 1290 | "sha256:e4f3149fd5eb9b285d6bfb54d2e5173f6a116fe19172686797c056672689daf6", 1291 | "sha256:e92a52c166426efbe0d1ec1332ee9119b6d32fc1f0bbfd55d5c1088070e7fc1b", 1292 | "sha256:f87cc2863ef33c709e237d4b5f4502a62a00fab450c9e020892e8e2ede5847f5", 1293 | "sha256:fd8da6d0124efa2f67d86fa70c851022f87c98e205f0594e1fae044e7119a5a6" 1294 | ], 1295 | "markers": "python_version >= '3.7'", 1296 | "version": "==0.18.1" 1297 | }, 1298 | "python-dateutil": { 1299 | "hashes": [ 1300 | "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86", 1301 | "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9" 1302 | ], 1303 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", 1304 | "version": "==2.8.2" 1305 | }, 1306 | "pyzmq": { 1307 | "hashes": [ 1308 | "sha256:022cf5ea7bcaa8a06a03c2706e0ae66904b6138b2155577cd34c64bc7cc637ab", 1309 | "sha256:044447ae4b2016a6b8697571fd633f799f860b19b76c4a2fd9b1140d52ee6745", 1310 | "sha256:07ed8aaf7ffe150af873269690cc654ffeca7491f62aae0f3821baa181f8d5fe", 1311 | "sha256:10d1910ec381b851aeb024a042a13db178cb1edf125e76a4e9d2548ad103aadb", 1312 | "sha256:12e62ff0d5223ec09b597ab6d73858b9f64a51221399f3cb08aa495e1dff7935", 1313 | "sha256:1f368a82b29f80071781b20663c0fc0c8f6b13273f9f5abe1526af939534f90f", 1314 | "sha256:20bafc4095eab00f41a510579363a3f5e1f5c69d7ee10f1d88895c4df0259183", 1315 | "sha256:2141e6798d5981be04c08996d27962086a1aa3ea536fe9cf7e89817fd4523f86", 1316 | "sha256:23e708fbfdf4ee3107422b69ca65da1b9f056b431fc0888096a8c1d6cd908e8f", 1317 | "sha256:28dbdb90b2f6b131f8f10e6081012e4e25234213433420e67e0c1162de537113", 1318 | "sha256:29b74774a0bfd3c4d98ac853f0bdca55bd9ec89d5b0def5486407cca54472ef8", 1319 | "sha256:2b381aa867ece7d0a82f30a0c7f3d4387b7cf2e0697e33efaa5bed6c5784abcd", 1320 | "sha256:2f67b63f53c6994d601404fd1a329e6d940ac3dd1d92946a93b2b9c70df67b9f", 1321 | "sha256:342ca3077f47ec2ee41b9825142b614e03e026347167cbc72a59b618c4f6106c", 1322 | "sha256:35e635343ff367f697d00fa1484262bb68e36bc74c9b80737eac5a1e04c4e1b1", 1323 | "sha256:385609812eafd9970c3752c51f2f6c4f224807e3e441bcfd8c8273877d00c8a8", 1324 | "sha256:38e106b64bad744fe469dc3dd864f2764d66399178c1bf39d45294cc7980f14f", 1325 | "sha256:39dd252b683816935702825e5bf775df16090619ced9bb4ba68c2d0b6f0c9b18", 1326 | "sha256:407f909c4e8fde62fbdad9ebd448319792258cc0550c2815567a4d9d8d9e6d18", 1327 | "sha256:415ff62ac525d9add1e3550430a09b9928d2d24a20cc4ce809e67caac41219ab", 1328 | "sha256:4805af9614b0b41b7e57d17673459facf85604dac502a5a9244f6e8c9a4de658", 1329 | "sha256:48400b96788cdaca647021bf19a9cd668384f46e4d9c55cf045bdd17f65299c8", 1330 | "sha256:49d30ba7074f469e8167917abf9eb854c6503ae10153034a6d4df33618f1db5f", 1331 | "sha256:4bb798bef181648827019001f6be43e1c48b34b477763b37a8d27d8c06d197b8", 1332 | "sha256:4d6f110c56f7d5b4d64dde3a382ae61b6d48174e30742859d8e971b18b6c9e5c", 1333 | "sha256:55568a020ad2cae9ae36da6058e7ca332a56df968f601cbdb7cf6efb2a77579a", 1334 | "sha256:565bd5ab81f6964fc4067ccf2e00877ad0fa917308975694bbb54378389215f8", 1335 | "sha256:5c558b50402fca1acc94329c5d8f12aa429738904a5cfb32b9ed3c61235221bb", 1336 | "sha256:5e05492be125dce279721d6b54fd1b956546ecc4bcdfcf8e7b4c413bc0874c10", 1337 | "sha256:624fd38071a817644acdae075b92a23ea0bdd126a58148288e8284d23ec361ce", 1338 | "sha256:650389bbfca73955b262b2230423d89992f38ec48033307ae80e700eaa2fbb63", 1339 | "sha256:67975a9e1237b9ccc78f457bef17691bbdd2055a9d26e81ee914ba376846d0ce", 1340 | "sha256:6b1e79bba24f6df1712e3188d5c32c480d8eda03e8ecff44dc8ecb0805fa62f3", 1341 | "sha256:6fd5d0d50cbcf4bc376861529a907bed026a4cbe8c22a500ff8243231ef02433", 1342 | "sha256:71b32a1e827bdcbf73750e60370d3b07685816ff3d8695f450f0f8c3226503f8", 1343 | "sha256:794871988c34727c7f79bdfe2546e6854ae1fa2e1feb382784f23a9c6c63ecb3", 1344 | "sha256:79a87831b47a9f6161ad23fa5e89d5469dc585abc49f90b9b07fea8905ae1234", 1345 | "sha256:7e0113d70b095339e99bb522fe7294f5ae6a7f3b2b8f52f659469a74b5cc7661", 1346 | "sha256:84678153432241bcdca2210cf4ff83560b200556867aea913ffbb960f5d5f340", 1347 | "sha256:8a68f57b7a3f7b6b52ada79876be1efb97c8c0952423436e84d70cc139f16f0d", 1348 | "sha256:8c02a0cd39dc01659b3d6cb70bb3a41aebd9885fd78239acdd8d9c91351c4568", 1349 | "sha256:8c842109d31a9281d678f668629241c405928afbebd913c48a5a8e7aee61f63d", 1350 | "sha256:8dc66f109a245653b19df0f44a5af7a3f14cb8ad6c780ead506158a057bd36ce", 1351 | "sha256:90d88f9d9a2ae6cfb1dc4ea2d1710cdf6456bc1b9a06dd1bb485c5d298f2517e", 1352 | "sha256:9269fbfe3a4eb2009199120861c4571ef1655fdf6951c3e7f233567c94e8c602", 1353 | "sha256:929d548b74c0f82f7f95b54e4a43f9e4ce2523cfb8a54d3f7141e45652304b2a", 1354 | "sha256:99a5a77a10863493a1ee8dece02578c6b32025fb3afff91b40476bc489e81648", 1355 | "sha256:9a39ddb0431a68954bd318b923230fa5b649c9c62b0e8340388820c5f1b15bd2", 1356 | "sha256:9d0ab2936085c85a1fc6f9fd8f89d5235ae99b051e90ec5baa5e73ad44346e1f", 1357 | "sha256:9e5bf6e7239fc9687239de7a283aa8b801ab85371116045b33ae20132a1325d6", 1358 | "sha256:a0f09d85c45f58aa8e715b42f8b26beba68b3b63a8f7049113478aca26efbc30", 1359 | "sha256:a114992a193577cb62233abf8cb2832970f9975805a64740e325d2f895e7f85a", 1360 | "sha256:a3fd44b5046d247e7f0f1660bcafe7b5fb0db55d0934c05dd57dda9e1f823ce7", 1361 | "sha256:ad28ddb40db8e450d7d4bf8a1d765d3f87b63b10e7e9a825a3c130c6371a8c03", 1362 | "sha256:aecd6ceaccc4b594e0092d6513ef3f1c0fa678dd89f86bb8ff1a47014b8fca35", 1363 | "sha256:b815991c7d024bf461f358ad871f2be1135576274caed5749c4828859e40354e", 1364 | "sha256:b861db65f6b8906c8d6db51dde2448f266f0c66bf28db2c37aea50f58a849859", 1365 | "sha256:c3ebf1668664d20c8f7d468955f18379b7d1f7bc8946b13243d050fa3888c7ff", 1366 | "sha256:c56b1a62a1fb87565343c57b6743fd5da6e138b8c6562361d7d9b5ce4acf399a", 1367 | "sha256:c780acddd2934c6831ff832ecbf78a45a7b62d4eb216480f863854a8b7d54fa7", 1368 | "sha256:c890309296f53f9aa32ffcfc51d805705e1982bffd27c9692a8f1e1b8de279f4", 1369 | "sha256:c9cfaf530e6a7ff65f0afe275e99f983f68b54dfb23ea401f0bc297a632766b6", 1370 | "sha256:d904f6595acfaaf99a1a61881fea068500c40374d263e5e073aa4005e5f9c28a", 1371 | "sha256:e06747014a5ad1b28cebf5bc1ddcdaccfb44e9b441d35e6feb1286c8a72e54be", 1372 | "sha256:e1fe30bcd5aea5948c42685fad910cd285eacb2518ea4dc6c170d6b535bee95d", 1373 | "sha256:e753eee6d3b93c5354e8ba0a1d62956ee49355f0a36e00570823ef64e66183f5", 1374 | "sha256:ec9803aca9491fd6f0d853d2a6147f19f8deaaa23b1b713d05c5d09e56ea7142", 1375 | "sha256:efb9e38b2a590282704269585de7eb33bf43dc294cad092e1b172e23d4c217e5", 1376 | "sha256:f07016e3cf088dbfc6e7c5a7b3f540db5c23b0190d539e4fd3e2b5e6beffa4b5", 1377 | "sha256:f392cbea531b7142d1958c0d4a0c9c8d760dc451e5848d8dd3387804d3e3e62c", 1378 | "sha256:f619fd38fc2641abfb53cca719c165182500600b82c695cc548a0f05f764be05", 1379 | "sha256:fefdf9b685fda4141b95ebec975946076a5e0723ff70b037032b2085c5317684", 1380 | "sha256:ffc6b1623d0f9affb351db4ca61f432dca3628a5ee015f9bf2bfbe9c6836881c" 1381 | ], 1382 | "markers": "python_version >= '3.6'", 1383 | "version": "==23.2.1" 1384 | }, 1385 | "send2trash": { 1386 | "hashes": [ 1387 | "sha256:d2c24762fd3759860a0aff155e45871447ea58d2be6bdd39b5c8f966a0c99c2d", 1388 | "sha256:f20eaadfdb517eaca5ce077640cb261c7d2698385a6a0f072a4a5447fd49fa08" 1389 | ], 1390 | "version": "==1.8.0" 1391 | }, 1392 | "setuptools": { 1393 | "hashes": [ 1394 | "sha256:7f4bc85450898a09f76ebf28b72fa25bc7111f6c7d665d514a60bba9c75ef2a9", 1395 | "sha256:a3ca5857c89f82f5c9410e8508cb32f4872a3bafd4aa7ae122a24ca33bccc750" 1396 | ], 1397 | "markers": "python_version >= '3.7'", 1398 | "version": "==65.2.0" 1399 | }, 1400 | "six": { 1401 | "hashes": [ 1402 | "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926", 1403 | "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254" 1404 | ], 1405 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", 1406 | "version": "==1.16.0" 1407 | }, 1408 | "soupsieve": { 1409 | "hashes": [ 1410 | "sha256:3b2503d3c7084a42b1ebd08116e5f81aadfaea95863628c80a3b774a11b7c759", 1411 | "sha256:fc53893b3da2c33de295667a0e19f078c14bf86544af307354de5fcf12a3f30d" 1412 | ], 1413 | "markers": "python_version >= '3.6'", 1414 | "version": "==2.3.2.post1" 1415 | }, 1416 | "terminado": { 1417 | "hashes": [ 1418 | "sha256:0d5f126fbfdb5887b25ae7d9d07b0d716b1cc0ccaacc71c1f3c14d228e065197", 1419 | "sha256:ab4eeedccfcc1e6134bfee86106af90852c69d602884ea3a1e8ca6d4486e9bfe" 1420 | ], 1421 | "markers": "python_version >= '3.7'", 1422 | "version": "==0.15.0" 1423 | }, 1424 | "tinycss2": { 1425 | "hashes": [ 1426 | "sha256:b2e44dd8883c360c35dd0d1b5aad0b610e5156c2cb3b33434634e539ead9d8bf", 1427 | "sha256:fe794ceaadfe3cf3e686b22155d0da5780dd0e273471a51846d0a02bc204fec8" 1428 | ], 1429 | "markers": "python_version >= '3.6'", 1430 | "version": "==1.1.1" 1431 | }, 1432 | "tornado": { 1433 | "hashes": [ 1434 | "sha256:1d54d13ab8414ed44de07efecb97d4ef7c39f7438cf5e976ccd356bebb1b5fca", 1435 | "sha256:20f638fd8cc85f3cbae3c732326e96addff0a15e22d80f049e00121651e82e72", 1436 | "sha256:5c87076709343557ef8032934ce5f637dbb552efa7b21d08e89ae7619ed0eb23", 1437 | "sha256:5f8c52d219d4995388119af7ccaa0bcec289535747620116a58d830e7c25d8a8", 1438 | "sha256:6fdfabffd8dfcb6cf887428849d30cf19a3ea34c2c248461e1f7d718ad30b66b", 1439 | "sha256:87dcafae3e884462f90c90ecc200defe5e580a7fbbb4365eda7c7c1eb809ebc9", 1440 | "sha256:9b630419bde84ec666bfd7ea0a4cb2a8a651c2d5cccdbdd1972a0c859dfc3c13", 1441 | "sha256:b8150f721c101abdef99073bf66d3903e292d851bee51910839831caba341a75", 1442 | "sha256:ba09ef14ca9893954244fd872798b4ccb2367c165946ce2dd7376aebdde8e3ac", 1443 | "sha256:d3a2f5999215a3a06a4fc218026cd84c61b8b2b40ac5296a6db1f1451ef04c1e", 1444 | "sha256:e5f923aa6a47e133d1cf87d60700889d7eae68988704e20c75fb2d65677a8e4b" 1445 | ], 1446 | "markers": "python_version >= '3.7'", 1447 | "version": "==6.2" 1448 | }, 1449 | "traitlets": { 1450 | "hashes": [ 1451 | "sha256:0bb9f1f9f017aa8ec187d8b1b2a7a6626a2a1d877116baba52a129bfa124f8e2", 1452 | "sha256:65fa18961659635933100db8ca120ef6220555286949774b9cfc106f941d1c7a" 1453 | ], 1454 | "markers": "python_version >= '3.7'", 1455 | "version": "==5.3.0" 1456 | }, 1457 | "typed-ast": { 1458 | "hashes": [ 1459 | "sha256:01ae5f73431d21eead5015997ab41afa53aa1fbe252f9da060be5dad2c730ace", 1460 | "sha256:067a74454df670dcaa4e59349a2e5c81e567d8d65458d480a5b3dfecec08c5ff", 1461 | "sha256:0fb71b8c643187d7492c1f8352f2c15b4c4af3f6338f21681d3681b3dc31a266", 1462 | "sha256:1b3ead4a96c9101bef08f9f7d1217c096f31667617b58de957f690c92378b528", 1463 | "sha256:2068531575a125b87a41802130fa7e29f26c09a2833fea68d9a40cf33902eba6", 1464 | "sha256:209596a4ec71d990d71d5e0d312ac935d86930e6eecff6ccc7007fe54d703808", 1465 | "sha256:2c726c276d09fc5c414693a2de063f521052d9ea7c240ce553316f70656c84d4", 1466 | "sha256:398e44cd480f4d2b7ee8d98385ca104e35c81525dd98c519acff1b79bdaac363", 1467 | "sha256:52b1eb8c83f178ab787f3a4283f68258525f8d70f778a2f6dd54d3b5e5fb4341", 1468 | "sha256:5feca99c17af94057417d744607b82dd0a664fd5e4ca98061480fd8b14b18d04", 1469 | "sha256:7538e495704e2ccda9b234b82423a4038f324f3a10c43bc088a1636180f11a41", 1470 | "sha256:760ad187b1041a154f0e4d0f6aae3e40fdb51d6de16e5c99aedadd9246450e9e", 1471 | "sha256:777a26c84bea6cd934422ac2e3b78863a37017618b6e5c08f92ef69853e765d3", 1472 | "sha256:95431a26309a21874005845c21118c83991c63ea800dd44843e42a916aec5899", 1473 | "sha256:9ad2c92ec681e02baf81fdfa056fe0d818645efa9af1f1cd5fd6f1bd2bdfd805", 1474 | "sha256:9c6d1a54552b5330bc657b7ef0eae25d00ba7ffe85d9ea8ae6540d2197a3788c", 1475 | "sha256:aee0c1256be6c07bd3e1263ff920c325b59849dc95392a05f258bb9b259cf39c", 1476 | "sha256:af3d4a73793725138d6b334d9d247ce7e5f084d96284ed23f22ee626a7b88e39", 1477 | "sha256:b36b4f3920103a25e1d5d024d155c504080959582b928e91cb608a65c3a49e1a", 1478 | "sha256:b9574c6f03f685070d859e75c7f9eeca02d6933273b5e69572e5ff9d5e3931c3", 1479 | "sha256:bff6ad71c81b3bba8fa35f0f1921fb24ff4476235a6e94a26ada2e54370e6da7", 1480 | "sha256:c190f0899e9f9f8b6b7863debfb739abcb21a5c054f911ca3596d12b8a4c4c7f", 1481 | "sha256:c907f561b1e83e93fad565bac5ba9c22d96a54e7ea0267c708bffe863cbe4075", 1482 | "sha256:cae53c389825d3b46fb37538441f75d6aecc4174f615d048321b716df2757fb0", 1483 | "sha256:dd4a21253f42b8d2b48410cb31fe501d32f8b9fbeb1f55063ad102fe9c425e40", 1484 | "sha256:dde816ca9dac1d9c01dd504ea5967821606f02e510438120091b84e852367428", 1485 | "sha256:f2362f3cb0f3172c42938946dbc5b7843c2a28aec307c49100c8b38764eb6927", 1486 | "sha256:f328adcfebed9f11301eaedfa48e15bdece9b519fb27e6a8c01aa52a17ec31b3", 1487 | "sha256:f8afcf15cc511ada719a88e013cec87c11aff7b91f019295eb4530f96fe5ef2f", 1488 | "sha256:fb1bbeac803adea29cedd70781399c99138358c26d05fcbd23c13016b7f5ec65" 1489 | ], 1490 | "markers": "python_version < '3.8' and implementation_name == 'cpython'", 1491 | "version": "==1.4.3" 1492 | }, 1493 | "typing-extensions": { 1494 | "hashes": [ 1495 | "sha256:25642c956049920a5aa49edcdd6ab1e06d7e5d467fc00e0506c44ac86fbfca02", 1496 | "sha256:e6d2677a32f47fc7eb2795db1dd15c1f34eff616bcaf2cfb5e997f854fa1c4a6" 1497 | ], 1498 | "markers": "python_version < '3.8'", 1499 | "version": "==4.3.0" 1500 | }, 1501 | "wcwidth": { 1502 | "hashes": [ 1503 | "sha256:beb4802a9cebb9144e99086eff703a642a13d6a0052920003a230f3294bbe784", 1504 | "sha256:c4d647b99872929fdb7bdcaa4fbe7f01413ed3d98077df798530e5b04f116c83" 1505 | ], 1506 | "version": "==0.2.5" 1507 | }, 1508 | "webencodings": { 1509 | "hashes": [ 1510 | "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78", 1511 | "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923" 1512 | ], 1513 | "version": "==0.5.1" 1514 | }, 1515 | "wrapt": { 1516 | "hashes": [ 1517 | "sha256:565a021fd19419476b9362b05eeaa094178de64f8361e44468f9e9d7843901e1" 1518 | ], 1519 | "version": "==1.11.2" 1520 | }, 1521 | "zipp": { 1522 | "hashes": [ 1523 | "sha256:05b45f1ee8f807d0cc928485ca40a07cb491cf092ff587c0df9cb1fd154848d2", 1524 | "sha256:47c40d7fe183a6f21403a199b3e4192cca5774656965b0a4988ad2f8feb5f009" 1525 | ], 1526 | "markers": "python_version >= '3.7'", 1527 | "version": "==3.8.1" 1528 | } 1529 | } 1530 | } 1531 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # srntt-pytorch 2 | A PyTorch implementation of SRNTT[1], which is a novel Reference-based Super-Resolution method proposed in CVPR 2019. 3 | 4 | ## Requirements 5 | ### Python 6 | ```shell 7 | $ python --version 8 | Python 3.7.4 9 | ``` 10 | 11 | ### Packages 12 | Mainly used packages are below: 13 | ``` 14 | - torch 15 | - torchvision 16 | - kornia 17 | - scikit-learn etc., see more in Pipfile 18 | ``` 19 | This Python environment is managed by `pipenv`, so you can create a virtual environmet like below: 20 | ```shell 21 | $ pipenv install 22 | ``` 23 | 24 | ### [WIP] Working with Docker 🐳 25 | #### Build Docker image 26 | ```shell 27 | $ sudo docker build -t srntt-pytorch . 28 | ``` 29 | #### Run Docker container 30 | ```shell 31 | $ sudo docker run --rm --runtime nvidia -it srntt-pytorch /bin/bash 32 | ``` 33 | 34 | ## Training 35 | ### Get started 36 | ```shell 37 | $ sh scripts/train.sh 38 | ``` 39 | More detailed descriptions are provided in the following sections. 40 | 41 | ### Downloading data 42 | You can get data through the following command, which is provided by the author. 43 | ```shell 44 | $ python download_dataset.py --dataset_name DATASE_NAME 45 | ``` 46 | The available dataset names are `{DIV2K, CUFED, CUFED5}`. `DIV2K` and `CUFED` datasets can be used as train data, and `CUFED5` can be used as test data. 47 | The downloaded data will be placed in `./data` directory. 48 | 49 | ### Downloading a pre-trained weight 50 | In the training of SRNTT, the initial weights for content extractor is set as pre-trained SRGAN model. In this implementation, we exploit Modified-SRGAN (MSRGAN) provided by MMSR bacause it is thought as a reliable source. Let's download it as below! 51 | ```shell 52 | $ python download_pretrained_model.py 53 | ``` 54 | 55 | ### Offline texture swapping 56 | SRNTT requires HR-LR image pairs and the corresponding reference(Ref) images, moreover the swapped feature maps culculated from LR-Ref images. 57 | The calculation is time-comsuming and has high comtational costs. 58 | 59 | In general, the swapped feature maps of train data is culculated offline to speed up the training. 60 | The offline calculation will be done by the following command. 61 | ```shell 62 | $ python offline_texture_swapping.py --dataroot DATSET_NAME 63 | ``` 64 | 65 | ### Training SRNTT 66 | All of the preparation was done as above, so let's train the networks. 67 | We recommend to use `--use_weights` option at all times (appeared in Eq. (6)). 68 | ```shell 69 | $ python train.py --use_weights 70 | ``` 71 | 72 | ## Testing 73 | You can test on `CUFED5` dataset as below. 74 | If you have not used `--use_weights` option in the training, you don't have to specify the option. 75 | ```shell 76 | $ python test.py -w ./runs/your/pth/path --use_weights 77 | ``` 78 | 79 | ## Online inference on any images 80 | Now, we support online inference on any images. 81 | The input image you specify will be x4 downscaled by bicubic, and super-resolved with your ref image. 82 | ```shell 83 | $ python online_inference.py -w ./runs/your/pth/path -i ./your/input/image -r ./your/ref/image --use_weights 84 | ``` 85 | 86 | ## Pretrained models 87 | You can get pretrained models from [here](https://drive.google.com/drive/folders/1yrIft2GXcDR-IW_uFLy69caVqEaByFDf?usp=sharing). 88 | 89 | 90 | ## Results 91 | `HR/LR image` means GT/input image. `MSRGAN` is the result of MSRGAN from MMSR. `SR image (HR)` indicates super-resolved image with HR image itself, and `SR image (Lx)` indicates super-resolved one with a similar reference (left bottom) image. 92 | ![comparison_002](./src/comparison_002.png) 93 | ![comparison_065](./src/comparison_065.png) 94 | ![comparison_078](./src/comparison_078.png) 95 | 96 | See the other results and metrics [here](https://drive.google.com/drive/folders/1ZMmlJD4gHVYUCisRr785h_04zMDyTjC-?usp=sharing). 97 | 98 | ## Reference 99 | Thanks you for the following! 100 | ### Papers 101 | 1. Zhang, Zhifei, et al. "Image Super-Resolution by Neural Texture Transfer." Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 2019. 102 | ### Implementations 103 | 1. The original implementation (TF) - https://github.com/ZZUTK/SRNTT 104 | 1. MMSR - https://github.com/open-mmlab/mmsr 105 | -------------------------------------------------------------------------------- /data/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/S-aiueo32/srntt-pytorch/c1ee824a752dc2a8877e928e917050567bddf67d/data/.gitkeep -------------------------------------------------------------------------------- /datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from .basic_dataset import BasicDataset 2 | from .cufed5_dataset import CUFED5Dataset 3 | from .reference_dataset import ReferenceDataset, ReferenceDatasetEval 4 | from .swapping_dataset import SwappingDataset 5 | 6 | __all__ = [ 7 | 'BasicDataset', 8 | 'ReferenceDataset', 9 | 'SwappingDataset', 10 | 'CUFED5Dataset', 11 | 'ReferenceDatasetEval' 12 | ] 13 | -------------------------------------------------------------------------------- /datasets/basic_dataset.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | import functools 3 | 4 | from PIL import Image 5 | from torch.utils.data import Dataset 6 | from torchvision import transforms 7 | from torchvision.transforms import functional as TF 8 | 9 | IMG_EXTENSIONS = [ 10 | '.jpg', '.JPG', '.jpeg', '.JPEG', 11 | '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', 12 | ] 13 | 14 | 15 | def is_image(path: Path): 16 | return path.suffix in IMG_EXTENSIONS 17 | 18 | 19 | def pad(img, scale): 20 | width, height = img.size 21 | pad_h = width % scale 22 | pad_v = height % scale 23 | img = TF.pad( 24 | img, (0, 0, scale - pad_h, scale - pad_v), padding_mode='reflect') 25 | return img 26 | 27 | 28 | class BasicDataset(Dataset): 29 | def __init__(self, data_dir, scale_factor, patch_size=0, mode='train'): 30 | 31 | assert patch_size % scale_factor == 0 32 | assert (mode == 'train' and patch_size != 0) or mode == 'eval' 33 | 34 | if isinstance(data_dir, str): 35 | data_dir = Path(data_dir) 36 | 37 | self.filenames = [f for f in data_dir.glob('*') if is_image(f)] 38 | self.scale_factor = scale_factor 39 | 40 | if mode == 'train': 41 | self.transforms = transforms.Compose([ 42 | transforms.RandomCrop( 43 | patch_size, pad_if_needed=True, padding_mode='reflect'), 44 | transforms.RandomApply([ 45 | functools.partial(TF.rotate, angle=0), 46 | functools.partial(TF.rotate, angle=90), 47 | functools.partial(TF.rotate, angle=180), 48 | functools.partial(TF.rotate, angle=270), 49 | ]), 50 | transforms.RandomHorizontalFlip(), 51 | transforms.RandomVerticalFlip(), 52 | ]) 53 | elif mode == 'eval': 54 | self.filenames.sort() 55 | if patch_size > 0: 56 | self.transforms = transforms.Compose([ 57 | transforms.CenterCrop(patch_size) 58 | ]) 59 | else: 60 | self.transforms = transforms.Compose([ 61 | functools.partial(pad, scale=scale_factor) 62 | ]) 63 | else: 64 | raise NotImplementedError 65 | 66 | def __getitem__(self, index): 67 | filename = self.filenames[index] 68 | img = Image.open(filename).convert('RGB') 69 | img_hr = self.transforms(img) 70 | down_size = [l // self.scale_factor for l in img_hr.size[::-1]] 71 | img_lr = TF.resize(img_hr, down_size, interpolation=Image.BICUBIC) 72 | return {'lr': TF.to_tensor(img_lr) * 2 - 1, 73 | 'hr': TF.to_tensor(img_hr) * 2 - 1, 74 | 'path': filename.stem} 75 | 76 | def __len__(self): 77 | return len(self.filenames) 78 | -------------------------------------------------------------------------------- /datasets/cufed5_dataset.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | from PIL import Image 4 | from torch.utils.data import Dataset 5 | from torchvision import transforms 6 | 7 | 8 | class CUFED5Dataset(Dataset): 9 | """ 10 | Dataset class for CUFED5, which is a dataset provided the author of SRNTT. 11 | """ 12 | 13 | def __init__(self, 14 | dataroot: Path, 15 | scale_factor: int = 4): 16 | 17 | super(CUFED5Dataset, self).__init__() 18 | 19 | self.dataroot = Path(dataroot) 20 | self.filenames = list(set( 21 | [f.stem.split('_')[0] for f in self.dataroot.glob('*.png')] 22 | )) 23 | 24 | self.transforms = transforms.Compose([ 25 | transforms.ToTensor(), 26 | # transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]) 27 | ]) 28 | self.warp = transforms.RandomAffine( 29 | degrees=(10, 30), 30 | translate=(0.25, 0.5), 31 | scale=(1.2, 2.0), 32 | resample=Image.BICUBIC 33 | ) 34 | 35 | def __getitem__(self, index): 36 | def load_ref(f, i, warp=False): 37 | # ref image 38 | img_ref = Image.open(self.dataroot / f'{f}_{i}.png').convert('RGB') 39 | size = (x - (x % 4) for x in img_ref.size) 40 | img_ref = img_ref.resize(size, Image.BICUBIC) # adjustment to x4 41 | if warp: 42 | img_ref = self.warp(img_ref) 43 | 44 | # down-upsampling ref image 45 | size = (x // 4 for x in img_ref.size) 46 | img_ref_blur = img_ref.resize(size, Image.BICUBIC)\ 47 | .resize(img_ref.size, Image.BICUBIC) 48 | 49 | return {'ref': self.transforms(img_ref), 50 | 'ref_blur': self.transforms(img_ref_blur)} 51 | 52 | filename = self.filenames[index] 53 | 54 | # HR image 55 | img_hr = Image.open(self.dataroot / f'{filename}_0.png').convert('RGB') 56 | size = (x - (x % 4) for x in img_hr.size) 57 | img_hr = img_hr.resize(size, Image.BICUBIC) # adjustment to x4 58 | 59 | # LR image 60 | size = (x // 4 for x in img_hr.size) 61 | img_lr = img_hr.resize(size, Image.BICUBIC) 62 | 63 | # for feature swapping 64 | img_in_up = img_lr.resize(img_hr.size, Image.BICUBIC) 65 | ref_dict = {i: load_ref(filename, i) for i in range(6)} 66 | ref_dict.update({6: load_ref(filename, 0, warp=True)}) 67 | 68 | return {'img_hr': self.transforms(img_hr), 69 | 'img_lr': self.transforms(img_lr), 70 | 'img_in_up': self.transforms(img_in_up), 71 | 'ref': ref_dict, 72 | 'filename': filename} 73 | 74 | def __len__(self): 75 | return len(self.filenames) 76 | 77 | 78 | if __name__ == "__main__": 79 | from torch.utils.data import DataLoader 80 | 81 | dataset = CUFED5Dataset( 82 | dataroot='/home/ubuntu/srntt-pytorch/data/CUFED5' 83 | ) 84 | dataloader = DataLoader(dataset) 85 | 86 | for batch in dataloader: 87 | break 88 | -------------------------------------------------------------------------------- /datasets/reference_dataset.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | import random 3 | 4 | import numpy as np 5 | from PIL import Image 6 | import torch 7 | from torch.utils.data import Dataset 8 | from torchvision import transforms 9 | 10 | 11 | class ReferenceDataset(Dataset): 12 | """ 13 | Dataset class for Ref-SR. 14 | """ 15 | 16 | def __init__(self, 17 | files: list, 18 | dataroot: Path, 19 | scale_factor: int = 4): 20 | 21 | super(ReferenceDataset, self).__init__() 22 | 23 | self.filenames = files 24 | self.dataroot = Path(dataroot) 25 | self.input_dir = self.dataroot / 'input' 26 | self.ref_dir = self.dataroot / 'ref' 27 | self.map_dir = self.dataroot / 'map' 28 | 29 | self.transforms = transforms.Compose([ 30 | transforms.ToTensor(), 31 | # transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]) 32 | ]) 33 | 34 | def __getitem__(self, index): 35 | filename = self.filenames[index] 36 | 37 | img_hr = Image.open(self.input_dir / f'{filename}.png').convert('RGB') 38 | img_lr = img_hr.resize((x // 4 for x in img_hr.size), Image.BICUBIC) 39 | 40 | img_hr = self.transforms(img_hr) 41 | img_lr = self.transforms(img_lr) 42 | 43 | with np.load(self.map_dir / f'{filename}.npz') as f: 44 | relu3_1 = torch.tensor(f['relu3_1']) 45 | relu2_1 = torch.tensor(f['relu2_1']) 46 | relu1_1 = torch.tensor(f['relu1_1']) 47 | weights = torch.tensor(f['weights']).unsqueeze(0) 48 | 49 | # random rotate 50 | state = random.randint(0, 3) 51 | img_hr = img_hr.rot90(state, [1, 2]) 52 | img_lr = img_lr.rot90(state, [1, 2]) 53 | relu3_1 = relu3_1.rot90(state, [1, 2]) 54 | relu2_1 = relu2_1.rot90(state, [1, 2]) 55 | relu1_1 = relu1_1.rot90(state, [1, 2]) 56 | weights = weights.rot90(state, [1, 2]) 57 | 58 | # random flip 59 | if random.random() < 0.5: 60 | img_hr = img_hr.flip([1]) 61 | img_lr = img_lr.flip([1]) 62 | relu3_1 = relu3_1.flip([1]) 63 | relu2_1 = relu2_1.flip([1]) 64 | relu1_1 = relu1_1.flip([1]) 65 | weights = weights.flip([1]) 66 | if random.random() < 0.5: 67 | img_hr = img_hr.flip([2]) 68 | img_lr = img_lr.flip([2]) 69 | relu3_1 = relu3_1.flip([2]) 70 | relu2_1 = relu2_1.flip([2]) 71 | relu1_1 = relu1_1.flip([2]) 72 | weights = weights.flip([2]) 73 | 74 | return {'img_hr': img_hr, 'img_lr': img_lr, 75 | 'maps': {'relu3_1': relu3_1, 76 | 'relu2_1': relu2_1, 77 | 'relu1_1': relu1_1}, 78 | 'weights': weights} 79 | 80 | def __len__(self): 81 | return len(self.filenames) 82 | 83 | 84 | class ReferenceDatasetEval(Dataset): 85 | """ 86 | Dataset class for Ref-SR. 87 | """ 88 | 89 | def __init__(self, 90 | files: list, 91 | dataroot: Path, 92 | scale_factor: int = 4): 93 | 94 | super(ReferenceDatasetEval, self).__init__() 95 | 96 | self.filenames = files 97 | self.dataroot = Path(dataroot) 98 | self.input_dir = self.dataroot / 'input' 99 | self.ref_dir = self.dataroot / 'ref' 100 | self.map_dir = self.dataroot / 'map' 101 | 102 | self.transforms = transforms.Compose([ 103 | transforms.ToTensor(), 104 | # transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]) 105 | ]) 106 | 107 | def __getitem__(self, index): 108 | filename = self.filenames[index] 109 | 110 | img_hr = Image.open(self.input_dir / f'{filename}.png').convert('RGB') 111 | w, h = img_hr.size 112 | img_lr = img_hr.resize((w // 4, h // 4), Image.BICUBIC) 113 | 114 | with np.load(self.map_dir / f'{filename}.npz') as f: 115 | relu3_1 = torch.tensor(f['relu3_1']) 116 | relu2_1 = torch.tensor(f['relu2_1']) 117 | relu1_1 = torch.tensor(f['relu1_1']) 118 | weights = torch.tensor(f['weights']).unsqueeze(0) 119 | 120 | return {'img_hr': self.transforms(img_hr), 121 | 'img_lr': self.transforms(img_lr), 122 | 'maps': {'relu3_1': relu3_1, 123 | 'relu2_1': relu2_1, 124 | 'relu1_1': relu1_1}, 125 | 'weights': weights} 126 | 127 | def __len__(self): 128 | return len(self.filenames) 129 | 130 | 131 | if __name__ == "__main__": 132 | from torch.utils.data import DataLoader 133 | 134 | dataset = ReferenceDataset( 135 | dataroot='/home/ubuntu/srntt-pytorch/data/CUFED' 136 | ) 137 | dataloader = DataLoader(dataset) 138 | 139 | for batch in dataloader: 140 | img_hr = batch['img_hr'] 141 | img_lr = batch['img_lr'] 142 | maps = batch['maps'] 143 | 144 | break 145 | -------------------------------------------------------------------------------- /datasets/swapping_dataset.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | from PIL import Image 4 | from torch.utils.data import Dataset 5 | from torchvision.transforms import functional as TF 6 | 7 | 8 | class SwappingDataset(Dataset): 9 | """ 10 | Dataset class for offline feature swapping. 11 | """ 12 | 13 | def __init__(self, 14 | dataroot: Path, 15 | input_size: int, 16 | scale_factor: int = 4): 17 | 18 | super(SwappingDataset, self).__init__() 19 | 20 | self.input_dir = Path(dataroot) / 'input' 21 | self.ref_dir = Path(dataroot) / 'ref' 22 | 23 | input_file_len = len(list(self.input_dir.glob('*.png'))) 24 | ref_file_len = len(list(self.ref_dir.glob('*.png'))) 25 | assert input_file_len == ref_file_len,\ 26 | 'input/ref folder must have the same files.' 27 | 28 | self.filenames = [f.name for f in self.input_dir.glob('*.png')] 29 | 30 | self.input_size = (input_size, input_size) 31 | self.output_size = (input_size * 4, input_size * 4) 32 | 33 | def __getitem__(self, index): 34 | filename = self.filenames[index] 35 | 36 | img_in = Image.open(self.input_dir / filename).convert('RGB') 37 | img_in_lr = img_in.resize(self.input_size, Image.BICUBIC) 38 | img_in_up = img_in_lr.resize(self.output_size, Image.BICUBIC) 39 | 40 | img_ref = Image.open(self.ref_dir / filename).convert('RGB') 41 | img_ref = img_ref.resize(self.output_size, Image.BICUBIC) 42 | img_ref_lr = img_ref.resize(self.input_size, Image.BICUBIC) 43 | img_ref_up = img_ref_lr.resize(self.output_size, Image.BICUBIC) 44 | 45 | return {'img_in': TF.to_tensor(img_in_up), 46 | 'img_ref': TF.to_tensor(img_ref), 47 | 'img_ref_blur': TF.to_tensor(img_ref_up), 48 | 'filename': Path(filename).stem} 49 | 50 | def __len__(self): 51 | return len(self.filenames) 52 | -------------------------------------------------------------------------------- /download_dataset.py: -------------------------------------------------------------------------------- 1 | """ 2 | This code was coppied from the original implementation and adjusted our file structure. 3 | """ 4 | 5 | import requests 6 | import zipfile 7 | import sys 8 | import os 9 | import argparse 10 | from shutil import rmtree 11 | 12 | 13 | parser = argparse.ArgumentParser('download_dataset') 14 | parser.add_argument('--dataset_name', type=str, default='CUFED5', 15 | help='The name of dataset: CUFED5, DIV2K, or CUFED') 16 | args = parser.parse_args() 17 | 18 | 19 | CUFED5_TEST_DATA_URL = 'https://drive.google.com/uc?export=download&id=1Fa1mopExA9YGG1RxrCZZn7QFTYXLx6ph' 20 | DIV2K_INPUT_PATCH_DATA_URL = 'https://drive.google.com/uc?export=download&id=1nGeoNLVd-zPifH6sLOYvpY9lVYKnUc0w' 21 | DIV2K_REF_PATCH_DATA_URL = 'https://drive.google.com/uc?export=download&id=1sj72-zL3cGjsVqbbnk3PxJxjQWATQx61' 22 | CUFED_INPUT_PATCH_DATA_URL = 'https://drive.google.com/uc?export=download&id=1gN5IPZgPNkjdeXdTySe1Urog5OG8mrLc' 23 | CUFED_REF_PATCH_DATA_URL = 'https://drive.google.com/uc?export=download&id=13BX-UY4jUZu9S--X2Cd6yZ-3nH77nqo_' 24 | 25 | 26 | datasets = { 27 | 'CUFED5': { 28 | 'name': 'CUFED5', 29 | 'url': CUFED5_TEST_DATA_URL, 30 | 'save_dir': 'data/', 31 | 'data_size': 233 32 | }, 33 | 'DIV2K_input': { 34 | 'name': 'DIV2K_input', 35 | 'url': DIV2K_INPUT_PATCH_DATA_URL, 36 | 'save_dir': 'data/DIV2K/', 37 | 'data_size': 1835 38 | }, 39 | 'DIV2K_ref': { 40 | 'name': 'DIV2K_ref', 41 | 'url': DIV2K_REF_PATCH_DATA_URL, 42 | 'save_dir': 'data/DIV2K/', 43 | 'data_size': 1905 44 | }, 45 | 'CUFED_input': { 46 | 'name': 'CUFED_input', 47 | 'url': CUFED_INPUT_PATCH_DATA_URL, 48 | 'save_dir': 'data/CUFED/', 49 | 'data_size': 567 50 | }, 51 | 'CUFED_ref': { 52 | 'name': 'CUFED_ref', 53 | 'url': CUFED_REF_PATCH_DATA_URL, 54 | 'save_dir': 'data/CUFED/', 55 | 'data_size': 588 56 | } 57 | } 58 | 59 | 60 | def download_file_from_google_drive(url, save_dir, data_name, data_size=None): 61 | if not os.path.exists(os.path.join(save_dir, '%s.zip' % data_name)): 62 | if not os.path.exists(save_dir): 63 | os.makedirs(save_dir) 64 | with requests.Session() as session: 65 | response = session.get(url, stream=True) 66 | token = get_confirm_token(response) 67 | if token: 68 | response = session.get(url, params={'confirm': token}, stream=True) 69 | save_response_content(response, os.path.join(save_dir, '%s.zip' % data_name), data_size) 70 | else: 71 | print('[!] %s already exist! Skip download.' % os.path.join(save_dir, '%s.zip' % data_name)) 72 | 73 | if os.path.exists(os.path.join(save_dir, data_name.split('_')[-1])): 74 | rmtree(os.path.join(save_dir, data_name.split('_')[-1])) 75 | 76 | zip_ref = zipfile.ZipFile(os.path.join(save_dir, '%s.zip' % data_name), 'r') 77 | if 'train' in save_dir: 78 | print('>> Unzip %s --> %s' % (os.path.join(save_dir, '%s.zip' % data_name), 79 | os.path.join(save_dir, data_name.split('_')[0], data_name.split('_')[-1]))) 80 | zip_ref.extractall(os.path.join(save_dir, data_name.split('_')[0])) 81 | else: 82 | print('>> Unzip %s --> %s' % (os.path.join(save_dir, '%s.zip' % data_name), 83 | os.path.join(save_dir, data_name.split('_')[-1]))) 84 | zip_ref.extractall(save_dir) 85 | zip_ref.close() 86 | 87 | 88 | def get_confirm_token(response): 89 | for key, value in response.cookies.items(): 90 | if key.startswith('download_warning'): 91 | return value 92 | return None 93 | 94 | 95 | def save_response_content(response, save_dir, data_size=None): 96 | chunk_size = 1024 * 1024 # in byte 97 | with open(save_dir, "wb") as f: 98 | len_content = 0 99 | for chunk in response.iter_content(chunk_size): 100 | if chunk: # filter out keep-alive new chunks 101 | f.write(chunk) 102 | len_content += len(chunk) 103 | if data_size is not None: 104 | sys.stdout.write('\r>> Downloading %s %.1f%%' % (save_dir, min(len_content / 1024. / 1024. / data_size * 100, 100))) 105 | sys.stdout.flush() 106 | else: 107 | sys.stdout.write('\r>> Downloading %s %.1f MB' % (save_dir, len_content / 1024. / 1024.)) 108 | sys.stdout.flush() 109 | print('') 110 | 111 | 112 | if __name__ == "__main__": 113 | is_downloaded = False 114 | for key in datasets: 115 | if args.dataset_name == key.split('_')[0]: 116 | dataset = datasets[key] 117 | download_file_from_google_drive( 118 | url=dataset['url'], 119 | save_dir=dataset['save_dir'], 120 | data_name=dataset['name'], 121 | data_size=dataset['data_size'] 122 | ) 123 | is_downloaded = True 124 | if not is_downloaded: 125 | print('''[!] Unrecognized dataset name "%s"''' % args.dataset_name) -------------------------------------------------------------------------------- /download_pretrained_model.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from collections import OrderedDict 3 | from pathlib import Path 4 | 5 | from google_drive_downloader import GoogleDriveDownloader as gdd 6 | from PIL import Image 7 | import torch 8 | from torchvision.transforms import functional as TF 9 | from torchvision.utils import save_image 10 | 11 | from models import SRNTT 12 | 13 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') 14 | 15 | 16 | def parse_args(): 17 | parser = argparse.ArgumentParser() 18 | parser.add_argument('--save_dir', type=str, default='./weights') 19 | parser.add_argument('--save_image', action='store_true') 20 | return parser.parse_args() 21 | 22 | 23 | def main(args): 24 | save_dir = Path(args.save_dir) 25 | output_path = save_dir / 'SRGAN.pth' 26 | 27 | # download pre-trained SRGAN model of MMSR, 28 | # see also: https://github.com/open-mmlab/mmsr 29 | gdd.download_file_from_google_drive( 30 | file_id='1c0YNygNMfTLynR-C3y7nsZgaWbczbW5j', 31 | dest_path=output_path 32 | ) 33 | 34 | # load state_dict 35 | old_state_dict = torch.load(output_path) 36 | 37 | # create a new state_dict with renamed keys 38 | new_state_dict = OrderedDict() 39 | for key, val in old_state_dict.items(): 40 | new_key = key 41 | if 'conv_first' in new_key: 42 | new_key = new_key.replace('conv_first', 'head.0') 43 | if 'recon_trunk' in new_key: 44 | new_key = new_key.replace('recon_trunk', 'body') 45 | if '.conv1.weight' in new_key: 46 | new_key = new_key.replace('.conv1.weight', '.body.0.weight') 47 | if '.conv1.bias' in new_key: 48 | new_key = new_key.replace('.conv1.bias', '.body.0.bias') 49 | if '.conv2.weight' in new_key: 50 | new_key = new_key.replace('.conv2.weight', '.body.2.weight') 51 | if '.conv2.bias' in new_key: 52 | new_key = new_key.replace('.conv2.bias', '.body.2.bias') 53 | if 'upconv1' in new_key: 54 | new_key = new_key.replace('upconv1', 'tail.0') 55 | if 'upconv2' in new_key: 56 | new_key = new_key.replace('upconv2', 'tail.3') 57 | if 'HRconv' in new_key: 58 | new_key = new_key.replace('HRconv', 'tail.6') 59 | if 'conv_last' in new_key: 60 | new_key = new_key.replace('conv_last', 'tail.8') 61 | new_state_dict[new_key] = val 62 | 63 | # check the loading and forwarding 64 | model = SRNTT().to(device) 65 | model.content_extractor.load_state_dict(new_state_dict) 66 | print('Loading succeeded.') 67 | 68 | img = Image.open('./data/CUFED5/000_0.png').convert('RGB') 69 | img = TF.to_tensor(img).to(device) 70 | img = img.unsqueeze(0) 71 | out, _ = model(img, None) 72 | print('Forwarding succeeded.') 73 | 74 | if args.save_image: 75 | save_image(out.clamp(0, 1), save_dir / 'tmp.png') 76 | print('Please verify the output image.') 77 | 78 | # save the new state_dict 79 | torch.save(new_state_dict, output_path) 80 | 81 | 82 | if __name__ == "__main__": 83 | main(parse_args()) 84 | -------------------------------------------------------------------------------- /losses/__init__.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.autograd as autograd 3 | 4 | from .adversarial_loss import AdversarialLoss 5 | from .perceptual_loss import PerceptualLoss 6 | from .texture_loss import TextureLoss 7 | from .back_projection_loss import BackProjectionLoss 8 | from .metrics import PSNR, SSIM 9 | 10 | __all__ = [ 11 | 'AdversarialLoss', 12 | 'BackProjectionLoss', 13 | 'PerceptualLoss', 14 | 'TextureLoss', 15 | 'compute_gp', 16 | 'PSNR', 17 | 'SSIM' 18 | ] 19 | 20 | 21 | def compute_gp(netD, real_data, fake_data): 22 | device = real_data.device 23 | alpha = torch.rand(real_data.shape[0], 1, 1, 1, device=device) 24 | alpha = alpha.expand(real_data.size()) 25 | 26 | interpolates = alpha * real_data + ((1 - alpha) * fake_data) 27 | interpolates = autograd.Variable(interpolates, requires_grad=True) 28 | 29 | disc_interpolates = netD(interpolates) 30 | 31 | gradients = autograd.grad( 32 | outputs=disc_interpolates, 33 | inputs=interpolates, 34 | grad_outputs=torch.ones_like(disc_interpolates, device=device), 35 | create_graph=True, 36 | retain_graph=True, 37 | only_inputs=True)[0] 38 | gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() 39 | 40 | return gradient_penalty 41 | -------------------------------------------------------------------------------- /losses/adversarial_loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | class AdversarialLoss(nn.Module): 6 | """ 7 | PyTorch module for GAN loss. 8 | This code is inspired by https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix. 9 | """ 10 | def __init__(self, 11 | gan_mode='wgangp', 12 | target_real_label=1.0, 13 | target_fake_label=0.0): 14 | 15 | super(AdversarialLoss, self).__init__() 16 | 17 | self.register_buffer('real_label', torch.tensor(target_real_label)) 18 | self.register_buffer('fake_label', torch.tensor(target_fake_label)) 19 | 20 | self.gan_mode = gan_mode 21 | if gan_mode == 'vanilla': 22 | self.loss = nn.BCEWithLogitsLoss() 23 | elif gan_mode == 'lsgan': 24 | self.loss = nn.MSELoss() 25 | elif gan_mode in ['wgangp']: 26 | self.loss = None 27 | else: 28 | raise NotImplementedError(f'gan mode {gan_mode} not implemented') 29 | 30 | def get_target_tensor(self, prediction, target_is_real): 31 | if target_is_real: 32 | target_tensor = self.real_label 33 | else: 34 | target_tensor = self.fake_label 35 | return target_tensor.expand_as(prediction).detach() 36 | 37 | def forward(self, prediction, target_is_real): 38 | if self.gan_mode in ['lsgan', 'vanilla']: 39 | target_tensor = self.get_target_tensor(prediction, target_is_real) 40 | loss = self.loss(prediction, target_tensor) 41 | elif self.gan_mode == 'wgangp': 42 | if target_is_real: 43 | loss = - prediction.mean() 44 | else: 45 | loss = prediction.mean() 46 | return loss 47 | -------------------------------------------------------------------------------- /losses/back_projection_loss.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch.nn.functional as F 3 | 4 | 5 | class BackProjectionLoss(nn.Module): 6 | def __init__(self, scale_factor=4): 7 | super(BackProjectionLoss, self).__init__() 8 | self.scale_factor = scale_factor 9 | 10 | def forward(self, x, y): 11 | assert x.shape[2] == y.shape[2] * self.scale_factor 12 | assert x.shape[3] == y.shape[3] * self.scale_factor 13 | x = F.interpolate(x, y.size()[-2:], mode='bicubic', align_corners=True) 14 | return F.l1_loss(x, y) 15 | -------------------------------------------------------------------------------- /losses/metrics.py: -------------------------------------------------------------------------------- 1 | import kornia 2 | import torch 3 | import torch.nn as nn 4 | import torch.nn.functional as F 5 | 6 | 7 | class SSIM(nn.Module): 8 | def __init__(self, window_size=11): 9 | super(SSIM, self).__init__() 10 | self.window_size = window_size 11 | 12 | def forward(self, x, y): 13 | if x.shape[1] == 3: 14 | x = kornia.color.rgb_to_grayscale(x) 15 | if y.shape[1] == 3: 16 | y = kornia.color.rgb_to_grayscale(y) 17 | return 1 - kornia.losses.ssim(x, y, self.window_size, 'mean') 18 | 19 | 20 | class PSNR(nn.Module): 21 | def __init__(self, max_val=1., mode='Y'): 22 | super(PSNR, self).__init__() 23 | self.max_val = max_val 24 | self.mode = mode 25 | 26 | def forward(self, x, y): 27 | if self.mode == 'Y' and x.shape[1] == 3 and y.shape[1] == 3: 28 | x = kornia.color.rgb_to_grayscale(x) 29 | y = kornia.color.rgb_to_grayscale(y) 30 | mse = F.mse_loss(x, y, reduction='mean') 31 | psnr = 10 * torch.log10(self.max_val ** 2 / mse) 32 | return psnr 33 | -------------------------------------------------------------------------------- /losses/perceptual_loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | from models import VGG 6 | 7 | 8 | class PerceptualLoss(nn.Module): 9 | """ 10 | PyTorch module for perceptual loss. 11 | 12 | Parameters 13 | --- 14 | model_type : str 15 | select from [`vgg11`, `vgg11bn`, `vgg13`, `vgg13bn`, 16 | `vgg16`, `vgg16bn`, `vgg19`, `vgg19bn`, ]. 17 | target_layers : str 18 | the layer name you want to compare. 19 | norm_type : str 20 | the type of norm, select from ['mse', 'fro'] 21 | """ 22 | def __init__(self, 23 | model_type: str = 'vgg19', 24 | target_layer: str = 'relu5_1', 25 | norm_type: str = 'fro'): 26 | super(PerceptualLoss, self).__init__() 27 | 28 | assert norm_type in ['mse', 'fro'] 29 | 30 | self.model = VGG(model_type=model_type) 31 | self.target_layer = target_layer 32 | self.norm_type = norm_type 33 | 34 | def forward(self, x, y): 35 | x_feat, *_ = self.model(x, [self.target_layer]).values() 36 | y_feat, *_ = self.model(y, [self.target_layer]).values() 37 | 38 | # frobenius norm in the paper, but mse loss is actually used in 39 | # https://github.com/ZZUTK/SRNTT/blob/master/SRNTT/model.py#L376. 40 | if self.norm_type == 'mse': 41 | loss = F.mse_loss(x_feat, y_feat) 42 | elif self.norm_type == 'fro': 43 | loss = torch.norm(x_feat - y_feat, p='fro') 44 | 45 | return loss 46 | -------------------------------------------------------------------------------- /losses/texture_loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | from models import VGG 6 | 7 | 8 | def gram_matrix(features): 9 | N, C, H, W = features.size() 10 | feat_reshaped = features.view(N, C, -1) 11 | 12 | # Use torch.bmm for batch multiplication of matrices 13 | gram = torch.bmm(feat_reshaped, feat_reshaped.transpose(1, 2)) 14 | 15 | return gram 16 | 17 | 18 | class TextureLoss(nn.Module): 19 | """ 20 | creates a criterion to compute weighted gram loss. 21 | """ 22 | def __init__(self, use_weights=False): 23 | super(TextureLoss, self).__init__() 24 | self.use_weights = use_weights 25 | 26 | self.model = VGG(model_type='vgg19') 27 | self.register_buffer('a', torch.tensor(-20., requires_grad=False)) 28 | self.register_buffer('b', torch.tensor(.65, requires_grad=False)) 29 | 30 | def forward(self, x, maps, weights): 31 | input_size = x.shape[-1] 32 | x_feat = self.model(x, ['relu1_1', 'relu2_1', 'relu3_1']) 33 | 34 | if self.use_weights: 35 | weights = F.pad(weights, (1, 1, 1, 1), mode='replicate') 36 | for idx, l in enumerate(['relu3_1', 'relu2_1', 'relu1_1']): 37 | # adjust the scale 38 | weights_scaled = F.interpolate( 39 | weights, None, 2**idx, 'bicubic', True) 40 | 41 | # compute coefficients 42 | coeff = weights_scaled * self.a.detach() + self.b.detach() 43 | coeff = torch.sigmoid(coeff) 44 | 45 | # weighting features and swapped maps 46 | maps[l] = maps[l] * coeff 47 | x_feat[l] = x_feat[l] * coeff 48 | 49 | # for large scale 50 | loss_relu1_1 = torch.norm( 51 | gram_matrix(x_feat['relu1_1']) - gram_matrix(maps['relu1_1']), 52 | ) / 4. / ((input_size * input_size * 1024) ** 2) 53 | 54 | # for medium scale 55 | loss_relu2_1 = torch.norm( 56 | gram_matrix(x_feat['relu2_1']) - gram_matrix(maps['relu2_1']) 57 | ) / 4. / ((input_size * input_size * 512) ** 2) 58 | 59 | # for small scale 60 | loss_relu3_1 = torch.norm( 61 | gram_matrix(x_feat['relu3_1']) - gram_matrix(maps['relu3_1']) 62 | ) / 4. / ((input_size * input_size * 256) ** 2) 63 | 64 | loss = (loss_relu1_1 + loss_relu2_1 + loss_relu3_1) / 3. 65 | 66 | return loss 67 | -------------------------------------------------------------------------------- /models/__init__.py: -------------------------------------------------------------------------------- 1 | import torch.nn.init as init 2 | 3 | from .discriminator import Discriminator, ImageDiscriminator 4 | from .srntt import SRNTT, ContentExtractor 5 | from .swapper import Swapper 6 | from .vgg import VGG 7 | 8 | __all__ = [ 9 | 'Discriminator', 10 | 'ImageDiscriminator', 11 | 'SRNTT', 12 | 'ContentExtractor', 13 | 'Swapper', 14 | 'VGG' 15 | ] 16 | 17 | 18 | def init_weights(net, init_type='normal', init_gain=0.02): 19 | def init_func(m): 20 | name = m.__class__.__name__ 21 | if hasattr(m, 'weight') and ('Conv' in name or 'Linear' in name): 22 | if init_type == 'normal': 23 | init.normal_(m.weight.data, 0.0, init_gain) 24 | elif init_type == 'xavier': 25 | init.xavier_normal_(m.weight.data, gain=init_gain) 26 | elif init_type == 'kaiming': 27 | init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') 28 | elif init_type == 'orthogonal': 29 | init.orthogonal_(m.weight.data, gain=init_gain) 30 | else: 31 | raise NotImplementedError( 32 | f'initialization method [{init_type}] is not implemented') 33 | if hasattr(m, 'bias') and m.bias is not None: 34 | init.constant_(m.bias.data, 0.0) 35 | elif 'BatchNorm2d' in name: 36 | init.normal_(m.weight.data, 1.0, init_gain) 37 | init.constant_(m.bias.data, 0.0) 38 | 39 | net.apply(init_func) 40 | -------------------------------------------------------------------------------- /models/discriminator.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | import models 4 | 5 | 6 | class Discriminator(nn.Sequential): 7 | def __init__(self, ndf=32): 8 | def conv_block(in_channels, out_channels): 9 | block = [ 10 | nn.Conv2d(in_channels, out_channels, 3, 1, 1), 11 | nn.BatchNorm2d(out_channels), 12 | nn.LeakyReLU(0.2, True), 13 | nn.Conv2d(out_channels, out_channels, 3, 2, 1), 14 | nn.BatchNorm2d(out_channels), 15 | nn.LeakyReLU(0.2, True), 16 | ] 17 | return block 18 | 19 | super(Discriminator, self).__init__( 20 | *conv_block(3, ndf), 21 | *conv_block(ndf, ndf * 2), 22 | *conv_block(ndf * 2, ndf * 4), 23 | *conv_block(ndf * 4, ndf * 8), 24 | *conv_block(ndf * 8, ndf * 16), 25 | nn.Conv2d(ndf * 16, 1024, kernel_size=1), 26 | nn.LeakyReLU(0.2), 27 | nn.Conv2d(1024, 1, kernel_size=1), 28 | nn.Sigmoid() 29 | ) 30 | 31 | models.init_weights(self, init_type='normal', init_gain=0.02) 32 | 33 | 34 | class ImageDiscriminator(nn.Sequential): 35 | def __init__(self, ndf=32): 36 | def conv_block(in_channels, out_channels): 37 | block = [ 38 | nn.Conv2d(in_channels, out_channels, 3, 1, 1), 39 | nn.BatchNorm2d(out_channels), 40 | nn.LeakyReLU(0.2, True), 41 | nn.Conv2d(out_channels, out_channels, 3, 2, 1), 42 | nn.BatchNorm2d(out_channels), 43 | nn.LeakyReLU(0.2, True), 44 | ] 45 | return block 46 | 47 | super(ImageDiscriminator, self).__init__( 48 | *conv_block(3, ndf), 49 | *conv_block(ndf, ndf * 2), 50 | *conv_block(ndf * 2, ndf * 4), 51 | *conv_block(ndf * 4, ndf * 8), 52 | *conv_block(ndf * 8, ndf * 16), 53 | nn.AdaptiveAvgPool2d(1), 54 | nn.Conv2d(ndf * 16, 1024, kernel_size=1), 55 | nn.LeakyReLU(0.2), 56 | nn.Conv2d(1024, 1, kernel_size=1), 57 | nn.Sigmoid() 58 | ) 59 | 60 | models.init_weights(self, init_type='normal', init_gain=0.02) 61 | -------------------------------------------------------------------------------- /models/srntt.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | import models 6 | 7 | 8 | class SRNTT(nn.Module): 9 | """ 10 | PyTorch Module for SRNTT. 11 | Now x4 is only supported. 12 | 13 | Parameters 14 | --- 15 | ngf : int, optional 16 | the number of filterd of generator. 17 | n_blucks : int, optional 18 | the number of residual blocks for each module. 19 | """ 20 | def __init__(self, ngf=64, n_blocks=16, use_weights=False): 21 | super(SRNTT, self).__init__() 22 | self.content_extractor = ContentExtractor(ngf, n_blocks) 23 | self.texture_transfer = TextureTransfer(ngf, n_blocks, use_weights) 24 | models.init_weights(self, init_type='normal', init_gain=0.02) 25 | 26 | def forward(self, x, maps, weights=None): 27 | """ 28 | Parameters 29 | --- 30 | x : torch.Tensor 31 | the input image of SRNTT. 32 | maps : dict of torch.Tensor 33 | the swapped feature maps on relu3_1, relu2_1 and relu1_1. 34 | depths of the maps are 256, 128 and 64 respectively. 35 | """ 36 | 37 | base = F.interpolate(x, None, 4, 'bilinear', False) 38 | upscale_plain, content_feat = self.content_extractor(x) 39 | 40 | if maps is not None: 41 | if hasattr(self.texture_transfer, 'a'): # if weight is used 42 | upscale_srntt = self.texture_transfer( 43 | content_feat, maps, weights) 44 | else: 45 | upscale_srntt = self.texture_transfer( 46 | content_feat, maps) 47 | return upscale_plain + base, upscale_srntt + base 48 | else: 49 | return upscale_plain + base, None 50 | 51 | 52 | class ContentExtractor(nn.Module): 53 | """ 54 | Content Extractor for SRNTT, which outputs maps before-and-after upscale. 55 | more detail: https://github.com/ZZUTK/SRNTT/blob/master/SRNTT/model.py#L73. 56 | Currently this module only supports `scale_factor=4`. 57 | 58 | Parameters 59 | --- 60 | ngf : int, optional 61 | a number of generator's features. 62 | n_blocks : int, optional 63 | a number of residual blocks, see also `ResBlock` class. 64 | """ 65 | 66 | def __init__(self, ngf=64, n_blocks=16): 67 | super(ContentExtractor, self).__init__() 68 | 69 | self.head = nn.Sequential( 70 | nn.Conv2d(3, ngf, kernel_size=3, stride=1, padding=1), 71 | nn.LeakyReLU(0.1, True) 72 | ) 73 | self.body = nn.Sequential( 74 | *[ResBlock(ngf) for _ in range(n_blocks)], 75 | # nn.Conv2d(ngf, ngf, kernel_size=3, stride=1, padding=1), 76 | # nn.BatchNorm2d(ngf) 77 | ) 78 | self.tail = nn.Sequential( 79 | nn.Conv2d(ngf, ngf * 4, kernel_size=3, stride=1, padding=1), 80 | nn.PixelShuffle(2), 81 | nn.LeakyReLU(0.1, True), 82 | nn.Conv2d(ngf, ngf * 4, kernel_size=3, stride=1, padding=1), 83 | nn.PixelShuffle(2), 84 | nn.LeakyReLU(0.1, True), 85 | nn.Conv2d(ngf, ngf, kernel_size=3, stride=1, padding=1), 86 | nn.LeakyReLU(0.1, True), 87 | nn.Conv2d(ngf, 3, kernel_size=3, stride=1, padding=1), 88 | # nn.Tanh() 89 | ) 90 | 91 | def forward(self, x): 92 | h = self.head(x) 93 | h = self.body(h) + h 94 | upscale = self.tail(h) 95 | return upscale, h 96 | 97 | 98 | class TextureTransfer(nn.Module): 99 | """ 100 | Conditional Texture Transfer for SRNTT, 101 | see https://github.com/ZZUTK/SRNTT/blob/master/SRNTT/model.py#L116. 102 | This module is devided 3 parts for each scales. 103 | 104 | Parameters 105 | --- 106 | ngf : int 107 | a number of generator's filters. 108 | n_blocks : int, optional 109 | a number of residual blocks, see also `ResBlock` class. 110 | """ 111 | 112 | def __init__(self, ngf=64, n_blocks=16, use_weights=False): 113 | super(TextureTransfer, self).__init__() 114 | 115 | # for small scale 116 | self.head_small = nn.Sequential( 117 | nn.Conv2d(ngf + 256, ngf, kernel_size=3, stride=1, padding=1), 118 | nn.LeakyReLU(0.1, True), 119 | ) 120 | self.body_small = nn.Sequential( 121 | *[ResBlock(ngf) for _ in range(n_blocks)], 122 | # nn.Conv2d(ngf, ngf, kernel_size=3, stride=1, padding=1), 123 | # nn.BatchNorm2d(ngf) 124 | ) 125 | self.tail_small = nn.Sequential( 126 | nn.Conv2d(ngf, ngf * 4, kernel_size=3, stride=1, padding=1), 127 | nn.PixelShuffle(2), 128 | nn.LeakyReLU(0.1, True), 129 | ) 130 | 131 | # for medium scale 132 | self.head_medium = nn.Sequential( 133 | nn.Conv2d(ngf + 128, ngf, kernel_size=3, stride=1, padding=1), 134 | nn.LeakyReLU(0.1, True), 135 | ) 136 | self.body_medium = nn.Sequential( 137 | *[ResBlock(ngf) for _ in range(n_blocks)], 138 | # nn.Conv2d(ngf, ngf, kernel_size=3, stride=1, padding=1), 139 | # nn.BatchNorm2d(ngf) 140 | ) 141 | self.tail_medium = nn.Sequential( 142 | nn.Conv2d(ngf, ngf * 4, kernel_size=3, stride=1, padding=1), 143 | nn.PixelShuffle(2), 144 | nn.LeakyReLU(0.1, True), 145 | ) 146 | 147 | # for large scale 148 | self.head_large = nn.Sequential( 149 | nn.Conv2d(ngf + 64, ngf, kernel_size=3, stride=1, padding=1), 150 | nn.LeakyReLU(0.1, True), 151 | ) 152 | self.body_large = nn.Sequential( 153 | *[ResBlock(ngf) for _ in range(n_blocks)], 154 | # nn.Conv2d(ngf, ngf, kernel_size=3, stride=1, padding=1), 155 | # nn.BatchNorm2d(ngf) 156 | ) 157 | self.tail_large = nn.Sequential( 158 | nn.Conv2d(ngf, ngf // 2, kernel_size=3, stride=1, padding=1), 159 | nn.LeakyReLU(0.1, True), 160 | nn.Conv2d(ngf // 2, 3, kernel_size=3, stride=1, padding=1), 161 | # nn.Tanh() 162 | ) 163 | 164 | if use_weights: 165 | self.a = nn.Parameter(torch.ones(3), requires_grad=True) 166 | self.b = nn.Parameter(torch.ones(3), requires_grad=True) 167 | 168 | def forward(self, x, maps, weights=None): 169 | # compute weighted maps 170 | if hasattr(self, 'a') and weights is not None: 171 | for idx, layer in enumerate(['relu3_1', 'relu2_1', 'relu1_1']): 172 | weights_scaled = F.interpolate( 173 | F.pad(weights, (1, 1, 1, 1), mode='replicate'), 174 | scale_factor=2**idx, 175 | mode='bicubic', 176 | align_corners=True) * self.a[idx] + self.b[idx] 177 | maps[layer] *= torch.sigmoid(weights_scaled) 178 | 179 | # small scale 180 | h = torch.cat([x, maps['relu3_1']], 1) 181 | h = self.head_small(h) 182 | h = self.body_small(h) + x 183 | x = self.tail_small(h) 184 | 185 | # medium scale 186 | h = torch.cat([x, maps['relu2_1']], 1) 187 | h = self.head_medium(h) 188 | h = self.body_medium(h) + x 189 | x = self.tail_medium(h) 190 | 191 | # large scale 192 | h = torch.cat([x, maps['relu1_1']], 1) 193 | h = self.head_large(h) 194 | h = self.body_large(h) + x 195 | x = self.tail_large(h) 196 | 197 | return x 198 | 199 | 200 | class ResBlock(nn.Module): 201 | """ 202 | Basic residual block for SRNTT. 203 | 204 | Parameters 205 | --- 206 | n_filters : int, optional 207 | a number of filters. 208 | """ 209 | 210 | def __init__(self, n_filters=64): 211 | super(ResBlock, self).__init__() 212 | self.body = nn.Sequential( 213 | nn.Conv2d(n_filters, n_filters, 3, 1, 1), 214 | nn.ReLU(True), 215 | nn.Conv2d(n_filters, n_filters, 3, 1, 1), 216 | ) 217 | 218 | def forward(self, x): 219 | return self.body(x) + x 220 | 221 | 222 | if __name__ == "__main__": 223 | device = torch.device('cuda:0') 224 | 225 | x = torch.rand(16, 3, 24, 24).to(device) 226 | 227 | maps = {} 228 | maps.update({'relu3_1': torch.rand(16, 256, 24, 24).to(device)}) 229 | maps.update({'relu2_1': torch.rand(16, 128, 48, 48).to(device)}) 230 | maps.update({'relu1_1': torch.rand(16, 64, 96, 96).to(device)}) 231 | 232 | model = SRNTT().to(device) 233 | _, out = model(x, maps) 234 | -------------------------------------------------------------------------------- /models/swapper.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | 3 | import torch 4 | import torch.nn.functional as F 5 | 6 | 7 | class Swapper: 8 | """ 9 | Class for feature swapping. 10 | 11 | Parameters 12 | --- 13 | patch_size : int 14 | default patch size. increased depending on map size when applying. 15 | stride : int 16 | default stride. increased depending on map size when applying. 17 | """ 18 | 19 | def __init__(self, patch_size: int = 3, stride: int = 1): 20 | super(Swapper, self).__init__() 21 | self.patch_size = patch_size 22 | self.stride = stride 23 | self.device = torch.device('cpu') 24 | 25 | def __call__(self, 26 | map_in: OrderedDict, 27 | map_ref: OrderedDict, 28 | map_ref_blur: OrderedDict, 29 | is_weight: bool = True): 30 | """ 31 | Feature matching and swapping. 32 | For the fist, matching process is conducted on relu3_1 layer. 33 | Next, swapping features on all layers based on the match. 34 | 35 | Parameters 36 | --- 37 | map_in : OrderedDict 38 | VGG output of I^{LR↑}, see also ~/models/vgg.py. 39 | map_ref : OrderedDict 40 | VGG output of I^{Ref} 41 | map_ref_blur : OrderedDict 42 | VGG output of I^{Ref↓↑} 43 | is_weight : bool, optional 44 | whethere weights is output. 45 | 46 | Returns 47 | --- 48 | maps : dict of np.array 49 | swapped feature maps for each layer. 50 | weights : np.array 51 | weight maps for each layer if `is_weight`'s True, otherwise `None`. 52 | max_idx : np.array 53 | index maps of the most similar patch for each position and layer. 54 | """ 55 | 56 | assert map_in['relu1_1'].shape[2] % 4 == 0 57 | assert map_in['relu1_1'].shape[3] % 4 == 0 58 | 59 | max_idx, max_val, weights = self.match(map_in, map_ref_blur, is_weight) 60 | maps = self.swap(map_in, map_ref, max_idx) 61 | 62 | if is_weight: 63 | weights = weights.to('cpu').numpy() 64 | 65 | return maps, weights, max_idx.to('cpu').numpy() 66 | 67 | def match(self, 68 | map_in: OrderedDict, 69 | map_ref_blur: OrderedDict, 70 | is_weight: bool = True) -> tuple: 71 | """ 72 | Patch matching between content and condition images. 73 | 74 | Parameters 75 | --- 76 | content : torch.Tensor 77 | The VGG feature map of the content image, shape: (C, H, W) 78 | patch_condition : torch.Tensor 79 | The decomposed patches of the condition image, 80 | shape: (C, patch_size, patch_size, n_patches) 81 | 82 | Returns 83 | --- 84 | max_idx : torch.Tensor 85 | The indices of the most similar patches 86 | max_val : torch.Tensor 87 | The pixel value within max_idx. 88 | """ 89 | 90 | content = map_in['relu3_1'].squeeze(0) 91 | condition = map_ref_blur['relu3_1'].squeeze(0) 92 | 93 | # patch decomposition 94 | patch_content = self.sample_patches(content) 95 | patch_condition = self.sample_patches(condition) 96 | 97 | # normalize content and condition 98 | patch_content /= patch_content.norm(p=2, dim=(0, 1, 2)) + 1e-5 99 | patch_condition /= patch_condition.norm(p=2, dim=(0, 1, 2)) + 1e-5 100 | 101 | _, H, W = content.shape 102 | batch_size = int(1024. ** 2 * 512 / (H * W)) 103 | n_patches = patch_condition.shape[-1] 104 | 105 | max_idx, max_val = None, None 106 | for idx in range(0, n_patches, batch_size): 107 | batch = patch_condition[..., idx:idx+batch_size] 108 | corr = F.conv2d(content.unsqueeze(0), 109 | batch.permute(3, 0, 1, 2), 110 | stride=self.stride) 111 | 112 | max_val_tmp, max_idx_tmp = corr.squeeze(0).max(dim=0) 113 | 114 | if max_idx is None: 115 | max_idx, max_val = max_idx_tmp, max_val_tmp 116 | else: 117 | indices = max_val_tmp > max_val 118 | max_val[indices] = max_val_tmp[indices] 119 | max_idx[indices] = max_idx_tmp[indices] + idx 120 | 121 | if is_weight: # weight calculation 122 | weight = self.compute_weights( 123 | patch_content, patch_condition).reshape(max_idx.shape) 124 | else: 125 | weight = None 126 | 127 | return max_idx, max_val, weight 128 | 129 | def compute_weights(self, 130 | patch_content: torch.Tensor, 131 | patch_condition: torch.Tensor): 132 | """ 133 | Compute weights 134 | 135 | Parameters 136 | --- 137 | patch_content : torch.Tensor 138 | The decomposed patches of the content image, 139 | shape: (C, patch_size, patch_size, n_patches) 140 | patch_condition : torch.Tensor 141 | The decomposed patches of the condition image, 142 | shape: (C, patch_size, patch_size, n_patches) 143 | """ 144 | 145 | # reshape patches to (C * patch_size ** 2, n_patches) 146 | content_vec = patch_content.reshape(-1, patch_content.shape[-1]) 147 | style_vec = patch_condition.reshape(-1, patch_condition.shape[-1]) 148 | 149 | # compute matmul between content and condition, 150 | # output shape is (n_patches_content, n_patches_condition) 151 | corr = torch.matmul(content_vec.transpose(0, 1), style_vec) 152 | 153 | # the best match over condition patches 154 | weights, _ = torch.max(corr, dim=-1) 155 | 156 | return weights 157 | 158 | def swap(self, 159 | map_in: OrderedDict, 160 | map_ref: OrderedDict, 161 | max_idx: torch.Tensor) -> dict: 162 | """ 163 | Feature swapping 164 | 165 | Parameter 166 | --- 167 | map_in : namedtuple 168 | map_ref : namedtuple 169 | max_idx : namedtuple 170 | """ 171 | 172 | swapped_maps = {} 173 | for idx, layer in enumerate(['relu3_1', 'relu2_1', 'relu1_1']): 174 | ratio = 2 ** idx 175 | _patch_size = self.patch_size * ratio 176 | _stride = self.stride * ratio 177 | 178 | content = map_in[layer].squeeze(0) 179 | style = map_ref[layer].squeeze(0) 180 | patches_style = self.sample_patches(style, _patch_size, _stride) 181 | 182 | target_map = torch.zeros_like(content).to(self.device) 183 | count_map = torch.zeros(target_map.shape[1:]).to(self.device) 184 | for i in range(max_idx.shape[0]): 185 | for j in range(max_idx.shape[1]): 186 | _i, _j = i * ratio, j * ratio 187 | target_map[:, _i:_i+_patch_size, _j:_j+_patch_size]\ 188 | += patches_style[..., max_idx[i, j]] 189 | count_map[_i:_i+_patch_size, _j:_j+_patch_size] += 1 190 | target_map /= count_map 191 | 192 | assert not torch.isnan(target_map).any() 193 | 194 | swapped_maps.update({layer: target_map.cpu().numpy()}) 195 | 196 | return swapped_maps 197 | 198 | def sample_patches(self, 199 | inputs: torch.Tensor, 200 | patch_size: int = None, 201 | stride: int = None) -> torch.Tensor: 202 | """ 203 | Patch sampler for feature maps. 204 | 205 | Parameters 206 | --- 207 | inputs : torch.Tensor 208 | the input feature maps, shape: (c, h, w). 209 | patch_size : int, optional 210 | the spatial size of sampled patches 211 | stride : int, optional 212 | the stride of sampling. 213 | 214 | Returns 215 | --- 216 | patches : torch.Tensor 217 | extracted patches, shape: (c, patch_size, patch_size, n_patches). 218 | """ 219 | 220 | if patch_size is None: 221 | patch_size = self.patch_size 222 | if stride is None: 223 | stride = self.stride 224 | 225 | c, h, w = inputs.shape 226 | patches = inputs.unfold(1, patch_size, stride)\ 227 | .unfold(2, patch_size, stride)\ 228 | .reshape(c, -1, patch_size, patch_size)\ 229 | .permute(0, 2, 3, 1) 230 | return patches 231 | 232 | def to(self, device): 233 | self.device = device 234 | return self 235 | -------------------------------------------------------------------------------- /models/vgg.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | import warnings 3 | 4 | import torch 5 | import torch.nn as nn 6 | import torchvision.models.vgg as vgg 7 | 8 | __all__ = ['VGG'] 9 | 10 | NAMES = { 11 | 'vgg11': [ 12 | 'conv1_1', 'relu1_1', 'pool1', 13 | 'conv2_1', 'relu2_1', 'pool2', 14 | 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'pool3', 15 | 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'pool4', 16 | 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'pool5', 17 | ], 18 | 'vgg13': [ 19 | 'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 20 | 'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 21 | 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'pool3', 22 | 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'pool4', 23 | 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'pool5', 24 | ], 25 | 'vgg16': [ 26 | 'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 27 | 'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 28 | 'conv3_1', 'relu3_1', 29 | 'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'pool3', 30 | 'conv4_1', 'relu4_1', 31 | 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'pool4', 32 | 'conv5_1', 'relu5_1', 33 | 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'pool5', 34 | ], 35 | 'vgg19': [ 36 | 'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 37 | 'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 38 | 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 39 | 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4', 'pool3', 40 | 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 41 | 'conv4_3', 'relu4_3', 'conv4_4', 'relu4_4', 'pool4', 42 | 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 43 | 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4', 'pool5', 44 | ] 45 | } 46 | 47 | 48 | def insert_bn(names: list): 49 | """ 50 | Inserts bn layer after each conv. 51 | 52 | Parameters 53 | --- 54 | names : list 55 | The list of layer names. 56 | """ 57 | names_bn = [] 58 | for name in names: 59 | names_bn.append(name) 60 | if 'conv' in name: 61 | pos = name.replace('conv', '') 62 | names_bn.append('bn' + pos) 63 | return names_bn 64 | 65 | 66 | class VGG(nn.Module): 67 | """ 68 | Creates any type of VGG models. 69 | 70 | Parameters 71 | --- 72 | model_type : str 73 | The model type you want to load. 74 | requires_grad : bool, optional 75 | Whethere compute gradients. 76 | """ 77 | def __init__(self, model_type: str, requires_grad: bool = False): 78 | super(VGG, self).__init__() 79 | 80 | features = getattr(vgg, model_type)(True).features 81 | self.names = NAMES[model_type.replace('_bn', '')] 82 | if 'bn' in model_type: 83 | self.names = insert_bn(self.names) 84 | 85 | self.net = nn.Sequential(OrderedDict([ 86 | (k, v) for k, v in zip(self.names, features) 87 | ])) 88 | 89 | if not requires_grad: 90 | for param in self.parameters(): 91 | param.requires_grad = False 92 | 93 | self.register_buffer( 94 | name='vgg_mean', 95 | tensor=torch.tensor([[[0.485]], [[0.456]], [[0.406]]], 96 | requires_grad=False) 97 | ) 98 | self.register_buffer( 99 | name='vgg_std', 100 | tensor=torch.tensor([[[0.229]], [[0.224]], [[0.225]]], 101 | requires_grad=False) 102 | ) 103 | 104 | def z_score(self, x): 105 | x = x.sub(self.vgg_mean.detach()) 106 | x = x.div(self.vgg_std.detach()) 107 | return x 108 | 109 | def forward(self, x: torch.Tensor, targets: list) -> dict: 110 | """ 111 | Parameters 112 | --- 113 | x : torch.Tensor 114 | The input tensor normalized to [0, 1]. 115 | target : list of str 116 | The layer names you want to pick up. 117 | Returns 118 | --- 119 | out_dict : dict of torch.Tensor 120 | The dictionary of tensors you specified. 121 | The elements are ordered by the original VGG order. 122 | """ 123 | 124 | assert all([t in self.names for t in targets]),\ 125 | 'Specified name does not exist.' 126 | 127 | if torch.all(x < 0.) and torch.all(x > 1.): 128 | warnings.warn('input tensor is not normalize to [0, 1].') 129 | 130 | x = self.z_score(x) 131 | 132 | out_dict = OrderedDict() 133 | for key, layer in self.net._modules.items(): 134 | x = layer(x) 135 | if key in targets: 136 | out_dict.update({key: x}) 137 | if len(out_dict) == len(targets): # to reduce wasting computation 138 | break 139 | 140 | return out_dict 141 | -------------------------------------------------------------------------------- /offline_texture_swapping.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from pathlib import Path 3 | 4 | import numpy as np 5 | import torch 6 | from torch.utils.data import DataLoader 7 | from tqdm import tqdm 8 | 9 | from datasets.swapping_dataset import SwappingDataset 10 | from models import VGG 11 | from models.swapper import Swapper 12 | 13 | TARGET_LAYERS = ['relu3_1', 'relu2_1', 'relu1_1'] 14 | 15 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') 16 | 17 | 18 | def parse_args(): 19 | parser = argparse.ArgumentParser() 20 | parser.add_argument('--dataroot', type=str, required=True) 21 | parser.add_argument('--patch_size', default=3) 22 | parser.add_argument('--stride', default=1) 23 | parser.add_argument('--debug', action='store_true') 24 | return parser.parse_args() 25 | 26 | 27 | def main(args): 28 | dataroot = Path(args.dataroot) 29 | save_dir = dataroot / 'map' 30 | save_dir.mkdir(exist_ok=True) 31 | 32 | dataset = SwappingDataset( 33 | dataroot=dataroot, input_size=40 if 'CUFED' in dataroot.name else 80) 34 | dataloader = DataLoader(dataset) 35 | model = VGG(model_type='vgg19').to(device) 36 | swapper = Swapper(args.patch_size, args.stride).to(device) 37 | 38 | for i, batch in enumerate(tqdm(dataloader), 1): 39 | img_in = batch['img_in'].to(device) 40 | img_ref = batch['img_ref'].to(device) 41 | img_ref_blur = batch['img_ref_blur'].to(device) 42 | 43 | map_in = model(img_in, TARGET_LAYERS) 44 | map_ref = model(img_ref, TARGET_LAYERS) 45 | map_ref_blur = model(img_ref_blur, TARGET_LAYERS) 46 | 47 | maps, weights, correspondences = swapper(map_in, map_ref, map_ref_blur) 48 | 49 | np.savez_compressed(save_dir / f'{batch["filename"][0]}.npz', 50 | relu1_1=maps['relu1_1'], 51 | relu2_1=maps['relu2_1'], 52 | relu3_1=maps['relu3_1'], 53 | weights=weights, 54 | correspondences=correspondences) 55 | 56 | if args.debug and i == 10: 57 | break 58 | 59 | 60 | if __name__ == "__main__": 61 | main(parse_args()) 62 | -------------------------------------------------------------------------------- /online_inference.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from contextlib import contextmanager 3 | 4 | from PIL import Image 5 | import torch 6 | from torchvision.transforms import functional as TF 7 | from torchvision.utils import save_image 8 | 9 | from models import Swapper, VGG, SRNTT 10 | from losses import PSNR, SSIM 11 | 12 | TARGET_LAYERS = ['relu3_1', 'relu2_1', 'relu1_1'] 13 | 14 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') 15 | 16 | 17 | @contextmanager 18 | def timer(name): 19 | start = torch.cuda.Event(enable_timing=True) 20 | end = torch.cuda.Event(enable_timing=True) 21 | 22 | start.record() 23 | yield 24 | end.record() 25 | 26 | torch.cuda.synchronize() 27 | print(f'[{name}] done in {start.elapsed_time(end):.3f} ms') 28 | 29 | 30 | def parse_args(): 31 | parser = argparse.ArgumentParser() 32 | parser.add_argument('--input', '-i', type=str, required=True) 33 | parser.add_argument('--ref', '-r', type=str, required=True) 34 | parser.add_argument('--weight', '-w', type=str, required=True) 35 | parser.add_argument('--use_weights', action='store_true') 36 | return parser.parse_args() 37 | 38 | 39 | def load_image(filename_in, filename_ref): 40 | img_hr = Image.open(filename_in) 41 | img_ref = Image.open(filename_ref) 42 | 43 | # adjust to x4 44 | img_hr = img_hr.resize( 45 | (x - (x % 4) for x in img_hr.size), Image.BICUBIC) 46 | img_ref = img_ref.resize( 47 | (x - (x % 4) for x in img_ref.size), Image.BICUBIC) 48 | 49 | # input image 50 | img_lr = img_hr.resize( 51 | (x // 4 for x in img_hr.size), Image.BICUBIC) 52 | img_bic = img_lr.resize(img_hr.size, Image.BICUBIC) 53 | 54 | # reference image 55 | img_ref_down = img_ref.resize( 56 | (x // 4 for x in img_ref.size), Image.BICUBIC) 57 | img_ref_blur = img_ref_down.resize(img_ref.size, Image.BICUBIC) 58 | 59 | # to tensor 60 | img_hr = TF.to_tensor(img_hr).unsqueeze(0) 61 | img_lr = TF.to_tensor(img_lr).unsqueeze(0) 62 | img_bic = TF.to_tensor(img_bic).unsqueeze(0) 63 | img_ref = TF.to_tensor(img_ref).unsqueeze(0) 64 | img_ref_blur = TF.to_tensor(img_ref_blur).unsqueeze(0) 65 | 66 | return {'hr': img_hr, 'lr': img_lr, 'bic': img_bic, 67 | 'ref': img_ref, 'ref_blur': img_ref_blur} 68 | 69 | 70 | def main(args): 71 | imgs = load_image(args.input, args.ref) 72 | 73 | vgg = VGG(model_type='vgg19').to(device) 74 | swapper = Swapper().to(device) 75 | 76 | map_in = vgg(imgs['bic'].to(device), TARGET_LAYERS) 77 | map_ref = vgg(imgs['ref'].to(device), TARGET_LAYERS) 78 | map_ref_blur = vgg(imgs['ref_blur'].to(device), TARGET_LAYERS) 79 | 80 | with torch.no_grad(), timer('Feature swapping'): 81 | maps, weights, correspondences = swapper(map_in, map_ref, map_ref_blur) 82 | 83 | model = SRNTT(use_weights=args.use_weights).to(device) 84 | model.load_state_dict(torch.load(args.weight)) 85 | 86 | img_hr = imgs['hr'].to(device) 87 | img_lr = imgs['lr'].to(device) 88 | maps = { 89 | k: torch.tensor(v).unsqueeze(0).to(device) for k, v in maps.items()} 90 | weights = torch.tensor(weights).reshape(1, 1, *weights.shape).to(device) 91 | 92 | with torch.no_grad(), timer('Inference'): 93 | _, img_sr = model(img_lr, maps, weights) 94 | 95 | psnr = PSNR()(img_sr.clamp(0, 1), img_hr.clamp(0, 1)).item() 96 | ssim = SSIM()(img_sr.clamp(0, 1), img_hr.clamp(0, 1)).item() 97 | print(f'[Result] PSNR:{psnr:.2f}, SSIM:{ssim:.4f}') 98 | 99 | save_image(img_sr.clamp(0, 1), './out.png') 100 | 101 | 102 | if __name__ == "__main__": 103 | main(parse_args()) 104 | -------------------------------------------------------------------------------- /scripts/train.sh: -------------------------------------------------------------------------------- 1 | # download dataset 2 | python download_dataset.py --dataset_name CUFED 3 | python download_dataset.py --dataset_name CUFED5 4 | rm -rf data/CUFED/*.zip data/CUFED5.zip data/__MACOS__ 5 | 6 | # download pre-trained model 7 | python download_pretrained_model.py 8 | 9 | # texture swapping 10 | python offline_texture_swapping.py --dataroot data/CUFED 11 | 12 | # training 13 | python train.py --use_weights 14 | -------------------------------------------------------------------------------- /src/comparison_002.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/S-aiueo32/srntt-pytorch/c1ee824a752dc2a8877e928e917050567bddf67d/src/comparison_002.png -------------------------------------------------------------------------------- /src/comparison_065.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/S-aiueo32/srntt-pytorch/c1ee824a752dc2a8877e928e917050567bddf67d/src/comparison_065.png -------------------------------------------------------------------------------- /src/comparison_078.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/S-aiueo32/srntt-pytorch/c1ee824a752dc2a8877e928e917050567bddf67d/src/comparison_078.png -------------------------------------------------------------------------------- /test.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from pathlib import Path 3 | 4 | import pandas as pd 5 | import torch 6 | from torch.utils.data import DataLoader 7 | from torchvision.utils import save_image 8 | from tqdm import tqdm 9 | 10 | from models import Swapper, VGG, SRNTT 11 | from datasets import CUFED5Dataset 12 | from losses import PSNR 13 | 14 | TARGET_LAYERS = ['relu3_1', 'relu2_1', 'relu1_1'] 15 | 16 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') 17 | 18 | 19 | if __name__ == "__main__": 20 | parser = argparse.ArgumentParser() 21 | parser.add_argument('--dataroot', type=str, default='./data/CUFED5') 22 | parser.add_argument('--weight', '-w', type=str, required=True) 23 | parser.add_argument('--use_weights', action='store_true') 24 | args = parser.parse_args() 25 | 26 | dataset = CUFED5Dataset(args.dataroot) 27 | dataloader = DataLoader(dataset) 28 | 29 | vgg = VGG(model_type='vgg19').to(device) 30 | swapper = Swapper().to(device) 31 | model = SRNTT(use_weights=args.use_weights).to(device) 32 | model.load_state_dict(torch.load(args.weight)) 33 | 34 | criterion_psnr = PSNR() 35 | 36 | table = [] 37 | tbar = tqdm(total=len(dataloader)) 38 | for batch_idx, batch in enumerate(dataloader): 39 | with torch.no_grad(): 40 | img_hr = batch['img_hr'].to(device) 41 | img_lr = batch['img_lr'].to(device) 42 | img_in_up = batch['img_in_up'].to(device) 43 | 44 | map_in = vgg(img_in_up, TARGET_LAYERS) 45 | 46 | row = [batch['filename'][0].split('_')[0]] 47 | for ref_idx in range(7): 48 | ref = batch['ref'][ref_idx] 49 | map_ref = vgg(ref['ref'].to(device), TARGET_LAYERS) 50 | map_ref_blur = vgg(ref['ref_blur'].to(device), TARGET_LAYERS) 51 | 52 | maps, weights, correspondences = swapper( 53 | map_in, map_ref, map_ref_blur) 54 | 55 | maps = {k: torch.tensor(v).unsqueeze(0).to(device) 56 | for k, v in maps.items()} 57 | weights = torch.tensor(weights).to(device) 58 | weights = weights.reshape(1, 1, *weights.shape) 59 | 60 | _, img_sr = model(img_lr, maps, weights) 61 | 62 | name = f'{batch["filename"][0]}_{ref_idx}.png' 63 | save_image(img_sr.clamp(0, 1), Path(args.weight).parent / name) 64 | 65 | psnr = criterion_psnr(img_sr.clamp(0, 1), img_hr.clamp(0, 1)) 66 | row.append(psnr.item()) 67 | 68 | table.append(row) 69 | 70 | torch.cuda.empty_cache() 71 | tbar.update(1) 72 | 73 | df = pd.DataFrame( 74 | table, columns=('name', 'HR', 'L1', 'L2', 'L3', 'L4', 'L5', 'warp')) 75 | df = df.sort_values('name') 76 | df.to_csv(Path(args.weight).parent / 'result.csv', index=False) 77 | -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from pathlib import Path 3 | 4 | from sklearn.model_selection import train_test_split 5 | import torch 6 | import torch.nn as nn 7 | import torch.optim as optim 8 | from torch.optim.lr_scheduler import StepLR 9 | from torch.utils.data import DataLoader 10 | from torch.utils.tensorboard import SummaryWriter 11 | from tqdm import tqdm 12 | 13 | from models import SRNTT, Discriminator, ImageDiscriminator 14 | from datasets import ReferenceDataset, ReferenceDatasetEval 15 | from losses import (AdversarialLoss, PerceptualLoss, TextureLoss, 16 | PSNR, SSIM, compute_gp) 17 | from utils import init_seeds 18 | 19 | torch.autograd.set_detect_anomaly(True) 20 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') 21 | 22 | 23 | def parse_args(): 24 | parser = argparse.ArgumentParser(description='Train SRNTT') 25 | # data setting 26 | parser.add_argument('--dataroot', type=str, default='data/CUFED') 27 | # train setting 28 | parser.add_argument('--n_epochs_init', type=int, default=5) 29 | parser.add_argument('--n_epochs', type=int, default=100) 30 | parser.add_argument('--batch_size', type=int, default=9) 31 | # model setting 32 | parser.add_argument('--ngf', type=int, default=64) 33 | parser.add_argument('--ndf', type=int, default=32) 34 | parser.add_argument('--netD', default='image', choices=['patch', 'image']) 35 | parser.add_argument('--n_blocks', type=int, default=16) 36 | parser.add_argument('--use_weights', action='store_true') 37 | # loss function setting 38 | parser.add_argument('--lambda_rec', type=float, default=1.) 39 | parser.add_argument('--lambda_per', type=float, default=1e-4) 40 | parser.add_argument('--lambda_tex', type=float, default=1e-4) 41 | parser.add_argument('--lambda_adv', type=float, default=1e-6) 42 | # optimizer setting 43 | parser.add_argument('--lr', type=float, default=1e-4) 44 | # logging setting 45 | parser.add_argument('--pid', type=str, default=None) 46 | parser.add_argument('--display_freq', type=int, default=100) 47 | # weights setting 48 | parser.add_argument('--init_weight', type=str, default='weights/SRGAN.pth') 49 | parser.add_argument('--netG_pre', type=str, default=None) 50 | parser.add_argument('--netD_pre', type=str, default=None) 51 | # seed 52 | parser.add_argument('--seed', type=int, default=123) 53 | # debug 54 | parser.add_argument('--debug', action='store_true') 55 | return parser.parse_args() 56 | 57 | 58 | def main(args): 59 | init_seeds(seed=args.seed) 60 | 61 | # split data 62 | files = list([f.stem for f in Path(args.dataroot).glob('map/*.npz')]) 63 | train_files, val_files = train_test_split(files, test_size=0.1) 64 | 65 | # define dataloaders 66 | train_set = ReferenceDataset(train_files, args.dataroot) 67 | val_set = ReferenceDatasetEval(val_files, args.dataroot) 68 | train_loader = DataLoader( 69 | train_set, args.batch_size, shuffle=True, num_workers=4) 70 | val_loader = DataLoader(val_set, args.batch_size, drop_last=True) 71 | 72 | # define networks 73 | netG = SRNTT(args.ngf, args.n_blocks, args.use_weights).to(device) 74 | netG.content_extractor.load_state_dict(torch.load(args.init_weight)) 75 | if args.netD == 'image': 76 | netD = ImageDiscriminator(args.ndf).to(device) 77 | elif args.netD == 'patch': 78 | netD = Discriminator(args.ndf).to(device) 79 | 80 | # define criteria 81 | criterion_rec = nn.L1Loss().to(device) 82 | criterion_per = PerceptualLoss().to(device) 83 | criterion_adv = AdversarialLoss().to(device) 84 | criterion_tex = TextureLoss(args.use_weights).to(device) 85 | 86 | # metrics 87 | criterion_psnr = PSNR(max_val=1., mode='Y') 88 | criterion_ssim = SSIM(window_size=11) 89 | 90 | # define optimizers 91 | optimizer_G = optim.Adam(netG.parameters(), args.lr) 92 | optimizer_D = optim.Adam(netD.parameters(), args.lr) 93 | 94 | scheduler_G = StepLR( 95 | optimizer_G, int(args.n_epochs * len(train_loader) / 2), 0.1) 96 | scheduler_D = StepLR( 97 | optimizer_D, int(args.n_epochs * len(train_loader) / 2), 0.1) 98 | 99 | # for tensorboard 100 | writer = SummaryWriter(log_dir=f'runs/{args.pid}' if args.pid else None) 101 | 102 | if args.netG_pre is None: 103 | """ pretrain """ 104 | step = 0 105 | for epoch in range(1, args.n_epochs_init + 1): 106 | for i, batch in enumerate(train_loader, 1): 107 | img_hr = batch['img_hr'].to(device) 108 | img_lr = batch['img_lr'].to(device) 109 | maps = {k: v.to(device) for k, v in batch['maps'].items()} 110 | weights = batch['weights'].to(device) 111 | 112 | _, img_sr = netG(img_lr, maps, weights) 113 | 114 | """ train G """ 115 | optimizer_G.zero_grad() 116 | g_loss = criterion_rec(img_sr, img_hr) 117 | g_loss.backward() 118 | optimizer_G.step() 119 | 120 | """ logging """ 121 | writer.add_scalar('pre/g_loss', g_loss.item(), step) 122 | if step % args.display_freq == 0: 123 | writer.add_images('pre/img_lr', img_lr.clamp(0, 1), step) 124 | writer.add_images('pre/img_hr', img_hr.clamp(0, 1), step) 125 | writer.add_images('pre/img_sr', img_sr.clamp(0, 1), step) 126 | 127 | log_txt = [ 128 | f'[Pre][Epoch{epoch}][{i}/{len(train_loader)}]', 129 | f'G Loss: {g_loss.item()}' 130 | ] 131 | print(' '.join(log_txt)) 132 | 133 | step += 1 134 | 135 | if args.debug: 136 | break 137 | 138 | out_path = Path(writer.log_dir) / f'netG_pre{epoch:03}.pth' 139 | torch.save(netG.state_dict(), out_path) 140 | 141 | else: # ommit pre-training 142 | netG.load_state_dict(torch.load(args.netG_pre)) 143 | if args.netD_pre: 144 | netD.load_state_dict(torch.load(args.netD_pre)) 145 | 146 | """ train with all losses """ 147 | step = 0 148 | for epoch in range(1, args.n_epochs + 1): 149 | """ training loop """ 150 | netG.train() 151 | netD.train() 152 | for i, batch in enumerate(train_loader, 1): 153 | img_hr = batch['img_hr'].to(device) 154 | img_lr = batch['img_lr'].to(device) 155 | maps = {k: v.to(device) for k, v in batch['maps'].items()} 156 | weights = batch['weights'].to(device) 157 | 158 | _, img_sr = netG(img_lr, maps, weights) 159 | 160 | """ train D """ 161 | optimizer_D.zero_grad() 162 | for p in netD.parameters(): 163 | p.requires_grad = True 164 | for p in netG.parameters(): 165 | p.requires_grad = False 166 | 167 | # compute WGAN loss 168 | d_out_real = netD(img_hr) 169 | d_loss_real = criterion_adv(d_out_real, True) 170 | d_out_fake = netD(img_sr.detach()) 171 | d_loss_fake = criterion_adv(d_out_fake, False) 172 | d_loss = d_loss_real + d_loss_fake 173 | 174 | # gradient penalty 175 | gradient_penalty = compute_gp(netD, img_hr.data, img_sr.data) 176 | d_loss += 10 * gradient_penalty 177 | 178 | d_loss.backward() 179 | optimizer_D.step() 180 | 181 | """ train G """ 182 | optimizer_G.zero_grad() 183 | for p in netD.parameters(): 184 | p.requires_grad = False 185 | for p in netG.parameters(): 186 | p.requires_grad = True 187 | 188 | # compute all losses 189 | loss_rec = criterion_rec(img_sr, img_hr) 190 | loss_per = criterion_per(img_sr, img_hr) 191 | loss_adv = criterion_adv(netD(img_sr), True) 192 | loss_tex = criterion_tex(img_sr, maps, weights) 193 | 194 | # optimize with combined d_loss 195 | g_loss = (loss_rec * args.lambda_rec + 196 | loss_per * args.lambda_per + 197 | loss_adv * args.lambda_adv + 198 | loss_tex * args.lambda_tex) 199 | g_loss.backward() 200 | optimizer_G.step() 201 | 202 | """ logging """ 203 | writer.add_scalar('train/g_loss', g_loss.item(), step) 204 | writer.add_scalar('train/loss_rec', loss_rec.item(), step) 205 | writer.add_scalar('train/loss_per', loss_per.item(), step) 206 | writer.add_scalar('train/loss_tex', loss_tex.item(), step) 207 | writer.add_scalar('train/loss_adv', loss_adv.item(), step) 208 | writer.add_scalar('train/d_loss', d_loss.item(), step) 209 | writer.add_scalar('train/d_real', d_loss_real.item(), step) 210 | writer.add_scalar('train/d_fake', d_loss_fake.item(), step) 211 | if step % args.display_freq == 0: 212 | writer.add_images('train/img_lr', img_lr, step) 213 | writer.add_images('train/img_hr', img_hr, step) 214 | writer.add_images('train/img_sr', img_sr.clamp(0, 1), step) 215 | 216 | log_txt = [ 217 | f'[Train][Epoch{epoch}][{i}/{len(train_loader)}]', 218 | f'G Loss: {g_loss.item()}, D Loss: {d_loss.item()}' 219 | ] 220 | print(' '.join(log_txt)) 221 | 222 | scheduler_G.step() 223 | scheduler_D.step() 224 | 225 | step += 1 226 | 227 | if args.debug: 228 | break 229 | 230 | """ validation loop """ 231 | netG.eval() 232 | netD.eval() 233 | val_psnr, val_ssim = 0, 0 234 | tbar = tqdm(total=len(val_loader)) 235 | for i, batch in enumerate(val_loader, 1): 236 | img_hr = batch['img_hr'].to(device) 237 | img_lr = batch['img_lr'].to(device) 238 | maps = {k: v.to(device) for k, v in batch['maps'].items()} 239 | weights = batch['weights'].to(device) 240 | 241 | with torch.no_grad(): 242 | _, img_sr = netG(img_lr, maps, weights) 243 | val_psnr += criterion_psnr(img_hr, img_sr.clamp(0, 1)).item() 244 | val_ssim += criterion_ssim(img_hr, img_sr.clamp(0, 1)).item() 245 | 246 | tbar.update(1) 247 | 248 | if args.debug: 249 | break 250 | else: 251 | tbar.close() 252 | val_psnr /= len(val_loader) 253 | val_ssim /= len(val_loader) 254 | 255 | writer.add_scalar('val/psnr', val_psnr, epoch) 256 | writer.add_scalar('val/ssim', val_ssim, epoch) 257 | 258 | print(f'[Val][Epoch{epoch}] PSNR:{val_psnr:.4f}, SSIM:{val_ssim:.4f}') 259 | 260 | netG_path = Path(writer.log_dir) / f'netG_{epoch:03}.pth' 261 | netD_path = Path(writer.log_dir) / f'netD_{epoch:03}.pth' 262 | torch.save(netG.state_dict(), netG_path) 263 | torch.save(netD.state_dict(), netD_path) 264 | 265 | 266 | if __name__ == "__main__": 267 | args = parse_args() 268 | main(args) 269 | -------------------------------------------------------------------------------- /utils/__init__.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | import numpy as np 4 | import torch 5 | 6 | 7 | def init_seeds(seed=123): 8 | random.seed(seed) 9 | torch.manual_seed(seed) 10 | if torch.cuda.is_available(): 11 | torch.cuda.manual_seed(seed) 12 | torch.backends.cudnn.deterministic = True 13 | torch.backends.cudnn.benchmark = False 14 | np.random.seed(seed) 15 | -------------------------------------------------------------------------------- /weights/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/S-aiueo32/srntt-pytorch/c1ee824a752dc2a8877e928e917050567bddf67d/weights/.gitkeep --------------------------------------------------------------------------------