├── .gitignore
├── LICENSE
├── README.md
├── TF_newop
│   ├── __init__.py
│   ├── compile_op_v2_sz224.sh
│   ├── cuda_op_kernel_v2_sz224.cc
│   └── cuda_op_kernel_v2_sz224.cu.cc
├── _3dmm_utils.py
├── config.py
├── expected_rendered_img_without_bg.png
├── images
│   └── nonlinear-3dmm.jpg
├── main_non_linear_3DMM.py
├── mean_exp_para.npy
├── mean_m.npy
├── mean_shape.npy
├── model_non_linear_3DMM.py
├── model_non_linear_3DMM_proxy.py
├── ops.py
├── rendering_example.py
├── rendering_example_dev.py
├── rendering_ops.py
├── sample_data.npz
├── std_exp_para.npy
├── std_m.npy
├── std_shape.npy
└── utils.py

/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | #  Usually these files are written by a python script from a template
30 | #  before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 |
50 | # Translations
51 | *.mo
52 | *.pot
53 |
54 | # Django stuff:
55 | *.log
56 | local_settings.py
57 | db.sqlite3
58 |
59 | # Flask stuff:
60 | instance/
61 | .webassets-cache
62 |
63 | # Scrapy stuff:
64 | .scrapy
65 |
66 | # Sphinx documentation
67 | docs/_build/
68 |
69 | # PyBuilder
70 | target/
71 |
72 | # Jupyter Notebook
73 | .ipynb_checkpoints
74 |
75 | # pyenv
76 | .python-version
77 |
78 | # celery beat schedule file
79 | celerybeat-schedule
80 |
81 | # SageMath parsed files
82 | *.sage.py
83 |
84 | # Environments
85 | .env
86 | .venv
87 | env/
88 | venv/
89 | ENV/
90 | env.bak/
91 | venv.bak/
92 |
93 | # Spyder project settings
94 | .spyderproject
95 | .spyproject
96 |
97 | # Rope project settings
98 | .ropeproject
99 |
100 | # mkdocs documentation
101 | /site
102 |
103 | # mypy
104 | .mypy_cache/
105 |
106 | #
107 | TF_newop/*.o
108 | TF_newop/*.so
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity.
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2018 Luan Tran 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Nonlinear 3D Face Morphable Model
2 | ### [[Project page]](http://cvlab.cse.msu.edu/project-nonlinear-3dmm.html) [[CVPR'18 Paper]](http://cvlab.cse.msu.edu/pdfs/Tran_Liu_CVPR2018.pdf) [[CVPR'19 Paper]](http://cvlab.cse.msu.edu/pdfs/Tran_Liu_Liu_CVPR2019.pdf)
3 |
4 |
5 | ![Teaser](./images/nonlinear-3dmm.jpg)
6 |
7 | ## Library requirements
8 |
9 | * TensorFlow
10 |
11 |
12 | ## Data
13 |
14 | Download the following pre-processed training data (10GB) and unzip it into ./data/300W_LP/
15 |
16 | [Filelist](https://drive.google.com/open?id=1R80j6Y1JiNPzsucsMOGpoogKDiYg2ynP)
17 | [Images](https://drive.google.com/open?id=1QkBiPAOA-a2buta--8atVVcKoAl5sj7O)
18 | [Textures](https://drive.google.com/open?id=1oW8wTKkkw2VDVpCv9q8UjqG3mGQCHLQd)
19 | [Masks](https://drive.google.com/open?id=1xTTtYYWIJlq8wYEl5BSQfjM-Vuw3jmwq)
20 |
21 | Download the following 3DMM definition and unzip it into the current folder (./)
22 | [3DMM_definition.zip](https://drive.google.com/open?id=1-UJdQeFw0cf9u9gUHokNoheH0z3L1fEH)
23 |
24 | ## Compile the rendering layer - CUDA code
25 | Please edit TF_newop/compile_op_v2_sz224.sh based on your TF version and on whether you installed TF with Anaconda (instructions in the file).
26 |
27 | ```bash
28 | $ # Compile
29 | $ cd TF_newop/
30 | $ ./compile_op_v2_sz224.sh
31 | $ # Run an example
32 | $ python rendering_example.py
33 | ```
34 | Currently the code works but is not optimal (e.g., see line 139 of TF_newop/cuda_op_kernel_v2_sz224.cu.cc);
35 | also, the image size is hard-coded. Any contribution is welcome!
36 |
37 |
38 | ## Run the code
39 |
40 | Note: In recent TF versions, passing `--is_<flag> False` (e.g. `--is_using_recon False`) does not actually set the flag to False. In that case, simply omit the flag and rely on its default value of False. Print out the flag values to make sure they are what you expect.
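
As a sanity check, here is a minimal sketch of printing the parsed flag values before training. This is a hedged illustration: `check_flags.py` is a hypothetical file name, not part of this repo; it assumes TF 1.x and reuses the same `flag_values_dict()` call that main_non_linear_3DMM.py already makes at startup.

```python
# check_flags.py - hypothetical standalone sanity check (assumes TF 1.x).
import tensorflow as tf

flags = tf.app.flags
flags.DEFINE_boolean("is_using_recon", False, "Using reconstruction loss [False]")
FLAGS = flags.FLAGS

def main(_):
    # Same call made at the top of main_non_linear_3DMM.py:
    print(tf.app.flags.FLAGS.flag_values_dict())
    # Confirm the type and value actually parsed from the command line.
    print(type(FLAGS.is_using_recon), FLAGS.is_using_recon)

if __name__ == '__main__':
    tf.app.run()
```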
41 |
42 | Pretraining
43 |
44 | ```bash
45 | python main_non_linear_3DMM.py --batch_size 128 --sample_size 128 --is_train True --learning_rate 0.001 --output_size 224 \
46 |        --gf_dim 32 --df_dim 32 --dfc_dim 320 --gfc_dim 320 --z_dim 20 --c_dim 3 \
47 |        --is_using_landmark True --shape_loss l2 --tex_loss l1 \
48 |        --is_using_recon False --is_using_frecon False --is_partbase_albedo False --is_using_symetry True \
49 |        --is_albedo_supervision False --is_batchwise_white_shading True --is_const_albedo True --is_const_local_albedo False --is_smoothness True \
50 |        --gpu 0,1,2,3
51 | ```
52 |
53 |
54 | Fine-tuning.
55 | Manually reduce the m_loss and shape_loss weights by a factor of 10.
56 |
57 | ```bash
58 | python main_non_linear_3DMM.py --batch_size 64 --sample_size 64 --is_train True --learning_rate 0.001 --output_size 224 \
59 |        --gf_dim 32 --df_dim 32 --dfc_dim 320 --gfc_dim 320 --z_dim 20 --c_dim 3 \
60 |        --is_using_landmark True --shape_loss l2 --tex_loss l1 \
61 |        --is_using_recon True --is_using_frecon True --is_partbase_albedo False --is_using_symetry True \
62 |        --is_albedo_supervision False --is_batchwise_white_shading True --is_const_albedo True --is_const_local_albedo True --is_smoothness True \
63 |        --gpu 0,1,2,3
64 | ```
65 |
66 | ## Pretrained model
67 |
68 | This is the [pretrained model](https://www.cse.msu.edu/computervision/evaluation_code.zip) of the CVPR'19 paper. Input images are 256 x 256.
69 |
70 |
71 | ## Citation
72 |
73 | If you find this work useful, please cite our papers with the following BibTeX:
74 | ```latex
75 | @inproceedings{ tran2019towards,
76 |   author = { Luan Tran and Feng Liu and Xiaoming Liu },
77 |   title = { Towards High-fidelity Nonlinear 3D Face Morphable Model },
78 |   booktitle = { In Proceedings of IEEE Computer Vision and Pattern Recognition },
79 |   address = { Long Beach, CA },
80 |   month = { June },
81 |   year = { 2019 },
82 | }
83 | ```
84 |
85 | ```latex
86 | @article{ tran2018on,
87 |   author = { Luan Tran and Xiaoming Liu },
88 |   title = { On Learning 3D Face Morphable Model from In-the-wild Images },
89 |   journal = { IEEE Transactions on Pattern Analysis and Machine Intelligence },
90 |   month = { July },
91 |   year = { 2019 },
92 | }
93 | ```
94 |
95 |
96 | ```latex
97 | @inproceedings{ tran2018nonlinear,
98 |   author = { Luan Tran and Xiaoming Liu },
99 |   title = { Nonlinear 3D Face Morphable Model },
100 |   booktitle = { IEEE Computer Vision and Pattern Recognition (CVPR) },
101 |   address = { Salt Lake City, UT },
102 |   month = { June },
103 |   year = { 2018 },
104 | }
105 | ```
106 |
107 | ## Contacts
108 |
109 | If you have any questions, feel free to drop an email to _tranluan@msu.edu_.
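
## Loading the compiled op (sketch)

For reference, a minimal, untested sketch of calling the compiled rendering op directly, based only on the op registration in TF_newop/cuda_op_kernel_v2_sz224.cc; rendering_example.py remains the authoritative example, and the placeholder shapes below are assumptions.

```python
# Hypothetical direct use of the custom op (assumes TF 1.x and a GPU,
# since the kernel is registered for DEVICE_GPU only); rendering_example.py
# is the supported entry point.
import tensorflow as tf

mod = tf.load_op_library('./TF_newop/cuda_op_kernel_v2_sz224.so')

# s2d: projected vertices, rows (u, v, depth); tri: per-column triangle
# vertex indices (including the dummy column that _3dmm_utils.py appends);
# vis: per-triangle visibility flags.
s2d = tf.placeholder(tf.float32, [3, None])
tri = tf.placeholder(tf.int32, [3, None])
vis = tf.placeholder(tf.bool, [None])

# TF exposes the registered op "ZbufferTriV2Sz224" as a snake_case function.
# Outputs are 224 x 224: the index of the closest triangle per pixel, and a
# {0, 1} foreground mask (the z-buffer after ConvertToMask).
tri_map, mask = mod.zbuffer_tri_v2_sz224(s2d, tri, vis)
```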
110 |
--------------------------------------------------------------------------------
/TF_newop/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tranluan/Nonlinear_Face_3DMM/662098a602d542c3505cd16ba01dd302f33eeee8/TF_newop/__init__.py
--------------------------------------------------------------------------------
/TF_newop/compile_op_v2_sz224.sh:
--------------------------------------------------------------------------------
1 | ## Note: for Anaconda users, please use -D_GLIBCXX_USE_CXX11_ABI=1 (instead of -D_GLIBCXX_USE_CXX11_ABI=0)
2 |
3 | ## ----------- Tested with TF v1.8
4 |
5 | TF_CFLAGS=( $(python -c 'import tensorflow as tf; print(" ".join(tf.sysconfig.get_compile_flags()))') )
6 | TF_LFLAGS=( $(python -c 'import tensorflow as tf; print(" ".join(tf.sysconfig.get_link_flags()))') )
7 |
8 | nvcc -std=c++11 -c -o cuda_op_kernel_v2_sz224.cu.o cuda_op_kernel_v2_sz224.cu.cc ${TF_CFLAGS[@]} -D GOOGLE_CUDA=1 -x cu -Xcompiler -fPIC -D_MWAITXINTRIN_H_INCLUDED
9 |
10 | g++ -std=c++11 -shared -o cuda_op_kernel_v2_sz224.so cuda_op_kernel_v2_sz224.cc cuda_op_kernel_v2_sz224.cu.o ${TF_CFLAGS[@]} -fPIC -lcudart ${TF_LFLAGS[@]} -D_GLIBCXX_USE_CXX11_ABI=0 -L /usr/local/cuda/lib64/
11 |
12 |
13 | ## ----------- Tested with TF v1.3
14 | #TF_INC=$(python -c 'import tensorflow as tf; print(tf.sysconfig.get_include())')
15 |
16 | #nvcc -std=c++11 -c -o cuda_op_kernel_v2_sz224.cu.o cuda_op_kernel_v2_sz224.cu.cc -I $TF_INC -D GOOGLE_CUDA=1 -x cu -Xcompiler -fPIC -D_MWAITXINTRIN_H_INCLUDED
17 |
18 | #g++ -std=c++11 -shared -o cuda_op_kernel_v2_sz224.so cuda_op_kernel_v2_sz224.cc cuda_op_kernel_v2_sz224.cu.o -I $TF_INC -fPIC -lcudart -D_GLIBCXX_USE_CXX11_ABI=0
19 |
--------------------------------------------------------------------------------
/TF_newop/cuda_op_kernel_v2_sz224.cc:
--------------------------------------------------------------------------------
1 | /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #include "tensorflow/core/framework/op.h"
17 | #include "tensorflow/core/framework/op_kernel.h"
18 | #include "tensorflow/core/framework/resource_mgr.h"
19 |
20 |
21 | using namespace tensorflow;  // NOLINT(build/namespaces)
22 |
23 | REGISTER_OP("ZbufferTriV2Sz224")
24 |     .Input("s2d: float")
25 |     .Input("tri: int32")
26 |     .Input("vis: bool")
27 |     .Output("output: int32")
28 |     .Output("zbuffer: float")
29 |     .Doc(R"doc(
30 | )doc");
31 |
32 |
33 |
34 | void ZbufferTriLauncher(const float* s2d, const int* tri, const bool* vis, const int tri_num, const int vertex_num, int* out, float* zbuffer);
35 |
36 | class ZbufferTriOp : public OpKernel {
37 |  public:
38 |   explicit ZbufferTriOp(OpKernelConstruction* context) : OpKernel(context) {}
39 |
40 |   void Compute(OpKernelContext* context) override {
41 |     int img_sz = 224;
42 |     // Grab the input tensors
43 |     const Tensor& s2d_tensor = context->input(0);
44 |     auto s2d = s2d_tensor.flat<float>();
45 |
46 |     const Tensor& tri_tensor = context->input(1);
47 |     auto tri = tri_tensor.flat<int32>();
48 |
49 |     const Tensor& vis_tensor = context->input(2);
50 |     auto vis = vis_tensor.flat<bool>();
51 |
52 |     // Create an output tensor
53 |     Tensor* output_tensor = nullptr;
54 |     OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape({img_sz, img_sz}), &output_tensor));
55 |     auto output = output_tensor->template flat<int32>();
56 |
57 |
58 |     Tensor* zbuffer_tensor = nullptr;
59 |     OP_REQUIRES_OK(context, context->allocate_output(1, TensorShape({img_sz, img_sz}), &zbuffer_tensor));
60 |     auto zbuffer = zbuffer_tensor->template flat<float>();
61 |
62 |     // Read the triangle and vertex counts from the input shapes.
63 |     const int tri_num = tri_tensor.shape().dim_size(1);
64 |     const int vertex_num = s2d_tensor.shape().dim_size(1);
65 |     // Call the cuda kernel launcher
66 |     ZbufferTriLauncher(s2d.data(), tri.data(), vis.data(), tri_num, vertex_num, output.data(), zbuffer.data());
67 |   }
68 | };
69 |
70 | REGISTER_KERNEL_BUILDER(Name("ZbufferTriV2Sz224").Device(DEVICE_GPU), ZbufferTriOp);
71 |
--------------------------------------------------------------------------------
/TF_newop/cuda_op_kernel_v2_sz224.cu.cc:
--------------------------------------------------------------------------------
1 | /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================*/
15 |
16 | #if GOOGLE_CUDA
17 | #define EIGEN_USE_GPU
18 | #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
19 | #include <cuda.h>
20 |
21 |
22 | #define max(a,b) \
23 |   ({ __typeof__ (a) _a = (a); \
24 |      __typeof__ (b) _b = (b); \
25 |      _a > _b ? _a : _b; })
26 |
27 | #define min(a,b) \
28 |   ({ __typeof__ (a) _a = (a); \
29 |      __typeof__ (b) _b = (b); \
30 |      _a < _b ? _a : _b; })
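// Overview of the kernels below: ZbufferTriKernel scans, for each visible
// triangle, the pixels in the triangle's 2D bounding box, tests each pixel
// against the triangle with barycentric coordinates (uu, vv), and keeps the
// triangle with the largest mean depth r per pixel (a simple z-buffer);
// `out` records the winning triangle index. Initialize() resets both
// buffers, and ConvertToMask() then turns the z-buffer into a binary
// foreground mask.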
31 |
32 | #define min3(a,b,c) (min(min(a,b), c))
33 |
34 | #define max3(a,b,c) (max(max(a,b), c))
35 |
36 | __global__ void ZbufferTriKernel(const float* s2d, const int* tri, const bool* vis, const int tri_num, const int vertex_num, int* out, float* zbuffer, int img_sz) {
37 |   for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < tri_num - 1; i += blockDim.x * gridDim.x) {
38 |     if (vis[i]) {
39 |
40 |       int vt1 = tri[            i];
41 |       int vt2 = tri[  tri_num + i];
42 |       int vt3 = tri[2*tri_num + i];
43 |
44 |       float point1_u = s2d[             vt1];
45 |       float point1_v = s2d[vertex_num + vt1];
46 |
47 |       float point2_u = s2d[             vt2];
48 |       float point2_v = s2d[vertex_num + vt2];
49 |
50 |       float point3_u = s2d[             vt3];
51 |       float point3_v = s2d[vertex_num + vt3];
52 |
53 |       int umin = int(ceil (double( min3(point1_u, point2_u, point3_u) )));
54 |       int umax = int(floor(double( max3(point1_u, point2_u, point3_u) )));
55 |
56 |       int vmin = int(ceil (double( min3(point1_v, point2_v, point3_v) )));
57 |       int vmax = int(floor(double( max3(point1_v, point2_v, point3_v) )));
58 |
59 |       float r = (s2d[2*vertex_num+vt1] + s2d[2*vertex_num+vt2] + s2d[2*vertex_num+vt3])/3;
60 |
61 |
62 |       if (umax < img_sz && vmax < img_sz && umin >= 0 && vmin >= 0 ){
63 |         for (int u = umin; u <= umax; u++){
64 |           for (int v = vmin; v <= vmax; v++){
65 |
66 |             bool flag;
67 |
68 |             float v0_u = point3_u - point1_u; //C - A
69 |             float v0_v = point3_v - point1_v; //C - A
70 |
71 |             float v1_u = point2_u - point1_u; //B - A
72 |             float v1_v = point2_v - point1_v; //B - A
73 |
74 |             float v2_u = u - point1_u;
75 |             float v2_v = v - point1_v;
76 |
77 |             float dot00 = v0_u * v0_u + v0_v * v0_v;
78 |             float dot01 = v0_u * v1_u + v0_v * v1_v;
79 |             float dot02 = v0_u * v2_u + v0_v * v2_v;
80 |             float dot11 = v1_u * v1_u + v1_v * v1_v;
81 |             float dot12 = v1_u * v2_u + v1_v * v2_v;
82 |
83 |             float inverDeno = 1 / (dot00 * dot11 - dot01 * dot01 + 1e-6);
84 |             float uu = (dot11 * dot02 - dot01 * dot12) * inverDeno;
85 |             float vv = 0;
86 |             if (uu < 0 or uu > 1){
87 |               flag = 0;
88 |
89 |             }
90 |             else {
91 |               vv = (dot00 * dot12 - dot01 * dot02) * inverDeno;
92 |               if (vv < 0 or vv > 1){
93 |                 flag = 0;
94 |               }
95 |               else
96 |               {
97 |                 flag = uu + vv <= 1;
98 |               }
99 |
100 |             }
101 |
102 |             if (flag){
103 |               if (zbuffer[u * img_sz + v] < r ){ // and triCpoint(np.asarray([u, v]), pt1, pt2, pt3)):
104 |                 zbuffer[u * img_sz + v] = r;
105 |                 out[u * img_sz + v ] = i;
106 |               }
107 |
108 |             }
109 |           }
110 |         }
111 |       }
112 |     }
113 |   }
114 |
115 | }
116 |
117 | __global__ void Initialize(const int tri_num, int* out, float* zbuffer, int img_sz) {
118 |   for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < img_sz*img_sz; i += blockDim.x * gridDim.x) {
119 |     zbuffer[i] = -INFINITY;
120 |     out[i] = tri_num;
121 |   }
122 | }
123 |
124 | __global__ void ConvertToMask(float* zbuffer, int img_sz) {
125 |   for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < img_sz*img_sz; i += blockDim.x * gridDim.x) {
126 |     if (zbuffer[i] == -INFINITY){
127 |       zbuffer[i] = 0;
128 |     }
129 |     else{
130 |       zbuffer[i] = 1;
131 |     }
132 |   }
133 | }
134 |
135 |
136 | void ZbufferTriLauncher(const float* s2d, const int* tri, const bool* vis, const int tri_num, const int vertex_num, int* out, float* zbuffer) {
137 |   int img_sz = 224;
138 |   Initialize<<<32, 256>>>(tri_num-1, out, zbuffer, img_sz);
139 |   ZbufferTriKernel<<<1, 1>>>(s2d, tri, vis, tri_num, vertex_num, out, zbuffer, img_sz);
140 |   // TODO: Make sure of the correctness when processing in parallel, i.e. ZbufferTriKernel<<<32, 256>>>(s2d, tri, vis, tri_num, vertex_num, out, zbuffer);
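  // (The compare-then-write on zbuffer/out in ZbufferTriKernel is not
  // atomic, so a parallel launch would let two triangles covering the same
  // pixel race; the kernel is kept at <<<1, 1>>> until that read-modify-write
  // is made atomic.)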
141 |   ConvertToMask<<<32, 256>>>(zbuffer, img_sz);
142 | }
143 |
144 | #endif
145 |
--------------------------------------------------------------------------------
/_3dmm_utils.py:
--------------------------------------------------------------------------------
1 | """
2 | Notes: Many of the .dat files were written using Matlab.
3 | Hence, there are "-1" subtractions to convert to Python 0-based indexing.
4 | """
5 | from __future__ import division
6 | import math
7 | import numpy as np
8 | from config import _3DMM_DEFINITION_DIR
9 |
10 |
11 | VERTEX_NUM = 53215
12 | TRI_NUM = 105840
13 | N = VERTEX_NUM * 3
14 |
15 |
16 | def load_3DMM_tri():
17 |     # Triangle definition (i.e. from the Basel model)
18 |
19 |     print('Loading 3DMM tri ...')
20 |
21 |     fd = open(_3DMM_DEFINITION_DIR + '3DMM_tri.dat')
22 |     tri = np.fromfile(file=fd, dtype=np.int32)
23 |     fd.close()
24 |     #print tri
25 |
26 |     tri = tri.reshape((3,-1)).astype(np.int32)
27 |     tri = tri - 1
28 |     tri = np.append(tri, [[ VERTEX_NUM], [VERTEX_NUM], [VERTEX_NUM]], axis = 1 )
29 |
30 |     print('    DONE')
31 |     return tri
32 |
33 | def load_3DMM_vertex_tri():
34 |     # Vertex-to-triangle mapping (list of all triangles containing the current vertex)
35 |
36 |     print('Loading 3DMM vertex tri ...')
37 |
38 |     fd = open(_3DMM_DEFINITION_DIR + '3DMM_vertex_tri.dat')
39 |     vertex_tri = np.fromfile(file=fd, dtype=np.int32)
40 |     fd.close()
41 |
42 |     vertex_tri = vertex_tri.reshape((8,-1)).astype(np.int32)
43 |     #vertex_tri = np.append(vertex_tri, np.zeros([8,1]), 1)
44 |     vertex_tri[vertex_tri == 0] = TRI_NUM + 1
45 |     vertex_tri = vertex_tri - 1
46 |
47 |     print('    DONE')
48 |     return vertex_tri
49 |
50 | def load_3DMM_vt2pixel():
51 |     # Mapping in UV space
52 |
53 |     fd = open(_3DMM_DEFINITION_DIR + 'vertices_2d_u.dat')
54 |     vt2pixel_u = np.fromfile(file=fd, dtype=np.float32)
55 |     vt2pixel_u = np.append(vt2pixel_u - 1, 0)
56 |     fd.close()
57 |
58 |     fd = open(_3DMM_DEFINITION_DIR + 'vertices_2d_v.dat')
59 |     vt2pixel_v = np.fromfile(file=fd, dtype=np.float32)
60 |     vt2pixel_v = np.append(vt2pixel_v - 1, 0)
61 |     fd.close()
62 |
63 |     return vt2pixel_u, vt2pixel_v
64 |
65 | def load_3DMM_kpts():
66 |     # 68 keypoint indices
67 |
68 |     print('Loading 3DMM keypoints ...')
69 |
70 |     fd = open(_3DMM_DEFINITION_DIR + '3DMM_keypoints.dat')
71 |     kpts = np.fromfile(file=fd, dtype=np.int32)
72 |     kpts = kpts.reshape((-1, 1))
73 |     fd.close()
74 |
75 |     return kpts - 1
76 |
77 | def load_3DMM_tri_2d(with_mask = False):
78 |     fd = open(_3DMM_DEFINITION_DIR + '3DMM_tri_2d.dat')
79 |     tri_2d = np.fromfile(file=fd, dtype=np.int32)
80 |     fd.close()
81 |
82 |     tri_2d = tri_2d.reshape(192, 224)
83 |
84 |     tri_mask = tri_2d != 0
85 |
86 |     tri_2d[tri_2d == 0] = TRI_NUM+1 #VERTEX_NUM + 1
87 |     tri_2d = tri_2d - 1
88 |
89 |     if with_mask:
90 |         return tri_2d, tri_mask
91 |
92 |     return tri_2d
93 |
94 | def load_Basel_basic(element, is_reduce = False):
95 |     fn = _3DMM_DEFINITION_DIR + '3DMM_'+element+'_basis.dat'
96 |     print('Loading ' + fn + ' ...')
97 |
98 |
99 |
100 |     fd = open(fn)
101 |     all_paras = np.fromfile(file=fd, dtype=np.float32)
102 |     fd.close()
103 |
104 |     all_paras = np.transpose(all_paras.reshape((-1,N)).astype(np.float32))
105 |
106 |     mu = all_paras[:,0]
107 |     w = all_paras[:,1:]
108 |
109 |     print('    DONE')
110 |
111 |     return mu, w
112 |
113 | def load_const_alb_mask():
114 |     fd = open(_3DMM_DEFINITION_DIR + '3DMM_const_alb_mask.dat')
115 |     const_alb_mask = np.fromfile(file=fd, dtype=np.uint8)
116 |     fd.close()
117 |     const_alb_mask = const_alb_mask - 1
118 |     const_alb_mask = const_alb_mask.reshape((-1,2)).astype(np.uint8)
119 |
120 |     return const_alb_mask
121 |
122 | def load_3DMM_tri_2d_barycoord():
123 |     fd = open(_3DMM_DEFINITION_DIR + '3DMM_tri_2d_barycoord.dat')
124 |     tri_2d_barycoord = np.fromfile(file=fd, dtype=np.float32)
125 |     fd.close()
126 |
127 |     tri_2d_barycoord = tri_2d_barycoord.reshape(192, 224, 3)
128 |
129 |
130 |     return tri_2d_barycoord
--------------------------------------------------------------------------------
/config.py:
--------------------------------------------------------------------------------
1 | _3DMM_DEFINITION_DIR = './3DMM_definition/'
2 | _300W_LP_DIR='./data/300W_LP_crop/'
--------------------------------------------------------------------------------
/expected_rendered_img_without_bg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tranluan/Nonlinear_Face_3DMM/662098a602d542c3505cd16ba01dd302f33eeee8/expected_rendered_img_without_bg.png
--------------------------------------------------------------------------------
/images/nonlinear-3dmm.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tranluan/Nonlinear_Face_3DMM/662098a602d542c3505cd16ba01dd302f33eeee8/images/nonlinear-3dmm.jpg
--------------------------------------------------------------------------------
/main_non_linear_3DMM.py:
--------------------------------------------------------------------------------
1 | import os
2 | import scipy.misc
3 | import numpy as np
4 |
5 | from model_non_linear_3DMM import DCGAN
6 | from utils import pp, visualize, to_json
7 |
8 | import tensorflow as tf
9 |
10 | flags = tf.app.flags
11 | flags.DEFINE_integer("epoch", 1000, "Epoch to train [1000]")
12 | flags.DEFINE_float("learning_rate", 0.0002, "Learning rate for adam [0.0002]")
13 | flags.DEFINE_float("beta1", 0.5, "Momentum term of adam [0.5]")
14 | flags.DEFINE_integer("train_size", 5000000, "The size of train images [np.inf]")
15 | flags.DEFINE_integer("batch_size", 64, "The size of batch images [64]")
16 | flags.DEFINE_integer("sample_size", 64, "The size of batch sample images [64]")
17 | flags.DEFINE_integer("image_size", 108, "The size of image to use (will be center cropped) [108]")
18 | flags.DEFINE_integer("output_size", 224, "The size of the output images to produce [224]")
19 | flags.DEFINE_integer("c_dim", 3, "Dimension of image color. [3]")
20 | flags.DEFINE_boolean("is_with_y", True, "True for with label")
21 | flags.DEFINE_string("dataset", "celebA", "The name of dataset [celebA, mnist, lsun]")
22 | flags.DEFINE_string("checkpoint_dir", "checkpoint", "Directory name to save the checkpoints [checkpoint]")
23 | flags.DEFINE_string("samples_dir", "samples", "Directory name to save the image samples [samples]")
24 | flags.DEFINE_boolean("is_train", True, "True for training, False for testing [False]")
25 | flags.DEFINE_boolean("is_reduce", False, "True for 6k vertices, False for 50k vertices")
26 | flags.DEFINE_boolean("is_crop", False, "True for training, False for testing [False]")
27 | flags.DEFINE_boolean("visualize", False, "True for visualizing, False for nothing [False]")
28 | flags.DEFINE_integer("gf_dim", 32, "")
29 | flags.DEFINE_integer("gfc_dim", 512, "")
30 | flags.DEFINE_integer("df_dim", 32, "")
31 | flags.DEFINE_integer("dfc_dim", 512, "")
32 | flags.DEFINE_integer("z_dim", 50, "")
33 | flags.DEFINE_string("gpu", "1,2", "GPU to use [1,2]")
34 |
35 | flags.DEFINE_boolean("is_pretrain", False, "Is in pretrain stage [False]")
36 |
37 | flags.DEFINE_boolean("is_using_landmark", False, "Using landmark loss [False]")
38 | flags.DEFINE_boolean("is_using_symetry", False, "Using symmetry loss [False]")
39 | flags.DEFINE_boolean("is_using_recon", False, "Using reconstruction loss [False]")
40 | flags.DEFINE_boolean("is_using_frecon", False, "Using feature reconstruction loss [False]")
41 | flags.DEFINE_boolean("is_using_graddiff", False, "Using gradient difference [False]")
42 | flags.DEFINE_boolean("is_gt_m", False, "Using gt m [False]")
43 | flags.DEFINE_boolean("is_partbase_albedo", False, "Using part based albedo decoder [False]")
44 | flags.DEFINE_boolean("is_using_linear", False, "Using linear model supervision [False]")
45 | flags.DEFINE_boolean("is_batchwise_white_shading", False, "Using batchwise white shading constraint [False]")
46 | flags.DEFINE_boolean("is_const_albedo", False, "Using batchwise const albedo constraint [False]")
47 | flags.DEFINE_boolean("is_const_local_albedo", False, "Using local const albedo constraint [False]")
48 | flags.DEFINE_boolean("is_smoothness", False, "Using pairwise loss [False]")
49 |
50 |
51 | FLAGS = flags.FLAGS
52 |
53 |
54 | def main(_):
55 |     #pp.pprint(FLAGS.__flags)
56 |     pp.pprint(tf.app.flags.FLAGS.flag_values_dict())
57 |
58 |
59 |     if not os.path.exists(FLAGS.checkpoint_dir):
60 |         os.makedirs(FLAGS.checkpoint_dir)
61 |     if not os.path.exists(FLAGS.samples_dir):
62 |         os.makedirs(FLAGS.samples_dir)
63 |
64 |     gpu_options = tf.GPUOptions(visible_device_list =FLAGS.gpu, per_process_gpu_memory_fraction = 0.8, allow_growth = True)
65 |
66 |     with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, gpu_options=gpu_options)) as sess:
67 |         dcgan = DCGAN(sess, FLAGS)
68 |
69 |         if FLAGS.is_train:
70 |             dcgan.train(FLAGS)
71 |         else:
72 |             dcgan.load(FLAGS.checkpoint_dir)
73 |             dcgan.test(FLAGS, True)
74 |     '''
75 |     if FLAGS.visualize:
76 |         to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
77 |                 [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
78 |                 [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
79 |                 [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
80 |                 [dcgan.h4_w, dcgan.h4_b, None])
81 |
82 |     # Below is codes for visualization
83 |     OPTION = 2
84 |     visualize(sess, dcgan, FLAGS, OPTION)'''
85 |
86 | if __name__ == '__main__':
87 |     tf.app.run()
88 |
--------------------------------------------------------------------------------
/mean_exp_para.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tranluan/Nonlinear_Face_3DMM/662098a602d542c3505cd16ba01dd302f33eeee8/mean_exp_para.npy
--------------------------------------------------------------------------------
/mean_m.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tranluan/Nonlinear_Face_3DMM/662098a602d542c3505cd16ba01dd302f33eeee8/mean_m.npy
--------------------------------------------------------------------------------
/mean_shape.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tranluan/Nonlinear_Face_3DMM/662098a602d542c3505cd16ba01dd302f33eeee8/mean_shape.npy
--------------------------------------------------------------------------------
/model_non_linear_3DMM.py:
--------------------------------------------------------------------------------
1 | '''
2 | Outline of the main training script.
3 | Part of the data/input pipeline is omitted.
4 |
5 | '''
6 |
7 |
8 | from __future__ import division
9 | import os
10 | import time
11 | import csv
12 | import random
13 | from random import randint
14 | from math import floor
15 | from glob import glob
16 | import tensorflow as tf
17 | import tensorflow.contrib.slim as slim
18 | import numpy as np
19 | from six.moves import xrange
20 | #from progress.bar import Bar
21 | from rendering_ops import *
22 | from ops import *
23 | from utils import *
24 |
25 |
26 | TRI_NUM = 105840
27 | VERTEX_NUM = 53215
28 | CONST_PIXELS_NUM = 20
29 |
30 |
31 |
32 | class DCGAN(object):
33 |     def __init__(self, sess, config, devices=None):
34 |         """
35 |         Args:
36 |             sess: TensorFlow session
37 |             batch_size: The size of batch. Should be specified before training.
38 | """ 39 | self.sess = sess 40 | self.c_dim = config.c_dim 41 | self.gpu_num = len(config.gpu.split()) 42 | 43 | 44 | 45 | self.batch_size = config.batch_size 46 | self.image_size = config.image_size 47 | self.sample_size = config.sample_size 48 | self.image_size = 224 #config.image_size 49 | self.texture_size = [192, 224] 50 | self.z_dim = config.z_dim 51 | self.gf_dim = config.gf_dim 52 | self.df_dim = config.df_dim 53 | self.gfc_dim = config.gfc_dim 54 | self.dfc_dim = config.dfc_dim 55 | 56 | self.shape_loss = config.shape_loss if hasattr(config, 'shape_loss') else "l2" 57 | self.tex_loss = config.tex_loss if hasattr(config, 'tex_loss') else "l1" 58 | 59 | self.is_using_landmark = config.is_using_landmark 60 | self.is_using_symetry = config.is_using_symetry 61 | self.is_using_recon = config.is_using_recon 62 | self.is_using_frecon = config.is_using_frecon 63 | self.is_batchwise_white_shading = config.is_batchwise_white_shading 64 | self.is_const_albedo = config.is_const_albedo 65 | self.is_const_local_albedo = config.is_const_local_albedo 66 | self.is_smoothness = config.is_smoothness 67 | 68 | self.mDim = 8 69 | self.ilDim = 27 70 | 71 | self.vertexNum = VERTEX_NUM 72 | self.landmark_num = 68 73 | 74 | 75 | self.checkpoint_dir = config.checkpoint_dir 76 | self.samples_dir = config.samples_dir 77 | 78 | if not os.path.exists(self.samples_dir+"/"+self.model_dir): 79 | os.makedirs(self.samples_dir+"/"+self.model_dir) 80 | if not os.path.exists(self.checkpoint_dir+"/"+self.model_dir): 81 | os.makedirs(self.checkpoint_dir+"/"+self.model_dir) 82 | 83 | self.setupParaStat() 84 | #self.setupValData() 85 | self.build_model() 86 | 87 | def build_model(self): 88 | def filename2image(input_filenames, offset_height = None, offset_width = None, target_height=None, target_width=None): 89 | batch_size = len(input_filenames) 90 | if offset_height != None: 91 | offset_height = tf.split(offset_height, batch_size) 92 | offset_width = tf.split(offset_width, batch_size) 93 | 94 | images = [] 95 | for i in range(batch_size): 96 | file_contents = tf.read_file(input_filenames[i]) 97 | image = tf.image.decode_png(file_contents, channels=3) 98 | if offset_height != None: 99 | image = tf.image.crop_to_bounding_box(image, tf.reshape(offset_height[i], []), tf.reshape(offset_width[i], []), target_height, target_width) 100 | 101 | images.append(image) 102 | return tf.cast(tf.stack(images), tf.float32) 103 | 104 | 105 | self.m_300W_labels = tf.placeholder(tf.float32, [self.batch_size, self.mDim], name='m_300W_labels') 106 | self.shape_300W_labels = tf.placeholder(tf.float32, [self.batch_size, self.vertexNum * 3], name='shape_300W_labels') 107 | self.texture_300W_labels = tf.placeholder(tf.float32, [self.batch_size, self.texture_size[0], self.texture_size[1], self.c_dim], name='tex_300W_labels') 108 | #self.exp_300W_labels = tf.placeholder(tf.float32, [self.batch_size, self.expDim], name='exp_300W_labels') 109 | #self.il_300W_labels = tf.placeholder(tf.float32, [self.batch_size, self.ilDim], name='lighting_300W_labels') 110 | 111 | self.input_offset_height = tf.placeholder(tf.int32, [self.batch_size], name='input_offset_height') 112 | self.input_offset_width = tf.placeholder(tf.int32, [self.batch_size], name='input_offset_width') 113 | 114 | self.input_images_fn_300W = [tf.placeholder(dtype=tf.string) for _ in range(self.batch_size)] 115 | self.input_masks_fn_300W = [tf.placeholder(dtype=tf.string) for _ in range(self.batch_size)] 116 | self.texture_labels_fn_300W = [tf.placeholder(dtype=tf.string) for _ in 
range(self.batch_size)] 117 | self.texture_masks_fn_300W = [tf.placeholder(dtype=tf.string) for _ in range(self.batch_size)] 118 | 119 | 120 | # For const alb loss 121 | self.albedo_indexes_x1 = tf.placeholder(tf.int32, [self.batch_size, CONST_PIXELS_NUM, 1], name='idexes_x1') 122 | self.albedo_indexes_y1 = tf.placeholder(tf.int32, [self.batch_size, CONST_PIXELS_NUM, 1], name='idexes_y1') 123 | 124 | self.albedo_indexes_x2 = tf.placeholder(tf.int32, [self.batch_size, CONST_PIXELS_NUM, 1], name='idexes_x2') 125 | self.albedo_indexes_y2 = tf.placeholder(tf.int32, [self.batch_size, CONST_PIXELS_NUM, 1], name='idexes_y2') 126 | 127 | self.const_alb_mask = load_const_alb_mask() 128 | 129 | def model_and_loss(input_images_fn_300W, input_masks_fn_300W, texture_labels_fn_300W, texture_masks_fn_300W, input_offset_height, input_offset_width, m_300W_labels, shape_300W_labels, albedo_indexes_x1, albedo_indexes_y1, albedo_indexes_x2, albedo_indexes_y2): 130 | batch_size = self.batch_size / self.gpu_num 131 | input_images_300W_ = filename2image(input_images_fn_300W, offset_height = input_offset_height, offset_width = input_offset_width, target_height=self.image_size, target_width=self.image_size) 132 | input_images_300W = input_images_300W_ /127.5 - 1 133 | 134 | input_masks_300W = filename2image(input_masks_fn_300W, offset_height = input_offset_height, offset_width = input_offset_width, target_height=self.image_size, target_width=self.image_size) 135 | input_masks_300W = input_masks_300W / 255.0 136 | 137 | texture_300W_labels = filename2image(texture_labels_fn_300W) 138 | texture_300W_labels = texture_300W_labels / 127.5 - 1 139 | 140 | texture_mask_300W_labels = filename2image(texture_masks_fn_300W) 141 | texture_mask_300W_labels = texture_mask_300W_labels / 255.0 142 | 143 | 144 | ## ------------------------- Network --------------------------- 145 | shape_fx_300W, tex_fx_300W, m_300W, il_300W = self.generator_encoder( input_images_300W, is_reuse=False) 146 | shape_300W, shape_2d_300W = self.generator_decoder_shape(shape_fx_300W, is_reuse=False, is_training=True) 147 | albedo_300W = self.generator_decoder_albedo(tex_fx_300W, is_reuse=False, is_training=True) 148 | 149 | m_300W_full = m_300W * self.std_m_tf + self.mean_m_tf 150 | shape_300W_full = shape_300W * self.std_shape_tf + self.mean_shape_tf 151 | shape_300W_labels_full = shape_300W_labels * self.std_shape_tf + self.mean_shape_tf 152 | m_300W_labels_full = m_300W_labels * self.std_m_tf + self.mean_m_tf 153 | 154 | shape_for_synthesize = shape_300W_full 155 | m_for_synthesize = m_300W_full 156 | 157 | # Rendering 158 | shade_300W = generate_shade(il_300W, m_for_synthesize, shape_for_synthesize, self.texture_size) 159 | texture_300W = 2.0*tf.multiply( (albedo_300W + 1.0)/2.0, shade_300W) - 1 160 | 161 | 162 | G_images_300W, G_images_300W_mask = warp_texture(texture_300W, m_for_synthesize, shape_for_synthesize, output_size=self.image_size) 163 | 164 | G_images_300W_mask = tf.multiply(input_masks_300W, tf.expand_dims(G_images_300W_mask, -1)) 165 | G_images_300W = tf.multiply(G_images_300W, G_images_300W_mask) + tf.multiply(input_images_300W, 1 - G_images_300W_mask) 166 | 167 | landmark_u_300W, landmark_v_300W = compute_landmarks(m_300W_full, shape_300W_full, output_size=self.image_size) 168 | landmark_u_300W_labels, landmark_v_300W_labels = compute_landmarks(m_300W_labels_full, shape_300W_labels_full, output_size=self.image_size) 169 | 170 | 171 | 172 | 173 | ##---------------- Losses ------------------------- 174 | g_loss = tf.zeros(1) 175 | 176 
| G_loss_shape = 10*norm_loss(shape_300W, shape_300W_labels, loss_type = self.shape_loss) #tf.zeros(1) 177 | G_loss_m = 5*norm_loss(m_300W, m_300W_labels, loss_type = 'l2') 178 | 179 | 180 | texture_vis_mask = tf.cast(tf.not_equal(texture_300W_labels, tf.ones_like(texture_300W_labels)*(-1)), tf.float32) 181 | texture_vis_mask = tf.multiply(texture_vis_mask, texture_mask_300W_labels) 182 | texture_ratio = tf.reduce_sum(texture_vis_mask) / (batch_size* self.texture_size[0] * self.texture_size[1] * self.c_dim) 183 | 184 | 185 | 186 | if self.is_batchwise_white_shading: 187 | uv_mask_tf = tf.expand_dims(tf.expand_dims(tf.constant( self.uv_mask, dtype = tf.float32 ), 0), -1) 188 | 189 | mean_shade = tf.reduce_mean( tf.multiply(shade_300W, uv_mask_tf) , axis=[0,1,2]) * 16384 / 10379 190 | G_loss_white_shading = 10*norm_loss(mean_shade, 0.99*tf.ones([1, 3], dtype=tf.float32), loss_type = "l2") 191 | else: 192 | G_loss_white_shading = tf.zeros(1) 193 | 194 | 195 | 196 | G_loss_texture = norm_loss(texture_300W, texture_300W_labels, mask = texture_vis_mask, loss_type = self.tex_loss) / texture_ratio 197 | 198 | G_loss_recon = 10*norm_loss(G_images_300W, input_images_300W, loss_type = self.tex_loss ) / (tf.reduce_sum(G_images_300W_mask)/ (batch_size* self.image_size * self.image_size)) 199 | 200 | g_loss += G_loss_m + G_loss_shape + G_loss_white_shading 201 | 202 | if self.is_smoothness: 203 | G_loss_smoothness = 1000*norm_loss( (shape_2d_300W[:, :-2, 1:-1, :] + shape_2d_300W[:, 2:, 1:-1, :] + shape_2d_300W[:, 1:-1, :-2, :] + shape_2d_300W[:, 1:-1, 2:, :])/4.0, 204 | shape_2d_300W[:, 1:-1, 1:-1, :], loss_type = self.shape_loss) 205 | else: 206 | G_loss_smoothness = tf.zeros(1) 207 | g_loss = g_loss + G_loss_smoothness 208 | 209 | G_landmark_loss = (tf.reduce_mean(tf.nn.l2_loss(landmark_u_300W - landmark_u_300W_labels )) + tf.reduce_mean(tf.nn.l2_loss(landmark_v_300W - landmark_v_300W_labels ))) / self.landmark_num / batch_size / 50 210 | 211 | if self.is_using_symetry: 212 | albedo_300W_flip = tf.map_fn(lambda img: tf.image.flip_left_right(img), albedo_300W) 213 | G_loss_symetry = norm_loss(tf.maximum(tf.abs(albedo_300W-albedo_300W_flip), 0.05), 0, loss_type = self.tex_loss) 214 | else: 215 | G_loss_symetry = tf.zeros(1) 216 | g_loss += G_loss_symetry 217 | 218 | if self.is_const_albedo: 219 | 220 | albedo_1 = get_pixel_value(albedo_300W, albedo_indexes_x1, albedo_indexes_y1) 221 | albedo_2 = get_pixel_value(albedo_300W, albedo_indexes_x2, albedo_indexes_y2) 222 | 223 | G_loss_albedo_const = 5*norm_loss( tf.maximum(tf.abs(albedo_1- albedo_2), 0.05), 0, loss_type = self.tex_loss) 224 | else: 225 | G_loss_albedo_const = tf.zeros(1) 226 | g_loss += G_loss_albedo_const 227 | 228 | if self.is_const_local_albedo: 229 | local_albedo_alpha = 0.9 230 | texture_300W_labels_chromaticity = (texture_300W_labels + 1.0)/2.0 231 | texture_300W_labels_chromaticity = tf.divide(texture_300W_labels_chromaticity, tf.reduce_sum(texture_300W_labels_chromaticity, axis=[-1], keep_dims=True) + 1e-6) 232 | 233 | 234 | w_u = tf.stop_gradient(tf.exp(-15*tf.norm( texture_300W_labels_chromaticity[:, :-1, :, :] - texture_300W_labels_chromaticity[:, 1:, :, :], ord='euclidean', axis=-1, keep_dims=True)) * texture_vis_mask[:, :-1, :, :] ) 235 | G_loss_local_albedo_const_u = tf.reduce_mean(norm_loss( albedo_300W[:, :-1, :, :], albedo_300W[:, 1:, :, :], loss_type = 'l2,1', reduce_mean=False, p=0.8) * w_u) / tf.reduce_sum(w_u+1e-6) 236 | 237 | 238 | w_v = tf.stop_gradient(tf.exp(-15*tf.norm( texture_300W_labels_chromaticity[:, :, :-1, :] 
- texture_300W_labels_chromaticity[:, :, 1:, :], ord='euclidean', axis=-1, keep_dims=True)) * texture_vis_mask[:, :, :-1, :] ) 239 | G_loss_local_albedo_const_v = tf.reduce_mean(norm_loss( albedo_300W[:, :, :-1, :], albedo_300W[:, :, 1:, :], loss_type = 'l2,1', reduce_mean=False, p=0.8) * w_v) / tf.reduce_sum(w_v+1e-6) 240 | 241 | G_loss_local_albedo_const = (G_loss_local_albedo_const_u + G_loss_local_albedo_const_v)*10 242 | else: 243 | G_loss_local_albedo_const = tf.zeros(1) 244 | g_loss += G_loss_local_albedo_const 245 | 246 | if self.is_using_recon: 247 | g_loss += G_loss_recon 248 | else: 249 | g_loss += G_loss_texture 250 | 251 | G_loss_frecon = tf.zeros(1) 252 | 253 | 254 | if self.is_using_landmark: 255 | g_loss_wlandmark = g_loss + G_landmark_loss 256 | else: 257 | g_loss_wlandmark = g_loss 258 | 259 | 260 | return g_loss, g_loss_wlandmark, G_loss_m, G_loss_shape, G_loss_texture, G_loss_recon, G_loss_frecon, G_landmark_loss, G_loss_symetry, G_loss_white_shading, G_loss_albedo_const, G_loss_smoothness, G_loss_local_albedo_const, \ 261 | G_images_300W, texture_300W, albedo_300W, shade_300W, texture_300W_labels, input_images_300W 262 | 263 | g_loss, g_loss_wlandmark, G_loss_m, G_loss_shape, G_loss_texture, G_loss_recon, G_loss_frecon, G_landmark_loss, G_loss_symetry, G_loss_white_shading, G_loss_albedo_const, G_loss_smoothness, G_loss_local_albedo_const, \ 264 | G_images_300W, texture_300W, albedo_300W, shade_300W, texture_300W_labels, input_images_300W \ 265 | = make_parallel(model_and_loss, self.gpu_num, 266 | input_images_fn_300W= self.input_images_fn_300W, input_masks_fn_300W=self.input_masks_fn_300W, 267 | texture_labels_fn_300W=self.texture_labels_fn_300W, texture_masks_fn_300W=self.texture_masks_fn_300W, 268 | input_offset_height=self.input_offset_height, input_offset_width=self.input_offset_width, 269 | m_300W_labels = self.m_300W_labels, shape_300W_labels=self.shape_300W_labels, 270 | albedo_indexes_x1= self.albedo_indexes_x1, albedo_indexes_y1 = self.albedo_indexes_y1, 271 | albedo_indexes_x2=self.albedo_indexes_x2, albedo_indexes_y2 = self.albedo_indexes_y2) 272 | 273 | self.G_loss = tf.reduce_mean(g_loss) 274 | self.G_loss_wlandmark = tf.reduce_mean(g_loss_wlandmark) 275 | self.G_loss_m = tf.reduce_mean(G_loss_m) 276 | self.G_loss_shape = tf.reduce_mean(G_loss_shape) 277 | self.G_loss_texture = tf.reduce_mean(G_loss_texture) 278 | self.G_loss_recon = tf.reduce_mean(G_loss_recon) 279 | self.G_loss_frecon = tf.reduce_mean(G_loss_frecon) 280 | self.G_landmark_loss = tf.reduce_mean(G_landmark_loss) 281 | self.G_loss_symetry = tf.reduce_mean(G_loss_symetry) 282 | self.G_loss_white_shading = tf.reduce_mean(G_loss_white_shading) 283 | self.G_loss_albedo_const = tf.reduce_mean(G_loss_albedo_const) 284 | self.G_loss_local_albedo_const = tf.reduce_mean(G_loss_local_albedo_const) 285 | self.G_loss_smoothness = tf.reduce_mean(G_loss_smoothness) 286 | 287 | self.G_images_300W = tf.clip_by_value(tf.concat(G_images_300W, axis=0), -1, 1) 288 | self.texture_300W = tf.clip_by_value(tf.concat(texture_300W, axis=0), -1, 1) 289 | self.albedo_300W = tf.concat(albedo_300W, axis=0) 290 | self.shade_300W = tf.concat(shade_300W, axis=0) 291 | self.texture_300W_labels = tf.concat(texture_300W_labels, axis=0) 292 | self.input_images_300W = tf.concat(input_images_300W, axis=0) 293 | 294 | 295 | 296 | t_vars = tf.trainable_variables() 297 | self.d_vars = [var for var in t_vars if 'd_' in var.name] 298 | self.g_vars = [var for var in t_vars if 'g_' in var.name] 299 | 300 | self.g_en_vars = [var for var 
in t_vars if 'g_k' in var.name]
301 |         self.g_tex_de_vars = [var for var in t_vars if 'g_h' in var.name]
302 |         self.g_shape_de_vars = [var for var in t_vars if 'g_s' in var.name]
303 |
304 |         self.saver = tf.train.Saver(keep_checkpoint_every_n_hours=1, max_to_keep = 10)
305 |
306 |
307 |     def setupParaStat(self):
308 |         self.tri = load_3DMM_tri()
309 |         self.vertex_tri = load_3DMM_vertex_tri()
310 |         self.vt2pixel_u, self.vt2pixel_v = load_3DMM_vt2pixel()
311 |         self.uv_tri, self.uv_mask = load_3DMM_tri_2d(with_mask = True)
312 |
313 |
314 |
315 |
316 |
317 |         # Basis
318 |         mu_shape, w_shape = load_Basel_basic('shape')
319 |         mu_exp, w_exp = load_Basel_basic('exp')
320 |
321 |         self.mean_shape = mu_shape + mu_exp
322 |         self.std_shape = np.tile(np.array([1e4, 1e4, 1e4]), self.vertexNum)
323 |         #self.std_shape = np.load('std_shape.npy')
324 |
325 |         self.mean_shape_tf = tf.constant(self.mean_shape, tf.float32)
326 |         self.std_shape_tf = tf.constant(self.std_shape, tf.float32)
327 |
328 |         self.mean_m = np.load('mean_m.npy')
329 |         self.std_m = np.load('std_m.npy')
330 |
331 |         self.mean_m_tf = tf.constant(self.mean_m, tf.float32)
332 |         self.std_m_tf = tf.constant(self.std_m, tf.float32)
333 |
334 |         self.w_shape = w_shape
335 |         self.w_exp = w_exp
336 |
337 |
338 |
339 |     def m2full(self, m):
340 |         return m * self.std_m_tf + self.mean_m_tf
341 |
342 |     def shape2full(self, shape):
343 |         return shape * self.std_shape_tf + self.mean_shape_tf
344 |
345 |
346 |
347 |     def setupTrainingData(self):
348 |         # Training data - 300W
349 |
350 |         dataset = ['AFW', 'AFW_Flip', 'HELEN', 'HELEN_Flip', 'IBUG', 'IBUG_Flip', 'LFPW', 'LFPW_Flip']
351 |         dataset_num = len(dataset)
352 |
353 |
354 |         images = [0] * dataset_num
355 |         pid = [0] * dataset_num
356 |         m = [0] * dataset_num
357 |         pose = [0] * dataset_num
358 |         shape = [0] * dataset_num
359 |         exp = [0] * dataset_num
360 |         tex_para = [0] * dataset_num
361 |         tex = [0] * dataset_num
362 |         il = [0] * dataset_num
363 |         alb = [0] * dataset_num
364 |         mask = [0] * dataset_num
365 |
366 |         for i in range(dataset_num):
367 |             images[i], pid[i], m[i], pose[i], shape[i], exp[i], tex_para[i], _ = load_300W_LP_dataset(dataset[i])
368 |
369 |
370 |         self.image_filenames = np.concatenate(images, axis=0)
371 |         images = None
372 |
373 |         all_m = np.concatenate(m, axis=0)
374 |
375 |         all_shape_para = np.concatenate(shape, axis=0)
376 |         all_exp_para = np.concatenate(exp, axis=0)
377 |         self.all_tex_para = np.concatenate(tex_para, axis=0)
378 |         self.pids_300W = np.concatenate(pid, axis=0)
379 |         #self.all_il = np.concatenate(il, axis=0)
380 |
381 |
382 |         self.all_m = np.divide(np.subtract(all_m, self.mean_m), self.std_m)
383 |
384 |         self.mean_shape_para = np.mean(all_shape_para, axis=0)
385 |         self.std_shape_para = np.std(all_shape_para, axis=0)
386 |         self.all_shape_para = all_shape_para #np.divide(np.subtract(all_shape_para, self.mean_shape_para), self.std_shape_para)
387 |
388 |
389 |         self.mean_exp_para = np.mean(all_exp_para, axis=0)
390 |         self.std_exp_para = np.std(all_exp_para, axis=0)
391 |         self.all_exp_para = all_exp_para #np.divide(np.subtract(all_exp_para, self.mean_exp_para), self.std_exp_para)
392 |
393 |         return
394 |
395 |
396 |
397 |
398 |
399 |     def train(self, config):
400 |
401 |         # Training data
402 |         self.setupTrainingData()
403 |
404 |         valid_idx = range(self.image_filenames.shape[0])
405 |         print("Valid images %d/%d" % ( len(valid_idx), self.image_filenames.shape[0] ))
406 |
407 |
408 |
409 |         np.random.shuffle(valid_idx)
410 |
411 |
412 |         # Using two separate optimizers, for with and without
landmark losses 413 | g_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1).minimize(self.G_loss, var_list=self.g_vars, colocate_gradients_with_ops=True) 414 | g_en_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1).minimize(self.G_loss_wlandmark, var_list=self.g_en_vars, colocate_gradients_with_ops=True) 415 | tf.global_variables_initializer().run() 416 | 417 | 418 | 419 | """Train DCGAN""" 420 | could_load, checkpoint_counter = self.load(self.checkpoint_dir) 421 | if could_load: 422 | epoch0 = checkpoint_counter + 1 423 | print(" [*] Load SUCCESS") 424 | else: 425 | epoch0 = 1 426 | print(" [!] Load failed...") 427 | 428 | 429 | start_time = time.time() 430 | 431 | for epoch in xrange(epoch0, config.epoch): 432 | 433 | batch_idxs = min(len(valid_idx), config.train_size) // config.batch_size 434 | 435 | for idx in xrange(0, batch_idxs): 436 | ''' 437 | Data processing. Create feed_dict 438 | ''' 439 | 440 | # 300W 441 | batch_idx = valid_idx[idx*config.batch_size:(idx+1)*config.batch_size] 442 | 443 | 444 | tx = np.random.random_integers(0, 32, size=config.batch_size) 445 | ty = np.random.random_integers(0, 32, size=config.batch_size) 446 | 447 | batch_300W_images_fn = [self.image_filenames[batch_idx[i]] for i in range(config.batch_size)] 448 | 449 | 450 | 451 | delta_m = np.zeros([config.batch_size, 8]) 452 | delta_m[:,6] = np.divide(ty, self.std_m[6]) 453 | delta_m[:,7] = np.divide(32 - tx, self.std_m[7]) 454 | 455 | 456 | batch_m = self.all_m[batch_idx,:] - delta_m 457 | 458 | batch_shape_para = self.all_shape_para[batch_idx,:] 459 | batch_exp_para = self.all_exp_para[batch_idx,:] 460 | 461 | batch_shape = np.divide( np.matmul(batch_shape_para, np.transpose(self.w_shape)) + np.matmul(batch_exp_para, np.transpose(self.w_exp)), self.std_shape) 462 | 463 | ffeed_dict={ self.m_300W_labels: batch_m, self.shape_300W_labels: batch_shape, self.input_offset_height: tx, self.input_offset_width: ty} 464 | for i in range(self.batch_size): 465 | ffeed_dict[self.input_images_fn_300W[i]] = _300W_LP_DIR + 'image/'+ batch_300W_images_fn[i] 466 | ffeed_dict[self.input_masks_fn_300W[i]] = _300W_LP_DIR + 'mask_img/'+ batch_300W_images_fn[i] 467 | ffeed_dict[self.texture_labels_fn_300W[i]] = _300W_LP_DIR + 'texture/'+ image2texture_fn(batch_300W_images_fn[i]) 468 | ffeed_dict[self.texture_masks_fn_300W[i]] = _300W_LP_DIR + 'mask/'+ image2texture_fn(batch_300W_images_fn[i]) 469 | 470 | if self.is_const_albedo: 471 | indexes1 = np.random.randint(low=0, high=self.const_alb_mask.shape[0], size=[self.batch_size* CONST_PIXELS_NUM]) 472 | indexes2 = np.random.randint(low=0, high=self.const_alb_mask.shape[0], size=[self.batch_size* CONST_PIXELS_NUM]) 473 | 474 | 475 | ffeed_dict[self.albedo_indexes_x1] = np.reshape(self.const_alb_mask[indexes1, 1], [self.batch_size, CONST_PIXELS_NUM, 1]) 476 | ffeed_dict[self.albedo_indexes_y1] = np.reshape(self.const_alb_mask[indexes1, 0], [self.batch_size, CONST_PIXELS_NUM, 1]) 477 | ffeed_dict[self.albedo_indexes_x2] = np.reshape(self.const_alb_mask[indexes2, 1], [self.batch_size, CONST_PIXELS_NUM, 1]) 478 | ffeed_dict[self.albedo_indexes_y2] = np.reshape(self.const_alb_mask[indexes2, 0], [self.batch_size, CONST_PIXELS_NUM, 1]) 479 | 480 | 481 | if np.mod(idx, 2) == 0: 482 | # Update G 483 | self.sess.run([g_optim], feed_dict=ffeed_dict) 484 | else: 485 | # Update G encoder only 486 | self.sess.run([g_en_optim], feed_dict=ffeed_dict) 487 | 488 | 489 | 490 | self.save(config.checkpoint_dir, epoch) 491 | 492 | 493 | 494 | 495 | def 
generator_encoder(self, image, is_reuse=False, is_training = True): 496 | 497 | ''' 498 | Create an encoder network 499 | 500 | Output: shape_fx, tex_fc, m, il 501 | 502 | ''' 503 | 504 | 505 | if not is_reuse: 506 | self.g_bn0_0 = batch_norm(name='g_k_bn0_0') 507 | self.g_bn0_1 = batch_norm(name='g_k_bn0_1') 508 | self.g_bn0_2 = batch_norm(name='g_k_bn0_2') 509 | self.g_bn0_3 = batch_norm(name='g_k_bn0_3') 510 | self.g_bn1_0 = batch_norm(name='g_k_bn1_0') 511 | self.g_bn1_1 = batch_norm(name='g_k_bn1_1') 512 | self.g_bn1_2 = batch_norm(name='g_k_bn1_2') 513 | self.g_bn1_3 = batch_norm(name='g_k_bn1_3') 514 | self.g_bn2_0 = batch_norm(name='g_k_bn2_0') 515 | self.g_bn2_1 = batch_norm(name='g_k_bn2_1') 516 | self.g_bn2_2 = batch_norm(name='g_k_bn2_2') 517 | self.g_bn2_3 = batch_norm(name='g_k_bn2_3') 518 | self.g_bn3_0 = batch_norm(name='g_k_bn3_0') 519 | self.g_bn3_1 = batch_norm(name='g_k_bn3_1') 520 | self.g_bn3_2 = batch_norm(name='g_k_bn3_2') 521 | self.g_bn3_3 = batch_norm(name='g_k_bn3_3') 522 | self.g_bn4_0 = batch_norm(name='g_k_bn4_0') 523 | self.g_bn4_1 = batch_norm(name='g_k_bn4_1') 524 | self.g_bn4_2 = batch_norm(name='g_k_bn4_2') 525 | self.g_bn4_c = batch_norm(name='g_h_bn4_c') 526 | self.g_bn5 = batch_norm(name='g_k_bn5') 527 | self.g_bn5_m = batch_norm(name='g_k_bn5_m') 528 | self.g_bn5_il = batch_norm(name='g_k_bn5_il') 529 | self.g_bn5_shape = batch_norm(name='g_k_bn5_shape') 530 | self.g_bn5_shape_linear = batch_norm(name='g_k_bn5_shape_linear') 531 | self.g_bn5_tex = batch_norm(name='g_k_bn5_tex') 532 | 533 | 534 | 535 | k0_1 = elu(self.g_bn0_1(conv2d(image, self.gf_dim*1, k_h=7, k_w=7, d_h=2, d_w =2, use_bias = False, name='g_k01_conv', reuse = is_reuse), train=is_training, reuse = is_reuse)) 536 | k0_2 = elu(self.g_bn0_2(conv2d(k0_1, self.gf_dim*2, d_h=1, d_w =1, use_bias = False, name='g_k02_conv', reuse = is_reuse), train=is_training, reuse = is_reuse)) 537 | 538 | k1_0 = elu(self.g_bn1_0(conv2d(k0_2, self.gf_dim*2, d_h=2, d_w =2, use_bias = False, name='g_k10_conv', reuse = is_reuse), train=is_training, reuse = is_reuse)) 539 | k1_1 = elu(self.g_bn1_1(conv2d(k1_0, self.gf_dim*2, d_h=1, d_w =1, use_bias = False, name='g_k11_conv', reuse = is_reuse), train=is_training, reuse = is_reuse)) 540 | k1_2 = elu(self.g_bn1_2(conv2d(k1_1, self.gf_dim*4, d_h=1, d_w =1, use_bias = False, name='g_k12_conv', reuse = is_reuse), train=is_training, reuse = is_reuse)) 541 | #k1_3 = maxpool2d(k1_2, k=2, padding='VALID') 542 | k2_0 = elu(self.g_bn2_0(conv2d(k1_2, self.gf_dim*4, d_h=2, d_w =2, use_bias = False, name='g_k20_conv', reuse = is_reuse), train=is_training, reuse = is_reuse)) 543 | k2_1 = elu(self.g_bn2_1(conv2d(k2_0, self.gf_dim*3, d_h=1, d_w =1, use_bias = False, name='g_k21_conv', reuse = is_reuse), train=is_training, reuse = is_reuse)) 544 | k2_2 = elu(self.g_bn2_2(conv2d(k2_1, self.gf_dim*6, d_h=1, d_w =1, use_bias = False, name='g_k22_conv', reuse = is_reuse), train=is_training, reuse = is_reuse)) 545 | #k2_3 = maxpool2d(k2_2, k=2, padding='VALID') 546 | k3_0 = elu(self.g_bn3_0(conv2d(k2_2, self.gf_dim*6, d_h=2, d_w =2, use_bias = False, name='g_k30_conv', reuse = is_reuse), train=is_training, reuse = is_reuse)) 547 | k3_1 = elu(self.g_bn3_1(conv2d(k3_0, self.gf_dim*4, d_h=1, d_w =1, use_bias = False, name='g_k31_conv', reuse = is_reuse), train=is_training, reuse = is_reuse)) 548 | k3_2 = elu(self.g_bn3_2(conv2d(k3_1, self.gf_dim*8, d_h=1, d_w =1, use_bias = False, name='g_k32_conv', reuse = is_reuse), train=is_training, reuse = is_reuse)) 549 | #k3_3 = maxpool2d(k3_2,
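The encoder above reaches its bottleneck through five stride-2 convolutions (g_k01, g_k10, g_k20, g_k30, g_k40), so a 224x224 crop arrives at the k4 block as a 7x7 grid before the average pooling further down collapses it to a vector. A quick standalone check of that arithmetic (illustrative only, not repo code):

import math

size = 224
for conv in ['g_k01_conv', 'g_k10_conv', 'g_k20_conv', 'g_k30_conv', 'g_k40_conv']:
    size = int(math.ceil(size / 2.0))  # 'SAME' padding with stride 2
print(size)  # 7 -- the k51_* maps that tf.nn.avg_pool below reduces to 1x1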
k=2, padding='VALID') 550 | k4_0 = elu(self.g_bn4_0(conv2d(k3_2, self.gf_dim*8, d_h=2, d_w =2, use_bias = False, name='g_k40_conv', reuse = is_reuse), train=is_training, reuse = is_reuse)) 551 | k4_1 = elu(self.g_bn4_1(conv2d(k4_0, self.gf_dim*5, d_h=1, d_w =1, use_bias = False, name='g_k41_conv', reuse = is_reuse), train=is_training, reuse = is_reuse)) 552 | 553 | 554 | # M 555 | k51_m = self.g_bn5_m( conv2d(k4_1, int(self.gfc_dim/5), d_h=1, d_w =1, name='g_k5_m_conv', reuse = is_reuse), train=is_training, reuse = is_reuse) 556 | k51_shape_ = get_shape(k51_m) 557 | k52_m = tf.nn.avg_pool(k51_m, ksize = [1, k51_shape_[1], k51_shape_[2], 1], strides = [1,1,1,1],padding = 'VALID') 558 | k52_m = tf.reshape(k52_m, [-1, int(self.gfc_dim/5)]) 559 | k6_m = linear(k52_m, self.mDim, 'g_k6_m_lin', reuse = is_reuse) 560 | 561 | # Il 562 | k51_il = self.g_bn5_il( conv2d(k4_1, int(self.gfc_dim/5), d_h=1, d_w =1, name='g_k5_il_conv', reuse = is_reuse), train=is_training, reuse = is_reuse) 563 | k52_il = tf.nn.avg_pool(k51_il, ksize = [1, k51_shape_[1], k51_shape_[2], 1], strides = [1,1,1,1],padding = 'VALID') 564 | k52_il = tf.reshape(k52_il, [-1, int(self.gfc_dim/5)]) 565 | k6_il = linear(k52_il, self.ilDim, 'g_k6_il_lin', reuse = is_reuse) 566 | 567 | # Shape 568 | k51_shape = self.g_bn5_shape(conv2d(k4_1, self.gfc_dim/2, d_h=1, d_w =1, name='g_k5_shape_conv', reuse = is_reuse), train=is_training, reuse = is_reuse) 569 | k52_shape = tf.nn.avg_pool(k51_shape, ksize = [1, k51_shape_[1], k51_shape_[2], 1], strides = [1,1,1,1],padding = 'VALID') 570 | k52_shape = tf.reshape(k52_shape, [-1, int(self.gfc_dim/2)]) 571 | 572 | # Albedo 573 | k51_tex = self.g_bn5_tex( conv2d(k4_1, self.gfc_dim/2, d_h=1, d_w =1, name='g_k5_tex_conv', reuse = is_reuse), train=is_training, reuse = is_reuse) 574 | k52_tex = tf.nn.avg_pool(k51_tex, ksize = [1, k51_shape_[1], k51_shape_[2], 1], strides = [1,1,1,1],padding = 'VALID') 575 | k52_tex = tf.reshape(k52_tex, [-1, int(self.gfc_dim/2)]) 576 | 577 | return k52_shape, k52_tex, k6_m, k6_il 578 | 579 | def generator_decoder_shape(self, k52_shape, is_reuse=False, is_training=True): 580 | if False: ## This is for the shape decoder as a fully-connected network (NOT FULLY COMPATIBLE WITH THE REST OF THE CODE) 581 | return self.generator_decoder_shape_1d(k52_shape, is_reuse, is_training) 582 | else: 583 | 584 | n_size = get_shape(k52_shape) 585 | n_size = n_size[0] 586 | 587 | vt2pixel_u, vt2pixel_v = load_3DMM_vt2pixel() 588 | 589 | 590 | #Vt2pix 591 | vt2pixel_u_const = tf.constant(vt2pixel_u[:-1], tf.float32) 592 | vt2pixel_v_const = tf.constant(vt2pixel_v[:-1], tf.float32) 593 | 594 | #if self.is_partbase_albedo: 595 | # shape_2d = self.generator_decoder_shape_2d_partbase(k52_shape, is_reuse, is_training) 596 | #else: 597 | # shape_2d = self.generator_decoder_shape_2d_v1(k52_shape, is_reuse, is_training) 598 | shape_2d = self.generator_decoder_shape_2d(k52_shape, is_reuse, is_training) 599 | 600 | vt2pixel_v_const_ = tf.tile(tf.reshape(vt2pixel_v_const, shape =[1,1,-1]), [n_size, 1,1]) 601 | vt2pixel_u_const_ = tf.tile(tf.reshape(vt2pixel_u_const, shape =[1,1,-1]), [n_size, 1,1]) 602 | 603 | shape_1d = tf.reshape(bilinear_sampler( shape_2d, vt2pixel_v_const_, vt2pixel_u_const_), shape=[n_size, -1]) 604 | 605 | return shape_1d, shape_2d 606 | 607 | 608 | def generator_decoder_shape_1d(self, shape_fx, is_reuse=False, is_training=True): 609 | s6 = elu(batch_norm(name='g_s_bn6')(linear(shape_fx, 1000, scope= 'g_s6_lin', reuse = is_reuse), train=is_training, reuse = is_reuse), name="g_s6_prelu") # fixed: previously referenced the undefined k52_shape and self.g1_bn6 610 | s7
= linear(s6, self.vertexNum*3, scope= 'g_s7_lin', reuse = is_reuse) 611 | 612 | return s7 613 | 614 | 615 | def generator_decoder_shape_2d(self, shape_fx, is_reuse=False, is_training=True): 616 | ''' 617 | Create shape decoder network 618 | Output: 3d_shape [N, (self.vertexNum*3)] 619 | ''' 620 | 621 | if not is_reuse: 622 | self.g2_bn0_0 = batch_norm(name='g_s_bn0_0') 623 | self.g2_bn0_1 = batch_norm(name='g_s_bn0_1') 624 | self.g2_bn0_2 = batch_norm(name='g_s_bn0_2') 625 | self.g2_bn1_0 = batch_norm(name='g_s_bn1_0') 626 | self.g2_bn1_1 = batch_norm(name='g_s_bn1_1') 627 | self.g2_bn1_2 = batch_norm(name='g_s_bn1_2') 628 | self.g2_bn2_0 = batch_norm(name='g_s_bn2_0') 629 | self.g2_bn2_1 = batch_norm(name='g_s_bn2_1') 630 | self.g2_bn2_2 = batch_norm(name='g_s_bn2_2') 631 | self.g2_bn3_0 = batch_norm(name='g_s_bn3_0') 632 | self.g2_bn3_1 = batch_norm(name='g_s_bn3_1') 633 | self.g2_bn3_2 = batch_norm(name='g_s_bn3_2') 634 | self.g2_bn4_0 = batch_norm(name='g_s_bn4_0') 635 | self.g2_bn4 = batch_norm(name='g_s_bn4') 636 | self.g2_bn5 = batch_norm(name='g_s_bn5') 637 | 638 | s_h = int(self.texture_size[0]) 639 | s_w = int(self.texture_size[1]) 640 | s32_h= int(s_h/32) 641 | s32_w= int(s_w/32) 642 | 643 | # project `z` and reshape 644 | h5 = linear(shape_fx, self.gfc_dim*s32_h*s32_w, scope= 'g_s5_lin', reuse = is_reuse) 645 | h5 = tf.reshape(h5, [-1, s32_h, s32_w, self.gfc_dim]) 646 | h5 = elu(self.g2_bn5(h5, train=is_training, reuse = is_reuse)) 647 | 648 | h4_1 = deconv2d(h5, self.gf_dim*5, name='g_s4', reuse = is_reuse) 649 | h4_1 = elu(self.g2_bn4(h4_1, train=is_training, reuse = is_reuse)) 650 | h4_0 = deconv2d(h4_1, self.gf_dim*8, strides=[1,1], name='g_s40', reuse = is_reuse) 651 | h4_0 = elu(self.g2_bn4_0(h4_0, train=is_training, reuse = is_reuse)) 652 | 653 | h3_2 = deconv2d(h4_0, self.gf_dim*8, strides=[2,2], name='g_s32', reuse = is_reuse) 654 | h3_2 = elu(self.g2_bn3_2(h3_2, train=is_training, reuse = is_reuse)) 655 | h3_1 = deconv2d(h3_2, self.gf_dim*4, strides=[1,1], name='g_s31', reuse = is_reuse) 656 | h3_1 = elu(self.g2_bn3_1(h3_1, train=is_training, reuse = is_reuse)) 657 | h3_0 = deconv2d(h3_1, self.gf_dim*6, strides=[1,1], name='g_s30', reuse = is_reuse) 658 | h3_0 = elu(self.g2_bn3_0(h3_0, train=is_training, reuse = is_reuse)) 659 | 660 | h2_2 = deconv2d(h3_0, self.gf_dim*6, strides=[2,2], name='g_s22', reuse = is_reuse) 661 | h2_2 = elu(self.g2_bn2_2(h2_2, train=is_training, reuse = is_reuse)) 662 | h2_1 = deconv2d(h2_2, self.gf_dim*3, strides=[1,1], name='g_s21', reuse = is_reuse) 663 | h2_1 = elu(self.g2_bn2_1(h2_1, train=is_training, reuse = is_reuse)) 664 | h2_0 = deconv2d(h2_1, self.gf_dim*4, strides=[1,1], name='g_s20', reuse = is_reuse) 665 | h2_0 = elu(self.g2_bn2_0(h2_0, train=is_training, reuse = is_reuse)) 666 | 667 | h1_2 = deconv2d(h2_0, self.gf_dim*4, strides=[2,2], name='g_s12', reuse = is_reuse) 668 | h1_2 = elu(self.g2_bn1_2(h1_2, train=is_training, reuse = is_reuse)) 669 | h1_1 = deconv2d(h1_2, self.gf_dim*2, strides=[1,1], name='g_s11', reuse = is_reuse) 670 | h1_1 = elu(self.g2_bn1_1(h1_1, train=is_training, reuse = is_reuse)) 671 | h1_0 = deconv2d(h1_1,self.gf_dim*2, strides=[1,1], name='g_s10', reuse = is_reuse) 672 | h1_0 = elu(self.g2_bn1_0(h1_0, train=is_training, reuse = is_reuse)) 673 | 674 | h0_2 = deconv2d(h1_0, self.gf_dim*2, strides=[2,2], name='g_s02', reuse = is_reuse) 675 | h0_2 = elu(self.g2_bn0_2(h0_2, train=is_training, reuse = is_reuse)) 676 | h0_1 = deconv2d(h0_2, self.gf_dim, strides=[1,1], name='g_s01', reuse = is_reuse) 677 | 
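generator_decoder_shape_2d above mirrors the encoder: the shape code is projected to a texture_size/32 grid (192/32 x 224/32 = 6x7) and five stride-2 deconvolutions (g_s4, g_s32, g_s22, g_s12, g_s02) restore the full 192x224 position map. A standalone sketch of the size bookkeeping (illustrative, not repo code):

h, w = 192, 224                    # texture_size
s32_h, s32_w = h // 32, w // 32    # 6 x 7 bottleneck grid
for deconv in ['g_s4', 'g_s32', 'g_s22', 'g_s12', 'g_s02']:  # the stride-2 layers
    s32_h, s32_w = s32_h * 2, s32_w * 2
print(s32_h, s32_w)                # 192 224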
h0_1 = elu(self.g2_bn0_1(h0_1, train=is_training, reuse = is_reuse)) 678 | 679 | h0 = 2*tf.nn.tanh(deconv2d(h0_1, self.c_dim, strides=[1,1], name='g_s0', reuse = is_reuse)) 680 | 681 | return h0 682 | 683 | 684 | 685 | def generator_decoder_albedo(self, tex_fx, is_reuse=False, is_training=True): 686 | ''' 687 | Create texture decoder network 688 | Output: uv_texture [N, self.texture_sz[0], self.texture_sz[1], self.c_dim] 689 | ''' 690 | 691 | if not is_reuse: 692 | self.g1_bn0_0 = batch_norm(name='g_h_bn0_0') 693 | self.g1_bn0_1 = batch_norm(name='g_h_bn0_1') 694 | self.g1_bn0_2 = batch_norm(name='g_h_bn0_2') 695 | self.g1_bn1_0 = batch_norm(name='g_h_bn1_0') 696 | self.g1_bn1_1 = batch_norm(name='g_h_bn1_1') 697 | self.g1_bn1_2 = batch_norm(name='g_h_bn1_2') 698 | self.g1_bn2_0 = batch_norm(name='g_h_bn2_0') 699 | self.g1_bn2_1 = batch_norm(name='g_h_bn2_1') 700 | self.g1_bn2_2 = batch_norm(name='g_h_bn2_2') 701 | self.g1_bn3_0 = batch_norm(name='g_h_bn3_0') 702 | self.g1_bn3_1 = batch_norm(name='g_h_bn3_1') 703 | self.g1_bn3_2 = batch_norm(name='g_h_bn3_2') 704 | self.g1_bn4_0 = batch_norm(name='g_h_bn4_0') 705 | self.g1_bn4 = batch_norm(name='g_h_bn4') 706 | self.g1_bn5 = batch_norm(name='g_h_bn5') 707 | #self.g1_bn6 = batch_norm(name='g_s_bn6') 708 | 709 | s_h = int(self.texture_size[0]) 710 | s_w = int(self.texture_size[1]) 711 | s32_h= int(s_h/32) 712 | s32_w= int(s_w/32) 713 | 714 | df = int(self.gf_dim) 715 | 716 | # project `z` and reshape 717 | h5 = linear(tex_fx, df*10*s32_h*s32_w, scope= 'g_h5_lin', reuse = is_reuse) 718 | h5 = tf.reshape(h5, [-1, s32_h, s32_w, df*10]) 719 | h5 = elu(self.g1_bn5(h5, train=is_training, reuse = is_reuse)) 720 | 721 | h4_1 = deconv2d(h5, df*5, name='g_h4', reuse = is_reuse) 722 | h4_1 = elu(self.g1_bn4(h4_1, train=is_training, reuse = is_reuse)) 723 | h4_0 = deconv2d(h4_1, df*8, strides=[1,1], name='g_h40', reuse = is_reuse) 724 | h4_0 = elu(self.g1_bn4_0(h4_0, train=is_training, reuse = is_reuse)) 725 | 726 | h3_2 = deconv2d(h4_0, df*8, strides=[2,2], name='g_h32', reuse = is_reuse) 727 | h3_2 = elu(self.g1_bn3_2(h3_2, train=is_training, reuse = is_reuse)) 728 | h3_1 = deconv2d(h3_2, df*4, strides=[1,1], name='g_h31', reuse = is_reuse) 729 | h3_1 = elu(self.g1_bn3_1(h3_1, train=is_training, reuse = is_reuse)) 730 | h3_0 = deconv2d(h3_1, df*6, strides=[1,1], name='g_h30', reuse = is_reuse) 731 | h3_0 = elu(self.g1_bn3_0(h3_0, train=is_training, reuse = is_reuse)) 732 | 733 | h2_2 = deconv2d(h3_0, df*6, strides=[2,2], name='g_h22', reuse = is_reuse) 734 | h2_2 = elu(self.g1_bn2_2(h2_2, train=is_training, reuse = is_reuse)) 735 | h2_1 = deconv2d(h2_2, df*3, strides=[1,1], name='g_h21', reuse = is_reuse) 736 | h2_1 = elu(self.g1_bn2_1(h2_1, train=is_training, reuse = is_reuse)) 737 | h2_0 = deconv2d(h2_1, df*4, strides=[1,1], name='g_h20', reuse = is_reuse) 738 | h2_0 = elu(self.g1_bn2_0(h2_0, train=is_training, reuse = is_reuse)) 739 | 740 | h1_2 = deconv2d(h2_0, df*4, strides=[2,2], name='g_h12', reuse = is_reuse) 741 | h1_2 = elu(self.g1_bn1_2(h1_2, train=is_training, reuse = is_reuse)) 742 | h1_1 = deconv2d(h1_2, df*2, strides=[1,1], name='g_h11', reuse = is_reuse) 743 | h1_1 = elu(self.g1_bn1_1(h1_1, train=is_training, reuse = is_reuse)) 744 | h1_0 = deconv2d(h1_1,df*2, strides=[1,1], name='g_h10', reuse = is_reuse) 745 | h1_0 = elu(self.g1_bn1_0(h1_0, train=is_training, reuse = is_reuse)) 746 | 747 | h0_2 = deconv2d(h1_0, df*2, strides=[2,2], name='g_h02', reuse = is_reuse) 748 | h0_2 = elu(self.g1_bn0_2(h0_2, train=is_training, reuse = 
is_reuse)) 749 | h0_1 = deconv2d(h0_2, df, strides=[1,1], name='g_h01', reuse = is_reuse) 750 | h0_1 = elu(self.g1_bn0_1(h0_1, train=is_training, reuse = is_reuse)) 751 | 752 | h0 = tf.nn.tanh(deconv2d(h0_1, self.c_dim, strides=[1,1], name='g_h0', reuse = is_reuse)) 753 | 754 | return h0 755 | 756 | 757 | @property 758 | def model_dir(self): 759 | return "" # "%s_%s_%s_%s_%s_%s_%s" % (self.dataset_name, self.batch_size, self.output_size, self.gf_dim, self.gfc_dim, self.df_dim, self.dfc_dim) 760 | 761 | def save(self, checkpoint_dir, step): 762 | model_name = "Nonlinear3DMM.model" 763 | checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir) 764 | 765 | if not os.path.exists(checkpoint_dir): 766 | os.makedirs(checkpoint_dir) 767 | 768 | self.saver.save(self.sess, os.path.join(checkpoint_dir, model_name), global_step=step) 769 | print(" Saved checkpoint %s-%d" % (os.path.join(checkpoint_dir, model_name), step)) 770 | 771 | def load(self, checkpoint_dir): 772 | import re 773 | print(" [*] Reading checkpoints...") 774 | checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir) 775 | 776 | ckpt = tf.train.get_checkpoint_state(checkpoint_dir) 777 | if ckpt and ckpt.model_checkpoint_path: 778 | ckpt_name = os.path.basename(ckpt.model_checkpoint_path) 779 | 780 | self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name)) 781 | counter = int(next(re.finditer("(\d+)(?!.*\d)",ckpt_name)).group(0)) 782 | print(" [*] Success to read {}".format(ckpt_name)) 783 | 784 | 785 | return True, counter 786 | else: 787 | print(" [*] Failed to find a checkpoint") 788 | 789 | return False, 0 790 | 791 | 792 | 793 | 794 | -------------------------------------------------------------------------------- /ops.py: -------------------------------------------------------------------------------- 1 | import math 2 | import numpy as np 3 | import tensorflow as tf 4 | 5 | from tensorflow.python.framework import ops 6 | 7 | from utils import * 8 | 9 | def make_parallel(fn, num_gpus, **kwargs): 10 | print("Making make_parallel for %d gpu(s)" % num_gpus) 11 | in_splits = {} 12 | for k, v in kwargs.items(): 13 | if type(v) == list: 14 | in_splits[k] = zip(*[iter(v)]*( int( len(v) / num_gpus ) )) 15 | else: 16 | in_splits[k] = tf.split(v, num_gpus) 17 | 18 | out_split = [] 19 | for i in range(num_gpus): 20 | with tf.device(tf.DeviceSpec(device_type="GPU", device_index=i)): 21 | with tf.variable_scope(tf.get_variable_scope(), reuse= i!=0):# tf.AUTO_REUSE): 22 | out_split.append(fn(**{k : v[i] for k, v in in_splits.items()})) 23 | 24 | output_num = len(out_split[0]) 25 | output = [] 26 | for i in range(output_num): 27 | output.append([]) 28 | for j in range(num_gpus): 29 | output[i].append(out_split[j][i]) 30 | 31 | return output 32 | 33 | class batch_norm(object): 34 | """Code modification of http://stackoverflow.com/a/33950177""" 35 | def __init__(self, epsilon=1e-5, momentum = 0.9, name="batch_norm"): 36 | with tf.variable_scope(name): 37 | self.epsilon = epsilon 38 | self.momentum = momentum 39 | self.name = name 40 | 41 | def __call__(self, x, train=True, reuse=False, trainable=True ): 42 | return tf.contrib.layers.batch_norm(x, 43 | decay=self.momentum, 44 | updates_collections=None, 45 | epsilon=self.epsilon, 46 | fused=True, 47 | scale=True, 48 | trainable=trainable, 49 | reuse = reuse, 50 | is_training=train, 51 | scope=self.name) 52 | 53 | def get_shape(tensor): 54 | static_shape = tensor.shape.as_list() 55 | dynamic_shape = tf.unstack(tf.shape(tensor)) 56 | dims = [s[1] if s[0] is None else s[0] 57 
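make_parallel above splits every keyword tensor along the batch axis, replays fn once per GPU inside a shared variable scope (reuse is turned on for every tower after the first), and regroups the results as one list per return value; build_model then concats or reduce_means those lists. A minimal sketch of the calling convention, assuming two visible GPUs and a toy loss_fn (both hypothetical, not part of this repo):

import tensorflow as tf

def loss_fn(x, y):
    # One shared linear layer per tower; the same weights are reused on each GPU.
    pred = tf.layers.dense(x, 1, name='g_tower_lin')
    return [tf.reduce_mean(tf.square(pred - y))]  # fn must return a list/tuple

x_ph = tf.placeholder(tf.float32, [8, 4])  # batch must divide evenly by num_gpus
y_ph = tf.placeholder(tf.float32, [8, 1])
losses = make_parallel(loss_fn, 2, x=x_ph, y=y_ph)  # losses[0] = [loss_gpu0, loss_gpu1]
total_loss = tf.reduce_mean(tf.stack(losses[0]))    # average the two tower losses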
| for s in zip(static_shape, dynamic_shape)] 58 | return dims 59 | 60 | def binary_cross_entropy(preds, targets, name=None): 61 | """Computes binary cross entropy given `preds`. 62 | 63 | For brevity, let `x = preds`, `z = targets`. The logistic loss is 64 | 65 | loss(x, z) = - sum_i (z[i] * log(x[i]) + (1 - z[i]) * log(1 - x[i])) 66 | 67 | Args: 68 | preds: A `Tensor` of type `float32` or `float64`. 69 | targets: A `Tensor` of the same type and shape as `preds`. 70 | """ 71 | eps = 1e-12 72 | with ops.op_scope([preds, targets], name, "bce_loss") as name: 73 | preds = ops.convert_to_tensor(preds, name="preds") 74 | targets = ops.convert_to_tensor(targets, name="targets") 75 | return tf.reduce_mean(-(targets * tf.log(preds + eps) + 76 | (1. - targets) * tf.log(1. - preds + eps))) 77 | 78 | def conv_cond_concat(x, y): 79 | """Concatenate conditioning vector on feature map axis.""" 80 | x_shapes = x.get_shape() 81 | y_shapes = y.get_shape() 82 | return tf.concat(axis=3, values=[x, y*tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])]) 83 | 84 | def conv2d(input_, output_dim, 85 | k_h=3, k_w=3, d_h=2, d_w=2, use_bias=True, stddev=0.02, 86 | name="conv2d", reuse = False): 87 | with tf.variable_scope(name, reuse = reuse): 88 | w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim], 89 | initializer=tf.truncated_normal_initializer(stddev=stddev)) 90 | conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME') 91 | 92 | if use_bias: 93 | biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0)) 94 | conv = tf.reshape(tf.nn.bias_add(conv, biases), get_shape(conv)) 95 | 96 | return conv 97 | 98 | def deconv2d(input_, output_shape, 99 | kernel_size=(3,3), strides=(2,2), stddev=0.02, use_bias = True, 100 | name="deconv2d", with_w=False, reuse = False): 101 | ''' 102 | with tf.variable_scope(name, reuse = reuse): 103 | # filter : [height, width, output_channels, in_channels] 104 | w = tf.get_variable('w', [k_h, k_h, output_shape[-1], input_.get_shape()[-1]], 105 | initializer=tf.random_normal_initializer(stddev=stddev)) 106 | 107 | try: 108 | deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape, 109 | strides=[1, d_h, d_w, 1]) 110 | 111 | # Support for versions of TensorFlow before 0.7.0 112 | except AttributeError: 113 | deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape, 114 | strides=[1, d_h, d_w, 1]) 115 | 116 | biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0)) 117 | deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape()) 118 | 119 | if with_w: 120 | return deconv, w, biases 121 | else: 122 | return deconv 123 | ''' 124 | if isinstance(output_shape, list): # fixed: was `type(output_shape) == list()`, which is always False 125 | output_shape = output_shape[-1] 126 | return tf.layers.conv2d_transpose(input_, output_shape, kernel_size, strides, padding='SAME', data_format='channels_last', activation=None, use_bias=use_bias, 127 | kernel_initializer=tf.random_normal_initializer(stddev=stddev), bias_initializer=tf.zeros_initializer(), 128 | trainable=True, name=name, reuse=reuse) 129 | 130 | def maxpool2d(x, k=2, padding='VALID'): 131 | return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding=padding) 132 | 133 | 134 | def prelu(x, name, reuse = False): 135 | shape = x.get_shape().as_list()[-1:] 136 | 137 | with tf.variable_scope(name, reuse = reuse): 138 | alphas = tf.get_variable('alpha', shape, tf.float32, 139 | initializer=tf.constant_initializer(value=0.2)) 140 | 141 | return tf.nn.relu(x) +
tf.multiply(alphas, (x - tf.abs(x))) * 0.5 142 | 143 | def relu(x, name='relu'): 144 | return tf.nn.relu(x, name) 145 | 146 | def lrelu(x, leak=0.2, name="lrelu"): 147 | return tf.maximum(x, leak*x) 148 | 149 | def elu(x, name='elu'): 150 | return tf.nn.elu(x, name) 151 | 152 | def linear(input_, output_size, scope="Linear", reuse = False, stddev=0.02, bias_start=0.0, with_w=False): 153 | shape = input_.get_shape().as_list() 154 | print(shape) 155 | 156 | with tf.variable_scope(scope or "Linear", reuse = reuse): 157 | matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32, 158 | tf.random_normal_initializer(stddev=stddev)) 159 | bias = tf.get_variable("bias", [output_size], 160 | initializer=tf.constant_initializer(bias_start)) 161 | if with_w: 162 | return tf.matmul(input_, matrix) + bias, matrix, bias 163 | else: 164 | return tf.matmul(input_, matrix) + bias 165 | 166 | def triplet_loss(anchor_output, positive_output, negative_output, margin = 0.2 ): 167 | d_pos = tf.reduce_mean(tf.square(anchor_output - positive_output), 1) 168 | d_neg = tf.reduce_mean(tf.square(anchor_output - negative_output), 1) 169 | 170 | loss = tf.maximum(0., margin + d_pos - d_neg) 171 | 172 | return loss 173 | 174 | def cosine_loss(anchor_output, positive_output): 175 | anchor_output_norm = tf.nn.l2_normalize(anchor_output, 1) 176 | positive_output_norm = tf.nn.l2_normalize(positive_output, 1) 177 | loss = 1 - tf.reduce_sum(tf.multiply(anchor_output_norm, positive_output_norm), 1) 178 | 179 | return loss 180 | 181 | def cosine_triplet_loss(anchor_output, positive_output, negative_output, margin = 0.2 ): 182 | anchor_output_norm = tf.nn.l2_normalize(anchor_output, 1) 183 | positive_output_norm = tf.nn.l2_normalize(positive_output, 1) 184 | negative_output_norm = tf.nn.l2_normalize(negative_output, 1) 185 | 186 | sim_pos = tf.reduce_sum(tf.multiply(anchor_output_norm, positive_output_norm), 1) 187 | sim_neg = tf.reduce_sum(tf.multiply(anchor_output_norm, negative_output_norm), 1) 188 | 189 | loss = tf.maximum(0., margin - sim_pos + sim_neg) 190 | 191 | return loss 192 | 193 | def norm_loss(predictions, labels, mask = None, loss_type = 'l1', reduce_mean = True, p=1): 194 | from tensorflow.python.ops import array_ops 195 | 196 | assert (loss_type in ['l1', 'l2', 'l2,1']), "Supported loss types are ['l1', 'l2', 'l2,1']" 197 | 198 | diff = predictions - labels 199 | if mask is not None: 200 | diff = tf.multiply(diff, mask) 201 | 202 | inputs_shape = array_ops.shape(diff) 203 | 204 | if loss_type == 'l1': 205 | loss = tf.reduce_sum( tf.abs(diff) ) 206 | 207 | elif loss_type == 'l2': 208 | loss = tf.nn.l2_loss(diff) 209 | 210 | elif loss_type == 'l2,1': 211 | #inputs_rank = tf.cast(labels.get_shape().ndims, tf.int32) 212 | #spatial_dims = array_ops.slice(inputs_shape, [1], [2]) 213 | #batch_dim = array_ops.slice(inputs_shape, [0], [1]) 214 | 215 | loss = tf.sqrt( tf.reduce_sum ( tf.square (diff) + 1e-16, axis = [-1] ) ) 216 | if p!= 1: 217 | loss = tf.pow(loss, p) 218 | loss = tf.reduce_sum(loss) 219 | 220 | if reduce_mean: 221 | numel = tf.cast(tf.reduce_prod(inputs_shape), diff.dtype) 222 | loss = tf.div(loss, numel) 223 | 224 | return loss 225 | 226 | def total_variation(images, mask, name=None): 227 | 228 | ndims = images.get_shape().ndims 229 | if ndims == 3: 230 | # The input is a single image with shape [height, width, channels]. 231 | 232 | # Calculate the difference of neighboring pixel-values. 233 | # The images are shifted one pixel along the height and width by slicing.
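For reference, the 'l2,1' branch of norm_loss above first takes a per-pixel L2 norm over the channel axis and only then sums over pixels (optionally raised to the power p, as the local albedo constancy terms use with p=0.8). A small hand check of that reduction (standalone sketch, requires only a TF1 session):

import tensorflow as tf

diff = tf.constant([[3.0, 4.0], [0.0, 0.0]])  # two "pixels", two channels
per_pixel = tf.sqrt(tf.reduce_sum(tf.square(diff) + 1e-16, axis=[-1]))  # [5.0, ~0.0]
l21 = tf.reduce_sum(per_pixel)
with tf.Session() as sess:
    # ~5.0, matching norm_loss(pred, label, loss_type='l2,1', reduce_mean=False)
    # when pred - label equals diff
    print(sess.run(l21))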
234 | pixel_dif1 = images[1:, :, :] - images[:-1, :, :] 235 | pixel_dif2 = images[:, 1:, :] - images[:, :-1, :] 236 | 237 | # Sum for all axis. (None is an alias for all axis.) 238 | sum_axis = None 239 | elif ndims == 4: 240 | # The input is a batch of images with shape: 241 | # [batch, height, width, channels]. 242 | 243 | # Calculate the difference of neighboring pixel-values. 244 | # The images are shifted one pixel along the height and width by slicing. 245 | pixel_dif1 = images[:, 1:, :, :] - images[:, :-1, :, :] 246 | pixel_dif2 = images[:, :, 1:, :] - images[:, :, :-1, :] 247 | 248 | # Only sum for the last 3 axis. 249 | # This results in a 1-D tensor with the total variation for each image. 250 | sum_axis = [1, 2, 3] 251 | else: 252 | raise ValueError('\'images\' must be either 3 or 4-dimensional.') 253 | 254 | pixel_dif1 = tf.multiply(pixel_dif1, mask[:, 1:, :, :]) 255 | pixel_dif2 = tf.multiply(pixel_dif2, mask[:, :, 1:, :]) 256 | 257 | # Calculate the total variation by taking the absolute value of the 258 | # pixel-differences and summing over the appropriate axis. 259 | tot_var = (tf.reduce_sum(tf.abs(pixel_dif1), axis=sum_axis) + 260 | tf.reduce_sum(tf.abs(pixel_dif2), axis=sum_axis)) 261 | 262 | return tot_var -------------------------------------------------------------------------------- /rendering_example.py: -------------------------------------------------------------------------------- 1 | from utils import * 2 | from rendering_ops import * 3 | import tensorflow as tf 4 | import numpy as np 5 | 6 | VERTEX_NUM = 53215 7 | 8 | def main(_): 9 | 10 | batch_size = 16 11 | output_size = 224 12 | texture_size = [192, 224] 13 | mDim = 8 14 | vertexNum = VERTEX_NUM 15 | channel_num = 3 16 | 17 | data = np.load('sample_data.npz') 18 | 19 | 20 | gpu_options = tf.GPUOptions(visible_device_list ="0", allow_growth = True) 21 | 22 | with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, gpu_options=gpu_options)) as sess: 23 | 24 | """ Graph """ 25 | m_ph = tf.placeholder(tf.float32, [batch_size, mDim]) 26 | shape_ph = tf.placeholder(tf.float32, [batch_size, vertexNum*3]) 27 | texture_ph = tf.placeholder(tf.float32, [batch_size]+texture_size +[channel_num]) 28 | images, foreground_mask = warp_texture(texture_ph, m_ph, shape_ph, output_size = output_size) 29 | 30 | s_img = sess.run( images, feed_dict={ texture_ph: data['sample_texture'], shape_ph:data['sample_shape'], m_ph:data['sample_m']}) 31 | 32 | save_images(s_img, [4, -1], './rendered_img.png') 33 | save_images(data['sample_texture'], [4, -1], './texture.png') 34 | 35 | 36 | 37 | 38 | if __name__ == '__main__': 39 | tf.app.run() 40 | -------------------------------------------------------------------------------- /rendering_example_dev.py: -------------------------------------------------------------------------------- 1 | from utils import * 2 | from rendering_ops import * 3 | import tensorflow as tf 4 | import numpy as np 5 | import time 6 | 7 | VERTEX_NUM = 53215 8 | 9 | def main(_): 10 | 11 | batch_size = 16 12 | output_size = 224 13 | texture_size = [192, 224] 14 | mDim = 8 15 | vertexNum = VERTEX_NUM 16 | channel_num = 3 17 | 18 | data = np.load('sample_data.npz') 19 | 20 | 21 | gpu_options = tf.GPUOptions(visible_device_list ="0", allow_growth = True) 22 | 23 | with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, gpu_options=gpu_options)) as sess: 24 | 25 | """ Graph """ 26 | m_ph = tf.placeholder(tf.float32, [batch_size, mDim]) 27 | shape_ph = 
tf.placeholder(tf.float32, [batch_size, vertexNum*3]) 28 | texture_ph = tf.placeholder(tf.float32, [batch_size]+texture_size +[channel_num]) 29 | 30 | image_ph = tf.placeholder(tf.float32, [batch_size, output_size, output_size, channel_num]) 31 | 32 | 33 | 34 | ''' 35 | normal_v1 = DEPRECATED_compute_normalf(m_ph, shape_ph, output_size=output_size) 36 | normal_v2 = NEW_compute_normalf(m_ph, shape_ph, output_size=output_size) 37 | 38 | s_land1, s_land2 = sess.run( [normal_v1, normal_v2], feed_dict={ shape_ph:data['sample_shape'], m_ph:data['sample_m']}) 39 | 40 | print('s_land1') 41 | print(s_land1.shape) 42 | print(s_land1[10,0:3,100:103]) 43 | 44 | print('s_land2') 45 | print(s_land2.shape) 46 | print(s_land2[10,0:3,100:103]) 47 | ''' 48 | 49 | 50 | 51 | 52 | 53 | 54 | #images_v1, foreground_mask_v1 = DEPRECATED_warp_texture(texture_ph, m_ph, shape_ph, output_size = output_size) 55 | images, foreground_mask = warp_texture(texture_ph, m_ph, shape_ph, output_size = output_size) 56 | 57 | #s_time = time.time() 58 | #for i in range(1): 59 | # s_img_v1 = sess.run( images_v1, feed_dict={ texture_ph: data['sample_texture'], shape_ph:data['sample_shape'], m_ph:data['sample_m']}) 60 | #print("Time landmark v1: %f" % (time.time() - s_time)) 61 | #save_images(s_img_v1, [4, -1], './rendered_img_v1.png') 62 | 63 | s_time = time.time() 64 | for i in range(1): 65 | s_img = sess.run( images, feed_dict={ texture_ph: data['sample_texture'], shape_ph:data['sample_shape'], m_ph:data['sample_m']}) 66 | print("Time landmark v2: %f" % (time.time() - s_time)) 67 | 68 | save_images(s_img, [4, -1], './rendered_img.png') 69 | 70 | 71 | 72 | 73 | 74 | #save_images(data['sample_texture'], [4, -1], './texture.png') 75 | 76 | 77 | 78 | ''' 79 | upwarped_texture, texture_mask = unwarp_texture(image_ph, m_ph, shape_ph, output_size=output_size) 80 | s_texture, s_mask = sess.run( [upwarped_texture, texture_mask], feed_dict={ image_ph: s_img, shape_ph:data['sample_shape'], m_ph:data['sample_m']}) 81 | save_images(s_mask, [4, -1], './texture_pred_mask.png') 82 | save_images(s_texture, [4, -1], './texture_pred.png') 83 | ''' 84 | 85 | ''' 86 | land1_x, land1_y = _DEPRECATED_compute_landmarks(m_ph, shape_ph, output_size=224) 87 | land2_x, land2_y = compute_landmarks(m_ph, shape_ph, output_size=224) 88 | s_land1, s_land2 = sess.run( [land1_y, land2_y], feed_dict={ shape_ph:data['sample_shape'], m_ph:data['sample_m']}) 89 | 90 | print('s_land1') 91 | print(s_land1.shape) 92 | print(s_land1[10,0:5]) 93 | 94 | print('s_land2') 95 | print(s_land2.shape) 96 | print(s_land2[10,0:5]) 97 | ''' 98 | 99 | ''' 100 | s_time = time.time() 101 | for i in range(100): 102 | sess.run( [land1_x, land1_y], feed_dict={ shape_ph:data['sample_shape'], m_ph:data['sample_m']}) 103 | print("Time landmark v1: %f" % (time.time() - s_time)) 104 | 105 | s_time = time.time() 106 | for i in range(100): 107 | sess.run( [land2_x, land2_y], feed_dict={ shape_ph:data['sample_shape'], m_ph:data['sample_m']}) 108 | print("Time landmark v2: %f" % (time.time() - s_time)) 109 | 110 | s_time = time.time() 111 | for i in range(100): 112 | sess.run( [land1_x, land1_y], feed_dict={ shape_ph:data['sample_shape'], m_ph:data['sample_m']}) 113 | print("Time landmark v1: %f" % (time.time() - s_time)) 114 | 115 | s_time = time.time() 116 | for i in range(100): 117 | sess.run( [land2_x, land2_y], feed_dict={ shape_ph:data['sample_shape'], m_ph:data['sample_m']}) 118 | print("Time landmark v2: %f" % (time.time() - s_time)) 119 | 120 | s_time = time.time() 121 | for i in 
range(100): 122 | sess.run( [land1_x, land1_y], feed_dict={ shape_ph:data['sample_shape'], m_ph:data['sample_m']}) 123 | print("Time landmark v1: %f" % (time.time() - s_time)) 124 | 125 | s_time = time.time() 126 | for i in range(100): 127 | sess.run( [land2_x, land2_y], feed_dict={ shape_ph:data['sample_shape'], m_ph:data['sample_m']}) 128 | print("Time landmark v2: %f" % (time.time() - s_time)) 129 | 130 | s_time = time.time() 131 | for i in range(100): 132 | sess.run( [land1_x, land1_y], feed_dict={ shape_ph:data['sample_shape'], m_ph:data['sample_m']}) 133 | print("Time landmark v1: %f" % (time.time() - s_time)) 134 | 135 | s_time = time.time() 136 | for i in range(100): 137 | sess.run( [land2_x, land2_y], feed_dict={ shape_ph:data['sample_shape'], m_ph:data['sample_m']}) 138 | print("Time landmark v2: %f" % (time.time() - s_time)) 139 | 140 | s_time = time.time() 141 | for i in range(1000): 142 | sess.run( [land1_x, land1_y], feed_dict={ shape_ph:data['sample_shape'], m_ph:data['sample_m']}) 143 | print("Time landmark v1: %f" % (time.time() - s_time)) 144 | 145 | s_time = time.time() 146 | for i in range(1000): 147 | sess.run( [land2_x, land2_y], feed_dict={ shape_ph:data['sample_shape'], m_ph:data['sample_m']}) 148 | print("Time landmark v2: %f" % (time.time() - s_time)) 149 | ''' 150 | 151 | 152 | 153 | 154 | if __name__ == '__main__': 155 | tf.app.run() 156 | -------------------------------------------------------------------------------- /rendering_ops.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | import os.path 5 | import numpy as np 6 | 7 | import tensorflow as tf 8 | from tensorflow.python.framework import ops 9 | 10 | from _3dmm_utils import * 11 | 12 | def get_shape(tensor): 13 | static_shape = tensor.shape.as_list() 14 | dynamic_shape = tf.unstack(tf.shape(tensor)) 15 | dims = [s[1] if s[0] is None else s[0] 16 | for s in zip(static_shape, dynamic_shape)] 17 | return dims 18 | 19 | _cuda_op_module_v2_sz224 = tf.load_op_library(os.path.join(tf.resource_loader.get_data_files_path(), 'TF_newop/cuda_op_kernel_v2_sz224.so')) 20 | zbuffer_tri_v2_sz224 = _cuda_op_module_v2_sz224.zbuffer_tri_v2_sz224 21 | 22 | def ZBuffer_Rendering_CUDA_op_v2_sz224(s2d, tri, vis): 23 | tri_map, zbuffer = zbuffer_tri_v2_sz224(s2d, tri, vis) 24 | return tri_map, zbuffer 25 | ops.NotDifferentiable("ZbufferTriV2Sz224") 26 | 27 | 28 | def warp_texture(texture, m, mshape, output_size=224): 29 | def flatten(x): 30 | return tf.reshape(x, [-1]) 31 | 32 | n_size = get_shape(texture) 33 | n_size = n_size[0] 34 | 35 | s = output_size 36 | 37 | # Tri, tri2vt 38 | tri = load_3DMM_tri() 39 | vertex_tri = load_3DMM_vertex_tri() 40 | vt2pixel_u, vt2pixel_v = load_3DMM_vt2pixel() 41 | 42 | tri2vt1_const = tf.constant(tri[0,:], tf.int32) 43 | tri2vt2_const = tf.constant(tri[1,:], tf.int32) 44 | tri2vt3_const = tf.constant(tri[2,:], tf.int32) 45 | 46 | tri_const = tf.constant(tri, tf.int32) 47 | vertex_tri_const = tf.constant(vertex_tri, tf.int32) 48 | 49 | 50 | #Vt2pix 51 | vt2pixel_u_const = tf.constant(vt2pixel_u, tf.float32) 52 | vt2pixel_v_const = tf.constant(vt2pixel_v, tf.float32) 53 | 54 | 55 | 56 | 57 | # Convert projection matrix into 4x3 matrices 58 | m = tf.reshape(m, [-1,4,2]) # batch_size x 4 x 2 59 | 60 | m_row1 = tf.nn.l2_normalize(m[:,0:3,0], axis = 1) 61 | m_row2 = tf.nn.l2_normalize(m[:,0:3,1], axis = 1) 62 | m_row3 = 
tf.pad(tf.cross(m_row1, m_row2), [[0,0],[0,1]], mode='CONSTANT', constant_values=0) 63 | m_row3 = tf.expand_dims(m_row3, axis=2) 64 | 65 | m = tf.concat([m, m_row3], axis = 2) # batch_size x 4 x 3 66 | 67 | 68 | 69 | 70 | vertex3d = tf.reshape( mshape, shape = [n_size, -1, 3] ) # batch_size x vertex_num x 3 71 | vertex4d = tf.concat(axis = 2, values = [vertex3d, tf.ones(get_shape(vertex3d)[0:2] +[1], tf.float32)]) # batch_size x vertex_num x 4 72 | 73 | 74 | vertex2d = tf.matmul(m, vertex4d, True, True) # batch_size x 3 x vertex_num 75 | vertex2d = tf.transpose(vertex2d, perm=[0,2,1]) # batch_size x vertex_num x 2 76 | 77 | 78 | 79 | normal, normalf = compute_normal(vertex3d, tri_const, vertex_tri_const) # normal: batch_size x vertex_num x 3 & normalf: batch_size x tri_num x 3 80 | normalf4d = tf.concat(axis = 2, values = [normalf, tf.ones(get_shape(normalf)[0:2] +[1], tf.float32)]) # batch_size x tri_num x 4 81 | 82 | 83 | rotated_normalf = tf.matmul(m, normalf4d, True, True) # batch_size x 3 x tri_num 84 | _, _, rotated_normalf_z = tf.split(axis=1, num_or_size_splits=3, value=rotated_normalf) # batch_size x 1 x tri_num 85 | 86 | visible_tri = tf.greater(rotated_normalf_z, 0) 87 | 88 | 89 | vertex2d_single = tf.split(axis = 0, num_or_size_splits = n_size, value = vertex2d) 90 | visible_tri_single = tf.split(axis = 0, num_or_size_splits = n_size, value = visible_tri) 91 | 92 | pixel_u = [] 93 | pixel_v = [] 94 | 95 | masks = [] 96 | 97 | u, v = tf.meshgrid( tf.linspace(0.0, output_size-1.0, output_size), tf.linspace(0.0, output_size-1.0, output_size)) 98 | u = flatten(u) 99 | v = flatten(v) 100 | 101 | 102 | for i in range(n_size): 103 | vertex2d_i = tf.squeeze(vertex2d_single[i], axis=0) # vertex_num x 3 104 | visible_tri_i = tf.squeeze(visible_tri_single[i], axis=0) # 1 x tri_num 105 | 106 | [vertex2d_u, vertex2d_v, vertex2d_z] = tf.split(axis=1, num_or_size_splits=3, value=vertex2d_i) 107 | vertex2d_u = vertex2d_u - 1 108 | vertex2d_v = s - vertex2d_v 109 | 110 | vertex2d_i = tf.concat(axis=1, values=[vertex2d_v, vertex2d_u, vertex2d_z]) 111 | vertex2d_i = tf.transpose(vertex2d_i) 112 | 113 | # Applying Z-buffer 114 | tri_map_2d, mask_i = ZBuffer_Rendering_CUDA_op_v2_sz224(vertex2d_i, tri_const, visible_tri_i) 115 | 116 | tri_map_2d_flat = tf.cast(tf.reshape(tri_map_2d, [-1]), 'int32') 117 | 118 | 119 | # Calculate barycentric coefficient 120 | vt1 = tf.gather( tri2vt1_const, tri_map_2d_flat ) 121 | vt2 = tf.gather( tri2vt2_const, tri_map_2d_flat ) 122 | vt3 = tf.gather( tri2vt3_const, tri_map_2d_flat ) 123 | 124 | 125 | pixel1_uu = flatten(tf.gather( vertex2d_u, vt1 )) 126 | pixel2_uu = flatten(tf.gather( vertex2d_u, vt2 )) 127 | pixel3_uu = flatten(tf.gather( vertex2d_u, vt3 )) 128 | 129 | pixel1_vv = flatten(tf.gather( vertex2d_v, vt1 )) 130 | pixel2_vv = flatten(tf.gather( vertex2d_v, vt2 )) 131 | pixel3_vv = flatten(tf.gather( vertex2d_v, vt3 )) 132 | c1, c2, c3 = barycentric(pixel1_uu, pixel2_uu, pixel3_uu, pixel1_vv, pixel2_vv, pixel3_vv, u, v) 133 | 134 | 135 | ## 136 | pixel1_u = tf.gather( vt2pixel_u_const, vt1 ) 137 | pixel2_u = tf.gather( vt2pixel_u_const, vt2 ) 138 | pixel3_u = tf.gather( vt2pixel_u_const, vt3 ) 139 | 140 | pixel1_v = tf.gather( vt2pixel_v_const, vt1 ) 141 | pixel2_v = tf.gather( vt2pixel_v_const, vt2 ) 142 | pixel3_v = tf.gather( vt2pixel_v_const, vt3 ) 143 | 144 | 145 | pixel_u_i = tf.reshape(pixel1_u * c1 + pixel2_u * c2 + pixel3_u* c3, [output_size, output_size]) 146 | pixel_v_i = tf.reshape(pixel1_v * c1 + pixel2_v * c2 + pixel3_v* c3, [output_size, 
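The 8-dim code m used throughout is a weak-perspective camera: reshaped to 4x2, each column holds a scaled rotation row plus a translation, and the missing third rotation row is recovered as the cross product above. (Note that vertex2d carries three values per vertex after the transpose: u, v, and depth z.) A numpy sketch of the same recovery for a single hypothetical frontal-pose m (values are illustrative only):

import numpy as np

m = np.array([[1.0, 0.0],      # r1_x, r2_x
              [0.0, 1.0],      # r1_y, r2_y
              [0.0, 0.0],      # r1_z, r2_z
              [112.0, 112.0]]) # t_u,  t_v
r1 = m[0:3, 0] / np.linalg.norm(m[0:3, 0])
r2 = m[0:3, 1] / np.linalg.norm(m[0:3, 1])
r3 = np.cross(r1, r2)          # what tf.cross(m_row1, m_row2) computes above
print(np.stack([r1, r2, r3]))  # full 3x3 rotation; identity for this m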
output_size]) 147 | 148 | 149 | pixel_u.append(pixel_u_i) 150 | pixel_v.append(pixel_v_i) 151 | 152 | masks.append(mask_i) 153 | 154 | images = bilinear_sampler(texture, pixel_v, pixel_u) 155 | masks = tf.stack(masks) 156 | 157 | return images, masks 158 | 159 | def barycentric(pixel1_u, pixel2_u, pixel3_u, pixel1_v, pixel2_v, pixel3_v, u, v): 160 | 161 | v0_u = pixel2_u - pixel1_u 162 | v0_v = pixel2_v - pixel1_v 163 | 164 | v1_u = pixel3_u - pixel1_u 165 | v1_v = pixel3_v - pixel1_v 166 | 167 | v2_u = u - pixel1_u 168 | v2_v = v - pixel1_v 169 | 170 | invDenom = 1.0/(v0_u * v1_v - v1_u * v0_v + 1e-6) 171 | c2 = (v2_u * v1_v - v1_u * v2_v) * invDenom 172 | c3 = (v0_u * v2_v - v2_u * v0_v) * invDenom 173 | c1 = 1.0 - c2 - c3 174 | 175 | return c1, c2, c3 176 | 177 | def barycentric_alternative(pixel1_u, pixel2_u, pixel3_u, pixel1_v, pixel2_v, pixel3_v, u, v): 178 | ''' 179 | More complicated version 180 | ''' 181 | v0_u = pixel2_u - pixel1_u 182 | v0_v = pixel2_v - pixel1_v 183 | 184 | v1_u = pixel3_u - pixel1_u 185 | v1_v = pixel3_v - pixel1_v 186 | 187 | v2_u = u - pixel1_u 188 | v2_v = v - pixel1_v 189 | 190 | d00 = v0_u * v0_u + v0_v*v0_v 191 | d01 = v0_u * v1_u + v0_v*v1_v 192 | d11 = v1_u * v1_u + v1_v*v1_v 193 | d20 = v2_u * v0_u + v2_v*v0_v 194 | d21 = v2_u * v1_u + v2_v*v1_v 195 | 196 | invDenom = 1.0 / (d00 * d11 - d01 * d01 + 1e-6) 197 | c3 = (d11 * d20 - d01 * d21) * invDenom 198 | c2 = (d00 * d21 - d01 * d20) * invDenom 199 | c1 = 1.0 - c2 - c3 200 | 201 | return c1, c2, c3 202 | 203 | 204 | def compute_normal(vertex, tri, vertex_tri): 205 | # Unit normals to the faces 206 | # Parameters: 207 | # vertex : batch_size x vertex_num x 3 208 | # tri : 3xtri_num 209 | # vertex_tri: T x vertex_num (T=8: maximum number of triangles a vertex can belong to) 210 | # Output 211 | # normal: batch_size x vertex_num x 3 212 | # normalf: batch_size x tri_num x 3 213 | 214 | vt1_indices, vt2_indices, vt3_indices = tf.split(tri, num_or_size_splits = 3, axis = 0) 215 | 216 | 217 | # Dimensions 218 | batch_size = tf.shape(vertex)[0] 219 | tri_num = tf.shape(tri)[1] 220 | vertex_num = tf.shape(vertex_tri)[1] 221 | T = tf.shape(vertex_tri)[0] 222 | 223 | 224 | # Create batch indices for tf.gather_nd 225 | batch_idx = tf.range(0, batch_size) 226 | batch_idx = tf.reshape(batch_idx, (batch_size, 1)) 227 | b = tf.tile(batch_idx, (1, tri_num)) 228 | 229 | k1 = tf.tile(vt1_indices, (batch_size, 1)) 230 | k2 = tf.tile(vt2_indices, (batch_size, 1)) 231 | k3 = tf.tile(vt3_indices, (batch_size, 1)) 232 | 233 | vt1_indices = tf.stack([b, k1], 2) 234 | vt2_indices = tf.stack([b, k2], 2) 235 | vt3_indices = tf.stack([b, k3], 2) 236 | 237 | 238 | # Compute triangle normals from their vertices' 3D locations 239 | vt1 = tf.gather_nd(vertex, vt1_indices) #batch_size x tri_num x 3 240 | vt2 = tf.gather_nd(vertex, vt2_indices) 241 | vt3 = tf.gather_nd(vertex, vt3_indices) 242 | 243 | 244 | normalf = tf.cross(vt2 - vt1, vt3 - vt1) 245 | normalf = tf.nn.l2_normalize(normalf, dim = 2) 246 | 247 | mask = tf.expand_dims(tf.tile( tf.expand_dims( tf.not_equal(vertex_tri, tri.shape[1] - 1), 2), multiples = [1, 1, 3]), 0) 248 | mask = tf.cast( mask, vertex.dtype ) 249 | 250 | 251 | # Compute vertex normals 252 | vertex_tri = tf.reshape(vertex_tri, shape = [1, -1]) 253 | 254 | b = tf.tile(batch_idx, (1, T * vertex_num)) 255 | k = tf.tile(vertex_tri, (batch_size, 1)) 256 | 257 | indices = tf.stack([b, k], 2) 258 | 259 | 260 | normal = tf.gather_nd(normalf, indices) 261 | normal = tf.reshape(normal, shape = [-1, T, vertex_num, 3])
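barycentric above solves the 2x2 system for c2 and c3 via Cramer's rule and recovers c1 from the partition of unity c1 + c2 + c3 = 1; the 1e-6 keeps degenerate (zero-area) screen triangles from dividing by zero. A quick numeric check (standalone, not repo code):

import numpy as np

# Triangle (0,0), (1,0), (0,1); query point (0.25, 0.25).
p1u, p2u, p3u = 0.0, 1.0, 0.0
p1v, p2v, p3v = 0.0, 0.0, 1.0
u = v = 0.25
v0u, v0v = p2u - p1u, p2v - p1v
v1u, v1v = p3u - p1u, p3v - p1v
v2u, v2v = u - p1u, v - p1v
inv = 1.0 / (v0u * v1v - v1u * v0v + 1e-6)
c2 = (v2u * v1v - v1u * v2v) * inv
c3 = (v0u * v2v - v2u * v0v) * inv
c1 = 1.0 - c2 - c3
print(c1, c2, c3)  # ~ (0.5, 0.25, 0.25); c1*P1 + c2*P2 + c3*P3 reproduces (u, v)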
262 | 263 | normal = tf.reduce_sum( tf.multiply( normal, mask ), axis = 1) 264 | normal = tf.nn.l2_normalize(normal, dim = 2) 265 | 266 | 267 | # Enforce that the normals point outward 268 | 269 | v = vertex - tf.reduce_mean(vertex,1, keepdims=True) 270 | s = tf.reduce_sum( tf.multiply(v, normal), 1, keepdims=True ) 271 | 272 | count_s_greater_0 = tf.count_nonzero( tf.greater(s, 0), axis=0, keepdims=True ) 273 | count_s_less_0 = tf.count_nonzero( tf.less(s, 0), axis=0, keepdims=True ) 274 | 275 | sign = 2 * tf.cast(tf.greater(count_s_greater_0, count_s_less_0), tf.float32) - 1 276 | normal = tf.multiply(normal, sign) 277 | normalf = tf.multiply(normalf, sign) 278 | 279 | 280 | return normal, normalf 281 | 282 | 283 | def compute_tri_normal(vertex,tri, vertex_tri): 284 | # Unit normals to the faces 285 | # vertex : 3xvertex_num 286 | # tri : 3xtri_num 287 | 288 | vertex = tf.transpose(vertex) 289 | 290 | vt1_indices, vt2_indices, vt3_indices = tf.split(tf.transpose(tri), num_or_size_splits = 3, axis = 1) 291 | 292 | vt1 = tf.gather_nd(vertex, vt1_indices) 293 | vt2 = tf.gather_nd(vertex, vt2_indices) 294 | vt3 = tf.gather_nd(vertex, vt3_indices) 295 | 296 | normalf = tf.cross(vt2 - vt1, vt3 - vt1) 297 | normalf = tf.nn.l2_normalize(normalf, dim = 1) 298 | 299 | return normalf 300 | 301 | compute_normal2 = compute_tri_normal 302 | 303 | 304 | def compute_landmarks(m, shape, output_size=224): 305 | # m: projection matrix [batch_size x (4x2)] 306 | # shape: 3D vertex locations [batch_size x (vertex_num x 3)] 307 | 308 | n_size = get_shape(m) 309 | n_size = n_size[0] 310 | 311 | s = output_size 312 | 313 | 314 | # Tri, tri2vt 315 | kpts = load_3DMM_kpts() 316 | kpts_num = kpts.shape[0] 317 | 318 | indices = np.zeros([n_size, kpts_num,2], np.int32) 319 | for i in range(n_size): 320 | indices[i,:,0] = i 321 | indices[i,:,1:2] = kpts 322 | 323 | indices = tf.constant(indices, tf.int32) 324 | 325 | kpts_const = tf.constant(kpts, tf.int32) 326 | 327 | vertex3d = tf.reshape( shape, shape = [n_size, -1, 3] ) # batch_size x vertex_num x 3 328 | vertex3d = tf.gather_nd(vertex3d, indices) # Keypoint selection # batch_size x kpts_num x 3 329 | vertex4d = tf.concat(axis = 2, values = [vertex3d, tf.ones(get_shape(vertex3d)[0:2] +[1], tf.float32)]) # batch_size x kpts_num x 4 330 | 331 | m = tf.reshape( m, shape = [n_size, 4, 2] ) 332 | vertex2d = tf.matmul(m, vertex4d, True, True) # batch_size x 2 x kpts_num 333 | vertex2d = tf.transpose(vertex2d, perm=[0,2,1]) # batch_size x kpts_num x 2 334 | 335 | [vertex2d_u, vertex2d_v] = tf.split(axis=2, num_or_size_splits=2, value=vertex2d) 336 | vertex2d_u = vertex2d_u - 1 337 | vertex2d_v = s - vertex2d_v 338 | 339 | return vertex2d_u, vertex2d_v 340 | 341 | 342 | 343 | def rotate_shape(m, mshape, output_size = 224): 344 | 345 | n_size = get_shape(m) 346 | n_size = n_size[0] 347 | 348 | m_single = tf.split(axis = 0, num_or_size_splits = n_size, value = m) 349 | shape_single = tf.split(axis = 0, num_or_size_splits = n_size, value = mshape) 350 | 351 | vertex2ds = [] 352 | 353 | for i in range(n_size): 354 | 355 | m_i = tf.transpose(tf.reshape(m_single[i], [4,2])) 356 | m_i_row1 = tf.nn.l2_normalize(m_i[0,0:3], dim = 0) 357 | m_i_row2 = tf.nn.l2_normalize(m_i[1,0:3], dim = 0) 358 | m_i_row3 = tf.concat([tf.reshape(tf.cross(m_i_row1, m_i_row2), shape = [1, 3]), tf.zeros([1, 1])], axis = 1) 359 | 360 | m_i = tf.concat([m_i, m_i_row3], axis = 0) 361 | 362 | vertex3d_rs = tf.transpose(tf.reshape( shape_single[i], shape = [-1, 3] )) 363 | 364 | vertex4d = tf.concat(axis = 0,
values = [vertex3d_rs, tf.ones([1, get_shape(vertex3d_rs)[1]], tf.float32)]) 365 | 366 | vertex2d = tf.matmul(m_i, vertex4d, False, False) 367 | vertex2d = tf.transpose(vertex2d) 368 | 369 | [vertex2d_u, vertex2d_v, vertex2d_z] = tf.split(axis=1, num_or_size_splits=3, value=vertex2d) 370 | vertex2d_u = vertex2d_u - 1 371 | vertex2d_v = output_size - vertex2d_v 372 | 373 | vertex2d = tf.concat(axis=1, values=[vertex2d_v, vertex2d_u, vertex2d_z]) 374 | vertex2d = tf.transpose(vertex2d) 375 | 376 | vertex2ds.append(vertex2d) 377 | 378 | return tf.stack(vertex2ds) 379 | 380 | 381 | def get_pixel_value(img, x, y): 382 | """ 383 | Utility function to get pixel value for coordinate 384 | vectors x and y from a 4D tensor image. 385 | Input 386 | ----- 387 | - img: tensor of shape (B, H, W, C) 388 | - x: flattened tensor of shape (B*H*W, ) 389 | - y: flattened tensor of shape (B*H*W, ) 390 | Returns 391 | ------- 392 | - output: tensor of shape (B, H, W, C) 393 | """ 394 | shape = tf.shape(x) 395 | batch_size = shape[0] 396 | height = shape[1] 397 | width = shape[2] 398 | 399 | 400 | batch_idx = tf.range(0, batch_size) 401 | batch_idx = tf.reshape(batch_idx, (batch_size, 1, 1)) 402 | b = tf.tile(batch_idx, (1, height, width)) 403 | 404 | 405 | indices = tf.stack([b, y, x], 3) 406 | 407 | return tf.gather_nd(img, indices) 408 | 409 | 410 | 411 | def bilinear_sampler(img, x, y): 412 | """ 413 | Performs bilinear sampling of the input images according to the 414 | normalized coordinates provided by the sampling grid. Note that 415 | the sampling is done identically for each channel of the input. 416 | To test if the function works properly, output image should be 417 | identical to input image when theta is initialized to identity 418 | transform. 419 | Input 420 | ----- 421 | - img: batch of images in (B, H, W, C) layout. 422 | - grid: x, y which is the output of affine_grid_generator. 423 | Returns 424 | ------- 425 | - interpolated images according to grids. Same size as grid. 426 | """ 427 | # prepare useful params 428 | B = tf.shape(img)[0] 429 | H = tf.shape(img)[1] 430 | W = tf.shape(img)[2] 431 | C = tf.shape(img)[3] 432 | 433 | max_y = tf.cast(H - 1, 'int32') 434 | max_x = tf.cast(W - 1, 'int32') 435 | zero = tf.zeros([], dtype='int32') 436 | 437 | # cast indices as float32 (for rescaling) 438 | x = tf.cast(x, 'float32') 439 | y = tf.cast(y, 'float32') 440 | 441 | # grab 4 nearest corner points for each (x_i, y_i) 442 | # i.e. 
we need a rectangle around the point of interest 443 | x0 = tf.cast(tf.floor(x), 'int32') 444 | x1 = x0 + 1 445 | y0 = tf.cast(tf.floor(y), 'int32') 446 | y1 = y0 + 1 447 | 448 | # clip to range [0, H/W] to not violate img boundaries 449 | x0 = tf.clip_by_value(x0, zero, max_x) 450 | x1 = tf.clip_by_value(x1, zero, max_x) 451 | y0 = tf.clip_by_value(y0, zero, max_y) 452 | y1 = tf.clip_by_value(y1, zero, max_y) 453 | 454 | # get pixel value at corner coords 455 | Ia = get_pixel_value(img, x0, y0) 456 | Ib = get_pixel_value(img, x0, y1) 457 | Ic = get_pixel_value(img, x1, y0) 458 | Id = get_pixel_value(img, x1, y1) 459 | 460 | # recast as float for delta calculation 461 | x0 = tf.cast(x0, 'float32') 462 | x1 = tf.cast(x1, 'float32') 463 | y0 = tf.cast(y0, 'float32') 464 | y1 = tf.cast(y1, 'float32') 465 | 466 | # calculate deltas 467 | wa = (x1-x) * (y1-y) 468 | wb = (x1-x) * (y-y0) 469 | wc = (x-x0) * (y1-y) 470 | wd = (x-x0) * (y-y0) 471 | 472 | # add dimension for addition 473 | wa = tf.expand_dims(wa, axis=3) 474 | wb = tf.expand_dims(wb, axis=3) 475 | wc = tf.expand_dims(wc, axis=3) 476 | wd = tf.expand_dims(wd, axis=3) 477 | 478 | # compute output 479 | out = tf.add_n([wa*Ia, wb*Ib, wc*Ic, wd*Id]) 480 | return out 481 | 482 | 483 | def generate_shade(il, m, mshape, texture_size = [192, 224], is_with_normal=False): 484 | ''' 485 | print("get_shape(il) ") 486 | print(get_shape(il) ) 487 | print("get_shape(m) ") 488 | print(get_shape(m) ) 489 | print("get_shape(mshape) ") 490 | print(get_shape(mshape) ) 491 | ''' 492 | 493 | n_size = get_shape(il) 494 | n_size = n_size[0] 495 | 496 | # Tri, tri2vt 497 | tri = load_3DMM_tri() 498 | vertex_tri = load_3DMM_vertex_tri() 499 | vt2pixel_u, vt2pixel_v = load_3DMM_vt2pixel() 500 | tri_2d = load_3DMM_tri_2d() 501 | tri_2d_barycoord = load_3DMM_tri_2d_barycoord() 502 | 503 | 504 | 505 | tri_const = tf.constant(tri, tf.int32) 506 | vertex_tri_const = tf.constant(vertex_tri, tf.int32) 507 | 508 | 509 | tri_2d_const = tf.constant(tri_2d, tf.int32) 510 | tri_2d_const_flat = tf.reshape(tri_2d_const, shape=[-1,1]) 511 | 512 | tri2vt1_const = tf.constant(tri[0,:], tf.int32) 513 | tri2vt2_const = tf.constant(tri[1,:], tf.int32) 514 | tri2vt3_const = tf.constant(tri[2,:], tf.int32) 515 | 516 | vt1 = tf.gather( tri2vt1_const, tri_2d_const_flat ) 517 | vt2 = tf.gather( tri2vt2_const, tri_2d_const_flat ) 518 | vt3 = tf.gather( tri2vt3_const, tri_2d_const_flat ) 519 | 520 | vt1_coeff = tf.reshape(tf.constant(tri_2d_barycoord[:,:,0], tf.float32), shape=[-1,1]) 521 | vt2_coeff = tf.reshape(tf.constant(tri_2d_barycoord[:,:,1], tf.float32), shape=[-1,1]) 522 | vt3_coeff = tf.reshape(tf.constant(tri_2d_barycoord[:,:,2], tf.float32), shape=[-1,1]) 523 | 524 | 525 | 526 | #mshape = mshape * tf.constant(self.std_shape) + tf.constant(self.mean_shape) 527 | 528 | m_single = tf.split(axis = 0, num_or_size_splits = n_size, value = m) 529 | shape_single = tf.split(axis = 0, num_or_size_splits = n_size, value = mshape) 530 | 531 | #def get_normal_flat(shape_single): 532 | # vertex3d_rs = tf.transpose(tf.reshape( shape_single, shape = [-1, 3] )) 533 | # normal, normalf = compute_normal(vertex3d_rs, tri_const, vertex_tri_const) 534 | # normalf_flat = tf.gather_nd(normalf, tri_2d_const_flat) 535 | # normalf_flats.append(normalf_flat) 536 | 537 | 538 | #normalf_flats = tf.map_fn( lambda ss: get_normal_flat(ss), shape_single ) 539 | 540 | normalf_flats = [] 541 | for i in range(n_size): 542 | m_i = tf.transpose(tf.reshape(m_single[i], [4,2])) 543 | 544 | m_i_row1 = 
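The four weights in bilinear_sampler above are the areas of the sub-rectangles opposite each corner, so they sum to 1 for any query point and the sampler returns the exact pixel whenever (x, y) is integral. A scalar sanity check (standalone, not repo code):

x, y = 2.3, 5.8
x0, y0 = float(int(x)), float(int(y))  # floor, for positive coordinates
x1, y1 = x0 + 1, y0 + 1
wa = (x1 - x) * (y1 - y)  # weight of corner (x0, y0)
wb = (x1 - x) * (y - y0)  # weight of corner (x0, y1)
wc = (x - x0) * (y1 - y)  # weight of corner (x1, y0)
wd = (x - x0) * (y - y0)  # weight of corner (x1, y1)
print(wa + wb + wc + wd)  # 1.0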
tf.nn.l2_normalize(m_i[0,0:3], dim = 0) 545 | m_i_row2 = tf.nn.l2_normalize(m_i[1,0:3], dim = 0) 546 | m_i_row3 = tf.cross(m_i_row1, m_i_row2) 547 | m_i = tf.concat([ tf.expand_dims(m_i_row1, 0), tf.expand_dims(m_i_row2, 0), tf.expand_dims(m_i_row3, 0)], axis = 0) 548 | 549 | 550 | 551 | 552 | ''' 553 | m_i_row1 = tf.nn.l2_normalize(m_i[0,0:3], dim = 0) 554 | m_i_row2 = tf.nn.l2_normalize(m_i[1,0:3], dim = 0) 555 | m_i_row3 = tf.concat([tf.reshape(tf.cross(m_i_row1, m_i_row2), shape = [1, 3]), tf.zeros([1, 1])], axis = 1) 556 | 557 | m_i = tf.concat([m_i, m_i_row3], axis = 0) 558 | print('m_i.shape()') 559 | print(m_i.get_shape()) 560 | ''' 561 | 562 | vertex3d_rs = tf.transpose(tf.reshape( shape_single[i], shape = [-1, 3] )) 563 | 564 | normal, normalf = _DEPRECATED_compute_normal(vertex3d_rs, tri_const, vertex_tri_const) 565 | 566 | 567 | ### 568 | ''' 569 | normalf = tf.transpose(normalf) 570 | rotated_normalf = tf.matmul(m_i, normalf, False, False) 571 | rotated_normalf = tf.transpose(rotated_normalf) 572 | 573 | normalf_flat = tf.gather_nd(rotated_normalf, tri_2d_const_flat) 574 | normalf_flats.append(normalf_flat) 575 | ''' 576 | 577 | 578 | 579 | 580 | ### 581 | normal = tf.transpose(normal) 582 | rotated_normal = tf.matmul(m_i, normal, False, False) 583 | rotated_normal = tf.transpose(rotated_normal) 584 | normal_flat_vt1 = tf.gather_nd(rotated_normal, vt1) 585 | normal_flat_vt2 = tf.gather_nd(rotated_normal, vt2) 586 | normal_flat_vt3 = tf.gather_nd(rotated_normal, vt3) 587 | 588 | normalf_flat = normal_flat_vt1*vt1_coeff + normal_flat_vt2*vt2_coeff + normal_flat_vt3*vt3_coeff 589 | normalf_flats.append(normalf_flat) 590 | 591 | 592 | 593 | 594 | normalf_flats = tf.stack(normalf_flats) 595 | 596 | #print("normalf_flats.get_shape()") 597 | #print(normalf_flats.get_shape()) 598 | 599 | #print("il.get_shape()") 600 | #print(il.get_shape()) 601 | 602 | shade = shading(il, normalf_flats) 603 | 604 | #print("shade.get_shape()") 605 | #print(shade.get_shape()) 606 | 607 | if is_with_normal: 608 | return tf.reshape(shade, shape = [-1, texture_size[0], texture_size[1], 3]), tf.reshape(normalf_flats, shape = [-1, texture_size[0], texture_size[1], 3]), 609 | 610 | 611 | 612 | return tf.reshape(shade, shape = [-1, texture_size[0], texture_size[1], 3]) 613 | 614 | 615 | 616 | def shading(L, normal): 617 | 618 | 619 | shape = normal.get_shape().as_list() 620 | 621 | normal_x, normal_y, normal_z = tf.split(tf.expand_dims(normal, -1), axis=2, num_or_size_splits=3) 622 | pi = math.pi 623 | 624 | sh=[0]*9 625 | sh[0] = 1/math.sqrt(4*pi) * tf.ones_like(normal_x) 626 | sh[1] = ((2*pi)/3)*(math.sqrt(3/(4*pi)))* normal_z 627 | sh[2] = ((2*pi)/3)*(math.sqrt(3/(4*pi)))* normal_y 628 | sh[3] = ((2*pi)/3)*(math.sqrt(3/(4*pi)))* normal_x 629 | sh[4] = (pi/4)*(1/2)*(math.sqrt(5/(4*pi)))*(2*tf.square(normal_z)-tf.square(normal_x)-tf.square(normal_y)) 630 | sh[5] = (pi/4)*(3) *(math.sqrt(5/(12*pi)))*(normal_y*normal_z) 631 | sh[6] = (pi/4)*(3) *(math.sqrt(5/(12*pi)))*(normal_x*normal_z) 632 | sh[7] = (pi/4)*(3) *(math.sqrt(5/(12*pi)))*(normal_x*normal_y) 633 | sh[8] = (pi/4)*(3/2)*(math.sqrt(5/(12*pi)))*( tf.square(normal_x)-tf.square(normal_y)) 634 | 635 | sh = tf.concat(sh, axis=3) 636 | print('sh.get_shape()') 637 | print(sh.get_shape()) 638 | 639 | L1, L2, L3 = tf.split(L, num_or_size_splits = 3, axis=1) 640 | L1 = tf.expand_dims(L1, 1) 641 | L1 = tf.tile(L1, multiples=[1, shape[1], 1] ) 642 | L1 = tf.expand_dims(L1, -1) 643 | 644 | L2 = tf.expand_dims(L2, 1) 645 | L2 = tf.tile(L2, multiples=[1, shape[1], 
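shading above implements the standard 9-term spherical-harmonics irradiance model: for every texel, the SH basis is evaluated at the (rotated) normal and dotted with the 9 lighting coefficients per color channel carried in il (27 numbers in total). A single-normal numpy version of the same basis, assuming a unit-length normal (the light vector L_channel below is hypothetical):

import math
import numpy as np

def sh_basis(nx, ny, nz):
    pi = math.pi
    return np.array([
        1.0 / math.sqrt(4 * pi),
        (2 * pi / 3) * math.sqrt(3 / (4 * pi)) * nz,
        (2 * pi / 3) * math.sqrt(3 / (4 * pi)) * ny,
        (2 * pi / 3) * math.sqrt(3 / (4 * pi)) * nx,
        (pi / 4) * (1 / 2.0) * math.sqrt(5 / (4 * pi)) * (2 * nz ** 2 - nx ** 2 - ny ** 2),
        (pi / 4) * 3 * math.sqrt(5 / (12 * pi)) * ny * nz,
        (pi / 4) * 3 * math.sqrt(5 / (12 * pi)) * nx * nz,
        (pi / 4) * 3 * math.sqrt(5 / (12 * pi)) * nx * ny,
        (pi / 4) * (3 / 2.0) * math.sqrt(5 / (12 * pi)) * (nx ** 2 - ny ** 2),
    ])

L_channel = np.zeros(9); L_channel[0] = 1.0    # hypothetical ambient-only light
print(sh_basis(0.0, 0.0, 1.0).dot(L_channel))  # shade of a camera-facing texel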
1] ) 646 | L2 = tf.expand_dims(L2, -1) 647 | 648 | L3 = tf.expand_dims(L3, 1) 649 | L3 = tf.tile(L3, multiples=[1, shape[1], 1] ) 650 | L3 = tf.expand_dims(L3, -1) 651 | 652 | print('L1.get_shape()') 653 | print(L1.get_shape()) 654 | 655 | B1 = tf.matmul(sh, L1) 656 | B2 = tf.matmul(sh, L2) 657 | B3 = tf.matmul(sh, L3) 658 | 659 | B = tf.squeeze(tf.concat([B1, B2, B3], axis = 2)) 660 | 661 | return B 662 | 663 | 664 | ## _DEPRECATED_ 665 | 666 | def _DEPRECATED_warp_texture(texture, m, mshape, output_size=224): 667 | def flatten(x): 668 | return tf.reshape(x, [-1]) 669 | 670 | n_size = get_shape(texture) 671 | n_size = n_size[0] 672 | 673 | s = output_size 674 | 675 | # Tri, tri2vt 676 | tri = load_3DMM_tri() 677 | vertex_tri = load_3DMM_vertex_tri() 678 | vt2pixel_u, vt2pixel_v = load_3DMM_vt2pixel() 679 | 680 | tri2vt1_const = tf.constant(tri[0,:], tf.int32) 681 | tri2vt2_const = tf.constant(tri[1,:], tf.int32) 682 | tri2vt3_const = tf.constant(tri[2,:], tf.int32) 683 | 684 | tri_const = tf.constant(tri, tf.int32) 685 | vertex_tri_const = tf.constant(vertex_tri, tf.int32) 686 | 687 | 688 | #Vt2pix 689 | vt2pixel_u_const = tf.constant(vt2pixel_u, tf.float32) 690 | vt2pixel_v_const = tf.constant(vt2pixel_v, tf.float32) 691 | 692 | 693 | m_single = tf.split(axis = 0, num_or_size_splits = n_size, value = m) 694 | shape_single = tf.split(axis = 0, num_or_size_splits = n_size, value = mshape) 695 | 696 | pixel_u = [] 697 | pixel_v = [] 698 | 699 | masks = [] 700 | 701 | u, v = tf.meshgrid( tf.linspace(0.0, output_size-1.0, output_size), tf.linspace(0.0, output_size-1.0, output_size)) 702 | u = flatten(u) 703 | v = flatten(v) 704 | 705 | for i in range(n_size): 706 | 707 | m_i = tf.transpose(tf.reshape(m_single[i], [4,2])) 708 | m_i_row1 = tf.nn.l2_normalize(m_i[0,0:3], dim = 0) 709 | m_i_row2 = tf.nn.l2_normalize(m_i[1,0:3], dim = 0) 710 | m_i_row3 = tf.concat([tf.reshape(tf.cross(m_i_row1, m_i_row2), shape = [1, 3]), tf.zeros([1, 1])], axis = 1) 711 | 712 | m_i = tf.concat([m_i, m_i_row3], axis = 0) 713 | 714 | # Compute 2d vertex 715 | vertex3d_rs = tf.transpose(tf.reshape( shape_single[i], shape = [-1, 3] )) 716 | 717 | normal, normalf = _DEPRECATED_compute_normal(vertex3d_rs,tri_const, vertex_tri_const) 718 | normalf = tf.transpose(normalf) 719 | normalf4d = tf.concat(axis=0, values=[normalf, tf.ones([1, normalf.get_shape()[-1]], tf.float32)]) 720 | rotated_normalf = tf.matmul(m_i, normalf4d, False, False) 721 | _, _, rotated_normalf_z = tf.split(axis=0, num_or_size_splits=3, value=rotated_normalf) 722 | visible_tri = tf.greater(rotated_normalf_z, 0) 723 | 724 | 725 | vertex4d = tf.concat(axis = 0, values = [vertex3d_rs, tf.ones([1, vertex3d_rs.get_shape()[-1]], tf.float32)]) 726 | 727 | vertex2d = tf.matmul(m_i, vertex4d, False, False) 728 | vertex2d = tf.transpose(vertex2d) 729 | 730 | [vertex2d_u, vertex2d_v, vertex2d_z] = tf.split(axis=1, num_or_size_splits=3, value=vertex2d) 731 | vertex2d_u = vertex2d_u - 1 732 | vertex2d_v = s - vertex2d_v 733 | 734 | vertex2d = tf.concat(axis=1, values=[vertex2d_v, vertex2d_u, vertex2d_z]) 735 | vertex2d = tf.transpose(vertex2d) 736 | 737 | # Applying Z-buffer 738 | tri_map_2d, mask_i = ZBuffer_Rendering_CUDA_op_v2_sz224(vertex2d, tri_const, visible_tri) 739 | 740 | tri_map_2d_flat = tf.cast(tf.reshape(tri_map_2d, [-1]), 'int32') 741 | 742 | 743 | # Calculate barycentric coefficient 744 | 745 | vt1 = tf.gather( tri2vt1_const, tri_map_2d_flat ) 746 | vt2 = tf.gather( tri2vt2_const, tri_map_2d_flat ) 747 | vt3 = tf.gather( tri2vt3_const, 
tri_map_2d_flat ) 748 | 749 | 750 | pixel1_uu = flatten(tf.gather( vertex2d_u, vt1 )) 751 | pixel2_uu = flatten(tf.gather( vertex2d_u, vt2 )) 752 | pixel3_uu = flatten(tf.gather( vertex2d_u, vt3 )) 753 | 754 | pixel1_vv = flatten(tf.gather( vertex2d_v, vt1 )) 755 | pixel2_vv = flatten(tf.gather( vertex2d_v, vt2 )) 756 | pixel3_vv = flatten(tf.gather( vertex2d_v, vt3 )) 757 | c1, c2, c3 = barycentric(pixel1_uu, pixel2_uu, pixel3_uu, pixel1_vv, pixel2_vv, pixel3_vv, u, v) 758 | 759 | 760 | ## 761 | pixel1_u = tf.gather( vt2pixel_u_const, vt1 ) 762 | pixel2_u = tf.gather( vt2pixel_u_const, vt2 ) 763 | pixel3_u = tf.gather( vt2pixel_u_const, vt3 ) 764 | 765 | pixel1_v = tf.gather( vt2pixel_v_const, vt1 ) 766 | pixel2_v = tf.gather( vt2pixel_v_const, vt2 ) 767 | pixel3_v = tf.gather( vt2pixel_v_const, vt3 ) 768 | 769 | 770 | pixel_u_i = tf.reshape(pixel1_u * c1 + pixel2_u * c2 + pixel3_u* c3, [output_size, output_size]) 771 | pixel_v_i = tf.reshape(pixel1_v * c1 + pixel2_v * c2 + pixel3_v* c3, [output_size, output_size]) 772 | 773 | 774 | pixel_u.append(pixel_u_i) 775 | pixel_v.append(pixel_v_i) 776 | 777 | masks.append(mask_i) 778 | 779 | images = bilinear_sampler(texture, pixel_v, pixel_u) 780 | masks = tf.stack(masks) 781 | 782 | return images, masks 783 | 784 | def _DEPRECATED_compute_landmarks(m, mshape, output_size=224): 785 | # This is a deprecated version of compute landmarks which is not optimized 786 | 787 | n_size = get_shape(m) 788 | n_size = n_size[0] 789 | 790 | s = output_size 791 | 792 | 793 | # Tri, tri2vt 794 | kpts = load_3DMM_kpts() 795 | 796 | kpts_const = tf.constant(kpts, tf.int32) 797 | 798 | m_single = tf.split(axis = 0, num_or_size_splits = n_size, value = m) 799 | shape_single = tf.split(axis = 0, num_or_size_splits = n_size, value = mshape) 800 | 801 | landmarks_u = [] 802 | landmarks_v = [] 803 | 804 | for i in range(n_size): 805 | # Compute 2d vertex 806 | #vertex3d = tf.transpose(tf.reshape( mu_const + tf.matmul(w_shape_const, p_shape_single[i], False, True) + tf.matmul(w_exp_const, p_exp_single[i], False, True), shape = [-1, 3] )) 807 | 808 | vertex3d_rs = tf.reshape( shape_single[i], shape = [-1, 3] ) 809 | vertex3d_rs = tf.transpose(tf.gather_nd(vertex3d_rs, kpts_const)) 810 | #print(get_shape(vertex3d_rs)) 811 | vertex4d = tf.concat(axis = 0, values = [vertex3d_rs, tf.ones([1, get_shape(vertex3d_rs)[1]], tf.float32)]) 812 | 813 | m_single_i = tf.transpose(tf.reshape(m_single[i], [4,2])) 814 | vertex2d = tf.matmul(m_single_i, vertex4d, False, False) 815 | vertex2d = tf.transpose(vertex2d) 816 | 817 | [vertex2d_u, vertex2d_v] = tf.split(axis=1, num_or_size_splits=2, value=vertex2d) #[vertex2d_u, vertex2d_v] = tf.split(1, 2, vertex2d) 818 | vertex2d_u = vertex2d_u - 1 819 | vertex2d_v = s - vertex2d_v 820 | 821 | landmarks_u.append(vertex2d_u) 822 | landmarks_v.append(vertex2d_v) 823 | 824 | return tf.stack(landmarks_u), tf.stack(landmarks_v) 825 | 826 | def _DEPRECATED_compute_normal(vertex, tri, vertex_tri): 827 | # Unit normals to the faces 828 | # vertex : 3xvertex_num 829 | # tri : 3xtri_num 830 | 831 | vertex = tf.transpose(vertex) 832 | 833 | vt1_indices, vt2_indices, vt3_indices = tf.split(tf.transpose(tri), num_or_size_splits = 3, axis = 1) 834 | 835 | 836 | vt1 = tf.gather_nd(vertex, vt1_indices) 837 | #print('get_shape(vt1)') 838 | #print(get_shape(vt1)) 839 | vt2 = tf.gather_nd(vertex, vt2_indices) 840 | vt3 = tf.gather_nd(vertex, vt3_indices) 841 | 842 | 843 | normalf = tf.cross(vt2 - vt1, vt3 - vt1) 844 | normalf = tf.nn.l2_normalize(normalf, dim = 
1) 845 | 846 | mask = tf.tile( tf.expand_dims( tf.not_equal(vertex_tri, tri.shape[1] - 1), 2), multiples = [1, 1, 3]) 847 | mask = tf.cast( mask, vertex.dtype ) 848 | vertex_tri = tf.reshape(vertex_tri, shape = [-1, 1]) 849 | normal = tf.reshape(tf.gather_nd(normalf, vertex_tri), shape = [8, -1, 3]) 850 | 851 | normal = tf.reduce_sum( tf.multiply( normal, mask ), axis = 0) 852 | normal = tf.nn.l2_normalize(normal, dim = 1) 853 | 854 | 855 | #print('get_shape(normalf)') 856 | #print(get_shape(normalf)) 857 | 858 | 859 | #print('get_shape(normal)') 860 | #print(get_shape(normal)) 861 | 862 | 863 | # enforce that the normals point outward 864 | v = vertex - tf.reduce_mean(vertex,0) 865 | s = tf.reduce_sum( tf.multiply(v, normal), 0 ) 866 | 867 | count_s_greater_0 = tf.count_nonzero( tf.greater(s, 0) ) 868 | count_s_less_0 = tf.count_nonzero( tf.less(s, 0) ) 869 | 870 | sign = 2 * tf.cast(tf.greater(count_s_greater_0, count_s_less_0), tf.float32) - 1 871 | normal = tf.multiply(normal, sign) 872 | normalf = tf.multiply(normalf, sign) 873 | 874 | return normal, normalf 875 | 876 | 877 | def unwarp_texture(image, m, mshape, output_size=224, is_reduce = False): 878 | # TODO: correct the mask 879 | print("TODO: correct the mask in unwarp_texture(image, m, mshape, output_size=224, is_reduce = False)") 880 | 881 | 882 | n_size = get_shape(image) 883 | n_size = n_size[0] 884 | s = output_size 885 | 886 | # Tri, tri2vt 887 | tri = load_3DMM_tri() 888 | vertex_tri = load_3DMM_vertex_tri() 889 | vt2pixel_u, vt2pixel_v = load_3DMM_vt2pixel() 890 | 891 | 892 | tri2vt1_const = tf.constant(tri[0,:], tf.int32) 893 | tri2vt2_const = tf.constant(tri[1,:], tf.int32) 894 | tri2vt3_const = tf.constant(tri[2,:], tf.int32) 895 | 896 | tri_const = tf.constant(tri, tf.int32) 897 | #tri_2d_const = tf.constant(tri_2d, tf.int32)  # NOTE: this definition is commented out, yet tri_2d_const is still referenced below (mask_i and the vt1/vt2/vt3 gathers); the function raises a NameError until it is restored. 898 | vertex_tri_const = tf.constant(vertex_tri, tf.int32) 899 | 900 | 901 | #Vt2pix 902 | vt2pixel_u_const = tf.constant(vt2pixel_u, tf.float32) 903 | vt2pixel_v_const = tf.constant(vt2pixel_v, tf.float32) 904 | 905 | #indicies = np.zeros([s*s,2]) 906 | #for i in range(s): 907 | # for j in range(s): 908 | # indicies[i*s+j ] = [i,j] 909 | 910 | #indicies_const = tf.constant(indicies, tf.float32) 911 | #[indicies_const_u, indicies_const_v] = tf.split(1, 2, indicies_const) 912 | 913 | ###########m = m * tf.constant(self.std_m) + tf.constant(self.mean_m) 914 | ###########mshape = mshape * tf.constant(self.std_shape) + tf.constant(self.mean_shape) 915 | 916 | m_single = tf.split(axis = 0, num_or_size_splits = n_size, value = m) 917 | shape_single = tf.split(axis = 0, num_or_size_splits = n_size, value = mshape) 918 | 919 | pixel_u = [] 920 | pixel_v = [] 921 | 922 | masks = [] 923 | for i in range(n_size): 924 | 925 | m_i = tf.transpose(tf.reshape(m_single[i], [4,2])) 926 | m_i_row1 = tf.nn.l2_normalize(m_i[0,0:3], dim = 0) 927 | m_i_row2 = tf.nn.l2_normalize(m_i[1,0:3], dim = 0) 928 | m_i_row3 = tf.concat([tf.reshape(tf.cross(m_i_row1, m_i_row2), shape = [1, 3]), tf.zeros([1, 1])], axis = 1) 929 | 930 | m_i = tf.concat([m_i, m_i_row3], axis = 0) 931 | 932 | # Compute 2d vertex 933 | #vertex3d = tf.transpose(tf.reshape( mu_const + tf.matmul(w_shape_const, p_shape_single[i], False, True) + tf.matmul(w_exp_const, p_exp_single[i], False, True), shape = [-1, 3] )) 934 | 935 | vertex3d_rs = tf.transpose(tf.reshape( shape_single[i], shape = [-1, 3] )) 936 | 937 | normal, normalf = compute_normal(vertex3d_rs,tri_const, vertex_tri_const) 938 | normalf = tf.transpose(normalf) 939 | normalf4d = tf.concat(axis=0,
values=[normalf, tf.ones([1, normalf.get_shape()[-1]], tf.float32)]) 940 | rotated_normalf = tf.matmul(m_i, normalf4d, False, False) 941 | _, _, rotated_normalf_z = tf.split(axis=0, num_or_size_splits=3, value=rotated_normalf) 942 | visible_tri = tf.greater(rotated_normalf_z, 0) 943 | 944 | mask_i = tf.gather( tf.cast(visible_tri, dtype=tf.float32), tri_2d_const ) 945 | #print("get_shape(mask_i)") 946 | #print(get_shape(mask_i)) 947 | 948 | 949 | vertex4d = tf.concat(axis = 0, values = [vertex3d_rs, tf.ones([1, vertex3d_rs.get_shape()[-1]], tf.float32)]) 950 | 951 | vertex2d = tf.matmul(m_i, vertex4d, False, False) 952 | vertex2d = tf.transpose(vertex2d) 953 | 954 | [vertex2d_u, vertex2d_v, vertex2d_z] = tf.split(axis=1, num_or_size_splits=3, value=vertex2d) 955 | vertex2d_u = tf.squeeze(vertex2d_u - 1) 956 | vertex2d_v = tf.squeeze(s - vertex2d_v) 957 | 958 | #vertex2d = tf.concat(axis=1, values=[vertex2d_v, vertex2d_u, vertex2d_z]) 959 | #vertex2d = tf.transpose(vertex2d) 960 | 961 | #vertex2d_u = tf.transpose(vertex2d_u) 962 | #vertex2d_V = tf.transpose(vertex2d_v) 963 | 964 | 965 | 966 | vt1 = tf.gather( tri2vt1_const, tri_2d_const ) 967 | vt2 = tf.gather( tri2vt2_const, tri_2d_const ) 968 | vt3 = tf.gather( tri2vt3_const, tri_2d_const ) 969 | 970 | 971 | 972 | 973 | pixel1_u = tf.gather( vertex2d_u, vt1 ) #tf.gather( vt2pixel_u_const, vt1 ) 974 | pixel2_u = tf.gather( vertex2d_u, vt2 ) 975 | pixel3_u = tf.gather( vertex2d_u, vt3 ) 976 | 977 | pixel1_v = tf.gather( vertex2d_v, vt1 ) 978 | pixel2_v = tf.gather( vertex2d_v, vt2 ) 979 | pixel3_v = tf.gather( vertex2d_v, vt3 ) 980 | 981 | pixel_u_i = tf.scalar_mul(scalar = 1.0/3.0, x = tf.add_n([pixel1_u, pixel2_u, pixel3_u])) 982 | pixel_v_i = tf.scalar_mul(scalar = 1.0/3.0, x = tf.add_n([pixel1_v, pixel2_v, pixel3_v])) 983 | 984 | pixel_u.append(pixel_u_i) 985 | pixel_v.append(pixel_v_i) 986 | 987 | masks.append(mask_i) 988 | 989 | 990 | 991 | texture = bilinear_sampler(image, pixel_u, pixel_v) 992 | masks = tf.stack(masks) 993 | 994 | return texture, masks -------------------------------------------------------------------------------- /sample_data.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranluan/Nonlinear_Face_3DMM/662098a602d542c3505cd16ba01dd302f33eeee8/sample_data.npz -------------------------------------------------------------------------------- /std_exp_para.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranluan/Nonlinear_Face_3DMM/662098a602d542c3505cd16ba01dd302f33eeee8/std_exp_para.npy -------------------------------------------------------------------------------- /std_m.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranluan/Nonlinear_Face_3DMM/662098a602d542c3505cd16ba01dd302f33eeee8/std_m.npy -------------------------------------------------------------------------------- /std_shape.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tranluan/Nonlinear_Face_3DMM/662098a602d542c3505cd16ba01dd302f33eeee8/std_shape.npy -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Some codes from https://github.com/Newmu/dcgan_code 3 | """ 4 | from __future__ import division 5 | import math 6 | #import csv 7 | import 
json 8 | import random 9 | import pprint 10 | import scipy.misc 11 | import numpy as np 12 | from glob import glob 13 | import os 14 | #import matplotlib.pyplot as plt 15 | from time import gmtime, strftime 16 | from config import _300W_LP_DIR 17 | 18 | pp = pprint.PrettyPrinter() 19 | 20 | get_stddev = lambda x, k_h, k_w: 1/math.sqrt(k_w*k_h*x.get_shape()[-1]) 21 | 22 | def get_image(image_path, image_size, is_crop=True, is_random_crop = False, resize_w=64, is_grayscale = False): 23 | return transform(imread(image_path, is_grayscale), image_size, is_crop, is_random_crop, resize_w) 24 | 25 | def save_images(images, size, image_path, inverse = True): 26 | if len(size) == 1: 27 | size = [size[0], -1]  # fix: was size= [size, -1], which nests the list and breaks the grid computation below 28 | if size[1] == -1: 29 | size[1] = int(math.ceil(images.shape[0]/size[0])) 30 | if size[0] == -1: 31 | size[0] = int(math.ceil(images.shape[0]/size[1])) 32 | if (inverse): 33 | images = inverse_transform(images) 34 | 35 | return imsave(images, size, image_path) 36 | 37 | def imread(path, is_grayscale = False): 38 | if (is_grayscale): 39 | return scipy.misc.imread(path, flatten = True).astype(np.float) 40 | else: 41 | return scipy.misc.imread(path).astype(np.float) 42 | 43 | def merge_images(images, size): 44 | return inverse_transform(images) 45 | 46 | def merge(images, size): 47 | h, w = images.shape[1], images.shape[2] 48 | nn = images.shape[0] 49 | 50 | if size[1] < 0: 51 | size[1] = int(math.ceil(nn/size[0])) 52 | if size[0] < 0: 53 | size[0] = int(math.ceil(nn/size[1])) 54 | 55 | 56 | if (images.ndim == 4): 57 | img = np.zeros((h * size[0], w * size[1], 3)) 58 | for idx, image in enumerate(images): 59 | i = idx % size[1] 60 | j = idx // size[1] 61 | img[j*h:j*h+h, i*w:i*w+w, :] = image 62 | else: 63 | img = images 64 | 65 | 66 | return img 67 | 68 | def imresize(img, sz): 69 | return scipy.misc.imresize(img, sz) 70 | 71 | def imsave(images, size, path): 72 | img = merge(images, size) 73 | 74 | #plt.imshow(img) 75 | #plt.show() 76 | 77 | return scipy.misc.imsave(path, img) 78 | 79 | def center_crop(x, crop_h, crop_w=None, resize_w=64): 80 | if crop_w is None: 81 | crop_w = crop_h 82 | h, w = x.shape[:2] 83 | j = int(round((h - crop_h)/2.)) 84 | i = int(round((w - crop_w)/2.)) 85 | return scipy.misc.imresize(x[j:j+crop_h, i:i+crop_w], 86 | [resize_w, resize_w]) 87 | 88 | def random_crop(x, crop_h, crop_w=None, with_crop_size=None ): 89 | if crop_w is None: 90 | crop_w = crop_h 91 | if with_crop_size is None: 92 | with_crop_size = False 93 | h, w = x.shape[:2] 94 | 95 | j = random.randint(0, h - crop_h) 96 | i = random.randint(0, w - crop_w) 97 | 98 | if with_crop_size: 99 | return x[j:j+crop_h, i:i+crop_w,:], j, i 100 | else: 101 | return x[j:j+crop_h, i:i+crop_w,:] 102 | 103 | def crop(x, crop_h, crop_w, j, i): 104 | if crop_w is None: 105 | crop_w = crop_h 106 | 107 | return x[j:j+crop_h, i:i+crop_w] 108 | 109 | 110 | #return scipy.misc.imresize(x, [96, 96] ) 111 | 112 | def transform(image, npx=64, is_crop=True, is_random_crop=True, resize_w=64): 113 | # npx : # of pixels width/height of image 114 | if is_crop: 115 | if is_random_crop: 116 | cropped_image = random_crop(image, npx) 117 | else: 118 | cropped_image = center_crop(image, npx, resize_w=resize_w) 119 | else: 120 | cropped_image = image 121 | return np.array(cropped_image)/127.5 - 1. 122 | 123 | def inverse_transform(images): 124 | return (images+1.)/2.
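
# ---- Editor's sketch (not part of the original utils.py) ----
# Illustrates the value ranges implied by transform() and inverse_transform()
# above: transform() maps pixels from [0, 255] to [-1, 1] for the network, and
# inverse_transform() maps them back to [0, 1] for saving. The function name
# below is hypothetical; it only relies on numpy, which this file imports.
def _normalization_roundtrip_demo():
    img = np.random.randint(0, 256, size=(64, 64, 3)).astype(np.float64)
    normalized = img / 127.5 - 1.        # as in transform()
    recovered = (normalized + 1.) / 2.   # as in inverse_transform()
    assert np.allclose(recovered, img / 255.)  # round-trip: (p/127.5 - 1 + 1)/2 == p/255
    return normalized, recovered
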
125 | 126 | 127 | def to_json(output_path, *layers): 128 | with open(output_path, "w") as layer_f: 129 | lines = "" 130 | for w, b, bn in layers: 131 | layer_idx = w.name.split('/')[0].split('h')[1] 132 | 133 | B = b.eval() 134 | 135 | if "lin/" in w.name: 136 | W = w.eval() 137 | depth = W.shape[1] 138 | else: 139 | W = np.rollaxis(w.eval(), 2, 0) 140 | depth = W.shape[0] 141 | 142 | biases = {"sy": 1, "sx": 1, "depth": depth, "w": ['%.2f' % elem for elem in list(B)]} 143 | if bn != None: 144 | gamma = bn.gamma.eval() 145 | beta = bn.beta.eval() 146 | 147 | gamma = {"sy": 1, "sx": 1, "depth": depth, "w": ['%.2f' % elem for elem in list(gamma)]} 148 | beta = {"sy": 1, "sx": 1, "depth": depth, "w": ['%.2f' % elem for elem in list(beta)]} 149 | else: 150 | gamma = {"sy": 1, "sx": 1, "depth": 0, "w": []} 151 | beta = {"sy": 1, "sx": 1, "depth": 0, "w": []} 152 | 153 | if "lin/" in w.name: 154 | fs = [] 155 | for w in W.T: 156 | fs.append({"sy": 1, "sx": 1, "depth": W.shape[0], "w": ['%.2f' % elem for elem in list(w)]}) 157 | 158 | lines += """ 159 | var layer_%s = { 160 | "layer_type": "fc", 161 | "sy": 1, "sx": 1, 162 | "out_sx": 1, "out_sy": 1, 163 | "stride": 1, "pad": 0, 164 | "out_depth": %s, "in_depth": %s, 165 | "biases": %s, 166 | "gamma": %s, 167 | "beta": %s, 168 | "filters": %s 169 | };""" % (layer_idx.split('_')[0], W.shape[1], W.shape[0], biases, gamma, beta, fs) 170 | else: 171 | fs = [] 172 | for w_ in W: 173 | fs.append({"sy": 5, "sx": 5, "depth": W.shape[3], "w": ['%.2f' % elem for elem in list(w_.flatten())]}) 174 | 175 | lines += """ 176 | var layer_%s = { 177 | "layer_type": "deconv", 178 | "sy": 5, "sx": 5, 179 | "out_sx": %s, "out_sy": %s, 180 | "stride": 2, "pad": 1, 181 | "out_depth": %s, "in_depth": %s, 182 | "biases": %s, 183 | "gamma": %s, 184 | "beta": %s, 185 | "filters": %s 186 | };""" % (layer_idx, 2**(int(layer_idx)+2), 2**(int(layer_idx)+2), 187 | W.shape[0], W.shape[3], biases, gamma, beta, fs) 188 | layer_f.write(" ".join(lines.replace("'","").split())) 189 | 190 | def make_gif(images, fname, duration=2, true_image=False): 191 | import moviepy.editor as mpy 192 | 193 | def make_frame(t): 194 | try: 195 | x = images[int(len(images)/duration*t)] 196 | except: 197 | x = images[-1] 198 | 199 | if true_image: 200 | return x.astype(np.uint8) 201 | else: 202 | return ((x+1)/2*255).astype(np.uint8) 203 | 204 | clip = mpy.VideoClip(make_frame, duration=duration) 205 | clip.write_gif(fname, fps = len(images) / duration) 206 | 207 | def visualize(sess, dcgan, config, option): 208 | if option == 0: 209 | z_sample = np.random.uniform(-0.5, 0.5, size=(config.batch_size, dcgan.z_dim)) 210 | samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}) 211 | save_images(samples, [8, 8], './samples/test_%s.png' % strftime("%Y-%m-%d %H:%M:%S", gmtime())) 212 | elif option == 1: 213 | values = np.arange(0, 1, 1./config.batch_size) 214 | for idx in xrange(100): 215 | print(" [*] %d" % idx) 216 | z_sample = np.zeros([config.batch_size, dcgan.z_dim]) 217 | for kdx, z in enumerate(z_sample): 218 | z[idx] = values[kdx] 219 | 220 | samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}) 221 | save_images(samples, [8, 8], './samples/test_arange_%s.png' % (idx)) 222 | elif option == 2: 223 | values = np.arange(0, 1, 1./config.batch_size) 224 | for idx in [random.randint(0, 99) for _ in xrange(100)]: 225 | print(" [*] %d" % idx) 226 | z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim)) 227 | z_sample = np.tile(z, (config.batch_size, 1)) 228 | #z_sample = 
np.zeros([config.batch_size, dcgan.z_dim]) 229 | for kdx, z in enumerate(z_sample): 230 | z[idx] = values[kdx] 231 | 232 | samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}) 233 | make_gif(samples, './samples/test_gif_%s.gif' % (idx)) 234 | elif option == 3: 235 | values = np.arange(0, 1, 1./config.batch_size) 236 | for idx in xrange(100): 237 | print(" [*] %d" % idx) 238 | z_sample = np.zeros([config.batch_size, dcgan.z_dim]) 239 | for kdx, z in enumerate(z_sample): 240 | z[idx] = values[kdx] 241 | 242 | samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}) 243 | make_gif(samples, './samples/test_gif_%s.gif' % (idx)) 244 | elif option == 4: 245 | image_set = [] 246 | values = np.arange(0, 1, 1./config.batch_size) 247 | 248 | for idx in xrange(100): 249 | print(" [*] %d" % idx) 250 | z_sample = np.zeros([config.batch_size, dcgan.z_dim]) 251 | for kdx, z in enumerate(z_sample): z[idx] = values[kdx] 252 | 253 | image_set.append(sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})) 254 | make_gif(image_set[-1], './samples/test_gif_%s.gif' % (idx)) 255 | 256 | new_image_set = [merge(np.array([images[idx] for images in image_set]), [10, 10]) \ 257 | for idx in range(64) + range(63, -1, -1)] 258 | make_gif(new_image_set, './samples/test_gif_merged.gif', duration=8) 259 | 260 | 261 | 262 | def load_300W_LP_dataset(dataset): 263 | print 'Loading ' + dataset +' ...' 264 | 265 | fd = open(_300W_LP_DIR+'/filelist/'+dataset+'_filelist.txt', 'r') 266 | all_images = [] 267 | for line in fd: 268 | all_images.append(line.strip()) 269 | fd.close() 270 | print ' DONE. Finish loading ' + dataset +' with ' + str(len(all_images)) + ' images' 271 | 272 | fd = open(_300W_LP_DIR+'/filelist/'+dataset+'_param.dat') 273 | all_paras = np.fromfile(file=fd, dtype=np.float32) 274 | fd.close() 275 | 276 | idDim = 1 277 | mDim = idDim + 8 278 | poseDim = mDim + 7 279 | shapeDim = poseDim + 199 280 | expDim = shapeDim + 29 281 | texDim = expDim + 40 282 | ilDim = texDim + 10 283 | #colorDim = ilDim + 7 284 | 285 | all_paras = all_paras.reshape((-1,ilDim)).astype(np.float32) 286 | pid = all_paras[:,0:idDim] 287 | m = all_paras[:,idDim:mDim] 288 | pose = all_paras[:,mDim:poseDim] 289 | shape = all_paras[:,poseDim:shapeDim] 290 | exp = all_paras[:,shapeDim:expDim] 291 | tex = all_paras[:,expDim:texDim] 292 | il = all_paras[:,texDim:ilDim] 293 | #color = all_paras[:,ilDim:colorDim] 294 | 295 | assert (len(all_images) == all_paras.shape[0]),"Number of samples must be the same between images and paras" 296 | 297 | return all_images, pid, m, pose, shape, exp, tex, il 298 | 299 | def image2texture_fn(image_fn): 300 | last = image_fn[-7:].find('_') 301 | if (last < 0): 302 | return image_fn 303 | else: 304 | return image_fn[:-7 + last] + '_0.png' 305 | --------------------------------------------------------------------------------
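
Editor's note: the short sketch below is an addition (not part of the repository) showing how load_300W_LP_dataset() in utils.py partitions each row of the _param.dat file. The section widths mirror the cumulative offsets computed in that function (1 + 8 + 7 + 199 + 29 + 40 + 10 = 294 floats per sample); the zero-filled array is a synthetic stand-in for the real np.fromfile(...) contents.

import numpy as np

widths = [('pid', 1), ('m', 8), ('pose', 7), ('shape', 199), ('exp', 29), ('tex', 40), ('il', 10)]
total = sum(w for _, w in widths)   # 294, matching ilDim in load_300W_LP_dataset()

all_paras = np.zeros((4, total), dtype=np.float32)   # 4 synthetic samples
parts, offset = {}, 0
for name, w in widths:
    parts[name] = all_paras[:, offset:offset + w]    # same column slicing as pid, m, pose, shape, exp, tex, il
    offset += w

assert parts['m'].shape == (4, 8)    # the 8-dim camera parameters, reshaped to [4, 2] in rendering_ops.py
assert parts['il'].shape == (4, 10)  # the 10-dim illumination parameters
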