├── Data
│   ├── 037
│   │   ├── 037_0000.pk
│   │   ├── 037_0001.pk
│   │   ├── 037_0002.pk
│   │   ├── 037_0003.pk
│   │   ├── 037_0004.pk
│   │   ├── 037_0005.pk
│   │   ├── 037_0006.pk
│   │   ├── 037_0007.pk
│   │   ├── 037_0008.pk
│   │   └── 037_0009.pk
│   └── 038
│       ├── 038_0000.pk
│       ├── 038_0001.pk
│       ├── 038_0002.pk
│       ├── 038_0003.pk
│       ├── 038_0004.pk
│       ├── 038_0005.pk
│       ├── 038_0006.pk
│       ├── 038_0007.pk
│       ├── 038_0008.pk
│       └── 038_0009.pk
├── LICENSE
├── README.md
├── demo.jpg
├── demo_mask.jpg
├── iiw_test_ids.npy
├── illu_pca
│   ├── mean.npy
│   ├── pcaMean.npy
│   ├── pcaVariance.npy
│   └── pcaVector.npy
├── model
│   ├── SfMNet.py
│   ├── dataloader.py
│   ├── lambSH_layer.py
│   ├── loss_layer.py
│   ├── pred_illuDecomp_layer.py
│   ├── reproj_layer.py
│   └── sup_illuDecomp_layer.py
├── pre_train_model
│   └── .keep
├── test_demo.py
├── test_iiw.py
├── train.py
└── utils
    ├── render_sphere_nm.py
    └── whdr.py

/Data/037/037_0000.pk:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YeeU/InverseRenderNet/af96b366ffdafad52f3b280df8a0398e294066d3/Data/037/037_0000.pk
--------------------------------------------------------------------------------
/Data/037/037_0001.pk:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YeeU/InverseRenderNet/af96b366ffdafad52f3b280df8a0398e294066d3/Data/037/037_0001.pk
--------------------------------------------------------------------------------
/Data/037/037_0002.pk:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YeeU/InverseRenderNet/af96b366ffdafad52f3b280df8a0398e294066d3/Data/037/037_0002.pk
--------------------------------------------------------------------------------
/Data/037/037_0003.pk:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YeeU/InverseRenderNet/af96b366ffdafad52f3b280df8a0398e294066d3/Data/037/037_0003.pk
--------------------------------------------------------------------------------
/Data/037/037_0004.pk:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YeeU/InverseRenderNet/af96b366ffdafad52f3b280df8a0398e294066d3/Data/037/037_0004.pk
--------------------------------------------------------------------------------
/Data/037/037_0005.pk:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YeeU/InverseRenderNet/af96b366ffdafad52f3b280df8a0398e294066d3/Data/037/037_0005.pk
--------------------------------------------------------------------------------
/Data/037/037_0006.pk:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YeeU/InverseRenderNet/af96b366ffdafad52f3b280df8a0398e294066d3/Data/037/037_0006.pk
--------------------------------------------------------------------------------
/Data/037/037_0007.pk:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YeeU/InverseRenderNet/af96b366ffdafad52f3b280df8a0398e294066d3/Data/037/037_0007.pk
--------------------------------------------------------------------------------
/Data/037/037_0008.pk:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YeeU/InverseRenderNet/af96b366ffdafad52f3b280df8a0398e294066d3/Data/037/037_0008.pk
--------------------------------------------------------------------------------
/Data/037/037_0009.pk: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YeeU/InverseRenderNet/af96b366ffdafad52f3b280df8a0398e294066d3/Data/037/037_0009.pk -------------------------------------------------------------------------------- /Data/038/038_0000.pk: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YeeU/InverseRenderNet/af96b366ffdafad52f3b280df8a0398e294066d3/Data/038/038_0000.pk -------------------------------------------------------------------------------- /Data/038/038_0001.pk: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YeeU/InverseRenderNet/af96b366ffdafad52f3b280df8a0398e294066d3/Data/038/038_0001.pk -------------------------------------------------------------------------------- /Data/038/038_0002.pk: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YeeU/InverseRenderNet/af96b366ffdafad52f3b280df8a0398e294066d3/Data/038/038_0002.pk -------------------------------------------------------------------------------- /Data/038/038_0003.pk: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YeeU/InverseRenderNet/af96b366ffdafad52f3b280df8a0398e294066d3/Data/038/038_0003.pk -------------------------------------------------------------------------------- /Data/038/038_0004.pk: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YeeU/InverseRenderNet/af96b366ffdafad52f3b280df8a0398e294066d3/Data/038/038_0004.pk -------------------------------------------------------------------------------- /Data/038/038_0005.pk: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YeeU/InverseRenderNet/af96b366ffdafad52f3b280df8a0398e294066d3/Data/038/038_0005.pk -------------------------------------------------------------------------------- /Data/038/038_0006.pk: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YeeU/InverseRenderNet/af96b366ffdafad52f3b280df8a0398e294066d3/Data/038/038_0006.pk -------------------------------------------------------------------------------- /Data/038/038_0007.pk: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YeeU/InverseRenderNet/af96b366ffdafad52f3b280df8a0398e294066d3/Data/038/038_0007.pk -------------------------------------------------------------------------------- /Data/038/038_0008.pk: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YeeU/InverseRenderNet/af96b366ffdafad52f3b280df8a0398e294066d3/Data/038/038_0008.pk -------------------------------------------------------------------------------- /Data/038/038_0009.pk: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YeeU/InverseRenderNet/af96b366ffdafad52f3b280df8a0398e294066d3/Data/038/038_0009.pk -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, 
REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # InverseRenderNet: Learning single image inverse rendering
2 |
3 | ***!! Check out our new work InverseRenderNet++ [paper](https://arxiv.org/abs/2102.06591) and [code](https://github.com/YeeU/InverseRenderNet_v2), which improves the inverse rendering results and shadow handling.***
4 |
5 | This is the implementation of the paper "InverseRenderNet: Learning single image inverse rendering". The model is implemented in TensorFlow.
6 |
7 | If you use our code, please cite the following paper:
8 |
9 | @inproceedings{yu19inverserendernet,
10 | title={InverseRenderNet: Learning single image inverse rendering},
11 | author={Yu, Ye and Smith, William AP},
12 | booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
13 | year={2019}
14 | }
15 |
16 | ## Evaluation
17 |
18 | #### Dependencies
19 | To run our evaluation code, please create your environment based on the following dependencies:
20 |
21 | tensorflow 1.12.0
22 | python 3.6
23 | skimage
24 | cv2
25 | numpy
26 |
27 | #### Pretrained model
28 | * Download our pretrained model from: [Link](https://drive.google.com/uc?export=download&id=1VKeByvprmWWXSig-7-fxfXs3KA-HG_-P)
29 | * Unzip the downloaded file
30 | * Make sure the model files are placed in a folder named "irn_model"
31 |
32 |
33 | #### Test on demo image
34 | You can perform inverse rendering on an arbitrary RGB image with our pretrained model. To run the demo code, you need to specify the path to the pretrained model, the path to the RGB image and the corresponding mask that masks out the sky in the image. The mask can be generated by PSPNet, which you can find at https://github.com/hszhao/PSPNet. Finally, the inverse rendering results will be saved to the output folder named by your argument.
35 |
36 | ```bash
37 | python3 test_demo.py --model /PATH/TO/irn_model --image demo.jpg --mask demo_mask.jpg --output test_results
38 | ```
39 |
40 |
41 | #### Test on IIW
42 | * First, download the IIW dataset from http://opensurfaces.cs.cornell.edu/publications/intrinsic/#download
43 |
44 | * Run the testing code, specifying the path to the model and the IIW data:
45 | ```bash
46 | python3 test_iiw.py --model /PATH/TO/irn_model --iiw /PATH/TO/iiw-dataset
47 | ```
48 |
49 | ## Training
50 |
51 | #### Train from scratch
52 | Training InverseRenderNet consists of two stages: pre-train and self-train.
53 | * To run the pre-train stage, use the training command with option `-m` set to `pre-train`.
54 | * After finishing the pre-train stage, you can run self-training by setting option `-m` to `self-train`.
55 |
56 | In addition, you can control the training batch size, and the path to the training data must be specified.
57 |
58 | An example training command:
59 | ```bash
60 | python3 train.py -n 2 -p Data -m pre-train
61 | ```
62 |
63 | #### Data for training
64 | To use our code for training directly, you need to pre-process the training data to match the format of the examples in the `Data` folder.
65 |
66 | In particular, we pre-process the data before training so that five images with large overlaps are bundled into one mini-batch, and the images are resized and cropped to a shape of 200 * 200 pixels. Along with the input images, the associated depth maps, camera parameters, sky masks and normal maps are stored in the same mini-batch. For efficiency, every mini-batch containing all training elements for the 5 involved images is saved as a pickle file. During training, the data-feeding thread loads each mini-batch directly from the corresponding pickle file.
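For reference, each `*.pk` mini-batch appears to be a Python dictionary whose keys match those read by `_read_pk_function` in `model/dataloader.py`: `input` (RGB images, 0-255), `dm` (depth maps), `nm` (normal maps, divided by 127 at load time), `cam` (camera parameters), `scaleX`/`scaleY` (resize factors) and `mask` (sky masks). A minimal sketch for inspecting one of the example mini-batches (the exact array shapes are best checked this way rather than assumed):

```python
import pickle

# Load one of the example mini-batches shipped in the Data folder.
with open("Data/037/037_0000.pk", "rb") as f:
    batch = pickle.load(f)

# Print every key and the shape (or type) of the stored entry.
for key, value in batch.items():
    print(key, getattr(value, "shape", type(value)))
```

Any training data you generate yourself should reproduce the same keys and per-key layout as these examples.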
67 | 68 | 69 | 70 | 71 | 72 | -------------------------------------------------------------------------------- /demo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YeeU/InverseRenderNet/af96b366ffdafad52f3b280df8a0398e294066d3/demo.jpg -------------------------------------------------------------------------------- /demo_mask.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YeeU/InverseRenderNet/af96b366ffdafad52f3b280df8a0398e294066d3/demo_mask.jpg -------------------------------------------------------------------------------- /iiw_test_ids.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YeeU/InverseRenderNet/af96b366ffdafad52f3b280df8a0398e294066d3/iiw_test_ids.npy -------------------------------------------------------------------------------- /illu_pca/mean.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YeeU/InverseRenderNet/af96b366ffdafad52f3b280df8a0398e294066d3/illu_pca/mean.npy -------------------------------------------------------------------------------- /illu_pca/pcaMean.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YeeU/InverseRenderNet/af96b366ffdafad52f3b280df8a0398e294066d3/illu_pca/pcaMean.npy -------------------------------------------------------------------------------- /illu_pca/pcaVariance.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YeeU/InverseRenderNet/af96b366ffdafad52f3b280df8a0398e294066d3/illu_pca/pcaVariance.npy -------------------------------------------------------------------------------- /illu_pca/pcaVector.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YeeU/InverseRenderNet/af96b366ffdafad52f3b280df8a0398e294066d3/illu_pca/pcaVector.npy -------------------------------------------------------------------------------- /model/SfMNet.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import tensorflow as tf 3 | import numpy as np 4 | import tensorflow.contrib.layers as layers 5 | 6 | def SfMNet(inputs, height, width, name='', n_layers=12, n_pools=2, is_training=True, depth_base=64): 7 | conv_layers = np.int32(n_layers/2) -1 8 | deconv_layers = np.int32(n_layers/2) 9 | # number of layers before perform pooling 10 | nlayers_befPool = np.int32(np.ceil((conv_layers-1)/n_pools)-1) 11 | 12 | max_depth = 512 13 | 14 | if depth_base*2**n_pools < max_depth: 15 | tail = conv_layers - nlayers_befPool*n_pools 16 | 17 | 18 | tail_deconv = deconv_layers - nlayers_befPool*n_pools 19 | else: 20 | maxNum_pool = np.log2(max_depth / depth_base) 21 | tail = np.int32(conv_layers - nlayers_befPool * maxNum_pool) 22 | tail_deconv = np.int32(deconv_layers - nlayers_befPool * maxNum_pool) 23 | 24 | f_in_conv = [3] + [np.int32(depth_base*2**(np.ceil(i/nlayers_befPool)-1)) for i in range(1, conv_layers-tail+1)] + [np.int32(depth_base*2**maxNum_pool) for i in range(conv_layers-tail+1, conv_layers+1)] 25 | f_out_conv = [64] + [np.int32(depth_base*2**(np.floor(i/nlayers_befPool))) for i in range(1, conv_layers-tail+1)] + [np.int32(depth_base*2**maxNum_pool) for i in range(conv_layers-tail+1, conv_layers+1)] 26 | 27 | 
f_in_deconv = f_out_conv[:0:-1] + [64] 28 | f_out_amDeconv = f_in_conv[:0:-1] + [3] 29 | f_out_MaskDeconv = f_in_conv[:0:-1] + [2] 30 | f_out_nmDeconv = f_in_conv[:0:-1] + [2] 31 | 32 | 33 | 34 | batch_norm_params = {'decay':0.9, 'center':True, 'scale':True, 'epsilon':1e-4, 'param_initializers':{'beta_initializer':tf.zeros_initializer(),'gamma_initializer':tf.ones_initializer(),'moving_variance_initializer':tf.ones_initializer(),'moving_average_initializer':tf.zeros_initializer()}, 'param_regularizers':{'beta_regularizer':None,'gamma_regularizer':layers.l2_regularizer(scale=1e-5)},'is_training':is_training,'trainable':is_training} 35 | 36 | ### contractive conv_layer block 37 | conv_out = inputs 38 | conv_out_list = [] 39 | for i,f_in,f_out in zip(range(1,conv_layers+2),f_in_conv,f_out_conv): 40 | scope = name+'conv'+str(i) 41 | 42 | if np.mod(i-1,nlayers_befPool)==0 and i<=n_pools*nlayers_befPool+1 and i != 1: 43 | conv_out_list.append(conv_out) 44 | conv_out = layers.conv2d(conv_out,num_outputs=f_out,kernel_size=[3,3],stride=[1,1],padding='SAME',normalizer_fn=layers.batch_norm, normalizer_params=batch_norm_params,weights_initializer=tf.random_normal_initializer(mean=0,stddev=np.sqrt(2/9/f_in)),weights_regularizer=layers.l2_regularizer(scale=1e-5),biases_initializer=None,scope=scope, trainable=is_training) 45 | conv_out = tf.nn.max_pool(conv_out, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') 46 | 47 | else: 48 | 49 | conv_out = layers.conv2d(conv_out,num_outputs=f_out,kernel_size=[3,3],stride=[1,1],padding='SAME',normalizer_fn=layers.batch_norm, normalizer_params=batch_norm_params,weights_initializer=tf.random_normal_initializer(mean=0,stddev=np.sqrt(2/9/f_in)),weights_regularizer=layers.l2_regularizer(scale=1e-5),biases_initializer=None,scope=scope, trainable=is_training) 50 | 51 | 52 | ### expanding deconv_layer block succeeding conv_layer block 53 | am_deconv_out = conv_out 54 | for i,f_in,f_out in zip(range(1,deconv_layers+1),f_in_deconv,f_out_amDeconv): 55 | scope = name+'am/am_deconv'+str(i) 56 | 57 | # expand resolution every after nlayers_befPool deconv_layer 58 | if np.mod(i,nlayers_befPool)==0 and i<=n_pools*nlayers_befPool: 59 | with tf.variable_scope(scope): 60 | W = tf.get_variable(regularizer=layers.l2_regularizer(scale=1e-5),initializer=get_bilinear_filter([3,3,f_out,f_in],2),shape=[3,3,f_out,f_in],name='filter', trainable=is_training) 61 | # import ipdb; ipdb.set_trace() 62 | # attach previous convolutional output to upsampling/deconvolutional output 63 | tmp = conv_out_list[-np.int32(i/nlayers_befPool)] 64 | output_shape = tf.shape(tmp) 65 | am_deconv_out = tf.nn.conv2d_transpose(am_deconv_out,filter=W,output_shape=output_shape,strides=[1,2,2,1],padding='SAME') 66 | am_deconv_out = layers.batch_norm(scope=scope,activation_fn=tf.nn.relu,inputs=am_deconv_out,decay=0.9, center=True, scale=True, param_initializers={'beta_initializer':tf.zeros_initializer(),'gamma_initializer':tf.ones_initializer(),'moving_variance_initializer':tf.ones_initializer(),'moving_average_initializer':tf.zeros_initializer()}, param_regularizers={'beta_regularizer':None,'gamma_regularizer':layers.l2_regularizer(scale=1e-5)},is_training=is_training,trainable=is_training) 67 | 68 | 69 | tmp = layers.conv2d(tmp,num_outputs=f_out,kernel_size=[3,3],stride=[1,1],padding='SAME',normalizer_fn=layers.batch_norm, 
normalizer_params=batch_norm_params,weights_initializer=tf.random_normal_initializer(mean=0,stddev=np.sqrt(2/9/f_in)),weights_regularizer=layers.l2_regularizer(scale=1e-5),biases_initializer=None,scope=scope,trainable=is_training) 70 | am_deconv_out = tmp + am_deconv_out 71 | 72 | 73 | elif i==deconv_layers: 74 | am_deconv_out = layers.conv2d(am_deconv_out,num_outputs=f_out,kernel_size=[3,3],stride=[1,1],padding='SAME',normalizer_fn=None,activation_fn=None,weights_initializer=tf.random_normal_initializer(mean=0,stddev=np.sqrt(2/9/f_in)),weights_regularizer=layers.l2_regularizer(scale=1e-5),scope=scope,trainable=is_training) 75 | 76 | 77 | else: 78 | am_deconv_out = layers.conv2d(am_deconv_out,num_outputs=f_out,kernel_size=[3,3],stride=[1,1],padding='SAME',normalizer_fn=layers.batch_norm, normalizer_params=batch_norm_params,weights_initializer=tf.random_normal_initializer(mean=0,stddev=np.sqrt(2/9/f_in)),weights_regularizer=layers.l2_regularizer(scale=1e-5),biases_initializer=None,scope=scope,trainable=is_training) 79 | 80 | 81 | 82 | ### deconvolution net for nm estimates 83 | nm_deconv_out = conv_out 84 | for i,f_in,f_out in zip(range(1,deconv_layers+1),f_in_deconv,f_out_nmDeconv): 85 | scope = name+'nm/nm'+str(i) 86 | 87 | # expand resolution every after nlayers_befPool deconv_layer 88 | if np.mod(i,nlayers_befPool)==0 and i<=n_pools*nlayers_befPool: 89 | with tf.variable_scope(scope): 90 | W = tf.get_variable(regularizer=layers.l2_regularizer(scale=1e-5),initializer=get_bilinear_filter([3,3,f_out,f_in],2),shape=[3,3,f_out,f_in],name='filter',trainable=is_training) 91 | 92 | # attach previous convolutional output to upsampling/deconvolutional output 93 | tmp = conv_out_list[-np.int32(i/nlayers_befPool)] 94 | output_shape = tf.shape(tmp) 95 | nm_deconv_out = tf.nn.conv2d_transpose(nm_deconv_out,filter=W,output_shape=output_shape,strides=[1,2,2,1],padding='SAME') 96 | nm_deconv_out = layers.batch_norm(scope=scope,activation_fn=tf.nn.relu,inputs=nm_deconv_out,decay=0.9, center=True, scale=True, epsilon=1e-4, param_initializers={'beta_initializer':tf.zeros_initializer(),'gamma_initializer':tf.ones_initializer(),'moving_variance_initializer':tf.ones_initializer(),'moving_average_initializer':tf.zeros_initializer()}, param_regularizers={'beta_regularizer':None,'gamma_regularizer':layers.l2_regularizer(scale=1e-5)},is_training=is_training,trainable=is_training) 97 | 98 | 99 | tmp = layers.conv2d(tmp,num_outputs=f_out,kernel_size=[3,3],stride=[1,1],padding='SAME',normalizer_fn=layers.batch_norm, normalizer_params=batch_norm_params,weights_initializer=tf.random_normal_initializer(mean=0,stddev=np.sqrt(2/9/f_in)),weights_regularizer=layers.l2_regularizer(scale=1e-5),biases_initializer=None,scope=scope,trainable=is_training) 100 | nm_deconv_out = tmp + nm_deconv_out 101 | 102 | 103 | elif i==deconv_layers: 104 | nm_deconv_out = layers.conv2d(nm_deconv_out,num_outputs=f_out,kernel_size=[3,3],stride=[1,1],padding='SAME',normalizer_fn=None,activation_fn=None,weights_initializer=tf.random_normal_initializer(mean=0,stddev=np.sqrt(2/9/f_in)),weights_regularizer=layers.l2_regularizer(scale=1e-5),biases_initializer=None,scope=scope,trainable=is_training) 105 | 106 | 107 | else: 108 | nm_deconv_out = layers.conv2d(nm_deconv_out,num_outputs=f_out,kernel_size=[3,3],stride=[1,1],padding='SAME',normalizer_fn=layers.batch_norm, 
normalizer_params=batch_norm_params,weights_initializer=tf.random_normal_initializer(mean=0,stddev=np.sqrt(2/9/f_in)),weights_regularizer=layers.l2_regularizer(scale=1e-5),biases_initializer=None,scope=scope,trainable=is_training) 109 | 110 | 111 | 112 | return am_deconv_out, nm_deconv_out 113 | 114 | 115 | 116 | def get_bilinear_filter(filter_shape, upscale_factor): 117 | ##filter_shape is [width, height, num_in_channels, num_out_channels] 118 | kernel_size = filter_shape[1] 119 | ### Centre location of the filter for which value is calculated 120 | if kernel_size % 2 == 1: 121 | centre_location = upscale_factor - 1 122 | else: 123 | centre_location = upscale_factor - 0.5 124 | 125 | x,y = np.meshgrid(np.arange(kernel_size),np.arange(kernel_size)) 126 | bilinear = (1 - abs((x - centre_location)/ upscale_factor)) * (1 - abs((y - centre_location)/ upscale_factor)) 127 | weights = np.tile(bilinear[:,:,None,None],(1,1,filter_shape[2],filter_shape[3])) 128 | 129 | return tf.constant_initializer(weights) 130 | 131 | 132 | 133 | 134 | 135 | 136 | 137 | 138 | 139 | 140 | -------------------------------------------------------------------------------- /model/dataloader.py: -------------------------------------------------------------------------------- 1 | import pickle as pk 2 | import os 3 | import numpy as np 4 | import tensorflow as tf 5 | import skimage.transform as imgTform 6 | import glob 7 | from scipy import io 8 | 9 | 10 | def megaDepth_dataPipeline(num_subbatch_input, dir): 11 | # import ipdb; ipdb.set_trace() 12 | # locate all scenes 13 | data_scenes1 = np.array(sorted(glob.glob(os.path.join(dir, '*')))) 14 | 15 | # scan scenes 16 | # sort scenes by number of training images in each 17 | scenes_size1 = np.array([len(os.listdir(i)) for i in data_scenes1]) 18 | scenes_sorted1 = np.argsort(scenes_size1) 19 | 20 | # define scenes for training and testing 21 | train_scenes = data_scenes1[scenes_sorted1] 22 | 23 | 24 | # load data from each scene 25 | # locate each data minibatch in each sorted sc 26 | train_scenes_items = [sorted(glob.glob(os.path.join(sc, '*.pk'))) for sc in train_scenes] 27 | train_scenes_items = np.concatenate(train_scenes_items, axis=0) 28 | 29 | train_items = train_scenes_items 30 | 31 | ### contruct training data pipeline 32 | # remove residual data over number of data in one epoch 33 | res_train_items = len(train_items) - (len(train_items) % num_subbatch_input) 34 | train_items = train_items[:res_train_items] 35 | train_data = md_construct_inputPipeline(train_items, flag_shuffle=True, batch_size=num_subbatch_input) 36 | 37 | # define re-initialisable iterator 38 | iterator = tf.data.Iterator.from_structure(train_data.output_types, train_data.output_shapes) 39 | next_element = iterator.get_next() 40 | 41 | # define initialisation for each iterator 42 | trainData_init_op = iterator.make_initializer(train_data) 43 | 44 | return next_element, trainData_init_op, len(train_items) 45 | 46 | 47 | def _read_pk_function(filename): 48 | with open(filename, 'rb') as f: 49 | batch_data = pk.load(f) 50 | input = np.float32(batch_data['input']) 51 | dm = batch_data['dm'] 52 | nm = np.float32(batch_data['nm']) 53 | cam = np.float32(batch_data['cam']) 54 | scaleX= batch_data['scaleX'] 55 | scaleY = batch_data['scaleY'] 56 | mask = np.float32(batch_data['mask']) 57 | 58 | return input, dm, nm, cam, scaleX, scaleY, mask 59 | 60 | def md_read_func(filename): 61 | 62 | input, dm, nm, cam, scaleX, scaleY, mask = tf.py_func(_read_pk_function, [filename], [tf.float32, tf.float32, 
tf.float32, tf.float32, tf.float32, tf.float32, tf.float32]) 63 | 64 | input = tf.data.Dataset.from_tensor_slices(input[None]) 65 | dm = tf.data.Dataset.from_tensor_slices(dm[None]) 66 | nm = tf.data.Dataset.from_tensor_slices(nm[None]) 67 | cam = tf.data.Dataset.from_tensor_slices(cam[None]) 68 | scaleX = tf.data.Dataset.from_tensor_slices(scaleX[None]) 69 | scaleY = tf.data.Dataset.from_tensor_slices(scaleY[None]) 70 | mask = tf.data.Dataset.from_tensor_slices(mask[None]) 71 | 72 | return tf.data.Dataset.zip((input, dm, nm, cam, scaleX, scaleY, mask)) 73 | 74 | 75 | def md_preprocess_func(input, dm, nm, cam, scaleX, scaleY, mask): 76 | 77 | input = input/255. 78 | 79 | nm = nm/127 80 | 81 | return input, dm, nm, cam, scaleX, scaleY, mask 82 | 83 | 84 | def md_construct_inputPipeline(items, batch_size, flag_shuffle=True): 85 | data = tf.data.Dataset.from_tensor_slices(items) 86 | if flag_shuffle: 87 | data = data.apply(tf.contrib.data.shuffle_and_repeat(buffer_size=100000)) 88 | else: 89 | data = data.repeat() 90 | data = data.apply(tf.contrib.data.parallel_interleave(md_read_func, cycle_length=batch_size, block_length=1, sloppy=False )) 91 | data = data.map(md_preprocess_func, num_parallel_calls=8 ) 92 | data = data.batch(batch_size).prefetch(4) 93 | 94 | return data 95 | 96 | 97 | -------------------------------------------------------------------------------- /model/lambSH_layer.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | 4 | # am is the albedo map, which has shape (batch, height, width, 3[rgb]) 5 | # nm is the sparse normal map, which has shape (batch, height, width, 3[x,y,z]) 6 | # L_SHcoeff contains the SH coefficients for environment illumination, using 2nd order SH. 
L_SHcoeff has shape (batch, 9, 3[rgb]) 7 | def lambSH_layer(am, nm, L_SHcoeffs, gamma): 8 | 9 | """ 10 | i = albedo * irradiance 11 | the multiplication is elementwise 12 | albedo is given 13 | irraidance = n.T * M * n, where n is (x,y,z,1) 14 | M is contructed from some precomputed constants and L_SHcoeff, where M contains information about illuminations, clamped cosine and SH basis 15 | """ 16 | 17 | # M is only related with lighting 18 | c1 = tf.constant(0.429043,dtype=tf.float32) 19 | c2 = tf.constant(0.511664,dtype=tf.float32) 20 | c3 = tf.constant(0.743125,dtype=tf.float32) 21 | c4 = tf.constant(0.886227,dtype=tf.float32) 22 | c5 = tf.constant(0.247708,dtype=tf.float32) 23 | 24 | # each row have shape (batch, 4, 3) 25 | M_row1 = tf.stack([c1*L_SHcoeffs[:,8,:], c1*L_SHcoeffs[:,4,:], c1*L_SHcoeffs[:,7,:], c2*L_SHcoeffs[:,3,:]],axis=1) 26 | M_row2 = tf.stack([c1*L_SHcoeffs[:,4,:], -c1*L_SHcoeffs[:,8,:], c1*L_SHcoeffs[:,5,:], c2*L_SHcoeffs[:,1,:]],axis=1) 27 | M_row3 = tf.stack([c1*L_SHcoeffs[:,7,:], c1*L_SHcoeffs[:,5,:], c3*L_SHcoeffs[:,6,:], c2*L_SHcoeffs[:,2,:]],axis=1) 28 | M_row4 = tf.stack([c2*L_SHcoeffs[:,3,:], c2*L_SHcoeffs[:,1,:], c2*L_SHcoeffs[:,2,:], c4*L_SHcoeffs[:,0,:]-c5*L_SHcoeffs[:,6,:]],axis=1) 29 | 30 | # M is a 5d tensot with shape (batch,4,4,3[rgb]), the axis 1 and 2 are transposely equivalent 31 | M = tf.stack([M_row1,M_row2,M_row3,M_row4], axis=1) 32 | 33 | # find batch-spatial three dimensional mask of defined normals over nm 34 | # mask = tf.logical_not(tf.is_nan(nm[:,:,:,0])) 35 | mask = tf.not_equal(tf.reduce_sum(nm,axis=-1),0) 36 | 37 | 38 | # extend Cartesian to homogeneous coords and extend its last for rgb individual multiplication dimension, nm_homo have shape (total_npix, 4) 39 | total_npix = tf.shape(nm)[:3] 40 | ones = tf.ones(total_npix) 41 | nm_homo = tf.concat([nm,tf.expand_dims(ones,axis=-1)], axis=-1) 42 | 43 | # contruct batch-wise flatten M corresponding with nm_homo, such that multiplication between them is batch-wise 44 | M = tf.expand_dims(tf.expand_dims(M,axis=1),axis=1) 45 | 46 | 47 | # expand M for broadcasting, such that M has shape (npix,4,4,3) 48 | # expand nm_homo, such that nm_homo has shape (npix,4,1,1) 49 | nm_homo = tf.expand_dims(tf.expand_dims(nm_homo,axis=-1),axis=-1) 50 | # tmp have shape (npix, 4, 3[rgb]) 51 | tmp = tf.reduce_sum(nm_homo*M,axis=-3) 52 | # E has shape (npix, 3[rbg]) 53 | E = tf.reduce_sum(tmp*nm_homo[:,:,:,:,0,:],axis=-2) 54 | 55 | 56 | # compute intensity by product between irradiance and albedo 57 | i = E*am 58 | 59 | # gamma correction 60 | i = tf.clip_by_value(i, 0., 1.) 
+ tf.constant(1e-4) 61 | i = tf.pow(i,1./gamma) 62 | 63 | return i, mask 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | -------------------------------------------------------------------------------- /model/loss_layer.py: -------------------------------------------------------------------------------- 1 | # formulate loss function based on supplied ground truth and outputs from network 2 | 3 | import importlib 4 | import tensorflow as tf 5 | import numpy as np 6 | import os 7 | from model import SfMNet, lambSH_layer, pred_illuDecomp_layer, sup_illuDecomp_layer, reproj_layer 8 | 9 | def loss_formulate(albedos, nm_pred, am_sup, nm_gt, inputs, dms, cams, scale_xs, scale_ys, masks, pair_label, preTrain_flag, am_smt_w_var, reproj_w_var, reg_loss_flag=True): 10 | 11 | # define gamma nonlinear mapping factor 12 | gamma = tf.constant(2.2) 13 | 14 | albedos = tf.nn.sigmoid(albedos) * masks + tf.constant(1e-4) 15 | 16 | ### pre-process nm_pred such that in range (-1,1) 17 | nm_pred_norm = tf.sqrt(tf.reduce_sum(nm_pred**2, axis=-1, keepdims=True)+tf.constant(1.)) 18 | nm_pred_xy = nm_pred / nm_pred_norm 19 | nm_pred_z = tf.constant(1.) / nm_pred_norm 20 | nm_pred_xyz = tf.concat([nm_pred_xy, nm_pred_z], axis=-1) * masks 21 | 22 | # selete normal map used in rendering - gt or pred 23 | normals = nm_gt if preTrain_flag else nm_pred_xyz 24 | 25 | 26 | # reconstruct SH lightings from predicted statistical SH lighting model 27 | lighting_model = '../hdr_illu_pca' 28 | lighting_vectors = tf.constant(np.load(os.path.join(lighting_model,'pcaVector.npy')),dtype=tf.float32) 29 | lighting_means = tf.constant(np.load(os.path.join(lighting_model,'mean.npy')),dtype=tf.float32) 30 | lightings_var = tf.constant(np.load(os.path.join(lighting_model,'pcaVariance.npy')),dtype=tf.float32) 31 | 32 | if preTrain_flag: 33 | lightings = sup_illuDecomp_layer.illuDecomp(inputs,albedos,nm_gt,gamma) 34 | else: 35 | lightings =pred_illuDecomp_layer.illuDecomp(inputs,albedos,nm_pred_xyz,gamma,masks) 36 | 37 | lightings_pca = tf.matmul((lightings - lighting_means), pinv(lighting_vectors)) 38 | 39 | # recompute lightings from lightins_pca which could add weak constraint on lighting reconstruction 40 | lightings = tf.matmul(lightings_pca,lighting_vectors) + lighting_means 41 | 42 | # reshape 27-D lightings to 9*3 lightings 43 | lightings = tf.reshape(lightings,[tf.shape(lightings)[0],9,3]) 44 | 45 | 46 | ### lighting prior loss 47 | var = tf.reduce_mean(lightings_pca**2,axis=0) 48 | 49 | illu_prior_loss = tf.losses.absolute_difference(var, lightings_var) 50 | 51 | illu_prior_loss = tf.log(illu_prior_loss + 1.) 
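# Note: the lighting prior above compares the batch second moment of the predicted PCA lighting
# coefficients against the variances of the statistical lighting model (pcaVariance.npy);
# the log(1 + x) wrapping softens the penalty for large deviations.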
52 | 53 | 54 | ### stereo supervision based on albedos reprojection consistancy 55 | reproj_tb = tf.to_float(tf.equal(pair_label,tf.transpose(pair_label))) 56 | reproj_tb = tf.cast(tf.matrix_set_diag(reproj_tb, tf.zeros([tf.shape(inputs)[0]])),tf.bool) 57 | reproj_list = tf.where(reproj_tb) 58 | img1_inds = tf.expand_dims(reproj_list[:,0],axis=-1) 59 | img2_inds = tf.expand_dims(reproj_list[:,1],axis=-1) 60 | albedo1 = tf.gather_nd(albedos,img1_inds) 61 | dms1 = tf.gather_nd(dms,img1_inds) 62 | cams1 = tf.gather_nd(cams,img1_inds) 63 | albedo2 = tf.gather_nd(albedos,img2_inds) 64 | cams2 = tf.gather_nd(cams,img2_inds) 65 | scale_xs1 = tf.gather_nd(scale_xs, img1_inds) 66 | scale_xs2 = tf.gather_nd(scale_xs, img2_inds) 67 | scale_ys1 = tf.gather_nd(scale_ys, img1_inds) 68 | scale_ys2 = tf.gather_nd(scale_ys, img2_inds) 69 | 70 | input1 = tf.gather_nd(inputs, img1_inds) 71 | 72 | # mask_indices contains indices for image index inside batch and spatial locations, and ignores the rgb channel index 73 | reproj_albedo1, reproj_mask = reproj_layer.map_reproj(dms1,albedo2,cams1,cams2,scale_xs1,scale_xs2,scale_ys1,scale_ys2) 74 | 75 | reproj_albedo1 = reproj_albedo1+tf.constant(1e-4) # numerical stable constant 76 | 77 | 78 | 79 | ### scale intensities for each image 80 | num_imgs = tf.shape(reproj_mask)[0] 81 | im_ = tf.constant(0) 82 | output = tf.TensorArray(dtype=tf.float32,size=num_imgs) 83 | 84 | def body(im_, output): 85 | reproj_mask_ = reproj_mask[im_] 86 | albedo1_ = tf.boolean_mask(albedo1[im_],reproj_mask_) 87 | reproj_albedo1_ = tf.boolean_mask(reproj_albedo1[im_],reproj_mask_) 88 | 89 | 90 | k = tf.reduce_sum(albedo1_*reproj_albedo1_,keepdims=True)/(tf.reduce_sum(reproj_albedo1_**2,keepdims=True)+tf.constant(1e-4)) 91 | 92 | output = output.write(im_,k) 93 | im_ += tf.constant(1) 94 | 95 | return im_, output 96 | 97 | def condition(im_, output): 98 | return tf.less(im_,num_imgs) 99 | 100 | _,output = tf.while_loop(condition, body, loop_vars=[im_, output]) 101 | 102 | 103 | ks = tf.expand_dims(output.stack(), axis=-1) 104 | 105 | 106 | 107 | albedo1_pixels = tf.boolean_mask(albedo1, reproj_mask) 108 | reproj_albedo1_pixels = tf.boolean_mask(reproj_albedo1*ks, reproj_mask) 109 | reproj_err = tf.losses.mean_squared_error(cvtLab(albedo1_pixels), cvtLab(reproj_albedo1_pixels)) 110 | 111 | 112 | ### formulate loss based on paired batches ### 113 | # self-supervision based on intensity reconstruction 114 | shadings, renderings_mask = lambSH_layer.lambSH_layer(tf.ones_like(albedos), normals, lightings, 1.) 
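# Note: calling lambSH_layer with unit albedo yields the pure shading (irradiance) of the estimated
# SH lighting; it is multiplied by the predicted albedo and gamma-corrected below to re-render the
# input image for the self-supervised rendering loss.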
115 | 116 | # compare rendering intensity by Lab 117 | inputs_pixels = cvtLab(tf.boolean_mask(inputs,renderings_mask)) 118 | renderings = cvtLab(tf.boolean_mask(tf.pow(albedos*shadings,1./gamma),renderings_mask)) 119 | render_err = tf.losses.mean_squared_error(inputs_pixels,renderings) 120 | 121 | 122 | ### compute rendering loss from cross-projected alebdo map 123 | cross_shadings = tf.gather_nd(shadings, img1_inds) 124 | inputs_pixels = cvtLab(tf.boolean_mask(input1,reproj_mask)) 125 | cross_renderings = cvtLab(tf.boolean_mask(tf.pow(tf.nn.relu(cross_shadings*reproj_albedo1*ks), 1./gamma),reproj_mask)) 126 | cross_render_err = tf.losses.mean_squared_error(inputs_pixels,cross_renderings) 127 | 128 | 129 | ### measure smoothness of albedo map 130 | Gx = tf.constant(1/2)*tf.expand_dims(tf.expand_dims(tf.constant([[-1,1]], dtype=tf.float32), axis=-1), axis=-1) 131 | Gy = tf.constant(1/2)*tf.expand_dims(tf.expand_dims(tf.constant([[-1],[1]], dtype=tf.float32), axis=-1), axis=-1) 132 | Gx_3 = tf.tile(Gx, multiples=(1,1,3,1)) 133 | Gy_3 = tf.tile(Gy, multiples=(1,1,3,1)) 134 | albedo_lab = tf.reshape(cvtLab(tf.reshape(albedos,[-1,3])),[-1,200,200,3]) 135 | 136 | aGx = tf.nn.conv2d(albedos, Gx_3, padding='SAME', strides=(1,1,1,1)) 137 | aGy = tf.nn.conv2d(albedos, Gy_3, padding='SAME', strides=(1,1,1,1)) 138 | aGxy = tf.concat([aGx,aGy], axis=-1) 139 | 140 | 141 | # compute pixel-wise smoothness weights by angle distance between neighbour pixels' chromaticities 142 | inputs_pad = tf.pad(inputs, paddings=tf.constant([[0,0], [0,1], [0,1], [0,0]])) 143 | chroma_pad = tf.nn.l2_normalize(inputs_pad, axis=-1) 144 | 145 | chroma = chroma_pad[:,:-1,:-1,:] 146 | chroma_X = chroma_pad[:,:-1,1:,:] 147 | chroma_Y = chroma_pad[:,1:,:-1,:] 148 | chroma_Gx = tf.reduce_sum(chroma*chroma_X, axis=-1, keepdims=True)**tf.constant(2.) - tf.constant(1.) 149 | chroma_Gy = tf.reduce_sum(chroma*chroma_Y, axis=-1, keepdims=True)**tf.constant(2.) - tf.constant(1.) 150 | chroma_Gx = tf.exp(chroma_Gx / tf.constant(0.0001)) 151 | chroma_Gy = tf.exp(chroma_Gy / tf.constant(0.0001)) 152 | chroma_Gxy = tf.concat([chroma_Gx, chroma_Gy], axis=-1) 153 | 154 | int_pad = tf.reduce_sum(inputs_pad**tf.constant(2.), axis=-1, keepdims=True) 155 | int = int_pad[:,:-1,:-1,:] 156 | int_X = int_pad[:,:-1,1:,:] 157 | int_Y = int_pad[:,1:,:-1,:] 158 | 159 | int_Gx = tf.where(condition=int < int_X, x=int, y=int_X) 160 | int_Gy = tf.where(condition=int < int_Y, x=int, y=int_Y) 161 | int_Gx = tf.constant(1.) + tf.exp(- int_Gx / tf.constant(.8)) 162 | int_Gy = tf.constant(1.) + tf.exp(- int_Gy / tf.constant(.8)) 163 | int_Gxy = tf.concat([int_Gx, int_Gy], axis=-1) 164 | 165 | Gxy_weights = int_Gxy * chroma_Gxy 166 | albedo_smt_error = tf.reduce_mean(tf.abs(aGxy)*Gxy_weights) 167 | 168 | 169 | ### albedo map pseudo-supervision loss 170 | if preTrain_flag: 171 | am_loss = tf.constant(0.) 
172 | else: 173 | amSup_mask = tf.not_equal(tf.reduce_sum(nm_gt,axis=-1),0) 174 | am_sup_pixel = cvtLab(tf.boolean_mask(am_sup, amSup_mask)) 175 | albedos_pixel = cvtLab(tf.boolean_mask(albedos, amSup_mask)) 176 | am_loss = tf.losses.mean_squared_error(am_sup_pixel, albedos_pixel) 177 | 178 | 179 | 180 | ### regualarisation loss 181 | reg_loss = sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)) 182 | 183 | 184 | ### compute nm_pred error 185 | nmSup_mask = tf.not_equal(tf.reduce_sum(nm_gt,axis=-1),0) 186 | nm_gt_pixel = tf.boolean_mask(nm_gt, nmSup_mask) 187 | nm_pred_pixel = tf.boolean_mask(nm_pred_xyz, nmSup_mask) 188 | nm_prod = tf.reduce_sum(nm_pred_pixel * nm_gt_pixel, axis=-1, keepdims=True) 189 | nm_cosValue = tf.constant(0.9999) 190 | nm_prod = tf.clip_by_value(nm_prod, -nm_cosValue, nm_cosValue) 191 | nm_angle = tf.acos(nm_prod) + tf.constant(1e-4) 192 | nm_loss = tf.reduce_mean(nm_angle**2) 193 | 194 | 195 | 196 | ### compute gradient loss 197 | nm_pred_Gx = conv2d_nosum(nm_pred_xyz, Gx) 198 | nm_pred_Gy = conv2d_nosum(nm_pred_xyz, Gy) 199 | nm_pred_Gxy = tf.concat([nm_pred_Gx, nm_pred_Gy], axis=-1) 200 | normals_Gx = conv2d_nosum(nm_gt, Gx) 201 | normals_Gy = conv2d_nosum(nm_gt, Gy) 202 | normals_Gxy = tf.concat([normals_Gx, normals_Gy], axis=-1) 203 | 204 | nm_pred_smt_error = tf.losses.mean_squared_error(nm_pred_Gxy, normals_Gxy) 205 | 206 | 207 | ### total loss 208 | render_err *= tf.constant(.1) 209 | reproj_err *= tf.constant(.05) * reproj_w_var 210 | cross_render_err *= tf.constant(.1) 211 | am_loss *= tf.constant(.1) 212 | illu_prior_loss *= tf.constant(.01) 213 | albedo_smt_error *= tf.constant(50.) * am_smt_w_var 214 | nm_pred_smt_error *= tf.constant(1.) 215 | nm_loss *= tf.constant(1.) 216 | 217 | 218 | 219 | if reg_loss_flag == True: 220 | loss = render_err + reproj_err + cross_render_err + reg_loss + illu_prior_loss + albedo_smt_error + nm_pred_smt_error + nm_loss + am_loss 221 | else: 222 | loss = render_err + reproj_err + cross_render_err + illu_prior_loss + albedo_smt_error + nm_pred_smt_error + nm_loss + am_loss 223 | 224 | return lightings, albedos, nm_pred_xyz, loss, render_err, reproj_err, cross_render_err, reg_loss, illu_prior_loss, albedo_smt_error, nm_pred_smt_error, nm_loss, am_loss 225 | 226 | 227 | 228 | # input RGB is 2d tensor with shape (n_pix, 3) 229 | def cvtLab(RGB): 230 | 231 | # threshold definition 232 | T = tf.constant(0.008856) 233 | 234 | # matrix for converting RGB to LUV color space 235 | cvt_XYZ = tf.constant([[0.412453,0.35758,0.180423],[0.212671,0.71516,0.072169],[0.019334,0.119193,0.950227]]) 236 | 237 | # convert RGB to XYZ 238 | XYZ = tf.matmul(RGB,tf.transpose(cvt_XYZ)) 239 | 240 | # normalise for D65 white point 241 | XYZ /= tf.constant([[0.950456, 1., 1.088754]])*100 242 | 243 | mask = tf.to_float(tf.greater(XYZ,T)) 244 | 245 | fXYZ = XYZ**(1/3)*mask + (1.-mask)*(tf.constant(7.787)*XYZ + tf.constant(0.137931)) 246 | 247 | M_cvtLab = tf.constant([[0., 116., 0.], [500., -500., 0.], [0., 200., -200.]]) 248 | 249 | Lab = tf.matmul(fXYZ, tf.transpose(M_cvtLab)) + tf.constant([[-16., 0., 0.]]) 250 | mask = tf.to_float(tf.equal(Lab, tf.constant(0.))) 251 | 252 | Lab += mask * tf.constant(1e-4) 253 | 254 | return Lab 255 | 256 | 257 | 258 | 259 | 260 | # compute pseudo inverse for input matrix 261 | def pinv(A, reltol=1e-6): 262 | # compute SVD of input A 263 | s, u, v = tf.svd(A) 264 | 265 | # invert s and clear entries lower than reltol*s_max 266 | atol = tf.reduce_max(s) * reltol 267 | s = tf.where(s>atol, s, 
atol*tf.ones_like(s)) 268 | s_inv = tf.diag(1./s) 269 | 270 | # compute v * s_inv * u_t as psuedo inverse 271 | return tf.matmul(v, tf.matmul(s_inv, tf.transpose(u))) 272 | 273 | 274 | 275 | # compute regular 2d convolution on 3d data 276 | def conv2d_nosum(input, kernel): 277 | input_x = input[:,:,:,0:1] 278 | input_y = input[:,:,:,1:2] 279 | input_z = input[:,:,:,2:3] 280 | 281 | output_x = tf.nn.conv2d(input_x, kernel, strides=(1,1,1,1), padding='SAME') 282 | output_y = tf.nn.conv2d(input_y, kernel, strides=(1,1,1,1), padding='SAME') 283 | output_z = tf.nn.conv2d(input_z, kernel, strides=(1,1,1,1), padding='SAME') 284 | 285 | return tf.concat([output_x,output_y,output_z], axis=-1) 286 | 287 | 288 | 289 | # compute regular 2d convolution on 3d data 290 | def conv2d_nosum_2ch(input, kernel): 291 | input_x = input[:,:,:,0:1] 292 | input_y = input[:,:,:,1:2] 293 | 294 | output_x = tf.nn.conv2d(input_x, kernel, strides=(1,1,1,1), padding='SAME') 295 | output_y = tf.nn.conv2d(input_y, kernel, strides=(1,1,1,1), padding='SAME') 296 | 297 | return tf.concat([output_x,output_y], axis=-1) 298 | 299 | 300 | 301 | 302 | 303 | 304 | 305 | 306 | -------------------------------------------------------------------------------- /model/pred_illuDecomp_layer.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | 4 | # am is the albedo map, which has shape (batch, height, width, 3[rgb]) 5 | # nm is the sparse normal map, which has shape (batch, height, width, 3[x,y,z]) 6 | # L_SHcoeff contains the SH coefficients for environment illumination, using 2nd order SH. L_SHcoeff has shape (batch, 9, 3[rgb]) 7 | def illuDecomp(input, am, nm, gamma, masks): 8 | 9 | """ 10 | i = albedo * irradiance 11 | the multiplication is elementwise 12 | albedo is given 13 | irraidance = n.T * M * n, where n is (x,y,z,1) 14 | M is contructed from some precomputed constants and L_SHcoeff, where M contains information about illuminations, clamped cosine and SH basis 15 | """ 16 | 17 | # compute shading by dividing input by albedo 18 | shadings = tf.pow(input,gamma)/am 19 | # perform clamping on resulted shading to guarantee its numerical range 20 | shadings = (tf.clip_by_value(shadings, 0., 1.) 
+ tf.constant(1e-4)) * masks 21 | 22 | 23 | # compute shading by linear equation regarding nm and L_SHcoeffs 24 | # E(n) = c1*L22*(x**2-y**2) + (c3*z**2 - c5)*L20 + c4*L00 + 2*c1*L2-2*x*y + 2*c1*L21*x*z + 2*c1*L2-1*y*z + 2*c2*L11*x + 2*c2*L1-1*y + 2*c2*L10*z 25 | # E(n) = c4*L00 + 2*c2*y*L1-1 + 2*c2*z*L10 + 2*c2*x*L11 + 2*c1*x*y*L2-2 + 2*c1*y*z*L2-1 + (c3*z**2 - c5)*L20 + 2*c1*x*z*L21 + c1*(x**2-y**2)*L22 26 | c1 = tf.constant(0.429043,dtype=tf.float32) 27 | c2 = tf.constant(0.511664,dtype=tf.float32) 28 | c3 = tf.constant(0.743125,dtype=tf.float32) 29 | c4 = tf.constant(0.886227,dtype=tf.float32) 30 | c5 = tf.constant(0.247708,dtype=tf.float32) 31 | 32 | 33 | # find defined pixels 34 | num_iter = tf.shape(nm)[0] 35 | output = tf.TensorArray(dtype=tf.float32, size=num_iter) 36 | i = tf.constant(0) 37 | 38 | def condition(i, output): 39 | return iatol) 71 | s_inv = tf.diag(1./s) 72 | 73 | # compute v * s_inv * u_t as psuedo inverse 74 | return tf.matmul(v, tf.matmul(s_inv, tf.transpose(u))) 75 | 76 | 77 | 78 | 79 | -------------------------------------------------------------------------------- /model/reproj_layer.py: -------------------------------------------------------------------------------- 1 | # apply error mask in albedo reprojection 2 | 3 | 4 | # no rotation involved 5 | 6 | 7 | #### directly output flatten reprojected pixels and the reconstruction mask 8 | 9 | # the differentiable layer performing reprojection 10 | 11 | import tensorflow as tf 12 | import numpy as np 13 | 14 | # pc is n-by-3 matrix containing point could three locations 15 | # cam is the new camera parameters, whose f and p_a have shape (batch) and c has shape (batch, 2) 16 | # dm1 is the depth map associated with cam1 that is camera for output image, which has shape (batch, height, width) 17 | # img2 is the input image that acts as source image for reprojection, which has shape (batch, height, width, 3) 18 | def map_reproj(dm1,map2,cam1,cam2,scale_x1,scale_x2,scale_y1,scale_y2): 19 | batch_size = tf.shape(dm1)[0] 20 | 21 | # read camera parameters 22 | c1 = cam1[:,2:4] 23 | f1 = cam1[:,0] 24 | p_a1 = cam1[:,1] # ratio is width divided by height 25 | R1 = tf.reshape(cam1[:,4:13],[-1,3,3]) 26 | t1 = cam1[:,13:] 27 | 28 | c2 = cam2[:,2:4] 29 | f2 = cam2[:,0] 30 | p_a2 = cam2[:,1] 31 | R2 = tf.reshape(cam2[:,4:13],[-1,3,3]) 32 | t2 = cam2[:,13:] 33 | 34 | # project pixel points back to camera coords 35 | # u is the height and v is the width 36 | # u and v are scalars 37 | u1 = tf.shape(dm1)[1] 38 | v1 = tf.shape(dm1)[2] 39 | 40 | # convert u1 and v1 to float, convenient for computation 41 | u1 = tf.to_float(u1) 42 | v1 = tf.to_float(v1) 43 | 44 | ### regular grid in output image 45 | # x increase towards right, y increase toward down 46 | vm,um = tf.meshgrid(tf.range(1.,v1+1.), tf.range(1.,u1+1.)) 47 | 48 | 49 | # apply scaling factors on f 50 | # f1 = f1/(scale_x1+scale_y1)*2 51 | # f1 = tf.stack([f1, f1*p_a1],axis=-1) 52 | f1 = tf.stack([f1/scale_x1, f1/scale_y1*p_a1],axis=-1) 53 | 54 | # expand f1 (batch,2,1,1), to be consistant with dm 55 | f1 = tf.expand_dims(tf.expand_dims(f1,axis=-1),axis=-1) 56 | # expand c1 dimension (batch,2,1,1) 57 | c1 = tf.expand_dims(tf.expand_dims(c1,axis=-1),axis=-1) 58 | # expand vm and um to have shape (1,height,width) 59 | vm = tf.expand_dims(vm,axis=0) 60 | um = tf.expand_dims(um,axis=0) 61 | 62 | # compute 3D point x and y coordinates 63 | # Xm and Ym have shape (batch, height, width) 64 | Xm = (vm-c1[:,0])/f1[:,0]*dm1 65 | Ym = (um-c1[:,1])/f1[:,1]*dm1 66 | 67 | # the point 
cloud is (batch, 3, npix) matrix, each row is XYZ cam coords for one point 68 | pc = tf.stack([tf.contrib.layers.flatten(Xm), tf.contrib.layers.flatten(Ym), tf.contrib.layers.flatten(dm1)], axis=1) 69 | 70 | ### transfer pc from coords of cam1 to cam2 71 | # construct homogeneous point cloud with shape batch-4-by-num_pix 72 | num_pix = tf.shape(pc)[-1] 73 | homo_pc_c1 = tf.concat([pc, tf.ones((batch_size,1,num_pix), dtype=tf.float32)], axis=1) 74 | 75 | # both transformation matrix have shape batch-by-4-by-4, valid for multiplication with defined homogeneous point cloud 76 | last_row = tf.tile(tf.constant([[[0,0,0,1]]],dtype=tf.float32), multiples=[batch_size,1,1]) 77 | W_C_R_t1 = tf.concat([R1,tf.expand_dims(t1,axis=2)],axis=2) 78 | W_C_trans1 = tf.concat([W_C_R_t1, last_row], axis=1) 79 | W_C_R_t2 = tf.concat([R2,tf.expand_dims(t2,axis=2)],axis=2) 80 | W_C_trans2 = tf.concat([W_C_R_t2, last_row], axis=1) 81 | 82 | # batch dot product, output has shape (batch, 4, npix) 83 | homo_pc_c2 = tf.matmul(W_C_trans2, tf.matmul(tf.matrix_inverse(W_C_trans1), homo_pc_c1)) 84 | 85 | ### project point cloud to cam2 pixel coordinates 86 | # u in vertical and v in horizontal 87 | u2 = tf.shape(map2)[1] 88 | v2 = tf.shape(map2)[2] 89 | 90 | # convert u2 and v2 to float 91 | u2 = tf.to_float(u2) 92 | v2 = tf.to_float(v2) 93 | 94 | # f2 = f2/(scale_x2+scale_y2)*2 95 | # f2 = tf.stack([f2, f2*p_a2],axis=-1) 96 | f2 = tf.stack([f2/scale_x2, f2/scale_y2*p_a2],axis=-1) 97 | 98 | # construct intrics matrics, which has shape (batch, 3, 4) 99 | zeros = tf.zeros_like(f2[:,0],dtype=tf.float32) 100 | ones = tf.ones_like(f2[:,0],tf.float32) 101 | k2 = tf.stack([tf.stack([f2[:,0],zeros,c2[:,0],zeros],axis=1), tf.stack([zeros,f2[:,1],c2[:,1],zeros],axis=1), tf.stack([zeros,zeros,ones,zeros],axis=1)],axis=1) 102 | 103 | ## manual batch dot product 104 | k2 = tf.expand_dims(k2,axis=-1) 105 | homo_pc_c2 = tf.expand_dims(homo_pc_c2,axis=1) 106 | # homo_uv2 has shape (batch, 3, npix) 107 | homo_uv2 = tf.reduce_sum(k2*homo_pc_c2,axis=2) 108 | 109 | # the reprojected locations of regular grid in output image 110 | # both have shape (batch, npix) 111 | v_reproj = homo_uv2[:,0,:]/homo_uv2[:,2,:] 112 | u_reproj = homo_uv2[:,1,:]/homo_uv2[:,2,:] 113 | 114 | # u and v are flatten vector containing reprojected pixel locations 115 | # the u and v on same index compose one pixel 116 | u_valid = tf.logical_and(tf.logical_and(tf.logical_not(tf.is_nan(u_reproj)), u_reproj>0), u_reproj0), v_reprojatol) 70 | s_inv = tf.diag(1./s) 71 | 72 | # compute v * s_inv * u_t as psuedo inverse 73 | return tf.matmul(v, tf.matmul(s_inv, tf.transpose(u))) 74 | 75 | 76 | 77 | -------------------------------------------------------------------------------- /pre_train_model/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YeeU/InverseRenderNet/af96b366ffdafad52f3b280df8a0398e294066d3/pre_train_model/.keep -------------------------------------------------------------------------------- /test_demo.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import tensorflow as tf 4 | import cv2 5 | from skimage import io 6 | import argparse 7 | from model import SfMNet, lambSH_layer, pred_illuDecomp_layer 8 | from utils import render_sphere_nm 9 | 10 | 11 | parser = argparse.ArgumentParser(description='InverseRenderNet') 12 | parser.add_argument('--image', help='Path to test image') 13 | parser.add_argument('--mask', 
help='Path to image mask') 14 | parser.add_argument('--model', help='Path to trained model') 15 | parser.add_argument('--output', help='Folder saving outputs') 16 | 17 | 18 | args = parser.parse_args() 19 | 20 | img_path = args.image 21 | mask_path = args.mask 22 | 23 | img = io.imread(img_path) 24 | mask = io.imread(mask_path) 25 | 26 | 27 | dst_dir = args.output 28 | os.makedirs(dst_dir) 29 | 30 | input_height = 200 31 | input_width = 200 32 | ori_height, ori_width = img.shape[:2] 33 | 34 | if ori_height / ori_width >1: 35 | scale = ori_width / 200 36 | input_height = np.int32(scale * 200) 37 | else: 38 | scale = ori_height / 200 39 | input_width = np.int32(scale * 200) 40 | 41 | 42 | # compute pseudo inverse for input matrix 43 | def pinv(A, reltol=1e-6): 44 | # compute SVD of input A 45 | s, u, v = tf.svd(A) 46 | 47 | # invert s and clear entries lower than reltol*s_max 48 | atol = tf.reduce_max(s) * reltol 49 | s = tf.boolean_mask(s, s>atol) 50 | s_inv = tf.diag(1./s) 51 | 52 | # compute v * s_inv * u_t as psuedo inverse 53 | return tf.matmul(v, tf.matmul(s_inv, tf.transpose(u))) 54 | 55 | 56 | import ipdb; ipdb.set_trace() 57 | inputs_var = tf.placeholder(tf.float32, (None, input_height, input_width, 3)) 58 | masks_var = tf.placeholder(tf.float32, (None, input_height, input_width, 1)) 59 | am_deconvOut, nm_deconvOut = SfMNet.SfMNet(inputs=inputs_var,is_training=False, height=input_height, width=input_width, n_layers=30, n_pools=4, depth_base=32) 60 | 61 | 62 | # separate albedo, error mask and shadow mask from deconvolutional output 63 | albedos = am_deconvOut 64 | nm_pred = nm_deconvOut 65 | 66 | gamma = tf.constant(2.2) 67 | 68 | # post-process on raw albedo and nm_pred 69 | albedos = tf.nn.sigmoid(albedos) * masks_var + tf.constant(1e-4) 70 | 71 | nm_pred_norm = tf.sqrt(tf.reduce_sum(nm_pred**2, axis=-1, keepdims=True)+tf.constant(1.)) 72 | nm_pred_xy = nm_pred / nm_pred_norm 73 | nm_pred_z = tf.constant(1.) / nm_pred_norm 74 | nm_pred_xyz = tf.concat([nm_pred_xy, nm_pred_z], axis=-1) * masks_var 75 | 76 | 77 | # compute illumination 78 | lighting_model = 'illu_pca' 79 | lighting_vectors = tf.constant(np.load(os.path.join(lighting_model,'pcaVector.npy')),dtype=tf.float32) 80 | lighting_means = tf.constant(np.load(os.path.join(lighting_model,'mean.npy')),dtype=tf.float32) 81 | lightings = pred_illuDecomp_layer.illuDecomp(inputs_var, albedos, nm_pred_xyz, gamma, masks_var) 82 | 83 | 84 | lightings_pca = tf.matmul((lightings - lighting_means), pinv(lighting_vectors)) 85 | lightings = tf.matmul(lightings_pca,lighting_vectors) + lighting_means 86 | # reshape 27-D lightings to 9*3 lightings 87 | lightings = tf.reshape(lightings,[tf.shape(lightings)[0],9,3]) 88 | 89 | # visualisations 90 | shading, _ = lambSH_layer.lambSH_layer(tf.ones_like(albedos), nm_pred_xyz, lightings, 1.) 91 | nm_sphere = tf.constant(render_sphere_nm.render_sphere_nm(100,1),dtype=tf.float32) 92 | nm_sphere = tf.tile(nm_sphere, (tf.shape(inputs_var)[0],1,1,1)) 93 | lighting_recon, _ = lambSH_layer.lambSH_layer(tf.ones_like(nm_sphere), nm_sphere, lightings, 1.) 
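# --------------------------------------------------------------------------
# Illustrative aside (a minimal sketch, not part of the original script):
# what the PCA lighting step above computes, written in plain NumPy.
# The shapes are assumptions taken from the comments in this repository:
# `lightings` is (batch, 27) (9 SH coefficients x 3 colour channels),
# `lighting_vectors` is (K, 27) and `lighting_means` is (27,).
# Projecting onto the basis with a pseudo-inverse and mapping back snaps the
# estimated lighting to the closest point in the PCA subspace.
import numpy as np  # np is already imported at the top of this script

def project_to_pca_subspace(lighting, basis, mean):
    # hypothetical helper; lighting: (batch, 27), basis: (K, 27), mean: (27,)
    coeffs = np.matmul(lighting - mean, np.linalg.pinv(basis))  # (batch, K) PCA coefficients
    return np.matmul(coeffs, basis) + mean                      # reconstructed lighting, (batch, 27)
# --------------------------------------------------------------------------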
94 | 95 | 96 | irn_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='conv') + tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='am') + tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='nm') 97 | model_path = tf.train.get_checkpoint_state(args.model).model_checkpoint_path 98 | 99 | total_loss = 0 100 | sess = tf.InteractiveSession() 101 | saver = tf.train.Saver(irn_vars) 102 | saver.restore(sess, model_path) 103 | 104 | 105 | # evaluation 106 | ori_img = img 107 | ori_height, ori_width = ori_img.shape[:2] 108 | img = cv2.resize(img, (input_width, input_height)) 109 | img = np.float32(img)/255. 110 | img = img[None, :, :, :] 111 | mask = cv2.resize(mask, (input_width, input_height), cv2.INTER_NEAREST) 112 | mask = np.float32(mask==255)[None,:,:,None] 113 | 114 | [albedos_val, nm_pred_val, lighting_recon_val, shading_val] = sess.run([albedos, nm_pred_xyz, lighting_recon, shading], feed_dict={inputs_var:img, masks_var:mask}) 115 | 116 | 117 | # post-process results 118 | nm_pred_val = (nm_pred_val+1.)/2. 119 | 120 | albedos_val = cv2.resize(albedos_val[0], (ori_width, ori_height)) 121 | shading_val = cv2.resize(shading_val[0], (ori_width, ori_height)) 122 | lighting_recon_val = lighting_recon_val[0] 123 | nm_pred_val = cv2.resize(nm_pred_val[0], (ori_width, ori_height)) 124 | 125 | 126 | albedos_val = (albedos_val-albedos_val.min()) / (albedos_val.max()-albedos_val.min()) 127 | 128 | albedos_val = np.uint8(albedos_val*255.) 129 | shading_val = np.uint8(shading_val*255.) 130 | lighting_recon_val = np.uint8(lighting_recon_val*255.) 131 | nm_pred_val = np.uint8(nm_pred_val*255.) 132 | 133 | input_path = os.path.join(dst_dir, 'img.png') 134 | io.imsave(input_path, ori_img) 135 | albedo_path = os.path.join(dst_dir, 'albedo.png') 136 | io.imsave(albedo_path, albedos_val) 137 | shading_path = os.path.join(dst_dir, 'shading.png') 138 | io.imsave(shading_path, shading_val) 139 | nm_pred_path = os.path.join(dst_dir, 'nm_pred.png') 140 | io.imsave(nm_pred_path, nm_pred_val) 141 | lighting_path = os.path.join(dst_dir, 'lighting.png') 142 | io.imsave(lighting_path, lighting_recon_val) 143 | 144 | 145 | -------------------------------------------------------------------------------- /test_iiw.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import numpy as np 4 | import tensorflow as tf 5 | import importlib 6 | import cv2 7 | from skimage import io 8 | import argparse 9 | from model import SfMNet, lambSH_layer, pred_illuDecomp_layer 10 | from glob import glob 11 | from utils.whdr import compute_whdr 12 | 13 | 14 | parser = argparse.ArgumentParser(description='InverseRenderNet') 15 | parser.add_argument('--iiw', help='Root directory for iiw-dataset') 16 | parser.add_argument('--model', help='Path to trained model') 17 | 18 | 19 | args = parser.parse_args() 20 | 21 | iiw = args.iiw 22 | test_ids = np.load('iiw_test_ids.npy') 23 | 24 | 25 | 26 | input_height = 200 27 | input_width = 200 28 | 29 | 30 | 31 | # compute pseudo inverse for input matrix 32 | def pinv(A, reltol=1e-6): 33 | # compute SVD of input A 34 | s, u, v = tf.svd(A) 35 | 36 | # invert s and clear entries lower than reltol*s_max 37 | atol = tf.reduce_max(s) * reltol 38 | s = tf.boolean_mask(s, s>atol) 39 | s_inv = tf.diag(1./s) 40 | 41 | # compute v * s_inv * u_t as psuedo inverse 42 | return tf.matmul(v, tf.matmul(s_inv, tf.transpose(u))) 43 | 44 | 45 | 46 | inputs_var = tf.placeholder(tf.float32, (None, input_height, input_width, 3)) 47 | masks_var = 
tf.placeholder(tf.float32, (None, input_height, input_width, 1)) 48 | train_flag = tf.placeholder(tf.bool, ()) 49 | am_deconvOut, _ = SfMNet.SfMNet(inputs=inputs_var,is_training=train_flag, height=input_height, width=input_width, n_layers=30, n_pools=4, depth_base=32) 50 | 51 | 52 | # separate albedo, error mask and shadow mask from deconvolutional output 53 | albedos = am_deconvOut 54 | 55 | # post-process on raw albedo and nm_pred 56 | albedos = tf.nn.sigmoid(albedos) * masks_var + tf.constant(1e-4) 57 | 58 | irn_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='conv') + tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='am') + tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='nm') 59 | model_path = tf.train.get_checkpoint_state(args.model).model_checkpoint_path 60 | 61 | total_loss = 0 62 | sess = tf.InteractiveSession() 63 | saver = tf.train.Saver(irn_vars) 64 | saver.restore(sess, model_path) 65 | 66 | 67 | for counter, test_id in enumerate(test_ids): 68 | img_file = str(test_id)+'.png' 69 | judgement_file = str(test_id)+'.json' 70 | 71 | img_path = os.path.join(iiw, 'data', img_file) 72 | judgement_path = os.path.join(iiw, 'data', judgement_file) 73 | 74 | img = io.imread(img_path) 75 | judgement = json.load(open(judgement_path)) 76 | 77 | ori_width, ori_height = img.shape[:2] 78 | 79 | img = cv2.resize(img, (input_width, input_height)) 80 | img = np.float32(img)/255. 81 | img = img[None, :, :, :] 82 | mask = np.ones((1, input_height, input_width, 1), np.bool) 83 | 84 | 85 | [albedos_val] = sess.run([albedos], feed_dict={train_flag:False, inputs_var:img, masks_var:mask}) 86 | 87 | albedos_val = cv2.resize(albedos_val[0], (ori_width, ori_height)) 88 | 89 | albedos_val = (albedos_val-albedos_val.min()) / (albedos_val.max()-albedos_val.min()) 90 | albedos_val = albedos_val/2+.5 91 | 92 | 93 | loss = compute_whdr(albedos_val, judgement) 94 | total_loss += loss 95 | print('whdr:{:f}\twhdr_avg:{:f}'.format(loss, total_loss/(counter+1))) 96 | 97 | 98 | print("IIW TEST WHDR %f"%(total_loss/len(test_ids))) 99 | 100 | 101 | -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | # also predict shadow mask and error mask 2 | 3 | # no rotation 4 | 5 | 6 | #### compute albedo reproj loss only on reprojection available area; compute reconstruction and its loss only based on defined area 7 | 8 | 9 | import tensorflow as tf 10 | import importlib 11 | import os 12 | import pickle as pk 13 | import sys 14 | import numpy as np 15 | import time 16 | import argparse 17 | from PIL import Image 18 | import glob 19 | from model import SfMNet, lambSH_layer, pred_illuDecomp_layer, loss_layer, dataloader 20 | 21 | 22 | parser = argparse.ArgumentParser(description='InverseRenderNet') 23 | parser.add_argument('--n_batch', '-n', help='number of minibatch', type=int) 24 | parser.add_argument('--data_path', '-p', help='Path to training data') 25 | parser.add_argument('--train_mode', '-m', help='specify the phase for training (pre-train/self-train)', choices={'pre-train', 'self-train'}) 26 | 27 | 28 | args = parser.parse_args() 29 | 30 | def main(): 31 | 32 | inputs_shape = (5,200,200,3) 33 | 34 | next_element, trainData_init_op, num_train_batches = dataloader.megaDepth_dataPipeline(args.n_batch, args.data_path) 35 | 36 | inputs_var = tf.reshape(next_element[0], (-1, inputs_shape[1], inputs_shape[2], inputs_shape[3])) 37 | dms_var = tf.reshape(next_element[1], (-1, 
inputs_shape[1], inputs_shape[2])) 38 | nms_var = tf.reshape(next_element[2], (-1, inputs_shape[1], inputs_shape[2], 3)) 39 | cams_var = tf.reshape(next_element[3], (-1, 16)) 40 | scaleXs_var = tf.reshape(next_element[4], (-1,)) 41 | scaleYs_var = tf.reshape(next_element[5], (-1,)) 42 | masks_var = tf.reshape(next_element[6], (-1, inputs_shape[1], inputs_shape[2])) 43 | 44 | # var helping cross projection 45 | pair_label_var = tf.constant(np.repeat(np.arange(args.n_batch),inputs_shape[0])[:,None], dtype=tf.float32) 46 | # weights for smooth loss and am_consistency loss 47 | am_smt_w_var = tf.placeholder(tf.float32, ()) 48 | reproj_w_var = tf.placeholder(tf.float32, ()) 49 | 50 | # mask out sky in inputs and nms 51 | masks_var_4d = tf.expand_dims(masks_var, axis=-1) 52 | inputs_var *= masks_var_4d 53 | nms_var *= masks_var_4d 54 | 55 | # inverserendernet 56 | if args.train_mode == 'pre-train': 57 | am_deconvOut, nm_deconvOut = SfMNet.SfMNet(inputs=inputs_var,is_training=True, height=inputs_shape[1], width=inputs_shape[2], name='pre_train_IRN/', n_layers=30, n_pools=4, depth_base=32) 58 | 59 | am_sup = tf.zeros_like(am_deconvOut) 60 | preTrain_flag = True 61 | 62 | 63 | elif args.train_mode == 'self-train': 64 | am_deconvOut, nm_deconvOut = SfMNet.SfMNet(inputs=inputs_var,is_training=True, height=inputs_shape[1], width=inputs_shape[2], name='IRN/', n_layers=30, n_pools=4, depth_base=32) 65 | 66 | am_sup, _ = SfMNet.SfMNet(inputs=inputs_var,is_training=False, height=inputs_shape[1], width=inputs_shape[2], name='pre_train_IRN/', n_layers=30, n_pools=4, depth_base=32) 67 | am_sup = tf.nn.sigmoid(am_sup) * masks_var_4d + tf.constant(1e-4) 68 | 69 | preTrain_flag = False 70 | 71 | # separate albedo, error mask and shadow mask from deconvolutional output 72 | albedoMaps = am_deconvOut[:,:,:,:3] 73 | 74 | # formulate loss 75 | light_SHCs, albedoMaps, nm_preds, loss, render_err, reproj_err, cross_render_err, reg_loss, illu_prior_loss, albedo_smt_error, nm_smt_loss, nm_loss, am_loss = loss_layer.loss_formulate(albedoMaps, nm_deconvOut, am_sup, nms_var, inputs_var, dms_var, cams_var, scaleXs_var, scaleYs_var, masks_var_4d, pair_label_var, True, am_smt_w_var, reproj_w_var, reg_loss_flag=True) 76 | 77 | 78 | # defined traning loop 79 | epochs = 30 80 | num_batches = num_train_batches 81 | num_subbatch = args.n_batch 82 | num_iters = np.int32(np.ceil(num_batches/num_subbatch)) 83 | 84 | 85 | # training op 86 | global_step = tf.Variable(1,name='global_step',trainable=False) 87 | 88 | train_step = tf.contrib.layers.optimize_loss(loss, optimizer=tf.train.AdamOptimizer(learning_rate=.05, epsilon=1e-1), learning_rate=None, global_step=global_step) 89 | 90 | # define saver for saving and restoring 91 | irn_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='IRN') if args.train_mode == 'self-train' else tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='pre_train_IRN') 92 | saver = tf.train.Saver(irn_vars) 93 | 94 | # define session 95 | config = tf.ConfigProto(allow_soft_placement=True) 96 | config.gpu_options.allow_growth = True 97 | sess = tf.InteractiveSession(config=config) 98 | 99 | # train from scratch or keep training trained model 100 | tf.local_variables_initializer().run() 101 | tf.global_variables_initializer().run() 102 | 103 | assignOps = [] 104 | if args.train_mode == 'self-train': 105 | # load am_sup net 106 | preTrain_irn_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='pre_train_IRN') 107 | saver_loadOldVar = tf.train.Saver(preTrain_irn_vars) 108 | 
saver_loadOldVar.restore(sess, 'pre_train_model/model.ckpt') 109 | 110 | # import ipdb; ipdb.set_trace() 111 | # duplicate pre_train model 112 | with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE): 113 | vars = tf.contrib.framework.list_variables('pre_train_model') 114 | for var_name, _ in vars: 115 | var = tf.contrib.framework.load_variable('pre_train_model', var_name) 116 | new_var_name = var_name.replace('pre_train_IRN', 'IRN') 117 | 118 | new_var = tf.get_variable(name=new_var_name) 119 | assignOps += [new_var.assign(var)] 120 | 121 | sess.run(assignOps) 122 | 123 | 124 | 125 | 126 | # start training 127 | trainData_init_op.run() 128 | dst_dir = 'irn_model' if args.train_mode == 'self-train' else 'pre_train_model' 129 | for i in range(1,epochs+1): 130 | 131 | loss_avg = 0 132 | f = open('cost.txt','a') 133 | 134 | # graduately update weights if pre-training 135 | reproj_weight = .2 + np.clip(.8 * (i-16)/14, 0., .8) if args.train_mode == 'pre-train' else 1. 136 | am_smt_weight = .2 + np.clip(.8 * (i-1)/14, 0., .8) if args.train_mode == 'pre-train' else 1. 137 | 138 | for j in range(1,num_iters+1): 139 | start_time = time.time() 140 | 141 | # train 142 | [loss_val, reg_loss_val, render_err_val, reproj_err_val, cross_render_err_val, illu_prior_val, albedo_smt_error_val, nm_smt_loss_val, nm_loss_val, am_loss_val] = sess.run([train_step, reg_loss, render_err, reproj_err, cross_render_err, illu_prior_loss, albedo_smt_error, nm_smt_loss, nm_loss, am_loss], feed_dict={am_smt_w_var:am_smt_weight, reproj_w_var:reproj_weight}) 143 | loss_avg += loss_val 144 | 145 | # log 146 | if j % 1 == 0: 147 | print('iter %d/%d loop %d/%d took %.3fs' % (i,epochs,j,num_iters,time.time()-start_time)) 148 | print('\tloss_avg = %f, loss = %f' % (loss_avg / j,loss_val)) 149 | print('\t\treg_loss = %f, render_err = %f, reproj_err = %f, cross_render_err = %f, illu_prior = %f, albedo_smt_error = %f, nm_smt_loss = %f, nm_loss = %f, am_loss = %f' % (reg_loss_val, render_err_val, reproj_err_val, cross_render_err_val, illu_prior_val, albedo_smt_error_val, nm_smt_loss_val, nm_loss_val, am_loss_val)) 150 | 151 | f.write('iter %d/%d loop %d/%d took %.3fs\n\tloss_avg = %f, loss = %f\n\t\treg_loss = %f, render_err = %f, reproj_err = %f, cross_render_err = %f, illu_prior = %f, albedo_smt_error = %f, nm_smt_loss = %f, nm_loss = %f, am_loss = %f\n' % (i,epochs,j,num_iters,time.time()-start_time,loss_avg/j, loss_val, reg_loss_val, render_err_val, reproj_err_val, cross_render_err_val, illu_prior_val, albedo_smt_error_val, nm_smt_loss_val, nm_loss_val, am_loss_val)) 152 | 153 | f.close() 154 | 155 | # save model every 10 iterations 156 | saver.save(sess,os.path.join(dst_dir, 'model.ckpt')) 157 | 158 | 159 | if __name__ == '__main__': 160 | main() 161 | 162 | 163 | 164 | 165 | 166 | -------------------------------------------------------------------------------- /utils/render_sphere_nm.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | def render_sphere_nm(radius, num): 4 | # nm is a batch of normal maps 5 | nm = [] 6 | 7 | for i in range(num): 8 | ### hemisphere 9 | height = 2*radius 10 | width = 2*radius 11 | centre = radius 12 | x_grid, y_grid = np.meshgrid(np.arange(1.,2*radius+1), np.arange(1.,2*radius+1)) 13 | # grids are (-radius, radius) 14 | x_grid -= centre 15 | # y_grid -= centre 16 | y_grid = centre - y_grid 17 | # scale range of h and w grid in (-1,1) 18 | x_grid /= radius 19 | y_grid /= radius 20 | dist = 1 - (x_grid**2+y_grid**2) 21 | 
mask = dist > 0 22 | z_grid = np.ones_like(mask) * np.nan 23 | z_grid[mask] = np.sqrt(dist[mask]) 24 | 25 | # remove xs and ys by masking out nans in zs 26 | x_grid[~(mask)] = np.nan 27 | y_grid[~(mask)] = np.nan 28 | 29 | # concatenate normal map 30 | nm.append(np.stack([x_grid,y_grid,z_grid],axis=2)) 31 | 32 | 33 | 34 | ### sphere 35 | # span the regular grid for computing azimuth and zenith angular map 36 | # height = 2*radius 37 | # width = 2*radius 38 | # centre = radius 39 | # h_grid, v_grid = np.meshgrid(np.arange(1.,2*radius+1), np.arange(1.,2*radius+1)) 40 | # # grids are (-radius, radius) 41 | # h_grid -= centre 42 | # # v_grid -= centre 43 | # v_grid = centre - v_grid 44 | # # scale range of h and v grid in (-1,1) 45 | # h_grid /= radius 46 | # v_grid /= radius 47 | 48 | # # z_grid is linearly spread along theta/zenith in range (0,pi) 49 | # dist_grid = np.sqrt(h_grid**2+v_grid**2) 50 | # dist_grid[dist_grid>1] = np.nan 51 | # theta_grid = dist_grid * np.pi 52 | # z_grid = np.cos(theta_grid) 53 | 54 | # rho_grid = np.arctan2(v_grid,h_grid) 55 | # x_grid = np.sin(theta_grid)*np.cos(rho_grid) 56 | # y_grid = np.sin(theta_grid)*np.sin(rho_grid) 57 | 58 | # # concatenate normal map 59 | # nm.append(np.stack([x_grid,y_grid,z_grid],axis=2)) 60 | 61 | 62 | # construct batch 63 | nm = np.stack(nm,axis=0) 64 | 65 | 66 | 67 | return nm 68 | 69 | -------------------------------------------------------------------------------- /utils/whdr.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2.7 2 | # 3 | # This is an implementation of the WHDR metric proposed in this paper: 4 | # 5 | # Sean Bell, Kavita Bala, Noah Snavely. "Intrinsic Images in the Wild". ACM 6 | # Transactions on Graphics (SIGGRAPH 2014). http://intrinsic.cs.cornell.edu. 7 | # 8 | # Please cite the above paper if you find this code useful. This code is 9 | # released under the MIT license (http://opensource.org/licenses/MIT). 10 | # 11 | 12 | 13 | import sys 14 | import json 15 | import argparse 16 | import numpy as np 17 | from PIL import Image 18 | 19 | 20 | def compute_whdr(reflectance, judgements, delta=0.10): 21 | """ Return the WHDR score for a reflectance image, evaluated against human 22 | judgements. The return value is in the range 0.0 to 1.0, or None if there 23 | are no judgements for the image. See section 3.5 of our paper for more 24 | details. 25 | 26 | :param reflectance: a numpy array containing the linear RGB 27 | reflectance image. 28 | 29 | :param judgements: a JSON object loaded from the Intrinsic Images in 30 | the Wild dataset. 31 | 32 | :param delta: the threshold where humans switch from saying "about the 33 | same" to "one point is darker." 
34 | """ 35 | 36 | points = judgements['intrinsic_points'] 37 | comparisons = judgements['intrinsic_comparisons'] 38 | id_to_points = {p['id']: p for p in points} 39 | rows, cols = reflectance.shape[0:2] 40 | 41 | error_sum = 0.0 42 | weight_sum = 0.0 43 | 44 | for c in comparisons: 45 | # "darker" is "J_i" in our paper 46 | darker = c['darker'] 47 | if darker not in ('1', '2', 'E'): 48 | continue 49 | 50 | # "darker_score" is "w_i" in our paper 51 | weight = c['darker_score'] 52 | if weight <= 0 or weight is None: 53 | continue 54 | 55 | point1 = id_to_points[c['point1']] 56 | point2 = id_to_points[c['point2']] 57 | if not point1['opaque'] or not point2['opaque']: 58 | continue 59 | 60 | # convert to grayscale and threshold 61 | l1 = max(1e-10, np.mean(reflectance[ 62 | int(point1['y'] * rows), int(point1['x'] * cols), ...])) 63 | l2 = max(1e-10, np.mean(reflectance[ 64 | int(point2['y'] * rows), int(point2['x'] * cols), ...])) 65 | 66 | # convert algorithm value to the same units as human judgements 67 | if l2 / l1 > 1.0 + delta: 68 | alg_darker = '1' 69 | elif l1 / l2 > 1.0 + delta: 70 | alg_darker = '2' 71 | else: 72 | alg_darker = 'E' 73 | 74 | if darker != alg_darker: 75 | error_sum += weight 76 | weight_sum += weight 77 | 78 | if weight_sum: 79 | return error_sum / weight_sum 80 | else: 81 | return None 82 | 83 | 84 | def load_image(filename, is_srgb=True): 85 | """ Load an image that is either linear or sRGB-encoded. """ 86 | 87 | if not filename: 88 | raise ValueError("Empty filename") 89 | image = np.asarray(Image.open(filename)).astype(np.float) / 255.0 90 | if is_srgb: 91 | return srgb_to_rgb(image) 92 | else: 93 | return image 94 | 95 | 96 | def srgb_to_rgb(srgb): 97 | """ Convert an sRGB image to a linear RGB image """ 98 | 99 | ret = np.zeros_like(srgb) 100 | idx0 = srgb <= 0.04045 101 | idx1 = srgb > 0.04045 102 | ret[idx0] = srgb[idx0] / 12.92 103 | ret[idx1] = np.power((srgb[idx1] + 0.055) / 1.055, 2.4) 104 | return ret 105 | 106 | 107 | if __name__ == "__main__": 108 | parser = argparse.ArgumentParser( 109 | description=( 110 | 'Evaluate an intrinsic image decomposition using the WHDR metric presented in:\n' 111 | ' Sean Bell, Kavita Bala, Noah Snavely. "Intrinsic Images in the Wild".\n' 112 | ' ACM Transactions on Graphics (SIGGRAPH 2014).\n' 113 | ' http://intrinsic.cs.cornell.edu.\n' 114 | '\n' 115 | 'The output is in the range 0.0 to 1.0.' 116 | ) 117 | ) 118 | 119 | parser.add_argument( 120 | 'reflectance', metavar='', 121 | help='reflectance image to be evaluated') 122 | 123 | parser.add_argument( 124 | 'judgements', metavar='', 125 | help='human judgements JSON file') 126 | 127 | parser.add_argument( 128 | '-l', '--linear', action='store_true', required=False, 129 | help='assume the reflectance image is linear, otherwise assume sRGB') 130 | 131 | parser.add_argument( 132 | '-d', '--delta', metavar='', type=float, required=False, default=0.10, 133 | help='delta threshold (default 0.10)') 134 | 135 | if len(sys.argv) < 2: 136 | parser.print_help() 137 | sys.exit(1) 138 | 139 | args = parser.parse_args() 140 | reflectance = load_image(filename=args.reflectance, is_srgb=(not args.linear)) 141 | judgements = json.load(open(args.judgements)) 142 | 143 | whdr = compute_whdr(reflectance, judgements, args.delta) 144 | print(whdr) 145 | --------------------------------------------------------------------------------