├── examples
│   └── fig1.png
├── models
│   ├── bsdf_256_256.bin
│   ├── embedder.py
│   ├── tracer_o3d.py
│   ├── mat_mlp.py
│   ├── mat_nvdiffrast_invrender.py
│   ├── tracer_o3d_irrf.py
│   ├── tracer_o3d_pil.py
│   ├── mat_redner.py
│   └── mat_nvdiffrast_rec.py
├── requirements.txt
├── tools
│   ├── hdr2ldr.py
│   ├── padding_texture.py
│   ├── relighting_varying.py
│   └── arrange_syn.py
├── configs
│   ├── test.conf
│   ├── test_novel.conf
│   ├── test_relighting.conf
│   ├── test_error.conf
│   ├── irrf_hdrhouse.conf
│   ├── syn_rec.conf
│   ├── syn_invrender.conf
│   ├── syn_neilf.conf
│   ├── mat_hdrhouse_invrender.conf
│   ├── mat_hdrhouse_neilf.conf
│   ├── mat_hdrhouse_rec.conf
│   ├── syn.conf
│   ├── mat_hdrhouse.conf
│   └── hdrhouse_ir_texture.conf
├── tester
│   ├── exp_runner.py
│   ├── test_novel.py
│   ├── test_error.py
│   └── test_relighting.py
├── trainer
│   ├── generate_ir_texture.py
│   ├── exp_runner.py
│   └── train_irf.py
├── utils
│   ├── plots.py
│   ├── general.py
│   ├── sample_util.py
│   └── Pano2Cube.py
└── README.md

/examples/fig1.png:
--------------------------------------------------------------------------------
 https://raw.githubusercontent.com/LZleejean/TexIR_code/HEAD/examples/fig1.png
--------------------------------------------------------------------------------
/models/bsdf_256_256.bin:
--------------------------------------------------------------------------------
 https://raw.githubusercontent.com/LZleejean/TexIR_code/HEAD/models/bsdf_256_256.bin
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
GPUtil
tensorboardX
pyhocon
open3d
pytorch_msssim
redner_gpu
siren_pytorch
--------------------------------------------------------------------------------
/tools/hdr2ldr.py:
--------------------------------------------------------------------------------
import cv2
import os


root_path = "/home/SecondDisk/Code/ours/InverseHouse/exps/Mat-hdr11/2022_10_22_08_26_15/plots/novel_view"


# all_items = sorted(os.listdir(root_path), key= lambda x: int(os.path.splitext(x)[0].split('_')[-2]))

# for item in all_items:
#     full_path = os.path.join(root_path, item)

#     img = cv2.imread(full_path, -1)
#     img = img**(1/2.2)
#     cv2.imwrite(full_path.replace('.hdr','.png'), img*255)

root_path = root_path.replace('novel_view','editing')
all_items = sorted(os.listdir(root_path), key= lambda x: int(os.path.splitext(x)[0].split('_')[-2]))

for item in all_items:
    full_path = os.path.join(root_path, item)

    img = cv2.imread(full_path, -1)
    img = img**(1/2.2)
    cv2.imwrite(full_path.replace('.hdr','.png'), img*255)


# cmd = ""
--------------------------------------------------------------------------------
/configs/test.conf:
--------------------------------------------------------------------------------
test{
    expname = default
    dataset_class = datasets.dataset.ImageCubeDerived
    model_class = models.test_nvdiffrast.MaterialModel
    irf_loss_class = models.loss.RenderLoss


    pano_img_res = [256,512]

    sample_light = [1024, 256]

    hdr_exposure = 5

    val_sample_res = [16, 32]

    path_mesh = ../reproject/result_master/master.obj
    path_mesh_open3d = ../data/inverse/cyclops-8RDdZ75Q1l0Moa3L_1-47070f9567484e67c7d920785ba0a21c/vrproc/hdr_texture/out1.obj
    results = ../results/test_house/
}

val{
    dataset_class = datasets.dataset.ImageMeshPoint
    env_res = [512,1024]
batch_size = 512 25 | } 26 | 27 | loss{ 28 | idr_rgb_weight = 1.0 29 | eikonal_weight = 0.1 30 | mask_weight = 100.0 31 | alpha = 50.0 32 | 33 | sg_rgb_weight = 1.0 34 | kl_weight = 0.01 35 | latent_smooth_weight = 0.1 36 | 37 | loss_type = L1 38 | } 39 | 40 | irf_loss 41 | { 42 | loss_type = L1 43 | } 44 | 45 | render_loss 46 | { 47 | loss_type = L1 48 | w_gradient = 1 49 | } 50 | 51 | models{ 52 | feature_vector_size = 256 53 | tracer{ 54 | 55 | } 56 | render{ 57 | sample_type = [ uniform, importance] 58 | } 59 | 60 | 61 | incident_radiance_network 62 | { 63 | points_multires = 10 64 | dirs_multires = 4 65 | dims = [ 512, 512, 512, 512 ,512, 512, 512, 512 ] 66 | } 67 | 68 | material_network 69 | { 70 | points_multires = 10 71 | dims = [ 512, 512, 512, 512 ,512, 512, 512, 512] 72 | } 73 | 74 | irrf_network 75 | { 76 | points_multires = 10 77 | dims = [ 512, 512, 512, 512] 78 | p_input_dim = 3 79 | p_out_dim = 3 80 | } 81 | } -------------------------------------------------------------------------------- /configs/test_novel.conf: -------------------------------------------------------------------------------- 1 | test{ 2 | expname = default 3 | dataset_class = datasets.dataset.ImageCubeNovel 4 | model_class = models.test_nvdiffrast.MaterialModel 5 | irf_loss_class = models.loss.RenderLoss 6 | 7 | 8 | pano_img_res = [256,512] 9 | 10 | sample_light = [1024, 256] 11 | 12 | hdr_exposure = 5 13 | 14 | val_sample_res = [16, 32] 15 | 16 | path_mesh = ../reproject/result_master/master.obj 17 | path_mesh_open3d = ../data/inverse/cyclops-8RDdZ75Q1l0Moa3L_1-47070f9567484e67c7d920785ba0a21c/vrproc/hdr_texture/out1.obj 18 | results = ../results/test_house/ 19 | } 20 | 21 | val{ 22 | dataset_class = datasets.dataset.ImageMeshPoint 23 | env_res = [512,1024] 24 | batch_size = 512 25 | } 26 | 27 | loss{ 28 | idr_rgb_weight = 1.0 29 | eikonal_weight = 0.1 30 | mask_weight = 100.0 31 | alpha = 50.0 32 | 33 | sg_rgb_weight = 1.0 34 | kl_weight = 0.01 35 | latent_smooth_weight = 0.1 36 | 37 | loss_type = L1 38 | } 39 | 40 | irf_loss 41 | { 42 | loss_type = L1 43 | } 44 | 45 | render_loss 46 | { 47 | loss_type = L1 48 | w_gradient = 1 49 | } 50 | 51 | models{ 52 | feature_vector_size = 256 53 | tracer{ 54 | 55 | } 56 | render{ 57 | sample_type = [ uniform, importance] 58 | } 59 | 60 | 61 | incident_radiance_network 62 | { 63 | points_multires = 10 64 | dirs_multires = 4 65 | dims = [ 512, 512, 512, 512 ,512, 512, 512, 512 ] 66 | } 67 | 68 | material_network 69 | { 70 | points_multires = 10 71 | dims = [ 512, 512, 512, 512 ,512, 512, 512, 512] 72 | } 73 | 74 | irrf_network 75 | { 76 | points_multires = 10 77 | dims = [ 512, 512, 512, 512] 78 | p_input_dim = 3 79 | p_out_dim = 3 80 | } 81 | } -------------------------------------------------------------------------------- /configs/test_relighting.conf: -------------------------------------------------------------------------------- 1 | test{ 2 | expname = default 3 | dataset_class = datasets.dataset.ImageDerived 4 | model_class = models.test_redner.MaterialModel 5 | irf_loss_class = models.loss.RenderLoss 6 | 7 | 8 | pano_img_res = [256,512] 9 | 10 | sample_light = [1024, 256] 11 | 12 | hdr_exposure = 5 13 | 14 | val_sample_res = [16, 32] 15 | 16 | path_mesh = ../reproject/result_master/master.obj 17 | path_mesh_open3d = ../data/inverse/cyclops-8RDdZ75Q1l0Moa3L_1-47070f9567484e67c7d920785ba0a21c/vrproc/hdr_texture/out1.obj 18 | results = ../results/test_house/ 19 | } 20 | 21 | val{ 22 | dataset_class = datasets.dataset.ImageMeshPoint 23 | env_res = [512,1024] 24 
| batch_size = 512 25 | } 26 | 27 | loss{ 28 | idr_rgb_weight = 1.0 29 | eikonal_weight = 0.1 30 | mask_weight = 100.0 31 | alpha = 50.0 32 | 33 | sg_rgb_weight = 1.0 34 | kl_weight = 0.01 35 | latent_smooth_weight = 0.1 36 | 37 | loss_type = L1 38 | } 39 | 40 | irf_loss 41 | { 42 | loss_type = L1 43 | } 44 | 45 | render_loss 46 | { 47 | loss_type = L1 48 | w_gradient = 1 49 | } 50 | 51 | models{ 52 | feature_vector_size = 256 53 | tracer{ 54 | 55 | } 56 | render{ 57 | sample_type = [ uniform, importance] 58 | } 59 | 60 | 61 | incident_radiance_network 62 | { 63 | points_multires = 10 64 | dirs_multires = 4 65 | dims = [ 512, 512, 512, 512 ,512, 512, 512, 512 ] 66 | } 67 | 68 | material_network 69 | { 70 | points_multires = 10 71 | dims = [ 512, 512, 512, 512 ,512, 512, 512, 512] 72 | } 73 | 74 | irrf_network 75 | { 76 | points_multires = 10 77 | dims = [ 512, 512, 512, 512] 78 | p_input_dim = 3 79 | p_out_dim = 3 80 | } 81 | } -------------------------------------------------------------------------------- /configs/test_error.conf: -------------------------------------------------------------------------------- 1 | test{ 2 | expname = default 3 | dataset_class = datasets.dataset.ImageCubeDerived 4 | model_class = models.test_nvdiffrast.MaterialModel 5 | irf_loss_class = models.loss.RenderLoss 6 | 7 | 8 | pano_img_res = [256,512] 9 | 10 | sample_light = [1024, 256] 11 | 12 | hdr_exposure = 5 13 | 14 | val_sample_res = [16, 32] 15 | 16 | path_mesh = ../reproject/result_master/master.obj 17 | path_mesh_open3d = ../data/inverse/cyclops-8RDdZ75Q1l0Moa3L_1-47070f9567484e67c7d920785ba0a21c/vrproc/hdr_texture/out1.obj 18 | results = ../results/test_house/ 19 | } 20 | 21 | val{ 22 | dataset_class = datasets.dataset.ImageMeshPoint 23 | env_res = [512,1024] 24 | batch_size = 512 25 | } 26 | 27 | loss{ 28 | idr_rgb_weight = 1.0 29 | eikonal_weight = 0.1 30 | mask_weight = 100.0 31 | alpha = 50.0 32 | 33 | sg_rgb_weight = 1.0 34 | kl_weight = 0.01 35 | latent_smooth_weight = 0.1 36 | 37 | loss_type = L1 38 | } 39 | 40 | irf_loss 41 | { 42 | loss_type = L1 43 | } 44 | 45 | render_loss 46 | { 47 | loss_type = L1 48 | w_gradient = 1 49 | } 50 | 51 | models{ 52 | feature_vector_size = 256 53 | tracer{ 54 | 55 | } 56 | render{ 57 | sample_type = [ uniform, importance] 58 | } 59 | 60 | 61 | incident_radiance_network 62 | { 63 | points_multires = 10 64 | dirs_multires = 4 65 | dims = [ 512, 512, 512, 512 ,512, 512, 512, 512 ] 66 | } 67 | 68 | material_network 69 | { 70 | points_multires = 10 71 | dims = [ 512, 512, 512, 512 ,512, 512, 512, 512] 72 | } 73 | 74 | irrf_network 75 | { 76 | points_multires = 10 77 | dims = [ 512, 512, 512, 512] 78 | p_input_dim = 3 79 | p_out_dim = 3 80 | } 81 | } -------------------------------------------------------------------------------- /configs/irrf_hdrhouse.conf: -------------------------------------------------------------------------------- 1 | train{ 2 | expname = default 3 | dataset_class = datasets.dataset.MeshPoint 4 | model_class = models.tracer_o3d_irrf.TracerO3d 5 | irf_loss_class = models.loss.IRFLoss 6 | 7 | plot_freq = 10000 # iterations 8 | ckpt_freq = 20000 # iterations 9 | num_pixels = 1024 10 | illum_num_pixels = 256 11 | 12 | alpha_milestones = [25000,50000,75000,100000,125000] # iterations 13 | alpha_factor = 2 14 | 15 | irf_learning_rate = 1e-4 16 | irf_sched_milestones = [800,1600,2400,3200] # iterations 17 | irf_sched_step = 800 18 | irf_sched_factor = 0.8 19 | irf_epoch = 4000 20 | 21 | is_hdr_texture = True 22 | hdr_exposure = 3 23 | 24 | std_jit = 
5e-2 25 | 26 | env_res = [32,64] 27 | val_sample_res = [32, 64] 28 | samples_point_mesh = 1024 29 | batch_size = 16 30 | 31 | path_mesh = ../reproject/result_master/master.obj 32 | path_mesh_open3d = ../data/inverse/cyclops-nPOdM9m9ByLM53j4_1/vrproc/hdr_texture/out1.obj 33 | results = ../results/test_house/ 34 | } 35 | 36 | val{ 37 | dataset_class = datasets.dataset.ImageMeshPoint 38 | env_res = [512,1024] 39 | batch_size = 512 40 | } 41 | 42 | loss{ 43 | idr_rgb_weight = 1.0 44 | eikonal_weight = 0.1 45 | mask_weight = 100.0 46 | alpha = 50.0 47 | 48 | sg_rgb_weight = 1.0 49 | kl_weight = 0.01 50 | latent_smooth_weight = 0.1 51 | 52 | loss_type = L1 53 | } 54 | 55 | irf_loss 56 | { 57 | loss_type = L1 58 | } 59 | 60 | models{ 61 | feature_vector_size = 256 62 | tracer{ 63 | 64 | } 65 | 66 | incident_radiance_network 67 | { 68 | points_multires = 10 69 | dirs_multires = 4 70 | dims = [ 512, 512, 512, 512 ,512, 512, 512, 512 ] 71 | } 72 | 73 | irrf_network 74 | { 75 | points_multires = 10 76 | dims = [ 512, 512, 512, 512] 77 | p_input_dim = 3 78 | p_out_dim = 3 79 | } 80 | } -------------------------------------------------------------------------------- /configs/syn_rec.conf: -------------------------------------------------------------------------------- 1 | train{ 2 | expname = default 3 | dataset_class = datasets.dataset.ImageCubeSyn 4 | model_class = models.mat_nvdiffrast_recMLP.MaterialModel 5 | irf_loss_class = models.loss.NvDiffRecLoss 6 | 7 | plot_freq = 10 # iterations 8 | ckpt_freq = 10 # iterations 9 | num_pixels = 1024 10 | illum_num_pixels = 256 11 | 12 | alpha_milestones = [25000,50000,75000,100000,125000] # iterations 13 | alpha_factor = 2 14 | 15 | mat_epoch = 100 16 | mat_learning_rate = 1e-2 17 | mat_sched_step = 20 18 | mat_sched_factor = 0.8 19 | 20 | optim_cam = False 21 | 22 | pano_img_res = [256,512] 23 | 24 | sample_light = [32, 16] 25 | 26 | hdr_exposure = 5 27 | 28 | env_res = [8,16] 29 | val_sample_res = [16, 32] 30 | samples_point_mesh = 1024 31 | batch_size = 1 32 | 33 | path_mesh = ../reproject/result_master/master.obj 34 | path_mesh_open3d = ../data/inverse/customHouse/vrproc/hdr_texture/out1.obj 35 | results = ../results/test_house/ 36 | } 37 | 38 | loss{ 39 | idr_rgb_weight = 1.0 40 | eikonal_weight = 0.1 41 | mask_weight = 100.0 42 | alpha = 50.0 43 | 44 | sg_rgb_weight = 1.0 45 | kl_weight = 0.01 46 | latent_smooth_weight = 0.1 47 | 48 | loss_type = L1 49 | } 50 | 51 | irf_loss 52 | { 53 | loss_type = L1 54 | } 55 | 56 | render_loss 57 | { 58 | loss_type = L1 59 | w_gradient = 1 60 | } 61 | 62 | models{ 63 | feature_vector_size = 256 64 | 65 | incident_radiance_network 66 | { 67 | points_multires = 10 68 | dirs_multires = 4 69 | dims = [ 512, 512, 512, 512 ,512, 512, 512, 512 ] 70 | } 71 | 72 | material_network 73 | { 74 | points_multires = 10 75 | dims = [ 512, 512, 512, 512 ,512, 512, 512, 512] 76 | } 77 | 78 | irrf_network 79 | { 80 | points_multires = 10 81 | dims = [ 512, 512, 512, 512] 82 | p_input_dim = 3 83 | p_out_dim = 3 84 | } 85 | } -------------------------------------------------------------------------------- /configs/syn_invrender.conf: -------------------------------------------------------------------------------- 1 | train{ 2 | expname = default 3 | dataset_class = datasets.dataset.ImageCubeSyn 4 | model_class = models.mat_nvdiffrast_invrender.MaterialModel 5 | irf_loss_class = models.loss.InvLoss 6 | 7 | plot_freq = 10 # iterations 8 | ckpt_freq = 10 # iterations 9 | num_pixels = 1024 10 | illum_num_pixels = 256 11 | 12 | 
alpha_milestones = [25000,50000,75000,100000,125000] # iterations 13 | alpha_factor = 2 14 | 15 | mat_epoch = 100 16 | mat_learning_rate = 5e-4 17 | mat_sched_step = 20 18 | mat_sched_factor = 0.8 19 | 20 | optim_cam = False 21 | 22 | pano_img_res = [256,512] 23 | 24 | sample_light = [32, 16] 25 | 26 | hdr_exposure = 5 27 | 28 | env_res = [8,16] 29 | val_sample_res = [16, 32] 30 | samples_point_mesh = 1024 31 | batch_size = 1 32 | 33 | path_mesh = ../reproject/result_master/master.obj 34 | path_mesh_open3d = ../data/inverse/customHouse/vrproc/hdr_texture/out1.obj 35 | results = ../results/test_house/ 36 | } 37 | 38 | loss{ 39 | idr_rgb_weight = 1.0 40 | eikonal_weight = 0.1 41 | mask_weight = 100.0 42 | alpha = 50.0 43 | 44 | sg_rgb_weight = 1.0 45 | kl_weight = 0.01 46 | latent_smooth_weight = 0.1 47 | 48 | loss_type = L1 49 | } 50 | 51 | irf_loss 52 | { 53 | loss_type = L1 54 | } 55 | 56 | render_loss 57 | { 58 | loss_type = L1 59 | w_gradient = 1 60 | } 61 | 62 | models{ 63 | feature_vector_size = 256 64 | 65 | incident_radiance_network 66 | { 67 | points_multires = 10 68 | dirs_multires = 4 69 | dims = [ 512, 512, 512, 512 ,512, 512, 512, 512 ] 70 | } 71 | 72 | material_network 73 | { 74 | points_multires = 10 75 | dims = [ 512, 512, 512, 512 ,512, 512, 512, 512] 76 | } 77 | 78 | irrf_network 79 | { 80 | points_multires = 10 81 | dims = [ 512, 512, 512, 512] 82 | p_input_dim = 3 83 | p_out_dim = 3 84 | } 85 | } -------------------------------------------------------------------------------- /configs/syn_neilf.conf: -------------------------------------------------------------------------------- 1 | train{ 2 | expname = default 3 | dataset_class = datasets.dataset.ImageCubeSyn 4 | model_class = models.mat_nvdiffrast_neilf.MaterialModel 5 | irf_loss_class = models.loss.NeILFLoss 6 | 7 | plot_freq = 10 # iterations 8 | ckpt_freq = 10 # iterations 9 | num_pixels = 1024 10 | illum_num_pixels = 256 11 | 12 | alpha_milestones = [25000,50000,75000,100000,125000] # iterations 13 | alpha_factor = 2 14 | 15 | mat_epoch = 50 16 | mat_learning_rate = 0.002 17 | mat_sched_step = [20, 20] 18 | mat_sched_factor = 0.2 19 | 20 | optim_cam = False 21 | 22 | pano_img_res = [256,512] 23 | 24 | sample_light = [32, 16] 25 | 26 | hdr_exposure = 5 27 | 28 | env_res = [8,16] 29 | val_sample_res = [16, 32] 30 | samples_point_mesh = 1024 31 | batch_size = 1 32 | 33 | path_mesh = ../reproject/result_master/master.obj 34 | path_mesh_open3d = ../data/inverse/customHouse/vrproc/hdr_texture/out1.obj 35 | results = ../results/test_house/ 36 | } 37 | 38 | loss{ 39 | idr_rgb_weight = 1.0 40 | eikonal_weight = 0.1 41 | mask_weight = 100.0 42 | alpha = 50.0 43 | 44 | sg_rgb_weight = 1.0 45 | kl_weight = 0.01 46 | latent_smooth_weight = 0.1 47 | 48 | loss_type = L1 49 | } 50 | 51 | irf_loss 52 | { 53 | loss_type = L1 54 | } 55 | 56 | render_loss 57 | { 58 | loss_type = L1 59 | w_gradient = 1 60 | } 61 | 62 | models{ 63 | feature_vector_size = 256 64 | 65 | incident_radiance_network 66 | { 67 | points_multires = 10 68 | dirs_multires = 4 69 | dims = [ 512, 512, 512, 512 ,512, 512, 512, 512 ] 70 | } 71 | 72 | material_network 73 | { 74 | points_multires = 10 75 | dims = [ 512, 512, 512, 512 ,512, 512, 512, 512] 76 | } 77 | 78 | irrf_network 79 | { 80 | points_multires = 10 81 | dims = [ 512, 512, 512, 512] 82 | p_input_dim = 3 83 | p_out_dim = 3 84 | } 85 | } -------------------------------------------------------------------------------- /configs/mat_hdrhouse_invrender.conf: 
-------------------------------------------------------------------------------- 1 | train{ 2 | expname = default 3 | dataset_class = datasets.dataset.ImageCubeDerived 4 | model_class = models.mat_nvdiffrast_invrender.MaterialModel 5 | irf_loss_class = models.loss.InvLoss 6 | 7 | plot_freq = 10 # iterations 8 | ckpt_freq = 10 # iterations 9 | num_pixels = 1024 10 | illum_num_pixels = 256 11 | 12 | alpha_milestones = [25000,50000,75000,100000,125000] # iterations 13 | alpha_factor = 2 14 | 15 | mat_epoch = 100 16 | mat_learning_rate = 5e-4 17 | mat_sched_step = 20 18 | mat_sched_factor = 0.8 19 | 20 | optim_cam = False 21 | 22 | pano_img_res = [256,512] 23 | 24 | sample_light = [32, 16] 25 | 26 | hdr_exposure = 3 27 | 28 | env_res = [8,16] 29 | val_sample_res = [16, 32] 30 | samples_point_mesh = 1024 31 | batch_size = 1 32 | 33 | path_mesh = ../reproject/result_master/master.obj 34 | path_mesh_open3d = ../data/inverse/cyclops-nPOdM9m9ByLM53j4_1/vrproc/hdr_texture/out1.obj 35 | results = ../results/test_house/ 36 | } 37 | 38 | loss{ 39 | idr_rgb_weight = 1.0 40 | eikonal_weight = 0.1 41 | mask_weight = 100.0 42 | alpha = 50.0 43 | 44 | sg_rgb_weight = 1.0 45 | kl_weight = 0.01 46 | latent_smooth_weight = 0.1 47 | 48 | loss_type = L1 49 | } 50 | 51 | irf_loss 52 | { 53 | loss_type = L1 54 | } 55 | 56 | render_loss 57 | { 58 | loss_type = L1 59 | w_gradient = 1 60 | } 61 | 62 | models{ 63 | feature_vector_size = 256 64 | 65 | incident_radiance_network 66 | { 67 | points_multires = 10 68 | dirs_multires = 4 69 | dims = [ 512, 512, 512, 512 ,512, 512, 512, 512 ] 70 | } 71 | 72 | material_network 73 | { 74 | points_multires = 10 75 | dims = [ 512, 512, 512, 512 ,512, 512, 512, 512] 76 | } 77 | 78 | irrf_network 79 | { 80 | points_multires = 10 81 | dims = [ 512, 512, 512, 512] 82 | p_input_dim = 3 83 | p_out_dim = 3 84 | } 85 | } -------------------------------------------------------------------------------- /configs/mat_hdrhouse_neilf.conf: -------------------------------------------------------------------------------- 1 | train{ 2 | expname = default 3 | dataset_class = datasets.dataset.ImageCubeDerived 4 | model_class = models.mat_nvdiffrast_neilf.MaterialModel 5 | irf_loss_class = models.loss.NeILFLoss 6 | 7 | plot_freq = 10 # iterations 8 | ckpt_freq = 10 # iterations 9 | num_pixels = 1024 10 | illum_num_pixels = 256 11 | 12 | alpha_milestones = [25000,50000,75000,100000,125000] # iterations 13 | alpha_factor = 2 14 | 15 | mat_epoch = 50 16 | mat_learning_rate = 0.002 17 | mat_sched_step = [20, 20] 18 | mat_sched_factor = 0.2 19 | 20 | optim_cam = False 21 | 22 | pano_img_res = [256,512] 23 | 24 | sample_light = [32, 16] 25 | 26 | hdr_exposure = 5 27 | 28 | env_res = [8,16] 29 | val_sample_res = [16, 32] 30 | samples_point_mesh = 1024 31 | batch_size = 1 32 | 33 | path_mesh = ../reproject/result_master/master.obj 34 | path_mesh_open3d = ../data/inverse/cyclops-nPOdM9m9ByLM53j4_1/vrproc/hdr_texture/out1.obj 35 | results = ../results/test_house/ 36 | } 37 | 38 | loss{ 39 | idr_rgb_weight = 1.0 40 | eikonal_weight = 0.1 41 | mask_weight = 100.0 42 | alpha = 50.0 43 | 44 | sg_rgb_weight = 1.0 45 | kl_weight = 0.01 46 | latent_smooth_weight = 0.1 47 | 48 | loss_type = L1 49 | } 50 | 51 | irf_loss 52 | { 53 | loss_type = L1 54 | } 55 | 56 | render_loss 57 | { 58 | loss_type = L1 59 | w_gradient = 1 60 | } 61 | 62 | models{ 63 | feature_vector_size = 256 64 | 65 | incident_radiance_network 66 | { 67 | points_multires = 10 68 | dirs_multires = 4 69 | dims = [ 512, 512, 512, 512 ,512, 512, 512, 512 
] 70 | } 71 | 72 | material_network 73 | { 74 | points_multires = 10 75 | dims = [ 512, 512, 512, 512 ,512, 512, 512, 512] 76 | } 77 | 78 | irrf_network 79 | { 80 | points_multires = 10 81 | dims = [ 512, 512, 512, 512] 82 | p_input_dim = 3 83 | p_out_dim = 3 84 | } 85 | } -------------------------------------------------------------------------------- /configs/mat_hdrhouse_rec.conf: -------------------------------------------------------------------------------- 1 | train{ 2 | expname = default 3 | dataset_class = datasets.dataset.ImageCubeDerived 4 | model_class = models.mat_nvdiffrast_recMLP.MaterialModel 5 | irf_loss_class = models.loss.NvDiffRecLoss 6 | 7 | plot_freq = 10 # iterations 8 | ckpt_freq = 10 # iterations 9 | num_pixels = 1024 10 | illum_num_pixels = 256 11 | 12 | alpha_milestones = [25000,50000,75000,100000,125000] # iterations 13 | alpha_factor = 2 14 | 15 | mat_epoch = 100 16 | mat_learning_rate = 1e-2 17 | mat_sched_step = 20 18 | mat_sched_factor = 0.8 19 | 20 | optim_cam = False 21 | 22 | pano_img_res = [256,512] 23 | 24 | sample_light = [32, 16] 25 | 26 | hdr_exposure = 3 27 | 28 | env_res = [8,16] 29 | val_sample_res = [16, 32] 30 | samples_point_mesh = 1024 31 | batch_size = 1 32 | 33 | path_mesh = ../reproject/result_master/master.obj 34 | path_mesh_open3d = ../data/inverse/cyclops-nPOdM9m9ByLM53j4_1/vrproc/hdr_texture/out1.obj 35 | results = ../results/test_house/ 36 | } 37 | 38 | loss{ 39 | idr_rgb_weight = 1.0 40 | eikonal_weight = 0.1 41 | mask_weight = 100.0 42 | alpha = 50.0 43 | 44 | sg_rgb_weight = 1.0 45 | kl_weight = 0.01 46 | latent_smooth_weight = 0.1 47 | 48 | loss_type = L1 49 | } 50 | 51 | irf_loss 52 | { 53 | loss_type = L1 54 | } 55 | 56 | render_loss 57 | { 58 | loss_type = L1 59 | w_gradient = 1 60 | } 61 | 62 | models{ 63 | feature_vector_size = 256 64 | 65 | incident_radiance_network 66 | { 67 | points_multires = 10 68 | dirs_multires = 4 69 | dims = [ 512, 512, 512, 512 ,512, 512, 512, 512 ] 70 | } 71 | 72 | material_network 73 | { 74 | points_multires = 10 75 | dims = [ 512, 512, 512, 512 ,512, 512, 512, 512] 76 | } 77 | 78 | irrf_network 79 | { 80 | points_multires = 10 81 | dims = [ 512, 512, 512, 512] 82 | p_input_dim = 3 83 | p_out_dim = 3 84 | } 85 | } -------------------------------------------------------------------------------- /configs/syn.conf: -------------------------------------------------------------------------------- 1 | train{ 2 | expname = default 3 | dataset_class = datasets.dataset.ImageCubeSyn 4 | model_class = models.mat_nvdiffrast.MaterialModel 5 | irf_loss_class = models.loss.RenderLoss 6 | 7 | plot_freq = 10 # iterations 8 | ckpt_freq = 10 # iterations 9 | num_pixels = 1024 10 | illum_num_pixels = 256 11 | 12 | alpha_milestones = [25000,50000,75000,100000,125000] # iterations 13 | alpha_factor = 2 14 | 15 | mat_epoch = 40 16 | mat_learning_rate = 3e-2 17 | mat_sched_step = 20 18 | mat_sched_factor = 0.8 19 | 20 | optim_cam = False 21 | 22 | pano_img_res = [256,512] 23 | 24 | sample_light = [32, 16] 25 | 26 | hdr_exposure = 5 27 | 28 | env_res = [8,16] 29 | val_sample_res = [16, 32] 30 | samples_point_mesh = 1024 31 | batch_size = 1 32 | 33 | path_mesh = ../reproject/result_master/master.obj 34 | path_mesh_open3d = ../data/inverse/customHouse/vrproc/hdr_texture/out1.obj 35 | results = ../results/test_house/ 36 | } 37 | 38 | val{ 39 | dataset_class = datasets.dataset.ImageMeshPoint 40 | env_res = [512,1024] 41 | batch_size = 512 42 | } 43 | 44 | loss{ 45 | idr_rgb_weight = 1.0 46 | eikonal_weight = 0.1 47 | 
mask_weight = 100.0 48 | alpha = 50.0 49 | 50 | sg_rgb_weight = 1.0 51 | kl_weight = 0.01 52 | latent_smooth_weight = 0.1 53 | 54 | loss_type = L1 55 | } 56 | 57 | irf_loss 58 | { 59 | loss_type = L1 60 | } 61 | 62 | render_loss 63 | { 64 | loss_type = L1 65 | w_gradient = 1 66 | } 67 | 68 | models{ 69 | feature_vector_size = 256 70 | tracer{ 71 | 72 | } 73 | render{ 74 | sample_type = [ uniform, importance] 75 | } 76 | 77 | 78 | incident_radiance_network 79 | { 80 | points_multires = 10 81 | dirs_multires = 4 82 | dims = [ 512, 512, 512, 512 ,512, 512, 512, 512 ] 83 | } 84 | 85 | material_network 86 | { 87 | points_multires = 10 88 | dims = [ 512, 512, 512, 512 ,512, 512, 512, 512] 89 | } 90 | 91 | irrf_network 92 | { 93 | points_multires = 10 94 | dims = [ 512, 512, 512, 512] 95 | p_input_dim = 3 96 | p_out_dim = 3 97 | } 98 | } -------------------------------------------------------------------------------- /configs/mat_hdrhouse.conf: -------------------------------------------------------------------------------- 1 | train{ 2 | expname = default 3 | dataset_class = datasets.dataset.ImageCubeDerived 4 | model_class = models.mat_nvdiffrast.MaterialModel 5 | irf_loss_class = models.loss.RenderLoss 6 | 7 | plot_freq = 10 # iterations 8 | ckpt_freq = 10 # iterations 9 | num_pixels = 1024 10 | illum_num_pixels = 256 11 | 12 | alpha_milestones = [25000,50000,75000,100000,125000] # iterations 13 | alpha_factor = 2 14 | 15 | mat_epoch = 40 16 | mat_learning_rate = 3e-2 17 | mat_sched_step = 20 18 | mat_sched_factor = 0.8 19 | 20 | optim_cam = False 21 | 22 | pano_img_res = [256,512] 23 | 24 | sample_light = [32, 16] 25 | 26 | hdr_exposure = 5 27 | 28 | env_res = [8,16] 29 | val_sample_res = [16, 32] 30 | samples_point_mesh = 1024 31 | batch_size = 1 32 | 33 | path_mesh = ../reproject/result_master/master.obj 34 | path_mesh_open3d = ../data/inverse/cyclops-7AGzV3bqKNRMXe6Q_2-4829c2937400a719f3bc85a22bd10085/vrproc/hdr_texture/out1.obj 35 | results = ../results/test_house/ 36 | } 37 | 38 | val{ 39 | dataset_class = datasets.dataset.ImageMeshPoint 40 | env_res = [512,1024] 41 | batch_size = 512 42 | } 43 | 44 | loss{ 45 | idr_rgb_weight = 1.0 46 | eikonal_weight = 0.1 47 | mask_weight = 100.0 48 | alpha = 50.0 49 | 50 | sg_rgb_weight = 1.0 51 | kl_weight = 0.01 52 | latent_smooth_weight = 0.1 53 | 54 | loss_type = L1 55 | } 56 | 57 | irf_loss 58 | { 59 | loss_type = L1 60 | } 61 | 62 | render_loss 63 | { 64 | loss_type = L1 65 | w_gradient = 1 66 | } 67 | 68 | models{ 69 | feature_vector_size = 256 70 | tracer{ 71 | 72 | } 73 | render{ 74 | sample_type = [ uniform, importance] 75 | } 76 | 77 | 78 | incident_radiance_network 79 | { 80 | points_multires = 10 81 | dirs_multires = 4 82 | dims = [ 512, 512, 512, 512 ,512, 512, 512, 512 ] 83 | } 84 | 85 | material_network 86 | { 87 | points_multires = 10 88 | dims = [ 512, 512, 512, 512 ,512, 512, 512, 512] 89 | } 90 | 91 | irrf_network 92 | { 93 | points_multires = 10 94 | dims = [ 512, 512, 512, 512] 95 | p_input_dim = 3 96 | p_out_dim = 3 97 | } 98 | } -------------------------------------------------------------------------------- /configs/hdrhouse_ir_texture.conf: -------------------------------------------------------------------------------- 1 | train{ 2 | expname = default 3 | dataset_class = datasets.dataset.ImageCubeDerived 4 | model_class = models.tracer_o3d_irt.TracerO3d 5 | irf_loss_class = models.loss.RenderLoss 6 | 7 | plot_freq = 10 # iterations 8 | ckpt_freq = 10 # iterations 9 | num_pixels = 1024 10 | illum_num_pixels = 256 11 | 12 | 
alpha_milestones = [25000,50000,75000,100000,125000] # iterations 13 | alpha_factor = 2 14 | 15 | mat_epoch = 40 16 | mat_learning_rate = 3e-2 17 | mat_sched_step = 20 18 | mat_sched_factor = 0.8 19 | 20 | optim_cam = False 21 | 22 | pano_img_res = [256,512] 23 | 24 | sample_light = [2048, 16] 25 | 26 | hdr_exposure = 5 27 | 28 | env_res = [8,16] 29 | val_sample_res = [16, 32] 30 | samples_point_mesh = 1024 31 | batch_size = 1 32 | 33 | path_mesh = ../reproject/result_master/master.obj 34 | path_mesh_open3d = ../data/inverse/cyclops-7AGzV3bqKNRMXe6Q_2-4829c2937400a719f3bc85a22bd10085/vrproc/hdr_texture/out1.obj 35 | results = ../results/test_house/ 36 | } 37 | 38 | val{ 39 | dataset_class = datasets.dataset.ImageMeshPoint 40 | env_res = [512,1024] 41 | batch_size = 512 42 | } 43 | 44 | loss{ 45 | idr_rgb_weight = 1.0 46 | eikonal_weight = 0.1 47 | mask_weight = 100.0 48 | alpha = 50.0 49 | 50 | sg_rgb_weight = 1.0 51 | kl_weight = 0.01 52 | latent_smooth_weight = 0.1 53 | 54 | loss_type = L1 55 | } 56 | 57 | irf_loss 58 | { 59 | loss_type = L1 60 | } 61 | 62 | render_loss 63 | { 64 | loss_type = L1 65 | w_gradient = 1 66 | } 67 | 68 | models{ 69 | feature_vector_size = 256 70 | tracer{ 71 | 72 | } 73 | render{ 74 | sample_type = [ uniform, importance] 75 | } 76 | 77 | 78 | incident_radiance_network 79 | { 80 | points_multires = 10 81 | dirs_multires = 4 82 | dims = [ 512, 512, 512, 512 ,512, 512, 512, 512 ] 83 | } 84 | 85 | material_network 86 | { 87 | points_multires = 10 88 | dims = [ 512, 512, 512, 512 ,512, 512, 512, 512] 89 | } 90 | 91 | irrf_network 92 | { 93 | points_multires = 10 94 | dims = [ 512, 512, 512, 512] 95 | p_input_dim = 3 96 | p_out_dim = 3 97 | } 98 | } -------------------------------------------------------------------------------- /tester/exp_runner.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import argparse 3 | import GPUtil 4 | import torch 5 | 6 | 7 | from tester.test_editing import MatEditingRunner 8 | from tester.test_novel import NovelViewRunner 9 | from tester.test_relighting import RelightingRunner 10 | from tester.test_error import MatErrorRunner 11 | 12 | torch.autograd.set_detect_anomaly(True) 13 | 14 | if __name__ == '__main__': 15 | 16 | parser = argparse.ArgumentParser() 17 | parser.add_argument('--conf', type=str, default='') 18 | parser.add_argument('--exps_folder_name', type=str, default='exps') 19 | parser.add_argument('--expname', type=str, default='') 20 | parser.add_argument('--teststage', type=str, default='IRF', help='') 21 | 22 | parser.add_argument('--frame_skip', type=int, default=1, help='skip frame when training') 23 | # parser.add_argument('--nepoch', type=int, default=2000, help='number of epochs to train for') 24 | parser.add_argument('--max_niter', type=int, default=200001, help='max number of iterations to train for') 25 | parser.add_argument('--is_continue', default=False, action="store_true", 26 | help='If set, indicates continuing from a previous run.') 27 | parser.add_argument('--timestamp', default='latest', type=str, 28 | help='The timestamp of the run to be used in case of continuing from a previous run.') 29 | parser.add_argument('--checkpoint', default='latest', type=str, 30 | help='The checkpoint epoch number of the run to be used in case of continuing from a previous run.') 31 | parser.add_argument('--gpu', type=str, default='auto', help='GPU to use [default: GPU auto]') 32 | 33 | opt = parser.parse_args() 34 | 35 | if opt.gpu == "auto": 36 | deviceIDs = 
GPUtil.getAvailable(order='memory', limit=1, maxLoad=0.5, 37 | maxMemory=0.5, includeNan=False, excludeID=[], excludeUUID=[]) 38 | gpu = deviceIDs[0] 39 | else: 40 | gpu = opt.gpu 41 | 42 | runder_dict = { 43 | 'Editing': MatEditingRunner, 44 | 'View': NovelViewRunner, 45 | 'Relighting': RelightingRunner, 46 | 'Error': MatErrorRunner 47 | } 48 | 49 | trainrunner = runder_dict[opt.teststage](conf=opt.conf, 50 | exps_folder_name=opt.exps_folder_name, 51 | expname=opt.expname, 52 | frame_skip=opt.frame_skip, 53 | max_niters=opt.max_niter, 54 | is_continue=True, 55 | timestamp=opt.timestamp, 56 | checkpoint=opt.checkpoint, 57 | gpu_index=gpu 58 | ) 59 | 60 | trainrunner.run() -------------------------------------------------------------------------------- /trainer/generate_ir_texture.py: -------------------------------------------------------------------------------- 1 | ''' 2 | @File : generate_ir_texture.py 3 | @Time : 2023/02/27 12:11:17 4 | @Author : Zhen Li 5 | @Contact : yodlee@mail.nwpu.edu.cn 6 | @Institution: Realsee 7 | @License : GNU General Public License v2.0 8 | @Desc : None 9 | ''' 10 | 11 | 12 | import os 13 | import sys 14 | from datetime import datetime 15 | import time 16 | import itertools 17 | from utils.sample_util import TINY_NUMBER 18 | 19 | import cv2 20 | import numpy as np 21 | import torch 22 | from torch.nn import functional as F 23 | from pyhocon import ConfigFactory 24 | from tensorboardX import SummaryWriter 25 | 26 | import utils.general as utils 27 | import utils.plots as plt 28 | from models.loss import IRFLoss 29 | from utils.Cube2Pano import Cube2Pano 30 | 31 | class IrrTextureRunner(): 32 | def __init__(self,**kwargs): 33 | torch.set_default_dtype(torch.float32) 34 | torch.set_num_threads(1) 35 | 36 | self.conf = ConfigFactory.parse_file(kwargs['conf']) 37 | self.exps_folder_name = kwargs['exps_folder_name'] 38 | self.train_batch_size = self.conf.get_int('train.batch_size') 39 | self.nepochs = self.conf.get_int('train.mat_epoch') 40 | 41 | self.max_niters = kwargs['max_niters'] 42 | self.GPU_INDEX = kwargs['gpu_index'] 43 | 44 | # fix random seed 45 | torch.manual_seed(666) 46 | torch.cuda.manual_seed(666) 47 | np.random.seed(666) 48 | 49 | if (not self.GPU_INDEX == 'ignore'): 50 | os.environ["CUDA_VISIBLE_DEVICES"] = '{0}'.format(self.GPU_INDEX) 51 | 52 | print('shell command : {0}'.format(' '.join(sys.argv))) 53 | 54 | print('Loading data ...') 55 | self.train_dataset = utils.get_class(self.conf.get_string('train.dataset_class'))( 56 | self.conf.get_string('train.path_mesh_open3d'), self.conf.get_list('train.pano_img_res'), self.conf.get_float('train.hdr_exposure')) 57 | print('Finish loading data ...') 58 | 59 | self.train_dataloader = torch.utils.data.DataLoader(self.train_dataset, 60 | batch_size=1, 61 | shuffle=True 62 | ) 63 | 64 | self.model = utils.get_class(self.conf.get_string('train.model_class'))(conf=self.conf, \ 65 | ids=self.train_dataset.ids, extrinsics = self.train_dataset.extrinsics_list, optim_cam=self.conf.get_bool('train.optim_cam')) 66 | if torch.cuda.is_available(): 67 | self.model.cuda() 68 | 69 | self.start_epoch = 0 70 | # self.room_meta_scale, self.room_meta_w, self.room_meta_h, self.room_meta_xmin, self.room_meta_zmin, self.room_img= utils.parse_roomseg(\ 71 | # os.path.join(os.path.dirname(os.path.dirname(self.conf.get_string('train.path_mesh_open3d'))), 'roomseg')) 72 | 73 | 74 | 75 | def run(self): 76 | print("generating...") 77 | irr_texture = self.model() 78 | target_texture_path = 
self.conf.get_string('train.path_mesh_open3d').replace('out1.obj', '0_irr_texture.hdr')

        irr_texture_numpy = irr_texture.cpu().numpy()[:,:,::-1]
        print(irr_texture_numpy.shape)
        cv2.imwrite(target_texture_path, irr_texture_numpy)
--------------------------------------------------------------------------------
/utils/plots.py:
--------------------------------------------------------------------------------
'''
@File    :   plots.py
@Time    :   2023/02/27 12:08:12
@Author  :   Zhen Li
@Contact :   yodlee@mail.nwpu.edu.cn
@Institution:   Realsee
@License :   GNU General Public License v2.0
@Desc    :   None
'''



import numpy as np
from PIL import Image
import cv2

import torch
import torchvision

from utils.general import hdr_recover

tonemap_img = lambda x: torch.pow(x, 1./2.2)
clip_img = lambda x: torch.clamp(x, min=0., max=1.)



def plot_irf(path, iters, gt_irf, pred_irf, name='rendering'):
    """Write IRF results when validating env points.

    Args:
        path (str): output directory.
        iters (int): iteration index used in the file name.
        gt_irf (torch.float): [h, w, 3]
        pred_irf (torch.float): [h, w, 3]
    """

    # gt_irf = clip_img(tonemap_img(gt_irf))
    # pred_irf = clip_img(tonemap_img((pred_irf)))
    gt_irf = gt_irf #* (2**(-6))
    pred_irf = (pred_irf) #* (2**(-6))
    out = torch.stack((pred_irf, gt_irf), dim=0).permute(0,3,1,2) # shape: (2, c, h, w)
    out = torchvision.utils.make_grid(out,
                                      scale_each=False,
                                      normalize=False,
                                      nrow=1).permute(1,2,0).numpy() # shape: (h, w, c)

    # write the rgb image via opencv-python
    # cv2.imwrite('{0}/rendering_{1}.png'.format(path, iters), out[:,:,::-1]*255)
    cv2.imwrite('{0}/{1}_{2}.exr'.format(path, name, iters), out[:,:,::-1])
    print('saving render img to {0}/{1}_{2}.exr'.format(path, name, iters))

def plot_gbuffer(path, iters, pred_gbuffer_mat, is_tonemapped=True):
    """Write g-buffer material results.

    Args:
        path (str): output directory.
        iters (int): iteration index used in the file name.
        pred_gbuffer_mat (torch.float): [h, w, 3]
    """
    if is_tonemapped:
        # tonemap, clamp to [0, 1] and convert to numpy in one step
        pred_gbuffer_mat = clip_img(tonemap_img(pred_gbuffer_mat)).numpy() # shape: (h, w, c)
        cv2.imwrite('{0}/gbuffer_{1}.png'.format(path, iters), pred_gbuffer_mat[:,:,::-1]*255)
    else:
        #pred_gbuffer_mat = clip_img((pred_gbuffer_mat)).numpy() # shape: (h, w, c)
        pred_gbuffer_mat = pred_gbuffer_mat.numpy()
        cv2.imwrite('{0}/gbuffer_{1}.exr'.format(path, iters), pred_gbuffer_mat[:,:,::-1])
    print('saving render img to {0}/gbuffer_{1}'.format(path, iters))

def plot_mat(path, iters, pred_gbuffer_mat, name='mat', is_tonemapped=True):
    """Write g-buffer material results.

    Args:
        path (str): output directory.
        iters (int): iteration index used in the file name.
        pred_gbuffer_mat (torch.float): [h, w, 3]
    """
    if is_tonemapped:
        pred_gbuffer_mat = clip_img(tonemap_img(pred_gbuffer_mat)).numpy() # shape: (h, w, c)

        cv2.imwrite('{0}/{1}_{2}.png'.format(path, name, iters), pred_gbuffer_mat[:,:,::-1]*255)
    else:
        #pred_gbuffer_mat = clip_img((pred_gbuffer_mat)).numpy() # shape: (h, w, c)
        pred_gbuffer_mat = pred_gbuffer_mat.numpy()
        cv2.imwrite('{0}/{1}_{2}.hdr'.format(path, name, iters), pred_gbuffer_mat[:,:,::-1])
    print('saving render img to {0}/{1}_{2}'.format(path, name, iters))
--------------------------------------------------------------------------------
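For orientation, a minimal sketch of how these plotting helpers are typically driven; tensor shapes follow the docstrings above, and the output directory and iteration index are illustrative:

```
import torch
from utils.plots import plot_irf, plot_mat

# Illustrative only: random HDR-like [h, w, 3] tensors standing in for
# ground-truth and predicted incident radiance at validation points.
gt = torch.rand(64, 128, 3) * 4.0
pred = torch.rand(64, 128, 3) * 4.0

plot_irf('/tmp/plots', 0, gt, pred)                     # writes rendering_0.exr (pred stacked over gt)
plot_mat('/tmp/plots', 0, pred, name='albedo')          # writes tonemapped albedo_0.png
```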
/trainer/exp_runner.py:
--------------------------------------------------------------------------------
import sys
sys.path.append("../TexIR_code")
import argparse
import GPUtil
import torch

from trainer.train_irf import IRFTrainRunner
from trainer.train_material import MatTrainRunner
from trainer.train_irrf import IRRFTrainRunner
from trainer.train_pil import PILTrainRunner

from trainer.generate_ir_texture import IrrTextureRunner

from trainer.train_material_invrender import MatInvTrainRunner
from trainer.train_material_neilf import MatNeilfTrainRunner
from trainer.train_material_recMLP import MatRecMLPTrainRunner

from trainer.train_material_syn import MatTrainSynRunner
from trainer.train_material_recMLP_syn import MatRecMLPTrainSynRunner
from trainer.train_material_neilf_syn import MatNeilfTrainSynRunner
from trainer.train_material_invrender_syn import MatInvTrainSynRunner


torch.autograd.set_detect_anomaly(True)

if __name__ == '__main__':

    parser = argparse.ArgumentParser()
    parser.add_argument('--conf', type=str, default='')
    parser.add_argument('--exps_folder_name', type=str, default='exps')
    parser.add_argument('--expname', type=str, default='')
    parser.add_argument('--trainstage', type=str, default='IRF', help='')

    parser.add_argument('--frame_skip', type=int, default=1, help='skip frame when training')
    # parser.add_argument('--nepoch', type=int, default=2000, help='number of epochs to train for')
    parser.add_argument('--max_niter', type=int, default=200001, help='max number of iterations to train for')
    parser.add_argument('--is_continue', default=False, action="store_true",
                        help='If set, indicates continuing from a previous run.')
    parser.add_argument('--timestamp', default='latest', type=str,
                        help='The timestamp of the run to be used in case of continuing from a previous run.')
    parser.add_argument('--checkpoint', default='latest', type=str,
                        help='The checkpoint epoch number of the run to be used in case of continuing from a previous run.')
    parser.add_argument('--gpu', type=str, default='auto', help='GPU to use [default: GPU auto]')

    opt = parser.parse_args()

    if opt.gpu == "auto":
        deviceIDs = GPUtil.getAvailable(order='memory', limit=1, maxLoad=0.5,
                                        maxMemory=0.5, includeNan=False, excludeID=[], excludeUUID=[])
        gpu = deviceIDs[0]
    else:
        gpu = opt.gpu

    runder_dict = {
        'IRF': IRFTrainRunner,
        'Mat': MatTrainRunner,
        'IRRF': IRRFTrainRunner,
        'PIL': PILTrainRunner,
        'Inv': MatInvTrainRunner,
        'Neilf': MatNeilfTrainRunner,
        'IrrT': IrrTextureRunner,
        'RecMLP': MatRecMLPTrainRunner,
'MatSyn': MatTrainSynRunner, 64 | 'RecMLPSyn': MatRecMLPTrainSynRunner, 65 | 'NeilfSyn': MatNeilfTrainSynRunner, 66 | 'InvSyn': MatInvTrainSynRunner 67 | } 68 | 69 | trainrunner = runder_dict[opt.trainstage](conf=opt.conf, 70 | exps_folder_name=opt.exps_folder_name, 71 | expname=opt.expname, 72 | frame_skip=opt.frame_skip, 73 | max_niters=opt.max_niter, 74 | is_continue=opt.is_continue, 75 | timestamp=opt.timestamp, 76 | checkpoint=opt.checkpoint, 77 | gpu_index=gpu 78 | ) 79 | 80 | trainrunner.run() -------------------------------------------------------------------------------- /tools/padding_texture.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | from scipy import ndimage 4 | import torch 5 | import torch.nn.functional as F 6 | import os 7 | 8 | def padding_index_texture(path): 9 | img = cv2.imread(path,-1) 10 | img = np.asarray(img, np.float32) 11 | h, w, c = img.shape 12 | 13 | intensity = img[:,:,0] + img[:,:,1] + img[:,:, 2] 14 | 15 | mask = np.asarray(intensity==0.0, dtype=np.uint8) 16 | 17 | # # 3 is better than 7 18 | # kernel = np.ones((3,3), np.uint8) 19 | # mask_large = cv2.dilate(mask, kernel) 20 | mask_large = mask 21 | 22 | distance, indices = ndimage.distance_transform_edt(mask_large, return_indices=True) 23 | indices = torch.from_numpy(indices).permute(1,2,0).reshape(-1,2) 24 | 25 | # indexes = np.argwhere(distance==0) 26 | # indexes = distance == 0 27 | 28 | img_torch = torch.from_numpy(img).permute(2, 0, 1).unsqueeze(0) 29 | 30 | uv_init = torch.zeros((h*w, 2), dtype=torch.float32) 31 | 32 | permute = [1,0] 33 | uv_init[mask_large.reshape(-1)] = indices[mask_large.reshape(-1)][:,permute].float()/torch.tensor([w, h]).unsqueeze(0) * 2. -1. 34 | 35 | uv_init = uv_init.reshape(h, w, 2).unsqueeze(0) 36 | 37 | res = F.grid_sample(img_torch, uv_init, mode='nearest')[0].permute(1,2,0).numpy() 38 | res = res * np.asarray(mask,np.float32)[:,:,np.newaxis] + img * np.asarray(1-mask, np.float32)[:,:,np.newaxis] 39 | 40 | # kernel = np.ones((3,3), np.uint16) 41 | # res = cv2.dilate(res, kernel) 42 | res = np.asarray(res, np.uint16) 43 | cv2.imwrite(path.replace('.png','_padding.png'), res) 44 | # cv2.imwrite(path.replace("0_irr_texture", 't'), res) 45 | # denoise 46 | # cmd = "/home/SecondDisk/Code/opensource/oidn/build/oidnDenoise --hdr {} -o {}".format(path.replace("0_irr_texture", 't'), path.replace("0_irr_texture", '0_irr_texture_denoised_padding_1')) 47 | # os.system(cmd) 48 | 49 | def padding_texture(path): 50 | img = cv2.imread(path,-1) 51 | img = np.asarray(img, np.float32) 52 | h, w, c = img.shape 53 | 54 | intensity = img[:,:,0] + img[:,:,1] + img[:,:, 2] 55 | 56 | mask = np.asarray(intensity==0.0, dtype=np.uint8) 57 | 58 | # # 3 is better than 7 59 | # kernel = np.ones((3,3), np.uint8) 60 | # mask_large = cv2.dilate(mask, kernel) 61 | mask_large = mask 62 | 63 | distance, indices = ndimage.distance_transform_edt(mask_large, return_indices=True) 64 | indices = torch.from_numpy(indices).permute(1,2,0).reshape(-1,2) 65 | 66 | # indexes = np.argwhere(distance==0) 67 | # indexes = distance == 0 68 | 69 | img_torch = torch.from_numpy(img).permute(2, 0, 1).unsqueeze(0) 70 | 71 | uv_init = torch.zeros((h*w, 2), dtype=torch.float32) 72 | 73 | permute = [1,0] 74 | uv_init[mask_large.reshape(-1)] = indices[mask_large.reshape(-1)][:,permute].float()/torch.tensor([w, h]).unsqueeze(0) * 2. -1. 
75 | 76 | uv_init = uv_init.reshape(h, w, 2).unsqueeze(0) 77 | 78 | res = F.grid_sample(img_torch, uv_init, mode='nearest')[0].permute(1,2,0).numpy() 79 | res = res * np.asarray(mask,np.float32)[:,:,np.newaxis] + img * np.asarray(1-mask, np.float32)[:,:,np.newaxis] 80 | 81 | # kernel = np.ones((3,3), np.uint16) 82 | # res = cv2.dilate(res, kernel) 83 | 84 | cv2.imwrite(path.replace("0_irr_texture", 't'), res) 85 | # denoise 86 | cmd = "/home/SecondDisk/Code/opensource/oidn/build/oidnDenoise --hdr {} -o {}".format(path.replace("0_irr_texture", 't'), path.replace("0_irr_texture", '0_irr_texture_denoised_padding')) 87 | os.system(cmd) 88 | 89 | if __name__=="__main__": 90 | padding_texture("/home/SecondDisk/test_data/galois_model/inverse/hdrhouse/customHouse/vrproc/hdr_texture/0_irr_texture.hdr") 91 | # padding_index_texture("/home/SecondDisk/test_data/galois_model/inverse/hdrhouse/customHouse/vrproc/hdr_texture/source/output0000_padding.png") -------------------------------------------------------------------------------- /utils/general.py: -------------------------------------------------------------------------------- 1 | import os 2 | from glob import glob 3 | import torch 4 | import numpy as np 5 | import cv2 6 | 7 | def mkdir_ifnotexists(directory): 8 | if not os.path.exists(directory): 9 | os.makedirs(directory) 10 | 11 | 12 | def get_class(kls): 13 | parts = kls.split('.') 14 | module = ".".join(parts[:-1]) 15 | m = __import__(module) 16 | for comp in parts[1:]: 17 | m = getattr(m, comp) 18 | return m 19 | 20 | 21 | def glob_imgs(path): 22 | imgs = [] 23 | for ext in ['*.png', '*.jpg', '*.JPEG', '*.JPG', '*.exr']: 24 | imgs.extend(glob(os.path.join(path, ext))) 25 | return imgs 26 | 27 | 28 | def split_input(model_input, total_pixels): 29 | ''' 30 | Split the input to fit Cuda memory for large resolution. 31 | Can decrease the value of n_pixels in case of cuda out of memory error. 32 | ''' 33 | # n_pixels = 20000 34 | n_pixels = 2000 35 | split = [] 36 | for i, indx in enumerate(torch.split(torch.arange(total_pixels).cuda(), n_pixels, dim=0)): 37 | data = model_input.copy() 38 | data['uv'] = torch.index_select(model_input['uv'], 1, indx) 39 | data['object_mask'] = torch.index_select(model_input['object_mask'], 1, indx) 40 | split.append(data) 41 | return split 42 | 43 | 44 | def merge_output(res, total_pixels, batch_size): 45 | ''' Merge the split output. ''' 46 | 47 | model_outputs = {} 48 | for entry in res[0]: 49 | if res[0][entry] is None: 50 | continue 51 | if len(res[0][entry].shape) == 1: 52 | model_outputs[entry] = torch.cat([r[entry].reshape(batch_size, -1, 1) for r in res], 53 | 1).reshape(batch_size * total_pixels) 54 | else: 55 | model_outputs[entry] = torch.cat([r[entry].reshape(batch_size, -1, r[entry].shape[-1]) for r in res], 56 | 1).reshape(batch_size * total_pixels, -1) 57 | 58 | return model_outputs 59 | 60 | 61 | def hdr_scale(img, base=np.e): 62 | # if scale_model=="e": 63 | # return torch.log1p(img) 64 | # tensor_two = torch.full_like(img,2) 65 | # return tensor_two.min(torch.log10(1+img))-1 66 | return torch.log(img+1) / np.math.log(base) 67 | 68 | def hdr_recover(img, base=np.e): 69 | return torch.pow(base, img)-1 70 | 71 | def mse_to_psnr(mse): 72 | """Compute PSNR given an MSE (we assume the maximum pixel value is 1).""" 73 | return -10. / np.log(10.) * torch.log(mse) 74 | 75 | def psnr_to_mse(psnr): 76 | """Compute MSE given a PSNR (we assume the maximum pixel value is 1).""" 77 | return torch.exp(-0.1 * np.log(10.) 
* psnr)

def tonemapping(img):
    """Gamma-tonemap an image (1/2.2) and clamp to [0, 1].

    Args:
        img (torch.tensor): [b, h, w, c]
    """
    return torch.clamp(img**(1/2.2), 0., 1.)


def get_mip_level(n):
    # count how many times n can be halved before reaching an odd value,
    # i.e. log2(n) for power-of-two resolutions
    count = 0
    while not (n & 1 or n==1):
        n = n>>1
        count = count + 1
    return count

def rgb_to_intensity(tensor, dim=-1):
    """Convert RGB to luma with Rec. 601 weights.

    Args:
        tensor (torch.float32): shape: [h, w, 3] / [b,h,w,3]

    Returns:
        torch.float32: single-channel intensity taken along `dim`.
    """
    if dim== -1:
        return 0.29900 * tensor[...,0:1] + 0.58700 * tensor[...,1:2] + 0.11400 * tensor[...,2:3]
    # assume that the shape of input is [b,h,w,3]
    elif dim == 0:
        return 0.29900 * tensor[0:1,...] + 0.58700 * tensor[1:2,...] + 0.11400 * tensor[2:3,...]
    elif dim ==1:
        return 0.29900 * tensor[:,0:1,...] + 0.58700 * tensor[:,1:2,...] + 0.11400 * tensor[:,2:3,...]
    elif dim ==2:
        return 0.29900 * tensor[:,:,0:1,...] + 0.58700 * tensor[:,:,1:2,...] + 0.11400 * tensor[:,:,2:3,...]


def parse_roomseg(path):
    with open(os.path.join(path, 'originOccupancyGrid_f0.meta'), 'r') as f:
        first_line = f.readline()

    _, _w, _h, x_min, z_min = first_line.strip().split(" ")

    roomsegs = cv2.imread(os.path.join(path, 'roomSegs_uchar_f0.png'))
    roomsegs = np.asarray(roomsegs, np.float32)
    roomsegs = torch.from_numpy(roomsegs)[:,:,0:1].unsqueeze(0).permute(0, 3, 1, 2) # shape: [1, 1, h, w]

    return float(_), float(_w), float(_h), float(x_min), float(z_min), roomsegs


def scale_compute(gt, prediction):
    # NOTE: torch.lstsq is deprecated (removed in recent PyTorch releases);
    # on new versions use torch.linalg.lstsq(prediction..., gt...).solution instead.
    scale, _ = torch.lstsq(gt.flatten().unsqueeze(1), prediction.flatten().unsqueeze(1))
    return scale[0, 0].clone().detach()
--------------------------------------------------------------------------------
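To make the HDR log-encoding helpers above concrete, a small self-check; this is a sketch that assumes the repo root is on `PYTHONPATH` (and note `hdr_scale` relies on the legacy `np.math` alias, so it needs an older NumPy):

```
import torch
from utils.general import hdr_scale, hdr_recover, mse_to_psnr

img = torch.rand(4, 4, 3) * 10.0      # HDR radiance values
enc = hdr_scale(img)                  # log-encode: log(1 + x)
dec = hdr_recover(enc)                # inverse: e**y - 1
assert torch.allclose(img, dec, atol=1e-4)

mse = torch.mean((img - dec) ** 2) + 1e-12   # epsilon avoids log(0)
print(mse_to_psnr(mse))               # very large PSNR for a near-exact round trip
```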
/tools/relighting_varying.py:
--------------------------------------------------------------------------------
import os
import numpy as np
import cv2
import glob

from scipy.interpolate import interp1d



def change_color(frame_per_color, colors):
    new_colors = []
    new_colors.append(','.join(colors[0]))
    for i in range(1, len(colors)):
        x = np.array([0, 1])
        r = np.array([colors[i-1][0], colors[i][0]])
        f1 = interp1d(x, r)
        new_r = f1(np.linspace(0, 1, frame_per_color))

        g = np.array([colors[i-1][1], colors[i][1]])
        f2 = interp1d(x, g)
        new_g = f2(np.linspace(0, 1, frame_per_color))

        b = np.array([colors[i-1][2], colors[i][2]])
        f1 = interp1d(x, b)
        new_b = f1(np.linspace(0, 1, frame_per_color))

        for j in range(frame_per_color):
            new_colors.append('{},{},{}'.format(new_r[j], new_g[j], new_b[j]))

    return new_colors


# varying
lists=[6]
for index in lists:
    root_path = "/home/SecondDisk/Material/mitsuba-scenes/room_relighting/hdrhouse{}/varying".format(index)


    with open(os.path.join(root_path, 'cbox_pano_optix.xml'), 'r') as f:
        all_info = f.read()

    # # hdrhouse 1
    # colors = [
    #     list(map(str,[2.2, 12.38, 24.14])),
    #     list(map(str,[24.2, 2.38, 12.14])),
    #     list(map(str,[24.2, 2.38, 2.14])),
    #     list(map(str,[2.2, 24.38, 2.14])),
    #     list(map(str,[2.2, 24.38, 12.14])),
    #     list(map(str,[2.2, 12.38, 24.14]))
    # ]

    # hdrhouse 6
    colors = [
        list(map(str,[6.38, 12.14, 1.2])),
        list(map(str,[6.2, 1.38, 12.14])),
        list(map(str,[1.2, 12.38, 12.14])),
        list(map(str,[12.2, 12.38, 6.14])),
        list(map(str,[12.2, 1.38, 12.14])),
        list(map(str,[6.38, 12.14, 1.2]))
    ]


    frame_per_color = 5

    new_colors = change_color(frame_per_color, colors)

    for i in range(len(new_colors)):
        target_xml = os.path.join(root_path, 'cbox_pano_optix_{}.xml'.format(i))
        with open(target_xml, 'w') as f:
            f.write(all_info.replace('$$', new_colors[i]))

        cmd = "/home/lz/WorkSpace/code/OptixRender_lingli/OptixRenderer/build/bin/optixRenderer -f {} -o test_{}.png".format(target_xml,i)
        print(cmd)
        os.system(cmd)

        cmd = "/home/SecondDisk/Code/opensource/oidn/build/oidnDenoise --ldr {} --alb {} -o {}"\
            .format(os.path.join(root_path, 'test_{}_1.png'.format(i)),\
            os.path.join(root_path, 'test_{}baseColor_1.png'.format(i)),\
            os.path.join(root_path, 'test_denoise_{}.png'.format(i+1)) )
        os.system(cmd)


# moving
lists=[6]
for index in lists:
    root_path = "/home/SecondDisk/Material/mitsuba-scenes/room_relighting/hdrhouse{}/moving".format(index)


    with open(os.path.join(root_path, 'cbox_pano_optix.xml'), 'r') as f:
        all_info = f.read()
    # # hdrhouse 1
    # colors = [
    #     list(map(str,[-1.54852, -0.69413831, 6.56525])),
    #     list(map(str,[-1.54852, 0, 6.56525])),
    #     list(map(str,[-2.54852, 0, 5.56525])),
    #     list(map(str,[-2.54852, 0, 7.06525])),
    #     list(map(str,[-0.54852, 0, 7.06525])),
    #     list(map(str,[-1.54852, -0.69413831, 6.56525]))
    # ]

    # hdrhouse 6
    colors = [
        list(map(str,[-8.22709, -0.69413831, -3.45957])),
        list(map(str,[-8.22709, 0, -3.45957])),
        list(map(str,[-9.22709, 0, -3.95957])),
        list(map(str,[-9.22709, 0, -2.45957])),
        list(map(str,[-7.22709, 0, -2.45957])),
        list(map(str,[-8.22709, -0.69413831, -3.45957]))
    ]
    frame_per_color = 5

    new_colors = change_color(frame_per_color, colors)

    for i in range(len(new_colors)):
        target_xml = os.path.join(root_path, 'cbox_pano_optix_{}.xml'.format(i))
        rgb = new_colors[i].split(',')
        with open(target_xml, 'w') as f:
            f.write(all_info.replace('$x$', rgb[0]).replace('$y$', rgb[1]).replace('$z$', rgb[2]) )

        cmd = "/home/lz/WorkSpace/code/OptixRender_lingli/OptixRenderer/build/bin/optixRenderer -f {} -o test_{}.png".format(target_xml,i)
        print(cmd)
        os.system(cmd)

        cmd = "/home/SecondDisk/Code/opensource/oidn/build/oidnDenoise --ldr {} --alb {} -o {}"\
            .format(os.path.join(root_path, 'test_{}_1.png'.format(i)),\
            os.path.join(root_path, 'test_{}baseColor_1.png'.format(i)),\
            os.path.join(root_path, 'test_denoise_{}.png'.format(i+1)) )
        os.system(cmd)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
## TexIR

This is the official code for our CVPR 2023 paper, "Multi-view Inverse Rendering for Large-scale Real-world Indoor Scenes".

[Project Page](http://yodlee.top/TexIR/) | [Paper](https://arxiv.org/abs/2211.10206)

![](./examples/fig1.png)

----

#### 1. Installation

We encourage users to run our code in Docker, which ensures the reproducibility of our implementation.

First, please follow [nvdiffrec](https://github.com/NVlabs/nvdiffrec#server-usage-through-docker) to build a basic docker image (an illustrative build command is shown below).
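For reference only — the Dockerfile location and build context come from the nvdiffrec instructions linked above; the only thing our scripts assume is the `nvdiffrec:v1` tag used by the `docker run` command in the next step:

```
docker build -f docker/Dockerfile -t nvdiffrec:v1 .
```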

Second, start an interactive docker container: `docker run --runtime nvidia -it --rm --user root --gpus device=0 -v /your_data_folder:/data -v /code_folder:/code nvdiffrec:v1 /bin/bash`

Finally, install the Python packages inside the container: `pip install -r requirements.txt`

#### 2. Dataset

We release the real dataset and the synthetic dataset proposed in the paper.

Please send an email to ~~yodlee@mail.nwpu.edu.cn~~ wanglingli008@realsee.com or pancihui001@realsee.com with the necessary information below, and we will send you a download link.

```
# the necessary information for the request email
dataset: texir
name: xx
organization: xx
email for receiving link: xx
```

#### 3. Running on our data

We release our precomputed irradiance, both NIrF and IrT. Users can therefore run the **Material estimation** directly with the IrT, or reproduce the NIrF and IrT via **PIR**.

##### 3.1 Material estimation

For the synthetic dataset, set 'path_mesh_open3d' to the actual path inside the container and run:

```
python3 trainer/exp_runner.py --conf configs/syn.conf --expname syn1 --trainstage MatSyn --gpu auto
```

For the real dataset, set 'path_mesh_open3d' to the actual path inside the container and set 'hdr_exposure': Scene 1 and Scene 7 use exposure 3; the other scenes use exposure 5. Also change the threshold in [loss.py]() according to the comments there.

Finally, run:

```
python3 trainer/exp_runner.py --conf configs/mat_hdrhouse.conf --expname scene1 --trainstage Mat --gpu auto
```

##### 3.2 PIR (precomputed irradiance)

###### NIrF

For the real dataset, set 'path_mesh_open3d' as in material estimation, then run:

```
# NIrF
python3 trainer/exp_runner.py --conf configs/irrf_hdrhouse.conf --expname scene1 --trainstage IRRF --gpu auto
```

###### IrT

First, generate the IrT via ray tracing with 2048 samples.

UV-mapping errors leave some black regions in the IrT, so we apply a simple padding in texture space to fill them.

Finally, we denoise the IrT to reduce the noise produced by Monte Carlo sampling.

Run:

```
# 1. generate the noisy IrT; change hdr_exposure in hdrhouse_ir_texture.conf
python3 trainer/exp_runner.py --conf configs/hdrhouse_ir_texture.conf --expname scene1 --trainstage IrrT --gpu auto
# 2. pad the IrT; change the path in tools/padding_texture.py, then denoise the IrT with a denoiser, e.g. OIDN or OptiX
python3 tools/padding_texture.py
```

##### 3.3 Evaluating previous methods

The state-of-the-art multi-view object-centric neural rendering methods are [InvRender](https://github.com/zju3dv/invrender), [nvdiffrec](https://github.com/NVlabs/nvdiffrec) and [NeILF](https://github.com/apple/ml-neilf).

As described in the paper, we integrate their material optimization strategies with our lighting representation to handle large-scale indoor scenes.

Please run:

```
# note: change the exposure in the .conf files
# # real data
# invrender
python3 trainer/exp_runner.py --conf configs/mat_hdrhouse_invrender.conf --expname scene1 --trainstage Inv --gpu auto
# nvdiffrec
python3 trainer/exp_runner.py --conf configs/mat_hdrhouse_rec.conf --expname scene1 --trainstage RecMLP --gpu auto
# neilf
python3 trainer/exp_runner.py --conf configs/mat_hdrhouse_neilf.conf --expname scene1 --trainstage Neilf --gpu auto

# # synthetic data
# invrender
python3 trainer/exp_runner.py --conf configs/syn_invrender.conf --expname scene1 --trainstage InvSyn --gpu auto
# nvdiffrec
python3 trainer/exp_runner.py --conf configs/syn_rec.conf --expname scene1 --trainstage RecMLPSyn --gpu auto
# neilf
python3 trainer/exp_runner.py --conf configs/syn_neilf.conf --expname scene1 --trainstage NeilfSyn --gpu auto
```
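The runners write HDR results (.hdr/.exr) under their experiment folders. To inspect them as LDR images, they can be gamma-mapped the same way `tools/hdr2ldr.py` does; a minimal sketch with an illustrative input path:

```
import cv2

img = cv2.imread('exps/scene1/<timestamp>/plots/mat_0.hdr', -1)   # illustrative path
cv2.imwrite('mat_0_preview.png', (img ** (1 / 2.2)) * 255)        # same 1/2.2 tonemap as tools/hdr2ldr.py
```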
88 | 89 | Please run: 90 | 91 | ``` 92 | # note: change the exposure in the .conf 93 | # # real data 94 | # invrender 95 | python3 trainer/exp_runner.py --conf configs/mat_hdrhouse_invrender.conf --expname scene1 --trainstage Inv --gpu auto 96 | # nvdiffrec 97 | python3 trainer/exp_runner.py --conf configs/mat_hdrhouse_rec.conf --expname scene1 --trainstage RecMLP --gpu auto 98 | # neilf 99 | python3 trainer/exp_runner.py --conf configs/mat_hdrhouse_neilf.conf --expname scene1 --trainstage Neilf --gpu auto 100 | 101 | # # synthetic data 102 | # invrender 103 | python3 trainer/exp_runner.py --conf configs/syn_invrender.conf --expname scene1 --trainstage InvSyn --gpu auto 104 | # nvdiffrec 105 | python3 trainer/exp_runner.py --conf configs/syn_rec.conf --expname scene1 --trainstage RecMLPSyn --gpu auto 106 | # neilf 107 | python3 trainer/exp_runner.py --conf configs/syn_neilf.conf --expname scene1 --trainstage NeilfSyn --gpu auto 108 | ``` 109 | 110 | #### 4. Running on custom data 111 | 112 | Our code does not support other custom datasets (e.g. Replica) out of the box. If you want to run our method on your own data, please re-implement the interface or convert your data into our supported format, which consists of meshes, HDR images, UV mappings, poses and segmentations. 113 | 114 | #### Citation 115 | 116 | We use the basic Docker image provided by [nvdiffrec](https://github.com/NVlabs/nvdiffrec); please cite both our paper and theirs if you use our code. 117 | 118 | ``` 119 | @inproceedings{li2022texir, 120 | title={Multi-view Inverse Rendering for Large-scale Real-world Indoor Scenes}, 121 | author={Li, Zhen and Wang, Lingli and Cheng, Mofang and Pan, Cihui and Yang, Jiaqi}, 122 | booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, 123 | year={2023} 124 | } 125 | 126 | @inproceedings{munkberg2021nvdiffrec, 127 | author = {Munkberg, Jacob and Hasselgren, Jon and Shen, Tianchang and Gao, Jun and Chen, Wenzheng and Evans, Alex and Mueller, Thomas and Fidler, Sanja}, 128 | title = {Extracting Triangular 3D Models, Materials, and Lighting From Images}, 129 | booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, 130 | year = {2022} 131 | } 132 | ``` 133 | 134 | 135 | 136 | 137 | 138 | -------------------------------------------------------------------------------- /models/embedder.py: -------------------------------------------------------------------------------- 1 | import torch 2 | # import tinycudann as tcnn  # needed by HashGridEmbedder/FrequencyEmbedder below; install tiny-cuda-nn and uncomment to use them 3 | import numpy as np 4 | 5 | """ Positional encoding embedding. Code was taken from https://github.com/bmild/nerf. """ 6 | class Embedder: 7 | def __init__(self, **kwargs): 8 | self.kwargs = kwargs 9 | self.create_embedding_fn() 10 | 11 | def create_embedding_fn(self): 12 | embed_fns = [] 13 | d = self.kwargs['input_dims'] 14 | out_dim = 0 15 | if self.kwargs['include_input']: 16 | embed_fns.append(lambda x: x) 17 | out_dim += d 18 | 19 | max_freq = self.kwargs['max_freq_log2'] 20 | N_freqs = self.kwargs['num_freqs'] 21 | 22 | if self.kwargs['log_sampling']: 23 | freq_bands = 2.
** torch.linspace(0., max_freq, N_freqs) 24 | else: 25 | freq_bands = torch.linspace(2.**0., 2.**max_freq, N_freqs) 26 | 27 | for freq in freq_bands: 28 | for p_fn in self.kwargs['periodic_fns']: 29 | embed_fns.append(lambda x, p_fn=p_fn, 30 | freq=freq: p_fn(x * freq)) 31 | out_dim += d 32 | 33 | self.embed_fns = embed_fns 34 | self.out_dim = out_dim 35 | print("Encoder output: %d dims" % (out_dim)) 36 | 37 | def embed(self, inputs): 38 | return torch.cat([fn(inputs) for fn in self.embed_fns], -1) 39 | 40 | 41 | def get_embedder(multires): 42 | embed_kwargs = { 43 | 'include_input': True, 44 | 'input_dims': 3, 45 | 'max_freq_log2': multires-1, 46 | 'num_freqs': multires, 47 | 'log_sampling': True, 48 | 'periodic_fns': [torch.sin, torch.cos], 49 | } 50 | 51 | embedder_obj = Embedder(**embed_kwargs) 52 | def embed(x, eo=embedder_obj): return eo.embed(x) 53 | return embed, embedder_obj.out_dim 54 | 55 | 56 | 57 | class HashGridEmbedder(torch.nn.Module): 58 | def __init__(self, **kwargs): 59 | super(HashGridEmbedder, self).__init__() 60 | self.kwargs = kwargs 61 | 62 | if self.kwargs['mode']=='material': 63 | 64 | desired_resolution = 4096 65 | base_grid_resolution = 16 66 | num_levels = 16 67 | per_level_scale = np.exp(np.log(desired_resolution / base_grid_resolution) / (num_levels-1)) 68 | elif self.kwargs['mode']=='sdf': 69 | desired_resolution = 2048*256 # assume our scene size is 256, the corresponding per level scale is 2.0 70 | base_grid_resolution = 16 71 | num_levels = 16 72 | per_level_scale = np.exp(np.log(desired_resolution / base_grid_resolution) / (num_levels-1)) 73 | else: 74 | raise Exception("unrecognized embedder mode, please use material or sdf!") 75 | 76 | enc_cfg = { 77 | "otype": "HashGrid", 78 | "n_levels": 16, 79 | "n_features_per_level": 2, 80 | "log2_hashmap_size": 19, 81 | "base_resolution": base_grid_resolution, 82 | "per_level_scale" : per_level_scale 83 | } 84 | self.encoder = tcnn.Encoding(self.kwargs['input_dims'], enc_cfg) 85 | print("Encoder output: %d dims" % (self.encoder.n_output_dims)) 86 | if self.kwargs['AABB'] is None: 87 | raise Exception(" None AABB.") 88 | self.AABB = torch.from_numpy(self.kwargs['AABB']).cuda() 89 | self.max_len = torch.max(torch.abs(self.AABB))*2 90 | # gradient_scaling = 128.0 91 | # self.encoder.register_full_backward_hook(lambda module, grad_i, grad_o: (grad_i[0] / gradient_scaling, )) 92 | def forward(self, inputs): 93 | _inputs = (inputs- self.AABB[0:1,:]) / (self.AABB[1:2,:] - self.AABB[0:1,:]) 94 | # _inputs = (inputs + self.max_len/2.) 
/ self.max_len 95 | _inputs = torch.clamp(_inputs, min=0, max=1) 96 | return self.encoder(_inputs.contiguous()).to(torch.float32) 97 | def get_outdim(self): 98 | return self.encoder.n_output_dims 99 | 100 | 101 | def get_hashgrid_embedder(mode, AABB): 102 | embed_kwargs = { 103 | 'include_input': True, 104 | 'input_dims': 3, 105 | 'mode':mode, 106 | 'AABB': AABB 107 | } 108 | 109 | embeder = HashGridEmbedder(**embed_kwargs) 110 | def embed(x, eo=embeder): return eo.forward(x) 111 | return embeder.encoder.parameters(), embed, embeder.get_outdim() 112 | 113 | 114 | class FrequencyEmbedder: 115 | def __init__(self, **kwargs): 116 | self.kwargs = kwargs 117 | 118 | if self.kwargs['mode']=='material': 119 | 120 | desired_resolution = 4096 121 | base_grid_resolution = 16 122 | num_levels = 16 123 | per_level_scale = np.exp(np.log(desired_resolution / base_grid_resolution) / (num_levels-1)) 124 | elif self.kwargs['mode']=='sdf': 125 | desired_resolution = 2048*256 # assume our scene size is 256, the corresponding per level scale is 2.0 126 | base_grid_resolution = 16 127 | num_levels = 16 128 | per_level_scale = np.exp(np.log(desired_resolution / base_grid_resolution) / (num_levels-1)) 129 | 130 | enc_cfg = { 131 | "otype": "Frequency", 132 | "n_frequencies": 6 133 | } 134 | self.encoder = tcnn.Encoding(self.kwargs['input_dims'], enc_cfg) 135 | print("Encoder output: %d dims" % (self.encoder.n_output_dims)) 136 | self.AABB = torch.from_numpy(self.kwargs['AABB']).cuda() 137 | self.max_len = torch.max(torch.abs(self.AABB))*2 138 | # gradient_scaling = 128.0 139 | # self.encoder.register_full_backward_hook(lambda module, grad_i, grad_o: (grad_i[0] / gradient_scaling, )) 140 | def forward(self, inputs): 141 | # _inputs = (inputs- self.AABB[0:1,:]) / (self.AABB[1:2,:] - self.AABB[0:1,:]) 142 | # _inputs = (inputs + self.max_len/2.) 
/ self.max_len 143 | # _inputs = torch.clamp(_inputs, min=0, max=1) 144 | return self.encoder(inputs.contiguous()).to(torch.float32) 145 | def get_outdim(self): 146 | return self.encoder.n_output_dims 147 | 148 | 149 | def get_frequency_embedder(mode, AABB): 150 | embed_kwargs = { 151 | 'include_input': True, 152 | 'input_dims': 3, 153 | 'mode':mode, 154 | 'AABB': AABB 155 | } 156 | 157 | embeder = FrequencyEmbedder(**embed_kwargs) 158 | def embed(x, eo=embeder): return eo.forward(x) 159 | return embed, embeder.get_outdim() -------------------------------------------------------------------------------- /utils/sample_util.py: -------------------------------------------------------------------------------- 1 | ''' 2 | @File : sample_util.py 3 | @Time : 2023/02/27 12:08:01 4 | @Author : Zhen Li 5 | @Contact : yodlee@mail.nwpu.edu.cn 6 | @Institution: Realsee 7 | @License : GNU General Public License v2.0 8 | @Desc : None 9 | ''' 10 | 11 | 12 | import os 13 | import cv2 14 | import numpy as np 15 | import math 16 | 17 | import torch 18 | import torch.nn as nn 19 | import torch.nn.functional as F 20 | from torch.autograd import Variable 21 | import torchvision.transforms as tranforms 22 | 23 | 24 | 25 | TINY_NUMBER = 1e-6 26 | TINY_TINY_NUMBER = 1e-14 27 | 28 | def RadicalInverse(bits): 29 | # reverse the bits (Van der Corput radical inverse) 30 | # swap the high and low 16 bits 31 | bits = (bits << 16) | (bits >> 16) 32 | # 0xAAAAAAAA is the bitwise complement of 0x55555555 33 | bits = ((bits & 0x55555555) << 1) | ((bits & 0xAAAAAAAA) >> 1) 34 | # 0xCCCCCCCC is the bitwise complement of 0x33333333 35 | bits = ((bits & 0x33333333) << 2) | ((bits & 0xCCCCCCCC) >> 2) 36 | bits = ((bits & 0x0F0F0F0F) << 4) | ((bits & 0xF0F0F0F0) >> 4) 37 | bits = ((bits & 0x00FF00FF) << 8) | ((bits & 0xFF00FF00) >> 8) 38 | return float(bits) * 2.3283064365386963e-10 39 | 40 | def Hammersley(i,N): 41 | return [float(i)/float(N),RadicalInverse(i)] 42 | 43 | 44 | def generate_fixed_samples(b, num_sample_dir): 45 | samples=np.zeros((num_sample_dir,2),dtype=np.float32) 46 | for i in range(0,num_sample_dir): 47 | s = Hammersley(i,num_sample_dir) 48 | samples[i][0] = s[0] 49 | samples[i][1] = s[1] 50 | samples = torch.from_numpy(samples).unsqueeze(0).cuda() #size:(batch_size, samples, 2) 51 | samples = samples.repeat(b, 1, 1).detach() 52 | # samples[:,:, 0:1] = torch.clamp(samples[:,:,0:1] + torch.rand_like(samples[:,:,0:1])*0.09, 0., 1.) 53 | shift = torch.rand(b, 1, 2).cuda() 54 | samples = samples + shift 55 | index1 = samples > 1. 56 | samples[index1] = samples[index1]-1. 57 | index2 = samples < 0. 58 | samples[index2] = samples[index2] + 1 59 | samples = torch.clamp(samples, 0+TINY_NUMBER, 1-TINY_NUMBER) # avoid NAN in roughness backward. 60 | return samples 61 | 62 | # shift = torch.rand(98304, 1, 2).cuda() 63 | def generate_dir(normals, num_sample_dir, samples=None, mode='uniform', roughness=None, pre_mode='Hammersley'): 64 | """Generate sampled directions on the hemisphere around each normal. 65 | 66 | Args: 67 | normals (torch.float32): [b, 3] (h * w, 3) 68 | num_sample_dir (int): number of directions sampled per point 69 | mode (str, optional): sampling mode. Defaults to 'uniform'. 70 | roughness (torch.float32, optional): [b, 1] (h * w, 1). Defaults to None. 71 | pre_mode (str, optional): pre-sampling mode. Defaults to 'Hammersley'.
72 | 73 | Returns: 74 | torch.float32: sampled directions L, shape [b, num_sample_dir, 3] 75 | """ 76 | b, c = normals.shape 77 | normals = normals.unsqueeze(1) 78 | # compute projection axis 79 | # x_axis = torch.zeros_like(normals).cuda().expand(b, num_sample_dir, 3) #size:(batch_size, samples, 3) 80 | normals = normals.expand(b, num_sample_dir, 3) 81 | # mask = torch.abs(normals[:,:,0]) > 0.99 82 | # x_axis[mask, :] = torch.tensor([0., 1., 0.],dtype=torch.float32, device=normals.get_device()) 83 | # x_axis[~mask, :] = torch.tensor([1., 0., 0.],dtype=torch.float32, device=normals.get_device()) 84 | x_axis = torch.where(torch.abs(normals[:,:,0:1]) > 0.99, torch.tensor([0, 1., 0.]).cuda(), torch.tensor([1., 0., 0.]).cuda()) 85 | 86 | def norm_axis(x): 87 | return x / (torch.norm(x, dim=-1, keepdim=True) + TINY_NUMBER) 88 | 89 | normals = norm_axis(normals) 90 | U = norm_axis(torch.cross(x_axis, normals)) 91 | V = norm_axis(torch.cross( normals, U)) 92 | 93 | if pre_mode == "Hammersley": 94 | samples=np.zeros((num_sample_dir,2),dtype=np.float32) 95 | for i in range(0,num_sample_dir): 96 | s = Hammersley(i,num_sample_dir) 97 | samples[i][0] = s[0] 98 | samples[i][1] = s[1] 99 | samples = torch.from_numpy(samples).unsqueeze(0).cuda() #size:(batch_size, samples, 2) 100 | samples = samples.repeat(b, 1, 1).detach() 101 | # samples[:,:, 0:1] = torch.clamp(samples[:,:,0:1] + torch.rand_like(samples[:,:,0:1])*0.09, 0., 1.) 102 | shift = torch.rand(b, 1, 2).cuda() 103 | samples = samples + shift 104 | index1 = samples > 1. 105 | samples[index1] = samples[index1]-1. 106 | index2 = samples < 0. 107 | samples[index2] = samples[index2] + 1 108 | samples = torch.clamp(samples, 0+TINY_NUMBER, 1-TINY_NUMBER) # avoid NAN in roughness backward. 109 | else: 110 | # independent sample 111 | samples = torch.rand((b, num_sample_dir, 2)).cuda() 112 | 113 | # uniform sampling; attention: directions are generated with y as the up axis and then translated to our coordinates: 114 | # phi - np.pi; y = sin((np.pi/2-theta)) = cos(theta); y_projected = cos((np.pi/2-theta)) = sin(theta) 115 | if mode =='uniform': 116 | phi = 2 * np.pi * samples[:,:,1:2] - np.pi 117 | cosTheta = (1.0 - samples[:,:,0:1]) 118 | sinTheta = torch.sqrt(1.0 - cosTheta * cosTheta) 119 | L = V * (torch.sin(phi) * sinTheta) \ 120 | + normals * cosTheta \ 121 | + U * -(torch.cos(phi) * sinTheta) # [batch, num_samples, 3] 122 | elif mode =='cosine': 123 | phi = 2 * np.pi * samples[:,:,1:2] - np.pi 124 | cosTheta = torch.sqrt(1.0 - samples[:,:,0:1]) 125 | sinTheta = torch.sqrt(1.0 - cosTheta * cosTheta) 126 | # phi = (torch.rand(1, num_sample_dir, 1).cuda() * 2 * np.pi - np.pi ).repeat(b, 1, 1) 127 | # Theta = (torch.rand(1, num_sample_dir, 1).cuda() * np.pi/2. ).repeat(b, 1, 1) 128 | # cosTheta = torch.cos(Theta) 129 | # sinTheta = torch.sin(Theta) 130 | L = V * (torch.sin(phi) * sinTheta) \ 131 | + normals * cosTheta \ 132 | + U * -(torch.cos(phi) * sinTheta) # [batch, num_samples, 3] 133 | elif mode =='importance': 134 | a = roughness * roughness 135 | a = a.unsqueeze(1).expand(b, num_sample_dir, 1) 136 | 137 | phi = 2 * np.pi * samples[:,:,1:2] - np.pi 138 | cosTheta = torch.sqrt( (1.0-samples[:,:,0:1]) / (1.0 + (a*a-1) * samples[:,:,0:1]) ) 139 | cosTheta = torch.clamp(cosTheta, min=-1.0+TINY_NUMBER,max=1.0-TINY_NUMBER) # avoid NAN in backward.
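        # The inverse-CDF above is the standard GGX (Trowbridge-Reitz) sampling formula:
        # with alpha = roughness^2, cos(theta) = sqrt((1 - u) / (1 + (alpha^2 - 1) * u)),
        # so the sampled lobe tightens around the normal as roughness decreases.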
140 | sinTheta = torch.clamp(torch.sqrt(1.0 - cosTheta * cosTheta) , min=-1.0+TINY_NUMBER,max=1.0-TINY_NUMBER) 141 | L = V * (torch.sin(phi) * sinTheta) \ 142 | + normals * cosTheta \ 143 | + U * -(torch.cos(phi) * sinTheta) # [batch, num_samples, 3] 144 | 145 | 146 | return L 147 | 148 | -------------------------------------------------------------------------------- /utils/Pano2Cube.py: -------------------------------------------------------------------------------- 1 | ''' 2 | @File : Pano2Cube.py 3 | @Time : 2023/02/27 12:07:51 4 | @Author : Zhen Li 5 | @Contact : yodlee@mail.nwpu.edu.cn 6 | @Institution: Realsee 7 | @License : GNU General Public License v2.0 8 | @Desc : None 9 | ''' 10 | 11 | 12 | 13 | import os 14 | import cv2 15 | import numpy as np 16 | 17 | import torch 18 | import torch.nn.functional as F 19 | from torch.autograd import Variable 20 | import torchvision.transforms as tranforms 21 | 22 | 23 | 24 | class Pano2Cube: 25 | """ 26 | Differentiable panorama2cubemap op. It supports gpu and batch processing. 27 | """ 28 | def __init__(self,batch_size=1,pano_width = 256,pano_height=128,cube_lenth=128,cube_channel =3,is_cuda=False) -> None: 29 | 30 | self.pano_width = pano_width 31 | self.pano_height = pano_height 32 | self.batch_size = batch_size 33 | self.cube_lenth = cube_lenth 34 | self.cube_channel = cube_channel 35 | self.is_cuda = is_cuda 36 | # face order: 0-left,1-front,2-right,3-back,4-top,5-bottom 37 | self.horizon_angle = np.array([-90.0,0.0,90.0,180.0,])/180.0*np.pi 38 | self.vertical_angle = np.array([-90.0,90.0])/180.0*np.pi 39 | self.rorate_list = [] 40 | for h in self.horizon_angle: 41 | temp = h*np.array([0,1,0],np.float32) # rotate as y axis 42 | rotate_vector,_ = cv2.Rodrigues(temp) 43 | self.rorate_list.append(rotate_vector) 44 | 45 | for v in self.vertical_angle: 46 | temp = v*np.array([1,0,0],np.float32) # rotate as x axis 47 | rotate_vector,_ = cv2.Rodrigues(temp) 48 | self.rorate_list.append(rotate_vector) 49 | 50 | self.rorate_list = [Variable(torch.FloatTensor(x)) for x in self.rorate_list] 51 | 52 | scl = 1.0 53 | sample_x, sample_y = np.meshgrid( 54 | np.linspace(-scl, scl, cube_lenth), 55 | np.linspace(scl, -scl, cube_lenth) 56 | ) 57 | 58 | r = np.sqrt(sample_y * sample_y + sample_x * sample_x + 1) 59 | sample_x /= r 60 | sample_y /= r 61 | sample_z = np.sqrt(1 - sample_y * sample_y - sample_x * sample_x) 62 | 63 | # xyz = torch.cat([Variable(torch.FloatTensor(sample_x)),Variable(torch.FloatTensor(sample_y)),Variable(torch.FloatTensor(sample_z))],dim=2) 64 | # xyz = xyz.view(cube_lenth*cube_lenth,3).permute(1,0) # size: (3,cube_lenth*cube_lenth) 65 | # xyz = torch.FloatTensor([sample_x,sample_y,sample_z]) 66 | xyz = torch.from_numpy(np.array([sample_x,sample_y,sample_z], dtype=np.float32)) 67 | xyz = xyz.view(3,cube_lenth*cube_lenth) 68 | self.uv = [] 69 | for i,R_matrix in enumerate(self.rorate_list): 70 | temp_xyz = torch.matmul(R_matrix,xyz).permute(1,0) # size: (cube_lenth*cube_lenth,3) 71 | # convert to polar 72 | azimuth = torch.atan2(temp_xyz[:,0], temp_xyz[:,2]) # [-Pi, Pi] 73 | elevation = torch.asin(temp_xyz[:,1]) # [-Pi/2, Pi/2] 74 | azimuth = azimuth.view(1,cube_lenth,cube_lenth,1) 75 | elevation = elevation.view(1,cube_lenth,cube_lenth,1) 76 | # convert to uv, because grid_sample's grid value range:[-1,1]. 
-1 corresponds to the top-left and 1 to the bottom-right 77 | # self.u = (azimuth+np.pi)*1/np.pi/2 78 | # self.v = (np.pi-elevation)*1/np.pi 79 | u = azimuth/np.pi 80 | v = -elevation/(np.pi/2) 81 | 82 | self.uv.append(torch.cat([u.repeat(batch_size,1,1,1),v.repeat(batch_size,1,1,1)],dim=3)) 83 | 84 | def Tocube(self,input,mode='bilinear'): 85 | """ 86 | pano 2 cube. 87 | 88 | Args: 89 | input (torch.float32): shape: [b, c, h, w] 90 | mode (str, optional): interpolation mode (nearest, bilinear). Defaults to 'bilinear'. 91 | 92 | Returns: 93 | torch.float32: shape: [b, c*6, h, w] 94 | """ 95 | assert mode in ['bilinear', 'nearest'] 96 | out = [] 97 | for i in range(6): 98 | uv = self.uv[i].cuda() if self.is_cuda else self.uv[i] 99 | result = F.grid_sample(input,uv,mode=mode,padding_mode="border",align_corners=False) # known limitation: at the back face (theta = +-pi) there is no wrap-around interpolation; scipy.ndimage.map_coordinates supports wrapping but is not differentiable 100 | out.append(result) 101 | out = torch.cat(out,dim=1) #size:(batch_size,6*3,out,out) 102 | return out 103 | 104 | 105 | 106 | # def ToCubeTensor(self, batch, mode='bilinear'): 107 | # assert mode in ['bilinear', 'nearest'] 108 | # batch_size = batch.size()[0] 109 | # out_batch = self.Tocube(batch, mode=mode) 110 | # out_batch = torch.cat(out_batch,dim=1) # batch_size,channel*6,h,w 111 | 112 | # return out_batch 113 | 114 | def cube2full(self,batch): 115 | out = torch.zeros([self.batch_size,self.cube_channel,self.cube_lenth*3,self.cube_lenth*4]) 116 | # id:0 117 | out[:,:,self.cube_lenth:self.cube_lenth*2,0:self.cube_lenth] = batch[:,0:3,:,:] 118 | out[:,:,self.cube_lenth:self.cube_lenth*2,self.cube_lenth:self.cube_lenth*2] = batch[:,3:6,:,:] 119 | out[:,:,self.cube_lenth:self.cube_lenth*2,self.cube_lenth*2:self.cube_lenth*3] = batch[:,6:9,:,:] 120 | out[:,:,self.cube_lenth:self.cube_lenth*2,self.cube_lenth*3:self.cube_lenth*4] = batch[:,9:12,:,:] 121 | out[:,:,0:self.cube_lenth,self.cube_lenth:self.cube_lenth*2] = batch[:,12:15,:,:] 122 | out[:,:,self.cube_lenth*2:self.cube_lenth*3,self.cube_lenth:self.cube_lenth*2] = batch[:,15:18,:,:] 123 | 124 | return out 125 | 126 | def saveCube(self,path,result,model="full"): # assumes batch_size=1 127 | fname,file_type = os.path.splitext(path) 128 | 129 | if model =="full": 130 | result = result.permute(0,2,3,1) 131 | for i in range(result.size()[0]): 132 | one = np.asarray(result[i],dtype=np.float32) 133 | cv2.imwrite(fname+str(i)+file_type,one*255) 134 | elif model=="6": 135 | for i in range(6): 136 | one = result[:,i*self.cube_channel:(i+1)*self.cube_channel,:,:].permute(0,2,3,1) 137 | one = np.asarray(one[0].cpu(),dtype=np.float32) 138 | cv2.imwrite(fname+"_"+str(i)+file_type,one*255) 139 | 140 | 141 | if __name__=="__main__": 142 | 143 | 144 | #----------------------------- 145 | 146 | path = "/home/SecondDisk/Code/colleague/reproject/derived/1646101296/panoImage.jpg" 147 | img = cv2.imread(path,cv2.IMREAD_UNCHANGED)[:,:,0:3] 148 | h,w,c= img.shape 149 | # img = rotate_pano(img,0,0,0) 150 | img = np.asarray(img,dtype=np.float32) 151 | fname,file_type = os.path.splitext(path) 152 | if(file_type == ".hdr" or file_type == ".exr"): 153 | # img = img*pow(2,3.4) 154 | img = img**(1/2.2) 155 | else: 156 | img = img/255.0 157 | img = tranforms.ToTensor()(img) 158 | 159 | img = torch.reshape(img,(1,c,h,w)) 160 | img = img.repeat(1,1,1,1).cuda() 161 | trans = Pano2Cube(batch_size=img.size()[0],pano_height=h,pano_width=w,is_cuda=True,cube_lenth=1024) 162 | 163 | # out = trans.Tocube(img) 164 | # # out =
trans.ToCubeTensor(img) 165 | # for i in range(6): 166 | # one = out[i].permute(0,2,3,1) 167 | # one = np.asarray(one[0],dtype=np.float32) 168 | 169 | # cv2.imwrite("test_cube_{}.jpg".format(i),one*255) 170 | 171 | # trans.saveCube("test_cube_full.png",trans.cube2full(trans.Tocube(img))) 172 | trans.saveCube("/home/SecondDisk/Code/colleague/reproject/derived/1646101296/cube.jpg",trans.Tocube(img),model="6") 173 | 174 | -------------------------------------------------------------------------------- /tester/test_novel.py: -------------------------------------------------------------------------------- 1 | ''' 2 | @File : test_novel.py 3 | @Time : 2023/02/27 12:10:58 4 | @Author : Zhen Li 5 | @Contact : yodlee@mail.nwpu.edu.cn 6 | @Institution: Realsee 7 | @License : GNU General Public License v2.0 8 | @Desc : None 9 | ''' 10 | 11 | 12 | import os 13 | import sys 14 | from datetime import datetime 15 | import time 16 | import itertools 17 | from utils.sample_util import TINY_NUMBER 18 | 19 | import imageio 20 | import numpy as np 21 | import torch 22 | from torch.nn import functional as F 23 | from pyhocon import ConfigFactory 24 | from tensorboardX import SummaryWriter 25 | 26 | import utils.general as utils 27 | import utils.plots as plt 28 | from models.loss import IRFLoss 29 | from utils.Cube2Pano import Cube2Pano 30 | 31 | class NovelViewRunner(): 32 | def __init__(self,**kwargs): 33 | torch.set_default_dtype(torch.float32) 34 | torch.set_num_threads(1) 35 | 36 | self.conf = ConfigFactory.parse_file(kwargs['conf']) 37 | self.exps_folder_name = kwargs['exps_folder_name'] 38 | 39 | self.max_niters = kwargs['max_niters'] 40 | self.GPU_INDEX = kwargs['gpu_index'] 41 | 42 | self.expname = 'Mat-' + kwargs['expname'] 43 | 44 | if kwargs['is_continue'] and kwargs['timestamp'] == 'latest': 45 | print(os.path.join('../',kwargs['exps_folder_name'],self.expname)) 46 | if os.path.exists(os.path.join('../',kwargs['exps_folder_name'],self.expname)): 47 | timestamps = os.listdir(os.path.join('../',kwargs['exps_folder_name'],self.expname)) 48 | if (len(timestamps)) == 0: 49 | is_continue = False 50 | timestamp = None 51 | else: 52 | timestamp = sorted(timestamps)[-1] 53 | is_continue = True 54 | else: 55 | is_continue = False 56 | timestamp = None 57 | else: 58 | timestamp = kwargs['timestamp'] 59 | is_continue = kwargs['is_continue'] 60 | 61 | utils.mkdir_ifnotexists(os.path.join('../',self.exps_folder_name)) 62 | self.expdir = os.path.join('../', self.exps_folder_name, self.expname) 63 | utils.mkdir_ifnotexists(self.expdir) 64 | 65 | self.timestamp = timestamp 66 | print(timestamp) 67 | utils.mkdir_ifnotexists(os.path.join(self.expdir, self.timestamp)) 68 | 69 | self.plots_dir = os.path.join(self.expdir, self.timestamp, 'plots') 70 | utils.mkdir_ifnotexists(self.plots_dir) 71 | 72 | self.editing_dir = os.path.join(self.plots_dir,'novel_view') 73 | utils.mkdir_ifnotexists(self.editing_dir) 74 | 75 | # create checkpoints dirs 76 | self.checkpoints_path = os.path.join(self.expdir, self.timestamp, 'checkpoints') 77 | utils.mkdir_ifnotexists(self.checkpoints_path) 78 | self.model_params_subdir = "ModelParameters" 79 | self.mat_optimizer_params_subdir = "MatOptimizerParameters" 80 | self.mat_scheduler_params_subdir = "MatSchedulerParameters" 81 | utils.mkdir_ifnotexists(os.path.join(self.checkpoints_path, self.model_params_subdir)) 82 | utils.mkdir_ifnotexists(os.path.join(self.checkpoints_path, self.mat_optimizer_params_subdir)) 83 | utils.mkdir_ifnotexists(os.path.join(self.checkpoints_path, 
self.mat_scheduler_params_subdir)) 84 | 85 | # fix random seed 86 | torch.manual_seed(666) 87 | torch.cuda.manual_seed(666) 88 | np.random.seed(666) 89 | 90 | if (not self.GPU_INDEX == 'ignore'): 91 | os.environ["CUDA_VISIBLE_DEVICES"] = '{0}'.format(self.GPU_INDEX) 92 | 93 | print('shell command : {0}'.format(' '.join(sys.argv))) 94 | 95 | print('Loading data ...') 96 | self.train_dataset = utils.get_class(self.conf.get_string('test.dataset_class'))( 97 | self.conf.get_string('test.path_mesh_open3d'), self.conf.get_list('test.pano_img_res'), self.conf.get_float('test.hdr_exposure')) 98 | print('Finish loading data ...') 99 | 100 | self.train_dataloader = torch.utils.data.DataLoader(self.train_dataset, 101 | batch_size=1, 102 | shuffle=True 103 | ) 104 | self.plot_dataloader = torch.utils.data.DataLoader(self.train_dataset, 105 | batch_size=1, 106 | shuffle=True 107 | ) 108 | 109 | self.model = utils.get_class(self.conf.get_string('test.model_class'))(conf=self.conf, \ 110 | cam_position_list = self.train_dataset.cam_position_list, checkpoint_material=self.plots_dir) 111 | if torch.cuda.is_available(): 112 | self.model.cuda() 113 | self.model.eval() 114 | 115 | self.mat_loss = utils.get_class(self.conf.get_string('test.irf_loss_class'))(**self.conf.get_config('render_loss')) 116 | 117 | 118 | # geo_dir = os.path.join('../',kwargs['exps_folder_name'], 'IRRF-' + kwargs['expname']) 119 | # if os.path.exists(geo_dir): 120 | # timestamps = os.listdir(geo_dir) 121 | # timestamp = sorted(timestamps)[-1] # using the newest training result 122 | # else: 123 | # print('No IRF pretrain, please train IRF first!') 124 | # exit(0) 125 | # # reloading IRRF 126 | # geo_path = os.path.join(geo_dir, timestamp) + '/checkpoints/ModelParameters/latest.pth' 127 | # print('Reloading IRRF from: ', geo_path) 128 | # model = torch.load(geo_path)['model_state_dict'] 129 | # ir = {k.split('network.')[1]: v for k, v in model.items() if 'ir_radiance_network' in k} 130 | # self.model.ir_radiance_network.load_state_dict(ir) 131 | # for parm in self.model.ir_radiance_network.parameters(): 132 | # parm.requires_grad = False 133 | 134 | 135 | self.n_batches = len(self.train_dataloader) 136 | 137 | self.pano_res = self.conf.get_list('test.pano_img_res') 138 | self.cube_lenth = int(self.pano_res[1]/4) 139 | self.cube2pano = Cube2Pano(pano_width=self.pano_res[1], pano_height=self.pano_res[0], cube_lenth=self.cube_lenth) 140 | self.first_val = True 141 | self.floor_max_mask = {} 142 | self.seg_mask = {} 143 | self.seg_tag = torch.from_numpy(np.array( list(range(0, 49)), np.float32) ) 144 | # self.seg_tag = torch.tensor([46.], dtype=torch.float32) 145 | self.room_seg_mask = {} 146 | self.room_meta_scale, self.room_meta_w, self.room_meta_h, self.room_meta_xmin, self.room_meta_zmin, self.room_img= utils.parse_roomseg(\ 147 | os.path.join(os.path.dirname(os.path.dirname(self.conf.get_string('test.path_mesh_open3d'))), 'roomseg')) 148 | 149 | 150 | def plot_to_disk_material(self): 151 | 152 | for i in range(len(self.train_dataset.ids)): 153 | cam_to_world = self.train_dataset.extrinsics_list[i].cuda() 154 | gt_img = self.train_dataset.images_items[i]['color'] 155 | gt_img = gt_img.permute(0,3,1,2).reshape(1,-1, self.cube_lenth, self.cube_lenth) # shape: [1, 6*c, cube_len, cube_len] 156 | gt_img = self.cube2pano.ToPano(gt_img)[0].permute(1,2,0) # shape: [pano_h, pano_w, 3] 157 | 158 | derived_id = self.train_dataset.ids[i] 159 | cam_position = self.train_dataset.cam_position_list[i].cuda() 160 | 161 | res = self.model(cam_to_world, 
derived_id, cam_position, 2) 162 | pred_albedo = res['albedo'].cpu().detach() # shape: (6, h, w, c) 163 | pred_albedo = pred_albedo.permute(0,3,1,2).reshape(1,-1,self.cube_lenth,self.cube_lenth) 164 | pred_albedo = self.cube2pano.ToPano(pred_albedo)[0].permute(1,2,0) # shape: [pano_h, pano_w, 3] 165 | 166 | pred_r = res['roughness'].cpu().detach().expand(-1,-1,-1,3) # shape: (6, h, w, c) 167 | pred_r = pred_r.permute(0,3,1,2).reshape(1,-1,self.cube_lenth,self.cube_lenth) 168 | pred_r = self.cube2pano.ToPano(pred_r)[0].permute(1,2,0) # shape: [pano_h, pano_w, 3] 169 | plt.plot_gbuffer(self.plots_dir, "albedo_{}".format(i), pred_albedo, False) 170 | plt.plot_gbuffer(self.plots_dir, "roughness_{}".format(i), pred_r, False) 171 | 172 | 173 | def plot_to_disk_cube(self): 174 | 175 | # index = torch.randint(0, len(self.train_dataset.ids),(1,)) 176 | index = -1 177 | for i in range(len(self.train_dataset.extrinsics_list)): 178 | cam_to_world = self.train_dataset.extrinsics_list[i].cuda() 179 | cam_position = self.train_dataset.cam_position_list[i].cuda() 180 | 181 | res = self.model(cam_to_world, 0, cam_position, True) 182 | pred_img = res['rgb'].cpu().detach() # shape: (6, h, w, c) 183 | pred_img = pred_img.permute(0,3,1,2).reshape(1,-1,self.cube_lenth,self.cube_lenth) 184 | pred_img = self.cube2pano.ToPano(pred_img)[0].permute(1,2,0) # shape: [pano_h, pano_w, 3] 185 | 186 | plt.plot_mat(self.editing_dir, 0, pred_img, "novel_view_{}".format(i), False) 187 | 188 | 189 | def run(self): 190 | print("testing...") 191 | self.plot_to_disk_cube() -------------------------------------------------------------------------------- /tools/arrange_syn.py: -------------------------------------------------------------------------------- 1 | import os 2 | import glob 3 | import cv2 4 | import numpy as np 5 | import torch 6 | from PIL import Image 7 | 8 | 9 | # total 49 views, 49-11=38, 38-14=24. 
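# i.e. of the 49 rendered views, the 11 ids in skip_list are dropped (49 - 11 = 38) and
# the 14 ids in novel_view are held out for novel-view evaluation (38 - 14 = 24 training views).
# Both lists hold 1-based view ids; the loops below test membership with `i + 1`.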
10 | # 11 views 11 | skip_list = [ 12 | 18,# 13 | 25,# 14 | 32, 15 | 39, 16 | 40, 17 | 41, 18 | 42, 19 | 46, 20 | 47, 21 | 48, 22 | 49 23 | ] 24 | 25 | # 14 views 26 | novel_view = [ 27 | 1, 28 | 2, 29 | 6, 30 | 9, 31 | 11, 32 | 13, 33 | 16, 34 | 17, 35 | #19,# 36 | 20, 37 | 22, 38 | #25, 39 | 27, 40 | 30, 41 | 34, 42 | 38 43 | ] 44 | 45 | def colorize_mask(mask, palette): 46 | zero_pad = 256 * 3 - len(palette) 47 | for i in range(zero_pad): 48 | palette.append(0) 49 | new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P') 50 | new_mask.putpalette(palette) 51 | return new_mask 52 | palette = [0, 0, 0, 128, 0, 0, 192, 0, 0, 128, 128, 0, 0, 128, 0, 128, 0, 128, 0, 128, 128, 128, 128, 128, 53 | 64, 0, 0, 0, 0, 128, 64, 128, 0, 64, 128, 192, 64, 0, 128, 192, 0, 128, 64, 128, 128, 192, 128, 54 | 128, 0, 64, 0, 128, 64, 0, 0, 192, 0, 128, 192, 0, 0, 64, 128, 128, 64, 128, 64, 128, 64, 55 | 192, 128, 64, 192, 0, 192, 192, 128, 0, 64, 192, 0, 192, 192, 0, 64, 64, 128, 192, 64, 128, 64, 56 | 192, 128, 192, 192, 128, 0, 0, 64, 128, 0, 64, 0, 128, 64, 128, 128, 64, 0, 0, 192, 128, 0, 192, 0, 57 | 128, 58 | 192, 128, 128, 192, 64, 0, 64, 192, 0, 64, 0, 192, 128, 128, 192, 128, 64, 0, 192, 64, 64, 0, 59 | 192, 64, 0, 192, 128, 192, 0, 64, 64, 0, 64, 104] 60 | 61 | 62 | root_path = "/home/SecondDisk/test_data/galois_model/inverse/hdrhouse/customHouse/mr_fur_optix" 63 | target_root_path = "/home/SecondDisk/test_data/galois_model/inverse/hdrhouse/customHouse/vrproc" 64 | 65 | all_items = glob.glob(os.path.join(root_path, '00[0-9]*')) 66 | all_items.sort(key= lambda x: int(os.path.split(x)[1])) 67 | 68 | target_derived_root_path = os.path.join(target_root_path, 'derived') 69 | target_info_path = os.path.join(target_root_path, 'info') 70 | target_hdr_root_path = os.path.join(target_root_path, 'hdr') 71 | if not os.path.exists(target_derived_root_path): 72 | os.makedirs(target_derived_root_path) 73 | if not os.path.exists(target_info_path): 74 | os.makedirs(target_info_path) 75 | if not os.path.exists(target_hdr_root_path): 76 | os.makedirs(target_hdr_root_path) 77 | 78 | # generate derived 79 | for i in range(len(all_items)): 80 | if i+1 in skip_list: 81 | continue 82 | target_derived_path = os.path.join(target_derived_root_path, '{}'.format(i+1)) 83 | if not os.path.exists(target_derived_path): 84 | os.makedirs(target_derived_path) 85 | target_hdr_path = os.path.join(target_hdr_root_path, '{}'.format(i+1)) 86 | if not os.path.exists(target_hdr_path): 87 | os.makedirs(target_hdr_path) 88 | 89 | # generate color 90 | img = cv2.imread(os.path.join(all_items[i], 'scene_ldr.png')) 91 | h, w, c = img.shape 92 | # img = cv2.resize(img, (6720,3360)) 93 | cv2.imwrite(os.path.join(target_derived_path, 'panoImage.jpg'), img) 94 | img = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA) 95 | cv2.imwrite(os.path.join(target_derived_path, 'panoImage_orig.png'), img) 96 | os.rename(os.path.join(target_derived_path, 'panoImage_orig.png'), os.path.join(target_derived_path, 'panoImage_orig.jpg')) 97 | 98 | # # read depth 99 | # depth_array = np.fromfile(os.path.join(root_path, 'render', 'scene_alldepth_{}.dat'.format(i+1)), np.float32) 100 | # depth_img = depth_array[2:].reshape(h, w) 101 | # depth_img = cv2.resize(depth_img, (1600,800)) 102 | # depth_img = depth_img*5000 103 | # depth_img = np.asarray(depth_img, np.uint16) 104 | # cv2.imwrite(os.path.join(target_derived_path, 'depth_image.png'), depth_img) 105 | 106 | # generate segmentation from basecolor 107 | # basecolor = cv2.imread(os.path.join(root_path, 'render', 
'scene_allbaseColor_{}.png'.format(i+1)), -1)[:,:,::-1] # bgr2rgb 108 | # h, w, c = basecolor.shape 109 | # seg_gray = np.zeros(basecolor.shape, dtype=np.uint8) 110 | # basecolor = torch.from_numpy(basecolor.copy()) 111 | # seg_gray = torch.from_numpy(seg_gray.copy()) 112 | # # floor 113 | # seg_gray = torch.where(basecolor==torch.tensor([255,0,0], dtype=torch.uint8), torch.tensor([46,46,46], dtype=torch.uint8), seg_gray) 114 | # # wall 115 | # seg_gray = torch.where(basecolor==torch.tensor([216,216,216], dtype=torch.uint8), torch.tensor([45,45,45], dtype=torch.uint8), seg_gray) 116 | # # door 117 | # seg_gray = torch.where(basecolor==torch.tensor([186,186,186], dtype=torch.uint8), torch.tensor([1,1,1], dtype=torch.uint8), seg_gray) 118 | # # ceiling 119 | # seg_gray = torch.where(basecolor==torch.tensor([243,243,243], dtype=torch.uint8), torch.tensor([43,43,43], dtype=torch.uint8), seg_gray) 120 | # # lamp 121 | # seg_gray = torch.where(basecolor==torch.tensor([230,230,230], dtype=torch.uint8), torch.tensor([27,27,27], dtype=torch.uint8), seg_gray) 122 | # # gray sphere -> 26 123 | # seg_gray = torch.where(basecolor==torch.tensor([155,160,159], dtype=torch.uint8), torch.tensor([26,26,26], dtype=torch.uint8), seg_gray) 124 | # # yellow sphere -> 25 125 | # seg_gray = torch.where(basecolor==torch.tensor([247,225,74], dtype=torch.uint8), torch.tensor([25,25,25], dtype=torch.uint8), seg_gray) 126 | # # blue sphere -> 24 127 | # seg_gray = torch.where(basecolor==torch.tensor([54,138,208], dtype=torch.uint8), torch.tensor([24,24,24], dtype=torch.uint8), seg_gray) 128 | # # green sphere -> 23 129 | # seg_gray = torch.where(basecolor==torch.tensor([82,199,82], dtype=torch.uint8), torch.tensor([23,23,23], dtype=torch.uint8), seg_gray) 130 | # seg_color = colorize_mask(seg_gray.numpy()[:,:,0], palette) 131 | # cv2.imwrite(os.path.join(target_derived_path, 'panoImage_gray.png'), seg_gray.numpy()[:,:,0]) 132 | # seg_color.save(os.path.join(target_derived_path, 'panoImage_seg.png')) 133 | 134 | # # generate basecolor 135 | # basecolor = cv2.imread(os.path.join(root_path, 'render', 'scene_allbaseColor_{}.png'.format(i+1)), -1) 136 | # cv2.imwrite(os.path.join(target_derived_path, 'albedo.png'), basecolor) 137 | # generate roughness 138 | roughness = cv2.imread(os.path.join(root_path, 'render', 'scene_allroughness_{}.png'.format(i+1)), -1) 139 | cv2.imwrite(os.path.join(target_derived_path, 'roughness.png'), roughness) 140 | # # generate normal 141 | # normal = cv2.imread(os.path.join(root_path, 'render', 'scene_allnormal_{}.png'.format(i+1)), -1) 142 | # cv2.imwrite(os.path.join(target_derived_path, 'normal.png'), normal) 143 | 144 | # generate hdr 145 | img = cv2.imread(os.path.join(all_items[i], 'scene_hdr.hdr'),-1) 146 | img = img*(2**(-5)) # note that we scale the intensity 147 | cv2.imwrite(os.path.join(target_hdr_path, 'ccm.hdr'), img) 148 | 149 | 150 | 151 | # # generate info 152 | # aligned_path = os.path.join(target_info_path, 'aligned.txt') 153 | # with open(aligned_path, 'w') as f: 154 | # for i in range(len(all_items)): 155 | # if i+1 in skip_list: 156 | # continue 157 | # if i+1 in novel_view: 158 | # continue 159 | # f.write('{}'.format(i+1)) 160 | # if not i == len(all_items)-1: 161 | # f.write('\n') 162 | # with open(os.path.join(target_info_path, 'novel.txt'), 'w') as f: 163 | # for i in range(len(all_items)): 164 | # if i+1 in novel_view: 165 | # f.write('{}'.format(i+1)) 166 | # if not i == len(all_items)-1: 167 | # f.write('\n') 168 | 169 | # with open(os.path.join(root_path, 
'cameraFile_optix.txt'), 'r') as f: 170 | # lines = f.readlines() 171 | # # fit cyclops's final extrinsics. 172 | # lines = [i.replace(' \n', '\n') for i in lines] 173 | # extrinsics = np.loadtxt(lines[1:], delimiter=' ') # shape: (N*4, 4) 174 | 175 | # with open(os.path.join(target_info_path, 'final_extrinsics.txt'), 'w') as f: 176 | # f.write("{}\n".format(len(all_items)-len(skip_list)-len(novel_view))) 177 | # for i in range(len(all_items)): 178 | # if i+1 in skip_list: 179 | # continue 180 | # if i+1 in novel_view: 181 | # continue 182 | # # f.write(lines[i*4+1:(i+1)*4+1]) 183 | # camera_position = lines[i*3+1] 184 | # camera_position = camera_position.strip().split(' ') 185 | # # ensure consistency for vrproc (-y as positive) 186 | # up = [0.0, -1.0, 0.0] 187 | # front = [0.0, 0.0, 1.0] 188 | # right = [-1.0, 0.0, 0.0] 189 | # f.write("{} {} {} {}\n".format(right[0], up[0], front[0], -float(camera_position[0]))) 190 | # f.write("{} {} {} {}\n".format(right[1], up[1], front[1], float(camera_position[1]))) 191 | # f.write("{} {} {} {}\n".format(right[2], up[2], front[2], -float(camera_position[2]))) 192 | # f.write("0 0 0 1\n") 193 | 194 | 195 | # with open(os.path.join(target_info_path, 'novel_extrinsics.txt'), 'w') as f: 196 | # f.write("{}\n".format(len(novel_view))) 197 | # for i in range(len(all_items)): 198 | # if i+1 in novel_view: 199 | # # f.write(lines[i*4+1:(i+1)*4+1]) 200 | # camera_position = lines[i*3+1] 201 | # camera_position = camera_position.strip().split(' ') 202 | # # ensure consistency for vrproc (-y as positive) 203 | # up = [0.0, -1.0, 0.0] 204 | # front = [0.0, 0.0, 1.0] 205 | # right = [-1.0, 0.0, 0.0] 206 | # f.write("{} {} {} {}\n".format(right[0], up[0], front[0], -float(camera_position[0]))) 207 | # f.write("{} {} {} {}\n".format(right[1], up[1], front[1], float(camera_position[1]))) 208 | # f.write("{} {} {} {}\n".format(right[2], up[2], front[2], -float(camera_position[2]))) 209 | # f.write("0 0 0 1\n") -------------------------------------------------------------------------------- /tester/test_error.py: -------------------------------------------------------------------------------- 1 | ''' 2 | @File : test_error.py 3 | @Time : 2023/02/27 12:10:54 4 | @Author : Zhen Li 5 | @Contact : yodlee@mail.nwpu.edu.cn 6 | @Institution: Realsee 7 | @License : GNU General Public License v2.0 8 | @Desc : None 9 | ''' 10 | 11 | 12 | import os 13 | import sys 14 | from datetime import datetime 15 | import time 16 | import itertools 17 | from utils.sample_util import TINY_NUMBER 18 | 19 | import imageio 20 | import numpy as np 21 | import torch 22 | from torch.nn import functional as F 23 | from pyhocon import ConfigFactory 24 | from tensorboardX import SummaryWriter 25 | 26 | import utils.general as utils 27 | import utils.plots as plt 28 | from models.loss import IRFLoss, SSIMLoss 29 | from utils.Cube2Pano import Cube2Pano 30 | 31 | class MatErrorRunner(): 32 | def __init__(self,**kwargs): 33 | torch.set_default_dtype(torch.float32) 34 | torch.set_num_threads(1) 35 | 36 | self.conf = ConfigFactory.parse_file(kwargs['conf']) 37 | self.exps_folder_name = kwargs['exps_folder_name'] 38 | 39 | self.max_niters = kwargs['max_niters'] 40 | self.GPU_INDEX = kwargs['gpu_index'] 41 | 42 | self.expname = 'Mat-' + kwargs['expname'] 43 | 44 | if kwargs['is_continue'] and kwargs['timestamp'] == 'latest': 45 | print(os.path.join('../',kwargs['exps_folder_name'],self.expname)) 46 | if os.path.exists(os.path.join('../',kwargs['exps_folder_name'],self.expname)): 47 | timestamps = 
os.listdir(os.path.join('../',kwargs['exps_folder_name'],self.expname)) 48 | if (len(timestamps)) == 0: 49 | is_continue = False 50 | timestamp = None 51 | else: 52 | timestamp = sorted(timestamps)[-1] 53 | is_continue = True 54 | else: 55 | is_continue = False 56 | timestamp = None 57 | else: 58 | timestamp = kwargs['timestamp'] 59 | is_continue = kwargs['is_continue'] 60 | 61 | utils.mkdir_ifnotexists(os.path.join('../',self.exps_folder_name)) 62 | self.expdir = os.path.join('../', self.exps_folder_name, self.expname) 63 | utils.mkdir_ifnotexists(self.expdir) 64 | 65 | self.timestamp = timestamp 66 | print(timestamp) 67 | utils.mkdir_ifnotexists(os.path.join(self.expdir, self.timestamp)) 68 | 69 | self.plots_dir = os.path.join(self.expdir, self.timestamp, 'plots') 70 | utils.mkdir_ifnotexists(self.plots_dir) 71 | 72 | self.editing_dir = os.path.join(self.plots_dir,'error') 73 | utils.mkdir_ifnotexists(self.editing_dir) 74 | 75 | # create checkpoints dirs 76 | self.checkpoints_path = os.path.join(self.expdir, self.timestamp, 'checkpoints') 77 | utils.mkdir_ifnotexists(self.checkpoints_path) 78 | self.model_params_subdir = "ModelParameters" 79 | self.mat_optimizer_params_subdir = "MatOptimizerParameters" 80 | self.mat_scheduler_params_subdir = "MatSchedulerParameters" 81 | utils.mkdir_ifnotexists(os.path.join(self.checkpoints_path, self.model_params_subdir)) 82 | utils.mkdir_ifnotexists(os.path.join(self.checkpoints_path, self.mat_optimizer_params_subdir)) 83 | utils.mkdir_ifnotexists(os.path.join(self.checkpoints_path, self.mat_scheduler_params_subdir)) 84 | 85 | # fix random seed 86 | torch.manual_seed(666) 87 | torch.cuda.manual_seed(666) 88 | np.random.seed(666) 89 | 90 | if (not self.GPU_INDEX == 'ignore'): 91 | os.environ["CUDA_VISIBLE_DEVICES"] = '{0}'.format(self.GPU_INDEX) 92 | 93 | print('shell command : {0}'.format(' '.join(sys.argv))) 94 | 95 | print('Loading data ...') 96 | self.train_dataset = utils.get_class(self.conf.get_string('test.dataset_class'))( 97 | self.conf.get_string('test.path_mesh_open3d'), self.conf.get_list('test.pano_img_res'), self.conf.get_float('test.hdr_exposure')) 98 | print('Finish loading data ...') 99 | 100 | self.train_dataloader = torch.utils.data.DataLoader(self.train_dataset, 101 | batch_size=1, 102 | shuffle=True 103 | ) 104 | self.plot_dataloader = torch.utils.data.DataLoader(self.train_dataset, 105 | batch_size=1, 106 | shuffle=True 107 | ) 108 | 109 | self.model = utils.get_class(self.conf.get_string('test.model_class'))(conf=self.conf, \ 110 | cam_position_list = self.train_dataset.cam_position_list, checkpoint_material=self.plots_dir) 111 | if torch.cuda.is_available(): 112 | self.model.cuda() 113 | self.model.eval() 114 | 115 | self.mat_loss = utils.get_class(self.conf.get_string('test.irf_loss_class'))(**self.conf.get_config('render_loss')) 116 | 117 | 118 | geo_dir = os.path.join('../',kwargs['exps_folder_name'], 'IRRF-' + kwargs['expname']) 119 | if os.path.exists(geo_dir): 120 | timestamps = os.listdir(geo_dir) 121 | timestamp = sorted(timestamps)[-1] # using the newest training result 122 | else: 123 | print('No IRF pretrain, please train IRF first!') 124 | exit(0) 125 | # # reloading IRRF 126 | # geo_path = os.path.join(geo_dir, timestamp) + '/checkpoints/ModelParameters/latest.pth' 127 | # print('Reloading IRRF from: ', geo_path) 128 | # model = torch.load(geo_path)['model_state_dict'] 129 | # ir = {k.split('network.')[1]: v for k, v in model.items() if 'ir_radiance_network' in k} 130 | # 
self.model.ir_radiance_network.load_state_dict(ir) 131 | # for parm in self.model.ir_radiance_network.parameters(): 132 | # parm.requires_grad = False 133 | 134 | 135 | self.n_batches = len(self.train_dataloader) 136 | 137 | self.pano_res = self.conf.get_list('test.pano_img_res') 138 | self.cube_lenth = int(self.pano_res[1]/4) 139 | self.cube2pano = Cube2Pano(pano_width=self.pano_res[1], pano_height=self.pano_res[0], cube_lenth=self.cube_lenth) 140 | self.first_val = True 141 | 142 | self.ssim_loss = SSIMLoss() 143 | self.mse_loss = torch.nn.MSELoss() 144 | 145 | 146 | def plot_to_disk_material(self): 147 | 148 | for i in range(len(self.train_dataset.ids)): 149 | cam_to_world = self.train_dataset.extrinsics_list[i].cuda() 150 | gt_img = self.train_dataset.images_items[i]['color'] 151 | gt_img = gt_img.permute(0,3,1,2).reshape(1,-1, self.cube_lenth, self.cube_lenth) # shape: [1, 6*c, cube_len, cube_len] 152 | gt_img = self.cube2pano.ToPano(gt_img)[0].permute(1,2,0) # shape: [pano_h, pano_w, 3] 153 | 154 | derived_id = self.train_dataset.ids[i] 155 | cam_position = self.train_dataset.cam_position_list[i].cuda() 156 | 157 | res = self.model(cam_to_world, derived_id, cam_position, 2) 158 | pred_albedo = res['albedo'].cpu().detach() # shape: (6, h, w, c) 159 | pred_albedo = pred_albedo.permute(0,3,1,2).reshape(1,-1,self.cube_lenth,self.cube_lenth) 160 | pred_albedo = self.cube2pano.ToPano(pred_albedo)[0].permute(1,2,0) # shape: [pano_h, pano_w, 3] 161 | 162 | pred_r = res['roughness'].cpu().detach().expand(-1,-1,-1,3) # shape: (6, h, w, c) 163 | pred_r = pred_r.permute(0,3,1,2).reshape(1,-1,self.cube_lenth,self.cube_lenth) 164 | pred_r = self.cube2pano.ToPano(pred_r)[0].permute(1,2,0) # shape: [pano_h, pano_w, 3] 165 | plt.plot_gbuffer(self.plots_dir, "albedo_{}".format(i), pred_albedo, False) 166 | plt.plot_gbuffer(self.plots_dir, "roughness_{}".format(i), pred_r, False) 167 | 168 | 169 | def plot_to_disk_cube(self): 170 | 171 | mse_error = 0.0 172 | psnr_error = 0.0 173 | ssim_error = 0.0 174 | 175 | for i in range(len(self.train_dataset.ids)): 176 | cam_to_world = self.train_dataset.extrinsics_list[i].cuda() 177 | gt_img = self.train_dataset.images_items[i]['color'] 178 | gt_img = gt_img.permute(0,3,1,2).reshape(1,-1, self.cube_lenth, self.cube_lenth) # shape: [1, 6*c, cube_len, cube_len] 179 | gt_img = self.cube2pano.ToPano(gt_img)[0].permute(1,2,0) # shape: [pano_h, pano_w, 3] 180 | 181 | derived_id = self.train_dataset.ids[i] 182 | cam_position = self.train_dataset.cam_position_list[i].cuda() 183 | 184 | res = self.model(cam_to_world, derived_id, cam_position, False) 185 | pred_img = res['rgb'].cpu().detach() # shape: (6, h, w, c) 186 | pred_img = pred_img.permute(0,3,1,2).reshape(1,-1,self.cube_lenth,self.cube_lenth) 187 | pred_img = self.cube2pano.ToPano(pred_img)[0].permute(1,2,0) # shape: [pano_h, pano_w, 3] 188 | 189 | ssim_error += 1. 
- self.ssim_loss(utils.tonemapping(gt_img.unsqueeze(0)), utils.tonemapping(pred_img.unsqueeze(0))).item() 190 | mse_i = self.mse_loss(utils.tonemapping(gt_img.unsqueeze(0)), utils.tonemapping(pred_img.unsqueeze(0))) 191 | mse_error += mse_i.item() ; psnr_error += utils.mse_to_psnr(mse_i).item() # PSNR must come from the per-image MSE, not the running sum 192 | 193 | plt.plot_mat(self.editing_dir, 0, pred_img, "rendering_{}".format(i), False) 194 | 195 | print("re-rendering error: mse: {}, psnr: {}, ssim: {}".format(mse_error/len(self.train_dataset.ids), \ 196 | psnr_error/len(self.train_dataset.ids), (ssim_error)/len(self.train_dataset.ids))) 197 | 198 | 199 | def run(self): 200 | print("testing...") 201 | self.plot_to_disk_cube() -------------------------------------------------------------------------------- /models/tracer_o3d.py: -------------------------------------------------------------------------------- 1 | ''' 2 | @File : tracer_o3d.py 3 | @Time : 2023/02/27 12:10:36 4 | @Author : Zhen Li 5 | @Contact : yodlee@mail.nwpu.edu.cn 6 | @Institution: Realsee 7 | @License : GNU General Public License v2.0 8 | @Desc : None 9 | ''' 10 | 11 | 12 | import cv2 13 | import numpy as np 14 | import open3d as o3d 15 | 16 | import torch 17 | import torch.nn as nn 18 | import torch.nn.functional as F 19 | from pyhocon import ConfigFactory 20 | 21 | import pyredner 22 | pyredner.set_print_timing(False) 23 | # Use GPU if available 24 | pyredner.set_use_gpu(torch.cuda.is_available()) 25 | 26 | from models.incidentNet import IRNetwork, NeILFMLP 27 | from utils.sample_util import * 28 | 29 | class TracerO3d(nn.Module): 30 | def __init__(self, conf, AABB=None, is_hdr_texture=False): 31 | super().__init__() 32 | # self.incident_radiance_network = IRNetwork(**conf.get_config('models.incident_radiance_network'), AABB=AABB) 33 | self.incident_radiance_network = NeILFMLP() 34 | 35 | # self.resolution = conf.get_list('train.env_res', default=[8,16]) # shape : (height, width) 36 | # self.resolution[0] = int(self.resolution[0]*8) # like 8spp 37 | 38 | # self.num_sample_dir = int(self.resolution[0]*self.resolution[1]) #conf.get_int('train.num_sample_dir', default=128) 39 | 40 | self.path_traced_mesh = conf.get_string('train.path_mesh_open3d') 41 | 42 | 43 | # init ray casting scene 44 | trianglemesh = o3d.io.read_triangle_mesh(self.path_traced_mesh) # the o3d tracer must use a mesh with a single texture map.
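        # The UV lookup in forward() samples a single texture through one flat
        # `triangle_uvs` array and never consults per-triangle material ids, hence
        # the single-texture-map requirement noted above.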
45 | trianglemesh.compute_vertex_normals() 46 | # vertices = np.asarray(trianglemesh.vertices) 47 | # vertices = vertices * np.expand_dims(np.array([-1., -1., 1.]), axis=0) 48 | # trianglemesh.vertices = o3d.utility.Vector3dVector(vertices) 49 | # normals = np.asarray(trianglemesh.vertex_normals) 50 | # normals = normals * np.expand_dims(np.array([-1., -1., 1.]), axis=0) 51 | # trianglemesh.vertex_normals = o3d.utility.Vector3dVector(normals) 52 | # read extra hdr texture because open3d cannot read .hdr/.exr 53 | if is_hdr_texture: 54 | texture = cv2.imread(self.path_traced_mesh.replace("out1.obj","hdr_texture.hdr"), -1)[:,:,::-1] 55 | texture = cv2.flip(texture, 0) 56 | texture = np.asarray(texture, np.float32) 57 | self.texture = (torch.from_numpy(texture).permute(2, 0, 1).unsqueeze(0).float()) # shape: (1, H, W, 3) 58 | self.texture = self.texture * (2**conf.get_float('train.hdr_exposure')) 59 | else: 60 | texture = np.asarray(trianglemesh.textures[1]) 61 | self.texture = (torch.from_numpy(texture).permute(2, 0, 1).unsqueeze(0).float()/255.)**(2.2) # shape: (1, H, W, 3) 62 | 63 | 64 | self.triangle_uvs = np.asarray(trianglemesh.triangle_uvs) 65 | 66 | mesh = o3d.t.geometry.TriangleMesh.from_legacy(trianglemesh) 67 | # Create a scene and add the triangle mesh. 68 | self.scene = o3d.t.geometry.RaycastingScene() 69 | self.scene.add_triangles(mesh) 70 | 71 | 72 | 73 | def forward(self, points, normals, resolution, isnot_first_val=False): 74 | """ 75 | 76 | Args: 77 | points (torch.float32): shape: [b, 3] 78 | normals (torch.float32, optional): shape: [b, 3] . Defaults to None. 79 | isnot_first_val (bool): if set true, do not trace gt ir and only predict ir. 80 | 81 | Returns: 82 | gt_ir (torch.float32): shape: [b, h*w, 3] 83 | predicted_ir (torch.float32): shape: [b, h*w, 3] 84 | """ 85 | b, c = points.shape 86 | 87 | directions = self.generate_dir(normals, resolution) # shape: (b, num_sample, 1, 3) 88 | rays = torch.cat([points.unsqueeze(1).unsqueeze(1).expand_as(directions), directions], dim=-1) 89 | if not isnot_first_val: 90 | 91 | ray = o3d.core.Tensor(rays.cpu().numpy(), dtype=o3d.core.Dtype.Float32) # attention: RaycastingScene only support cpu 92 | 93 | intersections = self.scene.cast_rays(ray) 94 | hit = intersections['t_hit'].numpy() # shape: (b, num_sample, 1) 95 | 96 | # mask = np.isfinite(hit) # shape: (b, num_sample, 1) 97 | mask = np.logical_and(hit > 1e-4, np.isfinite(hit)) 98 | 99 | prim_id = intersections['primitive_ids'].numpy() # shape: (b, num_sample, 1) 100 | prim_uvs = intersections['primitive_uvs'].numpy() # shape: (b, num_sample, 1, 2) 101 | prim_uvs = np.clip(prim_uvs, 0., 1.) 
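            # The block below gathers the three corner UVs of each hit triangle and
            # interpolates them barycentrically, uv = uv0*(1-u-v) + uv1*u + uv2*v, with
            # (u, v) taken from Open3D's `primitive_uvs`, then remaps the result to
            # grid_sample's [-1, 1] convention to fetch radiance from the texture atlas.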
102 | 103 | prim_id[~mask] = 0 104 | 105 | tmp = np.stack([prim_id*3+0, prim_id*3+1, prim_id*3+2], axis=0) # shape: (3, b, num_sample, 1) 106 | tmp = tmp.reshape(-1) 107 | index = self.triangle_uvs[tmp] 108 | index = index.reshape(3, b, resolution[0],resolution[1], 2) # shape: (3, b, num_sample, 1, 2) 109 | grid = index[0,:,:,:,:] * (1-prim_uvs[:,:,:,0:1]-prim_uvs[:,:,:,1:2]) + index[1,:,:,:,:] * prim_uvs[:,:,:,0:1] + index[2,:,:,:,:] * prim_uvs[:,:,:,1:2] # shape: (b, num_sample, 1, 2) 110 | grid = torch.from_numpy(grid).float() # shape: (b, num_sample, 1, 2) 111 | grid[:,:,:,0:1] = grid[:,:,:,0:1]*2-1 112 | grid[:,:,:,1:2] = -(1-grid[:,:,:,1:2]*2) 113 | 114 | gt_ir = F.grid_sample(self.texture.expand([b]+list(self.texture.shape[1:])), grid, mode='bilinear', padding_mode="border",align_corners=False).permute(0,2,3,1) # shape: (b, num_sample, 1, 3) 115 | ner_mask = ~mask 116 | gt_ir[ner_mask,:] = 0 117 | 118 | 119 | predicted_mask_ir = self.incident_radiance_network(torch.cat([rays[:,:,:,0:3].reshape(-1, 3), rays[:,:,:,3:6].reshape(-1, 3)], dim=-1)) # shape: (b*num_sample*1, 3) 120 | predicted_mask_ir = predicted_mask_ir.reshape(b, resolution[0],resolution[1], 3) 121 | predicted_mask_ir[~mask,:] = 0 122 | 123 | res = { 124 | 'gt': gt_ir.reshape(b, -1, 3).cuda() , 125 | 'pred': predicted_mask_ir.reshape(b, -1, 3) 126 | } 127 | 128 | return res 129 | else: 130 | predicted_mask_ir = self.incident_radiance_network(torch.cat([rays[:,:,:,0:3].reshape(-1, 3), rays[:,:,:,3:6].reshape(-1, 3)], dim=-1)) # shape: (b*num_sample*1, 3) 131 | predicted_mask_ir = predicted_mask_ir.reshape(b, resolution[0],resolution[1], 3) 132 | res = { 133 | 'pred': predicted_mask_ir.reshape(b, -1, 3) 134 | } 135 | return res 136 | 137 | 138 | def RadicalInverse(self,bits): 139 | # reverse the bits (Van der Corput radical inverse) 140 | # swap the high and low 16 bits 141 | bits = (bits << 16) | (bits >> 16) 142 | # 0xAAAAAAAA is the bitwise complement of 0x55555555 143 | bits = ((bits & 0x55555555) << 1) | ((bits & 0xAAAAAAAA) >> 1) 144 | # 0xCCCCCCCC is the bitwise complement of 0x33333333 145 | bits = ((bits & 0x33333333) << 2) | ((bits & 0xCCCCCCCC) >> 2) 146 | bits = ((bits & 0x0F0F0F0F) << 4) | ((bits & 0xF0F0F0F0) >> 4) 147 | bits = ((bits & 0x00FF00FF) << 8) | ((bits & 0xFF00FF00) >> 8) 148 | return float(bits) * 2.3283064365386963e-10 149 | 150 | def Hammersley(self,i,N): 151 | return [float(i)/float(N),self.RadicalInverse(i)] 152 | 153 | def generate_dir(self, normals, resolution): 154 | b, c = normals.shape 155 | normals = normals.unsqueeze(1).unsqueeze(1).expand(b, resolution[0],resolution[1], 3) 156 | # compute projection axis 157 | x_axis = torch.zeros_like(normals).cuda() #size:(batch_size, samples, 1, 3) 158 | mask = torch.abs(normals[:,:,:,0]) > 0.99 159 | x_axis[mask, :] = torch.tensor([0., 1., 0.],dtype=torch.float32, device=normals.get_device()) 160 | x_axis[~mask, :] = torch.tensor([1., 0., 0.],dtype=torch.float32, device=normals.get_device()) 161 | 162 | def norm_axis(x): 163 | return x / (torch.norm(x, dim=-1, keepdim=True) + TINY_NUMBER) 164 | 165 | normals = norm_axis(normals) 166 | U = norm_axis(torch.cross(x_axis, normals)) 167 | V = norm_axis(torch.cross( normals, U)) 168 | 169 | num_sample_dir = resolution[0]*resolution[1] 170 | samples=np.zeros((num_sample_dir,2),dtype=np.float32) 171 | for i in range(0,num_sample_dir): 172 | s = Hammersley(i,num_sample_dir) 173 | samples[i][0] = s[0] 174 | samples[i][1] = s[1] 175 | samples = torch.from_numpy(samples).unsqueeze(0).unsqueeze(-2).cuda() #size:(batch_size, samples, 1, 2) 176 | samples = samples.repeat(b, 1, 1, 1).detach() 177 | # samples[:,:, 0:1] = torch.clamp(samples[:,:,0:1] +
torch.rand_like(samples[:,:,0:1])*0.09, 0., 1.) 178 | shift = torch.rand(b, 1, 1, 2).cuda() 179 | samples = samples + shift 180 | index1 = samples > 1. 181 | samples[index1] = samples[index1]-1. 182 | index2 = samples < 0. 183 | samples[index2] = samples[index2] + 1 184 | samples = torch.clamp(samples, 0+TINY_NUMBER, 1-TINY_NUMBER) # avoid NAN in roughness backward. 185 | samples = samples.expand(b, num_sample_dir, 1, 2).reshape(b, resolution[0], resolution[1], 2) 186 | 187 | # ############ test sampling with ordered variables; attention: cosTheta is generated uniformly rather than the elevation angle, so the resulting panorama is distorted in elevation. 188 | # azimuth = torch.linspace(0.,1.,256).cuda() 189 | # elevation = torch.linspace(0.,1.,128).cuda() 190 | # elevation,azimuth = torch.meshgrid([elevation,azimuth]) 191 | # samples = torch.stack([elevation,azimuth], dim=-1).unsqueeze(0) #size:(batch_size, h, w, 2) 192 | 193 | # uniform sampling; attention: directions are generated with y as the up axis and then translated to our coordinates: 194 | # phi - np.pi; y = sin((np.pi/2-theta)) = cos(theta); y_projected = cos((np.pi/2-theta)) = sin(theta) 195 | phi = 2 * np.pi * samples[:,:,:,1:2] - np.pi 196 | cosTheta = (1.0 - samples[:,:,:,0:1]) 197 | sinTheta = torch.sqrt(1.0 - cosTheta * cosTheta) 198 | L = V * (torch.sin(phi) * sinTheta) \ 199 | + normals * cosTheta \ 200 | + U * -(torch.cos(phi) * sinTheta) # [batch, num_samples, 1, 3] 201 | 202 | return L 203 | 204 | 205 | 206 | 207 | 208 | 209 | 210 | if __name__=="__main__": 211 | conf = ConfigFactory.parse_file('./configs/default.conf') 212 | tracer = TracerO3d(conf) 213 | test_points = torch.tensor([-0.295, 0.104, -1.523]).unsqueeze(0) 214 | test_points = (test_points + 0.1 * torch.tensor([-1., 0., 0.]).unsqueeze(0)).cuda() 215 | env_res = conf.get_list('train.env_res', default=[8,16]) # shape : (height, width) 216 | res = tracer(test_points, torch.tensor([-1., 0., 0.]).unsqueeze(0).cuda(), env_res) # forward() expects the sampling resolution and returns a dict 217 | radiance = res['gt'][0].reshape(env_res[0], env_res[1], 3) 218 | print(radiance.shape) 219 | cv2.imwrite("../results/test_house/env_hemi_open3d.jpg", radiance.cpu().numpy()[:,:,::-1]*255.0) 220 | 221 | -------------------------------------------------------------------------------- /tester/test_relighting.py: -------------------------------------------------------------------------------- 1 | ''' 2 | @File : test_relighting.py 3 | @Time : 2023/02/27 12:11:02 4 | @Author : Zhen Li 5 | @Contact : yodlee@mail.nwpu.edu.cn 6 | @Institution: Realsee 7 | @License : GNU General Public License v2.0 8 | @Desc : None 9 | ''' 10 | 11 | 12 | import os 13 | import sys 14 | from datetime import datetime 15 | import time 16 | import itertools 17 | from utils.sample_util import TINY_NUMBER 18 | 19 | import imageio 20 | import numpy as np 21 | import torch 22 | from torch.nn import functional as F 23 | from pyhocon import ConfigFactory 24 | from tensorboardX import SummaryWriter 25 | 26 | import utils.general as utils 27 | import utils.plots as plt 28 | from models.loss import IRFLoss 29 | from utils.Cube2Pano import Cube2Pano 30 | 31 | class RelightingRunner(): 32 | def __init__(self,**kwargs): 33 | torch.set_default_dtype(torch.float32) 34 | torch.set_num_threads(1) 35 | 36 | self.conf = ConfigFactory.parse_file(kwargs['conf']) 37 | self.exps_folder_name = kwargs['exps_folder_name'] 38 | 39 | self.max_niters = kwargs['max_niters'] 40 | self.GPU_INDEX = kwargs['gpu_index'] 41 | 42 | self.expname = 'Mat-' + kwargs['expname'] 43 | 44 | if kwargs['is_continue'] and kwargs['timestamp'] == 'latest':
45 | print(os.path.join('../',kwargs['exps_folder_name'],self.expname)) 46 | if os.path.exists(os.path.join('../',kwargs['exps_folder_name'],self.expname)): 47 | timestamps = os.listdir(os.path.join('../',kwargs['exps_folder_name'],self.expname)) 48 | if (len(timestamps)) == 0: 49 | is_continue = False 50 | timestamp = None 51 | else: 52 | timestamp = sorted(timestamps)[-1] 53 | is_continue = True 54 | else: 55 | is_continue = False 56 | timestamp = None 57 | else: 58 | timestamp = kwargs['timestamp'] 59 | is_continue = kwargs['is_continue'] 60 | 61 | utils.mkdir_ifnotexists(os.path.join('../',self.exps_folder_name)) 62 | self.expdir = os.path.join('../', self.exps_folder_name, self.expname) 63 | utils.mkdir_ifnotexists(self.expdir) 64 | 65 | self.timestamp = timestamp 66 | print(timestamp) 67 | utils.mkdir_ifnotexists(os.path.join(self.expdir, self.timestamp)) 68 | 69 | self.plots_dir = os.path.join(self.expdir, self.timestamp, 'plots') 70 | utils.mkdir_ifnotexists(self.plots_dir) 71 | 72 | self.editing_dir = os.path.join(self.plots_dir,'relighting') 73 | utils.mkdir_ifnotexists(self.editing_dir) 74 | 75 | # create checkpoints dirs 76 | self.checkpoints_path = os.path.join(self.expdir, self.timestamp, 'checkpoints') 77 | utils.mkdir_ifnotexists(self.checkpoints_path) 78 | self.model_params_subdir = "ModelParameters" 79 | self.mat_optimizer_params_subdir = "MatOptimizerParameters" 80 | self.mat_scheduler_params_subdir = "MatSchedulerParameters" 81 | utils.mkdir_ifnotexists(os.path.join(self.checkpoints_path, self.model_params_subdir)) 82 | utils.mkdir_ifnotexists(os.path.join(self.checkpoints_path, self.mat_optimizer_params_subdir)) 83 | utils.mkdir_ifnotexists(os.path.join(self.checkpoints_path, self.mat_scheduler_params_subdir)) 84 | 85 | # fix random seed 86 | torch.manual_seed(666) 87 | torch.cuda.manual_seed(666) 88 | np.random.seed(666) 89 | 90 | if (not self.GPU_INDEX == 'ignore'): 91 | os.environ["CUDA_VISIBLE_DEVICES"] = '{0}'.format(self.GPU_INDEX) 92 | 93 | print('shell command : {0}'.format(' '.join(sys.argv))) 94 | 95 | print('Loading data ...') 96 | self.train_dataset = utils.get_class(self.conf.get_string('test.dataset_class'))( 97 | self.conf.get_string('test.path_mesh_open3d'), self.conf.get_list('test.pano_img_res'), self.conf.get_float('test.hdr_exposure')) 98 | print('Finish loading data ...') 99 | 100 | self.train_dataloader = torch.utils.data.DataLoader(self.train_dataset, 101 | batch_size=1, 102 | shuffle=True 103 | ) 104 | self.plot_dataloader = torch.utils.data.DataLoader(self.train_dataset, 105 | batch_size=1, 106 | shuffle=True 107 | ) 108 | 109 | self.model = utils.get_class(self.conf.get_string('test.model_class'))(conf=self.conf, \ 110 | cam_position_list = self.train_dataset.cam_position_list, checkpoint_material=self.plots_dir, relighting=True) 111 | if torch.cuda.is_available(): 112 | self.model.cuda() 113 | self.model.eval() 114 | 115 | self.mat_loss = utils.get_class(self.conf.get_string('test.irf_loss_class'))(**self.conf.get_config('render_loss')) 116 | 117 | 118 | geo_dir = os.path.join('../',kwargs['exps_folder_name'], 'IRRF-' + kwargs['expname']) 119 | if os.path.exists(geo_dir): 120 | timestamps = os.listdir(geo_dir) 121 | timestamp = sorted(timestamps)[-1] # using the newest training result 122 | else: 123 | print('No IRF pretrain, please train IRF first!') 124 | exit(0) 125 | # reloading IRRF 126 | geo_path = os.path.join(geo_dir, timestamp) + '/checkpoints/ModelParameters/latest.pth' 127 | print('Reloading IRRF from: ', geo_path) 128 | model = 
torch.load(geo_path)['model_state_dict'] 129 | ir = {k.split('network.')[1]: v for k, v in model.items() if 'ir_radiance_network' in k} 130 | self.model.ir_radiance_network.load_state_dict(ir) 131 | for parm in self.model.ir_radiance_network.parameters(): 132 | parm.requires_grad = False 133 | 134 | 135 | self.n_batches = len(self.train_dataloader) 136 | 137 | self.pano_res = self.conf.get_list('test.pano_img_res') 138 | self.cube_lenth = int(self.pano_res[1]/4) 139 | self.cube2pano = Cube2Pano(pano_width=self.pano_res[1], pano_height=self.pano_res[0], cube_lenth=self.cube_lenth) 140 | self.first_val = True 141 | self.floor_max_mask = {} 142 | self.seg_mask = {} 143 | self.seg_tag = torch.from_numpy(np.array( list(range(0, 49)), np.float32) ) 144 | # self.seg_tag = torch.tensor([46.], dtype=torch.float32) 145 | self.room_seg_mask = {} 146 | self.room_meta_scale, self.room_meta_w, self.room_meta_h, self.room_meta_xmin, self.room_meta_zmin, self.room_img= utils.parse_roomseg(\ 147 | os.path.join(os.path.dirname(os.path.dirname(self.conf.get_string('test.path_mesh_open3d'))), 'roomseg')) 148 | 149 | 150 | def plot_to_disk_material(self): 151 | 152 | for i in range(len(self.train_dataset.ids)): 153 | cam_to_world = self.train_dataset.extrinsics_list[i].cuda() 154 | gt_img = self.train_dataset.images_items[i]['color'] 155 | gt_img = gt_img.permute(0,3,1,2).reshape(1,-1, self.cube_lenth, self.cube_lenth) # shape: [1, 6*c, cube_len, cube_len] 156 | gt_img = self.cube2pano.ToPano(gt_img)[0].permute(1,2,0) # shape: [pano_h, pano_w, 3] 157 | 158 | derived_id = self.train_dataset.ids[i] 159 | cam_position = self.train_dataset.cam_position_list[i].cuda() 160 | 161 | res = self.model(cam_to_world, derived_id, cam_position, 2) 162 | pred_albedo = res['albedo'].cpu().detach() # shape: (6, h, w, c) 163 | pred_albedo = pred_albedo.permute(0,3,1,2).reshape(1,-1,self.cube_lenth,self.cube_lenth) 164 | pred_albedo = self.cube2pano.ToPano(pred_albedo)[0].permute(1,2,0) # shape: [pano_h, pano_w, 3] 165 | 166 | pred_r = res['roughness'].cpu().detach().expand(-1,-1,-1,3) # shape: (6, h, w, c) 167 | pred_r = pred_r.permute(0,3,1,2).reshape(1,-1,self.cube_lenth,self.cube_lenth) 168 | pred_r = self.cube2pano.ToPano(pred_r)[0].permute(1,2,0) # shape: [pano_h, pano_w, 3] 169 | plt.plot_gbuffer(self.plots_dir, "albedo_{}".format(i), pred_albedo, False) 170 | plt.plot_gbuffer(self.plots_dir, "roughness_{}".format(i), pred_r, False) 171 | 172 | 173 | def plot_to_disk_cube(self): 174 | 175 | # index = torch.randint(0, len(self.train_dataset.ids),(1,)) 176 | index = -1 177 | for i in range(len(self.train_dataset.ids)): 178 | cam_to_world = self.train_dataset.extrinsics_list[i].cuda() 179 | gt_img = self.train_dataset.images_items[i]['color'] 180 | gt_img = gt_img.permute(0,3,1,2).reshape(1,-1, self.cube_lenth, self.cube_lenth) # shape: [1, 6*c, cube_len, cube_len] 181 | gt_img = self.cube2pano.ToPano(gt_img)[0].permute(1,2,0) # shape: [pano_h, pano_w, 3] 182 | 183 | derived_id = self.train_dataset.ids[i] 184 | cam_position = self.train_dataset.cam_position_list[i].cuda() 185 | 186 | res = self.model(cam_to_world, derived_id, cam_position, False) 187 | pred_img = res['rgb'].cpu().detach() # shape: (6, h, w, c) 188 | pred_img = pred_img.permute(0,3,1,2).reshape(1,-1,self.cube_lenth,self.cube_lenth) 189 | pred_img = self.cube2pano.ToPano(pred_img)[0].permute(1,2,0) # shape: [pano_h, pano_w, 3] 190 | 191 | pred_r = res['normal'].cpu().detach().expand(-1,-1,-1,3) # shape: (6, h, w, c) 192 | pred_r = 
pred_r.permute(0,3,1,2).reshape(1,-1,self.cube_lenth,self.cube_lenth) 193 | pred_r = self.cube2pano.ToPano(pred_r)[0].permute(1,2,0) # shape: [pano_h, pano_w, 3] 194 | # plt.plot_gbuffer(self.plots_dir, "{}_{}".format(i, self.cur_iter), pred_r, False) 195 | 196 | plt.plot_mat(self.editing_dir, 0, pred_img, "relighting_{}".format(i), False) 197 | 198 | def plot_to_disk(self): 199 | 200 | # index = torch.randint(0, len(self.train_dataset.ids),(1,)) 201 | index = -1 202 | for i in range(len(self.train_dataset.ids)): 203 | cam_to_world = self.train_dataset.extrinsics_list[i].cuda() 204 | derived_id = self.train_dataset.ids[i] 205 | cam_position = self.train_dataset.cam_position_list[i].cuda() 206 | 207 | res = self.model(cam_to_world, derived_id, cam_position) 208 | pred_img = res['rgb'].cpu().detach() # shape: (h, w, 3) 209 | 210 | # plt.plot_gbuffer(self.plots_dir, "{}_{}".format(i, self.cur_iter), pred_r, False) 211 | 212 | plt.plot_mat(self.editing_dir, 0, pred_img, "relighting_{}".format(i), False) 213 | 214 | def run(self): 215 | print("testing...") 216 | # self.plot_to_disk_cube() 217 | self.plot_to_disk() -------------------------------------------------------------------------------- /models/mat_mlp.py: -------------------------------------------------------------------------------- 1 | ''' 2 | @File : mat_mlp.py 3 | @Time : 2023/02/27 12:09:22 4 | @Author : Zhen Li 5 | @Contact : yodlee@mail.nwpu.edu.cn 6 | @Institution: Realsee 7 | @License : GNU General Public License v2.0 8 | @Desc : None 9 | ''' 10 | 11 | 12 | import cv2 13 | import numpy as np 14 | import open3d as o3d 15 | 16 | import torch 17 | import torch.nn as nn 18 | import torch.nn.functional as F 19 | from pyhocon import ConfigFactory 20 | 21 | import pyredner 22 | pyredner.set_print_timing(False) 23 | # Use GPU if available 24 | pyredner.set_use_gpu(torch.cuda.is_available()) 25 | 26 | from models.incidentNet import IRNetwork, MatNetwork 27 | from utils.sample_util import TINY_NUMBER, generate_dir 28 | 29 | 30 | class MaterialMLP(nn.Module): 31 | def __init__(self, conf, ids, extrinsics, optim_cam=False, gt_irf=True): 32 | super().__init__() 33 | # self.incident_radiance_network = IRNetwork(**conf.get_config('models.incident_radiance_network')) 34 | self.material_network = MatNetwork(**conf.get_config('models.material_network')) 35 | 36 | self.path_traced_mesh = conf.get_string('train.path_mesh_open3d') 37 | self.pano_res = conf.get_list('train.pano_img_res', default=[256,512]) # shape : (height, width) 38 | self.sample_l = conf.get_list('train.sample_light', default=[64,64]) # number of samples : (diffuse, specular) 39 | self.optim_cam = optim_cam 40 | self.ids = ids 41 | self.extrinsics = extrinsics 42 | 43 | self.object_list = self.generate_obj(self.path_traced_mesh) 44 | 45 | 46 | 47 | if optim_cam: 48 | self.param_extrinsics = {} 49 | for i in range(len(ids)): 50 | self.param_extrinsics.update({ 51 | self.ids[i]: nn.Parameter(self.extrinsics[i], requires_grad=True) 52 | }) 53 | self.param_extrinsics = nn.ParameterDict(self.param_extrinsics) 54 | 55 | if gt_irf: 56 | self.path_traced_mesh = conf.get_string('train.path_mesh_open3d') 57 | # init ray casting scene 58 | trianglemesh = o3d.io.read_triangle_mesh(self.path_traced_mesh) # o3d tracer must use the mesh with one texture map. 
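# The lines around here build the Open3D ray-casting scene that every tracer in
# this repo relies on. A minimal standalone sketch of the same API pattern, kept
# as a comment so it does not execute inside __init__ ("scene.obj" is a
# placeholder path; the tensor-API RaycastingScene runs on CPU only):
#
#     import numpy as np
#     import open3d as o3d
#     tm = o3d.io.read_triangle_mesh("scene.obj")   # legacy mesh with one UV-mapped texture
#     scene = o3d.t.geometry.RaycastingScene()
#     scene.add_triangles(o3d.t.geometry.TriangleMesh.from_legacy(tm))
#     rays = o3d.core.Tensor(np.array([[0., 0., 0., 0., 0., 1.]], np.float32))  # origin xyz + direction xyz
#     hits = scene.cast_rays(rays)
#     # hits['t_hit'], hits['primitive_ids'] and hits['primitive_uvs'] drive the
#     # texture lookup performed in query_irf() further below.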
59 | trianglemesh.compute_vertex_normals() 60 | texture = cv2.imread(self.path_traced_mesh.replace("1.obj","_hdr_ccm.hdr"), -1)[:,:,::-1] 61 | texture = cv2.flip(texture, 0) 62 | texture = np.asarray(texture, np.float32) 63 | self.texture = (torch.from_numpy(texture).permute(2, 0, 1).unsqueeze(0).float()) # shape: (1, H, W, 3) 64 | self.texture = self.texture * (2**7) 65 | 66 | self.triangle_uvs = np.asarray(trianglemesh.triangle_uvs) 67 | mesh = o3d.t.geometry.TriangleMesh.from_legacy(trianglemesh) 68 | # Create a scene and add the triangle mesh. 69 | self.scene = o3d.t.geometry.RaycastingScene() 70 | self.scene.add_triangles(mesh) 71 | self.mat = None 72 | 73 | 74 | 75 | 76 | def forward(self, cam_to_world, id): 77 | """ assume that the batch_size is 1. 78 | 79 | Args: 80 | cam_to_world (torch.float32): shape: [4, 4]. 81 | 82 | Returns: 83 | predicted_ir (torch.float32): shape: [1, h, w, 3] 84 | """ 85 | 86 | if self.optim_cam: 87 | camera = pyredner.Camera(cam_to_world=self.param_extrinsics[id], 88 | camera_type=pyredner.camera_type.panorama, 89 | resolution=(self.pano_res[0],self.pano_res[1]), 90 | clip_near = 1e-2, # needs to > 0 91 | fisheye = False 92 | ) 93 | else: 94 | camera = pyredner.Camera(cam_to_world=cam_to_world, 95 | camera_type=pyredner.camera_type.panorama, 96 | resolution=(self.pano_res[0],self.pano_res[1]), 97 | clip_near = 1e-2, # needs to > 0 98 | fisheye = False 99 | ) 100 | 101 | scene = pyredner.Scene(camera=camera, objects=self.object_list) 102 | # imgs = pyredner.render_g_buffer(scene=scene, channels=[pyredner.channels.diffuse_reflectance], num_samples=[1,1]) # shape: (env_h, env_w, 3) 103 | # albedo: [0:3], roughness: [3:4], position: [4:7], normal: [7:10] 104 | g_buffers = pyredner.render_g_buffer(scene=scene, \ 105 | channels=[ 106 | pyredner.channels.position, 107 | pyredner.channels.geometry_normal], num_samples=[2,2]) 108 | positions = g_buffers[:,:,0:3].detach()+1e-2*g_buffers[:,:,3:6].detach() 109 | mat = self.material_network(positions) 110 | self.mat = mat 111 | img = self.render(g_buffers[:,:,3:6].detach(), mat[:,:,0:3], mat[:,:,3:4], positions, cam_to_world[0:3, -1]) 112 | return img.unsqueeze(0) 113 | 114 | 115 | def generate_obj(self, path_mesh): 116 | object_list = pyredner.load_obj(path_mesh, obj_group=True, return_objects=True) 117 | 118 | # for object in object_list: 119 | # object.vertices = object.vertices * torch.tensor([-1.0,-1.0,1.0]).unsqueeze(0).cuda() 120 | # object.normals = object.normals * torch.tensor([-1.0,-1.0,1.0]).unsqueeze(0).cuda() 121 | 122 | return object_list 123 | 124 | def render(self, normal: torch.Tensor, albedo: torch.Tensor, roughness: torch.Tensor, points: torch.Tensor, cam_position: torch.Tensor): 125 | """render final color according to g buffers and IRF. 
126 | 127 | Args: 128 | normal (torch.float32): [env_h, env_w, 3] 129 | albedo (torch.float32): [env_h, env_w, 3] 130 | roughness (torch.float32): [env_h, env_w, 1] 131 | points (torch.float32): [env_h, env_w, 3] 132 | cam_position (torch.float32): [3] 133 | """ 134 | 135 | env_h, env_w, c = normal.shape 136 | normal = normal.reshape(-1, 3) 137 | albedo = albedo.reshape(-1, 3) 138 | roughness = roughness.reshape(-1, 1) 139 | points = points.reshape(-1, 3) 140 | view = F.normalize(cam_position.unsqueeze(0) - points, eps=1e-4) 141 | 142 | light_dir_diff = generate_dir(normal, self.sample_l[0]) # shape: [env_h*env_w, n_sample, 3] 143 | # with torch.no_grad(): 144 | # diffuse_lighting = hdr_recover(self.incident_radiance_network(points.unsqueeze(1).expand_as(light_dir_diff), light_dir_diff)) 145 | diffuse_lighting = self.query_irf(points.unsqueeze(1).expand_as(light_dir_diff), light_dir_diff.unsqueeze(-2), self.sample_l[0]) 146 | 147 | diffuse = self.diffuse_reflectance(diffuse_lighting, light_dir_diff, normal, albedo) / self.sample_l[0] 148 | # diffuse = torch.sum(diffuse_lighting, dim=1) / self.sample_l[0] 149 | 150 | h_dir_specular = generate_dir(normal, self.sample_l[1], 'importance', roughness) 151 | vdh = torch.clamp(torch.sum( h_dir_specular * view.unsqueeze(1), dim=-1,keepdim=True), 0.0, 1.0) # shape: [b, n_sample, 1] 152 | light_dir_spec = 2 * vdh * h_dir_specular - view.unsqueeze(1) 153 | # with torch.no_grad(): 154 | # specular_lighting = hdr_recover(self.incident_radiance_network(points.unsqueeze(1).expand_as(h_dir_specular), light_dir_spec)) 155 | specular_lighting = self.query_irf(points.unsqueeze(1).expand_as(light_dir_spec), light_dir_spec.unsqueeze(-2).detach(), self.sample_l[1]) 156 | 157 | specular = self.specular_reflectance(specular_lighting, h_dir_specular, normal, view, light_dir_spec, roughness) / self.sample_l[1] 158 | 159 | return (diffuse + specular).reshape(env_h, env_w, 3) 160 | 161 | 162 | def diffuse_reflectance(self, lighting, l, n, albedo): 163 | ndl = torch.clamp(torch.sum( n.unsqueeze(1) * l, dim=-1,keepdim=True), 0.0, 1.0) # shape: [b, n_sample, 1] 164 | brdf = albedo.unsqueeze(1) / np.pi 165 | 166 | return torch.sum( lighting * brdf * ndl * 2*np.pi , dim=1) 167 | 168 | def specular_reflectance(self, lighting, h, n, v, l, roughness, albedo=None): 169 | 170 | vdh = torch.clamp(torch.sum( h * v.unsqueeze(1), dim=-1,keepdim=True), 0.0, 1.0) # shape: [b, n_sample, 1] 171 | ndl = torch.clamp(torch.sum( n.unsqueeze(1) * l, dim=-1,keepdim=True), 0.0, 1.0) # shape: [b, n_sample, 1] 172 | ndh = torch.clamp(torch.sum( n.unsqueeze(1) * h, dim=-1,keepdim=True), 0.0, 1.0) # shape: [b, n_sample, 1] 173 | ndv = torch.clamp(torch.sum( n.unsqueeze(1) * v.unsqueeze(1), dim=-1,keepdim=True), 0.0, 1.0) # shape: [b, n_sample, 1] 174 | vdl = torch.clamp(torch.sum( v.unsqueeze(1) * l, dim=-1,keepdim=True), 0.0, 1.0) # shape: [b, n_sample, 1] 175 | 176 | f = 0.04 + 0.96 * torch.pow(2.0,((-5.55472*vdh-6.98316)*vdh)) 177 | 178 | k = (roughness.unsqueeze(1) + 1) * (roughness.unsqueeze(1) + 1) / 8 179 | g1_ndv = ndv / torch.clamp( ndv *(1-k) + k , min=TINY_NUMBER) 180 | g1_ndl = ndl / torch.clamp( ndl *(1-k) + k , min=TINY_NUMBER) 181 | g = g1_ndl * g1_ndv 182 | 183 | # brdf: f * d * g / (4*ndl*ndv) 184 | brdf = f * g / torch.clamp(4 * ndl * ndv, min=TINY_NUMBER) 185 | 186 | # pdf : D*ndh / (4*vdh) 187 | # equation: L = lighing * brdf * ndl 188 | return torch.sum( lighting * brdf * ndl * 4 * vdh / torch.clamp(ndh, TINY_NUMBER), dim=1) 189 | 190 | 191 | def query_irf(self, points, 
directions, num_sample):
192 | rays = torch.cat([points.unsqueeze(-2).expand_as(directions), directions], dim=-1)
193 | ray = o3d.core.Tensor(rays.cpu().numpy(), dtype=o3d.core.Dtype.Float32) # attention: RaycastingScene only supports CPU
194 | intersections = self.scene.cast_rays(ray)
195 | hit = intersections['t_hit'].numpy() # shape: (b, num_sample, 1)
196 | 
197 | # mask = np.isfinite(hit) # shape: (b, num_sample, 1)
198 | mask = np.logical_and(hit > 1e-4, np.isfinite(hit))
199 | 
200 | prim_id = intersections['primitive_ids'].numpy() # shape: (b, num_sample, 1)
201 | prim_uvs = intersections['primitive_uvs'].numpy() # shape: (b, num_sample, 1, 2)
202 | prim_uvs = np.clip(prim_uvs, 0., 1.)
203 | 
204 | prim_id[~mask] = 0
205 | 
206 | tmp = np.stack([prim_id*3+0, prim_id*3+1, prim_id*3+2], axis=0) # shape: (3, b, num_sample, 1)
207 | tmp = tmp.reshape(-1)
208 | index = self.triangle_uvs[tmp]
209 | index = index.reshape(3, self.pano_res[0]*self.pano_res[1], num_sample,1, 2) # shape: (3, b, num_sample, 1, 2)
210 | grid = index[0,:,:,:,:] * (1-prim_uvs[:,:,:,0:1]-prim_uvs[:,:,:,1:2]) + index[1,:,:,:,:] * prim_uvs[:,:,:,0:1] + index[2,:,:,:,:] * prim_uvs[:,:,:,1:2] # shape: (b, num_sample, 1, 2)
211 | grid = torch.from_numpy(grid).float() # shape: (b, num_sample, 1, 2)
212 | grid[:,:,:,0:1] = grid[:,:,:,0:1]*2-1
213 | grid[:,:,:,1:2] = -(1-grid[:,:,:,1:2]*2)
214 | 
215 | gt_ir = F.grid_sample(self.texture.expand([self.pano_res[0]*self.pano_res[1]]+list(self.texture.shape[1:])), grid, mode='bilinear', padding_mode="border",align_corners=False).permute(0,2,3,1) # shape: (b, num_sample, 1, 3)
216 | ner_mask = ~mask
217 | gt_ir[ner_mask,:] = 0
218 | gt_ir = gt_ir.reshape(self.pano_res[0]*self.pano_res[1], num_sample, 3)
219 | return gt_ir.cuda()
220 | 
221 | 
222 | if __name__=="__main__":
223 | conf = ConfigFactory.parse_file('./configs/default.conf')
224 | # minimal smoke test; the single id and identity extrinsics/pose are placeholders.
225 | mm = MaterialMLP(conf, ids=['0'], extrinsics=[torch.eye(4)]).cuda()
226 | radiance = mm(torch.eye(4).cuda(), '0').cpu().detach()
227 | print(radiance.shape)
228 | cv2.imwrite("../results/test_house/pano.jpg", radiance[0].numpy()[:,:,::-1]*255.0)
229 | 
--------------------------------------------------------------------------------
/models/mat_nvdiffrast_invrender.py:
--------------------------------------------------------------------------------
1 | '''
2 | @File : mat_nvdiffrast_invrender.py
3 | @Time : 2023/02/27 12:09:28
4 | @Author : Zhen Li
5 | @Contact : yodlee@mail.nwpu.edu.cn
6 | @Institution: Realsee
7 | @License : GNU General Public License v2.0
8 | @Desc : None
9 | '''
10 | 
11 | 
12 | import cv2
13 | import numpy as np
14 | import math
15 | 
16 | import torch
17 | import torch.nn as nn
18 | import torch.nn.functional as F
19 | from pyhocon import ConfigFactory
20 | import open3d as o3d
21 | 
22 | import nvdiffrast.torch as dr
23 | 
24 | import pyredner
25 | pyredner.set_use_gpu(False)
26 | 
27 | from utils.general import get_mip_level, rgb_to_intensity
28 | 
29 | 
30 | from models.incidentNet import IRNetwork, MatNetwork, EnvmapMaterialNetwork
31 | from utils.sample_util import TINY_NUMBER, generate_dir, generate_fixed_samples
32 | from utils.general import hdr_recover
33 | 
34 | 
35 | class MaterialModel(nn.Module):
36 | def __init__(self, conf, ids, extrinsics, optim_cam=False, gt_irf=True, gt_irrt=True):
37 | super().__init__()
38 | # self.incident_radiance_network = IRNetwork(**conf.get_config('models.incident_radiance_network'))
39 | # self.ir_radiance_network = MatNetwork(**conf.get_config('models.irrf_network'))
40 | self.material_network = EnvmapMaterialNetwork()
41 | 
42 | self.resolution =
conf.get_list('train.env_res', default=[8,16]) # shape : (height, width) 43 | 44 | self.path_traced_mesh = conf.get_string('train.path_mesh_open3d') 45 | self.pano_res = conf.get_list('train.pano_img_res', default=[1000,2000]) # shape : (height, width) 46 | self.cube_res = int(self.pano_res[1]/4) 47 | self.sample_l = conf.get_list('train.sample_light', default=[64,64]) # number of samples : (diffuse, specular) 48 | self.sample_type = conf.get_list('models.render.sample_type', default=['uniform','importance']) 49 | self.optim_cam = optim_cam 50 | self.ids = ids 51 | self.extrinsics = extrinsics 52 | self.conf = conf 53 | # assume that there only exists one mesh and one texture 54 | self.object_list = self.generate_obj(self.path_traced_mesh)[0] 55 | 56 | 57 | self.triangles = (nn.Parameter(self.object_list.indices, requires_grad=False)) 58 | self.uvs = (nn.Parameter(self.object_list.uvs, requires_grad=False)) 59 | self.uv_indices = (nn.Parameter(self.object_list.uv_indices, requires_grad=False)) 60 | self.w = torch.ones([self.object_list.vertices.shape[0], 1]) 61 | self.vertices = (nn.Parameter(torch.cat([self.object_list.vertices, self.w], dim=-1).repeat(6, 1, 1), requires_grad=False)) # shape: [5, n, 4] 62 | self.normals = (nn.Parameter(self.object_list.normals, requires_grad=False)) # shape: [n, 3] 63 | 64 | texture = self.object_list.material.diffuse_reflectance.texels 65 | 66 | # self.max_mip_level = int(np.log2(texture.shape[0])) 67 | self.max_mip_level = (get_mip_level(texture.shape[0])) 68 | 69 | self.gt_irrt = gt_irrt 70 | if gt_irrt: 71 | texture = cv2.imread(self.path_traced_mesh.replace("out1.obj","irt.hdr"), -1)[:,:,::-1] 72 | 73 | texture = np.asarray(texture, np.float32).copy() 74 | self.irrt = nn.Parameter(torch.from_numpy(texture), requires_grad=False) 75 | 76 | # self.samples_diff = nn.Parameter(generate_fixed_samples(self.cube_res*self.cube_res*6, self.sample_l[0]), requires_grad=False) 77 | # self.samples_spec = nn.Parameter(generate_fixed_samples(self.cube_res*self.cube_res*6, self.sample_l[1]), requires_grad=False) 78 | 79 | self.glctx = dr.RasterizeGLContext() 80 | 81 | if gt_irf: 82 | self.path_traced_mesh = conf.get_string('train.path_mesh_open3d') 83 | # init ray casting scene 84 | trianglemesh = o3d.io.read_triangle_mesh(self.path_traced_mesh) # o3d tracer must use the mesh with one texture map. 85 | # trianglemesh.compute_vertex_normals() 86 | texture = cv2.imread(self.path_traced_mesh.replace("out1.obj","hdr_texture.hdr"), -1)[:,:,::-1] 87 | texture = cv2.flip(texture, 0) 88 | texture = np.asarray(texture, np.float32) 89 | self.texture = (torch.from_numpy(texture).permute(2, 0, 1).unsqueeze(0).float()) # shape: (1, 3, H, W) 90 | self.texture = self.texture * (2**conf.get_float('train.hdr_exposure')) 91 | # texture = np.asarray(trianglemesh.textures[1]) 92 | # self.texture = (torch.from_numpy(texture).permute(2, 0, 1).unsqueeze(0).float()/255.)**(2.2) # shape: (1, 3, H, W) 93 | 94 | self.triangle_uvs = np.asarray(trianglemesh.triangle_uvs) 95 | mesh = o3d.t.geometry.TriangleMesh.from_legacy(trianglemesh) 96 | # Create a scene and add the triangle mesh. 97 | self.scene = o3d.t.geometry.RaycastingScene() 98 | self.scene.add_triangles(mesh) 99 | 100 | 101 | 102 | 103 | 104 | def forward(self, mvp, id, cam_position, stage=1): 105 | """ assume that the batch_size is 1. 106 | 107 | Args: 108 | mvp (torch.float32): shape: [6, 4, 4]. 109 | cam_position (torch.float32): shape: [3]. 
110 | 111 | Returns: 112 | color (torch.float32): shape: [6, cube_res, cube_res, 3] 113 | """ 114 | 115 | 116 | clip_vertexes = torch.einsum('ijk,ikl->ijl', self.vertices, mvp) 117 | 118 | rast_out, rast_out_db = dr.rasterize(self.glctx, clip_vertexes, self.triangles, resolution=[self.cube_res, self.cube_res]) 119 | features = torch.cat([self.vertices[:,:,0:3], self.normals.unsqueeze(0).expand(6, -1, -1)], dim=-1) 120 | g_buffers, _ = dr.interpolate(features.contiguous(), rast_out, self.triangles) # [6, h, w, 6] 121 | # disable antialias, because the artifacts of edges will appear between different objects. 122 | g_buffers = torch.where(rast_out[..., 3:] > 0, g_buffers, torch.tensor([1.,0.,0., 1.,0.,0.]).cuda()) # give a fix position and normal. 123 | mask = (rast_out[..., 3:] > 0).float() 124 | 125 | # get irr from irrt 126 | texc, texd = dr.interpolate(self.uvs[None, ...], rast_out, self.uv_indices, rast_db=rast_out_db, diff_attrs='all') 127 | irr = dr.texture(self.irrt[None, ...], texc, texd, filter_mode='linear-mipmap-linear', max_mip_level=self.max_mip_level) 128 | 129 | materials_dict = self.material_network(g_buffers[:,:,:,0:3].detach()) 130 | albedo = materials_dict['diffuse_albedo'] 131 | roughness = materials_dict['roughness'] 132 | 133 | res = self.render(g_buffers[:,:,:,3:6].detach(), albedo, roughness, g_buffers[:,:,:,0:3].detach()+1e-2*g_buffers[:,:,:,3:6].detach(), cam_position, irr) 134 | 135 | 136 | return dict(materials_dict, **res) 137 | 138 | 139 | def generate_obj(self, path_mesh): 140 | object_list = pyredner.load_obj(path_mesh, obj_group=True, return_objects=True) 141 | 142 | # for object in object_list: 143 | # object.normals = pyredner.compute_vertex_normal(object.vertices, object.indices) 144 | 145 | return object_list 146 | 147 | def render(self, normal: torch.Tensor, albedo: torch.Tensor, roughness: torch.Tensor, points: torch.Tensor, cam_position: torch.Tensor, irr): 148 | """render final color according to g buffers and IRF. 
149 | 150 | Args: 151 | normal (torch.float32): [6, cube_len, cube_len, 3] 152 | albedo (torch.float32): [6, cube_len, cube_len, 3] 153 | roughness (torch.float32): [6, cube_len, cube_len, 1] 154 | points (torch.float32): [6, cube_len, cube_len, 3] 155 | cam_position (torch.float32): [3] 156 | """ 157 | 158 | face, h, w, c = normal.shape 159 | normal = normal.reshape(-1, 3) 160 | albedo = albedo.reshape(-1, 3) 161 | roughness = roughness.reshape(-1, 1) 162 | points = points.reshape(-1, 3) 163 | irr = irr.reshape(-1, 3) 164 | view = F.normalize(cam_position.unsqueeze(0) - points, eps=1e-4) 165 | 166 | # with torch.no_grad(): 167 | # irr = hdr_recover(self.ir_radiance_network(points)) # shape : [b, 3] 168 | diffuse = irr * albedo / np.pi 169 | 170 | h_dir_specular = generate_dir(normal, self.sample_l[1], None, self.sample_type[1], roughness) 171 | vdh = torch.clamp(torch.sum( h_dir_specular * view.unsqueeze(1), dim=-1,keepdim=True), 0.0, 1.0) # shape: [b, n_sample, 1] 172 | light_dir_spec = 2 * vdh * h_dir_specular - view.unsqueeze(1) 173 | specular_lighting = self.query_irf(points.unsqueeze(1).expand_as(light_dir_spec), light_dir_spec.unsqueeze(-2).detach(), self.sample_l[1]) 174 | 175 | specular = self.specular_reflectance(specular_lighting, h_dir_specular, normal, view, light_dir_spec, roughness) / self.sample_l[1] 176 | 177 | res ={ 178 | 'rgb': ( diffuse +specular ).reshape(face, h, w, 3), 179 | 'albedo': albedo.reshape(face, h, w, 3), 180 | 'normal': normal.reshape(face, h, w, 3).detach(), 181 | 'position': (points).reshape(face, h, w, 3).detach() 182 | } 183 | return res 184 | 185 | 186 | def diffuse_reflectance(self, lighting, l, n, albedo, sample_type='uniform'): 187 | ndl = torch.clamp(torch.sum( n.unsqueeze(1) * l, dim=-1,keepdim=True), 0.0, 1.0) # shape: [b, n_sample, 1] 188 | brdf = albedo.unsqueeze(1) / np.pi 189 | 190 | if sample_type=='cosine': 191 | return torch.sum( lighting * brdf * np.pi , dim=1) 192 | return torch.sum( lighting * brdf * ndl * 2*np.pi , dim=1) 193 | 194 | def specular_reflectance(self, lighting, h, n, v, l, roughness, albedo=None): 195 | 196 | vdh = torch.clamp(torch.sum( h * v.unsqueeze(1), dim=-1,keepdim=True), 0.0, 1.0) # shape: [b, n_sample, 1] 197 | ndl = torch.clamp(torch.sum( n.unsqueeze(1) * l, dim=-1,keepdim=True), 0.0, 1.0) # shape: [b, n_sample, 1] 198 | ndh = torch.clamp(torch.sum( n.unsqueeze(1) * h, dim=-1,keepdim=True), 0.0, 1.0) # shape: [b, n_sample, 1] 199 | ndv = torch.clamp(torch.sum( n.unsqueeze(1) * v.unsqueeze(1), dim=-1,keepdim=True), 0.0, 1.0) # shape: [b, n_sample, 1] 200 | vdl = torch.clamp(torch.sum( v.unsqueeze(1) * l, dim=-1,keepdim=True), 0.0, 1.0) # shape: [b, n_sample, 1] 201 | 202 | f = 0.04 + 0.96 * torch.pow(2.0,((-5.55472*vdh-6.98316)*vdh)) 203 | 204 | k = (roughness.unsqueeze(1) + 1) * (roughness.unsqueeze(1) + 1) / 8 205 | g1_ndv = ndv / torch.clamp( ndv *(1-k) + k , min=TINY_NUMBER) 206 | g1_ndl = ndl / torch.clamp( ndl *(1-k) + k , min=TINY_NUMBER) 207 | g = g1_ndl * g1_ndv 208 | 209 | # brdf: f * d * g / (4*ndl*ndv) 210 | brdf = f * g / torch.clamp(4 * ndl * ndv, min=TINY_NUMBER) 211 | 212 | # pdf : D*ndh / (4*vdh) 213 | # equation: L = lighing * brdf * ndl 214 | return torch.sum( lighting * brdf * ndl * 4 * vdh / torch.clamp(ndh, TINY_NUMBER), dim=1) 215 | 216 | 217 | def query_irf(self, points, directions, num_sample): 218 | rays = torch.cat([points.unsqueeze(-2).expand_as(directions), directions], dim=-1) 219 | ray = o3d.core.Tensor(rays.cpu().numpy(), dtype=o3d.core.Dtype.Float32) # attention: RaycastingScene 
only support cpu 220 | intersections = self.scene.cast_rays(ray) 221 | hit = intersections['t_hit'].numpy() # shape: (b, num_sample, 1) 222 | 223 | # mask = np.isfinite(hit) # shape: (b, num_sample, 1) 224 | mask = np.logical_and(hit > 1e-4, np.isfinite(hit)) 225 | 226 | prim_id = intersections['primitive_ids'].numpy() # shape: (b, num_sample, 1) 227 | prim_uvs = intersections['primitive_uvs'].numpy() # shape: (b, num_sample, 1, 2) 228 | prim_uvs = np.clip(prim_uvs, 0., 1.) 229 | 230 | prim_id[~mask] = 0 231 | 232 | tmp = np.stack([prim_id*3+0, prim_id*3+1, prim_id*3+2], axis=0) # shape: (3, b, num_sample, 1) 233 | tmp = tmp.reshape(-1) 234 | index = self.triangle_uvs[tmp] 235 | index = index.reshape(3, self.cube_res*self.cube_res*6, num_sample,1, 2) # shape: (3, b, num_sample, 1, 2) 236 | grid = index[0,:,:,:,:] * (1-prim_uvs[:,:,:,0:1]-prim_uvs[:,:,:,1:2]) + index[1,:,:,:,:] * prim_uvs[:,:,:,0:1] + index[2,:,:,:,:] * prim_uvs[:,:,:,1:2] # shape: (b, num_sample, 1, 2) 237 | grid = torch.from_numpy(grid).float() # shape: (b, num_sample, 1, 2) 238 | grid[:,:,:,0:1] = grid[:,:,:,0:1]*2-1 239 | grid[:,:,:,1:2] = -(1-grid[:,:,:,1:2]*2) 240 | 241 | gt_ir = F.grid_sample(self.texture.expand([self.cube_res*self.cube_res*6]+list(self.texture.shape[1:])), grid, mode='bilinear', padding_mode="border",align_corners=False).permute(0,2,3,1) # shape: (b, num_sample, 1, 3) 242 | ner_mask = ~mask 243 | gt_ir[ner_mask,:] = 0 244 | gt_ir = gt_ir.reshape(self.cube_res*self.cube_res*6, num_sample, 3) 245 | return gt_ir.cuda() -------------------------------------------------------------------------------- /models/tracer_o3d_irrf.py: -------------------------------------------------------------------------------- 1 | ''' 2 | @File : tracer_o3d_irrf.py 3 | @Time : 2023/02/27 12:10:14 4 | @Author : Zhen Li 5 | @Contact : yodlee@mail.nwpu.edu.cn 6 | @Institution: Realsee 7 | @License : GNU General Public License v2.0 8 | @Desc : None 9 | ''' 10 | 11 | 12 | import cv2 13 | import numpy as np 14 | import open3d as o3d 15 | 16 | import torch 17 | import torch.nn as nn 18 | import torch.nn.functional as F 19 | from pyhocon import ConfigFactory 20 | 21 | import pyredner 22 | pyredner.set_print_timing(False) 23 | # Use GPU if available 24 | pyredner.set_use_gpu(torch.cuda.is_available()) 25 | 26 | from models.incidentNet import IRNetwork, MatNetwork 27 | from utils.sample_util import * 28 | 29 | class TracerO3d(nn.Module): 30 | def __init__(self, conf, AABB=None, is_hdr_texture=False): 31 | super().__init__() 32 | self.ir_radiance_network = MatNetwork(**conf.get_config('models.irrf_network'), AABB=AABB) 33 | 34 | # self.resolution = conf.get_list('train.env_res', default=[8,16]) # shape : (height, width) 35 | # self.resolution[0] = int(self.resolution[0]*8) # like 8spp 36 | 37 | # self.num_sample_dir = int(self.resolution[0]*self.resolution[1]) #conf.get_int('train.num_sample_dir', default=128) 38 | 39 | self.path_traced_mesh = conf.get_string('train.path_mesh_open3d') 40 | self.std_jit = conf.get_float('train.std_jit') 41 | 42 | # init ray casting scene 43 | trianglemesh = o3d.io.read_triangle_mesh(self.path_traced_mesh) # o3d tracer must use the mesh with one texture map. 
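# This tracer supervises the irradiance field (IRRF): forward() below estimates
# ground-truth irradiance at a surface point by uniform hemisphere sampling,
#     E ~= (2*pi / N) * sum_i L(x, w_i) * max(0, dot(n, w_i)),
# which is exactly the `gt_irr` computation further down. A minimal sketch of
# that estimator, kept as a comment (hypothetical standalone arrays; `dirs` are
# unit vectors drawn uniformly on the hemisphere around `normal`):
#
#     import numpy as np
#     def mc_irradiance(radiance, dirs, normal):
#         # radiance: (N, 3); dirs: (N, 3); normal: (3,)
#         ndl = np.clip(dirs @ normal, 0.0, None)[:, None]        # cosine term
#         return (radiance * ndl).sum(0) * 2.0 * np.pi / len(dirs)  # pdf = 1/(2*pi)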
44 | trianglemesh.compute_vertex_normals() 45 | # vertices = np.asarray(trianglemesh.vertices) 46 | # vertices = vertices * np.expand_dims(np.array([-1., -1., 1.]), axis=0) 47 | # trianglemesh.vertices = o3d.utility.Vector3dVector(vertices) 48 | # normals = np.asarray(trianglemesh.vertex_normals) 49 | # normals = normals * np.expand_dims(np.array([-1., -1., 1.]), axis=0) 50 | # trianglemesh.vertex_normals = o3d.utility.Vector3dVector(normals) 51 | # read extra hdr texture because open3d cannot read .hdr/.exr 52 | if is_hdr_texture: 53 | texture = cv2.imread(self.path_traced_mesh.replace("out1.obj","hdr_texture.hdr"), -1)[:,:,::-1] 54 | texture = cv2.flip(texture, 0) 55 | texture = np.asarray(texture, np.float32) 56 | self.texture = (torch.from_numpy(texture).permute(2, 0, 1).unsqueeze(0).float()) # shape: (1, H, W, 3) 57 | self.texture = self.texture * (2**conf.get_float('train.hdr_exposure')) 58 | else: 59 | texture = np.asarray(trianglemesh.textures[1]) 60 | self.texture = (torch.from_numpy(texture).permute(2, 0, 1).unsqueeze(0).float()/255.)**(2.2) # shape: (1, H, W, 3) 61 | 62 | 63 | self.triangle_uvs = np.asarray(trianglemesh.triangle_uvs) 64 | 65 | mesh = o3d.t.geometry.TriangleMesh.from_legacy(trianglemesh) 66 | # Create a scene and add the triangle mesh. 67 | self.scene = o3d.t.geometry.RaycastingScene() 68 | self.scene.add_triangles(mesh) 69 | 70 | 71 | 72 | def forward(self, points, normals, resolution, isnot_first_val=False): 73 | """ 74 | 75 | Args: 76 | points (torch.float32): shape: [b, 3] 77 | normals (torch.float32, optional): shape: [b, 3] . Defaults to None. 78 | isnot_first_val (bool): if set true, do not trace gt ir and only predict ir. 79 | 80 | Returns: 81 | gt_ir (torch.float32): shape: [b, h*w, 3] 82 | predicted_ir (torch.float32): shape: [b, h*w, 3] 83 | """ 84 | b, c = points.shape 85 | 86 | directions = self.generate_dir(normals, resolution) # shape: (b, num_sample, 1, 3) 87 | rays = torch.cat([points.unsqueeze(1).unsqueeze(1).expand_as(directions), directions], dim=-1) 88 | if not isnot_first_val: 89 | 90 | ray = o3d.core.Tensor(rays.cpu().numpy(), dtype=o3d.core.Dtype.Float32) # attention: RaycastingScene only support cpu 91 | 92 | intersections = self.scene.cast_rays(ray) 93 | hit = intersections['t_hit'].numpy() # shape: (b, num_sample, 1) 94 | 95 | # mask = np.isfinite(hit) # shape: (b, num_sample, 1) 96 | mask = np.logical_and(hit > 1e-4, np.isfinite(hit)) 97 | 98 | prim_id = intersections['primitive_ids'].numpy() # shape: (b, num_sample, 1) 99 | prim_uvs = intersections['primitive_uvs'].numpy() # shape: (b, num_sample, 1, 2) 100 | prim_uvs = np.clip(prim_uvs, 0., 1.) 
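# Each ray hit is converted into a texture lookup below: Open3D returns
# barycentric coordinates (u, v) inside the hit triangle, the texel UV is the
# barycentric mix of the triangle's three vertex UVs, and the result is remapped
# to grid_sample's [-1, 1] range. A minimal sketch with hypothetical values:
#
#     import numpy as np
#     uv0, uv1, uv2 = np.array([.2, .3]), np.array([.6, .3]), np.array([.4, .8])
#     u, v = 0.25, 0.5
#     uv = (1 - u - v) * uv0 + u * uv1 + v * uv2        # barycentric interpolation
#     grid = np.array([uv[0] * 2 - 1, uv[1] * 2 - 1])   # both axes map to [-1, 1]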
101 | 
102 | prim_id[~mask] = 0
103 | 
104 | tmp = np.stack([prim_id*3+0, prim_id*3+1, prim_id*3+2], axis=0) # shape: (3, b, num_sample, 1)
105 | tmp = tmp.reshape(-1)
106 | index = self.triangle_uvs[tmp]
107 | index = index.reshape(3, b, resolution[0],resolution[1], 2) # shape: (3, b, num_sample, 1, 2)
108 | grid = index[0,:,:,:,:] * (1-prim_uvs[:,:,:,0:1]-prim_uvs[:,:,:,1:2]) + index[1,:,:,:,:] * prim_uvs[:,:,:,0:1] + index[2,:,:,:,:] * prim_uvs[:,:,:,1:2] # shape: (b, num_sample, 1, 2)
109 | grid = torch.from_numpy(grid).float() # shape: (b, num_sample, 1, 2)
110 | grid[:,:,:,0:1] = grid[:,:,:,0:1]*2-1
111 | grid[:,:,:,1:2] = -(1-grid[:,:,:,1:2]*2)
112 | 
113 | gt_ir = F.grid_sample(self.texture.expand([b]+list(self.texture.shape[1:])), grid, mode='bilinear', padding_mode="border",align_corners=False).permute(0,2,3,1) # shape: (b, num_sample, 1, 3)
114 | ner_mask = ~mask
115 | gt_ir[ner_mask,:] = 0
116 | # # consider decay
117 | # distances = np.repeat(hit[..., np.newaxis], 3, -1)
118 | # attenuation = 1.0 / (1. + 0.14 * distances + 0.07*distances*distances)
119 | # gt_ir[mask,:] = gt_ir[mask,:] * attenuation[mask,:]
120 | # gt_irr = torch.mean(gt_ir.reshape(b, -1, 3), dim=-2)
121 | ndl = torch.clamp(torch.sum( normals.unsqueeze(1) * directions.reshape(b, -1, 3), dim=-1, keepdim=True), 0.0, 1.0) # shape: [b, n_sample, 1]
122 | gt_irr = torch.sum(gt_ir.reshape(b, -1, 3).cuda() * ndl, dim=1) * 2 * np.pi / (resolution[0]*resolution[1])
123 | 
124 | 
125 | predicted_mask_ir = self.ir_radiance_network(points) # shape: (b, 3)
126 | # predicted_mask_ir = predicted_mask_ir.reshape(b, resolution[0],resolution[1], 3)
127 | # predicted_mask_ir[~mask,:] = 0
128 | predicted_mask_ir_jit = self.ir_radiance_network(points + torch.normal(mean=0, std=self.std_jit, size=points.shape, device="cuda")) # shape: (b, 3)
129 | 
130 | res = {
131 | 'gt': gt_irr ,
132 | 'pred': predicted_mask_ir,
133 | 'pred_jit': predicted_mask_ir_jit
134 | }
135 | 
136 | return res
137 | else:
138 | predicted_mask_ir = self.ir_radiance_network(points) # shape: (b, 3)
139 | # predicted_mask_ir = predicted_mask_ir.reshape(b, resolution[0],resolution[1], 3)
140 | predicted_mask_ir_jit = self.ir_radiance_network(points + torch.normal(mean=0, std=self.std_jit, size=points.shape, device="cuda")) # shape: (b, 3)
141 | res = {
142 | 'pred': predicted_mask_ir,
143 | 'pred_jit': predicted_mask_ir_jit
144 | }
145 | return res
146 | 
147 | 
148 | def RadicalInverse(self,bits):
149 | # reverse the bit order (Van der Corput radical inverse)
150 | # swap the high and low 16 bits
151 | bits = (bits << 16) | (bits >> 16)
152 | # hex A is the bitwise complement of hex 5: swap adjacent bits
153 | bits = ((bits & 0x55555555) << 1) | ((bits & 0xAAAAAAAA) >> 1)
154 | # hex C is the bitwise complement of hex 3: swap adjacent 2-bit pairs
155 | bits = ((bits & 0x33333333) << 2) | ((bits & 0xCCCCCCCC) >> 2)
156 | bits = ((bits & 0x0F0F0F0F) << 4) | ((bits & 0xF0F0F0F0) >> 4)
157 | bits = ((bits & 0x00FF00FF) << 8) | ((bits & 0xFF00FF00) >> 8)
158 | return float(bits) * 2.3283064365386963e-10
159 | 
160 | def Hammersley(self,i,N):
161 | return [float(i)/float(N),self.RadicalInverse(i)]
162 | 
163 | def generate_dir(self, normals, resolution):
164 | b, c = normals.shape
165 | normals = normals.unsqueeze(1).unsqueeze(1).expand(b, resolution[0],resolution[1], 3)
166 | # compute projection axis
167 | x_axis = torch.zeros_like(normals).cuda() #size:(batch_size, samples, 1, 3)
168 | mask = torch.abs(normals[:,:,:,0]) > 0.99
169 | x_axis[mask, :] = torch.tensor([0., 1., 0.],dtype=torch.float32, device=normals.get_device())
170 | x_axis[~mask, :] = torch.tensor([1., 0., 0.],dtype=torch.float32, device=normals.get_device())
171 | 
172 | def norm_axis(x):
173 | return x / (torch.norm(x, dim=-1, keepdim=True) + TINY_NUMBER)
174 | 
175 | normals = norm_axis(normals)
176 | U = norm_axis(torch.cross(x_axis, normals))
177 | V = norm_axis(torch.cross( normals, U))
178 | 
179 | num_sample_dir = resolution[0]*resolution[1]
180 | samples=np.zeros((num_sample_dir,2),dtype=np.float32)
181 | for i in range(0,num_sample_dir):
182 | s = self.Hammersley(i,num_sample_dir)
183 | samples[i][0] = s[0]
184 | samples[i][1] = s[1]
185 | samples = torch.from_numpy(samples).unsqueeze(0).unsqueeze(-2).cuda() #size:(batch_size, samples, 1, 2)
186 | samples = samples.repeat(b, 1, 1, 1).detach()
187 | # samples[:,:, 0:1] = torch.clamp(samples[:,:,0:1] + torch.rand_like(samples[:,:,0:1])*0.09, 0., 1.)
188 | shift = torch.rand(b, 1, 1, 2).cuda()
189 | samples = samples + shift
190 | index1 = samples > 1.
191 | samples[index1] = samples[index1]-1.
192 | index2 = samples < 0.
193 | samples[index2] = samples[index2] + 1
194 | samples = torch.clamp(samples, 0+TINY_NUMBER, 1-TINY_NUMBER) # avoid NAN in roughness backward.
195 | samples = samples.expand(b, num_sample_dir, 1, 2).reshape(b, resolution[0], resolution[1], 2)
196 | 
197 | # ############ test sample for ordered variables, attention: cosTheta is uniformly generated instead of the elevation, so the resulting pano will show distortion in elevation.
198 | # azimuth = torch.linspace(0.,1.,256).cuda()
199 | # elevation = torch.linspace(0.,1.,128).cuda()
200 | # elevation,azimuth = torch.meshgrid([elevation,azimuth])
201 | # samples = torch.stack([elevation,azimuth], dim=-1).unsqueeze(0) #size:(batch_size, h, w, 2)
202 | 
203 | # uniform sampling, attention: we generate sampled dirs with y as the up axis and translate to our coordinates:
204 | # phi - np.pi; y = sin((np.pi/2-theta)) = costheta; y_projected = cos((np.pi/2-theta)) = sintheta
205 | phi = 2 * np.pi * samples[:,:,:,1:2] - np.pi
206 | cosTheta = (1.0 - samples[:,:,:,0:1])
207 | sinTheta = torch.sqrt(1.0 - cosTheta * cosTheta)
208 | L = V * (torch.sin(phi) * sinTheta) \
209 | + normals * cosTheta \
210 | + U * -(torch.cos(phi) * sinTheta) # [batch, num_samples, 1, 3]
211 | 
212 | 
213 | # ######### test sample directions 1, via z as up direction
214 | # theta = torch.linspace(-np.pi,np.pi,256).cuda()
215 | # phi = -torch.linspace(-np.pi/2.0,np.pi/2.0,128).cuda()
216 | # phi,theta = torch.meshgrid([phi,theta])
217 | 
218 | # v_x = torch.cos(phi) * torch.sin(theta)
219 | # v_y = torch.sin(phi)
220 | # v_z = -torch.cos(phi) * torch.cos(theta)
221 | # samples = nn.Parameter(torch.stack([v_x,v_y,v_z],dim=-1).unsqueeze(0), requires_grad=False) #size: (b, h, w ,3)
222 | 
223 | # L = V * (samples[:,:,:,0:1]) \
224 | # + normals * (samples[:,:,:,1:2]) \
225 | # + U* samples[:,:,:,2:3] # [batch, num_samples, 1, 3]
226 | 
227 | # ######### test sample direction 2
228 | # theta = torch.linspace(-np.pi,np.pi,256).cuda()
229 | # phi = -torch.linspace(-np.pi/2.0,np.pi/2.0,128).cuda()
230 | # phi,theta = torch.meshgrid([phi,theta])
231 | 
232 | # v_x = torch.cos(phi) * torch.sin(theta)
233 | # v_y = torch.sin(phi)
234 | # v_z = -torch.cos(phi) * torch.cos(theta)
235 | 
236 | # L = nn.Parameter(torch.stack([v_x,v_y,v_z],dim=-1).unsqueeze(0), requires_grad=False) #size: (b, h, w ,3)
237 | 
238 | return L
239 | 
240 | 
241 | 
242 | 
243 | 
244 | 
245 | 
246 | if __name__=="__main__":
247 | conf = ConfigFactory.parse_file('./configs/default.conf')
248 | tracer = TracerO3d(conf)
249 | test_points = torch.tensor([-0.295, 0.104, -1.523]).unsqueeze(0)
250 | test_points = (test_points + 0.1 * torch.tensor([-1., 0.,
0.]).unsqueeze(0)).cuda() 251 | radiance = tracer(test_points, torch.tensor([-1., 0., 0.]).unsqueeze(0).cuda()) 252 | env_res = conf.get_list('train.env_res', default=[8,16]) # shape : (height, width) 253 | radiance = radiance[0].reshape(env_res[0], env_res[1], 3) 254 | print(radiance.shape) 255 | cv2.imwrite("../results/test_house/env_hemi_open3d.jpg", radiance.cpu().numpy()[:,:,::-1]*255.0) 256 | 257 | -------------------------------------------------------------------------------- /trainer/train_irf.py: -------------------------------------------------------------------------------- 1 | ''' 2 | @File : train_irf.py 3 | @Time : 2023/02/27 12:11:21 4 | @Author : Zhen Li 5 | @Contact : yodlee@mail.nwpu.edu.cn 6 | @Institution: Realsee 7 | @License : GNU General Public License v2.0 8 | @Desc : None 9 | ''' 10 | 11 | 12 | import os 13 | import sys 14 | from datetime import datetime 15 | import time 16 | import itertools 17 | 18 | import imageio 19 | import numpy as np 20 | import torch 21 | from pyhocon import ConfigFactory 22 | from tensorboardX import SummaryWriter 23 | 24 | import utils.general as utils 25 | import utils.plots as plt 26 | from models.loss import IRFLoss 27 | 28 | class IRFTrainRunner(): 29 | def __init__(self,**kwargs): 30 | torch.set_default_dtype(torch.float32) 31 | torch.set_num_threads(1) 32 | 33 | self.conf = ConfigFactory.parse_file(kwargs['conf']) 34 | self.exps_folder_name = kwargs['exps_folder_name'] 35 | self.train_batch_size = self.conf.get_int('train.batch_size') 36 | self.val_batch_size = self.conf.get_int('val.batch_size') 37 | self.nepochs = self.conf.get_int('train.irf_epoch') 38 | self.max_niters = kwargs['max_niters'] 39 | self.GPU_INDEX = kwargs['gpu_index'] 40 | self.is_hdr_texture = self.conf.get_bool('train.is_hdr_texture') 41 | 42 | self.expname = 'IRF-' + kwargs['expname'] 43 | 44 | if kwargs['is_continue'] and kwargs['timestamp'] == 'latest': 45 | if os.path.exists(os.path.join('../',kwargs['exps_folder_name'],self.expname)): 46 | timestamps = os.listdir(os.path.join('../',kwargs['exps_folder_name'],self.expname)) 47 | if (len(timestamps)) == 0: 48 | is_continue = False 49 | timestamp = None 50 | else: 51 | timestamp = sorted(timestamps)[-1] 52 | is_continue = True 53 | else: 54 | is_continue = False 55 | timestamp = None 56 | else: 57 | timestamp = kwargs['timestamp'] 58 | is_continue = kwargs['is_continue'] 59 | 60 | utils.mkdir_ifnotexists(os.path.join('../',self.exps_folder_name)) 61 | self.expdir = os.path.join('../', self.exps_folder_name, self.expname) 62 | utils.mkdir_ifnotexists(self.expdir) 63 | self.timestamp = '{:%Y_%m_%d_%H_%M_%S}'.format(datetime.now()) 64 | utils.mkdir_ifnotexists(os.path.join(self.expdir, self.timestamp)) 65 | 66 | self.plots_dir = os.path.join(self.expdir, self.timestamp, 'plots') 67 | utils.mkdir_ifnotexists(self.plots_dir) 68 | 69 | # create checkpoints dirs 70 | self.checkpoints_path = os.path.join(self.expdir, self.timestamp, 'checkpoints') 71 | utils.mkdir_ifnotexists(self.checkpoints_path) 72 | self.model_params_subdir = "ModelParameters" 73 | self.irf_optimizer_params_subdir = "IRFOptimizerParameters" 74 | self.irf_scheduler_params_subdir = "IRFSchedulerParameters" 75 | utils.mkdir_ifnotexists(os.path.join(self.checkpoints_path, self.model_params_subdir)) 76 | utils.mkdir_ifnotexists(os.path.join(self.checkpoints_path, self.irf_optimizer_params_subdir)) 77 | utils.mkdir_ifnotexists(os.path.join(self.checkpoints_path, self.irf_scheduler_params_subdir)) 78 | 79 | 80 | print('Write tensorboard to: ', 
os.path.join(self.expdir, self.timestamp)) 81 | self.writer = SummaryWriter(os.path.join(self.expdir, self.timestamp)) 82 | 83 | os.system("""cp -r {0} "{1}" """.format(kwargs['conf'], os.path.join(self.expdir, self.timestamp, 'runconf.conf'))) 84 | 85 | if (not self.GPU_INDEX == 'ignore'): 86 | os.environ["CUDA_VISIBLE_DEVICES"] = '{0}'.format(self.GPU_INDEX) 87 | 88 | print('shell command : {0}'.format(' '.join(sys.argv))) 89 | 90 | print('Loading training data ...') 91 | self.train_dataset = utils.get_class(self.conf.get_string('train.dataset_class'))( 92 | self.conf.get_string('train.path_mesh_open3d'), self.conf.get_int('train.samples_point_mesh')) 93 | print("train data len: {}".format(self.train_dataset.__len__())) 94 | self.AABB = self.train_dataset.get_AABB() 95 | print('Finish loading training data ...') 96 | print('Loading val data ...') 97 | self.val_dataset = utils.get_class(self.conf.get_string('val.dataset_class'))( 98 | self.conf.get_string('train.path_mesh_open3d'), self.conf.get_list('val.env_res')) 99 | print("val data len: {}".format(self.val_dataset.__len__())) 100 | print('Finish loading val data ...') 101 | 102 | 103 | self.train_dataloader = torch.utils.data.DataLoader(self.train_dataset, 104 | batch_size=self.train_batch_size, 105 | shuffle=True 106 | ) 107 | self.plot_dataloader = torch.utils.data.DataLoader(self.val_dataset, 108 | batch_size=self.val_batch_size, 109 | shuffle=False 110 | ) 111 | 112 | self.model = utils.get_class(self.conf.get_string('train.model_class'))(conf=self.conf, AABB=self.AABB, is_hdr_texture=self.is_hdr_texture) 113 | 114 | if torch.cuda.is_available(): 115 | self.model.cuda() 116 | 117 | self.irf_loss = utils.get_class(self.conf.get_string('train.irf_loss_class'))(**self.conf.get_config('irf_loss')) 118 | self.irf_optimizer = torch.optim.Adam(self.model.incident_radiance_network.parameters(), 119 | lr=self.conf.get_float('train.irf_learning_rate')) 120 | # self.irf_optimizer = torch.optim.Adam([ 121 | # {'params': self.model.incident_radiance_network.parameters(), 'lr': self.conf.get_float('train.irf_learning_rate'), 'eps': 1e-15}, 122 | # {'params': self.model.incident_radiance_network.embeder_param, 'lr': self.conf.get_float('train.irf_learning_rate'), 'eps': 1e-15, 'weight_decay': 1e-6} 123 | # ]) 124 | # self.irf_scheduler = torch.optim.lr_scheduler.MultiStepLR(self.irf_optimizer, 125 | # self.conf.get_list('train.irf_sched_milestones', default=[]), 126 | # gamma=self.conf.get_float('train.irf_sched_factor', default=0.0)) 127 | self.irf_scheduler = torch.optim.lr_scheduler.StepLR(self.irf_optimizer, 128 | self.conf.get_int('train.irf_sched_step', default=1000), 129 | gamma=self.conf.get_float('train.irf_sched_factor', default=0.0)) 130 | 131 | 132 | self.start_epoch = 0 133 | if is_continue: 134 | old_checkpnts_dir = os.path.join(self.expdir, timestamp, 'checkpoints') 135 | 136 | print('Loading pretrained model: ', os.path.join( 137 | old_checkpnts_dir, self.model_params_subdir, str(kwargs['checkpoint']) + ".pth")) 138 | 139 | saved_model_state = torch.load( 140 | os.path.join(old_checkpnts_dir, self.model_params_subdir, str(kwargs['checkpoint']) + ".pth")) 141 | self.model.load_state_dict(saved_model_state["model_state_dict"]) 142 | self.start_epoch = saved_model_state['epoch'] 143 | 144 | data = torch.load( 145 | os.path.join(old_checkpnts_dir, self.irf_optimizer_params_subdir, str(kwargs['checkpoint']) + ".pth")) 146 | self.irf_optimizer.load_state_dict(data["optimizer_state_dict"]) 147 | 148 | data = torch.load( 149 | 
os.path.join(old_checkpnts_dir, self.irf_scheduler_params_subdir, str(kwargs['checkpoint']) + ".pth")) 150 | self.irf_scheduler.load_state_dict(data["scheduler_state_dict"]) 151 | 152 | 153 | self.n_batches = len(self.train_dataloader) 154 | self.plot_freq = self.conf.get_int('train.plot_freq') 155 | self.ckpt_freq = self.conf.get_int('train.ckpt_freq') 156 | self.val_gt = None 157 | self.first_val = True 158 | 159 | self.train_resolution = self.conf.get_list('train.env_res', default=[8,16]) # shape : (height, width) 160 | self.val_resolution = self.conf.get_list('train.val_sample_res', default=[8,16]) # shape : (height, width) 161 | 162 | 163 | def save_checkpoints(self, epoch): 164 | # torch.save( 165 | # {"epoch": epoch, "model_state_dict": self.model.state_dict()}, 166 | # os.path.join(self.checkpoints_path, self.model_params_subdir, str(epoch) + ".pth")) 167 | torch.save( 168 | {"epoch": epoch, "model_state_dict": self.model.state_dict()}, 169 | os.path.join(self.checkpoints_path, self.model_params_subdir, "latest.pth")) 170 | 171 | 172 | def plot_to_disk(self): 173 | self.model.eval() 174 | 175 | env_res = self.conf.get_list('val.env_res') 176 | self.val_dataset.arrange_buffers() 177 | 178 | len_val = len(self.plot_dataloader) 179 | print("val data num_batch: {}".format(len_val)) 180 | 181 | if self.first_val: 182 | self.val_gt = torch.zeros(env_res[0]*env_res[1], 3) 183 | pred_ir = torch.zeros(env_res[0]*env_res[1], 3) 184 | for data_index, (one_sample) in enumerate(self.plot_dataloader): 185 | 186 | if(data_index % len_val == int(len_val/2)): 187 | print("val : {}/{} finished!".format(data_index, len_val)) 188 | points = one_sample['point'].float().cuda() 189 | normals = one_sample['normal'].float().cuda() 190 | 191 | if self.first_val: 192 | res = self.model(points, normals, self.val_resolution) 193 | batch_gt_ir, batch_predicted_ir = res['gt'], res['pred'] 194 | b, num, c = batch_gt_ir.shape 195 | batch_gt_ir = torch.sum(batch_gt_ir, dim=1)/num # multiply 2 if use render pano because only half directions are used. 
196 | batch_predicted_ir = torch.sum(batch_predicted_ir, dim=1)/num 197 | 198 | self.val_gt[data_index*b:(data_index+1)*b] = batch_gt_ir.cpu().detach() 199 | pred_ir[data_index*b:(data_index+1)*b] = batch_predicted_ir.cpu().detach() 200 | else: 201 | res = self.model(points, normals, self.val_resolution, True) 202 | batch_predicted_ir = res['pred'] 203 | b, num, c = batch_predicted_ir.shape 204 | batch_predicted_ir = torch.sum(batch_predicted_ir, dim=1)/num 205 | 206 | pred_ir[data_index*b:(data_index+1)*b] = batch_predicted_ir.cpu().detach() 207 | 208 | gt_ir = self.val_gt.reshape(env_res[0], env_res[1], 3) 209 | pred_ir = utils.hdr_recover(pred_ir.reshape(env_res[0], env_res[1], 3)) 210 | 211 | plt.plot_irf( 212 | self.plots_dir, 213 | self.cur_iter, 214 | gt_ir, 215 | pred_ir 216 | ) 217 | 218 | self.model.train() 219 | self.first_val = False 220 | 221 | def run(self): 222 | print("training...") 223 | self.cur_iter = self.start_epoch * len(self.train_dataloader) 224 | 225 | for epoch in range(self.start_epoch, self.nepochs + 1): 226 | self.train_dataset.change_points() 227 | 228 | # if self.cur_iter > self.max_niters: 229 | # self.save_checkpoints(epoch) 230 | # self.plot_to_disk() 231 | # print('Training has reached max number of iterations: {}; exiting...'.format(self.cur_iter)) 232 | # exit(0) 233 | 234 | for data_index, (one_sample) in enumerate(self.train_dataloader): 235 | one_batch_start_time = time.time() 236 | self.model.train() 237 | 238 | if self.cur_iter % self.ckpt_freq == 0: 239 | self.save_checkpoints(epoch) 240 | 241 | # if self.cur_iter % self.plot_freq == 0 and not self.cur_iter == 0: 242 | if self.cur_iter % self.plot_freq == 0: 243 | self.plot_to_disk() 244 | 245 | points = one_sample['point'].float().cuda() 246 | normals = one_sample['normal'].float().cuda() 247 | res= self.model(points, normals, self.train_resolution) 248 | radiance_loss = self.irf_loss(res) 249 | 250 | # update irf 251 | self.irf_optimizer.zero_grad() 252 | radiance_loss.backward() 253 | self.irf_optimizer.step() 254 | 255 | 256 | if self.cur_iter % 50 == 0: 257 | print('{0} [{1}] ({2}/{3}): radiance_loss = {4}, batch cost time : {5:.4f}s' 258 | .format(self.expname, epoch, data_index, self.n_batches, 259 | radiance_loss.item(), time.time()-one_batch_start_time )) 260 | self.writer.add_scalar('radiance_loss', radiance_loss.item(), self.cur_iter) 261 | 262 | self.cur_iter += 1 263 | self.irf_scheduler.step() 264 | 265 | -------------------------------------------------------------------------------- /models/tracer_o3d_pil.py: -------------------------------------------------------------------------------- 1 | ''' 2 | @File : tracer_o3d_pil.py 3 | @Time : 2023/02/27 12:10:31 4 | @Author : Zhen Li 5 | @Contact : yodlee@mail.nwpu.edu.cn 6 | @Institution: Realsee 7 | @License : GNU General Public License v2.0 8 | @Desc : None 9 | ''' 10 | 11 | 12 | import cv2 13 | import numpy as np 14 | import open3d as o3d 15 | 16 | import torch 17 | import torch.nn as nn 18 | import torch.nn.functional as F 19 | from pyhocon import ConfigFactory 20 | 21 | import pyredner 22 | pyredner.set_print_timing(False) 23 | # Use GPU if available 24 | pyredner.set_use_gpu(torch.cuda.is_available()) 25 | 26 | from models.incidentNet import IRNetwork, MatNetwork, PILNetwork 27 | from utils.sample_util import * 28 | 29 | class TracerO3d(nn.Module): 30 | def __init__(self, conf, AABB=None, is_hdr_texture=False): 31 | super().__init__() 32 | self.ir_radiance_network = PILNetwork(**conf.get_config('models.irrf_network'), AABB=AABB) 
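# PILNetwork models pre-integrated (split-sum) lighting: incoming radiance is
# pre-convolved per roughness level, and the 256x256 BRDF LUT shipped under
# models/ (bsdf_256_256.bin, referenced just below) folds the Fresnel/geometry
# terms. The binary layout is not shown in this repo dump; a plausible loader,
# kept as a comment, with the 2-channel (scale, bias) split-sum layout as an
# assumption:
#
#     import numpy as np, torch
#     def load_brdf_lut(path, res=256, channels=2):
#         lut = np.fromfile(path, dtype=np.float32)
#         return torch.from_numpy(lut.reshape(res, res, channels))  # indexed by (NdotV, roughness)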
33 | 
34 | # self.resolution = conf.get_list('train.env_res', default=[8,16]) # shape : (height, width)
35 | # self.resolution[0] = int(self.resolution[0]*8) # like 8spp
36 | 
37 | # self.num_sample_dir = int(self.resolution[0]*self.resolution[1]) #conf.get_int('train.num_sample_dir', default=128)
38 | 
39 | self.path_traced_mesh = conf.get_string('train.path_mesh_open3d')
40 | self.path_lut = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'bsdf_256_256.bin')
41 | 
42 | # init ray casting scene
43 | trianglemesh = o3d.io.read_triangle_mesh(self.path_traced_mesh) # o3d tracer must use the mesh with one texture map.
44 | trianglemesh.compute_vertex_normals()
45 | # vertices = np.asarray(trianglemesh.vertices)
46 | # vertices = vertices * np.expand_dims(np.array([-1., -1., 1.]), axis=0)
47 | # trianglemesh.vertices = o3d.utility.Vector3dVector(vertices)
48 | # normals = np.asarray(trianglemesh.vertex_normals)
49 | # normals = normals * np.expand_dims(np.array([-1., -1., 1.]), axis=0)
50 | # trianglemesh.vertex_normals = o3d.utility.Vector3dVector(normals)
51 | # read extra hdr texture because open3d cannot read .hdr/.exr
52 | if is_hdr_texture:
53 | texture = cv2.imread(self.path_traced_mesh.replace("out1.obj","hdr_texture.hdr"), -1)[:,:,::-1]
54 | texture = cv2.flip(texture, 0)
55 | texture = np.asarray(texture, np.float32)
56 | self.texture = (torch.from_numpy(texture).permute(2, 0, 1).unsqueeze(0).float()) # shape: (1, 3, H, W)
57 | self.texture = self.texture * (2**conf.get_float('train.hdr_exposure'))
58 | else:
59 | texture = np.asarray(trianglemesh.textures[1])
60 | self.texture = (torch.from_numpy(texture).permute(2, 0, 1).unsqueeze(0).float()/255.)**(2.2) # shape: (1, 3, H, W)
61 | 
62 | 
63 | self.triangle_uvs = np.asarray(trianglemesh.triangle_uvs)
64 | 
65 | mesh = o3d.t.geometry.TriangleMesh.from_legacy(trianglemesh)
66 | # Create a scene and add the triangle mesh.
67 | self.scene = o3d.t.geometry.RaycastingScene()
68 | self.scene.add_triangles(mesh)
69 | 
70 | 
71 | 
72 | def forward(self, points, normals, resolution, isnot_first_val=False):
73 | """
74 | 
75 | Args:
76 | points (torch.float32): shape: [b, 3]
77 | normals (torch.float32, optional): shape: [b, 3] . Defaults to None.
78 | isnot_first_val (bool): if set to True, skip tracing the ground-truth IR and only predict it.

        Returns:
            gt_irr (torch.float32): BRDF-weighted integrated lighting, shape: [b, 3]
            predicted_ir (torch.float32): network prediction, shape: [b, 3]
        """
        b, c = points.shape

        n_r = 1
        roughness = torch.rand(b, n_r).reshape(-1, 1).cuda()   # shape: [b, 1]
        # points = points.repeat(n_r, 1)   # shape: [b*n_r, 3]
        # normals = normals.repeat(n_r, 1)   # shape: [b*n_r, 3]
        h_samples = generate_dir(normals, resolution[0]*resolution[1], None, 'importance', roughness).reshape(b, resolution[0], resolution[1], 3)

        n_wo = 1
        wo = generate_dir(normals, n_wo, samples=None, mode='uniform', roughness=None, pre_mode='independent')   # shape: [b, n_wo, 3]

        vdh = torch.clamp(torch.sum( wo.unsqueeze(1) * h_samples, dim=-1, keepdim=True), 0.0, 1.0)   # shape: [b, n_sample[0], n_sample[1], 1]
        directions = 2 * vdh * h_samples - wo.unsqueeze(1)   # reflect wo about the sampled half-vectors; shape: (b, n_sample[0], n_sample[1], 3)
        # directions = self.generate_dir(normals, resolution)   # shape: (b, num_sample, 1, 3)
        rays = torch.cat([points.unsqueeze(1).unsqueeze(1).expand_as(directions), directions], dim=-1)
        if not isnot_first_val:

            ray = o3d.core.Tensor(rays.cpu().numpy(), dtype=o3d.core.Dtype.Float32)   # attention: RaycastingScene supports CPU only

            intersections = self.scene.cast_rays(ray)
            hit = intersections['t_hit'].numpy()   # shape: (b, num_sample, 1)

            # mask = np.isfinite(hit)   # shape: (b, num_sample, 1)
            mask = np.logical_and(hit > 1e-4, np.isfinite(hit))

            prim_id = intersections['primitive_ids'].numpy()   # shape: (b, num_sample, 1)
            prim_uvs = intersections['primitive_uvs'].numpy()   # shape: (b, num_sample, 1, 2)
            prim_uvs = np.clip(prim_uvs, 0., 1.)

            prim_id[~mask] = 0

            tmp = np.stack([prim_id*3+0, prim_id*3+1, prim_id*3+2], axis=0)   # shape: (3, b, num_sample, 1)
            tmp = tmp.reshape(-1)
            index = self.triangle_uvs[tmp]
            index = index.reshape(3, b, resolution[0], resolution[1], 2)   # shape: (3, b, num_sample, 1, 2)
            grid = index[0,:,:,:,:] * (1-prim_uvs[:,:,:,0:1]-prim_uvs[:,:,:,1:2]) + index[1,:,:,:,:] * prim_uvs[:,:,:,0:1] + index[2,:,:,:,:] * prim_uvs[:,:,:,1:2]   # shape: (b, num_sample, 1, 2)
            grid = torch.from_numpy(grid).float()   # shape: (b, num_sample, 1, 2)
            grid[:,:,:,0:1] = grid[:,:,:,0:1]*2-1
            grid[:,:,:,1:2] = -(1-grid[:,:,:,1:2]*2)

            gt_ir = F.grid_sample(self.texture.expand([b]+list(self.texture.shape[1:])), grid, mode='bilinear', padding_mode="border", align_corners=False).permute(0,2,3,1)   # shape: (b, num_sample, 1, 3)
            miss_mask = ~mask
            gt_ir[miss_mask,:] = 0
            # # consider decay
            # distances = np.repeat(hit[..., np.newaxis], 3, -1)
            # attenuation = 1.0 / (1. + 0.14 * distances + 0.07*distances*distances)
            # gt_ir[mask,:] = gt_ir[mask,:] * attenuation[mask,:]
            # gt_irr = torch.mean(gt_ir.reshape(b, -1, 3), dim=-2)
            ndl = torch.clamp(torch.sum( normals.unsqueeze(1) * directions.reshape(b, -1, 3), dim=-1, keepdim=True), 0.0, 1.0)   # shape: [b, n_sample, 1]
            gt_irr = torch.sum(gt_ir.reshape(b, -1, 3).cuda() * ndl, dim=1) * 2 * np.pi / (resolution[0]*resolution[1])


            predicted_mask_ir = self.ir_radiance_network(points, wo.squeeze(1), roughness)   # shape: (b, 3)
            # predicted_mask_ir = predicted_mask_ir.reshape(b, resolution[0], resolution[1], 3)
            # predicted_mask_ir[~mask,:] = 0

            return gt_irr, predicted_mask_ir
        else:
            predicted_mask_ir = self.ir_radiance_network(points, wo.squeeze(1), roughness)   # shape: (b, 3)
            # predicted_mask_ir = predicted_mask_ir.reshape(b, resolution[0], resolution[1], 3)
            return predicted_mask_ir


    def RadicalInverse(self, bits):
        # Van der Corput radical inverse in base 2, via 32-bit bit reversal
        # swap the high and low 16-bit halves (masked to emulate uint32 overflow)
        bits = ((bits << 16) | (bits >> 16)) & 0xFFFFFFFF
        # swap neighboring single bits (0xA... is the bitwise complement of 0x5...)
        bits = ((bits & 0x55555555) << 1) | ((bits & 0xAAAAAAAA) >> 1)
        # swap neighboring 2-bit pairs (0xC... is the bitwise complement of 0x3...)
        bits = ((bits & 0x33333333) << 2) | ((bits & 0xCCCCCCCC) >> 2)
        bits = ((bits & 0x0F0F0F0F) << 4) | ((bits & 0xF0F0F0F0) >> 4)
        bits = ((bits & 0x00FF00FF) << 8) | ((bits & 0xFF00FF00) >> 8)
        return float(bits) * 2.3283064365386963e-10   # * 2^-32

    def Hammersley(self, i, N):
        return [float(i)/float(N), self.RadicalInverse(i)]

    def generate_dir(self, normals, resolution):
        b, c = normals.shape
        normals = normals.unsqueeze(1).unsqueeze(1).expand(b, resolution[0], resolution[1], 3)
        # compute projection axis
        x_axis = torch.zeros_like(normals).cuda()   # size: (batch_size, samples, 1, 3)
        mask = torch.abs(normals[:,:,:,0]) > 0.99
        x_axis[mask, :] = torch.tensor([0., 1., 0.], dtype=torch.float32, device=normals.get_device())
        x_axis[~mask, :] = torch.tensor([1., 0., 0.], dtype=torch.float32, device=normals.get_device())

        def norm_axis(x):
            return x / (torch.norm(x, dim=-1, keepdim=True) + TINY_NUMBER)

        normals = norm_axis(normals)
        U = norm_axis(torch.cross(x_axis, normals))
        V = norm_axis(torch.cross( normals, U))

        num_sample_dir = resolution[0]*resolution[1]
        samples = np.zeros((num_sample_dir, 2), dtype=np.float32)
        for i in range(0, num_sample_dir):
            s = self.Hammersley(i, num_sample_dir)
            samples[i][0] = s[0]
            samples[i][1] = s[1]
        samples = torch.from_numpy(samples).unsqueeze(0).unsqueeze(-2).cuda()   # size: (1, samples, 1, 2)
        samples = samples.repeat(b, 1, 1, 1).detach()
        # samples[:,:, 0:1] = torch.clamp(samples[:,:,0:1] + torch.rand_like(samples[:,:,0:1])*0.09, 0., 1.)
        shift = torch.rand(b, 1, 1, 2).cuda()
        samples = samples + shift   # randomly shift the sequence per batch, then wrap back into [0, 1]
        index1 = samples > 1.
        samples[index1] = samples[index1] - 1.
        index2 = samples < 0.
        samples[index2] = samples[index2] + 1
        samples = torch.clamp(samples, 0+TINY_NUMBER, 1-TINY_NUMBER)   # avoid NaN in the roughness backward pass.
        samples = samples.expand(b, num_sample_dir, 1, 2).reshape(b, resolution[0], resolution[1], 2)

        # ############ test samples for an ordered variable; attention: cosTheta (not elevation) is generated uniformly, so the resulting pano shows distortion along elevation.
        # azimuth = torch.linspace(0.,1.,256).cuda()
        # elevation = torch.linspace(0.,1.,128).cuda()
        # elevation,azimuth = torch.meshgrid([elevation,azimuth])
        # samples = torch.stack([elevation,azimuth], dim=-1).unsqueeze(0)   # size: (batch_size, h, w, 2)

        # uniform sampling; attention: directions are generated with y as the up axis and then translated to our coordinates:
        # phi - np.pi; y = sin((np.pi/2-theta)) = costheta; y_projected = cos((np.pi/2-theta)) = sintheta
        phi = 2 * np.pi * samples[:,:,:,1:2] - np.pi
        cosTheta = (1.0 - samples[:,:,:,0:1])
        sinTheta = torch.sqrt(1.0 - cosTheta * cosTheta)
        L = V * (torch.sin(phi) * sinTheta) \
            + normals * cosTheta \
            + U * -(torch.cos(phi) * sinTheta)   # [batch, num_samples, 1, 3]


        # ######### test sample directions 1, with z as the up direction
        # theta = torch.linspace(-np.pi,np.pi,256).cuda()
        # phi = -torch.linspace(-np.pi/2.0,np.pi/2.0,128).cuda()
        # phi,theta = torch.meshgrid([phi,theta])

        # v_x = torch.cos(phi) * torch.sin(theta)
        # v_y = torch.sin(phi)
        # v_z = -torch.cos(phi) * torch.cos(theta)
        # samples = nn.Parameter(torch.stack([v_x,v_y,v_z],dim=-1).unsqueeze(0), requires_grad=False)   # size: (b, h, w, 3)

        # L = V * (samples[:,:,:,0:1]) \
        #     + normals * (samples[:,:,:,1:2]) \
        #     + U * samples[:,:,:,2:3]   # [batch, num_samples, 1, 3]

        # ######### test sample directions 2
        # theta = torch.linspace(-np.pi,np.pi,256).cuda()
        # phi = -torch.linspace(-np.pi/2.0,np.pi/2.0,128).cuda()
        # phi,theta = torch.meshgrid([phi,theta])

        # v_x = torch.cos(phi) * torch.sin(theta)
        # v_y = torch.sin(phi)
        # v_z = -torch.cos(phi) * torch.cos(theta)

        # L = nn.Parameter(torch.stack([v_x,v_y,v_z],dim=-1).unsqueeze(0), requires_grad=False)   # size: (b, h, w, 3)

        return L







if __name__=="__main__":
    conf = ConfigFactory.parse_file('./configs/default.conf')
    tracer = TracerO3d(conf).cuda()
    test_points = torch.tensor([-0.295, 0.104, -1.523]).unsqueeze(0)
    test_points = (test_points + 0.1 * torch.tensor([-1., 0., 0.]).unsqueeze(0)).cuda()
    env_res = conf.get_list('train.env_res', default=[8,16])   # shape : (height, width)
    gt_irr, pred_irr = tracer(test_points, torch.tensor([-1., 0., 0.]).unsqueeze(0).cuda(), env_res)
    print(gt_irr.shape, pred_irr.shape)   # both are integrated lighting values of shape (1, 3)
    cv2.imwrite("../results/test_house/env_hemi_open3d.jpg", gt_irr.reshape(1, 1, 3).cpu().detach().numpy()[:,:,::-1]*255.0)

--------------------------------------------------------------------------------
/models/mat_redner.py:
--------------------------------------------------------------------------------
'''
@File : mat_redner.py
@Time : 2023/02/27 12:10:00
@Author : Zhen Li
@Contact : yodlee@mail.nwpu.edu.cn
@Institution: Realsee
@License : GNU General Public License v2.0
@Desc : None
'''


import cv2
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F
from pyhocon import ConfigFactory
import open3d as o3d

import pyredner
pyredner.set_print_timing(False)
# Use GPU if available
pyredner.set_use_gpu(torch.cuda.is_available())
# pyredner.set_use_gpu(False)

from models.incidentNet import IRNetwork
from utils.sample_util import TINY_NUMBER, generate_dir
from utils.general import hdr_recover


class MaterialModel(nn.Module):
    def __init__(self, conf, ids, extrinsics, optim_cam=False, gt_irf=True):
        super().__init__()
        self.incident_radiance_network = IRNetwork(**conf.get_config('models.incident_radiance_network'))

        self.path_traced_mesh = conf.get_string('train.path_mesh_open3d')
        self.pano_res = conf.get_list('train.pano_img_res', default=[256,512])   # shape : (height, width)
        self.sample_l = conf.get_list('train.sample_light', default=[64,64])   # number of samples : (diffuse, specular)
        self.optim_cam = optim_cam
        self.ids = ids
        self.extrinsics = extrinsics
        self.sample_type = conf.get_list('models.render.sample_type', default=['uniform','importance'])

        self.object_list = self.generate_obj(self.path_traced_mesh)
        # assume that there only exists one mesh and one texture
        # self.materials = nn.Parameter(torch.ones((3600, 3600, 3))*0.5, requires_grad=True)
        self.materials_albedo = []
        for object in self.object_list:
            # self.materials_albedo.append(nn.Parameter( torch.ones_like(object.material.diffuse_reflectance.texels)*0.5, requires_grad=True))
            self.materials_albedo.append(nn.Parameter( torch.ones((2048, 2048, 3))*0.5, requires_grad=True))
        self.materials_albedo = nn.ParameterList(self.materials_albedo)

        self.materials_roughness = []
        for object in self.object_list:
            # self.materials_roughness.append(nn.Parameter( torch.ones(list(object.material.diffuse_reflectance.texels.shape[:-1]) + [1])*0.5, requires_grad=True))
            self.materials_roughness.append(nn.Parameter( torch.ones((2048, 2048, 1))*0.4, requires_grad=True))
        self.materials_roughness = nn.ParameterList(self.materials_roughness)


        if optim_cam:
            self.param_extrinsics = {}
            for i in range(len(ids)):
                self.param_extrinsics.update({
                    self.ids[i]: nn.Parameter(self.extrinsics[i], requires_grad=True)
                })
            self.param_extrinsics = nn.ParameterDict(self.param_extrinsics)

        if gt_irf:
            self.path_traced_mesh = conf.get_string('train.path_mesh_open3d')
            # init ray casting scene
            trianglemesh = o3d.io.read_triangle_mesh(self.path_traced_mesh)   # the o3d tracer must use a mesh with a single texture map.
            # trianglemesh.compute_vertex_normals()
            texture = cv2.imread(self.path_traced_mesh.replace("_smooth.obj","_hdr_ccm.hdr"), -1)[:,:,::-1]
            texture = cv2.flip(texture, 0)
            texture = np.asarray(texture, np.float32)
            self.texture = (torch.from_numpy(texture).permute(2, 0, 1).unsqueeze(0).float())   # shape: (1, 3, H, W)
            self.texture = self.texture * (2**7)   # hard-coded exposure of 7 stops
            # texture = np.asarray(trianglemesh.textures[1])
            # self.texture = (torch.from_numpy(texture).permute(2, 0, 1).unsqueeze(0).float()/255.)**(2.2)   # shape: (1, 3, H, W)

            self.triangle_uvs = np.asarray(trianglemesh.triangle_uvs)
            mesh = o3d.t.geometry.TriangleMesh.from_legacy(trianglemesh)
            # Create a scene and add the triangle mesh.
            self.scene = o3d.t.geometry.RaycastingScene()
            self.scene.add_triangles(mesh)




    def forward(self, cam_to_world, id, cam_position):
        """ assume that the batch_size is 1.

        Args:
            cam_to_world (torch.float32): shape: [4, 4].
            id (key): camera identifier used to look up the optimized extrinsics.
            cam_position (torch.float32): shape: [3].

        Returns:
            img (torch.float32): rendered panorama, shape: [1, h, w, 3]
        """

        for index in range(len(self.object_list)):
            self.object_list[index].material.diffuse_reflectance = pyredner.Texture(self.materials_albedo[index])
            self.object_list[index].material.generic_texture = pyredner.Texture(self.materials_roughness[index])
        if self.optim_cam:
            camera = pyredner.Camera(cam_to_world=self.param_extrinsics[id],
                                     camera_type=pyredner.camera_type.panorama,
                                     resolution=(self.pano_res[0], self.pano_res[1]),
                                     clip_near = 1e-2,   # needs to be > 0
                                     fisheye = False
                                     )
        else:
            camera = pyredner.Camera(cam_to_world=cam_to_world,
                                     camera_type=pyredner.camera_type.panorama,
                                     resolution=(self.pano_res[0], self.pano_res[1]),
                                     clip_near = 1e-2,   # needs to be > 0
                                     fisheye = False
                                     )

        scene = pyredner.Scene(camera=camera, objects=self.object_list)
        # imgs = pyredner.render_g_buffer(scene=scene, channels=[pyredner.channels.diffuse_reflectance], num_samples=[1,1])   # shape: (env_h, env_w, 3)
        # albedo: [0:3], roughness: [3:4], position: [4:7], normal: [7:10]
        g_buffers = pyredner.render_g_buffer(scene=scene, \
            channels=[
                pyredner.channels.diffuse_reflectance, \
                pyredner.channels.generic_texture, \
                pyredner.channels.position,
                pyredner.channels.geometry_normal], num_samples=[4,2])

        img = self.render(g_buffers[:,:,7:10].detach(), g_buffers[:,:,0:3], g_buffers[:,:,3:4], g_buffers[:,:,4:7].detach()+1e-2*g_buffers[:,:,7:10].detach(), cam_position)
        return img.unsqueeze(0)
        # return g_buffers[:,:,7:10].detach().unsqueeze(0)


    def generate_obj(self, path_mesh):
        object_list = pyredner.load_obj(path_mesh, obj_group=True, return_objects=True)

        # for object in object_list:
        #     object.vertices = object.vertices * torch.tensor([-1.0,-1.0,1.0]).unsqueeze(0).cuda()
        #     object.normals = object.normals * torch.tensor([-1.0,-1.0,1.0]).unsqueeze(0).cuda()

        return object_list

    def render(self, normal: torch.Tensor, albedo: torch.Tensor, roughness: torch.Tensor, points: torch.Tensor, cam_position: torch.Tensor):
        """render the final color according to the g-buffers and the IRF.

        Args:
            normal (torch.float32): [env_h, env_w, 3]
            albedo (torch.float32): [env_h, env_w, 3]
            roughness (torch.float32): [env_h, env_w, 1]
            points (torch.float32): [env_h, env_w, 3]
            cam_position (torch.float32): [3]
        """

        env_h, env_w, c = normal.shape
        normal = normal.reshape(-1, 3)
        albedo = albedo.reshape(-1, 3)
        roughness = roughness.reshape(-1, 1)
        points = points.reshape(-1, 3)
        view = F.normalize(cam_position.unsqueeze(0) - points, eps=1e-4)

        light_dir_diff = generate_dir(normal, self.sample_l[0], mode=self.sample_type[0])   # shape: [env_h*env_w, n_sample, 3]
        with torch.no_grad():
            diffuse_lighting = hdr_recover(self.incident_radiance_network(points.unsqueeze(1).expand_as(light_dir_diff), light_dir_diff)) * (2**7)
            # diffuse_lighting = self.query_irf(points.unsqueeze(1).expand_as(light_dir_diff), light_dir_diff.unsqueeze(-2), self.sample_l[0])

        diffuse = self.diffuse_reflectance(diffuse_lighting, light_dir_diff, normal, albedo, self.sample_type[0]) / self.sample_l[0]
        # diffuse = torch.sum(diffuse_lighting, dim=1) / self.sample_l[0]

        h_dir_specular = generate_dir(normal, self.sample_l[1], None, self.sample_type[1], roughness)
        vdh = torch.clamp(torch.sum( h_dir_specular * view.unsqueeze(1), dim=-1, keepdim=True), 0.0, 1.0)   # shape: [b, n_sample, 1]
        light_dir_spec = 2 * vdh * h_dir_specular - view.unsqueeze(1)
        with torch.no_grad():
            specular_lighting = hdr_recover(self.incident_radiance_network(points.unsqueeze(1).expand_as(h_dir_specular), light_dir_spec)) * (2**7)
            # specular_lighting = self.query_irf(points.unsqueeze(1).expand_as(light_dir_spec), light_dir_spec.unsqueeze(-2).detach(), self.sample_l[1])

        specular = self.specular_reflectance(specular_lighting, h_dir_specular, normal, view, light_dir_spec, roughness) / self.sample_l[1]

        return (diffuse + specular).reshape(env_h, env_w, 3)


    def diffuse_reflectance(self, lighting, l, n, albedo, sample_type='uniform'):
        ndl = torch.clamp(torch.sum( n.unsqueeze(1) * l, dim=-1, keepdim=True), 0.0, 1.0)   # shape: [b, n_sample, 1]
        brdf = albedo.unsqueeze(1) / np.pi

        if sample_type=='cosine':
            return torch.sum( lighting * brdf * np.pi , dim=1)
        return torch.sum( lighting * brdf * ndl * 2*np.pi , dim=1)

    def specular_reflectance(self, lighting, h, n, v, l, roughness, albedo=None):

        vdh = torch.clamp(torch.sum( h * v.unsqueeze(1), dim=-1, keepdim=True), 0.0, 1.0)   # shape: [b, n_sample, 1]
        ndl = torch.clamp(torch.sum( n.unsqueeze(1) * l, dim=-1, keepdim=True), 0.0, 1.0)   # shape: [b, n_sample, 1]
        ndh = torch.clamp(torch.sum( n.unsqueeze(1) * h, dim=-1, keepdim=True), 0.0, 1.0)   # shape: [b, n_sample, 1]
        ndv = torch.clamp(torch.sum( n.unsqueeze(1) * v.unsqueeze(1), dim=-1, keepdim=True), 0.0, 1.0)   # shape: [b, n_sample, 1]
        vdl = torch.clamp(torch.sum( v.unsqueeze(1) * l, dim=-1, keepdim=True), 0.0, 1.0)   # shape: [b, n_sample, 1]

        f = 0.04 + 0.96 * torch.pow(2.0, ((-5.55472*vdh-6.98316)*vdh))   # Schlick Fresnel, spherical-Gaussian approximation

        k = (roughness.unsqueeze(1) + 1) * (roughness.unsqueeze(1) + 1) / 8
        g1_ndv = ndv / torch.clamp( ndv *(1-k) + k , min=TINY_NUMBER)
        g1_ndl = ndl / torch.clamp( ndl *(1-k) + k , min=TINY_NUMBER)
        g = g1_ndl * g1_ndv

        # brdf: f * d * g / (4*ndl*ndv)
        brdf = f * g / torch.clamp(4 * ndl * ndv, min=TINY_NUMBER)

        # pdf : D*ndh / (4*vdh)
        # equation: L = lighting * brdf * ndl
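        # why the extra 4*vdh/ndh factor below: with GGX importance sampling the
        # half-vector pdf is D*ndh/(4*vdh), so each Monte-Carlo term
        #     L * [f*D*g / (4*ndl*ndv)] * ndl / pdf
        # cancels D and reduces to L * brdf * ndl * 4*vdh/ndh, which is exactly
        # the quantity summed here (division by the sample count happens at the call site).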
        return torch.sum( lighting * brdf * ndl * 4 * vdh / torch.clamp(ndh, TINY_NUMBER), dim=1)


    def query_irf(self, points, directions, num_sample):
        rays = torch.cat([points.unsqueeze(-2).expand_as(directions), directions], dim=-1)
        ray = o3d.core.Tensor(rays.cpu().numpy(), dtype=o3d.core.Dtype.Float32)   # attention: RaycastingScene supports CPU only
        intersections = self.scene.cast_rays(ray)
        hit = intersections['t_hit'].numpy()   # shape: (b, num_sample, 1)

        # mask = np.isfinite(hit)   # shape: (b, num_sample, 1)
        mask = np.logical_and(hit > 1e-4, np.isfinite(hit))

        prim_id = intersections['primitive_ids'].numpy()   # shape: (b, num_sample, 1)
        prim_uvs = intersections['primitive_uvs'].numpy()   # shape: (b, num_sample, 1, 2)
        prim_uvs = np.clip(prim_uvs, 0., 1.)

        prim_id[~mask] = 0

        tmp = np.stack([prim_id*3+0, prim_id*3+1, prim_id*3+2], axis=0)   # shape: (3, b, num_sample, 1)
        tmp = tmp.reshape(-1)
        index = self.triangle_uvs[tmp]
        index = index.reshape(3, self.pano_res[0]*self.pano_res[1], num_sample, 1, 2)   # shape: (3, b, num_sample, 1, 2)
        grid = index[0,:,:,:,:] * (1-prim_uvs[:,:,:,0:1]-prim_uvs[:,:,:,1:2]) + index[1,:,:,:,:] * prim_uvs[:,:,:,0:1] + index[2,:,:,:,:] * prim_uvs[:,:,:,1:2]   # shape: (b, num_sample, 1, 2)
        grid = torch.from_numpy(grid).float()   # shape: (b, num_sample, 1, 2)
        grid[:,:,:,0:1] = grid[:,:,:,0:1]*2-1
        grid[:,:,:,1:2] = -(1-grid[:,:,:,1:2]*2)

        gt_ir = F.grid_sample(self.texture.expand([self.pano_res[0]*self.pano_res[1]]+list(self.texture.shape[1:])), grid, mode='bilinear', padding_mode="border", align_corners=False).permute(0,2,3,1)   # shape: (b, num_sample, 1, 3)
        miss_mask = ~mask
        gt_ir[miss_mask,:] = 0
        gt_ir = gt_ir.reshape(self.pano_res[0]*self.pano_res[1], num_sample, 3)
        return gt_ir.cuda()




if __name__=="__main__":
    conf = ConfigFactory.parse_file('./configs/mat.conf')
    # the constructor needs camera ids and extrinsics; an identity pose serves as a smoke test
    mm = MaterialModel(conf, ids=['demo'], extrinsics=[torch.eye(4)]).cuda()
    radiance = mm(torch.eye(4).cuda(), 'demo', torch.zeros(3).cuda()).cpu().detach()
    print(radiance.shape)
    cv2.imwrite("../results/test_house/pano.jpg", radiance[0].numpy()[:,:,::-1]*255.0)

--------------------------------------------------------------------------------
/models/mat_nvdiffrast_rec.py:
--------------------------------------------------------------------------------
'''
@File : mat_nvdiffrast_rec.py
@Time : 2023/02/27 12:09:42
@Author : Zhen Li
@Contact : yodlee@mail.nwpu.edu.cn
@Institution: Realsee
@License : GNU General Public License v2.0
@Desc : None
'''


import cv2
import numpy as np
import math

import torch
import torch.nn as nn
import torch.nn.functional as F
from pyhocon import ConfigFactory
import open3d as o3d

import nvdiffrast.torch as dr

import pyredner
pyredner.set_use_gpu(False)

from utils.general import get_mip_level, rgb_to_intensity


from models.incidentNet import MLPTexture3D
from utils.sample_util import TINY_NUMBER, generate_dir, generate_fixed_samples
from utils.general import hdr_recover


class MaterialModel(nn.Module):
    def __init__(self, conf, ids, extrinsics, optim_cam=False, gt_irf=True, gt_irrt=True):
        super().__init__()
        # self.incident_radiance_network = IRNetwork(**conf.get_config('models.incident_radiance_network'))
        # self.ir_radiance_network = MatNetwork(**conf.get_config('models.irrf_network'))


        self.resolution = conf.get_list('train.env_res', default=[8,16])   # shape : (height, width)

        self.path_traced_mesh = conf.get_string('train.path_mesh_open3d')
        self.pano_res = conf.get_list('train.pano_img_res', default=[1000,2000])   # shape : (height, width)
        self.cube_res = int(self.pano_res[1]/4)
        self.sample_l = conf.get_list('train.sample_light', default=[64,64])   # number of samples : (diffuse, specular)
        self.sample_type = conf.get_list('models.render.sample_type', default=['uniform','importance'])
        self.optim_cam = optim_cam
        self.ids = ids
        self.extrinsics = extrinsics
        self.conf = conf
        # assume that there only exists one mesh and one texture
        self.object_list = self.generate_obj(self.path_traced_mesh)[0]
        self.AABB = torch.stack([ torch.min(self.object_list.vertices, dim=0)[0], torch.max(self.object_list.vertices, dim=0)[0] ], dim=0)   # shape: (2, 3)
        # self.AABB = self.AABB.unsqueeze(1).unsqueeze(1).expand(-1, self.cube_res, self.cube_res, 3)
        self.material_network = MLPTexture3D(self.AABB)   # forward() samples this network, so it must be constructed here


        self.triangles = (nn.Parameter(self.object_list.indices, requires_grad=False))
        self.uvs = (nn.Parameter(self.object_list.uvs, requires_grad=False))
        self.uv_indices = (nn.Parameter(self.object_list.uv_indices, requires_grad=False))
        self.w = torch.ones([self.object_list.vertices.shape[0], 1])
        self.vertices = (nn.Parameter(torch.cat([self.object_list.vertices, self.w], dim=-1).repeat(6, 1, 1), requires_grad=False))   # shape: [6, n, 4]
        self.normals = (nn.Parameter(self.object_list.normals, requires_grad=False))   # shape: [n, 3]

        texture = self.object_list.material.diffuse_reflectance.texels

        # self.max_mip_level = int(np.log2(texture.shape[0]))
        self.max_mip_level = get_mip_level(texture.shape[0])

        self.gt_irrt = gt_irrt
        if gt_irrt:
            texture = cv2.imread(self.path_traced_mesh.replace("out1.obj","irt.hdr"), -1)[:,:,::-1]

            texture = np.asarray(texture, np.float32).copy()
            self.irrt = nn.Parameter(torch.from_numpy(texture), requires_grad=False)

        # self.samples_diff = nn.Parameter(generate_fixed_samples(self.cube_res*self.cube_res*6, self.sample_l[0]), requires_grad=False)
        # self.samples_spec = nn.Parameter(generate_fixed_samples(self.cube_res*self.cube_res*6, self.sample_l[1]), requires_grad=False)

        self.glctx = dr.RasterizeGLContext()

        if gt_irf:
            self.path_traced_mesh = conf.get_string('train.path_mesh_open3d')
            # init ray casting scene
            trianglemesh = o3d.io.read_triangle_mesh(self.path_traced_mesh)   # the o3d tracer must use a mesh with a single texture map.
            # trianglemesh.compute_vertex_normals()
            texture = cv2.imread(self.path_traced_mesh.replace("out1.obj","hdr_texture.hdr"), -1)[:,:,::-1]
            texture = cv2.flip(texture, 0)
            texture = np.asarray(texture, np.float32)
            self.texture = (torch.from_numpy(texture).permute(2, 0, 1).unsqueeze(0).float())   # shape: (1, 3, H, W)
            self.texture = self.texture * (2**conf.get_float('train.hdr_exposure'))
            # texture = np.asarray(trianglemesh.textures[1])
            # self.texture = (torch.from_numpy(texture).permute(2, 0, 1).unsqueeze(0).float()/255.)**(2.2)   # shape: (1, 3, H, W)

            self.triangle_uvs = np.asarray(trianglemesh.triangle_uvs)
            mesh = o3d.t.geometry.TriangleMesh.from_legacy(trianglemesh)
            # Create a scene and add the triangle mesh.
            self.scene = o3d.t.geometry.RaycastingScene()
            self.scene.add_triangles(mesh)





    def forward(self, mvp, id, cam_position, stage=1):
        """ assume that the batch_size is 1.

        Args:
            mvp (torch.float32): shape: [6, 4, 4].
            cam_position (torch.float32): shape: [3].
            id, stage: unused in this forward pass; kept for interface parity.

        Returns:
            res (dict): 'rgb', 'albedo', 'normal', 'position' buffers of shape [6, cube_res, cube_res, 3], plus 'roughness' and 'kd_grad'.
        """


        clip_vertexes = torch.einsum('ijk,ikl->ijl', self.vertices, mvp)

        rast_out, rast_out_db = dr.rasterize(self.glctx, clip_vertexes, self.triangles, resolution=[self.cube_res, self.cube_res])
        features = torch.cat([self.vertices[:,:,0:3], self.normals.unsqueeze(0).expand(6, -1, -1)], dim=-1)
        g_buffers, _ = dr.interpolate(features.contiguous(), rast_out, self.triangles)   # [6, h, w, 6]
        # disable antialiasing, because edge artifacts would appear between different objects.
        g_buffers = torch.where(rast_out[..., 3:] > 0, g_buffers, torch.tensor([1.,0.,0., 1.,0.,0.]).cuda())   # give a fixed position and normal to pixels that miss all geometry.
        mask = (rast_out[..., 3:] > 0).float()

        # get irr from irrt
        texc, texd = dr.interpolate(self.uvs[None, ...], rast_out, self.uv_indices, rast_db=rast_out_db, diff_attrs='all')
        irr = dr.texture(self.irrt[None, ...], texc, texd, filter_mode='linear-mipmap-linear', max_mip_level=self.max_mip_level)

        materials = self.material_network.sample(g_buffers[:,:,:,0:3])
        albedo = materials[...,0:3]
        roughness = materials[...,3:4]

        materials_jit = self.material_network.sample(g_buffers[:,:,:,0:3] + torch.normal(mean=0, std=0.01, size=g_buffers[:,:,:,0:3].shape, device="cuda"))

        kd_grad = torch.sum(torch.abs(materials_jit[...,0:3] - albedo), dim=-1, keepdim=True) / 3   # shape: [b, h, w, 1]

        res = self.render(g_buffers[:,:,:,3:6].detach(), albedo, roughness, g_buffers[:,:,:,0:3].detach()+1e-2*g_buffers[:,:,:,3:6].detach(), cam_position, irr)

        res.update({
            'roughness': roughness,
            'kd_grad': kd_grad
        })

        return res


    def generate_obj(self, path_mesh):
        object_list = pyredner.load_obj(path_mesh, obj_group=True, return_objects=True)

        # for object in object_list:
        #     object.normals = pyredner.compute_vertex_normal(object.vertices, object.indices)

        return object_list

    def render(self, normal: torch.Tensor, albedo: torch.Tensor, roughness: torch.Tensor, points: torch.Tensor, cam_position: torch.Tensor, irr):
        """render the final color according to the g-buffers and the IRF.

        Args:
            normal (torch.float32): [6, cube_len, cube_len, 3]
            albedo (torch.float32): [6, cube_len, cube_len, 3]
            roughness (torch.float32): [6, cube_len, cube_len, 1]
            points (torch.float32): [6, cube_len, cube_len, 3]
            cam_position (torch.float32): [3]
        """

        face, h, w, c = normal.shape
        normal = normal.reshape(-1, 3)
        albedo = albedo.reshape(-1, 3)
        roughness = roughness.reshape(-1, 1)
        points = points.reshape(-1, 3)
        irr = irr.reshape(-1, 3)
        view = F.normalize(cam_position.unsqueeze(0) - points, eps=1e-4)

        # with torch.no_grad():
        #     irr = hdr_recover(self.ir_radiance_network(points))   # shape : [b, 3]
        diffuse = irr * albedo / np.pi   # Lambertian shading from the pre-integrated irradiance texture

        h_dir_specular = generate_dir(normal, self.sample_l[1], None, self.sample_type[1], roughness)
        vdh = torch.clamp(torch.sum( h_dir_specular * view.unsqueeze(1), dim=-1, keepdim=True), 0.0, 1.0)   # shape: [b, n_sample, 1]
        light_dir_spec = 2 * vdh * h_dir_specular - view.unsqueeze(1)
        specular_lighting = self.query_irf(points.unsqueeze(1).expand_as(light_dir_spec), light_dir_spec.unsqueeze(-2).detach(), self.sample_l[1])

        specular = self.specular_reflectance(specular_lighting, h_dir_specular, normal, view, light_dir_spec, roughness) / self.sample_l[1]

        res = {
            'rgb': (diffuse + specular).reshape(face, h, w, 3),
            'albedo': albedo.reshape(face, h, w, 3),
            'normal': normal.reshape(face, h, w, 3).detach(),
            'position': (points).reshape(face, h, w, 3).detach()
        }
        return res


    def diffuse_reflectance(self, lighting, l, n, albedo, sample_type='uniform'):
        ndl = torch.clamp(torch.sum( n.unsqueeze(1) * l, dim=-1, keepdim=True), 0.0, 1.0)   # shape: [b, n_sample, 1]
        brdf = albedo.unsqueeze(1) / np.pi

        if sample_type=='cosine':
            return torch.sum( lighting * brdf * np.pi , dim=1)
        return torch.sum( lighting * brdf * ndl * 2*np.pi , dim=1)

    def specular_reflectance(self, lighting, h, n, v, l, roughness, albedo=None):

        vdh = torch.clamp(torch.sum( h * v.unsqueeze(1), dim=-1, keepdim=True), 0.0, 1.0)   # shape: [b, n_sample, 1]
        ndl = torch.clamp(torch.sum( n.unsqueeze(1) * l, dim=-1, keepdim=True), 0.0, 1.0)   # shape: [b, n_sample, 1]
        ndh = torch.clamp(torch.sum( n.unsqueeze(1) * h, dim=-1, keepdim=True), 0.0, 1.0)   # shape: [b, n_sample, 1]
        ndv = torch.clamp(torch.sum( n.unsqueeze(1) * v.unsqueeze(1), dim=-1, keepdim=True), 0.0, 1.0)   # shape: [b, n_sample, 1]
        vdl = torch.clamp(torch.sum( v.unsqueeze(1) * l, dim=-1, keepdim=True), 0.0, 1.0)   # shape: [b, n_sample, 1]

        f = 0.04 + 0.96 * torch.pow(2.0, ((-5.55472*vdh-6.98316)*vdh))   # Schlick Fresnel, spherical-Gaussian approximation

        k = (roughness.unsqueeze(1) + 1) * (roughness.unsqueeze(1) + 1) / 8
        g1_ndv = ndv / torch.clamp( ndv *(1-k) + k , min=TINY_NUMBER)
        g1_ndl = ndl / torch.clamp( ndl *(1-k) + k , min=TINY_NUMBER)
        g = g1_ndl * g1_ndv

        # brdf: f * d * g / (4*ndl*ndv)
        brdf = f * g / torch.clamp(4 * ndl * ndv, min=TINY_NUMBER)

        # pdf : D*ndh / (4*vdh)
        # equation: L = lighting * brdf * ndl
        return torch.sum( lighting * brdf * ndl * 4 * vdh / torch.clamp(ndh, TINY_NUMBER), dim=1)


    def query_irf(self, points, directions, num_sample):
        rays = torch.cat([points.unsqueeze(-2).expand_as(directions), directions], dim=-1)
        ray = o3d.core.Tensor(rays.cpu().numpy(), dtype=o3d.core.Dtype.Float32)   # attention: RaycastingScene
supports CPU only
        intersections = self.scene.cast_rays(ray)
        hit = intersections['t_hit'].numpy()   # shape: (b, num_sample, 1)

        # mask = np.isfinite(hit)   # shape: (b, num_sample, 1)
        mask = np.logical_and(hit > 1e-4, np.isfinite(hit))

        prim_id = intersections['primitive_ids'].numpy()   # shape: (b, num_sample, 1)
        prim_uvs = intersections['primitive_uvs'].numpy()   # shape: (b, num_sample, 1, 2)
        prim_uvs = np.clip(prim_uvs, 0., 1.)

        prim_id[~mask] = 0

        tmp = np.stack([prim_id*3+0, prim_id*3+1, prim_id*3+2], axis=0)   # shape: (3, b, num_sample, 1)
        tmp = tmp.reshape(-1)
        index = self.triangle_uvs[tmp]
        index = index.reshape(3, self.cube_res*self.cube_res*6, num_sample, 1, 2)   # shape: (3, b, num_sample, 1, 2)
        grid = index[0,:,:,:,:] * (1-prim_uvs[:,:,:,0:1]-prim_uvs[:,:,:,1:2]) + index[1,:,:,:,:] * prim_uvs[:,:,:,0:1] + index[2,:,:,:,:] * prim_uvs[:,:,:,1:2]   # shape: (b, num_sample, 1, 2)
        grid = torch.from_numpy(grid).float()   # shape: (b, num_sample, 1, 2)
        grid[:,:,:,0:1] = grid[:,:,:,0:1]*2-1
        grid[:,:,:,1:2] = -(1-grid[:,:,:,1:2]*2)

        gt_ir = F.grid_sample(self.texture.expand([self.cube_res*self.cube_res*6]+list(self.texture.shape[1:])), grid, mode='bilinear', padding_mode="border", align_corners=False).permute(0,2,3,1)   # shape: (b, num_sample, 1, 3)
        miss_mask = ~mask
        gt_ir[miss_mask,:] = 0
        gt_ir = gt_ir.reshape(self.cube_res*self.cube_res*6, num_sample, 3)
        return gt_ir.cuda()
--------------------------------------------------------------------------------
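
A note on the shared pattern above: TracerO3d.forward and the two query_irf methods each re-implement the same open3d raycast-and-fetch loop, casting rays against the textured scene mesh and converting barycentric hit coordinates into texture UVs for grid_sample. The standalone sketch below condenses that pattern for reference; load_scene and fetch_radiance are illustrative names rather than functions from this repo, and the exposure handling mirrors the 2**hdr_exposure scaling used above.

import cv2
import numpy as np
import open3d as o3d
import torch
import torch.nn.functional as F


def load_scene(path_obj, path_hdr, exposure=0.0):
    # build a CPU RaycastingScene plus a (1, 3, H, W) HDR texture and per-corner UVs
    mesh = o3d.io.read_triangle_mesh(path_obj)
    tex = cv2.imread(path_hdr, -1)[:, :, ::-1]                        # BGR -> RGB
    tex = np.asarray(cv2.flip(tex, 0), np.float32)                    # match the V flip used above
    tex = torch.from_numpy(tex).permute(2, 0, 1).unsqueeze(0) * (2.0 ** exposure)
    uvs = np.asarray(mesh.triangle_uvs)                               # (3 * n_tri, 2), one UV per corner
    scene = o3d.t.geometry.RaycastingScene()
    scene.add_triangles(o3d.t.geometry.TriangleMesh.from_legacy(mesh))
    return scene, tex, uvs


def fetch_radiance(scene, tex, uvs, origins, dirs):
    # origins, dirs: (b, n, 3) tensors -> (b, n, 3) radiance, zeroed where rays miss
    rays = torch.cat([origins, dirs], dim=-1).cpu().numpy()
    hits = scene.cast_rays(o3d.core.Tensor(rays, dtype=o3d.core.Dtype.Float32))
    t = hits['t_hit'].numpy()
    mask = np.logical_and(t > 1e-4, np.isfinite(t))                   # drop self-hits and misses
    prim = hits['primitive_ids'].numpy().astype(np.int64)
    bary = np.clip(hits['primitive_uvs'].numpy(), 0.0, 1.0)           # barycentric (u, v) per hit
    prim[~mask] = 0                                                   # keep indices valid for missed rays
    corners = uvs[np.stack([prim * 3, prim * 3 + 1, prim * 3 + 2])]   # (3, b, n, 2)
    w0 = 1.0 - bary[..., 0:1] - bary[..., 1:2]
    uv = corners[0] * w0 + corners[1] * bary[..., 0:1] + corners[2] * bary[..., 1:2]
    grid = torch.from_numpy(uv).float().unsqueeze(2) * 2.0 - 1.0      # (b, n, 1, 2); grid_sample wants [-1, 1]
    rad = F.grid_sample(tex.expand(grid.shape[0], -1, -1, -1), grid,
                        mode='bilinear', padding_mode='border', align_corners=False)
    rad = rad.permute(0, 2, 3, 1).squeeze(2)                          # (b, n, 3)
    rad[torch.from_numpy(~mask)] = 0.0
    return rad

As in the repo code, the texture stays on the CPU: RaycastingScene runs on the CPU, so the rays round-trip through numpy and only the gathered radiance is moved back to the GPU by the caller.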