├── .idea
└── workspace.xml
├── LICENSE
├── README.md
├── assets
└── teaser.png
├── camera_utils.py
├── data_preprocess
├── Deep3DFaceRecon_pytorch
│ ├── LICENSE
│ ├── README.md
│ ├── data
│ │ ├── __init__.py
│ │ ├── base_dataset.py
│ │ ├── flist_dataset.py
│ │ ├── image_folder.py
│ │ └── template_dataset.py
│ ├── data_preparation.py
│ ├── datasets
│ │ └── examples
│ │ │ ├── 000002.jpg
│ │ │ ├── 000006.jpg
│ │ │ ├── 000007.jpg
│ │ │ ├── 000031.jpg
│ │ │ ├── 000033.jpg
│ │ │ ├── 000037.jpg
│ │ │ ├── 000050.jpg
│ │ │ ├── 000055.jpg
│ │ │ ├── 000114.jpg
│ │ │ ├── 000125.jpg
│ │ │ ├── 000126.jpg
│ │ │ ├── 015259.jpg
│ │ │ ├── 015270.jpg
│ │ │ ├── 015309.jpg
│ │ │ ├── 015310.jpg
│ │ │ ├── 015316.jpg
│ │ │ ├── 015384.jpg
│ │ │ ├── detections
│   │   │   │   ├── 000002.txt
│   │   │   │   ├── 000006.txt
│   │   │   │   ├── 000007.txt
│   │   │   │   ├── 000031.txt
│   │   │   │   ├── 000033.txt
│   │   │   │   ├── 000037.txt
│   │   │   │   ├── 000050.txt
│   │   │   │   ├── 000055.txt
│   │   │   │   ├── 000114.txt
│   │   │   │   ├── 000125.txt
│   │   │   │   ├── 000126.txt
│   │   │   │   ├── 015259.txt
│   │   │   │   ├── 015270.txt
│   │   │   │   ├── 015309.txt
│   │   │   │   ├── 015310.txt
│   │   │   │   ├── 015316.txt
│   │   │   │   ├── 015384.txt
│   │   │   │   ├── vd006.txt
│   │   │   │   ├── vd025.txt
│   │   │   │   ├── vd026.txt
│   │   │   │   ├── vd034.txt
│   │   │   │   ├── vd051.txt
│   │   │   │   ├── vd070.txt
│   │   │   │   ├── vd092.txt
│   │   │   │   └── vd102.txt
│ │ │ ├── vd006.png
│ │ │ ├── vd025.png
│ │ │ ├── vd026.png
│ │ │ ├── vd034.png
│ │ │ ├── vd051.png
│ │ │ ├── vd070.png
│ │ │ ├── vd092.png
│ │ │ └── vd102.png
│ ├── environment.yml
│ ├── models
│ │ ├── __init__.py
│ │ ├── arcface_torch
│ │ │ ├── README.md
│ │ │ ├── backbones
│ │ │ │ ├── __init__.py
│ │ │ │ ├── iresnet.py
│ │ │ │ ├── iresnet2060.py
│ │ │ │ ├── mobilefacenet.py
│ │ │ │ └── vit.py
│ │ │ ├── configs
│ │ │ │ ├── 3millions.py
│ │ │ │ ├── __init__.py
│ │ │ │ ├── base.py
│ │ │ │ ├── glint360k_mbf.py
│ │ │ │ ├── glint360k_r100.py
│ │ │ │ ├── glint360k_r50.py
│ │ │ │ ├── ms1mv2_mbf.py
│ │ │ │ ├── ms1mv2_r100.py
│ │ │ │ ├── ms1mv2_r50.py
│ │ │ │ ├── ms1mv3_mbf.py
│ │ │ │ ├── ms1mv3_r100.py
│ │ │ │ ├── ms1mv3_r50.py
│ │ │ │ ├── wf12m_conflict_r50.py
│ │ │ │ ├── wf12m_conflict_r50_pfc03_filter04.py
│ │ │ │ ├── wf12m_flip_pfc01_filter04_r50.py
│ │ │ │ ├── wf12m_flip_r50.py
│ │ │ │ ├── wf12m_mbf.py
│ │ │ │ ├── wf12m_pfc02_r100.py
│ │ │ │ ├── wf12m_r100.py
│ │ │ │ ├── wf12m_r50.py
│ │ │ │ ├── wf42m_pfc0008_32gpu_r100.py
│ │ │ │ ├── wf42m_pfc02_16gpus_mbf_bs8k.py
│ │ │ │ ├── wf42m_pfc02_16gpus_r100.py
│ │ │ │ ├── wf42m_pfc02_16gpus_r50_bs8k.py
│ │ │ │ ├── wf42m_pfc02_32gpus_r50_bs4k.py
│ │ │ │ ├── wf42m_pfc02_8gpus_r50_bs4k.py
│ │ │ │ ├── wf42m_pfc02_r100.py
│ │ │ │ ├── wf42m_pfc02_r100_16gpus.py
│ │ │ │ ├── wf42m_pfc02_r100_32gpus.py
│ │ │ │ ├── wf42m_pfc03_32gpu_r100.py
│ │ │ │ ├── wf42m_pfc03_32gpu_r18.py
│ │ │ │ ├── wf42m_pfc03_32gpu_r200.py
│ │ │ │ ├── wf42m_pfc03_32gpu_r50.py
│ │ │ │ ├── wf42m_pfc03_40epoch_64gpu_vit_b.py
│ │ │ │ ├── wf42m_pfc03_40epoch_64gpu_vit_l.py
│ │ │ │ ├── wf42m_pfc03_40epoch_64gpu_vit_s.py
│ │ │ │ ├── wf42m_pfc03_40epoch_64gpu_vit_t.py
│ │ │ │ ├── wf42m_pfc03_40epoch_8gpu_vit_b.py
│ │ │ │ ├── wf42m_pfc03_40epoch_8gpu_vit_t.py
│ │ │ │ ├── wf4m_mbf.py
│ │ │ │ ├── wf4m_r100.py
│ │ │ │ └── wf4m_r50.py
│ │ │ ├── dataset.py
│ │ │ ├── dist.sh
│ │ │ ├── docs
│ │ │ │ ├── eval.md
│ │ │ │ ├── install.md
│ │ │ │ ├── install_dali.md
│ │ │ │ ├── modelzoo.md
│ │ │ │ ├── prepare_webface42m.md
│ │ │ │ └── speed_benchmark.md
│ │ │ ├── eval
│ │ │ │ ├── __init__.py
│ │ │ │ └── verification.py
│ │ │ ├── eval_ijbc.py
│ │ │ ├── flops.py
│ │ │ ├── inference.py
│ │ │ ├── losses.py
│ │ │ ├── lr_scheduler.py
│ │ │ ├── onnx_helper.py
│ │ │ ├── onnx_ijbc.py
│ │ │ ├── partial_fc.py
│ │ │ ├── partial_fc_v2.py
│ │ │ ├── requirement.txt
│ │ │ ├── run.sh
│ │ │ ├── torch2onnx.py
│ │ │ ├── train.py
│ │ │ ├── train_v2.py
│ │ │ └── utils
│ │ │ │ ├── __init__.py
│ │ │ │ ├── plot.py
│ │ │ │ ├── utils_callbacks.py
│ │ │ │ ├── utils_config.py
│ │ │ │ ├── utils_distributed_sampler.py
│ │ │ │ └── utils_logging.py
│ │ ├── base_model.py
│ │ ├── bfm.py
│ │ ├── facerecon_model.py
│ │ ├── losses.py
│ │ ├── networks.py
│ │ └── template_model.py
│ ├── nvdiffrast
│ │ ├── LICENSE.txt
│ │ ├── README.md
│ │ ├── build
│ │ │ └── lib
│ │ │ │ └── nvdiffrast
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── common
│   │   │   │   │   │   ├── antialias.cu
│   │   │   │   │   │   ├── antialias.h
│   │   │   │   │   │   ├── common.cpp
│   │   │   │   │   │   ├── common.h
│   │   │   │   │   │   ├── cudaraster
│   │   │   │   │   │   │   ├── CudaRaster.hpp
│   │   │   │   │   │   │   └── impl
│   │   │   │   │   │   │   │   ├── BinRaster.inl
│   │   │   │   │   │   │   │   ├── Buffer.cpp
│   │   │   │   │   │   │   │   ├── Buffer.hpp
│   │   │   │   │   │   │   │   ├── CoarseRaster.inl
│   │   │   │   │   │   │   │   ├── Constants.hpp
│   │   │   │   │   │   │   │   ├── CudaRaster.cpp
│   │   │   │   │   │   │   │   ├── Defs.hpp
│   │   │   │   │   │   │   │   ├── FineRaster.inl
│   │   │   │   │   │   │   │   ├── PrivateDefs.hpp
│   │   │   │   │   │   │   │   ├── RasterImpl.cpp
│   │   │   │   │   │   │   │   ├── RasterImpl.cu
│   │   │   │   │   │   │   │   ├── RasterImpl.hpp
│   │   │   │   │   │   │   │   ├── TriangleSetup.inl
│   │   │   │   │   │   │   │   └── Util.inl
│   │   │   │   │   │   ├── framework.h
│   │   │   │   │   │   ├── glutil.cpp
│   │   │   │   │   │   ├── glutil.h
│   │   │   │   │   │   ├── glutil_extlist.h
│   │   │   │   │   │   ├── interpolate.cu
│   │   │   │   │   │   ├── interpolate.h
│   │   │   │   │   │   ├── rasterize.cu
│   │   │   │   │   │   ├── rasterize.h
│   │   │   │   │   │   ├── rasterize_gl.cpp
│   │   │   │   │   │   ├── rasterize_gl.h
│   │   │   │   │   │   ├── texture.cpp
│   │   │   │   │   │   ├── texture.cu
│   │   │   │   │   │   └── texture.h
│   │   │   │   │   ├── tensorflow
│   │   │   │   │   │   ├── __init__.py
│   │   │   │   │   │   ├── ops.py
│   │   │   │   │   │   ├── plugin_loader.py
│   │   │   │   │   │   ├── tf_all.cu
│   │   │   │   │   │   ├── tf_antialias.cu
│   │   │   │   │   │   ├── tf_interpolate.cu
│   │   │   │   │   │   ├── tf_rasterize.cu
│   │   │   │   │   │   └── tf_texture.cu
│   │   │   │   │   └── torch
│   │   │   │   │   │   ├── __init__.py
│   │   │   │   │   │   ├── ops.py
│   │   │   │   │   │   ├── torch_antialias.cpp
│   │   │   │   │   │   ├── torch_bindings.cpp
│   │   │   │   │   │   ├── torch_bindings_gl.cpp
│   │   │   │   │   │   ├── torch_common.inl
│   │   │   │   │   │   ├── torch_interpolate.cpp
│   │   │   │   │   │   ├── torch_rasterize.cpp
│   │   │   │   │   │   ├── torch_rasterize_gl.cpp
│   │   │   │   │   │   ├── torch_texture.cpp
│   │   │   │   │   │   └── torch_types.h
│ │ ├── docker
│ │ │ ├── 10_nvidia.json
│ │ │ └── Dockerfile
│ │ ├── docs
│ │ │ ├── img
│ │ │ │ ├── cube.png
│ │ │ │ ├── earth.png
│ │ │ │ ├── envphong.png
│ │ │ │ ├── logo.png
│ │ │ │ ├── pipe_cube.png
│ │ │ │ ├── pipe_earth.png
│ │ │ │ ├── pipe_envphong.png
│ │ │ │ ├── pose.png
│ │ │ │ ├── spot_aa.png
│ │ │ │ ├── spot_crop1.png
│ │ │ │ ├── spot_crop2.png
│ │ │ │ ├── spot_diff1.png
│ │ │ │ ├── spot_diff2.png
│ │ │ │ ├── spot_peel1.png
│ │ │ │ ├── spot_peel2.png
│ │ │ │ ├── spot_st.png
│ │ │ │ ├── spot_tex.png
│ │ │ │ ├── spot_texture.png
│ │ │ │ ├── spot_texw.png
│ │ │ │ ├── spot_tri.png
│ │ │ │ ├── spot_uv.png
│ │ │ │ ├── teaser.png
│ │ │ │ ├── teaser1.png
│ │ │ │ ├── teaser2.png
│ │ │ │ ├── teaser3.png
│ │ │ │ ├── teaser4.png
│ │ │ │ ├── teaser5.png
│ │ │ │ ├── thumb.jpg
│ │ │ │ └── tri.png
│ │ │ └── index.html
│ │ ├── nvdiffrast.egg-info
│ │ │ ├── PKG-INFO
│ │ │ ├── SOURCES.txt
│ │ │ ├── dependency_links.txt
│ │ │ ├── requires.txt
│ │ │ └── top_level.txt
│ │ ├── nvdiffrast
│ │ │ ├── __init__.py
│ │ │ ├── common
│ │ │ │ ├── antialias.cu
│ │ │ │ ├── antialias.h
│ │ │ │ ├── common.cpp
│ │ │ │ ├── common.h
│ │ │ │ ├── cudaraster
│ │ │ │ │ ├── CudaRaster.hpp
│ │ │ │ │ └── impl
│ │ │ │ │ │ ├── BinRaster.inl
│ │ │ │ │ │ ├── Buffer.cpp
│ │ │ │ │ │ ├── Buffer.hpp
│ │ │ │ │ │ ├── CoarseRaster.inl
│ │ │ │ │ │ ├── Constants.hpp
│ │ │ │ │ │ ├── CudaRaster.cpp
│ │ │ │ │ │ ├── Defs.hpp
│ │ │ │ │ │ ├── FineRaster.inl
│ │ │ │ │ │ ├── PrivateDefs.hpp
│ │ │ │ │ │ ├── RasterImpl.cpp
│ │ │ │ │ │ ├── RasterImpl.cu
│ │ │ │ │ │ ├── RasterImpl.hpp
│ │ │ │ │ │ ├── TriangleSetup.inl
│ │ │ │ │ │ └── Util.inl
│ │ │ │ ├── framework.h
│ │ │ │ ├── glutil.cpp
│ │ │ │ ├── glutil.h
│ │ │ │ ├── glutil_extlist.h
│ │ │ │ ├── interpolate.cu
│ │ │ │ ├── interpolate.h
│ │ │ │ ├── rasterize.cu
│ │ │ │ ├── rasterize.h
│ │ │ │ ├── rasterize_gl.cpp
│ │ │ │ ├── rasterize_gl.h
│ │ │ │ ├── texture.cpp
│ │ │ │ ├── texture.cu
│ │ │ │ └── texture.h
│ │ │ ├── lib
│ │ │ │ └── setgpu.lib
│ │ │ ├── tensorflow
│ │ │ │ ├── __init__.py
│ │ │ │ ├── ops.py
│ │ │ │ ├── plugin_loader.py
│ │ │ │ ├── tf_all.cu
│ │ │ │ ├── tf_antialias.cu
│ │ │ │ ├── tf_interpolate.cu
│ │ │ │ ├── tf_rasterize.cu
│ │ │ │ └── tf_texture.cu
│ │ │ └── torch
│ │ │ │ ├── __init__.py
│ │ │ │ ├── ops.py
│ │ │ │ ├── torch_antialias.cpp
│ │ │ │ ├── torch_bindings.cpp
│ │ │ │ ├── torch_bindings_gl.cpp
│ │ │ │ ├── torch_common.inl
│ │ │ │ ├── torch_interpolate.cpp
│ │ │ │ ├── torch_rasterize.cpp
│ │ │ │ ├── torch_rasterize_gl.cpp
│ │ │ │ ├── torch_texture.cpp
│ │ │ │ └── torch_types.h
│ │ ├── run_sample.sh
│ │ ├── samples
│ │ │ ├── data
│ │ │ │ ├── NOTICE.txt
│ │ │ │ ├── cube_c.npz
│ │ │ │ ├── cube_d.npz
│ │ │ │ ├── cube_p.npz
│ │ │ │ ├── earth.npz
│ │ │ │ └── envphong.npz
│ │ │ ├── tensorflow
│ │ │ │ ├── cube.py
│ │ │ │ ├── earth.py
│ │ │ │ ├── envphong.py
│ │ │ │ ├── pose.py
│ │ │ │ ├── triangle.py
│ │ │ │ └── util.py
│ │ │ └── torch
│ │ │ │ ├── cube.py
│ │ │ │ ├── earth.py
│ │ │ │ ├── envphong.py
│ │ │ │ ├── pose.py
│ │ │ │ ├── triangle.py
│ │ │ │ └── util.py
│ │ └── setup.py
│ ├── options
│ │ ├── __init__.py
│ │ ├── base_options.py
│ │ ├── test_options.py
│ │ └── train_options.py
│ ├── test.py
│ ├── test_video.py
│ ├── test_video_noAlign.py
│ ├── train.py
│ └── util
│ │ ├── BBRegressorParam_r.mat
│ │ ├── __init__.py
│ │ ├── detect_lm68.py
│ │ ├── generate_list.py
│ │ ├── html.py
│ │ ├── load_mats.py
│ │ ├── nvdiffrast.py
│ │ ├── preprocess.py
│ │ ├── skin_mask.py
│ │ ├── test_mean_face.txt
│ │ ├── util.py
│ │ └── visualizer.py
├── FaceVerse
│ ├── FaceVerseModel_v3.py
│ ├── __init__.py
│ ├── renderer.py
│ └── v3
│ │ ├── dense_uv_expanded_mask_onlyFace.png
│ │ ├── fv2fl_30.npy
│ │ └── v31_face_mask_new.npy
├── Hillary.mp4
├── Obama.mp4
├── align_in_the_wild.py
├── batch_mtcnn_video.py
├── make_dataset_pipe.py
├── preprocess.py
├── preprocess_faceverse.py
├── preprocess_person_video_dataset.py
└── render_utils
│ ├── ortho_renderer.py
│ └── renderer.py
├── dnnlib
├── __init__.py
└── util.py
├── encoder_inversion
├── config
│ ├── train_e4e_real.yaml
│ ├── train_textureUnet_real.yaml
│ └── train_textureUnet_video.yaml
├── criteria
│ ├── __init__.py
│ ├── contextual_loss
│ │ ├── __init__.py
│ │ ├── config.py
│ │ ├── functional.py
│ │ ├── modules
│ │ │ ├── __init__.py
│ │ │ ├── contextual.py
│ │ │ ├── contextual_bilateral.py
│ │ │ └── vgg.py
│ │ └── test_cx.py
│ ├── cx_loss.py
│ ├── id_loss.py
│ ├── lpips
│ │ ├── __init__.py
│ │ ├── lpips.py
│ │ ├── networks.py
│ │ └── utils.py
│ ├── moco_loss.py
│ └── ms_ssim.py
├── dataset_video.py
├── models
│ ├── DeepLabV3Plus
│ │ ├── .gitignore
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── datasets
│ │ │ ├── __init__.py
│ │ │ ├── cityscapes.py
│ │ │ ├── data
│ │ │ │ └── train_aug.txt
│ │ │ ├── utils.py
│ │ │ └── voc.py
│ │ ├── main.py
│ │ ├── metrics
│ │ │ ├── __init__.py
│ │ │ └── stream_metrics.py
│ │ ├── network
│ │ │ ├── __init__.py
│ │ │ ├── _deeplab.py
│ │ │ ├── backbone
│ │ │ │ ├── __init__.py
│ │ │ │ ├── hrnetv2.py
│ │ │ │ ├── mobilenetv2.py
│ │ │ │ ├── resnet.py
│ │ │ │ └── xception.py
│ │ │ ├── modeling.py
│ │ │ └── utils.py
│ │ ├── predict.py
│ │ ├── requirements.txt
│ │ ├── samples
│ │ │ ├── 114_image.png
│ │ │ ├── 114_overlay.png
│ │ │ ├── 114_pred.png
│ │ │ ├── 114_target.png
│ │ │ ├── 1_image.png
│ │ │ ├── 1_overlay.png
│ │ │ ├── 1_pred.png
│ │ │ ├── 1_target.png
│ │ │ ├── 23_image.png
│ │ │ ├── 23_overlay.png
│ │ │ ├── 23_pred.png
│ │ │ ├── 23_target.png
│ │ │ ├── city_1_overlay.png
│ │ │ ├── city_1_target.png
│ │ │ ├── city_6_overlay.png
│ │ │ ├── city_6_target.png
│ │ │ └── visdom-screenshoot.png
│ │ └── utils
│ │ │ ├── __init__.py
│ │ │ ├── ext_transforms.py
│ │ │ ├── loss.py
│ │ │ ├── scheduler.py
│ │ │ ├── utils.py
│ │ │ └── visualizer.py
│ ├── attention.py
│ ├── e4e.py
│ ├── helpers.py
│ ├── mmseg
│ │ └── mix_transformer.py
│ ├── model_irse.py
│ ├── networks_styleunet.py
│ ├── unet_encoders.py
│ ├── unet_transformer.py
│ ├── uvnet.py
│ └── uvnet_new.py
├── restyle_w_loss.py
├── train.py
├── training_loop.py
├── training_loop_unet_interval.py
├── training_loop_video.py
├── unet_loss.py
├── video_loss.py
├── w_loss.py
├── w_loss_bk.py
├── w_loss_new.py
└── w_loss_ori.py
├── environment.yml
├── eval_seq.py
├── eval_updated_os.py
├── infer.sh
├── inversion
├── configs
│ ├── __init__.py
│ ├── global_config.py
│ ├── hyperparameters.py
│ └── paths_config.py
├── criteria
│ ├── __init__.py
│ └── localitly_regulizer.py
├── dataset.py
├── dataset_new.py
├── flow_util.py
├── model_utils.py
├── projectors
│ ├── __init__.py
│ └── w_plus_projector_ide3d.py
└── volumetric_rendering.py
├── legacy.py
├── metrics
├── __init__.py
├── equivariance.py
├── frechet_inception_distance.py
├── inception_score.py
├── kernel_inception_distance.py
├── metric_main.py
├── metric_utils.py
├── perceptual_path_length.py
└── precision_recall.py
├── reenact_avatar_next3d.py
├── torch_utils
├── __init__.py
├── custom_ops.py
├── debug_utils.py
├── misc.py
├── ops
│ ├── __init__.py
│ ├── bias_act.cpp
│ ├── bias_act.cu
│ ├── bias_act.h
│ ├── bias_act.py
│ ├── conv2d_gradfix.py
│ ├── conv2d_resample.py
│ ├── filtered_lrelu.cpp
│ ├── filtered_lrelu.cu
│ ├── filtered_lrelu.h
│ ├── filtered_lrelu.py
│ ├── filtered_lrelu_ns.cu
│ ├── filtered_lrelu_rd.cu
│ ├── filtered_lrelu_wr.cu
│ ├── fma.py
│ ├── grid_sample_gradfix.py
│ ├── upfirdn2d.cpp
│ ├── upfirdn2d.cu
│ ├── upfirdn2d.h
│ └── upfirdn2d.py
├── persistence.py
└── training_stats.py
├── train_3dgan.sh
├── train_avatar_texture.py
├── train_inversion.sh
├── training
├── __init__.py
├── augment.py
├── crosssection_utils.py
├── dual_discriminator.py
└── networks_stylegan2.py
└── training_avatar_texture
├── __init__.py
├── camera_utils.py
├── dataset_new.py
├── dual_discriminator.py
├── embedder.py
├── loss.py
├── networks_stylegan2.py
├── networks_stylegan2_new.py
├── networks_stylegan2_next3d.py
├── networks_stylegan2_styleunet_next3d.py
├── networks_stylegan3.py
├── next3d_triplane.py
├── superresolution.py
├── training_loop.py
├── triplane.py
├── triplane_v20.py
└── volumetric_rendering
├── __init__.py
├── math_utils.py
├── ortho_renderer.py
├── ray_marcher.py
├── ray_sampler.py
├── renderer.py
└── renderer_next3d.py
/assets/teaser.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/assets/teaser.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 Sicheng Xu
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/data/image_folder.py:
--------------------------------------------------------------------------------
1 | """A modified image folder class
2 |
3 | We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py)
4 | so that this class can load images from both the current directory and its subdirectories.
5 | """
6 | import numpy as np
7 | import torch.utils.data as data
8 |
9 | from PIL import Image
10 | import os
11 | import os.path
12 |
13 | IMG_EXTENSIONS = [
14 | '.jpg', '.JPG', '.jpeg', '.JPEG',
15 | '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
16 | '.tif', '.TIF', '.tiff', '.TIFF',
17 | ]
18 |
19 |
20 | def is_image_file(filename):
21 | return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
22 |
23 |
24 | def make_dataset(dir, max_dataset_size=float("inf")):
25 | images = []
26 | assert os.path.isdir(dir) or os.path.islink(dir), '%s is not a valid directory' % dir
27 |
28 | for root, _, fnames in sorted(os.walk(dir, followlinks=True)):
29 | for fname in fnames:
30 | if is_image_file(fname):
31 | path = os.path.join(root, fname)
32 | images.append(path)
33 | return images[:min(max_dataset_size, len(images))]
34 |
35 |
36 | def default_loader(path):
37 | return Image.open(path).convert('RGB')
38 |
39 |
40 | class ImageFolder(data.Dataset):
41 |
42 | def __init__(self, root, transform=None, return_paths=False,
43 | loader=default_loader):
44 | imgs = make_dataset(root)
45 | if len(imgs) == 0:
46 | raise(RuntimeError("Found 0 images in: " + root + "\n"
47 | "Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
48 |
49 | self.root = root
50 | self.imgs = imgs
51 | self.transform = transform
52 | self.return_paths = return_paths
53 | self.loader = loader
54 |
55 | def __getitem__(self, index):
56 | path = self.imgs[index]
57 | img = self.loader(path)
58 | if self.transform is not None:
59 | img = self.transform(img)
60 | if self.return_paths:
61 | return img, path
62 | else:
63 | return img
64 |
65 | def __len__(self):
66 | return len(self.imgs)
67 |
--------------------------------------------------------------------------------
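
A minimal usage sketch for the ImageFolder class above; the torchvision transform and the dataset path are illustrative assumptions, not taken from this repository:

import torchvision.transforms as T
from data.image_folder import ImageFolder

# Build a dataset over a hypothetical image folder; subdirectories are
# included because make_dataset() walks the tree recursively.
transform = T.Compose([T.Resize((256, 256)), T.ToTensor()])
dataset = ImageFolder(root='datasets/examples', transform=transform,
                      return_paths=True)

img, path = dataset[0]  # with return_paths=True, __getitem__ yields (image, path)
print(len(dataset), path, tuple(img.shape))
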
/data_preprocess/Deep3DFaceRecon_pytorch/data_preparation.py:
--------------------------------------------------------------------------------
1 | """This script is the data preparation script for Deep3DFaceRecon_pytorch
2 | """
3 |
4 | import os
5 | import numpy as np
6 | import argparse
7 | from util.detect_lm68 import detect_68p,load_lm_graph
8 | from util.skin_mask import get_skin_mask
9 | from util.generate_list import check_list, write_list
10 | import warnings
11 | warnings.filterwarnings("ignore")
12 |
13 | parser = argparse.ArgumentParser()
14 | parser.add_argument('--data_root', type=str, default='datasets', help='root directory for training data')
15 | parser.add_argument('--img_folder', nargs="+", required=True, help='folders of training images')
16 | parser.add_argument('--mode', type=str, default='train', help='train or val')
17 | opt = parser.parse_args()
18 |
19 | os.environ['CUDA_VISIBLE_DEVICES'] = '0'
20 |
21 | def data_prepare(folder_list,mode):
22 |
23 | lm_sess,input_op,output_op = load_lm_graph('./checkpoints/lm_model/68lm_detector.pb') # load a tensorflow version 68-landmark detector
24 |
25 | for img_folder in folder_list:
26 | detect_68p(img_folder,lm_sess,input_op,output_op) # detect landmarks for images
27 | get_skin_mask(img_folder) # generate skin attention mask for images
28 |
29 | # create files that record path to all training data
30 | msks_list = []
31 | for img_folder in folder_list:
32 | path = os.path.join(img_folder, 'mask')
33 | msks_list += ['/'.join([img_folder, 'mask', i]) for i in sorted(os.listdir(path)) if 'jpg' in i or
34 | 'png' in i or 'jpeg' in i or 'PNG' in i]
35 |
36 | imgs_list = [i.replace('mask/', '') for i in msks_list]
37 | lms_list = [i.replace('mask', 'landmarks') for i in msks_list]
38 | lms_list = ['.'.join(i.split('.')[:-1]) + '.txt' for i in lms_list]
39 |
40 | lms_list_final, imgs_list_final, msks_list_final = check_list(lms_list, imgs_list, msks_list) # check if the path is valid
41 | write_list(lms_list_final, imgs_list_final, msks_list_final, mode=mode) # save files
42 |
43 | if __name__ == '__main__':
44 | print('Datasets:',opt.img_folder)
45 | data_prepare([os.path.join(opt.data_root,folder) for folder in opt.img_folder],opt.mode)
46 |
--------------------------------------------------------------------------------
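
To make the path bookkeeping in data_prepare above concrete, here is the mask-to-image and mask-to-landmark derivation from lines 36-38 applied to a single hypothetical mask file:

# Worked example of the path derivation in data_prepare; the folder and
# file names are hypothetical.
mask_path = 'datasets/examples/mask/000002.jpg'
img_path = mask_path.replace('mask/', '')             # datasets/examples/000002.jpg
lm_path = mask_path.replace('mask', 'landmarks')      # datasets/examples/landmarks/000002.jpg
lm_path = '.'.join(lm_path.split('.')[:-1]) + '.txt'  # datasets/examples/landmarks/000002.txt
print(img_path, lm_path)
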
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/000002.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/000002.jpg
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/000006.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/000006.jpg
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/000007.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/000007.jpg
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/000031.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/000031.jpg
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/000033.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/000033.jpg
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/000037.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/000037.jpg
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/000050.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/000050.jpg
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/000055.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/000055.jpg
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/000114.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/000114.jpg
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/000125.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/000125.jpg
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/000126.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/000126.jpg
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/015259.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/015259.jpg
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/015270.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/015270.jpg
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/015309.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/015309.jpg
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/015310.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/015310.jpg
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/015316.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/015316.jpg
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/015384.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/015384.jpg
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/detections/000002.txt:
--------------------------------------------------------------------------------
1 | 142.84 207.18
2 | 222.02 203.9
3 | 159.24 253.57
4 | 146.59 290.93
5 | 227.52 284.74
6 |
--------------------------------------------------------------------------------
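
Each detections/*.txt file in this folder stores one facial landmark per line as space-separated x y pixel coordinates, five landmarks per image (presumably the usual two eyes, nose tip, and two mouth corners, though that ordering is an assumption here). A minimal parser:

import numpy as np

def load_detection(path):
    # Parse a detections/*.txt file into a (5, 2) array of (x, y) points.
    with open(path) as f:
        points = [line.split() for line in f if line.strip()]
    return np.asarray(points, dtype=np.float32)

lm5 = load_detection('datasets/examples/detections/000002.txt')
print(lm5.shape)  # (5, 2)
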
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/detections/000006.txt:
--------------------------------------------------------------------------------
1 | 199.93 158.28
2 | 255.34 166.54
3 | 236.08 198.92
4 | 198.83 229.24
5 | 245.23 234.52
6 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/detections/000007.txt:
--------------------------------------------------------------------------------
1 | 129.36 198.28
2 | 204.47 191.47
3 | 164.42 240.51
4 | 140.74 277.77
5 | 205.4 270.9
6 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/detections/000031.txt:
--------------------------------------------------------------------------------
1 | 151.23 240.71
2 | 274.05 235.52
3 | 217.37 305.99
4 | 158.03 346.06
5 | 272.17 341.09
6 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/detections/000033.txt:
--------------------------------------------------------------------------------
1 | 119.09 94.291
2 | 158.31 96.472
3 | 136.76 121.4
4 | 119.33 134.49
5 | 154.66 136.68
6 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/detections/000037.txt:
--------------------------------------------------------------------------------
1 | 147.37 159.39
2 | 196.94 163.26
3 | 190.68 194.36
4 | 153.72 228.44
5 | 193.94 229.7
6 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/detections/000050.txt:
--------------------------------------------------------------------------------
1 | 150.4 94.799
2 | 205.14 102.07
3 | 179.54 131.16
4 | 144.45 147.42
5 | 193.39 154.14
6 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/detections/000055.txt:
--------------------------------------------------------------------------------
1 | 114.26 193.42
2 | 205.8 190.27
3 | 154.15 244.02
4 | 124.69 295.22
5 | 200.88 292.69
6 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/detections/000114.txt:
--------------------------------------------------------------------------------
1 | 217.52 152.95
2 | 281.48 147.14
3 | 253.02 196.03
4 | 225.79 221.6
5 | 288.25 214.44
6 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/detections/000125.txt:
--------------------------------------------------------------------------------
1 | 90.928 99.858
2 | 146.87 100.33
3 | 114.22 130.36
4 | 91.579 153.32
5 | 143.63 153.56
6 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/detections/000126.txt:
--------------------------------------------------------------------------------
1 | 307.56 166.54
2 | 387.06 159.62
3 | 335.52 222.26
4 | 319.3 248.85
5 | 397.71 239.14
6 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/detections/015259.txt:
--------------------------------------------------------------------------------
1 | 226.38 193.65
2 | 319.12 208.97
3 | 279.99 245.88
4 | 213.79 290.55
5 | 303.03 302.1
6 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/detections/015270.txt:
--------------------------------------------------------------------------------
1 | 208.4 410.08
2 | 364.41 388.68
3 | 291.6 503.57
4 | 244.82 572.86
5 | 383.18 553.49
6 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/detections/015309.txt:
--------------------------------------------------------------------------------
1 | 284.61 496.57
2 | 562.77 550.78
3 | 395.85 712.84
4 | 238.92 786.8
5 | 495.61 827.22
6 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/detections/015310.txt:
--------------------------------------------------------------------------------
1 | 153.95 153.43
2 | 211.13 161.54
3 | 197.28 190.26
4 | 150.82 215.98
5 | 202.32 223.12
6 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/detections/015316.txt:
--------------------------------------------------------------------------------
1 | 481.31 396.88
2 | 667.75 392.43
3 | 557.81 440.55
4 | 490.44 586.28
5 | 640.56 583.2
6 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/detections/015384.txt:
--------------------------------------------------------------------------------
1 | 191.79 143.97
2 | 271.86 151.23
3 | 191.25 210.29
4 | 187.82 257.12
5 | 258.82 261.96
6 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/detections/vd006.txt:
--------------------------------------------------------------------------------
1 | 123.12 117.58
2 | 176.59 122.09
3 | 126.99 144.68
4 | 117.61 183.43
5 | 163.94 186.41
6 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/detections/vd025.txt:
--------------------------------------------------------------------------------
1 | 180.12 116.13
2 | 263.18 98.397
3 | 230.48 154.72
4 | 201.37 199.01
5 | 279.18 182.56
6 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/detections/vd026.txt:
--------------------------------------------------------------------------------
1 | 171.27 263.54
2 | 286.58 263.88
3 | 203.35 333.02
4 | 170.6 389.42
5 | 281.73 386.84
6 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/detections/vd034.txt:
--------------------------------------------------------------------------------
1 | 136.01 167.83
2 | 195.25 151.71
3 | 152.89 191.45
4 | 149.85 235.5
5 | 201.16 222.8
6 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/detections/vd051.txt:
--------------------------------------------------------------------------------
1 | 161.92 292.04
2 | 254.21 283.81
3 | 212.75 342.06
4 | 170.78 387.28
5 | 254.6 379.82
6 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/detections/vd070.txt:
--------------------------------------------------------------------------------
1 | 276.53 290.35
2 | 383.38 294.75
3 | 314.48 354.66
4 | 275.08 407.72
5 | 364.94 411.48
6 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/detections/vd092.txt:
--------------------------------------------------------------------------------
1 | 108.59 149.07
2 | 157.35 143.85
3 | 134.4 173.2
4 | 117.88 200.79
5 | 159.56 196.36
6 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/detections/vd102.txt:
--------------------------------------------------------------------------------
1 | 121.62 225.96
2 | 186.73 223.07
3 | 162.99 269.82
4 | 132.12 302.62
5 | 186.42 299.21
6 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/vd006.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/vd006.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/vd025.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/vd025.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/vd026.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/vd026.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/vd034.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/vd034.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/vd051.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/vd051.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/vd070.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/vd070.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/vd092.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/vd092.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/vd102.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/datasets/examples/vd102.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/environment.yml:
--------------------------------------------------------------------------------
1 | name: deep3d_pytorch
2 | channels:
3 | - pytorch
4 | - conda-forge
5 | - defaults
6 | dependencies:
7 | - python=3.6
8 | - pytorch=1.6.0
9 | - torchvision=0.7.0
10 | - numpy=1.18.1
11 | - scikit-image=0.16.2
12 | - scipy=1.4.1
13 | - pillow=6.2.1
14 | - pip=20.0.2
15 | - ipython=7.13.0
16 | - yaml=0.1.7
17 | - pip:
18 | - matplotlib==2.2.5
19 | - opencv-python==3.4.9.33
20 | - tensorboard==1.15.0
21 | - tensorflow==1.15.0
22 | - kornia==0.5.5
23 | - dominate==2.6.0
24 | - trimesh==3.9.20
--------------------------------------------------------------------------------
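
A quick sanity check that an activated environment matches the core pins in environment.yml above (version strings copied from that file):

import torch
import torchvision

# environment.yml pins pytorch=1.6.0 and torchvision=0.7.0.
assert torch.__version__.startswith('1.6'), torch.__version__
assert torchvision.__version__.startswith('0.7'), torchvision.__version__
print('Environment is consistent with environment.yml')
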
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/3millions.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # configs for test speed
4 |
5 | config = edict()
6 | config.margin_list = (1.0, 0.0, 0.4)
7 | config.network = "mbf"
8 | config.resume = False
9 | config.output = None
10 | config.embedding_size = 512
11 | config.sample_rate = 0.1
12 | config.fp16 = True
13 | config.momentum = 0.9
14 | config.weight_decay = 5e-4
15 | config.batch_size = 512 # total_batch_size = batch_size * num_gpus
16 | config.lr = 0.1 # batch size is 512
17 |
18 | config.rec = "synthetic"
19 | config.num_classes = 30 * 10000
20 | config.num_image = 100000
21 | config.num_epoch = 30
22 | config.warmup_epoch = -1
23 | config.val_targets = []
24 |
--------------------------------------------------------------------------------
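
The batch_size in the config above is per GPU, as its inline comment notes (total_batch_size = batch_size * num_gpus); for example, under an assumed 8-GPU run:

# Effective batch size for the speed-test config; num_gpus = 8 is an assumption.
batch_size = 512
num_gpus = 8
total_batch_size = batch_size * num_gpus
print(total_batch_size)  # 4096
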
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/__init__.py
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/base.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 |
9 | # Margin-based softmax
10 | config.margin_list = (1.0, 0.5, 0.0)
11 | config.network = "r50"
12 | config.resume = False
13 | config.save_all_states = False
14 | config.output = "ms1mv3_arcface_r50"
15 |
16 | config.embedding_size = 512
17 |
18 | # Partial FC
19 | config.sample_rate = 1
20 | config.interclass_filtering_threshold = 0
21 |
22 | config.fp16 = False
23 | config.batch_size = 128
24 |
25 | # For SGD
26 | config.optimizer = "sgd"
27 | config.lr = 0.1
28 | config.momentum = 0.9
29 | config.weight_decay = 5e-4
30 |
31 | # For AdamW
32 | # config.optimizer = "adamw"
33 | # config.lr = 0.001
34 | # config.weight_decay = 0.1
35 |
36 | config.verbose = 2000
37 | config.frequent = 10
38 |
39 | # For Large Scale Dataset, such as WebFace42M
40 | config.dali = False
41 |
42 | # Gradient ACC
43 | config.gradient_acc = 1
44 |
45 | # setup seed
46 | config.seed = 2048
47 |
48 | # dataloader num_workers
49 | config.num_workers = 2
50 |
--------------------------------------------------------------------------------
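
The per-dataset files that follow all populate an EasyDict config like the one above. A plausible way to select one by name at training time (a sketch only; the repository's actual loader in utils/utils_config.py may differ) is to import the module and read its config attribute:

import importlib

def load_config(name):
    # e.g. name = 'glint360k_r50' resolves to configs/glint360k_r50.py
    module = importlib.import_module(f'configs.{name}')
    return module.config

cfg = load_config('glint360k_r50')
print(cfg.network, cfg.batch_size, cfg.lr)
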
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/glint360k_mbf.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "mbf"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 1.0
14 | config.fp16 = True
15 | config.momentum = 0.9
16 | config.weight_decay = 1e-4
17 | config.batch_size = 128
18 | config.lr = 0.1
19 | config.verbose = 2000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/glint360k"
23 | config.num_classes = 360232
24 | config.num_image = 17091657
25 | config.num_epoch = 20
26 | config.warmup_epoch = 0
27 | config.val_targets = ['lfw', 'cfp_fp', "agedb_30"]
28 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/glint360k_r100.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "r100"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 1.0
14 | config.fp16 = True
15 | config.momentum = 0.9
16 | config.weight_decay = 1e-4
17 | config.batch_size = 128
18 | config.lr = 0.1
19 | config.verbose = 2000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/glint360k"
23 | config.num_classes = 360232
24 | config.num_image = 17091657
25 | config.num_epoch = 20
26 | config.warmup_epoch = 0
27 | config.val_targets = ['lfw', 'cfp_fp', "agedb_30"]
28 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/glint360k_r50.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "r50"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 1.0
14 | config.fp16 = True
15 | config.momentum = 0.9
16 | config.weight_decay = 1e-4
17 | config.batch_size = 128
18 | config.lr = 0.1
19 | config.verbose = 2000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/glint360k"
23 | config.num_classes = 360232
24 | config.num_image = 17091657
25 | config.num_epoch = 20
26 | config.warmup_epoch = 0
27 | config.val_targets = ['lfw', 'cfp_fp', "agedb_30"]
28 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/ms1mv2_mbf.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.5, 0.0)
9 | config.network = "mbf"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 1.0
14 | config.fp16 = True
15 | config.momentum = 0.9
16 | config.weight_decay = 1e-4
17 | config.batch_size = 128
18 | config.lr = 0.1
19 | config.verbose = 2000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/faces_emore"
23 | config.num_classes = 85742
24 | config.num_image = 5822653
25 | config.num_epoch = 40
26 | config.warmup_epoch = 0
27 | config.val_targets = ['lfw', 'cfp_fp', "agedb_30"]
28 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/ms1mv2_r100.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.5, 0.0)
9 | config.network = "r100"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 1.0
14 | config.fp16 = True
15 | config.momentum = 0.9
16 | config.weight_decay = 5e-4
17 | config.batch_size = 128
18 | config.lr = 0.1
19 | config.verbose = 2000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/faces_emore"
23 | config.num_classes = 85742
24 | config.num_image = 5822653
25 | config.num_epoch = 20
26 | config.warmup_epoch = 0
27 | config.val_targets = ['lfw', 'cfp_fp', "agedb_30"]
28 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/ms1mv2_r50.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.5, 0.0)
9 | config.network = "r50"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 1.0
14 | config.fp16 = True
15 | config.momentum = 0.9
16 | config.weight_decay = 5e-4
17 | config.batch_size = 128
18 | config.lr = 0.1
19 | config.verbose = 2000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/faces_emore"
23 | config.num_classes = 85742
24 | config.num_image = 5822653
25 | config.num_epoch = 20
26 | config.warmup_epoch = 0
27 | config.val_targets = ['lfw', 'cfp_fp', "agedb_30"]
28 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/ms1mv3_mbf.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.5, 0.0)
9 | config.network = "mbf"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 1.0
14 | config.fp16 = True
15 | config.momentum = 0.9
16 | config.weight_decay = 1e-4
17 | config.batch_size = 128
18 | config.lr = 0.1
19 | config.verbose = 2000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/ms1m-retinaface-t1"
23 | config.num_classes = 93431
24 | config.num_image = 5179510
25 | config.num_epoch = 40
26 | config.warmup_epoch = 0
27 | config.val_targets = ['lfw', 'cfp_fp', "agedb_30"]
28 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/ms1mv3_r100.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.5, 0.0)
9 | config.network = "r100"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 1.0
14 | config.fp16 = True
15 | config.momentum = 0.9
16 | config.weight_decay = 5e-4
17 | config.batch_size = 128
18 | config.lr = 0.1
19 | config.verbose = 2000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/ms1m-retinaface-t1"
23 | config.num_classes = 93431
24 | config.num_image = 5179510
25 | config.num_epoch = 20
26 | config.warmup_epoch = 0
27 | config.val_targets = ['lfw', 'cfp_fp', "agedb_30"]
28 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/ms1mv3_r50.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.5, 0.0)
9 | config.network = "r50"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 1.0
14 | config.fp16 = True
15 | config.momentum = 0.9
16 | config.weight_decay = 5e-4
17 | config.batch_size = 128
18 | config.lr = 0.1
19 | config.verbose = 2000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/ms1m-retinaface-t1"
23 | config.num_classes = 93431
24 | config.num_image = 5179510
25 | config.num_epoch = 20
26 | config.warmup_epoch = 0
27 | config.val_targets = ['lfw', 'cfp_fp', "agedb_30"]
28 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf12m_conflict_r50.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "r50"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 1.0
14 | config.interclass_filtering_threshold = 0
15 | config.fp16 = True
16 | config.weight_decay = 5e-4
17 | config.batch_size = 128
18 | config.optimizer = "sgd"
19 | config.lr = 0.1
20 | config.verbose = 2000
21 | config.dali = False
22 |
23 | config.rec = "/train_tmp/WebFace12M_Conflict"
24 | config.num_classes = 1017970
25 | config.num_image = 12720066
26 | config.num_epoch = 20
27 | config.warmup_epoch = config.num_epoch // 10
28 | config.val_targets = []
29 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf12m_conflict_r50_pfc03_filter04.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "r50"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 0.3
14 | config.interclass_filtering_threshold = 0.4
15 | config.fp16 = True
16 | config.weight_decay = 5e-4
17 | config.batch_size = 128
18 | config.optimizer = "sgd"
19 | config.lr = 0.1
20 | config.verbose = 2000
21 | config.dali = False
22 |
23 | config.rec = "/train_tmp/WebFace12M_Conflict"
24 | config.num_classes = 1017970
25 | config.num_image = 12720066
26 | config.num_epoch = 20
27 | config.warmup_epoch = config.num_epoch // 10
28 | config.val_targets = []
29 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf12m_flip_pfc01_filter04_r50.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "r50"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 0.1
14 | config.interclass_filtering_threshold = 0.4
15 | config.fp16 = True
16 | config.weight_decay = 5e-4
17 | config.batch_size = 128
18 | config.optimizer = "sgd"
19 | config.lr = 0.1
20 | config.verbose = 2000
21 | config.dali = False
22 |
23 | config.rec = "/train_tmp/WebFace12M_FLIP40"
24 | config.num_classes = 617970
25 | config.num_image = 12720066
26 | config.num_epoch = 20
27 | config.warmup_epoch = config.num_epoch // 10
28 | config.val_targets = []
29 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf12m_flip_r50.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "r50"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 1.0
14 | config.interclass_filtering_threshold = 0
15 | config.fp16 = True
16 | config.weight_decay = 5e-4
17 | config.batch_size = 128
18 | config.optimizer = "sgd"
19 | config.lr = 0.1
20 | config.verbose = 2000
21 | config.dali = False
22 |
23 | config.rec = "/train_tmp/WebFace12M_FLIP40"
24 | config.num_classes = 617970
25 | config.num_image = 12720066
26 | config.num_epoch = 20
27 | config.warmup_epoch = config.num_epoch // 10
28 | config.val_targets = []
29 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf12m_mbf.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "mbf"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 1.0
14 | config.interclass_filtering_threshold = 0
15 | config.fp16 = True
16 | config.weight_decay = 1e-4
17 | config.batch_size = 128
18 | config.optimizer = "sgd"
19 | config.lr = 0.1
20 | config.verbose = 2000
21 | config.dali = False
22 |
23 | config.rec = "/train_tmp/WebFace12M"
24 | config.num_classes = 617970
25 | config.num_image = 12720066
26 | config.num_epoch = 20
27 | config.warmup_epoch = 0
28 | config.val_targets = []
29 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf12m_pfc02_r100.py:
--------------------------------------------------------------------------------
1 |
2 | from easydict import EasyDict as edict
3 |
4 | # make training faster
5 | # our RAM is 256G
6 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
7 |
8 | config = edict()
9 | config.margin_list = (1.0, 0.0, 0.4)
10 | config.network = "r100"
11 | config.resume = False
12 | config.output = None
13 | config.embedding_size = 512
14 | config.sample_rate = 0.2
15 | config.interclass_filtering_threshold = 0
16 | config.fp16 = True
17 | config.weight_decay = 5e-4
18 | config.batch_size = 128
19 | config.optimizer = "sgd"
20 | config.lr = 0.1
21 | config.verbose = 2000
22 | config.dali = False
23 |
24 | config.rec = "/train_tmp/WebFace12M"
25 | config.num_classes = 617970
26 | config.num_image = 12720066
27 | config.num_epoch = 20
28 | config.warmup_epoch = 0
29 | config.val_targets = []
30 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf12m_r100.py:
--------------------------------------------------------------------------------
1 |
2 | from easydict import EasyDict as edict
3 |
4 | # make training faster
5 | # our RAM is 256G
6 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
7 |
8 | config = edict()
9 | config.margin_list = (1.0, 0.0, 0.4)
10 | config.network = "r100"
11 | config.resume = False
12 | config.output = None
13 | config.embedding_size = 512
14 | config.sample_rate = 1.0
15 | config.interclass_filtering_threshold = 0
16 | config.fp16 = True
17 | config.weight_decay = 5e-4
18 | config.batch_size = 128
19 | config.optimizer = "sgd"
20 | config.lr = 0.1
21 | config.verbose = 2000
22 | config.dali = False
23 |
24 | config.rec = "/train_tmp/WebFace12M"
25 | config.num_classes = 617970
26 | config.num_image = 12720066
27 | config.num_epoch = 20
28 | config.warmup_epoch = 0
29 | config.val_targets = []
30 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf12m_r50.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "r50"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 1.0
14 | config.interclass_filtering_threshold = 0
15 | config.fp16 = True
16 | config.weight_decay = 5e-4
17 | config.batch_size = 128
18 | config.optimizer = "sgd"
19 | config.lr = 0.1
20 | config.verbose = 2000
21 | config.dali = False
22 |
23 | config.rec = "/train_tmp/WebFace12M"
24 | config.num_classes = 617970
25 | config.num_image = 12720066
26 | config.num_epoch = 20
27 | config.warmup_epoch = 0
28 | config.val_targets = []
29 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf42m_pfc0008_32gpu_r100.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "r100"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 0
14 | config.fp16 = True
15 | config.momentum = 0.9
16 | config.weight_decay = 5e-4
17 | config.batch_size = 512
18 | config.lr = 0.4
19 | config.verbose = 2000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/WebFace42M"
23 | config.num_classes = 2059906
24 | config.num_image = 42474557
25 | config.num_epoch = 20
26 | config.warmup_epoch = config.num_epoch // 10
27 | config.val_targets = ["lfw", "cfp_fp", "agedb_30"]
28 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf42m_pfc02_16gpus_mbf_bs8k.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "mbf"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 0.2
14 | config.fp16 = True
15 | config.momentum = 0.9
16 | config.weight_decay = 1e-4
17 | config.batch_size = 512
18 | config.lr = 0.4
19 | config.verbose = 10000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/WebFace42M"
23 | config.num_classes = 2059906
24 | config.num_image = 42474557
25 | config.num_epoch = 20
26 | config.warmup_epoch = 2
27 | config.val_targets = []
28 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf42m_pfc02_16gpus_r100.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "r100"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 0.2
14 | config.fp16 = True
15 | config.momentum = 0.9
16 | config.weight_decay = 5e-4
17 | config.batch_size = 256
18 | config.lr = 0.3
19 | config.verbose = 2000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/WebFace42M"
23 | config.num_classes = 2059906
24 | config.num_image = 42474557
25 | config.num_epoch = 20
26 | config.warmup_epoch = 1
27 | config.val_targets = ["lfw", "cfp_fp", "agedb_30"]
28 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf42m_pfc02_16gpus_r50_bs8k.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "r50"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 0.2
14 | config.fp16 = True
15 | config.momentum = 0.9
16 | config.weight_decay = 5e-4
17 | config.batch_size = 512
18 | config.lr = 0.6
19 | config.verbose = 10000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/WebFace42M"
23 | config.num_classes = 2059906
24 | config.num_image = 42474557
25 | config.num_epoch = 20
26 | config.warmup_epoch = 4
27 | config.val_targets = ["lfw", "cfp_fp", "agedb_30"]
28 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf42m_pfc02_32gpus_r50_bs4k.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "r50"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 0.2
14 | config.fp16 = True
15 | config.momentum = 0.9
16 | config.weight_decay = 5e-4
17 | config.batch_size = 128
18 | config.lr = 0.4
19 | config.verbose = 10000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/WebFace42M"
23 | config.num_classes = 2059906
24 | config.num_image = 42474557
25 | config.num_epoch = 20
26 | config.warmup_epoch = 2
27 | config.val_targets = ["lfw", "cfp_fp", "agedb_30"]
28 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf42m_pfc02_8gpus_r50_bs4k.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "r50"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 0.2
14 | config.fp16 = True
15 | config.momentum = 0.9
16 | config.weight_decay = 5e-4
17 | config.batch_size = 512
18 | config.lr = 0.4
19 | config.verbose = 10000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/WebFace42M"
23 | config.num_classes = 2059906
24 | config.num_image = 42474557
25 | config.num_epoch = 20
26 | config.warmup_epoch = 2
27 | config.val_targets = ["lfw", "cfp_fp", "agedb_30"]
28 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf42m_pfc02_r100.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "r100"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 0.2
14 | config.fp16 = True
15 | config.momentum = 0.9
16 | config.weight_decay = 5e-4
17 | config.batch_size = 128
18 | config.lr = 0.1
19 | config.verbose = 10000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/WebFace42M"
23 | config.num_classes = 2059906
24 | config.num_image = 42474557
25 | config.num_epoch = 20
26 | config.warmup_epoch = 0
27 | config.val_targets = ['lfw', 'cfp_fp', "agedb_30"]
28 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf42m_pfc02_r100_16gpus.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "r100"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 0.2
14 | config.fp16 = True
15 | config.momentum = 0.9
16 | config.weight_decay = 5e-4
17 | config.batch_size = 128
18 | config.lr = 0.2
19 | config.verbose = 10000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/WebFace42M"
23 | config.num_classes = 2059906
24 | config.num_image = 42474557
25 | config.num_epoch = 20
26 | config.warmup_epoch = config.num_epoch // 10
27 | config.val_targets = ['lfw', 'cfp_fp', "agedb_30"]
28 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf42m_pfc02_r100_32gpus.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "r100"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 0.2
14 | config.fp16 = True
15 | config.momentum = 0.9
16 | config.weight_decay = 5e-4
17 | config.batch_size = 128
18 | config.lr = 0.4
19 | config.verbose = 10000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/WebFace42M"
23 | config.num_classes = 2059906
24 | config.num_image = 42474557
25 | config.num_epoch = 20
26 | config.warmup_epoch = config.num_epoch // 10
27 | config.val_targets = ['lfw', 'cfp_fp', "agedb_30"]
28 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf42m_pfc03_32gpu_r100.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "r100"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 0.3
14 | config.fp16 = True
15 | config.momentum = 0.9
16 | config.weight_decay = 5e-4
17 | config.batch_size = 128
18 | config.lr = 0.4
19 | config.verbose = 2000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/WebFace42M"
23 | config.num_classes = 2059906
24 | config.num_image = 42474557
25 | config.num_epoch = 20
26 | config.warmup_epoch = config.num_epoch // 10
27 | config.val_targets = ["lfw", "cfp_fp", "agedb_30"]
28 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf42m_pfc03_32gpu_r18.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "r18"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 0.3
14 | config.fp16 = True
15 | config.momentum = 0.9
16 | config.weight_decay = 5e-4
17 | config.batch_size = 128
18 | config.lr = 0.4
19 | config.verbose = 2000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/WebFace42M"
23 | config.num_classes = 2059906
24 | config.num_image = 42474557
25 | config.num_epoch = 20
26 | config.warmup_epoch = config.num_epoch // 10
27 | config.val_targets = ["lfw", "cfp_fp", "agedb_30"]
28 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf42m_pfc03_32gpu_r200.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "r200"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 0.3
14 | config.fp16 = True
15 | config.momentum = 0.9
16 | config.weight_decay = 5e-4
17 | config.batch_size = 128
18 | config.lr = 0.4
19 | config.verbose = 2000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/WebFace42M"
23 | config.num_classes = 2059906
24 | config.num_image = 42474557
25 | config.num_epoch = 20
26 | config.warmup_epoch = config.num_epoch // 10
27 | config.val_targets = ["lfw", "cfp_fp", "agedb_30"]
28 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf42m_pfc03_32gpu_r50.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "r50"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 0.3
14 | config.fp16 = True
15 | config.momentum = 0.9
16 | config.weight_decay = 5e-4
17 | config.batch_size = 128
18 | config.lr = 0.4
19 | config.verbose = 2000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/WebFace42M"
23 | config.num_classes = 2059906
24 | config.num_image = 42474557
25 | config.num_epoch = 20
26 | config.warmup_epoch = config.num_epoch // 10
27 | config.val_targets = ["lfw", "cfp_fp", "agedb_30"]
28 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf42m_pfc03_40epoch_64gpu_vit_b.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "vit_b_dp005_mask_005"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 0.3
14 | config.fp16 = True
15 | config.weight_decay = 0.1
16 | config.batch_size = 384
17 | config.optimizer = "adamw"
18 | config.lr = 0.001
19 | config.verbose = 2000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/WebFace42M"
23 | config.num_classes = 2059906
24 | config.num_image = 42474557
25 | config.num_epoch = 40
26 | config.warmup_epoch = config.num_epoch // 10
27 | config.val_targets = []
28 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf42m_pfc03_40epoch_64gpu_vit_l.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "vit_l_dp005_mask_005"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 0.3
14 | config.fp16 = True
15 | config.weight_decay = 0.1
16 | config.batch_size = 384
17 | config.optimizer = "adamw"
18 | config.lr = 0.001
19 | config.verbose = 2000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/WebFace42M"
23 | config.num_classes = 2059906
24 | config.num_image = 42474557
25 | config.num_epoch = 40
26 | config.warmup_epoch = config.num_epoch // 10
27 | config.val_targets = []
28 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf42m_pfc03_40epoch_64gpu_vit_s.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "vit_s_dp005_mask_0"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 0.3
14 | config.fp16 = True
15 | config.weight_decay = 0.1
16 | config.batch_size = 384
17 | config.optimizer = "adamw"
18 | config.lr = 0.001
19 | config.verbose = 2000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/WebFace42M"
23 | config.num_classes = 2059906
24 | config.num_image = 42474557
25 | config.num_epoch = 40
26 | config.warmup_epoch = config.num_epoch // 10
27 | config.val_targets = []
28 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf42m_pfc03_40epoch_64gpu_vit_t.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "vit_t_dp005_mask0"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 0.3
14 | config.fp16 = True
15 | config.weight_decay = 0.1
16 | config.batch_size = 384
17 | config.optimizer = "adamw"
18 | config.lr = 0.001
19 | config.verbose = 2000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/WebFace42M"
23 | config.num_classes = 2059906
24 | config.num_image = 42474557
25 | config.num_epoch = 40
26 | config.warmup_epoch = config.num_epoch // 10
27 | config.val_targets = []
28 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf42m_pfc03_40epoch_8gpu_vit_b.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "vit_b_dp005_mask_005"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 0.3
14 | config.fp16 = True
15 | config.weight_decay = 0.1
16 | config.batch_size = 256
17 | config.gradient_acc = 12 # total batch size is 256 * 12
18 | config.optimizer = "adamw"
19 | config.lr = 0.001
20 | config.verbose = 2000
21 | config.dali = False
22 |
23 | config.rec = "/train_tmp/WebFace42M"
24 | config.num_classes = 2059906
25 | config.num_image = 42474557
26 | config.num_epoch = 40
27 | config.warmup_epoch = config.num_epoch // 10
28 | config.val_targets = []
29 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf42m_pfc03_40epoch_8gpu_vit_t.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "vit_t_dp005_mask0"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 0.3
14 | config.fp16 = True
15 | config.weight_decay = 0.1
16 | config.batch_size = 512
17 | config.optimizer = "adamw"
18 | config.lr = 0.001
19 | config.verbose = 2000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/WebFace42M"
23 | config.num_classes = 2059906
24 | config.num_image = 42474557
25 | config.num_epoch = 40
26 | config.warmup_epoch = config.num_epoch // 10
27 | config.val_targets = []
28 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf4m_mbf.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "mbf"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 1.0
14 | config.fp16 = True
15 | config.momentum = 0.9
16 | config.weight_decay = 1e-4
17 | config.batch_size = 128
18 | config.lr = 0.1
19 | config.verbose = 2000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/WebFace4M"
23 | config.num_classes = 205990
24 | config.num_image = 4235242
25 | config.num_epoch = 20
26 | config.warmup_epoch = 0
27 | config.val_targets = ['lfw', 'cfp_fp', "agedb_30"]
28 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf4m_r100.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "r100"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 1.0
14 | config.fp16 = True
15 | config.momentum = 0.9
16 | config.weight_decay = 5e-4
17 | config.batch_size = 128
18 | config.lr = 0.1
19 | config.verbose = 2000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/WebFace4M"
23 | config.num_classes = 205990
24 | config.num_image = 4235242
25 | config.num_epoch = 20
26 | config.warmup_epoch = 0
27 | config.val_targets = ['lfw', 'cfp_fp', "agedb_30"]
28 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf4m_r50.py:
--------------------------------------------------------------------------------
1 | from easydict import EasyDict as edict
2 |
3 | # make training faster
4 | # our RAM is 256G
5 | # mount -t tmpfs -o size=140G tmpfs /train_tmp
6 |
7 | config = edict()
8 | config.margin_list = (1.0, 0.0, 0.4)
9 | config.network = "r50"
10 | config.resume = False
11 | config.output = None
12 | config.embedding_size = 512
13 | config.sample_rate = 1.0
14 | config.fp16 = True
15 | config.momentum = 0.9
16 | config.weight_decay = 5e-4
17 | config.batch_size = 128
18 | config.lr = 0.1
19 | config.verbose = 2000
20 | config.dali = False
21 |
22 | config.rec = "/train_tmp/WebFace4M"
23 | config.num_classes = 205990
24 | config.num_image = 4235242
25 | config.num_epoch = 20
26 | config.warmup_epoch = 0
27 | config.val_targets = ['lfw', 'cfp_fp', "agedb_30"]
28 |
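29 | # Usage sketch: these configs are consumed by train.py through
30 | # utils/utils_config.get_config, e.g. on a single node:
31 | #   python -m torch.distributed.launch --nproc_per_node=8 train.py configs/wf4m_r50
32 | # (see run.sh and dist.sh below for the launch wrappers).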
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/dist.sh:
--------------------------------------------------------------------------------
1 | ip_list=("ip1" "ip2" "ip3" "ip4")
2 | # Launch one 8-GPU worker group per node over ssh; node 0 (ip1) acts as the master.
3 | config=wf42m_pfc03_32gpu_r100
4 |
5 | for((node_rank=0;node_rank<${#ip_list[*]};node_rank++));
6 | do
7 | ssh face@${ip_list[node_rank]} "cd `pwd`;PATH=$PATH \
8 | CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \
9 | python -m torch.distributed.launch \
10 | --nproc_per_node=8 \
11 | --nnodes=${#ip_list[*]} \
12 | --node_rank=$node_rank \
13 | --master_addr=${ip_list[0]} \
14 | --master_port=22345 train.py configs/$config" &
15 | done
16 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/docs/eval.md:
--------------------------------------------------------------------------------
1 | ## Eval on ICCV2021-MFR
2 |
3 | Coming soon.
4 |
5 |
6 | ## Eval IJBC
7 | You can evaluate IJB-C with either PyTorch or ONNX.
8 |
9 |
10 | 1. Eval IJB-C with ONNX
11 | ```shell
12 | CUDA_VISIBLE_DEVICES=0 python onnx_ijbc.py --model-root ms1mv3_arcface_r50 --image-path IJB_release/IJBC --result-dir ms1mv3_arcface_r50
13 | ```
14 |
15 | 2. Eval IJB-C with PyTorch
16 | ```shell
17 | CUDA_VISIBLE_DEVICES=0,1 python eval_ijbc.py \
18 | --model-prefix ms1mv3_arcface_r50/backbone.pth \
19 | --image-path IJB_release/IJBC \
20 | --result-dir ms1mv3_arcface_r50 \
21 | --batch-size 128 \
22 | --job ms1mv3_arcface_r50 \
23 | --target IJBC \
24 | --network iresnet50
25 | ```
26 |
27 |
28 | ## Inference
29 |
30 | ```shell
31 | python inference.py --weight ms1mv3_arcface_r50/backbone.pth --network r50
32 | ```
33 |
34 |
35 | ## Result
36 |
37 | | Datasets | Backbone | **MFR-ALL** | IJB-C(1E-4) | IJB-C(1E-5) |
38 | |:---------------|:--------------------|:------------|:------------|:------------|
39 | | WF12M-PFC-0.05 | r100 | 94.05 | 97.51 | 95.75 |
40 | | WF12M-PFC-0.1 | r100 | 94.49 | 97.56 | 95.92 |
41 | | WF12M-PFC-0.2 | r100 | 94.75 | 97.60 | 95.90 |
42 | | WF12M-PFC-0.3 | r100 | 94.71 | 97.64 | 96.01 |
43 | | WF12M | r100 | 94.69 | 97.59 | 95.97 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/docs/install.md:
--------------------------------------------------------------------------------
1 | ## [v1.11.0](https://pytorch.org/)
2 |
3 | ## [v1.9.0](https://pytorch.org/get-started/previous-versions/#linux-and-windows-7)
4 | ### Linux and Windows
5 | ```shell
6 | # CUDA 11.1
7 | pip install torch==1.9.0+cu111 torchvision==0.10.0+cu111 torchaudio==0.9.0 -f https://download.pytorch.org/whl/torch_stable.html
8 |
9 | # CUDA 10.2
10 | pip install torch==1.9.0+cu102 torchvision==0.10.0+cu102 torchaudio==0.9.0 -f https://download.pytorch.org/whl/torch_stable.html
11 | ```
12 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/docs/install_dali.md:
--------------------------------------------------------------------------------
1 | TODO
2 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/docs/modelzoo.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/docs/modelzoo.md
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/docs/prepare_webface42m.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | ## 1. Download Datasets and Unzip
5 |
6 | Download WebFace42M from [https://www.face-benchmark.org/download.html](https://www.face-benchmark.org/download.html).
7 | After unarchiving, the raw `WebFace42M` data contains 10 directories:
8 | `WebFace4M` contains 1 directory: `0`.
9 | `WebFace12M` contains 3 directories: `0,1,2`.
10 | `WebFace42M` contains 10 directories: `0,1,2,3,4,5,6,7,8,9`.
11 |
12 | ## 2. Create Shuffled Rec File for DALI
13 |
14 | Note: a shuffled rec file is very important for DALI, and an unshuffled rec can cause performance degradation. The original insightface-style rec files
15 | are not supported by NVIDIA DALI, so you must use [mxnet.tools.im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) to generate a shuffled rec file.
16 |
17 | ```shell
18 | # directory and file layout for your dataset
19 | /WebFace42M_Root
20 | ├── 0_0_0000000
21 | │ ├── 0_0.jpg
22 | │ ├── 0_1.jpg
23 | │ ├── 0_2.jpg
24 | │ ├── 0_3.jpg
25 | │ └── 0_4.jpg
26 | ├── 0_0_0000001
27 | │ ├── 0_5.jpg
28 | │ ├── 0_6.jpg
29 | │ ├── 0_7.jpg
30 | │ ├── 0_8.jpg
31 | │ └── 0_9.jpg
32 | ├── 0_0_0000002
33 | │ ├── 0_10.jpg
34 | │ ├── 0_11.jpg
35 | │ ├── 0_12.jpg
36 | │ ├── 0_13.jpg
37 | │ ├── 0_14.jpg
38 | │ ├── 0_15.jpg
39 | │ ├── 0_16.jpg
40 | │ └── 0_17.jpg
41 | ├── 0_0_0000003
42 | │ ├── 0_18.jpg
43 | │ ├── 0_19.jpg
44 | │ └── 0_20.jpg
45 | ├── 0_0_0000004
46 |
47 |
48 |
49 | # 1) create train.lst using the following command
50 | python -m mxnet.tools.im2rec --list --recursive train WebFace42M_Root
51 |
52 | # 2) create train.rec and train.idx from train.lst using the following command
53 | python -m mxnet.tools.im2rec --num-thread 16 --quality 100 train WebFace42M_Root
54 | ```
55 |
56 | Finally, you will get three files: `train.lst`, `train.rec`, `train.idx`, of which `train.idx` and `train.rec` are used for training.
57 |
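58 | As a quick sanity check (a minimal sketch, assuming `mxnet` is installed and the files above are in the working directory), you can read one record back:
59 |
60 | ```python
61 | import mxnet as mx
62 |
63 | # open the indexed record read-only and decode the first entry
64 | record = mx.recordio.MXIndexedRecordIO('train.idx', 'train.rec', 'r')
65 | header, img = mx.recordio.unpack_img(record.read_idx(0))
66 | print(header.label, img.shape)
67 | ```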
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/eval/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/eval/__init__.py
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/flops.py:
--------------------------------------------------------------------------------
1 | from ptflops import get_model_complexity_info
2 | from backbones import get_model
3 | import argparse
4 |
5 | if __name__ == '__main__':
6 | parser = argparse.ArgumentParser(description='')
7 | parser.add_argument('n', type=str, default="r100")
8 | args = parser.parse_args()
9 | net = get_model(args.n)
10 | macs, params = get_model_complexity_info(
11 | net, (3, 112, 112), as_strings=False,
12 | print_per_layer_stat=True, verbose=True)
13 | gmacs = macs / (1000**3)
14 | print("%.3f GFLOPs"%gmacs)
15 | print("%.3f Mparams"%(params/(1000**2)))
16 |
17 | if hasattr(net, "extra_gflops"):
18 | print("%.3f Extra-GFLOPs"%net.extra_gflops)
19 | print("%.3f Total-GFLOPs"%(gmacs+net.extra_gflops))
20 |
21 |
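22 | # Usage sketch: python flops.py r50
23 | # reports the complexity of the chosen backbone at the 3x112x112 input size.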
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/inference.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | import cv2
4 | import numpy as np
5 | import torch
6 |
7 | from backbones import get_model
8 |
9 |
10 | @torch.no_grad()
11 | def inference(weight, name, img):
12 | if img is None:
13 | img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.uint8)
14 | else:
15 | img = cv2.imread(img)
16 | img = cv2.resize(img, (112, 112))
17 |
18 | img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
19 | img = np.transpose(img, (2, 0, 1))
20 | img = torch.from_numpy(img).unsqueeze(0).float()
21 | img.div_(255).sub_(0.5).div_(0.5)
22 | net = get_model(name, fp16=False)
23 | net.load_state_dict(torch.load(weight))
24 | net.eval()
25 | feat = net(img).numpy()
26 | print(feat)
27 |
28 |
29 | if __name__ == "__main__":
30 | parser = argparse.ArgumentParser(description='PyTorch ArcFace Training')
31 | parser.add_argument('--network', type=str, default='r50', help='backbone network')
32 | parser.add_argument('--weight', type=str, default='')
33 | parser.add_argument('--img', type=str, default=None)
34 | args = parser.parse_args()
35 | inference(args.weight, args.network, args.img)
36 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/lr_scheduler.py:
--------------------------------------------------------------------------------
1 | from torch.optim.lr_scheduler import _LRScheduler
2 |
3 |
4 | class PolyScheduler(_LRScheduler):
5 | def __init__(self, optimizer, base_lr, max_steps, warmup_steps, last_epoch=-1):
6 | self.base_lr = base_lr
7 | self.warmup_lr_init = 0.0001
8 | self.max_steps: int = max_steps
9 | self.warmup_steps: int = warmup_steps
10 | self.power = 2
11 | super(PolyScheduler, self).__init__(optimizer, -1, False)
12 | self.last_epoch = last_epoch
13 |
14 | def get_warmup_lr(self):
15 | alpha = float(self.last_epoch) / float(self.warmup_steps)
16 | return [self.base_lr * alpha for _ in self.optimizer.param_groups]
17 |
18 | def get_lr(self):
19 | if self.last_epoch == -1:
20 | return [self.warmup_lr_init for _ in self.optimizer.param_groups]
21 | if self.last_epoch < self.warmup_steps:
22 | return self.get_warmup_lr()
23 | else:
24 | alpha = pow(
25 | 1
26 | - float(self.last_epoch - self.warmup_steps)
27 | / float(self.max_steps - self.warmup_steps),
28 | self.power,
29 | )
30 | return [self.base_lr * alpha for _ in self.optimizer.param_groups]
31 |
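32 | # Usage sketch (assuming one scheduler.step() per optimizer step):
33 | #   optimizer = torch.optim.SGD(params, lr=0.1)
34 | #   scheduler = PolyScheduler(optimizer, base_lr=0.1, max_steps=10000, warmup_steps=1000)
35 | #   optimizer.step(); scheduler.step()
36 | # The lr ramps linearly from warmup_lr_init to base_lr over warmup_steps, then
37 | # decays polynomially: base_lr * (1 - (t - warmup) / (max_steps - warmup)) ** 2.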
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/requirement.txt:
--------------------------------------------------------------------------------
1 | tensorboard
2 | easydict
3 | mxnet
4 | onnx
5 | scikit-learn
6 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/run.sh:
--------------------------------------------------------------------------------
1 | # Single-node launch: arguments are forwarded to train.py (e.g. bash run.sh configs/wf4m_r50).
2 | CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 python -m torch.distributed.launch \
3 | --nproc_per_node=8 \
4 | --nnodes=1 \
5 | --node_rank=0 \
6 | --master_addr="127.0.0.1" \
7 | --master_port=12345 train.py $@
8 | # After training exits, kill any lingering train workers.
9 | ps -ef | grep "train" | grep -v grep | awk '{print "kill -9 "$2}' | sh
10 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/torch2onnx.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import onnx
3 | import torch
4 |
5 |
6 | def convert_onnx(net, path_module, output, opset=11, simplify=False):
7 | assert isinstance(net, torch.nn.Module)
8 | img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.int32)
9 | img = img.astype(np.float32)  # np.float was removed in NumPy 1.20+
10 | img = (img / 255. - 0.5) / 0.5 # torch style norm
11 | img = img.transpose((2, 0, 1))
12 | img = torch.from_numpy(img).unsqueeze(0).float()
13 |
14 | weight = torch.load(path_module)
15 | net.load_state_dict(weight, strict=True)
16 | net.eval()
17 | torch.onnx.export(net, img, output, input_names=["data"], keep_initializers_as_inputs=False, verbose=False, opset_version=opset)
18 | model = onnx.load(output)
19 | graph = model.graph
20 | graph.input[0].type.tensor_type.shape.dim[0].dim_param = 'None'
21 | if simplify:
22 | from onnxsim import simplify
23 | model, check = simplify(model)
24 | assert check, "Simplified ONNX model could not be validated"
25 | onnx.save(model, output)
26 |
27 |
28 | if __name__ == '__main__':
29 | import os
30 | import argparse
31 | from backbones import get_model
32 |
33 | parser = argparse.ArgumentParser(description='ArcFace PyTorch to onnx')
34 | parser.add_argument('input', type=str, help='input backbone.pth file or path')
35 | parser.add_argument('--output', type=str, default=None, help='output onnx path')
36 | parser.add_argument('--network', type=str, default=None, help='backbone network')
37 | parser.add_argument('--simplify', type=bool, default=False, help='onnx simplify')
38 | args = parser.parse_args()
39 | input_file = args.input
40 | if os.path.isdir(input_file):
41 | input_file = os.path.join(input_file, "model.pt")
42 | assert os.path.exists(input_file)
43 | # model_name = os.path.basename(os.path.dirname(input_file)).lower()
44 | # params = model_name.split("_")
45 | # if len(params) >= 3 and params[1] in ('arcface', 'cosface'):
46 | # if args.network is None:
47 | # args.network = params[2]
48 | assert args.network is not None
49 | print(args)
50 | backbone_onnx = get_model(args.network, dropout=0.0, fp16=False, num_features=512)
51 | if args.output is None:
52 | args.output = os.path.join(os.path.dirname(args.input), "model.onnx")
53 | convert_onnx(backbone_onnx, input_file, args.output, simplify=args.simplify)
54 |
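55 | # Usage sketch (paths illustrative):
56 | #   python torch2onnx.py ms1mv3_arcface_r50/backbone.pth --network r50 --output model.onnx
57 | # Caveat: --simplify is parsed with type=bool, so any non-empty string
58 | # (including "False") evaluates to True.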
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/utils/__init__.py
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/utils/plot.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 |
4 | import matplotlib.pyplot as plt
5 | import numpy as np
6 | import pandas as pd
7 | from menpo.visualize.viewmatplotlib import sample_colours_from_colourmap
8 | from prettytable import PrettyTable
9 | from sklearn.metrics import roc_curve, auc
10 |
11 | with open(sys.argv[1], "r") as f:
12 | files = f.readlines()
13 |
14 | files = [x.strip() for x in files]
15 | image_path = "/train_tmp/IJB_release/IJBC"
16 |
17 |
18 | def read_template_pair_list(path):
19 | pairs = pd.read_csv(path, sep=' ', header=None).values
20 | t1 = pairs[:, 0].astype(int)  # np.int was removed in NumPy 1.20+
21 | t2 = pairs[:, 1].astype(int)
22 | label = pairs[:, 2].astype(int)
23 | return t1, t2, label
24 |
25 |
26 | p1, p2, label = read_template_pair_list(
27 | os.path.join('%s/meta' % image_path,
28 | '%s_template_pair_label.txt' % 'ijbc'))
29 |
30 | methods = []
31 | scores = []
32 | for file in files:
33 | methods.append(file)
34 | scores.append(np.load(file))
35 |
36 | methods = np.array(methods)
37 | scores = dict(zip(methods, scores))
38 | colours = dict(
39 | zip(methods, sample_colours_from_colourmap(methods.shape[0], 'Set2')))
40 | x_labels = [10 ** -6, 10 ** -5, 10 ** -4, 10 ** -3, 10 ** -2, 10 ** -1]
41 | tpr_fpr_table = PrettyTable(['Methods'] + [str(x) for x in x_labels])
42 | fig = plt.figure()
43 | for method in methods:
44 | fpr, tpr, _ = roc_curve(label, scores[method])
45 | roc_auc = auc(fpr, tpr)
46 | fpr = np.flipud(fpr)
47 | tpr = np.flipud(tpr) # select largest tpr at same fpr
48 | plt.plot(fpr,
49 | tpr,
50 | color=colours[method],
51 | lw=1,
52 | label=('[%s (AUC = %0.4f %%)]' %
53 | (method.split('-')[-1], roc_auc * 100)))
54 | tpr_fpr_row = []
55 | tpr_fpr_row.append(method)
56 | for fpr_iter in np.arange(len(x_labels)):
57 | _, min_index = min(
58 | list(zip(abs(fpr - x_labels[fpr_iter]), range(len(fpr)))))
59 | tpr_fpr_row.append('%.2f' % (tpr[min_index] * 100))
60 | tpr_fpr_table.add_row(tpr_fpr_row)
61 | plt.xlim([10 ** -6, 0.1])
62 | plt.ylim([0.3, 1.0])
63 | plt.grid(linestyle='--', linewidth=1)
64 | plt.xticks(x_labels)
65 | plt.yticks(np.linspace(0.3, 1.0, 8, endpoint=True))
66 | plt.xscale('log')
67 | plt.xlabel('False Positive Rate')
68 | plt.ylabel('True Positive Rate')
69 | plt.title('ROC on IJB')
70 | plt.legend(loc="lower right")
71 | print(tpr_fpr_table)
72 |
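73 | # Usage sketch: python plot.py score_files.txt, where score_files.txt lists one
74 | # .npy similarity-score file per line; prints TPR at fixed FPRs for the IJB-C pairs.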
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/utils/utils_config.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | import os.path as osp
3 |
4 |
5 | def get_config(config_file):
6 | assert config_file.startswith('configs/'), 'config file setting must start with configs/'
7 | temp_config_name = osp.basename(config_file)
8 | temp_module_name = osp.splitext(temp_config_name)[0]
9 | config = importlib.import_module("configs.base")
10 | cfg = config.config
11 | config = importlib.import_module("configs.%s" % temp_module_name)
12 | job_cfg = config.config
13 | cfg.update(job_cfg)
14 | if cfg.output is None:
15 | cfg.output = osp.join('work_dirs', temp_module_name)
16 | return cfg
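17 |
18 | # Usage sketch: merges configs/base.py with the job config and derives cfg.output:
19 | #   cfg = get_config('configs/wf4m_r50.py')
20 | #   print(cfg.network, cfg.output)  # -> r50 work_dirs/wf4m_r50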
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/models/arcface_torch/utils/utils_logging.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | import sys
4 |
5 |
6 | class AverageMeter(object):
7 | """Computes and stores the average and current value
8 | """
9 |
10 | def __init__(self):
11 | self.val = None
12 | self.avg = None
13 | self.sum = None
14 | self.count = None
15 | self.reset()
16 |
17 | def reset(self):
18 | self.val = 0
19 | self.avg = 0
20 | self.sum = 0
21 | self.count = 0
22 |
23 | def update(self, val, n=1):
24 | self.val = val
25 | self.sum += val * n
26 | self.count += n
27 | self.avg = self.sum / self.count
28 |
29 |
30 | def init_logging(rank, models_root):
31 | if rank == 0:
32 | log_root = logging.getLogger()
33 | log_root.setLevel(logging.INFO)
34 | formatter = logging.Formatter("Training: %(asctime)s-%(message)s")
35 | handler_file = logging.FileHandler(os.path.join(models_root, "training.log"))
36 | handler_stream = logging.StreamHandler(sys.stdout)
37 | handler_file.setFormatter(formatter)
38 | handler_stream.setFormatter(formatter)
39 | log_root.addHandler(handler_file)
40 | log_root.addHandler(handler_stream)
41 | log_root.info('rank_id: %d' % rank)
42 |
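43 | # Usage sketch: call once per process after the output directory exists; only
44 | # rank 0 attaches handlers, so the other ranks stay silent:
45 | #   init_logging(rank, "work_dirs/wf4m_r50")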
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/README.md:
--------------------------------------------------------------------------------
1 | ## Nvdiffrast – Modular Primitives for High-Performance Differentiable Rendering
2 |
3 | 
4 |
5 | **Modular Primitives for High-Performance Differentiable Rendering**
6 | Samuli Laine, Janne Hellsten, Tero Karras, Yeongho Seol, Jaakko Lehtinen, Timo Aila
7 | [http://arxiv.org/abs/2011.03277](http://arxiv.org/abs/2011.03277)
8 |
9 | Nvdiffrast is a PyTorch/TensorFlow library that provides high-performance primitive operations for rasterization-based differentiable rendering.
10 | Please refer to ☞☞ [nvdiffrast documentation](https://nvlabs.github.io/nvdiffrast) ☜☜ for more information.
11 |
12 | ## Licenses
13 |
14 | Copyright © 2020–2022, NVIDIA Corporation. All rights reserved.
15 |
16 | This work is made available under the [Nvidia Source Code License](https://github.com/NVlabs/nvdiffrast/blob/main/LICENSE.txt).
17 |
18 | For business inquiries, please visit our website and submit the form: [NVIDIA Research Licensing](https://www.nvidia.com/en-us/research/inquiries/)
19 |
20 | We do not currently accept outside code contributions in the form of pull requests.
21 |
22 | Environment map stored as part of `samples/data/envphong.npz` is derived from a Wave Engine
23 | [sample material](https://github.com/WaveEngine/Samples-2.5/tree/master/Materials/EnvironmentMap/Content/Assets/CubeMap.cubemap)
24 | originally shared under
25 | [MIT License](https://github.com/WaveEngine/Samples-2.5/blob/master/LICENSE.md).
26 | Mesh and texture stored as part of `samples/data/earth.npz` are derived from
27 | [3D Earth Photorealistic 2K](https://www.turbosquid.com/3d-models/3d-realistic-earth-photorealistic-2k-1279125)
28 | model originally made available under
29 | [TurboSquid 3D Model License](https://blog.turbosquid.com/turbosquid-3d-model-license/#3d-model-license).
30 |
31 | ## Citation
32 |
33 | ```
34 | @article{Laine2020diffrast,
35 | title = {Modular Primitives for High-Performance Differentiable Rendering},
36 | author = {Samuli Laine and Janne Hellsten and Tero Karras and Yeongho Seol and Jaakko Lehtinen and Timo Aila},
37 | journal = {ACM Transactions on Graphics},
38 | year = {2020},
39 | volume = {39},
40 | number = {6}
41 | }
42 | ```
43 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/build/lib/nvdiffrast/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | __version__ = '0.3.0'
10 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/build/lib/nvdiffrast/common/common.cpp:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2 | //
3 | // NVIDIA CORPORATION and its licensors retain all intellectual property
4 | // and proprietary rights in and to this software, related documentation
5 | // and any modifications thereto. Any use, reproduction, disclosure or
6 | // distribution of this software and related documentation without an express
7 | // license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | #include <cuda_runtime.h>
10 |
11 | //------------------------------------------------------------------------
12 | // Block and grid size calculators for kernel launches.
13 |
14 | dim3 getLaunchBlockSize(int maxWidth, int maxHeight, int width, int height)
15 | {
16 | int maxThreads = maxWidth * maxHeight;
17 | if (maxThreads <= 1 || (width * height) <= 1)
18 | return dim3(1, 1, 1); // Degenerate.
19 |
20 | // Start from max size.
21 | int bw = maxWidth;
22 | int bh = maxHeight;
23 |
24 | // Optimizations for weirdly sized buffers.
25 | if (width < bw)
26 | {
27 | // Decrease block width to smallest power of two that covers the buffer width.
28 | while ((bw >> 1) >= width)
29 | bw >>= 1;
30 |
31 | // Maximize height.
32 | bh = maxThreads / bw;
33 | if (bh > height)
34 | bh = height;
35 | }
36 | else if (height < bh)
37 | {
38 | // Halve height and double width until fits completely inside buffer vertically.
39 | while (bh > height)
40 | {
41 | bh >>= 1;
42 | if (bw < width)
43 | bw <<= 1;
44 | }
45 | }
46 |
47 | // Done.
48 | return dim3(bw, bh, 1);
49 | }
50 |
51 | dim3 getLaunchGridSize(dim3 blockSize, int width, int height, int depth)
52 | {
53 | dim3 gridSize;
54 | gridSize.x = (width - 1) / blockSize.x + 1;
55 | gridSize.y = (height - 1) / blockSize.y + 1;
56 | gridSize.z = (depth - 1) / blockSize.z + 1;
57 | return gridSize;
58 | }
59 |
60 | //------------------------------------------------------------------------
61 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/build/lib/nvdiffrast/common/cudaraster/impl/Buffer.cpp:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2009-2022, NVIDIA CORPORATION. All rights reserved.
2 | //
3 | // NVIDIA CORPORATION and its licensors retain all intellectual property
4 | // and proprietary rights in and to this software, related documentation
5 | // and any modifications thereto. Any use, reproduction, disclosure or
6 | // distribution of this software and related documentation without an express
7 | // license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | #include "../../framework.h"
10 | #include "Buffer.hpp"
11 |
12 | using namespace CR;
13 |
14 | //------------------------------------------------------------------------
15 |
16 | Buffer::Buffer(void)
17 | : m_gpuPtr(NULL),
18 | m_bytes (0)
19 | {
20 | // empty
21 | }
22 |
23 | Buffer::~Buffer(void)
24 | {
25 | if (m_gpuPtr)
26 | cudaFree(m_gpuPtr); // Don't throw an exception.
27 | }
28 |
29 | //------------------------------------------------------------------------
30 |
31 | void Buffer::reset(size_t bytes)
32 | {
33 | if (bytes == m_bytes)
34 | return;
35 |
36 | if (m_gpuPtr)
37 | {
38 | NVDR_CHECK_CUDA_ERROR(cudaFree(m_gpuPtr));
39 | m_gpuPtr = NULL;
40 | }
41 |
42 | if (bytes > 0)
43 | NVDR_CHECK_CUDA_ERROR(cudaMalloc(&m_gpuPtr, bytes));
44 |
45 | m_bytes = bytes;
46 | }
47 |
48 | //------------------------------------------------------------------------
49 |
50 | void Buffer::grow(size_t bytes)
51 | {
52 | if (bytes > m_bytes)
53 | reset(bytes);
54 | }
55 |
56 | //------------------------------------------------------------------------
57 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/build/lib/nvdiffrast/common/cudaraster/impl/Buffer.hpp:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2009-2022, NVIDIA CORPORATION. All rights reserved.
2 | //
3 | // NVIDIA CORPORATION and its licensors retain all intellectual property
4 | // and proprietary rights in and to this software, related documentation
5 | // and any modifications thereto. Any use, reproduction, disclosure or
6 | // distribution of this software and related documentation without an express
7 | // license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | #pragma once
10 | #include "Defs.hpp"
11 |
12 | namespace CR
13 | {
14 | //------------------------------------------------------------------------
15 |
16 | class Buffer
17 | {
18 | public:
19 | Buffer (void);
20 | ~Buffer (void);
21 |
22 | void reset (size_t bytes);
23 | void grow (size_t bytes);
24 | void* getPtr (void) { return m_gpuPtr; }
25 | size_t getSize (void) const { return m_bytes; }
26 |
27 | void setPtr (void* ptr) { m_gpuPtr = ptr; }
28 |
29 | private:
30 | void* m_gpuPtr;
31 | size_t m_bytes;
32 | };
33 |
34 | //------------------------------------------------------------------------
35 | }
36 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/build/lib/nvdiffrast/common/cudaraster/impl/CudaRaster.cpp:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2009-2022, NVIDIA CORPORATION. All rights reserved.
2 | //
3 | // NVIDIA CORPORATION and its licensors retain all intellectual property
4 | // and proprietary rights in and to this software, related documentation
5 | // and any modifications thereto. Any use, reproduction, disclosure or
6 | // distribution of this software and related documentation without an express
7 | // license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | #include "Defs.hpp"
10 | #include "../CudaRaster.hpp"
11 | #include "RasterImpl.hpp"
12 |
13 | using namespace CR;
14 |
15 | //------------------------------------------------------------------------
16 | // Stub interface implementation.
17 | //------------------------------------------------------------------------
18 |
19 | CudaRaster::CudaRaster()
20 | {
21 | m_impl = new RasterImpl();
22 | }
23 |
24 | CudaRaster::~CudaRaster()
25 | {
26 | delete m_impl;
27 | }
28 |
29 | void CudaRaster::setViewportSize(int width, int height, int numImages)
30 | {
31 | m_impl->setViewportSize(Vec3i(width, height, numImages));
32 | }
33 |
34 | void CudaRaster::setRenderModeFlags(U32 flags)
35 | {
36 | m_impl->setRenderModeFlags(flags);
37 | }
38 |
39 | void CudaRaster::deferredClear(U32 clearColor)
40 | {
41 | m_impl->deferredClear(clearColor);
42 | }
43 |
44 | void CudaRaster::setVertexBuffer(void* vertices, int numVertices)
45 | {
46 | m_impl->setVertexBuffer(vertices, numVertices);
47 | }
48 |
49 | void CudaRaster::setIndexBuffer(void* indices, int numTriangles)
50 | {
51 | m_impl->setIndexBuffer(indices, numTriangles);
52 | }
53 |
54 | bool CudaRaster::drawTriangles(const int* ranges, cudaStream_t stream)
55 | {
56 | return m_impl->drawTriangles((const Vec2i*)ranges, stream);
57 | }
58 |
59 | void* CudaRaster::getColorBuffer(void)
60 | {
61 | return m_impl->getColorBuffer();
62 | }
63 |
64 | void* CudaRaster::getDepthBuffer(void)
65 | {
66 | return m_impl->getDepthBuffer();
67 | }
68 |
69 | void CudaRaster::swapDepthAndPeel(void)
70 | {
71 | m_impl->swapDepthAndPeel();
72 | }
73 |
74 | //------------------------------------------------------------------------
75 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/build/lib/nvdiffrast/common/cudaraster/impl/RasterImpl.cu:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2009-2022, NVIDIA CORPORATION. All rights reserved.
2 | //
3 | // NVIDIA CORPORATION and its licensors retain all intellectual property
4 | // and proprietary rights in and to this software, related documentation
5 | // and any modifications thereto. Any use, reproduction, disclosure or
6 | // distribution of this software and related documentation without an express
7 | // license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | #include "../CudaRaster.hpp"
10 | #include "PrivateDefs.hpp"
11 | #include "Constants.hpp"
12 | #include "Util.inl"
13 |
14 | namespace CR
15 | {
16 |
17 | //------------------------------------------------------------------------
18 | // Stage implementations.
19 | //------------------------------------------------------------------------
20 |
21 | #include "TriangleSetup.inl"
22 | #include "BinRaster.inl"
23 | #include "CoarseRaster.inl"
24 | #include "FineRaster.inl"
25 |
26 | }
27 |
28 | //------------------------------------------------------------------------
29 | // Stage entry points.
30 | //------------------------------------------------------------------------
31 |
32 | __global__ void __launch_bounds__(CR_SETUP_WARPS * 32, CR_SETUP_OPT_BLOCKS) triangleSetupKernel (const CR::CRParams p) { CR::triangleSetupImpl(p); }
33 | __global__ void __launch_bounds__(CR_BIN_WARPS * 32, 1) binRasterKernel (const CR::CRParams p) { CR::binRasterImpl(p); }
34 | __global__ void __launch_bounds__(CR_COARSE_WARPS * 32, 1) coarseRasterKernel (const CR::CRParams p) { CR::coarseRasterImpl(p); }
35 | __global__ void __launch_bounds__(CR_FINE_MAX_WARPS * 32, 1) fineRasterKernel (const CR::CRParams p) { CR::fineRasterImpl(p); }
36 |
37 | //------------------------------------------------------------------------
38 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/build/lib/nvdiffrast/common/framework.h:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2 | //
3 | // NVIDIA CORPORATION and its licensors retain all intellectual property
4 | // and proprietary rights in and to this software, related documentation
5 | // and any modifications thereto. Any use, reproduction, disclosure or
6 | // distribution of this software and related documentation without an express
7 | // license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | #pragma once
10 |
11 | // Framework-specific macros to enable code sharing.
12 |
13 | //------------------------------------------------------------------------
14 | // Tensorflow.
15 |
16 | #ifdef NVDR_TENSORFLOW
17 | #define EIGEN_USE_GPU
18 | #include "tensorflow/core/framework/op.h"
19 | #include "tensorflow/core/framework/op_kernel.h"
20 | #include "tensorflow/core/framework/shape_inference.h"
21 | #include "tensorflow/core/platform/default/logging.h"
22 | using namespace tensorflow;
23 | using namespace tensorflow::shape_inference;
24 | #define NVDR_CTX_ARGS OpKernelContext* _nvdr_ctx
25 | #define NVDR_CTX_PARAMS _nvdr_ctx
26 | #define NVDR_CHECK(COND, ERR) OP_REQUIRES(_nvdr_ctx, COND, errors::Internal(ERR))
27 | #define NVDR_CHECK_CUDA_ERROR(CUDA_CALL) OP_CHECK_CUDA_ERROR(_nvdr_ctx, CUDA_CALL)
28 | #define NVDR_CHECK_GL_ERROR(GL_CALL) OP_CHECK_GL_ERROR(_nvdr_ctx, GL_CALL)
29 | #endif
30 |
31 | //------------------------------------------------------------------------
32 | // PyTorch.
33 |
34 | #ifdef NVDR_TORCH
35 | #ifndef __CUDACC__
36 | #include <torch/extension.h>
37 | #include <ATen/cuda/CUDAContext.h>
38 | #include <ATen/cuda/CUDAUtils.h>
39 | #include <c10/cuda/CUDAGuard.h>
40 | #include <pybind11/pybind11.h>
41 | #endif
42 | #define NVDR_CTX_ARGS int _nvdr_ctx_dummy
43 | #define NVDR_CTX_PARAMS 0
44 | #define NVDR_CHECK(COND, ERR) do { TORCH_CHECK(COND, ERR) } while(0)
45 | #define NVDR_CHECK_CUDA_ERROR(CUDA_CALL) do { cudaError_t err = CUDA_CALL; TORCH_CHECK(!err, "Cuda error: ", cudaGetLastError(), "[", #CUDA_CALL, ";]"); } while(0)
46 | #define NVDR_CHECK_GL_ERROR(GL_CALL) do { GL_CALL; GLenum err = glGetError(); TORCH_CHECK(err == GL_NO_ERROR, "OpenGL error: ", getGLErrorString(err), "[", #GL_CALL, ";]"); } while(0)
47 | #endif
48 |
49 | //------------------------------------------------------------------------
50 |
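Whichever framework is selected, op code stays identical: it accepts NVDR_CTX_ARGS, forwards NVDR_CTX_PARAMS, and reports failures through the NVDR_CHECK* macros, which expand to OP_REQUIRES under TensorFlow and TORCH_CHECK under PyTorch. A hedged sketch of a hypothetical op body compiled with -DNVDR_TORCH (copyOp and its arguments are invented for illustration):

    #include <cuda_runtime.h>
    #include "framework.h"   // assumes -DNVDR_TORCH on the compile line

    // Hypothetical op: copies n floats between device buffers.
    void copyOp(NVDR_CTX_ARGS, float* dst, const float* src, size_t n, cudaStream_t stream)
    {
        NVDR_CHECK(dst && src, "copyOp(): null buffer");
        NVDR_CHECK_CUDA_ERROR(cudaMemcpyAsync(dst, src, n * sizeof(float),
                                              cudaMemcpyDeviceToDevice, stream));
    }

    // A caller threads the context through with the matching macro:
    //     copyOp(NVDR_CTX_PARAMS, dst, src, n, stream);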
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/build/lib/nvdiffrast/tensorflow/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | from .ops import rasterize, interpolate, texture, antialias
10 | from .plugin_loader import set_cache_dir
11 |
12 | __all__ = ["rasterize", "interpolate", "texture", "antialias", "set_cache_dir"]
13 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/build/lib/nvdiffrast/tensorflow/tf_all.cu:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2 | //
3 | // NVIDIA CORPORATION and its licensors retain all intellectual property
4 | // and proprietary rights in and to this software, related documentation
5 | // and any modifications thereto. Any use, reproduction, disclosure or
6 | // distribution of this software and related documentation without an express
7 | // license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | // TF-specific helpers.
10 |
11 | #define OP_CHECK_CUDA_ERROR(CTX, CUDA_CALL) do { cudaError_t err = CUDA_CALL; OP_REQUIRES(CTX, err == cudaSuccess, errors::Internal("Cuda error: ", cudaGetErrorName(err), "[", #CUDA_CALL, ";]")); } while (0)
12 | #define OP_CHECK_GL_ERROR(CTX, GL_CALL) do { GL_CALL; GLenum err = glGetError(); OP_REQUIRES(CTX, err == GL_NO_ERROR, errors::Internal("OpenGL error: ", getGLErrorString(err), "[", #GL_CALL, ";]")); } while (0)
13 |
14 | // Cuda kernels and CPP all together. What an absolute compilation unit.
15 |
16 | #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
17 | #include "../common/framework.h"
18 | #include "../common/glutil.cpp"
19 |
20 | #include "../common/common.h"
21 | #include "../common/common.cpp"
22 |
23 | #include "../common/rasterize.h"
24 | #include "../common/rasterize_gl.cpp"
25 | #include "../common/rasterize.cu"
26 | #include "tf_rasterize.cu"
27 |
28 | #include "../common/interpolate.cu"
29 | #include "tf_interpolate.cu"
30 |
31 | #include "../common/texture.cpp"
32 | #include "../common/texture.cu"
33 | #include "tf_texture.cu"
34 |
35 | #include "../common/antialias.cu"
36 | #include "tf_antialias.cu"
37 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/build/lib/nvdiffrast/torch/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | from .ops import RasterizeCudaContext, RasterizeGLContext, get_log_level, set_log_level, rasterize, DepthPeeler, interpolate, texture, texture_construct_mip, antialias, antialias_construct_topology_hash
10 | __all__ = ["RasterizeCudaContext", "RasterizeGLContext", "get_log_level", "set_log_level", "rasterize", "DepthPeeler", "interpolate", "texture", "texture_construct_mip", "antialias", "antialias_construct_topology_hash"]
11 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/build/lib/nvdiffrast/torch/torch_bindings_gl.cpp:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2 | //
3 | // NVIDIA CORPORATION and its licensors retain all intellectual property
4 | // and proprietary rights in and to this software, related documentation
5 | // and any modifications thereto. Any use, reproduction, disclosure or
6 | // distribution of this software and related documentation without an express
7 | // license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | #include "torch_common.inl"
10 | #include "torch_types.h"
11 | #include <tuple>
12 |
13 | //------------------------------------------------------------------------
14 | // Op prototypes.
15 |
16 | std::tuple<torch::Tensor, torch::Tensor> rasterize_fwd_gl(RasterizeGLStateWrapper& stateWrapper, torch::Tensor pos, torch::Tensor tri, std::tuple<int, int> resolution, torch::Tensor ranges, int peeling_idx);
17 |
18 | //------------------------------------------------------------------------
19 |
20 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
21 | // State classes.
22 | pybind11::class_<RasterizeGLStateWrapper>(m, "RasterizeGLStateWrapper").def(pybind11::init<bool, bool, int>())
23 | .def("set_context", &RasterizeGLStateWrapper::setContext)
24 | .def("release_context", &RasterizeGLStateWrapper::releaseContext);
25 |
26 | // Ops.
27 | m.def("rasterize_fwd_gl", &rasterize_fwd_gl, "rasterize forward op (opengl)");
28 | }
29 |
30 | //------------------------------------------------------------------------
31 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/build/lib/nvdiffrast/torch/torch_common.inl:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2 | //
3 | // NVIDIA CORPORATION and its licensors retain all intellectual property
4 | // and proprietary rights in and to this software, related documentation
5 | // and any modifications thereto. Any use, reproduction, disclosure or
6 | // distribution of this software and related documentation without an express
7 | // license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | #pragma once
10 | #include "../common/framework.h"
11 |
12 | //------------------------------------------------------------------------
13 | // Input check helpers.
14 | //------------------------------------------------------------------------
15 |
16 | #ifdef _MSC_VER
17 | #define __func__ __FUNCTION__
18 | #endif
19 |
20 | #define NVDR_CHECK_DEVICE(...) do { TORCH_CHECK(at::cuda::check_device({__VA_ARGS__}), __func__, "(): Inputs " #__VA_ARGS__ " must reside on the same GPU device") } while(0)
21 | #define NVDR_CHECK_CPU(...) do { nvdr_check_cpu({__VA_ARGS__}, __func__, "(): Inputs " #__VA_ARGS__ " must reside on CPU"); } while(0)
22 | #define NVDR_CHECK_CONTIGUOUS(...) do { nvdr_check_contiguous({__VA_ARGS__}, __func__, "(): Inputs " #__VA_ARGS__ " must be contiguous tensors"); } while(0)
23 | #define NVDR_CHECK_F32(...) do { nvdr_check_f32({__VA_ARGS__}, __func__, "(): Inputs " #__VA_ARGS__ " must be float32 tensors"); } while(0)
24 | #define NVDR_CHECK_I32(...) do { nvdr_check_i32({__VA_ARGS__}, __func__, "(): Inputs " #__VA_ARGS__ " must be int32 tensors"); } while(0)
25 | inline void nvdr_check_cpu(at::ArrayRef<at::Tensor> ts, const char* func, const char* err_msg) { for (const at::Tensor& t : ts) TORCH_CHECK(t.device().type() == c10::DeviceType::CPU, func, err_msg); }
26 | inline void nvdr_check_contiguous(at::ArrayRef<at::Tensor> ts, const char* func, const char* err_msg) { for (const at::Tensor& t : ts) TORCH_CHECK(t.is_contiguous(), func, err_msg); }
27 | inline void nvdr_check_f32(at::ArrayRef<at::Tensor> ts, const char* func, const char* err_msg) { for (const at::Tensor& t : ts) TORCH_CHECK(t.dtype() == torch::kFloat32, func, err_msg); }
28 | inline void nvdr_check_i32(at::ArrayRef<at::Tensor> ts, const char* func, const char* err_msg) { for (const at::Tensor& t : ts) TORCH_CHECK(t.dtype() == torch::kInt32, func, err_msg); }
29 | //------------------------------------------------------------------------
30 |
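These macros are meant to run first in every op wrapper so that raw-pointer kernels never see tensors with the wrong device, layout, or dtype. A hedged sketch (the op itself is invented; assumes compilation with -DNVDR_TORCH so framework.h pulls in the torch headers):

    #include "torch_common.inl"

    torch::Tensor scale2x(torch::Tensor img, torch::Tensor tri)
    {
        NVDR_CHECK_DEVICE(img, tri);      // all inputs on the same GPU
        NVDR_CHECK_CONTIGUOUS(img, tri);  // contiguous memory, as raw-pointer kernels assume
        NVDR_CHECK_F32(img);              // dtype contracts
        NVDR_CHECK_I32(tri);
        return img * 2.0;
    }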
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/build/lib/nvdiffrast/torch/torch_types.h:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2 | //
3 | // NVIDIA CORPORATION and its licensors retain all intellectual property
4 | // and proprietary rights in and to this software, related documentation
5 | // and any modifications thereto. Any use, reproduction, disclosure or
6 | // distribution of this software and related documentation without an express
7 | // license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | #include "torch_common.inl"
10 |
11 | //------------------------------------------------------------------------
12 | // Python GL state wrapper.
13 |
14 | class RasterizeGLState;
15 | class RasterizeGLStateWrapper
16 | {
17 | public:
18 | RasterizeGLStateWrapper (bool enableDB, bool automatic, int cudaDeviceIdx);
19 | ~RasterizeGLStateWrapper (void);
20 |
21 | void setContext (void);
22 | void releaseContext (void);
23 |
24 | RasterizeGLState* pState;
25 | bool automatic;
26 | int cudaDeviceIdx;
27 | };
28 |
29 | //------------------------------------------------------------------------
30 | // Python CudaRaster state wrapper.
31 |
32 | namespace CR { class CudaRaster; }
33 | class RasterizeCRStateWrapper
34 | {
35 | public:
36 | RasterizeCRStateWrapper (int cudaDeviceIdx);
37 | ~RasterizeCRStateWrapper (void);
38 |
39 | CR::CudaRaster* cr;
40 | int cudaDeviceIdx;
41 | };
42 |
43 | //------------------------------------------------------------------------
44 | // Mipmap wrapper to prevent intrusion from Python side.
45 |
46 | class TextureMipWrapper
47 | {
48 | public:
49 | torch::Tensor mip;
50 | int max_mip_level;
51 | std::vector<int> texture_size; // For error checking.
52 | bool cube_mode; // For error checking.
53 | };
54 |
55 |
56 | //------------------------------------------------------------------------
57 | // Antialias topology hash wrapper to prevent intrusion from Python side.
58 |
59 | class TopologyHashWrapper
60 | {
61 | public:
62 | torch::Tensor ev_hash;
63 | };
64 |
65 | //------------------------------------------------------------------------
66 |
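The GL wrapper pairs a heap-allocated RasterizeGLState with explicit context ownership. A hedged sketch of the intended lifecycle when driven from C++ (argument roles are inferred from the declaration and the nvdiffrast documentation; Python reaches the same methods through the set_context/release_context bindings):

    #include "torch_types.h"   // assumes -DNVDR_TORCH

    void glStateDemo()
    {
        // enableDB toggles the derivative output, automatic selects implicit
        // context management; both roles per the nvdiffrast documentation.
        RasterizeGLStateWrapper state(/*enableDB=*/true, /*automatic=*/false, /*cudaDeviceIdx=*/0);
        state.setContext();        // make the GL context current on this thread
        // ... rasterize_fwd_gl(state, ...) calls would go here ...
        state.releaseContext();    // release before another thread takes ownership
    }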
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docker/10_nvidia.json:
--------------------------------------------------------------------------------
1 | {
2 | "file_format_version" : "1.0.0",
3 | "ICD" : {
4 | "library_path" : "libEGL_nvidia.so.0"
5 | }
6 | }
7 |
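This ICD manifest is installed to /usr/share/glvnd/egl_vendor.d/ by the Dockerfile below so that glvnd can route headless EGL calls to libEGL_nvidia.so.0. A hedged C++ sketch of the device enumeration such a setup enables (standard EGL_EXT_device_enumeration usage, not code from this repo):

    #include <EGL/egl.h>
    #include <EGL/eglext.h>
    #include <cstdio>

    int main()
    {
        // Resolve the extension entry point; with the ICD manifest in place,
        // glvnd dispatches it into the NVIDIA EGL driver.
        PFNEGLQUERYDEVICESEXTPROC eglQueryDevicesEXT =
            (PFNEGLQUERYDEVICESEXTPROC)eglGetProcAddress("eglQueryDevicesEXT");
        if (!eglQueryDevicesEXT)
            return 1;

        EGLDeviceEXT devices[8];
        EGLint numDevices = 0;
        eglQueryDevicesEXT(8, devices, &numDevices);
        printf("EGL devices visible: %d\n", (int)numDevices);
        return 0;
    }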
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | # Note: Should also work with NVIDIA's Docker image builds such as
10 | #
11 | # nvcr.io/nvidia/pytorch:20.09-py3
12 | #
13 | # This file defaults to pytorch/pytorch as it works on slightly older
14 | # driver versions.
15 | ARG BASE_IMAGE=pytorch/pytorch:1.10.0-cuda11.3-cudnn8-devel
16 | FROM $BASE_IMAGE
17 |
18 | RUN apt-get update && apt-get install -y --no-install-recommends \
19 | pkg-config \
20 | libglvnd0 \
21 | libgl1 \
22 | libglx0 \
23 | libegl1 \
24 | libgles2 \
25 | libglvnd-dev \
26 | libgl1-mesa-dev \
27 | libegl1-mesa-dev \
28 | libgles2-mesa-dev \
29 | cmake \
30 | curl
31 |
32 | ENV PYTHONDONTWRITEBYTECODE=1
33 | ENV PYTHONUNBUFFERED=1
34 |
35 | # for GLEW
36 | ENV LD_LIBRARY_PATH /usr/lib64:$LD_LIBRARY_PATH
37 |
38 | # nvidia-container-runtime
39 | ENV NVIDIA_VISIBLE_DEVICES all
40 | ENV NVIDIA_DRIVER_CAPABILITIES compute,utility,graphics
41 |
42 | # Default pyopengl to EGL for good headless rendering support
43 | ENV PYOPENGL_PLATFORM egl
44 |
45 | COPY docker/10_nvidia.json /usr/share/glvnd/egl_vendor.d/10_nvidia.json
46 |
47 | RUN pip install --upgrade pip
48 | RUN pip install ninja imageio imageio-ffmpeg
49 |
50 | COPY nvdiffrast /tmp/pip/nvdiffrast/
51 | COPY README.md setup.py /tmp/pip/
52 | RUN cd /tmp/pip && pip install .
53 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/cube.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/cube.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/earth.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/earth.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/envphong.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/envphong.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/logo.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/pipe_cube.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/pipe_cube.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/pipe_earth.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/pipe_earth.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/pipe_envphong.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/pipe_envphong.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/pose.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/pose.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/spot_aa.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/spot_aa.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/spot_crop1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/spot_crop1.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/spot_crop2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/spot_crop2.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/spot_diff1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/spot_diff1.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/spot_diff2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/spot_diff2.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/spot_peel1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/spot_peel1.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/spot_peel2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/spot_peel2.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/spot_st.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/spot_st.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/spot_tex.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/spot_tex.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/spot_texture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/spot_texture.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/spot_texw.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/spot_texw.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/spot_tri.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/spot_tri.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/spot_uv.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/spot_uv.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/teaser.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/teaser.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/teaser1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/teaser1.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/teaser2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/teaser2.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/teaser3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/teaser3.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/teaser4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/teaser4.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/teaser5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/teaser5.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/thumb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/thumb.jpg
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/tri.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/docs/img/tri.png
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/nvdiffrast.egg-info/SOURCES.txt:
--------------------------------------------------------------------------------
1 | LICENSE.txt
2 | README.md
3 | setup.py
4 | nvdiffrast/__init__.py
5 | nvdiffrast.egg-info/PKG-INFO
6 | nvdiffrast.egg-info/SOURCES.txt
7 | nvdiffrast.egg-info/dependency_links.txt
8 | nvdiffrast.egg-info/requires.txt
9 | nvdiffrast.egg-info/top_level.txt
10 | nvdiffrast/common/antialias.cu
11 | nvdiffrast/common/antialias.h
12 | nvdiffrast/common/common.cpp
13 | nvdiffrast/common/common.h
14 | nvdiffrast/common/framework.h
15 | nvdiffrast/common/glutil.cpp
16 | nvdiffrast/common/glutil.h
17 | nvdiffrast/common/glutil_extlist.h
18 | nvdiffrast/common/interpolate.cu
19 | nvdiffrast/common/interpolate.h
20 | nvdiffrast/common/rasterize.cu
21 | nvdiffrast/common/rasterize.h
22 | nvdiffrast/common/rasterize_gl.cpp
23 | nvdiffrast/common/rasterize_gl.h
24 | nvdiffrast/common/texture.cpp
25 | nvdiffrast/common/texture.cu
26 | nvdiffrast/common/texture.h
27 | nvdiffrast/common/cudaraster/CudaRaster.hpp
28 | nvdiffrast/common/cudaraster/impl/BinRaster.inl
29 | nvdiffrast/common/cudaraster/impl/Buffer.cpp
30 | nvdiffrast/common/cudaraster/impl/Buffer.hpp
31 | nvdiffrast/common/cudaraster/impl/CoarseRaster.inl
32 | nvdiffrast/common/cudaraster/impl/Constants.hpp
33 | nvdiffrast/common/cudaraster/impl/CudaRaster.cpp
34 | nvdiffrast/common/cudaraster/impl/Defs.hpp
35 | nvdiffrast/common/cudaraster/impl/FineRaster.inl
36 | nvdiffrast/common/cudaraster/impl/PrivateDefs.hpp
37 | nvdiffrast/common/cudaraster/impl/RasterImpl.cpp
38 | nvdiffrast/common/cudaraster/impl/RasterImpl.cu
39 | nvdiffrast/common/cudaraster/impl/RasterImpl.hpp
40 | nvdiffrast/common/cudaraster/impl/TriangleSetup.inl
41 | nvdiffrast/common/cudaraster/impl/Util.inl
42 | nvdiffrast/tensorflow/__init__.py
43 | nvdiffrast/tensorflow/ops.py
44 | nvdiffrast/tensorflow/plugin_loader.py
45 | nvdiffrast/tensorflow/tf_all.cu
46 | nvdiffrast/tensorflow/tf_antialias.cu
47 | nvdiffrast/tensorflow/tf_interpolate.cu
48 | nvdiffrast/tensorflow/tf_rasterize.cu
49 | nvdiffrast/tensorflow/tf_texture.cu
50 | nvdiffrast/torch/__init__.py
51 | nvdiffrast/torch/ops.py
52 | nvdiffrast/torch/torch_antialias.cpp
53 | nvdiffrast/torch/torch_bindings.cpp
54 | nvdiffrast/torch/torch_bindings_gl.cpp
55 | nvdiffrast/torch/torch_common.inl
56 | nvdiffrast/torch/torch_interpolate.cpp
57 | nvdiffrast/torch/torch_rasterize.cpp
58 | nvdiffrast/torch/torch_rasterize_gl.cpp
59 | nvdiffrast/torch/torch_texture.cpp
60 | nvdiffrast/torch/torch_types.h
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/nvdiffrast.egg-info/dependency_links.txt:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/nvdiffrast.egg-info/requires.txt:
--------------------------------------------------------------------------------
1 | numpy
2 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/nvdiffrast.egg-info/top_level.txt:
--------------------------------------------------------------------------------
1 | nvdiffrast
2 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/nvdiffrast/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | __version__ = '0.3.0'
10 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/nvdiffrast/common/common.cpp:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2 | //
3 | // NVIDIA CORPORATION and its licensors retain all intellectual property
4 | // and proprietary rights in and to this software, related documentation
5 | // and any modifications thereto. Any use, reproduction, disclosure or
6 | // distribution of this software and related documentation without an express
7 | // license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | #include <cuda_runtime.h>
10 |
11 | //------------------------------------------------------------------------
12 | // Block and grid size calculators for kernel launches.
13 |
14 | dim3 getLaunchBlockSize(int maxWidth, int maxHeight, int width, int height)
15 | {
16 | int maxThreads = maxWidth * maxHeight;
17 | if (maxThreads <= 1 || (width * height) <= 1)
18 | return dim3(1, 1, 1); // Degenerate.
19 |
20 | // Start from max size.
21 | int bw = maxWidth;
22 | int bh = maxHeight;
23 |
24 | // Optimizations for weirdly sized buffers.
25 | if (width < bw)
26 | {
27 | // Decrease block width to smallest power of two that covers the buffer width.
28 | while ((bw >> 1) >= width)
29 | bw >>= 1;
30 |
31 | // Maximize height.
32 | bh = maxThreads / bw;
33 | if (bh > height)
34 | bh = height;
35 | }
36 | else if (height < bh)
37 | {
38 | // Halve height and double width until fits completely inside buffer vertically.
39 | while (bh > height)
40 | {
41 | bh >>= 1;
42 | if (bw < width)
43 | bw <<= 1;
44 | }
45 | }
46 |
47 | // Done.
48 | return dim3(bw, bh, 1);
49 | }
50 |
51 | dim3 getLaunchGridSize(dim3 blockSize, int width, int height, int depth)
52 | {
53 | dim3 gridSize;
54 | gridSize.x = (width - 1) / blockSize.x + 1;
55 | gridSize.y = (height - 1) / blockSize.y + 1;
56 | gridSize.z = (depth - 1) / blockSize.z + 1;
57 | return gridSize;
58 | }
59 |
60 | //------------------------------------------------------------------------
61 |
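The two helpers compose: getLaunchBlockSize shapes a block that fits the buffer, and getLaunchGridSize rounds the grid up so the launch covers every pixel. A small sketch for a hypothetical 640x360 image with a 32x8 maximum block (the numbers and kernel name are illustrative):

    #include <cuda_runtime.h>

    dim3 getLaunchBlockSize(int maxWidth, int maxHeight, int width, int height);
    dim3 getLaunchGridSize(dim3 blockSize, int width, int height, int depth);

    void launchConfigDemo()
    {
        int width = 640, height = 360, depth = 1;  // depth = images in the minibatch
        dim3 block = getLaunchBlockSize(32, 8, width, height);
        dim3 grid  = getLaunchGridSize(block, width, height, depth);
        // Invariant: grid.{x,y,z} * block.{x,y,z} covers width, height, and depth.
        // myKernel<<<grid, block>>>(...);  // hypothetical launch would go here
    }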
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/nvdiffrast/common/cudaraster/impl/Buffer.cpp:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2009-2022, NVIDIA CORPORATION. All rights reserved.
2 | //
3 | // NVIDIA CORPORATION and its licensors retain all intellectual property
4 | // and proprietary rights in and to this software, related documentation
5 | // and any modifications thereto. Any use, reproduction, disclosure or
6 | // distribution of this software and related documentation without an express
7 | // license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | #include "../../framework.h"
10 | #include "Buffer.hpp"
11 |
12 | using namespace CR;
13 |
14 | //------------------------------------------------------------------------
15 |
16 | Buffer::Buffer(void)
17 | : m_gpuPtr(NULL),
18 | m_bytes (0)
19 | {
20 | // empty
21 | }
22 |
23 | Buffer::~Buffer(void)
24 | {
25 | if (m_gpuPtr)
26 | cudaFree(m_gpuPtr); // Don't throw an exception.
27 | }
28 |
29 | //------------------------------------------------------------------------
30 |
31 | void Buffer::reset(size_t bytes)
32 | {
33 | if (bytes == m_bytes)
34 | return;
35 |
36 | if (m_gpuPtr)
37 | {
38 | NVDR_CHECK_CUDA_ERROR(cudaFree(m_gpuPtr));
39 | m_gpuPtr = NULL;
40 | }
41 |
42 | if (bytes > 0)
43 | NVDR_CHECK_CUDA_ERROR(cudaMalloc(&m_gpuPtr, bytes));
44 |
45 | m_bytes = bytes;
46 | }
47 |
48 | //------------------------------------------------------------------------
49 |
50 | void Buffer::grow(size_t bytes)
51 | {
52 | if (bytes > m_bytes)
53 | reset(bytes);
54 | }
55 |
56 | //------------------------------------------------------------------------
57 |
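In short: reset() frees and reallocates whenever the requested size actually changes, and grow() only ever enlarges, so repeated frames reuse the existing allocation when it is already big enough. A small sketch of those semantics (sizes are illustrative):

    #include "Buffer.hpp"

    void bufferDemo()
    {
        CR::Buffer buf;
        buf.grow(1 << 20);         // allocates 1 MiB on the GPU
        buf.grow(1 << 10);         // no-op: request is smaller than the current allocation
        buf.grow(1 << 21);         // reallocates at 2 MiB; old contents are not preserved
        void* ptr = buf.getPtr();  // raw device pointer for kernel arguments
        (void)ptr;
        buf.reset(0);              // explicit free; the destructor would also release it
    }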
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/nvdiffrast/common/cudaraster/impl/Buffer.hpp:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2009-2022, NVIDIA CORPORATION. All rights reserved.
2 | //
3 | // NVIDIA CORPORATION and its licensors retain all intellectual property
4 | // and proprietary rights in and to this software, related documentation
5 | // and any modifications thereto. Any use, reproduction, disclosure or
6 | // distribution of this software and related documentation without an express
7 | // license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | #pragma once
10 | #include "Defs.hpp"
11 |
12 | namespace CR
13 | {
14 | //------------------------------------------------------------------------
15 |
16 | class Buffer
17 | {
18 | public:
19 | Buffer (void);
20 | ~Buffer (void);
21 |
22 | void reset (size_t bytes);
23 | void grow (size_t bytes);
24 | void* getPtr (void) { return m_gpuPtr; }
25 | size_t getSize (void) const { return m_bytes; }
26 |
27 | void setPtr (void* ptr) { m_gpuPtr = ptr; }
28 |
29 | private:
30 | void* m_gpuPtr;
31 | size_t m_bytes;
32 | };
33 |
34 | //------------------------------------------------------------------------
35 | }
36 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/nvdiffrast/common/cudaraster/impl/CudaRaster.cpp:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2009-2022, NVIDIA CORPORATION. All rights reserved.
2 | //
3 | // NVIDIA CORPORATION and its licensors retain all intellectual property
4 | // and proprietary rights in and to this software, related documentation
5 | // and any modifications thereto. Any use, reproduction, disclosure or
6 | // distribution of this software and related documentation without an express
7 | // license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | #include "Defs.hpp"
10 | #include "../CudaRaster.hpp"
11 | #include "RasterImpl.hpp"
12 |
13 | using namespace CR;
14 |
15 | //------------------------------------------------------------------------
16 | // Stub interface implementation.
17 | //------------------------------------------------------------------------
18 |
19 | CudaRaster::CudaRaster()
20 | {
21 | m_impl = new RasterImpl();
22 | }
23 |
24 | CudaRaster::~CudaRaster()
25 | {
26 | delete m_impl;
27 | }
28 |
29 | void CudaRaster::setViewportSize(int width, int height, int numImages)
30 | {
31 | m_impl->setViewportSize(Vec3i(width, height, numImages));
32 | }
33 |
34 | void CudaRaster::setRenderModeFlags(U32 flags)
35 | {
36 | m_impl->setRenderModeFlags(flags);
37 | }
38 |
39 | void CudaRaster::deferredClear(U32 clearColor)
40 | {
41 | m_impl->deferredClear(clearColor);
42 | }
43 |
44 | void CudaRaster::setVertexBuffer(void* vertices, int numVertices)
45 | {
46 | m_impl->setVertexBuffer(vertices, numVertices);
47 | }
48 |
49 | void CudaRaster::setIndexBuffer(void* indices, int numTriangles)
50 | {
51 | m_impl->setIndexBuffer(indices, numTriangles);
52 | }
53 |
54 | bool CudaRaster::drawTriangles(const int* ranges, cudaStream_t stream)
55 | {
56 | return m_impl->drawTriangles((const Vec2i*)ranges, stream);
57 | }
58 |
59 | void* CudaRaster::getColorBuffer(void)
60 | {
61 | return m_impl->getColorBuffer();
62 | }
63 |
64 | void* CudaRaster::getDepthBuffer(void)
65 | {
66 | return m_impl->getDepthBuffer();
67 | }
68 |
69 | void CudaRaster::swapDepthAndPeel(void)
70 | {
71 | m_impl->swapDepthAndPeel();
72 | }
73 |
74 | //------------------------------------------------------------------------
75 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/nvdiffrast/common/cudaraster/impl/RasterImpl.cu:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2009-2022, NVIDIA CORPORATION. All rights reserved.
2 | //
3 | // NVIDIA CORPORATION and its licensors retain all intellectual property
4 | // and proprietary rights in and to this software, related documentation
5 | // and any modifications thereto. Any use, reproduction, disclosure or
6 | // distribution of this software and related documentation without an express
7 | // license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | #include "../CudaRaster.hpp"
10 | #include "PrivateDefs.hpp"
11 | #include "Constants.hpp"
12 | #include "Util.inl"
13 |
14 | namespace CR
15 | {
16 |
17 | //------------------------------------------------------------------------
18 | // Stage implementations.
19 | //------------------------------------------------------------------------
20 |
21 | #include "TriangleSetup.inl"
22 | #include "BinRaster.inl"
23 | #include "CoarseRaster.inl"
24 | #include "FineRaster.inl"
25 |
26 | }
27 |
28 | //------------------------------------------------------------------------
29 | // Stage entry points.
30 | //------------------------------------------------------------------------
31 |
32 | __global__ void __launch_bounds__(CR_SETUP_WARPS * 32, CR_SETUP_OPT_BLOCKS) triangleSetupKernel (const CR::CRParams p) { CR::triangleSetupImpl(p); }
33 | __global__ void __launch_bounds__(CR_BIN_WARPS * 32, 1) binRasterKernel (const CR::CRParams p) { CR::binRasterImpl(p); }
34 | __global__ void __launch_bounds__(CR_COARSE_WARPS * 32, 1) coarseRasterKernel (const CR::CRParams p) { CR::coarseRasterImpl(p); }
35 | __global__ void __launch_bounds__(CR_FINE_MAX_WARPS * 32, 1) fineRasterKernel (const CR::CRParams p) { CR::fineRasterImpl(p); }
36 |
37 | //------------------------------------------------------------------------
38 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/nvdiffrast/common/framework.h:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2 | //
3 | // NVIDIA CORPORATION and its licensors retain all intellectual property
4 | // and proprietary rights in and to this software, related documentation
5 | // and any modifications thereto. Any use, reproduction, disclosure or
6 | // distribution of this software and related documentation without an express
7 | // license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | #pragma once
10 |
11 | // Framework-specific macros to enable code sharing.
12 |
13 | //------------------------------------------------------------------------
14 | // Tensorflow.
15 |
16 | #ifdef NVDR_TENSORFLOW
17 | #define EIGEN_USE_GPU
18 | #include "tensorflow/core/framework/op.h"
19 | #include "tensorflow/core/framework/op_kernel.h"
20 | #include "tensorflow/core/framework/shape_inference.h"
21 | #include "tensorflow/core/platform/default/logging.h"
22 | using namespace tensorflow;
23 | using namespace tensorflow::shape_inference;
24 | #define NVDR_CTX_ARGS OpKernelContext* _nvdr_ctx
25 | #define NVDR_CTX_PARAMS _nvdr_ctx
26 | #define NVDR_CHECK(COND, ERR) OP_REQUIRES(_nvdr_ctx, COND, errors::Internal(ERR))
27 | #define NVDR_CHECK_CUDA_ERROR(CUDA_CALL) OP_CHECK_CUDA_ERROR(_nvdr_ctx, CUDA_CALL)
28 | #define NVDR_CHECK_GL_ERROR(GL_CALL) OP_CHECK_GL_ERROR(_nvdr_ctx, GL_CALL)
29 | #endif
30 |
31 | //------------------------------------------------------------------------
32 | // PyTorch.
33 |
34 | #ifdef NVDR_TORCH
35 | #ifndef __CUDACC__
36 | #include <torch/extension.h>
37 | #include <ATen/cuda/CUDAContext.h>
38 | #include <ATen/cuda/CUDAUtils.h>
39 | #include <c10/cuda/CUDAGuard.h>
40 | #include <pybind11/pybind11.h>
41 | #endif
42 | #define NVDR_CTX_ARGS int _nvdr_ctx_dummy
43 | #define NVDR_CTX_PARAMS 0
44 | #define NVDR_CHECK(COND, ERR) do { TORCH_CHECK(COND, ERR) } while(0)
45 | #define NVDR_CHECK_CUDA_ERROR(CUDA_CALL) do { cudaError_t err = CUDA_CALL; TORCH_CHECK(!err, "Cuda error: ", cudaGetLastError(), "[", #CUDA_CALL, ";]"); } while(0)
46 | #define NVDR_CHECK_GL_ERROR(GL_CALL) do { GL_CALL; GLenum err = glGetError(); TORCH_CHECK(err == GL_NO_ERROR, "OpenGL error: ", getGLErrorString(err), "[", #GL_CALL, ";]"); } while(0)
47 | #endif
48 |
49 | //------------------------------------------------------------------------
50 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/nvdiffrast/lib/setgpu.lib:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/nvdiffrast/lib/setgpu.lib
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/nvdiffrast/tensorflow/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | from .ops import rasterize, interpolate, texture, antialias
10 | from .plugin_loader import set_cache_dir
11 |
12 | __all__ = ["rasterize", "interpolate", "texture", "antialias", "set_cache_dir"]
13 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/nvdiffrast/tensorflow/tf_all.cu:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2 | //
3 | // NVIDIA CORPORATION and its licensors retain all intellectual property
4 | // and proprietary rights in and to this software, related documentation
5 | // and any modifications thereto. Any use, reproduction, disclosure or
6 | // distribution of this software and related documentation without an express
7 | // license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | // TF-specific helpers.
10 |
11 | #define OP_CHECK_CUDA_ERROR(CTX, CUDA_CALL) do { cudaError_t err = CUDA_CALL; OP_REQUIRES(CTX, err == cudaSuccess, errors::Internal("Cuda error: ", cudaGetErrorName(err), "[", #CUDA_CALL, ";]")); } while (0)
12 | #define OP_CHECK_GL_ERROR(CTX, GL_CALL) do { GL_CALL; GLenum err = glGetError(); OP_REQUIRES(CTX, err == GL_NO_ERROR, errors::Internal("OpenGL error: ", getGLErrorString(err), "[", #GL_CALL, ";]")); } while (0)
13 |
14 | // Cuda kernels and CPP all together. What an absolute compilation unit.
15 |
16 | #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
17 | #include "../common/framework.h"
18 | #include "../common/glutil.cpp"
19 |
20 | #include "../common/common.h"
21 | #include "../common/common.cpp"
22 |
23 | #include "../common/rasterize.h"
24 | #include "../common/rasterize_gl.cpp"
25 | #include "../common/rasterize.cu"
26 | #include "tf_rasterize.cu"
27 |
28 | #include "../common/interpolate.cu"
29 | #include "tf_interpolate.cu"
30 |
31 | #include "../common/texture.cpp"
32 | #include "../common/texture.cu"
33 | #include "tf_texture.cu"
34 |
35 | #include "../common/antialias.cu"
36 | #include "tf_antialias.cu"
37 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/nvdiffrast/torch/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | from .ops import RasterizeCudaContext, RasterizeGLContext, get_log_level, set_log_level, rasterize, DepthPeeler, interpolate, texture, texture_construct_mip, antialias, antialias_construct_topology_hash
10 | __all__ = ["RasterizeCudaContext", "RasterizeGLContext", "get_log_level", "set_log_level", "rasterize", "DepthPeeler", "interpolate", "texture", "texture_construct_mip", "antialias", "antialias_construct_topology_hash"]
11 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/nvdiffrast/torch/torch_bindings_gl.cpp:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2 | //
3 | // NVIDIA CORPORATION and its licensors retain all intellectual property
4 | // and proprietary rights in and to this software, related documentation
5 | // and any modifications thereto. Any use, reproduction, disclosure or
6 | // distribution of this software and related documentation without an express
7 | // license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | #include "torch_common.inl"
10 | #include "torch_types.h"
11 | #include <tuple>
12 |
13 | //------------------------------------------------------------------------
14 | // Op prototypes.
15 |
16 | std::tuple<torch::Tensor, torch::Tensor> rasterize_fwd_gl(RasterizeGLStateWrapper& stateWrapper, torch::Tensor pos, torch::Tensor tri, std::tuple<int, int> resolution, torch::Tensor ranges, int peeling_idx);
17 |
18 | //------------------------------------------------------------------------
19 |
20 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
21 | // State classes.
22 | pybind11::class_<RasterizeGLStateWrapper>(m, "RasterizeGLStateWrapper").def(pybind11::init<bool, bool, int>())
23 | .def("set_context", &RasterizeGLStateWrapper::setContext)
24 | .def("release_context", &RasterizeGLStateWrapper::releaseContext);
25 |
26 | // Ops.
27 | m.def("rasterize_fwd_gl", &rasterize_fwd_gl, "rasterize forward op (opengl)");
28 | }
29 |
30 | //------------------------------------------------------------------------
31 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/nvdiffrast/torch/torch_common.inl:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2 | //
3 | // NVIDIA CORPORATION and its licensors retain all intellectual property
4 | // and proprietary rights in and to this software, related documentation
5 | // and any modifications thereto. Any use, reproduction, disclosure or
6 | // distribution of this software and related documentation without an express
7 | // license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | #pragma once
10 | #include "../common/framework.h"
11 |
12 | //------------------------------------------------------------------------
13 | // Input check helpers.
14 | //------------------------------------------------------------------------
15 |
16 | #ifdef _MSC_VER
17 | #define __func__ __FUNCTION__
18 | #endif
19 |
20 | #define NVDR_CHECK_DEVICE(...) do { TORCH_CHECK(at::cuda::check_device({__VA_ARGS__}), __func__, "(): Inputs " #__VA_ARGS__ " must reside on the same GPU device") } while(0)
21 | #define NVDR_CHECK_CPU(...) do { nvdr_check_cpu({__VA_ARGS__}, __func__, "(): Inputs " #__VA_ARGS__ " must reside on CPU"); } while(0)
22 | #define NVDR_CHECK_CONTIGUOUS(...) do { nvdr_check_contiguous({__VA_ARGS__}, __func__, "(): Inputs " #__VA_ARGS__ " must be contiguous tensors"); } while(0)
23 | #define NVDR_CHECK_F32(...) do { nvdr_check_f32({__VA_ARGS__}, __func__, "(): Inputs " #__VA_ARGS__ " must be float32 tensors"); } while(0)
24 | #define NVDR_CHECK_I32(...) do { nvdr_check_i32({__VA_ARGS__}, __func__, "(): Inputs " #__VA_ARGS__ " must be int32 tensors"); } while(0)
25 | inline void nvdr_check_cpu(at::ArrayRef<at::Tensor> ts, const char* func, const char* err_msg) { for (const at::Tensor& t : ts) TORCH_CHECK(t.device().type() == c10::DeviceType::CPU, func, err_msg); }
26 | inline void nvdr_check_contiguous(at::ArrayRef<at::Tensor> ts, const char* func, const char* err_msg) { for (const at::Tensor& t : ts) TORCH_CHECK(t.is_contiguous(), func, err_msg); }
27 | inline void nvdr_check_f32(at::ArrayRef<at::Tensor> ts, const char* func, const char* err_msg) { for (const at::Tensor& t : ts) TORCH_CHECK(t.dtype() == torch::kFloat32, func, err_msg); }
28 | inline void nvdr_check_i32(at::ArrayRef<at::Tensor> ts, const char* func, const char* err_msg) { for (const at::Tensor& t : ts) TORCH_CHECK(t.dtype() == torch::kInt32, func, err_msg); }
29 | //------------------------------------------------------------------------
30 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/nvdiffrast/torch/torch_types.h:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2 | //
3 | // NVIDIA CORPORATION and its licensors retain all intellectual property
4 | // and proprietary rights in and to this software, related documentation
5 | // and any modifications thereto. Any use, reproduction, disclosure or
6 | // distribution of this software and related documentation without an express
7 | // license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | #include "torch_common.inl"
10 |
11 | //------------------------------------------------------------------------
12 | // Python GL state wrapper.
13 |
14 | class RasterizeGLState;
15 | class RasterizeGLStateWrapper
16 | {
17 | public:
18 | RasterizeGLStateWrapper (bool enableDB, bool automatic, int cudaDeviceIdx);
19 | ~RasterizeGLStateWrapper (void);
20 |
21 | void setContext (void);
22 | void releaseContext (void);
23 |
24 | RasterizeGLState* pState;
25 | bool automatic;
26 | int cudaDeviceIdx;
27 | };
28 |
29 | //------------------------------------------------------------------------
30 | // Python CudaRaster state wrapper.
31 |
32 | namespace CR { class CudaRaster; }
33 | class RasterizeCRStateWrapper
34 | {
35 | public:
36 | RasterizeCRStateWrapper (int cudaDeviceIdx);
37 | ~RasterizeCRStateWrapper (void);
38 |
39 | CR::CudaRaster* cr;
40 | int cudaDeviceIdx;
41 | };
42 |
43 | //------------------------------------------------------------------------
44 | // Mipmap wrapper to prevent intrusion from Python side.
45 |
46 | class TextureMipWrapper
47 | {
48 | public:
49 | torch::Tensor mip;
50 | int max_mip_level;
51 | std::vector<int> texture_size; // For error checking.
52 | bool cube_mode; // For error checking.
53 | };
54 |
55 |
56 | //------------------------------------------------------------------------
57 | // Antialias topology hash wrapper to prevent intrusion from Python side.
58 |
59 | class TopologyHashWrapper
60 | {
61 | public:
62 | torch::Tensor ev_hash;
63 | };
64 |
65 | //------------------------------------------------------------------------
66 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/run_sample.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
4 | #
5 | # NVIDIA CORPORATION and its licensors retain all intellectual property
6 | # and proprietary rights in and to this software, related documentation
7 | # and any modifications thereto. Any use, reproduction, disclosure or
8 | # distribution of this software and related documentation without an express
9 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
10 |
11 | function print_help {
12 | echo "Usage: `basename $0` [--build-container] "
13 | echo ""
14 | echo "Option --build-container will build the Docker container based on"
15 | echo "docker/Dockerfile and tag the image with gltorch:latest."
16 | echo ""
17 | echo "Example: `basename $0` samples/torch/envphong.py"
18 | }
19 |
20 | build_container=0
21 | sample=""
22 | while [[ "$#" -gt 0 ]]; do
23 | case $1 in
24 | --build-container) build_container=1;;
25 | -h|--help) print_help; exit 0 ;;
26 | --*) echo "Unknown parameter passed: $1"; exit 1 ;;
27 | *) sample="$1"; shift; break;
28 | esac
29 | shift
30 | done
31 |
32 | rest=$@
33 |
34 | # Build the docker container
35 | if [ "$build_container" = "1" ]; then
36 | docker build --tag gltorch:latest -f docker/Dockerfile .
37 | docker build --tag gltensorflow:latest --build-arg BASE_IMAGE=tensorflow/tensorflow:1.15.0-gpu-py3 -f docker/Dockerfile .
38 | fi
39 |
40 | if [ ! -f "$sample" ]; then
41 | echo
42 | echo "No python sample given or file '$sample' not found. Exiting."
43 | exit 1
44 | fi
45 |
46 | image="gltorch:latest"
47 | TENSORFLOW_CUDA_CACHE=""
48 | # Magically choose the tensorflow container if running a sample from the samples/tensorflow/ directory
49 | if [[ $sample == *"/tensorflow/"* ]]; then
50 | image="gltensorflow:latest"
51 | TENSORFLOW_CUDA_CACHE="-e NVDIFFRAST_CACHE_DIR=/app/tmp"
52 | fi
53 |
54 | echo "Using container image: $image"
55 | echo "Running command: $sample $rest"
56 |
57 | # Run a sample with docker
58 | docker run --rm -it --gpus all --user $(id -u):$(id -g) \
59 | -v `pwd`:/app --workdir /app -e TORCH_EXTENSIONS_DIR=/app/tmp $TENSORFLOW_CUDA_CACHE $image python3 $sample $rest
60 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/samples/data/cube_c.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/samples/data/cube_c.npz
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/samples/data/cube_d.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/samples/data/cube_d.npz
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/samples/data/cube_p.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/samples/data/cube_p.npz
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/samples/data/earth.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/samples/data/earth.npz
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/samples/data/envphong.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/samples/data/envphong.npz
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/samples/tensorflow/triangle.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | import imageio
10 | import logging
11 | import os
12 | import numpy as np
13 | import tensorflow as tf
14 | import nvdiffrast.tensorflow as dr
15 |
16 | # Silence deprecation warnings and debug level logging
17 | logging.getLogger('tensorflow').setLevel(logging.ERROR)
18 | os.environ.setdefault('TF_CPP_MIN_LOG_LEVEL', '1')
19 |
20 | pos = tf.convert_to_tensor([[[-0.8, -0.8, 0, 1], [0.8, -0.8, 0, 1], [-0.8, 0.8, 0, 1]]], dtype=tf.float32)
21 | col = tf.convert_to_tensor([[[1, 0, 0], [0, 1, 0], [0, 0, 1]]], dtype=tf.float32)
22 | tri = tf.convert_to_tensor([[0, 1, 2]], dtype=tf.int32)
23 |
24 | rast, _ = dr.rasterize(pos, tri, resolution=[256, 256])
25 | out, _ = dr.interpolate(col, rast, tri)
26 |
27 | with tf.Session() as sess:
28 | img = sess.run(out)
29 |
30 | img = img[0, ::-1, :, :] # Flip vertically.
31 | img = np.clip(np.rint(img * 255), 0, 255).astype(np.uint8) # Quantize to np.uint8
32 |
33 | print("Saving to 'tri.png'.")
34 | imageio.imsave('tri.png', img)
35 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/samples/torch/triangle.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | import imageio
10 | import numpy as np
11 | import torch
12 | import nvdiffrast.torch as dr
13 | import sys
14 |
15 | def tensor(*args, **kwargs):
16 | return torch.tensor(*args, device='cuda', **kwargs)
17 |
18 | if sys.argv[1:] == ['--cuda']:
19 | glctx = dr.RasterizeCudaContext()
20 | elif sys.argv[1:] == ['--opengl']:
21 | glctx = dr.RasterizeGLContext()
22 | else:
23 | print("Specify either --cuda or --opengl")
24 | exit(1)
25 |
26 | pos = tensor([[[-0.8, -0.8, 0, 1], [0.8, -0.8, 0, 1], [-0.8, 0.8, 0, 1]]], dtype=torch.float32)
27 | col = tensor([[[1, 0, 0], [0, 1, 0], [0, 0, 1]]], dtype=torch.float32)
28 | tri = tensor([[0, 1, 2]], dtype=torch.int32)
29 |
30 | rast, _ = dr.rasterize(glctx, pos, tri, resolution=[256, 256])
31 | out, _ = dr.interpolate(col, rast, tri)
32 |
33 | img = out.cpu().numpy()[0, ::-1, :, :] # Flip vertically.
34 | img = np.clip(np.rint(img * 255), 0, 255).astype(np.uint8) # Quantize to np.uint8
35 |
36 | print("Saving to 'tri.png'.")
37 | imageio.imsave('tri.png', img)
38 |
--------------------------------------------------------------------------------
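The two triangle samples above exercise the minimal nvdiffrast pipeline: rasterize, then interpolate. As a hedged extension sketch (not a file from this repository), appending the antialias op from the public nvdiffrast PyTorch API makes the triangle's silhouette edges differentiable with respect to vertex positions. The snippet reuses glctx, col and tri exactly as defined in samples/torch/triangle.py above.

import torch
import nvdiffrast.torch as dr

# Same triangle as above, but with gradients enabled on the vertex positions.
pos = torch.tensor([[[-0.8, -0.8, 0, 1], [0.8, -0.8, 0, 1], [-0.8, 0.8, 0, 1]]],
                   dtype=torch.float32, device='cuda', requires_grad=True)
rast, _ = dr.rasterize(glctx, pos, tri, resolution=[256, 256])
out, _ = dr.interpolate(col, rast, tri)
out = dr.antialias(out, rast, pos, tri)  # blends colors across silhouette edges
out.sum().backward()                     # pos.grad now carries geometry gradients

--------------------------------------------------------------------------------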
/data_preprocess/Deep3DFaceRecon_pytorch/nvdiffrast/setup.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | import nvdiffrast
10 | import setuptools
11 | import os
12 |
13 | with open("README.md", "r") as fh:
14 | long_description = fh.read()
15 |
16 | setuptools.setup(
17 | name="nvdiffrast",
18 | version=nvdiffrast.__version__,
19 | author="Samuli Laine",
20 | author_email="slaine@nvidia.com",
21 | description="nvdiffrast - modular primitives for high-performance differentiable rendering",
22 | long_description=long_description,
23 | long_description_content_type="text/markdown",
24 | url="https://github.com/NVlabs/nvdiffrast",
25 | packages=setuptools.find_packages(),
26 | package_data={
27 | 'nvdiffrast': [
28 | 'common/*.h',
29 | 'common/*.inl',
30 | 'common/*.cu',
31 | 'common/*.cpp',
32 | 'common/cudaraster/*.hpp',
33 | 'common/cudaraster/impl/*.cpp',
34 | 'common/cudaraster/impl/*.hpp',
35 | 'common/cudaraster/impl/*.inl',
36 | 'common/cudaraster/impl/*.cu',
37 | 'lib/*.h',
38 | 'torch/*.h',
39 | 'torch/*.inl',
40 | 'torch/*.cpp',
41 | 'tensorflow/*.cu',
42 | ] + (['lib/*.lib'] if os.name == 'nt' else [])
43 | },
44 | include_package_data=True,
45 | install_requires=['numpy'], # note: can't require torch here as it will install torch even for a TensorFlow container
46 | classifiers=[
47 | "Programming Language :: Python :: 3",
48 | "Operating System :: OS Independent",
49 | ],
50 | python_requires='>=3.6',
51 | )
52 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/options/__init__.py:
--------------------------------------------------------------------------------
1 | """This package options includes option modules: training options, test options, and basic options (used in both training and test)."""
2 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/options/test_options.py:
--------------------------------------------------------------------------------
1 | """This script contains the test options for Deep3DFaceRecon_pytorch
2 | """
3 |
4 | from .base_options import BaseOptions
5 |
6 |
7 | class TestOptions(BaseOptions):
8 | """This class includes test options.
9 |
10 | It also includes shared options defined in BaseOptions.
11 | """
12 |
13 | def initialize(self, parser):
14 | parser = BaseOptions.initialize(self, parser) # define shared options
15 | parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
16 | parser.add_argument('--dataset_mode', type=str, default=None, help='chooses how datasets are loaded. [None | flist]')
17 | parser.add_argument('--img_folder', type=str, default='examples', help='folder for test images.')
18 | parser.add_argument('--lm_folder', type=str, default='examples', help='folder for test landmarks.')
19 | parser.add_argument('--save_folder', type=str, default='examples', help='folder for saving results.')
20 | parser.add_argument('--valid_video_json', type=str, default=None)
21 | # Dropout and Batchnorm have different behavior during training and test.
22 | self.isTrain = False
23 | return parser
24 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/util/BBRegressorParam_r.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Deep3DFaceRecon_pytorch/util/BBRegressorParam_r.mat
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/util/__init__.py:
--------------------------------------------------------------------------------
1 | """This package includes a miscellaneous collection of useful helper functions."""
2 | from .util import *
3 |
--------------------------------------------------------------------------------
/data_preprocess/Deep3DFaceRecon_pytorch/util/generate_list.py:
--------------------------------------------------------------------------------
1 | """This script is to generate training list files for Deep3DFaceRecon_pytorch
2 | """
3 |
4 | import os
5 |
6 | # save the paths to the training data
7 | def write_list(lms_list, imgs_list, msks_list, mode='train', save_folder='datalist', save_name=''):
8 | save_path = os.path.join(save_folder, mode)
9 | if not os.path.isdir(save_path):
10 | os.makedirs(save_path)
11 | with open(os.path.join(save_path, save_name + 'landmarks.txt'), 'w') as fd:
12 | fd.writelines([i + '\n' for i in lms_list])
13 |
14 | with open(os.path.join(save_path, save_name + 'images.txt'), 'w') as fd:
15 | fd.writelines([i + '\n' for i in imgs_list])
16 |
17 | with open(os.path.join(save_path, save_name + 'masks.txt'), 'w') as fd:
18 | fd.writelines([i + '\n' for i in msks_list])
19 |
20 | # check if the path is valid
21 | def check_list(rlms_list, rimgs_list, rmsks_list):
22 | lms_list, imgs_list, msks_list = [], [], []
23 | for i in range(len(rlms_list)):
24 | flag = 'false'
25 | lm_path = rlms_list[i]
26 | im_path = rimgs_list[i]
27 | msk_path = rmsks_list[i]
28 | if os.path.isfile(lm_path) and os.path.isfile(im_path) and os.path.isfile(msk_path):
29 | flag = 'true'
30 | lms_list.append(rlms_list[i])
31 | imgs_list.append(rimgs_list[i])
32 | msks_list.append(rmsks_list[i])
33 | print(i, rlms_list[i], flag)
34 | return lms_list, imgs_list, msks_list
35 |
--------------------------------------------------------------------------------
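A hedged usage sketch for the two helpers above; the three paths are hypothetical placeholders (in practice they come from scanning a dataset directory), and only write_list and check_list come from generate_list.py.

from generate_list import write_list, check_list

# Hypothetical dataset triplets: landmark file, image, skin mask.
lms = ['datasets/examples/000002.txt']
imgs = ['datasets/examples/000002.jpg']
msks = ['datasets/examples/000002_mask.png']

# Drop triplets with a missing file, then write train/landmarks.txt,
# train/images.txt and train/masks.txt under datalist/.
lms, imgs, msks = check_list(lms, imgs, msks)
write_list(lms, imgs, msks, mode='train', save_folder='datalist')

--------------------------------------------------------------------------------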
/data_preprocess/FaceVerse/__init__.py:
--------------------------------------------------------------------------------
1 | from .FaceVerseModel_v3 import FaceVerseModel as FaceVerseModel_v3
2 | import numpy as np
3 |
4 |
5 | def get_recon_model(model_path=None, return_dict=False, **kargs):
6 | model_dict = np.load(model_path, allow_pickle=True).item()
7 | recon_model = FaceVerseModel_v3(model_dict, expr_52=False, **kargs)
8 | if return_dict:
9 | return recon_model, model_dict
10 | else:
11 | return recon_model
12 |
--------------------------------------------------------------------------------
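A usage sketch for get_recon_model above. The checkpoint filename is a placeholder (the actual FaceVerse v3 .npy is not part of this dump), the import assumes the repository root is on sys.path, and any extra keyword arguments are forwarded verbatim to FaceVerseModel_v3.

from data_preprocess.FaceVerse import get_recon_model

# 'faceverse_v3.npy' is a hypothetical filename for the pickled model dict.
recon_model, model_dict = get_recon_model(
    model_path='data_preprocess/FaceVerse/v3/faceverse_v3.npy',
    return_dict=True)  # also return the raw numpy dict alongside the model

--------------------------------------------------------------------------------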
/data_preprocess/FaceVerse/v3/dense_uv_expanded_mask_onlyFace.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/FaceVerse/v3/dense_uv_expanded_mask_onlyFace.png
--------------------------------------------------------------------------------
/data_preprocess/FaceVerse/v3/fv2fl_30.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/FaceVerse/v3/fv2fl_30.npy
--------------------------------------------------------------------------------
/data_preprocess/FaceVerse/v3/v31_face_mask_new.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/FaceVerse/v3/v31_face_mask_new.npy
--------------------------------------------------------------------------------
/data_preprocess/Hillary.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Hillary.mp4
--------------------------------------------------------------------------------
/data_preprocess/Obama.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/data_preprocess/Obama.mp4
--------------------------------------------------------------------------------
/dnnlib/__init__.py:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary
3 | #
4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
5 | # property and proprietary rights in and to this material, related
6 | # documentation and any modifications thereto. Any use, reproduction,
7 | # disclosure or distribution of this material and related documentation
8 | # without an express license agreement from NVIDIA CORPORATION or
9 | # its affiliates is strictly prohibited.
10 |
11 | from .util import EasyDict, make_cache_dir_path
12 |
--------------------------------------------------------------------------------
/encoder_inversion/config/train_e4e_real.yaml:
--------------------------------------------------------------------------------
1 | I_kwargs:
2 | class_name: encoder_inversion.models.e4e.e4e
3 | path_kwargs:
4 | # path_generator: training-runs/v20_pgtraining_wolmsCond/00005-ffhq-images512x512-gpus6-batch24-gamma8/network-snapshot-001362.pkl
5 | path_generator: pretrained_model/ani3dgan512.pkl
6 | path_irse50: pretrained_models/model_ir_se50.pth
7 |
8 | #D_kwargs:
9 | # # class_name: inversion_w.models.multiview_discriminator.MultiviewDiscriminator
10 | # class_name: training.dual_discriminator.DualDiscriminator
11 | # block_kwargs:
12 | # freeze_layers: 0
13 | # epilogue_kwargs:
14 | # mbstd_group_size: 4
15 | # channel_base: 32768
16 | # channel_max: 512
17 | # num_fp16_res: 4
18 | # conv_clamp: 256
19 |
20 |
21 | loss_kwargs:
22 | class_name: encoder_inversion.w_loss.WLoss
23 | r1_gamma: 1
24 | loss_weight:
25 | l1: 1.0
26 | lpips: 0.5
27 | id: 0.25
28 | w_l1: 0.
29 | w_regular: 0.
30 | w_delta: 0.001
31 | w_discriminator: 1.0
32 | multiview_id: 0.25
33 | multiview_cx: 0.0
34 | adv: 0.
35 | raw_l1: 1.0
36 | tri: 0.001
37 | lr_lpips: 1.0
38 | loss_path:
39 | path_ir_se50: pretrained_models/model_ir_se50.pth
40 |
41 |
42 | G_opt_kwargs:
43 | class_name: torch.optim.Adam
44 | module: encoder
45 | betas: [0.5, 0.999]
46 | eps: 1.0e-08
47 | lr: 0.00004
48 |
49 | D_opt_kwargs:
50 | class_name: torch.optim.Adam
51 | betas: [0, 0.99]
52 | eps: 1.0e-08
53 | lr: 0.0001
54 |
55 |
56 |
--------------------------------------------------------------------------------
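The config above is a dictionary of constructor kwargs keyed by class_name, the usual dnnlib convention of instantiating objects from dotted names. A sketch of how such a block could be consumed; dnnlib.util.construct_class_by_name exists in the StyleGAN-family dnnlib shipped here, but the loading snippet itself is an illustration, not repository code.

import yaml
import torch
import dnnlib

with open('encoder_inversion/config/train_e4e_real.yaml') as f:
    cfg = dnnlib.EasyDict(yaml.safe_load(f))

params = [torch.nn.Parameter(torch.zeros(1))]  # stand-in for encoder parameters
opt_kwargs = dict(cfg.G_opt_kwargs)
opt_kwargs.pop('module', None)  # names the submodule to optimize; not an Adam kwarg
optimizer = dnnlib.util.construct_class_by_name(params=params, **opt_kwargs)

--------------------------------------------------------------------------------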
/encoder_inversion/config/train_textureUnet_real.yaml:
--------------------------------------------------------------------------------
1 | I_kwargs:
2 | class_name: encoder_inversion.models.uvnet.inversionNet
3 | encoding_texture: True
4 | encoding_triplane: True
5 | use_gru: False
6 | use_msfeat: True
7 | path_kwargs:
8 | path_generator: pretrained_model/ani3dgan512.pkl
9 | path_irse50: pretrained_models/model_ir_se50.pth
10 | path_triplanenet: pretrained_models/triplanenet_final.pth
11 |
12 | #D_kwargs:
13 | # # class_name: inversion_w.models.multiview_discriminator.MultiviewDiscriminator
14 | # class_name: training.dual_discriminator.DualDiscriminator
15 | # block_kwargs:
16 | # freeze_layers: 0
17 | # epilogue_kwargs:
18 | # mbstd_group_size: 4
19 | # channel_base: 32768
20 | # channel_max: 512
21 | # num_fp16_res: 4
22 | # conv_clamp: 256
23 |
24 |
25 | loss_kwargs:
26 | class_name: encoder_inversion.unet_loss.WLoss
27 | r1_gamma: 1
28 | loss_weight:
29 | l1: 1.0
30 | lpips: 1.0
31 | raw_l1: 1.0
32 | tri: 0.001
33 | texture: 0.
34 | lr_lpips: 1.0
35 | id: 0.
36 | multiview_id: 0.25
37 | adv: 0.1
38 | loss_path:
39 | path_ir_se50: pretrained_models/model_ir_se50.pth
40 |
41 |
42 | G_opt_kwargs:
43 | class_name: torch.optim.Adam
44 | module: unet_encoder
45 | betas: [0.5, 0.999]
46 | eps: 1.0e-08
47 | lr: 0.000025
48 |
49 | D_opt_kwargs:
50 | class_name: torch.optim.Adam
51 | betas: [0, 0.99]
52 | eps: 1.0e-08
53 | lr: 0.001
54 |
55 |
56 |
--------------------------------------------------------------------------------
/encoder_inversion/config/train_textureUnet_video.yaml:
--------------------------------------------------------------------------------
1 | I_kwargs:
2 | class_name: encoder_inversion.models.uvnet.inversionNet
3 | encoding_texture: True
4 | encoding_triplane: True
5 | use_gru: True
6 | use_msfeat: True
7 | path_kwargs:
8 | path_generator: pretrained_model/ani3dgan512.pkl
9 | path_irse50: pretrained_models/model_ir_se50.pth
10 | path_triplanenet: pretrained_models/triplanenet_final.pth
11 |
12 | #D_kwargs:
13 | # # class_name: inversion_w.models.multiview_discriminator.MultiviewDiscriminator
14 | # class_name: training.dual_discriminator.DualDiscriminator
15 | # block_kwargs:
16 | # freeze_layers: 0
17 | # epilogue_kwargs:
18 | # mbstd_group_size: 4
19 | # channel_base: 32768
20 | # channel_max: 512
21 | # num_fp16_res: 4
22 | # conv_clamp: 256
23 |
24 | loss_kwargs:
25 | class_name: encoder_inversion.video_loss.WLoss
26 | r1_gamma: 1
27 | loss_weight:
28 | l1: 1.0
29 | lpips: 1.0
30 | mouth: 0.
31 | raw_l1: 1.0
32 | tri: 0.001
33 | lr_lpips: 1.0
34 | id: 0.
35 | adv: 0.1
36 | loss_path:
37 | path_ir_se50: pretrained_models/model_ir_se50.pth
38 | multiT_training: False
39 |
40 |
41 | G_opt_kwargs:
42 | class_name: torch.optim.Adam
43 | module: unet_encoder.triplane_unet #.texture_unet #unet_encoder
44 | betas: [0.5, 0.999]
45 | eps: 1.0e-08
46 | lr: 0.000025
47 |
48 | D_opt_kwargs:
49 | class_name: torch.optim.Adam
50 | betas: [0, 0.99]
51 | eps: 1.0e-08
52 | lr: 0.001
53 |
54 | training_set_kwargs:
55 | frm_per_vid: 4
56 |
57 |
--------------------------------------------------------------------------------
/encoder_inversion/criteria/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/encoder_inversion/criteria/__init__.py
--------------------------------------------------------------------------------
/encoder_inversion/criteria/contextual_loss/__init__.py:
--------------------------------------------------------------------------------
1 | from .modules import *
2 |
--------------------------------------------------------------------------------
/encoder_inversion/criteria/contextual_loss/config.py:
--------------------------------------------------------------------------------
1 | # TODO: add supports for L1, L2 etc.
2 | LOSS_TYPES = ['cosine', 'l1', 'l2']
3 |
--------------------------------------------------------------------------------
/encoder_inversion/criteria/contextual_loss/modules/__init__.py:
--------------------------------------------------------------------------------
1 | from .contextual import ContextualLoss
2 | from .contextual_bilateral import ContextualBilateralLoss
3 |
4 | __all__ = ['ContextualLoss', 'ContextualBilateralLoss']
5 |
--------------------------------------------------------------------------------
/encoder_inversion/criteria/contextual_loss/modules/contextual.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 | from .vgg import VGG19
5 | from .. import functional as F
6 | from ..config import LOSS_TYPES
7 |
8 |
9 | class ContextualLoss(nn.Module):
10 | """
11 | Creates a criterion that measures the contextual loss.
12 |
13 | Parameters
14 | ---
15 | band_width : int, optional
16 | a band_width parameter described as :math:`h` in the paper.
17 | use_vgg : bool, optional
18 | set this to `True` to compare VGG-19 features.
19 | vgg_layer : str, optional
20 | intermediate layer name for the VGG feature.
21 | Now we support layer names:
22 | `['relu1_2', 'relu2_2', 'relu3_4', 'relu4_4', 'relu5_4']`
23 | """
24 |
25 | def __init__(self,
26 | band_width: float = 0.5,
27 | loss_type: str = 'cosine',
28 | use_vgg: bool = False,
29 | vgg_layer: str = 'relu3_4'):
30 |
31 | super(ContextualLoss, self).__init__()
32 |
33 | assert band_width > 0, 'band_width parameter must be positive.'
34 | assert loss_type in LOSS_TYPES,\
35 | f'select a loss type from {LOSS_TYPES}.'
36 |
37 | self.band_width = band_width
38 |
39 | if use_vgg:
40 | self.vgg_model = VGG19()
41 | self.vgg_layer = vgg_layer
42 | self.register_buffer(
43 | name='vgg_mean',
44 | tensor=torch.tensor(
45 | [[[0.485]], [[0.456]], [[0.406]]], requires_grad=False)
46 | )
47 | self.register_buffer(
48 | name='vgg_std',
49 | tensor=torch.tensor(
50 | [[[0.229]], [[0.224]], [[0.225]]], requires_grad=False)
51 | )
52 |
53 | def forward(self, x, y):
54 | if hasattr(self, 'vgg_model'):
55 | assert x.shape[1] == 3 and y.shape[1] == 3,\
56 | 'VGG model takes 3-channel images.'
57 |
58 | # normalization
59 | x = x.sub(self.vgg_mean.detach()).div(self.vgg_std.detach())
60 | y = y.sub(self.vgg_mean.detach()).div(self.vgg_std.detach())
61 |
62 | # picking up vgg feature maps
63 | x = getattr(self.vgg_model(x), self.vgg_layer)
64 | y = getattr(self.vgg_model(y), self.vgg_layer)
65 | # print(x.shape, y.shape)  # debug print disabled
66 | return F.contextual_loss(x, y, self.band_width)
67 |
--------------------------------------------------------------------------------
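A short usage sketch for ContextualLoss as defined above. torchvision downloads the VGG-19 weights on first use; per the vgg_mean/vgg_std buffers, inputs are expected as RGB images in [0, 1], which the module normalizes to ImageNet statistics internally.

import torch
from encoder_inversion.criteria.contextual_loss.modules import ContextualLoss

criterion = ContextualLoss(use_vgg=True, vgg_layer='relu3_4').cuda().eval()

x = torch.rand(1, 3, 224, 224, device='cuda')  # prediction, RGB in [0, 1]
y = torch.rand(1, 3, 224, 224, device='cuda')  # target
loss = criterion(x, y)  # scalar contextual loss on relu3_4 features

--------------------------------------------------------------------------------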
/encoder_inversion/criteria/contextual_loss/modules/vgg.py:
--------------------------------------------------------------------------------
1 | from collections import namedtuple
2 |
3 | import torch.nn as nn
4 | import torchvision.models.vgg as vgg
5 |
6 |
7 | class VGG19(nn.Module):
8 | def __init__(self, requires_grad=False):
9 | super(VGG19, self).__init__()
10 | vgg_pretrained_features = vgg.vgg19(pretrained=True).features
11 | self.slice1 = nn.Sequential()
12 | self.slice2 = nn.Sequential()
13 | self.slice3 = nn.Sequential()
14 | self.slice4 = nn.Sequential()
15 | self.slice5 = nn.Sequential()
16 | for x in range(4):
17 | self.slice1.add_module(str(x), vgg_pretrained_features[x])
18 | for x in range(4, 9):
19 | self.slice2.add_module(str(x), vgg_pretrained_features[x])
20 | for x in range(9, 18):
21 | self.slice3.add_module(str(x), vgg_pretrained_features[x])
22 | for x in range(18, 27):
23 | self.slice4.add_module(str(x), vgg_pretrained_features[x])
24 | for x in range(27, 36):
25 | self.slice5.add_module(str(x), vgg_pretrained_features[x])
26 | if not requires_grad:
27 | for param in self.parameters():
28 | param.requires_grad = False
29 |
30 | def forward(self, X):
31 | h = self.slice1(X)
32 | h_relu1_2 = h
33 | h = self.slice2(h)
34 | h_relu2_2 = h
35 | h = self.slice3(h)
36 | h_relu3_4 = h
37 | h = self.slice4(h)
38 | h_relu4_4 = h
39 | h = self.slice5(h)
40 | h_relu5_4 = h
41 |
42 | vgg_outputs = namedtuple(
43 | "VggOutputs", ['relu1_2', 'relu2_2',
44 | 'relu3_4', 'relu4_4', 'relu5_4'])
45 | out = vgg_outputs(h_relu1_2, h_relu2_2,
46 | h_relu3_4, h_relu4_4, h_relu5_4)
47 |
48 | return out
49 |
--------------------------------------------------------------------------------
/encoder_inversion/criteria/contextual_loss/test_cx.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import os
3 | import sys
4 | sys.path.append(os.path.abspath('.'))
5 | from criteria.contextual_loss.modules import ContextualBilateralLoss, ContextualLoss
6 | from criteria.cx_loss import CXLoss
7 | import time
8 |
9 | contextual_loss = ContextualBilateralLoss(use_vgg=True).to('cuda').eval()
10 | cx_loss = ContextualLoss(use_vgg=True).to('cuda').eval()
11 |
12 |
13 | diy_loss = CXLoss().to('cuda').eval()
14 |
15 | a = torch.randn(2, 3, 224, 224).cuda()
16 | b = torch.randn(2, 3, 224, 224).cuda()
17 |
18 | t1 = time.time()
19 | c = diy_loss(a, b)
20 | t2 = time.time()
21 | print(t2 - t1)
22 |
23 | a = torch.randn(2, 3, 224, 224).cuda()
24 | b = torch.randn(2, 3, 224, 224).cuda()
25 |
26 | t1 = time.time()
27 | c = diy_loss(a, b)
28 | t2 = time.time()
29 | print(t2 - t1)
30 |
31 | a = torch.randn(2, 3, 224, 224).cuda()
32 | b = torch.randn(2, 3, 224, 224).cuda()
33 |
34 | t1 = time.time()
35 | c = contextual_loss(a, b)
36 | t2 = time.time()
37 | print(t2 - t1)
38 |
39 | a = torch.randn(2, 3, 224, 224).cuda()
40 | b = torch.randn(2, 3, 224, 224).cuda()
41 |
42 | t1 = time.time()
43 | c = contextual_loss(a, b)
44 | t2 = time.time()
45 | print(t2 - t1)
46 |
47 | a = torch.randn(2, 3, 224, 224).cuda()
48 | b = torch.randn(2, 3, 224, 224).cuda()
49 |
50 | t1 = time.time()
51 | c = cx_loss(a, b)
52 | t2 = time.time()
53 | print(t2 - t1)
54 |
55 | a = torch.randn(2, 3, 224, 224).cuda()
56 | b = torch.randn(2, 3, 224, 224).cuda()
57 |
58 | t1 = time.time()
59 | c = cx_loss(a, b)
60 | t2 = time.time()
61 | print(t2 - t1)
62 |
63 |
64 |
--------------------------------------------------------------------------------
/encoder_inversion/criteria/lpips/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/encoder_inversion/criteria/lpips/__init__.py
--------------------------------------------------------------------------------
/encoder_inversion/criteria/lpips/lpips.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 | from criteria.lpips.networks import get_network, LinLayers
5 | from criteria.lpips.utils import get_state_dict
6 |
7 |
8 | class LPIPS(nn.Module):
9 | r"""Creates a criterion that measures
10 | Learned Perceptual Image Patch Similarity (LPIPS).
11 | Arguments:
12 | net_type (str): the network type to compare the features:
13 | 'alex' | 'squeeze' | 'vgg'. Default: 'alex'.
14 | version (str): the version of LPIPS. Default: 0.1.
15 | """
16 | def __init__(self, net_type: str = 'alex', version: str = '0.1'):
17 |
18 | assert version in ['0.1'], 'only v0.1 is supported for now'
19 |
20 | super(LPIPS, self).__init__()
21 |
22 | # pretrained network
23 | self.net = get_network(net_type)
24 |
25 | # linear layers
26 | self.lin = LinLayers(self.net.n_channels_list)
27 | self.lin.load_state_dict(get_state_dict(net_type, version))
28 |
29 | def forward(self, x: torch.Tensor, y: torch.Tensor):
30 | feat_x, feat_y = self.net(x), self.net(y)
31 |
32 | diff = [(fx - fy) ** 2 for fx, fy in zip(feat_x, feat_y)]
33 | res = [l(d).mean((2, 3), True) for d, l in zip(diff, self.lin)]
34 |
35 | return torch.sum(torch.cat(res, 0)) / x.shape[0]
36 |
--------------------------------------------------------------------------------
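A usage sketch, assuming encoder_inversion/ is on sys.path to match the absolute-style imports in the file above. The linear-layer weights are fetched from the richzhang/PerceptualSimilarity repository by get_state_dict (next file), so the first call needs network access; the expected input range depends on get_network's normalization, which is not shown in this dump.

import torch
from criteria.lpips.lpips import LPIPS

lpips = LPIPS(net_type='alex').cuda().eval()
x = torch.rand(4, 3, 256, 256, device='cuda')
y = torch.rand(4, 3, 256, 256, device='cuda')
d = lpips(x, y)  # scalar: perceptual distance averaged over the batch

--------------------------------------------------------------------------------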
/encoder_inversion/criteria/lpips/utils.py:
--------------------------------------------------------------------------------
1 | from collections import OrderedDict
2 |
3 | import torch
4 |
5 |
6 | def normalize_activation(x, eps=1e-10):
7 | norm_factor = torch.sqrt(torch.sum(x ** 2, dim=1, keepdim=True))
8 | return x / (norm_factor + eps)
9 |
10 |
11 | def get_state_dict(net_type: str = 'alex', version: str = '0.1'):
12 | # build url
13 | url = 'https://raw.githubusercontent.com/richzhang/PerceptualSimilarity/' \
14 | + f'master/lpips/weights/v{version}/{net_type}.pth'
15 |
16 | # download
17 | old_state_dict = torch.hub.load_state_dict_from_url(
18 | url, progress=True,
19 | map_location=torch.device('cpu')  # zxc: avoid each process claiming its own chunk of GPU 0 memory during multi-GPU training
20 | # map_location=None if torch.cuda.is_available() else torch.device('cpu')
21 | )
22 |
23 | # rename keys
24 | new_state_dict = OrderedDict()
25 | for key, val in old_state_dict.items():
26 | new_key = key
27 | new_key = new_key.replace('lin', '')
28 | new_key = new_key.replace('model.', '')
29 | new_state_dict[new_key] = val
30 |
31 | return new_state_dict
32 |
--------------------------------------------------------------------------------
/encoder_inversion/models/DeepLabV3Plus/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 | VOCdevkit
3 | checkpoints
4 | .vscode
5 | *.pyc
6 | .idea/
7 | __pycache__
8 | results
9 | checkpoints_bak
10 | cityscapes
11 | test_results
--------------------------------------------------------------------------------
/encoder_inversion/models/DeepLabV3Plus/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 Gongfan Fang
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/encoder_inversion/models/DeepLabV3Plus/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | from .voc import VOCSegmentation
2 | from .cityscapes import Cityscapes
--------------------------------------------------------------------------------
/encoder_inversion/models/DeepLabV3Plus/metrics/__init__.py:
--------------------------------------------------------------------------------
1 | from .stream_metrics import StreamSegMetrics, AverageMeter
2 |
3 |
--------------------------------------------------------------------------------
/encoder_inversion/models/DeepLabV3Plus/network/__init__.py:
--------------------------------------------------------------------------------
1 | from .modeling import *
2 | from ._deeplab import convert_to_separable_conv
--------------------------------------------------------------------------------
/encoder_inversion/models/DeepLabV3Plus/network/backbone/__init__.py:
--------------------------------------------------------------------------------
1 | from . import resnet
2 | from . import mobilenetv2
3 | from . import hrnetv2
4 | from . import xception
5 |
--------------------------------------------------------------------------------
/encoder_inversion/models/DeepLabV3Plus/requirements.txt:
--------------------------------------------------------------------------------
1 | torch
2 | torchvision
3 | numpy
4 | pillow
5 | scikit-learn
6 | tqdm
7 | matplotlib
8 | visdom
--------------------------------------------------------------------------------
/encoder_inversion/models/DeepLabV3Plus/samples/114_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/encoder_inversion/models/DeepLabV3Plus/samples/114_image.png
--------------------------------------------------------------------------------
/encoder_inversion/models/DeepLabV3Plus/samples/114_overlay.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/encoder_inversion/models/DeepLabV3Plus/samples/114_overlay.png
--------------------------------------------------------------------------------
/encoder_inversion/models/DeepLabV3Plus/samples/114_pred.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/encoder_inversion/models/DeepLabV3Plus/samples/114_pred.png
--------------------------------------------------------------------------------
/encoder_inversion/models/DeepLabV3Plus/samples/114_target.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/encoder_inversion/models/DeepLabV3Plus/samples/114_target.png
--------------------------------------------------------------------------------
/encoder_inversion/models/DeepLabV3Plus/samples/1_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/encoder_inversion/models/DeepLabV3Plus/samples/1_image.png
--------------------------------------------------------------------------------
/encoder_inversion/models/DeepLabV3Plus/samples/1_overlay.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/encoder_inversion/models/DeepLabV3Plus/samples/1_overlay.png
--------------------------------------------------------------------------------
/encoder_inversion/models/DeepLabV3Plus/samples/1_pred.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/encoder_inversion/models/DeepLabV3Plus/samples/1_pred.png
--------------------------------------------------------------------------------
/encoder_inversion/models/DeepLabV3Plus/samples/1_target.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/encoder_inversion/models/DeepLabV3Plus/samples/1_target.png
--------------------------------------------------------------------------------
/encoder_inversion/models/DeepLabV3Plus/samples/23_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/encoder_inversion/models/DeepLabV3Plus/samples/23_image.png
--------------------------------------------------------------------------------
/encoder_inversion/models/DeepLabV3Plus/samples/23_overlay.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/encoder_inversion/models/DeepLabV3Plus/samples/23_overlay.png
--------------------------------------------------------------------------------
/encoder_inversion/models/DeepLabV3Plus/samples/23_pred.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/encoder_inversion/models/DeepLabV3Plus/samples/23_pred.png
--------------------------------------------------------------------------------
/encoder_inversion/models/DeepLabV3Plus/samples/23_target.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/encoder_inversion/models/DeepLabV3Plus/samples/23_target.png
--------------------------------------------------------------------------------
/encoder_inversion/models/DeepLabV3Plus/samples/city_1_overlay.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/encoder_inversion/models/DeepLabV3Plus/samples/city_1_overlay.png
--------------------------------------------------------------------------------
/encoder_inversion/models/DeepLabV3Plus/samples/city_1_target.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/encoder_inversion/models/DeepLabV3Plus/samples/city_1_target.png
--------------------------------------------------------------------------------
/encoder_inversion/models/DeepLabV3Plus/samples/city_6_overlay.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/encoder_inversion/models/DeepLabV3Plus/samples/city_6_overlay.png
--------------------------------------------------------------------------------
/encoder_inversion/models/DeepLabV3Plus/samples/city_6_target.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/encoder_inversion/models/DeepLabV3Plus/samples/city_6_target.png
--------------------------------------------------------------------------------
/encoder_inversion/models/DeepLabV3Plus/samples/visdom-screenshoot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/encoder_inversion/models/DeepLabV3Plus/samples/visdom-screenshoot.png
--------------------------------------------------------------------------------
/encoder_inversion/models/DeepLabV3Plus/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .utils import *
2 | from .visualizer import Visualizer
3 | from .scheduler import PolyLR
4 | from .loss import FocalLoss
--------------------------------------------------------------------------------
/encoder_inversion/models/DeepLabV3Plus/utils/loss.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | import torch.nn.functional as F
3 | import torch
4 |
5 | class FocalLoss(nn.Module):
6 | def __init__(self, alpha=1, gamma=0, size_average=True, ignore_index=255):
7 | super(FocalLoss, self).__init__()
8 | self.alpha = alpha
9 | self.gamma = gamma
10 | self.ignore_index = ignore_index
11 | self.size_average = size_average
12 |
13 | def forward(self, inputs, targets):
14 | ce_loss = F.cross_entropy(
15 | inputs, targets, reduction='none', ignore_index=self.ignore_index)
16 | pt = torch.exp(-ce_loss)
17 | focal_loss = self.alpha * (1-pt)**self.gamma * ce_loss
18 | if self.size_average:
19 | return focal_loss.mean()
20 | else:
21 | return focal_loss.sum()
--------------------------------------------------------------------------------
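With the defaults above (gamma=0), FocalLoss reduces to plain cross-entropy, since FL = alpha * (1 - pt)^gamma * CE with pt = exp(-CE) the probability of the true class; gamma > 0 down-weights well-classified pixels. A quick sketch, with FocalLoss as defined above:

import torch

criterion = FocalLoss(alpha=1, gamma=2)      # gamma > 0 focuses on hard pixels
logits = torch.randn(8, 21, 64, 64)          # N x C x H x W segmentation logits
targets = torch.randint(0, 21, (8, 64, 64))  # per-pixel class indices; 255 is ignored
loss = criterion(logits, targets)            # scalar (size_average=True -> mean)

--------------------------------------------------------------------------------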
/encoder_inversion/models/DeepLabV3Plus/utils/scheduler.py:
--------------------------------------------------------------------------------
1 | from torch.optim.lr_scheduler import _LRScheduler, StepLR
2 |
3 | class PolyLR(_LRScheduler):
4 | def __init__(self, optimizer, max_iters, power=0.9, last_epoch=-1, min_lr=1e-6):
5 | self.power = power
6 | self.max_iters = max_iters
7 | self.min_lr = min_lr  # floor the schedule so the LR never reaches zero
8 | super(PolyLR, self).__init__(optimizer, last_epoch)
9 |
10 | def get_lr(self):
11 | return [ max( base_lr * ( 1 - self.last_epoch/self.max_iters )**self.power, self.min_lr)
12 | for base_lr in self.base_lrs]
--------------------------------------------------------------------------------
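PolyLR decays each base learning rate as lr = base_lr * (1 - last_epoch / max_iters)^power, floored at min_lr; with base 0.01, power 0.9 and max_iters 100, step 50 gives 0.01 * 0.5^0.9 ≈ 5.4e-3. A sketch, with PolyLR as defined above:

import torch

model = torch.nn.Linear(4, 4)
opt = torch.optim.SGD(model.parameters(), lr=0.01)
sched = PolyLR(opt, max_iters=100, power=0.9, min_lr=1e-6)

for step in range(100):
    opt.step()    # (gradient computation omitted in this sketch)
    sched.step()  # after 50 steps: lr = 0.01 * (1 - 50/100) ** 0.9 ≈ 5.4e-3

--------------------------------------------------------------------------------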
/encoder_inversion/models/DeepLabV3Plus/utils/utils.py:
--------------------------------------------------------------------------------
1 | from torchvision.transforms.functional import normalize
2 | import torch.nn as nn
3 | import numpy as np
4 | import os
5 |
6 | def denormalize(tensor, mean, std):
7 | mean = np.array(mean)
8 | std = np.array(std)
9 |
10 | _mean = -mean/std
11 | _std = 1/std
12 | return normalize(tensor, _mean, _std)
13 |
14 | class Denormalize(object):
15 | def __init__(self, mean, std):
16 | mean = np.array(mean)
17 | std = np.array(std)
18 | self._mean = -mean/std
19 | self._std = 1/std
20 |
21 | def __call__(self, tensor):
22 | if isinstance(tensor, np.ndarray):
23 | return (tensor - self._mean.reshape(-1,1,1)) / self._std.reshape(-1,1,1)
24 | return normalize(tensor, self._mean, self._std)
25 |
26 | def set_bn_momentum(model, momentum=0.1):
27 | for m in model.modules():
28 | if isinstance(m, nn.BatchNorm2d):
29 | m.momentum = momentum
30 |
31 | def fix_bn(model):
32 | for m in model.modules():
33 | if isinstance(m, nn.BatchNorm2d):
34 | m.eval()
35 |
36 | def mkdir(path):
37 | if not os.path.exists(path):
38 | os.mkdir(path)
39 |
--------------------------------------------------------------------------------
/encoder_inversion/models/attention.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 | """
3 | DETR Transformer class.
4 | Borrowed from torch.nn.Transformer with modifications
5 | """
6 |
7 | import torch.nn as nn
8 | import torch.nn.functional as F
9 | from typing import Optional
10 | from torch import Tensor
11 |
12 | class CrossAttention(nn.Module):
13 | def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
14 | activation="relu", normalize_before=False, batch_first=True):
15 | super().__init__()
16 |
17 | self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first)
18 |
19 | self.linear1 = nn.Linear(d_model, dim_feedforward)
20 | self.dropout = nn.Dropout(dropout)
21 | self.linear2 = nn.Linear(dim_feedforward, d_model)
22 |
23 | self.norm1 = nn.LayerNorm(d_model, elementwise_affine=False)
24 | self.norm2 = nn.LayerNorm(d_model, elementwise_affine=False)
25 |
26 | self.dropout2 = nn.Dropout(dropout)
27 | self.dropout3 = nn.Dropout(dropout)
28 |
29 | self.activation = _get_activation_fn(activation)
30 | self.normalize_before = normalize_before
31 |
32 |
33 | def forward(self, tgt, memory,
34 | tgt_mask: Optional[Tensor] = None,
35 | memory_mask: Optional[Tensor] = None,
36 | memory_key_padding_mask: Optional[Tensor] = None):
37 | tgt2 = self.multihead_attn(query=tgt,
38 | key=memory,
39 | value=memory,
40 | attn_mask=memory_mask,
41 | key_padding_mask=memory_key_padding_mask)[0]
42 | tgt = tgt + self.dropout2(tgt2)
43 | tgt = self.norm2(tgt)
44 | tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
45 | tgt = tgt + self.dropout3(tgt2)
46 | return tgt
47 |
48 |
49 | def _get_activation_fn(activation):
50 | """Return an activation function given a string"""
51 | if activation == "relu":
52 | return F.relu
53 | if activation == "gelu":
54 | return F.gelu
55 | if activation == "glu":
56 | return F.glu
57 | raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
--------------------------------------------------------------------------------
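With batch_first=True (the default above), nn.MultiheadAttention consumes (batch, sequence, embed) tensors, so this block attends a target sequence over a memory sequence of the same embedding width and returns a tensor shaped like the target. A usage sketch, with CrossAttention as defined above:

import torch

attn = CrossAttention(d_model=256, nhead=8)

tgt = torch.randn(2, 16, 256)     # queries:     batch x target_len x d_model
memory = torch.randn(2, 64, 256)  # keys/values: batch x memory_len x d_model
out = attn(tgt, memory)           # same shape as tgt: 2 x 16 x 256

--------------------------------------------------------------------------------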
/infer.sh:
--------------------------------------------------------------------------------
1 | CUDA_VISIBLE_DEVICES=0 python eval_updated_os.py \
2 | --outdir out/os \
3 | --reload_modules True \
4 | --network pretrained_model/updatedOSInvertAvatar.pkl
5 |
6 | CUDA_VISIBLE_DEVICES=0 python eval_seq.py \
7 | --outdir out/fs \
8 | --reload_modules True \
9 | --network pretrained_model/FSInvertAvatar.pkl
10 |
11 | CUDA_VISIBLE_DEVICES=0 python reenact_avatar_next3d.py \
12 | --drive_root ./data/tgt_data/dataset/images512x512 \
13 | --grid 5x2 \
14 | --seeds 100-108 \
15 | --outdir out/reenact_gan \
16 | --fname obama_reenact_gan \
17 | --trunc 0.7 \
18 | --fixed_camera False \
19 | --network pretrained_model/ani3dgan512.pkl
--------------------------------------------------------------------------------
/inversion/configs/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/inversion/configs/__init__.py
--------------------------------------------------------------------------------
/inversion/configs/global_config.py:
--------------------------------------------------------------------------------
1 | ## Device
2 | cuda_visible_devices = '0'
3 | device = 'cuda:0'
4 |
5 | ## Logs
6 | training_step = 1
7 | image_rec_result_log_snapshot = 100
8 | pivotal_training_steps = 0
9 | model_snapshot_interval = 400
10 |
11 | ## Run name to be updated during PTI
12 | run_name = ''
13 |
--------------------------------------------------------------------------------
/inversion/configs/hyperparameters.py:
--------------------------------------------------------------------------------
1 | ## Architecture
2 | lpips_type = 'alex'
3 | first_inv_type = 'w'
4 | optim_type = 'adam'
5 |
6 | ## Locality regularization
7 | latent_ball_num_of_samples = 1
8 | locality_regularization_interval = 1
9 | use_locality_regularization = False
10 | regulizer_l2_lambda = 0.1
11 | regulizer_lpips_lambda = 0.1
12 | regulizer_alpha = 30
13 |
14 | ## Loss
15 | pt_l2_lambda = 0.01
16 | pt_lpips_lambda = 1
17 |
18 | ## Steps
19 | LPIPS_value_threshold = 0.01
20 | max_pti_steps = 400
21 | first_inv_steps = 450
22 | max_images_to_invert = 30000
23 | save_interval = 1000
24 | model_save_interval = 1000
25 |
26 | ## Optimization
27 | pti_learning_rate = 3e-4
28 | first_inv_lr = 5e-3
29 | train_batch_size = 1
30 | use_last_w_pivots = False
--------------------------------------------------------------------------------
/inversion/configs/paths_config.py:
--------------------------------------------------------------------------------
1 | ## Pretrained models paths
2 | e4e = './pretrained_models/e4e_ffhq_encode.pt'
3 | stylegan2_ada_ffhq = './pretrained_models/ffhq.pkl'
4 | style_clip_pretrained_mappers = ''
5 | ir_se50 = './pretrained_models/model_ir_se50.pth'
6 | dlib = './pretrained_models/align.dat'
7 | ide3d_ffhq = './pretrained_models/ide3d-ffhq-64-512.pkl'
8 | resume_encoder = None
9 |
10 | ## Dirs for output files
11 | checkpoints_dir = 'out_inversion/checkpoints'
12 | embedding_base_dir = 'out_inversion/embeddings'
13 | styleclip_output_dir = 'out_inversion/StyleCLIP_results'
14 | experiments_output_dir = 'out_inversion/results'
15 |
16 | ## Input info
17 | ### Input dir, where the images reside
18 | input_data_path = '/media/zxc/hdd2/Dataset/MEAD/front_level2/ori_image/crop'  # 'examples'
19 | input_mesh_path = ''
20 | ### Inversion identifier, used to keep track of the inversion results; both the latent code and the generator are stored under it
21 | input_data_id = 'ide3d' # 'ide3d_plus_initial_code'
22 |
23 |
24 | ## Keywords
25 | pti_results_keyword = 'PTI'
26 | e4e_results_keyword = 'e4e'
27 | sg2_results_keyword = 'SG2'
28 | sg2_plus_results_keyword = 'SG2_plus'
29 | multi_id_model_type = 'multi_id'
30 |
31 | ## Edit directions
32 | interfacegan_age = 'editings/interfacegan_directions/age.pt'
33 | interfacegan_smile = 'editings/interfacegan_directions/smile.pt'
34 | interfacegan_rotation = 'editings/interfacegan_directions/rotation.pt'
35 | ffhq_pca = 'editings/ganspace_pca/ffhq_pca.pt'
36 |
--------------------------------------------------------------------------------
/inversion/criteria/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/inversion/criteria/__init__.py
--------------------------------------------------------------------------------
/inversion/projectors/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/XChenZ/invertAvatar/2e71361227f86f599f8d8ada788492569ba01540/inversion/projectors/__init__.py
--------------------------------------------------------------------------------
/metrics/__init__.py:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary
3 | #
4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
5 | # property and proprietary rights in and to this material, related
6 | # documentation and any modifications thereto. Any use, reproduction,
7 | # disclosure or distribution of this material and related documentation
8 | # without an express license agreement from NVIDIA CORPORATION or
9 | # its affiliates is strictly prohibited.
10 |
11 | # empty
12 |
--------------------------------------------------------------------------------
/metrics/frechet_inception_distance.py:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary
3 | #
4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
5 | # property and proprietary rights in and to this material, related
6 | # documentation and any modifications thereto. Any use, reproduction,
7 | # disclosure or distribution of this material and related documentation
8 | # without an express license agreement from NVIDIA CORPORATION or
9 | # its affiliates is strictly prohibited.
10 |
11 | """Frechet Inception Distance (FID) from the paper
12 | "GANs trained by a two time-scale update rule converge to a local Nash
13 | equilibrium". Matches the original implementation by Heusel et al. at
14 | https://github.com/bioinf-jku/TTUR/blob/master/fid.py"""
15 |
16 | import numpy as np
17 | import scipy.linalg
18 | from . import metric_utils
19 |
20 | #----------------------------------------------------------------------------
21 |
22 | def compute_fid(opts, max_real, num_gen):
23 | # Direct TorchScript translation of http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
24 | detector_url = 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/metrics/inception-2015-12-05.pkl'
25 | detector_kwargs = dict(return_features=True) # Return raw features before the softmax layer.
26 |
27 | mu_real, sigma_real = metric_utils.compute_feature_stats_for_dataset(
28 | opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
29 | rel_lo=0, rel_hi=0, capture_mean_cov=True, max_items=max_real).get_mean_cov()
30 |
31 | mu_gen, sigma_gen = metric_utils.compute_feature_stats_for_generator(
32 | opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
33 | rel_lo=0, rel_hi=1, capture_mean_cov=True, max_items=num_gen).get_mean_cov()
34 |
35 | if opts.rank != 0:
36 | return float('nan')
37 |
38 | m = np.square(mu_gen - mu_real).sum()
39 | s, _ = scipy.linalg.sqrtm(np.dot(sigma_gen, sigma_real), disp=False) # pylint: disable=no-member
40 | fid = np.real(m + np.trace(sigma_gen + sigma_real - s * 2))
41 | return float(fid)
42 |
43 | #----------------------------------------------------------------------------
44 |
--------------------------------------------------------------------------------
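The last three lines of compute_fid implement the Frechet distance ||mu_r - mu_g||^2 + Tr(Sigma_r + Sigma_g - 2 (Sigma_r Sigma_g)^{1/2}). A standalone numpy restatement of just that step (a sketch, equivalent to the code above):

import numpy as np
import scipy.linalg

def fid_from_stats(mu_real, sigma_real, mu_gen, sigma_gen):
    m = np.square(mu_gen - mu_real).sum()
    s, _ = scipy.linalg.sqrtm(np.dot(sigma_gen, sigma_real), disp=False)
    return float(np.real(m + np.trace(sigma_gen + sigma_real - s * 2)))

# Identical statistics give a distance of zero:
mu, sigma = np.zeros(4), np.eye(4)
assert abs(fid_from_stats(mu, sigma, mu, sigma)) < 1e-6

--------------------------------------------------------------------------------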
/metrics/inception_score.py:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary
3 | #
4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
5 | # property and proprietary rights in and to this material, related
6 | # documentation and any modifications thereto. Any use, reproduction,
7 | # disclosure or distribution of this material and related documentation
8 | # without an express license agreement from NVIDIA CORPORATION or
9 | # its affiliates is strictly prohibited.
10 |
11 | """Inception Score (IS) from the paper "Improved techniques for training
12 | GANs". Matches the original implementation by Salimans et al. at
13 | https://github.com/openai/improved-gan/blob/master/inception_score/model.py"""
14 |
15 | import numpy as np
16 | from . import metric_utils
17 |
18 | #----------------------------------------------------------------------------
19 |
20 | def compute_is(opts, num_gen, num_splits):
21 | # Direct TorchScript translation of http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
22 | detector_url = 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/metrics/inception-2015-12-05.pkl'
23 | detector_kwargs = dict(no_output_bias=True) # Match the original implementation by not applying bias in the softmax layer.
24 |
25 | gen_probs = metric_utils.compute_feature_stats_for_generator(
26 | opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
27 | capture_all=True, max_items=num_gen).get_all()
28 |
29 | if opts.rank != 0:
30 | return float('nan'), float('nan')
31 |
32 | scores = []
33 | for i in range(num_splits):
34 | part = gen_probs[i * num_gen // num_splits : (i + 1) * num_gen // num_splits]
35 | kl = part * (np.log(part) - np.log(np.mean(part, axis=0, keepdims=True)))
36 | kl = np.mean(np.sum(kl, axis=1))
37 | scores.append(np.exp(kl))
38 | return float(np.mean(scores)), float(np.std(scores))
39 |
40 | #----------------------------------------------------------------------------
41 |
--------------------------------------------------------------------------------
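Each split's score above is exp(E_x[KL(p(y|x) || p(y))]): a model whose conditional class distribution never deviates from the marginal scores exactly 1.0, as this numpy-only toy check illustrates (not repository code):

import numpy as np

probs = np.full((1000, 10), 0.1)  # p(y|x): uniform over 10 classes for every sample
kl = probs * (np.log(probs) - np.log(np.mean(probs, axis=0, keepdims=True)))
score = np.exp(np.mean(np.sum(kl, axis=1)))
print(score)  # 1.0: the predictions carry no class information

--------------------------------------------------------------------------------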
/torch_utils/__init__.py:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary
3 | #
4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
5 | # property and proprietary rights in and to this material, related
6 | # documentation and any modifications thereto. Any use, reproduction,
7 | # disclosure or distribution of this material and related documentation
8 | # without an express license agreement from NVIDIA CORPORATION or
9 | # its affiliates is strictly prohibited.
10 |
11 | # empty
12 |
--------------------------------------------------------------------------------
/torch_utils/debug_utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 |
4 |
5 | def save_obj(path, v, f=None, c=None):
6 | with open(path, 'w') as file:
7 | for i in range(len(v)):
8 | if c is not None:
9 | file.write('v %f %f %f %f %f %f\n' % (v[i, 0], v[i, 1], v[i, 2], c[i, 0], c[i, 1], c[i, 2]))
10 | else:
11 | file.write('v %f %f %f %d %d %d\n' % (v[i, 0], v[i, 1], v[i, 2], 1, 1, 1))
12 |
13 | file.write('\n')
14 | if f is not None:
15 | for i in range(len(f)):
16 | file.write('f %d %d %d\n' % (f[i, 0], f[i, 1], f[i, 2]))
17 |
18 | # no explicit close needed: the 'with' block closes the file
19 |
20 | def save_obj_torch(path, v, f=None, c=None):
21 | v_ = v.cpu().numpy().astype(np.float32)
22 | f_ = None if f is None else f.cpu().numpy().astype(np.int32)
23 | c_ = None if c is None else c.cpu().numpy().astype(np.float32)
24 | save_obj(path, v_, f_, c_)
--------------------------------------------------------------------------------
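A usage sketch for the OBJ helpers above. Note that OBJ face indices are 1-based; save_obj writes f as-is, so callers holding 0-indexed triangles would pass f + 1.

import torch

v = torch.tensor([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])  # vertices
c = torch.tensor([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])  # per-vertex RGB
f = torch.tensor([[1, 2, 3]], dtype=torch.int32)              # 1-based face indices

save_obj_torch('triangle.obj', v, f=f, c=c)  # save_obj_torch as defined above

--------------------------------------------------------------------------------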
/torch_utils/ops/__init__.py:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary
3 | #
4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
5 | # property and proprietary rights in and to this material, related
6 | # documentation and any modifications thereto. Any use, reproduction,
7 | # disclosure or distribution of this material and related documentation
8 | # without an express license agreement from NVIDIA CORPORATION or
9 | # its affiliates is strictly prohibited.
10 |
11 | # empty
12 |
--------------------------------------------------------------------------------
/torch_utils/ops/bias_act.h:
--------------------------------------------------------------------------------
1 | /*
2 | * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
4 | *
5 | * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
6 | * property and proprietary rights in and to this material, related
7 | * documentation and any modifications thereto. Any use, reproduction,
8 | * disclosure or distribution of this material and related documentation
9 | * without an express license agreement from NVIDIA CORPORATION or
10 | * its affiliates is strictly prohibited.
11 | */
12 |
13 | //------------------------------------------------------------------------
14 | // CUDA kernel parameters.
15 |
16 | struct bias_act_kernel_params
17 | {
18 |     const void* x;      // [sizeX]
19 |     const void* b;      // [sizeB] or NULL
20 |     const void* xref;   // [sizeX] or NULL
21 |     const void* yref;   // [sizeX] or NULL
22 |     const void* dy;     // [sizeX] or NULL
23 |     void*       y;      // [sizeX]
24 |
25 |     int         grad;
26 |     int         act;
27 |     float       alpha;
28 |     float       gain;
29 |     float       clamp;
30 |
31 |     int         sizeX;
32 |     int         sizeB;
33 |     int         stepB;
34 |     int         loopX;
35 | };
36 |
37 | //------------------------------------------------------------------------
38 | // CUDA kernel selection.
39 |
40 | template <class T> void* choose_bias_act_kernel(const bias_act_kernel_params& p);
41 |
42 | //------------------------------------------------------------------------
43 |
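The header only declares the kernel's parameter block; functionally, the fused op computes roughly y = clamp(act(x + b) * gain). A plain-PyTorch stand-in for the leaky-ReLU case (an illustrative sketch of the semantics, not the CUDA kernel; gain sqrt(2) and clamp 256 mirror common StyleGAN defaults):

import torch
import torch.nn.functional as F

def bias_act_ref(x, b=None, alpha=0.2, gain=2 ** 0.5, clamp=None):
    if b is not None:
        x = x + b.reshape([1, -1] + [1] * (x.ndim - 2))  # per-channel bias
    x = F.leaky_relu(x, negative_slope=alpha) * gain
    return x if clamp is None else x.clamp(-clamp, clamp)

y = bias_act_ref(torch.randn(4, 8, 16, 16), b=torch.zeros(8), clamp=256)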
--------------------------------------------------------------------------------
/torch_utils/ops/filtered_lrelu_ns.cu:
--------------------------------------------------------------------------------
1 | /*
2 | * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
4 | *
5 | * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
6 | * property and proprietary rights in and to this material, related
7 | * documentation and any modifications thereto. Any use, reproduction,
8 | * disclosure or distribution of this material and related documentation
9 | * without an express license agreement from NVIDIA CORPORATION or
10 | * its affiliates is strictly prohibited.
11 | */
12 |
13 | #include "filtered_lrelu.cu"
14 |
15 | // Template/kernel specializations for no signs mode (no gradients required).
16 |
17 | // Full op, 32-bit indexing.
18 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<c10::Half, int32_t, false, false>(const filtered_lrelu_kernel_params& p, int sharedKB);
19 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<float, int32_t, false, false>(const filtered_lrelu_kernel_params& p, int sharedKB);
20 |
21 | // Full op, 64-bit indexing.
22 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<c10::Half, int64_t, false, false>(const filtered_lrelu_kernel_params& p, int sharedKB);
23 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<float, int64_t, false, false>(const filtered_lrelu_kernel_params& p, int sharedKB);
24 |
25 | // Activation/signs only for generic variant. 64-bit indexing.
26 | template void* choose_filtered_lrelu_act_kernel<c10::Half, false, false>(void);
27 | template void* choose_filtered_lrelu_act_kernel<float, false, false>(void);
28 | template void* choose_filtered_lrelu_act_kernel<double, false, false>(void);
29 |
30 | // Copy filters to constant memory.
31 | template cudaError_t copy_filters<false, false>(cudaStream_t stream);
32 |
--------------------------------------------------------------------------------
/torch_utils/ops/filtered_lrelu_rd.cu:
--------------------------------------------------------------------------------
1 | /*
2 | * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
4 | *
5 | * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
6 | * property and proprietary rights in and to this material, related
7 | * documentation and any modifications thereto. Any use, reproduction,
8 | * disclosure or distribution of this material and related documentation
9 | * without an express license agreement from NVIDIA CORPORATION or
10 | * its affiliates is strictly prohibited.
11 | */
12 |
13 | #include "filtered_lrelu.cu"
14 |
15 | // Template/kernel specializations for sign read mode.
16 |
17 | // Full op, 32-bit indexing.
18 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<c10::Half, int32_t, false, true>(const filtered_lrelu_kernel_params& p, int sharedKB);
19 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<float, int32_t, false, true>(const filtered_lrelu_kernel_params& p, int sharedKB);
20 |
21 | // Full op, 64-bit indexing.
22 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<c10::Half, int64_t, false, true>(const filtered_lrelu_kernel_params& p, int sharedKB);
23 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<float, int64_t, false, true>(const filtered_lrelu_kernel_params& p, int sharedKB);
24 |
25 | // Activation/signs only for generic variant. 64-bit indexing.
26 | template void* choose_filtered_lrelu_act_kernel<c10::Half, false, true>(void);
27 | template void* choose_filtered_lrelu_act_kernel<float, false, true>(void);
28 | template void* choose_filtered_lrelu_act_kernel<double, false, true>(void);
29 |
30 | // Copy filters to constant memory.
31 | template cudaError_t copy_filters<false, true>(cudaStream_t stream);
32 |
--------------------------------------------------------------------------------
/torch_utils/ops/filtered_lrelu_wr.cu:
--------------------------------------------------------------------------------
1 | /*
2 | * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
4 | *
5 | * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
6 | * property and proprietary rights in and to this material, related
7 | * documentation and any modifications thereto. Any use, reproduction,
8 | * disclosure or distribution of this material and related documentation
9 | * without an express license agreement from NVIDIA CORPORATION or
10 | * its affiliates is strictly prohibited.
11 | */
12 |
13 | #include "filtered_lrelu.cu"
14 |
15 | // Template/kernel specializations for sign write mode.
16 |
17 | // Full op, 32-bit indexing.
18 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<c10::Half, int32_t, true, false>(const filtered_lrelu_kernel_params& p, int sharedKB);
19 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<float, int32_t, true, false>(const filtered_lrelu_kernel_params& p, int sharedKB);
20 |
21 | // Full op, 64-bit indexing.
22 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<c10::Half, int64_t, true, false>(const filtered_lrelu_kernel_params& p, int sharedKB);
23 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<float, int64_t, true, false>(const filtered_lrelu_kernel_params& p, int sharedKB);
24 |
25 | // Activation/signs only for generic variant. 64-bit indexing.
26 | template void* choose_filtered_lrelu_act_kernel<c10::Half, true, false>(void);
27 | template void* choose_filtered_lrelu_act_kernel<float, true, false>(void);
28 | template void* choose_filtered_lrelu_act_kernel<double, true, false>(void);
29 |
30 | // Copy filters to constant memory.
31 | template cudaError_t copy_filters<true, false>(cudaStream_t stream);
32 |
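The three .cu files above instantiate the same kernels for the three sign-buffer modes: _ns (no signs kept), _wr (signs written on the forward pass), _rd (signs read back on the backward pass); the boolean template arguments select signWrite/signRead. Functionally the fused op is 2x upsample -> bias + leaky ReLU -> 2x downsample. A slow depthwise-convolution stand-in (illustrative only; padding handling is simplified relative to the real op):

import torch
import torch.nn.functional as F

def filtered_lrelu_ref(x, b, filt, alpha=0.2, gain=2 ** 0.5):
    n, c, h, w = x.shape
    up = torch.zeros(n, c, h * 2, w * 2, dtype=x.dtype, device=x.device)
    up[:, :, ::2, ::2] = x                                       # zero-stuff 2x
    k = (filt * 4)[None, None].repeat(c, 1, 1, 1)                # x4 restores energy lost to zero-stuffing
    y = F.conv2d(up, k, padding=filt.shape[-1] // 2, groups=c)   # low-pass after upsampling
    y = F.leaky_relu(y + b.reshape(1, -1, 1, 1), alpha) * gain   # the sign buffers cache sign(y) here
    k = filt[None, None].repeat(c, 1, 1, 1)
    y = F.conv2d(y, k, padding=filt.shape[-1] // 2, groups=c)    # low-pass before downsampling
    return y[:, :, ::2, ::2]                                     # keep every 2nd sample

t = torch.tensor([1., 2., 1.]) / 4.                              # separable binomial filter
y = filtered_lrelu_ref(torch.randn(1, 3, 16, 16), torch.zeros(3), torch.outer(t, t))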
--------------------------------------------------------------------------------
/torch_utils/ops/fma.py:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary
3 | #
4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
5 | # property and proprietary rights in and to this material, related
6 | # documentation and any modifications thereto. Any use, reproduction,
7 | # disclosure or distribution of this material and related documentation
8 | # without an express license agreement from NVIDIA CORPORATION or
9 | # its affiliates is strictly prohibited.
10 |
11 | """Fused multiply-add, with slightly faster gradients than `torch.addcmul()`."""
12 |
13 | import torch
14 |
15 | #----------------------------------------------------------------------------
16 |
17 | def fma(a, b, c): # => a * b + c
18 |     return _FusedMultiplyAdd.apply(a, b, c)
19 |
20 | #----------------------------------------------------------------------------
21 |
22 | class _FusedMultiplyAdd(torch.autograd.Function): # a * b + c
23 |     @staticmethod
24 |     def forward(ctx, a, b, c): # pylint: disable=arguments-differ
25 |         out = torch.addcmul(c, a, b)
26 |         ctx.save_for_backward(a, b)
27 |         ctx.c_shape = c.shape
28 |         return out
29 |
30 |     @staticmethod
31 |     def backward(ctx, dout): # pylint: disable=arguments-differ
32 |         a, b = ctx.saved_tensors
33 |         c_shape = ctx.c_shape
34 |         da = None
35 |         db = None
36 |         dc = None
37 |
38 |         if ctx.needs_input_grad[0]:
39 |             da = _unbroadcast(dout * b, a.shape)
40 |
41 |         if ctx.needs_input_grad[1]:
42 |             db = _unbroadcast(dout * a, b.shape)
43 |
44 |         if ctx.needs_input_grad[2]:
45 |             dc = _unbroadcast(dout, c_shape)
46 |
47 |         return da, db, dc
48 |
49 | #----------------------------------------------------------------------------
50 |
51 | def _unbroadcast(x, shape):
52 |     extra_dims = x.ndim - len(shape)
53 |     assert extra_dims >= 0
54 |     dim = [i for i in range(x.ndim) if x.shape[i] > 1 and (i < extra_dims or shape[i - extra_dims] == 1)]
55 |     if len(dim):
56 |         x = x.sum(dim=dim, keepdim=True)
57 |     if extra_dims:
58 |         x = x.reshape(-1, *x.shape[extra_dims+1:])
59 |     assert x.shape == shape
60 |     return x
61 |
62 | #----------------------------------------------------------------------------
63 |
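Since fma() broadcasts like torch.addcmul(), _unbroadcast() must reduce gradients back to each input's shape. A quick check of both the value and the gradients (usage sketch; double precision is required by gradcheck, and the import assumes the repo root is on PYTHONPATH):

import torch
from torch_utils.ops.fma import fma

a = torch.randn(4, 8, dtype=torch.double, requires_grad=True)
b = torch.randn(4, 8, dtype=torch.double, requires_grad=True)
c = torch.randn(1, 8, dtype=torch.double, requires_grad=True)  # broadcast over dim 0

assert torch.equal(fma(a, b, c), torch.addcmul(c, a, b))
torch.autograd.gradcheck(fma, (a, b, c))  # dc is summed back to shape [1, 8]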
--------------------------------------------------------------------------------
/torch_utils/ops/upfirdn2d.h:
--------------------------------------------------------------------------------
1 | /*
2 | * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
4 | *
5 | * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
6 | * property and proprietary rights in and to this material, related
7 | * documentation and any modifications thereto. Any use, reproduction,
8 | * disclosure or distribution of this material and related documentation
9 | * without an express license agreement from NVIDIA CORPORATION or
10 | * its affiliates is strictly prohibited.
11 | */
12 |
13 | #include <cuda_runtime.h>
14 |
15 | //------------------------------------------------------------------------
16 | // CUDA kernel parameters.
17 |
18 | struct upfirdn2d_kernel_params
19 | {
20 |     const void*     x;
21 |     const float*    f;
22 |     void*           y;
23 |
24 |     int2            up;
25 |     int2            down;
26 |     int2            pad0;
27 |     int             flip;
28 |     float           gain;
29 |
30 |     int4            inSize;         // [width, height, channel, batch]
31 |     int4            inStride;
32 |     int2            filterSize;     // [width, height]
33 |     int2            filterStride;
34 |     int4            outSize;        // [width, height, channel, batch]
35 |     int4            outStride;
36 |     int             sizeMinor;
37 |     int             sizeMajor;
38 |
39 |     int             loopMinor;
40 |     int             loopMajor;
41 |     int             loopX;
42 |     int             launchMinor;
43 |     int             launchMajor;
44 | };
45 |
46 | //------------------------------------------------------------------------
47 | // CUDA kernel specialization.
48 |
49 | struct upfirdn2d_kernel_spec
50 | {
51 |     void*   kernel;
52 |     int     tileOutW;
53 |     int     tileOutH;
54 |     int     loopMinor;
55 |     int     loopX;
56 | };
57 |
58 | //------------------------------------------------------------------------
59 | // CUDA kernel selection.
60 |
61 | template <class T> upfirdn2d_kernel_spec choose_upfirdn2d_kernel(const upfirdn2d_kernel_params& p);
62 |
63 | //------------------------------------------------------------------------
64 |
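As the parameter names suggest, upfirdn2d is upsample (zero-stuffing) -> FIR filter -> downsample, applied in 2-D. The 1-D building block in plain NumPy (a reference sketch; the real kernel fuses the separable 2-D version with explicit pad0/flip/gain handling):

import numpy as np

def upfirdn_1d(x, f, up=2, down=2, pad=(0, 0)):
    y = np.zeros(len(x) * up, dtype=x.dtype)
    y[::up] = x                                       # upsample: insert up-1 zeros
    y = np.convolve(np.pad(y, pad), f, mode='valid')  # FIR filter
    return y[::down]                                  # downsample: keep every down-th

x = np.arange(8, dtype=np.float64)
f = np.array([1., 3., 3., 1.]) / 8. * 2               # low-pass, scaled by up to preserve DC
print(upfirdn_1d(x, f, up=2, down=1, pad=(2, 1)))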
--------------------------------------------------------------------------------
/train_3dgan.sh:
--------------------------------------------------------------------------------
1 | python train_avatar_texture.py \
2 | --outdir training-runs/next3d++ \
3 | --cfg ffhq \
4 | --data /data/zhaoxiaochen/Dataset/FFHQ/images512x512 \
5 | --rdata /data/zhaoxiaochen/Dataset/FFHQ/orthRender256x256_face \
6 | --gpus 8 \
7 | --batch 32 \
8 | --mbstd-group 4 \
9 | --gamma 8 \
10 | --model_version v20 \
11 | --gen_pose_cond 1 \
12 | --neural_rendering_resolution_initial 64 \
13 | --neural_rendering_resolution_final 128 \
14 | --metrics fid50k_full \
15 | --resume pretrained_models/eg3d/ffhqrebalanced512-128.pkl
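Note the batch arithmetic these flags imply: --batch is the total across all GPUs, and the minibatch-stddev group has to fit the per-GPU slice. A sanity check of the values above (a sketch; the divisibility constraints mirror what StyleGAN-style train.py typically asserts):

gpus, batch, mbstd_group = 8, 32, 4

assert batch % gpus == 0                 # whole batch splits evenly across GPUs
batch_gpu = batch // gpus                # 4 samples per GPU per step
assert batch_gpu % mbstd_group == 0      # stddev groups must divide the per-GPU batch
print('per-GPU batch:', batch_gpu)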
--------------------------------------------------------------------------------
/train_inversion.sh:
--------------------------------------------------------------------------------
1 | ################E4E training############################################
2 | python encoder_inversion/train.py \
3 | --outdir=training-runs/encoder_inversion/e4e \
4 | --cfg=ffhq \
5 | --data /data/zhaoxiaochen/Dataset/FFHQ/images512x512 \
6 | --rdata /data/zhaoxiaochen/Dataset/FFHQ/orthRender256x256_face \
7 | --gpus=8 \
8 | --batch=32 \
9 | --mbstd-group=4 \
10 | --gamma=1 \
11 | --snap=10 \
12 | --gen_pose_cond=1 \
13 | --model_version v20 \
14 | --configs_path=encoder_inversion/config/train_e4e_real.yaml \
15 | --gen_lms_cond 0 \
16 | --training_state e4e \
17 | --gen_mask_cond 0
18 |
19 | ################Few-shot Training###################################
20 |
21 | python encoder_inversion/train.py \
22 | --outdir=training-runs/encoder_inversion/few-shot \
23 | --cfg=ffhq \
24 | --data /data/zhaoxiaochen/Dataset/VFHQ/dataset/images512x512 \
25 | --rdata /data/zhaoxiaochen/Dataset/VFHQ/dataset/orthRender256x256_face_eye \
26 | --dataset_class_name encoder_inversion.dataset_video.VideoFolderDataset \
27 | --gpus=8 \
28 | --batch=8 \
29 | --mbstd-group=1 \
30 | --gamma=1 \
31 | --tick=1 \
32 | --snap=4 \
33 | --gen_pose_cond=1 \
34 | --model_version v20 \
35 | --configs_path=encoder_inversion/config/train_textureUnet_video.yaml \
36 | --gen_lms_cond 0 \
37 | --gen_mask_cond 1 \
38 | --gen_uv_cond 1 \
39 | --training_state fewshot \
40 | --resume training-runs/encoder_inversion/e4e/path-to-your-pkl.pkl
41 |
42 |
43 | ################Improved One-shot Training###################################
44 | python encoder_inversion/train.py \
45 | --outdir=training-runs/encoder_inversion/v20_128/both_TriSFT_TriplaneTexNet_uvFInp/BothSegFormerDecoder \
46 | --cfg=ffhq \
47 | --data /data/zhaoxiaochen/Dataset/FFHQ/images512x512 \
48 | --rdata /data/zhaoxiaochen/Dataset/FFHQ/orthRender256x256_face_eye \
49 | --gpus=8 \
50 | --batch=16 \
51 | --mbstd-group=2 \
52 | --gamma=8 \
53 | --snap=10 \
54 | --gen_pose_cond=1 \
55 | --model_version v20 \
56 | --configs_path=encoder_inversion/config/train_textureUnet_real.yaml \
57 | --gen_lms_cond 0 \
58 | --gen_mask_cond 0 \
59 | --gen_uv_cond 1 \
60 | --training_state oneshot \
61 | --resume training-runs/encoder_inversion/e4e/path-to-your-pkl.pkl
62 |
--------------------------------------------------------------------------------
/training/__init__.py:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary
3 | #
4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
5 | # property and proprietary rights in and to this material, related
6 | # documentation and any modifications thereto. Any use, reproduction,
7 | # disclosure or distribution of this material and related documentation
8 | # without an express license agreement from NVIDIA CORPORATION or
9 | # its affiliates is strictly prohibited.
10 |
11 | # empty
12 |
--------------------------------------------------------------------------------
/training/crosssection_utils.py:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary
3 | #
4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
5 | # property and proprietary rights in and to this material, related
6 | # documentation and any modifications thereto. Any use, reproduction,
7 | # disclosure or distribution of this material and related documentation
8 | # without an express license agreement from NVIDIA CORPORATION or
9 | # its affiliates is strictly prohibited.
10 |
11 | import torch
12 |
13 | def sample_cross_section(G, ws, resolution=256, w=1.2):
14 |     axis = 0
15 |     A, B = torch.meshgrid(torch.linspace(w/2, -w/2, resolution, device=ws.device), torch.linspace(-w/2, w/2, resolution, device=ws.device), indexing='ij')
16 |     A, B = A.reshape(-1, 1), B.reshape(-1, 1)
17 |     C = torch.zeros_like(A)
18 |     coordinates = [A, B]
19 |     coordinates.insert(axis, C)
20 |     coordinates = torch.cat(coordinates, dim=-1).expand(ws.shape[0], -1, -1)
21 |
22 |     sigma = G.sample_mixed(coordinates, torch.randn_like(coordinates), ws)['sigma']
23 |     return sigma.reshape(-1, 1, resolution, resolution)
24 |
25 | # if __name__ == '__main__':
26 | # sample_crossection(None)
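sample_cross_section() flattens a resolution x resolution plane at x=0 into per-latent query points and reshapes the returned densities into an image. A runnable usage sketch with a stub generator (StubG and its Gaussian density are hypothetical; any G exposing the sample_mixed() interface used above works):

import torch
from training.crosssection_utils import sample_cross_section

class StubG:  # minimal stand-in for a generator with sample_mixed()
    def sample_mixed(self, coords, dirs, ws):
        return {'sigma': torch.exp(-coords.square().sum(-1, keepdim=True))}

ws = torch.randn(2, 14, 512)                    # hypothetical W+ latents
img = sample_cross_section(StubG(), ws, resolution=64)
print(img.shape)                                # torch.Size([2, 1, 64, 64])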
--------------------------------------------------------------------------------
/training_avatar_texture/__init__.py:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary
3 | #
4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
5 | # property and proprietary rights in and to this material, related
6 | # documentation and any modifications thereto. Any use, reproduction,
7 | # disclosure or distribution of this material and related documentation
8 | # without an express license agreement from NVIDIA CORPORATION or
9 | # its affiliates is strictly prohibited.
10 |
11 | # empty
12 |
--------------------------------------------------------------------------------
/training_avatar_texture/embedder.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
5 | class Embedder:
6 |     def __init__(self, **kwargs):
7 |
8 |         self.kwargs = kwargs
9 |         self.create_embedding_fn()
10 |
11 |     def create_embedding_fn(self):
12 |
13 |         embed_fns = []
14 |         d = self.kwargs['input_dims']
15 |         out_dim = 0
16 |         if self.kwargs['include_input']:
17 |             embed_fns.append(lambda x: x)
18 |             out_dim += d
19 |
20 |         max_freq = self.kwargs['max_freq_log2']
21 |         N_freqs = self.kwargs['num_freqs']
22 |
23 |         if self.kwargs['log_sampling']:
24 |             freq_bands = 2. ** torch.linspace(0., max_freq, steps=N_freqs)
25 |         else:
26 |             freq_bands = torch.linspace(2. ** 0., 2. ** max_freq, steps=N_freqs)
27 |
28 |         for freq in freq_bands:
29 |             for p_fn in self.kwargs['periodic_fns']:
30 |                 embed_fns.append(lambda x, p_fn=p_fn, freq=freq: p_fn(x * freq))
31 |                 out_dim += d
32 |
33 |         self.embed_fns = embed_fns
34 |         self.out_dim = out_dim
35 |
36 |     def embed(self, inputs):
37 |         return torch.cat([fn(inputs) for fn in self.embed_fns], -1)
38 |
39 |
40 | def get_embedder(multires, i=0, input_dims=3, include_input=True):
41 |     if i == -1:
42 |         return nn.Identity(), input_dims
43 |
44 |     embed_kwargs = {
45 |         'include_input': include_input,
46 |         'input_dims': input_dims,
47 |         'max_freq_log2': multires - 1,
48 |         'num_freqs': multires,
49 |         'log_sampling': True,
50 |         'periodic_fns': [torch.sin, torch.cos],
51 |     }
52 |
53 |     embedder_obj = Embedder(**embed_kwargs)
54 |     embed = lambda x, eo=embedder_obj: eo.embed(x)
55 |     return embed, embedder_obj.out_dim
56 |
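get_embedder() is the standard NeRF positional encoding: the output dimension is d + d*2*L for input dimension d and L frequency bands, i.e. 3 + 3*2*10 = 63 for the defaults with multires=10. A usage sketch (the import assumes the repo root is on PYTHONPATH):

import torch
from training_avatar_texture.embedder import get_embedder

embed, out_dim = get_embedder(multires=10)   # frequencies 2^0 ... 2^9
x = torch.rand(1024, 3)                      # e.g. 3-D sample coordinates
print(out_dim, embed(x).shape)               # 63  torch.Size([1024, 63])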
--------------------------------------------------------------------------------
/training_avatar_texture/volumetric_rendering/__init__.py:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary
3 | #
4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
5 | # property and proprietary rights in and to this material, related
6 | # documentation and any modifications thereto. Any use, reproduction,
7 | # disclosure or distribution of this material and related documentation
8 | # without an express license agreement from NVIDIA CORPORATION or
9 | # its affiliates is strictly prohibited.
10 |
11 | # empty
--------------------------------------------------------------------------------