├── portrait4d
├── models
│   ├── FLAME
│   │   ├── __init__.py
│   │   ├── flame_addition_info.npy
│   │   ├── simplify_baricentric.npy
│   │   ├── simplify_baricentric2.npy
│   │   ├── geometry
│   │   │   ├── landmark_embedding.npy
│   │   │   └── landmark_embedding_with_eyes.npy
│   │   ├── flame_addition_info_new_smallface.npy
│   │   ├── flame_addition_info_new_nosmallface.npy
│   │   ├── ca_pose.txt
│   │   ├── cfg.yaml
│   │   ├── ca_exp.txt
│   │   └── ca_shape.txt
│   ├── pdfgc
│   │   ├── __init__.py
│   │   └── encoder.py
│   ├── deeplabv3
│   │   └── __init__.py
│   ├── mix_transformer
│   │   └── __init__.py
│   └── lpips
│   │   └── LICENSE
├── training
│   ├── utils
│   │   └── __init__.py
│   ├── __init__.py
│   ├── volumetric_rendering
│   │   ├── __init__.py
│   │   └── ray_marcher.py
│   ├── crosssection_utils.py
│   ├── loss
│   │   └── loss_utils.py
│   ├── deformer
│   │   └── mesh_renderer.py
│   └── dataloader
│   │   └── protocols
│   │   │   ├── datum_genhead_pb2.py
│   │   │   └── datum_portrait_ffhq_pb2.py
├── configs
│   ├── yacs
│   │   └── __init__.py
│   ├── __init__.py
│   ├── genhead-ffhq512.yaml
│   ├── genhead-ffhq512-toy.yaml
│   ├── portrait4d-genhead512.yaml
│   ├── portrait4d-static-genhead512.yaml
│   ├── config.py
│   ├── portrait4d-v2-vfhq512.yaml
│   └── portrait4d-v2-vfhq512-toy.yaml
├── examples
│   ├── 2dldmks_align
│   │   ├── Yeoh.npy
│   │   ├── 491976.npy
│   │   ├── MV5BMT.npy
│   │   ├── UX1000_.npy
│   │   ├── tom-hanks.npy
│   │   ├── wp7332247.npy
│   │   ├── GettyImages.npy
│   │   ├── Zoe_Saldana.npy
│   │   ├── anne-hathaway.npy
│   │   ├── michelle-yeoh.npy
│   │   ├── tom_hanks_ranks.npy
│   │   ├── ANNEHATHAWAYMOVIE.npy
│   │   ├── leonardo-dicaprio.npy
│   │   ├── 112217-2017-angelina.npy
│   │   ├── tom-cruise-net-worth.npy
│   │   ├── 123708_leonardo-dicaprio.npy
│   │   ├── Leonardo-DiCaprio-2016.npy
│   │   ├── NANeN75ykf8ozohXrz8ycd.npy
│   │   ├── 1280-elizabeth-olsen-main.npy
│   │   ├── gal-gadot-gucci-photshoot.npy
│   │   ├── Pictures-of-Jennifer-Lawrence.npy
│   │   ├── BradPitt-GettyImages-1158782727.npy
│   │   ├── rachel-mcadams-actress-pictures.npy
│   │   ├── c551bce446ed70343fff6ceb75a9c398.npy
│   │   ├── d13b1ab4b2c3df7e644a12541597fc51.npy
│   │   ├── 90741dde2685-gettyimages-1470263599.npy
│   │   ├── brad-pitt-actor-man-smile-wallpaper.npy
│   │   ├── jennifer lawrence extreme side part hero.npy
│   │   ├── 02xp-tomhanks1-gbth-videoSixteenByNineJumbo1600.npy
│   │   ├── 126467186_214084556756081_9216829119240203400_n.npy
│   │   ├── leonardo-dicaprio-bright-blue-eyes-nwn6qtcqp0pbn3a7.npy
│   │   └── elizabeth-olsen-american-actress-portrait-2880x1800-5138.npy
│   ├── 3dldmks_align
│   │   ├── Yeoh.npy
│   │   ├── 491976.npy
│   │   ├── MV5BMT.npy
│   │   ├── UX1000_.npy
│   │   ├── tom-hanks.npy
│   │   ├── wp7332247.npy
│   │   ├── GettyImages.npy
│   │   ├── Zoe_Saldana.npy
│   │   ├── anne-hathaway.npy
│   │   ├── michelle-yeoh.npy
│   │   ├── tom_hanks_ranks.npy
│   │   ├── ANNEHATHAWAYMOVIE.npy
│   │   ├── leonardo-dicaprio.npy
│   │   ├── 112217-2017-angelina.npy
│   │   ├── tom-cruise-net-worth.npy
│   │   ├── 123708_leonardo-dicaprio.npy
│   │   ├── Leonardo-DiCaprio-2016.npy
│   │   ├── NANeN75ykf8ozohXrz8ycd.npy
│   │   ├── 1280-elizabeth-olsen-main.npy
│   │   ├── gal-gadot-gucci-photshoot.npy
│   │   ├── Pictures-of-Jennifer-Lawrence.npy
│   │   ├── BradPitt-GettyImages-1158782727.npy
│   │   ├── rachel-mcadams-actress-pictures.npy
│   │   ├── c551bce446ed70343fff6ceb75a9c398.npy
│   │   ├── d13b1ab4b2c3df7e644a12541597fc51.npy
│   │   ├── 90741dde2685-gettyimages-1470263599.npy
│   │   ├── brad-pitt-actor-man-smile-wallpaper.npy
│   │   ├── jennifer lawrence extreme side part hero.npy
│   │   ├── 02xp-tomhanks1-gbth-videoSixteenByNineJumbo1600.npy
│   │   ├── 126467186_214084556756081_9216829119240203400_n.npy
│   │   ├── leonardo-dicaprio-bright-blue-eyes-nwn6qtcqp0pbn3a7.npy
│   │   └── elizabeth-olsen-american-actress-portrait-2880x1800-5138.npy
│   ├── align_images
│   │   ├── Yeoh.jpg
│   │   ├── 491976.jpg
│   │   ├── MV5BMT.jpg
│   │   ├── UX1000_.jpg
│   │   ├── tom-hanks.jpg
│   │   ├── wp7332247.jpg
│   │   ├── GettyImages.jpg
│   │   ├── Zoe_Saldana.jpg
│   │   ├── anne-hathaway.jpg
│   │   ├── michelle-yeoh.jpg
│   │   ├── tom_hanks_ranks.jpg
│   │   ├── ANNEHATHAWAYMOVIE.jpg
│   │   ├── leonardo-dicaprio.jpg
│   │   ├── 112217-2017-angelina.jpg
│   │   ├── tom-cruise-net-worth.jpg
│   │   ├── Leonardo-DiCaprio-2016.png
│   │   ├── NANeN75ykf8ozohXrz8ycd.jpg
│   │   ├── 123708_leonardo-dicaprio.jpg
│   │   ├── 1280-elizabeth-olsen-main.jpg
│   │   ├── gal-gadot-gucci-photshoot.jpg
│   │   ├── Pictures-of-Jennifer-Lawrence.jpg
│   │   ├── BradPitt-GettyImages-1158782727.jpg
│   │   ├── c551bce446ed70343fff6ceb75a9c398.jpg
│   │   ├── d13b1ab4b2c3df7e644a12541597fc51.jpg
│   │   ├── rachel-mcadams-actress-pictures.jpg
│   │   ├── 90741dde2685-gettyimages-1470263599.jpg
│   │   ├── brad-pitt-actor-man-smile-wallpaper.jpg
│   │   ├── jennifer lawrence extreme side part hero.jpg
│   │   ├── 02xp-tomhanks1-gbth-videoSixteenByNineJumbo1600.jpg
│   │   ├── 126467186_214084556756081_9216829119240203400_n.jpg
│   │   ├── leonardo-dicaprio-bright-blue-eyes-nwn6qtcqp0pbn3a7.jpg
│   │   └── elizabeth-olsen-american-actress-portrait-2880x1800-5138.jpg
│   ├── flame_optim_params
│   │   ├── Yeoh.npy
│   │   ├── 491976.npy
│   │   ├── MV5BMT.npy
│   │   ├── UX1000_.npy
│   │   ├── tom-hanks.npy
│   │   ├── wp7332247.npy
│   │   ├── GettyImages.npy
│   │   ├── Zoe_Saldana.npy
│   │   ├── anne-hathaway.npy
│   │   ├── michelle-yeoh.npy
│   │   ├── tom_hanks_ranks.npy
│   │   ├── ANNEHATHAWAYMOVIE.npy
│   │   ├── leonardo-dicaprio.npy
│   │   ├── 112217-2017-angelina.npy
│   │   ├── tom-cruise-net-worth.npy
│   │   ├── 123708_leonardo-dicaprio.npy
│   │   ├── Leonardo-DiCaprio-2016.npy
│   │   ├── NANeN75ykf8ozohXrz8ycd.npy
│   │   ├── 1280-elizabeth-olsen-main.npy
│   │   ├── gal-gadot-gucci-photshoot.npy
│   │   ├── Pictures-of-Jennifer-Lawrence.npy
│   │   ├── BradPitt-GettyImages-1158782727.npy
│   │   ├── rachel-mcadams-actress-pictures.npy
│   │   ├── c551bce446ed70343fff6ceb75a9c398.npy
│   │   ├── d13b1ab4b2c3df7e644a12541597fc51.npy
│   │   ├── 90741dde2685-gettyimages-1470263599.npy
│   │   ├── brad-pitt-actor-man-smile-wallpaper.npy
│   │   ├── jennifer lawrence extreme side part hero.npy
│   │   ├── 02xp-tomhanks1-gbth-videoSixteenByNineJumbo1600.npy
│   │   ├── 126467186_214084556756081_9216829119240203400_n.npy
│   │   ├── leonardo-dicaprio-bright-blue-eyes-nwn6qtcqp0pbn3a7.npy
│   │   └── elizabeth-olsen-american-actress-portrait-2880x1800-5138.npy
│   └── bfm2flame_params_simplified
│   │   ├── Yeoh.npy
│   │   ├── 491976.npy
│   │   ├── MV5BMT.npy
│   │   ├── UX1000_.npy
│   │   ├── tom-hanks.npy
│   │   ├── wp7332247.npy
│   │   ├── GettyImages.npy
│   │   ├── Zoe_Saldana.npy
│   │   ├── anne-hathaway.npy
│   │   ├── michelle-yeoh.npy
│   │   ├── tom_hanks_ranks.npy
│   │   ├── ANNEHATHAWAYMOVIE.npy
│   │   ├── leonardo-dicaprio.npy
│   │   ├── 112217-2017-angelina.npy
│   │   ├── tom-cruise-net-worth.npy
│   │   ├── Leonardo-DiCaprio-2016.npy
│   │   ├── NANeN75ykf8ozohXrz8ycd.npy
│   │   ├── 123708_leonardo-dicaprio.npy
│   │   ├── 1280-elizabeth-olsen-main.npy
│   │   ├── gal-gadot-gucci-photshoot.npy
│   │   ├── Pictures-of-Jennifer-Lawrence.npy
│   │   ├── BradPitt-GettyImages-1158782727.npy
│   │   ├── c551bce446ed70343fff6ceb75a9c398.npy
│   │   ├── d13b1ab4b2c3df7e644a12541597fc51.npy
│   │   ├── rachel-mcadams-actress-pictures.npy
│   │   ├── 90741dde2685-gettyimages-1470263599.npy
│   │   ├── brad-pitt-actor-man-smile-wallpaper.npy
│   │   ├── jennifer lawrence extreme side part hero.npy
│   │   ├── 02xp-tomhanks1-gbth-videoSixteenByNineJumbo1600.npy
│   │   ├── 126467186_214084556756081_9216829119240203400_n.npy
│   │   ├── leonardo-dicaprio-bright-blue-eyes-nwn6qtcqp0pbn3a7.npy
│   │   └── elizabeth-olsen-american-actress-portrait-2880x1800-5138.npy
├── metrics
│   ├── __init__.py
│   ├── inception_score.py
│   ├── frechet_inception_distance.py
│   ├── kernel_inception_distance.py
│   └── precision_recall.py
├── viz
│   ├── __init__.py
│   ├── backbone_cache_widget.py
│   ├── render_type_widget.py
│   ├── render_depth_sample_widget.py
│   ├── zoom_widget.py
│   ├── conditioning_pose_widget.py
│   ├── stylemix_widget.py
│   ├── performance_widget.py
│   ├── capture_widget.py
│   ├── trunc_noise_widget.py
│   ├── latent_widget.py
│   └── pose_widget.py
├── gui_utils
│   ├── __init__.py
│   └── imgui_window.py
├── torch_utils
│   ├── __init__.py
│   └── ops
│   │   ├── __init__.py
│   │   ├── bias_act.h
│   │   ├── filtered_lrelu_rd.cu
│   │   ├── filtered_lrelu_wr.cu
│   │   ├── filtered_lrelu_ns.cu
│   │   ├── upfirdn2d.h
│   │   ├── fma.py
│   │   ├── grid_sample_gradfix.py
│   │   └── filtered_lrelu.h
├── dnnlib
│   └── __init__.py
└── shape_utils.py
├── data_preprocess
├── lib
│   ├── models
│   │   ├── fd
│   │   │   ├── models
│   │   │   │   └── __init__.py
│   │   │   ├── utils
│   │   │   │   ├── __init__.py
│   │   │   │   ├── nms
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   └── py_cpu_nms.py
│   │   │   │   └── timer.py
│   │   │   ├── layers
│   │   │   │   ├── modules
│   │   │   │   │   └── __init__.py
│   │   │   │   ├── __init__.py
│   │   │   │   └── functions
│   │   │   │   │   └── prior_box.py
│   │   │   └── config.py
│   │   ├── facerecon
│   │   │   ├── util
│   │   │   │   └── __init__.py
│   │   │   ├── __init__.py
│   │   │   └── cropping.py
│   │   ├── networks
│   │   │   ├── __init__.py
│   │   │   └── vision_network.py
│   │   └── __init__.py
│   ├── config
│   │   └── __init__.py
│   └── model_builder.py
├── bfm_to_flame
│   ├── smpl_webuser
│   │   ├── __init__.py
│   │   ├── posemapper.py
│   │   ├── lbs.py
│   │   └── verts.py
│   └── run_convert.py
├── assets
│   └── bfm2flame_mapper
│   │   └── model-iter200000.pth
├── FLAME
│   └── cfg.yaml
├── preprocess_dir.py
├── configs
│   └── pipeline_config1.yaml
├── cropping
│   └── one_euro_filter.py
├── extract_pdfgc.py
└── bfm2flame_mapper.py
├── assets
└── teaser.jpg
├── .gitignore
├── requirements.txt
└── LICENSE

/portrait4d/models/FLAME/__init__.py:
--------------------------------------------------------------------------------
1 | #
--------------------------------------------------------------------------------
/portrait4d/models/pdfgc/__init__.py:
--------------------------------------------------------------------------------
1 | # empty
--------------------------------------------------------------------------------
/data_preprocess/lib/models/fd/models/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/data_preprocess/lib/models/fd/utils/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/portrait4d/models/deeplabv3/__init__.py:
--------------------------------------------------------------------------------
1 | # empty
--------------------------------------------------------------------------------
/portrait4d/training/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # empty
--------------------------------------------------------------------------------
/data_preprocess/bfm_to_flame/smpl_webuser/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/data_preprocess/lib/models/fd/utils/nms/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/portrait4d/models/mix_transformer/__init__.py:
--------------------------------------------------------------------------------
1 | # empty
--------------------------------------------------------------------------------
/portrait4d/configs/yacs/__init__.py:
--------------------------------------------------------------------------------
1 | from .yacs import CfgNode
--------------------------------------------------------------------------------
/data_preprocess/lib/config/__init__.py:
--------------------------------------------------------------------------------
1 | from .config import cfg, args
--------------------------------------------------------------------------------
/portrait4d/configs/__init__.py:
--------------------------------------------------------------------------------
1 | from .config import cfg, args, make_cfg
--------------------------------------------------------------------------------
/assets/teaser.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/assets/teaser.jpg
--------------------------------------------------------------------------------
/data_preprocess/lib/models/fd/layers/modules/__init__.py:
--------------------------------------------------------------------------------
1 | from .multibox_loss import MultiBoxLoss
2 |
3 | __all__ = ['MultiBoxLoss']
4 |
--------------------------------------------------------------------------------
/portrait4d/examples/2dldmks_align/Yeoh.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/Yeoh.npy
--------------------------------------------------------------------------------
/portrait4d/examples/3dldmks_align/Yeoh.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/Yeoh.npy
--------------------------------------------------------------------------------
/portrait4d/examples/align_images/Yeoh.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/Yeoh.jpg
--------------------------------------------------------------------------------
/portrait4d/examples/2dldmks_align/491976.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/491976.npy
--------------------------------------------------------------------------------
/portrait4d/examples/2dldmks_align/MV5BMT.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/MV5BMT.npy
--------------------------------------------------------------------------------
/portrait4d/examples/3dldmks_align/491976.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/491976.npy
--------------------------------------------------------------------------------
/portrait4d/examples/3dldmks_align/MV5BMT.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/MV5BMT.npy
--------------------------------------------------------------------------------
/portrait4d/examples/align_images/491976.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/491976.jpg
--------------------------------------------------------------------------------
/portrait4d/examples/align_images/MV5BMT.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/MV5BMT.jpg
--------------------------------------------------------------------------------
/portrait4d/examples/align_images/UX1000_.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/UX1000_.jpg
--------------------------------------------------------------------------------
/data_preprocess/lib/models/fd/layers/__init__.py:
--------------------------------------------------------------------------------
1 | from lib.models.fd.layers.functions import *
2 | from lib.models.fd.layers.modules import *
3 |
--------------------------------------------------------------------------------
/portrait4d/examples/2dldmks_align/UX1000_.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/UX1000_.npy
--------------------------------------------------------------------------------
/portrait4d/examples/2dldmks_align/tom-hanks.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/tom-hanks.npy
--------------------------------------------------------------------------------
/portrait4d/examples/2dldmks_align/wp7332247.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/wp7332247.npy
--------------------------------------------------------------------------------
/portrait4d/examples/3dldmks_align/UX1000_.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/UX1000_.npy
--------------------------------------------------------------------------------
/portrait4d/examples/3dldmks_align/tom-hanks.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/tom-hanks.npy
--------------------------------------------------------------------------------
/portrait4d/examples/3dldmks_align/wp7332247.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/wp7332247.npy
--------------------------------------------------------------------------------
/portrait4d/examples/align_images/tom-hanks.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/tom-hanks.jpg
--------------------------------------------------------------------------------
/portrait4d/examples/align_images/wp7332247.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/wp7332247.jpg
--------------------------------------------------------------------------------
/portrait4d/examples/flame_optim_params/Yeoh.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/Yeoh.npy
--------------------------------------------------------------------------------
/portrait4d/models/FLAME/flame_addition_info.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/models/FLAME/flame_addition_info.npy
--------------------------------------------------------------------------------
/portrait4d/examples/2dldmks_align/GettyImages.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/GettyImages.npy
--------------------------------------------------------------------------------
/portrait4d/examples/2dldmks_align/Zoe_Saldana.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/Zoe_Saldana.npy
--------------------------------------------------------------------------------
/portrait4d/examples/3dldmks_align/GettyImages.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/GettyImages.npy
--------------------------------------------------------------------------------
/portrait4d/examples/3dldmks_align/Zoe_Saldana.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/Zoe_Saldana.npy
--------------------------------------------------------------------------------
/portrait4d/examples/align_images/GettyImages.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/GettyImages.jpg
--------------------------------------------------------------------------------
/portrait4d/examples/align_images/Zoe_Saldana.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/Zoe_Saldana.jpg
--------------------------------------------------------------------------------
/portrait4d/examples/flame_optim_params/491976.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/491976.npy
--------------------------------------------------------------------------------
/portrait4d/examples/flame_optim_params/MV5BMT.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/MV5BMT.npy
--------------------------------------------------------------------------------
/portrait4d/models/FLAME/simplify_baricentric.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/models/FLAME/simplify_baricentric.npy
--------------------------------------------------------------------------------
/portrait4d/models/FLAME/simplify_baricentric2.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/models/FLAME/simplify_baricentric2.npy
--------------------------------------------------------------------------------
/portrait4d/examples/2dldmks_align/anne-hathaway.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/anne-hathaway.npy
--------------------------------------------------------------------------------
/portrait4d/examples/2dldmks_align/michelle-yeoh.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/michelle-yeoh.npy
--------------------------------------------------------------------------------
/portrait4d/examples/3dldmks_align/anne-hathaway.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/anne-hathaway.npy
--------------------------------------------------------------------------------
/portrait4d/examples/3dldmks_align/michelle-yeoh.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/michelle-yeoh.npy
--------------------------------------------------------------------------------
/portrait4d/examples/align_images/anne-hathaway.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/anne-hathaway.jpg
--------------------------------------------------------------------------------
/portrait4d/examples/align_images/michelle-yeoh.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/michelle-yeoh.jpg
--------------------------------------------------------------------------------
/portrait4d/examples/align_images/tom_hanks_ranks.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/tom_hanks_ranks.jpg
--------------------------------------------------------------------------------
/portrait4d/examples/flame_optim_params/UX1000_.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/UX1000_.npy
--------------------------------------------------------------------------------
/portrait4d/examples/flame_optim_params/tom-hanks.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/tom-hanks.npy
--------------------------------------------------------------------------------
/portrait4d/examples/flame_optim_params/wp7332247.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/wp7332247.npy
--------------------------------------------------------------------------------
/portrait4d/examples/2dldmks_align/tom_hanks_ranks.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/tom_hanks_ranks.npy
--------------------------------------------------------------------------------
/portrait4d/examples/3dldmks_align/tom_hanks_ranks.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/tom_hanks_ranks.npy
--------------------------------------------------------------------------------
/portrait4d/examples/align_images/ANNEHATHAWAYMOVIE.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/ANNEHATHAWAYMOVIE.jpg
--------------------------------------------------------------------------------
/portrait4d/examples/align_images/leonardo-dicaprio.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/leonardo-dicaprio.jpg
--------------------------------------------------------------------------------
/portrait4d/examples/flame_optim_params/GettyImages.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/GettyImages.npy
--------------------------------------------------------------------------------
/portrait4d/examples/flame_optim_params/Zoe_Saldana.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/Zoe_Saldana.npy
--------------------------------------------------------------------------------
/portrait4d/examples/2dldmks_align/ANNEHATHAWAYMOVIE.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/ANNEHATHAWAYMOVIE.npy
--------------------------------------------------------------------------------
/portrait4d/examples/2dldmks_align/leonardo-dicaprio.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/leonardo-dicaprio.npy
--------------------------------------------------------------------------------
/portrait4d/examples/3dldmks_align/ANNEHATHAWAYMOVIE.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/ANNEHATHAWAYMOVIE.npy
--------------------------------------------------------------------------------
/portrait4d/examples/3dldmks_align/leonardo-dicaprio.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/leonardo-dicaprio.npy
--------------------------------------------------------------------------------
/portrait4d/examples/align_images/112217-2017-angelina.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/112217-2017-angelina.jpg
--------------------------------------------------------------------------------
/portrait4d/examples/align_images/tom-cruise-net-worth.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/tom-cruise-net-worth.jpg
--------------------------------------------------------------------------------
/portrait4d/examples/bfm2flame_params_simplified/Yeoh.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/Yeoh.npy
--------------------------------------------------------------------------------
/portrait4d/examples/flame_optim_params/anne-hathaway.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/anne-hathaway.npy
--------------------------------------------------------------------------------
/portrait4d/examples/flame_optim_params/michelle-yeoh.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/michelle-yeoh.npy
--------------------------------------------------------------------------------
/portrait4d/models/FLAME/geometry/landmark_embedding.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/models/FLAME/geometry/landmark_embedding.npy
--------------------------------------------------------------------------------
/portrait4d/examples/2dldmks_align/112217-2017-angelina.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/112217-2017-angelina.npy
--------------------------------------------------------------------------------
/portrait4d/examples/2dldmks_align/tom-cruise-net-worth.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/tom-cruise-net-worth.npy
--------------------------------------------------------------------------------
/portrait4d/examples/3dldmks_align/112217-2017-angelina.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/112217-2017-angelina.npy
--------------------------------------------------------------------------------
/portrait4d/examples/3dldmks_align/tom-cruise-net-worth.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/tom-cruise-net-worth.npy
--------------------------------------------------------------------------------
/portrait4d/examples/align_images/Leonardo-DiCaprio-2016.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/Leonardo-DiCaprio-2016.png
--------------------------------------------------------------------------------
/portrait4d/examples/align_images/NANeN75ykf8ozohXrz8ycd.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/NANeN75ykf8ozohXrz8ycd.jpg
--------------------------------------------------------------------------------
/portrait4d/examples/bfm2flame_params_simplified/491976.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/491976.npy
--------------------------------------------------------------------------------
/portrait4d/examples/bfm2flame_params_simplified/MV5BMT.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/MV5BMT.npy
--------------------------------------------------------------------------------
/portrait4d/examples/bfm2flame_params_simplified/UX1000_.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/UX1000_.npy
--------------------------------------------------------------------------------
/portrait4d/examples/flame_optim_params/tom_hanks_ranks.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/tom_hanks_ranks.npy
--------------------------------------------------------------------------------
/data_preprocess/assets/bfm2flame_mapper/model-iter200000.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/data_preprocess/assets/bfm2flame_mapper/model-iter200000.pth
--------------------------------------------------------------------------------
/portrait4d/examples/2dldmks_align/123708_leonardo-dicaprio.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/123708_leonardo-dicaprio.npy
--------------------------------------------------------------------------------
/portrait4d/examples/2dldmks_align/Leonardo-DiCaprio-2016.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/Leonardo-DiCaprio-2016.npy
--------------------------------------------------------------------------------
/portrait4d/examples/2dldmks_align/NANeN75ykf8ozohXrz8ycd.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/NANeN75ykf8ozohXrz8ycd.npy
--------------------------------------------------------------------------------
/portrait4d/examples/3dldmks_align/123708_leonardo-dicaprio.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/123708_leonardo-dicaprio.npy
--------------------------------------------------------------------------------
/portrait4d/examples/3dldmks_align/Leonardo-DiCaprio-2016.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/Leonardo-DiCaprio-2016.npy
--------------------------------------------------------------------------------
/portrait4d/examples/3dldmks_align/NANeN75ykf8ozohXrz8ycd.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/NANeN75ykf8ozohXrz8ycd.npy
--------------------------------------------------------------------------------
/portrait4d/examples/align_images/123708_leonardo-dicaprio.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/123708_leonardo-dicaprio.jpg
--------------------------------------------------------------------------------
/portrait4d/examples/align_images/1280-elizabeth-olsen-main.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/1280-elizabeth-olsen-main.jpg
--------------------------------------------------------------------------------
/portrait4d/examples/align_images/gal-gadot-gucci-photshoot.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/gal-gadot-gucci-photshoot.jpg
--------------------------------------------------------------------------------
/portrait4d/examples/bfm2flame_params_simplified/tom-hanks.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/tom-hanks.npy
--------------------------------------------------------------------------------
/portrait4d/examples/bfm2flame_params_simplified/wp7332247.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/wp7332247.npy
--------------------------------------------------------------------------------
/portrait4d/examples/flame_optim_params/ANNEHATHAWAYMOVIE.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/ANNEHATHAWAYMOVIE.npy
--------------------------------------------------------------------------------
/portrait4d/examples/flame_optim_params/leonardo-dicaprio.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/leonardo-dicaprio.npy
--------------------------------------------------------------------------------
/portrait4d/models/FLAME/flame_addition_info_new_smallface.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/models/FLAME/flame_addition_info_new_smallface.npy
--------------------------------------------------------------------------------
/portrait4d/examples/2dldmks_align/1280-elizabeth-olsen-main.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/1280-elizabeth-olsen-main.npy
--------------------------------------------------------------------------------
/portrait4d/examples/2dldmks_align/gal-gadot-gucci-photshoot.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/gal-gadot-gucci-photshoot.npy
--------------------------------------------------------------------------------
/portrait4d/examples/3dldmks_align/1280-elizabeth-olsen-main.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/1280-elizabeth-olsen-main.npy
--------------------------------------------------------------------------------
/portrait4d/examples/3dldmks_align/gal-gadot-gucci-photshoot.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/gal-gadot-gucci-photshoot.npy
--------------------------------------------------------------------------------
/portrait4d/examples/bfm2flame_params_simplified/GettyImages.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/GettyImages.npy
--------------------------------------------------------------------------------
/portrait4d/examples/bfm2flame_params_simplified/Zoe_Saldana.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/Zoe_Saldana.npy
--------------------------------------------------------------------------------
/portrait4d/examples/flame_optim_params/112217-2017-angelina.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/112217-2017-angelina.npy
--------------------------------------------------------------------------------
/portrait4d/examples/flame_optim_params/tom-cruise-net-worth.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/tom-cruise-net-worth.npy
--------------------------------------------------------------------------------
/portrait4d/models/FLAME/flame_addition_info_new_nosmallface.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/models/FLAME/flame_addition_info_new_nosmallface.npy
--------------------------------------------------------------------------------
/data_preprocess/lib/models/facerecon/util/__init__.py:
--------------------------------------------------------------------------------
1 | """This package includes a miscellaneous collection of useful helper functions."""
2 | from lib.models.facerecon.util import *
3 |
--------------------------------------------------------------------------------
/portrait4d/examples/2dldmks_align/Pictures-of-Jennifer-Lawrence.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/Pictures-of-Jennifer-Lawrence.npy
--------------------------------------------------------------------------------
/portrait4d/examples/3dldmks_align/Pictures-of-Jennifer-Lawrence.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/Pictures-of-Jennifer-Lawrence.npy
--------------------------------------------------------------------------------
/portrait4d/examples/align_images/Pictures-of-Jennifer-Lawrence.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/Pictures-of-Jennifer-Lawrence.jpg
--------------------------------------------------------------------------------
/portrait4d/examples/bfm2flame_params_simplified/anne-hathaway.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/anne-hathaway.npy
--------------------------------------------------------------------------------
/portrait4d/examples/bfm2flame_params_simplified/michelle-yeoh.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/michelle-yeoh.npy
--------------------------------------------------------------------------------
/portrait4d/examples/bfm2flame_params_simplified/tom_hanks_ranks.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/tom_hanks_ranks.npy
--------------------------------------------------------------------------------
/portrait4d/examples/flame_optim_params/123708_leonardo-dicaprio.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/123708_leonardo-dicaprio.npy
--------------------------------------------------------------------------------
/portrait4d/examples/flame_optim_params/Leonardo-DiCaprio-2016.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/Leonardo-DiCaprio-2016.npy
--------------------------------------------------------------------------------
/portrait4d/examples/flame_optim_params/NANeN75ykf8ozohXrz8ycd.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/NANeN75ykf8ozohXrz8ycd.npy
--------------------------------------------------------------------------------
/portrait4d/models/FLAME/geometry/landmark_embedding_with_eyes.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/models/FLAME/geometry/landmark_embedding_with_eyes.npy
--------------------------------------------------------------------------------
/portrait4d/examples/2dldmks_align/BradPitt-GettyImages-1158782727.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/BradPitt-GettyImages-1158782727.npy
--------------------------------------------------------------------------------
/portrait4d/examples/2dldmks_align/rachel-mcadams-actress-pictures.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/rachel-mcadams-actress-pictures.npy
--------------------------------------------------------------------------------
/portrait4d/examples/3dldmks_align/BradPitt-GettyImages-1158782727.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/BradPitt-GettyImages-1158782727.npy
--------------------------------------------------------------------------------
/portrait4d/examples/3dldmks_align/rachel-mcadams-actress-pictures.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/rachel-mcadams-actress-pictures.npy
--------------------------------------------------------------------------------
/portrait4d/examples/align_images/BradPitt-GettyImages-1158782727.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/BradPitt-GettyImages-1158782727.jpg
--------------------------------------------------------------------------------
/portrait4d/examples/align_images/c551bce446ed70343fff6ceb75a9c398.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/c551bce446ed70343fff6ceb75a9c398.jpg
--------------------------------------------------------------------------------
/portrait4d/examples/align_images/d13b1ab4b2c3df7e644a12541597fc51.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/d13b1ab4b2c3df7e644a12541597fc51.jpg
--------------------------------------------------------------------------------
/portrait4d/examples/align_images/rachel-mcadams-actress-pictures.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/rachel-mcadams-actress-pictures.jpg
--------------------------------------------------------------------------------
/portrait4d/examples/bfm2flame_params_simplified/ANNEHATHAWAYMOVIE.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/ANNEHATHAWAYMOVIE.npy
--------------------------------------------------------------------------------
/portrait4d/examples/bfm2flame_params_simplified/leonardo-dicaprio.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/leonardo-dicaprio.npy
--------------------------------------------------------------------------------
/portrait4d/examples/flame_optim_params/1280-elizabeth-olsen-main.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/1280-elizabeth-olsen-main.npy
--------------------------------------------------------------------------------
/portrait4d/examples/flame_optim_params/gal-gadot-gucci-photshoot.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/gal-gadot-gucci-photshoot.npy
--------------------------------------------------------------------------------
/portrait4d/models/FLAME/ca_pose.txt:
--------------------------------------------------------------------------------
1 | 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 3.000000000000000000e-01 0.000000000000000000e+00 0.000000000000000000e+00
2 |
--------------------------------------------------------------------------------
/portrait4d/examples/2dldmks_align/c551bce446ed70343fff6ceb75a9c398.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/c551bce446ed70343fff6ceb75a9c398.npy
--------------------------------------------------------------------------------
/portrait4d/examples/2dldmks_align/d13b1ab4b2c3df7e644a12541597fc51.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/d13b1ab4b2c3df7e644a12541597fc51.npy
--------------------------------------------------------------------------------
/portrait4d/examples/3dldmks_align/c551bce446ed70343fff6ceb75a9c398.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/c551bce446ed70343fff6ceb75a9c398.npy
--------------------------------------------------------------------------------
/portrait4d/examples/3dldmks_align/d13b1ab4b2c3df7e644a12541597fc51.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/d13b1ab4b2c3df7e644a12541597fc51.npy
--------------------------------------------------------------------------------
/portrait4d/examples/align_images/90741dde2685-gettyimages-1470263599.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/90741dde2685-gettyimages-1470263599.jpg
--------------------------------------------------------------------------------
/portrait4d/examples/align_images/brad-pitt-actor-man-smile-wallpaper.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/brad-pitt-actor-man-smile-wallpaper.jpg
--------------------------------------------------------------------------------
/portrait4d/examples/bfm2flame_params_simplified/112217-2017-angelina.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/112217-2017-angelina.npy
--------------------------------------------------------------------------------
/portrait4d/examples/bfm2flame_params_simplified/tom-cruise-net-worth.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/tom-cruise-net-worth.npy
--------------------------------------------------------------------------------
/portrait4d/examples/flame_optim_params/Pictures-of-Jennifer-Lawrence.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/Pictures-of-Jennifer-Lawrence.npy
--------------------------------------------------------------------------------
/portrait4d/examples/2dldmks_align/90741dde2685-gettyimages-1470263599.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/90741dde2685-gettyimages-1470263599.npy
--------------------------------------------------------------------------------
/portrait4d/examples/2dldmks_align/brad-pitt-actor-man-smile-wallpaper.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/brad-pitt-actor-man-smile-wallpaper.npy
--------------------------------------------------------------------------------
/portrait4d/examples/3dldmks_align/90741dde2685-gettyimages-1470263599.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/90741dde2685-gettyimages-1470263599.npy
--------------------------------------------------------------------------------
/portrait4d/examples/3dldmks_align/brad-pitt-actor-man-smile-wallpaper.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/brad-pitt-actor-man-smile-wallpaper.npy
--------------------------------------------------------------------------------
/portrait4d/examples/bfm2flame_params_simplified/Leonardo-DiCaprio-2016.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/Leonardo-DiCaprio-2016.npy
--------------------------------------------------------------------------------
/portrait4d/examples/bfm2flame_params_simplified/NANeN75ykf8ozohXrz8ycd.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/NANeN75ykf8ozohXrz8ycd.npy
--------------------------------------------------------------------------------
/portrait4d/examples/flame_optim_params/BradPitt-GettyImages-1158782727.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/BradPitt-GettyImages-1158782727.npy
--------------------------------------------------------------------------------
/portrait4d/examples/flame_optim_params/rachel-mcadams-actress-pictures.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/rachel-mcadams-actress-pictures.npy
--------------------------------------------------------------------------------
/portrait4d/examples/align_images/jennifer lawrence extreme side part hero.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/jennifer lawrence extreme side part hero.jpg
--------------------------------------------------------------------------------
/portrait4d/examples/bfm2flame_params_simplified/123708_leonardo-dicaprio.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/123708_leonardo-dicaprio.npy
--------------------------------------------------------------------------------
/portrait4d/examples/bfm2flame_params_simplified/1280-elizabeth-olsen-main.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/1280-elizabeth-olsen-main.npy
--------------------------------------------------------------------------------
/portrait4d/examples/bfm2flame_params_simplified/gal-gadot-gucci-photshoot.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/gal-gadot-gucci-photshoot.npy
--------------------------------------------------------------------------------
/portrait4d/examples/flame_optim_params/c551bce446ed70343fff6ceb75a9c398.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/c551bce446ed70343fff6ceb75a9c398.npy
--------------------------------------------------------------------------------
/portrait4d/examples/flame_optim_params/d13b1ab4b2c3df7e644a12541597fc51.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/d13b1ab4b2c3df7e644a12541597fc51.npy
--------------------------------------------------------------------------------
/portrait4d/examples/2dldmks_align/jennifer lawrence extreme side part hero.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/jennifer lawrence extreme side part hero.npy -------------------------------------------------------------------------------- /portrait4d/examples/3dldmks_align/jennifer lawrence extreme side part hero.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/jennifer lawrence extreme side part hero.npy -------------------------------------------------------------------------------- /portrait4d/examples/flame_optim_params/90741dde2685-gettyimages-1470263599.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/90741dde2685-gettyimages-1470263599.npy -------------------------------------------------------------------------------- /portrait4d/examples/flame_optim_params/brad-pitt-actor-man-smile-wallpaper.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/brad-pitt-actor-man-smile-wallpaper.npy -------------------------------------------------------------------------------- /portrait4d/examples/bfm2flame_params_simplified/Pictures-of-Jennifer-Lawrence.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/Pictures-of-Jennifer-Lawrence.npy -------------------------------------------------------------------------------- /portrait4d/examples/align_images/02xp-tomhanks1-gbth-videoSixteenByNineJumbo1600.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/02xp-tomhanks1-gbth-videoSixteenByNineJumbo1600.jpg -------------------------------------------------------------------------------- /portrait4d/examples/align_images/126467186_214084556756081_9216829119240203400_n.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/126467186_214084556756081_9216829119240203400_n.jpg -------------------------------------------------------------------------------- /portrait4d/examples/bfm2flame_params_simplified/BradPitt-GettyImages-1158782727.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/BradPitt-GettyImages-1158782727.npy -------------------------------------------------------------------------------- /portrait4d/examples/bfm2flame_params_simplified/c551bce446ed70343fff6ceb75a9c398.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/c551bce446ed70343fff6ceb75a9c398.npy -------------------------------------------------------------------------------- /portrait4d/examples/bfm2flame_params_simplified/d13b1ab4b2c3df7e644a12541597fc51.npy: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/d13b1ab4b2c3df7e644a12541597fc51.npy -------------------------------------------------------------------------------- /portrait4d/examples/bfm2flame_params_simplified/rachel-mcadams-actress-pictures.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/rachel-mcadams-actress-pictures.npy -------------------------------------------------------------------------------- /portrait4d/examples/flame_optim_params/jennifer lawrence extreme side part hero.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/jennifer lawrence extreme side part hero.npy -------------------------------------------------------------------------------- /portrait4d/examples/2dldmks_align/02xp-tomhanks1-gbth-videoSixteenByNineJumbo1600.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/02xp-tomhanks1-gbth-videoSixteenByNineJumbo1600.npy -------------------------------------------------------------------------------- /portrait4d/examples/2dldmks_align/126467186_214084556756081_9216829119240203400_n.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/126467186_214084556756081_9216829119240203400_n.npy -------------------------------------------------------------------------------- /portrait4d/examples/3dldmks_align/02xp-tomhanks1-gbth-videoSixteenByNineJumbo1600.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/02xp-tomhanks1-gbth-videoSixteenByNineJumbo1600.npy -------------------------------------------------------------------------------- /portrait4d/examples/3dldmks_align/126467186_214084556756081_9216829119240203400_n.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/126467186_214084556756081_9216829119240203400_n.npy -------------------------------------------------------------------------------- /portrait4d/examples/bfm2flame_params_simplified/90741dde2685-gettyimages-1470263599.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/90741dde2685-gettyimages-1470263599.npy -------------------------------------------------------------------------------- /portrait4d/examples/bfm2flame_params_simplified/brad-pitt-actor-man-smile-wallpaper.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/brad-pitt-actor-man-smile-wallpaper.npy -------------------------------------------------------------------------------- /portrait4d/examples/2dldmks_align/leonardo-dicaprio-bright-blue-eyes-nwn6qtcqp0pbn3a7.npy: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/leonardo-dicaprio-bright-blue-eyes-nwn6qtcqp0pbn3a7.npy -------------------------------------------------------------------------------- /portrait4d/examples/3dldmks_align/leonardo-dicaprio-bright-blue-eyes-nwn6qtcqp0pbn3a7.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/leonardo-dicaprio-bright-blue-eyes-nwn6qtcqp0pbn3a7.npy -------------------------------------------------------------------------------- /portrait4d/examples/align_images/leonardo-dicaprio-bright-blue-eyes-nwn6qtcqp0pbn3a7.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/leonardo-dicaprio-bright-blue-eyes-nwn6qtcqp0pbn3a7.jpg -------------------------------------------------------------------------------- /portrait4d/examples/bfm2flame_params_simplified/jennifer lawrence extreme side part hero.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/jennifer lawrence extreme side part hero.npy -------------------------------------------------------------------------------- /portrait4d/examples/flame_optim_params/02xp-tomhanks1-gbth-videoSixteenByNineJumbo1600.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/02xp-tomhanks1-gbth-videoSixteenByNineJumbo1600.npy -------------------------------------------------------------------------------- /portrait4d/examples/flame_optim_params/126467186_214084556756081_9216829119240203400_n.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/126467186_214084556756081_9216829119240203400_n.npy -------------------------------------------------------------------------------- /portrait4d/examples/2dldmks_align/elizabeth-olsen-american-actress-portrait-2880x1800-5138.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/2dldmks_align/elizabeth-olsen-american-actress-portrait-2880x1800-5138.npy -------------------------------------------------------------------------------- /portrait4d/examples/3dldmks_align/elizabeth-olsen-american-actress-portrait-2880x1800-5138.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/3dldmks_align/elizabeth-olsen-american-actress-portrait-2880x1800-5138.npy -------------------------------------------------------------------------------- /portrait4d/examples/align_images/elizabeth-olsen-american-actress-portrait-2880x1800-5138.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/align_images/elizabeth-olsen-american-actress-portrait-2880x1800-5138.jpg -------------------------------------------------------------------------------- 
/portrait4d/examples/flame_optim_params/leonardo-dicaprio-bright-blue-eyes-nwn6qtcqp0pbn3a7.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/leonardo-dicaprio-bright-blue-eyes-nwn6qtcqp0pbn3a7.npy -------------------------------------------------------------------------------- /portrait4d/examples/bfm2flame_params_simplified/02xp-tomhanks1-gbth-videoSixteenByNineJumbo1600.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/02xp-tomhanks1-gbth-videoSixteenByNineJumbo1600.npy -------------------------------------------------------------------------------- /portrait4d/examples/bfm2flame_params_simplified/126467186_214084556756081_9216829119240203400_n.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/126467186_214084556756081_9216829119240203400_n.npy -------------------------------------------------------------------------------- /portrait4d/examples/flame_optim_params/elizabeth-olsen-american-actress-portrait-2880x1800-5138.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/flame_optim_params/elizabeth-olsen-american-actress-portrait-2880x1800-5138.npy -------------------------------------------------------------------------------- /portrait4d/examples/bfm2flame_params_simplified/leonardo-dicaprio-bright-blue-eyes-nwn6qtcqp0pbn3a7.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/leonardo-dicaprio-bright-blue-eyes-nwn6qtcqp0pbn3a7.npy -------------------------------------------------------------------------------- /portrait4d/examples/bfm2flame_params_simplified/elizabeth-olsen-american-actress-portrait-2880x1800-5138.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/YuDeng/Portrait-4D/HEAD/portrait4d/examples/bfm2flame_params_simplified/elizabeth-olsen-american-actress-portrait-2880x1800-5138.npy -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.pkl 3 | *.mat 4 | *.zip 5 | **/.ipynb_checkpoints 6 | portrait4d/pretrained_models/ 7 | portrait4d/data/ 8 | portrait4d/experiments/ 9 | portrait4d/test-imgs/ 10 | portrait4d/training-runs-genhead/ 11 | portrait4d/training-runs-portrait4d/ 12 | portrait4d/training-runs-portrait4d-v2/ 13 | data_preprocess/bfm_to_flame/data/ 14 | data_preprocess/bfm_to_flame/model/ 15 | data_preprocess/bfm_to_flame/mesh/ 16 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | torch==1.11.0+cu113 2 | torchvision==0.12.0+cu113 3 | numpy==1.23.5 4 | chumpy==0.70 5 | scipy==1.8.1 6 | scikit-image==0.19.2 7 | scikit-learn==1.1.1 8 | opencv-python==4.6.0.66 9 | Pillow==9.2.0 10 | termcolor==1.1.0 11 | PyYAML==6.0 12 | tqdm==4.64.0 13 | absl-py==1.1.0 14 | tensorboard==2.11.0 
15 | tensorboardX==2.5 16 | PyOpenGL==3.1.0 17 | pyrender==0.1.45 18 | trimesh==3.12.5 19 | click==8.1.7 20 | omegaconf==2.3.0 21 | segmentation_models_pytorch==0.3.3 22 | timm==0.9.2 23 | psutil==5.8.0 24 | lmdb==1.2.1 25 | einops==0.3.2 26 | kornia==0.6.12 27 | gdown 28 | plyfile 29 | zmq -------------------------------------------------------------------------------- /portrait4d/configs/genhead-ffhq512.yaml: -------------------------------------------------------------------------------- 1 | experiment: 'genhead-ffhq512' 2 | outdir: './training-runs-genhead' 3 | data: None 4 | gpus: 4 5 | batch: 32 6 | g_module: 'training.generator.triplane.PartTriPlaneGeneratorDeform' 7 | d_module: 'training.discriminator.dual_discriminator.DualDiscriminatorDeform' 8 | triplane_resolution: 256 9 | triplane_channels: 192 10 | g_has_superresolution: True 11 | g_has_background: True 12 | g_flame_condition: True 13 | g_random_combine: False 14 | g_dynamic_texture: False 15 | g_flame_full: True 16 | d_has_superresolution: True 17 | d_has_uv: True 18 | d_has_seg: True 19 | patch_scale: 1.0 -------------------------------------------------------------------------------- /portrait4d/models/FLAME/cfg.yaml: -------------------------------------------------------------------------------- 1 | coarse: 2 | model: 3 | flame_model_path: models/FLAME/geometry/generic_model.pkl 4 | flame_lmk_embedding_path: models/FLAME/geometry/landmark_embedding_with_eyes.npy 5 | flame_mask_pkl: models/FLAME/mask/FLAME_masks.pkl 6 | ca_shape_param: models/FLAME/ca_shape.txt 7 | ca_exp_param: models/FLAME/ca_exp.txt 8 | ca_pose_param: models/FLAME/ca_pose.txt 9 | simplify_baricentric: models/FLAME/simplify_baricentric2.npy 10 | addition_info: models/FLAME/flame_addition_info_new_smallface.npy 11 | n_shape: 100 12 | n_tex: 50 13 | n_exp: 50 14 | n_pose: 6 -------------------------------------------------------------------------------- /portrait4d/metrics/__init__.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited. 10 | 11 | # empty 12 | -------------------------------------------------------------------------------- /portrait4d/viz/__init__.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited. 
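The genhead-ffhq512.yaml training config and the FLAME cfg.yaml above are plain YAML, so they can be inspected with omegaconf, which requirements.txt pins at 2.3.0. A minimal sketch, assuming the file path from the repository tree; the repo's own loaders may read these files differently:

    from omegaconf import OmegaConf

    cfg = OmegaConf.load('portrait4d/models/FLAME/cfg.yaml')  # path assumed
    print(cfg.coarse.model.n_shape)           # -> 100
    print(cfg.coarse.model.flame_model_path)  # models/FLAME/geometry/generic_model.pkl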
10 | 11 | # empty 12 | -------------------------------------------------------------------------------- /portrait4d/gui_utils/__init__.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited. 10 | 11 | # empty 12 | -------------------------------------------------------------------------------- /portrait4d/torch_utils/__init__.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited. 10 | 11 | # empty 12 | -------------------------------------------------------------------------------- /portrait4d/training/__init__.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited. 10 | 11 | # empty 12 | -------------------------------------------------------------------------------- /portrait4d/torch_utils/ops/__init__.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited. 
10 | 11 | # empty 12 | -------------------------------------------------------------------------------- /portrait4d/configs/genhead-ffhq512-toy.yaml: -------------------------------------------------------------------------------- 1 | experiment: 'genhead-ffhq512-toy' 2 | outdir: './training-runs-genhead' 3 | data: './data/FFHQ_512_50/' 4 | gpus: 1 5 | batch: 4 6 | g_module: 'training.generator.triplane.PartTriPlaneGeneratorDeform' 7 | d_module: 'training.discriminator.dual_discriminator.DualDiscriminatorDeform' 8 | triplane_resolution: 256 9 | triplane_channels: 192 10 | g_has_superresolution: True 11 | g_has_background: True 12 | g_flame_condition: True 13 | g_random_combine: False 14 | g_dynamic_texture: False 15 | g_flame_full: True 16 | d_has_superresolution: True 17 | d_has_uv: True 18 | d_has_seg: True 19 | patch_scale: 1.0 -------------------------------------------------------------------------------- /portrait4d/training/volumetric_rendering/__init__.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited. 10 | 11 | # empty -------------------------------------------------------------------------------- /portrait4d/dnnlib/__init__.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited. 
10 | 11 | from .util import EasyDict, make_cache_dir_path 12 | -------------------------------------------------------------------------------- /data_preprocess/FLAME/cfg.yaml: -------------------------------------------------------------------------------- 1 | coarse: 2 | model: 3 | flame_model_path: ../portrait4d/models/FLAME/geometry/generic_model.pkl 4 | flame_lmk_embedding_path: ../portrait4d/models/FLAME/geometry/landmark_embedding_with_eyes.npy 5 | flame_mask_pkl: ../portrait4d/models/FLAME/mask/FLAME_masks.pkl 6 | ca_shape_param: ../portrait4d/models/FLAME/ca_shape.txt 7 | ca_exp_param: ../portrait4d/models/FLAME/ca_exp.txt 8 | ca_pose_param: ../portrait4d/models/FLAME/ca_pose.txt 9 | simplify_baricentric: ../portrait4d/models/FLAME/simplify_baricentric2.npy 10 | addition_info: ../portrait4d/models/FLAME/flame_addition_info_new_smallface.npy 11 | n_shape: 100 12 | n_tex: 50 13 | n_exp: 50 14 | n_pose: 6 -------------------------------------------------------------------------------- /data_preprocess/preprocess_dir.py: -------------------------------------------------------------------------------- 1 | ################################################# 2 | # Copyright (c) 2021-present, xiaobing.ai, Inc. # 3 | # All rights reserved. # 4 | ################################################# 5 | # CV Research, DEV(USA) xiaobing. # 6 | # written by wangduomin@xiaobing.ai # 7 | ################################################# 8 | 9 | ##### python internal and external package 10 | import os 11 | import cv2 12 | import random 13 | import numpy as np 14 | import warnings 15 | warnings.filterwarnings('ignore') 16 | 17 | ##### self defined package 18 | from lib.config.config import cfg 19 | from lib.inferencer import Tester as Tester 20 | 21 | def main(): 22 | os.makedirs(cfg.save_dir, exist_ok=True) 23 | tester = Tester(cfg) 24 | tester.inference(cfg.input_dir, cfg.save_dir, video=cfg.is_video) 25 | 26 | if __name__ == "__main__": 27 | main() 28 | 29 | 30 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 YuDeng 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
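dnnlib/__init__.py above re-exports EasyDict; in NVIDIA's reference dnnlib this is a dict subclass that also allows attribute-style access. A minimal sketch of the usual usage, with behavior assumed from the standard dnnlib implementation:

    import dnnlib

    opts = dnnlib.EasyDict(lr=1e-4)   # behaves like a plain dict...
    opts.batch = 32                   # ...but keys and attributes are interchangeable
    assert opts['batch'] == opts.batch == 32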
22 | -------------------------------------------------------------------------------- /data_preprocess/lib/models/fd/config.py: -------------------------------------------------------------------------------- 1 | # config.py 2 | 3 | cfg_mnet = { 4 | 'name': 'mobilenet0.25', 5 | 'min_sizes': [[16, 32], [64, 128], [256, 512]], 6 | 'steps': [8, 16, 32], 7 | 'variance': [0.1, 0.2], 8 | 'clip': False, 9 | 'loc_weight': 2.0, 10 | 'gpu_train': True, 11 | 'batch_size': 32, 12 | 'ngpu': 1, 13 | 'epoch': 250, 14 | 'decay1': 190, 15 | 'decay2': 220, 16 | 'image_size': 640, 17 | 'pretrain': True, 18 | 'return_layers': {'stage1': 1, 'stage2': 2, 'stage3': 3}, 19 | 'in_channel': 32, 20 | 'out_channel': 64 21 | } 22 | 23 | cfg_re50 = { 24 | 'name': 'Resnet50', 25 | 'min_sizes': [[16, 32], [64, 128], [256, 512]], 26 | 'steps': [8, 16, 32], 27 | 'variance': [0.1, 0.2], 28 | 'clip': False, 29 | 'loc_weight': 2.0, 30 | 'gpu_train': True, 31 | 'batch_size': 24, 32 | 'ngpu': 4, 33 | 'epoch': 100, 34 | 'decay1': 70, 35 | 'decay2': 90, 36 | 'image_size': 840, 37 | 'pretrain': True, 38 | 'return_layers': {'layer2': 1, 'layer3': 2, 'layer4': 3}, 39 | 'in_channel': 256, 40 | 'out_channel': 256 41 | } 42 | 43 | -------------------------------------------------------------------------------- /portrait4d/models/FLAME/ca_exp.txt: -------------------------------------------------------------------------------- 1 | 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 2 | -------------------------------------------------------------------------------- /data_preprocess/lib/models/fd/utils/nms/py_cpu_nms.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick 6 | # -------------------------------------------------------- 7 | 8 | import numpy as np 9 | 10 | def py_cpu_nms(dets, thresh): 11 | """Pure Python NMS baseline.""" 12 | x1 = dets[:, 0] 13 | y1 = dets[:, 1] 14 | x2 = dets[:, 2] 15 | y2 = dets[:, 3] 16 | scores = dets[:, 4] 17 | 18 | areas = (x2 - x1 + 1) * (y2 - y1 + 1) 19 | order = scores.argsort()[::-1] 20 | 21 | keep = [] 22 | while order.size > 0: 23 | i = order[0] 24 | keep.append(i) 25 | xx1 = 
np.maximum(x1[i], x1[order[1:]]) 26 | yy1 = np.maximum(y1[i], y1[order[1:]]) 27 | xx2 = np.minimum(x2[i], x2[order[1:]]) 28 | yy2 = np.minimum(y2[i], y2[order[1:]]) 29 | 30 | w = np.maximum(0.0, xx2 - xx1 + 1) 31 | h = np.maximum(0.0, yy2 - yy1 + 1) 32 | inter = w * h 33 | ovr = inter / (areas[i] + areas[order[1:]] - inter) 34 | 35 | inds = np.where(ovr <= thresh)[0] 36 | order = order[inds + 1] 37 | 38 | return keep 39 | -------------------------------------------------------------------------------- /data_preprocess/lib/models/fd/utils/timer.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick 6 | # -------------------------------------------------------- 7 | 8 | import time 9 | 10 | 11 | class Timer(object): 12 | """A simple timer.""" 13 | def __init__(self): 14 | self.total_time = 0. 15 | self.calls = 0 16 | self.start_time = 0. 17 | self.diff = 0. 18 | self.average_time = 0. 19 | 20 | def tic(self): 21 | # using time.time instead of time.clock because time.clock 22 | # does not normalize for multithreading 23 | self.start_time = time.time() 24 | 25 | def toc(self, average=True): 26 | self.diff = time.time() - self.start_time 27 | self.total_time += self.diff 28 | self.calls += 1 29 | self.average_time = self.total_time / self.calls 30 | if average: 31 | return self.average_time 32 | else: 33 | return self.diff 34 | 35 | def clear(self): 36 | self.total_time = 0. 37 | self.calls = 0 38 | self.start_time = 0. 39 | self.diff = 0. 40 | self.average_time = 0. 41 | -------------------------------------------------------------------------------- /portrait4d/training/crosssection_utils.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited. 
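A tiny worked example of py_cpu_nms above, with hand-picked boxes (not from the repo): boxes 0 and 1 overlap with IoU of roughly 0.83, box 2 is disjoint, so a threshold of 0.5 keeps boxes 0 and 2.

    import numpy as np
    # from lib.models.fd.utils.nms.py_cpu_nms import py_cpu_nms  # import path assumed

    dets = np.array([[ 0.,  0., 10., 10., 0.9],   # [x1, y1, x2, y2, score]
                     [ 1.,  1., 10., 10., 0.8],   # IoU with box 0 ~ 0.83 -> suppressed
                     [20., 20., 30., 30., 0.7]])  # no overlap -> kept
    print(py_cpu_nms(dets, thresh=0.5))           # -> [0, 2]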
10 | 11 | import torch 12 | 13 | def sample_cross_section(G, ws, resolution=256, w=1.2): 14 | axis=0 15 | A, B = torch.meshgrid(torch.linspace(w/2, -w/2, resolution, device=ws.device), torch.linspace(-w/2, w/2, resolution, device=ws.device), indexing='ij') 16 | A, B = A.reshape(-1, 1), B.reshape(-1, 1) 17 | C = torch.zeros_like(A) 18 | coordinates = [A, B] 19 | coordinates.insert(axis, C) 20 | coordinates = torch.cat(coordinates, dim=-1).expand(ws.shape[0], -1, -1) 21 | 22 | sigma = G.sample_mixed(coordinates, torch.randn_like(coordinates), ws)['sigma'] 23 | return sigma.reshape(-1, 1, resolution, resolution) 24 | 25 | # if __name__ == '__main__': 26 | # sample_crossection(None) -------------------------------------------------------------------------------- /data_preprocess/lib/models/networks/__init__.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import importlib 3 | from lib.models.networks.discriminator import MultiscaleDiscriminator, ImageDiscriminator 4 | from lib.models.networks.generator import ModulateGenerator 5 | from lib.models.networks.encoder import ResSEAudioEncoder, ResNeXtEncoder, ResSESyncEncoder, FanEncoder 6 | # import util.util as util 7 | 8 | def find_class_in_module(target_cls_name, module): 9 | target_cls_name = target_cls_name.replace('_', '').lower() 10 | clslib = importlib.import_module(module) 11 | cls = None 12 | for name, clsobj in clslib.__dict__.items(): 13 | if name.lower() == target_cls_name: 14 | cls = clsobj 15 | 16 | if cls is None: 17 | print("In %s, there should be a class whose name matches %s in lowercase without underscore(_)" % (module, target_cls_name)) 18 | exit(0) 19 | 20 | return cls 21 | 22 | def find_network_using_name(target_network_name, filename): 23 | target_class_name = target_network_name + filename 24 | module_name = 'lib.models.networks.' 
+ filename 25 | network = find_class_in_module(target_class_name, module_name) 26 | 27 | return network 28 | 29 | def define_networks(opt, name, _type): 30 | net = find_network_using_name(name, _type) 31 | net = net(opt) 32 | return net 33 | 34 | 35 | -------------------------------------------------------------------------------- /data_preprocess/bfm_to_flame/run_convert.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import threading 4 | from time import time 5 | import click 6 | 7 | def execCmd(cmd): 8 | try: 9 | print("command %s start %s" % (cmd, time() )) 10 | os.system(cmd) 11 | print("command %s finish %s" % (cmd, time())) 12 | except Exception as e : 13 | print('command %s\t failed \r\n%s' % (cmd,e)) 14 | 15 | @click.command() 16 | @click.option('--input_dir', type=str, help='input bfm folder', default='') 17 | @click.option('--save_dir', type=str, help='output folder to save converted results', default='') 18 | @click.option('--n_thread', type=int, help='number of threads', default=20) 19 | @click.option('--instance_per_thread', type=int, help='number of instances per thread', default=1) 20 | def main(input_dir:str, save_dir:str, n_thread:int, instance_per_thread:int): 21 | 22 | root = input_dir 23 | threads = [] 24 | 25 | print("start %s" % time()) 26 | 27 | for g in range(n_thread): 28 | cmd = 'python mesh_convert_from_coeff.py --input_dir %s --save_dir %s --group %d --num %d'%(input_dir, save_dir, g, instance_per_thread) 29 | th = threading.Thread(target=execCmd, args=(cmd,)) 30 | th.start() 31 | threads.append(th) 32 | 33 | for th in threads: 34 | th.join() 35 | 36 | print("end %s" % time()) 37 | 38 | if __name__ == '__main__': 39 | main() 40 | 41 | -------------------------------------------------------------------------------- /portrait4d/models/lpips/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2018, Richard Zhang, Phillip Isola, Alexei A. Efros, Eli Shechtman, Oliver Wang 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | * Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 13 | 14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 15 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 17 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 18 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 20 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 21 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 22 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24 | -------------------------------------------------------------------------------- /data_preprocess/lib/models/__init__.py: -------------------------------------------------------------------------------- 1 | ################################################# 2 | # Copyright (c) 2021-present, xiaobing.ai, Inc. # 3 | # All rights reserved. # 4 | ################################################# 5 | # CV Research, DEV(USA) xiaobing. # 6 | # written by wangduomin@xiaobing.ai # 7 | ################################################# 8 | 9 | ##### python internal and external package 10 | import importlib 11 | ##### self defined package 12 | from lib.models.fd.fd import faceDetector 13 | from lib.models.ldmk.ldmk import ldmkDetector 14 | 15 | 16 | def find_class_in_module(target_cls_name, module): 17 | # target_cls_name = target_cls_name.replace('_', '').lower() 18 | clslib = importlib.import_module(module) 19 | cls = None 20 | for name, clsobj in clslib.__dict__.items(): 21 | if target_cls_name == name: 22 | cls = clsobj 23 | 24 | if cls is None: 25 | print("In %s, there should be a class whose name matches %s without underscore(_)" % (module, target_cls_name)) 26 | exit(0) 27 | 28 | return cls 29 | 30 | def find_network_using_name(target_class_name, filename): 31 | module_name = 'lib.models.{}.{}'.format(filename, filename) 32 | network = find_class_in_module(target_class_name, module_name) 33 | 34 | return network 35 | 36 | def define_networks(opt, _type, _cls): 37 | net = find_network_using_name(_cls, _type) 38 | net = net(opt) 39 | return net -------------------------------------------------------------------------------- /data_preprocess/lib/models/fd/layers/functions/prior_box.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from itertools import product as product 3 | import numpy as np 4 | from math import ceil 5 | 6 | 7 | class PriorBox(object): 8 | def __init__(self, cfg, image_size=None, phase='train'): 9 | super(PriorBox, self).__init__() 10 | self.min_sizes = cfg['min_sizes'] 11 | self.steps = cfg['steps'] 12 | self.clip = cfg['clip'] 13 | self.image_size = image_size 14 | self.feature_maps = [[ceil(self.image_size[0]/step), ceil(self.image_size[1]/step)] for step in self.steps] 15 | self.name = "s" 16 | 17 | def forward(self): 18 | anchors = [] 19 | for k, f in enumerate(self.feature_maps): 20 | min_sizes = self.min_sizes[k] 21 | for i, j in product(range(f[0]), range(f[1])): 22 | for min_size in min_sizes: 23 | s_kx = min_size / self.image_size[1] 24 | s_ky = min_size / self.image_size[0] 25 | dense_cx = [x * self.steps[k] / self.image_size[1] for x in [j + 0.5]] 26 | dense_cy = [y * self.steps[k] / self.image_size[0] for y in [i + 0.5]] 27 | for cy, cx in product(dense_cy, dense_cx): 28 | anchors += [cx, cy, s_kx, s_ky] 29 | 30 | # back to torch land 31 | output = torch.Tensor(anchors).view(-1, 4) 32 | if self.clip: 33 | output.clamp_(max=1, min=0) 34 | return output 35 | -------------------------------------------------------------------------------- /portrait4d/training/loss/loss_utils.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
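A quick shape check for PriorBox above, using cfg_mnet from fd/config.py: with a 640x640 input and steps [8, 16, 32], the feature maps are 80x80, 40x40 and 20x20, each cell contributes 2 anchors, so (6400 + 1600 + 400) * 2 = 16800 priors. A minimal sketch, with import paths assumed from the tree:

    # from lib.models.fd.config import cfg_mnet
    # from lib.models.fd.layers.functions.prior_box import PriorBox

    pb = PriorBox(cfg_mnet, image_size=(640, 640))
    anchors = pb.forward()
    print(anchors.shape)  # torch.Size([16800, 4]), each row (cx, cy, s_kx, s_ky)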
2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited. 10 | 11 | """Loss functions.""" 12 | 13 | import torch 14 | from torch_utils import training_stats 15 | from torch_utils.ops import conv2d_gradfix 16 | from torch_utils.ops import upfirdn2d 17 | 18 | # Distortion loss in MipNeRF-360: https://github.com/google-research/multinerf 19 | def lossfun_distortion(t, w, reduction='mean'): 20 | """Compute iint w[i] w[j] |t[i] - t[j]| di dj.""" 21 | # The loss incurred between all pairs of intervals. 22 | # print("t:",t.shape) 23 | # print("w:",w.shape) 24 | ut = (t[..., 1:] + t[..., :-1]) / 2 25 | dut = torch.abs(ut[..., :, None] - ut[..., None, :]) 26 | loss_inter = torch.sum(w * torch.sum(w[..., None, :] * dut, dim=-1), dim=-1) 27 | 28 | # The loss incurred within each individual interval with itself. 29 | loss_intra = torch.sum(w**2 * (t[..., 1:] - t[..., :-1]), dim=-1) / 3 30 | 31 | if reduction is None: 32 | return loss_inter + loss_intra 33 | return (loss_inter + loss_intra).mean() -------------------------------------------------------------------------------- /portrait4d/configs/portrait4d-genhead512.yaml: -------------------------------------------------------------------------------- 1 | experiment: 'portrait4d-genhead512' 2 | cfg: '' 3 | outdir: './training-runs-portrait4d' 4 | shape_n_c_params_path1: './data/ffhq_all_shape_n_c_params.npy' 5 | shape_n_c_params_path2: './data/vfhq_all_shape_n_c_params.npy' 6 | motion_params_path1: './data/ffhq_all_motion_params.npy' 7 | motion_params_path2: './data/vfhq_all_motion_params.npy' 8 | gpus: 8 9 | batch: 32 10 | kimg: 15000 11 | g_module: 'training.reconstructor.triplane_reconstruct.TriPlaneReconstructorNeutralize' 12 | d_module: 'training.discriminator.dual_discriminator.DualDiscriminatorDeform' 13 | glr: 1e-4 14 | dlr: 1e-4 15 | g_has_superresolution: True 16 | g_has_background: True 17 | g_flame_full: True 18 | g_num_blocks_neutral: 4 19 | g_num_blocks_motion: 4 20 | g_motion_map_layers: 2 21 | d_has_superresolution: True 22 | d_has_uv: True 23 | d_has_seg: True 24 | patch_scale: 1.0 25 | use_ws_ones: False # If true, ws==one for superresolution 26 | use_flame_mot: False # If true, use flame parameters as motion embedding 27 | truncation_psi: 0.95 # Truncation rate for GenHead synthesized data 28 | cross_lr_scale: 1.0 # Learning rate scaling factor for motion-related layers 29 | static: False # If true, disable all motion-control and learn static 3d reconstruction model instead 30 | resume_syn: './pretrained_models/genhead-ffhq512.pkl' # Checkpoint of pre-trained GenHead for training data synthesis 31 | snap: 10 32 | density_reg_every: 16 33 | neural_rendering_resolution_initial: 64 34 | neural_rendering_resolution_final: 128 -------------------------------------------------------------------------------- /portrait4d/viz/backbone_cache_widget.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
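For reference, writing t for the interval endpoints and w for the per-interval weights, the per-ray quantity computed by lossfun_distortion above (before the final mean reduction) is

    \mathcal{L}_{\text{dist}} = \sum_{i,j} w_i\, w_j \left| \tfrac{t_i + t_{i+1}}{2} - \tfrac{t_j + t_{j+1}}{2} \right| + \tfrac{1}{3} \sum_i w_i^2\, (t_{i+1} - t_i)

where the first term penalizes weight spread across intervals and the second penalizes weight concentrated in wide intervals.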
2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited. 10 | 11 | import imgui 12 | from gui_utils import imgui_utils 13 | 14 | #---------------------------------------------------------------------------- 15 | 16 | class BackboneCacheWidget: 17 | def __init__(self, viz): 18 | self.viz = viz 19 | self.cache_backbone = True 20 | 21 | @imgui_utils.scoped_by_object_id 22 | def __call__(self, show=True): 23 | viz = self.viz 24 | 25 | if show: 26 | imgui.text('Cache Backbone') 27 | imgui.same_line(viz.label_w + viz.spacing * 4) 28 | _clicked, self.cache_backbone = imgui.checkbox('##backbonecache', self.cache_backbone) 29 | imgui.same_line(viz.label_w + viz.spacing * 10) 30 | imgui.text('Note that when enabled, you may be unable to view intermediate backbone weights below') 31 | 32 | viz.args.do_backbone_caching = self.cache_backbone 33 | 34 | #---------------------------------------------------------------------------- 35 | -------------------------------------------------------------------------------- /portrait4d/configs/portrait4d-static-genhead512.yaml: -------------------------------------------------------------------------------- 1 | experiment: 'portrait4d-static-genhead512' 2 | cfg: '' 3 | outdir: './training-runs-portrait4d' 4 | shape_n_c_params_path1: './data/ffhq_all_shape_n_c_params.npy' 5 | shape_n_c_params_path2: './data/vfhq_all_shape_n_c_params.npy' 6 | motion_params_path1: './data/ffhq_all_motion_params.npy' 7 | motion_params_path2: './data/vfhq_all_motion_params.npy' 8 | gpus: 8 9 | batch: 32 10 | kimg: 15000 11 | g_module: 'training.reconstructor.triplane_reconstruct.TriPlaneReconstructorNeutralize' 12 | d_module: 'training.discriminator.dual_discriminator.DualDiscriminatorDeform' 13 | glr: 1e-4 14 | dlr: 1e-4 15 | g_has_superresolution: True 16 | g_has_background: True 17 | g_flame_full: True 18 | g_num_blocks_neutral: 4 19 | g_num_blocks_motion: 4 20 | g_motion_map_layers: 2 21 | d_has_superresolution: True 22 | d_has_uv: True 23 | d_has_seg: True 24 | patch_scale: 1.0 25 | use_ws_ones: False # If true, ws==one for superresolution 26 | use_flame_mot: False # If true, use flame parameters as motion embedding 27 | truncation_psi: 0.95 # Truncation rate for GenHead synthesized data 28 | cross_lr_scale: 1.0 # Learning rate scaling factor for motion-related layers 29 | static: True # If true, disable all motion-control and learn static 3d reconstruction model instead 30 | resume_syn: './pretrained_models/genhead-ffhq512.pkl' # Checkpoint of pre-trained GenHead for training data synthesis 31 | snap: 10 32 | density_reg_every: 16 33 | neural_rendering_resolution_initial: 64 34 | neural_rendering_resolution_final: 128 -------------------------------------------------------------------------------- /portrait4d/viz/render_type_widget.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited. 10 | 11 | import imgui 12 | from gui_utils import imgui_utils 13 | 14 | #---------------------------------------------------------------------------- 15 | 16 | class RenderTypeWidget: 17 | def __init__(self, viz): 18 | self.viz = viz 19 | self.render_type = 0 20 | self.render_types = ['image', 'image_depth', 'image_raw'] 21 | self.labels = ['RGB Image', 'Depth Image', 'Neural Rendered Image'] 22 | 23 | @imgui_utils.scoped_by_object_id 24 | def __call__(self, show=True): 25 | viz = self.viz 26 | 27 | if show: 28 | imgui.text('Render Type') 29 | imgui.same_line(viz.label_w) 30 | with imgui_utils.item_width(viz.font_size * 10): 31 | _clicked, self.render_type = imgui.combo('', self.render_type, self.labels) 32 | 33 | viz.args.render_type = self.render_types[self.render_type] 34 | 35 | #---------------------------------------------------------------------------- 36 | -------------------------------------------------------------------------------- /portrait4d/torch_utils/ops/bias_act.h: -------------------------------------------------------------------------------- 1 | /* 2 | * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 3 | * SPDX-License-Identifier: LicenseRef-NvidiaProprietary 4 | * 5 | * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 6 | * property and proprietary rights in and to this material, related 7 | * documentation and any modifications thereto. Any use, reproduction, 8 | * disclosure or distribution of this material and related documentation 9 | * without an express license agreement from NVIDIA CORPORATION or 10 | * its affiliates is strictly prohibited. 11 | */ 12 | 13 | //------------------------------------------------------------------------ 14 | // CUDA kernel parameters. 15 | 16 | struct bias_act_kernel_params 17 | { 18 | const void* x; // [sizeX] 19 | const void* b; // [sizeB] or NULL 20 | const void* xref; // [sizeX] or NULL 21 | const void* yref; // [sizeX] or NULL 22 | const void* dy; // [sizeX] or NULL 23 | void* y; // [sizeX] 24 | 25 | int grad; 26 | int act; 27 | float alpha; 28 | float gain; 29 | float clamp; 30 | 31 | int sizeX; 32 | int sizeB; 33 | int stepB; 34 | int loopX; 35 | }; 36 | 37 | //------------------------------------------------------------------------ 38 | // CUDA kernel selection. 
39 | 40 | template <class T> void* choose_bias_act_kernel(const bias_act_kernel_params& p); 41 | 42 | //------------------------------------------------------------------------ 43 | -------------------------------------------------------------------------------- /portrait4d/configs/config.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | import torch 4 | 5 | from .yacs import CfgNode as CN 6 | 7 | # pylint: disable=redefined-outer-name 8 | 9 | _C = CN() 10 | 11 | def get_cfg_defaults(): 12 | return _C.clone() 13 | 14 | 15 | def parse_cfg(cfg): 16 | cfg.logdir = os.path.join('out', cfg.experiment) 17 | 18 | 19 | def determine_primary_secondary_gpus(cfg): 20 | print("------------------ GPU Configurations ------------------") 21 | cfg.n_gpus = torch.cuda.device_count() 22 | if cfg.n_gpus > 0: 23 | all_gpus = list(range(cfg.n_gpus)) 24 | cfg.primary_gpus = [0] 25 | if cfg.n_gpus > 1: 26 | cfg.secondary_gpus = [g for g in all_gpus]# if g not in cfg.primary_gpus] 27 | else: 28 | cfg.secondary_gpus = cfg.primary_gpus 29 | print(f"Primary GPUs: {cfg.primary_gpus}") 30 | print(f"Secondary GPUs: {cfg.secondary_gpus}") 31 | else: 32 | print(f"CPU job") 33 | print("--------------------------------------------------------") 34 | 35 | 36 | def make_cfg(args): 37 | cfg = get_cfg_defaults() 38 | cfg.merge_from_file('configs/default.yaml') 39 | cfg.merge_from_file(args.cfg) 40 | cfg.merge_from_list(args.opts) 41 | cfg.file_path = args.cfg 42 | parse_cfg(cfg) 43 | 44 | # determine_primary_secondary_gpus(cfg) 45 | 46 | return cfg 47 | 48 | 49 | parser = argparse.ArgumentParser() 50 | parser.add_argument("--cfg", default='configs/genhead-ffhq512.yaml', type=str) 51 | parser.add_argument("opts", default=None, nargs=argparse.REMAINDER) 52 | args = parser.parse_args() 53 | 54 | cfg = make_cfg(args) -------------------------------------------------------------------------------- /data_preprocess/bfm_to_flame/smpl_webuser/posemapper.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright 2015 Matthew Loper, Naureen Mahmood and the Max Planck Gesellschaft. All rights reserved. 3 | This software is provided for research purposes only. 4 | By using this software you agree to the terms of the SMPL Model license here http://smpl.is.tue.mpg.de/license 5 | 6 | More information about SMPL is available here http://smpl.is.tue.mpg. 7 | For comments or questions, please email us at: smpl@tuebingen.mpg.de 8 | 9 | 10 | About this file: 11 | ================ 12 | This module defines the mapping of joint-angles to pose-blendshapes. 
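config.py above builds the global cfg at import time: defaults first, then configs/default.yaml, then the --cfg file, then any trailing key/value pairs from the command line. A minimal sketch of that yacs-style override semantics; the keys and values here are hypothetical, and the repo vendors its own CfgNode in configs/yacs, which may differ from stock yacs:

    from yacs.config import CfgNode as CN

    _C = CN()
    _C.experiment = ''
    _C.batch = 32

    cfg = _C.clone()
    cfg.merge_from_list(['batch', '8'])  # CLI remainder, e.g.: train.py --cfg ... batch 8
    assert cfg.batch == 8                # '8' is decoded and type-checked against the default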
13 | 14 | Modules included: 15 | - posemap: 16 | computes the joint-to-pose blend shape mapping given a mapping type as input 17 | 18 | ''' 19 | 20 | import chumpy as ch 21 | import numpy as np 22 | import cv2 23 | 24 | class Rodrigues(ch.Ch): 25 | dterms = 'rt' 26 | 27 | def compute_r(self): 28 | return cv2.Rodrigues(self.rt.r)[0] 29 | 30 | def compute_dr_wrt(self, wrt): 31 | if wrt is self.rt: 32 | return cv2.Rodrigues(self.rt.r)[1].T 33 | 34 | 35 | def lrotmin(p): 36 | if isinstance(p, np.ndarray): 37 | p = p.ravel()[3:] 38 | return np.concatenate([(cv2.Rodrigues(np.array(pp))[0]-np.eye(3)).ravel() for pp in p.reshape((-1,3))]).ravel() 39 | if p.ndim != 2 or p.shape[1] != 3: 40 | p = p.reshape((-1,3)) 41 | p = p[1:] 42 | return ch.concatenate([(Rodrigues(pp)-ch.eye(3)).ravel() for pp in p]).ravel() 43 | 44 | def posemap(s): 45 | if s == 'lrotmin': 46 | return lrotmin 47 | else: 48 | raise Exception('Unknown posemapping: %s' % (str(s),)) 49 | -------------------------------------------------------------------------------- /data_preprocess/lib/model_builder.py: -------------------------------------------------------------------------------- 1 | ################################################# 2 | # Copyright (c) 2021-present, xiaobing.ai, Inc. # 3 | # All rights reserved. # 4 | ################################################# 5 | # CV Research, DEV(USA) xiaobing. # 6 | # written by wangduomin@xiaobing.ai # 7 | ################################################# 8 | 9 | import math 10 | import torch 11 | import torch.nn.init as init 12 | import numpy as np 13 | import lib.models as models 14 | 15 | def make_model(cfg): 16 | # net object init 17 | facerecon = None 18 | fd = None 19 | ldmk = None 20 | ldmk_3d = None 21 | 22 | 23 | ############################## build model ############################################################# 24 | return_list = [] 25 | 26 | # create facerecon model 27 | facerecon = models.define_networks( 28 | cfg.model.facerecon, 29 | cfg.model.facerecon.model_type, 30 | cfg.model.facerecon.model_cls 31 | ) 32 | return_list.append(facerecon) 33 | 34 | # create fd model 35 | fd = models.define_networks( 36 | cfg, 37 | cfg.model.fd.model_type, 38 | cfg.model.fd.model_cls 39 | ) 40 | return_list.append(fd) 41 | 42 | # create ldmk model 43 | ldmk = models.define_networks( 44 | cfg, 45 | cfg.model.ldmk.model_type, 46 | cfg.model.ldmk.model_cls 47 | ) 48 | return_list.append(ldmk) 49 | 50 | # create ldmk 3d model 51 | ldmk_3d = models.define_networks( 52 | cfg, 53 | cfg.model.ldmk_3d.model_type, 54 | cfg.model.ldmk_3d.model_cls 55 | ) 56 | return_list.append(ldmk_3d) 57 | 58 | return return_list 59 | -------------------------------------------------------------------------------- /portrait4d/torch_utils/ops/filtered_lrelu_rd.cu: -------------------------------------------------------------------------------- 1 | /* 2 | * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 3 | * SPDX-License-Identifier: LicenseRef-NvidiaProprietary 4 | * 5 | * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 6 | * property and proprietary rights in and to this material, related 7 | * documentation and any modifications thereto. Any use, reproduction, 8 | * disclosure or distribution of this material and related documentation 9 | * without an express license agreement from NVIDIA CORPORATION or 10 | * its affiliates is strictly prohibited. 
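lrotmin above maps a pose vector (one axis-angle triplet per joint) to the concatenated, flattened (R - I) matrices of every non-global joint, which is the feature SMPL's pose blendshapes are driven by. A minimal numpy check, assuming lrotmin from posemapper.py is in scope and cv2 is installed:

    import numpy as np

    p = np.zeros(3 * 3)   # 3 joints x 3 axis-angle params, all at rest
    out = lrotmin(p)      # global joint dropped: 2 joints * 9 entries, all zero
    assert out.shape == (18,) and not out.any()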
11 | */ 12 | 13 | #include "filtered_lrelu.cu" 14 | 15 | // Template/kernel specializations for sign read mode. 16 | 17 | // Full op, 32-bit indexing. 18 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<c10::Half, int32_t, false, true>(const filtered_lrelu_kernel_params& p, int sharedKB); 19 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<float, int32_t, false, true>(const filtered_lrelu_kernel_params& p, int sharedKB); 20 | 21 | // Full op, 64-bit indexing. 22 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<c10::Half, int64_t, false, true>(const filtered_lrelu_kernel_params& p, int sharedKB); 23 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<float, int64_t, false, true>(const filtered_lrelu_kernel_params& p, int sharedKB); 24 | 25 | // Activation/signs only for generic variant. 64-bit indexing. 26 | template void* choose_filtered_lrelu_act_kernel<c10::Half, false, true>(void); 27 | template void* choose_filtered_lrelu_act_kernel<float, false, true>(void); 28 | template void* choose_filtered_lrelu_act_kernel<double, false, true>(void); 29 | 30 | // Copy filters to constant memory. 31 | template cudaError_t copy_filters<false, true>(cudaStream_t stream); 32 | -------------------------------------------------------------------------------- /portrait4d/torch_utils/ops/filtered_lrelu_wr.cu: -------------------------------------------------------------------------------- 1 | /* 2 | * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 3 | * SPDX-License-Identifier: LicenseRef-NvidiaProprietary 4 | * 5 | * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 6 | * property and proprietary rights in and to this material, related 7 | * documentation and any modifications thereto. Any use, reproduction, 8 | * disclosure or distribution of this material and related documentation 9 | * without an express license agreement from NVIDIA CORPORATION or 10 | * its affiliates is strictly prohibited. 11 | */ 12 | 13 | #include "filtered_lrelu.cu" 14 | 15 | // Template/kernel specializations for sign write mode. 16 | 17 | // Full op, 32-bit indexing. 18 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<c10::Half, int32_t, true, false>(const filtered_lrelu_kernel_params& p, int sharedKB); 19 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<float, int32_t, true, false>(const filtered_lrelu_kernel_params& p, int sharedKB); 20 | 21 | // Full op, 64-bit indexing. 22 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<c10::Half, int64_t, true, false>(const filtered_lrelu_kernel_params& p, int sharedKB); 23 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<float, int64_t, true, false>(const filtered_lrelu_kernel_params& p, int sharedKB); 24 | 25 | // Activation/signs only for generic variant. 64-bit indexing. 26 | template void* choose_filtered_lrelu_act_kernel<c10::Half, true, false>(void); 27 | template void* choose_filtered_lrelu_act_kernel<float, true, false>(void); 28 | template void* choose_filtered_lrelu_act_kernel<double, true, false>(void); 29 | 30 | // Copy filters to constant memory.
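// Note: the trailing <signWrite, signRead> template arguments select the sign-buffer mode; following the upstream StyleGAN3 layout these files mirror, _wr.cu instantiates <true, false>, _rd.cu <false, true>, and _ns.cu <false, false>.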
31 | template cudaError_t copy_filters<true, false>(cudaStream_t stream); 32 | -------------------------------------------------------------------------------- /portrait4d/configs/portrait4d-v2-vfhq512.yaml: -------------------------------------------------------------------------------- 1 | experiment: 'portrait4d-v2-vfhq512' 2 | cfg: 'vfhq' 3 | outdir: './training-runs-portrait4d-v2' 4 | data: './data/VFHQ_sub50_512_4/' 5 | gpus: 8 6 | batch: 32 7 | kimg: 18000 8 | g_module: 'training.reconstructor.triplane_reconstruct.TriPlaneReconstructorNeutralize' 9 | d_module: 'training.discriminator.dual_discriminator.DualDiscriminatorDeform' 10 | glr: 1e-4 11 | dlr: 1e-4 12 | g_has_superresolution: True 13 | g_has_background: True 14 | g_flame_full: True 15 | g_num_blocks_neutral: 4 16 | g_num_blocks_motion: 4 17 | g_motion_map_layers: 2 18 | d_has_superresolution: True 19 | d_has_uv: True 20 | d_has_seg: True 21 | patch_scale: 1.0 22 | use_ws_ones: False # If true, ws==one for superresolution 23 | use_flame_mot: False # If true, use flame parameters as motion embedding 24 | cross_lr_scale: 2.5 # Learning rate scaling factor for motion-related layers 25 | static: False # If true, disable all motion-control and learn static 3d reconstruction model instead 26 | resume: './pretrained_models/portrait4d-static-genhead512.pkl' # Resume from pre-trained static model 27 | resume_kimg: 10000 28 | resume_fix: './pretrained_models/portrait4d-static-genhead512.pkl' # Checkpoint of pre-trained static model for multi-view data synthesis 29 | snap: 10 30 | density_reg_every: 4 31 | density_reg: 1 32 | reg_type: 'G_fix_img' 33 | mot_aug_prob: 0.9 # Probability of using arbitrary-view driving image for motion embedding extraction 34 | g_fix_reg_img_aug: True # Rescale the synthesized heads of static model to compensate for the scale inconsistency issue between heads with and without eye blink 35 | neural_rendering_resolution_initial: 128 36 | max_num: -1 # 'Max number of videos used for training', metavar='INT', type=click.IntRange(min=-1), default=-1, required=False, show_default=True -------------------------------------------------------------------------------- /portrait4d/configs/portrait4d-v2-vfhq512-toy.yaml: -------------------------------------------------------------------------------- 1 | experiment: 'portrait4d-v2-vfhq512-toy' 2 | cfg: 'vfhq' 3 | outdir: './training-runs-portrait4d-v2' 4 | data: './data/VFHQ_sub50_512_4/' 5 | gpus: 1 6 | batch: 2 7 | kimg: 18000 8 | g_module: 'training.reconstructor.triplane_reconstruct.TriPlaneReconstructorNeutralize' 9 | d_module: 'training.discriminator.dual_discriminator.DualDiscriminatorDeform' 10 | glr: 1e-4 11 | dlr: 1e-4 12 | g_has_superresolution: True 13 | g_has_background: True 14 | g_flame_full: True 15 | g_num_blocks_neutral: 4 16 | g_num_blocks_motion: 4 17 | g_motion_map_layers: 2 18 | d_has_superresolution: True 19 | d_has_uv: True 20 | d_has_seg: True 21 | patch_scale: 1.0 22 | use_ws_ones: False # If true, ws==one for superresolution 23 | use_flame_mot: False # If true, use flame parameters as motion embedding 24 | cross_lr_scale: 2.5 # Learning rate scaling factor for motion-related layers 25 | static: False # If true, disable all motion-control and learn static 3d reconstruction model instead 26 | resume: './pretrained_models/portrait4d-static-genhead512.pkl' # Resume from pre-trained static model 27 | resume_kimg: 10000 28 | resume_fix: './pretrained_models/portrait4d-static-genhead512.pkl' # Checkpoint of pre-trained static model for multi-view data synthesis
29 | snap: 10 30 | density_reg_every: 4 31 | density_reg: 1 32 | reg_type: 'G_fix_img' 33 | mot_aug_prob: 0.9 # Probability of using arbitrary-view driving image for motion embedding extraction 34 | g_fix_reg_img_aug: True # Rescale the synthesized heads of static model to compensate for the scale inconsistency issue between heads with and without eye blink 35 | neural_rendering_resolution_initial: 128 36 | max_num: -1 # 'Max number of videos used for training', metavar='INT', type=click.IntRange(min=-1), default=-1, required=False, show_default=True -------------------------------------------------------------------------------- /portrait4d/torch_utils/ops/filtered_lrelu_ns.cu: -------------------------------------------------------------------------------- 1 | /* 2 | * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 3 | * SPDX-License-Identifier: LicenseRef-NvidiaProprietary 4 | * 5 | * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 6 | * property and proprietary rights in and to this material, related 7 | * documentation and any modifications thereto. Any use, reproduction, 8 | * disclosure or distribution of this material and related documentation 9 | * without an express license agreement from NVIDIA CORPORATION or 10 | * its affiliates is strictly prohibited. 11 | */ 12 | 13 | #include "filtered_lrelu.cu" 14 | 15 | // Template/kernel specializations for no signs mode (no gradients required). 16 | 17 | // Full op, 32-bit indexing. 18 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<c10::Half, int32_t, false, false>(const filtered_lrelu_kernel_params& p, int sharedKB); 19 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<float, int32_t, false, false>(const filtered_lrelu_kernel_params& p, int sharedKB); 20 | 21 | // Full op, 64-bit indexing. 22 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<c10::Half, int64_t, false, false>(const filtered_lrelu_kernel_params& p, int sharedKB); 23 | template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<float, int64_t, false, false>(const filtered_lrelu_kernel_params& p, int sharedKB); 24 | 25 | // Activation/signs only for generic variant. 64-bit indexing. 26 | template void* choose_filtered_lrelu_act_kernel<c10::Half, false, false>(void); 27 | template void* choose_filtered_lrelu_act_kernel<float, false, false>(void); 28 | template void* choose_filtered_lrelu_act_kernel<double, false, false>(void); 29 | 30 | // Copy filters to constant memory.
31 | template cudaError_t copy_filters<false, false>(cudaStream_t stream); 32 | -------------------------------------------------------------------------------- /portrait4d/models/pdfgc/encoder.py: -------------------------------------------------------------------------------- 1 | # PD-FGC motion encoder, modified from https://github.com/Dorniwang/PD-FGC-inference 2 | import torch 3 | import torch.nn as nn 4 | import numpy as np 5 | import torch.nn.functional as F 6 | from .FAN_feature_extractor import FAN_use 7 | 8 | class FanEncoder(nn.Module): 9 | def __init__(self): 10 | super(FanEncoder, self).__init__() 11 | # pose_dim = self.opt.model.net_motion.pose_dim 12 | # eye_dim = self.opt.model.net_motion.eye_dim 13 | pose_dim = 6 14 | eye_dim = 6 15 | self.model = FAN_use() 16 | 17 | self.to_mouth = nn.Sequential(nn.Linear(512, 512), nn.ReLU(), nn.BatchNorm1d(512), nn.Linear(512, 512)) 18 | self.mouth_embed = nn.Sequential(nn.ReLU(), nn.Linear(512, 512-pose_dim-eye_dim)) 19 | 20 | self.to_headpose = nn.Sequential(nn.Linear(512, 512), nn.ReLU(), nn.BatchNorm1d(512), nn.Linear(512, 512)) 21 | self.headpose_embed = nn.Sequential(nn.ReLU(), nn.Linear(512, pose_dim)) 22 | 23 | self.to_eye = nn.Sequential(nn.Linear(512, 512), nn.ReLU(), nn.BatchNorm1d(512), nn.Linear(512, 512)) 24 | self.eye_embed = nn.Sequential(nn.ReLU(), nn.Linear(512, eye_dim)) 25 | 26 | self.to_emo = nn.Sequential(nn.Linear(512, 512), nn.ReLU(), nn.BatchNorm1d(512), nn.Linear(512, 512)) 27 | self.emo_embed = nn.Sequential(nn.ReLU(), nn.Linear(512, 30)) 28 | 29 | def forward_feature(self, x): 30 | net = self.model(x) 31 | return net 32 | 33 | def forward(self, x): 34 | x = self.model(x) 35 | mouth_feat = self.to_mouth(x) 36 | headpose_feat = self.to_headpose(x) 37 | headpose_emb = self.headpose_embed(headpose_feat) 38 | eye_feat = self.to_eye(x) 39 | eye_embed = self.eye_embed(eye_feat) 40 | emo_feat = self.to_emo(x) 41 | emo_embed = self.emo_embed(emo_feat) 42 | return headpose_emb, eye_embed, emo_embed, mouth_feat 43 | -------------------------------------------------------------------------------- /portrait4d/viz/render_depth_sample_widget.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited.
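# Viewer widget: the two combo indices below select from the 0.5x/1x/2x/4x multipliers in `render_types`, scaling the number of regular and importance depth samples per ray (both default to '2x').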
10 | 11 | import imgui 12 | from gui_utils import imgui_utils 13 | 14 | #---------------------------------------------------------------------------- 15 | 16 | class RenderDepthSampleWidget: 17 | def __init__(self, viz): 18 | self.viz = viz 19 | self.depth_mult = 2 20 | self.depth_importance_mult = 2 21 | self.render_types = [.5, 1, 2, 4] 22 | self.labels = ['0.5x', '1x', '2x', '4x'] 23 | 24 | @imgui_utils.scoped_by_object_id 25 | def __call__(self, show=True): 26 | viz = self.viz 27 | 28 | if show: 29 | imgui.text('Render Type') 30 | imgui.same_line(viz.label_w) 31 | with imgui_utils.item_width(viz.font_size * 4): 32 | _clicked, self.depth_mult = imgui.combo('Depth Sample Multiplier', self.depth_mult, self.labels) 33 | imgui.same_line(viz.label_w + viz.font_size * 16 + viz.spacing * 2) 34 | with imgui_utils.item_width(viz.font_size * 4): 35 | _clicked, self.depth_importance_mult = imgui.combo('Depth Sample Importance Multiplier', self.depth_importance_mult, self.labels) 36 | 37 | viz.args.depth_mult = self.render_types[self.depth_mult] 38 | viz.args.depth_importance_mult = self.render_types[self.depth_importance_mult] 39 | 40 | #---------------------------------------------------------------------------- 41 | -------------------------------------------------------------------------------- /portrait4d/viz/zoom_widget.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited. 10 | 11 | from inspect import formatargvalues 12 | import numpy as np 13 | import imgui 14 | import dnnlib 15 | from gui_utils import imgui_utils 16 | 17 | #---------------------------------------------------------------------------- 18 | 19 | class ZoomWidget: 20 | def __init__(self, viz): 21 | self.viz = viz 22 | self.fov = 18.837 23 | self.fov_default = 18.837 24 | 25 | @imgui_utils.scoped_by_object_id 26 | def __call__(self, show=True): 27 | viz = self.viz 28 | if show: 29 | imgui.text('FOV') 30 | imgui.same_line(viz.label_w) 31 | with imgui_utils.item_width(viz.font_size * 10): 32 | _changed, self.fov = imgui.slider_float('##fov', self.fov, 12, 45, format='%.2f Degrees') 33 | 34 | imgui.same_line(viz.label_w + viz.font_size * 13 + viz.button_w + viz.spacing * 3) 35 | snapped = round(self.fov) 36 | if imgui_utils.button('Snap', width=viz.button_w, enabled=(self.fov != snapped)): 37 | self.fov = snapped 38 | imgui.same_line() 39 | if imgui_utils.button('Reset', width=-1, enabled=(abs(self.fov - self.fov_default)) > .01): 40 | self.fov = self.fov_default 41 | 42 | viz.args.focal_length = float(1 / (np.tan(self.fov * 3.14159 / 360) * 1.414)) 43 | #---------------------------------------------------------------------------- 44 | -------------------------------------------------------------------------------- /portrait4d/metrics/inception_score.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. 2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited. 10 | 11 | """Inception Score (IS) from the paper "Improved techniques for training 12 | GANs". Matches the original implementation by Salimans et al. at 13 | https://github.com/openai/improved-gan/blob/master/inception_score/model.py""" 14 | 15 | import numpy as np 16 | from . import metric_utils 17 | 18 | #---------------------------------------------------------------------------- 19 | 20 | def compute_is(opts, num_gen, num_splits): 21 | # Direct TorchScript translation of http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz 22 | detector_url = 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/metrics/inception-2015-12-05.pkl' 23 | detector_kwargs = dict(no_output_bias=True) # Match the original implementation by not applying bias in the softmax layer. 24 | 25 | gen_probs = metric_utils.compute_feature_stats_for_generator( 26 | opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs, 27 | capture_all=True, max_items=num_gen).get_all() 28 | 29 | if opts.rank != 0: 30 | return float('nan'), float('nan') 31 | 32 | scores = [] 33 | for i in range(num_splits): 34 | part = gen_probs[i * num_gen // num_splits : (i + 1) * num_gen // num_splits] 35 | kl = part * (np.log(part) - np.log(np.mean(part, axis=0, keepdims=True))) 36 | kl = np.mean(np.sum(kl, axis=1)) 37 | scores.append(np.exp(kl)) 38 | return float(np.mean(scores)), float(np.std(scores)) 39 | 40 | #---------------------------------------------------------------------------- 41 | -------------------------------------------------------------------------------- /portrait4d/torch_utils/ops/upfirdn2d.h: -------------------------------------------------------------------------------- 1 | /* 2 | * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 3 | * SPDX-License-Identifier: LicenseRef-NvidiaProprietary 4 | * 5 | * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 6 | * property and proprietary rights in and to this material, related 7 | * documentation and any modifications thereto. Any use, reproduction, 8 | * disclosure or distribution of this material and related documentation 9 | * without an express license agreement from NVIDIA CORPORATION or 10 | * its affiliates is strictly prohibited. 11 | */ 12 | 13 | #include <cuda_runtime.h> 14 | 15 | //------------------------------------------------------------------------ 16 | // CUDA kernel parameters.
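// Rough shape rule (per axis): upfirdn2d upsamples by zero-insertion (`up`), pads, convolves with the FIR filter `f`, then keeps every `down`-th sample, so outSize = (inSize * up + totalPad - filterSize) / down + 1; the host wrapper precomputes `outSize` and `outStride` accordingly.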
17 | 18 | struct upfirdn2d_kernel_params 19 | { 20 | const void* x; 21 | const float* f; 22 | void* y; 23 | 24 | int2 up; 25 | int2 down; 26 | int2 pad0; 27 | int flip; 28 | float gain; 29 | 30 | int4 inSize; // [width, height, channel, batch] 31 | int4 inStride; 32 | int2 filterSize; // [width, height] 33 | int2 filterStride; 34 | int4 outSize; // [width, height, channel, batch] 35 | int4 outStride; 36 | int sizeMinor; 37 | int sizeMajor; 38 | 39 | int loopMinor; 40 | int loopMajor; 41 | int loopX; 42 | int launchMinor; 43 | int launchMajor; 44 | }; 45 | 46 | //------------------------------------------------------------------------ 47 | // CUDA kernel specialization. 48 | 49 | struct upfirdn2d_kernel_spec 50 | { 51 | void* kernel; 52 | int tileOutW; 53 | int tileOutH; 54 | int loopMinor; 55 | int loopX; 56 | }; 57 | 58 | //------------------------------------------------------------------------ 59 | // CUDA kernel selection. 60 | 61 | template <class T> upfirdn2d_kernel_spec choose_upfirdn2d_kernel(const upfirdn2d_kernel_params& p); 62 | 63 | //------------------------------------------------------------------------ 64 | -------------------------------------------------------------------------------- /portrait4d/metrics/frechet_inception_distance.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited. 10 | 11 | """Frechet Inception Distance (FID) from the paper 12 | "GANs trained by a two time-scale update rule converge to a local Nash 13 | equilibrium". Matches the original implementation by Heusel et al. at 14 | https://github.com/bioinf-jku/TTUR/blob/master/fid.py""" 15 | 16 | import numpy as np 17 | import scipy.linalg 18 | from . import metric_utils 19 | 20 | #---------------------------------------------------------------------------- 21 | 22 | def compute_fid(opts, max_real, num_gen): 23 | # Direct TorchScript translation of http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz 24 | detector_url = 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/metrics/inception-2015-12-05.pkl' 25 | detector_kwargs = dict(return_features=True) # Return raw features before the softmax layer.
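# The code below evaluates the closed-form Frechet distance between Gaussians fitted to the detector features: FID = ||mu_r - mu_g||^2 + Tr(S_r + S_g - 2 (S_r S_g)^(1/2)).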
26 | 27 | mu_real, sigma_real = metric_utils.compute_feature_stats_for_dataset( 28 | opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs, 29 | rel_lo=0, rel_hi=0, capture_mean_cov=True, max_items=max_real).get_mean_cov() 30 | 31 | mu_gen, sigma_gen = metric_utils.compute_feature_stats_for_generator( 32 | opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs, 33 | rel_lo=0, rel_hi=1, capture_mean_cov=True, max_items=num_gen).get_mean_cov() 34 | 35 | if opts.rank != 0: 36 | return float('nan') 37 | 38 | m = np.square(mu_gen - mu_real).sum() 39 | s, _ = scipy.linalg.sqrtm(np.dot(sigma_gen, sigma_real), disp=False) # pylint: disable=no-member 40 | fid = np.real(m + np.trace(sigma_gen + sigma_real - s * 2)) 41 | return float(fid) 42 | 43 | #---------------------------------------------------------------------------- 44 | -------------------------------------------------------------------------------- /data_preprocess/configs/pipeline_config1.yaml: -------------------------------------------------------------------------------- 1 | ################################################# 2 | # Copyright (c) 2021-present, xiaobing.ai, Inc. # 3 | # All rights reserved. # 4 | ################################################# 5 | # CV Research, DEV(USA) xiaobing. # 6 | # written by wangduomin@xiaobing.ai # 7 | ################################################# 8 | 9 | # experiment name 10 | 11 | # train type 12 | trainer_type: "none" 13 | dataset_type: "inference" 14 | 15 | # network configuration and defination 16 | model: 17 | # facerecon network (deep3d) 18 | facerecon: 19 | name: "deep3d_2023" 20 | model_type: "facerecon" 21 | model_cls: "FaceReconModel" 22 | net_recon: "resnet50" 23 | use_last_fc: False 24 | init_path: "" 25 | isTrain: False 26 | checkpoints_dir: "assets/facerecon" 27 | pretrained_name: "deep3d_2023" 28 | epoch: 20 29 | phase: "test" 30 | use_ddp: False 31 | parallel_names: ['net_recon'] 32 | bfm_folder: "assets/facerecon/bfm" 33 | # fd network 34 | fd: 35 | model_name: "retinaface" 36 | model_type: "fd" 37 | model_cls: "faceDetector" 38 | model_path: "assets/facedetect/retinaface/Resnet50_Final.pth" 39 | thres: 0.8 40 | nms_thres: 0.4 41 | img_size: 640 42 | config: { 43 | 'name': 'Resnet50', 44 | 'min_sizes': [[16, 32], [64, 128], [256, 512]], 45 | 'steps': [8, 16, 32], 46 | 'variance': [0.1, 0.2], 47 | 'clip': False, 48 | 'loc_weight': 2.0, 49 | 'gpu_train': True, 50 | 'batch_size': 24, 51 | 'ngpu': 4, 52 | 'epoch': 100, 53 | 'decay1': 70, 54 | 'decay2': 90, 55 | 'image_size': 840, 56 | 'pretrain': True, 57 | 'return_layers': {'layer2': 1, 'layer3': 2, 'layer4': 3}, 58 | 'in_channel': 256, 59 | 'out_channel': 256 60 | } 61 | 62 | # ldmk network 63 | ldmk: 64 | model_name: "h3r" 65 | model_type: "ldmk" 66 | model_cls: "ldmkDetector" 67 | model_path: "assets/hrnet_w18_wflw/h3r/model.pth" 68 | img_size: 256 69 | 70 | # ldmk 3d network 71 | ldmk_3d: 72 | model_name: "ldmk3d" 73 | model_type: "ldmk" 74 | model_cls: "ldmk3dDetector" 75 | model_path: "assets/landmark3d/3DFAN4-4a694010b9.zip" 76 | model_depth_path: "assets/landmark3d/depth-6c4283c0e0.zip" 77 | img_size: 256 -------------------------------------------------------------------------------- /portrait4d/torch_utils/ops/fma.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited. 10 | 11 | """Fused multiply-add, with slightly faster gradients than `torch.addcmul()`.""" 12 | 13 | import torch 14 | 15 | #---------------------------------------------------------------------------- 16 | 17 | def fma(a, b, c): # => a * b + c 18 | return _FusedMultiplyAdd.apply(a, b, c) 19 | 20 | #---------------------------------------------------------------------------- 21 | 22 | class _FusedMultiplyAdd(torch.autograd.Function): # a * b + c 23 | @staticmethod 24 | def forward(ctx, a, b, c): # pylint: disable=arguments-differ 25 | out = torch.addcmul(c, a, b) 26 | ctx.save_for_backward(a, b) 27 | ctx.c_shape = c.shape 28 | return out 29 | 30 | @staticmethod 31 | def backward(ctx, dout): # pylint: disable=arguments-differ 32 | a, b = ctx.saved_tensors 33 | c_shape = ctx.c_shape 34 | da = None 35 | db = None 36 | dc = None 37 | 38 | if ctx.needs_input_grad[0]: 39 | da = _unbroadcast(dout * b, a.shape) 40 | 41 | if ctx.needs_input_grad[1]: 42 | db = _unbroadcast(dout * a, b.shape) 43 | 44 | if ctx.needs_input_grad[2]: 45 | dc = _unbroadcast(dout, c_shape) 46 | 47 | return da, db, dc 48 | 49 | #---------------------------------------------------------------------------- 50 | 51 | def _unbroadcast(x, shape): 52 | extra_dims = x.ndim - len(shape) 53 | assert extra_dims >= 0 54 | dim = [i for i in range(x.ndim) if x.shape[i] > 1 and (i < extra_dims or shape[i - extra_dims] == 1)] 55 | if len(dim): 56 | x = x.sum(dim=dim, keepdim=True) 57 | if extra_dims: 58 | x = x.reshape(-1, *x.shape[extra_dims+1:]) 59 | assert x.shape == shape 60 | return x 61 | 62 | #---------------------------------------------------------------------------- 63 | -------------------------------------------------------------------------------- /portrait4d/models/FLAME/ca_shape.txt: -------------------------------------------------------------------------------- 1 | 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 
0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 2 | -------------------------------------------------------------------------------- /portrait4d/metrics/kernel_inception_distance.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited. 10 | 11 | """Kernel Inception Distance (KID) from the paper "Demystifying MMD 12 | GANs". Matches the original implementation by Binkowski et al. at 13 | https://github.com/mbinkowski/MMD-GAN/blob/master/gan/compute_scores.py""" 14 | 15 | import numpy as np 16 | from . import metric_utils 17 | 18 | #---------------------------------------------------------------------------- 19 | 20 | def compute_kid(opts, max_real, num_gen, num_subsets, max_subset_size): 21 | # Direct TorchScript translation of http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz 22 | detector_url = 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/metrics/inception-2015-12-05.pkl' 23 | detector_kwargs = dict(return_features=True) # Return raw features before the softmax layer. 
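# The loop below forms an unbiased MMD^2 estimate with the cubic polynomial kernel k(x, y) = (x.y / n + 1)^3, where n is the feature dimension, averaged over num_subsets random subsets of at most max_subset_size samples each.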
24 | 25 | real_features = metric_utils.compute_feature_stats_for_dataset( 26 | opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs, 27 | rel_lo=0, rel_hi=0, capture_all=True, max_items=max_real).get_all() 28 | 29 | gen_features = metric_utils.compute_feature_stats_for_generator( 30 | opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs, 31 | rel_lo=0, rel_hi=1, capture_all=True, max_items=num_gen).get_all() 32 | 33 | if opts.rank != 0: 34 | return float('nan') 35 | 36 | n = real_features.shape[1] 37 | m = min(min(real_features.shape[0], gen_features.shape[0]), max_subset_size) 38 | t = 0 39 | for _subset_idx in range(num_subsets): 40 | x = gen_features[np.random.choice(gen_features.shape[0], m, replace=False)] 41 | y = real_features[np.random.choice(real_features.shape[0], m, replace=False)] 42 | a = (x @ x.T / n + 1) ** 3 + (y @ y.T / n + 1) ** 3 43 | b = (x @ y.T / n + 1) ** 3 44 | t += (a.sum() - np.diag(a).sum()) / (m - 1) - b.sum() * 2 / m 45 | kid = t / num_subsets / m 46 | return float(kid) 47 | 48 | #---------------------------------------------------------------------------- 49 | -------------------------------------------------------------------------------- /portrait4d/viz/conditioning_pose_widget.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited. 
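# Widget controlling the yaw/pitch of the camera pose used as the generator's conditioning signal; values can be typed in, dragged, snapped to one decimal place, or reset.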
10 | 11 | import numpy as np 12 | import imgui 13 | import dnnlib 14 | from gui_utils import imgui_utils 15 | 16 | #---------------------------------------------------------------------------- 17 | 18 | class ConditioningPoseWidget: 19 | def __init__(self, viz): 20 | self.viz = viz 21 | self.pose = dnnlib.EasyDict(yaw=0, pitch=0, anim=False, speed=0.25) 22 | self.pose_def = dnnlib.EasyDict(self.pose) 23 | 24 | def drag(self, dx, dy): 25 | viz = self.viz 26 | self.pose.yaw += -dx / viz.font_size * 3e-2 27 | self.pose.pitch += -dy / viz.font_size * 3e-2 28 | 29 | @imgui_utils.scoped_by_object_id 30 | def __call__(self, show=True): 31 | viz = self.viz 32 | if show: 33 | imgui.text('Cond Pose') 34 | imgui.same_line(viz.label_w) 35 | yaw = self.pose.yaw 36 | pitch = self.pose.pitch 37 | with imgui_utils.item_width(viz.font_size * 5): 38 | changed, (new_yaw, new_pitch) = imgui.input_float2('##frac', yaw, pitch, format='%+.2f', flags=imgui.INPUT_TEXT_ENTER_RETURNS_TRUE) 39 | if changed: 40 | self.pose.yaw = new_yaw 41 | self.pose.pitch = new_pitch 42 | imgui.same_line(viz.label_w + viz.font_size * 13 + viz.spacing * 2) 43 | _clicked, dragging, dx, dy = imgui_utils.drag_button('Drag', width=viz.button_w) 44 | if dragging: 45 | self.drag(dx, dy) 46 | imgui.same_line() 47 | snapped = dnnlib.EasyDict(self.pose, yaw=round(self.pose.yaw, 1), pitch=round(self.pose.pitch, 1)) 48 | if imgui_utils.button('Snap', width=viz.button_w, enabled=(self.pose != snapped)): 49 | self.pose = snapped 50 | imgui.same_line() 51 | if imgui_utils.button('Reset', width=-1, enabled=(self.pose != self.pose_def)): 52 | self.pose = dnnlib.EasyDict(self.pose_def) 53 | 54 | viz.args.conditioning_yaw = self.pose.yaw 55 | viz.args.conditioning_pitch = self.pose.pitch 56 | 57 | #---------------------------------------------------------------------------- 58 | -------------------------------------------------------------------------------- /portrait4d/training/volumetric_rendering/ray_marcher.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited. 10 | 11 | """ 12 | The ray marcher takes the raw output of the implicit representation and uses the volume rendering equation to produce composited colors and depths. 13 | Based off of the implementation in MipNeRF (this one doesn't do any cone tracing though!) 
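In discrete form, with midpoint colors c_i, densities sigma_i and depth steps delta_i, run_forward below computes alpha_i = 1 - exp(-softplus(sigma_i - 1) * delta_i), w_i = alpha_i * prod_{j<i} (1 - alpha_j), C = sum_i w_i * c_i, and returns the leftover transmittance T_bg = 1 - sum_i w_i for background compositing.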
14 | """ 15 | 16 | import torch 17 | import torch.nn as nn 18 | import torch.nn.functional as F 19 | from torch_utils import persistence 20 | 21 | @persistence.persistent_class 22 | class MipRayMarcher2(nn.Module): 23 | def __init__(self): 24 | super().__init__() 25 | 26 | 27 | def run_forward(self, colors, densities, depths, rendering_options): 28 | deltas = depths[:, :, 1:] - depths[:, :, :-1] 29 | colors_mid = (colors[:, :, :-1] + colors[:, :, 1:]) / 2 30 | densities_mid = (densities[:, :, :-1] + densities[:, :, 1:]) / 2 31 | depths_mid = (depths[:, :, :-1] + depths[:, :, 1:]) / 2 32 | 33 | 34 | if rendering_options['clamp_mode'] == 'softplus': 35 | densities_mid = F.softplus(densities_mid - 1) # activation bias of -1 makes things initialize better 36 | else: 37 | assert False, "MipRayMarcher only supports `clamp_mode`=`softplus`!" 38 | 39 | 40 | 41 | density_delta = densities_mid * deltas 42 | 43 | alpha = 1 - torch.exp(-density_delta) 44 | 45 | alpha_shifted = torch.cat([torch.ones_like(alpha[:, :, :1]), 1-alpha + 1e-10], -2) 46 | weights = alpha * torch.cumprod(alpha_shifted, -2)[:, :, :-1] 47 | 48 | composite_rgb = torch.sum(weights * colors_mid, -2) 49 | weight_total = weights.sum(2) 50 | composite_depth = torch.sum(weights * depths_mid, -2) / weight_total 51 | 52 | # clip the composite to min/max range of depths 53 | composite_depth = torch.nan_to_num(composite_depth, float('inf')) 54 | composite_depth = torch.clamp(composite_depth, torch.min(depths), torch.max(depths)) 55 | 56 | if rendering_options.get('white_back', False): 57 | composite_rgb = composite_rgb + 1 - weight_total 58 | 59 | T_bg = 1 - weight_total 60 | 61 | # composite_rgb = composite_rgb * 2 - 1 # Scale to (-1, 1) 62 | 63 | return composite_rgb, composite_depth, weights, T_bg 64 | 65 | 66 | def forward(self, colors, densities, depths, rendering_options): 67 | composite_rgb, composite_depth, weights, T_bg = self.run_forward(colors, densities, depths, rendering_options) 68 | 69 | return composite_rgb, composite_depth, weights, T_bg -------------------------------------------------------------------------------- /data_preprocess/bfm_to_flame/smpl_webuser/lbs.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright 2015 Matthew Loper, Naureen Mahmood and the Max Planck Gesellschaft. All rights reserved. 3 | This software is provided for research purposes only. 4 | By using this software you agree to the terms of the SMPL Model license here http://smpl.is.tue.mpg.de/license 5 | 6 | More information about SMPL is available here http://smpl.is.tue.mpg. 7 | For comments or questions, please email us at: smpl@tuebingen.mpg.de 8 | 9 | 10 | About this file: 11 | ================ 12 | This file defines linear blend skinning for the SMPL loader which 13 | defines the effect of bones and blendshapes on the vertices of the template mesh. 
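In linear blend skinning each posed vertex is a weighted combination of bone transforms applied to the rest-pose vertex: v_i' = sum_j w_ij * G_j(theta, J) * v_i, where w_ij are the per-vertex skinning weights and G_j the global joint transforms produced by global_rigid_transformation.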
14 | 15 | Modules included: 16 | - global_rigid_transformation: 17 | computes global rotation & translation of the model 18 | - verts_core: [overloaded function inherited from verts.verts_core] 19 | computes the blending of joint-influences for each vertex based on type of skinning 20 | 21 | ''' 22 | 23 | from smpl_webuser.posemapper import posemap 24 | import chumpy 25 | import numpy as np 26 | 27 | def global_rigid_transformation(pose, J, kintree_table, xp): 28 | results = {} 29 | pose = pose.reshape((-1,3)) 30 | id_to_col = {kintree_table[1,i] : i for i in range(kintree_table.shape[1])} 31 | parent = {i : id_to_col[kintree_table[0,i]] for i in range(1, kintree_table.shape[1])} 32 | 33 | if xp == chumpy: 34 | from smpl_webuser.posemapper import Rodrigues 35 | rodrigues = lambda x : Rodrigues(x) 36 | else: 37 | import cv2 38 | rodrigues = lambda x : cv2.Rodrigues(x)[0] 39 | 40 | with_zeros = lambda x : xp.vstack((x, xp.array([[0.0, 0.0, 0.0, 1.0]]))) 41 | results[0] = with_zeros(xp.hstack((rodrigues(pose[0,:]), J[0,:].reshape((3,1))))) 42 | 43 | for i in range(1, kintree_table.shape[1]): 44 | results[i] = results[parent[i]].dot(with_zeros(xp.hstack(( 45 | rodrigues(pose[i,:]), 46 | ((J[i,:] - J[parent[i],:]).reshape((3,1))) 47 | )))) 48 | 49 | pack = lambda x : xp.hstack([np.zeros((4, 3)), x.reshape((4,1))]) 50 | 51 | results = [results[i] for i in sorted(results.keys())] 52 | results_global = results 53 | 54 | if True: 55 | results2 = [results[i] - (pack( 56 | results[i].dot(xp.concatenate( ( (J[i,:]), 0 ) ))) 57 | ) for i in range(len(results))] 58 | results = results2 59 | result = xp.dstack(results) 60 | return result, results_global 61 | 62 | 63 | def verts_core(pose, v, J, weights, kintree_table, want_Jtr=False, xp=chumpy): 64 | A, A_global = global_rigid_transformation(pose, J, kintree_table, xp) 65 | T = A.dot(weights.T) 66 | 67 | rest_shape_h = xp.vstack((v.T, np.ones((1, v.shape[0])))) 68 | 69 | v =(T[:,0,:] * rest_shape_h[0, :].reshape((1, -1)) + 70 | T[:,1,:] * rest_shape_h[1, :].reshape((1, -1)) + 71 | T[:,2,:] * rest_shape_h[2, :].reshape((1, -1)) + 72 | T[:,3,:] * rest_shape_h[3, :].reshape((1, -1))).T 73 | 74 | v = v[:,:3] 75 | 76 | if not want_Jtr: 77 | return v 78 | Jtr = xp.vstack([g[:3,3] for g in A_global]) 79 | return (v, Jtr) 80 | 81 | -------------------------------------------------------------------------------- /data_preprocess/lib/models/networks/vision_network.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | from torchvision.models.resnet import ResNet, Bottleneck 5 | # from util import util 6 | 7 | # model_urls = { 8 | # 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth', 9 | # } 10 | def copy_state_dict(state_dict, model, strip=None, replace=None): 11 | tgt_state = model.state_dict() 12 | copied_names = set() 13 | for name, param in state_dict.items(): 14 | if strip is not None and replace is None and name.startswith(strip): 15 | name = name[len(strip):] 16 | if strip is not None and replace is not None: 17 | name = name.replace(strip, replace) 18 | if name not in tgt_state: 19 | continue 20 | if isinstance(param, torch.nn.Parameter): 21 | param = param.data 22 | if param.size() != tgt_state[name].size(): 23 | print('mismatch:', name, param.size(), tgt_state[name].size()) 24 | continue 25 | tgt_state[name].copy_(param) 26 | copied_names.add(name) 27 | 28 | missing = set(tgt_state.keys()) - copied_names 29 | 
if len(missing) > 0: 30 | print("missing keys in state_dict:", missing) 31 | 32 | class ResNeXt50(nn.Module): 33 | def __init__(self, opt): 34 | super(ResNeXt50, self).__init__() 35 | self.model = ResNet(Bottleneck, [3, 4, 6, 3], groups=32, width_per_group=4) 36 | self.opt = opt 37 | # self.reduced_id_dim = opt.reduced_id_dim 38 | self.conv1x1 = nn.Conv2d(512 * Bottleneck.expansion, 512, kernel_size=1, padding=0) 39 | self.fc = nn.Linear(512 * Bottleneck.expansion, opt.data.num_classes) 40 | # self.fc_pre = nn.Sequential(nn.Linear(512 * Bottleneck.expansion, self.reduced_id_dim), nn.ReLU()) 41 | 42 | 43 | def load_pretrain(self, load_path): 44 | check_point = torch.load(load_path) 45 | copy_state_dict(check_point, self.model) 46 | 47 | def forward_feature(self, input): 48 | x = self.model.conv1(input) 49 | x = self.model.bn1(x) 50 | x = self.model.relu(x) 51 | x = self.model.maxpool(x) 52 | 53 | x = self.model.layer1(x) 54 | x = self.model.layer2(x) 55 | x = self.model.layer3(x) 56 | x = self.model.layer4(x) 57 | net = self.model.avgpool(x) 58 | net = torch.flatten(net, 1) 59 | x = self.conv1x1(x) 60 | # x = self.fc_pre(x) 61 | return net, x 62 | 63 | def forward(self, input): 64 | input_batch = input.view(-1, self.opt.model.output_nc, self.opt.data.img_size, self.opt.data.img_size) 65 | net, x = self.forward_feature(input_batch) 66 | net = net.view(-1, self.opt.num_inputs, 512 * Bottleneck.expansion) 67 | x = F.adaptive_avg_pool2d(x, (7, 7)) 68 | x = x.view(-1, self.opt.num_inputs, 512, 7, 7) 69 | net = torch.mean(net, 1) 70 | x = torch.mean(x, 1) 71 | cls_scores = self.fc(net) 72 | 73 | return [net, x], cls_scores 74 | # net is feature with dim all from channel; 75 | # x is feature with dim all from channel, but one more conv added and another 7*7 spatial size 76 | -------------------------------------------------------------------------------- /data_preprocess/cropping/one_euro_filter.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | 4 | def smoothing_factor(t_e, cutoff): 5 | r = 2 * math.pi * cutoff * t_e 6 | return r / (r + 1) 7 | 8 | 9 | def exponential_smoothing(a, x, x_prev): 10 | return a * x + (1 - a) * x_prev 11 | 12 | 13 | class OneEuroFilter: 14 | def __init__(self, t0, x0, dx0=0.0, min_cutoff=1.0, beta=0.0, 15 | d_cutoff=1.0): 16 | """Initialize the one euro filter.""" 17 | # The parameters. 18 | self.min_cutoff = float(min_cutoff) 19 | self.beta = float(beta) 20 | self.d_cutoff = float(d_cutoff) 21 | # Previous values. 22 | self.x_prev = float(x0) 23 | self.dx_prev = float(dx0) 24 | self.t_prev = float(t0) 25 | 26 | def __call__(self, t, x): 27 | """Compute the filtered signal.""" 28 | t_e = t - self.t_prev 29 | 30 | # The filtered derivative of the signal. 31 | a_d = smoothing_factor(t_e, self.d_cutoff) 32 | dx = (x - self.x_prev) / t_e 33 | dx_hat = exponential_smoothing(a_d, dx, self.dx_prev) 34 | 35 | # The filtered signal. 36 | cutoff = self.min_cutoff + self.beta * abs(dx_hat) 37 | a = smoothing_factor(t_e, cutoff) 38 | x_hat = exponential_smoothing(a, x, self.x_prev) 39 | 40 | # Memorize the previous values. 
41 | self.x_prev = x_hat 42 | self.dx_prev = dx_hat 43 | self.t_prev = t 44 | 45 | return x_hat 46 | 47 | class Smoother2d(): 48 | def __init__(self, min_cutoff = 0.001, beta = 0.1): 49 | self.x_smoother = [] 50 | 51 | self.min_cutoff = min_cutoff 52 | self.beta = beta 53 | 54 | def reset(self): 55 | # del self.smoother 56 | self.smoother = [] 57 | 58 | def smooth(self, idx, item): 59 | 60 | # If lag is severe in fast-motion scenes, increase beta; if jitter is severe in slow motion, decrease min_cutoff 61 | # min_cutoff = 0.001 62 | # beta = 0.01 63 | 64 | shape = item.shape 65 | 66 | return_item = item.copy() 67 | if idx == 0: 68 | for i in range(shape[0]): 69 | self.x_smoother.append(OneEuroFilter(idx, item[i][0], min_cutoff=self.min_cutoff, beta=self.beta)) 70 | else: 71 | for i in range(shape[0]): 72 | return_item[i][0] = self.x_smoother[i](idx, item[i][0]) 73 | 74 | return return_item 75 | 76 | 77 | class SmootherHighdim(): 78 | def __init__(self, min_cutoff = 0.001, beta = 0.1): 79 | self.smoother = None 80 | 81 | self.min_cutoff = min_cutoff 82 | self.beta = beta 83 | 84 | def reset(self): 85 | # del self.smoother 86 | self.smoother = None 87 | 88 | def smooth(self, idx, item): 89 | 90 | # If lag is severe in fast-motion scenes, increase beta; if jitter is severe in slow motion, decrease min_cutoff 91 | # min_cutoff = 0.001 92 | # beta = 0.01 93 | 94 | shape = item.shape 95 | 96 | return_item = item.copy() 97 | if idx == 0: 98 | self.smoother = OneEuroFilter(idx, item, min_cutoff=self.min_cutoff, beta=self.beta) 99 | else: 100 | return_item = self.smoother(idx, item) 101 | 102 | return return_item -------------------------------------------------------------------------------- /data_preprocess/lib/models/facerecon/__init__.py: -------------------------------------------------------------------------------- 1 | """This package contains modules related to objective functions, optimizations, and network architectures. 2 | 3 | To add a custom model class called 'dummy', you need to add a file called 'dummy_model.py' and define a subclass DummyModel inherited from BaseModel. 4 | You need to implement the following five functions: 5 | -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt). 6 | -- <set_input>: unpack data from dataset and apply preprocessing. 7 | -- <forward>: produce intermediate results. 8 | -- <optimize_parameters>: calculate loss, gradients, and update network weights. 9 | -- <modify_commandline_options>: (optionally) add model-specific options and set default options. 10 | 11 | In the function <__init__>, you need to define four lists: 12 | -- self.loss_names (str list): specify the training losses that you want to plot and save. 13 | -- self.model_names (str list): define networks used in our training. 14 | -- self.visual_names (str list): specify the images that you want to display and save. 15 | -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for example usage. 16 | 17 | Now you can use the model class by specifying flag '--model dummy'. 18 | See our template model class 'template_model.py' for more details. 19 | """ 20 | 21 | import importlib 22 | from lib.models.facerecon.base_model import BaseModel 23 | 24 | 25 | def find_model_using_name(model_name): 26 | """Import the module "models/[model_name]_model.py". 27 | 28 | In the file, the class called DatasetNameModel() will 29 | be instantiated. It has to be a subclass of BaseModel, 30 | and it is case-insensitive. 31 | """ 32 | model_filename = "models."
+ model_name + "_model" 33 | modellib = importlib.import_module(model_filename) 34 | model = None 35 | target_model_name = model_name.replace('_', '') + 'model' 36 | for name, cls in modellib.__dict__.items(): 37 | if name.lower() == target_model_name.lower() \ 38 | and issubclass(cls, BaseModel): 39 | model = cls 40 | 41 | if model is None: 42 | print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name)) 43 | exit(0) 44 | 45 | return model 46 | 47 | 48 | def get_option_setter(model_name): 49 | """Return the static method <modify_commandline_options> of the model class.""" 50 | model_class = find_model_using_name(model_name) 51 | return model_class.modify_commandline_options 52 | 53 | 54 | def create_model(opt): 55 | """Create a model given the option. 56 | 57 | This function wraps the class CustomDatasetDataLoader. 58 | This is the main interface between this package and 'train.py'/'test.py' 59 | 60 | Example: 61 | >>> from models import create_model 62 | >>> model = create_model(opt) 63 | """ 64 | model = find_model_using_name(opt.model) 65 | instance = model(opt) 66 | print("model [%s] was created" % type(instance).__name__) 67 | return instance 68 | -------------------------------------------------------------------------------- /portrait4d/viz/stylemix_widget.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited.
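# Style-mixing widget: one checkbox per ws layer marks which entries are replaced by styles drawn from `stylemix_seed`; the Anim toggle advances the seed every frame.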
10 | 11 | import imgui 12 | from gui_utils import imgui_utils 13 | 14 | #---------------------------------------------------------------------------- 15 | 16 | class StyleMixingWidget: 17 | def __init__(self, viz): 18 | self.viz = viz 19 | self.seed_def = 1000 20 | self.seed = self.seed_def 21 | self.animate = False 22 | self.enables = [] 23 | 24 | @imgui_utils.scoped_by_object_id 25 | def __call__(self, show=True): 26 | viz = self.viz 27 | num_ws = viz.result.get('num_ws', 0) 28 | num_enables = viz.result.get('num_ws', 18) 29 | self.enables += [False] * max(num_enables - len(self.enables), 0) 30 | 31 | if show: 32 | imgui.text('Stylemix') 33 | imgui.same_line(viz.label_w) 34 | with imgui_utils.item_width(viz.font_size * 8), imgui_utils.grayed_out(num_ws == 0): 35 | _changed, self.seed = imgui.input_int('##seed', self.seed) 36 | imgui.same_line(viz.label_w + viz.font_size * 8 + viz.spacing) 37 | with imgui_utils.grayed_out(num_ws == 0): 38 | _clicked, self.animate = imgui.checkbox('Anim', self.animate) 39 | 40 | pos2 = imgui.get_content_region_max()[0] - 1 - viz.button_w 41 | pos1 = pos2 - imgui.get_text_line_height() - viz.spacing 42 | pos0 = viz.label_w + viz.font_size * 12 43 | imgui.push_style_var(imgui.STYLE_FRAME_PADDING, [0, 0]) 44 | for idx in range(num_enables): 45 | imgui.same_line(round(pos0 + (pos1 - pos0) * (idx / (num_enables - 1)))) 46 | if idx == 0: 47 | imgui.set_cursor_pos_y(imgui.get_cursor_pos_y() + 3) 48 | with imgui_utils.grayed_out(num_ws == 0): 49 | _clicked, self.enables[idx] = imgui.checkbox(f'##{idx}', self.enables[idx]) 50 | if imgui.is_item_hovered(): 51 | imgui.set_tooltip(f'{idx}') 52 | imgui.pop_style_var(1) 53 | 54 | imgui.same_line(pos2) 55 | imgui.set_cursor_pos_y(imgui.get_cursor_pos_y() - 3) 56 | with imgui_utils.grayed_out(num_ws == 0): 57 | if imgui_utils.button('Reset', width=-1, enabled=(self.seed != self.seed_def or self.animate or any(self.enables[:num_enables]))): 58 | self.seed = self.seed_def 59 | self.animate = False 60 | self.enables = [False] * num_enables 61 | 62 | if any(self.enables[:num_ws]): 63 | viz.args.stylemix_idx = [idx for idx, enable in enumerate(self.enables) if enable] 64 | viz.args.stylemix_seed = self.seed & ((1 << 32) - 1) 65 | if self.animate: 66 | self.seed += 1 67 | 68 | #---------------------------------------------------------------------------- 69 | -------------------------------------------------------------------------------- /data_preprocess/extract_pdfgc.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) 4 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../portrait4d/'))) 5 | 6 | import click 7 | import dnnlib 8 | import numpy as np 9 | import PIL.Image 10 | import torch 11 | from tqdm import tqdm 12 | 13 | from portrait4d.training.utils.preprocess import estimate_norm_torch_pdfgc 14 | from portrait4d.models.pdfgc.encoder import FanEncoder 15 | from kornia.geometry import warp_affine 16 | import torch.nn.functional as F 17 | 18 | #---------------------------------------------------------------------------- 19 | 20 | def get_motion_feature(pd_fgc, imgs, lmks, crop_size=224, crop_len=16, reverse_y=False): 21 | 22 | trans_m = estimate_norm_torch_pdfgc(lmks, imgs.shape[-1], reverse_y=reverse_y) 23 | imgs_warp = warp_affine(imgs, trans_m, dsize=(224, 224)) 24 | imgs_warp = imgs_warp[:,:,:crop_size - crop_len*2, crop_len:crop_size - crop_len] 
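# with the defaults crop_size=224 and crop_len=16, the slice above keeps rows 0:192 and columns 16:208 of the warped face, which the next line resizes back to 224x224 and clamps to [-1, 1]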
25 | imgs_warp = torch.clamp(F.interpolate(imgs_warp,size=[crop_size,crop_size],mode='bilinear'),-1,1) 26 | 27 | out = pd_fgc(imgs_warp) 28 | motions = torch.cat([out[1],out[2],out[3]],dim=-1) 29 | 30 | return motions 31 | 32 | #---------------------------------------------------------------------------- 33 | 34 | @click.command() 35 | @click.option('--input_dir', help='Directory of the input images', type=str, default='', metavar='DIR') 36 | @click.option('--save_dir', help='Where to save the motion embeddings', type=str, default='', metavar='DIR') 37 | def extract_motion_feature( 38 | input_dir: str, 39 | save_dir: str, 40 | ): 41 | device = torch.device('cuda') 42 | 43 | save_dir_mot = os.path.join(save_dir,'motion_feats') 44 | os.makedirs(save_dir_mot, exist_ok=True) 45 | 46 | # load motion encoder 47 | pd_fgc = FanEncoder() 48 | weight_dict = torch.load('../portrait4d/models/pdfgc/weights/motion_model.pth') 49 | pd_fgc.load_state_dict(weight_dict, strict=False) 50 | pd_fgc = pd_fgc.eval().to(device) 51 | 52 | img_list = sorted(os.listdir(os.path.join(input_dir,'align_images'))) 53 | 54 | with torch.no_grad(): 55 | for idx, img_name in enumerate(img_list): 56 | print('Extracting motion embedding for image %s (%d/%d) ...' % (img_name, idx, len(img_list))) 57 | 58 | # source image 59 | img = np.array(PIL.Image.open(os.path.join(input_dir, 'align_images', img_name))) 60 | img = torch.from_numpy((img.astype(np.float32)/127.5 - 1)).to(device) 61 | img = img.permute([2,0,1]).unsqueeze(0) 62 | 63 | # source landmarks: y axis points downwards 64 | lmks = np.load(os.path.join(input_dir,'3dldmks_align',img_name.replace('.png','.npy').replace('.jpg','.npy'))) 65 | lmks = torch.from_numpy(lmks).to(device).unsqueeze(0) 66 | 67 | # calculate motion embedding 68 | motion = get_motion_feature(pd_fgc, img, lmks).squeeze(0).cpu().numpy() 69 | np.save(os.path.join(save_dir_mot,img_name.replace('.png','.npy').replace('.jpg','.npy')), motion) 70 | 71 | 72 | #---------------------------------------------------------------------------- 73 | 74 | if __name__ == "__main__": 75 | extract_motion_feature() # pylint: disable=no-value-for-parameter 76 | 77 | #---------------------------------------------------------------------------- 78 | -------------------------------------------------------------------------------- /portrait4d/torch_utils/ops/grid_sample_gradfix.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited. 10 | 11 | """Custom replacement for `torch.nn.functional.grid_sample` that 12 | supports arbitrarily high order gradients between the input and output.
13 | Only works on 2D images and assumes 14 | `mode='bilinear'`, `padding_mode='zeros'`, `align_corners=False`.""" 15 | 16 | import torch 17 | 18 | # pylint: disable=redefined-builtin 19 | # pylint: disable=arguments-differ 20 | # pylint: disable=protected-access 21 | 22 | #---------------------------------------------------------------------------- 23 | 24 | enabled = False # Enable the custom op by setting this to true. 25 | 26 | #---------------------------------------------------------------------------- 27 | 28 | def grid_sample(input, grid): 29 | if _should_use_custom_op(): 30 | return _GridSample2dForward.apply(input, grid) 31 | return torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False) 32 | 33 | #---------------------------------------------------------------------------- 34 | 35 | def _should_use_custom_op(): 36 | return enabled 37 | 38 | #---------------------------------------------------------------------------- 39 | 40 | class _GridSample2dForward(torch.autograd.Function): 41 | @staticmethod 42 | def forward(ctx, input, grid): 43 | assert input.ndim == 4 44 | assert grid.ndim == 4 45 | output = torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False) 46 | ctx.save_for_backward(input, grid) 47 | return output 48 | 49 | @staticmethod 50 | def backward(ctx, grad_output): 51 | input, grid = ctx.saved_tensors 52 | grad_input, grad_grid = _GridSample2dBackward.apply(grad_output, input, grid) 53 | return grad_input, grad_grid 54 | 55 | #---------------------------------------------------------------------------- 56 | 57 | class _GridSample2dBackward(torch.autograd.Function): 58 | @staticmethod 59 | def forward(ctx, grad_output, input, grid): 60 | op = torch._C._jit_get_operation('aten::grid_sampler_2d_backward') 61 | grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False) 62 | ctx.save_for_backward(grid) 63 | return grad_input, grad_grid 64 | 65 | @staticmethod 66 | def backward(ctx, grad2_grad_input, grad2_grad_grid): 67 | _ = grad2_grad_grid # unused 68 | grid, = ctx.saved_tensors 69 | grad2_grad_output = None 70 | grad2_input = None 71 | grad2_grid = None 72 | 73 | if ctx.needs_input_grad[0]: 74 | grad2_grad_output = _GridSample2dForward.apply(grad2_grad_input, grid) 75 | 76 | assert not ctx.needs_input_grad[2] 77 | return grad2_grad_output, grad2_input, grad2_grid 78 | 79 | #---------------------------------------------------------------------------- 80 | -------------------------------------------------------------------------------- /data_preprocess/bfm2flame_mapper.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import torch 4 | from torch import nn 5 | import torch.optim as optim 6 | import torch.nn.functional as F 7 | 8 | class Mapper(nn.Module): 9 | def __init__(self, in_dim=144, hidden_dim=256, out_dim=403, layers=1): 10 | super(Mapper, self).__init__() 11 | self.maps = nn.ModuleList([]) 12 | for i in range(layers): 13 | in_dim_ = in_dim if i == 0 else hidden_dim 14 | out_dim_ = out_dim if i == layers-1 else hidden_dim 15 | self.maps.append(nn.Linear(in_dim_, out_dim_, bias=True)) 16 | if not i == layers-1: 17 | self.maps.append(nn.LeakyReLU(negative_slope=0.01, inplace=True)) 18 | 19 | self.maps[-1].apply(self._zero_weights) 20 | 21 | def _zero_weights(self,m): 22 | if isinstance(m, nn.Linear): 23 | nn.init.constant_(m.weight,0) 24 | if isinstance(m, nn.Linear) and 
m.bias is not None: 25 | nn.init.constant_(m.bias,0) 26 | 27 | def forward(self, x): 28 | for layer in self.maps: 29 | x = layer(x) 30 | return x 31 | 32 | def train(): 33 | torch.manual_seed(0) 34 | device = torch.device('cuda') 35 | save_path = os.path.join('assets','bfm2flame_mapper') 36 | os.makedirs(save_path,exist_ok=True) 37 | 38 | bfm_params = np.load('../portrait4d/data/ffhq_all_shape_n_motion_bfm_params.npy') 39 | flame_params = np.load('../portrait4d/data/ffhq_all_shape_n_motion_params.npy') 40 | bfm_params = torch.from_numpy(bfm_params).reshape(-1,bfm_params.shape[-1]).to(device) 41 | flame_params = torch.from_numpy(flame_params).reshape(-1,flame_params.shape[-1]).to(device) 42 | 43 | bfm_params_train = bfm_params[:130000] 44 | bfm_params_val = bfm_params[130000:] 45 | 46 | flame_params_train = flame_params[:130000] 47 | flame_params_val = flame_params[130000:] 48 | 49 | in_dim = bfm_params.shape[-1] 50 | out_dim = flame_params.shape[-1] 51 | 52 | print('Define models') 53 | model = Mapper(in_dim=in_dim, hidden_dim=512, out_dim=out_dim, layers=2).to(device) 54 | optimizer = optim.Adam(model.parameters(), lr=0.001) 55 | criterion = nn.L1Loss() 56 | 57 | batchsize = 512 58 | iterations = 200001 59 | 60 | print('Start training') 61 | for iter in range(iterations): 62 | cur_idx = torch.randperm(len(bfm_params_train))[:batchsize] 63 | x = bfm_params_train[cur_idx] 64 | y = flame_params_train[cur_idx] 65 | 66 | model.train() 67 | optimizer.zero_grad() 68 | outputs = model(x) 69 | loss = criterion(outputs, y) 70 | loss.backward() 71 | optimizer.step() 72 | 73 | if iter % 200 == 0: 74 | print(f'Train: Iter [{iter+1}/{iterations}], Loss: {loss.item():.4f}') 75 | 76 | if iter % 200 == 0: 77 | with torch.no_grad(): 78 | x_val = bfm_params_val 79 | y_val = flame_params_val 80 | model.eval() 81 | outputs_val = model(x_val) 82 | loss = criterion(outputs_val, y_val) 83 | 84 | print(f'Validation: Iter [{iter+1}/{iterations}], Loss: {loss.item():.4f}') 85 | 86 | if iter % 50000 == 0: 87 | torch.save(model.state_dict(), os.path.join(save_path,f"model-iter{iter:06d}.pth")) 88 | 89 | if __name__ == "__main__": 90 | train() 91 | -------------------------------------------------------------------------------- /data_preprocess/bfm_to_flame/smpl_webuser/verts.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Copyright 2015 Matthew Loper, Naureen Mahmood and the Max Planck Gesellschaft. All rights reserved. 3 | This software is provided for research purposes only. 4 | By using this software you agree to the terms of the SMPL Model license here http://smpl.is.tue.mpg.de/license 5 | 6 | More information about SMPL is available here http://smpl.is.tue.mpg. 7 | For comments or questions, please email us at: smpl@tuebingen.mpg.de 8 | 9 | 10 | About this file: 11 | ================ 12 | This file defines the basic skinning modules for the SMPL loader which 13 | defines the effect of bones and blendshapes on the vertices of the template mesh. 14 | 15 | Modules included: 16 | - verts_decorated: 17 | creates an instance of the SMPL model which inherits model attributes from another 18 | SMPL model. 
19 | - verts_core: [overloaded function inherited by lbs.verts_core] 20 | computes the blending of joint-influences for each vertex based on type of skinning 21 | 22 | ''' 23 | 24 | import chumpy 25 | import smpl_webuser.lbs as lbs 26 | from smpl_webuser.posemapper import posemap 27 | import scipy.sparse as sp 28 | from chumpy.ch import MatVecMult 29 | 30 | def ischumpy(x): return hasattr(x, 'dterms') 31 | 32 | def verts_decorated(trans, pose, 33 | v_template, J, weights, kintree_table, bs_style, f, 34 | bs_type=None, posedirs=None, betas=None, shapedirs=None, want_Jtr=False): 35 | 36 | for which in [trans, pose, v_template, weights, posedirs, betas, shapedirs]: 37 | if which is not None: 38 | assert ischumpy(which) 39 | 40 | v = v_template 41 | 42 | if shapedirs is not None: 43 | if betas is None: 44 | betas = chumpy.zeros(shapedirs.shape[-1]) 45 | v_shaped = v + shapedirs.dot(betas) 46 | else: 47 | v_shaped = v 48 | 49 | if posedirs is not None: 50 | v_posed = v_shaped + posedirs.dot(posemap(bs_type)(pose)) 51 | else: 52 | v_posed = v_shaped 53 | 54 | v = v_posed 55 | 56 | if sp.issparse(J): 57 | regressor = J 58 | J_tmpx = MatVecMult(regressor, v_shaped[:,0]) 59 | J_tmpy = MatVecMult(regressor, v_shaped[:,1]) 60 | J_tmpz = MatVecMult(regressor, v_shaped[:,2]) 61 | J = chumpy.vstack((J_tmpx, J_tmpy, J_tmpz)).T 62 | else: 63 | assert(ischumpy(J)) 64 | 65 | assert(bs_style=='lbs') 66 | result, Jtr = lbs.verts_core(pose, v, J, weights, kintree_table, want_Jtr=True, xp=chumpy) 67 | 68 | tr = trans.reshape((1,3)) 69 | result = result + tr 70 | Jtr = Jtr + tr 71 | 72 | result.trans = trans 73 | result.f = f 74 | result.pose = pose 75 | result.v_template = v_template 76 | result.J = J 77 | result.weights = weights 78 | result.kintree_table = kintree_table 79 | result.bs_style = bs_style 80 | result.bs_type =bs_type 81 | if posedirs is not None: 82 | result.posedirs = posedirs 83 | result.v_posed = v_posed 84 | if shapedirs is not None: 85 | result.shapedirs = shapedirs 86 | result.betas = betas 87 | result.v_shaped = v_shaped 88 | if want_Jtr: 89 | result.J_transformed = Jtr 90 | return result 91 | 92 | def verts_core(pose, v, J, weights, kintree_table, bs_style, want_Jtr=False, xp=chumpy): 93 | 94 | if xp == chumpy: 95 | assert(hasattr(pose, 'dterms')) 96 | assert(hasattr(v, 'dterms')) 97 | assert(hasattr(J, 'dterms')) 98 | assert(hasattr(weights, 'dterms')) 99 | 100 | assert(bs_style=='lbs') 101 | result = lbs.verts_core(pose, v, J, weights, kintree_table, want_Jtr, xp) 102 | 103 | return result 104 | -------------------------------------------------------------------------------- /portrait4d/viz/performance_widget.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited. 
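# Performance widget: plots rolling GUI and render frame-time histories and
# exposes FPS-limit, vertical-sync, separate-process rendering, and force-FP32
# toggles that are pushed back into the visualizer on every call.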
10 | 11 | import array 12 | import numpy as np 13 | import imgui 14 | from gui_utils import imgui_utils 15 | 16 | #---------------------------------------------------------------------------- 17 | 18 | class PerformanceWidget: 19 | def __init__(self, viz): 20 | self.viz = viz 21 | self.gui_times = [float('nan')] * 60 22 | self.render_times = [float('nan')] * 30 23 | self.fps_limit = 60 24 | self.use_vsync = False 25 | self.is_async = False 26 | self.force_fp32 = False 27 | 28 | @imgui_utils.scoped_by_object_id 29 | def __call__(self, show=True): 30 | viz = self.viz 31 | self.gui_times = self.gui_times[1:] + [viz.frame_delta] 32 | if 'render_time' in viz.result: 33 | self.render_times = self.render_times[1:] + [viz.result.render_time] 34 | del viz.result.render_time 35 | 36 | if show: 37 | imgui.text('GUI') 38 | imgui.same_line(viz.label_w) 39 | with imgui_utils.item_width(viz.font_size * 8): 40 | imgui.plot_lines('##gui_times', array.array('f', self.gui_times), scale_min=0) 41 | imgui.same_line(viz.label_w + viz.font_size * 9) 42 | t = [x for x in self.gui_times if x > 0] 43 | t = np.mean(t) if len(t) > 0 else 0 44 | imgui.text(f'{t*1e3:.1f} ms' if t > 0 else 'N/A') 45 | imgui.same_line(viz.label_w + viz.font_size * 14) 46 | imgui.text(f'{1/t:.1f} FPS' if t > 0 else 'N/A') 47 | imgui.same_line(viz.label_w + viz.font_size * 18 + viz.spacing * 3) 48 | with imgui_utils.item_width(viz.font_size * 6): 49 | _changed, self.fps_limit = imgui.input_int('FPS limit', self.fps_limit, flags=imgui.INPUT_TEXT_ENTER_RETURNS_TRUE) 50 | self.fps_limit = min(max(self.fps_limit, 5), 1000) 51 | imgui.same_line(imgui.get_content_region_max()[0] - 1 - viz.button_w * 2 - viz.spacing) 52 | _clicked, self.use_vsync = imgui.checkbox('Vertical sync', self.use_vsync) 53 | 54 | if show: 55 | imgui.text('Render') 56 | imgui.same_line(viz.label_w) 57 | with imgui_utils.item_width(viz.font_size * 8): 58 | imgui.plot_lines('##render_times', array.array('f', self.render_times), scale_min=0) 59 | imgui.same_line(viz.label_w + viz.font_size * 9) 60 | t = [x for x in self.render_times if x > 0] 61 | t = np.mean(t) if len(t) > 0 else 0 62 | imgui.text(f'{t*1e3:.1f} ms' if t > 0 else 'N/A') 63 | imgui.same_line(viz.label_w + viz.font_size * 14) 64 | imgui.text(f'{1/t:.1f} FPS' if t > 0 else 'N/A') 65 | imgui.same_line(viz.label_w + viz.font_size * 18 + viz.spacing * 3) 66 | _clicked, self.is_async = imgui.checkbox('Separate process', self.is_async) 67 | imgui.same_line(imgui.get_content_region_max()[0] - 1 - viz.button_w * 2 - viz.spacing) 68 | _clicked, self.force_fp32 = imgui.checkbox('Force FP32', self.force_fp32) 69 | 70 | viz.set_fps_limit(self.fps_limit) 71 | viz.set_vsync(self.use_vsync) 72 | viz.set_async(self.is_async) 73 | viz.args.force_fp32 = self.force_fp32 74 | 75 | #---------------------------------------------------------------------------- 76 | -------------------------------------------------------------------------------- /portrait4d/metrics/precision_recall.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. 
Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited. 10 | 11 | """Precision/Recall (PR) from the paper "Improved Precision and Recall 12 | Metric for Assessing Generative Models". Matches the original implementation 13 | by Kynkaanniemi et al. at 14 | https://github.com/kynkaat/improved-precision-and-recall-metric/blob/master/precision_recall.py""" 15 | 16 | import torch 17 | from . import metric_utils 18 | 19 | #---------------------------------------------------------------------------- 20 | 21 | def compute_distances(row_features, col_features, num_gpus, rank, col_batch_size): 22 | assert 0 <= rank < num_gpus 23 | num_cols = col_features.shape[0] 24 | num_batches = ((num_cols - 1) // col_batch_size // num_gpus + 1) * num_gpus 25 | col_batches = torch.nn.functional.pad(col_features, [0, 0, 0, -num_cols % num_batches]).chunk(num_batches) 26 | dist_batches = [] 27 | for col_batch in col_batches[rank :: num_gpus]: 28 | dist_batch = torch.cdist(row_features.unsqueeze(0), col_batch.unsqueeze(0))[0] 29 | for src in range(num_gpus): 30 | dist_broadcast = dist_batch.clone() 31 | if num_gpus > 1: 32 | torch.distributed.broadcast(dist_broadcast, src=src) 33 | dist_batches.append(dist_broadcast.cpu() if rank == 0 else None) 34 | return torch.cat(dist_batches, dim=1)[:, :num_cols] if rank == 0 else None 35 | 36 | #---------------------------------------------------------------------------- 37 | 38 | def compute_pr(opts, max_real, num_gen, nhood_size, row_batch_size, col_batch_size): 39 | detector_url = 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/metrics/vgg16.pkl' 40 | detector_kwargs = dict(return_features=True) 41 | 42 | real_features = metric_utils.compute_feature_stats_for_dataset( 43 | opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs, 44 | rel_lo=0, rel_hi=0, capture_all=True, max_items=max_real).get_all_torch().to(torch.float16).to(opts.device) 45 | 46 | gen_features = metric_utils.compute_feature_stats_for_generator( 47 | opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs, 48 | rel_lo=0, rel_hi=1, capture_all=True, max_items=num_gen).get_all_torch().to(torch.float16).to(opts.device) 49 | 50 | results = dict() 51 | for name, manifold, probes in [('precision', real_features, gen_features), ('recall', gen_features, real_features)]: 52 | kth = [] 53 | for manifold_batch in manifold.split(row_batch_size): 54 | dist = compute_distances(row_features=manifold_batch, col_features=manifold, num_gpus=opts.num_gpus, rank=opts.rank, col_batch_size=col_batch_size) 55 | kth.append(dist.to(torch.float32).kthvalue(nhood_size + 1).values.to(torch.float16) if opts.rank == 0 else None) 56 | kth = torch.cat(kth) if opts.rank == 0 else None 57 | pred = [] 58 | for probes_batch in probes.split(row_batch_size): 59 | dist = compute_distances(row_features=probes_batch, col_features=manifold, num_gpus=opts.num_gpus, rank=opts.rank, col_batch_size=col_batch_size) 60 | pred.append((dist <= kth).any(dim=1) if opts.rank == 0 else None) 61 | results[name] = float(torch.cat(pred).to(torch.float32).mean() if opts.rank == 0 else 'nan') 62 | return results['precision'], results['recall'] 63 | 64 | #---------------------------------------------------------------------------- 65 | -------------------------------------------------------------------------------- /portrait4d/viz/capture_widget.py: 
-------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited. 10 | 11 | import os 12 | import re 13 | import numpy as np 14 | import imgui 15 | import PIL.Image 16 | from gui_utils import imgui_utils 17 | from . import renderer 18 | 19 | #---------------------------------------------------------------------------- 20 | 21 | class CaptureWidget: 22 | def __init__(self, viz): 23 | self.viz = viz 24 | self.path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '_screenshots')) 25 | self.dump_image = False 26 | self.dump_gui = False 27 | self.defer_frames = 0 28 | self.disabled_time = 0 29 | 30 | def dump_png(self, image): 31 | viz = self.viz 32 | try: 33 | _height, _width, channels = image.shape 34 | assert channels in [1, 3] 35 | assert image.dtype == np.uint8 36 | os.makedirs(self.path, exist_ok=True) 37 | file_id = 0 38 | for entry in os.scandir(self.path): 39 | if entry.is_file(): 40 | match = re.fullmatch(r'(\d+).*', entry.name) 41 | if match: 42 | file_id = max(file_id, int(match.group(1)) + 1) 43 | if channels == 1: 44 | pil_image = PIL.Image.fromarray(image[:, :, 0], 'L') 45 | else: 46 | pil_image = PIL.Image.fromarray(image, 'RGB') 47 | pil_image.save(os.path.join(self.path, f'{file_id:05d}.png')) 48 | except: 49 | viz.result.error = renderer.CapturedException() 50 | 51 | @imgui_utils.scoped_by_object_id 52 | def __call__(self, show=True): 53 | viz = self.viz 54 | if show: 55 | with imgui_utils.grayed_out(self.disabled_time != 0): 56 | imgui.text('Capture') 57 | imgui.same_line(viz.label_w) 58 | _changed, self.path = imgui_utils.input_text('##path', self.path, 1024, 59 | flags=(imgui.INPUT_TEXT_AUTO_SELECT_ALL | imgui.INPUT_TEXT_ENTER_RETURNS_TRUE), 60 | width=(-1 - viz.button_w * 2 - viz.spacing * 2), 61 | help_text='PATH') 62 | if imgui.is_item_hovered() and not imgui.is_item_active() and self.path != '': 63 | imgui.set_tooltip(self.path) 64 | imgui.same_line() 65 | if imgui_utils.button('Save image', width=viz.button_w, enabled=(self.disabled_time == 0 and 'image' in viz.result)): 66 | self.dump_image = True 67 | self.defer_frames = 2 68 | self.disabled_time = 0.5 69 | imgui.same_line() 70 | if imgui_utils.button('Save GUI', width=-1, enabled=(self.disabled_time == 0)): 71 | self.dump_gui = True 72 | self.defer_frames = 2 73 | self.disabled_time = 0.5 74 | 75 | self.disabled_time = max(self.disabled_time - viz.frame_delta, 0) 76 | if self.defer_frames > 0: 77 | self.defer_frames -= 1 78 | elif self.dump_image: 79 | if 'image' in viz.result: 80 | self.dump_png(viz.result.image) 81 | self.dump_image = False 82 | elif self.dump_gui: 83 | viz.capture_next_frame() 84 | self.dump_gui = False 85 | captured_frame = viz.pop_captured_frame() 86 | if captured_frame is not None: 87 | self.dump_png(captured_frame) 88 | 89 | #---------------------------------------------------------------------------- 90 | 
-------------------------------------------------------------------------------- /portrait4d/viz/trunc_noise_widget.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited. 10 | 11 | import imgui 12 | from gui_utils import imgui_utils 13 | 14 | #---------------------------------------------------------------------------- 15 | 16 | class TruncationNoiseWidget: 17 | def __init__(self, viz): 18 | self.viz = viz 19 | self.prev_num_ws = 0 20 | self.trunc_psi = 0.7 21 | self.trunc_cutoff = 7 22 | self.noise_enable = True 23 | self.noise_seed = 0 24 | self.noise_anim = False 25 | 26 | @imgui_utils.scoped_by_object_id 27 | def __call__(self, show=True): 28 | viz = self.viz 29 | num_ws = viz.result.get('num_ws', 0) 30 | has_noise = viz.result.get('has_noise', False) 31 | if num_ws > 0 and num_ws != self.prev_num_ws: 32 | if self.trunc_cutoff > num_ws or self.trunc_cutoff == self.prev_num_ws: 33 | self.trunc_cutoff = num_ws 34 | self.prev_num_ws = num_ws 35 | 36 | if show: 37 | imgui.text('Truncate') 38 | imgui.same_line(viz.label_w) 39 | with imgui_utils.item_width(viz.font_size * 10), imgui_utils.grayed_out(num_ws == 0): 40 | _changed, self.trunc_psi = imgui.slider_float('##psi', self.trunc_psi, -1, 2, format='Psi %.2f') 41 | imgui.same_line() 42 | if num_ws == 0: 43 | imgui_utils.button('Cutoff 0', width=(viz.font_size * 8 + viz.spacing), enabled=False) 44 | else: 45 | with imgui_utils.item_width(viz.font_size * 8 + viz.spacing): 46 | changed, new_cutoff = imgui.slider_int('##cutoff', self.trunc_cutoff, 0, num_ws, format='Cutoff %d') 47 | if changed: 48 | self.trunc_cutoff = min(max(new_cutoff, 0), num_ws) 49 | 50 | with imgui_utils.grayed_out(not has_noise): 51 | imgui.same_line() 52 | _clicked, self.noise_enable = imgui.checkbox('Noise##enable', self.noise_enable) 53 | imgui.same_line(viz.font_size * 28.7) 54 | with imgui_utils.grayed_out(not self.noise_enable): 55 | with imgui_utils.item_width(-3 - viz.button_w - viz.spacing - viz.font_size * 4): 56 | _changed, self.noise_seed = imgui.input_int('##seed', self.noise_seed) 57 | imgui.same_line(spacing=0) 58 | _clicked, self.noise_anim = imgui.checkbox('Anim##noise', self.noise_anim) 59 | 60 | is_def_trunc = (self.trunc_psi == 1 and self.trunc_cutoff == num_ws) 61 | is_def_noise = (self.noise_enable and self.noise_seed == 0 and not self.noise_anim) 62 | with imgui_utils.grayed_out(is_def_trunc and not has_noise): 63 | imgui.same_line(imgui.get_content_region_max()[0] - 1 - viz.button_w) 64 | if imgui_utils.button('Reset', width=-1, enabled=(not is_def_trunc or not is_def_noise)): 65 | self.prev_num_ws = num_ws 66 | self.trunc_psi = 0.7 67 | self.trunc_cutoff = 7 68 | self.noise_enable = True 69 | self.noise_seed = 0 70 | self.noise_anim = False 71 | 72 | if self.noise_anim: 73 | self.noise_seed += 1 74 | viz.args.update(trunc_psi=self.trunc_psi, trunc_cutoff=self.trunc_cutoff, random_seed=self.noise_seed) 75 | viz.args.noise_mode = ('none' 
if not self.noise_enable else 'const' if self.noise_seed == 0 else 'random') 76 | 77 | #---------------------------------------------------------------------------- 78 | -------------------------------------------------------------------------------- /portrait4d/training/deformer/mesh_renderer.py: -------------------------------------------------------------------------------- 1 | # modified from Deep3DFaceRecon_pytorch: https://github.com/sicxu/Deep3DFaceRecon_pytorch 2 | # use pytorch3d as renderer instead of nvdiffrast as in Deep3DFaceRecon_pytorch 3 | 4 | import torch 5 | import torch.nn.functional as F 6 | # import kornia 7 | # from kornia.geometry.camera import pixel2cam 8 | import numpy as np 9 | from typing import List 10 | # import nvdiffrast.torch as dr 11 | from scipy.io import loadmat 12 | from torch import nn 13 | 14 | import pytorch3d.ops 15 | from pytorch3d.structures import Meshes 16 | from pytorch3d.renderer import ( 17 | look_at_view_transform, 18 | FoVPerspectiveCameras, 19 | FoVOrthographicCameras, 20 | DirectionalLights, 21 | RasterizationSettings, 22 | MeshRenderer, 23 | MeshRasterizer, 24 | SoftPhongShader, 25 | TexturesUV, 26 | ) 27 | 28 | # def ndc_projection(x=0.1, n=1.0, f=50.0): 29 | # return np.array([[n/x, 0, 0, 0], 30 | # [ 0, n/-x, 0, 0], 31 | # [ 0, 0, -(f+n)/(f-n), -(2*f*n)/(f-n)], 32 | # [ 0, 0, -1, 0]]).astype(np.float32) 33 | 34 | class MeshRenderer(nn.Module): 35 | def __init__(self, 36 | znear=0.1, 37 | zfar=10): 38 | super(MeshRenderer, self).__init__() 39 | 40 | self.znear = znear 41 | self.zfar = zfar 42 | 43 | self.rasterizer = MeshRasterizer() 44 | 45 | def forward(self, fov, rasterize_size, vertex, tri, feat=None, cull_backfaces=True, perspective=True, scale=1.0): 46 | """ 47 | Return: 48 | mask -- torch.tensor, size (B, 1, H, W) 49 | depth -- torch.tensor, size (B, 1, H, W) 50 | features(optional) -- torch.tensor, size (B, C, H, W) if feat is not None 51 | 52 | Parameters: 53 | vertex -- torch.tensor, size (B, N, 3), z axis points inward 54 | tri -- torch.tensor, size (B, M, 3) or (M, 3), triangles 55 | feat(optional) -- torch.tensor, size (B, N, C), per-vertex features 56 | """ 57 | device = vertex.device 58 | rsize = int(rasterize_size) 59 | C = feat.shape[-1] if feat is not None else 0 # feat is optional (see docstring); C is only used when feat is not None 60 | 61 | if vertex.shape[-1] == 3: 62 | vertex = torch.cat([vertex, torch.ones([*vertex.shape[:2], 1]).to(device)], dim=-1) 63 | vertex[..., 0] = -vertex[..., 0] 64 | 65 | tri = tri.type(torch.int32).contiguous() 66 | 67 | if perspective: 68 | cameras = FoVPerspectiveCameras( 69 | device=device, 70 | fov=fov, 71 | znear=self.znear, 72 | zfar=self.zfar, 73 | ) 74 | else: 75 | cameras = FoVOrthographicCameras( 76 | device=device, 77 | scale_xyz=((scale, scale, scale),), 78 | znear=self.znear, 79 | zfar=self.zfar, 80 | ) 81 | 82 | raster_settings = RasterizationSettings( 83 | cull_backfaces=cull_backfaces, 84 | image_size=rsize 85 | ) 86 | 87 | mesh = Meshes(vertex[...,:3], tri) 88 | 89 | fragments = self.rasterizer(mesh, cameras = cameras, raster_settings = raster_settings) 90 | rast_out = fragments.pix_to_face.squeeze(-1) 91 | depth = fragments.zbuf 92 | 93 | # render depth 94 | depth = depth.permute(0, 3, 1, 2) 95 | mask = (rast_out >= 0).float().unsqueeze(1) 96 | depth = mask * depth 97 | 98 | image = None 99 | if feat is not None: 100 | attributes = feat.reshape(-1,C)[mesh.faces_packed()] 101 | image = pytorch3d.ops.interpolate_face_attributes(fragments.pix_to_face, 102 | fragments.bary_coords, 103 | attributes) 104 | image = image.squeeze(-2).permute(0, 3, 1, 2) 105 | image = mask * image 106 | 
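# interpolate_face_attributes barycentrically blends the per-vertex features of
# the face covering each pixel; multiplying by `mask` zeroes out background
# pixels, mirroring how `depth` was masked above.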
107 | return mask, depth, image 108 | 109 | -------------------------------------------------------------------------------- /data_preprocess/lib/models/facerecon/cropping.py: -------------------------------------------------------------------------------- 1 | """This script contains the image preprocessing code for Deep3DFaceRecon_pytorch 2 | """ 3 | 4 | import numpy as np 5 | from scipy.io import loadmat 6 | from PIL import Image 7 | import cv2 8 | import os 9 | from skimage import transform as trans 10 | import torch 11 | import warnings 12 | warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning) 13 | warnings.filterwarnings("ignore", category=FutureWarning) 14 | 15 | 16 | # 3D landmarks of BFM mean face 17 | lm3D = np.array([ 18 | [-0.31148657, 0.09036078, 0.13377953], 19 | [ 0.30979887, 0.08972035, 0.13179526], 20 | [ 0.0032535 , -0.24617933, 0.55244243], 21 | [-0.25216928, -0.5813392 , 0.22405732], 22 | [ 0.2484662 , -0.5812824 , 0.22235769], 23 | ]) 24 | 25 | # calculating least square problem for image alignment 26 | def POS(xp, x): 27 | npts = xp.shape[1] 28 | 29 | A = np.zeros([2*npts, 8]) 30 | 31 | A[0:2*npts-1:2, 0:3] = x.transpose() 32 | A[0:2*npts-1:2, 3] = 1 33 | 34 | A[1:2*npts:2, 4:7] = x.transpose() 35 | A[1:2*npts:2, 7] = 1 36 | 37 | b = np.reshape(xp.transpose(), [2*npts, 1]) 38 | 39 | k, _, _, _ = np.linalg.lstsq(A, b) 40 | 41 | R1 = k[0:3] 42 | R2 = k[4:7] 43 | sTx = k[3] 44 | sTy = k[7] 45 | s = (np.linalg.norm(R1) + np.linalg.norm(R2))/2 46 | t = np.stack([sTx, sTy], axis=0) 47 | 48 | return t, s 49 | 50 | # resize and crop images for face reconstruction 51 | def resize_n_crop_img(img, lm, t, s, target_size=224., mask=None): 52 | w0, h0 = img.size 53 | w = (w0*s).astype(np.int32) 54 | h = (h0*s).astype(np.int32) 55 | left = (w/2 - target_size/2 + float((t[0] - w0/2)*s)).astype(np.int32) 56 | right = left + target_size 57 | up = (h/2 - target_size/2 + float((h0/2 - t[1])*s)).astype(np.int32) 58 | below = up + target_size 59 | 60 | img = img.resize((w, h), resample=Image.BILINEAR) 61 | img = img.crop((left, up, right, below)) 62 | 63 | if mask is not None: 64 | mask = mask.resize((w, h), resample=Image.BILINEAR) 65 | mask = mask.crop((left, up, right, below)) 66 | 67 | lm = np.stack([lm[:, 0] - t[0] + w0/2, lm[:, 1] - 68 | t[1] + h0/2], axis=1)*s 69 | lm = lm - np.reshape( 70 | np.array([(w/2 - target_size/2), (h/2-target_size/2)]), [1, 2]) 71 | 72 | return img, lm, mask 73 | 74 | # utils for face reconstruction 75 | def extract_5p(lm): 76 | lm_idx = np.array([31, 37, 40, 43, 46, 49, 55]) - 1 77 | lm5p = np.stack([lm[lm_idx[0], :], np.mean(lm[lm_idx[[1, 2]], :], 0), np.mean( 78 | lm[lm_idx[[3, 4]], :], 0), lm[lm_idx[5], :], lm[lm_idx[6], :]], axis=0) 79 | lm5p = lm5p[[1, 2, 0, 3, 4], :] 80 | return lm5p 81 | 82 | # utils for face reconstruction 83 | def align_img(img, lm, mask=None, target_size=224., rescale_factor=102.): 84 | """ 85 | Return: 86 | transparams --numpy.array (raw_W, raw_H, scale, tx, ty) 87 | img_new --PIL.Image (target_size, target_size, 3) 88 | lm_new --numpy.array (68, 2), y direction is opposite to v direction 89 | mask_new --PIL.Image (target_size, target_size) 90 | 91 | Parameters: 92 | img --PIL.Image (raw_H, raw_W, 3) 93 | lm --numpy.array (68, 2), y direction is opposite to v direction 94 | lm3D --numpy.array (5, 3) 95 | mask --PIL.Image (raw_H, raw_W, 3) 96 | """ 97 | 98 | w0, h0 = img.size 99 | if lm.shape[0] != 5: 100 | lm5p = extract_5p(lm) 101 | else: 102 | lm5p = lm 103 | 104 | # calculate translation and scale factors 
using 5 facial landmarks and standard landmarks of a 3D face 105 | t, s = POS(lm5p.transpose(), lm3D.transpose()) 106 | s = rescale_factor/s 107 | 108 | # processing the image 109 | img_new, lm_new, mask_new = resize_n_crop_img(img, lm, t, s, target_size=target_size, mask=mask) 110 | # print(w0, h0, s, t[0], t[1]) 111 | trans_params = np.array([w0, h0, s, t[0][0], t[1][0]]) 112 | 113 | return trans_params, img_new, lm_new, mask_new -------------------------------------------------------------------------------- /portrait4d/viz/latent_widget.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited. 10 | 11 | import numpy as np 12 | import imgui 13 | import dnnlib 14 | from gui_utils import imgui_utils 15 | 16 | #---------------------------------------------------------------------------- 17 | 18 | class LatentWidget: 19 | def __init__(self, viz): 20 | self.viz = viz 21 | self.latent = dnnlib.EasyDict(x=1, y=0, anim=False, speed=0.25) 22 | self.latent_def = dnnlib.EasyDict(self.latent) 23 | self.step_y = 100 24 | 25 | def drag(self, dx, dy): 26 | viz = self.viz 27 | self.latent.x += dx / viz.font_size * 4e-2 28 | self.latent.y += dy / viz.font_size * 4e-2 29 | 30 | @imgui_utils.scoped_by_object_id 31 | def __call__(self, show=True): 32 | viz = self.viz 33 | if show: 34 | imgui.text('Latent') 35 | imgui.same_line(viz.label_w) 36 | seed = round(self.latent.x) + round(self.latent.y) * self.step_y 37 | with imgui_utils.item_width(viz.font_size * 8): 38 | changed, seed = imgui.input_int('##seed', seed, step=0) 39 | if changed: 40 | self.latent.x = seed 41 | self.latent.y = 0 42 | imgui.same_line(viz.label_w + viz.font_size * 8 + viz.spacing) 43 | frac_x = self.latent.x - round(self.latent.x) 44 | frac_y = self.latent.y - round(self.latent.y) 45 | with imgui_utils.item_width(viz.font_size * 5): 46 | changed, (new_frac_x, new_frac_y) = imgui.input_float2('##frac', frac_x, frac_y, format='%+.2f', flags=imgui.INPUT_TEXT_ENTER_RETURNS_TRUE) 47 | if changed: 48 | self.latent.x += new_frac_x - frac_x 49 | self.latent.y += new_frac_y - frac_y 50 | imgui.same_line(viz.label_w + viz.font_size * 13 + viz.spacing * 2) 51 | _clicked, dragging, dx, dy = imgui_utils.drag_button('Drag', width=viz.button_w) 52 | if dragging: 53 | self.drag(dx, dy) 54 | imgui.same_line(viz.label_w + viz.font_size * 13 + viz.button_w + viz.spacing * 3) 55 | _clicked, self.latent.anim = imgui.checkbox('Anim', self.latent.anim) 56 | imgui.same_line(round(viz.font_size * 28.7)) 57 | with imgui_utils.item_width(-2 - viz.button_w * 2 - viz.spacing * 2), imgui_utils.grayed_out(not self.latent.anim): 58 | changed, speed = imgui.slider_float('##speed', self.latent.speed, -5, 5, format='Speed %.3f', power=3) 59 | if changed: 60 | self.latent.speed = speed 61 | imgui.same_line() 62 | snapped = dnnlib.EasyDict(self.latent, x=round(self.latent.x), y=round(self.latent.y)) 63 | if imgui_utils.button('Snap', width=viz.button_w, 
enabled=(self.latent != snapped)): 64 | self.latent = snapped 65 | imgui.same_line() 66 | if imgui_utils.button('Reset', width=-1, enabled=(self.latent != self.latent_def)): 67 | self.latent = dnnlib.EasyDict(self.latent_def) 68 | 69 | if self.latent.anim: 70 | self.latent.x += viz.frame_delta * self.latent.speed 71 | viz.args.w0_seeds = [] # [[seed, weight], ...] 72 | for ofs_x, ofs_y in [[0, 0], [1, 0], [0, 1], [1, 1]]: 73 | seed_x = np.floor(self.latent.x) + ofs_x 74 | seed_y = np.floor(self.latent.y) + ofs_y 75 | seed = (int(seed_x) + int(seed_y) * self.step_y) & ((1 << 32) - 1) 76 | weight = (1 - abs(self.latent.x - seed_x)) * (1 - abs(self.latent.y - seed_y)) 77 | if weight > 0: 78 | viz.args.w0_seeds.append([seed, weight]) 79 | 80 | #---------------------------------------------------------------------------- 81 | -------------------------------------------------------------------------------- /portrait4d/training/dataloader/protocols/datum_genhead_pb2.py: -------------------------------------------------------------------------------- 1 | # Generated by the protocol buffer compiler. DO NOT EDIT! 2 | # source: datum_genhead.proto 3 | 4 | import sys 5 | _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) 6 | from google.protobuf import descriptor as _descriptor 7 | from google.protobuf import message as _message 8 | from google.protobuf import reflection as _reflection 9 | from google.protobuf import symbol_database as _symbol_database 10 | from google.protobuf import descriptor_pb2 11 | # @@protoc_insertion_point(imports) 12 | 13 | _sym_db = _symbol_database.Default() 14 | 15 | 16 | 17 | 18 | DESCRIPTOR = _descriptor.FileDescriptor( 19 | name='datum_genhead.proto', 20 | package='', 21 | syntax='proto2', 22 | serialized_pb=_b('\n\x13\x64\x61tum_genhead.proto\"z\n\rDatum_genhead\x12\x10\n\x08\x63hannels\x18\x01 \x01(\x05\x12\x0e\n\x06height\x18\x02 \x01(\x05\x12\r\n\x05width\x18\x03 \x01(\x05\x12\r\n\x05image\x18\x04 \x01(\x0c\x12\x0b\n\x03seg\x18\x05 \x01(\x0c\x12\x0e\n\x06labels\x18\x06 \x01(\x0c\x12\x0c\n\x04mots\x18\x07 \x01(\x0c') 23 | ) 24 | _sym_db.RegisterFileDescriptor(DESCRIPTOR) 25 | 26 | 27 | 28 | 29 | _DATUM_GENHEAD = _descriptor.Descriptor( 30 | name='Datum_genhead', 31 | full_name='Datum_genhead', 32 | filename=None, 33 | file=DESCRIPTOR, 34 | containing_type=None, 35 | fields=[ 36 | _descriptor.FieldDescriptor( 37 | name='channels', full_name='Datum_genhead.channels', index=0, 38 | number=1, type=5, cpp_type=1, label=1, 39 | has_default_value=False, default_value=0, 40 | message_type=None, enum_type=None, containing_type=None, 41 | is_extension=False, extension_scope=None, 42 | options=None), 43 | _descriptor.FieldDescriptor( 44 | name='height', full_name='Datum_genhead.height', index=1, 45 | number=2, type=5, cpp_type=1, label=1, 46 | has_default_value=False, default_value=0, 47 | message_type=None, enum_type=None, containing_type=None, 48 | is_extension=False, extension_scope=None, 49 | options=None), 50 | _descriptor.FieldDescriptor( 51 | name='width', full_name='Datum_genhead.width', index=2, 52 | number=3, type=5, cpp_type=1, label=1, 53 | has_default_value=False, default_value=0, 54 | message_type=None, enum_type=None, containing_type=None, 55 | is_extension=False, extension_scope=None, 56 | options=None), 57 | _descriptor.FieldDescriptor( 58 | name='image', full_name='Datum_genhead.image', index=3, 59 | number=4, type=12, cpp_type=9, label=1, 60 | has_default_value=False, default_value=_b(""), 61 | message_type=None, enum_type=None, 
containing_type=None, 62 | is_extension=False, extension_scope=None, 63 | options=None), 64 | _descriptor.FieldDescriptor( 65 | name='seg', full_name='Datum_genhead.seg', index=4, 66 | number=5, type=12, cpp_type=9, label=1, 67 | has_default_value=False, default_value=_b(""), 68 | message_type=None, enum_type=None, containing_type=None, 69 | is_extension=False, extension_scope=None, 70 | options=None), 71 | _descriptor.FieldDescriptor( 72 | name='labels', full_name='Datum_genhead.labels', index=5, 73 | number=6, type=12, cpp_type=9, label=1, 74 | has_default_value=False, default_value=_b(""), 75 | message_type=None, enum_type=None, containing_type=None, 76 | is_extension=False, extension_scope=None, 77 | options=None), 78 | _descriptor.FieldDescriptor( 79 | name='mots', full_name='Datum_genhead.mots', index=6, 80 | number=7, type=12, cpp_type=9, label=1, 81 | has_default_value=False, default_value=_b(""), 82 | message_type=None, enum_type=None, containing_type=None, 83 | is_extension=False, extension_scope=None, 84 | options=None), 85 | ], 86 | extensions=[ 87 | ], 88 | nested_types=[], 89 | enum_types=[ 90 | ], 91 | options=None, 92 | is_extendable=False, 93 | syntax='proto2', 94 | extension_ranges=[], 95 | oneofs=[ 96 | ], 97 | serialized_start=23, 98 | serialized_end=145, 99 | ) 100 | 101 | DESCRIPTOR.message_types_by_name['Datum_genhead'] = _DATUM_GENHEAD 102 | 103 | Datum_genhead = _reflection.GeneratedProtocolMessageType('Datum_genhead', (_message.Message,), dict( 104 | DESCRIPTOR = _DATUM_GENHEAD, 105 | __module__ = 'datum_genhead_pb2' 106 | # @@protoc_insertion_point(class_scope:Datum_genhead) 107 | )) 108 | _sym_db.RegisterMessage(Datum_genhead) 109 | 110 | 111 | # @@protoc_insertion_point(module_scope) 112 | -------------------------------------------------------------------------------- /portrait4d/gui_utils/imgui_window.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited. 10 | 11 | import os 12 | import imgui 13 | import imgui.integrations.glfw 14 | 15 | from . import glfw_window 16 | from . import imgui_utils 17 | from . import text_utils 18 | 19 | #---------------------------------------------------------------------------- 20 | 21 | class ImguiWindow(glfw_window.GlfwWindow): 22 | def __init__(self, *, title='ImguiWindow', font=None, font_sizes=range(14,24), **glfw_kwargs): 23 | if font is None: 24 | font = text_utils.get_default_font() 25 | font_sizes = {int(size) for size in font_sizes} 26 | super().__init__(title=title, **glfw_kwargs) 27 | 28 | # Init fields. 29 | self._imgui_context = None 30 | self._imgui_renderer = None 31 | self._imgui_fonts = None 32 | self._cur_font_size = max(font_sizes) 33 | 34 | # Delete leftover imgui.ini to avoid unexpected behavior. 35 | if os.path.isfile('imgui.ini'): 36 | os.remove('imgui.ini') 37 | 38 | # Init ImGui. 
39 | self._imgui_context = imgui.create_context() 40 | self._imgui_renderer = _GlfwRenderer(self._glfw_window) 41 | self._attach_glfw_callbacks() 42 | imgui.get_io().ini_saving_rate = 0 # Disable creating imgui.ini at runtime. 43 | imgui.get_io().mouse_drag_threshold = 0 # Improve behavior with imgui_utils.drag_custom(). 44 | self._imgui_fonts = {size: imgui.get_io().fonts.add_font_from_file_ttf(font, size) for size in font_sizes} 45 | self._imgui_renderer.refresh_font_texture() 46 | 47 | def close(self): 48 | self.make_context_current() 49 | self._imgui_fonts = None 50 | if self._imgui_renderer is not None: 51 | self._imgui_renderer.shutdown() 52 | self._imgui_renderer = None 53 | if self._imgui_context is not None: 54 | #imgui.destroy_context(self._imgui_context) # Commented out to avoid creating imgui.ini at the end. 55 | self._imgui_context = None 56 | super().close() 57 | 58 | def _glfw_key_callback(self, *args): 59 | super()._glfw_key_callback(*args) 60 | self._imgui_renderer.keyboard_callback(*args) 61 | 62 | @property 63 | def font_size(self): 64 | return self._cur_font_size 65 | 66 | @property 67 | def spacing(self): 68 | return round(self._cur_font_size * 0.4) 69 | 70 | def set_font_size(self, target): # Applied on next frame. 71 | self._cur_font_size = min((abs(key - target), key) for key in self._imgui_fonts.keys())[1] 72 | 73 | def begin_frame(self): 74 | # Begin glfw frame. 75 | super().begin_frame() 76 | 77 | # Process imgui events. 78 | self._imgui_renderer.mouse_wheel_multiplier = self._cur_font_size / 10 79 | if self.content_width > 0 and self.content_height > 0: 80 | self._imgui_renderer.process_inputs() 81 | 82 | # Begin imgui frame. 83 | imgui.new_frame() 84 | imgui.push_font(self._imgui_fonts[self._cur_font_size]) 85 | imgui_utils.set_default_style(spacing=self.spacing, indent=self.font_size, scrollbar=self.font_size+4) 86 | 87 | def end_frame(self): 88 | imgui.pop_font() 89 | imgui.render() 90 | imgui.end_frame() 91 | self._imgui_renderer.render(imgui.get_draw_data()) 92 | super().end_frame() 93 | 94 | #---------------------------------------------------------------------------- 95 | # Wrapper class for GlfwRenderer to fix a mouse wheel bug on Linux. 96 | 97 | class _GlfwRenderer(imgui.integrations.glfw.GlfwRenderer): 98 | def __init__(self, *args, **kwargs): 99 | super().__init__(*args, **kwargs) 100 | self.mouse_wheel_multiplier = 1 101 | 102 | def scroll_callback(self, window, x_offset, y_offset): 103 | self.io.mouse_wheel += y_offset * self.mouse_wheel_multiplier 104 | 105 | #---------------------------------------------------------------------------- 106 | -------------------------------------------------------------------------------- /portrait4d/viz/pose_widget.py: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited. 
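# Pose widget: drag/snap/reset control over camera yaw and pitch, plus a
# look-at-point selector (auto-detect, FFHQ/ShapeNet/AFHQ presets, or manual
# xyz) whose values are written into viz.args each frame.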
10 | 11 | import numpy as np 12 | import imgui 13 | import dnnlib 14 | from gui_utils import imgui_utils 15 | 16 | #---------------------------------------------------------------------------- 17 | 18 | class PoseWidget: 19 | def __init__(self, viz): 20 | self.viz = viz 21 | self.pose = dnnlib.EasyDict(yaw=0, pitch=0, anim=False, speed=0.25) 22 | self.pose_def = dnnlib.EasyDict(self.pose) 23 | 24 | self.lookat_point_choice = 0 25 | self.lookat_point_option = ['auto', 'ffhq', 'shapenet', 'afhq', 'manual'] 26 | self.lookat_point_labels = ['Auto Detect', 'FFHQ Default', 'Shapenet Default', 'AFHQ Default', 'Manual'] 27 | self.lookat_point = (0.0, 0.0, 0.2) 28 | 29 | def drag(self, dx, dy): 30 | viz = self.viz 31 | self.pose.yaw += -dx / viz.font_size * 3e-2 32 | self.pose.pitch += -dy / viz.font_size * 3e-2 33 | 34 | @imgui_utils.scoped_by_object_id 35 | def __call__(self, show=True): 36 | viz = self.viz 37 | if show: 38 | imgui.text('Pose') 39 | imgui.same_line(viz.label_w) 40 | yaw = self.pose.yaw 41 | pitch = self.pose.pitch 42 | with imgui_utils.item_width(viz.font_size * 5): 43 | changed, (new_yaw, new_pitch) = imgui.input_float2('##pose', yaw, pitch, format='%+.2f', flags=imgui.INPUT_TEXT_ENTER_RETURNS_TRUE) 44 | if changed: 45 | self.pose.yaw = new_yaw 46 | self.pose.pitch = new_pitch 47 | imgui.same_line(viz.label_w + viz.font_size * 13 + viz.spacing * 2) 48 | _clicked, dragging, dx, dy = imgui_utils.drag_button('Drag', width=viz.button_w) 49 | if dragging: 50 | self.drag(dx, dy) 51 | imgui.same_line() 52 | snapped = dnnlib.EasyDict(self.pose, yaw=round(self.pose.yaw, 1), pitch=round(self.pose.pitch, 1)) 53 | if imgui_utils.button('Snap', width=viz.button_w, enabled=(self.pose != snapped)): 54 | self.pose = snapped 55 | imgui.same_line() 56 | if imgui_utils.button('Reset', width=-1, enabled=(self.pose != self.pose_def)): 57 | self.pose = dnnlib.EasyDict(self.pose_def) 58 | 59 | # New line starts here 60 | imgui.text('LookAt Point') 61 | imgui.same_line(viz.label_w) 62 | with imgui_utils.item_width(viz.font_size * 8): 63 | _clicked, self.lookat_point_choice = imgui.combo('', self.lookat_point_choice, self.lookat_point_labels) 64 | lookat_point = self.lookat_point_option[self.lookat_point_choice] 65 | if lookat_point == 'auto': 66 | self.lookat_point = None 67 | if lookat_point == 'ffhq': 68 | self.lookat_point = (0.0, 0.0, 0.2) 69 | changes_enabled=False 70 | if lookat_point == 'shapenet': 71 | self.lookat_point = (0.0, 0.0, 0.0) 72 | changes_enabled=False 73 | if lookat_point == 'afhq': 74 | self.lookat_point = (0.0, 0.0, 0.0) 75 | changes_enabled=False 76 | if lookat_point == 'manual': 77 | if self.lookat_point is None: 78 | self.lookat_point = (0.0, 0.0, 0.0) 79 | changes_enabled=True 80 | if lookat_point != 'auto': 81 | imgui.same_line(viz.label_w + viz.font_size * 13 + viz.spacing * 2) 82 | with imgui_utils.item_width(viz.font_size * 16): 83 | with imgui_utils.grayed_out(not changes_enabled): 84 | _changed, self.lookat_point = imgui.input_float3('##lookat', *self.lookat_point, format='%.2f', flags=(imgui.INPUT_TEXT_READ_ONLY if not changes_enabled else 0)) 85 | 86 | 87 | viz.args.yaw = self.pose.yaw 88 | viz.args.pitch = self.pose.pitch 89 | 90 | viz.args.lookat_point = self.lookat_point 91 | 92 | #---------------------------------------------------------------------------- 93 | -------------------------------------------------------------------------------- /portrait4d/shape_utils.py: -------------------------------------------------------------------------------- 1 | # 
SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 | # SPDX-License-Identifier: LicenseRef-NvidiaProprietary 3 | # 4 | # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 5 | # property and proprietary rights in and to this material, related 6 | # documentation and any modifications thereto. Any use, reproduction, 7 | # disclosure or distribution of this material and related documentation 8 | # without an express license agreement from NVIDIA CORPORATION or 9 | # its affiliates is strictly prohibited. 10 | 11 | 12 | """ 13 | Utils for extracting 3D shapes using marching cubes. Based on code from DeepSDF (Park et al.) 14 | 15 | Takes as input an .mrc file and extracts a mesh. 16 | 17 | Ex. 18 | python shape_utils.py my_shape.mrc 19 | Ex. 20 | python shape_utils.py myshapes_directory --level=12 21 | """ 22 | 23 | 24 | import time 25 | import plyfile 26 | import glob 27 | import logging 28 | import numpy as np 29 | import os 30 | import random 31 | import torch 32 | import torch.utils.data 33 | import trimesh 34 | import skimage.measure 35 | import argparse 36 | import mrcfile 37 | from tqdm import tqdm 38 | 39 | 40 | def convert_sdf_samples_to_ply( 41 | numpy_3d_sdf_tensor, 42 | voxel_grid_origin, 43 | voxel_size, 44 | ply_filename_out, 45 | offset=None, 46 | scale=None, 47 | level=0.0 48 | ): 49 | """ 50 | Convert sdf samples to .ply 51 | :param numpy_3d_sdf_tensor: a numpy.ndarray of shape (n,n,n) 52 | :voxel_grid_origin: a list of three floats: the bottom, left, down origin of the voxel grid 53 | :voxel_size: float, the size of the voxels 54 | :ply_filename_out: string, path of the filename to save to 55 | This function adapted from: https://github.com/RobotLocomotion/spartan 56 | """ 57 | start_time = time.time() 58 | 59 | verts, faces, normals, values = np.zeros((0, 3)), np.zeros((0, 3)), np.zeros((0, 3)), np.zeros(0) 60 | # try: 61 | verts, faces, normals, values = skimage.measure.marching_cubes( 62 | numpy_3d_sdf_tensor, level=level, spacing=[voxel_size] * 3 63 | ) 64 | # except: 65 | # pass 66 | 67 | # transform from voxel coordinates to camera coordinates 68 | # note x and y are flipped in the output of marching_cubes 69 | mesh_points = np.zeros_like(verts) 70 | mesh_points[:, 0] = voxel_grid_origin[0] + verts[:, 0] 71 | mesh_points[:, 1] = voxel_grid_origin[1] + verts[:, 1] 72 | mesh_points[:, 2] = voxel_grid_origin[2] + verts[:, 2] 73 | 74 | # apply additional offset and scale 75 | if scale is not None: 76 | mesh_points = mesh_points / scale 77 | if offset is not None: 78 | mesh_points = mesh_points - offset 79 | 80 | # try writing to the ply file 81 | 82 | num_verts = verts.shape[0] 83 | num_faces = faces.shape[0] 84 | 85 | verts_tuple = np.zeros((num_verts,), dtype=[("x", "f4"), ("y", "f4"), ("z", "f4")]) 86 | 87 | for i in range(0, num_verts): 88 | verts_tuple[i] = tuple(mesh_points[i, :]) 89 | 90 | faces_building = [] 91 | for i in range(0, num_faces): 92 | faces_building.append(((faces[i, ::-1].tolist(),))) 93 | faces_tuple = np.array(faces_building, dtype=[("vertex_indices", "i4", (3,))]) 94 | 95 | el_verts = plyfile.PlyElement.describe(verts_tuple, "vertex") 96 | el_faces = plyfile.PlyElement.describe(faces_tuple, "face") 97 | 98 | ply_data = plyfile.PlyData([el_verts, el_faces]) 99 | ply_data.write(ply_filename_out) 100 | print(f"wrote to {ply_filename_out}") 101 | 102 | 
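# A minimal usage sketch for convert_sdf_samples_to_ply (illustrative only, not
# part of the original pipeline): extract the zero level set of a synthetic
# signed-distance field of a sphere sampled on a 64^3 grid spanning [-1, 1]^3.
#
#   N = 64
#   xs = np.linspace(-1, 1, N)
#   x, y, z = np.meshgrid(xs, xs, xs, indexing='ij')
#   sdf = np.sqrt(x**2 + y**2 + z**2) - 0.5  # zero level set: sphere of radius 0.5
#   convert_sdf_samples_to_ply(sdf.astype(np.float32), voxel_grid_origin=[-1.0, -1.0, -1.0],
#                              voxel_size=2.0 / (N - 1), ply_filename_out='sphere.ply', level=0.0)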
103 | def convert_mrc(input_filename, output_filename, isosurface_level=1): 104 | with mrcfile.open(input_filename) as mrc: 105 | convert_sdf_samples_to_ply(np.transpose(mrc.data, (2, 1, 0)), [0, 0, 0], 1, output_filename, level=isosurface_level) 106 | 107 | if __name__ == '__main__': 108 | start_time = time.time() 109 | parser = argparse.ArgumentParser() 110 | parser.add_argument('input_mrc_path') 111 | parser.add_argument('--level', type=float, default=10, help="The isosurface level for marching cubes") 112 | args = parser.parse_args() 113 | 114 | if os.path.isfile(args.input_mrc_path) and args.input_mrc_path.split('.')[-1] == 'mrc': 115 | output_obj_path = args.input_mrc_path.split('.mrc')[0] + '.ply' 116 | convert_mrc(args.input_mrc_path, output_obj_path, isosurface_level=args.level) 117 | 118 | print(f"{time.time() - start_time:02f} s") 119 | else: 120 | assert os.path.isdir(args.input_mrc_path) 121 | 122 | for mrc_path in tqdm(glob.glob(os.path.join(args.input_mrc_path, '*.mrc'))): 123 | output_obj_path = mrc_path.split('.mrc')[0] + '.ply' 124 | convert_mrc(mrc_path, output_obj_path, isosurface_level=args.level) -------------------------------------------------------------------------------- /portrait4d/torch_utils/ops/filtered_lrelu.h: -------------------------------------------------------------------------------- 1 | /* 2 | * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 3 | * SPDX-License-Identifier: LicenseRef-NvidiaProprietary 4 | * 5 | * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual 6 | * property and proprietary rights in and to this material, related 7 | * documentation and any modifications thereto. Any use, reproduction, 8 | * disclosure or distribution of this material and related documentation 9 | * without an express license agreement from NVIDIA CORPORATION or 10 | * its affiliates is strictly prohibited. 11 | */ 12 | 13 | #include <cuda_runtime.h> 14 | 15 | //------------------------------------------------------------------------ 16 | // CUDA kernel parameters. 17 | 18 | struct filtered_lrelu_kernel_params 19 | { 20 | // These parameters decide which kernel to use. 21 | int up; // upsampling ratio (1, 2, 4) 22 | int down; // downsampling ratio (1, 2, 4) 23 | int2 fuShape; // [size, 1] | [size, size] 24 | int2 fdShape; // [size, 1] | [size, size] 25 | 26 | int _dummy; // Alignment. 27 | 28 | // Rest of the parameters. 29 | const void* x; // Input tensor. 30 | void* y; // Output tensor. 31 | const void* b; // Bias tensor. 32 | unsigned char* s; // Sign tensor in/out. NULL if unused. 33 | const float* fu; // Upsampling filter. 34 | const float* fd; // Downsampling filter. 35 | 36 | int2 pad0; // Left/top padding. 37 | float gain; // Additional gain factor. 38 | float slope; // Leaky ReLU slope on negative side. 39 | float clamp; // Clamp after nonlinearity. 40 | int flip; // Filter kernel flip for gradient computation. 41 | 42 | int tilesXdim; // Original number of horizontal output tiles. 43 | int tilesXrep; // Number of horizontal tiles per CTA. 44 | int blockZofs; // Block z offset to support large minibatch, channel dimensions. 45 | 46 | int4 xShape; // [width, height, channel, batch] 47 | int4 yShape; // [width, height, channel, batch] 48 | int2 sShape; // [width, height] - width is in bytes. Contiguous. Zeros if unused. 49 | int2 sOfs; // [ofs_x, ofs_y] - offset between upsampled data and sign tensor. 50 | int swLimit; // Active width of sign tensor in bytes. 51 | 52 | longlong4 xStride; // Strides of all tensors except signs, same component order as shapes. 
53 | longlong4 yStride; // 54 | int64_t bStride; // 55 | longlong3 fuStride; // 56 | longlong3 fdStride; // 57 | }; 58 | 59 | struct filtered_lrelu_act_kernel_params 60 | { 61 | void* x; // Input/output, modified in-place. 62 | unsigned char* s; // Sign tensor in/out. NULL if unused. 63 | 64 | float gain; // Additional gain factor. 65 | float slope; // Leaky ReLU slope on negative side. 66 | float clamp; // Clamp after nonlinearity. 67 | 68 | int4 xShape; // [width, height, channel, batch] 69 | longlong4 xStride; // Input/output tensor strides, same order as in shape. 70 | int2 sShape; // [width, height] - width is in elements. Contiguous. Zeros if unused. 71 | int2 sOfs; // [ofs_x, ofs_y] - offset between upsampled data and sign tensor. 72 | }; 73 | 74 | //------------------------------------------------------------------------ 75 | // CUDA kernel specialization. 76 | 77 | struct filtered_lrelu_kernel_spec 78 | { 79 | void* setup; // Function for filter kernel setup. 80 | void* exec; // Function for main operation. 81 | int2 tileOut; // Width/height of launch tile. 82 | int numWarps; // Number of warps per thread block, determines launch block size. 83 | int xrep; // For processing multiple horizontal tiles per thread block. 84 | int dynamicSharedKB; // How much dynamic shared memory the exec kernel wants. 85 | }; 86 | 87 | //------------------------------------------------------------------------ 88 | // CUDA kernel selection. 89 | 90 | template <class T, class index_t, bool signWrite, bool signRead> filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); 91 | template <class T, bool signWrite, bool signRead> void* choose_filtered_lrelu_act_kernel(void); 92 | template <bool signWrite> cudaError_t copy_filters(cudaStream_t stream); 93 | 94 | //------------------------------------------------------------------------ 95 | -------------------------------------------------------------------------------- /portrait4d/training/dataloader/protocols/datum_portrait_ffhq_pb2.py: -------------------------------------------------------------------------------- 1 | # Generated by the protocol buffer compiler. DO NOT EDIT! 
--------------------------------------------------------------------------------
/portrait4d/training/dataloader/protocols/datum_portrait_ffhq_pb2.py:
--------------------------------------------------------------------------------
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: datum_portrait_ffhq.proto

import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()




DESCRIPTOR = _descriptor.FileDescriptor(
  name='datum_portrait_ffhq.proto',
  package='',
  syntax='proto2',
  serialized_pb=_b('\n\x19\x64\x61tum_portrait_ffhq.proto\"\x86\x01\n\nDatum_ffhq\x12\x10\n\x08\x63hannels\x18\x01 \x01(\x05\x12\x0e\n\x06height\x18\x02 \x01(\x05\x12\r\n\x05width\x18\x03 \x01(\x05\x12\x0b\n\x03num\x18\x04 \x01(\x05\x12\x0e\n\x06images\x18\x05 \x01(\x0c\x12\x0c\n\x04segs\x18\x06 \x01(\x0c\x12\x0e\n\x06labels\x18\x07 \x01(\x0c\x12\x0c\n\x04mots\x18\x08 \x01(\x0c')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)




_DATUM_FFHQ = _descriptor.Descriptor(
  name='Datum_ffhq',
  full_name='Datum_ffhq',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='channels', full_name='Datum_ffhq.channels', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='height', full_name='Datum_ffhq.height', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='width', full_name='Datum_ffhq.width', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='num', full_name='Datum_ffhq.num', index=3,
      number=4, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='images', full_name='Datum_ffhq.images', index=4,
      number=5, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='segs', full_name='Datum_ffhq.segs', index=5,
      number=6, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='labels', full_name='Datum_ffhq.labels', index=6,
      number=7, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='mots', full_name='Datum_ffhq.mots', index=7,
      number=8, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=30,
  serialized_end=164,
)

DESCRIPTOR.message_types_by_name['Datum_ffhq'] = _DATUM_FFHQ

Datum_ffhq = _reflection.GeneratedProtocolMessageType('Datum_ffhq', (_message.Message,), dict(
  DESCRIPTOR = _DATUM_FFHQ,
  __module__ = 'datum_portrait_ffhq_pb2'
  # @@protoc_insertion_point(class_scope:Datum_ffhq)
  ))
_sym_db.RegisterMessage(Datum_ffhq)


# @@protoc_insertion_point(module_scope)
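
A hypothetical round-trip through the generated message (editor's illustration; the real byte layout of images/segs/labels/mots is whatever the dataloader writes, an NHWC uint8 layout is assumed here only for demonstration, and the import path depends on how the package is laid out):

# --- Editor's illustrative sketch, not part of the original file ---
import numpy as np
from datum_portrait_ffhq_pb2 import Datum_ffhq

# Pack one fake sample into the message.
img = np.zeros((1, 512, 512, 3), dtype=np.uint8)

datum = Datum_ffhq()
# Cast to plain int: some protobuf versions reject numpy integer scalars.
datum.num, datum.height, datum.width, datum.channels = map(int, img.shape)
datum.images = img.tobytes()          # field 5, proto type `bytes`
payload = datum.SerializeToString()   # e.g. one record in a key/value store

# ...and unpack it again on the loader side.
parsed = Datum_ffhq()
parsed.ParseFromString(payload)
restored = np.frombuffer(parsed.images, dtype=np.uint8).reshape(
    parsed.num, parsed.height, parsed.width, parsed.channels)
assert restored.shape == img.shape

--------------------------------------------------------------------------------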