├── README.md ├── cross_view_v2 ├── 256_256 │ ├── README.md │ ├── data │ │ ├── data.lua │ │ ├── dataset.lua │ │ └── donkey_folder.lua │ ├── models.lua │ ├── scripts │ │ └── download_plus_model.sh │ ├── test_fork.lua │ ├── test_pix2pix.lua │ ├── test_segmap_seq.lua │ ├── test_seq.lua │ ├── train_fork.lua │ ├── train_pix2pix.lua │ ├── train_seq.lua │ └── util │ │ ├── cudnn_convert_custom.lua │ │ └── util.lua ├── 64_64 │ ├── README.md │ ├── data │ │ ├── data.lua │ │ ├── dataset.lua │ │ └── donkey_folder.lua │ ├── models.lua │ ├── scripts │ │ └── download_plus_model.sh │ ├── test_fork.lua │ ├── test_pix2pix.lua │ ├── test_segmap_seq.lua │ ├── test_seq.lua │ ├── train_fork.lua │ ├── train_pix2pix.lua │ ├── train_seq.lua │ └── util │ │ ├── cudnn_convert_custom.lua │ │ └── util.lua └── README.md ├── gaugan_pix2pixhd_guided ├── data │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-37.pyc │ │ ├── aligned_dataset.cpython-37.pyc │ │ ├── base_dataset.cpython-37.pyc │ │ ├── coco_dataset.cpython-37.pyc │ │ ├── custom_dataset.cpython-37.pyc │ │ └── image_folder.cpython-37.pyc │ ├── aligned_dataset.py │ ├── base_dataset.py │ ├── custom_dataset.py │ ├── image_folder.py │ └── pix2pix_dataset.py ├── models │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-37.pyc │ │ └── pix2pix_model.cpython-37.pyc │ ├── networks │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── architecture.cpython-37.pyc │ │ │ ├── base_network.cpython-37.pyc │ │ │ ├── discriminator.cpython-37.pyc │ │ │ ├── encoder.cpython-37.pyc │ │ │ ├── generator.cpython-37.pyc │ │ │ ├── loss.cpython-37.pyc │ │ │ └── normalization.cpython-37.pyc │ │ ├── architecture.py │ │ ├── base_network.py │ │ ├── discriminator.py │ │ ├── encoder.py │ │ ├── generator.py │ │ ├── loss.py │ │ └── normalization.py │ └── pix2pix_model.py ├── options │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-37.pyc │ │ ├── base_options.cpython-37.pyc │ │ ├── test_options.cpython-37.pyc │ │ 
└── train_options.cpython-37.pyc │ ├── base_options.py │ ├── test_options.py │ └── train_options.py ├── scripts │ └── download_gaugan_pix2pixhd_model.sh ├── test.py ├── test_ntu.sh ├── test_ntu_pix2pixhd.sh ├── train.py ├── train_ntu.sh ├── train_ntu_pix2pixhd.sh ├── train_rafd.sh ├── train_rafd_pix2pixhd.sh ├── train_sva.sh ├── train_sva_pix2pixhd.sh ├── trainers │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-37.pyc │ │ └── pix2pix_trainer.cpython-37.pyc │ └── pix2pix_trainer.py └── util │ ├── __init__.py │ ├── __pycache__ │ ├── __init__.cpython-37.pyc │ ├── coco.cpython-37.pyc │ ├── html.cpython-37.pyc │ ├── iter_counter.cpython-37.pyc │ ├── util.cpython-37.pyc │ └── visualizer.cpython-37.pyc │ ├── coco.py │ ├── html.py │ ├── iter_counter.py │ ├── util.py │ └── visualizer.py ├── imgs └── motivation.jpg ├── person_transfer ├── README.md ├── data │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-36.pyc │ │ ├── base_data_loader.cpython-36.pyc │ │ ├── base_dataset.cpython-36.pyc │ │ ├── custom_dataset_data_loader.cpython-36.pyc │ │ ├── data_loader.cpython-36.pyc │ │ ├── image_folder.cpython-36.pyc │ │ └── keypoint.cpython-36.pyc │ ├── base_data_loader.py │ ├── base_dataset.py │ ├── custom_dataset_data_loader.py │ ├── data_loader.py │ ├── image_folder.py │ └── keypoint.py ├── datasets │ └── download_selectiongan_dataset.sh ├── fashion │ └── rename.py ├── losses │ ├── L1_plus_perceptualLoss.py │ ├── __init__.py │ ├── __pycache__ │ │ ├── L1_plus_perceptualLoss.cpython-36.pyc │ │ ├── __init__.cpython-36.pyc │ │ └── ssim.cpython-36.pyc │ └── ssim.py ├── models │ ├── PATN.py │ ├── __init__.py │ ├── __pycache__ │ │ ├── PATN.cpython-36.pyc │ │ ├── __init__.cpython-36.pyc │ │ ├── base_model.cpython-36.pyc │ │ ├── model_variants.cpython-36.pyc │ │ ├── models.cpython-36.pyc │ │ └── networks.cpython-36.pyc │ ├── base_model.py │ ├── model_variants.py │ ├── models.py │ ├── networks.py │ └── test_model.py ├── options │ ├── __init__.py │ ├── __pycache__ │ │ 
├── __init__.cpython-36.pyc │ │ ├── base_options.cpython-36.pyc │ │ ├── test_options.cpython-36.pyc │ │ └── train_options.cpython-36.pyc │ ├── base_options.py │ ├── test_options.py │ └── train_options.py ├── scripts │ └── download_selectiongan_model.sh ├── ssd_score │ ├── __init__.py │ ├── compute_ssd_score_fashion.py │ ├── compute_ssd_score_market.py │ └── deploy.prototxt ├── test.py ├── test_fashion.sh ├── test_market.sh ├── tool │ ├── __pycache__ │ │ └── inception_score.cpython-36.pyc │ ├── calPCKH_fashion.py │ ├── calPCKH_market.py │ ├── cmd.py │ ├── compute_coordinates.py │ ├── crop_fashion.py │ ├── crop_market.py │ ├── generate_fashion_datasets.py │ ├── generate_pose_map_fashion.py │ ├── generate_pose_map_market.py │ ├── getMetrics_fashion.py │ ├── getMetrics_market.py │ ├── inception_score.py │ ├── pose_utils.py │ ├── resize_fashion.py │ └── rm_insnorm_running_vars.py ├── train.py ├── train_fashion.sh ├── train_market.sh └── util │ ├── __init__.py │ ├── __pycache__ │ ├── __init__.cpython-36.pyc │ ├── html.cpython-36.pyc │ ├── image_pool.cpython-36.pyc │ ├── util.cpython-36.pyc │ └── visualizer.cpython-36.pyc │ ├── get_data.py │ ├── html.py │ ├── image_pool.py │ ├── png.py │ ├── util.py │ └── visualizer.py ├── selectiongan_v1 ├── LICENSE.md ├── README.md ├── data │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-36.pyc │ │ ├── __init__.cpython-37.pyc │ │ ├── aligned_dataset.cpython-36.pyc │ │ ├── aligned_dataset.cpython-37.pyc │ │ ├── base_data_loader.cpython-36.pyc │ │ ├── base_data_loader.cpython-37.pyc │ │ ├── base_dataset.cpython-36.pyc │ │ ├── base_dataset.cpython-37.pyc │ │ ├── image_folder.cpython-36.pyc │ │ ├── image_folder.cpython-37.pyc │ │ └── single_dataset.cpython-36.pyc │ ├── aligned_dataset.py │ ├── base_data_loader.py │ ├── base_dataset.py │ └── image_folder.py ├── datasets │ ├── dayton_ablation_split │ │ ├── dayton_ablation_test.txt │ │ └── dayton_ablation_train.txt │ ├── dayton_split │ │ ├── dayton_test.txt │ │ └── 
dayton_train.txt │ ├── download_selectiongan_dataset.sh │ ├── samples │ │ ├── cvusa │ │ │ ├── test │ │ │ │ ├── 0000002.jpg │ │ │ │ ├── 0000730.jpg │ │ │ │ ├── 0001780.jpg │ │ │ │ └── 0002792.jpg │ │ │ └── train │ │ │ │ ├── 0000777.jpg │ │ │ │ ├── 0002067.jpg │ │ │ │ ├── 0002664.jpg │ │ │ │ └── 0003158.jpg │ │ ├── dayton │ │ │ ├── test │ │ │ │ ├── --oXLKfmtepqJ5OHQ84jZg.x832.y508.a-86.a2g.png │ │ │ │ ├── -19hmUez9cLpI3Sq1-HXJw.x107.y483.a-42.a2g.png │ │ │ │ ├── 08RqiB4xvi3kCBsCB85FQA.x503.y406.a154.a2g.png │ │ │ │ └── 0APOIZqVHuFXTlUyk1Zdqw.x1148.y423.a-108.a2g.png │ │ │ └── train │ │ │ │ ├── --oXLKfmtepqJ5OHQ84jZg.x1066.y499.a-36.a2g.png │ │ │ │ ├── -2SMmA-p4vE0f7f1wcVc7Q.x355.y438.a40.a2g.png │ │ │ │ ├── -AZ2_ts7HTdh_SghAzJCmQ.x706.y453.a142.a2g.png │ │ │ │ └── -Ab8tr0-y0kZrAbMq8iUPA.x910.y482.a-86.a2g.png │ │ └── ego2top │ │ │ ├── test │ │ │ ├── Case16_Egocentric_5_00138_Egocentric_5_00298.png │ │ │ ├── Case19_Egocentric_5_00264_TopView_00426.png │ │ │ ├── Case24_Egocentric_1_00001_Egocentric_4_00304.png │ │ │ ├── Case29_Egocentric_5_00249_Egocentric_6_00150.png │ │ │ ├── Case39_Egocentric_1_00481_Egocentric_5_00424.png │ │ │ ├── Case44_Egocentric_1_00161_TopView_00451.png │ │ │ ├── Case46_Egocentric_4_00008_Egocentric_5_00339.png │ │ │ └── Case9_Egocentric_4_01326_Egocentric_6_00202.png │ │ │ └── train │ │ │ ├── Case10_Egocentric_1_00000_Egocentric_1_01059.png │ │ │ ├── Case10_Egocentric_3_00040_Egocentric_5_00374.png │ │ │ ├── Case4_Egocentric_2_01243_Egocentric_5_01485.png │ │ │ └── Case9_TopView_02108_TopView_02148.png │ └── sva_split │ │ ├── sva_test.txt │ │ └── sva_train.txt ├── imgs │ ├── SelectionGAN.gif │ ├── SelectionGAN.png │ ├── framework.jpg │ ├── method.jpg │ └── supp_dayton_a2g.jpg ├── models │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-36.pyc │ │ ├── __init__.cpython-37.pyc │ │ ├── base_model.cpython-36.pyc │ │ ├── base_model.cpython-37.pyc │ │ ├── cycle_gan_model.cpython-36.pyc │ │ ├── networks.cpython-36.pyc │ │ ├── 
networks.cpython-37.pyc │ │ ├── pix2pix_model.cpython-36.pyc │ │ ├── selectiongan_model.cpython-37.pyc │ │ └── test_model.cpython-36.pyc │ ├── base_model.py │ ├── networks.py │ └── selectiongan_model.py ├── options │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-36.pyc │ │ ├── __init__.cpython-37.pyc │ │ ├── base_options.cpython-36.pyc │ │ ├── base_options.cpython-37.pyc │ │ ├── test_options.cpython-36.pyc │ │ ├── test_options.cpython-37.pyc │ │ ├── train_options.cpython-36.pyc │ │ └── train_options.cpython-37.pyc │ ├── base_options.py │ ├── test_options.py │ └── train_options.py ├── requirements.txt ├── scripts │ ├── Image_ids.txt │ ├── change_order.m │ ├── conda_deps.sh │ ├── convert_semantic_map_cvusa.m │ ├── cvusa_prepare.m │ ├── download_selectiongan_model.sh │ ├── evaluation │ │ ├── KL_model_data.py │ │ ├── calculate_LPIPS.m │ │ ├── compute_accuracies.py │ │ ├── compute_ssim_psnr_sharpness.lua │ │ ├── compute_topK_KL.py │ │ ├── my_image_error_measures.lua │ │ └── split_real_fake.m │ ├── script.txt │ └── split_real_fake.m ├── test.py ├── test_selectiongan.sh ├── train.py ├── train_selectiongan.sh └── util │ ├── __init__.py │ ├── __pycache__ │ ├── __init__.cpython-36.pyc │ ├── __init__.cpython-37.pyc │ ├── html.cpython-36.pyc │ ├── html.cpython-37.pyc │ ├── image_pool.cpython-36.pyc │ ├── image_pool.cpython-37.pyc │ ├── util.cpython-36.pyc │ ├── util.cpython-37.pyc │ ├── visualizer.cpython-36.pyc │ └── visualizer.cpython-37.pyc │ ├── get_data.py │ ├── html.py │ ├── image_pool.py │ ├── util.py │ └── visualizer.py ├── selectiongan_v2 ├── LICENSE.md ├── README.md ├── data │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-36.pyc │ │ ├── __init__.cpython-37.pyc │ │ ├── aligned_dataset.cpython-36.pyc │ │ ├── aligned_dataset.cpython-37.pyc │ │ ├── base_data_loader.cpython-36.pyc │ │ ├── base_data_loader.cpython-37.pyc │ │ ├── base_dataset.cpython-36.pyc │ │ ├── base_dataset.cpython-37.pyc │ │ ├── image_folder.cpython-36.pyc │ │ ├── 
image_folder.cpython-37.pyc │ │ └── single_dataset.cpython-36.pyc │ ├── aligned_dataset.py │ ├── base_data_loader.py │ ├── base_dataset.py │ └── image_folder.py ├── models │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-36.pyc │ │ ├── __init__.cpython-37.pyc │ │ ├── base_model.cpython-36.pyc │ │ ├── base_model.cpython-37.pyc │ │ ├── cycle_gan_model.cpython-36.pyc │ │ ├── networks.cpython-36.pyc │ │ ├── networks.cpython-37.pyc │ │ ├── pix2pix_model.cpython-36.pyc │ │ ├── selectiongan_model.cpython-36.pyc │ │ ├── selectiongan_model.cpython-37.pyc │ │ └── test_model.cpython-36.pyc │ ├── base_model.py │ ├── networks.py │ └── selectiongan_model.py ├── options │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-36.pyc │ │ ├── __init__.cpython-37.pyc │ │ ├── base_options.cpython-36.pyc │ │ ├── base_options.cpython-37.pyc │ │ ├── test_options.cpython-36.pyc │ │ ├── test_options.cpython-37.pyc │ │ ├── train_options.cpython-36.pyc │ │ └── train_options.cpython-37.pyc │ ├── base_options.py │ ├── test_options.py │ └── train_options.py ├── requirements.txt ├── scripts │ ├── conda_deps.sh │ └── download_selectiongan_model.sh ├── test.py ├── test.sh ├── train.py ├── train.sh └── util │ ├── __init__.py │ ├── __pycache__ │ ├── __init__.cpython-36.pyc │ ├── __init__.cpython-37.pyc │ ├── html.cpython-36.pyc │ ├── html.cpython-37.pyc │ ├── image_pool.cpython-36.pyc │ ├── image_pool.cpython-37.pyc │ ├── util.cpython-36.pyc │ ├── util.cpython-37.pyc │ ├── visualizer.cpython-36.pyc │ └── visualizer.cpython-37.pyc │ ├── get_data.py │ ├── html.py │ ├── image_pool.py │ ├── util.py │ └── visualizer.py └── semantic_synthesis ├── README.md ├── data ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-36.pyc │ ├── __init__.cpython-37.pyc │ ├── ade20k_dataset.cpython-36.pyc │ ├── ade20k_dataset.cpython-37.pyc │ ├── base_dataset.cpython-36.pyc │ ├── base_dataset.cpython-37.pyc │ ├── cityscapes_dataset.cpython-36.pyc │ ├── image_folder.cpython-36.pyc │ ├── 
image_folder.cpython-37.pyc │ ├── pix2pix_dataset.cpython-36.pyc │ └── pix2pix_dataset.cpython-37.pyc ├── ade20k_dataset.py ├── base_dataset.py ├── cityscapes_dataset.py ├── coco_dataset.py ├── custom_dataset.py ├── facades_dataset.py ├── image_folder.py └── pix2pix_dataset.py ├── datasets ├── coco_generate_instance_map.py └── coco_stuff │ ├── train_img │ ├── 000000017914.jpg │ ├── 000000029286.jpg │ ├── 000000138805.jpg │ ├── 000000184101.jpg │ ├── 000000197384.jpg │ ├── 000000203744.jpg │ ├── 000000284465.jpg │ ├── 000000350505.jpg │ ├── 000000371376.jpg │ ├── 000000426773.jpg │ ├── 000000475177.jpg │ ├── 000000500044.jpg │ └── 000000580986.jpg │ ├── train_inst │ ├── 000000017914.png │ ├── 000000029286.png │ ├── 000000138805.png │ ├── 000000184101.png │ ├── 000000197384.png │ ├── 000000203744.png │ ├── 000000284465.png │ ├── 000000350505.png │ ├── 000000371376.png │ ├── 000000426773.png │ ├── 000000475177.png │ ├── 000000500044.png │ └── 000000580986.png │ ├── train_label │ ├── 000000017914.png │ ├── 000000029286.png │ ├── 000000138805.png │ ├── 000000184101.png │ ├── 000000197384.png │ ├── 000000203744.png │ ├── 000000284465.png │ ├── 000000350505.png │ ├── 000000371376.png │ ├── 000000426773.png │ ├── 000000475177.png │ ├── 000000500044.png │ └── 000000580986.png │ ├── val_img │ ├── 000000000139.jpg │ ├── 000000000785.jpg │ ├── 000000001268.jpg │ ├── 000000001490.jpg │ ├── 000000001503.jpg │ ├── 000000001584.jpg │ ├── 000000001818.jpg │ └── 000000001993.jpg │ ├── val_inst │ ├── 000000000139.png │ ├── 000000000785.png │ ├── 000000001268.png │ ├── 000000001490.png │ ├── 000000001503.png │ ├── 000000001584.png │ ├── 000000001818.png │ └── 000000001993.png │ └── val_label │ ├── 000000000139.png │ ├── 000000000785.png │ ├── 000000001268.png │ ├── 000000001490.png │ ├── 000000001503.png │ ├── 000000001584.png │ ├── 000000001818.png │ └── 000000001993.png ├── models ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-36.pyc │ ├── __init__.cpython-37.pyc │ ├── 
pix2pix_model.cpython-36.pyc │ └── pix2pix_model.cpython-37.pyc ├── networks │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-36.pyc │ │ ├── __init__.cpython-37.pyc │ │ ├── architecture.cpython-36.pyc │ │ ├── architecture.cpython-37.pyc │ │ ├── base_network.cpython-36.pyc │ │ ├── base_network.cpython-37.pyc │ │ ├── discriminator.cpython-36.pyc │ │ ├── discriminator.cpython-37.pyc │ │ ├── encoder.cpython-36.pyc │ │ ├── encoder.cpython-37.pyc │ │ ├── generator.cpython-36.pyc │ │ ├── generator.cpython-37.pyc │ │ ├── loss.cpython-36.pyc │ │ ├── loss.cpython-37.pyc │ │ ├── normalization.cpython-36.pyc │ │ └── normalization.cpython-37.pyc │ ├── architecture.py │ ├── base_network.py │ ├── discriminator.py │ ├── encoder.py │ ├── generator.py │ ├── loss.py │ └── normalization.py └── pix2pix_model.py ├── options ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-36.pyc │ ├── __init__.cpython-37.pyc │ ├── base_options.cpython-36.pyc │ ├── base_options.cpython-37.pyc │ ├── test_options.cpython-36.pyc │ ├── train_options.cpython-36.pyc │ └── train_options.cpython-37.pyc ├── base_options.py ├── test_options.py └── train_options.py ├── requirements.txt ├── scripts └── download_selectiongan_model.sh ├── test.py ├── test.sh ├── train.py ├── train.sh ├── trainers ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-36.pyc │ ├── __init__.cpython-37.pyc │ ├── pix2pix_trainer.cpython-36.pyc │ └── pix2pix_trainer.cpython-37.pyc └── pix2pix_trainer.py └── util ├── __init__.py ├── __pycache__ ├── __init__.cpython-36.pyc ├── __init__.cpython-37.pyc ├── coco.cpython-36.pyc ├── coco.cpython-37.pyc ├── html.cpython-36.pyc ├── html.cpython-37.pyc ├── iter_counter.cpython-36.pyc ├── iter_counter.cpython-37.pyc ├── util.cpython-36.pyc ├── util.cpython-37.pyc ├── visualizer.cpython-36.pyc └── visualizer.cpython-37.pyc ├── coco.py ├── html.py ├── iter_counter.py ├── util.py └── visualizer.py /cross_view_v2/256_256/scripts/download_plus_model.sh: 
-------------------------------------------------------------------------------- 1 | FILE=$1 2 | 3 | echo "Note: available models are cvusa_fork_plus, cvusa_pix2pix_plus, cvusa_seq_plus, dayton_a2g_fork_plus, dayton_a2g_seq_plus, dayton_a2g_pix2pix_plus, dayton_g2a_pix2pix_plus, dayton_g2a_seq_plus, dayton_g2a_fork_plus, ego2top_fork_plus, ego2top_seq_plus, ego2top_pix2pix_plus, sva_pix2pix_plus, sva_seq_plus and sva_fork_plus" 4 | echo "Specified [$FILE]" 5 | 6 | URL=http://disi.unitn.it/~hao.tang/uploads/models/SelectionGAN/${FILE}_pretrained.tar.gz 7 | TAR_FILE=./checkpoints/${FILE}_pretrained.tar.gz 8 | TARGET_DIR=./checkpoints/${FILE}_pretrained/ 9 | 10 | wget -N $URL -O $TAR_FILE 11 | 12 | mkdir -p $TARGET_DIR 13 | tar -zxvf $TAR_FILE -C ./checkpoints/ 14 | rm $TAR_FILE -------------------------------------------------------------------------------- /cross_view_v2/64_64/scripts/download_plus_model.sh: -------------------------------------------------------------------------------- 1 | FILE=$1 2 | 3 | echo "Note: available models are dayton_a2g_fork_plus_64, dayton_a2g_seq_plus_64, dayton_a2g_pix2pix_plus_64, dayton_g2a_fork_plus_64, dayton_g2a_seq_plus_64 and dayton_g2a_pix2pix_plus_64" 4 | echo "Specified [$FILE]" 5 | 6 | URL=http://disi.unitn.it/~hao.tang/uploads/models/SelectionGAN/${FILE}_pretrained.tar.gz 7 | TAR_FILE=./checkpoints/${FILE}_pretrained.tar.gz 8 | TARGET_DIR=./checkpoints/${FILE}_pretrained/ 9 | 10 | wget -N $URL -O $TAR_FILE 11 | 12 | mkdir -p $TARGET_DIR 13 | tar -zxvf $TAR_FILE -C ./checkpoints/ 14 | rm $TAR_FILE -------------------------------------------------------------------------------- /cross_view_v2/README.md: -------------------------------------------------------------------------------- 1 | This code aims to handle cross-view image translation task with the combination of RGB images and target semantic maps as inputs. 
2 | - `256_256`: 256 * 256 experiments 3 | - `64_64`: 64 * 64 experiments 4 | -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/data/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 4 | """ 5 | 6 | import importlib 7 | import torch.utils.data 8 | from data.base_dataset import BaseDataset 9 | 10 | 11 | def find_dataset_using_name(dataset_name): 12 | # Given the option --dataset [datasetname], 13 | # the file "datasets/datasetname_dataset.py" 14 | # will be imported. 15 | dataset_filename = "data." + dataset_name + "_dataset" 16 | datasetlib = importlib.import_module(dataset_filename) 17 | 18 | # In the file, the class called DatasetNameDataset() will 19 | # be instantiated. It has to be a subclass of BaseDataset, 20 | # and it is case-insensitive. 21 | dataset = None 22 | target_dataset_name = dataset_name.replace('_', '') + 'dataset' 23 | for name, cls in datasetlib.__dict__.items(): 24 | if name.lower() == target_dataset_name.lower() \ 25 | and issubclass(cls, BaseDataset): 26 | dataset = cls 27 | 28 | if dataset is None: 29 | raise ValueError("In %s.py, there should be a subclass of BaseDataset " 30 | "with class name that matches %s in lowercase." 
% 31 | (dataset_filename, target_dataset_name)) 32 | 33 | return dataset 34 | 35 | 36 | def get_option_setter(dataset_name): 37 | dataset_class = find_dataset_using_name(dataset_name) 38 | return dataset_class.modify_commandline_options 39 | 40 | 41 | def create_dataloader(opt): 42 | dataset = find_dataset_using_name(opt.dataset_mode) 43 | instance = dataset() 44 | instance.initialize(opt) 45 | print("dataset [%s] of size %d was created" % 46 | (type(instance).__name__, len(instance))) 47 | dataloader = torch.utils.data.DataLoader( 48 | instance, 49 | batch_size=opt.batchSize, 50 | shuffle=not opt.serial_batches, 51 | num_workers=int(opt.nThreads), 52 | drop_last=opt.isTrain 53 | ) 54 | return dataloader 55 | -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/data/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/gaugan_pix2pixhd_guided/data/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/data/__pycache__/aligned_dataset.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/gaugan_pix2pixhd_guided/data/__pycache__/aligned_dataset.cpython-37.pyc -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/data/__pycache__/base_dataset.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/gaugan_pix2pixhd_guided/data/__pycache__/base_dataset.cpython-37.pyc -------------------------------------------------------------------------------- 
/gaugan_pix2pixhd_guided/data/__pycache__/coco_dataset.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/gaugan_pix2pixhd_guided/data/__pycache__/coco_dataset.cpython-37.pyc -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/data/__pycache__/custom_dataset.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/gaugan_pix2pixhd_guided/data/__pycache__/custom_dataset.cpython-37.pyc -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/data/__pycache__/image_folder.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/gaugan_pix2pixhd_guided/data/__pycache__/image_folder.cpython-37.pyc -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/data/custom_dataset.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 4 | """ 5 | 6 | from data.aligned_dataset import AlignedDataset 7 | from data.image_folder import make_dataset 8 | 9 | 10 | class CustomDataset(AlignedDataset): 11 | """ Dataset that loads images from directories 12 | Use option --label_dir, --image_dir, --instance_dir to specify the directories. 13 | The images in the directories are sorted in alphabetical order and paired in order. 
14 | """ 15 | 16 | @staticmethod 17 | def modify_commandline_options(parser, is_train): 18 | parser = AlignedDataset.modify_commandline_options(parser, is_train) 19 | parser.set_defaults(preprocess_mode='resize_and_crop') 20 | load_size = 286 if is_train else 256 21 | parser.set_defaults(load_size=load_size) 22 | parser.set_defaults(crop_size=256) 23 | parser.set_defaults(display_winsize=256) 24 | parser.set_defaults(label_nc=13) 25 | parser.set_defaults(contain_dontcare_label=False) 26 | 27 | parser.add_argument('--image_dir', type=str, required=True, 28 | help='path to the directory that contains photo images') 29 | return parser 30 | 31 | def get_paths(self, opt): 32 | image_dir = opt.image_dir 33 | image_paths = make_dataset(image_dir, recursive=False, read_cache=True) 34 | 35 | return image_paths 36 | -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/models/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 4 | """ 5 | 6 | import importlib 7 | import torch 8 | 9 | 10 | def find_model_using_name(model_name): 11 | # Given the option --model [modelname], 12 | # the file "models/modelname_model.py" 13 | # will be imported. 14 | model_filename = "models." + model_name + "_model" 15 | modellib = importlib.import_module(model_filename) 16 | 17 | # In the file, the class called ModelNameModel() will 18 | # be instantiated. It has to be a subclass of torch.nn.Module, 19 | # and it is case-insensitive. 
20 | model = None 21 | target_model_name = model_name.replace('_', '') + 'model' 22 | for name, cls in modellib.__dict__.items(): 23 | if name.lower() == target_model_name.lower() \ 24 | and issubclass(cls, torch.nn.Module): 25 | model = cls 26 | 27 | if model is None: 28 | print("In %s.py, there should be a subclass of torch.nn.Module with class name that matches %s in lowercase." % (model_filename, target_model_name)) 29 | exit(0) 30 | 31 | return model 32 | 33 | 34 | def get_option_setter(model_name): 35 | model_class = find_model_using_name(model_name) 36 | return model_class.modify_commandline_options 37 | 38 | 39 | def create_model(opt): 40 | model = find_model_using_name(opt.model) 41 | instance = model(opt) 42 | print("model [%s] was created" % (type(instance).__name__)) 43 | 44 | return instance 45 | -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/models/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/gaugan_pix2pixhd_guided/models/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/models/__pycache__/pix2pix_model.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/gaugan_pix2pixhd_guided/models/__pycache__/pix2pix_model.cpython-37.pyc -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/models/networks/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 
3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 4 | """ 5 | 6 | import torch 7 | from models.networks.base_network import BaseNetwork 8 | from models.networks.loss import * 9 | from models.networks.discriminator import * 10 | from models.networks.generator import * 11 | from models.networks.encoder import * 12 | import util.util as util 13 | 14 | 15 | def find_network_using_name(target_network_name, filename): 16 | target_class_name = target_network_name + filename 17 | module_name = 'models.networks.' + filename 18 | network = util.find_class_in_module(target_class_name, module_name) 19 | 20 | assert issubclass(network, BaseNetwork), \ 21 | "Class %s should be a subclass of BaseNetwork" % network 22 | 23 | return network 24 | 25 | 26 | def modify_commandline_options(parser, is_train): 27 | opt, _ = parser.parse_known_args() 28 | 29 | netG_cls = find_network_using_name(opt.netG, 'generator') 30 | parser = netG_cls.modify_commandline_options(parser, is_train) 31 | if is_train: 32 | netD_cls = find_network_using_name(opt.netD, 'discriminator') 33 | parser = netD_cls.modify_commandline_options(parser, is_train) 34 | netE_cls = find_network_using_name('conv', 'encoder') 35 | parser = netE_cls.modify_commandline_options(parser, is_train) 36 | 37 | return parser 38 | 39 | 40 | def create_network(cls, opt): 41 | net = cls(opt) 42 | net.print_network() 43 | if len(opt.gpu_ids) > 0: 44 | assert(torch.cuda.is_available()) 45 | net.cuda() 46 | net.init_weights(opt.init_type, opt.init_variance) 47 | return net 48 | 49 | 50 | def define_G(opt): 51 | netG_cls = find_network_using_name(opt.netG, 'generator') 52 | return create_network(netG_cls, opt) 53 | 54 | 55 | def define_D(opt): 56 | netD_cls = find_network_using_name(opt.netD, 'discriminator') 57 | return create_network(netD_cls, opt) 58 | 59 | 60 | def define_E(opt): 61 | # there exists only one encoder type 62 | netE_cls = find_network_using_name('conv', 
'encoder') 63 | return create_network(netE_cls, opt) 64 | -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/models/networks/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/gaugan_pix2pixhd_guided/models/networks/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/models/networks/__pycache__/architecture.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/gaugan_pix2pixhd_guided/models/networks/__pycache__/architecture.cpython-37.pyc -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/models/networks/__pycache__/base_network.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/gaugan_pix2pixhd_guided/models/networks/__pycache__/base_network.cpython-37.pyc -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/models/networks/__pycache__/discriminator.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/gaugan_pix2pixhd_guided/models/networks/__pycache__/discriminator.cpython-37.pyc -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/models/networks/__pycache__/encoder.cpython-37.pyc: -------------------------------------------------------------------------------- 
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""

import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from models.networks.base_network import BaseNetwork
from models.networks.normalization import get_nonspade_norm_layer


class ConvEncoder(BaseNetwork):
    """ Same architecture as the image discriminator """

    def __init__(self, opt):
        super().__init__()

        # 3x3 kernels with "same"-style padding: pw = ceil((3-1)/2) = 1.
        kw = 3
        pw = int(np.ceil((kw - 1.0) / 2))
        ndf = opt.ngf
        # Wraps each conv with the norm specified by opt.norm_E (e.g. spectral/instance).
        norm_layer = get_nonspade_norm_layer(opt, opt.norm_E)
        # Five stride-2 convs halve spatial size each time: 256 -> 128 -> 64 -> 32 -> 16 -> 8.
        self.layer1 = norm_layer(nn.Conv2d(3, ndf, kw, stride=2, padding=pw))
        self.layer2 = norm_layer(nn.Conv2d(ndf * 1, ndf * 2, kw, stride=2, padding=pw))
        self.layer3 = norm_layer(nn.Conv2d(ndf * 2, ndf * 4, kw, stride=2, padding=pw))
        self.layer4 = norm_layer(nn.Conv2d(ndf * 4, ndf * 8, kw, stride=2, padding=pw))
        self.layer5 = norm_layer(nn.Conv2d(ndf * 8, ndf * 8, kw, stride=2, padding=pw))
        if opt.crop_size >= 256:
            # Sixth downsampling brings 8 -> 4, matching s0 = 4 below.
            self.layer6 = norm_layer(nn.Conv2d(ndf * 8, ndf * 8, kw, stride=2, padding=pw))

        # Flattened spatial size assumed by the fully-connected heads.
        # NOTE(review): s0 = 4 is only correct on the 6-layer path (crop_size >= 256).
        # Since forward() always resizes input to 256x256, a config with
        # crop_size < 256 runs only 5 convs (spatial 8x8) and x.view() would feed
        # ndf*8*8*8 features into a Linear expecting ndf*8*4*4 — confirm crop_size
        # is always >= 256 when the VAE encoder is enabled.
        self.so = s0 = 4
        self.fc_mu = nn.Linear(ndf * 8 * s0 * s0, 256)
        self.fc_var = nn.Linear(ndf * 8 * s0 * s0, 256)

        # LeakyReLU(negative_slope=0.2), not in-place.
        self.actvn = nn.LeakyReLU(0.2, False)
        self.opt = opt

    def forward(self, x):
        # Force a fixed 256x256 input so the flattened feature size is constant.
        if x.size(2) != 256 or x.size(3) != 256:
            x = F.interpolate(x, size=(256, 256), mode='bilinear')

        # Activation is applied *before* each conv from layer2 onward.
        x = self.layer1(x)
        x = self.layer2(self.actvn(x))
        x = self.layer3(self.actvn(x))
        x = self.layer4(self.actvn(x))
        x = self.layer5(self.actvn(x))
        if self.opt.crop_size >= 256:
            x = self.layer6(self.actvn(x))
        x = self.actvn(x)

        # Flatten per-sample features and project to 256-d mean / log-variance
        # vectors (names fc_mu / fc_var; presumably consumed by a VAE
        # reparameterization step elsewhere — confirm against the caller).
        x = x.view(x.size(0), -1)
        mu = self.fc_mu(x)
        logvar = self.fc_var(x)

        return mu, logvar
Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 4 | """ -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/options/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/gaugan_pix2pixhd_guided/options/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/options/__pycache__/base_options.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/gaugan_pix2pixhd_guided/options/__pycache__/base_options.cpython-37.pyc -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/options/__pycache__/test_options.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/gaugan_pix2pixhd_guided/options/__pycache__/test_options.cpython-37.pyc -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/options/__pycache__/train_options.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/gaugan_pix2pixhd_guided/options/__pycache__/train_options.cpython-37.pyc -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/options/test_options.py: -------------------------------------------------------------------------------- 
1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 4 | """ 5 | 6 | from .base_options import BaseOptions 7 | 8 | 9 | class TestOptions(BaseOptions): 10 | def initialize(self, parser): 11 | BaseOptions.initialize(self, parser) 12 | parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.') 13 | parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model') 14 | parser.add_argument('--how_many', type=int, default=float("inf"), help='how many test images to run') 15 | 16 | parser.set_defaults(preprocess_mode='scale_width_and_crop', crop_size=256, load_size=256, display_winsize=256) 17 | parser.set_defaults(serial_batches=True) 18 | parser.set_defaults(no_flip=True) 19 | parser.set_defaults(phase='test') 20 | self.isTrain = False 21 | return parser 22 | -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/scripts/download_gaugan_pix2pixhd_model.sh: -------------------------------------------------------------------------------- 1 | FILE=$1 2 | 3 | echo "Note: available models are gaugan_ntu, gaugan_rafd, gaugan_sva, pix2pixhd_ntu, pix2pixhd_rafd, pix2pixhd_sva" 4 | echo "Specified [$FILE]" 5 | 6 | URL=http://disi.unitn.it/~hao.tang/uploads/models/SelectionGAN/${FILE}_pretrained.tar.gz 7 | TAR_FILE=./checkpoints/${FILE}_pretrained.tar.gz 8 | TARGET_DIR=./checkpoints/${FILE}_pretrained/ 9 | 10 | wget -N $URL -O $TAR_FILE 11 | 12 | mkdir -p $TARGET_DIR 13 | tar -zxvf $TAR_FILE -C ./checkpoints/ 14 | rm $TAR_FILE 15 | -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/test.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 
3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 4 | """ 5 | 6 | import os 7 | from collections import OrderedDict 8 | 9 | import data 10 | from options.test_options import TestOptions 11 | from models.pix2pix_model import Pix2PixModel 12 | from util.visualizer import Visualizer 13 | from util import html 14 | 15 | opt = TestOptions().parse() 16 | 17 | dataloader = data.create_dataloader(opt) 18 | 19 | model = Pix2PixModel(opt) 20 | model.eval() 21 | 22 | visualizer = Visualizer(opt) 23 | 24 | # create a webpage that summarizes the all results 25 | web_dir = os.path.join(opt.results_dir, opt.name, 26 | '%s_%s' % (opt.phase, opt.which_epoch)) 27 | webpage = html.HTML(web_dir, 28 | 'Experiment = %s, Phase = %s, Epoch = %s' % 29 | (opt.name, opt.phase, opt.which_epoch)) 30 | 31 | # test 32 | for i, data_i in enumerate(dataloader): 33 | if i * opt.batchSize >= opt.how_many: 34 | break 35 | 36 | generated, real_image = model(data_i, mode='inference') 37 | 38 | img_path = data_i['path'] 39 | for b in range(generated.shape[0]): 40 | print('process image... 
%s' % img_path[b]) 41 | visuals = OrderedDict([('input_label', data_i['label'][b]), 42 | ('synthesized_image', generated[b]), 43 | ('real_image', real_image[b])]) 44 | visualizer.save_images(webpage, visuals, img_path[b:b + 1]) 45 | 46 | webpage.save() 47 | -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/test_ntu.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=4; 2 | python test.py --name gaugan_gI2I_ntu --dataset_mode custom --image_dir ./GestureGAN/datasets/ntu/test --gpu_ids 0 --batchSize 24 --label_nc 3 --no_instance --load_size 256 --crop_size 256 --checkpoints_dir ./checkpoints --use_vae --how_many 1000000000 3 | -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/test_ntu_pix2pixhd.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=7; 2 | python test.py --name pix2pixhd_ntu --dataset_mode custom --image_dir ./GestureGAN/datasets/ntu/test --gpu_ids 0 --netG pix2pixhd --batchSize 24 --label_nc 3 --no_instance --load_size 256 --crop_size 256 --checkpoints_dir ./checkpoints --use_vae --how_many 1000000000 3 | -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/train_ntu.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=4,7; 2 | python train.py --name gaugan_ntu --dataset_mode custom --image_dir ./GestureGAN/datasets/ntu/train --niter 10 --niter_decay 10 --gpu_ids 0,1 --checkpoints_dir ./checkpoints --batchSize 24 --save_epoch_freq 100 --save_latest_freq 1000 --label_nc 3 --no_instance --load_size 256 --crop_size 256 --use_vae 3 | # --continue_train 4 | -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/train_ntu_pix2pixhd.sh: 
-------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=7; 2 | python train.py --name pix2pixhd_gI2I_ntu --dataset_mode custom --image_dir ./GestureGAN/datasets/ntu/train --niter 10 --niter_decay 10 --gpu_ids 0 --netG pix2pixhd --checkpoints_dir ./checkpoints --batchSize 24 --save_epoch_freq 100 --save_latest_freq 1000 --label_nc 3 --no_instance --load_size 256 --crop_size 256 --use_vae 3 | # --continue_train 4 | -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/train_rafd.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=1; 2 | python train.py --name gaugan_gI2I_rafd --dataset_mode custom --image_dir ./SelectionGAN/selectiongan_v1/datasets/Radboud_selectiongan/train --niter 100 --niter_decay 100 --gpu_ids 0 --checkpoints_dir ./checkpoints --batchSize 16 --save_epoch_freq 50 --save_latest_freq 1000 --label_nc 3 --no_instance --load_size 256 --crop_size 256 --use_vae 3 | 4 | -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/train_rafd_pix2pixhd.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=1; 2 | python train.py --name pix2pixhd_gI2I_rafd --dataset_mode custom --image_dir ./SelectionGAN/selectiongan_v1/datasets/Radboud_selectiongan/train --niter 100 --niter_decay 100 --gpu_ids 0 --netG pix2pixhd --checkpoints_dir ./checkpoints --batchSize 32 --save_epoch_freq 50 --save_latest_freq 1000 --label_nc 3 --no_instance --load_size 256 --crop_size 256 --use_vae 3 | # --continue_train 4 | 5 | -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/train_sva.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0; 2 | python train.py --name gaugan_gI2I_sva 
--dataset_mode custom --image_dir ./SelectionGAN/selectiongan_v1/datasets/sva/train --niter 10 --niter_decay 10 --gpu_ids 0 --checkpoints_dir ./checkpoints --batchSize 16 --save_epoch_freq 10 --save_latest_freq 1000 --label_nc 3 --no_instance --load_size 256 --crop_size 256 --use_vae 3 | 4 | -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/train_sva_pix2pixhd.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0; 2 | python train.py --name pix2pixhd_gI2I_sva --dataset_mode custom --image_dir ./SelectionGAN/selectiongan_v1/datasets/sva/train --netG pix2pixhd --niter 10 --niter_decay 10 --gpu_ids 0 --checkpoints_dir ./checkpoints --batchSize 32 --save_epoch_freq 10 --save_latest_freq 1000 --label_nc 3 --no_instance --load_size 256 --crop_size 256 --use_vae 3 | # --continue_train 4 | 5 | -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/trainers/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 
4 | """ 5 | -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/trainers/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/gaugan_pix2pixhd_guided/trainers/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/trainers/__pycache__/pix2pix_trainer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/gaugan_pix2pixhd_guided/trainers/__pycache__/pix2pix_trainer.cpython-37.pyc -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/util/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 
4 | """ 5 | -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/util/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/gaugan_pix2pixhd_guided/util/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/util/__pycache__/coco.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/gaugan_pix2pixhd_guided/util/__pycache__/coco.cpython-37.pyc -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/util/__pycache__/html.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/gaugan_pix2pixhd_guided/util/__pycache__/html.cpython-37.pyc -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/util/__pycache__/iter_counter.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/gaugan_pix2pixhd_guided/util/__pycache__/iter_counter.cpython-37.pyc -------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/util/__pycache__/util.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/gaugan_pix2pixhd_guided/util/__pycache__/util.cpython-37.pyc 
-------------------------------------------------------------------------------- /gaugan_pix2pixhd_guided/util/__pycache__/visualizer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/gaugan_pix2pixhd_guided/util/__pycache__/visualizer.cpython-37.pyc -------------------------------------------------------------------------------- /imgs/motivation.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/imgs/motivation.jpg -------------------------------------------------------------------------------- /person_transfer/README.md: -------------------------------------------------------------------------------- 1 | # Person Image Generation 2 | Code for person image generation. This is a PyTorch implementation for pose transfer on both the Market1501 and DeepFashion datasets. 3 | 4 | ## Requirement 5 | * pytorch 1.0.1 6 | * torchvision 7 | * dominate 8 | * Others 9 | 10 | ## Getting Started 11 | ### Installation 12 | 13 | - Clone this repo: 14 | ```bash 15 | git clone https://github.com/Ha0Tang/SelectionGAN 16 | cd SelectionGAN 17 | cd person_transfer 18 | ``` 19 | 20 | ### Data Preparation 21 | 22 | We use [OpenPose](https://github.com/ZheC/Realtime_Multi-Person_Pose_Estimation) to generate keypoints. We also provide the prepared images for convenience.
23 | 24 | #### Market1501 25 | ```bash 26 | sh datasets/download_selectiongan_dataset.sh market_data 27 | ``` 28 | 29 | #### DeepFashion 30 | ```bash 31 | sh datasets/download_selectiongan_dataset.sh fashion_data 32 | ``` 33 | 34 | ### Training 35 | Market-1501 36 | ```bash 37 | sh train_market.sh 38 | ``` 39 | 40 | DeepFashion 41 | ```bash 42 | sh train_fashion.sh 43 | ``` 44 | 45 | ### Testing 46 | Market1501 47 | ```bash 48 | sh test_market.sh 49 | ``` 50 | DeepFashion 51 | ```bash 52 | sh test_fashion.sh 53 | ``` 54 | 55 | ### Pretrained Models 56 | Market1501 57 | ```bash 58 | sh scripts/download_selectiongan_model.sh market 59 | ``` 60 | 61 | DeepFashion 62 | ```bash 63 | sh scripts/download_selectiongan_model.sh fashion 64 | ``` 65 | 66 | ### Evaluation 67 | Follow [Pose-Transfer](https://github.com/tengteng95/Pose-Transfer) for more details. 68 | -------------------------------------------------------------------------------- /person_transfer/data/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/data/__init__.py -------------------------------------------------------------------------------- /person_transfer/data/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/data/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /person_transfer/data/__pycache__/base_data_loader.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/data/__pycache__/base_data_loader.cpython-36.pyc 
-------------------------------------------------------------------------------- /person_transfer/data/__pycache__/base_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/data/__pycache__/base_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /person_transfer/data/__pycache__/custom_dataset_data_loader.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/data/__pycache__/custom_dataset_data_loader.cpython-36.pyc -------------------------------------------------------------------------------- /person_transfer/data/__pycache__/data_loader.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/data/__pycache__/data_loader.cpython-36.pyc -------------------------------------------------------------------------------- /person_transfer/data/__pycache__/image_folder.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/data/__pycache__/image_folder.cpython-36.pyc -------------------------------------------------------------------------------- /person_transfer/data/__pycache__/keypoint.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/data/__pycache__/keypoint.cpython-36.pyc 
class BaseDataLoader():
    """Minimal data-loader interface; concrete loaders override initialize()
    and load_data() (see CustomDatasetDataLoader)."""

    def __init__(self):
        pass

    def initialize(self, opt):
        # Keep the options object around so subclasses can read settings
        # such as max_dataset_size.
        self.opt = opt

    def load_data(self):
        # Bug fix: the original declaration was `def load_data():` with no
        # `self`, so calling loader.load_data() on an instance raised
        # TypeError. Subclasses override this to return the iterable wrapper;
        # the base implementation has nothing to return.
        return None
class CustomDatasetDataLoader(BaseDataLoader):
    """Wraps the dataset chosen by CreateDataset in a torch DataLoader and
    caps iteration at opt.max_dataset_size batches."""

    def name(self):
        return 'CustomDatasetDataLoader'

    def initialize(self, opt):
        BaseDataLoader.initialize(self, opt)
        self.dataset = CreateDataset(opt)
        # Shuffle unless deterministic (serial) batch order was requested.
        self.dataloader = torch.utils.data.DataLoader(
            self.dataset,
            num_workers=int(opt.nThreads),
            batch_size=opt.batchSize,
            shuffle=not opt.serial_batches)

    def load_data(self):
        # The loader object is itself iterable, so hand it back directly.
        return self

    def __len__(self):
        return min(len(self.dataset), self.opt.max_dataset_size)

    def __iter__(self):
        for batch_idx, batch in enumerate(self.dataloader):
            if batch_idx >= self.opt.max_dataset_size:
                return
            yield batch
###############################################################################
# Code from
# https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py
# Modified the original code so that it also loads images from the current
# directory as well as the subdirectories
###############################################################################

import torch.utils.data as data

from PIL import Image
import os
import os.path

IMG_EXTENSIONS = [
    '.jpg', '.JPG', '.jpeg', '.JPEG',
    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]


def is_image_file(filename):
    """Return True when filename ends with one of the recognized extensions."""
    return filename.endswith(tuple(IMG_EXTENSIONS))


def make_dataset(dir):
    """Recursively collect every image-file path under dir, walking in
    sorted directory order."""
    assert os.path.isdir(dir), '%s is not a valid directory' % dir

    return [os.path.join(root, fname)
            for root, _, fnames in sorted(os.walk(dir))
            for fname in fnames
            if is_image_file(fname)]


def default_loader(path):
    """Open the file at path and force-convert it to RGB."""
    return Image.open(path).convert('RGB')


class ImageFolder(data.Dataset):
    """Dataset over all images found under a root directory, with an optional
    transform and optional (image, path) tuples."""

    def __init__(self, root, transform=None, return_paths=False,
                 loader=default_loader):
        imgs = make_dataset(root)
        if len(imgs) == 0:
            raise(RuntimeError("Found 0 images in: " + root + "\n"
                               "Supported image extensions are: " +
                               ",".join(IMG_EXTENSIONS)))

        self.root = root
        self.imgs = imgs
        self.transform = transform
        self.return_paths = return_paths
        self.loader = loader

    def __getitem__(self, index):
        path = self.imgs[index]
        img = self.loader(path)
        if self.transform is not None:
            img = self.transform(img)
        return (img, path) if self.return_paths else img

    def __len__(self):
        return len(self.imgs)
os.path.join(root, path_names) 29 | 30 | os.rename(path, new_path) 31 | shutil.move(new_path, os.path.join(new_root, path_names)) 32 | 33 | make_dataset('fashion') 34 | -------------------------------------------------------------------------------- /person_transfer/losses/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/losses/__init__.py -------------------------------------------------------------------------------- /person_transfer/losses/__pycache__/L1_plus_perceptualLoss.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/losses/__pycache__/L1_plus_perceptualLoss.cpython-36.pyc -------------------------------------------------------------------------------- /person_transfer/losses/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/losses/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /person_transfer/losses/__pycache__/ssim.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/losses/__pycache__/ssim.cpython-36.pyc -------------------------------------------------------------------------------- /person_transfer/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/models/__init__.py 
-------------------------------------------------------------------------------- /person_transfer/models/__pycache__/PATN.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/models/__pycache__/PATN.cpython-36.pyc -------------------------------------------------------------------------------- /person_transfer/models/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/models/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /person_transfer/models/__pycache__/base_model.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/models/__pycache__/base_model.cpython-36.pyc -------------------------------------------------------------------------------- /person_transfer/models/__pycache__/model_variants.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/models/__pycache__/model_variants.cpython-36.pyc -------------------------------------------------------------------------------- /person_transfer/models/__pycache__/models.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/models/__pycache__/models.cpython-36.pyc -------------------------------------------------------------------------------- 
/person_transfer/models/__pycache__/networks.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/models/__pycache__/networks.cpython-36.pyc -------------------------------------------------------------------------------- /person_transfer/models/base_model.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | import torch.nn as nn 4 | 5 | 6 | class BaseModel(nn.Module): 7 | 8 | def __init__(self): 9 | super(BaseModel, self).__init__() 10 | 11 | def name(self): 12 | return 'BaseModel' 13 | 14 | def initialize(self, opt): 15 | self.opt = opt 16 | self.gpu_ids = opt.gpu_ids 17 | self.isTrain = opt.isTrain 18 | self.Tensor = torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor 19 | self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) 20 | 21 | def set_input(self, input): 22 | self.input = input 23 | 24 | def forward(self): 25 | pass 26 | 27 | # used in test time, no backprop 28 | def test(self): 29 | pass 30 | 31 | def get_image_paths(self): 32 | pass 33 | 34 | def optimize_parameters(self): 35 | pass 36 | 37 | def get_current_visuals(self): 38 | return self.input 39 | 40 | def get_current_errors(self): 41 | return {} 42 | 43 | def save(self, label): 44 | pass 45 | 46 | # helper saving function that can be used by subclasses 47 | def save_network(self, network, network_label, epoch_label, gpu_ids): 48 | save_filename = '%s_net_%s.pth' % (epoch_label, network_label) 49 | save_path = os.path.join(self.save_dir, save_filename) 50 | torch.save(network.cpu().state_dict(), save_path) 51 | if len(gpu_ids) and torch.cuda.is_available(): 52 | network.cuda(gpu_ids[0]) 53 | 54 | # helper loading function that can be used by subclasses 55 | def load_network(self, network, network_label, epoch_label): 56 | save_filename = '%s_net_%s.pth' % (epoch_label, 
network_label) 57 | save_path = os.path.join(self.save_dir, save_filename) 58 | network.load_state_dict(torch.load(save_path)) 59 | 60 | # update learning rate (called once every epoch) 61 | def update_learning_rate(self): 62 | for scheduler in self.schedulers: 63 | scheduler.step() 64 | lr = self.optimizers[0].param_groups[0]['lr'] 65 | print('learning rate = %.7f' % lr) 66 | -------------------------------------------------------------------------------- /person_transfer/models/models.py: -------------------------------------------------------------------------------- 1 | 2 | def create_model(opt): 3 | model = None 4 | print(opt.model) 5 | 6 | if opt.model == 'PATN': 7 | assert opt.dataset_mode == 'keypoint' 8 | from .PATN import TransferModel 9 | model = TransferModel() 10 | 11 | else: 12 | raise ValueError("Model [%s] not recognized." % opt.model) 13 | model.initialize(opt) 14 | print("model [%s] was created" % (model.name())) 15 | return model 16 | -------------------------------------------------------------------------------- /person_transfer/models/test_model.py: -------------------------------------------------------------------------------- 1 | from torch.autograd import Variable 2 | from collections import OrderedDict 3 | import util.util as util 4 | from .base_model import BaseModel 5 | from . 
import networks 6 | 7 | 8 | class TestModel(BaseModel): 9 | def name(self): 10 | return 'TestModel' 11 | 12 | def initialize(self, opt): 13 | assert(not opt.isTrain) 14 | BaseModel.initialize(self, opt) 15 | self.input_A = self.Tensor(opt.batchSize, opt.input_nc, opt.fineSize, opt.fineSize) 16 | 17 | self.netG = networks.define_G(opt.input_nc, opt.output_nc, 18 | opt.ngf, opt.which_model_netG, 19 | opt.norm, not opt.no_dropout, 20 | opt.init_type, 21 | self.gpu_ids) 22 | which_epoch = opt.which_epoch 23 | self.load_network(self.netG, 'G', which_epoch) 24 | 25 | print('---------- Networks initialized -------------') 26 | networks.print_network(self.netG) 27 | print('-----------------------------------------------') 28 | 29 | def set_input(self, input): 30 | # we need to use single_dataset mode 31 | input_A = input['A'] 32 | self.input_A.resize_(input_A.size()).copy_(input_A) 33 | self.image_paths = input['A_paths'] 34 | 35 | def test(self): 36 | self.real_A = Variable(self.input_A) 37 | self.fake_B = self.netG(self.real_A) 38 | 39 | # get image paths 40 | def get_image_paths(self): 41 | return self.image_paths 42 | 43 | def get_current_visuals(self): 44 | real_A = util.tensor2im(self.real_A.data) 45 | fake_B = util.tensor2im(self.fake_B.data) 46 | return OrderedDict([('real_A', real_A), ('fake_B', fake_B)]) 47 | -------------------------------------------------------------------------------- /person_transfer/options/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/options/__init__.py -------------------------------------------------------------------------------- /person_transfer/options/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/options/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /person_transfer/options/__pycache__/base_options.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/options/__pycache__/base_options.cpython-36.pyc -------------------------------------------------------------------------------- /person_transfer/options/__pycache__/test_options.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/options/__pycache__/test_options.cpython-36.pyc -------------------------------------------------------------------------------- /person_transfer/options/__pycache__/train_options.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/options/__pycache__/train_options.cpython-36.pyc -------------------------------------------------------------------------------- /person_transfer/options/test_options.py: -------------------------------------------------------------------------------- 1 | from .base_options import BaseOptions 2 | 3 | 4 | class TestOptions(BaseOptions): 5 | def initialize(self): 6 | BaseOptions.initialize(self) 7 | self.parser.add_argument('--ntest', type=int, default=float("inf"), help='# of test examples.') 8 | self.parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.') 9 | self.parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images') 10 | 
self.parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc') 11 | self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model') 12 | self.parser.add_argument('--how_many', type=int, default=200, help='how many test images to run') 13 | 14 | self.isTrain = False 15 | -------------------------------------------------------------------------------- /person_transfer/scripts/download_selectiongan_model.sh: -------------------------------------------------------------------------------- 1 | FILE=$1 2 | 3 | echo "Note: available models are market, fashion" 4 | echo "Specified [$FILE]" 5 | 6 | URL=http://disi.unitn.it/~hao.tang/uploads/models/SelectionGAN/${FILE}_pretrained.tar.gz 7 | TAR_FILE=./checkpoints/${FILE}_pretrained.tar.gz 8 | TARGET_DIR=./checkpoints/${FILE}_pretrained/ 9 | 10 | wget -N $URL -O $TAR_FILE 11 | 12 | mkdir -p $TARGET_DIR 13 | tar -zxvf $TAR_FILE -C ./checkpoints/ 14 | rm $TAR_FILE -------------------------------------------------------------------------------- /person_transfer/ssd_score/__init__.py: -------------------------------------------------------------------------------- 1 | import compute_ssd_score 2 | -------------------------------------------------------------------------------- /person_transfer/test.py: -------------------------------------------------------------------------------- 1 | import time 2 | import os 3 | from options.test_options import TestOptions 4 | from data.data_loader import CreateDataLoader 5 | from models.models import create_model 6 | from util.visualizer import Visualizer 7 | from util import html 8 | import time 9 | 10 | opt = TestOptions().parse() 11 | opt.nThreads = 1 # test code only supports nThreads = 1 12 | opt.batchSize = 1 # test code only supports batchSize = 1 13 | opt.serial_batches = True # no shuffle 14 | opt.no_flip = True # no flip 15 | 16 | data_loader = CreateDataLoader(opt) 17 | dataset = 
data_loader.load_data() 18 | model = create_model(opt) 19 | visualizer = Visualizer(opt) 20 | # create website 21 | web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch)) 22 | 23 | webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.which_epoch)) 24 | 25 | print(opt.how_many) 26 | print(len(dataset)) 27 | 28 | model = model.eval() 29 | print(model.training) 30 | 31 | opt.how_many = 999999 32 | # test 33 | for i, data in enumerate(dataset): 34 | print(' process %d/%d img ..'%(i,opt.how_many)) 35 | if i >= opt.how_many: 36 | break 37 | model.set_input(data) 38 | startTime = time.time() 39 | model.test() 40 | endTime = time.time() 41 | print(endTime-startTime) 42 | visuals = model.get_current_visuals() 43 | img_path = model.get_image_paths() 44 | img_path = [img_path] 45 | print(img_path) 46 | visualizer.save_images(webpage, visuals, img_path) 47 | 48 | webpage.save() 49 | 50 | 51 | 52 | 53 | -------------------------------------------------------------------------------- /person_transfer/test_fashion.sh: -------------------------------------------------------------------------------- 1 | python test.py --dataroot ./datasets/fashion_data/ --name fashion_selectiongan --model PATN --phase test --dataset_mode keypoint --norm instance --batchSize 1 --resize_or_crop no --gpu_ids 0 --BP_input_nc 18 --no_flip --which_model_netG PATN --checkpoints_dir ./checkpoints --pairLst ./datasets/fashion_data/fasion-resize-pairs-test.csv --which_epoch latest --results_dir ./results --display_id 0 2 | -------------------------------------------------------------------------------- /person_transfer/test_market.sh: -------------------------------------------------------------------------------- 1 | python test.py --dataroot ./datasets/market_data/ --name market_selectiongan --model PATN --phase test --dataset_mode keypoint --norm batch --batchSize 1 --resize_or_crop no --gpu_ids 0 --BP_input_nc 18 --no_flip 
--which_model_netG PATN --checkpoints_dir ./checkpoints --pairLst ./datasets/market_data/market-pairs-test.csv --which_epoch latest --results_dir ./results --display_id 0 2 | -------------------------------------------------------------------------------- /person_transfer/tool/__pycache__/inception_score.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/tool/__pycache__/inception_score.cpython-36.pyc -------------------------------------------------------------------------------- /person_transfer/tool/crop_fashion.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | import os 3 | 4 | img_dir = './results/fashion_PATN_test/test_latest/images' 5 | save_dir = './results/fashion_PATN_test/test_latest/images_crop' 6 | 7 | if not os.path.exists(save_dir): 8 | os.mkdir(save_dir) 9 | 10 | cnt = 0 11 | 12 | for item in os.listdir(img_dir): 13 | if not item.endswith('.jpg') and not item.endswith('.png'): 14 | continue 15 | cnt = cnt + 1 16 | print('%d/8570 ...' %(cnt)) 17 | img = Image.open(os.path.join(img_dir, item)) 18 | imgcrop = img.crop((704, 0, 880, 256)) 19 | imgcrop.save(os.path.join(save_dir, item)) 20 | -------------------------------------------------------------------------------- /person_transfer/tool/crop_market.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | import os 3 | 4 | img_dir = './results/market_PATN_test/test_latest/images' 5 | save_dir = './results/market_PATN_test/test_latest/images_crop' 6 | 7 | if not os.path.exists(save_dir): 8 | os.mkdir(save_dir) 9 | 10 | cnt = 0 11 | for item in os.listdir(img_dir): 12 | if not item.endswith('.jpg') and not item.endswith('.png'): 13 | continue 14 | cnt = cnt + 1 15 | print('%d/12000 ...' 
%(cnt)) 16 | img = Image.open(os.path.join(img_dir, item)) 17 | # for 5 split 18 | imgcrop = img.crop((256, 0, 320, 128)) 19 | imgcrop.save(os.path.join(save_dir, item)) 20 | -------------------------------------------------------------------------------- /person_transfer/tool/generate_fashion_datasets.py: -------------------------------------------------------------------------------- 1 | import os 2 | from shutil import copyfile 3 | from PIL import Image 4 | 5 | # path for downloaded fashion images 6 | root_fashion_dir = '/mnt/data4/htang/Pose-Transfer/fashion/Img/DeepFashion' 7 | assert len(root_fashion_dir) > 0, 'please give the path of raw deep fashion dataset!' 8 | 9 | # print(len(root_fashion_dir) ) 10 | 11 | train_images = [] 12 | train_f = open('fashion_data/train.lst', 'r') 13 | for lines in train_f: 14 | lines = lines.strip() 15 | if lines.endswith('.jpg'): 16 | train_images.append(lines) 17 | # print(train_images) 18 | 19 | test_images = [] 20 | test_f = open('fashion_data/test.lst', 'r') 21 | for lines in test_f: 22 | lines = lines.strip() 23 | if lines.endswith('.jpg'): 24 | test_images.append(lines) 25 | 26 | train_path = 'fashion_data/train' 27 | if not os.path.exists(train_path): 28 | os.mkdir(train_path) 29 | 30 | for item in train_images: 31 | from_ = os.path.join(root_fashion_dir, item) 32 | img = Image.open(from_) 33 | imgcrop = img.crop((40, 0, 216, 256)) 34 | to_ = os.path.join(train_path, item) 35 | imgcrop.save(to_) 36 | #copyfile(from_, to_) 37 | 38 | 39 | test_path = 'fashion_data/test' 40 | if not os.path.exists(test_path): 41 | os.mkdir(test_path) 42 | 43 | for item in test_images: 44 | from_ = os.path.join(root_fashion_dir, item) 45 | img = Image.open(from_) 46 | imgcrop = img.crop((40, 0, 216, 256)) 47 | to_ = os.path.join(test_path, item) 48 | # os.system('cp %s %s' %(from_, to_)) 49 | #copyfile(from_, to_) 50 | imgcrop.save(to_) 51 | -------------------------------------------------------------------------------- 
/person_transfer/tool/generate_pose_map_fashion.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | import json 4 | import os 5 | 6 | MISSING_VALUE = -1 7 | # fix PATH 8 | img_dir = '/mnt/data4/htang/Pose-Transfer/fashion/Img/DeepFashion' #raw image path 9 | annotations_file = 'fashion_data/fasion-resize-annotation-train.csv' #pose annotation path 10 | save_path = 'fashion_data/trainK' #path to store pose maps 11 | 12 | def load_pose_cords_from_strings(y_str, x_str): 13 | y_cords = json.loads(y_str) 14 | x_cords = json.loads(x_str) 15 | return np.concatenate([np.expand_dims(y_cords, -1), np.expand_dims(x_cords, -1)], axis=1) 16 | 17 | def cords_to_map(cords, img_size, sigma=6): 18 | result = np.zeros(img_size + cords.shape[0:1], dtype='uint8') 19 | for i, point in enumerate(cords): 20 | if point[0] == MISSING_VALUE or point[1] == MISSING_VALUE: 21 | continue 22 | xx, yy = np.meshgrid(np.arange(img_size[1]), np.arange(img_size[0])) 23 | result[..., i] = np.exp(-((yy - point[0]) ** 2 + (xx - point[1]) ** 2) / (2 * sigma ** 2)) 24 | # result[..., i] = np.where(((yy - point[0]) ** 2 + (xx - point[1]) ** 2) < (sigma ** 2), 1, 0) 25 | return result 26 | 27 | def compute_pose(image_dir, annotations_file, savePath, sigma): 28 | annotations_file = pd.read_csv(annotations_file, sep=':') 29 | annotations_file = annotations_file.set_index('name') 30 | image_size = (256, 176) 31 | cnt = len(annotations_file) 32 | for i in range(cnt): 33 | print('processing %d / %d ...' 
%(i, cnt)) 34 | row = annotations_file.iloc[i] 35 | name = row.name 36 | print(savePath, name) 37 | file_name = os.path.join(savePath, name + '.npy') 38 | kp_array = load_pose_cords_from_strings(row.keypoints_y, row.keypoints_x) 39 | pose = cords_to_map(kp_array, image_size, sigma) 40 | np.save(file_name, pose) 41 | # input() 42 | 43 | compute_pose(img_dir, annotations_file, save_path, sigma=6) 44 | -------------------------------------------------------------------------------- /person_transfer/tool/generate_pose_map_market.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | import json 4 | import os 5 | 6 | MISSING_VALUE = -1 7 | 8 | img_dir = 'market_data/train' #raw image path 9 | annotations_file = 'market_data/market-annotation-train.csv' #pose annotation path 10 | save_path = 'market_data/trainK' #path to store pose maps 11 | 12 | def load_pose_cords_from_strings(y_str, x_str): 13 | y_cords = json.loads(y_str) 14 | x_cords = json.loads(x_str) 15 | return np.concatenate([np.expand_dims(y_cords, -1), np.expand_dims(x_cords, -1)], axis=1) 16 | 17 | def cords_to_map(cords, img_size, sigma=6): 18 | result = np.zeros(img_size + cords.shape[0:1], dtype='float32') 19 | for i, point in enumerate(cords): 20 | if point[0] == MISSING_VALUE or point[1] == MISSING_VALUE: 21 | continue 22 | xx, yy = np.meshgrid(np.arange(img_size[1]), np.arange(img_size[0])) 23 | result[..., i] = np.exp(-((yy - point[0]) ** 2 + (xx - point[1]) ** 2) / (2 * sigma ** 2)) 24 | return result 25 | 26 | def compute_pose(image_dir, annotations_file, savePath): 27 | annotations_file = pd.read_csv(annotations_file, sep=':') 28 | annotations_file = annotations_file.set_index('name') 29 | image_size = (128, 64) 30 | cnt = len(annotations_file) 31 | for i in range(cnt): 32 | print('processing %d / %d ...' 
%(i, cnt)) 33 | row = annotations_file.iloc[i] 34 | name = row.name 35 | print(savePath, name) 36 | file_name = os.path.join(savePath, name + '.npy') 37 | kp_array = load_pose_cords_from_strings(row.keypoints_y, row.keypoints_x) 38 | pose = cords_to_map(kp_array, image_size) 39 | np.save(file_name, pose) 40 | 41 | compute_pose(img_dir, annotations_file, save_path) 42 | 43 | -------------------------------------------------------------------------------- /person_transfer/tool/resize_fashion.py: -------------------------------------------------------------------------------- 1 | from skimage.io import imread, imsave 2 | from skimage.transform import resize 3 | import os 4 | import numpy as np 5 | import pandas as pd 6 | import json 7 | 8 | def resize_dataset(folder, new_folder, new_size = (256, 176), crop_bord=40): 9 | if not os.path.exists(new_folder): 10 | os.makedirs(new_folder) 11 | for name in os.listdir(folder): 12 | old_name = os.path.join(folder, name) 13 | new_name = os.path.join(new_folder, name) 14 | 15 | img = imread(old_name) 16 | if crop_bord == 0: 17 | pass 18 | else: 19 | img = img[:, crop_bord:-crop_bord] 20 | 21 | img = resize(img, new_size, preserve_range=True).astype(np.uint8) 22 | 23 | imsave(new_name, img) 24 | 25 | def resize_annotations(name, new_name, new_size = (256, 176), old_size = (256, 256), crop_bord=40): 26 | df = pd.read_csv(name, sep=':') 27 | 28 | ratio_y = new_size[0] / float(old_size[0]) 29 | ratio_x = new_size[1] / float(old_size[1] - 2 * crop_bord) 30 | 31 | def modify(values, ratio, crop): 32 | val = np.array(json.loads(values)) 33 | mask = val == -1 34 | val = ((val - crop) * ratio).astype(int) 35 | val[mask] = -1 36 | return str(list(val)) 37 | 38 | df['keypoints_y'] = df.apply(lambda row: modify(row['keypoints_y'], ratio_y, 0), axis=1) 39 | df['keypoints_x'] = df.apply(lambda row: modify(row['keypoints_x'], ratio_x, crop_bord), axis=1) 40 | 41 | df.to_csv(new_name, sep=':', index=False) 42 | 43 | 44 | root_dir = 'xxx' 45 | 
resize_dataset(root_dir + '/test', root_dir + 'fashion_resize/test') 46 | resize_annotations(root_dir + 'fasion-annotation-test.csv', root_dir + 'fasion-resize-annotation-test.csv') 47 | 48 | resize_dataset(root_dir + '/train', root_dir + 'fashion_resize/train') 49 | resize_annotations(root_dir + 'fasion-annotation-train.csv', root_dir + 'fasion-resize-annotation-train.csv') 50 | 51 | 52 | -------------------------------------------------------------------------------- /person_transfer/tool/rm_insnorm_running_vars.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | ckp_path = './checkpoints/fashion_PATN/latest_net_netG.pth' 4 | save_path = './checkpoints/fashion_PATN_v1.0/latest_net_netG.pth' 5 | states_dict = torch.load(ckp_path) 6 | states_dict_new = states_dict.copy() 7 | for key in states_dict.keys(): 8 | if "running_var" in key or "running_mean" in key: 9 | del states_dict_new[key] 10 | 11 | torch.save(states_dict_new, save_path) -------------------------------------------------------------------------------- /person_transfer/train.py: -------------------------------------------------------------------------------- 1 | import time 2 | from options.train_options import TrainOptions 3 | from data.data_loader import CreateDataLoader 4 | from models.models import create_model 5 | from util.visualizer import Visualizer 6 | 7 | opt = TrainOptions().parse() 8 | data_loader = CreateDataLoader(opt) 9 | dataset = data_loader.load_data() 10 | dataset_size = len(data_loader) 11 | print('#training images = %d' % dataset_size) 12 | 13 | model = create_model(opt) 14 | visualizer = Visualizer(opt) 15 | total_steps = 0 16 | 17 | for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1): 18 | epoch_start_time = time.time() 19 | epoch_iter = 0 20 | 21 | for i, data in enumerate(dataset): 22 | iter_start_time = time.time() 23 | visualizer.reset() 24 | total_steps += opt.batchSize 25 | epoch_iter += opt.batchSize 
26 | model.set_input(data) 27 | model.optimize_parameters() 28 | 29 | if total_steps % opt.display_freq == 0: 30 | save_result = total_steps % opt.update_html_freq == 0 31 | visualizer.display_current_results(model.get_current_visuals(), epoch, save_result) 32 | 33 | if total_steps % opt.print_freq == 0: 34 | errors = model.get_current_errors() 35 | t = (time.time() - iter_start_time) / opt.batchSize 36 | visualizer.print_current_errors(epoch, epoch_iter, errors, t) 37 | if opt.display_id > 0: 38 | visualizer.plot_current_errors(epoch, float(epoch_iter)/dataset_size, opt, errors) 39 | 40 | if total_steps % opt.save_latest_freq == 0: 41 | print('saving the latest model (epoch %d, total_steps %d)' % 42 | (epoch, total_steps)) 43 | model.save('latest') 44 | 45 | if epoch % opt.save_epoch_freq == 0: 46 | print('saving the model at the end of epoch %d, iters %d' % 47 | (epoch, total_steps)) 48 | model.save('latest') 49 | model.save(epoch) 50 | 51 | print('End of epoch %d / %d \t Time Taken: %d sec' % 52 | (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time)) 53 | model.update_learning_rate() 54 | -------------------------------------------------------------------------------- /person_transfer/train_fashion.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0,1; 2 | python train.py --dataroot ./datasets/fashion_data/ --name fashion_selectiongan --model PATN --lambda_GAN 5 --lambda_A 10 --lambda_B 10 --dataset_mode keypoint --n_layers 3 --norm instance --batchSize 12 --pool_size 0 --resize_or_crop no --gpu_ids 0 --BP_input_nc 18 --no_flip --which_model_netG PATN --niter 500 --niter_decay 200 --checkpoints_dir ./checkpoints --pairLst ./datasets/fashion_data/fasion-resize-pairs-train.csv --L1_type l1_plus_perL1 --n_layers_D 3 --with_D_PP 1 --with_D_PB 1 --display_id 0 --gpu_ids 0,1;-- 3 | # --which_epoch 60 --epoch_count 61 4 | 
-------------------------------------------------------------------------------- /person_transfer/train_market.sh: -------------------------------------------------------------------------------- 1 | python train.py --dataroot ./datasets/market_data/ --name market_selectiongan --model PATN --lambda_GAN 5 --lambda_A 10 --lambda_B 10 --dataset_mode keypoint --no_lsgan --n_layers 3 --norm batch --batchSize 32 --resize_or_crop no --gpu_ids 0 --BP_input_nc 18 --no_flip --which_model_netG PATN --niter 500 --niter_decay 200 --checkpoints_dir ./checkpoints --pairLst ./datasets/market_data/market-pairs-train.csv --L1_type l1_plus_perL1 --n_layers_D 3 --with_D_PP 1 --with_D_PB 1 --display_id 0 2 | -------------------------------------------------------------------------------- /person_transfer/util/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/util/__init__.py -------------------------------------------------------------------------------- /person_transfer/util/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/util/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /person_transfer/util/__pycache__/html.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/util/__pycache__/html.cpython-36.pyc -------------------------------------------------------------------------------- /person_transfer/util/__pycache__/image_pool.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/util/__pycache__/image_pool.cpython-36.pyc -------------------------------------------------------------------------------- /person_transfer/util/__pycache__/util.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/util/__pycache__/util.cpython-36.pyc -------------------------------------------------------------------------------- /person_transfer/util/__pycache__/visualizer.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/person_transfer/util/__pycache__/visualizer.cpython-36.pyc -------------------------------------------------------------------------------- /person_transfer/util/html.py: -------------------------------------------------------------------------------- 1 | import dominate 2 | from dominate.tags import * 3 | import os 4 | 5 | 6 | class HTML: 7 | def __init__(self, web_dir, title, reflesh=0): 8 | self.title = title 9 | self.web_dir = web_dir 10 | self.img_dir = os.path.join(self.web_dir, 'images') 11 | if not os.path.exists(self.web_dir): 12 | os.makedirs(self.web_dir) 13 | if not os.path.exists(self.img_dir): 14 | os.makedirs(self.img_dir) 15 | # print(self.img_dir) 16 | 17 | self.doc = dominate.document(title=title) 18 | if reflesh > 0: 19 | with self.doc.head: 20 | meta(http_equiv="reflesh", content=str(reflesh)) 21 | 22 | def get_image_dir(self): 23 | return self.img_dir 24 | 25 | def add_header(self, str): 26 | with self.doc: 27 | h3(str) 28 | 29 | def add_table(self, border=1): 30 | self.t = table(border=border, style="table-layout: fixed;") 31 | self.doc.add(self.t) 32 | 33 | def add_images(self, ims, txts, links, width=400): 34 | 
self.add_table() 35 | with self.t: 36 | with tr(): 37 | for im, txt, link in zip(ims, txts, links): 38 | with td(style="word-wrap: break-word;", halign="center", valign="top"): 39 | with p(): 40 | with a(href=os.path.join('images', link)): 41 | img(style="width:%dpx" % width, src=os.path.join('images', im)) 42 | br() 43 | p(txt) 44 | 45 | def save(self): 46 | html_file = '%s/index.html' % self.web_dir 47 | f = open(html_file, 'wt') 48 | f.write(self.doc.render()) 49 | f.close() 50 | 51 | 52 | if __name__ == '__main__': 53 | html = HTML('web/', 'test_html') 54 | html.add_header('hello world') 55 | 56 | ims = [] 57 | txts = [] 58 | links = [] 59 | for n in range(4): 60 | ims.append('image_%d.png' % n) 61 | txts.append('text_%d' % n) 62 | links.append('image_%d.png' % n) 63 | html.add_images(ims, txts, links) 64 | html.save() 65 | -------------------------------------------------------------------------------- /person_transfer/util/image_pool.py: -------------------------------------------------------------------------------- 1 | import random 2 | import numpy as np 3 | import torch 4 | from torch.autograd import Variable 5 | 6 | 7 | class ImagePool(): 8 | def __init__(self, pool_size): 9 | self.pool_size = pool_size 10 | if self.pool_size > 0: 11 | self.num_imgs = 0 12 | self.images = [] 13 | 14 | def query(self, images): 15 | if self.pool_size == 0: 16 | return Variable(images) 17 | return_images = [] 18 | for image in images: 19 | image = torch.unsqueeze(image, 0) 20 | if self.num_imgs < self.pool_size: 21 | self.num_imgs = self.num_imgs + 1 22 | self.images.append(image) 23 | return_images.append(image) 24 | else: 25 | p = random.uniform(0, 1) 26 | if p > 0.5: 27 | random_id = random.randint(0, self.pool_size-1) 28 | tmp = self.images[random_id].clone() 29 | self.images[random_id] = image 30 | return_images.append(tmp) 31 | else: 32 | return_images.append(image) 33 | return_images = Variable(torch.cat(return_images, 0)) 34 | return return_images 35 | 
-------------------------------------------------------------------------------- /person_transfer/util/png.py: -------------------------------------------------------------------------------- 1 | import struct 2 | import zlib 3 | 4 | def encode(buf, width, height): 5 | """ buf: must be bytes or a bytearray in py3, a regular string in py2. formatted RGBRGB... """ 6 | assert (width * height * 3 == len(buf)) 7 | bpp = 3 8 | 9 | def raw_data(): 10 | # reverse the vertical line order and add null bytes at the start 11 | row_bytes = width * bpp 12 | for row_start in range((height - 1) * width * bpp, -1, -row_bytes): 13 | yield b'\x00' 14 | yield buf[row_start:row_start + row_bytes] 15 | 16 | def chunk(tag, data): 17 | return [ 18 | struct.pack("!I", len(data)), 19 | tag, 20 | data, 21 | struct.pack("!I", 0xFFFFFFFF & zlib.crc32(data, zlib.crc32(tag))) 22 | ] 23 | 24 | SIGNATURE = b'\x89PNG\r\n\x1a\n' 25 | COLOR_TYPE_RGB = 2 26 | COLOR_TYPE_RGBA = 6 27 | bit_depth = 8 28 | return b''.join( 29 | [ SIGNATURE ] + 30 | chunk(b'IHDR', struct.pack("!2I5B", width, height, bit_depth, COLOR_TYPE_RGB, 0, 0, 0)) + 31 | chunk(b'IDAT', zlib.compress(b''.join(raw_data()), 9)) + 32 | chunk(b'IEND', b'') 33 | ) 34 | -------------------------------------------------------------------------------- /selectiongan_v1/data/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/data/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v1/data/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/data/__pycache__/__init__.cpython-37.pyc 
-------------------------------------------------------------------------------- /selectiongan_v1/data/__pycache__/aligned_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/data/__pycache__/aligned_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v1/data/__pycache__/aligned_dataset.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/data/__pycache__/aligned_dataset.cpython-37.pyc -------------------------------------------------------------------------------- /selectiongan_v1/data/__pycache__/base_data_loader.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/data/__pycache__/base_data_loader.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v1/data/__pycache__/base_data_loader.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/data/__pycache__/base_data_loader.cpython-37.pyc -------------------------------------------------------------------------------- /selectiongan_v1/data/__pycache__/base_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/data/__pycache__/base_dataset.cpython-36.pyc 
class BaseDataLoader():
    """Abstract base class for dataset loaders.

    Subclasses override initialize() and load_data() to construct the
    actual dataset / iterator; the base class only stores the options
    object.
    """

    def __init__(self):
        pass

    def initialize(self, opt):
        # Keep the parsed command-line options for subclasses to use.
        self.opt = opt

    def load_data(self):
        # Bug fix: the original declared this as `def load_data():` with no
        # `self`, so calling it on an instance raised TypeError.  Subclasses
        # override this to return their dataset.
        return None
###############################################################################
# Code from
# https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py
# Modified the original code so that it also loads images from the current
# directory as well as the subdirectories
###############################################################################

import torch.utils.data as data

from PIL import Image
import os
import os.path

# Recognised image-file suffixes; matching is a literal case-sensitive
# endswith check, hence both lower- and upper-case variants are listed.
IMG_EXTENSIONS = [
    '.jpg', '.JPG', '.jpeg', '.JPEG',
    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]


def is_image_file(filename):
    """True if `filename` ends with one of the recognised image suffixes."""
    return any(filename.endswith(ext) for ext in IMG_EXTENSIONS)


def make_dataset(dir):
    """Recursively collect the paths of every image file under `dir`."""
    assert os.path.isdir(dir), '%s is not a valid directory' % dir

    paths = []
    for root, _, fnames in sorted(os.walk(dir)):
        paths.extend(os.path.join(root, fname)
                     for fname in fnames if is_image_file(fname))
    return paths


def default_loader(path):
    """Open `path` with PIL and force 3-channel RGB."""
    return Image.open(path).convert('RGB')


class ImageFolder(data.Dataset):
    """Dataset over all images found under `root` (flat or nested)."""

    def __init__(self, root, transform=None, return_paths=False,
                 loader=default_loader):
        imgs = make_dataset(root)
        if len(imgs) == 0:
            raise(RuntimeError("Found 0 images in: " + root + "\n"
                               "Supported image extensions are: " +
                               ",".join(IMG_EXTENSIONS)))

        self.root = root
        self.imgs = imgs
        self.transform = transform
        self.return_paths = return_paths
        self.loader = loader

    def __getitem__(self, index):
        # Load, optionally transform, and optionally pair with the path.
        path = self.imgs[index]
        img = self.loader(path)
        if self.transform is not None:
            img = self.transform(img)
        return (img, path) if self.return_paths else img

    def __len__(self):
        return len(self.imgs)
https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/datasets/samples/cvusa/test/0001780.jpg -------------------------------------------------------------------------------- /selectiongan_v1/datasets/samples/cvusa/test/0002792.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/datasets/samples/cvusa/test/0002792.jpg -------------------------------------------------------------------------------- /selectiongan_v1/datasets/samples/cvusa/train/0000777.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/datasets/samples/cvusa/train/0000777.jpg -------------------------------------------------------------------------------- /selectiongan_v1/datasets/samples/cvusa/train/0002067.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/datasets/samples/cvusa/train/0002067.jpg -------------------------------------------------------------------------------- /selectiongan_v1/datasets/samples/cvusa/train/0002664.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/datasets/samples/cvusa/train/0002664.jpg -------------------------------------------------------------------------------- /selectiongan_v1/datasets/samples/cvusa/train/0003158.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/datasets/samples/cvusa/train/0003158.jpg -------------------------------------------------------------------------------- /selectiongan_v1/datasets/samples/dayton/test/--oXLKfmtepqJ5OHQ84jZg.x832.y508.a-86.a2g.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/datasets/samples/dayton/test/--oXLKfmtepqJ5OHQ84jZg.x832.y508.a-86.a2g.png -------------------------------------------------------------------------------- /selectiongan_v1/datasets/samples/dayton/test/-19hmUez9cLpI3Sq1-HXJw.x107.y483.a-42.a2g.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/datasets/samples/dayton/test/-19hmUez9cLpI3Sq1-HXJw.x107.y483.a-42.a2g.png -------------------------------------------------------------------------------- /selectiongan_v1/datasets/samples/dayton/test/08RqiB4xvi3kCBsCB85FQA.x503.y406.a154.a2g.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/datasets/samples/dayton/test/08RqiB4xvi3kCBsCB85FQA.x503.y406.a154.a2g.png -------------------------------------------------------------------------------- /selectiongan_v1/datasets/samples/dayton/test/0APOIZqVHuFXTlUyk1Zdqw.x1148.y423.a-108.a2g.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/datasets/samples/dayton/test/0APOIZqVHuFXTlUyk1Zdqw.x1148.y423.a-108.a2g.png 
-------------------------------------------------------------------------------- /selectiongan_v1/datasets/samples/dayton/train/--oXLKfmtepqJ5OHQ84jZg.x1066.y499.a-36.a2g.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/datasets/samples/dayton/train/--oXLKfmtepqJ5OHQ84jZg.x1066.y499.a-36.a2g.png -------------------------------------------------------------------------------- /selectiongan_v1/datasets/samples/dayton/train/-2SMmA-p4vE0f7f1wcVc7Q.x355.y438.a40.a2g.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/datasets/samples/dayton/train/-2SMmA-p4vE0f7f1wcVc7Q.x355.y438.a40.a2g.png -------------------------------------------------------------------------------- /selectiongan_v1/datasets/samples/dayton/train/-AZ2_ts7HTdh_SghAzJCmQ.x706.y453.a142.a2g.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/datasets/samples/dayton/train/-AZ2_ts7HTdh_SghAzJCmQ.x706.y453.a142.a2g.png -------------------------------------------------------------------------------- /selectiongan_v1/datasets/samples/dayton/train/-Ab8tr0-y0kZrAbMq8iUPA.x910.y482.a-86.a2g.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/datasets/samples/dayton/train/-Ab8tr0-y0kZrAbMq8iUPA.x910.y482.a-86.a2g.png -------------------------------------------------------------------------------- /selectiongan_v1/datasets/samples/ego2top/test/Case16_Egocentric_5_00138_Egocentric_5_00298.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/datasets/samples/ego2top/test/Case16_Egocentric_5_00138_Egocentric_5_00298.png -------------------------------------------------------------------------------- /selectiongan_v1/datasets/samples/ego2top/test/Case19_Egocentric_5_00264_TopView_00426.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/datasets/samples/ego2top/test/Case19_Egocentric_5_00264_TopView_00426.png -------------------------------------------------------------------------------- /selectiongan_v1/datasets/samples/ego2top/test/Case24_Egocentric_1_00001_Egocentric_4_00304.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/datasets/samples/ego2top/test/Case24_Egocentric_1_00001_Egocentric_4_00304.png -------------------------------------------------------------------------------- /selectiongan_v1/datasets/samples/ego2top/test/Case29_Egocentric_5_00249_Egocentric_6_00150.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/datasets/samples/ego2top/test/Case29_Egocentric_5_00249_Egocentric_6_00150.png -------------------------------------------------------------------------------- /selectiongan_v1/datasets/samples/ego2top/test/Case39_Egocentric_1_00481_Egocentric_5_00424.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/datasets/samples/ego2top/test/Case39_Egocentric_1_00481_Egocentric_5_00424.png -------------------------------------------------------------------------------- /selectiongan_v1/datasets/samples/ego2top/test/Case44_Egocentric_1_00161_TopView_00451.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/datasets/samples/ego2top/test/Case44_Egocentric_1_00161_TopView_00451.png -------------------------------------------------------------------------------- /selectiongan_v1/datasets/samples/ego2top/test/Case46_Egocentric_4_00008_Egocentric_5_00339.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/datasets/samples/ego2top/test/Case46_Egocentric_4_00008_Egocentric_5_00339.png -------------------------------------------------------------------------------- /selectiongan_v1/datasets/samples/ego2top/test/Case9_Egocentric_4_01326_Egocentric_6_00202.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/datasets/samples/ego2top/test/Case9_Egocentric_4_01326_Egocentric_6_00202.png -------------------------------------------------------------------------------- /selectiongan_v1/datasets/samples/ego2top/train/Case10_Egocentric_1_00000_Egocentric_1_01059.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/datasets/samples/ego2top/train/Case10_Egocentric_1_00000_Egocentric_1_01059.png 
-------------------------------------------------------------------------------- /selectiongan_v1/datasets/samples/ego2top/train/Case10_Egocentric_3_00040_Egocentric_5_00374.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/datasets/samples/ego2top/train/Case10_Egocentric_3_00040_Egocentric_5_00374.png -------------------------------------------------------------------------------- /selectiongan_v1/datasets/samples/ego2top/train/Case4_Egocentric_2_01243_Egocentric_5_01485.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/datasets/samples/ego2top/train/Case4_Egocentric_2_01243_Egocentric_5_01485.png -------------------------------------------------------------------------------- /selectiongan_v1/datasets/samples/ego2top/train/Case9_TopView_02108_TopView_02148.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/datasets/samples/ego2top/train/Case9_TopView_02108_TopView_02148.png -------------------------------------------------------------------------------- /selectiongan_v1/imgs/SelectionGAN.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/imgs/SelectionGAN.gif -------------------------------------------------------------------------------- /selectiongan_v1/imgs/SelectionGAN.png: -------------------------------------------------------------------------------- 
import importlib
from models.base_model import BaseModel


def find_model_using_name(model_name):
    """Import models/<model_name>_model.py and return its model class.

    The imported module must define a subclass of BaseModel whose class
    name equals model_name with underscores stripped plus "model",
    compared case-insensitively.  If several match, the last one found
    wins; if none match, the process exits with a message.
    """
    # Given the option --model [modelname], the file
    # "models/modelname_model.py" will be imported.
    model_filename = "models." + model_name + "_model"
    modellib = importlib.import_module(model_filename)

    target_model_name = model_name.replace('_', '') + 'model'
    model = None
    for name, cls in modellib.__dict__.items():
        if name.lower() == target_model_name.lower() and issubclass(cls, BaseModel):
            model = cls

    if model is None:
        print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name))
        exit(0)

    return model


def get_option_setter(model_name):
    """Return the chosen model class's command-line option modifier hook."""
    return find_model_using_name(model_name).modify_commandline_options


def create_model(opt):
    """Instantiate and initialize the model selected by opt.model."""
    cls = find_model_using_name(opt.model)
    instance = cls()
    instance.initialize(opt)
    print("model [%s] was created" % (instance.name()))
    return instance
-------------------------------------------------------------------------------- /selectiongan_v1/models/__pycache__/base_model.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/models/__pycache__/base_model.cpython-37.pyc -------------------------------------------------------------------------------- /selectiongan_v1/models/__pycache__/cycle_gan_model.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/models/__pycache__/cycle_gan_model.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v1/models/__pycache__/networks.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/models/__pycache__/networks.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v1/models/__pycache__/networks.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/models/__pycache__/networks.cpython-37.pyc -------------------------------------------------------------------------------- /selectiongan_v1/models/__pycache__/pix2pix_model.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/models/__pycache__/pix2pix_model.cpython-36.pyc -------------------------------------------------------------------------------- 
/selectiongan_v1/models/__pycache__/selectiongan_model.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/models/__pycache__/selectiongan_model.cpython-37.pyc -------------------------------------------------------------------------------- /selectiongan_v1/models/__pycache__/test_model.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/models/__pycache__/test_model.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v1/options/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/options/__init__.py -------------------------------------------------------------------------------- /selectiongan_v1/options/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/options/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v1/options/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/options/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /selectiongan_v1/options/__pycache__/base_options.cpython-36.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/options/__pycache__/base_options.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v1/options/__pycache__/base_options.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/options/__pycache__/base_options.cpython-37.pyc -------------------------------------------------------------------------------- /selectiongan_v1/options/__pycache__/test_options.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/options/__pycache__/test_options.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v1/options/__pycache__/test_options.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/options/__pycache__/test_options.cpython-37.pyc -------------------------------------------------------------------------------- /selectiongan_v1/options/__pycache__/train_options.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/options/__pycache__/train_options.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v1/options/__pycache__/train_options.cpython-37.pyc: 
from .base_options import BaseOptions


class TestOptions(BaseOptions):
    # Command-line options used only at test time; extends the shared
    # options registered by BaseOptions.
    def initialize(self, parser):
        """Register test-time arguments on `parser` and return it."""
        parser = BaseOptions.initialize(self, parser)
        parser.add_argument('--ntest', type=int, default=float("inf"), help='# of test examples.')
        parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
        parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
        parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
        parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        # Dropout and Batchnorm behave differently during training and test,
        # so --eval switches the networks to eval mode.
        parser.add_argument('--eval', action='store_true', help='use eval mode during test time.')
        parser.add_argument('--how_many', type=int, default=5, help='how many test images to run')

        parser.set_defaults(model='pix2pix')
        # To avoid cropping, the loadSize should be the same as fineSize
        # (fineSize's default is defined in BaseOptions).
        parser.set_defaults(loadSize=parser.get_default('fineSize'))

        # Flag read by BaseOptions/model code to select the test code path.
        self.isTrain = False
        return parser
-------------------------------------------------------------------------------- 1 | clear all;close all;clc 2 | 3 | image_folder='./datasets/dayton/test'; 4 | save_folder='./datasets/dayton_g2a/test'; 5 | 6 | if ~isdir(save_folder) 7 | mkdir(save_folder) 8 | end 9 | 10 | Image = dir( image_folder ); 11 | for i = 1 : length( Image ) 12 | fprintf('%d / %d \n', i, length(Image)); 13 | if( isequal( Image( i ).name, '.' ) || isequal( Image( i ).name, '..' )) 14 | continue; 15 | end 16 | image_name=Image( i ).name; 17 | image_path=fullfile(image_folder, image_name); 18 | img=imread(image_path); 19 | imshow(img) 20 | image1=img(1:256,1:256,:); 21 | image2=img(1:256,257:512,:); 22 | image3=img(1:256,513:768,:); 23 | image4=img(1:256,769:1024,:); 24 | im=[image2, image1,image4,image3]; 25 | 26 | imwrite(im, fullfile(save_folder, image_name)); 27 | end 28 | -------------------------------------------------------------------------------- /selectiongan_v1/scripts/conda_deps.sh: -------------------------------------------------------------------------------- 1 | set -ex 2 | conda install numpy pyyaml mkl mkl-include setuptools cmake cffi typing 3 | conda install pytorch torchvision -c pytorch # add cuda90 if CUDA 9 4 | conda install visdom dominate -c conda-forge # install visdom and dominate -------------------------------------------------------------------------------- /selectiongan_v1/scripts/convert_semantic_map_cvusa.m: -------------------------------------------------------------------------------- 1 | clear all;close all;clc 2 | 3 | image_folder='./cvusa/streetview/annotations'; 4 | save_folder='./cvusa/streetview/annotations_color'; 5 | 6 | if ~isdir(save_folder) 7 | mkdir(save_folder) 8 | end 9 | 10 | Image = dir( image_folder ); 11 | for i = 1 : length( Image ) 12 | fprintf('%d / %d \n', i, length(Image)); 13 | if( isequal( Image( i ).name, '.' ) || isequal( Image( i ).name, '..' 
)) 14 | continue; 15 | end 16 | image_name=Image( i ).name; 17 | image_path=fullfile(image_folder, image_name); 18 | img=imread(image_path); 19 | image(:,:,1)=img; 20 | image(:,:,2)=img; 21 | image(:,:,3)=img; 22 | 23 | for r =1:size(img,1) 24 | for c=1:size(img,2) 25 | if image(r, c, 1) == 0 % sky 26 | image(r, c, :) = [0, 0, 0]; % black 27 | elseif image(r, c, 1) == 1 % man-made 28 | image(r, c, :) = [0, 0, 255]; % blue 29 | elseif image(r, c, 1) == 2 % road 30 | image(r, c, :) = [255, 0, 0] ; % red 31 | else % vegetation 32 | image(r, c, :) = [0, 255, 0]; % green; 33 | end 34 | end 35 | end 36 | imwrite(image, fullfile(save_folder, image_name)); 37 | end 38 | -------------------------------------------------------------------------------- /selectiongan_v1/scripts/cvusa_prepare.m: -------------------------------------------------------------------------------- 1 | clear all;close all;clc 2 | % cvs_path='./cvusa/splits/train-19zl.csv'; 3 | cvs_path='./cvusa/splits/val-19zl.csv'; 4 | image_path='./cvusa'; 5 | 6 | % save_folder='./cvusa/train'; 7 | save_folder='./cvusa/test'; 8 | 9 | if ~isfolder(save_folder) 10 | mkdir(save_folder) 11 | end 12 | 13 | data = importdata(cvs_path); 14 | for i=1:length(data) 15 | fprintf('%d / %d \n', i, length(data)); 16 | three_name=data{i}; 17 | k = strfind(three_name,','); 18 | a=three_name(1:k(1)-1); 19 | b=three_name(k(1)+1:k(2)-1); 20 | c=three_name(k(2)+1:length(three_name)); 21 | a1=strcat(image_path,'/', a); 22 | b1=strcat(image_path,'/', b); 23 | c1=strcat(image_path,'/', c); 24 | a2=imread(a1); 25 | b2=imread(b1); 26 | c2=imread(c1); 27 | 28 | a3=imresize(a2,[256, 256]); 29 | b3=imresize(b2,[256, 1024]); 30 | c3=imresize(c2,[256,1024]); 31 | d=a3; 32 | d(:,:,:)=0; 33 | 34 | img=[a3,b3,d,c3]; 35 | imwrite(img, fullfile(save_folder, strcat(a(length(a)-11:length(a)-3), 'png'))); 36 | end -------------------------------------------------------------------------------- /selectiongan_v1/scripts/download_selectiongan_model.sh: 
-------------------------------------------------------------------------------- 1 | FILE=$1 2 | 3 | echo "Note: available models are dayton_a2g_256, dayton_g2a_256, cvusa, dayton_a2g_64, dayton_g2a_64, ego2top, sva, ntu, senz3d and radboud" 4 | echo "Specified [$FILE]" 5 | 6 | URL=http://disi.unitn.it/~hao.tang/uploads/models/SelectionGAN/${FILE}_pretrained.tar.gz 7 | TAR_FILE=./checkpoints/${FILE}_pretrained.tar.gz 8 | TARGET_DIR=./checkpoints/${FILE}_pretrained/ 9 | 10 | wget -N $URL -O $TAR_FILE 11 | 12 | mkdir -p $TARGET_DIR 13 | tar -zxvf $TAR_FILE -C ./checkpoints/ 14 | rm $TAR_FILE 15 | -------------------------------------------------------------------------------- /selectiongan_v1/scripts/evaluation/calculate_LPIPS.m: -------------------------------------------------------------------------------- 1 | clear all;close all;clc 2 | 3 | txt_path='./exp_file.txt'; 4 | 5 | data=importdata(txt_path); 6 | number_image=size(data.data,1) 7 | sum_score=sum(data.data); 8 | 9 | final_lpips=sum_score/number_image 10 | 11 | [val, idx] = min(data.data); 12 | id = find(data.data == val); 13 | data.textdata{id}; 14 | -------------------------------------------------------------------------------- /selectiongan_v1/scripts/evaluation/compute_ssim_psnr_sharpness.lua: -------------------------------------------------------------------------------- 1 | -- th compute_ssim_psnr_sharpness.lua ./realimage_folder ./fakeimage_folder 2 | 3 | require 'torch' 4 | require 'paths' 5 | require 'image' 6 | require 'my_image_error_measures' 7 | local lfs = require 'lfs' 8 | 9 | 10 | 11 | path_true = arg[1] 12 | path_synthesized = arg[2] 13 | 14 | local list_of_filenames = {} 15 | local filenamesonly_no_dir = {} 16 | 17 | for file in lfs.dir(path_synthesized) do -- get the list of the files 18 | if file~="." and file~=".." 
then 19 | table.insert(filenamesonly_no_dir, file) 20 | end 21 | end 22 | 23 | local number_of_files = #filenamesonly_no_dir 24 | ssim_synthesized = 0.0 25 | sharpness_synthesized = 0.0 26 | psnr_synthesized = 0.0 27 | 28 | 29 | for inputs = 1, number_of_files do -- number_of_files --define no of images to compute upon 30 | filename = filenamesonly_no_dir[inputs] 31 | 32 | local img_true = path_true..'/'..filename 33 | local img_synthesized = path_synthesized..'/'..filename 34 | local im_true = image.load(img_true) 35 | local im_synthesized = image.load(img_synthesized) 36 | 37 | ssim_synthesized = ssim_synthesized + SSIM(im_true, im_synthesized) 38 | psnr_synthesized = psnr_synthesized + PSNR(im_true, im_synthesized) 39 | sharpness_synthesized = sharpness_synthesized + computel1difference(im_true, im_synthesized) 40 | 41 | if inputs%500 ==0 then 42 | print("..........................................\n") 43 | print("Images into consideration:"..inputs.."\n") 44 | print ("For synthesized: ") 45 | print ("SSIM: "..ssim_synthesized/inputs) 46 | print ("PSNR: "..psnr_synthesized/inputs) 47 | print ("Sharpness: "..sharpness_synthesized/inputs) 48 | print("") 49 | end 50 | 51 | end 52 | 53 | print("\n..........................................\n") 54 | print("Final numbers\n") 55 | print("Images into consideration:"..number_of_files.."\n") 56 | -- print ("For synthesized: ") 57 | print ("SSIM: "..ssim_synthesized/number_of_files) 58 | print ("PSNR: "..psnr_synthesized/number_of_files) 59 | print ("Sharpness: "..sharpness_synthesized/number_of_files) 60 | 61 | ------------------------------------------------------------------------------------------------------------------------------- -------------------------------------------------------------------------------- /selectiongan_v1/scripts/evaluation/split_real_fake.m: -------------------------------------------------------------------------------- 1 | close all; clear all; clc 2 | path='./test_latest/'; 3 | Image_folder= 
strcat(path, 'images'); 4 | save_fake_folder=strcat(path, 'fakeimage_B'); 5 | save_I_folder= strcat(path, 'output_image'); 6 | save_real_folder=strcat(path, 'realimage_B'); 7 | 8 | if ~isdir(save_I_folder) 9 | mkdir(save_I_folder) 10 | end 11 | 12 | if ~isdir(save_fake_folder) 13 | mkdir(save_fake_folder) 14 | end 15 | 16 | if ~isdir(save_real_folder) 17 | mkdir(save_real_folder) 18 | end 19 | 20 | Image = dir( Image_folder ); 21 | for i = 1 : length( Image ) 22 | if( isequal( Image( i ).name, '.' ) || isequal( Image( i ).name, '..' )) 23 | continue; 24 | end 25 | image_name=Image( i ).name; 26 | fprintf('%d / %d \n', i, length(Image)) 27 | if contains(image_name, '_I.png') 28 | copyfile(fullfile(Image_folder, image_name), fullfile(save_I_folder, strcat(image_name(1:length(image_name)-6),'.png'))); 29 | elseif contains(image_name, '_real_B.png') 30 | copyfile(fullfile(Image_folder, image_name), fullfile(save_real_folder, strcat(image_name(1:length(image_name)-11),'.png'))); 31 | elseif contains(image_name, '_fake_B.png') 32 | copyfile(fullfile(Image_folder, image_name), fullfile(save_fake_folder, strcat(image_name(1:length(image_name)-11),'.png'))); 33 | end 34 | end 35 | -------------------------------------------------------------------------------- /selectiongan_v1/scripts/split_real_fake.m: -------------------------------------------------------------------------------- 1 | close all; clear all; clc 2 | path='./results/sva_selectiongan/test_latest/'; 3 | 4 | Image_folder= strcat(path, 'images'); 5 | save_fake_folder=strcat(path, 'fakeimage_B'); 6 | save_I_folder= strcat(path, 'output_image'); 7 | save_real_folder=strcat(path, 'realimage_B'); 8 | 9 | if ~isdir(save_I_folder) 10 | mkdir(save_I_folder) 11 | end 12 | 13 | if ~isdir(save_fake_folder) 14 | mkdir(save_fake_folder) 15 | end 16 | 17 | if ~isdir(save_real_folder) 18 | mkdir(save_real_folder) 19 | end 20 | 21 | Image = dir( Image_folder ); 22 | for i = 1 : length( Image ) 23 | if( isequal( Image( i 
).name, '.' ) || isequal( Image( i ).name, '..' )) 24 | continue; 25 | end 26 | image_name=Image( i ).name; 27 | fprintf('%d / %d \n', i, length(Image)) 28 | if contains(image_name, '_I.png') 29 | copyfile(fullfile(Image_folder, image_name), fullfile(save_I_folder, strcat(image_name(1:length(image_name)-6),'.png'))); 30 | elseif contains(image_name, '_real_B.png') 31 | copyfile(fullfile(Image_folder, image_name), fullfile(save_real_folder, strcat(image_name(1:length(image_name)-11),'.png'))); 32 | elseif contains(image_name, '_fake_B.png') 33 | copyfile(fullfile(Image_folder, image_name), fullfile(save_fake_folder, strcat(image_name(1:length(image_name)-11),'.png'))); 34 | end 35 | end 36 | -------------------------------------------------------------------------------- /selectiongan_v1/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | from options.test_options import TestOptions 3 | from data import CreateDataLoader 4 | from models import create_model 5 | from util.visualizer import save_images 6 | from util import html 7 | 8 | 9 | if __name__ == '__main__': 10 | opt = TestOptions().parse() 11 | opt.nThreads = 1 # test code only supports nThreads = 1 12 | opt.batchSize = 1 # test code only supports batchSize = 1 13 | opt.serial_batches = True # no shuffle 14 | opt.no_flip = True # no flip 15 | opt.display_id = -1 # no visdom display 16 | data_loader = CreateDataLoader(opt) 17 | dataset = data_loader.load_data() 18 | model = create_model(opt) 19 | model.setup(opt) 20 | # create website 21 | web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch)) 22 | webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.which_epoch)) 23 | # test 24 | 25 | # Set eval mode. This only affects layers like batch norm and drop out. 
26 | if opt.eval: 27 | model.eval() 28 | 29 | for i, data in enumerate(dataset): 30 | if i >= opt.how_many: 31 | break 32 | model.set_input(data) 33 | model.test() 34 | visuals = model.get_current_visuals() 35 | img_path = model.get_image_paths() 36 | if i % 5 == 0: 37 | print('processing (%04d)-th image... %s' % (i, img_path)) 38 | save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize) 39 | 40 | webpage.save() 41 | -------------------------------------------------------------------------------- /selectiongan_v1/test_selectiongan.sh: -------------------------------------------------------------------------------- 1 | python test.py --dataroot ./datasets/dayton_a2g --name dayton_a2g_selectiongan --model selectiongan --which_model_netG unet_256 --which_direction AtoB --dataset_mode aligned --norm batch --gpu_ids 0 --batchSize 4 --loadSize 286 --fineSize 256 --no_flip --eval -------------------------------------------------------------------------------- /selectiongan_v1/train_selectiongan.sh: -------------------------------------------------------------------------------- 1 | python train.py --dataroot ./datasets/dayton_a2g --name dayton_a2g_selectiongan --model selectiongan --which_model_netG unet_256 --which_direction AtoB --dataset_mode aligned --norm batch --gpu_ids 0 --batchSize 4 --loadSize 286 --fineSize 256 --no_flip --display_id 1 --lambda_L1 100 --lambda_L1_seg 1 --niter 10 --niter_decay 10 2 | -------------------------------------------------------------------------------- /selectiongan_v1/util/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/util/__init__.py -------------------------------------------------------------------------------- /selectiongan_v1/util/__pycache__/__init__.cpython-36.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/util/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v1/util/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/util/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /selectiongan_v1/util/__pycache__/html.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/util/__pycache__/html.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v1/util/__pycache__/html.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/util/__pycache__/html.cpython-37.pyc -------------------------------------------------------------------------------- /selectiongan_v1/util/__pycache__/image_pool.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/util/__pycache__/image_pool.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v1/util/__pycache__/image_pool.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/util/__pycache__/image_pool.cpython-37.pyc -------------------------------------------------------------------------------- /selectiongan_v1/util/__pycache__/util.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/util/__pycache__/util.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v1/util/__pycache__/util.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/util/__pycache__/util.cpython-37.pyc -------------------------------------------------------------------------------- /selectiongan_v1/util/__pycache__/visualizer.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/util/__pycache__/visualizer.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v1/util/__pycache__/visualizer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v1/util/__pycache__/visualizer.cpython-37.pyc -------------------------------------------------------------------------------- /selectiongan_v1/util/html.py: -------------------------------------------------------------------------------- 1 | import dominate 2 | from dominate.tags import * 3 | import os 4 | 5 | 6 | class HTML: 7 | def __init__(self, web_dir, title, reflesh=0): 8 | self.title = title 
9 | self.web_dir = web_dir 10 | self.img_dir = os.path.join(self.web_dir, 'images') 11 | if not os.path.exists(self.web_dir): 12 | os.makedirs(self.web_dir) 13 | if not os.path.exists(self.img_dir): 14 | os.makedirs(self.img_dir) 15 | # print(self.img_dir) 16 | 17 | self.doc = dominate.document(title=title) 18 | if reflesh > 0: 19 | with self.doc.head: 20 | meta(http_equiv="reflesh", content=str(reflesh)) 21 | 22 | def get_image_dir(self): 23 | return self.img_dir 24 | 25 | def add_header(self, str): 26 | with self.doc: 27 | h3(str) 28 | 29 | def add_table(self, border=1): 30 | self.t = table(border=border, style="table-layout: fixed;") 31 | self.doc.add(self.t) 32 | 33 | def add_images(self, ims, txts, links, width=400): 34 | self.add_table() 35 | with self.t: 36 | with tr(): 37 | for im, txt, link in zip(ims, txts, links): 38 | with td(style="word-wrap: break-word;", halign="center", valign="top"): 39 | with p(): 40 | with a(href=os.path.join('images', link)): 41 | img(style="width:%dpx" % width, src=os.path.join('images', im)) 42 | br() 43 | p(txt) 44 | 45 | def save(self): 46 | html_file = '%s/index.html' % self.web_dir 47 | f = open(html_file, 'wt') 48 | f.write(self.doc.render()) 49 | f.close() 50 | 51 | 52 | if __name__ == '__main__': 53 | html = HTML('web/', 'test_html') 54 | html.add_header('hello world') 55 | 56 | ims = [] 57 | txts = [] 58 | links = [] 59 | for n in range(4): 60 | ims.append('image_%d.png' % n) 61 | txts.append('text_%d' % n) 62 | links.append('image_%d.png' % n) 63 | html.add_images(ims, txts, links) 64 | html.save() 65 | -------------------------------------------------------------------------------- /selectiongan_v1/util/image_pool.py: -------------------------------------------------------------------------------- 1 | import random 2 | import torch 3 | 4 | 5 | class ImagePool(): 6 | def __init__(self, pool_size): 7 | self.pool_size = pool_size 8 | if self.pool_size > 0: 9 | self.num_imgs = 0 10 | self.images = [] 11 | 12 | def 
query(self, images): 13 | if self.pool_size == 0: 14 | return images 15 | return_images = [] 16 | for image in images: 17 | image = torch.unsqueeze(image.data, 0) 18 | if self.num_imgs < self.pool_size: 19 | self.num_imgs = self.num_imgs + 1 20 | self.images.append(image) 21 | return_images.append(image) 22 | else: 23 | p = random.uniform(0, 1) 24 | if p > 0.5: 25 | random_id = random.randint(0, self.pool_size - 1) # randint is inclusive 26 | tmp = self.images[random_id].clone() 27 | self.images[random_id] = image 28 | return_images.append(tmp) 29 | else: 30 | return_images.append(image) 31 | return_images = torch.cat(return_images, 0) 32 | return return_images 33 | -------------------------------------------------------------------------------- /selectiongan_v1/util/util.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import torch 3 | import numpy as np 4 | from PIL import Image 5 | import os 6 | 7 | 8 | # Converts a Tensor into an image array (numpy) 9 | # |imtype|: the desired type of the converted numpy array 10 | def tensor2im(input_image, imtype=np.uint8): 11 | if isinstance(input_image, torch.Tensor): 12 | image_tensor = input_image.data 13 | else: 14 | return input_image 15 | image_numpy = image_tensor[0].cpu().float().numpy() 16 | if image_numpy.shape[0] == 1: 17 | image_numpy = np.tile(image_numpy, (3, 1, 1)) 18 | image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0 19 | return image_numpy.astype(imtype) 20 | 21 | 22 | def diagnose_network(net, name='network'): 23 | mean = 0.0 24 | count = 0 25 | for param in net.parameters(): 26 | if param.grad is not None: 27 | mean += torch.mean(torch.abs(param.grad.data)) 28 | count += 1 29 | if count > 0: 30 | mean = mean / count 31 | print(name) 32 | print(mean) 33 | 34 | 35 | def save_image(image_numpy, image_path): 36 | image_pil = Image.fromarray(image_numpy) 37 | image_pil.save(image_path) 38 | 39 | 40 | def 
print_numpy(x, val=True, shp=False): 41 | x = x.astype(np.float64) 42 | if shp: 43 | print('shape,', x.shape) 44 | if val: 45 | x = x.flatten() 46 | print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % ( 47 | np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x))) 48 | 49 | 50 | def mkdirs(paths): 51 | if isinstance(paths, list) and not isinstance(paths, str): 52 | for path in paths: 53 | mkdir(path) 54 | else: 55 | mkdir(paths) 56 | 57 | 58 | def mkdir(path): 59 | if not os.path.exists(path): 60 | os.makedirs(path) 61 | -------------------------------------------------------------------------------- /selectiongan_v2/README.md: -------------------------------------------------------------------------------- 1 | The repository offers the official implementation of our SelectionGAN++ in PyTorch. 2 | 3 | ## Data 4 | For more details, please follow up [selectiongan_v1](https://github.com/Ha0Tang/SelectionGAN/tree/master/selectiongan_v1). 5 | 6 | ## Train 7 | sh train.sh 8 | 9 | ## Test 10 | sh test.sh -------------------------------------------------------------------------------- /selectiongan_v2/data/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/data/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v2/data/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/data/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /selectiongan_v2/data/__pycache__/aligned_dataset.cpython-36.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/data/__pycache__/aligned_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v2/data/__pycache__/aligned_dataset.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/data/__pycache__/aligned_dataset.cpython-37.pyc -------------------------------------------------------------------------------- /selectiongan_v2/data/__pycache__/base_data_loader.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/data/__pycache__/base_data_loader.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v2/data/__pycache__/base_data_loader.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/data/__pycache__/base_data_loader.cpython-37.pyc -------------------------------------------------------------------------------- /selectiongan_v2/data/__pycache__/base_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/data/__pycache__/base_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v2/data/__pycache__/base_dataset.cpython-37.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/data/__pycache__/base_dataset.cpython-37.pyc -------------------------------------------------------------------------------- /selectiongan_v2/data/__pycache__/image_folder.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/data/__pycache__/image_folder.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v2/data/__pycache__/image_folder.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/data/__pycache__/image_folder.cpython-37.pyc -------------------------------------------------------------------------------- /selectiongan_v2/data/__pycache__/single_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/data/__pycache__/single_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v2/data/base_data_loader.py: -------------------------------------------------------------------------------- 1 | class BaseDataLoader(): 2 | def __init__(self): 3 | pass 4 | 5 | def initialize(self, opt): 6 | self.opt = opt 7 | pass 8 | 9 | def load_data(): 10 | return None 11 | -------------------------------------------------------------------------------- /selectiongan_v2/data/image_folder.py: -------------------------------------------------------------------------------- 1 | 
############################################################################### 2 | # Code from 3 | # https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py 4 | # Modified the original code so that it also loads images from the current 5 | # directory as well as the subdirectories 6 | ############################################################################### 7 | 8 | import torch.utils.data as data 9 | 10 | from PIL import Image 11 | import os 12 | import os.path 13 | 14 | IMG_EXTENSIONS = [ 15 | '.jpg', '.JPG', '.jpeg', '.JPEG', 16 | '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', 17 | ] 18 | 19 | 20 | def is_image_file(filename): 21 | return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) 22 | 23 | 24 | def make_dataset(dir): 25 | images = [] 26 | assert os.path.isdir(dir), '%s is not a valid directory' % dir 27 | 28 | for root, _, fnames in sorted(os.walk(dir)): 29 | for fname in fnames: 30 | if is_image_file(fname): 31 | path = os.path.join(root, fname) 32 | images.append(path) 33 | 34 | return images 35 | 36 | 37 | def default_loader(path): 38 | return Image.open(path).convert('RGB') 39 | 40 | 41 | class ImageFolder(data.Dataset): 42 | 43 | def __init__(self, root, transform=None, return_paths=False, 44 | loader=default_loader): 45 | imgs = make_dataset(root) 46 | if len(imgs) == 0: 47 | raise(RuntimeError("Found 0 images in: " + root + "\n" 48 | "Supported image extensions are: " + 49 | ",".join(IMG_EXTENSIONS))) 50 | 51 | self.root = root 52 | self.imgs = imgs 53 | self.transform = transform 54 | self.return_paths = return_paths 55 | self.loader = loader 56 | 57 | def __getitem__(self, index): 58 | path = self.imgs[index] 59 | img = self.loader(path) 60 | if self.transform is not None: 61 | img = self.transform(img) 62 | if self.return_paths: 63 | return img, path 64 | else: 65 | return img 66 | 67 | def __len__(self): 68 | return len(self.imgs) 69 | 
-------------------------------------------------------------------------------- /selectiongan_v2/models/__init__.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | from models.base_model import BaseModel 3 | 4 | 5 | def find_model_using_name(model_name): 6 | # Given the option --model [modelname], 7 | # the file "models/modelname_model.py" 8 | # will be imported. 9 | model_filename = "models." + model_name + "_model" 10 | modellib = importlib.import_module(model_filename) 11 | 12 | # In the file, the class called ModelNameModel() will 13 | # be instantiated. It has to be a subclass of BaseModel, 14 | # and it is case-insensitive. 15 | model = None 16 | target_model_name = model_name.replace('_', '') + 'model' 17 | for name, cls in modellib.__dict__.items(): 18 | if name.lower() == target_model_name.lower() \ 19 | and issubclass(cls, BaseModel): 20 | model = cls 21 | 22 | if model is None: 23 | print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." 
% (model_filename, target_model_name)) 24 | exit(0) 25 | 26 | return model 27 | 28 | 29 | def get_option_setter(model_name): 30 | model_class = find_model_using_name(model_name) 31 | return model_class.modify_commandline_options 32 | 33 | 34 | def create_model(opt): 35 | model = find_model_using_name(opt.model) 36 | instance = model() 37 | instance.initialize(opt) 38 | print("model [%s] was created" % (instance.name())) 39 | return instance 40 | -------------------------------------------------------------------------------- /selectiongan_v2/models/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/models/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v2/models/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/models/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /selectiongan_v2/models/__pycache__/base_model.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/models/__pycache__/base_model.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v2/models/__pycache__/base_model.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/models/__pycache__/base_model.cpython-37.pyc 
-------------------------------------------------------------------------------- /selectiongan_v2/models/__pycache__/cycle_gan_model.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/models/__pycache__/cycle_gan_model.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v2/models/__pycache__/networks.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/models/__pycache__/networks.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v2/models/__pycache__/networks.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/models/__pycache__/networks.cpython-37.pyc -------------------------------------------------------------------------------- /selectiongan_v2/models/__pycache__/pix2pix_model.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/models/__pycache__/pix2pix_model.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v2/models/__pycache__/selectiongan_model.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/models/__pycache__/selectiongan_model.cpython-36.pyc 
-------------------------------------------------------------------------------- /selectiongan_v2/models/__pycache__/selectiongan_model.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/models/__pycache__/selectiongan_model.cpython-37.pyc -------------------------------------------------------------------------------- /selectiongan_v2/models/__pycache__/test_model.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/models/__pycache__/test_model.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v2/options/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/options/__init__.py -------------------------------------------------------------------------------- /selectiongan_v2/options/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/options/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v2/options/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/options/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- 
/selectiongan_v2/options/__pycache__/base_options.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/options/__pycache__/base_options.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v2/options/__pycache__/base_options.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/options/__pycache__/base_options.cpython-37.pyc -------------------------------------------------------------------------------- /selectiongan_v2/options/__pycache__/test_options.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/options/__pycache__/test_options.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v2/options/__pycache__/test_options.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/options/__pycache__/test_options.cpython-37.pyc -------------------------------------------------------------------------------- /selectiongan_v2/options/__pycache__/train_options.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/options/__pycache__/train_options.cpython-36.pyc -------------------------------------------------------------------------------- 
from .base_options import BaseOptions


class TestOptions(BaseOptions):
    """Command-line options used only at test time; extends the shared BaseOptions."""

    def initialize(self, parser):
        # Start from the options shared between training and testing.
        parser = BaseOptions.initialize(self, parser)
        parser.add_argument('--ntest', type=int, default=float("inf"), help='# of test examples.')
        parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
        parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
        parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
        parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        # Dropout and Batchnorm have different behaviour during training and test,
        # so --eval lets the test script switch the model to eval mode.
        parser.add_argument('--eval', action='store_true', help='use eval mode during test time.')
        parser.add_argument('--how_many', type=int, default=5, help='how many test images to run')

        parser.set_defaults(model='pix2pix')
        # To avoid cropping, the loadSize should be the same as fineSize
        parser.set_defaults(loadSize=parser.get_default('fineSize'))

        self.isTrain = False
        return parser
import os
from options.test_options import TestOptions
from data import CreateDataLoader
from models import create_model
from util.visualizer import save_images
from util import html


if __name__ == '__main__':
    # Parse test-time options, then pin the settings this script requires.
    opt = TestOptions().parse()
    opt.nThreads = 1   # test code only supports nThreads = 1
    opt.batchSize = 1  # test code only supports batchSize = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True  # no flip
    opt.display_id = -1  # no visdom display
    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    model = create_model(opt)
    model.setup(opt)
    # create website: results land under <results_dir>/<name>/<phase>_<epoch>/
    web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
    webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.which_epoch))
    # test

    # Set eval mode. This only affects layers like batch norm and drop out.
    if opt.eval:
        model.eval()

    for i, data in enumerate(dataset):
        # --how_many caps the number of images processed.
        if i >= opt.how_many:
            break
        model.set_input(data)
        model.test()
        visuals = model.get_current_visuals()
        img_path = model.get_image_paths()
        # Progress message every 5 images.
        if i % 5 == 0:
            print('processing (%04d)-th image... %s' % (i, img_path))
        save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize)

    webpage.save()
https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/util/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v2/util/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/util/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /selectiongan_v2/util/__pycache__/html.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/util/__pycache__/html.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v2/util/__pycache__/html.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/util/__pycache__/html.cpython-37.pyc -------------------------------------------------------------------------------- /selectiongan_v2/util/__pycache__/image_pool.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/util/__pycache__/image_pool.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v2/util/__pycache__/image_pool.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/util/__pycache__/image_pool.cpython-37.pyc -------------------------------------------------------------------------------- /selectiongan_v2/util/__pycache__/util.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/util/__pycache__/util.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v2/util/__pycache__/util.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/util/__pycache__/util.cpython-37.pyc -------------------------------------------------------------------------------- /selectiongan_v2/util/__pycache__/visualizer.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/util/__pycache__/visualizer.cpython-36.pyc -------------------------------------------------------------------------------- /selectiongan_v2/util/__pycache__/visualizer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/selectiongan_v2/util/__pycache__/visualizer.cpython-37.pyc -------------------------------------------------------------------------------- /selectiongan_v2/util/html.py: -------------------------------------------------------------------------------- 1 | import dominate 2 | from dominate.tags import * 3 | import os 4 | 5 | 6 | class HTML: 7 | def __init__(self, web_dir, title, reflesh=0): 8 | self.title = title 
import random
import torch


class ImagePool():
    """History buffer of previously generated images.

    query() returns a mix of fresh and stored images so the discriminator
    trains against a history of generations rather than only the most
    recent batch.
    """

    def __init__(self, pool_size):
        # pool_size == 0 disables the buffer entirely.
        self.pool_size = pool_size
        if self.pool_size > 0:
            self.num_imgs = 0
            self.images = []

    def query(self, images):
        """Return a batch the same size as `images`, possibly swapping in stored ones."""
        if self.pool_size == 0:
            # Buffer disabled: hand the batch back untouched.
            return images
        batch_out = []
        for img in images:
            img = torch.unsqueeze(img.data, 0)
            if self.num_imgs < self.pool_size:
                # Still filling the buffer: store and return the new image.
                self.num_imgs += 1
                self.images.append(img)
                batch_out.append(img)
                continue
            # Buffer full: with probability 0.5 trade the new image for a
            # stored one, otherwise pass the new image straight through.
            if random.uniform(0, 1) > 0.5:
                idx = random.randint(0, self.pool_size - 1)  # randint is inclusive
                previous = self.images[idx].clone()
                self.images[idx] = img
                batch_out.append(previous)
            else:
                batch_out.append(img)
        return torch.cat(batch_out, 0)
def print_numpy(x, val=True, shp=False):
    """Print summary statistics (and optionally the shape) of a numpy array."""
    x = x.astype(np.float64)
    if shp:
        print('shape,', x.shape)
    if val:
        x = x.flatten()
        print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
            np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))


def mkdirs(paths):
    """Create every directory in `paths` (a list of paths, or a single path)."""
    if isinstance(paths, list) and not isinstance(paths, str):
        for entry in paths:
            mkdir(entry)
    else:
        mkdir(paths)


def mkdir(path):
    """Create `path` (including parents) unless it already exists."""
    if not os.path.exists(path):
        os.makedirs(path)
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""

import importlib
import torch.utils.data
from data.base_dataset import BaseDataset


def find_dataset_using_name(dataset_name):
    """Import data/<dataset_name>_dataset.py and return its BaseDataset subclass.

    The class is looked up case-insensitively as <dataset_name> with
    underscores removed plus the suffix "dataset".
    """
    dataset_filename = "data." + dataset_name + "_dataset"
    datasetlib = importlib.import_module(dataset_filename)

    target_dataset_name = dataset_name.replace('_', '') + 'dataset'
    dataset = None
    for attr_name, candidate in datasetlib.__dict__.items():
        if attr_name.lower() == target_dataset_name.lower() \
           and issubclass(candidate, BaseDataset):
            dataset = candidate

    if dataset is None:
        raise ValueError("In %s.py, there should be a subclass of BaseDataset "
                         "with class name that matches %s in lowercase." %
                         (dataset_filename, target_dataset_name))

    return dataset


def get_option_setter(dataset_name):
    """Return the dataset class's modify_commandline_options hook."""
    return find_dataset_using_name(dataset_name).modify_commandline_options


def create_dataloader(opt):
    """Build the dataset named by opt.dataset_mode and wrap it in a DataLoader."""
    dataset_class = find_dataset_using_name(opt.dataset_mode)
    instance = dataset_class()
    instance.initialize(opt)
    print("dataset [%s] of size %d was created" %
          (type(instance).__name__, len(instance)))
    return torch.utils.data.DataLoader(
        instance,
        batch_size=opt.batchSize,
        shuffle=not opt.serial_batches,
        num_workers=int(opt.nThreads),
        drop_last=opt.isTrain
    )
/semantic_synthesis/data/__pycache__/ade20k_dataset.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/data/__pycache__/ade20k_dataset.cpython-37.pyc -------------------------------------------------------------------------------- /semantic_synthesis/data/__pycache__/base_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/data/__pycache__/base_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /semantic_synthesis/data/__pycache__/base_dataset.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/data/__pycache__/base_dataset.cpython-37.pyc -------------------------------------------------------------------------------- /semantic_synthesis/data/__pycache__/cityscapes_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/data/__pycache__/cityscapes_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /semantic_synthesis/data/__pycache__/image_folder.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/data/__pycache__/image_folder.cpython-36.pyc -------------------------------------------------------------------------------- 
/semantic_synthesis/data/__pycache__/image_folder.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/data/__pycache__/image_folder.cpython-37.pyc -------------------------------------------------------------------------------- /semantic_synthesis/data/__pycache__/pix2pix_dataset.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/data/__pycache__/pix2pix_dataset.cpython-36.pyc -------------------------------------------------------------------------------- /semantic_synthesis/data/__pycache__/pix2pix_dataset.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/data/__pycache__/pix2pix_dataset.cpython-37.pyc -------------------------------------------------------------------------------- /semantic_synthesis/data/ade20k_dataset.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 
from data.pix2pix_dataset import Pix2pixDataset
from data.image_folder import make_dataset


class ADE20KDataset(Pix2pixDataset):
    """ADE20K semantic-synthesis dataset: .jpg photos paired with .png label maps."""

    @staticmethod
    def modify_commandline_options(parser, is_train):
        parser = Pix2pixDataset.modify_commandline_options(parser, is_train)
        parser.set_defaults(preprocess_mode='resize_and_crop')
        # Train on slightly enlarged images so the 256 crop augments the data.
        if is_train:
            parser.set_defaults(load_size=286)
        else:
            parser.set_defaults(load_size=256)
        parser.set_defaults(crop_size=256)
        parser.set_defaults(display_winsize=256)
        parser.set_defaults(label_nc=150)
        parser.set_defaults(contain_dontcare_label=True)
        parser.set_defaults(cache_filelist_read=False)
        parser.set_defaults(cache_filelist_write=False)
        parser.set_defaults(no_instance=True)
        return parser

    def get_paths(self, opt):
        root = opt.dataroot
        phase = 'val' if opt.phase == 'test' else 'train'

        # ADE20K ships images and labels side by side; split them by extension
        # and keep only files belonging to the requested phase.
        all_images = make_dataset(root, recursive=True, read_cache=False, write_cache=False)
        image_paths = []
        label_paths = []
        for p in all_images:
            if '_%s_' % phase not in p:
                continue
            if p.endswith('.jpg'):
                image_paths.append(p)
            elif p.endswith('.png'):
                label_paths.append(p)

        instance_paths = []  # don't use instance map for ade20k

        return label_paths, image_paths, instance_paths

    # In ADE20k, 'unknown' label is of value 0.
    # Change the 'unknown' label to the last label to match other datasets.
    def postprocess(self, input_dict):
        label = input_dict['label']
        # `label - 1` creates a NEW tensor, so the remapped result must be
        # written back into input_dict; the previous version dropped it,
        # leaving the labels in the dict untouched.
        label = label - 1
        label[label == -1] = self.opt.label_nc
        input_dict['label'] = label
import os.path
from data.pix2pix_dataset import Pix2pixDataset
from data.image_folder import make_dataset


class CityscapesDataset(Pix2pixDataset):
    """Cityscapes dataset: gtFine label maps paired with leftImg8bit photos."""

    @staticmethod
    def modify_commandline_options(parser, is_train):
        parser = Pix2pixDataset.modify_commandline_options(parser, is_train)
        parser.set_defaults(preprocess_mode='fixed')
        parser.set_defaults(load_size=512)
        parser.set_defaults(crop_size=512)
        parser.set_defaults(display_winsize=512)
        parser.set_defaults(label_nc=35)
        parser.set_defaults(aspect_ratio=2.0)
        parser.set_defaults(batchSize=16)
        # Only override num_upsampling_layers when the generator defines it.
        opt, _ = parser.parse_known_args()
        if hasattr(opt, 'num_upsampling_layers'):
            parser.set_defaults(num_upsampling_layers='more')
        return parser

    def get_paths(self, opt):
        phase = 'val' if opt.phase == 'test' else 'train'

        # Semantic labels live under gtFine/<phase>; keep only *_labelIds.png.
        label_dir = os.path.join(opt.dataroot, 'gtFine', phase)
        all_label_files = make_dataset(label_dir, recursive=True)
        label_paths = [p for p in all_label_files if p.endswith('_labelIds.png')]

        image_dir = os.path.join(opt.dataroot, 'leftImg8bit', phase)
        image_paths = make_dataset(image_dir, recursive=True)

        if not opt.no_instance:
            instance_paths = [p for p in all_label_files if p.endswith('_instanceIds.png')]
        else:
            instance_paths = []

        return label_paths, image_paths, instance_paths

    def paths_match(self, path1, path2):
        # Cityscapes names look like [city]_[id1]_[id2]_<suffix>; two files
        # correspond when their first three underscore-separated parts agree.
        first = os.path.basename(path1).split('_')[:3]
        second = os.path.basename(path2).split('_')[:3]
        return first == second
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""

import os.path
from data.pix2pix_dataset import Pix2pixDataset
from data.image_folder import make_dataset


class FacadesDataset(Pix2pixDataset):
    """Dataset wrapper for CMP Facades: 13-class building-facade labels
    paired with photographs, laid out as <phase>_label / <phase>_img dirs."""

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Apply Facades-specific defaults on top of the base defaults."""
        parser = Pix2pixDataset.modify_commandline_options(parser, is_train)
        parser.set_defaults(
            dataroot='./dataset/facades/',
            preprocess_mode='resize_and_crop',
            # Train at 286 so the random 256 crop adds augmentation.
            load_size=286 if is_train else 256,
            crop_size=256,
            display_winsize=256,
            label_nc=13,
            contain_dontcare_label=False,
            no_instance=True,
        )
        return parser

    def get_paths(self, opt):
        """Return (label_paths, image_paths, instance_paths) for the phase."""
        root = opt.dataroot
        # Test-time evaluation runs on the validation split.
        phase = 'val' if opt.phase == 'test' else opt.phase

        label_paths = make_dataset(
            os.path.join(root, '%s_label' % phase), recursive=False, read_cache=True)
        image_paths = make_dataset(
            os.path.join(root, '%s_img' % phase), recursive=False, read_cache=True)

        # Facades has no instance maps.
        return label_paths, image_paths, []
-------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_img/000000029286.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_img/000000029286.jpg -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_img/000000138805.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_img/000000138805.jpg -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_img/000000184101.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_img/000000184101.jpg -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_img/000000197384.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_img/000000197384.jpg -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_img/000000203744.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_img/000000203744.jpg 
-------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_img/000000284465.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_img/000000284465.jpg -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_img/000000350505.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_img/000000350505.jpg -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_img/000000371376.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_img/000000371376.jpg -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_img/000000426773.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_img/000000426773.jpg -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_img/000000475177.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_img/000000475177.jpg 
-------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_img/000000500044.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_img/000000500044.jpg -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_img/000000580986.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_img/000000580986.jpg -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_inst/000000017914.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_inst/000000017914.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_inst/000000029286.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_inst/000000029286.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_inst/000000138805.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_inst/000000138805.png 
-------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_inst/000000184101.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_inst/000000184101.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_inst/000000197384.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_inst/000000197384.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_inst/000000203744.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_inst/000000203744.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_inst/000000284465.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_inst/000000284465.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_inst/000000350505.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_inst/000000350505.png 
-------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_inst/000000371376.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_inst/000000371376.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_inst/000000426773.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_inst/000000426773.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_inst/000000475177.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_inst/000000475177.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_inst/000000500044.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_inst/000000500044.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_inst/000000580986.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_inst/000000580986.png 
-------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_label/000000017914.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_label/000000017914.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_label/000000029286.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_label/000000029286.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_label/000000138805.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_label/000000138805.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_label/000000184101.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_label/000000184101.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_label/000000197384.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_label/000000197384.png 
-------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_label/000000203744.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_label/000000203744.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_label/000000284465.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_label/000000284465.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_label/000000350505.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_label/000000350505.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_label/000000371376.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_label/000000371376.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_label/000000426773.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_label/000000426773.png 
-------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_label/000000475177.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_label/000000475177.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_label/000000500044.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_label/000000500044.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/train_label/000000580986.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/train_label/000000580986.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/val_img/000000000139.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/val_img/000000000139.jpg -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/val_img/000000000785.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/val_img/000000000785.jpg 
-------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/val_img/000000001268.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/val_img/000000001268.jpg -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/val_img/000000001490.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/val_img/000000001490.jpg -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/val_img/000000001503.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/val_img/000000001503.jpg -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/val_img/000000001584.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/val_img/000000001584.jpg -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/val_img/000000001818.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/val_img/000000001818.jpg 
-------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/val_img/000000001993.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/val_img/000000001993.jpg -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/val_inst/000000000139.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/val_inst/000000000139.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/val_inst/000000000785.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/val_inst/000000000785.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/val_inst/000000001268.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/val_inst/000000001268.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/val_inst/000000001490.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/val_inst/000000001490.png 
-------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/val_inst/000000001503.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/val_inst/000000001503.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/val_inst/000000001584.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/val_inst/000000001584.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/val_inst/000000001818.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/val_inst/000000001818.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/val_inst/000000001993.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/val_inst/000000001993.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/val_label/000000000139.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/val_label/000000000139.png 
-------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/val_label/000000000785.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/val_label/000000000785.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/val_label/000000001268.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/val_label/000000001268.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/val_label/000000001490.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/val_label/000000001490.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/val_label/000000001503.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/val_label/000000001503.png -------------------------------------------------------------------------------- /semantic_synthesis/datasets/coco_stuff/val_label/000000001584.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/datasets/coco_stuff/val_label/000000001584.png 
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""

import importlib
import torch


def find_model_using_name(model_name):
    """Import models/<model_name>_model.py and return its model class.

    Given the option --model [modelname], the file "models/modelname_model.py"
    is imported and searched for a class named ModelNameModel (matched
    case-insensitively) that subclasses torch.nn.Module.
    """
    model_filename = "models." + model_name + "_model"
    modellib = importlib.import_module(model_filename)

    model = None
    target_model_name = model_name.replace('_', '') + 'model'
    for name, cls in modellib.__dict__.items():
        # Guard with isinstance: a module dict also contains functions and
        # constants, and issubclass() raises TypeError on non-classes.
        if name.lower() == target_model_name.lower() \
                and isinstance(cls, type) \
                and issubclass(cls, torch.nn.Module):
            model = cls

    if model is None:
        print("In %s.py, there should be a subclass of torch.nn.Module "
              "with class name that matches %s in lowercase."
              % (model_filename, target_model_name))
        # exit(0) would report success to the caller on this fatal error;
        # use a nonzero status instead.
        exit(1)

    return model


def get_option_setter(model_name):
    """Return the modify_commandline_options hook of the named model class."""
    model_class = find_model_using_name(model_name)
    return model_class.modify_commandline_options


def create_model(opt):
    """Instantiate the model selected by opt.model and return it."""
    model = find_model_using_name(opt.model)
    instance = model(opt)
    print("model [%s] was created" % (type(instance).__name__))

    return instance
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""

import torch
from models.networks.base_network import BaseNetwork
from models.networks.loss import *
from models.networks.discriminator import *
from models.networks.generator import *
from models.networks.encoder import *
import util.util as util


def find_network_using_name(target_network_name, filename):
    """Look up the class <name><filename> in models/networks/<filename>.py.

    E.g. find_network_using_name('spade', 'generator') resolves the class
    SPADEGenerator (matching is delegated to util.find_class_in_module).
    """
    target_class_name = target_network_name + filename
    module_name = 'models.networks.' + filename
    network = util.find_class_in_module(target_class_name, module_name)

    # Explicit raise instead of assert: asserts are stripped under
    # `python -O`, and a mis-registered network must always fail loudly.
    if not issubclass(network, BaseNetwork):
        raise TypeError(
            "Class %s should be a subclass of BaseNetwork" % network)

    return network


def modify_commandline_options(parser, is_train):
    """Let the selected generator/discriminator/encoder add their options."""
    opt, _ = parser.parse_known_args()

    netG_cls = find_network_using_name(opt.netG, 'generator')
    parser = netG_cls.modify_commandline_options(parser, is_train)
    if is_train:
        # Discriminator options only matter during training.
        netD_cls = find_network_using_name(opt.netD, 'discriminator')
        parser = netD_cls.modify_commandline_options(parser, is_train)
    netE_cls = find_network_using_name('conv', 'encoder')
    parser = netE_cls.modify_commandline_options(parser, is_train)

    return parser


def create_network(cls, opt):
    """Instantiate, (optionally) move to GPU, and weight-initialize a network."""
    net = cls(opt)
    net.print_network()
    if len(opt.gpu_ids) > 0:
        # Explicit raise instead of assert (stripped under `python -O`).
        if not torch.cuda.is_available():
            raise RuntimeError(
                "gpu_ids were specified but CUDA is not available")
        net.cuda()
    net.init_weights(opt.init_type, opt.init_variance)
    return net


def define_G(opt):
    """Build the generator selected by opt.netG."""
    netG_cls = find_network_using_name(opt.netG, 'generator')
    return create_network(netG_cls, opt)


def define_D(opt):
    """Build the discriminator selected by opt.netD."""
    netD_cls = find_network_using_name(opt.netD, 'discriminator')
    return create_network(netD_cls, opt)


def define_E(opt):
    """Build the image encoder (there exists only one encoder type)."""
    netE_cls = find_network_using_name('conv', 'encoder')
    return create_network(netE_cls, opt)
/semantic_synthesis/models/networks/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/models/networks/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /semantic_synthesis/models/networks/__pycache__/architecture.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/models/networks/__pycache__/architecture.cpython-36.pyc -------------------------------------------------------------------------------- /semantic_synthesis/models/networks/__pycache__/architecture.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/models/networks/__pycache__/architecture.cpython-37.pyc -------------------------------------------------------------------------------- /semantic_synthesis/models/networks/__pycache__/base_network.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/models/networks/__pycache__/base_network.cpython-36.pyc -------------------------------------------------------------------------------- /semantic_synthesis/models/networks/__pycache__/base_network.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/models/networks/__pycache__/base_network.cpython-37.pyc 
-------------------------------------------------------------------------------- /semantic_synthesis/models/networks/__pycache__/discriminator.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/models/networks/__pycache__/discriminator.cpython-36.pyc -------------------------------------------------------------------------------- /semantic_synthesis/models/networks/__pycache__/discriminator.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/models/networks/__pycache__/discriminator.cpython-37.pyc -------------------------------------------------------------------------------- /semantic_synthesis/models/networks/__pycache__/encoder.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/models/networks/__pycache__/encoder.cpython-36.pyc -------------------------------------------------------------------------------- /semantic_synthesis/models/networks/__pycache__/encoder.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/models/networks/__pycache__/encoder.cpython-37.pyc -------------------------------------------------------------------------------- /semantic_synthesis/models/networks/__pycache__/generator.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/models/networks/__pycache__/generator.cpython-36.pyc -------------------------------------------------------------------------------- /semantic_synthesis/models/networks/__pycache__/generator.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/models/networks/__pycache__/generator.cpython-37.pyc -------------------------------------------------------------------------------- /semantic_synthesis/models/networks/__pycache__/loss.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/models/networks/__pycache__/loss.cpython-36.pyc -------------------------------------------------------------------------------- /semantic_synthesis/models/networks/__pycache__/loss.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/models/networks/__pycache__/loss.cpython-37.pyc -------------------------------------------------------------------------------- /semantic_synthesis/models/networks/__pycache__/normalization.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/models/networks/__pycache__/normalization.cpython-36.pyc -------------------------------------------------------------------------------- /semantic_synthesis/models/networks/__pycache__/normalization.cpython-37.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/models/networks/__pycache__/normalization.cpython-37.pyc -------------------------------------------------------------------------------- /semantic_synthesis/models/networks/encoder.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 4 | """ 5 | 6 | import torch.nn as nn 7 | import numpy as np 8 | import torch.nn.functional as F 9 | from models.networks.base_network import BaseNetwork 10 | from models.networks.normalization import get_nonspade_norm_layer 11 | 12 | 13 | class ConvEncoder(BaseNetwork): 14 | """ Same architecture as the image discriminator """ 15 | 16 | def __init__(self, opt): 17 | super().__init__() 18 | 19 | kw = 3 20 | pw = int(np.ceil((kw - 1.0) / 2)) 21 | ndf = opt.ngf 22 | norm_layer = get_nonspade_norm_layer(opt, opt.norm_E) 23 | self.layer1 = norm_layer(nn.Conv2d(3, ndf, kw, stride=2, padding=pw)) 24 | self.layer2 = norm_layer(nn.Conv2d(ndf * 1, ndf * 2, kw, stride=2, padding=pw)) 25 | self.layer3 = norm_layer(nn.Conv2d(ndf * 2, ndf * 4, kw, stride=2, padding=pw)) 26 | self.layer4 = norm_layer(nn.Conv2d(ndf * 4, ndf * 8, kw, stride=2, padding=pw)) 27 | self.layer5 = norm_layer(nn.Conv2d(ndf * 8, ndf * 8, kw, stride=2, padding=pw)) 28 | if opt.crop_size >= 256: 29 | self.layer6 = norm_layer(nn.Conv2d(ndf * 8, ndf * 8, kw, stride=2, padding=pw)) 30 | 31 | self.so = s0 = 4 32 | self.fc_mu = nn.Linear(ndf * 8 * s0 * s0, 256) 33 | self.fc_var = nn.Linear(ndf * 8 * s0 * s0, 256) 34 | 35 | self.actvn = nn.LeakyReLU(0.2, False) 36 | self.opt = opt 37 | 38 | def forward(self, x): 39 | if x.size(2) != 256 or x.size(3) != 256: 40 | x = 
F.interpolate(x, size=(256, 256), mode='bilinear') 41 | 42 | x = self.layer1(x) 43 | x = self.layer2(self.actvn(x)) 44 | x = self.layer3(self.actvn(x)) 45 | x = self.layer4(self.actvn(x)) 46 | x = self.layer5(self.actvn(x)) 47 | if self.opt.crop_size >= 256: 48 | x = self.layer6(self.actvn(x)) 49 | x = self.actvn(x) 50 | 51 | x = x.view(x.size(0), -1) 52 | mu = self.fc_mu(x) 53 | logvar = self.fc_var(x) 54 | 55 | return mu, logvar 56 | -------------------------------------------------------------------------------- /semantic_synthesis/options/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 4 | """ -------------------------------------------------------------------------------- /semantic_synthesis/options/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/options/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /semantic_synthesis/options/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/options/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /semantic_synthesis/options/__pycache__/base_options.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/options/__pycache__/base_options.cpython-36.pyc 
-------------------------------------------------------------------------------- /semantic_synthesis/options/__pycache__/base_options.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/options/__pycache__/base_options.cpython-37.pyc -------------------------------------------------------------------------------- /semantic_synthesis/options/__pycache__/test_options.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/options/__pycache__/test_options.cpython-36.pyc -------------------------------------------------------------------------------- /semantic_synthesis/options/__pycache__/train_options.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/options/__pycache__/train_options.cpython-36.pyc -------------------------------------------------------------------------------- /semantic_synthesis/options/__pycache__/train_options.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/options/__pycache__/train_options.cpython-37.pyc -------------------------------------------------------------------------------- /semantic_synthesis/options/test_options.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 
4 | """ 5 | 6 | from .base_options import BaseOptions 7 | 8 | 9 | class TestOptions(BaseOptions): 10 | def initialize(self, parser): 11 | BaseOptions.initialize(self, parser) 12 | parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.') 13 | parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model') 14 | parser.add_argument('--how_many', type=int, default=float("inf"), help='how many test images to run') 15 | 16 | parser.set_defaults(preprocess_mode='scale_width_and_crop', crop_size=256, load_size=256, display_winsize=256) 17 | parser.set_defaults(serial_batches=True) 18 | parser.set_defaults(no_flip=True) 19 | parser.set_defaults(phase='test') 20 | self.isTrain = False 21 | return parser 22 | -------------------------------------------------------------------------------- /semantic_synthesis/requirements.txt: -------------------------------------------------------------------------------- 1 | torch>=1.0.0 2 | torchvision 3 | dominate>=2.3.1 4 | dill 5 | scikit-image 6 | -------------------------------------------------------------------------------- /semantic_synthesis/scripts/download_selectiongan_model.sh: -------------------------------------------------------------------------------- 1 | FILE=$1 2 | 3 | echo "Note: available models are cityscapes and ade20k" 4 | echo "Specified [$FILE]" 5 | 6 | URL=http://disi.unitn.it/~hao.tang/uploads/models/SelectionGAN/${FILE}_pretrained.tar.gz 7 | TAR_FILE=./checkpoints/${FILE}_pretrained.tar.gz 8 | TARGET_DIR=./checkpoints/${FILE}_pretrained/ 9 | 10 | wget -N $URL -O $TAR_FILE 11 | 12 | mkdir -p $TARGET_DIR 13 | tar -zxvf $TAR_FILE -C ./checkpoints/ 14 | rm $TAR_FILE -------------------------------------------------------------------------------- /semantic_synthesis/test.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. 
All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 4 | """ 5 | 6 | import os 7 | from collections import OrderedDict 8 | 9 | import data 10 | from options.test_options import TestOptions 11 | from models.pix2pix_model import Pix2PixModel 12 | from util.visualizer import Visualizer 13 | from util import html 14 | 15 | opt = TestOptions().parse() 16 | 17 | dataloader = data.create_dataloader(opt) 18 | 19 | model = Pix2PixModel(opt) 20 | model.eval() 21 | 22 | visualizer = Visualizer(opt) 23 | 24 | # create a webpage that summarizes the all results 25 | web_dir = os.path.join(opt.results_dir, opt.name, 26 | '%s_%s' % (opt.phase, opt.which_epoch)) 27 | webpage = html.HTML(web_dir, 28 | 'Experiment = %s, Phase = %s, Epoch = %s' % 29 | (opt.name, opt.phase, opt.which_epoch)) 30 | 31 | # test 32 | for i, data_i in enumerate(dataloader): 33 | if i * opt.batchSize >= opt.how_many: 34 | break 35 | 36 | generated = model(data_i, mode='inference') 37 | 38 | img_path = data_i['path'] 39 | for b in range(generated.shape[0]): 40 | print('process image... 
%s' % img_path[b]) 41 | visuals = OrderedDict([('input_label', data_i['label'][b]), 42 | ('synthesized_image', generated[b])]) 43 | visualizer.save_images(webpage, visuals, img_path[b:b + 1]) 44 | 45 | webpage.save() 46 | -------------------------------------------------------------------------------- /semantic_synthesis/test.sh: -------------------------------------------------------------------------------- 1 | python test.py --name ade20k_selectiongan --dataset_mode ade20k --dataroot ./datasets/ADEChallengeData2016 --gpu_ids 0 --results_dir ./results --checkpoints_dir ./checkpoints --batchSize 1 --which_epoch latest; 2 | -------------------------------------------------------------------------------- /semantic_synthesis/train.sh: -------------------------------------------------------------------------------- 1 | python train.py --name ade20k_selectiongan --dataset_mode ade20k --dataroot ./datasets/ADEChallengeData2016 --niter 100 --niter_decay 100 --gpu_ids 0,1,2,3,4,5,6,7 --checkpoints_dir ./checkpoints --batchSize 32 --save_epoch_freq 5 2 | # --continue_train 3 | 4 | -------------------------------------------------------------------------------- /semantic_synthesis/trainers/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 
4 | """ 5 | -------------------------------------------------------------------------------- /semantic_synthesis/trainers/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/trainers/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /semantic_synthesis/trainers/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/trainers/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /semantic_synthesis/trainers/__pycache__/pix2pix_trainer.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/trainers/__pycache__/pix2pix_trainer.cpython-36.pyc -------------------------------------------------------------------------------- /semantic_synthesis/trainers/__pycache__/pix2pix_trainer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/trainers/__pycache__/pix2pix_trainer.cpython-37.pyc -------------------------------------------------------------------------------- /semantic_synthesis/util/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 
4 | """ 5 | -------------------------------------------------------------------------------- /semantic_synthesis/util/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/util/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /semantic_synthesis/util/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/util/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /semantic_synthesis/util/__pycache__/coco.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/util/__pycache__/coco.cpython-36.pyc -------------------------------------------------------------------------------- /semantic_synthesis/util/__pycache__/coco.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/util/__pycache__/coco.cpython-37.pyc -------------------------------------------------------------------------------- /semantic_synthesis/util/__pycache__/html.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/util/__pycache__/html.cpython-36.pyc -------------------------------------------------------------------------------- 
/semantic_synthesis/util/__pycache__/html.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/util/__pycache__/html.cpython-37.pyc -------------------------------------------------------------------------------- /semantic_synthesis/util/__pycache__/iter_counter.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/util/__pycache__/iter_counter.cpython-36.pyc -------------------------------------------------------------------------------- /semantic_synthesis/util/__pycache__/iter_counter.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/util/__pycache__/iter_counter.cpython-37.pyc -------------------------------------------------------------------------------- /semantic_synthesis/util/__pycache__/util.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/util/__pycache__/util.cpython-36.pyc -------------------------------------------------------------------------------- /semantic_synthesis/util/__pycache__/util.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/util/__pycache__/util.cpython-37.pyc -------------------------------------------------------------------------------- /semantic_synthesis/util/__pycache__/visualizer.cpython-36.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/util/__pycache__/visualizer.cpython-36.pyc -------------------------------------------------------------------------------- /semantic_synthesis/util/__pycache__/visualizer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ha0Tang/SelectionGAN/d59c2d8819371d1014705ccefef1bb2b59dcd8c2/semantic_synthesis/util/__pycache__/visualizer.cpython-37.pyc --------------------------------------------------------------------------------