├── .gitignore
├── LICENSE
├── README.md
├── data
├── __init__.py
├── aligned_dataset.py
├── base_data_loader.py
├── base_dataset.py
├── image_folder.py
├── single_dataset.py
├── unaligned_dataset.py
└── unaligned_seg_dataset.py
├── datasets
├── bibtex
│ ├── cityscapes.tex
│ ├── facades.tex
│ ├── handbags.tex
│ ├── shoes.tex
│ └── transattr.tex
├── combine_A_and_B.py
├── download_coco.sh
├── download_cyclegan_dataset.sh
├── download_pix2pix_dataset.sh
├── generate_ccp_dataset.py
├── generate_coco_dataset.py
├── generate_mhp_dataset.py
├── make_dataset_aligned.py
├── pants2skirt_mhp
│ ├── sampleA
│ │ ├── 1602.png
│ │ ├── 2207.png
│ │ ├── 2781.png
│ │ ├── 3078.png
│ │ └── 3194.png
│ ├── sampleA_seg
│ │ ├── 1602_0.png
│ │ ├── 1602_1.png
│ │ ├── 2207_0.png
│ │ ├── 2207_1.png
│ │ ├── 2781_0.png
│ │ ├── 2781_1.png
│ │ ├── 2781_2.png
│ │ ├── 3078_0.png
│ │ ├── 3078_1.png
│ │ ├── 3194_0.png
│ │ └── 3194_1.png
│ ├── sampleB
│ │ ├── 0026.png
│ │ ├── 0637.png
│ │ ├── 1179.png
│ │ ├── 4217.png
│ │ └── 4413.png
│ └── sampleB_seg
│ │ ├── 0026_0.png
│ │ ├── 0637_0.png
│ │ ├── 1179_0.png
│ │ ├── 4217_0.png
│ │ └── 4413_0.png
└── shp2gir_coco
│ ├── sampleA
│ ├── 1106.png
│ ├── 1134.png
│ ├── 1271.png
│ ├── 139.png
│ ├── 602.png
│ ├── 732.png
│ └── 866.png
│ ├── sampleA_seg
│ ├── 1106_0.png
│ ├── 1106_1.png
│ ├── 1134_0.png
│ ├── 1134_1.png
│ ├── 1271_0.png
│ ├── 1271_1.png
│ ├── 139_0.png
│ ├── 139_1.png
│ ├── 139_2.png
│ ├── 139_3.png
│ ├── 602_0.png
│ ├── 602_1.png
│ ├── 602_2.png
│ ├── 602_3.png
│ ├── 602_4.png
│ ├── 602_5.png
│ ├── 602_6.png
│ ├── 602_7.png
│ ├── 732_0.png
│ ├── 732_1.png
│ ├── 732_2.png
│ ├── 732_3.png
│ ├── 732_4.png
│ ├── 732_5.png
│ ├── 866_0.png
│ ├── 866_1.png
│ ├── 866_2.png
│ ├── 866_3.png
│ └── 866_4.png
│ ├── sampleB
│ ├── 2075.png
│ ├── 2191.png
│ ├── 2316.png
│ ├── 364.png
│ ├── 380.png
│ ├── 46.png
│ └── 581.png
│ └── sampleB_seg
│ ├── 2075_0.png
│ ├── 2075_1.png
│ ├── 2191_0.png
│ ├── 2191_1.png
│ ├── 2191_2.png
│ ├── 2191_3.png
│ ├── 2316_0.png
│ ├── 2316_1.png
│ ├── 2316_2.png
│ ├── 2316_3.png
│ ├── 2316_4.png
│ ├── 364_0.png
│ ├── 364_1.png
│ ├── 380_0.png
│ ├── 380_1.png
│ ├── 46_0.png
│ ├── 581_0.png
│ └── 581_1.png
├── docs
└── more_results.md
├── environment.yml
├── imgs
├── intro.png
├── model.png
├── more
│ ├── coco-1.png
│ ├── coco-10.png
│ ├── coco-11.png
│ ├── coco-12.png
│ ├── coco-13.png
│ ├── coco-14.png
│ ├── coco-15.png
│ ├── coco-16.png
│ ├── coco-17.png
│ ├── coco-18.png
│ ├── coco-19.png
│ ├── coco-2.png
│ ├── coco-20.png
│ ├── coco-21.png
│ ├── coco-22.png
│ ├── coco-23.png
│ ├── coco-24.png
│ ├── coco-25.png
│ ├── coco-26.png
│ ├── coco-3.png
│ ├── coco-4.png
│ ├── coco-5.png
│ ├── coco-6.png
│ ├── coco-7.png
│ ├── coco-8.png
│ ├── coco-9.png
│ ├── google.png
│ ├── label.png
│ ├── mhp-1.png
│ ├── mhp-10.png
│ ├── mhp-2.png
│ ├── mhp-3.png
│ ├── mhp-4.png
│ ├── mhp-5.png
│ ├── mhp-6.png
│ ├── mhp-7.png
│ ├── mhp-8.png
│ ├── mhp-9.png
│ └── youtube.png
├── results-1.png
├── results-2.png
├── results-3.png
└── results-4.png
├── models
├── __init__.py
├── base_model.py
├── cycle_gan_model.py
├── insta_gan_model.py
├── networks.py
├── pix2pix_model.py
└── test_model.py
├── options
├── __init__.py
├── base_options.py
├── test_options.py
└── train_options.py
├── requirements.txt
├── results
├── pants2skirt_mhp_instagan
│ └── sample_200
│ │ ├── images
│ │ ├── 1602_fake_A_img.png
│ │ ├── 1602_fake_A_seg.png
│ │ ├── 1602_fake_B_img.png
│ │ ├── 1602_fake_B_seg.png
│ │ ├── 1602_real_A_img.png
│ │ ├── 1602_real_A_seg.png
│ │ ├── 1602_real_B_img.png
│ │ ├── 1602_real_B_seg.png
│ │ ├── 1602_rec_A_img.png
│ │ ├── 1602_rec_A_seg.png
│ │ ├── 1602_rec_B_img.png
│ │ ├── 1602_rec_B_seg.png
│ │ ├── 2207_fake_A_img.png
│ │ ├── 2207_fake_A_seg.png
│ │ ├── 2207_fake_B_img.png
│ │ ├── 2207_fake_B_seg.png
│ │ ├── 2207_real_A_img.png
│ │ ├── 2207_real_A_seg.png
│ │ ├── 2207_real_B_img.png
│ │ ├── 2207_real_B_seg.png
│ │ ├── 2207_rec_A_img.png
│ │ ├── 2207_rec_A_seg.png
│ │ ├── 2207_rec_B_img.png
│ │ ├── 2207_rec_B_seg.png
│ │ ├── 2781_fake_A_img.png
│ │ ├── 2781_fake_A_seg.png
│ │ ├── 2781_fake_B_img.png
│ │ ├── 2781_fake_B_seg.png
│ │ ├── 2781_real_A_img.png
│ │ ├── 2781_real_A_seg.png
│ │ ├── 2781_real_B_img.png
│ │ ├── 2781_real_B_seg.png
│ │ ├── 2781_rec_A_img.png
│ │ ├── 2781_rec_A_seg.png
│ │ ├── 2781_rec_B_img.png
│ │ ├── 2781_rec_B_seg.png
│ │ ├── 3078_fake_A_img.png
│ │ ├── 3078_fake_A_seg.png
│ │ ├── 3078_fake_B_img.png
│ │ ├── 3078_fake_B_seg.png
│ │ ├── 3078_real_A_img.png
│ │ ├── 3078_real_A_seg.png
│ │ ├── 3078_real_B_img.png
│ │ ├── 3078_real_B_seg.png
│ │ ├── 3078_rec_A_img.png
│ │ ├── 3078_rec_A_seg.png
│ │ ├── 3078_rec_B_img.png
│ │ ├── 3078_rec_B_seg.png
│ │ ├── 3194_fake_A_img.png
│ │ ├── 3194_fake_A_seg.png
│ │ ├── 3194_fake_B_img.png
│ │ ├── 3194_fake_B_seg.png
│ │ ├── 3194_real_A_img.png
│ │ ├── 3194_real_A_seg.png
│ │ ├── 3194_real_B_img.png
│ │ ├── 3194_real_B_seg.png
│ │ ├── 3194_rec_A_img.png
│ │ ├── 3194_rec_A_seg.png
│ │ ├── 3194_rec_B_img.png
│ │ └── 3194_rec_B_seg.png
│ │ └── index.html
└── shp2gir_coco_instagan
│ └── sample_200
│ ├── images
│ ├── 1106_fake_A_img.png
│ ├── 1106_fake_A_seg.png
│ ├── 1106_fake_B_img.png
│ ├── 1106_fake_B_seg.png
│ ├── 1106_real_A_img.png
│ ├── 1106_real_A_seg.png
│ ├── 1106_real_B_img.png
│ ├── 1106_real_B_seg.png
│ ├── 1106_rec_A_img.png
│ ├── 1106_rec_A_seg.png
│ ├── 1106_rec_B_img.png
│ ├── 1106_rec_B_seg.png
│ ├── 1134_fake_A_img.png
│ ├── 1134_fake_A_seg.png
│ ├── 1134_fake_B_img.png
│ ├── 1134_fake_B_seg.png
│ ├── 1134_real_A_img.png
│ ├── 1134_real_A_seg.png
│ ├── 1134_real_B_img.png
│ ├── 1134_real_B_seg.png
│ ├── 1134_rec_A_img.png
│ ├── 1134_rec_A_seg.png
│ ├── 1134_rec_B_img.png
│ ├── 1134_rec_B_seg.png
│ ├── 1271_fake_A_img.png
│ ├── 1271_fake_A_seg.png
│ ├── 1271_fake_B_img.png
│ ├── 1271_fake_B_seg.png
│ ├── 1271_real_A_img.png
│ ├── 1271_real_A_seg.png
│ ├── 1271_real_B_img.png
│ ├── 1271_real_B_seg.png
│ ├── 1271_rec_A_img.png
│ ├── 1271_rec_A_seg.png
│ ├── 1271_rec_B_img.png
│ ├── 1271_rec_B_seg.png
│ ├── 139_fake_A_img.png
│ ├── 139_fake_A_seg.png
│ ├── 139_fake_B_img.png
│ ├── 139_fake_B_seg.png
│ ├── 139_real_A_img.png
│ ├── 139_real_A_seg.png
│ ├── 139_real_B_img.png
│ ├── 139_real_B_seg.png
│ ├── 139_rec_A_img.png
│ ├── 139_rec_A_seg.png
│ ├── 139_rec_B_img.png
│ ├── 139_rec_B_seg.png
│ ├── 602_fake_A_img.png
│ ├── 602_fake_A_seg.png
│ ├── 602_fake_B_img.png
│ ├── 602_fake_B_seg.png
│ ├── 602_real_A_img.png
│ ├── 602_real_A_seg.png
│ ├── 602_real_B_img.png
│ ├── 602_real_B_seg.png
│ ├── 602_rec_A_img.png
│ ├── 602_rec_A_seg.png
│ ├── 602_rec_B_img.png
│ ├── 602_rec_B_seg.png
│ ├── 732_fake_A_img.png
│ ├── 732_fake_A_seg.png
│ ├── 732_fake_B_img.png
│ ├── 732_fake_B_seg.png
│ ├── 732_real_A_img.png
│ ├── 732_real_A_seg.png
│ ├── 732_real_B_img.png
│ ├── 732_real_B_seg.png
│ ├── 732_rec_A_img.png
│ ├── 732_rec_A_seg.png
│ ├── 732_rec_B_img.png
│ ├── 732_rec_B_seg.png
│ ├── 866_fake_A_img.png
│ ├── 866_fake_A_seg.png
│ ├── 866_fake_B_img.png
│ ├── 866_fake_B_seg.png
│ ├── 866_real_A_img.png
│ ├── 866_real_A_seg.png
│ ├── 866_real_B_img.png
│ ├── 866_real_B_seg.png
│ ├── 866_rec_A_img.png
│ ├── 866_rec_A_seg.png
│ ├── 866_rec_B_img.png
│ └── 866_rec_B_seg.png
│ └── index.html
├── scripts
├── conda_deps.sh
├── download_cyclegan_model.sh
├── download_pix2pix_model.sh
├── install_deps.sh
├── test_before_push.py
├── test_cyclegan.sh
├── test_pix2pix.sh
├── test_single.sh
├── train_cyclegan.sh
└── train_pix2pix.sh
├── test.py
├── train.py
└── util
├── __init__.py
├── get_data.py
├── html.py
├── image_pool.py
├── util.py
└── visualizer.py
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | debug*
3 | datasets/
4 | checkpoints/
5 | results/
6 | build/
7 | dist/
8 | *.png
9 | torch.egg-info/
10 | */**/__pycache__
11 | torch/version.py
12 | torch/csrc/generic/TensorMethods.cpp
13 | torch/lib/*.so*
14 | torch/lib/*.dylib*
15 | torch/lib/*.h
16 | torch/lib/build
17 | torch/lib/tmp_install
18 | torch/lib/include
19 | torch/lib/torch_shm_manager
20 | torch/csrc/cudnn/cuDNN.cpp
21 | torch/csrc/nn/THNN.cwrap
22 | torch/csrc/nn/THNN.cpp
23 | torch/csrc/nn/THCUNN.cwrap
24 | torch/csrc/nn/THCUNN.cpp
25 | torch/csrc/nn/THNN_generic.cwrap
26 | torch/csrc/nn/THNN_generic.cpp
27 | torch/csrc/nn/THNN_generic.h
28 | docs/src/**/*
29 | test/data/legacy_modules.t7
30 | test/data/gpu_tensors.pt
31 | test/htmlcov
32 | test/.coverage
33 | */*.pyc
34 | */**/*.pyc
35 | */**/**/*.pyc
36 | */**/**/**/*.pyc
37 | */**/**/**/**/*.pyc
38 | */*.so*
39 | */**/*.so*
40 | */**/*.dylib*
41 | test/data/legacy_serialized.pt
42 | *~
43 | .idea
44 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2018, Sangwoo Mo
2 | All rights reserved.
3 |
4 | Redistribution and use in source and binary forms, with or without
5 | modification, are permitted provided that the following conditions are met:
6 |
7 | * Redistributions of source code must retain the above copyright notice, this
8 | list of conditions and the following disclaimer.
9 |
10 | * Redistributions in binary form must reproduce the above copyright notice,
11 | this list of conditions and the following disclaimer in the documentation
12 | and/or other materials provided with the distribution.
13 |
14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
18 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
20 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
21 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
22 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 |
25 | --------------------------- LICENSE FOR CycleGAN -------------------------------
26 | Copyright (c) 2017, Jun-Yan Zhu and Taesung Park
27 | All rights reserved.
28 |
29 | Redistribution and use in source and binary forms, with or without
30 | modification, are permitted provided that the following conditions are met:
31 |
32 | * Redistributions of source code must retain the above copyright notice, this
33 | list of conditions and the following disclaimer.
34 |
35 | * Redistributions in binary form must reproduce the above copyright notice,
36 | this list of conditions and the following disclaimer in the documentation
37 | and/or other materials provided with the distribution.
38 |
39 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
40 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
41 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
42 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
43 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
44 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
45 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
46 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
47 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
48 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
49 |
50 |
51 | --------------------------- LICENSE FOR pix2pix --------------------------------
52 | BSD License
53 |
54 | For pix2pix software
55 | Copyright (c) 2016, Phillip Isola and Jun-Yan Zhu
56 | All rights reserved.
57 |
58 | Redistribution and use in source and binary forms, with or without
59 | modification, are permitted provided that the following conditions are met:
60 |
61 | * Redistributions of source code must retain the above copyright notice, this
62 | list of conditions and the following disclaimer.
63 |
64 | * Redistributions in binary form must reproduce the above copyright notice,
65 | this list of conditions and the following disclaimer in the documentation
66 | and/or other materials provided with the distribution.
67 |
68 | ----------------------------- LICENSE FOR DCGAN --------------------------------
69 | BSD License
70 |
71 | For dcgan.torch software
72 |
73 | Copyright (c) 2015, Facebook, Inc. All rights reserved.
74 |
75 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
76 |
77 | Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
78 |
79 | Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
80 |
81 | Neither the name Facebook nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
82 |
83 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
84 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # InstaGAN: Instance-aware Image-to-Image Translation
2 |
3 | **Warning:** This repo contains a model which has potential ethical concerns. Note that the task of jeans<->skirt was a bad application and should not be used in future research. See the [twitter thread](https://twitter.com/SashaMTL/status/1453491661720391685) for the discussion.
4 |
5 | ---
6 |
7 | PyTorch implementation of ["InstaGAN: Instance-aware Image-to-Image Translation"](https://openreview.net/forum?id=ryxwJhC9YX) (ICLR 2019).
8 | The implementation is based on the [official CycleGAN](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix) code.
9 | Our major contributions are in `./models/insta_gan_model.py` and `./models/networks.py`.
10 |
11 |
12 |
13 |
14 |
15 |
16 | ## Getting Started
17 | ### Installation
18 |
19 | - Clone this repository
20 | ```
21 | git clone https://github.com/sangwoomo/instagan
22 | ```
23 |
24 | - Install PyTorch 0.4+ and torchvision from http://pytorch.org and other dependencies (e.g., [visdom](https://github.com/facebookresearch/visdom) and [dominate](https://github.com/Knio/dominate)).
25 | You can install all the dependencies by
26 | ```
27 | pip install -r requirements.txt
28 | ```
29 |
30 | - For Conda users, you can use a script `./scripts/conda_deps.sh` to install PyTorch and other libraries.
31 |
32 | - **Acknowledgment:** Installation scripts are from the [official CycleGAN](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix) code.
33 |
34 |
35 | ### Download base datasets
36 |
37 | - Download [clothing-co-parsing (CCP)](https://github.com/bearpaw/clothing-co-parsing) dataset:
38 | ```
39 | git clone https://github.com/bearpaw/clothing-co-parsing ./datasets/clothing-co-parsing
40 | ```
41 |
42 | - Download [multi-human parsing (MHP)](https://lv-mhp.github.io/) dataset:
43 | ```
44 | # Download "LV-MHP-v1" from the link and locate in ./datasets
45 | ```
46 |
47 | - Download [MS COCO](http://cocodataset.org/) dataset:
48 | ```
49 | ./datasets/download_coco.sh
50 | ```
51 |
52 | ### Generate two-domain datasets
53 |
54 | - Generate two-domain dataset for experiments:
55 | ```
56 | python ./datasets/generate_ccp_dataset.py --save_root ./datasets/jeans2skirt_ccp --cat1 jeans --cat2 skirt
57 | python ./datasets/generate_mhp_dataset.py --save_root ./datasets/pants2skirt_mhp --cat1 pants --cat2 skirt
58 | python ./datasets/generate_coco_dataset.py --save_root ./datasets/shp2gir_coco --cat1 sheep --cat2 giraffe
59 | ```
60 | - **Note:** Generated dataset contains images and corresponding masks, which are located in image folders (e.g., 'trainA') and mask folders (e.g., 'trainA_seg'), respectively.
61 | For each image (e.g., '0001.png'), corresponding masks for each instance (e.g., '0001_0.png', '0001_1.png', ...) are provided.
62 |
63 | ### Run experiments
64 |
65 | - Train a model:
66 | ```
67 | python train.py --dataroot ./datasets/jeans2skirt_ccp --model insta_gan --name jeans2skirt_ccp_instagan --loadSizeH 330 --loadSizeW 220 --fineSizeH 300 --fineSizeW 200 --niter 400 --niter_decay 200
68 | python train.py --dataroot ./datasets/pants2skirt_mhp --model insta_gan --name pants2skirt_mhp_instagan --loadSizeH 270 --loadSizeW 180 --fineSizeH 240 --fineSizeW 160
69 | python train.py --dataroot ./datasets/shp2gir_coco --model insta_gan --name shp2gir_coco_instagan --loadSizeH 220 --loadSizeW 220 --fineSizeH 200 --fineSizeW 200
70 | ```
71 |
72 | - To view training results and loss plots, run `python -m visdom.server` and click the URL http://localhost:8097.
73 | To see more intermediate results, check out `./checkpoints/experiment_name/web/index.html`.
74 |
75 | - For faster experiments, increase the batch size and use more GPUs:
76 | ```
77 | python train.py --dataroot ./datasets/shp2gir_coco --model insta_gan --name shp2gir_coco_instagan --loadSizeH 220 --loadSizeW 220 --fineSizeH 200 --fineSizeW 200 --batch_size 4 --gpu_ids 0,1,2,3
78 | ```
79 |
80 | - Test the model:
81 | ```
82 | python test.py --dataroot ./datasets/jeans2skirt_ccp --model insta_gan --name jeans2skirt_ccp_instagan --loadSizeH 300 --loadSizeW 200 --fineSizeH 300 --fineSizeW 200
83 | python test.py --dataroot ./datasets/pants2skirt_mhp --model insta_gan --name pants2skirt_mhp_instagan --loadSizeH 240 --loadSizeW 160 --fineSizeH 240 --fineSizeW 160 --ins_per 2 --ins_max 20
84 | python test.py --dataroot ./datasets/shp2gir_coco --model insta_gan --name shp2gir_coco_instagan --loadSizeH 200 --loadSizeW 200 --fineSizeH 200 --fineSizeW 200 --ins_per 2 --ins_max 20
85 | ```
86 | - The test results will be saved to a html file here: `./results/experiment_name/latest_test/index.html`.
87 |
88 |
89 | ### Apply a pre-trained model
90 |
91 | - You can download a pre-trained model (pants->skirt and/or sheep->giraffe) from the following [Google drive link](https://drive.google.com/drive/folders/10TfnuqZ4tIVAQP23cgHxJQKuVeJusu85?usp=sharing).
92 | Save the pretrained model in `./checkpoints/` directory.
93 |
94 | - We provide samples of two datasets (pants->skirt and sheep->giraffe) in this repository.
95 | To test the model:
96 | ```
97 | python test.py --dataroot ./datasets/pants2skirt_mhp --model insta_gan --name pants2skirt_mhp_instagan --loadSizeH 240 --loadSizeW 160 --fineSizeH 240 --fineSizeW 160 --ins_per 2 --ins_max 20 --phase sample --epoch 200
98 | python test.py --dataroot ./datasets/shp2gir_coco --model insta_gan --name shp2gir_coco_instagan --loadSizeH 200 --loadSizeW 200 --fineSizeH 200 --fineSizeW 200 --ins_per 2 --ins_max 20 --phase sample --epoch 200
99 | ```
100 |
101 |
102 | ## Results
103 |
104 | We provide some translation results of our model.
105 | See the [**link**](/docs/more_results.md) for more translation results.
106 |
107 | ### 1. Fashion dataset (pants->skirt)
108 |
109 |
110 |
111 | ### 2. COCO dataset (sheep->giraffe)
112 |
113 |
114 |
115 | ### 3. Results on Google-searched images (pants->skirt)
116 |
117 |
118 |
119 | ### 4. Results on YouTube-searched videos (pants->skirt)
120 |
121 |
122 |
123 |
124 | ## Citation
125 | If you use this code for your research, please cite our paper.
126 | ```
127 | @inproceedings{
128 | mo2019instagan,
129 | title={InstaGAN: Instance-aware Image-to-Image Translation},
130 | author={Sangwoo Mo and Minsu Cho and Jinwoo Shin},
131 | booktitle={International Conference on Learning Representations},
132 | year={2019},
133 | url={https://openreview.net/forum?id=ryxwJhC9YX},
134 | }
135 | ```
136 |
--------------------------------------------------------------------------------
/data/__init__.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | import torch.utils.data
3 | from data.base_data_loader import BaseDataLoader
4 | from data.base_dataset import BaseDataset
5 |
6 |
def find_dataset_using_name(dataset_name):
    """Locate the dataset class for --dataset_mode [dataset_name].

    Imports "data/[dataset_name]_dataset.py" and returns the class whose
    lowercased name equals "[dataset_name]dataset" (underscores removed)
    and which is a subclass of BaseDataset.

    Raises:
        NotImplementedError: if no matching subclass exists in the module.
    """
    dataset_filename = "data." + dataset_name + "_dataset"
    datasetlib = importlib.import_module(dataset_filename)

    # The lookup is case-insensitive, e.g. "unaligned_seg" matches
    # UnalignedSegDataset.
    dataset = None
    target_dataset_name = dataset_name.replace('_', '') + 'dataset'
    for name, cls in datasetlib.__dict__.items():
        if name.lower() == target_dataset_name.lower() \
           and issubclass(cls, BaseDataset):
            dataset = cls

    if dataset is None:
        # Raise instead of print + exit(0): exit(0) reports *success* to the
        # shell and cannot be caught by callers; an exception is both honest
        # and recoverable.
        raise NotImplementedError(
            "In %s.py, there should be a subclass of BaseDataset with class "
            "name that matches %s in lowercase."
            % (dataset_filename, target_dataset_name))

    return dataset
29 |
30 |
def get_option_setter(dataset_name):
    """Return the option-modifier hook of the selected dataset class."""
    # Resolve the class and hand back its static hook without instantiating.
    return find_dataset_using_name(dataset_name).modify_commandline_options
34 |
35 |
def create_dataset(opt):
    """Build and initialize the dataset selected by opt.dataset_mode."""
    dataset_class = find_dataset_using_name(opt.dataset_mode)
    instance = dataset_class()
    instance.initialize(opt)
    # Keep the original log line so existing tooling/grep still matches.
    print("dataset [%s] was created" % (instance.name()))
    return instance
42 |
43 |
def CreateDataLoader(opt):
    """Factory kept for backward compatibility with the CycleGAN API."""
    loader = CustomDatasetDataLoader()
    loader.initialize(opt)
    return loader
48 |
49 |
50 | # Wrapper class of Dataset class that performs
51 | # multi-threaded data loading
class CustomDatasetDataLoader(BaseDataLoader):
    """Multi-threaded wrapper around a Dataset, capped at opt.max_dataset_size."""

    def name(self):
        return 'CustomDatasetDataLoader'

    def initialize(self, opt):
        """Create the underlying dataset and its torch DataLoader."""
        BaseDataLoader.initialize(self, opt)
        self.dataset = create_dataset(opt)
        self.dataloader = torch.utils.data.DataLoader(
            self.dataset,
            num_workers=int(opt.num_threads),
            batch_size=opt.batch_size,
            shuffle=not opt.serial_batches)

    def load_data(self):
        # The loader object is itself iterable; hand it back directly.
        return self

    def __len__(self):
        # Never report more examples than the configured cap.
        return min(len(self.dataset), self.opt.max_dataset_size)

    def __iter__(self):
        """Yield batches, stopping once max_dataset_size examples were seen."""
        batches_out = 0
        for batch in self.dataloader:
            if batches_out * self.opt.batch_size >= self.opt.max_dataset_size:
                break
            yield batch
            batches_out += 1
--------------------------------------------------------------------------------
/data/aligned_dataset.py:
--------------------------------------------------------------------------------
1 | import os.path
2 | import random
3 | import torchvision.transforms as transforms
4 | import torch
5 | from data.base_dataset import BaseDataset
6 | from data.image_folder import make_dataset
7 | from PIL import Image
8 |
9 |
class AlignedDataset(BaseDataset):
    """Paired dataset for pix2pix: each file stores A and B side by side.

    The image at each path is split down the middle into A (left half) and
    B (right half); both halves receive the *same* random crop offsets and
    the same horizontal flip, so the pair stays spatially aligned.
    """

    @staticmethod
    def modify_commandline_options(parser, is_train):
        # No dataset-specific command-line options are added.
        return parser

    def initialize(self, opt):
        # NOTE(review): this dataset reads the scalar opt.loadSize/fineSize,
        # while base_dataset.get_transform reads loadSizeH/W and fineSizeH/W
        # -- confirm the aligned pipeline still provides the scalar options.
        self.opt = opt
        self.root = opt.dataroot
        self.dir_AB = os.path.join(opt.dataroot, opt.phase)
        self.AB_paths = sorted(make_dataset(self.dir_AB))
        assert(opt.resize_or_crop == 'resize_and_crop')

    def __getitem__(self, index):
        """Return {'A', 'B', 'A_paths', 'B_paths'} for the indexed pair."""
        AB_path = self.AB_paths[index]
        AB = Image.open(AB_path).convert('RGB')
        w, h = AB.size
        assert(self.opt.loadSize >= self.opt.fineSize)
        # Split the side-by-side image and resize each half to loadSize.
        w2 = int(w / 2)
        A = AB.crop((0, 0, w2, h)).resize((self.opt.loadSize, self.opt.loadSize), Image.BICUBIC)
        B = AB.crop((w2, 0, w, h)).resize((self.opt.loadSize, self.opt.loadSize), Image.BICUBIC)
        A = transforms.ToTensor()(A)
        B = transforms.ToTensor()(B)
        # Shared random crop offsets keep A and B aligned.
        w_offset = random.randint(0, max(0, self.opt.loadSize - self.opt.fineSize - 1))
        h_offset = random.randint(0, max(0, self.opt.loadSize - self.opt.fineSize - 1))

        A = A[:, h_offset:h_offset + self.opt.fineSize, w_offset:w_offset + self.opt.fineSize]
        B = B[:, h_offset:h_offset + self.opt.fineSize, w_offset:w_offset + self.opt.fineSize]

        # Map [0, 1] tensors to [-1, 1].
        A = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(A)
        B = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(B)

        # The translation direction decides which side is the network input.
        if self.opt.direction == 'BtoA':
            input_nc = self.opt.output_nc
            output_nc = self.opt.input_nc
        else:
            input_nc = self.opt.input_nc
            output_nc = self.opt.output_nc

        # Horizontal flip (train-time augmentation) applied to both halves.
        if (not self.opt.no_flip) and random.random() < 0.5:
            idx = [i for i in range(A.size(2) - 1, -1, -1)]
            idx = torch.LongTensor(idx)
            A = A.index_select(2, idx)
            B = B.index_select(2, idx)

        if input_nc == 1:  # RGB to gray
            tmp = A[0, ...] * 0.299 + A[1, ...] * 0.587 + A[2, ...] * 0.114
            A = tmp.unsqueeze(0)

        if output_nc == 1:  # RGB to gray
            tmp = B[0, ...] * 0.299 + B[1, ...] * 0.587 + B[2, ...] * 0.114
            B = tmp.unsqueeze(0)

        return {'A': A, 'B': B,
                'A_paths': AB_path, 'B_paths': AB_path}

    def __len__(self):
        # One example per side-by-side file found.
        return len(self.AB_paths)

    def name(self):
        return 'AlignedDataset'
70 |
--------------------------------------------------------------------------------
/data/base_data_loader.py:
--------------------------------------------------------------------------------
class BaseDataLoader():
    """Abstract base for data loaders; subclasses override load_data()."""

    def __init__(self):
        pass

    def initialize(self, opt):
        # Store options so subclasses (e.g. __len__/__iter__) can read them.
        self.opt = opt

    def load_data(self):
        """Return the iterable of batches; the base class provides none.

        Fixed: the original declared ``def load_data():`` without ``self``,
        so calling it on any instance raised TypeError.
        """
        return None
11 |
--------------------------------------------------------------------------------
/data/base_dataset.py:
--------------------------------------------------------------------------------
1 | import torch.utils.data as data
2 | from PIL import Image
3 | import torchvision.transforms as transforms
4 |
5 |
class BaseDataset(data.Dataset):
    """Common interface every dataset in this project implements."""

    def __init__(self):
        super(BaseDataset, self).__init__()

    def name(self):
        # Human-readable identifier used by log messages.
        return 'BaseDataset'

    @staticmethod
    def modify_commandline_options(parser, is_train):
        # Hook for subclasses to register dataset-specific CLI options.
        return parser

    def initialize(self, opt):
        # Subclasses populate paths/transforms from opt; base does nothing.
        pass

    def __len__(self):
        # The abstract base holds no data.
        return 0
22 |
23 |
def get_transform(opt):
    """Build the torchvision preprocessing pipeline described by ``opt``.

    Unlike the original CycleGAN code (square loadSize/fineSize), this
    version supports distinct height/width via loadSizeH/W and fineSizeH/W.
    Only the 'resize_and_crop' mode is supported.

    Raises:
        ValueError: if opt.resize_or_crop is not 'resize_and_crop'.
    """
    transform_list = []
    if opt.resize_or_crop == 'resize_and_crop':
        osize = [opt.loadSizeH, opt.loadSizeW]
        fsize = [opt.fineSizeH, opt.fineSizeW]
        transform_list.append(transforms.Resize(osize, Image.BICUBIC))
        transform_list.append(transforms.RandomCrop(fsize))
    else:
        raise ValueError('--resize_or_crop %s is not a valid option.' % opt.resize_or_crop)

    # Horizontal flip only at train time, and only when not disabled.
    if opt.isTrain and not opt.no_flip:
        transform_list.append(transforms.RandomHorizontalFlip())

    # PIL image -> tensor in [0, 1] -> tensor in [-1, 1].
    transform_list += [transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)
59 |
60 |
61 | # just modify the width and height to be multiple of 4
def __adjust(img):
    """Round the image size up to a multiple of 4.

    Passing an image through the generator can change sizes that are not
    multiples of 4 and eventually cause a size-mismatch error, so any
    non-conforming image is resized here.
    """
    ow, oh = img.size
    mult = 4
    if ow % mult == 0 and oh % mult == 0:
        return img

    # Round each dimension up to the next multiple of `mult`.
    w = ((ow - 1) // mult + 1) * mult
    h = ((oh - 1) // mult + 1) * mult

    if (w, h) != (ow, oh):
        __print_size_warning(ow, oh, w, h)

    return img.resize((w, h), Image.BICUBIC)
80 |
81 |
def __scale_width(img, target_width):
    """Scale img to target_width, rounding height up to a multiple of 4.

    The multiple-of-4 constraint avoids size-mismatch errors when the image
    passes through the generator network.
    """
    ow, oh = img.size
    mult = 4
    assert target_width % mult == 0, "the target width needs to be multiple of %d." % mult
    if (ow == target_width and oh % mult == 0):
        return img

    w = target_width
    # Aspect-ratio-preserving height, rounded up to the next multiple.
    target_height = int(target_width * oh / ow)
    h = ((target_height - 1) // mult + 1) * mult

    if h != target_height:
        __print_size_warning(target_width, target_height, w, h)

    return img.resize((w, h), Image.BICUBIC)
101 |
102 |
def __print_size_warning(ow, oh, w, h):
    """Warn once per process that images are being resized to multiples of 4."""
    if getattr(__print_size_warning, 'has_printed', False):
        return
    print("The image size needs to be a multiple of 4. "
          "The loaded image size was (%d, %d), so it was adjusted to "
          "(%d, %d). This adjustment will be done to all images "
          "whose sizes are not multiples of 4" % (ow, oh, w, h))
    # Flag on the function object suppresses all subsequent warnings.
    __print_size_warning.has_printed = True
110 |
--------------------------------------------------------------------------------
/data/image_folder.py:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | # Code from
3 | # https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py
4 | # Modified the original code so that it also loads images from the current
5 | # directory as well as the subdirectories
6 | ###############################################################################
7 |
8 | import torch.utils.data as data
9 |
10 | from PIL import Image
11 | import os
12 | import os.path
13 |
# Recognized image extensions (common case variants listed explicitly).
IMG_EXTENSIONS = [
    '.jpg', '.JPG', '.jpeg', '.JPEG',
    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]


def is_image_file(filename):
    """Return True if ``filename`` ends with a recognized image extension."""
    # str.endswith accepts a tuple of suffixes: one C-level call replaces
    # the hand-rolled any(...) generator scan, with identical semantics.
    return filename.endswith(tuple(IMG_EXTENSIONS))
22 |
23 |
def make_dataset(dir):
    """Recursively collect the paths of all image files under ``dir``.

    The walk is sorted so results are deterministic across runs.
    """
    assert os.path.isdir(dir), '%s is not a valid directory' % dir

    return [os.path.join(root, fname)
            for root, _, fnames in sorted(os.walk(dir))
            for fname in fnames
            if is_image_file(fname)]
35 |
36 |
def default_loader(path):
    """Open ``path`` with PIL and force a 3-channel RGB image."""
    img = Image.open(path)
    return img.convert('RGB')
39 |
40 |
class ImageFolder(data.Dataset):
    """Dataset over every image found under ``root``, subdirectories included."""

    def __init__(self, root, transform=None, return_paths=False,
                 loader=default_loader):
        imgs = make_dataset(root)
        if not imgs:
            raise(RuntimeError("Found 0 images in: " + root + "\n"
                               "Supported image extensions are: " +
                               ",".join(IMG_EXTENSIONS)))

        self.root = root
        self.imgs = imgs
        self.transform = transform
        self.return_paths = return_paths
        self.loader = loader

    def __getitem__(self, index):
        """Load one image; optionally transform it and pair it with its path."""
        path = self.imgs[index]
        img = self.loader(path)
        if self.transform is not None:
            img = self.transform(img)
        return (img, path) if self.return_paths else img

    def __len__(self):
        return len(self.imgs)
69 |
--------------------------------------------------------------------------------
/data/single_dataset.py:
--------------------------------------------------------------------------------
1 | import os.path
2 | from data.base_dataset import BaseDataset, get_transform
3 | from data.image_folder import make_dataset
4 | from PIL import Image
5 |
6 |
class SingleDataset(BaseDataset):
    """Dataset holding a single, unpaired image collection (domain A only)."""

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """No dataset-specific command-line options."""
        return parser

    def initialize(self, opt):
        """Index the images under `opt.dataroot` and build the transform."""
        self.opt = opt
        self.root = opt.dataroot
        self.dir_A = os.path.join(opt.dataroot)
        self.A_paths = sorted(make_dataset(self.dir_A))
        self.transform = get_transform(opt)

    def __getitem__(self, index):
        """Load, transform, and return image `index` together with its path."""
        A_path = self.A_paths[index]
        A = self.transform(Image.open(A_path).convert('RGB'))

        input_nc = (self.opt.output_nc if self.opt.direction == 'BtoA'
                    else self.opt.input_nc)
        if input_nc == 1:  # collapse RGB to a single luminance channel
            gray = A[0, ...] * 0.299 + A[1, ...] * 0.587 + A[2, ...] * 0.114
            A = gray.unsqueeze(0)

        return {'A': A, 'A_paths': A_path}

    def __len__(self):
        """Number of images in the collection."""
        return len(self.A_paths)

    def name(self):
        return 'SingleImageDataset'
43 |
--------------------------------------------------------------------------------
/data/unaligned_dataset.py:
--------------------------------------------------------------------------------
1 | import os.path
2 | from data.base_dataset import BaseDataset, get_transform
3 | from data.image_folder import make_dataset
4 | from PIL import Image
5 | import random
6 |
7 |
class UnalignedDataset(BaseDataset):
    """Unpaired two-domain dataset drawing A and B images independently."""

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """No dataset-specific command-line options."""
        return parser

    def initialize(self, opt):
        """Index the '<phase>A' and '<phase>B' folders and build the transform."""
        self.opt = opt
        self.root = opt.dataroot
        self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A')
        self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B')

        self.A_paths = sorted(make_dataset(self.dir_A))
        self.B_paths = sorted(make_dataset(self.dir_B))
        self.A_size = len(self.A_paths)
        self.B_size = len(self.B_paths)
        self.transform = get_transform(opt)

    def __getitem__(self, index):
        """Return a dict with one A image and one (possibly random) B image."""
        A_path = self.A_paths[index % self.A_size]
        if self.opt.serial_batches:
            index_B = index % self.B_size
        else:
            # Pair A with a random B so the networks never see fixed pairs.
            index_B = random.randint(0, self.B_size - 1)
        B_path = self.B_paths[index_B]

        A = self.transform(Image.open(A_path).convert('RGB'))
        B = self.transform(Image.open(B_path).convert('RGB'))

        if self.opt.direction == 'BtoA':
            input_nc, output_nc = self.opt.output_nc, self.opt.input_nc
        else:
            input_nc, output_nc = self.opt.input_nc, self.opt.output_nc

        if input_nc == 1:  # collapse RGB to a single luminance channel
            A = (A[0, ...] * 0.299 + A[1, ...] * 0.587 + A[2, ...] * 0.114).unsqueeze(0)
        if output_nc == 1:
            B = (B[0, ...] * 0.299 + B[1, ...] * 0.587 + B[2, ...] * 0.114).unsqueeze(0)

        return {'A': A, 'B': B,
                'A_paths': A_path, 'B_paths': B_path}

    def __len__(self):
        """Length of the larger domain, so every image is visited each epoch."""
        return max(self.A_size, self.B_size)

    def name(self):
        return 'UnalignedDataset'
62 |
--------------------------------------------------------------------------------
/data/unaligned_seg_dataset.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os.path
3 | from data.base_dataset import BaseDataset, get_transform
4 | from data.image_folder import make_dataset
5 | from PIL import Image
6 | import random
7 | import torch
8 |
9 |
class UnalignedSegDataset(BaseDataset):
    """Unpaired two-domain dataset that also loads per-instance segmentation
    masks (InstaGAN-style): for image `x.png`, masks live as `x_0.png`,
    `x_1.png`, ... in a sibling '<phase><domain>_seg' directory.
    """

    def name(self):
        return 'UnalignedSegDataset'

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """No dataset-specific command-line options."""
        return parser

    def initialize(self, opt):
        """Index both image folders and build the (shared) transform."""
        self.opt = opt
        self.root = opt.dataroot
        self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A')
        self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B')
        self.max_instances = 20  # default: 20
        self.seg_dir = 'seg'  # default: 'seg'

        self.A_paths = sorted(make_dataset(self.dir_A))
        self.B_paths = sorted(make_dataset(self.dir_B))
        self.A_size = len(self.A_paths)
        self.B_size = len(self.B_paths)
        self.transform = get_transform(opt)

    def fixed_transform(self, image, seed):
        """Apply self.transform after reseeding the global RNG so an image and
        all of its masks receive identical random augmentations."""
        random.seed(seed)
        return self.transform(image)

    def read_segs(self, seg_path, seed):
        """Load up to `self.max_instances` masks for `seg_path`, concatenated
        along the channel axis; missing slots are filled with -1 tensors.

        Fix over the original: the old code sized every placeholder from
        `segs[0]` and therefore crashed with IndexError whenever mask `_0`
        was missing but a later mask existed. Placeholders are now sized
        from any successfully loaded mask, preserving per-index alignment.
        Raises FileNotFoundError when no mask exists at all (the original
        raised an opaque IndexError in that case).
        """
        segs = [None] * self.max_instances
        template = None  # any loaded mask; defines the placeholder size
        for i in range(self.max_instances):
            path = seg_path.replace('.png', '_{}.png'.format(i))
            if os.path.isfile(path):
                seg = self.fixed_transform(Image.open(path).convert('L'), seed)
                segs[i] = seg
                if template is None:
                    template = seg
        if template is None:
            raise FileNotFoundError('no segmentation masks found for %s' % seg_path)
        filler = -torch.ones(template.size())
        return torch.cat([s if s is not None else filler for s in segs])

    def __getitem__(self, index):
        """Return A/B images, their stacked instance masks, ids, and paths."""
        index_A = index % self.A_size
        if self.opt.serial_batches:
            index_B = index % self.B_size
        else:
            index_B = random.randint(0, self.B_size - 1)

        A_path = self.A_paths[index_A]
        B_path = self.B_paths[index_B]
        # NOTE(review): replaces the first 'A'/'B' occurring anywhere in the
        # path; this breaks if dataroot itself contains that letter earlier.
        A_seg_path = A_path.replace('A', 'A_{}'.format(self.seg_dir))
        B_seg_path = B_path.replace('B', 'B_{}'.format(self.seg_dir))

        # File stem (e.g. '1602' from '.../1602.png') used as a sample id.
        A_idx = A_path.split('/')[-1].split('.')[0]
        B_idx = B_path.split('/')[-1].split('.')[0]

        # One seed per sample keeps image and masks spatially aligned.
        seed = random.randint(-sys.maxsize, sys.maxsize)

        A = self.fixed_transform(Image.open(A_path).convert('RGB'), seed)
        B = self.fixed_transform(Image.open(B_path).convert('RGB'), seed)

        A_segs = self.read_segs(A_seg_path, seed)
        B_segs = self.read_segs(B_seg_path, seed)

        if self.opt.direction == 'BtoA':
            input_nc, output_nc = self.opt.output_nc, self.opt.input_nc
        else:
            input_nc, output_nc = self.opt.input_nc, self.opt.output_nc

        if input_nc == 1:  # RGB to gray
            tmp = A[0, ...] * 0.299 + A[1, ...] * 0.587 + A[2, ...] * 0.114
            A = tmp.unsqueeze(0)
        if output_nc == 1:  # RGB to gray
            tmp = B[0, ...] * 0.299 + B[1, ...] * 0.587 + B[2, ...] * 0.114
            B = tmp.unsqueeze(0)

        return {'A': A, 'B': B,
                'A_idx': A_idx, 'B_idx': B_idx,
                'A_segs': A_segs, 'B_segs': B_segs,
                'A_paths': A_path, 'B_paths': B_path}

    def __len__(self):
        return max(self.A_size, self.B_size)
95 |
--------------------------------------------------------------------------------
/datasets/bibtex/cityscapes.tex:
--------------------------------------------------------------------------------
1 | @inproceedings{Cordts2016Cityscapes,
2 | title={The Cityscapes Dataset for Semantic Urban Scene Understanding},
3 | author={Cordts, Marius and Omran, Mohamed and Ramos, Sebastian and Rehfeld, Timo and Enzweiler, Markus and Benenson, Rodrigo and Franke, Uwe and Roth, Stefan and Schiele, Bernt},
4 | booktitle={Proc. of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
5 | year={2016}
6 | }
7 |
--------------------------------------------------------------------------------
/datasets/bibtex/facades.tex:
--------------------------------------------------------------------------------
1 | @INPROCEEDINGS{Tylecek13,
author = {Radim Tyle{\v c}ek and Radim {\v S}{\' a}ra},
3 | title = {Spatial Pattern Templates for Recognition of Objects with Regular Structure},
4 | booktitle = {Proc. GCPR},
5 | year = {2013},
6 | address = {Saarbrucken, Germany},
7 | }
8 |
--------------------------------------------------------------------------------
/datasets/bibtex/handbags.tex:
--------------------------------------------------------------------------------
1 | @inproceedings{zhu2016generative,
2 | title={Generative Visual Manipulation on the Natural Image Manifold},
3 | author={Zhu, Jun-Yan and Kr{\"a}henb{\"u}hl, Philipp and Shechtman, Eli and Efros, Alexei A.},
4 | booktitle={Proceedings of European Conference on Computer Vision (ECCV)},
5 | year={2016}
6 | }
7 |
8 | @InProceedings{xie15hed,
author = {Xie, Saining and Tu, Zhuowen},
10 | Title = {Holistically-Nested Edge Detection},
11 | Booktitle = "Proceedings of IEEE International Conference on Computer Vision",
12 | Year = {2015},
13 | }
14 |
--------------------------------------------------------------------------------
/datasets/bibtex/shoes.tex:
--------------------------------------------------------------------------------
1 | @InProceedings{fine-grained,
2 | author = {A. Yu and K. Grauman},
3 | title = {{F}ine-{G}rained {V}isual {C}omparisons with {L}ocal {L}earning},
4 | booktitle = {Computer Vision and Pattern Recognition (CVPR)},
5 | month = {June},
6 | year = {2014}
7 | }
8 |
9 | @InProceedings{xie15hed,
author = {Xie, Saining and Tu, Zhuowen},
11 | Title = {Holistically-Nested Edge Detection},
12 | Booktitle = "Proceedings of IEEE International Conference on Computer Vision",
13 | Year = {2015},
14 | }
15 |
--------------------------------------------------------------------------------
/datasets/bibtex/transattr.tex:
--------------------------------------------------------------------------------
1 | @article {Laffont14,
2 | title = {Transient Attributes for High-Level Understanding and Editing of Outdoor Scenes},
3 | author = {Pierre-Yves Laffont and Zhile Ren and Xiaofeng Tao and Chao Qian and James Hays},
4 | journal = {ACM Transactions on Graphics (proceedings of SIGGRAPH)},
5 | volume = {33},
6 | number = {4},
7 | year = {2014}
8 | }
9 |
--------------------------------------------------------------------------------
/datasets/combine_A_and_B.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | import cv2
4 | import argparse
5 |
# Build pix2pix-style paired images by horizontally concatenating each A image
# with its corresponding B image, split by split.
parser = argparse.ArgumentParser('create image pairs')
parser.add_argument('--fold_A', dest='fold_A', help='input directory for image A', type=str, default='../dataset/50kshoes_edges')
parser.add_argument('--fold_B', dest='fold_B', help='input directory for image B', type=str, default='../dataset/50kshoes_jpg')
parser.add_argument('--fold_AB', dest='fold_AB', help='output directory', type=str, default='../dataset/test_AB')
parser.add_argument('--num_imgs', dest='num_imgs', help='number of images', type=int, default=1000000)
parser.add_argument('--use_AB', dest='use_AB', help='if true: (0001_A, 0001_B) to (0001_AB)', action='store_true')
args = parser.parse_args()

for arg in vars(args):
    print('[%s] = ' % arg, getattr(args, arg))

splits = os.listdir(args.fold_A)

for sp in splits:
    img_fold_A = os.path.join(args.fold_A, sp)
    img_fold_B = os.path.join(args.fold_B, sp)
    img_list = os.listdir(img_fold_A)
    if args.use_AB:
        img_list = [img_path for img_path in img_list if '_A.' in img_path]

    num_imgs = min(args.num_imgs, len(img_list))
    print('split = %s, use %d/%d images' % (sp, num_imgs, len(img_list)))
    img_fold_AB = os.path.join(args.fold_AB, sp)
    if not os.path.isdir(img_fold_AB):
        os.makedirs(img_fold_AB)
    print('split = %s, number of images = %d' % (sp, num_imgs))
    for n in range(num_imgs):
        name_A = img_list[n]
        path_A = os.path.join(img_fold_A, name_A)
        if args.use_AB:
            name_B = name_A.replace('_A.', '_B.')
        else:
            name_B = name_A
        path_B = os.path.join(img_fold_B, name_B)
        if os.path.isfile(path_A) and os.path.isfile(path_B):
            name_AB = name_A
            if args.use_AB:
                name_AB = name_AB.replace('_A.', '.')  # remove _A
            path_AB = os.path.join(img_fold_AB, name_AB)
            # FIX: cv2.CV_LOAD_IMAGE_COLOR was removed in OpenCV 3.x;
            # cv2.IMREAD_COLOR is the portable flag with the same value (1).
            im_A = cv2.imread(path_A, cv2.IMREAD_COLOR)
            im_B = cv2.imread(path_B, cv2.IMREAD_COLOR)
            im_AB = np.concatenate([im_A, im_B], 1)
            cv2.imwrite(path_AB, im_AB)
49 |
--------------------------------------------------------------------------------
/datasets/download_coco.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Download and unpack the MS-COCO 2017 images and annotations into datasets/COCO.

path=datasets/COCO
items=(annotations_trainval2017.zip train2017.zip val2017.zip test2017.zip)

# -p: do not fail when the directory already exists (plain mkdir aborts reruns)
mkdir -p "$path"

wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip -P "$path"
wget http://images.cocodataset.org/zips/train2017.zip -P "$path"
wget http://images.cocodataset.org/zips/val2017.zip -P "$path"
wget http://images.cocodataset.org/zips/test2017.zip -P "$path"

# Quote every expansion so paths with spaces cannot be word-split.
for item in "${items[@]}"; do
    unzip "$path/$item" -d "$path"
    rm "$path/$item"
done
17 |
--------------------------------------------------------------------------------
/datasets/download_cyclegan_dataset.sh:
--------------------------------------------------------------------------------
FILE=$1

# Validate the requested dataset name before downloading anything.
if [[ $FILE != "ae_photos" && $FILE != "apple2orange" && $FILE != "summer2winter_yosemite" && $FILE != "horse2zebra" && $FILE != "monet2photo" && $FILE != "cezanne2photo" && $FILE != "ukiyoe2photo" && $FILE != "vangogh2photo" && $FILE != "maps" && $FILE != "cityscapes" && $FILE != "facades" && $FILE != "iphone2dslr_flower" && $FILE != "ae_photos" && $FILE != "mini" && $FILE != "mini_pix2pix" ]]; then
    echo "Available datasets are: apple2orange, summer2winter_yosemite, horse2zebra, monet2photo, cezanne2photo, ukiyoe2photo, vangogh2photo, maps, cityscapes, facades, iphone2dslr_flower, ae_photos"
    exit 1
fi

echo "Specified [$FILE]"
URL=https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/$FILE.zip
ZIP_FILE=./datasets/$FILE.zip
TARGET_DIR=./datasets/$FILE/
# Quote expansions; mkdir -p so a pre-existing target directory is not an error.
wget -N "$URL" -O "$ZIP_FILE"
mkdir -p "$TARGET_DIR"
unzip "$ZIP_FILE" -d ./datasets/
rm "$ZIP_FILE"
16 |
--------------------------------------------------------------------------------
/datasets/download_pix2pix_dataset.sh:
--------------------------------------------------------------------------------
FILE=$1

# Validate the requested dataset name before downloading anything.
if [[ $FILE != "cityscapes" && $FILE != "night2day" && $FILE != "edges2handbags" && $FILE != "edges2shoes" && $FILE != "facades" && $FILE != "maps" ]]; then
    echo "Available datasets are cityscapes, night2day, edges2handbags, edges2shoes, facades, maps"
    exit 1
fi

echo "Specified [$FILE]"

URL=http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/$FILE.tar.gz
TAR_FILE=./datasets/$FILE.tar.gz
TARGET_DIR=./datasets/$FILE/
# Quote expansions so unusual dataset names cannot be word-split.
wget -N "$URL" -O "$TAR_FILE"
mkdir -p "$TARGET_DIR"
tar -zxvf "$TAR_FILE" -C ./datasets/
rm "$TAR_FILE"
17 |
--------------------------------------------------------------------------------
/datasets/generate_ccp_dataset.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import numpy as np
3 | import scipy.io as sio
4 | from pathlib import Path
5 | from tqdm import tqdm
6 | from PIL import Image
7 |
8 |
def main():
    """Entry point: parse command-line arguments and build the CCP dataset."""
    args = create_argument_parser().parse_args()
    generate_ccp_dataset(args)
13 |
def create_argument_parser():
    """Build the command-line parser for CCP dataset generation."""
    options = [
        ('--data_root', 'datasets/clothing-co-parsing', None),
        ('--save_root', 'datasets/jeans2skirt_ccp', None),
        ('--cat1', 'jeans', 'category 1'),
        ('--cat2', 'skirt', 'category 2'),
    ]
    parser = argparse.ArgumentParser()
    for flag, default, help_text in options:
        parser.add_argument(flag, type=str, default=default, help=help_text)
    return parser
21 |
def generate_ccp_dataset(args):
    """Generate the CCP-based dataset (train/val splits for domains A/B)."""
    args.data_root = Path(args.data_root)
    args.img_root = args.data_root / 'photos'
    args.pix_ann_root = args.data_root / 'annotations' / 'pixel-level'
    args.img_ann_root = args.data_root / 'annotations' / 'image-level'
    args.pix_ann_ids = get_ann_ids(args.pix_ann_root)
    args.img_ann_ids = get_ann_ids(args.img_ann_root)

    args.label_list = sio.loadmat(str(args.data_root / 'label_list.mat'))['label_list'].squeeze()

    args.save_root = Path(args.save_root)
    args.save_root.mkdir()  # fail loudly rather than overwrite an existing dataset

    for imset, cat in (('A', args.cat1), ('B', args.cat2)):
        generate_ccp_dataset_train(args, imset, cat)
    for imset, cat in (('A', args.cat1), ('B', args.cat2)):
        generate_ccp_dataset_val(args, imset, cat)
40 |
def generate_ccp_dataset_train(args, imset, cat):
    """Write train images and single-instance masks for one domain (A or B)."""
    img_path = args.save_root / 'train{}'.format(imset)
    seg_path = args.save_root / 'train{}_seg'.format(imset)
    img_path.mkdir()
    seg_path.mkdir()

    cat_id = get_cat_id(args.label_list, cat)

    pb = tqdm(total=len(args.pix_ann_ids))
    pb.set_description('train{}'.format(imset))
    for ann_id in args.pix_ann_ids:
        mat_file = str(args.pix_ann_root / '{}.mat'.format(ann_id))
        ann = sio.loadmat(mat_file)['groundtruth']
        if np.isin(ann, cat_id).sum() > 0:
            src = Image.open(args.img_root / '{}.jpg'.format(ann_id))
            src.save(img_path / '{}.png'.format(ann_id))
            mask = (ann == cat_id).astype('uint8')  # binary mask of the category
            Image.fromarray(mask * 255).save(seg_path / '{}_0.png'.format(ann_id))
        pb.update(1)
    pb.close()
61 |
def generate_ccp_dataset_val(args, imset, cat):
    """Write val images for one domain; selection uses image-level tags only."""
    img_path = args.save_root / 'val{}'.format(imset)
    seg_path = args.save_root / 'val{}_seg'.format(imset)
    img_path.mkdir()
    seg_path.mkdir()

    cat_id = get_cat_id(args.label_list, cat)

    pb = tqdm(total=len(args.img_ann_ids))
    pb.set_description('val{}'.format(imset))
    for ann_id in args.img_ann_ids:
        tags = sio.loadmat(str(args.img_ann_root / '{}.mat'.format(ann_id)))['tags']
        if np.isin(tags, cat_id).sum() > 0:
            src = Image.open(args.img_root / '{}.jpg'.format(ann_id))
            src.save(img_path / '{}.png'.format(ann_id))
        pb.update(1)
    pb.close()
79 |
def get_ann_ids(anno_path):
    """Return annotation ids (file names up to the first dot) under `anno_path`."""
    return [p.name.split('.')[0] for p in anno_path.iterdir()]
85 |
def get_cat_id(label_list, cat):
    """Return the index of `cat` in `label_list`, or None when absent."""
    for idx, label in enumerate(label_list):
        if label[0] == cat:
            return idx
    return None
90 |
91 | if __name__ == '__main__':
92 | main()
--------------------------------------------------------------------------------
/datasets/generate_coco_dataset.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import numpy as np
3 | from pathlib import Path
4 | from tqdm import tqdm
5 | from PIL import Image
6 | from pycocotools.coco import COCO
7 |
8 | import torch
9 | import torchvision.transforms as T
10 |
11 |
def main():
    """Entry point: parse arguments and generate the COCO-based dataset."""
    args = create_argument_parser().parse_args()
    generate_coco_dataset(args)
16 |
17 |
def create_argument_parser():
    """Build the command-line parser for COCO dataset generation."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add('--data_root', type=str, default='datasets/COCO')
    add('--save_root', type=str, default='datasets/shp2gir_coco')
    add('--image_size', type=int, default=256, help='image size')
    add('--cat1', type=str, default='sheep', help='category 1')
    add('--cat2', type=str, default='giraffe', help='category 2')
    return parser
26 |
27 |
def generate_coco_dataset(args):
    """Generate the COCO-based dataset (train/val splits for domains A/B)."""
    args.data_root = Path(args.data_root)
    args.save_root = Path(args.save_root)
    args.save_root.mkdir()  # intentionally fails if the target already exists

    for split in ('train', 'val'):
        generate_coco_dataset_sub(args, split, 'A', args.cat1)
        generate_coco_dataset_sub(args, split, 'B', args.cat2)
38 |
39 |
def generate_coco_dataset_sub(args, idx1, idx2, cat):
    """Write images and per-instance masks for one (split, domain) pair.

    - idx1: 'train' or 'val'
    - idx2: 'A' or 'B'
    - cat: COCO category name
    """
    data_path = args.data_root / '{}2017'.format(idx1)
    anno_path = args.data_root / 'annotations/instances_{}2017.json'.format(idx1)
    coco = COCO(anno_path)  # COCO API

    img_path = args.save_root / '{}{}'.format(idx1, idx2)
    seg_path = args.save_root / '{}{}_seg'.format(idx1, idx2)
    img_path.mkdir()
    seg_path.mkdir()

    cat_id = coco.getCatIds(catNms=cat)
    images = coco.loadImgs(coco.getImgIds(catIds=cat_id))

    pb = tqdm(total=len(images))
    pb.set_description('{}{}'.format(idx1, idx2))
    for info in images:
        anns = coco.loadAnns(coco.getAnnIds(imgIds=info['id'], catIds=cat_id))

        # Save every instance mask that survives resize+crop non-empty.
        # pb.n (masks/images saved so far this split) doubles as the file index.
        count = 0
        for ann in anns:
            mask = Image.fromarray(coco.annToMask(ann) * 255)
            mask = resize(mask, args.image_size)
            if np.sum(np.asarray(mask)) > 0:
                mask.save(seg_path / '{}_{}.png'.format(pb.n, count))
                count += 1

        if count > 0:  # at least one instance exists
            out = resize(Image.open(data_path / info['file_name']), args.image_size)
            out.save(img_path / '{}.png'.format(pb.n))

        pb.update(1)
    pb.close()
82 |
83 |
def resize(img, size):
    """Resize the short side to `size`, then center-crop to a `size` square."""
    transform = T.Compose([T.Resize(size), T.CenterCrop(size)])
    return transform(img)
89 |
90 |
91 | if __name__ == '__main__':
92 | main()
93 |
--------------------------------------------------------------------------------
/datasets/generate_mhp_dataset.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import cv2
3 | import math
4 | import numpy as np
5 | import scipy.io as sio
6 | from pathlib import Path
7 | from tqdm import tqdm
8 | from PIL import Image
9 |
10 | import torch
11 | import torchvision.transforms as T
12 |
13 |
def main():
    """Entry point: parse arguments and generate the MHP-based dataset."""
    args = create_argument_parser().parse_args()
    generate_ccp_dataset(args)
18 |
def create_argument_parser():
    """Build the command-line parser for MHP dataset generation."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add('--data_root', type=str, default='datasets/LV-MHP-v1')
    add('--save_root', type=str, default='datasets/pants2skirt_mhp')
    add('--cat1', type=str, default='pants', help='category 1')
    add('--cat2', type=str, default='skirt', help='category 2')
    add('--size_h', type=float, default=450, help='height')
    add('--size_w', type=float, default=300, help='width')
    add('--no_skip_horizontal', action='store_true', help='do *not* skip horizontal images')
    return parser
29 |
def generate_ccp_dataset(args):
    """Generate the MHP-based dataset (train/test splits for domains A/B)."""
    args.data_root = Path(args.data_root)
    args.img_root = args.data_root / 'images'
    args.ann_root = args.data_root / 'annotations'

    args.save_root = Path(args.save_root)
    args.save_root.mkdir()  # intentionally fails if the target already exists

    for phase in ('train', 'test'):
        generate_mhp_dataset(args, phase, 'A', get_cat_id(args.cat1))
        generate_mhp_dataset(args, phase, 'B', get_cat_id(args.cat2))
45 |
def generate_mhp_dataset(args, phase, domain, cat):
    """Write images and per-instance masks for one (phase, domain) pair.

    Args:
        args: parsed namespace; save_root/ann_root/img_root must already be
            Path objects (set by generate_ccp_dataset).
        phase: 'train' or 'test'.
        domain: 'A' or 'B'.
        cat: integer MHP label id to extract (see get_cat_id).
    """
    img_path = args.save_root / '{}{}'.format(phase, domain)
    seg_path = args.save_root / '{}{}_seg'.format(phase, domain)
    img_path.mkdir()
    seg_path.mkdir()

    # The split file lists one image file name per line.
    idx_path = args.data_root / '{}_list.txt'.format(phase)
    f = idx_path.open()
    idxs = f.readlines()

    pb = tqdm(total=len(idxs))
    pb.set_description('{}{}'.format(phase, domain))
    for idx in idxs:
        count = 0  # number of instances
        id = idx.split('.')[0]  # before extension
        # Annotation files share the image id as their first '_'-separated
        # token; presumably one file per person instance — TODO confirm.
        for ann_path in args.ann_root.iterdir():
            if ann_path.name.split('_')[0] == id:
                ann = cv2.imread(str(ann_path))
                if not args.no_skip_horizontal:
                    if ann.shape[1] > ann.shape[0]:
                        continue  # skip horizontal image
                if np.isin(ann, cat).sum() > 0:
                    seg = (ann == cat).astype('uint8')  # get segment of given category
                    seg = Image.fromarray(seg * 255)
                    seg = resize_and_crop(seg, [args.size_w, args.size_h])  # resize and crop
                    if np.sum(np.asarray(seg)) > 0:  # drop masks emptied by the crop
                        seg.save(seg_path / '{}_{}.png'.format(id, count))
                        count += 1
        if count > 0:
            # img = Image.open(args.img_root / '{}.jpg'.format(id))
            # PIL fails to open Image -> hence, open with cv2
            # https://stackoverflow.com/questions/48944819/image-open-gives-error-cannot-identify-image-file
            img = cv2.imread(str(args.img_root / '{}.jpg'.format(id)))
            # convert cv2 image to PIL image format
            # https://stackoverflow.com/questions/43232813/convert-opencv-image-format-to-pil-image-format?noredirect=1&lq=1
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = Image.fromarray(img)
            img = resize_and_crop(img, [args.size_w, args.size_h])
            img.save(img_path / '{}.png'.format(id))

        pb.update(1)

    pb.close()
89 |
def get_cat_id(cat):
    """Return the MHP label index for part name `cat` (KeyError if unknown)."""
    names = (
        'background', 'hat', 'hair', 'sunglass', 'upper-clothes', 'skirt',
        'pants', 'dress', 'belt', 'left-shoe', 'right-shoe', 'face',
        'left-leg', 'right-leg', 'left-arm', 'right-arm', 'bag', 'scarf',
        'torso-skin',
    )
    return {name: i for i, name in enumerate(names)}[cat]
112 |
def resize_and_crop(img, size):
    """Scale `img` to the target height (keeping aspect ratio), then
    center-crop to the target width.

    `size` is [target_width, target_height].
    """
    src_w, src_h = img.size
    tgt_w, tgt_h = size
    scaled_w = math.ceil((src_w / src_h) * tgt_h)
    resized = T.Resize([tgt_h, scaled_w])(img)
    return T.CenterCrop([tgt_h, tgt_w])(resized)
121 |
122 | if __name__ == '__main__':
123 | main()
--------------------------------------------------------------------------------
/datasets/make_dataset_aligned.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from PIL import Image
4 |
5 |
def get_file_paths(folder):
    """Return sorted absolute paths of .png/.jpg files directly in `folder`
    (subfolders are not descended into)."""
    image_file_paths = []
    for root, dirs, filenames in os.walk(folder):
        base = os.path.abspath(root)
        for filename in sorted(filenames):
            if filename.endswith(('.png', '.jpg')):
                image_file_paths.append(os.path.join(base, filename))
        break  # prevent descending into subfolders
    return image_file_paths
18 |
19 |
def align_images(a_file_paths, b_file_paths, target_path):
    """Paste each A|B image pair side by side and write it to `target_path`
    as a zero-padded, sequentially numbered .jpg."""
    if not os.path.exists(target_path):
        os.makedirs(target_path)

    for i in range(len(a_file_paths)):
        img_a = Image.open(a_file_paths[i])
        img_b = Image.open(b_file_paths[i])
        assert(img_a.size == img_b.size)

        w, h = img_a.size
        combined = Image.new("RGB", (w * 2, h))
        combined.paste(img_a, (0, 0))
        combined.paste(img_b, (w, 0))
        combined.save(os.path.join(target_path, '{:04d}.jpg'.format(i)))
33 |
34 |
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--dataset-path',
        dest='dataset_path',
        help='Which folder to process (it should have subfolders testA, testB, trainA and trainB'
    )
    args = parser.parse_args()

    dataset_folder = args.dataset_path
    print(dataset_folder)

    # Gather and sanity-check both splits first, then align: an assertion
    # failure on either split aborts before anything is written.
    jobs = []
    for split in ('test', 'train'):
        a_paths = get_file_paths(os.path.join(dataset_folder, split + 'A'))
        b_paths = get_file_paths(os.path.join(dataset_folder, split + 'B'))
        assert(len(a_paths) == len(b_paths))
        jobs.append((a_paths, b_paths, os.path.join(dataset_folder, split)))

    for a_paths, b_paths, out_path in jobs:
        align_images(a_paths, b_paths, out_path)
64 |
--------------------------------------------------------------------------------
/datasets/pants2skirt_mhp/sampleA/1602.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/pants2skirt_mhp/sampleA/1602.png
--------------------------------------------------------------------------------
/datasets/pants2skirt_mhp/sampleA/2207.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/pants2skirt_mhp/sampleA/2207.png
--------------------------------------------------------------------------------
/datasets/pants2skirt_mhp/sampleA/2781.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/pants2skirt_mhp/sampleA/2781.png
--------------------------------------------------------------------------------
/datasets/pants2skirt_mhp/sampleA/3078.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/pants2skirt_mhp/sampleA/3078.png
--------------------------------------------------------------------------------
/datasets/pants2skirt_mhp/sampleA/3194.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/pants2skirt_mhp/sampleA/3194.png
--------------------------------------------------------------------------------
/datasets/pants2skirt_mhp/sampleA_seg/1602_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/pants2skirt_mhp/sampleA_seg/1602_0.png
--------------------------------------------------------------------------------
/datasets/pants2skirt_mhp/sampleA_seg/1602_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/pants2skirt_mhp/sampleA_seg/1602_1.png
--------------------------------------------------------------------------------
/datasets/pants2skirt_mhp/sampleA_seg/2207_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/pants2skirt_mhp/sampleA_seg/2207_0.png
--------------------------------------------------------------------------------
/datasets/pants2skirt_mhp/sampleA_seg/2207_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/pants2skirt_mhp/sampleA_seg/2207_1.png
--------------------------------------------------------------------------------
/datasets/pants2skirt_mhp/sampleA_seg/2781_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/pants2skirt_mhp/sampleA_seg/2781_0.png
--------------------------------------------------------------------------------
/datasets/pants2skirt_mhp/sampleA_seg/2781_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/pants2skirt_mhp/sampleA_seg/2781_1.png
--------------------------------------------------------------------------------
/datasets/pants2skirt_mhp/sampleA_seg/2781_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/pants2skirt_mhp/sampleA_seg/2781_2.png
--------------------------------------------------------------------------------
/datasets/pants2skirt_mhp/sampleA_seg/3078_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/pants2skirt_mhp/sampleA_seg/3078_0.png
--------------------------------------------------------------------------------
/datasets/pants2skirt_mhp/sampleA_seg/3078_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/pants2skirt_mhp/sampleA_seg/3078_1.png
--------------------------------------------------------------------------------
/datasets/pants2skirt_mhp/sampleA_seg/3194_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/pants2skirt_mhp/sampleA_seg/3194_0.png
--------------------------------------------------------------------------------
/datasets/pants2skirt_mhp/sampleA_seg/3194_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/pants2skirt_mhp/sampleA_seg/3194_1.png
--------------------------------------------------------------------------------
/datasets/pants2skirt_mhp/sampleB/0026.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/pants2skirt_mhp/sampleB/0026.png
--------------------------------------------------------------------------------
/datasets/pants2skirt_mhp/sampleB/0637.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/pants2skirt_mhp/sampleB/0637.png
--------------------------------------------------------------------------------
/datasets/pants2skirt_mhp/sampleB/1179.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/pants2skirt_mhp/sampleB/1179.png
--------------------------------------------------------------------------------
/datasets/pants2skirt_mhp/sampleB/4217.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/pants2skirt_mhp/sampleB/4217.png
--------------------------------------------------------------------------------
/datasets/pants2skirt_mhp/sampleB/4413.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/pants2skirt_mhp/sampleB/4413.png
--------------------------------------------------------------------------------
/datasets/pants2skirt_mhp/sampleB_seg/0026_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/pants2skirt_mhp/sampleB_seg/0026_0.png
--------------------------------------------------------------------------------
/datasets/pants2skirt_mhp/sampleB_seg/0637_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/pants2skirt_mhp/sampleB_seg/0637_0.png
--------------------------------------------------------------------------------
/datasets/pants2skirt_mhp/sampleB_seg/1179_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/pants2skirt_mhp/sampleB_seg/1179_0.png
--------------------------------------------------------------------------------
/datasets/pants2skirt_mhp/sampleB_seg/4217_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/pants2skirt_mhp/sampleB_seg/4217_0.png
--------------------------------------------------------------------------------
/datasets/pants2skirt_mhp/sampleB_seg/4413_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/pants2skirt_mhp/sampleB_seg/4413_0.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA/1106.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA/1106.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA/1134.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA/1134.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA/1271.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA/1271.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA/139.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA/139.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA/602.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA/602.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA/732.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA/732.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA/866.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA/866.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA_seg/1106_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA_seg/1106_0.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA_seg/1106_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA_seg/1106_1.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA_seg/1134_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA_seg/1134_0.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA_seg/1134_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA_seg/1134_1.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA_seg/1271_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA_seg/1271_0.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA_seg/1271_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA_seg/1271_1.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA_seg/139_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA_seg/139_0.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA_seg/139_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA_seg/139_1.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA_seg/139_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA_seg/139_2.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA_seg/139_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA_seg/139_3.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA_seg/602_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA_seg/602_0.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA_seg/602_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA_seg/602_1.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA_seg/602_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA_seg/602_2.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA_seg/602_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA_seg/602_3.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA_seg/602_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA_seg/602_4.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA_seg/602_5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA_seg/602_5.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA_seg/602_6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA_seg/602_6.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA_seg/602_7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA_seg/602_7.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA_seg/732_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA_seg/732_0.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA_seg/732_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA_seg/732_1.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA_seg/732_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA_seg/732_2.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA_seg/732_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA_seg/732_3.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA_seg/732_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA_seg/732_4.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA_seg/732_5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA_seg/732_5.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA_seg/866_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA_seg/866_0.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA_seg/866_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA_seg/866_1.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA_seg/866_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA_seg/866_2.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA_seg/866_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA_seg/866_3.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleA_seg/866_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleA_seg/866_4.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleB/2075.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleB/2075.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleB/2191.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleB/2191.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleB/2316.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleB/2316.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleB/364.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleB/364.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleB/380.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleB/380.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleB/46.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleB/46.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleB/581.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleB/581.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleB_seg/2075_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleB_seg/2075_0.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleB_seg/2075_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleB_seg/2075_1.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleB_seg/2191_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleB_seg/2191_0.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleB_seg/2191_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleB_seg/2191_1.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleB_seg/2191_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleB_seg/2191_2.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleB_seg/2191_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleB_seg/2191_3.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleB_seg/2316_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleB_seg/2316_0.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleB_seg/2316_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleB_seg/2316_1.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleB_seg/2316_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleB_seg/2316_2.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleB_seg/2316_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleB_seg/2316_3.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleB_seg/2316_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleB_seg/2316_4.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleB_seg/364_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleB_seg/364_0.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleB_seg/364_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleB_seg/364_1.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleB_seg/380_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleB_seg/380_0.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleB_seg/380_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleB_seg/380_1.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleB_seg/46_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleB_seg/46_0.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleB_seg/581_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleB_seg/581_0.png
--------------------------------------------------------------------------------
/datasets/shp2gir_coco/sampleB_seg/581_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/datasets/shp2gir_coco/sampleB_seg/581_1.png
--------------------------------------------------------------------------------
/docs/more_results.md:
--------------------------------------------------------------------------------
1 | ## More Translation Results
2 |
3 | ### MHP dataset (pants->skirt)
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 | ### MHP dataset (skirt->pants)
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 | ### COCO dataset (sheep->giraffe)
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 | ### COCO dataset (giraffe->sheep)
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 | ### COCO dataset (zebra->elephant)
44 |
45 |
46 |
47 |
48 |
49 | ### COCO dataset (elephant->zebra)
50 |
51 |
52 |
53 |
54 |
55 | ### COCO dataset (bird->zebra)
56 |
57 |
58 |
59 |
60 |
61 | ### COCO dataset (zebra->bird)
62 |
63 |
64 |
65 |
66 |
67 | ### COCO dataset (horse->car)
68 |
69 |
70 |
71 |
72 |
73 | ### COCO dataset (car->horse)
74 |
75 |
76 |
77 |
78 |
79 | ### Google-searched images (pants->skirt)
80 |
81 |
82 |
83 | ### YouTube-searched videos (pants->skirt)
84 |
85 |
86 |
87 |
--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
1 | name: pytorch-CycleGAN-and-pix2pix
2 | channels:
3 | - peterjc123
4 | - defaults
5 | dependencies:
6 | - python=3.5.5
7 | - pytorch=0.4
8 | - scipy
9 | - pip:
10 | - dominate==2.3.1
11 | - git+https://github.com/pytorch/vision.git
12 | - Pillow==5.0.0
13 | - numpy==1.14.1
14 | - visdom==0.1.7
15 |
--------------------------------------------------------------------------------
/imgs/intro.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/intro.png
--------------------------------------------------------------------------------
/imgs/model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/model.png
--------------------------------------------------------------------------------
/imgs/more/coco-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/coco-1.png
--------------------------------------------------------------------------------
/imgs/more/coco-10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/coco-10.png
--------------------------------------------------------------------------------
/imgs/more/coco-11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/coco-11.png
--------------------------------------------------------------------------------
/imgs/more/coco-12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/coco-12.png
--------------------------------------------------------------------------------
/imgs/more/coco-13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/coco-13.png
--------------------------------------------------------------------------------
/imgs/more/coco-14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/coco-14.png
--------------------------------------------------------------------------------
/imgs/more/coco-15.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/coco-15.png
--------------------------------------------------------------------------------
/imgs/more/coco-16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/coco-16.png
--------------------------------------------------------------------------------
/imgs/more/coco-17.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/coco-17.png
--------------------------------------------------------------------------------
/imgs/more/coco-18.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/coco-18.png
--------------------------------------------------------------------------------
/imgs/more/coco-19.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/coco-19.png
--------------------------------------------------------------------------------
/imgs/more/coco-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/coco-2.png
--------------------------------------------------------------------------------
/imgs/more/coco-20.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/coco-20.png
--------------------------------------------------------------------------------
/imgs/more/coco-21.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/coco-21.png
--------------------------------------------------------------------------------
/imgs/more/coco-22.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/coco-22.png
--------------------------------------------------------------------------------
/imgs/more/coco-23.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/coco-23.png
--------------------------------------------------------------------------------
/imgs/more/coco-24.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/coco-24.png
--------------------------------------------------------------------------------
/imgs/more/coco-25.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/coco-25.png
--------------------------------------------------------------------------------
/imgs/more/coco-26.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/coco-26.png
--------------------------------------------------------------------------------
/imgs/more/coco-3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/coco-3.png
--------------------------------------------------------------------------------
/imgs/more/coco-4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/coco-4.png
--------------------------------------------------------------------------------
/imgs/more/coco-5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/coco-5.png
--------------------------------------------------------------------------------
/imgs/more/coco-6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/coco-6.png
--------------------------------------------------------------------------------
/imgs/more/coco-7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/coco-7.png
--------------------------------------------------------------------------------
/imgs/more/coco-8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/coco-8.png
--------------------------------------------------------------------------------
/imgs/more/coco-9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/coco-9.png
--------------------------------------------------------------------------------
/imgs/more/google.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/google.png
--------------------------------------------------------------------------------
/imgs/more/label.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/label.png
--------------------------------------------------------------------------------
/imgs/more/mhp-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/mhp-1.png
--------------------------------------------------------------------------------
/imgs/more/mhp-10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/mhp-10.png
--------------------------------------------------------------------------------
/imgs/more/mhp-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/mhp-2.png
--------------------------------------------------------------------------------
/imgs/more/mhp-3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/mhp-3.png
--------------------------------------------------------------------------------
/imgs/more/mhp-4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/mhp-4.png
--------------------------------------------------------------------------------
/imgs/more/mhp-5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/mhp-5.png
--------------------------------------------------------------------------------
/imgs/more/mhp-6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/mhp-6.png
--------------------------------------------------------------------------------
/imgs/more/mhp-7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/mhp-7.png
--------------------------------------------------------------------------------
/imgs/more/mhp-8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/mhp-8.png
--------------------------------------------------------------------------------
/imgs/more/mhp-9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/mhp-9.png
--------------------------------------------------------------------------------
/imgs/more/youtube.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/more/youtube.png
--------------------------------------------------------------------------------
/imgs/results-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/results-1.png
--------------------------------------------------------------------------------
/imgs/results-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/results-2.png
--------------------------------------------------------------------------------
/imgs/results-3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/results-3.png
--------------------------------------------------------------------------------
/imgs/results-4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/imgs/results-4.png
--------------------------------------------------------------------------------
/models/__init__.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | from models.base_model import BaseModel
3 |
4 |
def find_model_using_name(model_name):
    """Import "models/<model_name>_model.py" and return its model class.

    Given the option --model [modelname], the file
    "models/modelname_model.py" is imported; it must define a subclass of
    BaseModel whose class name matches <modelname>Model with underscores
    removed, case-insensitively (e.g. --model insta_gan -> InstaGANModel).
    Exits the process with a non-zero status if no such class is found.
    """
    model_filename = "models." + model_name + "_model"
    modellib = importlib.import_module(model_filename)

    model = None
    target_model_name = model_name.replace('_', '') + 'model'
    for name, cls in modellib.__dict__.items():
        # isinstance(cls, type) guards issubclass() against non-class module
        # attributes (functions, constants) that happen to match the name.
        if name.lower() == target_model_name.lower() \
           and isinstance(cls, type) and issubclass(cls, BaseModel):
            model = cls

    if model is None:
        print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name))
        exit(1)  # non-zero exit: this is an error, not a normal termination

    return model
27 |
28 |
def get_option_setter(model_name):
    """Return the named model class's modify_commandline_options hook."""
    return find_model_using_name(model_name).modify_commandline_options
32 |
33 |
def create_model(opt):
    """Instantiate and initialize the model selected by opt.model."""
    model_class = find_model_using_name(opt.model)
    instance = model_class()
    instance.initialize(opt)
    print("model [%s] was created" % (instance.name()))
    return instance
40 |
--------------------------------------------------------------------------------
/models/base_model.py:
--------------------------------------------------------------------------------
1 | import os
2 | import torch
3 | from collections import OrderedDict
4 | from . import networks
5 |
6 |
class BaseModel():
    """Common scaffolding shared by all models in this project.

    Subclasses implement set_input/forward/optimize_parameters and populate
    loss_names, model_names and visual_names inside initialize(); this base
    class then drives saving/loading, LR scheduling, loss reporting and
    visualization from those name lists (each net is stored as 'net<name>',
    each loss as 'loss_<name>').
    """

    # modify parser to add command line options,
    # and also change the default values if needed
    @staticmethod
    def modify_commandline_options(parser, is_train):
        return parser

    def name(self):
        return 'BaseModel'

    def initialize(self, opt):
        """Store options, pick the compute device and reset bookkeeping lists."""
        self.opt = opt
        self.gpu_ids = opt.gpu_ids
        self.isTrain = opt.isTrain
        # first listed GPU if any, otherwise CPU
        self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
        # cudnn autotuning pays off only with fixed input sizes; 'scale_width'
        # produces variable sizes, so benchmarking is skipped in that case.
        if opt.resize_or_crop != 'scale_width':
            torch.backends.cudnn.benchmark = True
        self.loss_names = []
        self.model_names = []
        self.visual_names = []
        self.image_paths = []

    def set_input(self, input):
        # overridden by subclasses to unpack a data-loader batch
        pass

    def forward(self):
        pass

    # load and print networks; create schedulers
    def setup(self, opt, parser=None):
        """Create LR schedulers (train), load weights (test/continue), print nets."""
        if self.isTrain:
            self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
        if not self.isTrain or opt.continue_train:
            # prefer an iteration checkpoint when --load_iter is given
            load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch
            self.load_networks(load_suffix)
        self.print_networks(opt.verbose)

    # make models eval mode during test time
    def eval(self):
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, 'net' + name)
                net.eval()

    # used in test time, wrapping `forward` in no_grad() so we don't save
    # intermediate steps for backprop
    def test(self):
        with torch.no_grad():
            self.forward()

    # get image paths
    def get_image_paths(self):
        return self.image_paths

    def optimize_parameters(self):
        # overridden by subclasses: one full optimization step
        pass

    # update learning rate (called once every epoch)
    def update_learning_rate(self):
        for scheduler in self.schedulers:
            scheduler.step()
        lr = self.optimizers[0].param_groups[0]['lr']
        print('learning rate = %.7f' % lr)

    # return visualization images. train.py will display these images, and save the images to a html
    def get_current_visuals(self):
        visual_ret = OrderedDict()
        for name in self.visual_names:
            if isinstance(name, str):
                visual_ret[name] = getattr(self, name)
        return visual_ret

    # return traning losses/errors. train.py will print out these errors as debugging information
    def get_current_losses(self):
        errors_ret = OrderedDict()
        for name in self.loss_names:
            if isinstance(name, str):
                # float(...) works for both scalar tensor and float number
                errors_ret[name] = float(getattr(self, 'loss_' + name))
        return errors_ret

    # save models to the disk
    def save_networks(self, epoch):
        """Save every net in model_names as '<epoch>_net_<name>.pth' under save_dir."""
        for name in self.model_names:
            if isinstance(name, str):
                save_filename = '%s_net_%s.pth' % (epoch, name)
                save_path = os.path.join(self.save_dir, save_filename)
                net = getattr(self, 'net' + name)

                if len(self.gpu_ids) > 0 and torch.cuda.is_available():
                    # save CPU-side weights (unwrapping DataParallel's .module),
                    # then move the net back onto its GPU
                    torch.save(net.module.cpu().state_dict(), save_path)
                    net.cuda(self.gpu_ids[0])
                else:
                    torch.save(net.cpu().state_dict(), save_path)

    def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
        """Drop stale InstanceNorm entries from checkpoints saved by PyTorch < 0.4.

        Recursively walks the dotted key path `keys` down the module tree; at
        the leaf, removes running_mean/running_var entries the current module
        no longer holds, and num_batches_tracked entries, so that
        load_state_dict() does not fail on them.
        """
        key = keys[i]
        if i + 1 == len(keys):  # at the end, pointing to a parameter/buffer
            if module.__class__.__name__.startswith('InstanceNorm') and \
                    (key == 'running_mean' or key == 'running_var'):
                if getattr(module, key) is None:
                    state_dict.pop('.'.join(keys))
            if module.__class__.__name__.startswith('InstanceNorm') and \
                    (key == 'num_batches_tracked'):
                state_dict.pop('.'.join(keys))
        else:
            self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)

    # load models from the disk
    def load_networks(self, epoch):
        """Load '<epoch>_net_<name>.pth' weights for every net in model_names."""
        for name in self.model_names:
            if isinstance(name, str):
                load_filename = '%s_net_%s.pth' % (epoch, name)
                load_path = os.path.join(self.save_dir, load_filename)
                net = getattr(self, 'net' + name)
                if isinstance(net, torch.nn.DataParallel):
                    net = net.module
                print('loading the model from %s' % load_path)
                # if you are using PyTorch newer than 0.4 (e.g., built from
                # GitHub source), you can remove str() on self.device
                state_dict = torch.load(load_path, map_location=str(self.device))
                if hasattr(state_dict, '_metadata'):
                    del state_dict._metadata

                # patch InstanceNorm checkpoints prior to 0.4
                for key in list(state_dict.keys()):  # need to copy keys here because we mutate in loop
                    self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
                net.load_state_dict(state_dict)

    # print network information
    def print_networks(self, verbose):
        """Print parameter counts (and full architecture when verbose) per net."""
        print('---------- Networks initialized -------------')
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, 'net' + name)
                num_params = 0
                for param in net.parameters():
                    num_params += param.numel()
                if verbose:
                    print(net)
                print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
        print('-----------------------------------------------')

    # set requires_grad=False to avoid computation
    def set_requires_grad(self, nets, requires_grad=False):
        """Toggle requires_grad on all parameters of one net or a list of nets."""
        if not isinstance(nets, list):
            nets = [nets]
        for net in nets:
            if net is not None:
                for param in net.parameters():
                    param.requires_grad = requires_grad
160 |
--------------------------------------------------------------------------------
/models/cycle_gan_model.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import itertools
3 | from util.image_pool import ImagePool
4 | from .base_model import BaseModel
5 | from . import networks
6 |
7 |
class CycleGANModel(BaseModel):
    """Vanilla CycleGAN: two generators (G_A: A->B, G_B: B->A) and two
    discriminators trained on unpaired images with adversarial,
    cycle-consistency and optional identity losses.
    """

    def name(self):
        return 'CycleGANModel'

    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Add CycleGAN-specific options (cycle/identity loss weights)."""
        # default CycleGAN did not use dropout
        parser.set_defaults(no_dropout=True)
        if is_train:
            parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
            parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')
            parser.add_argument('--lambda_identity', type=float, default=1.0, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1')

        return parser

    def initialize(self, opt):
        """Build generators/discriminators, loss functions, image pools and optimizers."""
        BaseModel.initialize(self, opt)

        # specify the training losses you want to print out. The program will call base_model.get_current_losses
        self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B']
        # specify the images you want to save/display. The program will call base_model.get_current_visuals
        visual_names_A = ['real_A', 'fake_B', 'rec_A']
        visual_names_B = ['real_B', 'fake_A', 'rec_B']
        if self.isTrain and self.opt.lambda_identity > 0.0:
            visual_names_A.append('idt_A')
            visual_names_B.append('idt_B')

        self.visual_names = visual_names_A + visual_names_B
        # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks
        if self.isTrain:
            self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
        else:  # during test time, only load Gs
            self.model_names = ['G_A', 'G_B']

        # load/define networks
        # The naming convention is different from those used in the paper
        # Code (paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
        self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
                                        not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
        self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm,
                                        not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)

        if self.isTrain:
            # vanilla GAN loss (no_lsgan) needs a sigmoid output on D
            use_sigmoid = opt.no_lsgan
            self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
                                            opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, opt.init_gain, self.gpu_ids)
            self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
                                            opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, opt.init_gain, self.gpu_ids)

        if self.isTrain:
            # pools of previously generated images to stabilize D training
            self.fake_A_pool = ImagePool(opt.pool_size)
            self.fake_B_pool = ImagePool(opt.pool_size)
            # define loss functions
            self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan).to(self.device)
            self.criterionCycle = torch.nn.L1Loss()
            self.criterionIdt = torch.nn.L1Loss()
            # initialize optimizers
            self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()),
                                                lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()),
                                                lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers = []
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)

    def set_input(self, input):
        """Unpack a batch and move images to the device; --direction picks which side is A."""
        AtoB = self.opt.direction == 'AtoB'
        self.real_A = input['A' if AtoB else 'B'].to(self.device)
        self.real_B = input['B' if AtoB else 'A'].to(self.device)
        self.image_paths = input['A_paths' if AtoB else 'B_paths']

    def forward(self):
        """Run both translation cycles: A->B->A and B->A->B."""
        self.fake_B = self.netG_A(self.real_A)
        self.rec_A = self.netG_B(self.fake_B)

        self.fake_A = self.netG_B(self.real_B)
        self.rec_B = self.netG_A(self.fake_A)

    def backward_D_basic(self, netD, real, fake):
        """Standard discriminator update: real vs (detached) fake, averaged, backprop."""
        # Real
        pred_real = netD(real)
        loss_D_real = self.criterionGAN(pred_real, True)
        # Fake (detached so G receives no gradient here)
        pred_fake = netD(fake.detach())
        loss_D_fake = self.criterionGAN(pred_fake, False)
        # Combined loss
        loss_D = (loss_D_real + loss_D_fake) * 0.5
        # backward
        loss_D.backward()
        return loss_D

    def backward_D_A(self):
        # query the pool so D_A sometimes sees older fakes
        fake_B = self.fake_B_pool.query(self.fake_B)
        self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)

    def backward_D_B(self):
        fake_A = self.fake_A_pool.query(self.fake_A)
        self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)

    def backward_G(self):
        """Accumulate generator losses (GAN + cycle + optional identity) and backprop."""
        lambda_idt = self.opt.lambda_identity
        lambda_A = self.opt.lambda_A
        lambda_B = self.opt.lambda_B
        # Identity loss
        if lambda_idt > 0:
            # G_A should be identity if real_B is fed.
            self.idt_A = self.netG_A(self.real_B)
            self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt
            # G_B should be identity if real_A is fed.
            self.idt_B = self.netG_B(self.real_A)
            self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt
        else:
            self.loss_idt_A = 0
            self.loss_idt_B = 0

        # GAN loss D_A(G_A(A))
        self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)
        # GAN loss D_B(G_B(B))
        self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)
        # Forward cycle loss
        self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A
        # Backward cycle loss
        self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B
        # combined loss
        self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B
        self.loss_G.backward()

    def optimize_parameters(self):
        """One training step: update generators first (with Ds frozen), then discriminators."""
        # forward
        self.forward()
        # G_A and G_B
        self.set_requires_grad([self.netD_A, self.netD_B], False)
        self.optimizer_G.zero_grad()
        self.backward_G()
        self.optimizer_G.step()
        # D_A and D_B
        self.set_requires_grad([self.netD_A, self.netD_B], True)
        self.optimizer_D.zero_grad()
        self.backward_D_A()
        self.backward_D_B()
        self.optimizer_D.step()
149 |
--------------------------------------------------------------------------------
/models/insta_gan_model.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import itertools
3 | from util.image_pool import ImagePool
4 | from .base_model import BaseModel
5 | from . import networks
6 | import numpy as np
7 | import copy
8 |
9 |
10 | class InstaGANModel(BaseModel):
11 | def name(self):
12 | return 'InstaGANModel'
13 |
14 | @staticmethod
15 | def modify_commandline_options(parser, is_train=True):
16 | # default CycleGAN did not use dropout
17 | parser.set_defaults(no_dropout=True)
18 | parser.add_argument('--set_order', type=str, default='decreasing', help='order of segmentation')
19 | parser.add_argument('--ins_max', type=int, default=4, help='maximum number of instances to forward')
20 | parser.add_argument('--ins_per', type=int, default=2, help='number of instances to forward, for one pass')
21 | if is_train:
22 | parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
23 | parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')
24 | parser.add_argument('--lambda_idt', type=float, default=1.0, help='use identity mapping. Setting lambda_idt other than 0 has an effect of scaling the weight of the identity mapping loss')
25 | parser.add_argument('--lambda_ctx', type=float, default=1.0, help='use context preserving. Setting lambda_ctx other than 0 has an effect of scaling the weight of the context preserving loss')
26 |
27 | return parser
28 |
    def initialize(self, opt):
        """Build InstaGAN networks, losses, pools and optimizers.

        The generators/discriminators operate on image+mask channel stacks
        (see set_input). ins_iter is the number of sequential forward passes
        needed to cover ins_max instances at ins_per instances per pass.
        """
        BaseModel.initialize(self, opt)

        self.ins_iter = self.opt.ins_max // self.opt.ins_per  # number of forward iteration

        # specify the training losses you want to print out. The program will call base_model.get_current_losses
        self.loss_names = ['D_A', 'G_A', 'cyc_A', 'idt_A', 'ctx_A', 'D_B', 'G_B', 'cyc_B', 'idt_B', 'ctx_B']
        # specify the images you want to save/display. The program will call base_model.get_current_visuals
        visual_names_A_img = ['real_A_img', 'fake_B_img', 'rec_A_img']
        visual_names_B_img = ['real_B_img', 'fake_A_img', 'rec_B_img']
        visual_names_A_seg = ['real_A_seg', 'fake_B_seg', 'rec_A_seg']
        visual_names_B_seg = ['real_B_seg', 'fake_A_seg', 'rec_B_seg']
        self.visual_names = visual_names_A_img + visual_names_A_seg + visual_names_B_img + visual_names_B_seg
        # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks
        if self.isTrain:
            self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
        else:
            # during test time, only load the generators
            self.model_names = ['G_A', 'G_B']

        # load/define networks
        # The naming conversion is different from those used in the paper
        # Code (paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
        self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
        self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
        if self.isTrain:
            # vanilla GAN loss (no_lsgan) needs a sigmoid output on D
            use_sigmoid = opt.no_lsgan
            self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD, opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, opt.init_gain, self.gpu_ids)
            self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD, opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, opt.init_gain, self.gpu_ids)

        if self.isTrain:
            # pools of previously generated images to stabilize D training
            self.fake_A_pool = ImagePool(opt.pool_size)
            self.fake_B_pool = ImagePool(opt.pool_size)
            # define loss functions
            self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan).to(self.device)
            self.criterionCyc = torch.nn.L1Loss()
            self.criterionIdt = torch.nn.L1Loss()
            # initialize optimizers (only over parameters that require grad)
            self.optimizer_G = torch.optim.Adam(filter(lambda p: p.requires_grad, itertools.chain(self.netG_A.parameters(), self.netG_B.parameters())), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(filter(lambda p: p.requires_grad, itertools.chain(self.netD_A.parameters(), self.netD_B.parameters())), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers = []
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)
71 |
72 | def select_masks(self, segs_batch):
73 | """Select instance masks to use"""
74 | if self.opt.set_order == 'decreasing':
75 | return self.select_masks_decreasing(segs_batch)
76 | elif self.opt.set_order == 'random':
77 | return self.select_masks_random(segs_batch)
78 | else:
79 | raise NotImplementedError('Set order name [%s] is not recognized' % self.opt.set_order)
80 |
81 | def select_masks_decreasing(self, segs_batch):
82 | """Select masks in decreasing order"""
83 | ret = list()
84 | for segs in segs_batch:
85 | mean = segs.mean(-1).mean(-1)
86 | m, i = mean.topk(self.opt.ins_max)
87 | ret.append(segs[i, :, :])
88 | return torch.stack(ret)
89 |
90 | def select_masks_random(self, segs_batch):
91 | """Select masks in random order"""
92 | ret = list()
93 | for segs in segs_batch:
94 | mean = (segs + 1).mean(-1).mean(-1)
95 | m, i = mean.topk(self.opt.ins_max)
96 | num = min(len(mean.nonzero()), self.opt.ins_max)
97 | reorder = np.concatenate((np.random.permutation(num), np.arange(num, self.opt.ins_max)))
98 | ret.append(segs[i[reorder], :, :])
99 | return torch.stack(ret)
100 |
101 | def merge_masks(self, segs):
102 | """Merge masks (B, N, W, H) -> (B, 1, W, H)"""
103 | ret = torch.sum((segs + 1)/2, dim=1, keepdim=True) # (B, 1, W, H)
104 | return ret.clamp(max=1, min=0) * 2 - 1
105 |
106 | def get_weight_for_ctx(self, x, y):
107 | """Get weight for context preserving loss"""
108 | z = self.merge_masks(torch.cat([x, y], dim=1))
109 | return (1 - z) / 2 # [-1,1] -> [1,0]
110 |
111 | def weighted_L1_loss(self, src, tgt, weight):
112 | """L1 loss with given weight (used for context preserving loss)"""
113 | return torch.mean(weight * torch.abs(src - tgt))
114 |
115 | def split(self, x):
116 | """Split data into image and mask (only assume 3-channel image)"""
117 | return x[:, :3, :, :], x[:, 3:, :, :]
118 |
    def set_input(self, input):
        """Unpack a batch: move images/masks to the device and build stacks.

        Populates real_{A,B}_img, the selected per-instance masks
        real_{A,B}_segs, the channel-concatenated image+mask stacks
        real_{A,B}, and the merged single-channel masks real_{A,B}_seg.
        --direction controls which side of the batch is treated as A.
        """
        AtoB = self.opt.direction == 'AtoB'
        self.real_A_img = input['A' if AtoB else 'B'].to(self.device)
        self.real_B_img = input['B' if AtoB else 'A'].to(self.device)
        real_A_segs = input['A_segs' if AtoB else 'B_segs']
        real_B_segs = input['B_segs' if AtoB else 'A_segs']
        # keep only up to ins_max instance masks, ordered per opt.set_order
        self.real_A_segs = self.select_masks(real_A_segs).to(self.device)
        self.real_B_segs = self.select_masks(real_B_segs).to(self.device)
        self.real_A = torch.cat([self.real_A_img, self.real_A_segs], dim=1)
        self.real_B = torch.cat([self.real_B_img, self.real_B_segs], dim=1)
        self.real_A_seg = self.merge_masks(self.real_A_segs)  # merged mask
        self.real_B_seg = self.merge_masks(self.real_B_segs)  # merged mask
        self.image_paths = input['A_paths' if AtoB else 'B_paths']
132 |
    def forward(self, idx=0):
        """Translate the idx-th group of ins_per instances in both directions.

        Sequential mini-batch translation: callers iterate idx from 0 to
        ins_iter - 1, feeding each step's translated image back in as the
        next step's input. Relies on state set by the caller beforehand:
        self.real_{A,B}_img_sng (current input images) and
        self.fake_{A,B}_seg_list (masks translated in earlier steps) —
        see test() for the driving loop.
        """
        N = self.opt.ins_per
        self.real_A_seg_sng = self.real_A_segs[:, N*idx:N*(idx+1), :, :]  # ith mask
        self.real_B_seg_sng = self.real_B_segs[:, N*idx:N*(idx+1), :, :]  # ith mask
        empty = -torch.ones(self.real_A_seg_sng.size()).to(self.device)  # empty image

        # masks are in [-1,1]: a sum of (mask + 1) above zero means at least
        # one pixel of some instance remains in this slice
        self.forward_A = (self.real_A_seg_sng + 1).sum() > 0  # check if there are remaining instances
        self.forward_B = (self.real_B_seg_sng + 1).sum() > 0  # check if there are remaining instances

        # forward A
        if self.forward_A:
            self.real_A_sng = torch.cat([self.real_A_img_sng, self.real_A_seg_sng], dim=1)
            self.fake_B_sng = self.netG_A(self.real_A_sng)
            self.rec_A_sng = self.netG_B(self.fake_B_sng)

            self.fake_B_img_sng, self.fake_B_seg_sng = self.split(self.fake_B_sng)
            self.rec_A_img_sng, self.rec_A_seg_sng = self.split(self.rec_A_sng)
            fake_B_seg_list = self.fake_B_seg_list + [self.fake_B_seg_sng]  # not detach
            # pad remaining (not yet translated) instance slots with empty masks
            for i in range(self.ins_iter - idx - 1):
                fake_B_seg_list.append(empty)

            self.fake_B_seg_mul = torch.cat(fake_B_seg_list, dim=1)
            self.fake_B_mul = torch.cat([self.fake_B_img_sng, self.fake_B_seg_mul], dim=1)

        # forward B
        if self.forward_B:
            self.real_B_sng = torch.cat([self.real_B_img_sng, self.real_B_seg_sng], dim=1)
            self.fake_A_sng = self.netG_B(self.real_B_sng)
            self.rec_B_sng = self.netG_A(self.fake_A_sng)

            self.fake_A_img_sng, self.fake_A_seg_sng = self.split(self.fake_A_sng)
            self.rec_B_img_sng, self.rec_B_seg_sng = self.split(self.rec_B_sng)
            fake_A_seg_list = self.fake_A_seg_list + [self.fake_A_seg_sng]  # not detach
            # pad remaining (not yet translated) instance slots with empty masks
            for i in range(self.ins_iter - idx - 1):
                fake_A_seg_list.append(empty)

            self.fake_A_seg_mul = torch.cat(fake_A_seg_list, dim=1)
            self.fake_A_mul = torch.cat([self.fake_A_img_sng, self.fake_A_seg_mul], dim=1)
171 |
def test(self):
    """Translate all instances sequentially with gradients disabled.

    Mirrors the loop in optimize_parameters() minus the optimizer steps:
    each iteration translates one instance group and feeds the resulting
    image back in as the input for the next iteration.
    """
    self.real_A_img_sng = self.real_A_img
    self.real_B_img_sng = self.real_B_img
    self.fake_A_seg_list = list()
    self.fake_B_seg_list = list()
    self.rec_A_seg_list = list()
    self.rec_B_seg_list = list()

    # sequential mini-batch translation
    for i in range(self.ins_iter):
        # forward
        with torch.no_grad():  # no grad
            self.forward(i)

        # update setting for next iteration
        # NOTE(review): fake_*_img_sng / *_seg_sng are only assigned when
        # forward() took the corresponding branch; if a domain has no
        # instances at all, these reads would fail — confirm inputs always
        # carry at least one instance per domain.
        self.real_A_img_sng = self.fake_B_img_sng.detach()
        self.real_B_img_sng = self.fake_A_img_sng.detach()
        self.fake_A_seg_list.append(self.fake_A_seg_sng.detach())
        self.fake_B_seg_list.append(self.fake_B_seg_sng.detach())
        self.rec_A_seg_list.append(self.rec_A_seg_sng.detach())
        self.rec_B_seg_list.append(self.rec_B_seg_sng.detach())

        # save visuals
        if i == 0:  # first iteration reconstructs the original image
            self.rec_A_img = self.rec_A_img_sng
            self.rec_B_img = self.rec_B_img_sng
        if i == self.ins_iter - 1:  # last iteration holds the final translation
            self.fake_A_img = self.fake_A_img_sng
            self.fake_B_img = self.fake_B_img_sng
            self.fake_A_seg = self.merge_masks(self.fake_A_seg_mul)
            self.fake_B_seg = self.merge_masks(self.fake_B_seg_mul)
            self.rec_A_seg = self.merge_masks(torch.cat(self.rec_A_seg_list, dim=1))
            self.rec_B_seg = self.merge_masks(torch.cat(self.rec_B_seg_list, dim=1))
205 |
def backward_G(self):
    """Compute all generator losses (GAN, cycle, identity, context) and
    backpropagate through both generators."""
    lam_A = self.opt.lambda_A
    lam_B = self.opt.lambda_B
    lam_idt = self.opt.lambda_idt
    lam_ctx = self.opt.lambda_ctx

    # Start every component at zero; the branches below overwrite the terms
    # for whichever direction actually ran in forward().
    self.loss_G_A = self.loss_cyc_A = self.loss_idt_B = self.loss_ctx_A = 0
    self.loss_G_B = self.loss_cyc_B = self.loss_idt_A = self.loss_ctx_B = 0

    if self.forward_A:  # the A -> B direction was executed
        self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B_mul), True)
        self.loss_cyc_A = self.criterionCyc(self.rec_A_sng, self.real_A_sng) * lam_A
        self.loss_idt_B = self.criterionIdt(self.netG_B(self.real_A_sng), self.real_A_sng.detach()) * lam_A * lam_idt
        ctx_w_A = self.get_weight_for_ctx(self.real_A_seg_sng, self.fake_B_seg_sng)
        self.loss_ctx_A = self.weighted_L1_loss(self.real_A_img_sng, self.fake_B_img_sng, weight=ctx_w_A) * lam_A * lam_ctx

    if self.forward_B:  # the B -> A direction was executed
        self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A_mul), True)
        self.loss_cyc_B = self.criterionCyc(self.rec_B_sng, self.real_B_sng) * lam_B
        self.loss_idt_A = self.criterionIdt(self.netG_A(self.real_B_sng), self.real_B_sng.detach()) * lam_B * lam_idt
        ctx_w_B = self.get_weight_for_ctx(self.real_B_seg_sng, self.fake_A_seg_sng)
        self.loss_ctx_B = self.weighted_L1_loss(self.real_B_img_sng, self.fake_A_img_sng, weight=ctx_w_B) * lam_B * lam_ctx

    # combined loss (summation order kept identical to preserve numerics)
    self.loss_G = (self.loss_G_A + self.loss_G_B
                   + self.loss_cyc_A + self.loss_cyc_B
                   + self.loss_idt_A + self.loss_idt_B
                   + self.loss_ctx_A + self.loss_ctx_B)
    self.loss_G.backward()
241 |
def backward_D_basic(self, netD, real, fake):
    """Standard discriminator step: score real as True and a detached fake
    as False, average the two GAN losses, backprop, and return the loss."""
    # Real sample should be classified as genuine.
    loss_real = self.criterionGAN(netD(real), True)
    # Detach the fake so no gradient flows back into the generator.
    loss_fake = self.criterionGAN(netD(fake.detach()), False)
    # Average the two terms and backpropagate through the discriminator.
    loss_total = 0.5 * (loss_real + loss_fake)
    loss_total.backward()
    return loss_total
254 |
def backward_D_A(self):
    """Update D_A: draw a (possibly historical) fake B from the image pool
    and run the standard discriminator backward pass."""
    pooled_fake_B = self.fake_B_pool.query(self.fake_B_mul)
    self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, pooled_fake_B)
258 |
def backward_D_B(self):
    """Update D_B: draw a (possibly historical) fake A from the image pool
    and run the standard discriminator backward pass."""
    pooled_fake_A = self.fake_A_pool.query(self.fake_A_mul)
    self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, pooled_fake_A)
262 |
def optimize_parameters(self):
    """One full training step: sequentially translate each instance group,
    updating the generators and discriminators after every sub-step.

    Each loop iteration translates one instance, then feeds the detached
    result back in as the next iteration's input image.
    """
    # init setting
    self.real_A_img_sng = self.real_A_img
    self.real_B_img_sng = self.real_B_img
    self.fake_A_seg_list = list()
    self.fake_B_seg_list = list()
    self.rec_A_seg_list = list()
    self.rec_B_seg_list = list()

    # sequential mini-batch translation
    for i in range(self.ins_iter):
        # forward
        self.forward(i)

        # G_A and G_B — freeze the discriminators while updating generators
        if self.forward_A or self.forward_B:
            self.set_requires_grad([self.netD_A, self.netD_B], False)
            self.optimizer_G.zero_grad()
            self.backward_G()
            self.optimizer_G.step()

        # D_A and D_B — only the direction(s) that actually ran get a D step
        if self.forward_A or self.forward_B:
            self.set_requires_grad([self.netD_A, self.netD_B], True)
            self.optimizer_D.zero_grad()
            if self.forward_A:
                self.backward_D_A()
            if self.forward_B:
                self.backward_D_B()
            self.optimizer_D.step()

        # update setting for next iteration (detach: stop gradients from
        # crossing instance-iteration boundaries)
        self.real_A_img_sng = self.fake_B_img_sng.detach()
        self.real_B_img_sng = self.fake_A_img_sng.detach()
        self.fake_A_seg_list.append(self.fake_A_seg_sng.detach())
        self.fake_B_seg_list.append(self.fake_B_seg_sng.detach())
        self.rec_A_seg_list.append(self.rec_A_seg_sng.detach())
        self.rec_B_seg_list.append(self.rec_B_seg_sng.detach())

        # save visuals
        if i == 0:  # first iteration reconstructs the original image
            self.rec_A_img = self.rec_A_img_sng
            self.rec_B_img = self.rec_B_img_sng
        if i == self.ins_iter - 1:  # last iteration holds the final translation
            self.fake_A_img = self.fake_A_img_sng
            self.fake_B_img = self.fake_B_img_sng
            self.fake_A_seg = self.merge_masks(self.fake_A_seg_mul)
            self.fake_B_seg = self.merge_masks(self.fake_B_seg_mul)
            self.rec_A_seg = self.merge_masks(torch.cat(self.rec_A_seg_list, dim=1))
            self.rec_B_seg = self.merge_masks(torch.cat(self.rec_B_seg_list, dim=1))
313 |
--------------------------------------------------------------------------------
/models/pix2pix_model.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from util.image_pool import ImagePool
3 | from .base_model import BaseModel
4 | from . import networks
5 |
6 |
class Pix2PixModel(BaseModel):
    """Paired image-to-image translation (pix2pix).

    Trains a generator G: A -> B against a conditional discriminator that
    scores (A, B) channel-concatenated pairs, plus an L1 term pulling
    G(A) toward the ground-truth B.
    """

    def name(self):
        return 'Pix2PixModel'

    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Adjust option defaults and add pix2pix-specific flags."""
        # changing the default values to match the pix2pix paper
        # (https://phillipi.github.io/pix2pix/)
        parser.set_defaults(norm='batch', netG='unet_256')
        parser.set_defaults(dataset_mode='aligned')
        if is_train:
            parser.set_defaults(pool_size=0, no_lsgan=True)
            parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')

        return parser

    def initialize(self, opt):
        """Build networks, losses and optimizers according to *opt*."""
        BaseModel.initialize(self, opt)
        self.isTrain = opt.isTrain
        # specify the training losses you want to print out. The program will call base_model.get_current_losses
        self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']
        # specify the images you want to save/display. The program will call base_model.get_current_visuals
        self.visual_names = ['real_A', 'fake_B', 'real_B']
        # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks
        if self.isTrain:
            self.model_names = ['G', 'D']
        else:  # during test time, only load Gs
            self.model_names = ['G']
        # load/define networks
        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
                                      not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)

        if self.isTrain:
            # vanilla GAN (no_lsgan) needs a sigmoid output on D; LSGAN does not
            use_sigmoid = opt.no_lsgan
            # D sees the input and output images concatenated on channels
            self.netD = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD,
                                          opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, opt.init_gain, self.gpu_ids)

        if self.isTrain:
            self.fake_AB_pool = ImagePool(opt.pool_size)
            # define loss functions
            self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan).to(self.device)
            self.criterionL1 = torch.nn.L1Loss()

            # initialize optimizers
            self.optimizers = []
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                                lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                                lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)

    def set_input(self, input):
        """Unpack a paired batch; opt.direction may swap the A/B roles."""
        AtoB = self.opt.direction == 'AtoB'
        self.real_A = input['A' if AtoB else 'B'].to(self.device)
        self.real_B = input['B' if AtoB else 'A'].to(self.device)
        self.image_paths = input['A_paths' if AtoB else 'B_paths']

    def forward(self):
        """Generate fake_B = G(real_A)."""
        self.fake_B = self.netG(self.real_A)

    def backward_D(self):
        """Discriminator loss on a pooled fake pair and the real pair."""
        # Fake
        # stop backprop to the generator by detaching fake_B
        fake_AB = self.fake_AB_pool.query(torch.cat((self.real_A, self.fake_B), 1))
        pred_fake = self.netD(fake_AB.detach())
        self.loss_D_fake = self.criterionGAN(pred_fake, False)

        # Real
        real_AB = torch.cat((self.real_A, self.real_B), 1)
        pred_real = self.netD(real_AB)
        self.loss_D_real = self.criterionGAN(pred_real, True)

        # Combined loss
        self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5

        self.loss_D.backward()

    def backward_G(self):
        """Generator loss: fool D on the (A, G(A)) pair + weighted L1 to B."""
        # First, G(A) should fake the discriminator
        fake_AB = torch.cat((self.real_A, self.fake_B), 1)
        pred_fake = self.netD(fake_AB)
        self.loss_G_GAN = self.criterionGAN(pred_fake, True)

        # Second, G(A) = B
        self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1

        self.loss_G = self.loss_G_GAN + self.loss_G_L1

        self.loss_G.backward()

    def optimize_parameters(self):
        """One training step: forward, then update D, then update G."""
        self.forward()
        # update D
        self.set_requires_grad(self.netD, True)
        self.optimizer_D.zero_grad()
        self.backward_D()
        self.optimizer_D.step()

        # update G (freeze D so only the generator receives gradients)
        self.set_requires_grad(self.netD, False)
        self.optimizer_G.zero_grad()
        self.backward_G()
        self.optimizer_G.step()
112 |
--------------------------------------------------------------------------------
/models/test_model.py:
--------------------------------------------------------------------------------
1 | from .base_model import BaseModel
2 | from . import networks
3 |
4 |
class TestModel(BaseModel):
    """Inference-only wrapper: loads a single generator and runs it forward.

    Used with the 'single' dataset mode, where only domain-A images exist.
    """

    def name(self):
        return 'TestModel'

    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Add test-only options; refuses to be used for training."""
        assert not is_train, 'TestModel cannot be used in train mode'
        parser.set_defaults(dataset_mode='single')
        parser.add_argument('--model_suffix', type=str, default='',
                            help='In checkpoints_dir, [epoch]_net_G[model_suffix].pth will'
                            ' be loaded as the generator of TestModel')

        return parser

    def initialize(self, opt):
        """Build the generator and register it under its suffixed name."""
        assert(not opt.isTrain)
        BaseModel.initialize(self, opt)

        # specify the training losses you want to print out. The program will call base_model.get_current_losses
        self.loss_names = []
        # specify the images you want to save/display. The program will call base_model.get_current_visuals
        self.visual_names = ['real_A', 'fake_B']
        # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks
        self.model_names = ['G' + opt.model_suffix]

        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG,
                                      opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)

        # assigns the model to self.netG_[suffix] so that it can be loaded
        # please see BaseModel.load_networks
        setattr(self, 'netG' + opt.model_suffix, self.netG)

    def set_input(self, input):
        """Store the single-domain input image and its path."""
        # we need to use single_dataset mode
        self.real_A = input['A'].to(self.device)
        self.image_paths = input['A_paths']

    def forward(self):
        """Generate fake_B = G(real_A)."""
        self.fake_B = self.netG(self.real_A)
44 |
--------------------------------------------------------------------------------
/options/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/options/__init__.py
--------------------------------------------------------------------------------
/options/base_options.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | from util import util
4 | import torch
5 | import models
6 | import data
7 |
8 |
class BaseOptions():
    """Command-line options shared by training and testing.

    Subclasses (TrainOptions / TestOptions) extend initialize() with
    phase-specific flags and set self.isTrain before parsing.
    """

    def __init__(self):
        # True once initialize() has registered the common options.
        self.initialized = False

    def initialize(self, parser):
        """Register the options common to train and test on *parser*."""
        parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
        parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
        # Specify width and height for load and fine sizes
        parser.add_argument('--loadSizeW', type=int, default=220, help='scale images to this size (width)')
        parser.add_argument('--loadSizeH', type=int, default=220, help='scale images to this size (height)')
        parser.add_argument('--fineSizeW', type=int, default=200, help='then crop to this size (width)')
        parser.add_argument('--fineSizeH', type=int, default=200, help='then crop to this size (height)')
        parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
        parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels')
        parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels')
        parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
        parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
        # Use set generator and set discriminator as default architectures
        parser.add_argument('--netD', type=str, default='set', help='selects model to use for netD')
        parser.add_argument('--netG', type=str, default='set', help='selects model to use for netG')
        parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
        parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
        parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
        # Use unaligned_seg as a default dataset_mode
        parser.add_argument('--dataset_mode', type=str, default='unaligned_seg', help='chooses how datasets are loaded. [unaligned | aligned | single]')
        # Use insta_gan as a default model
        parser.add_argument('--model', type=str, default='insta_gan', help='chooses which model to use. cycle_gan, pix2pix, test')
        parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
        parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
        parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
        parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
        parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization')
        parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
        parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
        parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
        parser.add_argument('--resize_or_crop', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop|none]')
        parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
        parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]')
        parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
        parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
        parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}')
        self.initialized = True
        return parser

    def gather_options(self):
        """Parse options, letting the chosen model and dataset add theirs.

        Bug fix: the original only created *parser* when self.initialized
        was False, so a second call raised NameError on the undefined
        local.  We now fall back to the parser stored by a previous call.
        """
        if not self.initialized:
            parser = argparse.ArgumentParser(
                formatter_class=argparse.ArgumentDefaultsHelpFormatter)
            parser = self.initialize(parser)
        else:
            parser = self.parser  # reuse the parser built by the last call

        # get the basic options
        opt, _ = parser.parse_known_args()

        # modify model-related parser options
        model_name = opt.model
        model_option_setter = models.get_option_setter(model_name)
        parser = model_option_setter(parser, self.isTrain)
        opt, _ = parser.parse_known_args()  # parse again with the new defaults

        # modify dataset-related parser options
        dataset_name = opt.dataset_mode
        dataset_option_setter = data.get_option_setter(dataset_name)
        parser = dataset_option_setter(parser, self.isTrain)

        self.parser = parser

        return parser.parse_args()

    def print_options(self, opt):
        """Print all options (flagging non-default values) and save them to
        [checkpoints_dir]/[name]/opt.txt."""
        message = ''
        message += '----------------- Options ---------------\n'
        for k, v in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if v != default:
                comment = '\t[default: %s]' % str(default)
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += '----------------- End -------------------'
        print(message)

        # save to the disk
        expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write(message)
            opt_file.write('\n')

    def parse(self):
        """Gather and finalize options (suffix expansion, gpu id list) and
        return the resulting namespace."""
        opt = self.gather_options()
        opt.isTrain = self.isTrain  # train or test

        # process opt.suffix
        if opt.suffix:
            suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
            opt.name = opt.name + suffix

        self.print_options(opt)

        # set gpu ids (comma-separated string -> list of non-negative ints)
        str_ids = opt.gpu_ids.split(',')
        opt.gpu_ids = []
        for str_id in str_ids:
            gpu_id = int(str_id)  # renamed from `id` to avoid shadowing the builtin
            if gpu_id >= 0:
                opt.gpu_ids.append(gpu_id)
        if len(opt.gpu_ids) > 0:
            torch.cuda.set_device(opt.gpu_ids[0])

        self.opt = opt
        return self.opt
125 |
--------------------------------------------------------------------------------
/options/test_options.py:
--------------------------------------------------------------------------------
1 | from .base_options import BaseOptions
2 |
3 |
class TestOptions(BaseOptions):
    """Options used only at test time."""

    def initialize(self, parser):
        """Register test-only flags on top of the common options."""
        parser = BaseOptions.initialize(self, parser)
        parser.add_argument('--ntest', type=int, default=float("inf"), help='# of test examples.')
        parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
        parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
        parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
        # Dropout and Batchnorm behave differently during training and test.
        parser.add_argument('--eval', action='store_true', help='use eval mode during test time.')
        parser.add_argument('--num_test', type=int, default=50, help='how many test images to run')

        parser.set_defaults(model='test')
        # To avoid cropping, the load sizes should equal the fine (crop) sizes.
        # Bug fix: the base options define loadSizeW/H and fineSizeW/H, so the
        # old set_defaults(loadSize=parser.get_default('fineSize')) referenced
        # options that do not exist (get_default returned None) and never
        # disabled cropping. Set the width/height defaults that actually exist.
        parser.set_defaults(loadSizeW=parser.get_default('fineSizeW'))
        parser.set_defaults(loadSizeH=parser.get_default('fineSizeH'))
        self.isTrain = False
        return parser
20 |
--------------------------------------------------------------------------------
/options/train_options.py:
--------------------------------------------------------------------------------
1 | from .base_options import BaseOptions
2 |
3 |
class TrainOptions(BaseOptions):
    """Options used only during training: visualization/logging frequencies,
    checkpointing, and optimizer/learning-rate schedule settings."""

    def initialize(self, parser):
        """Register training-only flags on top of the common options."""
        parser = BaseOptions.initialize(self, parser)
        # --- visdom / HTML visualization ---
        parser.add_argument('--display_freq', type=int, default=400, help='frequency of showing training results on screen')
        # Use ncols = 6 for better visualization
        parser.add_argument('--display_ncols', type=int, default=6, help='if positive, display all images in a single visdom web panel with certain number of images per row.')
        parser.add_argument('--display_id', type=int, default=1, help='window id of the web display')
        parser.add_argument('--display_server', type=str, default="http://localhost", help='visdom server of the web display')
        parser.add_argument('--display_env', type=str, default='main', help='visdom display environment name (default is "main")')
        parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')
        parser.add_argument('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html')
        parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
        # --- checkpointing ---
        parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
        parser.add_argument('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs')
        parser.add_argument('--save_by_iter', action='store_true', help='whether saves model by iteration')
        parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
        parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by , +, ...')
        parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
        # --- optimization schedule ---
        parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate')
        parser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero')
        parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
        parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
        parser.add_argument('--no_lsgan', action='store_true', help='do *not* use least square GAN, if false, use vanilla GAN')
        parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
        parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
        parser.add_argument('--lr_policy', type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine')
        parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')

        self.isTrain = True
        return parser
34 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | torch==0.4.0
2 | torchvision==0.2.1
3 | dominate>=2.3.1
4 | visdom>=0.1.8.3
5 |
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/1602_fake_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/1602_fake_A_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/1602_fake_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/1602_fake_A_seg.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/1602_fake_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/1602_fake_B_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/1602_fake_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/1602_fake_B_seg.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/1602_real_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/1602_real_A_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/1602_real_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/1602_real_A_seg.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/1602_real_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/1602_real_B_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/1602_real_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/1602_real_B_seg.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/1602_rec_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/1602_rec_A_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/1602_rec_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/1602_rec_A_seg.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/1602_rec_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/1602_rec_B_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/1602_rec_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/1602_rec_B_seg.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/2207_fake_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/2207_fake_A_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/2207_fake_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/2207_fake_A_seg.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/2207_fake_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/2207_fake_B_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/2207_fake_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/2207_fake_B_seg.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/2207_real_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/2207_real_A_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/2207_real_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/2207_real_A_seg.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/2207_real_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/2207_real_B_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/2207_real_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/2207_real_B_seg.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/2207_rec_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/2207_rec_A_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/2207_rec_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/2207_rec_A_seg.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/2207_rec_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/2207_rec_B_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/2207_rec_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/2207_rec_B_seg.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/2781_fake_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/2781_fake_A_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/2781_fake_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/2781_fake_A_seg.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/2781_fake_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/2781_fake_B_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/2781_fake_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/2781_fake_B_seg.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/2781_real_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/2781_real_A_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/2781_real_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/2781_real_A_seg.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/2781_real_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/2781_real_B_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/2781_real_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/2781_real_B_seg.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/2781_rec_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/2781_rec_A_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/2781_rec_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/2781_rec_A_seg.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/2781_rec_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/2781_rec_B_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/2781_rec_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/2781_rec_B_seg.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/3078_fake_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/3078_fake_A_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/3078_fake_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/3078_fake_A_seg.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/3078_fake_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/3078_fake_B_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/3078_fake_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/3078_fake_B_seg.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/3078_real_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/3078_real_A_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/3078_real_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/3078_real_A_seg.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/3078_real_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/3078_real_B_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/3078_real_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/3078_real_B_seg.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/3078_rec_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/3078_rec_A_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/3078_rec_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/3078_rec_A_seg.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/3078_rec_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/3078_rec_B_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/3078_rec_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/3078_rec_B_seg.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/3194_fake_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/3194_fake_A_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/3194_fake_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/3194_fake_A_seg.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/3194_fake_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/3194_fake_B_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/3194_fake_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/3194_fake_B_seg.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/3194_real_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/3194_real_A_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/3194_real_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/3194_real_A_seg.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/3194_real_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/3194_real_B_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/3194_real_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/3194_real_B_seg.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/3194_rec_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/3194_rec_A_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/3194_rec_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/3194_rec_A_seg.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/3194_rec_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/3194_rec_B_img.png
--------------------------------------------------------------------------------
/results/pants2skirt_mhp_instagan/sample_200/images/3194_rec_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/pants2skirt_mhp_instagan/sample_200/images/3194_rec_B_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1106_fake_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1106_fake_A_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1106_fake_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1106_fake_A_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1106_fake_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1106_fake_B_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1106_fake_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1106_fake_B_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1106_real_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1106_real_A_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1106_real_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1106_real_A_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1106_real_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1106_real_B_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1106_real_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1106_real_B_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1106_rec_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1106_rec_A_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1106_rec_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1106_rec_A_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1106_rec_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1106_rec_B_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1106_rec_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1106_rec_B_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1134_fake_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1134_fake_A_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1134_fake_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1134_fake_A_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1134_fake_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1134_fake_B_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1134_fake_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1134_fake_B_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1134_real_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1134_real_A_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1134_real_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1134_real_A_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1134_real_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1134_real_B_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1134_real_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1134_real_B_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1134_rec_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1134_rec_A_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1134_rec_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1134_rec_A_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1134_rec_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1134_rec_B_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1134_rec_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1134_rec_B_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1271_fake_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1271_fake_A_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1271_fake_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1271_fake_A_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1271_fake_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1271_fake_B_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1271_fake_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1271_fake_B_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1271_real_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1271_real_A_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1271_real_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1271_real_A_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1271_real_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1271_real_B_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1271_real_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1271_real_B_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1271_rec_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1271_rec_A_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1271_rec_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1271_rec_A_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1271_rec_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1271_rec_B_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/1271_rec_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/1271_rec_B_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/139_fake_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/139_fake_A_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/139_fake_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/139_fake_A_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/139_fake_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/139_fake_B_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/139_fake_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/139_fake_B_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/139_real_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/139_real_A_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/139_real_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/139_real_A_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/139_real_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/139_real_B_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/139_real_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/139_real_B_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/139_rec_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/139_rec_A_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/139_rec_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/139_rec_A_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/139_rec_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/139_rec_B_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/139_rec_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/139_rec_B_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/602_fake_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/602_fake_A_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/602_fake_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/602_fake_A_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/602_fake_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/602_fake_B_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/602_fake_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/602_fake_B_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/602_real_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/602_real_A_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/602_real_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/602_real_A_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/602_real_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/602_real_B_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/602_real_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/602_real_B_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/602_rec_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/602_rec_A_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/602_rec_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/602_rec_A_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/602_rec_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/602_rec_B_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/602_rec_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/602_rec_B_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/732_fake_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/732_fake_A_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/732_fake_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/732_fake_A_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/732_fake_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/732_fake_B_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/732_fake_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/732_fake_B_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/732_real_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/732_real_A_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/732_real_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/732_real_A_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/732_real_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/732_real_B_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/732_real_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/732_real_B_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/732_rec_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/732_rec_A_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/732_rec_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/732_rec_A_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/732_rec_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/732_rec_B_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/732_rec_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/732_rec_B_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/866_fake_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/866_fake_A_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/866_fake_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/866_fake_A_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/866_fake_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/866_fake_B_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/866_fake_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/866_fake_B_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/866_real_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/866_real_A_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/866_real_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/866_real_A_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/866_real_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/866_real_B_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/866_real_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/866_real_B_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/866_rec_A_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/866_rec_A_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/866_rec_A_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/866_rec_A_seg.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/866_rec_B_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/866_rec_B_img.png
--------------------------------------------------------------------------------
/results/shp2gir_coco_instagan/sample_200/images/866_rec_B_seg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/results/shp2gir_coco_instagan/sample_200/images/866_rec_B_seg.png
--------------------------------------------------------------------------------
/scripts/conda_deps.sh:
--------------------------------------------------------------------------------
# Install the project's dependencies with conda (run inside the target environment).
set -ex
conda install numpy pyyaml mkl mkl-include setuptools cmake cffi typing
conda install pytorch torchvision -c pytorch # add cuda90 if CUDA 9
conda install visdom dominate -c conda-forge # install visdom and dominate
5 |
--------------------------------------------------------------------------------
/scripts/download_cyclegan_model.sh:
--------------------------------------------------------------------------------
# Download a pretrained CycleGAN generator into ./checkpoints/<model>_pretrained/.
# Usage: bash ./scripts/download_cyclegan_model.sh <model_name>
FILE=$1

echo "Note: available models are apple2orange, orange2apple, summer2winter_yosemite, winter2summer_yosemite, horse2zebra, zebra2horse, monet2photo, style_monet, style_cezanne, style_ukiyoe, style_vangogh, sat2map, map2sat, cityscapes_photo2label, cityscapes_label2photo, facades_photo2label, facades_label2photo, iphone2dslr_flower"

echo "Specified [$FILE]"

# Quote expansions so an empty/odd model name cannot word-split or glob.
mkdir -p "./checkpoints/${FILE}_pretrained"
MODEL_FILE="./checkpoints/${FILE}_pretrained/latest_net_G.pth"
URL="http://efrosgans.eecs.berkeley.edu/cyclegan/pretrained_models/$FILE.pth"

wget -N "$URL" -O "$MODEL_FILE"
12 |
--------------------------------------------------------------------------------
/scripts/download_pix2pix_model.sh:
--------------------------------------------------------------------------------
# Download a pretrained pix2pix generator into ./checkpoints/<model>_pretrained/.
# Usage: bash ./scripts/download_pix2pix_model.sh <model_name>
FILE=$1

echo "Note: available models are edges2shoes, sat2map, facades_label2photo, and day2night"

echo "Specified [$FILE]"

# Quote expansions so an empty/odd model name cannot word-split or glob.
mkdir -p "./checkpoints/${FILE}_pretrained"
MODEL_FILE="./checkpoints/${FILE}_pretrained/latest_net_G.pth"
URL="http://efrosgans.eecs.berkeley.edu/pix2pix/models-pytorch/$FILE.pth"

wget -N "$URL" -O "$MODEL_FILE"
12 |
--------------------------------------------------------------------------------
/scripts/install_deps.sh:
--------------------------------------------------------------------------------
# Install the visualization dependencies (visdom server, dominate HTML builder) with pip.
set -ex
pip install visdom
pip install dominate
4 |
--------------------------------------------------------------------------------
/scripts/test_before_push.py:
--------------------------------------------------------------------------------
1 | # Simple script to make sure basic usage
2 | # such as training, testing, saving and loading
3 | # runs without errors.
4 | import os
5 |
6 |
def run(command):
    """Echo *command*, run it in a shell, and abort the whole script on failure.

    Exits with status 1 on any non-zero wait status so a single broken step
    stops the pre-push check immediately.
    """
    print(command)
    exit_status = os.system(command)
    # `!= 0` instead of `> 0`: os.system can return negative values on some
    # platforms (e.g. Windows), which the original comparison silently passed.
    if exit_status != 0:
        raise SystemExit(1)
12 |
13 |
if __name__ == '__main__':
    # Smoke-test lint, download, train, test, save and load end to end.
    run('flake8 --ignore E501 .')
    if not os.path.exists('./datasets/mini'):
        run('bash ./datasets/download_cyclegan_dataset.sh mini')

    if not os.path.exists('./datasets/mini_pix2pix'):
        # NOTE(review): mini_pix2pix is fetched via the *cyclegan* download
        # script here as well -- confirm this is intentional.
        run('bash ./datasets/download_cyclegan_dataset.sh mini_pix2pix')

    # pretrained cyclegan model
    if not os.path.exists('./checkpoints/horse2zebra_pretrained/latest_net_G.pth'):
        run('bash ./scripts/download_cyclegan_model.sh horse2zebra')
    # fix: '--no_dropout' was passed twice on this command line
    run('python test.py --model test --dataroot ./datasets/mini --name horse2zebra_pretrained --no_dropout --num_test 1')

    # pretrained pix2pix model
    if not os.path.exists('./checkpoints/facades_label2photo_pretrained/latest_net_G.pth'):
        run('bash ./scripts/download_pix2pix_model.sh facades_label2photo')
    if not os.path.exists('./datasets/facades'):
        run('bash ./datasets/download_pix2pix_dataset.sh facades')
    run('python test.py --dataroot ./datasets/facades/ --direction BtoA --model pix2pix --name facades_label2photo_pretrained --num_test 1')

    # cyclegan train/test
    run('python train.py --model cycle_gan --name temp --dataroot ./datasets/mini --niter 1 --niter_decay 0 --save_latest_freq 10 --print_freq 1 --display_id -1')
    run('python test.py --model test --name temp --dataroot ./datasets/mini --num_test 1 --model_suffix "_A" --no_dropout')

    # pix2pix train/test
    run('python train.py --model pix2pix --name temp --dataroot ./datasets/mini_pix2pix --niter 1 --niter_decay 0 --save_latest_freq 10 --display_id -1')
    run('python test.py --model pix2pix --name temp --dataroot ./datasets/mini_pix2pix --num_test 1 --direction BtoA')
41 |
--------------------------------------------------------------------------------
/scripts/test_cyclegan.sh:
--------------------------------------------------------------------------------
# Run CycleGAN inference on the maps dataset with the maps_cyclegan checkpoint.
set -ex
python test.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan --phase test --no_dropout
3 |
--------------------------------------------------------------------------------
/scripts/test_pix2pix.sh:
--------------------------------------------------------------------------------
# Run pix2pix inference on facades (label -> photo) with a U-Net generator and batchnorm.
set -ex
python test.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --netG unet_256 --direction BtoA --dataset_mode aligned --norm batch
3 |
--------------------------------------------------------------------------------
/scripts/test_single.sh:
--------------------------------------------------------------------------------
# Run one-direction inference on a single (unpaired) image folder using the pix2pix generator.
set -ex
python test.py --dataroot ./datasets/facades/testB/ --name facades_pix2pix --model test --netG unet_256 --direction BtoA --dataset_mode single --norm batch
3 |
--------------------------------------------------------------------------------
/scripts/train_cyclegan.sh:
--------------------------------------------------------------------------------
# Train CycleGAN on the maps dataset (image pool of 50, no generator dropout).
set -ex
python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan --pool_size 50 --no_dropout
3 |
--------------------------------------------------------------------------------
/scripts/train_pix2pix.sh:
--------------------------------------------------------------------------------
# Train pix2pix on facades (label -> photo): U-Net generator, vanilla GAN loss, L1 weight 100.
set -ex
python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --netG unet_256 --direction BtoA --lambda_L1 100 --dataset_mode aligned --no_lsgan --norm batch --pool_size 0
3 |
--------------------------------------------------------------------------------
/test.py:
--------------------------------------------------------------------------------
1 | import os
2 | from options.test_options import TestOptions
3 | from data import CreateDataLoader
4 | from models import create_model
5 | from util.visualizer import save_images
6 | from util import html
7 |
8 |
if __name__ == '__main__':
    opt = TestOptions().parse()
    # Hard-code options the test loop requires: single worker, one image per
    # batch, deterministic order, no augmentation, no visdom display.
    opt.num_threads = 1
    opt.batch_size = 1
    opt.serial_batches = True
    opt.no_flip = True
    opt.display_id = -1

    loader = CreateDataLoader(opt)
    dataset = loader.load_data()
    model = create_model(opt)
    model.setup(opt)

    # Prepare the HTML results page for this phase/epoch.
    web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.epoch))
    page = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch))

    # eval() only changes batchnorm/dropout behavior: pix2pix uses both, so it
    # matters there; CycleGAN (instancenorm, no dropout) is unaffected.
    if opt.eval:
        model.eval()

    for index, batch in enumerate(dataset):
        if index >= opt.num_test:
            break
        model.set_input(batch)
        model.test()
        visuals = model.get_current_visuals()
        img_path = model.get_image_paths()
        if index % 5 == 0:
            print('processing (%04d)-th image... %s' % (index, img_path))
        save_images(page, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize)

    # Write index.html to disk.
    page.save()
41 |
--------------------------------------------------------------------------------
/train.py:
--------------------------------------------------------------------------------
1 | import time
2 | from options.train_options import TrainOptions
3 | from data import CreateDataLoader
4 | from models import create_model
5 | from util.visualizer import Visualizer
6 |
if __name__ == '__main__':
    opt = TrainOptions().parse()
    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    dataset_size = len(data_loader)
    print('#training images = %d' % dataset_size)

    model = create_model(opt)
    model.setup(opt)
    visualizer = Visualizer(opt)
    total_steps = 0  # images processed so far, across all epochs

    # niter epochs at the initial learning rate, then niter_decay decay epochs.
    for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
        epoch_start_time = time.time()
        iter_data_time = time.time()
        epoch_iter = 0  # images processed within the current epoch

        for i, data in enumerate(dataset):
            iter_start_time = time.time()
            # t_data (data-loading time) is refreshed only when this check,
            # done *before* total_steps is incremented, fires; the print below
            # re-checks *after* the increment, so for some print_freq /
            # batch_size combinations the printed t_data can be stale.
            # NOTE(review): quirk kept as-is; t_data is always defined because
            # the very first iteration (total_steps == 0) takes this branch.
            if total_steps % opt.print_freq == 0:
                t_data = iter_start_time - iter_data_time
            visualizer.reset()
            total_steps += opt.batch_size
            epoch_iter += opt.batch_size
            model.set_input(data)
            model.optimize_parameters()  # forward + backward + optimizer steps

            if total_steps % opt.display_freq == 0:
                save_result = total_steps % opt.update_html_freq == 0
                visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)

            if total_steps % opt.print_freq == 0:
                losses = model.get_current_losses()
                t = (time.time() - iter_start_time) / opt.batch_size  # per-image compute time
                visualizer.print_current_losses(epoch, epoch_iter, losses, t, t_data)
                if opt.display_id > 0:
                    visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, opt, losses)

            if total_steps % opt.save_latest_freq == 0:
                print('saving the latest model (epoch %d, total_steps %d)' % (epoch, total_steps))
                save_suffix = 'iter_%d' % total_steps if opt.save_by_iter else 'latest'
                model.save_networks(save_suffix)

            iter_data_time = time.time()
        # Periodic end-of-epoch checkpoint: 'latest' plus an epoch-numbered copy.
        if epoch % opt.save_epoch_freq == 0:
            print('saving the model at the end of epoch %d, iters %d' % (epoch, total_steps))
            model.save_networks('latest')
            model.save_networks(epoch)

        print('End of epoch %d / %d \t Time Taken: %d sec' %
              (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
        model.update_learning_rate()  # step the LR schedulers once per epoch
59 |
--------------------------------------------------------------------------------
/util/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangwoomo/instagan/f9c1d9c9b7d2c21491317921f24a5200a02a823d/util/__init__.py
--------------------------------------------------------------------------------
/util/get_data.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import os
3 | import tarfile
4 | import requests
5 | from warnings import warn
6 | from zipfile import ZipFile
7 | from bs4 import BeautifulSoup
8 | from os.path import abspath, isdir, join, basename
9 |
10 |
class GetData(object):
    """

    Download CycleGAN or Pix2Pix Data.

    Args:
        technique : str
            One of: 'cyclegan' or 'pix2pix' (case-insensitive).
        verbose : bool
            If True, print additional information.

    Raises:
        ValueError: if `technique` is not a known dataset family.

    Examples:
        >>> from util.get_data import GetData
        >>> gd = GetData(technique='cyclegan')
        >>> new_data_path = gd.get(save_path='./datasets')  # options will be displayed.

    """

    def __init__(self, technique='cyclegan', verbose=True):
        url_dict = {
            'pix2pix': 'https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets',
            'cyclegan': 'https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets'
        }
        self.url = url_dict.get(technique.lower())
        if self.url is None:
            # Fail fast; the original silently stored None and crashed later
            # with an opaque error when the URL was first used.
            raise ValueError(
                "Unknown technique: {0!r}. Use 'cyclegan' or 'pix2pix'.".format(technique))
        self._verbose = verbose

    def _print(self, text):
        # Print only when the instance was constructed with verbose=True.
        if self._verbose:
            print(text)

    @staticmethod
    def _get_options(r):
        # Scrape the dataset index page for downloadable archive links.
        soup = BeautifulSoup(r.text, 'lxml')
        options = [h.text for h in soup.find_all('a', href=True)
                   if h.text.endswith(('.zip', 'tar.gz'))]
        return options

    def _present_options(self):
        # Interactively list the available archives and return the chosen one.
        r = requests.get(self.url)
        r.raise_for_status()  # surface HTTP errors instead of parsing an error page
        options = self._get_options(r)
        print('Options:\n')
        for i, o in enumerate(options):
            print("{0}: {1}".format(i, o))
        choice = input("\nPlease enter the number of the "
                       "dataset above you wish to download:")
        return options[int(choice)]

    def _download_data(self, dataset_url, save_path):
        # Fetch the archive, unpack it under save_path, then delete the archive.
        if not isdir(save_path):
            os.makedirs(save_path)

        base = basename(dataset_url)
        temp_save_path = join(save_path, base)

        # Stream to disk so large archives are not buffered fully in memory
        # (the original wrote r.content in one shot).
        r = requests.get(dataset_url, stream=True)
        r.raise_for_status()
        with open(temp_save_path, "wb") as f:
            for chunk in r.iter_content(chunk_size=1 << 20):
                f.write(chunk)

        if base.endswith('.tar.gz'):
            obj = tarfile.open(temp_save_path)
        elif base.endswith('.zip'):
            obj = ZipFile(temp_save_path, 'r')
        else:
            raise ValueError("Unknown File Type: {0}.".format(base))

        self._print("Unpacking Data...")
        try:
            obj.extractall(save_path)
        finally:
            obj.close()  # release the handle even if extraction fails
        os.remove(temp_save_path)

    def get(self, save_path, dataset=None):
        """

        Download a dataset.

        Args:
            save_path : str
                A directory to save the data to.
            dataset : str, optional
                A specific dataset to download.
                Note: this must include the file extension.
                If None, options will be presented for you
                to choose from.

        Returns:
            save_path_full : str
                The absolute path to the downloaded data.

        """
        if dataset is None:
            selected_dataset = self._present_options()
        else:
            selected_dataset = dataset

        # Archive 'name.tar.gz' unpacks into '<save_path>/name'.
        save_path_full = join(save_path, selected_dataset.split('.')[0])

        if isdir(save_path_full):
            warn("\n'{0}' already exists. Voiding Download.".format(
                save_path_full))
        else:
            self._print('Downloading Data...')
            url = "{0}/{1}".format(self.url, selected_dataset)
            self._download_data(url, save_path=save_path)

        return abspath(save_path_full)
116 |
--------------------------------------------------------------------------------
/util/html.py:
--------------------------------------------------------------------------------
1 | import dominate
2 | from dominate.tags import meta, h3, table, tr, td, p, a, img, br
3 | import os
4 |
5 |
class HTML:
    """Build a simple static HTML page of images using the dominate library.

    The page is written to ``<web_dir>/index.html`` and the images it shows
    are expected to live under ``<web_dir>/images``.
    """

    def __init__(self, web_dir, title, reflesh=0):
        """Create the output directories and an empty document.

        Args:
            web_dir (str): root directory for the generated page.
            title (str): HTML document title.
            reflesh (int): if > 0, auto-reload the page every ``reflesh``
                seconds. ('reflesh' is a historic misspelling of 'refresh'
                kept in the signature for caller compatibility.)
        """
        self.title = title
        self.web_dir = web_dir
        self.img_dir = os.path.join(self.web_dir, 'images')
        if not os.path.exists(self.web_dir):
            os.makedirs(self.web_dir)
        if not os.path.exists(self.img_dir):
            os.makedirs(self.img_dir)

        self.doc = dominate.document(title=title)
        if reflesh > 0:
            with self.doc.head:
                # Bug fix: the emitted attribute must be the correctly
                # spelled "refresh" — browsers ignore http-equiv="reflesh",
                # so the page never auto-reloaded before.
                meta(http_equiv="refresh", content=str(reflesh))

    def get_image_dir(self):
        """Return the directory where images for this page should be saved."""
        return self.img_dir

    def add_header(self, str):
        """Add an <h3> header with the given text.

        ('str' shadows the builtin; the parameter name is kept unchanged
        so keyword callers do not break.)
        """
        with self.doc:
            h3(str)

    def add_table(self, border=1):
        """Append a new fixed-layout table to the document and keep a handle
        to it in self.t for a subsequent add_images call."""
        self.t = table(border=border, style="table-layout: fixed;")
        self.doc.add(self.t)

    def add_images(self, ims, txts, links, width=400):
        """Add one table row of images, each with a caption and hyperlink.

        Args:
            ims (list of str): image file names, relative to the images dir.
            txts (list of str): captions displayed under each image.
            links (list of str): link targets, relative to the images dir.
            width (int): rendered width of each image in pixels.
        """
        self.add_table()
        with self.t:
            with tr():
                for im, txt, link in zip(ims, txts, links):
                    with td(style="word-wrap: break-word;", halign="center", valign="top"):
                        with p():
                            with a(href=os.path.join('images', link)):
                                img(style="width:%dpx" % width, src=os.path.join('images', im))
                            br()
                            p(txt)

    def save(self):
        """Render the document and write it to <web_dir>/index.html."""
        html_file = '%s/index.html' % self.web_dir
        # Context manager ensures the file handle is closed even if
        # render() or write() raises (the original leaked on error).
        with open(html_file, 'wt') as f:
            f.write(self.doc.render())
50 |
51 |
if __name__ == '__main__':
    # Smoke test: generate a small demo page with four placeholder images.
    html = HTML('web/', 'test_html')
    html.add_header('hello world')

    ims = ['image_%d.png' % n for n in range(4)]
    txts = ['text_%d' % n for n in range(4)]
    links = ['image_%d.png' % n for n in range(4)]
    html.add_images(ims, txts, links)
    html.save()
65 |
--------------------------------------------------------------------------------
/util/image_pool.py:
--------------------------------------------------------------------------------
1 | import random
2 | import torch
3 |
4 |
class ImagePool():
    """History buffer of previously generated images.

    ``query`` returns, for each incoming image, either that image or a
    randomly chosen older one from the buffer (swapping the new image in),
    which helps stabilize GAN discriminator training. A ``pool_size`` of 0
    disables buffering entirely.
    """

    def __init__(self, pool_size):
        self.pool_size = pool_size
        if self.pool_size > 0:
            self.num_imgs = 0   # how many images are currently stored
            self.images = []    # the buffer itself

    def query(self, images):
        """Return a batch mixing the given images with buffered past ones."""
        if self.pool_size == 0:  # buffering disabled: pass straight through
            return images
        out = []
        for img in images:
            img = torch.unsqueeze(img.data, 0)
            if self.num_imgs < self.pool_size:
                # Buffer not yet full: store the image and return it as-is.
                self.num_imgs = self.num_imgs + 1
                self.images.append(img)
                out.append(img)
            elif random.uniform(0, 1) > 0.5:
                # With probability 0.5, return a random buffered image and
                # replace it with the new one.
                rand_idx = random.randint(0, self.pool_size - 1)  # randint is inclusive
                swapped = self.images[rand_idx].clone()
                self.images[rand_idx] = img
                out.append(swapped)
            else:
                out.append(img)
        return torch.cat(out, 0)
33 |
--------------------------------------------------------------------------------
/util/util.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import torch
3 | import numpy as np
4 | from PIL import Image
5 | import os
6 |
7 |
8 | # Converts a Tensor into an image array (numpy)
9 | # |imtype|: the desired type of the converted numpy array
# Converts a Tensor into an image array (numpy)
# |imtype|: the desired type of the converted numpy array
def tensor2im(input_image, imtype=np.uint8):
    """Convert the first element of a batched torch Tensor into an HxWxC
    numpy image, rescaling values from [-1, 1] to [0, 255]. Non-tensor
    inputs are returned unchanged."""
    if not isinstance(input_image, torch.Tensor):
        return input_image
    arr = input_image.data[0].cpu().float().numpy()
    if arr.shape[0] == 1:
        # Single-channel image: replicate to 3 channels.
        arr = np.tile(arr, (3, 1, 1))
    arr = (np.transpose(arr, (1, 2, 0)) + 1) / 2.0 * 255.0
    return arr.astype(imtype)
20 |
21 |
def diagnose_network(net, name='network'):
    """Print `name` followed by the mean absolute gradient over all
    parameters of `net` that currently have a gradient (0.0 if none do)."""
    total = 0.0
    count = 0
    for param in net.parameters():
        if param.grad is None:
            continue
        total += torch.mean(torch.abs(param.grad.data))
        count += 1
    mean = total / count if count > 0 else total
    print(name)
    print(mean)
33 |
34 |
def save_image(image_numpy, image_path):
    """Write a numpy image array to `image_path` via PIL."""
    Image.fromarray(image_numpy).save(image_path)
38 |
39 |
def print_numpy(x, val=True, shp=False):
    """Print summary statistics of a numpy array.

    Args:
        x: array-like; converted to float64 before reporting.
        val (bool): print mean/min/max/median/std when True.
        shp (bool): print the array shape when True.
    """
    x = x.astype(np.float64)
    if shp:
        print('shape,', x.shape)
    if val:
        flat = x.flatten()
        print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
            np.mean(flat), np.min(flat), np.max(flat), np.median(flat), np.std(flat)))
48 |
49 |
def mkdirs(paths):
    """Create every directory in `paths`.

    Args:
        paths: a single path string, or a list/tuple of path strings.
            (The original `and not isinstance(paths, str)` guard was
            redundant — a list is never a str — and tuples fell through
            to the single-path branch; both are handled here.)
    """
    if isinstance(paths, (list, tuple)):
        for path in paths:
            mkdir(path)
    else:
        mkdir(paths)


def mkdir(path):
    """Create `path` (including parents) if it does not already exist."""
    if not os.path.exists(path):
        os.makedirs(path)
61 |
--------------------------------------------------------------------------------
/util/visualizer.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import os
3 | import sys
4 | import ntpath
5 | import time
6 | from . import util
7 | from . import html
8 | from scipy.misc import imresize
9 |
# visdom raises ConnectionError when it cannot reach the server (Python 3);
# Python 2 has no ConnectionError, so fall back to the base Exception there.
if sys.version_info[0] == 2:
    VisdomExceptionBase = Exception
else:
    VisdomExceptionBase = ConnectionError
14 |
15 |
16 | # save image to the disk
# save image to the disk
def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256):
    """Save every visual to the webpage's image directory and add one row
    of images to the page.

    Args:
        webpage: an html.HTML page object (provides get_image_dir,
            add_header, add_images).
        visuals: mapping of label -> image tensor.
        image_path (list of str): path(s) of the source image; the basename
            of the first entry (without extension) prefixes every saved file.
        aspect_ratio (float): >1 stretches width, <1 stretches height.
        width (int): display width on the generated page.
    """
    image_dir = webpage.get_image_dir()
    name = os.path.splitext(ntpath.basename(image_path[0]))[0]

    webpage.add_header(name)
    ims, txts, links = [], [], []

    for label, im_data in visuals.items():
        im = util.tensor2im(im_data)
        image_name = '%s_%s.png' % (name, label)
        save_path = os.path.join(image_dir, image_name)
        h, w, _ = im.shape
        if aspect_ratio > 1.0:
            im = imresize(im, (h, int(w * aspect_ratio)), interp='bicubic')
        if aspect_ratio < 1.0:
            im = imresize(im, (int(h / aspect_ratio), w), interp='bicubic')
        util.save_image(im, save_path)

        ims.append(image_name)
        txts.append(label)
        links.append(image_name)
    webpage.add_images(ims, txts, links, width=width)
40 |
41 |
class Visualizer():
    """Aggregates training visualization: pushes image grids and loss curves
    to a visdom server, mirrors result images into a static HTML page, and
    appends formatted loss lines to a text log file."""

    def __init__(self, opt):
        # `opt` is the parsed options object; only the attributes read below
        # are required (display_id, isTrain, no_html, display_winsize, name,
        # checkpoints_dir, plus the display_* options when display_id > 0).
        self.display_id = opt.display_id
        self.use_html = opt.isTrain and not opt.no_html
        self.win_size = opt.display_winsize
        self.name = opt.name
        self.opt = opt
        self.saved = False  # whether current results were already saved to HTML
        if self.display_id > 0:
            # Imported lazily so visdom is only required when displaying.
            import visdom
            self.ncols = opt.display_ncols
            self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port, env=opt.display_env, raise_exceptions=True)

        if self.use_html:
            self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
            self.img_dir = os.path.join(self.web_dir, 'images')
            print('create web directory %s...' % self.web_dir)
            util.mkdirs([self.web_dir, self.img_dir])
        # The loss log is always written, independent of visdom/HTML output.
        self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
        with open(self.log_name, "a") as log_file:
            now = time.strftime("%c")
            log_file.write('================ Training Loss (%s) ================\n' % now)

    def reset(self):
        # Mark that the current epoch's results have not been saved yet.
        self.saved = False

    def throw_visdom_connection_error(self):
        # Print setup instructions and abort; called whenever a visdom call
        # raises VisdomExceptionBase.
        print('\n\nCould not connect to Visdom server (https://github.com/facebookresearch/visdom) for displaying training progress.\nYou can suppress connection to Visdom using the option --display_id -1. To install visdom, run \n$ pip install visdom\n, and start the server by \n$ python -m visdom.server.\n\n')
        exit(1)

    # |visuals|: dictionary of images to display or save
    def display_current_results(self, visuals, epoch, save_result):
        """Show the current result images in visdom and/or save them into the
        HTML page.

        NOTE(review): the HTML/CSS template string literals inside this
        method appear truncated in this copy of the file (markup between
        quotes is missing and some literals span line breaks); restore them
        from the upstream source before relying on the visdom label panel.
        """
        if self.display_id > 0: # show images in the browser
            ncols = self.ncols
            if ncols > 0:
                ncols = min(ncols, len(visuals))
                # All visuals are assumed to share the H x W of the first one
                # — TODO confirm against callers.
                h, w = next(iter(visuals.values())).shape[:2]
                # NOTE(review): CSS template content is missing here.
                table_css = """""" % (w, h)
                title = self.name
                label_html = ''
                label_html_row = ''
                images = []
                idx = 0
                for label, image in visuals.items():
                    image_numpy = util.tensor2im(image)
                    label_html_row += '%s | ' % label
                    images.append(image_numpy.transpose([2, 0, 1]))
                    idx += 1
                    if idx % ncols == 0:
                        # NOTE(review): this literal spans a line break as
                        # written here (markup appears stripped) and is not
                        # valid Python in this form.
                        label_html += '%s
' % label_html_row
                        label_html_row = ''
                # Pad the final row with white images so the grid stays rectangular.
                white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255
                while idx % ncols != 0:
                    images.append(white_image)
                    label_html_row += ' | '
                    idx += 1
                if label_html_row != '':
                    label_html += '%s
' % label_html_row
                # pane col = image row
                try:
                    self.vis.images(images, nrow=ncols, win=self.display_id + 1,
                                    padding=2, opts=dict(title=title + ' images'))
                    # NOTE(review): formatting an empty string with an
                    # argument raises TypeError at runtime; template text is
                    # missing here as well.
                    label_html = '' % label_html
                    self.vis.text(table_css + label_html, win=self.display_id + 2,
                                  opts=dict(title=title + ' labels'))
                except VisdomExceptionBase:
                    self.throw_visdom_connection_error()

            else:
                # ncols == 0: one visdom window per image instead of a grid.
                idx = 1
                for label, image in visuals.items():
                    image_numpy = util.tensor2im(image)
                    self.vis.image(image_numpy.transpose([2, 0, 1]), opts=dict(title=label),
                                   win=self.display_id + idx)
                    idx += 1

        if self.use_html and (save_result or not self.saved): # save images to a html file
            self.saved = True
            for label, image in visuals.items():
                image_numpy = util.tensor2im(image)
                img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))
                util.save_image(image_numpy, img_path)
            # update website
            webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, reflesh=1)
            # Newest epoch first: list every epoch's images down the page.
            for n in range(epoch, 0, -1):
                webpage.add_header('epoch [%d]' % n)
                ims, txts, links = [], [], []

                for label, image_numpy in visuals.items():
                    # NOTE(review): 'image' below is the stale variable from
                    # the loop above and the result is never used — this line
                    # looks like dead/erroneous code; confirm against upstream.
                    image_numpy = util.tensor2im(image)
                    img_path = 'epoch%.3d_%s.png' % (n, label)
                    ims.append(img_path)
                    txts.append(label)
                    links.append(img_path)
                webpage.add_images(ims, txts, links, width=self.win_size)
            webpage.save()

    # losses: dictionary of error labels and values
    def plot_current_losses(self, epoch, counter_ratio, opt, losses):
        """Append the current losses to the history and redraw the visdom
        line plot. `epoch + counter_ratio` gives a fractional-epoch x value.
        (`opt` is unused; kept for interface compatibility.)"""
        if not hasattr(self, 'plot_data'):
            # Lazily initialize the plot history; the legend is fixed to the
            # loss keys seen on the first call.
            self.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())}
        self.plot_data['X'].append(epoch + counter_ratio)
        self.plot_data['Y'].append([losses[k] for k in self.plot_data['legend']])
        try:
            self.vis.line(
                X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1),
                Y=np.array(self.plot_data['Y']),
                opts={
                    'title': self.name + ' loss over time',
                    'legend': self.plot_data['legend'],
                    'xlabel': 'epoch',
                    'ylabel': 'loss'},
                win=self.display_id)
        except VisdomExceptionBase:
            self.throw_visdom_connection_error()

    # losses: same format as |losses| of plot_current_losses
    def print_current_losses(self, epoch, i, losses, t, t_data):
        """Print a formatted loss line to stdout and append it to the log
        file. `t` is per-iteration compute time, `t_data` data-loading time
        — presumably in seconds; verify against the training loop."""
        message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, i, t, t_data)
        for k, v in losses.items():
            message += '%s: %.3f ' % (k, v)

        print(message)
        with open(self.log_name, "a") as log_file:
            log_file.write('%s\n' % message)
170 |
--------------------------------------------------------------------------------