├── .gitignore ├── LICENSE ├── README.md ├── __init__.py ├── config ├── cifar_evaluation.yaml ├── cifar_step_1.yaml ├── cifar_step_2.yaml └── nuswide_step_1.yaml ├── data_list ├── cifar10 │ ├── database.txt │ ├── database_nolabel.txt │ ├── test.txt │ └── train.txt ├── coco │ ├── database.txt │ ├── database_nolabel.txt │ ├── test.txt │ └── train.txt └── nuswide_81 │ ├── database.txt │ ├── database_nolabel.txt │ ├── test.txt │ └── train.txt ├── environment.yml ├── lib ├── __init__.py ├── architecture.py ├── config.py ├── criterion.py ├── dataloader.py ├── metric.py ├── ops.py ├── params.py └── util.py └── main.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib64/ 18 | parts/ 19 | sdist/ 20 | var/ 21 | wheels/ 22 | *.egg-info/ 23 | .installed.cfg 24 | *.egg 25 | MANIFEST 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *.cover 46 | .hypothesis/ 47 | .pytest_cache/ 48 | 49 | # Translations 50 | *.mo 51 | *.pot 52 | 53 | # Django stuff: 54 | *.log 55 | local_settings.py 56 | db.sqlite3 57 | 58 | # Flask stuff: 59 | instance/ 60 | .webassets-cache 61 | 62 | # Scrapy stuff: 63 | .scrapy 64 | 65 | # Sphinx documentation 66 | docs/_build/ 67 | 68 | # PyBuilder 69 | target/ 70 | 71 | # Jupyter Notebook 72 | .ipynb_checkpoints 73 | 74 | # pyenv 75 | .python-version 76 | 77 | # celery beat schedule file 78 | celerybeat-schedule 79 | 80 | # SageMath parsed files 81 | *.sage.py 82 | 83 | # Environments 84 | .env 85 | .venv 86 | env/ 87 | venv/ 88 | ENV/ 89 | env.bak/ 90 | venv.bak/ 91 | 92 | # Spyder project settings 93 | .spyderproject 94 | .spyproject 95 | 96 | # Rope project settings 97 | .ropeproject 98 | 99 | # mkdocs documentation 100 | /site 101 | 102 | # mypy 103 | .mypy_cache/ 104 | 105 | .DS_Store 106 | images 107 | log.pkl 108 | .idea 109 | data 110 | output 111 | pretrained_models 112 | 113 | *.swp 114 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Ishaan Gulrajani 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | HashGAN: Deep Learning to Hash with Pair Conditional Wasserstein GAN 2 | ===================================== 3 | 4 | Code for CVPR 2018 Paper ["HashGAN: Deep Learning to Hash with Pair Conditional Wasserstein GAN"](http://openaccess.thecvf.com/content_cvpr_2018/papers/Cao_HashGAN_Deep_Learning_CVPR_2018_paper.pdf). 5 | 6 | 7 | ## Prerequisites 8 | 9 | - Python3, NumPy, TensorFlow-gpu, SciPy, Matplotlib, OpenCV, easydict, yacs, tqdm 10 | - A recent NVIDIA GPU 11 | 12 | We provide a `environment.yaml` for you and you can simplely use `conda env create -f environment.yml` to create the environment. 13 | 14 | Or you can create the environment from scratch: 15 | ```bash 16 | conda create --no-default-packages -n HashGAN python=3.6 && source activate HashGAN 17 | conda install -y numpy scipy matplotlib tensorflow-gpu opencv 18 | pip install easydict yacs tqdm pillow 19 | ``` 20 | 21 | ## Data Preparation 22 | In `data_list/` folder, we give three examples to show how to prepare image training data. If you want to add other datasets as the input, you need to prepare `train.txt`, `test.txt`, `database.txt` and `database_nolabel.txt` as CIFAR-10 dataset. 23 | 24 | You can download the whole cifar10 dataset including the images and data list from [here](https://github.com/thulab/DeepHash/releases/download/v0.1/cifar10.zip), and unzip it to `data/cifar10` folder. 25 | 26 | If you need run on NUSWIDE_81 and COCO, we recommend you to follow [here](https://github.com/thuml/HashNet/tree/master/pytorch#datasets) to prepare NUSWIDE_81 and COCO images. 27 | 28 | ## Pretrained Models 29 | 30 | The imagenet pretrained Alexnet model can be downloaded [here](https://github.com/thulab/DeepHash/releases/download/v0.1/reference_pretrain.npy.zip). 31 | You can download the pretrained Generator models in the [release page](https://github.com/thuml/HashGAN/releases) and modify config file to use the pretrained models. 32 | 33 | ## Training 34 | 35 | The training process can be divided into two step: 36 | 1. Training a image generator. 37 | 2. Fintune Alexnet using original labeled images and generated images. 38 | 39 | In `config` folder, we provide some examples to prepare yaml configuration. 40 | 41 | ``` 42 | config 43 | ├── cifar_evaluation.yaml 44 | ├── cifar_step_1.yaml 45 | ├── cifar_step_2.yaml 46 | └── nuswide_step_1.yaml 47 | ``` 48 | 49 | You can run the model using command like the following: 50 | 51 | - `python main.py --cfg config/cifar_step_1.yaml --gpus 0` 52 | - `python main.py --cfg config/cifar_step_2.yaml --gpus 0` 53 | 54 | You can use tensorboard to monitor the training process such as losses and Mean Average Precision. 55 | 56 | ## Citation 57 | If you use this code for your research, please consider citing: 58 | ``` 59 | @inproceedings{cao2018hashgan, 60 | title={HashGAN: Deep Learning to Hash with Pair Conditional Wasserstein GAN}, 61 | author={Cao, Yue and Liu, Bin and Long, Mingsheng and Wang, Jianmin and KLiss, MOE}, 62 | booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, 63 | pages={1287--1296}, 64 | year={2018} 65 | } 66 | ``` 67 | 68 | ## Contact 69 | If you have any problem about our code, feel free to contact 70 | - liubinthss@gmail.com 71 | - caoyue10@gmail.com 72 | 73 | or describe your problem in Issues. 74 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thuml/HashGAN/2d114009ce68357bda9109e498b534d7f3019c0a/__init__.py -------------------------------------------------------------------------------- /config/cifar_evaluation.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | G_ARCHITECTURE: "NORM" 3 | D_ARCHITECTURE: "ALEXNET" 4 | G_PRETRAINED_MODEL_PATH: "./output/cifar10_step_1_ACGAN_SCALE_G_1.0/models/G_21999.ckpt" 5 | D_PRETRAINED_MODEL_PATH: "./output/cifar10_finetune_acgan_scale_fake_0/models/D_9999.ckpt" 6 | DATA: 7 | USE_DATASET: "cifar10" # "cifar10", "nuswide81", "coco" 8 | LABEL_DIM: 10 9 | DB_SIZE: 54000 10 | TEST_SIZE: 1000 11 | WIDTH_HEIGHT: 32 12 | MAP_R: 54000 13 | LIST_ROOT: "./data_list/cifar10" 14 | DATA_ROOT: "./data/cifar10" 15 | OUTPUT_DIR: "./output/cifar10_evaluation" 16 | 17 | TRAIN: 18 | EVALUATE_MODE: True 19 | BATCH_SIZE: 128 20 | ITERS: 10000 21 | CROSS_ENTROPY_ALPHA: 10 22 | LR: 1e-4 # Initial learning rate 23 | G_LR: 0.0 # 1e-4 24 | DECAY: True # Whether to decay LR over learning 25 | N_CRITIC: 1 # Critic steps per generator steps 26 | EVAL_FREQUENCY: 1 27 | SAMPLE_FREQUENCY: 1 28 | ACGAN_SCALE: 1.0 29 | ACGAN_SCALE_FAKE: 0.0 # 1.0 30 | WGAN_SCALE: 0.0 31 | WGAN_SCALE_GP: 10.0 32 | ACGAN_SCALE_G: 0.1 33 | WGAN_SCALE_G: 1.0 34 | -------------------------------------------------------------------------------- /config/cifar_step_1.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | G_ARCHITECTURE: "NORM" 3 | D_ARCHITECTURE: "NORM" 4 | G_PRETRAINED_MODEL_PATH: '' 5 | D_PRETRAINED_MODEL_PATH: '' 6 | 7 | DATA: 8 | USE_DATASET: "cifar10" # "cifar10", "nuswide81", "coco" 9 | LABEL_DIM: 10 10 | DB_SIZE: 54000 11 | TEST_SIZE: 1000 12 | WIDTH_HEIGHT: 32 13 | MAP_R: 54000 14 | LIST_ROOT: "./data_list/cifar10" 15 | DATA_ROOT: "./data/cifar10" 16 | OUTPUT_DIR: "./output/cifar10_step_1_ACGAN_SCALE_G_10.0" 17 | 18 | TRAIN: 19 | BATCH_SIZE: 64 20 | ITERS: 100000 21 | CROSS_ENTROPY_ALPHA: 10 22 | LR: 1e-4 # Initial learning rate 23 | G_LR: 1e-4 # 1e-4 24 | DECAY: True # Whether to decay LR over learning 25 | N_CRITIC: 5 # Critic steps per generator steps 26 | EVAL_FREQUENCY: 10000 # How frequently to evaluate and save model 27 | CHECKPOINT_FREQUENCY: 1000 28 | ACGAN_SCALE: 1.0 29 | ACGAN_SCALE_G: 10.0 30 | WGAN_SCALE: 1.0 31 | WGAN_SCALE_G: 1.0 32 | -------------------------------------------------------------------------------- /config/cifar_step_2.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | G_ARCHITECTURE: "NORM" 3 | D_ARCHITECTURE: "ALEXNET" 4 | G_PRETRAINED_MODEL_PATH: "./pretrained_models/cifar10/G_99999.ckpt" 5 | D_PRETRAINED_MODEL_PATH: "" 6 | 7 | DATA: 8 | USE_DATASET: "cifar10" # "cifar10", "nuswide81", "coco" 9 | LABEL_DIM: 10 10 | DB_SIZE: 54000 11 | TEST_SIZE: 1000 12 | WIDTH_HEIGHT: 32 13 | MAP_R: 54000 14 | LIST_ROOT: "./data_list/cifar10" 15 | DATA_ROOT: "./data/cifar10" 16 | OUTPUT_DIR: "./output/cifar10_finetune" 17 | 18 | TRAIN: 19 | BATCH_SIZE: 128 20 | ITERS: 10000 21 | CROSS_ENTROPY_ALPHA: 10 22 | LR: 1e-4 # Initial learning rate 23 | G_LR: 0.0 # 1e-4 24 | DECAY: True # Whether to decay LR over learning 25 | N_CRITIC: 1 # Critic steps per generator steps 26 | EVAL_FREQUENCY: 2000 27 | SAMPLE_FREQUENCY: 2000 28 | ACGAN_SCALE: 1.0 29 | ACGAN_SCALE_FAKE: 0.1 30 | WGAN_SCALE: 0.0 31 | WGAN_SCALE_GP: 10.0 32 | ACGAN_SCALE_G: 0.1 33 | WGAN_SCALE_G: 1.0 34 | -------------------------------------------------------------------------------- /config/nuswide_step_1.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | G_ARCHITECTURE: "GOOD" 3 | D_ARCHITECTURE: "GOOD" 4 | G_PRETRAINED_MODEL_PATH: '' 5 | D_PRETRAINED_MODEL_PATH: '' 6 | 7 | DATA: 8 | USE_DATASET: "nuswide81" # "cifar10", "nuswide81", "coco" 9 | LABEL_DIM: 81 10 | DB_SIZE: 168692 11 | TEST_SIZE: 5000 12 | WIDTH_HEIGHT: 64 13 | MAP_R: 5000 14 | LIST_ROOT: "./data_list/nuswide_81" 15 | DATA_ROOT: "/home/liubin/data/nuswide_81" 16 | OUTPUT_DIR: "./output/nuswide81_step_1" 17 | 18 | TRAIN: 19 | BATCH_SIZE: 64 20 | ITERS: 100000 21 | CROSS_ENTROPY_ALPHA: 5 22 | LR: 1e-4 # Initial learning rate 23 | G_LR: 1e-4 # 1e-4 24 | DECAY: True # Whether to decay LR over learning 25 | N_CRITIC: 5 # Critic steps per generator steps 26 | EVAL_FREQUENCY: 10000 # How frequently to evaluate and save model 27 | CHECKPOINT_FREQUENCY: 1000 28 | ACGAN_SCALE: 1.0 29 | ACGAN_SCALE_G: 0.3 30 | WGAN_SCALE: 1.0 31 | WGAN_SCALE_G: 1.0 32 | -------------------------------------------------------------------------------- /data_list/cifar10/test.txt: -------------------------------------------------------------------------------- 1 | train/1_690.jpg 1 0 0 0 0 0 0 0 0 0 2 | train/2_7582.jpg 0 1 0 0 0 0 0 0 0 0 3 | train/1_4019.jpg 0 0 0 0 1 0 0 0 0 0 4 | test/999_1813.jpg 0 0 0 1 0 0 0 0 0 0 5 | train/0_4364.jpg 0 0 0 0 0 0 0 0 0 1 6 | train/3_5619.jpg 0 0 0 0 0 0 0 0 1 0 7 | train/4_2702.jpg 0 0 0 0 1 0 0 0 0 0 8 | train/0_2694.jpg 0 0 0 0 0 0 1 0 0 0 9 | train/2_9045.jpg 0 0 0 0 0 0 0 0 0 1 10 | train/2_3993.jpg 0 0 0 0 0 0 0 1 0 0 11 | train/3_1694.jpg 0 1 0 0 0 0 0 0 0 0 12 | train/0_6837.jpg 0 0 0 1 0 0 0 0 0 0 13 | train/1_2165.jpg 1 0 0 0 0 0 0 0 0 0 14 | train/2_4688.jpg 0 0 0 0 0 0 0 0 1 0 15 | test/999_5045.jpg 0 0 0 0 0 0 0 1 0 0 16 | train/2_7320.jpg 0 0 0 0 0 0 0 1 0 0 17 | train/1_6194.jpg 0 0 0 0 1 0 0 0 0 0 18 | test/999_8975.jpg 0 0 0 0 0 0 0 0 1 0 19 | train/0_8262.jpg 0 0 0 1 0 0 0 0 0 0 20 | train/1_397.jpg 0 0 0 0 0 1 0 0 0 0 21 | test/999_5476.jpg 0 0 0 0 0 0 1 0 0 0 22 | test/999_785.jpg 1 0 0 0 0 0 0 0 0 0 23 | test/999_6614.jpg 0 0 0 0 0 1 0 0 0 0 24 | train/4_2657.jpg 0 0 0 1 0 0 0 0 0 0 25 | train/3_3846.jpg 0 0 1 0 0 0 0 0 0 0 26 | train/0_8339.jpg 0 1 0 0 0 0 0 0 0 0 27 | train/4_4600.jpg 0 0 0 0 0 0 1 0 0 0 28 | train/4_9559.jpg 1 0 0 0 0 0 0 0 0 0 29 | test/999_1860.jpg 0 0 0 0 0 0 0 0 1 0 30 | train/4_624.jpg 0 0 0 1 0 0 0 0 0 0 31 | train/0_7221.jpg 0 1 0 0 0 0 0 0 0 0 32 | train/1_9961.jpg 1 0 0 0 0 0 0 0 0 0 33 | train/3_2865.jpg 0 0 0 0 0 0 0 0 1 0 34 | train/4_2645.jpg 0 0 0 0 0 1 0 0 0 0 35 | train/1_6970.jpg 0 0 0 0 0 0 1 0 0 0 36 | train/1_6835.jpg 0 0 0 0 0 1 0 0 0 0 37 | train/0_8087.jpg 0 0 0 0 0 0 0 0 0 1 38 | test/999_1378.jpg 0 1 0 0 0 0 0 0 0 0 39 | train/0_4890.jpg 0 0 0 0 0 1 0 0 0 0 40 | train/3_3255.jpg 1 0 0 0 0 0 0 0 0 0 41 | test/999_1806.jpg 0 1 0 0 0 0 0 0 0 0 42 | test/999_9650.jpg 0 0 0 0 0 0 0 0 1 0 43 | train/0_7906.jpg 0 0 0 1 0 0 0 0 0 0 44 | train/1_6529.jpg 1 0 0 0 0 0 0 0 0 0 45 | train/0_149.jpg 0 0 0 0 1 0 0 0 0 0 46 | train/1_1297.jpg 1 0 0 0 0 0 0 0 0 0 47 | train/2_480.jpg 0 0 0 0 0 0 1 0 0 0 48 | train/0_8323.jpg 0 0 0 0 0 0 0 0 0 1 49 | train/3_2328.jpg 0 0 1 0 0 0 0 0 0 0 50 | train/2_7672.jpg 0 0 0 0 0 0 0 1 0 0 51 | train/3_9395.jpg 0 0 0 0 0 0 0 1 0 0 52 | train/4_6395.jpg 0 0 0 0 0 0 0 0 1 0 53 | train/0_1597.jpg 0 0 0 0 0 0 0 0 1 0 54 | train/0_5107.jpg 0 0 0 1 0 0 0 0 0 0 55 | train/1_7774.jpg 0 0 1 0 0 0 0 0 0 0 56 | train/3_6004.jpg 1 0 0 0 0 0 0 0 0 0 57 | test/999_9692.jpg 0 0 0 0 0 0 0 0 0 1 58 | train/2_4796.jpg 0 0 0 0 0 1 0 0 0 0 59 | train/4_2857.jpg 0 0 0 0 0 0 0 0 0 1 60 | train/2_8715.jpg 0 0 0 0 0 0 0 1 0 0 61 | train/1_7761.jpg 0 0 1 0 0 0 0 0 0 0 62 | train/1_1976.jpg 0 0 0 1 0 0 0 0 0 0 63 | train/1_1938.jpg 0 0 0 1 0 0 0 0 0 0 64 | train/2_6130.jpg 0 0 0 0 1 0 0 0 0 0 65 | train/4_1605.jpg 0 0 0 0 0 0 0 0 0 1 66 | train/2_9857.jpg 0 0 0 0 0 1 0 0 0 0 67 | test/999_2491.jpg 0 0 0 0 0 0 0 0 0 1 68 | train/3_9337.jpg 0 0 0 0 0 0 1 0 0 0 69 | train/4_3472.jpg 0 0 0 1 0 0 0 0 0 0 70 | train/2_3634.jpg 0 0 0 0 1 0 0 0 0 0 71 | test/999_2928.jpg 0 1 0 0 0 0 0 0 0 0 72 | train/4_3997.jpg 0 0 0 0 0 0 0 0 0 1 73 | train/1_3173.jpg 0 0 0 0 0 1 0 0 0 0 74 | train/3_5424.jpg 0 0 0 0 0 1 0 0 0 0 75 | test/999_3381.jpg 0 0 0 0 0 1 0 0 0 0 76 | train/2_132.jpg 1 0 0 0 0 0 0 0 0 0 77 | train/3_7859.jpg 0 0 0 0 0 0 0 1 0 0 78 | test/999_6971.jpg 0 0 0 0 0 0 0 0 0 1 79 | train/1_3327.jpg 0 1 0 0 0 0 0 0 0 0 80 | train/2_4548.jpg 0 0 0 0 1 0 0 0 0 0 81 | train/4_125.jpg 1 0 0 0 0 0 0 0 0 0 82 | train/2_4013.jpg 0 0 0 0 0 0 0 1 0 0 83 | train/2_1445.jpg 0 0 0 0 0 0 0 0 0 1 84 | train/1_8646.jpg 0 0 1 0 0 0 0 0 0 0 85 | test/999_7296.jpg 0 0 0 0 0 0 0 0 1 0 86 | train/3_6119.jpg 1 0 0 0 0 0 0 0 0 0 87 | train/0_2013.jpg 0 0 0 0 0 1 0 0 0 0 88 | train/1_3102.jpg 0 0 0 0 1 0 0 0 0 0 89 | train/2_1348.jpg 0 0 1 0 0 0 0 0 0 0 90 | train/4_9391.jpg 0 0 0 0 0 0 0 1 0 0 91 | train/0_8304.jpg 0 0 1 0 0 0 0 0 0 0 92 | train/2_175.jpg 0 0 0 0 0 0 0 1 0 0 93 | train/3_6767.jpg 0 0 0 1 0 0 0 0 0 0 94 | train/4_5847.jpg 0 0 1 0 0 0 0 0 0 0 95 | train/2_9356.jpg 0 0 0 0 0 0 0 0 0 1 96 | train/3_1736.jpg 0 0 0 0 0 0 0 1 0 0 97 | train/3_1214.jpg 1 0 0 0 0 0 0 0 0 0 98 | train/3_1593.jpg 1 0 0 0 0 0 0 0 0 0 99 | test/999_2977.jpg 0 0 1 0 0 0 0 0 0 0 100 | train/3_1968.jpg 0 0 0 0 0 0 0 0 1 0 101 | train/1_276.jpg 0 1 0 0 0 0 0 0 0 0 102 | train/2_1726.jpg 0 0 0 0 0 1 0 0 0 0 103 | train/0_5553.jpg 0 0 0 1 0 0 0 0 0 0 104 | train/0_8242.jpg 1 0 0 0 0 0 0 0 0 0 105 | train/0_4485.jpg 0 0 0 0 0 0 0 1 0 0 106 | test/999_2050.jpg 0 0 0 0 0 0 0 0 1 0 107 | train/4_1853.jpg 0 0 0 1 0 0 0 0 0 0 108 | test/999_88.jpg 0 0 0 0 0 0 0 0 1 0 109 | train/0_4748.jpg 0 0 1 0 0 0 0 0 0 0 110 | train/3_9889.jpg 0 0 0 0 0 0 1 0 0 0 111 | train/1_7228.jpg 0 0 0 0 0 0 0 0 0 1 112 | train/4_4569.jpg 0 0 0 0 0 0 0 0 1 0 113 | train/4_7064.jpg 0 0 0 0 1 0 0 0 0 0 114 | train/0_6502.jpg 0 0 1 0 0 0 0 0 0 0 115 | train/1_312.jpg 0 0 0 0 0 0 0 0 0 1 116 | train/4_4912.jpg 0 0 0 0 0 0 0 0 0 1 117 | train/1_1713.jpg 0 0 1 0 0 0 0 0 0 0 118 | test/999_1583.jpg 0 1 0 0 0 0 0 0 0 0 119 | train/0_8616.jpg 0 0 1 0 0 0 0 0 0 0 120 | train/2_972.jpg 0 0 0 0 0 0 1 0 0 0 121 | train/3_3485.jpg 0 0 0 0 0 0 0 1 0 0 122 | train/0_7723.jpg 0 1 0 0 0 0 0 0 0 0 123 | train/2_317.jpg 0 0 1 0 0 0 0 0 0 0 124 | train/4_6399.jpg 0 0 0 1 0 0 0 0 0 0 125 | train/0_8318.jpg 0 0 0 0 0 0 0 0 0 1 126 | train/1_6493.jpg 0 0 0 0 0 0 1 0 0 0 127 | test/999_404.jpg 0 0 0 0 0 0 0 0 1 0 128 | train/0_7463.jpg 0 0 0 0 0 0 0 1 0 0 129 | test/999_6494.jpg 1 0 0 0 0 0 0 0 0 0 130 | train/3_2778.jpg 0 0 0 0 0 0 0 0 0 1 131 | train/4_9842.jpg 0 0 0 0 0 0 0 0 0 1 132 | train/3_2219.jpg 0 0 0 1 0 0 0 0 0 0 133 | train/2_2254.jpg 1 0 0 0 0 0 0 0 0 0 134 | test/999_3009.jpg 0 0 0 0 0 0 0 1 0 0 135 | train/3_5245.jpg 0 0 0 1 0 0 0 0 0 0 136 | train/1_5535.jpg 0 1 0 0 0 0 0 0 0 0 137 | train/0_2678.jpg 0 0 1 0 0 0 0 0 0 0 138 | train/0_4754.jpg 0 0 0 0 0 0 0 0 0 1 139 | train/3_131.jpg 0 0 0 0 1 0 0 0 0 0 140 | train/3_795.jpg 0 0 1 0 0 0 0 0 0 0 141 | train/1_4550.jpg 1 0 0 0 0 0 0 0 0 0 142 | train/4_6721.jpg 0 0 0 0 0 0 1 0 0 0 143 | train/1_4868.jpg 0 0 0 0 0 0 0 0 0 1 144 | train/2_8538.jpg 0 1 0 0 0 0 0 0 0 0 145 | train/0_3499.jpg 0 0 0 0 0 1 0 0 0 0 146 | train/2_4144.jpg 1 0 0 0 0 0 0 0 0 0 147 | test/999_6492.jpg 0 1 0 0 0 0 0 0 0 0 148 | test/999_4908.jpg 0 0 0 0 0 0 1 0 0 0 149 | test/999_8543.jpg 1 0 0 0 0 0 0 0 0 0 150 | train/4_5507.jpg 1 0 0 0 0 0 0 0 0 0 151 | train/2_3953.jpg 0 0 0 0 0 0 0 0 1 0 152 | test/999_769.jpg 0 0 0 0 1 0 0 0 0 0 153 | train/4_7295.jpg 0 0 0 0 1 0 0 0 0 0 154 | train/4_7341.jpg 0 0 0 0 0 0 0 0 1 0 155 | train/4_1575.jpg 0 0 0 0 0 0 0 1 0 0 156 | train/4_601.jpg 0 0 0 0 0 0 0 0 1 0 157 | test/999_4645.jpg 0 0 0 0 1 0 0 0 0 0 158 | train/1_103.jpg 0 0 0 0 0 0 0 0 1 0 159 | train/2_7576.jpg 0 1 0 0 0 0 0 0 0 0 160 | train/0_8312.jpg 0 0 0 0 0 0 0 1 0 0 161 | test/999_3594.jpg 0 0 0 1 0 0 0 0 0 0 162 | train/2_9425.jpg 0 0 0 0 0 1 0 0 0 0 163 | train/3_1126.jpg 0 1 0 0 0 0 0 0 0 0 164 | train/0_5239.jpg 0 0 0 0 0 0 0 1 0 0 165 | train/0_4094.jpg 0 0 0 0 0 0 1 0 0 0 166 | train/0_6781.jpg 0 0 0 0 0 1 0 0 0 0 167 | train/2_425.jpg 0 0 0 1 0 0 0 0 0 0 168 | train/0_4148.jpg 0 0 0 0 0 0 1 0 0 0 169 | test/999_7755.jpg 0 0 0 0 0 0 0 1 0 0 170 | train/3_3604.jpg 0 0 0 0 0 0 1 0 0 0 171 | train/0_2748.jpg 0 0 0 0 0 1 0 0 0 0 172 | train/2_8854.jpg 0 0 0 0 0 1 0 0 0 0 173 | train/2_6413.jpg 0 0 0 0 0 0 1 0 0 0 174 | train/0_7157.jpg 0 0 0 0 0 1 0 0 0 0 175 | train/3_8954.jpg 0 0 0 0 0 1 0 0 0 0 176 | train/2_881.jpg 0 0 0 0 0 0 1 0 0 0 177 | train/3_4168.jpg 1 0 0 0 0 0 0 0 0 0 178 | train/3_7130.jpg 1 0 0 0 0 0 0 0 0 0 179 | train/2_5444.jpg 0 0 0 0 1 0 0 0 0 0 180 | train/4_2535.jpg 0 0 0 0 1 0 0 0 0 0 181 | train/3_5957.jpg 0 0 1 0 0 0 0 0 0 0 182 | train/3_148.jpg 0 0 0 0 0 0 0 0 1 0 183 | train/1_7200.jpg 1 0 0 0 0 0 0 0 0 0 184 | train/4_6465.jpg 0 0 0 0 0 0 0 1 0 0 185 | train/4_6673.jpg 0 0 1 0 0 0 0 0 0 0 186 | train/1_1320.jpg 0 0 0 1 0 0 0 0 0 0 187 | train/2_5695.jpg 0 0 1 0 0 0 0 0 0 0 188 | train/0_2158.jpg 0 0 0 0 0 0 0 0 0 1 189 | train/3_8198.jpg 1 0 0 0 0 0 0 0 0 0 190 | train/2_171.jpg 0 0 0 0 0 1 0 0 0 0 191 | train/3_4051.jpg 0 0 0 0 1 0 0 0 0 0 192 | train/1_7852.jpg 0 0 0 0 1 0 0 0 0 0 193 | test/999_6520.jpg 0 0 0 0 0 0 1 0 0 0 194 | train/1_9615.jpg 0 0 1 0 0 0 0 0 0 0 195 | train/4_8963.jpg 0 0 0 0 0 0 0 0 0 1 196 | train/3_9855.jpg 0 0 0 0 0 0 0 0 0 1 197 | test/999_3021.jpg 0 0 0 0 0 0 0 0 1 0 198 | train/3_1597.jpg 0 0 0 0 0 0 0 1 0 0 199 | train/0_8089.jpg 0 0 0 0 0 0 0 0 0 1 200 | train/0_5497.jpg 0 0 0 0 0 0 1 0 0 0 201 | train/4_3435.jpg 0 0 0 0 0 0 0 1 0 0 202 | train/3_7730.jpg 0 0 0 0 0 0 0 0 0 1 203 | train/2_5136.jpg 0 0 0 1 0 0 0 0 0 0 204 | train/0_2721.jpg 0 0 0 0 0 0 0 0 1 0 205 | train/0_7048.jpg 0 1 0 0 0 0 0 0 0 0 206 | train/2_9798.jpg 0 0 0 0 0 0 1 0 0 0 207 | train/2_5682.jpg 0 1 0 0 0 0 0 0 0 0 208 | train/0_7485.jpg 0 0 0 1 0 0 0 0 0 0 209 | train/0_4460.jpg 0 0 0 0 0 0 0 1 0 0 210 | train/2_8369.jpg 0 0 0 0 0 0 0 1 0 0 211 | train/4_9676.jpg 0 1 0 0 0 0 0 0 0 0 212 | train/0_217.jpg 0 0 0 0 0 1 0 0 0 0 213 | test/999_1327.jpg 0 0 0 0 0 0 1 0 0 0 214 | train/1_2338.jpg 0 0 1 0 0 0 0 0 0 0 215 | test/999_5937.jpg 0 1 0 0 0 0 0 0 0 0 216 | train/1_464.jpg 0 0 0 1 0 0 0 0 0 0 217 | train/3_6950.jpg 0 1 0 0 0 0 0 0 0 0 218 | train/3_8925.jpg 0 0 0 0 0 0 0 0 1 0 219 | train/2_7668.jpg 0 0 0 0 0 0 0 0 0 1 220 | train/2_8194.jpg 0 0 0 0 0 0 1 0 0 0 221 | train/1_8108.jpg 0 0 0 0 0 0 0 1 0 0 222 | train/3_4274.jpg 0 0 0 0 0 0 0 0 1 0 223 | train/2_5806.jpg 0 0 0 0 1 0 0 0 0 0 224 | test/999_7737.jpg 1 0 0 0 0 0 0 0 0 0 225 | train/1_3302.jpg 0 0 0 1 0 0 0 0 0 0 226 | train/4_5284.jpg 0 0 0 0 0 0 0 0 0 1 227 | train/3_8375.jpg 0 0 0 0 1 0 0 0 0 0 228 | test/999_2278.jpg 0 0 0 0 0 0 0 0 0 1 229 | test/999_3921.jpg 0 1 0 0 0 0 0 0 0 0 230 | train/3_4491.jpg 0 0 0 0 0 0 0 0 1 0 231 | test/999_9752.jpg 0 0 0 0 0 0 0 1 0 0 232 | train/0_2416.jpg 0 0 0 0 0 1 0 0 0 0 233 | train/2_9003.jpg 1 0 0 0 0 0 0 0 0 0 234 | train/1_6764.jpg 0 0 0 0 0 0 0 1 0 0 235 | train/1_6667.jpg 0 0 0 0 0 0 0 1 0 0 236 | train/2_7288.jpg 0 0 0 0 0 0 0 1 0 0 237 | train/0_4497.jpg 0 0 0 0 1 0 0 0 0 0 238 | train/1_8251.jpg 0 0 0 0 1 0 0 0 0 0 239 | train/1_5892.jpg 0 0 0 0 0 0 0 0 1 0 240 | train/1_5142.jpg 0 0 0 0 0 0 0 0 0 1 241 | train/4_7754.jpg 1 0 0 0 0 0 0 0 0 0 242 | train/1_1004.jpg 0 0 0 0 0 1 0 0 0 0 243 | test/999_7588.jpg 0 0 1 0 0 0 0 0 0 0 244 | train/4_8753.jpg 0 0 0 0 0 0 1 0 0 0 245 | train/3_6782.jpg 0 0 0 0 0 1 0 0 0 0 246 | train/4_542.jpg 1 0 0 0 0 0 0 0 0 0 247 | train/2_9516.jpg 0 0 0 0 0 0 1 0 0 0 248 | train/0_5994.jpg 1 0 0 0 0 0 0 0 0 0 249 | train/4_3775.jpg 1 0 0 0 0 0 0 0 0 0 250 | train/0_5861.jpg 0 0 0 0 0 0 0 0 0 1 251 | train/4_223.jpg 0 0 0 0 0 0 0 0 1 0 252 | train/2_7920.jpg 0 0 0 1 0 0 0 0 0 0 253 | test/999_4260.jpg 0 0 0 0 0 0 0 0 1 0 254 | train/0_9060.jpg 0 0 0 1 0 0 0 0 0 0 255 | train/4_7671.jpg 0 0 0 0 0 0 1 0 0 0 256 | train/2_7967.jpg 0 0 0 1 0 0 0 0 0 0 257 | train/2_5501.jpg 0 0 0 0 0 0 0 0 1 0 258 | train/2_2330.jpg 0 0 0 0 0 0 0 0 0 1 259 | test/999_5120.jpg 0 0 0 0 0 0 0 0 0 1 260 | test/999_6063.jpg 0 0 0 0 0 0 1 0 0 0 261 | train/0_1067.jpg 0 0 1 0 0 0 0 0 0 0 262 | train/0_4796.jpg 0 0 0 1 0 0 0 0 0 0 263 | train/4_8951.jpg 0 0 1 0 0 0 0 0 0 0 264 | train/3_1097.jpg 0 0 0 0 0 0 0 0 1 0 265 | train/4_5403.jpg 0 1 0 0 0 0 0 0 0 0 266 | train/3_7855.jpg 0 0 1 0 0 0 0 0 0 0 267 | train/4_5268.jpg 0 1 0 0 0 0 0 0 0 0 268 | train/1_1900.jpg 0 0 0 0 0 0 0 0 1 0 269 | train/3_304.jpg 1 0 0 0 0 0 0 0 0 0 270 | train/2_5752.jpg 0 0 0 0 0 0 0 0 0 1 271 | train/3_4664.jpg 1 0 0 0 0 0 0 0 0 0 272 | test/999_1904.jpg 0 1 0 0 0 0 0 0 0 0 273 | train/4_3478.jpg 0 0 1 0 0 0 0 0 0 0 274 | train/2_5495.jpg 0 0 1 0 0 0 0 0 0 0 275 | train/3_4084.jpg 0 0 0 0 0 1 0 0 0 0 276 | train/4_5623.jpg 0 0 0 0 1 0 0 0 0 0 277 | train/4_5119.jpg 0 0 1 0 0 0 0 0 0 0 278 | train/4_3580.jpg 0 0 0 0 0 0 0 1 0 0 279 | test/999_3119.jpg 0 0 0 0 1 0 0 0 0 0 280 | train/3_8556.jpg 0 0 1 0 0 0 0 0 0 0 281 | train/0_6210.jpg 0 0 1 0 0 0 0 0 0 0 282 | train/0_9937.jpg 0 0 0 0 1 0 0 0 0 0 283 | train/4_2678.jpg 1 0 0 0 0 0 0 0 0 0 284 | train/1_4382.jpg 0 0 0 0 1 0 0 0 0 0 285 | train/3_9457.jpg 0 0 0 0 0 0 1 0 0 0 286 | train/2_1951.jpg 0 0 0 0 1 0 0 0 0 0 287 | train/4_8622.jpg 0 0 0 0 0 0 0 0 0 1 288 | test/999_4258.jpg 0 0 0 0 0 0 0 0 1 0 289 | train/4_1576.jpg 0 1 0 0 0 0 0 0 0 0 290 | train/3_7502.jpg 0 0 1 0 0 0 0 0 0 0 291 | train/1_8153.jpg 0 0 0 0 0 0 0 0 1 0 292 | train/1_4353.jpg 0 0 1 0 0 0 0 0 0 0 293 | test/999_3378.jpg 0 0 0 0 1 0 0 0 0 0 294 | train/4_1358.jpg 0 0 0 0 0 1 0 0 0 0 295 | train/3_9044.jpg 1 0 0 0 0 0 0 0 0 0 296 | train/4_8275.jpg 0 1 0 0 0 0 0 0 0 0 297 | train/0_7151.jpg 0 0 1 0 0 0 0 0 0 0 298 | train/1_4392.jpg 0 0 0 0 0 0 1 0 0 0 299 | train/1_5200.jpg 0 0 1 0 0 0 0 0 0 0 300 | train/2_8912.jpg 1 0 0 0 0 0 0 0 0 0 301 | train/2_6107.jpg 0 0 0 0 0 0 0 1 0 0 302 | train/1_1479.jpg 1 0 0 0 0 0 0 0 0 0 303 | test/999_3836.jpg 0 0 0 0 0 0 0 1 0 0 304 | test/999_9789.jpg 0 0 0 0 0 0 1 0 0 0 305 | train/1_9277.jpg 1 0 0 0 0 0 0 0 0 0 306 | train/0_9452.jpg 0 0 0 0 1 0 0 0 0 0 307 | train/3_2361.jpg 0 0 1 0 0 0 0 0 0 0 308 | train/3_3771.jpg 0 0 1 0 0 0 0 0 0 0 309 | train/2_4810.jpg 0 0 0 0 1 0 0 0 0 0 310 | train/1_2712.jpg 0 0 0 0 0 0 0 0 0 1 311 | train/1_3547.jpg 0 0 0 0 0 0 0 1 0 0 312 | train/4_9267.jpg 0 0 0 0 0 1 0 0 0 0 313 | train/2_7066.jpg 0 0 0 0 0 1 0 0 0 0 314 | test/999_4620.jpg 0 0 0 0 0 0 0 1 0 0 315 | train/2_2265.jpg 0 1 0 0 0 0 0 0 0 0 316 | test/999_1608.jpg 0 0 0 0 1 0 0 0 0 0 317 | train/2_3964.jpg 1 0 0 0 0 0 0 0 0 0 318 | train/0_7236.jpg 0 0 0 0 0 1 0 0 0 0 319 | train/2_6436.jpg 0 0 0 1 0 0 0 0 0 0 320 | train/3_365.jpg 0 0 1 0 0 0 0 0 0 0 321 | test/999_6271.jpg 0 0 0 0 0 0 1 0 0 0 322 | train/4_2485.jpg 0 0 0 0 0 0 0 1 0 0 323 | train/3_3529.jpg 0 0 0 0 1 0 0 0 0 0 324 | train/1_8730.jpg 0 0 0 0 1 0 0 0 0 0 325 | train/0_829.jpg 0 0 1 0 0 0 0 0 0 0 326 | train/2_1370.jpg 0 0 0 0 0 1 0 0 0 0 327 | train/1_9712.jpg 0 0 0 0 0 0 0 1 0 0 328 | train/1_1889.jpg 0 0 0 0 0 0 0 0 0 1 329 | test/999_8840.jpg 0 0 1 0 0 0 0 0 0 0 330 | test/999_810.jpg 0 0 0 0 0 1 0 0 0 0 331 | train/3_4703.jpg 0 0 0 1 0 0 0 0 0 0 332 | train/1_2717.jpg 0 0 0 0 1 0 0 0 0 0 333 | train/1_6358.jpg 0 0 0 0 0 1 0 0 0 0 334 | test/999_98.jpg 1 0 0 0 0 0 0 0 0 0 335 | train/3_683.jpg 0 0 0 1 0 0 0 0 0 0 336 | train/0_5501.jpg 0 0 0 0 1 0 0 0 0 0 337 | train/1_3969.jpg 0 0 0 0 0 0 0 0 0 1 338 | train/0_151.jpg 0 0 0 0 0 0 1 0 0 0 339 | train/1_6261.jpg 0 1 0 0 0 0 0 0 0 0 340 | train/1_1064.jpg 0 0 0 0 1 0 0 0 0 0 341 | train/3_3345.jpg 0 0 0 1 0 0 0 0 0 0 342 | train/4_1096.jpg 0 0 0 0 0 1 0 0 0 0 343 | train/4_7908.jpg 0 0 0 0 1 0 0 0 0 0 344 | test/999_2469.jpg 0 0 0 0 0 0 0 0 0 1 345 | train/1_7254.jpg 0 0 0 0 0 0 0 1 0 0 346 | train/1_4452.jpg 0 0 0 0 0 0 0 1 0 0 347 | test/999_2942.jpg 0 0 0 0 0 1 0 0 0 0 348 | test/999_1653.jpg 0 0 0 0 0 0 0 0 0 1 349 | test/999_1185.jpg 0 1 0 0 0 0 0 0 0 0 350 | train/3_643.jpg 1 0 0 0 0 0 0 0 0 0 351 | test/999_702.jpg 0 0 0 0 1 0 0 0 0 0 352 | test/999_543.jpg 0 0 0 0 0 0 0 1 0 0 353 | train/1_3045.jpg 0 0 0 0 0 1 0 0 0 0 354 | train/3_1014.jpg 0 0 0 0 0 0 0 0 0 1 355 | train/0_7176.jpg 0 1 0 0 0 0 0 0 0 0 356 | train/2_7861.jpg 0 0 0 0 0 0 0 0 1 0 357 | train/2_9589.jpg 0 1 0 0 0 0 0 0 0 0 358 | train/0_9042.jpg 0 1 0 0 0 0 0 0 0 0 359 | train/2_2135.jpg 0 0 0 1 0 0 0 0 0 0 360 | train/3_53.jpg 0 0 0 0 0 0 0 1 0 0 361 | train/4_2067.jpg 0 0 0 0 0 0 1 0 0 0 362 | train/4_5254.jpg 0 0 0 0 0 0 1 0 0 0 363 | train/3_4425.jpg 0 0 0 0 1 0 0 0 0 0 364 | train/4_4071.jpg 0 0 0 0 1 0 0 0 0 0 365 | train/2_4652.jpg 0 0 0 1 0 0 0 0 0 0 366 | train/1_8364.jpg 0 0 1 0 0 0 0 0 0 0 367 | test/999_5796.jpg 0 0 0 0 0 1 0 0 0 0 368 | train/1_9940.jpg 0 0 0 0 0 0 0 1 0 0 369 | train/3_4271.jpg 0 0 0 0 0 0 0 0 0 1 370 | train/0_6152.jpg 0 0 0 0 0 0 0 0 0 1 371 | train/2_9814.jpg 0 0 1 0 0 0 0 0 0 0 372 | train/0_7071.jpg 0 0 0 0 0 0 0 0 0 1 373 | test/999_539.jpg 1 0 0 0 0 0 0 0 0 0 374 | train/1_6124.jpg 0 0 0 0 0 0 1 0 0 0 375 | train/4_9794.jpg 0 0 0 0 0 0 0 1 0 0 376 | train/4_763.jpg 0 0 0 0 0 1 0 0 0 0 377 | train/0_7598.jpg 0 0 0 0 0 0 0 1 0 0 378 | train/0_3510.jpg 0 0 0 0 0 0 0 0 0 1 379 | train/0_6910.jpg 0 0 0 1 0 0 0 0 0 0 380 | train/3_1957.jpg 0 1 0 0 0 0 0 0 0 0 381 | train/3_7445.jpg 0 0 0 1 0 0 0 0 0 0 382 | train/4_9862.jpg 0 0 0 0 0 0 1 0 0 0 383 | train/1_4126.jpg 0 0 0 0 0 0 0 1 0 0 384 | train/4_2589.jpg 1 0 0 0 0 0 0 0 0 0 385 | train/3_975.jpg 0 0 0 1 0 0 0 0 0 0 386 | test/999_4878.jpg 0 0 0 0 0 0 0 1 0 0 387 | train/3_3910.jpg 0 1 0 0 0 0 0 0 0 0 388 | train/0_7189.jpg 0 0 0 0 0 1 0 0 0 0 389 | test/999_4033.jpg 0 0 0 0 0 0 1 0 0 0 390 | test/999_7634.jpg 0 1 0 0 0 0 0 0 0 0 391 | train/4_171.jpg 0 0 0 0 1 0 0 0 0 0 392 | train/2_1977.jpg 0 0 0 0 0 1 0 0 0 0 393 | train/0_6897.jpg 0 0 0 0 1 0 0 0 0 0 394 | train/3_3333.jpg 0 0 0 0 0 1 0 0 0 0 395 | train/2_3690.jpg 0 0 0 1 0 0 0 0 0 0 396 | train/0_5626.jpg 0 0 0 0 0 1 0 0 0 0 397 | test/999_5958.jpg 0 0 0 0 0 0 0 0 0 1 398 | train/4_9349.jpg 1 0 0 0 0 0 0 0 0 0 399 | train/2_4100.jpg 0 0 0 1 0 0 0 0 0 0 400 | test/999_1802.jpg 0 0 1 0 0 0 0 0 0 0 401 | train/2_1588.jpg 0 0 0 0 0 0 1 0 0 0 402 | train/3_226.jpg 0 0 0 0 1 0 0 0 0 0 403 | train/0_4828.jpg 0 0 0 0 0 0 0 0 1 0 404 | train/1_4720.jpg 0 0 1 0 0 0 0 0 0 0 405 | train/3_1114.jpg 0 0 0 0 0 0 0 0 0 1 406 | train/3_5085.jpg 0 0 0 0 0 0 0 0 0 1 407 | train/2_5808.jpg 1 0 0 0 0 0 0 0 0 0 408 | train/3_2033.jpg 1 0 0 0 0 0 0 0 0 0 409 | train/3_4110.jpg 0 0 0 0 0 0 0 0 0 1 410 | train/4_7723.jpg 0 0 0 0 0 0 0 0 1 0 411 | train/1_1223.jpg 0 1 0 0 0 0 0 0 0 0 412 | train/2_4676.jpg 1 0 0 0 0 0 0 0 0 0 413 | train/2_6882.jpg 0 0 0 0 1 0 0 0 0 0 414 | train/0_1330.jpg 0 0 0 0 0 1 0 0 0 0 415 | train/1_5656.jpg 0 0 0 0 1 0 0 0 0 0 416 | train/4_9650.jpg 0 0 0 0 0 0 0 1 0 0 417 | train/3_3819.jpg 1 0 0 0 0 0 0 0 0 0 418 | train/0_9372.jpg 0 1 0 0 0 0 0 0 0 0 419 | train/2_1173.jpg 0 0 0 0 1 0 0 0 0 0 420 | train/1_7632.jpg 0 0 0 0 1 0 0 0 0 0 421 | train/2_8937.jpg 0 1 0 0 0 0 0 0 0 0 422 | train/4_2829.jpg 1 0 0 0 0 0 0 0 0 0 423 | train/1_2292.jpg 0 1 0 0 0 0 0 0 0 0 424 | train/2_9348.jpg 1 0 0 0 0 0 0 0 0 0 425 | train/4_7862.jpg 1 0 0 0 0 0 0 0 0 0 426 | test/999_6826.jpg 0 0 0 0 0 0 1 0 0 0 427 | train/4_2343.jpg 0 0 0 0 0 0 0 0 0 1 428 | train/2_3218.jpg 0 0 0 1 0 0 0 0 0 0 429 | train/4_4800.jpg 0 0 0 1 0 0 0 0 0 0 430 | train/2_2719.jpg 0 0 0 0 0 0 0 0 0 1 431 | test/999_4007.jpg 0 0 0 0 0 0 1 0 0 0 432 | train/1_6166.jpg 0 0 0 0 0 1 0 0 0 0 433 | train/4_5976.jpg 0 0 0 0 0 0 1 0 0 0 434 | train/0_2059.jpg 0 0 0 0 0 1 0 0 0 0 435 | train/1_5519.jpg 0 0 0 0 1 0 0 0 0 0 436 | test/999_2039.jpg 0 0 0 0 0 0 0 0 1 0 437 | train/2_6702.jpg 0 0 0 0 0 0 0 1 0 0 438 | train/0_1775.jpg 0 0 0 0 0 0 0 0 1 0 439 | train/0_9026.jpg 0 0 0 0 0 0 0 0 0 1 440 | train/2_7017.jpg 0 0 0 0 1 0 0 0 0 0 441 | train/3_1291.jpg 0 0 0 0 0 0 0 0 0 1 442 | test/999_8771.jpg 0 0 0 0 0 0 1 0 0 0 443 | train/2_4398.jpg 0 1 0 0 0 0 0 0 0 0 444 | test/999_8857.jpg 1 0 0 0 0 0 0 0 0 0 445 | train/1_3684.jpg 0 0 0 0 0 0 0 0 1 0 446 | train/0_109.jpg 0 0 0 0 0 0 0 0 0 1 447 | test/999_617.jpg 0 0 0 0 0 0 1 0 0 0 448 | train/2_4017.jpg 0 0 0 1 0 0 0 0 0 0 449 | test/999_1548.jpg 0 0 0 0 0 1 0 0 0 0 450 | train/3_3076.jpg 1 0 0 0 0 0 0 0 0 0 451 | train/2_6706.jpg 0 0 0 0 1 0 0 0 0 0 452 | train/3_4216.jpg 0 0 0 0 0 1 0 0 0 0 453 | train/1_8090.jpg 0 0 0 0 0 0 1 0 0 0 454 | train/0_8747.jpg 0 0 0 0 0 1 0 0 0 0 455 | train/4_344.jpg 0 0 0 0 0 0 0 0 1 0 456 | train/1_3541.jpg 0 0 0 1 0 0 0 0 0 0 457 | train/3_5231.jpg 0 0 0 0 0 0 0 0 0 1 458 | train/2_9673.jpg 0 0 1 0 0 0 0 0 0 0 459 | test/999_1426.jpg 0 0 0 0 0 0 0 1 0 0 460 | test/999_502.jpg 0 0 0 0 0 1 0 0 0 0 461 | train/4_6287.jpg 1 0 0 0 0 0 0 0 0 0 462 | train/4_721.jpg 0 0 0 0 0 1 0 0 0 0 463 | train/3_7961.jpg 0 0 0 0 0 0 0 0 0 1 464 | train/3_9473.jpg 0 0 0 0 1 0 0 0 0 0 465 | train/1_8057.jpg 0 1 0 0 0 0 0 0 0 0 466 | test/999_7847.jpg 0 0 0 0 0 0 0 1 0 0 467 | train/4_3848.jpg 0 0 0 0 0 0 0 1 0 0 468 | train/3_533.jpg 0 0 0 0 0 0 0 0 1 0 469 | train/2_27.jpg 0 0 0 0 0 0 1 0 0 0 470 | train/4_2293.jpg 0 0 0 0 0 1 0 0 0 0 471 | test/999_5722.jpg 1 0 0 0 0 0 0 0 0 0 472 | train/0_8463.jpg 0 0 0 0 0 1 0 0 0 0 473 | train/0_2323.jpg 0 0 0 0 0 0 0 0 1 0 474 | train/1_4731.jpg 0 0 0 0 0 0 0 0 0 1 475 | train/3_7806.jpg 1 0 0 0 0 0 0 0 0 0 476 | train/0_3986.jpg 0 0 0 0 0 0 1 0 0 0 477 | train/1_9516.jpg 0 0 0 0 1 0 0 0 0 0 478 | train/4_7770.jpg 0 0 0 0 0 0 0 0 1 0 479 | train/3_4518.jpg 0 0 0 0 0 0 0 1 0 0 480 | train/1_5110.jpg 0 0 0 0 0 0 1 0 0 0 481 | train/2_3828.jpg 0 0 0 0 0 0 0 1 0 0 482 | train/1_8312.jpg 0 0 0 0 0 0 0 0 1 0 483 | train/3_7170.jpg 0 0 0 0 0 0 0 0 0 1 484 | train/2_2597.jpg 0 0 0 0 0 0 0 0 1 0 485 | train/3_5782.jpg 1 0 0 0 0 0 0 0 0 0 486 | train/3_5465.jpg 0 1 0 0 0 0 0 0 0 0 487 | train/1_1829.jpg 0 1 0 0 0 0 0 0 0 0 488 | train/1_9428.jpg 0 0 1 0 0 0 0 0 0 0 489 | train/1_9753.jpg 1 0 0 0 0 0 0 0 0 0 490 | train/1_8679.jpg 0 0 0 1 0 0 0 0 0 0 491 | train/4_2723.jpg 0 0 0 0 0 0 1 0 0 0 492 | test/999_1516.jpg 0 0 0 0 1 0 0 0 0 0 493 | train/2_8476.jpg 0 1 0 0 0 0 0 0 0 0 494 | train/1_1504.jpg 0 0 0 0 0 0 0 0 0 1 495 | train/3_8891.jpg 0 0 0 1 0 0 0 0 0 0 496 | train/2_76.jpg 0 0 0 0 0 1 0 0 0 0 497 | train/1_3942.jpg 0 0 0 0 0 0 0 1 0 0 498 | train/4_8749.jpg 0 1 0 0 0 0 0 0 0 0 499 | test/999_6953.jpg 1 0 0 0 0 0 0 0 0 0 500 | train/2_4911.jpg 0 0 1 0 0 0 0 0 0 0 501 | test/999_66.jpg 0 1 0 0 0 0 0 0 0 0 502 | train/3_176.jpg 0 0 0 0 0 0 1 0 0 0 503 | train/0_4002.jpg 0 0 0 0 0 1 0 0 0 0 504 | train/2_6314.jpg 0 0 0 0 0 0 0 1 0 0 505 | train/4_4688.jpg 0 0 1 0 0 0 0 0 0 0 506 | train/3_5905.jpg 0 0 0 0 0 1 0 0 0 0 507 | train/1_4440.jpg 0 0 0 0 1 0 0 0 0 0 508 | train/0_463.jpg 0 0 1 0 0 0 0 0 0 0 509 | train/1_5565.jpg 0 0 1 0 0 0 0 0 0 0 510 | test/999_9823.jpg 0 0 0 0 0 1 0 0 0 0 511 | train/2_6461.jpg 0 0 0 0 0 0 1 0 0 0 512 | train/3_632.jpg 0 0 1 0 0 0 0 0 0 0 513 | test/999_5580.jpg 0 0 0 0 0 1 0 0 0 0 514 | train/0_9278.jpg 0 0 0 0 0 0 1 0 0 0 515 | train/4_4163.jpg 0 0 0 0 0 0 1 0 0 0 516 | train/4_1764.jpg 0 0 1 0 0 0 0 0 0 0 517 | train/0_7428.jpg 0 1 0 0 0 0 0 0 0 0 518 | train/3_980.jpg 0 0 1 0 0 0 0 0 0 0 519 | train/4_9301.jpg 0 0 0 0 0 1 0 0 0 0 520 | train/1_9068.jpg 0 0 0 1 0 0 0 0 0 0 521 | test/999_7274.jpg 0 1 0 0 0 0 0 0 0 0 522 | train/2_4247.jpg 0 1 0 0 0 0 0 0 0 0 523 | train/1_8494.jpg 0 0 0 0 0 0 1 0 0 0 524 | train/0_4709.jpg 0 0 0 0 1 0 0 0 0 0 525 | train/4_6494.jpg 0 0 0 0 0 1 0 0 0 0 526 | train/4_9020.jpg 0 0 1 0 0 0 0 0 0 0 527 | train/3_1410.jpg 0 0 1 0 0 0 0 0 0 0 528 | train/3_2732.jpg 0 0 0 1 0 0 0 0 0 0 529 | train/4_6693.jpg 0 1 0 0 0 0 0 0 0 0 530 | train/3_8231.jpg 0 1 0 0 0 0 0 0 0 0 531 | train/1_9933.jpg 0 0 0 0 0 1 0 0 0 0 532 | train/0_7195.jpg 0 0 0 0 0 1 0 0 0 0 533 | train/3_1243.jpg 0 0 0 0 0 1 0 0 0 0 534 | test/999_4344.jpg 0 0 0 0 1 0 0 0 0 0 535 | train/0_6467.jpg 0 0 1 0 0 0 0 0 0 0 536 | train/2_8729.jpg 0 0 0 0 1 0 0 0 0 0 537 | test/999_4142.jpg 0 0 0 0 0 0 0 0 1 0 538 | test/999_3988.jpg 0 1 0 0 0 0 0 0 0 0 539 | train/3_6085.jpg 0 1 0 0 0 0 0 0 0 0 540 | train/1_297.jpg 1 0 0 0 0 0 0 0 0 0 541 | train/2_7421.jpg 0 0 0 0 0 0 0 1 0 0 542 | train/3_8938.jpg 0 0 0 0 0 0 0 1 0 0 543 | train/3_1877.jpg 0 0 0 0 0 1 0 0 0 0 544 | train/4_8285.jpg 0 0 0 1 0 0 0 0 0 0 545 | train/2_6420.jpg 0 0 0 0 0 0 0 1 0 0 546 | train/3_4641.jpg 0 0 0 0 0 0 0 0 1 0 547 | train/3_4791.jpg 0 1 0 0 0 0 0 0 0 0 548 | train/1_4919.jpg 0 0 0 1 0 0 0 0 0 0 549 | train/4_9041.jpg 0 0 0 0 0 0 1 0 0 0 550 | test/999_1509.jpg 0 1 0 0 0 0 0 0 0 0 551 | train/1_8017.jpg 0 0 0 0 0 0 0 1 0 0 552 | train/1_2345.jpg 1 0 0 0 0 0 0 0 0 0 553 | train/4_1492.jpg 0 0 0 0 0 0 0 1 0 0 554 | train/4_4974.jpg 0 0 0 0 1 0 0 0 0 0 555 | train/0_8070.jpg 0 0 0 0 0 0 0 0 1 0 556 | train/1_4849.jpg 0 1 0 0 0 0 0 0 0 0 557 | train/2_4505.jpg 0 0 0 0 0 0 0 1 0 0 558 | train/1_5709.jpg 0 0 0 0 0 1 0 0 0 0 559 | train/3_3660.jpg 0 0 0 0 0 0 0 1 0 0 560 | test/999_3496.jpg 0 0 0 0 0 1 0 0 0 0 561 | train/3_8781.jpg 0 0 0 1 0 0 0 0 0 0 562 | train/0_513.jpg 0 0 1 0 0 0 0 0 0 0 563 | train/4_8647.jpg 0 0 0 0 0 0 0 0 1 0 564 | test/999_9103.jpg 0 0 0 0 0 0 0 1 0 0 565 | train/2_589.jpg 0 0 1 0 0 0 0 0 0 0 566 | train/4_9744.jpg 0 0 0 0 0 0 0 0 1 0 567 | test/999_5396.jpg 0 1 0 0 0 0 0 0 0 0 568 | train/3_2393.jpg 0 0 0 0 0 1 0 0 0 0 569 | train/2_8628.jpg 0 0 1 0 0 0 0 0 0 0 570 | train/3_6256.jpg 0 0 0 0 0 0 0 1 0 0 571 | train/1_8118.jpg 0 0 0 0 0 0 0 0 1 0 572 | train/2_2446.jpg 0 0 1 0 0 0 0 0 0 0 573 | train/0_733.jpg 1 0 0 0 0 0 0 0 0 0 574 | test/999_6990.jpg 0 0 0 1 0 0 0 0 0 0 575 | train/1_6996.jpg 0 0 0 0 0 0 0 1 0 0 576 | train/2_7054.jpg 0 0 0 0 0 0 0 1 0 0 577 | train/3_609.jpg 1 0 0 0 0 0 0 0 0 0 578 | train/1_4165.jpg 0 1 0 0 0 0 0 0 0 0 579 | train/3_3018.jpg 0 0 0 0 0 0 1 0 0 0 580 | train/4_92.jpg 0 1 0 0 0 0 0 0 0 0 581 | train/3_262.jpg 0 0 0 0 0 0 0 0 1 0 582 | train/4_4093.jpg 0 0 0 1 0 0 0 0 0 0 583 | train/2_3254.jpg 1 0 0 0 0 0 0 0 0 0 584 | train/4_4355.jpg 0 0 0 1 0 0 0 0 0 0 585 | train/1_6887.jpg 0 0 0 0 0 0 1 0 0 0 586 | test/999_8790.jpg 1 0 0 0 0 0 0 0 0 0 587 | train/2_1795.jpg 0 1 0 0 0 0 0 0 0 0 588 | train/0_3764.jpg 0 0 0 0 0 0 0 0 1 0 589 | train/2_4420.jpg 1 0 0 0 0 0 0 0 0 0 590 | test/999_2423.jpg 0 0 1 0 0 0 0 0 0 0 591 | train/4_6488.jpg 0 0 0 0 0 0 0 0 1 0 592 | train/0_2839.jpg 0 0 0 0 1 0 0 0 0 0 593 | train/3_8172.jpg 0 0 0 0 0 1 0 0 0 0 594 | train/4_8028.jpg 0 0 0 1 0 0 0 0 0 0 595 | train/2_9226.jpg 0 1 0 0 0 0 0 0 0 0 596 | train/0_6256.jpg 0 0 0 1 0 0 0 0 0 0 597 | train/2_1221.jpg 0 0 0 0 0 0 0 0 0 1 598 | test/999_150.jpg 0 0 0 0 0 0 0 0 1 0 599 | train/0_3709.jpg 0 0 0 1 0 0 0 0 0 0 600 | train/1_5555.jpg 0 0 0 1 0 0 0 0 0 0 601 | train/3_9087.jpg 0 0 0 0 0 0 1 0 0 0 602 | train/1_2560.jpg 0 1 0 0 0 0 0 0 0 0 603 | train/2_3973.jpg 0 0 0 0 0 0 0 0 1 0 604 | train/2_2854.jpg 0 0 0 0 0 0 0 1 0 0 605 | train/1_1673.jpg 0 0 0 0 0 0 0 0 1 0 606 | train/3_1219.jpg 0 0 0 0 0 0 0 0 1 0 607 | train/1_2754.jpg 1 0 0 0 0 0 0 0 0 0 608 | train/0_8234.jpg 0 0 1 0 0 0 0 0 0 0 609 | train/4_3852.jpg 0 0 0 0 0 1 0 0 0 0 610 | train/1_7246.jpg 0 0 0 0 0 0 0 0 1 0 611 | train/4_5671.jpg 0 0 0 0 0 0 0 0 0 1 612 | train/0_5800.jpg 0 0 0 1 0 0 0 0 0 0 613 | test/999_6233.jpg 0 0 0 0 0 0 0 1 0 0 614 | train/1_2636.jpg 0 0 0 0 1 0 0 0 0 0 615 | test/999_3376.jpg 0 0 0 0 0 1 0 0 0 0 616 | train/2_5310.jpg 0 0 0 0 0 0 0 1 0 0 617 | train/3_1324.jpg 0 0 0 0 0 0 0 0 0 1 618 | train/4_420.jpg 1 0 0 0 0 0 0 0 0 0 619 | train/2_991.jpg 0 0 0 0 0 1 0 0 0 0 620 | train/4_811.jpg 0 0 0 0 0 0 1 0 0 0 621 | train/0_69.jpg 0 0 0 0 0 0 0 0 1 0 622 | train/1_5745.jpg 0 0 0 0 0 0 0 0 1 0 623 | train/0_5962.jpg 0 0 0 0 0 1 0 0 0 0 624 | test/999_112.jpg 0 0 0 0 0 0 1 0 0 0 625 | train/1_3690.jpg 0 0 0 0 0 0 0 0 0 1 626 | train/1_4533.jpg 0 0 0 0 0 1 0 0 0 0 627 | train/4_1287.jpg 0 0 0 0 0 0 1 0 0 0 628 | train/1_4598.jpg 0 1 0 0 0 0 0 0 0 0 629 | train/2_6728.jpg 0 0 0 0 0 1 0 0 0 0 630 | train/0_6296.jpg 0 0 1 0 0 0 0 0 0 0 631 | train/4_9653.jpg 0 0 0 0 0 0 1 0 0 0 632 | train/3_7760.jpg 0 0 0 0 0 0 0 1 0 0 633 | train/0_9709.jpg 0 0 0 0 0 0 1 0 0 0 634 | train/3_9101.jpg 0 0 0 1 0 0 0 0 0 0 635 | test/999_1668.jpg 0 0 0 0 0 0 0 0 0 1 636 | train/1_232.jpg 0 0 0 0 0 0 0 1 0 0 637 | test/999_6332.jpg 0 0 0 0 0 1 0 0 0 0 638 | train/0_7652.jpg 0 0 0 0 0 0 0 0 0 1 639 | train/0_9297.jpg 0 0 1 0 0 0 0 0 0 0 640 | train/2_6784.jpg 0 0 0 0 0 0 0 0 1 0 641 | train/1_5990.jpg 0 0 0 0 1 0 0 0 0 0 642 | test/999_3605.jpg 0 0 0 0 0 0 1 0 0 0 643 | test/999_7875.jpg 0 0 1 0 0 0 0 0 0 0 644 | test/999_271.jpg 0 0 0 1 0 0 0 0 0 0 645 | train/3_9937.jpg 0 0 0 0 0 0 0 0 1 0 646 | train/3_2768.jpg 0 0 0 0 0 0 1 0 0 0 647 | train/2_5622.jpg 0 0 0 0 0 0 1 0 0 0 648 | train/4_4027.jpg 1 0 0 0 0 0 0 0 0 0 649 | test/999_4049.jpg 0 0 0 0 1 0 0 0 0 0 650 | train/3_3387.jpg 0 0 0 1 0 0 0 0 0 0 651 | train/0_6416.jpg 0 0 0 0 0 0 1 0 0 0 652 | train/3_4964.jpg 1 0 0 0 0 0 0 0 0 0 653 | train/0_5862.jpg 0 0 1 0 0 0 0 0 0 0 654 | train/3_8562.jpg 0 1 0 0 0 0 0 0 0 0 655 | train/3_2135.jpg 0 1 0 0 0 0 0 0 0 0 656 | test/999_8321.jpg 0 0 0 0 1 0 0 0 0 0 657 | train/0_7279.jpg 0 0 1 0 0 0 0 0 0 0 658 | train/4_6911.jpg 0 0 0 0 0 1 0 0 0 0 659 | train/0_4457.jpg 0 0 0 0 0 0 0 0 0 1 660 | train/0_2235.jpg 0 0 0 0 1 0 0 0 0 0 661 | test/999_4860.jpg 0 0 0 0 0 1 0 0 0 0 662 | train/4_5149.jpg 0 0 0 0 0 0 1 0 0 0 663 | train/3_1421.jpg 0 0 0 0 0 0 1 0 0 0 664 | train/3_4566.jpg 0 0 1 0 0 0 0 0 0 0 665 | train/1_2629.jpg 0 1 0 0 0 0 0 0 0 0 666 | train/2_7391.jpg 0 0 0 0 1 0 0 0 0 0 667 | train/4_7740.jpg 1 0 0 0 0 0 0 0 0 0 668 | train/1_1287.jpg 0 0 0 0 0 0 1 0 0 0 669 | train/0_5967.jpg 0 0 1 0 0 0 0 0 0 0 670 | train/2_3320.jpg 1 0 0 0 0 0 0 0 0 0 671 | train/3_6570.jpg 0 0 1 0 0 0 0 0 0 0 672 | train/1_2303.jpg 0 1 0 0 0 0 0 0 0 0 673 | train/4_3188.jpg 0 0 0 0 1 0 0 0 0 0 674 | train/0_1821.jpg 0 0 0 0 0 0 0 0 1 0 675 | test/999_8308.jpg 0 0 1 0 0 0 0 0 0 0 676 | train/4_227.jpg 0 0 0 1 0 0 0 0 0 0 677 | train/2_4072.jpg 1 0 0 0 0 0 0 0 0 0 678 | train/2_6458.jpg 0 0 0 0 0 0 0 0 0 1 679 | train/2_6529.jpg 0 0 0 0 0 1 0 0 0 0 680 | train/1_3271.jpg 0 0 0 0 0 0 0 1 0 0 681 | train/1_3047.jpg 0 0 1 0 0 0 0 0 0 0 682 | train/2_1369.jpg 0 1 0 0 0 0 0 0 0 0 683 | train/3_815.jpg 0 0 0 0 1 0 0 0 0 0 684 | train/3_1106.jpg 0 0 0 0 0 0 0 0 0 1 685 | train/2_8980.jpg 0 0 0 0 0 0 1 0 0 0 686 | train/2_2.jpg 1 0 0 0 0 0 0 0 0 0 687 | train/0_4945.jpg 0 1 0 0 0 0 0 0 0 0 688 | test/999_4837.jpg 0 0 0 0 0 0 0 1 0 0 689 | train/4_7380.jpg 0 0 0 0 0 0 0 0 1 0 690 | test/999_679.jpg 0 0 0 0 0 1 0 0 0 0 691 | train/4_4828.jpg 0 1 0 0 0 0 0 0 0 0 692 | train/4_4698.jpg 0 0 0 0 0 0 0 0 0 1 693 | train/4_8511.jpg 0 0 0 0 0 0 1 0 0 0 694 | train/0_6686.jpg 0 0 0 0 0 0 1 0 0 0 695 | train/1_8418.jpg 0 0 0 0 1 0 0 0 0 0 696 | train/1_8817.jpg 0 0 0 0 0 0 0 0 0 1 697 | train/1_3116.jpg 0 0 0 0 0 0 0 0 1 0 698 | train/1_2774.jpg 0 1 0 0 0 0 0 0 0 0 699 | train/1_7380.jpg 0 0 1 0 0 0 0 0 0 0 700 | test/999_2270.jpg 0 0 1 0 0 0 0 0 0 0 701 | train/2_2176.jpg 0 0 0 0 0 0 1 0 0 0 702 | train/0_9330.jpg 0 0 1 0 0 0 0 0 0 0 703 | train/4_1447.jpg 0 0 0 0 0 0 0 0 1 0 704 | train/4_6139.jpg 0 0 0 0 0 0 0 0 1 0 705 | test/999_35.jpg 0 0 1 0 0 0 0 0 0 0 706 | train/3_9306.jpg 0 0 0 0 0 0 0 1 0 0 707 | train/0_6939.jpg 0 0 0 0 0 0 0 1 0 0 708 | train/4_2736.jpg 0 0 0 1 0 0 0 0 0 0 709 | train/2_630.jpg 0 0 0 1 0 0 0 0 0 0 710 | train/3_789.jpg 0 0 0 1 0 0 0 0 0 0 711 | test/999_4103.jpg 0 0 0 0 0 0 0 0 1 0 712 | train/0_5180.jpg 0 0 0 0 1 0 0 0 0 0 713 | train/1_7905.jpg 0 0 0 0 0 0 0 0 0 1 714 | test/999_5306.jpg 0 0 0 1 0 0 0 0 0 0 715 | train/1_8969.jpg 0 0 0 1 0 0 0 0 0 0 716 | test/999_1246.jpg 0 0 0 0 0 0 0 1 0 0 717 | train/3_921.jpg 0 0 0 0 1 0 0 0 0 0 718 | train/1_9509.jpg 0 0 0 0 0 0 0 0 1 0 719 | train/2_3494.jpg 0 0 0 0 0 1 0 0 0 0 720 | train/2_4460.jpg 0 0 0 1 0 0 0 0 0 0 721 | train/1_848.jpg 0 0 0 0 0 0 0 0 1 0 722 | train/0_4811.jpg 0 0 1 0 0 0 0 0 0 0 723 | train/1_6240.jpg 0 0 0 0 1 0 0 0 0 0 724 | train/2_4049.jpg 0 0 0 0 0 0 1 0 0 0 725 | train/0_4857.jpg 0 0 0 0 1 0 0 0 0 0 726 | train/1_4331.jpg 0 0 0 0 0 1 0 0 0 0 727 | train/4_2146.jpg 0 0 0 0 1 0 0 0 0 0 728 | train/4_2409.jpg 0 0 0 0 0 1 0 0 0 0 729 | test/999_13.jpg 0 0 0 0 0 0 0 1 0 0 730 | train/1_6666.jpg 0 0 0 0 0 0 0 1 0 0 731 | train/1_264.jpg 0 0 0 0 1 0 0 0 0 0 732 | train/4_9563.jpg 0 0 0 1 0 0 0 0 0 0 733 | train/2_4412.jpg 0 0 0 0 0 0 0 0 1 0 734 | train/4_1202.jpg 0 0 0 0 1 0 0 0 0 0 735 | train/3_8251.jpg 0 0 0 0 0 0 1 0 0 0 736 | train/4_4458.jpg 0 0 0 0 0 0 0 0 1 0 737 | train/2_3354.jpg 0 0 0 0 1 0 0 0 0 0 738 | train/3_6602.jpg 0 0 0 0 0 0 1 0 0 0 739 | train/3_8666.jpg 0 0 0 0 0 0 0 0 1 0 740 | train/1_1820.jpg 0 0 0 0 1 0 0 0 0 0 741 | train/1_6611.jpg 0 0 0 0 0 0 1 0 0 0 742 | train/4_6932.jpg 0 0 0 1 0 0 0 0 0 0 743 | train/1_7683.jpg 0 0 0 0 0 0 0 0 1 0 744 | train/1_5188.jpg 0 0 0 0 0 0 0 0 0 1 745 | train/0_8811.jpg 0 0 0 0 0 0 0 0 0 1 746 | train/0_2838.jpg 0 0 0 0 0 0 0 0 0 1 747 | train/0_39.jpg 0 0 0 1 0 0 0 0 0 0 748 | train/4_3688.jpg 0 0 0 0 0 0 1 0 0 0 749 | train/1_6343.jpg 0 0 0 0 0 0 0 0 0 1 750 | train/1_7888.jpg 0 0 0 0 0 0 0 0 0 1 751 | train/1_3710.jpg 1 0 0 0 0 0 0 0 0 0 752 | train/4_1543.jpg 0 0 0 0 0 0 0 0 1 0 753 | train/3_5054.jpg 0 0 0 0 1 0 0 0 0 0 754 | train/0_752.jpg 1 0 0 0 0 0 0 0 0 0 755 | train/4_2201.jpg 1 0 0 0 0 0 0 0 0 0 756 | train/4_6770.jpg 1 0 0 0 0 0 0 0 0 0 757 | train/3_5828.jpg 0 0 0 0 0 0 0 0 1 0 758 | train/0_8775.jpg 0 0 0 0 0 0 0 0 1 0 759 | train/2_5605.jpg 1 0 0 0 0 0 0 0 0 0 760 | train/4_3738.jpg 0 0 1 0 0 0 0 0 0 0 761 | test/999_7439.jpg 0 0 0 0 0 1 0 0 0 0 762 | train/3_7877.jpg 1 0 0 0 0 0 0 0 0 0 763 | train/0_9103.jpg 0 0 0 0 0 0 1 0 0 0 764 | train/3_7600.jpg 0 0 0 0 0 0 0 0 0 1 765 | train/4_2683.jpg 0 0 0 0 0 0 0 0 1 0 766 | test/999_2981.jpg 0 0 0 0 1 0 0 0 0 0 767 | train/3_6899.jpg 0 0 0 0 0 1 0 0 0 0 768 | train/2_257.jpg 1 0 0 0 0 0 0 0 0 0 769 | train/2_3750.jpg 0 0 0 0 0 0 0 1 0 0 770 | train/4_9057.jpg 0 0 0 0 0 0 0 0 0 1 771 | test/999_2701.jpg 0 1 0 0 0 0 0 0 0 0 772 | train/4_2599.jpg 0 0 0 0 0 0 0 0 0 1 773 | train/3_7905.jpg 1 0 0 0 0 0 0 0 0 0 774 | train/4_2042.jpg 0 0 0 0 0 0 0 0 0 1 775 | train/3_5458.jpg 0 0 0 0 0 0 1 0 0 0 776 | train/2_8972.jpg 1 0 0 0 0 0 0 0 0 0 777 | train/0_7584.jpg 0 0 0 1 0 0 0 0 0 0 778 | test/999_8948.jpg 0 0 0 0 0 0 1 0 0 0 779 | train/2_866.jpg 0 1 0 0 0 0 0 0 0 0 780 | train/3_6563.jpg 0 0 0 0 0 0 1 0 0 0 781 | train/2_4504.jpg 0 0 0 0 1 0 0 0 0 0 782 | train/2_38.jpg 0 0 1 0 0 0 0 0 0 0 783 | train/3_3348.jpg 0 0 0 0 0 0 0 1 0 0 784 | train/1_7813.jpg 0 0 0 0 0 0 0 0 1 0 785 | train/0_2954.jpg 0 0 0 0 0 0 0 0 1 0 786 | train/4_9923.jpg 0 0 0 0 0 0 1 0 0 0 787 | train/2_8451.jpg 0 0 0 0 0 0 0 0 0 1 788 | test/999_4954.jpg 0 1 0 0 0 0 0 0 0 0 789 | train/4_4124.jpg 0 0 0 1 0 0 0 0 0 0 790 | train/4_6500.jpg 0 0 0 1 0 0 0 0 0 0 791 | train/3_5148.jpg 0 0 1 0 0 0 0 0 0 0 792 | train/4_6805.jpg 0 0 0 1 0 0 0 0 0 0 793 | train/0_726.jpg 0 0 0 0 0 1 0 0 0 0 794 | train/3_8750.jpg 0 0 1 0 0 0 0 0 0 0 795 | train/4_4200.jpg 0 0 0 0 1 0 0 0 0 0 796 | train/0_4966.jpg 0 0 0 0 0 0 1 0 0 0 797 | train/1_1677.jpg 0 0 0 0 0 0 0 0 0 1 798 | test/999_8676.jpg 0 0 0 0 0 0 0 0 1 0 799 | train/2_4045.jpg 0 0 0 0 0 0 0 0 1 0 800 | train/0_508.jpg 0 0 0 0 0 0 0 0 0 1 801 | test/999_864.jpg 1 0 0 0 0 0 0 0 0 0 802 | train/2_6598.jpg 0 0 0 1 0 0 0 0 0 0 803 | train/4_1205.jpg 0 0 0 0 0 0 0 1 0 0 804 | train/4_1609.jpg 1 0 0 0 0 0 0 0 0 0 805 | train/0_1924.jpg 0 0 0 0 0 0 0 0 1 0 806 | train/2_8949.jpg 0 0 0 0 0 0 0 0 0 1 807 | train/4_4453.jpg 0 0 0 0 0 0 0 1 0 0 808 | train/4_4865.jpg 0 0 0 1 0 0 0 0 0 0 809 | train/1_7901.jpg 0 1 0 0 0 0 0 0 0 0 810 | train/4_1444.jpg 0 0 0 0 0 0 1 0 0 0 811 | train/4_6700.jpg 0 0 0 0 0 1 0 0 0 0 812 | train/3_9421.jpg 0 0 0 0 0 0 0 1 0 0 813 | train/3_7121.jpg 0 0 1 0 0 0 0 0 0 0 814 | train/2_4312.jpg 0 0 0 0 0 0 1 0 0 0 815 | train/4_6286.jpg 0 0 0 1 0 0 0 0 0 0 816 | train/1_48.jpg 0 0 0 1 0 0 0 0 0 0 817 | train/0_5.jpg 0 1 0 0 0 0 0 0 0 0 818 | train/1_5748.jpg 0 1 0 0 0 0 0 0 0 0 819 | train/2_4151.jpg 0 1 0 0 0 0 0 0 0 0 820 | train/1_2807.jpg 1 0 0 0 0 0 0 0 0 0 821 | train/1_2938.jpg 0 0 0 0 0 0 0 0 1 0 822 | train/1_1019.jpg 0 0 0 0 0 0 0 1 0 0 823 | test/999_4999.jpg 0 1 0 0 0 0 0 0 0 0 824 | train/4_3217.jpg 0 1 0 0 0 0 0 0 0 0 825 | test/999_1055.jpg 0 0 0 0 0 1 0 0 0 0 826 | train/2_6158.jpg 0 1 0 0 0 0 0 0 0 0 827 | train/0_1520.jpg 0 1 0 0 0 0 0 0 0 0 828 | train/3_9724.jpg 0 1 0 0 0 0 0 0 0 0 829 | train/3_5136.jpg 0 0 0 0 0 0 1 0 0 0 830 | train/4_3286.jpg 0 0 1 0 0 0 0 0 0 0 831 | train/2_6767.jpg 0 0 0 0 0 0 0 1 0 0 832 | train/2_4533.jpg 0 0 1 0 0 0 0 0 0 0 833 | train/1_8131.jpg 0 0 0 0 0 1 0 0 0 0 834 | train/1_311.jpg 0 0 0 0 0 0 1 0 0 0 835 | train/1_8227.jpg 0 0 0 0 0 0 0 1 0 0 836 | train/1_4163.jpg 0 0 0 0 0 0 1 0 0 0 837 | train/4_6363.jpg 0 0 0 0 0 0 0 0 1 0 838 | train/0_4773.jpg 0 0 0 0 1 0 0 0 0 0 839 | train/3_9745.jpg 0 1 0 0 0 0 0 0 0 0 840 | test/999_8882.jpg 0 0 0 1 0 0 0 0 0 0 841 | train/4_4412.jpg 0 1 0 0 0 0 0 0 0 0 842 | train/3_6930.jpg 0 0 0 1 0 0 0 0 0 0 843 | train/1_5862.jpg 0 0 0 0 0 1 0 0 0 0 844 | train/3_9608.jpg 0 0 0 1 0 0 0 0 0 0 845 | test/999_7733.jpg 1 0 0 0 0 0 0 0 0 0 846 | test/999_9485.jpg 0 0 0 0 0 0 0 1 0 0 847 | train/4_2203.jpg 0 0 0 0 0 0 0 0 1 0 848 | train/2_3736.jpg 0 0 0 0 0 0 0 0 1 0 849 | train/1_2507.jpg 1 0 0 0 0 0 0 0 0 0 850 | train/4_59.jpg 0 0 0 0 0 0 0 0 0 1 851 | test/999_5415.jpg 0 0 0 0 1 0 0 0 0 0 852 | train/1_9617.jpg 0 0 0 1 0 0 0 0 0 0 853 | train/1_213.jpg 0 0 0 0 0 0 1 0 0 0 854 | train/3_6434.jpg 0 0 0 0 0 0 0 0 0 1 855 | test/999_9321.jpg 0 0 0 0 1 0 0 0 0 0 856 | train/3_1741.jpg 0 0 1 0 0 0 0 0 0 0 857 | train/2_4339.jpg 0 0 0 0 0 1 0 0 0 0 858 | test/999_1153.jpg 0 0 0 0 0 0 0 0 1 0 859 | train/2_3947.jpg 0 0 0 0 0 1 0 0 0 0 860 | train/4_8819.jpg 0 0 0 0 0 0 1 0 0 0 861 | train/1_8467.jpg 0 0 0 0 0 0 0 0 0 1 862 | train/3_9863.jpg 1 0 0 0 0 0 0 0 0 0 863 | train/3_6009.jpg 0 0 1 0 0 0 0 0 0 0 864 | train/4_4630.jpg 1 0 0 0 0 0 0 0 0 0 865 | train/3_57.jpg 0 0 0 0 0 0 1 0 0 0 866 | train/0_9952.jpg 0 0 1 0 0 0 0 0 0 0 867 | train/1_5526.jpg 0 0 0 0 1 0 0 0 0 0 868 | train/3_6847.jpg 0 0 0 0 0 0 1 0 0 0 869 | train/0_9694.jpg 0 0 0 1 0 0 0 0 0 0 870 | train/0_7450.jpg 0 0 0 0 0 0 0 1 0 0 871 | train/2_6256.jpg 0 0 0 0 0 0 0 1 0 0 872 | train/3_997.jpg 0 0 0 0 0 0 0 0 1 0 873 | train/0_9867.jpg 0 0 0 0 0 1 0 0 0 0 874 | train/1_7449.jpg 0 0 0 1 0 0 0 0 0 0 875 | train/1_1687.jpg 0 0 0 0 0 0 1 0 0 0 876 | train/3_965.jpg 0 0 0 0 0 1 0 0 0 0 877 | train/2_9787.jpg 0 0 0 0 1 0 0 0 0 0 878 | train/3_617.jpg 0 0 0 0 0 0 0 1 0 0 879 | train/0_7884.jpg 0 1 0 0 0 0 0 0 0 0 880 | test/999_5080.jpg 0 0 0 0 0 0 0 1 0 0 881 | train/2_273.jpg 0 1 0 0 0 0 0 0 0 0 882 | train/1_5177.jpg 0 0 0 0 0 1 0 0 0 0 883 | train/3_891.jpg 0 1 0 0 0 0 0 0 0 0 884 | train/3_6266.jpg 0 0 0 0 0 0 0 0 0 1 885 | train/4_1666.jpg 0 1 0 0 0 0 0 0 0 0 886 | train/0_2717.jpg 0 0 0 0 0 1 0 0 0 0 887 | test/999_5177.jpg 0 0 0 0 1 0 0 0 0 0 888 | train/1_1301.jpg 0 1 0 0 0 0 0 0 0 0 889 | train/3_7487.jpg 0 0 0 0 0 0 1 0 0 0 890 | train/4_9425.jpg 0 0 0 0 0 0 0 0 0 1 891 | train/4_6244.jpg 0 0 1 0 0 0 0 0 0 0 892 | train/1_7497.jpg 0 0 0 1 0 0 0 0 0 0 893 | test/999_5926.jpg 0 0 0 0 1 0 0 0 0 0 894 | test/999_8592.jpg 1 0 0 0 0 0 0 0 0 0 895 | train/1_9433.jpg 0 0 0 1 0 0 0 0 0 0 896 | train/2_2037.jpg 0 1 0 0 0 0 0 0 0 0 897 | train/3_2597.jpg 0 0 0 0 0 0 0 1 0 0 898 | train/2_2120.jpg 0 0 0 0 0 0 0 1 0 0 899 | train/2_9034.jpg 0 0 0 0 1 0 0 0 0 0 900 | train/1_3137.jpg 0 0 0 0 0 0 1 0 0 0 901 | train/4_7801.jpg 0 0 0 1 0 0 0 0 0 0 902 | train/3_2621.jpg 0 0 0 1 0 0 0 0 0 0 903 | test/999_8940.jpg 0 0 0 0 0 0 1 0 0 0 904 | train/2_7775.jpg 0 0 0 0 0 0 0 1 0 0 905 | train/3_9047.jpg 0 0 0 0 1 0 0 0 0 0 906 | train/1_168.jpg 0 1 0 0 0 0 0 0 0 0 907 | train/1_5159.jpg 0 0 0 0 0 0 0 0 0 1 908 | train/0_6424.jpg 0 0 0 0 0 1 0 0 0 0 909 | train/1_4002.jpg 0 0 0 0 0 0 0 1 0 0 910 | train/4_4783.jpg 0 0 1 0 0 0 0 0 0 0 911 | train/0_1342.jpg 0 0 0 0 0 0 1 0 0 0 912 | train/3_6419.jpg 0 0 0 0 0 0 1 0 0 0 913 | train/2_8758.jpg 0 0 0 0 0 1 0 0 0 0 914 | train/2_8214.jpg 0 0 0 1 0 0 0 0 0 0 915 | train/0_8636.jpg 0 0 0 0 0 0 0 0 1 0 916 | train/2_5624.jpg 1 0 0 0 0 0 0 0 0 0 917 | train/3_8682.jpg 0 1 0 0 0 0 0 0 0 0 918 | train/0_1252.jpg 0 0 0 1 0 0 0 0 0 0 919 | train/3_2888.jpg 0 0 0 0 0 0 0 1 0 0 920 | train/4_7386.jpg 0 0 0 0 0 1 0 0 0 0 921 | train/2_4444.jpg 0 0 0 0 0 0 0 0 1 0 922 | train/2_124.jpg 1 0 0 0 0 0 0 0 0 0 923 | test/999_7481.jpg 0 0 0 0 1 0 0 0 0 0 924 | train/4_577.jpg 0 0 0 1 0 0 0 0 0 0 925 | train/4_6274.jpg 0 0 0 0 0 1 0 0 0 0 926 | train/1_7434.jpg 0 0 0 0 0 0 0 0 0 1 927 | train/4_147.jpg 0 0 0 0 0 0 0 0 1 0 928 | train/2_4841.jpg 0 0 0 0 0 0 0 0 1 0 929 | train/1_8802.jpg 0 0 0 0 0 0 0 1 0 0 930 | train/0_1918.jpg 0 0 1 0 0 0 0 0 0 0 931 | train/1_8970.jpg 0 0 0 1 0 0 0 0 0 0 932 | train/3_1937.jpg 0 0 0 0 0 0 0 0 0 1 933 | train/1_6777.jpg 0 1 0 0 0 0 0 0 0 0 934 | test/999_2946.jpg 0 0 0 0 0 0 0 0 0 1 935 | train/0_2728.jpg 0 0 0 0 0 0 0 0 0 1 936 | train/0_8357.jpg 0 0 0 0 0 0 0 1 0 0 937 | train/4_6145.jpg 0 0 0 0 1 0 0 0 0 0 938 | train/2_2538.jpg 0 0 0 0 0 0 0 0 0 1 939 | train/0_4272.jpg 0 0 0 0 0 0 0 0 1 0 940 | test/999_3095.jpg 0 0 0 0 1 0 0 0 0 0 941 | train/2_2249.jpg 1 0 0 0 0 0 0 0 0 0 942 | train/2_2747.jpg 0 1 0 0 0 0 0 0 0 0 943 | test/999_8805.jpg 0 0 0 0 0 0 0 0 0 1 944 | train/3_5095.jpg 1 0 0 0 0 0 0 0 0 0 945 | train/4_9872.jpg 0 0 0 1 0 0 0 0 0 0 946 | train/3_9335.jpg 0 0 0 0 0 0 0 0 1 0 947 | train/3_8631.jpg 0 0 0 0 0 0 0 0 1 0 948 | train/3_3432.jpg 0 0 0 0 0 0 0 0 0 1 949 | train/1_5299.jpg 0 1 0 0 0 0 0 0 0 0 950 | train/3_4384.jpg 0 0 0 0 1 0 0 0 0 0 951 | train/2_937.jpg 0 1 0 0 0 0 0 0 0 0 952 | test/999_5777.jpg 0 0 0 0 0 0 0 0 1 0 953 | test/999_5151.jpg 0 0 0 0 0 0 0 0 0 1 954 | train/1_1567.jpg 0 0 0 0 1 0 0 0 0 0 955 | train/4_6967.jpg 0 0 1 0 0 0 0 0 0 0 956 | train/1_9225.jpg 0 0 1 0 0 0 0 0 0 0 957 | train/0_1961.jpg 0 0 0 1 0 0 0 0 0 0 958 | train/3_4511.jpg 0 0 0 0 0 0 0 1 0 0 959 | train/4_3082.jpg 0 0 1 0 0 0 0 0 0 0 960 | test/999_4203.jpg 0 0 1 0 0 0 0 0 0 0 961 | train/3_4060.jpg 0 0 0 0 1 0 0 0 0 0 962 | train/3_310.jpg 0 0 0 0 0 1 0 0 0 0 963 | train/0_9414.jpg 0 0 0 0 0 0 1 0 0 0 964 | train/2_8158.jpg 0 0 1 0 0 0 0 0 0 0 965 | train/2_8930.jpg 1 0 0 0 0 0 0 0 0 0 966 | test/999_3971.jpg 1 0 0 0 0 0 0 0 0 0 967 | train/2_5064.jpg 0 0 1 0 0 0 0 0 0 0 968 | train/2_3035.jpg 1 0 0 0 0 0 0 0 0 0 969 | train/4_8227.jpg 0 0 0 0 0 0 0 0 1 0 970 | train/3_4197.jpg 0 0 0 1 0 0 0 0 0 0 971 | train/3_7335.jpg 0 0 0 0 0 0 0 0 1 0 972 | train/3_4526.jpg 0 0 0 0 0 0 0 0 0 1 973 | train/0_1489.jpg 0 0 0 0 0 0 0 1 0 0 974 | train/3_1447.jpg 0 0 0 0 0 0 1 0 0 0 975 | test/999_3758.jpg 0 0 1 0 0 0 0 0 0 0 976 | train/3_7952.jpg 0 0 1 0 0 0 0 0 0 0 977 | train/2_5521.jpg 0 0 0 0 0 0 0 0 1 0 978 | train/4_5330.jpg 0 0 0 1 0 0 0 0 0 0 979 | test/999_9030.jpg 0 0 1 0 0 0 0 0 0 0 980 | train/0_3690.jpg 0 0 0 0 1 0 0 0 0 0 981 | train/0_856.jpg 0 0 0 0 1 0 0 0 0 0 982 | test/999_5773.jpg 0 0 0 1 0 0 0 0 0 0 983 | test/999_3907.jpg 0 0 1 0 0 0 0 0 0 0 984 | train/4_7813.jpg 0 0 0 0 1 0 0 0 0 0 985 | train/2_2344.jpg 0 0 0 0 0 0 0 1 0 0 986 | train/0_4584.jpg 0 0 0 0 0 0 0 0 0 1 987 | test/999_7905.jpg 0 0 0 1 0 0 0 0 0 0 988 | train/1_3805.jpg 0 0 0 0 0 1 0 0 0 0 989 | train/3_6161.jpg 0 0 0 1 0 0 0 0 0 0 990 | train/2_8127.jpg 0 1 0 0 0 0 0 0 0 0 991 | train/4_7404.jpg 1 0 0 0 0 0 0 0 0 0 992 | train/2_416.jpg 0 0 0 0 0 1 0 0 0 0 993 | train/2_8678.jpg 0 0 0 0 0 0 1 0 0 0 994 | test/999_5801.jpg 0 0 0 0 0 0 0 0 0 1 995 | train/2_5478.jpg 0 0 1 0 0 0 0 0 0 0 996 | test/999_2046.jpg 0 0 0 1 0 0 0 0 0 0 997 | test/999_1928.jpg 0 0 0 0 1 0 0 0 0 0 998 | train/3_9760.jpg 1 0 0 0 0 0 0 0 0 0 999 | train/4_5187.jpg 0 1 0 0 0 0 0 0 0 0 1000 | test/999_5283.jpg 0 0 0 0 1 0 0 0 0 0 1001 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: HashGAN 2 | channels: 3 | - defaults 4 | dependencies: 5 | - _tflow_select=2.1.0=gpu 6 | - absl-py=0.6.1=py36_0 7 | - astor=0.7.1=py36_0 8 | - blas=1.0=mkl 9 | - bzip2=1.0.6=h14c3975_5 10 | - c-ares=1.15.0=h7b6447c_1 11 | - ca-certificates=2018.03.07=0 12 | - cairo=1.14.12=h8948797_3 13 | - certifi=2018.10.15=py36_0 14 | - cudatoolkit=9.2=0 15 | - cudnn=7.2.1=cuda9.2_0 16 | - cupti=9.2.148=0 17 | - cycler=0.10.0=py36_0 18 | - dbus=1.13.2=h714fa37_1 19 | - expat=2.2.6=he6710b0_0 20 | - ffmpeg=4.0=hcdf2ecd_0 21 | - fontconfig=2.13.0=h9420a91_0 22 | - freeglut=3.0.0=hf484d3e_5 23 | - freetype=2.9.1=h8a8886c_1 24 | - gast=0.2.0=py36_0 25 | - glib=2.56.2=hd408876_0 26 | - graphite2=1.3.12=h23475e2_2 27 | - grpcio=1.14.1=py36h9ba97e2_0 28 | - gst-plugins-base=1.14.0=hbbd80ab_1 29 | - gstreamer=1.14.0=hb453b48_1 30 | - h5py=2.8.0=py36h989c5e5_3 31 | - harfbuzz=1.8.8=hffaf4a1_0 32 | - hdf5=1.10.2=hba1933b_1 33 | - icu=58.2=h9c2bf20_1 34 | - intel-openmp=2019.1=144 35 | - jasper=2.0.14=h07fcdf6_1 36 | - jpeg=9b=h024ee3a_2 37 | - keras-applications=1.0.6=py36_0 38 | - keras-preprocessing=1.0.5=py36_0 39 | - kiwisolver=1.0.1=py36hf484d3e_0 40 | - libedit=3.1.20170329=h6b74fdf_2 41 | - libffi=3.2.1=hd88cf55_4 42 | - libgcc-ng=8.2.0=hdf63c60_1 43 | - libgfortran-ng=7.3.0=hdf63c60_0 44 | - libglu=9.0.0=hf484d3e_1 45 | - libopencv=3.4.2=hb342d67_1 46 | - libopus=1.3=h7b6447c_0 47 | - libpng=1.6.35=hbc83047_0 48 | - libprotobuf=3.6.1=hd408876_0 49 | - libstdcxx-ng=8.2.0=hdf63c60_1 50 | - libtiff=4.0.9=he85c1e1_2 51 | - libuuid=1.0.3=h1bed415_2 52 | - libvpx=1.7.0=h439df22_0 53 | - libxcb=1.13=h1bed415_1 54 | - libxml2=2.9.8=h26e45fe_1 55 | - markdown=3.0.1=py36_0 56 | - matplotlib=3.0.1=py36h5429711_0 57 | - mkl=2018.0.3=1 58 | - mkl_fft=1.0.6=py36h7dd41cf_0 59 | - mkl_random=1.0.1=py36h4414c95_1 60 | - ncurses=6.1=hf484d3e_0 61 | - numpy=1.15.4=py36h1d66e8a_0 62 | - numpy-base=1.15.4=py36h81de0dd_0 63 | - opencv=3.4.2=py36h6fd60c2_1 64 | - openssl=1.0.2p=h14c3975_0 65 | - pcre=8.42=h439df22_0 66 | - pip=18.1=py36_0 67 | - pixman=0.34.0=hceecf20_3 68 | - protobuf=3.6.1=py36he6710b0_0 69 | - py-opencv=3.4.2=py36hb342d67_1 70 | - pyparsing=2.3.0=py36_0 71 | - pyqt=5.9.2=py36h05f1152_2 72 | - python=3.6.6=h6e4f718_2 73 | - python-dateutil=2.7.5=py36_0 74 | - pytz=2018.7=py36_0 75 | - qt=5.9.6=h8703b6f_2 76 | - readline=7.0=h7b6447c_5 77 | - scipy=1.1.0=py36hfa4b5c9_1 78 | - setuptools=40.6.2=py36_0 79 | - sip=4.19.8=py36hf484d3e_0 80 | - six=1.11.0=py36_1 81 | - sqlite=3.25.3=h7b6447c_0 82 | - tensorboard=1.12.0=py36hf484d3e_0 83 | - tensorflow=1.12.0=gpu_py36he74679b_0 84 | - tensorflow-base=1.12.0=gpu_py36had579c0_0 85 | - tensorflow-gpu=1.12.0=h0d30ee6_0 86 | - termcolor=1.1.0=py36_1 87 | - tk=8.6.8=hbc83047_0 88 | - tornado=5.1.1=py36h7b6447c_0 89 | - werkzeug=0.14.1=py36_0 90 | - wheel=0.32.3=py36_0 91 | - xz=5.2.4=h14c3975_4 92 | - zlib=1.2.11=h7b6447c_3 93 | - pip: 94 | - easydict==1.9 95 | - pyyaml==3.13 96 | - tqdm==4.28.1 97 | - yacs==0.1.4 98 | prefix: /home/liubin/anaconda3/envs/HashGAN 99 | 100 | -------------------------------------------------------------------------------- /lib/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thuml/HashGAN/2d114009ce68357bda9109e498b534d7f3019c0a/lib/__init__.py -------------------------------------------------------------------------------- /lib/architecture.py: -------------------------------------------------------------------------------- 1 | import functools 2 | 3 | import numpy as np 4 | import tensorflow as tf 5 | 6 | from lib.ops import conv2D, linear, batch_norm 7 | from lib.util import preprocess_resize_scale_img 8 | from lib.params import param 9 | 10 | 11 | def normalize(name, inputs): 12 | """This is messy, but basically it chooses between batchnorm, layernorm, 13 | their conditional variants, or nothing, depending on the value of `name` and 14 | hyperparam flags.""" 15 | 16 | if 'generator' in name: 17 | return batch_norm(name, [0, 2, 3], inputs, fused=True) 18 | else: 19 | return inputs 20 | 21 | 22 | def conv_mean_pool(name, input_dim, output_dim, filter_size, inputs, he_init=True, biases=True): 23 | output = conv2D( 24 | name, input_dim, output_dim, filter_size, inputs, he_init=he_init, biases=biases) 25 | output = tf.add_n( 26 | [output[:, :, ::2, ::2], output[:, :, 1::2, ::2], output[:, :, ::2, 1::2], output[:, :, 1::2, 1::2]]) / 4. 27 | return output 28 | 29 | 30 | def mean_pool_conv(name, input_dim, output_dim, filter_size, inputs, he_init=True, biases=True): 31 | output = inputs 32 | output = tf.add_n( 33 | [output[:, :, ::2, ::2], output[:, :, 1::2, ::2], output[:, :, ::2, 1::2], output[:, :, 1::2, 1::2]]) / 4. 34 | output = conv2D( 35 | name, input_dim, output_dim, filter_size, output, he_init=he_init, biases=biases) 36 | return output 37 | 38 | 39 | def upsample_conv(name, input_dim, output_dim, filter_size, inputs, he_init=True, biases=True): 40 | output = inputs 41 | output = tf.concat([output, output, output, output], axis=1) 42 | output = tf.transpose(output, [0, 2, 3, 1]) 43 | output = tf.depth_to_space(output, 2) 44 | output = tf.transpose(output, [0, 3, 1, 2]) 45 | output = conv2D( 46 | name, input_dim, output_dim, filter_size, output, he_init=he_init, biases=biases) 47 | return output 48 | 49 | 50 | def residual_block(name, input_dim, output_dim, filter_size, inputs, resample=None): 51 | """ 52 | resample: None, 'down', or 'up' 53 | """ 54 | if resample == 'down': 55 | conv_1 = functools.partial( 56 | conv2D, input_dim=input_dim, output_dim=input_dim) 57 | conv_2 = functools.partial( 58 | conv_mean_pool, input_dim=input_dim, output_dim=output_dim) 59 | conv_shortcut = conv_mean_pool 60 | elif resample == 'up': 61 | conv_1 = functools.partial( 62 | upsample_conv, input_dim=input_dim, output_dim=output_dim) 63 | conv_shortcut = upsample_conv 64 | conv_2 = functools.partial( 65 | conv2D, input_dim=output_dim, output_dim=output_dim) 66 | elif resample is None: 67 | conv_shortcut = conv2D 68 | conv_1 = functools.partial( 69 | conv2D, input_dim=input_dim, output_dim=output_dim) 70 | conv_2 = functools.partial( 71 | conv2D, input_dim=output_dim, output_dim=output_dim) 72 | else: 73 | raise Exception('invalid resample value') 74 | 75 | if output_dim == input_dim and resample is None: 76 | shortcut = inputs # Identity skip-connection 77 | else: 78 | shortcut = conv_shortcut(name + '.Shortcut', input_dim=input_dim, output_dim=output_dim, filter_size=1, 79 | he_init=False, biases=True, inputs=inputs) 80 | 81 | output = inputs 82 | output = normalize(name + '.N1', output) 83 | output = tf.nn.relu(output) 84 | output = conv_1(name + '.Conv1', filter_size=filter_size, inputs=output) 85 | output = normalize(name + '.N2', output) 86 | output = tf.nn.relu(output) 87 | output = conv_2(name + '.Conv2', filter_size=filter_size, inputs=output) 88 | 89 | return shortcut + output 90 | 91 | 92 | def optimized_res_block_disc1(inputs, cfg): 93 | conv_1 = functools.partial( 94 | conv2D, input_dim=3, output_dim=cfg.MODEL.DIM_D) 95 | conv_2 = functools.partial(conv_mean_pool, input_dim=cfg.MODEL.DIM_D, output_dim=cfg.MODEL.DIM_D) 96 | conv_shortcut = mean_pool_conv 97 | shortcut = conv_shortcut('discriminator.1.Shortcut', input_dim=3, output_dim=cfg.MODEL.DIM_D, 98 | filter_size=1, he_init=False, biases=True, inputs=inputs) 99 | 100 | output = inputs 101 | output = conv_1('discriminator.1.Conv1', filter_size=3, inputs=output) 102 | output = tf.nn.relu(output) 103 | output = conv_2('discriminator.1.Conv2', filter_size=3, inputs=output) 104 | return shortcut + output 105 | 106 | 107 | def old_generator(n_samples, labels, cfg, noise=None): 108 | if noise is None: 109 | noise = tf.random_normal([n_samples, 256]) 110 | 111 | # concat noise with label 112 | noise = tf.concat([tf.cast(labels, tf.float32), tf.slice( 113 | noise, [0, cfg.DATA.LABEL_DIM], [-1, -1])], 1) 114 | output = linear( 115 | 'generator.Input', 256, 4 * 4 * cfg.MODEL.DIM_G, noise) 116 | output = tf.reshape(output, [-1, cfg.MODEL.DIM_G, 4, 4]) 117 | output = residual_block('generator.1', cfg.MODEL.DIM_G, cfg.MODEL.DIM_G, 118 | 3, output, resample='up') 119 | output = residual_block('generator.2', cfg.MODEL.DIM_G, cfg.MODEL.DIM_G, 120 | 3, output, resample='up') 121 | output = residual_block('generator.3', cfg.MODEL.DIM_G, cfg.MODEL.DIM_G, 122 | 3, output, resample='up') 123 | output = normalize('generator.OutputN', output) 124 | output = tf.nn.relu(output) 125 | output = conv2D( 126 | 'generator.Output', cfg.MODEL.DIM_G, 3, 3, output, he_init=False) 127 | output = tf.tanh(output) 128 | return tf.reshape(output, [-1, cfg.DATA.OUTPUT_DIM]) 129 | 130 | 131 | def old_discriminator(inputs, cfg): 132 | output = tf.reshape(inputs, [-1, 3, 32, 32]) 133 | output = optimized_res_block_disc1(output, cfg=cfg) 134 | output = residual_block('discriminator.2', cfg.MODEL.DIM_D, cfg.MODEL.DIM_D, 135 | 3, output, resample='down') 136 | output = residual_block('discriminator.3', cfg.MODEL.DIM_D, cfg.MODEL.DIM_D, 137 | 3, output, resample=None) 138 | output = residual_block('discriminator.4', cfg.MODEL.DIM_D, cfg.MODEL.DIM_D, 139 | 3, output, resample=None) 140 | output = tf.nn.relu(output) 141 | output = tf.reduce_mean(output, axis=[2, 3]) 142 | output_wgan = linear( 143 | 'discriminator.Output', cfg.MODEL.DIM_D, 1, output) 144 | output_wgan = tf.reshape(output_wgan, [-1]) 145 | output_acgan = linear( 146 | 'discriminator.ACGANOutput', cfg.MODEL.DIM_D, cfg.MODEL.HASH_DIM, output) 147 | output_acgan = tf.nn.tanh(output_acgan) 148 | return output_wgan, output_acgan 149 | 150 | 151 | def good_generator(n_samples, labels, cfg, noise=None): 152 | if noise is None: 153 | noise = tf.random_normal([n_samples, 128]) 154 | 155 | noise = tf.concat([tf.cast(labels, tf.float32), tf.slice(noise, [0, cfg.DATA.LABEL_DIM], [-1, -1])], 1) 156 | output = linear('generator.Input', 128, 4 * 4 * 8 * cfg.MODEL.DIM, noise) 157 | output = tf.reshape(output, [-1, 8 * cfg.MODEL.DIM, 4, 4]) 158 | 159 | output = residual_block('generator.Res1', 8 * cfg.MODEL.DIM, 8 * cfg.MODEL.DIM, 3, output, resample='up') 160 | output = residual_block('generator.Res2', 8 * cfg.MODEL.DIM, 4 * cfg.MODEL.DIM, 3, output, resample='up') 161 | output = residual_block('generator.Res3', 4 * cfg.MODEL.DIM, 2 * cfg.MODEL.DIM, 3, output, resample='up') 162 | output = residual_block('generator.Res4', 2 * cfg.MODEL.DIM, 1 * cfg.MODEL.DIM, 3, output, resample='up') 163 | 164 | output = normalize('generator.OutputN', output) 165 | output = tf.nn.relu(output) 166 | output = conv2D('generator.Output', 1 * cfg.MODEL.DIM, 3, 3, output) 167 | output = tf.tanh(output) 168 | 169 | return tf.reshape(output, [-1, cfg.DATA.OUTPUT_DIM]) 170 | 171 | 172 | def good_discriminator(inputs, cfg): 173 | output = tf.reshape(inputs, [-1, 3, 64, 64]) 174 | output = conv2D( 175 | 'discriminator.Input', 3, cfg.MODEL.DIM, 3, output, he_init=False) 176 | 177 | output = residual_block('discriminator.Res1', cfg.MODEL.DIM, 178 | 2 * cfg.MODEL.DIM, 3, output, resample='down') 179 | output = residual_block('discriminator.Res2', 2 * cfg.MODEL.DIM, 180 | 4 * cfg.MODEL.DIM, 3, output, resample='down') 181 | output = residual_block('discriminator.Res3', 4 * cfg.MODEL.DIM, 182 | 8 * cfg.MODEL.DIM, 3, output, resample='down') 183 | output = residual_block('discriminator.Res4', 8 * cfg.MODEL.DIM, 184 | 8 * cfg.MODEL.DIM, 3, output, resample='down') 185 | 186 | output = tf.reshape(output, [-1, 4 * 4 * 8 * cfg.MODEL.DIM]) 187 | output_wgan = linear( 188 | 'discriminator.Output', 4 * 4 * 8 * cfg.MODEL.DIM, 1, output) 189 | 190 | output_acgan = linear( 191 | 'discriminator.ACGANOutput', 4 * 4 * 8 * cfg.MODEL.DIM, cfg.MODEL.HASH_DIM, output) 192 | output_acgan = tf.nn.tanh(output_acgan) 193 | return output_wgan, output_acgan 194 | 195 | 196 | def alexnet_discriminator(inputs, cfg, stage="train"): 197 | # noinspection PyTypeChecker 198 | # TODO: don't load imagenet pretrained model when D_PRETRAINED_MODEL_PATH is given 199 | net_data = dict(np.load(cfg.MODEL.ALEXNET_PRETRAINED_MODEL_PATH, encoding='latin1').item()) 200 | 201 | if inputs.shape[1] != 256: 202 | reshaped_image = preprocess_resize_scale_img(inputs, cfg.DATA.WIDTH_HEIGHT) 203 | else: 204 | reshaped_image = inputs 205 | 206 | height = width = 227 207 | 208 | # Randomly crop a [height, width] section of each image 209 | if stage == "train": 210 | distorted_image = tf.stack( 211 | [tf.random_crop(tf.image.random_flip_left_right(each_image), [height, width, 3]) for each_image in 212 | tf.unstack(reshaped_image)]) 213 | else: 214 | # Randomly crop a [height, width] section of each image 215 | distorted_image1 = tf.stack( 216 | [tf.image.crop_to_bounding_box(tf.image.flip_left_right(each_image), 0, 0, height, width) for each_image in 217 | tf.unstack(reshaped_image)]) 218 | distorted_image2 = tf.stack( 219 | [tf.image.crop_to_bounding_box(tf.image.flip_left_right(each_image), 28, 28, height, width) for each_image 220 | in tf.unstack(reshaped_image)]) 221 | distorted_image3 = tf.stack( 222 | [tf.image.crop_to_bounding_box(tf.image.flip_left_right(each_image), 28, 0, height, width) for each_image in 223 | tf.unstack(reshaped_image)]) 224 | distorted_image4 = tf.stack( 225 | [tf.image.crop_to_bounding_box(tf.image.flip_left_right(each_image), 0, 28, height, width) for each_image in 226 | tf.unstack(reshaped_image)]) 227 | distorted_image5 = tf.stack( 228 | [tf.image.crop_to_bounding_box(tf.image.flip_left_right(each_image), 14, 14, height, width) for each_image 229 | in tf.unstack(reshaped_image)]) 230 | 231 | distorted_image6 = tf.stack([tf.image.crop_to_bounding_box(each_image, 0, 0, height, width) for each_image in 232 | tf.unstack(reshaped_image)]) 233 | distorted_image7 = tf.stack([tf.image.crop_to_bounding_box(each_image, 28, 28, height, width) for each_image in 234 | tf.unstack(reshaped_image)]) 235 | distorted_image8 = tf.stack([tf.image.crop_to_bounding_box(each_image, 28, 0, height, width) for each_image in 236 | tf.unstack(reshaped_image)]) 237 | distorted_image9 = tf.stack([tf.image.crop_to_bounding_box(each_image, 0, 28, height, width) for each_image in 238 | tf.unstack(reshaped_image)]) 239 | distorted_image0 = tf.stack([tf.image.crop_to_bounding_box(each_image, 14, 14, height, width) for each_image in 240 | tf.unstack(reshaped_image)]) 241 | 242 | distorted_image = tf.concat( 243 | [distorted_image1, distorted_image2, distorted_image3, distorted_image4, distorted_image5, distorted_image6, 244 | distorted_image7, distorted_image8, distorted_image9, distorted_image0], 0) 245 | 246 | # Zero-mean input 247 | mean = tf.constant([103.939, 116.779, 123.68], dtype=tf.float32, shape=[ 248 | 1, 1, 1, 3], name='img-mean') 249 | distorted_image = distorted_image - mean 250 | 251 | # Conv1 252 | # Output 96, kernel 11, stride 4 253 | scope = 'discriminator.conv1.' 254 | kernel = param(scope + 'weights', net_data['conv1'][0]) 255 | biases = param(scope + 'biases', net_data['conv1'][1]) 256 | conv = tf.nn.conv2d(distorted_image, kernel, [1, 4, 4, 1], padding='VALID') 257 | out = tf.nn.bias_add(conv, biases) 258 | conv1 = tf.nn.relu(out, name=scope) 259 | 260 | # Pool1 261 | pool1 = tf.nn.max_pool(conv1, 262 | ksize=[1, 3, 3, 1], 263 | strides=[1, 2, 2, 1], 264 | padding='VALID', 265 | name='pool1') 266 | 267 | # LRN1 268 | if cfg.TRAIN.WGAN_SCALE == 0: 269 | lrn1 = tf.nn.local_response_normalization(pool1, depth_radius=2, alpha=2e-05, beta=0.75, bias=1.0) 270 | else: 271 | lrn1 = pool1 272 | 273 | # Conv2 274 | # Output 256, pad 2, kernel 5, group 2 275 | scope = 'discriminator.conv2.' 276 | kernel = param(scope + 'weights', net_data['conv2'][0]) 277 | biases = param(scope + 'biases', net_data['conv2'][1]) 278 | group = 2 279 | 280 | def convolve(i, k): return tf.nn.conv2d(i, k, [1, 1, 1, 1], padding='SAME') 281 | input_groups = tf.split(lrn1, group, 3) 282 | kernel_groups = tf.split(kernel, group, 3) 283 | output_groups = [convolve(i, k) 284 | for i, k in zip(input_groups, kernel_groups)] 285 | # Concatenate the groups 286 | conv = tf.concat(output_groups, 3) 287 | out = tf.nn.bias_add(conv, biases) 288 | conv2 = tf.nn.relu(out, name=scope) 289 | 290 | # Pool2 291 | pool2 = tf.nn.max_pool(conv2, 292 | ksize=[1, 3, 3, 1], 293 | strides=[1, 2, 2, 1], 294 | padding='VALID', 295 | name='pool2') 296 | 297 | # LRN2 298 | if cfg.TRAIN.WGAN_SCALE == 0: 299 | radius = 2 300 | alpha = 2e-05 301 | beta = 0.75 302 | bias = 1.0 303 | lrn2 = tf.nn.local_response_normalization(pool2, 304 | depth_radius=radius, 305 | alpha=alpha, 306 | beta=beta, 307 | bias=bias) 308 | else: 309 | lrn2 = pool2 310 | 311 | # Conv3 312 | # Output 384, pad 1, kernel 3 313 | scope = 'discriminator.conv3.' 314 | kernel = param(scope + 'weights', net_data['conv3'][0]) 315 | biases = param(scope + 'biases', net_data['conv3'][1]) 316 | conv = tf.nn.conv2d(lrn2, kernel, [1, 1, 1, 1], padding='SAME') 317 | out = tf.nn.bias_add(conv, biases) 318 | conv3 = tf.nn.relu(out, name=scope) 319 | 320 | # Conv4 321 | # Output 384, pad 1, kernel 3, group 2 322 | scope = 'discriminator.conv4.' 323 | kernel = param(scope + 'weights', net_data['conv4'][0]) 324 | biases = param(scope + 'biases', net_data['conv4'][1]) 325 | group = 2 326 | 327 | def convolve(i, k): return tf.nn.conv2d(i, k, [1, 1, 1, 1], padding='SAME') 328 | input_groups = tf.split(conv3, group, 3) 329 | kernel_groups = tf.split(kernel, group, 3) 330 | output_groups = [convolve(i, k) 331 | for i, k in zip(input_groups, kernel_groups)] 332 | # Concatenate the groups 333 | conv = tf.concat(output_groups, 3) 334 | out = tf.nn.bias_add(conv, biases) 335 | conv4 = tf.nn.relu(out, name=scope) 336 | 337 | # Conv5 338 | # Output 256, pad 1, kernel 3, group 2 339 | scope = 'discriminator.conv5.' 340 | kernel = param(scope + 'weights', net_data['conv5'][0]) 341 | biases = param(scope + 'biases', net_data['conv5'][1]) 342 | group = 2 343 | 344 | def convolve(i, k): return tf.nn.conv2d(i, k, [1, 1, 1, 1], padding='SAME') 345 | input_groups = tf.split(conv4, group, 3) 346 | kernel_groups = tf.split(kernel, group, 3) 347 | output_groups = [convolve(i, k) 348 | for i, k in zip(input_groups, kernel_groups)] 349 | # Concatenate the groups 350 | conv = tf.concat(output_groups, 3) 351 | out = tf.nn.bias_add(conv, biases) 352 | conv5 = tf.nn.relu(out, name=scope) 353 | 354 | # Pool5 355 | pool5 = tf.nn.max_pool(conv5, 356 | ksize=[1, 3, 3, 1], 357 | strides=[1, 2, 2, 1], 358 | padding='VALID', 359 | name='pool5') 360 | 361 | # FC6 362 | # Output 4096 363 | shape = int(np.prod(pool5.get_shape()[1:])) 364 | scope = 'discriminator.fc6.' 365 | fc6w = param(scope + 'weights', net_data['fc6'][0]) 366 | fc6b = param(scope + 'biases', net_data['fc6'][1]) 367 | pool5_flat = tf.reshape(pool5, [-1, shape]) 368 | fc6l = tf.nn.bias_add(tf.matmul(pool5_flat, fc6w), fc6b) 369 | fc6 = tf.nn.dropout(tf.nn.relu(fc6l), 0.5) 370 | 371 | # FC7 372 | # Output 4096 373 | scope = 'discriminator.fc7.' 374 | fc7w = param(scope + 'weights', net_data['fc7'][0]) 375 | fc7b = param(scope + 'biases', net_data['fc7'][1]) 376 | fc7l = tf.nn.bias_add(tf.matmul(fc6, fc7w), fc7b) 377 | fc7 = tf.nn.dropout(tf.nn.relu(fc7l), 0.5) 378 | 379 | # FC8 380 | # Output output_dim 381 | fc8 = linear( 382 | 'discriminator.ACGANOutput', 4096, cfg.MODEL.HASH_DIM, fc7) 383 | if stage == "train": 384 | output = tf.nn.tanh(fc8) 385 | else: 386 | fc8_t = tf.nn.tanh(fc8) 387 | fc8_t = tf.concat([tf.expand_dims(i, 0) 388 | for i in tf.split(fc8_t, 10, 0)], 0) 389 | output = tf.reduce_mean(fc8_t, 0) 390 | output_wgan = linear('discriminator.Output', 4096, 1, fc7) 391 | 392 | return output_wgan, output 393 | 394 | 395 | def generator(n_samples, labels, cfg, noise=None): 396 | if cfg.MODEL.G_ARCHITECTURE == "GOOD": 397 | return good_generator(n_samples, labels, noise=noise, cfg=cfg) 398 | else: 399 | return old_generator(n_samples, labels, noise=noise, cfg=cfg) 400 | 401 | 402 | def discriminator(inputs, cfg, stage="train"): 403 | if cfg.MODEL.D_ARCHITECTURE == "GOOD": 404 | return good_discriminator(inputs, cfg=cfg) 405 | elif cfg.MODEL.D_ARCHITECTURE == "ALEXNET": 406 | return alexnet_discriminator(inputs, stage=stage, cfg=cfg) 407 | else: 408 | return old_discriminator(inputs, cfg=cfg) 409 | -------------------------------------------------------------------------------- /lib/config.py: -------------------------------------------------------------------------------- 1 | from yacs.config import CfgNode 2 | import os 3 | 4 | config = CfgNode() 5 | 6 | config.MODEL = CfgNode() 7 | config.MODEL.DIM_G = 128 # generator dimensionality 8 | config.MODEL.DIM_D = 128 # Critic dimensionality 9 | config.MODEL.DIM = 64 # DIM for good generator and discriminator 10 | config.MODEL.HASH_DIM = 64 11 | config.MODEL.G_ARCHITECTURE = "NORM" # GOOD, NORM 12 | config.MODEL.D_ARCHITECTURE = "NORM" # GOOD, NORM, ALEXNET 13 | config.MODEL.G_PRETRAINED_MODEL_PATH = "" 14 | config.MODEL.D_PRETRAINED_MODEL_PATH = "" 15 | # TODO: merge ALEXNET_PRETRAINED_MODEL_PATH and D_PRETRAINED_MODEL_PATH 16 | config.MODEL.ALEXNET_PRETRAINED_MODEL_PATH = "./pretrained_models/reference_pretrain.npy" 17 | 18 | config.DATA = CfgNode() 19 | config.DATA.USE_DATASET = "cifar10" # "cifar10", "nuswide81", "coco" 20 | config.DATA.LIST_ROOT = "./data/cifar10" 21 | config.DATA.DATA_ROOT = "./data_list/cifar10" 22 | config.DATA.LABEL_DIM = 10 23 | config.DATA.DB_SIZE = 54000 24 | config.DATA.TEST_SIZE = 1000 25 | config.DATA.WIDTH_HEIGHT = 32 26 | config.DATA.OUTPUT_DIM = 3 * (config.DATA.WIDTH_HEIGHT ** 2) # Number of pixels (32*32*3) 27 | config.DATA.MAP_R = 54000 28 | config.DATA.OUTPUT_DIR = "./output/cifar10_step_1" 29 | config.DATA.IMAGE_DIR = os.path.join(config.DATA.OUTPUT_DIR, "images") 30 | config.DATA.MODEL_DIR = os.path.join(config.DATA.OUTPUT_DIR, "models") 31 | config.DATA.LOG_DIR = os.path.join(config.DATA.OUTPUT_DIR, "logs") 32 | 33 | config.TRAIN = CfgNode() 34 | config.TRAIN.EVALUATE_MODE = False 35 | config.TRAIN.BATCH_SIZE = 64 36 | config.TRAIN.ITERS = 100000 37 | config.TRAIN.CROSS_ENTROPY_ALPHA = 5 38 | config.TRAIN.LR = 1e-4 # Initial learning rate 39 | config.TRAIN.G_LR = 1e-4 # 1e-4 40 | config.TRAIN.DECAY = True # Whether to decay LR over learning 41 | config.TRAIN.N_CRITIC = 5 # Critic steps per generator steps 42 | config.TRAIN.EVAL_FREQUENCY = 20000 # How frequently to evaluate and save model 43 | config.TRAIN.CHECKPOINT_FREQUENCY = 2000 # How frequently to evaluate and save model 44 | config.TRAIN.SAMPLE_FREQUENCY = 1000 # How frequently to evaluate and save model 45 | config.TRAIN.ACGAN_SCALE = 1.0 46 | config.TRAIN.ACGAN_SCALE_FAKE = 1.0 47 | config.TRAIN.WGAN_SCALE = 1.0 48 | config.TRAIN.WGAN_SCALE_GP = 10.0 49 | config.TRAIN.ACGAN_SCALE_G = 0.1 50 | config.TRAIN.WGAN_SCALE_G = 1.0 51 | config.TRAIN.NORMED_CROSS_ENTROPY = True 52 | config.TRAIN.FAKE_RATIO = 1.0 53 | 54 | 55 | def update_and_inference_config(cfg_file): 56 | config.merge_from_file(cfg_file) 57 | 58 | config.DATA.IMAGE_DIR = os.path.join(config.DATA.OUTPUT_DIR, "images") 59 | config.DATA.MODEL_DIR = os.path.join(config.DATA.OUTPUT_DIR, "models") 60 | config.DATA.LOG_DIR = os.path.join(config.DATA.OUTPUT_DIR, "logs") 61 | config.DATA.OUTPUT_DIM = 3 * (config.DATA.WIDTH_HEIGHT ** 2) # Number of pixels (32*32*3) 62 | 63 | os.makedirs(config.DATA.IMAGE_DIR, exist_ok=True) 64 | os.makedirs(config.DATA.MODEL_DIR, exist_ok=True) 65 | os.makedirs(config.DATA.LOG_DIR, exist_ok=True) 66 | 67 | config.freeze() 68 | return config 69 | -------------------------------------------------------------------------------- /lib/criterion.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | 4 | import tensorflow as tf 5 | 6 | 7 | def cross_entropy(u, label_u, v=None, label_v=None, alpha=1, partial=False, normed=False): 8 | if v is None: 9 | v = u 10 | else: 11 | # v is the fake data_list, which cannot influence real data_list 12 | if partial is True: 13 | u = tf.stop_gradient(u) 14 | 15 | if label_v is None: 16 | label_v = label_u 17 | label_ip = tf.cast(tf.matmul(label_u, tf.transpose(label_v)), tf.float32) 18 | s = tf.clip_by_value(label_ip, 0.0, 1.0) 19 | 20 | # compute balance param 21 | # s_t \in {-1, 1} 22 | s_t = tf.multiply(tf.add(s, tf.constant(-0.5)), tf.constant(2.0)) 23 | sum_1 = tf.reduce_sum(s) 24 | sum_all = tf.reduce_sum(tf.abs(s_t)) 25 | balance_param = tf.add(tf.abs(tf.add(s, tf.constant(-1.0))), 26 | tf.multiply(tf.div(sum_all, sum_1), s)) 27 | 28 | if normed: 29 | # ip = tf.clip_by_value(tf.matmul(u, tf.transpose(u)), -1.5e1, 1.5e1) 30 | ip_1 = tf.matmul(u, tf.transpose(v)) 31 | 32 | def reduce_shaper(t): 33 | return tf.reshape(tf.reduce_sum(t, 1), [tf.shape(t)[0], 1]) 34 | 35 | mod_1 = tf.sqrt(tf.matmul(reduce_shaper(tf.square(u)), 36 | reduce_shaper(tf.square(v)), transpose_b=True)) 37 | ip = tf.div(ip_1, mod_1) 38 | else: 39 | ip = tf.clip_by_value(tf.matmul(u, tf.transpose(v)), -1.5e1, 1.5e1) 40 | ones = tf.ones([tf.shape(u)[0], tf.shape(v)[0]]) 41 | return tf.reduce_mean(tf.multiply(tf.log(ones + tf.exp(alpha * ip)) - s * alpha * ip, balance_param)) 42 | 43 | 44 | if __name__ == "__main__": 45 | import numpy as np 46 | 47 | sess = tf.InteractiveSession() 48 | u_ = np.ones([2, 3], dtype=np.float32) 49 | label = np.ones([2, 2]) 50 | print(("cross entropy loss 1 = %d" % cross_entropy(u_, label).eval())) 51 | 52 | label[1, :] = 0 53 | print(("cross entropy loss 2 = %d" % cross_entropy(u_, label).eval())) 54 | -------------------------------------------------------------------------------- /lib/dataloader.py: -------------------------------------------------------------------------------- 1 | # ============================================================================= 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | 16 | import os 17 | import math 18 | import cv2 19 | import numpy as np 20 | 21 | 22 | class Dataset(object): 23 | def __init__(self, list_path, image_root, train=True, height_width=256): 24 | self.lines = open(list_path, 'r').readlines() 25 | self.image_root = image_root 26 | self.n_samples = len(self.lines) 27 | self.train = train 28 | self.height_width = height_width 29 | self.img_shape = (self.height_width, self.height_width) 30 | 31 | self._img = [0] * self.n_samples 32 | self._label = [0] * self.n_samples 33 | self._load = [0] * self.n_samples 34 | self._load_num = 0 35 | self._status = 0 36 | self.data = self.img_data 37 | 38 | def read_image_at(self, index): 39 | filename = self.lines[index].strip().split()[0] 40 | path = os.path.join(self.image_root, filename) 41 | img = cv2.imread(path) 42 | return cv2.resize(img, self.img_shape, interpolation=cv2.INTER_AREA) 43 | 44 | def get_label(self, index): 45 | return [int(j) for j in self.lines[index].strip().split()[1:]] 46 | 47 | def img_data(self, index): 48 | if self._status: 49 | return self._img[index, :], self._label[index, :] 50 | else: 51 | ret_img = [] 52 | ret_label = [] 53 | for i in index: 54 | # noinspection PyBroadException,PyPep8 55 | try: 56 | if self.train: 57 | if not self._load[i]: 58 | self._img[i] = self.read_image_at(i) 59 | self._label[i] = self.get_label(i) 60 | self._load[i] = 1 61 | self._load_num += 1 62 | ret_img.append(self._img[i]) 63 | ret_label.append(self._label[i]) 64 | else: 65 | self._label[i] = self.get_label(i) 66 | ret_img.append(self.read_image_at(i)) 67 | ret_label.append(self._label[i]) 68 | except: 69 | print('cannot open', self.lines[i]) 70 | 71 | if self._load_num == self.n_samples: 72 | self._status = 1 73 | self._img = np.asarray(self._img) 74 | self._label = np.asarray(self._label) 75 | return np.asarray(ret_img), np.asarray(ret_label) 76 | 77 | 78 | class Dataloader(object): 79 | 80 | def __init__(self, batch_size, width_height, list_root, image_root): 81 | self.batch_size = batch_size 82 | self.width_height = width_height 83 | self.data_root = list_root 84 | self.image_root = image_root 85 | 86 | def data_generator(self, split): 87 | _dataset = Dataset(list_path=os.path.join(self.data_root, split + '.txt'), 88 | image_root=self.image_root, train=True, height_width=self.width_height) 89 | 90 | def get_epoch(): 91 | 92 | _index_in_epoch = 0 93 | _perm = np.arange(_dataset.n_samples) 94 | np.random.shuffle(_perm) 95 | for _ in range(int(math.ceil(_dataset.n_samples / self.batch_size))): 96 | start = _index_in_epoch 97 | _index_in_epoch += self.batch_size 98 | # finish one epoch 99 | if _index_in_epoch > _dataset.n_samples: 100 | data, label = _dataset.data(_perm[start:]) 101 | data1, label1 = _dataset.data( 102 | _perm[:_index_in_epoch - _dataset.n_samples]) 103 | data = np.concatenate([data, data1], axis=0) 104 | label = np.concatenate([label, label1], axis=0) 105 | else: 106 | end = _index_in_epoch 107 | data, label = _dataset.data(_perm[start:end]) 108 | 109 | # n*h*w*c -> n*c*h*w 110 | data = np.transpose(data, (0, 3, 1, 2)) 111 | # bgr -> rgb 112 | data = data[:, ::-1, :, :] 113 | data = np.reshape(data, (self.batch_size, -1)) 114 | yield (data, label) 115 | 116 | return get_epoch 117 | 118 | @property 119 | def train_gen(self): 120 | return self.data_generator('train') 121 | 122 | @property 123 | def test_gen(self): 124 | return self.data_generator('test') 125 | 126 | @property 127 | def db_gen(self): 128 | return self.data_generator('database') 129 | 130 | @property 131 | def unlabeled_db_gen(self): 132 | return self.data_generator('database_nolabel') 133 | 134 | @staticmethod 135 | def inf_gen(gen): 136 | def generator(): 137 | while True: 138 | for images_iter_, labels_iter_ in gen(): 139 | return images_iter_, labels_iter_ 140 | return generator 141 | -------------------------------------------------------------------------------- /lib/metric.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class MAPs: 5 | def __init__(self, r): 6 | self.R = r 7 | 8 | @staticmethod 9 | def distance(a, b): 10 | return np.dot(a, b) 11 | 12 | def get_maps_by_feature(self, database, query): 13 | ips = np.dot(query.output, database.output.T) 14 | ids = np.argsort(-ips, 1) 15 | apx = [] 16 | for i in range(ips.shape[0]): 17 | label = query.label[i, :].copy() 18 | label[label == 0] = -1 19 | imatch = np.sum(database.label[ids[i, :][0: self.R], :] == label, 1) > 0 20 | rel = np.sum(imatch) 21 | px = np.cumsum(imatch).astype(float) / np.arange(1, self.R + 1, 1) 22 | if rel != 0: 23 | apx.append(np.sum(px * imatch) / rel) 24 | return np.mean(np.array(apx)) 25 | -------------------------------------------------------------------------------- /lib/ops.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | from lib.params import param 4 | 5 | 6 | # noinspection PyUnboundLocalVariable,PyPep8Naming 7 | def conv2D( 8 | name, 9 | input_dim, 10 | output_dim, 11 | filter_size, 12 | inputs, 13 | he_init=True, 14 | mask_type=None, 15 | stride=1, 16 | weightnorm=None, 17 | biases=True, 18 | gain=1.): 19 | """ 20 | inputs: tensor of shape (batch size, num channels, height, width) 21 | mask_type: one of None, 'a', 'b' 22 | 23 | returns: tensor of shape (batch size, num channels, height, width) 24 | """ 25 | if mask_type is not None: 26 | mask_type, mask_n_channels = mask_type 27 | 28 | mask = np.ones( 29 | (filter_size, filter_size, input_dim, output_dim), 30 | dtype='float32' 31 | ) 32 | center = filter_size // 2 33 | 34 | # Mask out future locations 35 | # filter shape is (height, width, input channels, output channels) 36 | mask[center + 1:, :, :, :] = 0. 37 | mask[center, center + 1:, :, :] = 0. 38 | 39 | # Mask out future channels 40 | for i in range(mask_n_channels): 41 | for j in range(mask_n_channels): 42 | if (mask_type == 'a' and i >= j) or (mask_type == 'b' and i > j): 43 | mask[center, center, i::mask_n_channels, j::mask_n_channels] = 0. 44 | 45 | def uniform(stdev, size): 46 | return np.random.uniform( 47 | low=-stdev * np.sqrt(3), 48 | high=stdev * np.sqrt(3), 49 | size=size 50 | ).astype('float32') 51 | 52 | fan_in = input_dim * filter_size ** 2 53 | fan_out = output_dim * filter_size ** 2 / (stride ** 2) 54 | 55 | if mask_type is not None: # only approximately correct 56 | fan_in /= 2. 57 | fan_out /= 2. 58 | 59 | if he_init: 60 | filters_stdev = np.sqrt(4. / (fan_in + fan_out)) 61 | else: # Normalized init (Glorot & Bengio) 62 | filters_stdev = np.sqrt(2. / (fan_in + fan_out)) 63 | 64 | filter_values = uniform( 65 | filters_stdev, 66 | (filter_size, filter_size, input_dim, output_dim) 67 | ) 68 | 69 | # print "WARNING IGNORING GAIN" 70 | filter_values *= gain 71 | 72 | filters = param(name + '.Filters', filter_values) 73 | 74 | if weightnorm: 75 | norm_values = np.sqrt( 76 | np.sum(np.square(filter_values), axis=(0, 1, 2))) 77 | target_norms = param( 78 | name + '.g', 79 | norm_values 80 | ) 81 | with tf.name_scope('weightnorm'): 82 | norms = tf.sqrt(tf.reduce_sum( 83 | tf.square(filters), reduction_indices=[0, 1, 2])) 84 | filters = filters * (target_norms / norms) 85 | 86 | if mask_type is not None: 87 | with tf.name_scope('filter_mask'): 88 | filters = filters * mask 89 | 90 | result = tf.nn.conv2d( 91 | input=inputs, 92 | filter=filters, 93 | strides=[1, 1, stride, stride], 94 | padding='SAME', 95 | data_format='NCHW' 96 | ) 97 | 98 | if biases: 99 | _biases = param( 100 | name + '.Biases', 101 | np.zeros(output_dim, dtype='float32') 102 | ) 103 | 104 | result = tf.nn.bias_add(result, _biases, data_format='NCHW') 105 | 106 | return result 107 | 108 | 109 | def batch_norm(name, axes, inputs, is_training=None, stats_iter=None, update_moving_stats=True, fused=True): 110 | if ((axes == [0, 2, 3]) or (axes == [0, 2])) and fused is True: 111 | if axes == [0, 2]: 112 | inputs = tf.expand_dims(inputs, 3) 113 | 114 | offset = param( 115 | name + '.offset', np.zeros(inputs.get_shape()[1], dtype='float32')) 116 | scale = param( 117 | name + '.scale', np.ones(inputs.get_shape()[1], dtype='float32')) 118 | 119 | moving_mean = param( 120 | name + '.moving_mean', np.zeros(inputs.get_shape()[1], dtype='float32'), trainable=False) 121 | moving_variance = param( 122 | name + '.moving_variance', np.ones(inputs.get_shape()[1], dtype='float32'), trainable=False) 123 | 124 | def _fused_batch_norm_training(): 125 | return tf.nn.fused_batch_norm(inputs, scale, offset, epsilon=1e-5, data_format='NCHW') 126 | 127 | def _fused_batch_norm_inference(): 128 | # Version which blends in the current item's statistics 129 | batch_size = tf.cast(tf.shape(inputs)[0], 'float32') 130 | mean_, var_ = tf.nn.moments(inputs, [2, 3], keep_dims=True) 131 | mean_ = ((1. / batch_size) * mean_) + (((batch_size - 1.) / 132 | batch_size) * moving_mean)[None, :, None, None] 133 | var_ = ((1. / batch_size) * var_) + (((batch_size - 1.) / batch_size) 134 | * moving_variance)[None, :, None, None] 135 | bn = tf.nn.batch_normalization(inputs, mean_, var_, 136 | offset[None, :, None, None], 137 | scale[None, :, None, None], 138 | 1e-5) 139 | return bn, mean_, var_ 140 | 141 | if is_training is None: 142 | outputs, batch_mean, batch_var = _fused_batch_norm_training() 143 | else: 144 | outputs, batch_mean, batch_var = tf.cond(is_training, 145 | _fused_batch_norm_training, 146 | _fused_batch_norm_inference) 147 | if update_moving_stats: 148 | def no_updates(): return outputs 149 | 150 | def _force_updates(): 151 | """Internal function forces updates moving_vars if is_training.""" 152 | float_stats_iter = tf.cast(stats_iter, tf.float32) 153 | 154 | alpha = float_stats_iter / (float_stats_iter + 1) 155 | update_moving_mean = tf.assign(moving_mean, (alpha * moving_mean) + (1 - alpha) * batch_mean) 156 | update_moving_variance = tf.assign(moving_variance, 157 | (alpha * moving_variance) + (1 - alpha) * batch_var) 158 | 159 | with tf.control_dependencies([update_moving_mean, update_moving_variance]): 160 | return tf.identity(outputs) 161 | 162 | outputs = tf.cond(is_training, _force_updates, no_updates) 163 | 164 | if axes == [0, 2]: 165 | return outputs[:, :, :, 0] # collapse last dim 166 | else: 167 | return outputs 168 | else: 169 | mean, var = tf.nn.moments(inputs, axes, keep_dims=True) 170 | shape = mean.get_shape().as_list() 171 | if 0 not in axes: 172 | print("WARNING ({}): didn't find 0 in axes, but not using separate BN params for each item in batch" 173 | .format(name)) 174 | shape[0] = 1 175 | offset = param(name + '.offset', np.zeros(shape, dtype='float32')) 176 | scale = param(name + '.scale', np.ones(shape, dtype='float32')) 177 | result = tf.nn.batch_normalization( 178 | inputs, mean, var, offset, scale, 1e-5) 179 | 180 | return result 181 | 182 | 183 | def linear( 184 | name, 185 | input_dim, 186 | output_dim, 187 | inputs, 188 | biases=True, 189 | initialization=None, 190 | weightnorm=None, 191 | gain=1. 192 | ): 193 | """ 194 | initialization: None, `lecun`, 'glorot', `he`, 'glorot_he', `orthogonal`, `("uniform", range)` 195 | """ 196 | 197 | def uniform(stdev, size): 198 | return np.random.uniform( 199 | low=-stdev * np.sqrt(3), 200 | high=stdev * np.sqrt(3), 201 | size=size 202 | ).astype('float32') 203 | 204 | if initialization == 'lecun': # and input_dim != output_dim): 205 | # disabling orth. init for now because it's too slow 206 | weight_values = uniform( 207 | np.sqrt(1. / input_dim), 208 | (input_dim, output_dim) 209 | ) 210 | 211 | elif initialization == 'glorot' or (initialization is None): 212 | 213 | weight_values = uniform( 214 | np.sqrt(2. / (input_dim + output_dim)), 215 | (input_dim, output_dim) 216 | ) 217 | 218 | elif initialization == 'he': 219 | 220 | weight_values = uniform( 221 | np.sqrt(2. / input_dim), 222 | (input_dim, output_dim) 223 | ) 224 | 225 | elif initialization == 'glorot_he': 226 | 227 | weight_values = uniform( 228 | np.sqrt(4. / (input_dim + output_dim)), 229 | (input_dim, output_dim) 230 | ) 231 | 232 | elif initialization == 'orthogonal' or \ 233 | (initialization is None and input_dim == output_dim): 234 | 235 | # From lasagne 236 | def sample(shape): 237 | if len(shape) < 2: 238 | raise RuntimeError("Only shapes of length 2 or more are " 239 | "supported.") 240 | flat_shape = (shape[0], np.prod(shape[1:])) 241 | a = np.random.normal(0.0, 1.0, flat_shape) 242 | u, _, v = np.linalg.svd(a, full_matrices=False) 243 | # pick the one with the correct shape 244 | q = u if u.shape == flat_shape else v 245 | q = q.reshape(shape) 246 | return q.astype('float32') 247 | 248 | weight_values = sample((input_dim, output_dim)) 249 | 250 | elif initialization[0] == 'uniform': 251 | 252 | weight_values = np.random.uniform( 253 | low=-initialization[1], 254 | high=initialization[1], 255 | size=(input_dim, output_dim) 256 | ).astype('float32') 257 | 258 | else: 259 | 260 | raise Exception('Invalid initialization!') 261 | 262 | weight_values *= gain 263 | 264 | weight = param( 265 | name + '.W', 266 | weight_values 267 | ) 268 | 269 | if weightnorm: 270 | norm_values = np.sqrt(np.sum(np.square(weight_values), axis=0)) 271 | # norm_values = np.linalg.norm(weight_values, axis=0) 272 | 273 | target_norms = param( 274 | name + '.g', 275 | norm_values 276 | ) 277 | 278 | with tf.name_scope('weightnorm'): 279 | norms = tf.sqrt(tf.reduce_sum( 280 | tf.square(weight), reduction_indices=[0])) 281 | weight = weight * (target_norms / norms) 282 | 283 | # if 'discriminator' in name: 284 | # print "WARNING weight constraint on {}".format(name) 285 | # weight = tf.nn.softsign(10.*weight)*.1 286 | 287 | if inputs.get_shape().ndims == 2: 288 | result = tf.matmul(inputs, weight) 289 | else: 290 | reshaped_inputs = tf.reshape(inputs, [-1, input_dim]) 291 | result = tf.matmul(reshaped_inputs, weight) 292 | result = tf.reshape(result, tf.stack( 293 | tf.unstack(tf.shape(inputs))[:-1] + [output_dim])) 294 | 295 | if biases: 296 | result = tf.nn.bias_add( 297 | result, 298 | param( 299 | name + '.b', 300 | np.zeros((output_dim,), dtype='float32') 301 | ) 302 | ) 303 | 304 | return result 305 | -------------------------------------------------------------------------------- /lib/params.py: -------------------------------------------------------------------------------- 1 | import locale 2 | 3 | import tensorflow as tf 4 | 5 | locale.setlocale(locale.LC_ALL, '') 6 | 7 | _params = {} 8 | _param_aliases = {} 9 | 10 | 11 | def param(name, *args, **kwargs): 12 | """ 13 | A wrapper for `tf.Variable` which enables parameter sharing in models. 14 | 15 | Creates and returns theano shared variables similarly to `tf.Variable`, 16 | except if you try to create a param with the same name as a 17 | previously-created one, `param(...)` will just return the old one instead of 18 | making a new one. 19 | 20 | This constructor also adds a `param` attribute to the shared variables it 21 | creates, so that you can easily search a graph for all params. 22 | """ 23 | 24 | if name not in _params: 25 | kwargs['name'] = name 26 | var = tf.Variable(*args, **kwargs) 27 | var.param = True 28 | _params[name] = var 29 | result = _params[name] 30 | i = 0 31 | while result in _param_aliases: 32 | # print 'following alias {}: {} to {}'.format(i, result, _param_aliases[result]) 33 | i += 1 34 | result = _param_aliases[result] 35 | return result 36 | 37 | 38 | def params_with_name(name): 39 | return [p for n, p in list(_params.items()) if name in n] 40 | 41 | 42 | # compute param size 43 | def print_param_size(gen_gv, disc_gv): 44 | print("computing param size") 45 | for name, grads_and_vars in [('G', gen_gv), ('D', disc_gv)]: 46 | print("{} Params:".format(name)) 47 | total_param_count = 0 48 | for g, v in grads_and_vars: 49 | shape = v.get_shape() 50 | shape_str = ",".join([str(x) for x in v.get_shape()]) 51 | 52 | param_count = 1 53 | for dim in shape: 54 | param_count *= int(dim) 55 | total_param_count += param_count 56 | 57 | if g is None: 58 | print("\t{} ({}) [no grad!]".format(v.name, shape_str)) 59 | else: 60 | print("\t{} ({})".format(v.name, shape_str)) 61 | print("Total param count: {}".format( 62 | locale.format("%d", total_param_count, grouping=True) 63 | )) 64 | -------------------------------------------------------------------------------- /lib/util.py: -------------------------------------------------------------------------------- 1 | # ----------------------------------------------------------------------- 2 | # HashGAN: Deep Learning to Hash with Pair Conditional Wasserstein GAN 3 | # Licensed under The MIT License [see LICENSE for details] 4 | # Modified by Bin Liu 5 | # ----------------------------------------------------------------------- 6 | 7 | import numpy as np 8 | from scipy.misc import imsave 9 | import tensorflow as tf 10 | 11 | 12 | def preprocess_resize_scale_img(inputs, width_height): 13 | img = (inputs + 1.) * 255.99 / 2 14 | reshaped_image = tf.cast(img, tf.float32) 15 | reshaped_image = tf.reshape( 16 | reshaped_image, [-1, 3, width_height, width_height]) 17 | 18 | transpose_image = tf.transpose(reshaped_image, perm=[0, 2, 3, 1]) 19 | resized_image = tf.image.resize_bilinear(transpose_image, [256, 256]) 20 | 21 | return resized_image 22 | 23 | 24 | # noinspection PyUnboundLocalVariable 25 | def save_images(x, save_path): 26 | # [0, 1] -> [0,255] 27 | if isinstance(x.flatten()[0], np.floating): 28 | x = (255.99 * x).astype('uint8') 29 | 30 | n_samples = x.shape[0] 31 | rows = int(np.sqrt(n_samples)) 32 | while n_samples % rows != 0: 33 | rows -= 1 34 | 35 | nh, nw = rows, n_samples // rows 36 | 37 | if x.ndim == 2: 38 | x = np.reshape( 39 | x, (x.shape[0], int(np.sqrt(x.shape[1])), int(np.sqrt(x.shape[1])))) 40 | 41 | if x.ndim == 4: 42 | # BCHW -> BHWC 43 | x = x.transpose((0, 2, 3, 1)) 44 | h, w = x[0].shape[:2] 45 | img = np.zeros((h * nh, w * nw, 3)) 46 | elif x.ndim == 3: 47 | h, w = x[0].shape[:2] 48 | img = np.zeros((h * nh, w * nw)) 49 | else: 50 | print('x.ndim must be 3 or 4') 51 | 52 | for n, x in enumerate(x): 53 | j = n // nw 54 | i = n % nw 55 | img[j * h:j * h + h, i * w:i * w + w] = x 56 | 57 | imsave(save_path, img) 58 | 59 | 60 | def scalar_summary(tag, value): 61 | return tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)]) 62 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | # ----------------------------------------------------------------------- 2 | # HashGAN: Deep Learning to Hash with Pair Conditional Wasserstein GAN 3 | # Licensed under The MIT License [see LICENSE for details] 4 | # Modified by Bin Liu 5 | # ----------------------------------------------------------------------- 6 | # Based on: 7 | # Improved Training of Wasserstein GANs 8 | # Licensed under The MIT License 9 | # https://github.com/igul222/improved_wgan_training 10 | # ----------------------------------------------------------------------- 11 | 12 | import argparse 13 | import locale 14 | import os 15 | import sys 16 | import time 17 | from pprint import pprint 18 | from datetime import datetime 19 | 20 | import numpy as np 21 | import tensorflow as tf 22 | from easydict import EasyDict 23 | from tqdm import trange 24 | 25 | from lib.dataloader import Dataloader 26 | from lib.metric import MAPs 27 | from lib.params import print_param_size, params_with_name 28 | from lib.util import preprocess_resize_scale_img, save_images, scalar_summary 29 | from lib.criterion import cross_entropy 30 | from lib.architecture import generator, discriminator 31 | from lib.config import config, update_and_inference_config 32 | 33 | 34 | # noinspection PyAttributeOutsideInit 35 | class Model(object): 36 | def __init__(self, cfg): 37 | self.cfg = cfg 38 | self.batch_size = self.cfg.TRAIN.BATCH_SIZE 39 | 40 | self._iteration = tf.placeholder(tf.int32, shape=None) 41 | 42 | self.labeled_real_data_holder = tf.placeholder(tf.int32, shape=[self.batch_size, self.cfg.DATA.OUTPUT_DIM]) 43 | self.unlabeled_real_data_holder = tf.placeholder(tf.int32, shape=[self.batch_size, self.cfg.DATA.OUTPUT_DIM]) 44 | self.labeled_labels_holder = tf.placeholder(tf.int32, shape=[self.batch_size, self.cfg.DATA.LABEL_DIM]) 45 | self.unlabeled_labels_holder = tf.placeholder(tf.int32, shape=[self.batch_size, self.cfg.DATA.LABEL_DIM]) 46 | 47 | self.build_graph() 48 | self.build_fixed_noise_samples() 49 | 50 | def build_graph(self): 51 | labeled_real_data = self.normalize(self.labeled_real_data_holder) 52 | unlabeled_real_data = self.normalize(self.unlabeled_real_data_holder) 53 | labeled_fake_data = generator(self.batch_size, self.labeled_labels_holder, cfg=self.cfg) 54 | unlabeled_fake_data = generator(self.batch_size, self.unlabeled_labels_holder, cfg=self.cfg) 55 | 56 | # init optimizer 57 | if self.cfg.TRAIN.DECAY: 58 | decay = tf.maximum(0., 1. - (tf.cast(self._iteration, tf.float32) / self.cfg.TRAIN.ITERS)) 59 | else: 60 | decay = 1.0 61 | 62 | all_data = tf.concat([unlabeled_real_data, labeled_real_data, labeled_fake_data, unlabeled_fake_data], axis=0) 63 | pos_start, pos_middle, pos_end = [i * self.batch_size for i in range(1, 4)] 64 | disc_wgan_all, disc_acgan_all = discriminator(all_data, cfg=self.cfg) 65 | 66 | # real vs real acgan loss 67 | self.cost_disc_acgan_rr = cross_entropy(disc_acgan_all[pos_start:pos_middle], self.labeled_labels_holder, 68 | alpha=self.cfg.TRAIN.CROSS_ENTROPY_ALPHA, 69 | normed=self.cfg.TRAIN.NORMED_CROSS_ENTROPY) 70 | self.cost_disc_acgan = self.cost_disc_acgan_rr 71 | summary_list_disc = [ 72 | tf.summary.scalar('cost_disc_acgan_rr', self.cost_disc_acgan_rr), 73 | tf.summary.scalar('cost_disc_acgan', self.cost_disc_acgan)] 74 | # real vs fake acgan loss, fake can't influence real. 75 | if self.cfg.TRAIN.ACGAN_SCALE_FAKE != 0: 76 | self.cost_disc_acgan_fr = cross_entropy(disc_acgan_all[pos_start:pos_middle], self.labeled_labels_holder, 77 | disc_acgan_all[pos_middle:pos_end], self.labeled_labels_holder, 78 | alpha=self.cfg.TRAIN.CROSS_ENTROPY_ALPHA, 79 | normed=self.cfg.TRAIN.NORMED_CROSS_ENTROPY, 80 | partial=True) 81 | self.cost_disc_acgan += self.cfg.TRAIN.ACGAN_SCALE_FAKE * self.cost_disc_acgan_fr 82 | summary_list_disc.append(tf.summary.scalar('cost_disc_acgan_fr', self.cost_disc_acgan_fr)) 83 | self.cost_disc = self.cfg.TRAIN.ACGAN_SCALE * self.cost_disc_acgan 84 | summary_list_disc.append(tf.summary.scalar('cost_disc', self.cost_disc)) 85 | 86 | # disciminator wgan loss 87 | if self.cfg.TRAIN.WGAN_SCALE != 0.0: 88 | self.cost_disc_wgan_l = tf.reduce_mean(disc_wgan_all[pos_middle:]) - tf.reduce_mean(disc_wgan_all[:pos_middle]) 89 | self.cost_disc_wgan_gp = self.gradient_penalty(all_data[:pos_middle], all_data[pos_middle:]) 90 | self.cost_disc_wgan = self.cost_disc_wgan_l + self.cfg.TRAIN.WGAN_SCALE_GP * self.cost_disc_wgan_gp 91 | self.cost_disc += self.cfg.TRAIN.WGAN_SCALE * self.cost_disc_wgan 92 | summary_list_disc += [ 93 | tf.summary.scalar('cost_disc_wgan_l', self.cost_disc_wgan_l), 94 | tf.summary.scalar('cost_disc_wgan_gp', self.cost_disc_wgan_gp), 95 | tf.summary.scalar('cost_disc_wgan', self.cost_disc_wgan)] 96 | 97 | disc_opt = tf.train.AdamOptimizer(learning_rate=self.cfg.TRAIN.LR * decay, beta1=0., beta2=0.9) 98 | self.train_op_disc = disc_opt.minimize(self.cost_disc, var_list=params_with_name('discriminator')) 99 | self.gv_disc = disc_opt.compute_gradients(self.cost_disc, var_list=params_with_name('discriminator')) 100 | self.summary_disc = tf.summary.merge([summary_list_disc]) 101 | 102 | # generator loss 103 | self.gv_gen = [] # TODO: real gv_gen 104 | if self.cfg.TRAIN.G_LR != 0: 105 | gen_opt = tf.train.AdamOptimizer(learning_rate=self.cfg.TRAIN.G_LR * decay, beta1=0., beta2=0.9) 106 | self.cost_gen_wgan = - tf.reduce_mean(disc_wgan_all[pos_middle:]) 107 | self.cost_gen_acgan = self.cost_disc_acgan_fr 108 | self.cost_gen = self.cfg.TRAIN.WGAN_SCALE_G * self.cost_gen_wgan \ 109 | + self.cfg.TRAIN.ACGAN_SCALE_G * self.cost_gen_acgan 110 | self.train_op_gen = gen_opt.minimize(self.cost_gen, var_list=params_with_name('generator')) 111 | self.gv_gen = gen_opt.compute_gradients(self.cost_gen, var_list=params_with_name('generator')) 112 | self.summary_gen = tf.summary.merge([ 113 | tf.summary.scalar('cost_gen_wgan', self.cost_gen_wgan), 114 | tf.summary.scalar('cost_gen_acgan', self.cost_gen_acgan), 115 | tf.summary.scalar('cost_gen', self.cost_gen), 116 | ]) 117 | 118 | # set acgan_output 119 | _, self.disc_real_acgan = discriminator(labeled_real_data, stage='val', cfg=self.cfg) 120 | 121 | def build_fixed_noise_samples(self): 122 | noise_dim = 256 if self.cfg.MODEL.G_ARCHITECTURE == "NORM" else 128 123 | fixed_noise = tf.constant(np.random.normal(size=(100, noise_dim)).astype('float32')) 124 | fixed_labels = tf.eye(10, self.cfg.DATA.LABEL_DIM, dtype=tf.int32) 125 | fixed_labels = tf.reshape(tf.tile(fixed_labels, [1, 10]), (100, self.cfg.DATA.LABEL_DIM)) 126 | 127 | self.fixed_noise_samples = generator(100, fixed_labels, noise=fixed_noise, cfg=self.cfg) 128 | 129 | def gradient_penalty(self, real_data, fake_data): 130 | shape = [2 * self.batch_size, 1] 131 | reduction_indices = [1] 132 | if self.cfg.MODEL.D_ARCHITECTURE == "ALEXNET": 133 | shape += [1, 1] 134 | reduction_indices += [2, 3] 135 | real_data = preprocess_resize_scale_img(real_data, width_height=self.cfg.DATA.WIDTH_HEIGHT) 136 | fake_data = preprocess_resize_scale_img(fake_data, width_height=self.cfg.DATA.WIDTH_HEIGHT) 137 | alpha = tf.random_uniform(shape=shape, minval=0, maxval=1) 138 | 139 | interpolates = real_data + alpha * (fake_data - real_data) 140 | gradients = tf.gradients(discriminator(interpolates, cfg=self.cfg)[0], [interpolates])[0] 141 | slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=reduction_indices)) 142 | return tf.reduce_mean((slopes - 1.) ** 2) 143 | 144 | @staticmethod 145 | def normalize(x): 146 | x = 2 * tf.cast(x, tf.float32) / 256. - 1 147 | x += tf.random_uniform(shape=x.shape, minval=0., maxval=1. / 128) # de-quantize 148 | return x 149 | 150 | 151 | def forward_all(session, model, data_generator, size, cfg): 152 | outputs, labels = [], [] 153 | for image, label in data_generator(): 154 | feed_dict = {model.labeled_real_data_holder: image, model.labeled_labels_holder: label} 155 | outputs.append(session.run(model.disc_real_acgan, feed_dict=feed_dict)) 156 | labels.append(label) 157 | return EasyDict(output=np.array(outputs).reshape([-1, cfg.MODEL.HASH_DIM])[:size, :], 158 | label=np.array(labels).reshape([-1, cfg.DATA.LABEL_DIM])[:size, :]) 159 | 160 | 161 | def evaluate(session, model, dataloader, cfg): 162 | db = forward_all(session, model, dataloader.db_gen, cfg.DATA.DB_SIZE, cfg) 163 | test = forward_all(session, model, dataloader.test_gen, cfg.DATA.TEST_SIZE, cfg) 164 | return MAPs(cfg.DATA.MAP_R).get_maps_by_feature(db, test) 165 | 166 | 167 | def main(cfg): 168 | # build graph 169 | model = Model(cfg) 170 | 171 | # training 172 | config_proto = tf.ConfigProto() 173 | config_proto.gpu_options.allow_growth = True 174 | config_proto.allow_soft_placement = True 175 | with tf.Session(config=config_proto) as session: 176 | summary_writer = tf.summary.FileWriter(cfg.DATA.LOG_DIR, session.graph) 177 | 178 | dataloader = Dataloader(cfg.TRAIN.BATCH_SIZE, cfg.DATA.WIDTH_HEIGHT, cfg.DATA.LIST_ROOT, cfg.DATA.DATA_ROOT) 179 | gen = dataloader.inf_gen(dataloader.train_gen) 180 | unlabel_gen = dataloader.inf_gen(dataloader.unlabeled_db_gen) 181 | 182 | print_param_size(model.gv_gen, model.gv_disc) 183 | 184 | print("initializing global variables") 185 | session.run(tf.global_variables_initializer()) 186 | 187 | saver_gen = tf.train.Saver(params_with_name('generator')) 188 | saver_disc = tf.train.Saver(params_with_name('discriminator')) 189 | 190 | if len(cfg.MODEL.G_PRETRAINED_MODEL_PATH) > 0: 191 | saver_gen.restore(session, cfg.MODEL.G_PRETRAINED_MODEL_PATH) 192 | print("Generator pretrained model restored: {}".format(cfg.MODEL.G_PRETRAINED_MODEL_PATH)) 193 | if len(cfg.MODEL.D_PRETRAINED_MODEL_PATH) > 0: 194 | saver_disc.restore(session, cfg.MODEL.D_PRETRAINED_MODEL_PATH) 195 | print("Discriminator pretrained model restored: {}".format(cfg.MODEL.D_PRETRAINED_MODEL_PATH)) 196 | 197 | if cfg.TRAIN.EVALUATE_MODE: 198 | map_val = evaluate(session, model, dataloader, cfg) 199 | print('map_val: {}'.format(map_val)) 200 | return 0 201 | 202 | print("training") 203 | for iteration in trange(cfg.TRAIN.ITERS, desc='Training'): 204 | start_time = time.time() 205 | 206 | def get_feed_dict(): 207 | labeled_data, labeled_labels = gen() 208 | unlabeled_data, unlabeled_labels = unlabel_gen() 209 | return { 210 | model.labeled_real_data_holder: labeled_data, 211 | model.unlabeled_real_data_holder: unlabeled_data, 212 | model.labeled_labels_holder: labeled_labels, 213 | model.unlabeled_labels_holder: unlabeled_labels, 214 | model._iteration: iteration 215 | } 216 | 217 | # train generator 218 | if iteration > 0 and cfg.TRAIN.G_LR != 0: 219 | summary_gen, _ = session.run([model.summary_gen, model.train_op_gen], feed_dict=get_feed_dict()) 220 | summary_writer.add_summary(summary_gen, iteration) 221 | 222 | # train discriminator 223 | for i in range(cfg.TRAIN.N_CRITIC): 224 | summary_disc, _ = session.run([model.summary_disc, model.train_op_disc], feed_dict=get_feed_dict()) 225 | summary_writer.add_summary(summary_disc, iteration * cfg.TRAIN.N_CRITIC + i) 226 | 227 | summary_writer.add_summary(scalar_summary(tag="time", value=time.time() - start_time), iteration) 228 | 229 | # sample images 230 | if (iteration + 1) % cfg.TRAIN.SAMPLE_FREQUENCY == 0: 231 | samples = session.run(model.fixed_noise_samples) 232 | samples = ((samples + 1.) * (255. / 2)).astype('int32') 233 | save_images(samples.reshape((100, 3, cfg.DATA.WIDTH_HEIGHT, cfg.DATA.WIDTH_HEIGHT)), 234 | '{}/samples_{}.png'.format(cfg.DATA.IMAGE_DIR, iteration)) 235 | 236 | # calculate mAP score w.r.t all db data_list 237 | if (iteration + 1) % cfg.TRAIN.EVAL_FREQUENCY == 0 or iteration + 1 == cfg.TRAIN.ITERS: 238 | map_val = evaluate(session, model, dataloader, cfg) 239 | print('map_val: {}'.format(map_val)) 240 | summary_writer.add_summary(scalar_summary("mAP_feature", map_val), iteration) 241 | 242 | # save checkpoints 243 | if (iteration + 1) % cfg.TRAIN.CHECKPOINT_FREQUENCY == 0 or iteration + 1 == cfg.TRAIN.ITERS: 244 | save_path_gen = os.path.join(cfg.DATA.MODEL_DIR, "G_{}.ckpt".format(iteration)) 245 | save_path_disc = os.path.join(cfg.DATA.MODEL_DIR, "D_{}.ckpt".format(iteration)) 246 | saver_gen.save(session, save_path_gen) 247 | saver_disc.save(session, save_path_disc) 248 | print("Model saved in file:") 249 | print(" - generator: {}".format(save_path_gen)) 250 | print(" - discriminator: {}".format(save_path_disc)) 251 | 252 | 253 | if __name__ == "__main__": 254 | sys.path.append(os.getcwd()) 255 | locale.setlocale(locale.LC_ALL, '') 256 | 257 | parser = argparse.ArgumentParser(description='HashGAN') 258 | parser.add_argument('--cfg', '--config', required=True, 259 | type=str, metavar="FILE", help="path to yaml config") 260 | parser.add_argument('--gpus', default='0', type=str) 261 | args = parser.parse_args() 262 | 263 | os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus 264 | 265 | 266 | config = update_and_inference_config(args.cfg) 267 | pprint(config) 268 | pprint(config, open(os.path.join(config.DATA.OUTPUT_DIR, 'config.txt'), 'w')) 269 | 270 | main(config) 271 | 272 | --------------------------------------------------------------------------------