├── csrc └── eval_cylib │ ├── __init__.py │ ├── Makefile │ ├── __pycache__ │ └── __init__.cpython-37.pyc │ ├── eval_metrics_cy.cpython-37m-x86_64-linux-gnu.so │ ├── setup.py │ └── test_cython.py ├── modeling ├── losses │ ├── CMC │ │ ├── __init__.py │ │ ├── NCECriterion.py │ │ └── alias_multinomial.py │ ├── InsDis │ │ ├── __init__.py │ │ ├── .ipynb_checkpoints │ │ │ ├── __init__-checkpoint.py │ │ │ ├── normalize-checkpoint.py │ │ │ ├── utils-checkpoint.py │ │ │ ├── NCECriterion-checkpoint.py │ │ │ └── NCEAverage-checkpoint.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── NCEAverage.cpython-37.pyc │ │ │ ├── NCECriterion.cpython-37.pyc │ │ │ └── alias_multinomial.cpython-37.pyc │ │ ├── normalize.py │ │ ├── utils.py │ │ ├── NCECriterion.py │ │ ├── LinearAverage.py │ │ ├── alias_multinomial.py │ │ └── NCEAverage.py │ ├── __pycache__ │ │ ├── loss.cpython-37.pyc │ │ ├── __init__.cpython-37.pyc │ │ ├── cosface.cpython-37.pyc │ │ ├── label_smooth.cpython-37.pyc │ │ └── triplet_loss.cpython-37.pyc │ ├── __init__.py │ ├── label_smooth.py │ ├── center_loss.py │ ├── .ipynb_checkpoints │ │ └── center_loss-checkpoint.py │ └── loss.py ├── __pycache__ │ ├── utils.cpython-37.pyc │ ├── __init__.cpython-37.pyc │ ├── baseline.cpython-37.pyc │ └── baseline_selfgcn.cpython-37.pyc ├── backbones │ ├── __init__.py │ └── __pycache__ │ │ ├── resnet.cpython-37.pyc │ │ └── __init__.cpython-37.pyc ├── utils.py └── __init__.py ├── tools ├── __init__.py ├── train_selfgcn.py ├── test.py ├── test_selfgcn.py ├── test_vis.py └── train.py ├── utils ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-37.pyc │ ├── iotools.cpython-37.pyc │ ├── logger.cpython-37.pyc │ └── meters.cpython-37.pyc ├── meters.py ├── iotools.py └── logger.py ├── data ├── __pycache__ │ ├── build.cpython-37.pyc │ ├── __init__.cpython-37.pyc │ ├── prefetcher.cpython-37.pyc │ └── collate_batch.cpython-37.pyc ├── datasets │ ├── __pycache__ │ │ ├── aic.cpython-37.pyc │ │ ├── bases.cpython-37.pyc │ │ ├── 
vd1.cpython-37.pyc │ │ ├── vd2.cpython-37.pyc │ │ ├── veri.cpython-37.pyc │ │ ├── cuhk03.cpython-37.pyc │ │ ├── msmt17.cpython-37.pyc │ │ ├── __init__.cpython-37.pyc │ │ ├── eval_reid.cpython-37.pyc │ │ ├── market1501.cpython-37.pyc │ │ ├── vehicleid.cpython-37.pyc │ │ ├── veri_mask.cpython-37.pyc │ │ ├── veriwild.cpython-37.pyc │ │ ├── dukemtmcreid.cpython-37.pyc │ │ ├── vehicleonem.cpython-37.pyc │ │ ├── dataset_loader.cpython-37.pyc │ │ ├── vehicleid_mask.cpython-37.pyc │ │ ├── vehicleid_small.cpython-37.pyc │ │ ├── veriwild_mask.cpython-37.pyc │ │ ├── veriwild_medium.cpython-37.pyc │ │ ├── veriwild_small.cpython-37.pyc │ │ ├── vehicleid_small_mask.cpython-37.pyc │ │ ├── veriwild_medium_mask.cpython-37.pyc │ │ └── veriwild_small_mask.cpython-37.pyc │ ├── eval_threshold.py │ ├── __init__.py │ ├── .ipynb_checkpoints │ │ ├── __init__-checkpoint.py │ │ ├── veri-checkpoint.py │ │ ├── aic-checkpoint.py │ │ ├── vd2-checkpoint.py │ │ ├── vd1-checkpoint.py │ │ ├── vehicleonem-checkpoint.py │ │ ├── vehicleid_small-checkpoint.py │ │ ├── vehicleid_small_mask-checkpoint.py │ │ ├── vehicleid-checkpoint.py │ │ ├── vehicleid_mask-checkpoint.py │ │ ├── market1501-checkpoint.py │ │ ├── veri_mask-checkpoint.py │ │ ├── veriwild_small-checkpoint.py │ │ ├── veriwild_medium-checkpoint.py │ │ └── veriwild_small_mask-checkpoint.py │ ├── dukemtmcreid.py │ ├── veri.py │ ├── aic.py │ ├── vd2.py │ ├── vd1.py │ ├── vehicleonem.py │ ├── vehicleid_small.py │ ├── vehicleid_small_mask.py │ ├── vehicleid.py │ ├── vehicleid_mask.py │ ├── msmt17.py │ ├── market1501.py │ ├── veri_mask.py │ ├── veriwild_small.py │ ├── veriwild_medium.py │ ├── veriwild_small_mask.py │ └── veriwild_medium_mask.py ├── transforms │ ├── __pycache__ │ │ ├── build.cpython-37.pyc │ │ ├── __init__.cpython-37.pyc │ │ ├── functional.cpython-37.pyc │ │ └── transforms.cpython-37.pyc │ ├── __init__.py │ ├── .ipynb_checkpoints │ │ ├── __init__-checkpoint.py │ │ ├── functional-checkpoint.py │ │ ├── build-checkpoint.py │ │ └── 
transforms-checkpoint.py │ ├── functional.py │ ├── build.py │ └── transforms.py ├── samplers │ ├── __pycache__ │ │ ├── __init__.cpython-37.pyc │ │ └── triplet_sampler.cpython-37.pyc │ ├── __init__.py │ └── .ipynb_checkpoints │ │ └── __init__-checkpoint.py ├── __init__.py └── collate_batch.py ├── engine └── __pycache__ │ ├── trainer.cpython-37.pyc │ ├── inference.cpython-37.pyc │ ├── trainer_gcn.cpython-37.pyc │ ├── trainer_mask.cpython-37.pyc │ ├── inference_gcn.cpython-37.pyc │ ├── inference_selfgcn.cpython-37.pyc │ └── trainer_selfgcn.cpython-37.pyc ├── config ├── __pycache__ │ ├── __init__.cpython-37.pyc │ └── defaults.cpython-37.pyc └── __init__.py ├── solver ├── __init__.py ├── build.py └── lr_scheduler.py ├── configs ├── softmax.yml ├── InsDis_veri.yml ├── softmax_triplet_aic.yml ├── InsDis_veriwild.yml ├── softmax_triplet_vd1.yml ├── softmax_triplet_vd2.yml ├── softmax_triplet_veri.yml ├── softmax_triplet.yml ├── softmax_triplet_multi.yml ├── softmax_triplet_vehicleid.yml ├── softmax_triplet_veri_mask.yml ├── softmax_triplet_veriwild.yml ├── softmax_triplet_vehicleonem.yml ├── veriwild_softmax_baseline.yml └── veriwild_softmax_triplet_baseline.yml ├── Experiment-baseline-veri.sh ├── Experiment-selfgcn-veri.sh └── README.md /csrc/eval_cylib/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /modeling/losses/CMC/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /modeling/losses/InsDis/__init__.py: -------------------------------------------------------------------------------- 1 | # nothing 2 | -------------------------------------------------------------------------------- /modeling/losses/InsDis/.ipynb_checkpoints/__init__-checkpoint.py: 
-------------------------------------------------------------------------------- 1 | # nothing 2 | -------------------------------------------------------------------------------- /tools/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | -------------------------------------------------------------------------------- /utils/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | -------------------------------------------------------------------------------- /data/__pycache__/build.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/__pycache__/build.cpython-37.pyc -------------------------------------------------------------------------------- /data/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /engine/__pycache__/trainer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/engine/__pycache__/trainer.cpython-37.pyc -------------------------------------------------------------------------------- /modeling/__pycache__/utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/modeling/__pycache__/utils.cpython-37.pyc 
-------------------------------------------------------------------------------- /utils/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/utils/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /utils/__pycache__/iotools.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/utils/__pycache__/iotools.cpython-37.pyc -------------------------------------------------------------------------------- /utils/__pycache__/logger.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/utils/__pycache__/logger.cpython-37.pyc -------------------------------------------------------------------------------- /utils/__pycache__/meters.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/utils/__pycache__/meters.cpython-37.pyc -------------------------------------------------------------------------------- /config/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/config/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /config/__pycache__/defaults.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/config/__pycache__/defaults.cpython-37.pyc 
-------------------------------------------------------------------------------- /csrc/eval_cylib/Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | python3 setup.py build_ext --inplace 3 | rm -rf build 4 | 5 | clean: 6 | rm -rf build 7 | rm -f eval_metrics_cy.c *.so -------------------------------------------------------------------------------- /data/__pycache__/prefetcher.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/__pycache__/prefetcher.cpython-37.pyc -------------------------------------------------------------------------------- /engine/__pycache__/inference.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/engine/__pycache__/inference.cpython-37.pyc -------------------------------------------------------------------------------- /modeling/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .resnet import * -------------------------------------------------------------------------------- /config/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: l1aoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .defaults import _C as cfg 8 | -------------------------------------------------------------------------------- /data/__pycache__/collate_batch.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/__pycache__/collate_batch.cpython-37.pyc 
-------------------------------------------------------------------------------- /data/datasets/__pycache__/aic.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/datasets/__pycache__/aic.cpython-37.pyc -------------------------------------------------------------------------------- /data/datasets/__pycache__/bases.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/datasets/__pycache__/bases.cpython-37.pyc -------------------------------------------------------------------------------- /data/datasets/__pycache__/vd1.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/datasets/__pycache__/vd1.cpython-37.pyc -------------------------------------------------------------------------------- /data/datasets/__pycache__/vd2.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/datasets/__pycache__/vd2.cpython-37.pyc -------------------------------------------------------------------------------- /data/datasets/__pycache__/veri.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/datasets/__pycache__/veri.cpython-37.pyc -------------------------------------------------------------------------------- /engine/__pycache__/trainer_gcn.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/engine/__pycache__/trainer_gcn.cpython-37.pyc 
-------------------------------------------------------------------------------- /engine/__pycache__/trainer_mask.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/engine/__pycache__/trainer_mask.cpython-37.pyc -------------------------------------------------------------------------------- /modeling/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/modeling/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /modeling/__pycache__/baseline.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/modeling/__pycache__/baseline.cpython-37.pyc -------------------------------------------------------------------------------- /data/datasets/__pycache__/cuhk03.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/datasets/__pycache__/cuhk03.cpython-37.pyc -------------------------------------------------------------------------------- /data/datasets/__pycache__/msmt17.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/datasets/__pycache__/msmt17.cpython-37.pyc -------------------------------------------------------------------------------- /data/transforms/__pycache__/build.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/transforms/__pycache__/build.cpython-37.pyc 
-------------------------------------------------------------------------------- /engine/__pycache__/inference_gcn.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/engine/__pycache__/inference_gcn.cpython-37.pyc -------------------------------------------------------------------------------- /modeling/losses/__pycache__/loss.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/modeling/losses/__pycache__/loss.cpython-37.pyc -------------------------------------------------------------------------------- /csrc/eval_cylib/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/csrc/eval_cylib/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /data/datasets/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/datasets/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /data/datasets/__pycache__/eval_reid.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/datasets/__pycache__/eval_reid.cpython-37.pyc -------------------------------------------------------------------------------- /data/datasets/__pycache__/market1501.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/datasets/__pycache__/market1501.cpython-37.pyc -------------------------------------------------------------------------------- /data/datasets/__pycache__/vehicleid.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/datasets/__pycache__/vehicleid.cpython-37.pyc -------------------------------------------------------------------------------- /data/datasets/__pycache__/veri_mask.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/datasets/__pycache__/veri_mask.cpython-37.pyc -------------------------------------------------------------------------------- /data/datasets/__pycache__/veriwild.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/datasets/__pycache__/veriwild.cpython-37.pyc -------------------------------------------------------------------------------- /data/samplers/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/samplers/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /data/transforms/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | 8 | from .build import build_transforms -------------------------------------------------------------------------------- /data/transforms/__pycache__/__init__.cpython-37.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/transforms/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /engine/__pycache__/inference_selfgcn.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/engine/__pycache__/inference_selfgcn.cpython-37.pyc -------------------------------------------------------------------------------- /engine/__pycache__/trainer_selfgcn.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/engine/__pycache__/trainer_selfgcn.cpython-37.pyc -------------------------------------------------------------------------------- /modeling/losses/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/modeling/losses/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /modeling/losses/__pycache__/cosface.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/modeling/losses/__pycache__/cosface.cpython-37.pyc -------------------------------------------------------------------------------- /data/datasets/__pycache__/dukemtmcreid.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/datasets/__pycache__/dukemtmcreid.cpython-37.pyc 
-------------------------------------------------------------------------------- /data/datasets/__pycache__/vehicleonem.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/datasets/__pycache__/vehicleonem.cpython-37.pyc -------------------------------------------------------------------------------- /data/transforms/__pycache__/functional.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/transforms/__pycache__/functional.cpython-37.pyc -------------------------------------------------------------------------------- /data/transforms/__pycache__/transforms.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/transforms/__pycache__/transforms.cpython-37.pyc -------------------------------------------------------------------------------- /modeling/__pycache__/baseline_selfgcn.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/modeling/__pycache__/baseline_selfgcn.cpython-37.pyc -------------------------------------------------------------------------------- /modeling/backbones/__pycache__/resnet.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/modeling/backbones/__pycache__/resnet.cpython-37.pyc -------------------------------------------------------------------------------- /data/datasets/__pycache__/dataset_loader.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/datasets/__pycache__/dataset_loader.cpython-37.pyc -------------------------------------------------------------------------------- /data/datasets/__pycache__/vehicleid_mask.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/datasets/__pycache__/vehicleid_mask.cpython-37.pyc -------------------------------------------------------------------------------- /data/datasets/__pycache__/vehicleid_small.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/datasets/__pycache__/vehicleid_small.cpython-37.pyc -------------------------------------------------------------------------------- /data/datasets/__pycache__/veriwild_mask.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/datasets/__pycache__/veriwild_mask.cpython-37.pyc -------------------------------------------------------------------------------- /data/datasets/__pycache__/veriwild_medium.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/datasets/__pycache__/veriwild_medium.cpython-37.pyc -------------------------------------------------------------------------------- /data/datasets/__pycache__/veriwild_small.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/datasets/__pycache__/veriwild_small.cpython-37.pyc -------------------------------------------------------------------------------- 
/data/samplers/__pycache__/triplet_sampler.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/samplers/__pycache__/triplet_sampler.cpython-37.pyc -------------------------------------------------------------------------------- /modeling/backbones/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/modeling/backbones/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /modeling/losses/__pycache__/label_smooth.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/modeling/losses/__pycache__/label_smooth.cpython-37.pyc -------------------------------------------------------------------------------- /modeling/losses/__pycache__/triplet_loss.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/modeling/losses/__pycache__/triplet_loss.cpython-37.pyc -------------------------------------------------------------------------------- /modeling/losses/InsDis/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/modeling/losses/InsDis/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /solver/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | 8 | from .build import 
make_optimizer, make_lr_scheduler 9 | -------------------------------------------------------------------------------- /data/datasets/__pycache__/vehicleid_small_mask.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/datasets/__pycache__/vehicleid_small_mask.cpython-37.pyc -------------------------------------------------------------------------------- /data/datasets/__pycache__/veriwild_medium_mask.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/datasets/__pycache__/veriwild_medium_mask.cpython-37.pyc -------------------------------------------------------------------------------- /data/datasets/__pycache__/veriwild_small_mask.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/data/datasets/__pycache__/veriwild_small_mask.cpython-37.pyc -------------------------------------------------------------------------------- /modeling/losses/InsDis/__pycache__/NCEAverage.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/modeling/losses/InsDis/__pycache__/NCEAverage.cpython-37.pyc -------------------------------------------------------------------------------- /csrc/eval_cylib/eval_metrics_cy.cpython-37m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/csrc/eval_cylib/eval_metrics_cy.cpython-37m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /modeling/losses/InsDis/__pycache__/NCECriterion.cpython-37.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/modeling/losses/InsDis/__pycache__/NCECriterion.cpython-37.pyc -------------------------------------------------------------------------------- /modeling/losses/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .triplet_loss import TripletLoss 8 | from .loss import * -------------------------------------------------------------------------------- /data/transforms/.ipynb_checkpoints/__init__-checkpoint.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | 8 | from .build import build_transforms -------------------------------------------------------------------------------- /modeling/losses/InsDis/__pycache__/alias_multinomial.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxc86739795/vehicle_reid_by_parsing/HEAD/modeling/losses/InsDis/__pycache__/alias_multinomial.cpython-37.pyc -------------------------------------------------------------------------------- /data/samplers/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .triplet_sampler import RandomIdentitySampler, RandomIdentitySampler_mask 8 | -------------------------------------------------------------------------------- /data/samplers/.ipynb_checkpoints/__init__-checkpoint.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: 
sherlockliao01@gmail.com 5 | """ 6 | 7 | from .triplet_sampler import RandomIdentitySampler, RandomIdentitySampler_mask 8 | -------------------------------------------------------------------------------- /data/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | # Changed by Xinchen Liu 8 | 9 | from .build import get_dataloader, get_test_dataloader, get_ins_dataloader, get_dataloader_mask, get_test_dataloader_mask 10 | -------------------------------------------------------------------------------- /modeling/losses/InsDis/normalize.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.autograd import Variable 3 | from torch import nn 4 | 5 | class Normalize(nn.Module): 6 | 7 | def __init__(self, power=2): 8 | super(Normalize, self).__init__() 9 | self.power = power 10 | 11 | def forward(self, x): 12 | norm = x.pow(self.power).sum(1, keepdim=True).pow(1./self.power) 13 | out = x.div(norm) 14 | return out 15 | -------------------------------------------------------------------------------- /modeling/losses/InsDis/.ipynb_checkpoints/normalize-checkpoint.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.autograd import Variable 3 | from torch import nn 4 | 5 | class Normalize(nn.Module): 6 | 7 | def __init__(self, power=2): 8 | super(Normalize, self).__init__() 9 | self.power = power 10 | 11 | def forward(self, x): 12 | norm = x.pow(self.power).sum(1, keepdim=True).pow(1./self.power) 13 | out = x.div(norm) 14 | return out 15 | -------------------------------------------------------------------------------- /modeling/losses/InsDis/utils.py: -------------------------------------------------------------------------------- 1 | class AverageMeter(object): 2 | """Computes and stores the average and 
current value""" 3 | def __init__(self): 4 | self.reset() 5 | 6 | def reset(self): 7 | self.val = 0 8 | self.avg = 0 9 | self.sum = 0 10 | self.count = 0 11 | 12 | def update(self, val, n=1): 13 | self.val = val 14 | self.sum += val * n 15 | self.count += n 16 | self.avg = self.sum / self.count 17 | -------------------------------------------------------------------------------- /modeling/losses/InsDis/.ipynb_checkpoints/utils-checkpoint.py: -------------------------------------------------------------------------------- 1 | class AverageMeter(object): 2 | """Computes and stores the average and current value""" 3 | def __init__(self): 4 | self.reset() 5 | 6 | def reset(self): 7 | self.val = 0 8 | self.avg = 0 9 | self.sum = 0 10 | self.count = 0 11 | 12 | def update(self, val, n=1): 13 | self.val = val 14 | self.sum += val * n 15 | self.count += n 16 | self.avg = self.sum / self.count 17 | -------------------------------------------------------------------------------- /utils/meters.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | 8 | class AverageMeter(object): 9 | """Computes and stores the average and current value""" 10 | def __init__(self): 11 | self.reset() 12 | 13 | def reset(self): 14 | self.val = 0 15 | self.avg = 0 16 | self.sum = 0 17 | self.count = 0 18 | 19 | def update(self, val, n=1): 20 | self.val = val 21 | self.sum += val * n 22 | self.count += n 23 | self.avg = self.sum / self.count -------------------------------------------------------------------------------- /configs/softmax.yml: -------------------------------------------------------------------------------- 1 | DATASETS: 2 | NAMES: ("market1501",) 3 | TEST_NAMES: 'market1501' 4 | 5 | DATALOADER: 6 | SAMPLER: 'softmax' 7 | 8 | SOLVER: 9 | OPT: 'adam' 10 | LOSSTYPE: ('softmax',) 11 | MAX_EPOCHS: 100 12 | BASE_LR: 0.00035 13 | WEIGHT_DECAY: 0.0005 14 
# Experiment baseline : 256x256-bs32x4-warmup10-erase0_5
# Dataset: veri
# imagesize: 256x256
# batchsize: 64x4
# warmup_step 0
# random erase prob 0.5
#
# Launches baseline softmax+triplet training on the VeRi dataset across 4 GPUs.
# The trailing KEY VALUE pairs override entries of the -cfg YAML config.
# NOTE(review): the header above says "batchsize: 64x4" but SOLVER.IMS_PER_BATCH
# is overridden to 512 below -- confirm which is intended.
CUDA_VISIBLE_DEVICES='0,1,2,3' python tools/train.py -cfg='configs/softmax_triplet_veri.yml' \
DATASETS.NAMES '("veri",)' \
SOLVER.IMS_PER_BATCH '512' \
MODEL.WITH_IBN 'False' \
MODEL.BACKBONE 'resnet50' \
SOLVER.OPT 'adam' \
SOLVER.LOSSTYPE '("softmax", "triplet")' \
OUTPUT_DIR '/home/liuxinchen3/notespace/project/vehiclereid/reid_baseline/experiment/veri/Experiment-baseline-veri_0417_0000'
def numpy_include():
    """Return NumPy's C header directory.

    Very old NumPy releases exposed it as get_numpy_include(); fall back to
    that when get_include() is absent.
    """
    try:
        return np.get_include()
    except AttributeError:
        return np.get_numpy_include()


ext_modules = [
    Extension(
        'eval_metrics_cy',
        ['eval_metrics_cy.pyx'],
        include_dirs=[numpy_include()],
    )
]

setup(
    name='Cython-based reid evaluation code',
    ext_modules=cythonize(ext_modules),
)
MODEL: 2 | BACKBONE: "resnet50" 3 | GCB: 4 | ratio: 0.0625 5 | 6 | 7 | DATASETS: 8 | NAMES: ("veri",) 9 | TEST_NAMES: ("veri",) 10 | 11 | DATALOADER: 12 | SAMPLER: 'triplet' 13 | NUM_INSTANCE: 4 14 | 15 | SOLVER: 16 | OPT: "adam" 17 | LOSSTYPE: ('softmax', 'triplet') 18 | MAX_EPOCHS: 100 19 | BASE_LR: 0.00035 20 | WEIGHT_DECAY: 0.0005 21 | WEIGHT_DECAY_BIAS: 0.0005 22 | IMS_PER_BATCH: 128 23 | 24 | STEPS: [40, 80] 25 | GAMMA: 0.1 26 | 27 | WARMUP_FACTOR: 0.01 28 | WARMUP_ITERS: 0 29 | 30 | EVAL_PERIOD: 10 31 | 32 | TEST: 33 | IMS_PER_BATCH: 512 34 | WEIGHT: "path" 35 | 36 | 37 | 38 | -------------------------------------------------------------------------------- /configs/softmax_triplet.yml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | BACKBONE: "resnet50" 3 | GCB: 4 | ratio: 0.0625 5 | 6 | 7 | DATASETS: 8 | NAMES: ('market1501',) 9 | TEST_NAMES: "market1501" 10 | 11 | DATALOADER: 12 | SAMPLER: 'triplet' 13 | NUM_INSTANCE: 4 14 | 15 | SOLVER: 16 | OPT: "adam" 17 | LOSSTYPE: ('softmax', 'triplet') 18 | MAX_EPOCHS: 120 19 | BASE_LR: 0.00035 20 | WEIGHT_DECAY: 0.0005 21 | WEIGHT_DECAY_BIAS: 0.0005 22 | IMS_PER_BATCH: 64 23 | 24 | STEPS: [40, 90] 25 | GAMMA: 0.1 26 | 27 | WARMUP_FACTOR: 0.01 28 | WARMUP_ITERS: 10 29 | 30 | EVAL_PERIOD: 30 31 | 32 | TEST: 33 | IMS_PER_BATCH: 512 34 | WEIGHT: "path" 35 | 36 | 37 | 38 | -------------------------------------------------------------------------------- /configs/softmax_triplet_multi.yml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | BACKBONE: "resnet50" 3 | GCB: 4 | ratio: 0.0625 5 | 6 | 7 | DATASETS: 8 | NAMES: ("veri",) 9 | TEST_NAMES: ("veri",) 10 | 11 | DATALOADER: 12 | SAMPLER: 'triplet' 13 | NUM_INSTANCE: 4 14 | 15 | SOLVER: 16 | OPT: "adam" 17 | LOSSTYPE: ('softmax', 'triplet') 18 | MAX_EPOCHS: 120 19 | BASE_LR: 0.00035 20 | WEIGHT_DECAY: 0.0005 21 | WEIGHT_DECAY_BIAS: 0.0005 22 | IMS_PER_BATCH: 128 23 | 24 | STEPS: 
[40, 90] 25 | GAMMA: 0.1 26 | 27 | WARMUP_FACTOR: 0.01 28 | WARMUP_ITERS: 10 29 | 30 | EVAL_PERIOD: 10 31 | 32 | TEST: 33 | IMS_PER_BATCH: 512 34 | WEIGHT: "path" 35 | 36 | 37 | 38 | -------------------------------------------------------------------------------- /configs/softmax_triplet_vehicleid.yml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | BACKBONE: "resnet50" 3 | GCB: 4 | ratio: 0.0625 5 | 6 | 7 | DATASETS: 8 | NAMES: ("vehicleid",) 9 | TEST_NAMES: ("vehicleid",) 10 | 11 | DATALOADER: 12 | SAMPLER: 'triplet' 13 | NUM_INSTANCE: 4 14 | 15 | SOLVER: 16 | OPT: "adam" 17 | LOSSTYPE: ('softmax', 'triplet') 18 | MAX_EPOCHS: 100 19 | BASE_LR: 0.00035 20 | WEIGHT_DECAY: 0.0005 21 | WEIGHT_DECAY_BIAS: 0.0005 22 | IMS_PER_BATCH: 128 23 | 24 | STEPS: [40, 80] 25 | GAMMA: 0.1 26 | 27 | WARMUP_FACTOR: 0.01 28 | WARMUP_ITERS: 0 29 | 30 | EVAL_PERIOD: 10 31 | 32 | TEST: 33 | IMS_PER_BATCH: 512 34 | WEIGHT: "path" 35 | 36 | 37 | 38 | -------------------------------------------------------------------------------- /configs/softmax_triplet_veri_mask.yml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | BACKBONE: "resnet50" 3 | GCB: 4 | ratio: 0.0625 5 | 6 | 7 | DATASETS: 8 | NAMES: ("veri_mask",) 9 | TEST_NAMES: ("veri_mask",) 10 | 11 | DATALOADER: 12 | SAMPLER: 'triplet' 13 | NUM_INSTANCE: 4 14 | 15 | SOLVER: 16 | OPT: "adam" 17 | LOSSTYPE: ('softmax', 'triplet') 18 | MAX_EPOCHS: 100 19 | BASE_LR: 0.00035 20 | WEIGHT_DECAY: 0.0005 21 | WEIGHT_DECAY_BIAS: 0.0005 22 | IMS_PER_BATCH: 128 23 | 24 | STEPS: [40, 80] 25 | GAMMA: 0.1 26 | 27 | WARMUP_FACTOR: 0.01 28 | WARMUP_ITERS: 0 29 | 30 | EVAL_PERIOD: 10 31 | 32 | TEST: 33 | IMS_PER_BATCH: 512 34 | WEIGHT: "path" 35 | 36 | 37 | 38 | -------------------------------------------------------------------------------- /configs/softmax_triplet_veriwild.yml: -------------------------------------------------------------------------------- 1 
| MODEL: 2 | BACKBONE: "resnet50" 3 | GCB: 4 | ratio: 0.0625 5 | 6 | 7 | DATASETS: 8 | NAMES: ("veriwild",) 9 | TEST_NAMES: ("veriwild",) 10 | 11 | DATALOADER: 12 | SAMPLER: 'triplet' 13 | NUM_INSTANCE: 4 14 | 15 | SOLVER: 16 | OPT: "adam" 17 | LOSSTYPE: ('softmax', 'triplet') 18 | MAX_EPOCHS: 100 19 | BASE_LR: 0.00035 20 | WEIGHT_DECAY: 0.0005 21 | WEIGHT_DECAY_BIAS: 0.0005 22 | IMS_PER_BATCH: 128 23 | 24 | STEPS: [40, 80] 25 | GAMMA: 0.1 26 | 27 | WARMUP_FACTOR: 0.01 28 | WARMUP_ITERS: 0 29 | 30 | EVAL_PERIOD: 10 31 | 32 | TEST: 33 | IMS_PER_BATCH: 512 34 | WEIGHT: "path" 35 | 36 | 37 | 38 | -------------------------------------------------------------------------------- /configs/softmax_triplet_vehicleonem.yml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | BACKBONE: "resnet50" 3 | GCB: 4 | ratio: 0.0625 5 | 6 | 7 | DATASETS: 8 | NAMES: ('vehicleonem',) 9 | TEST_NAMES: "vehicleonem" 10 | 11 | DATALOADER: 12 | SAMPLER: 'triplet' 13 | NUM_INSTANCE: 4 14 | 15 | SOLVER: 16 | OPT: "adam" 17 | LOSSTYPE: ('softmax', 'triplet') 18 | MAX_EPOCHS: 120 19 | BASE_LR: 0.00035 20 | WEIGHT_DECAY: 0.0005 21 | WEIGHT_DECAY_BIAS: 0.0005 22 | IMS_PER_BATCH: 128 23 | 24 | STEPS: [40, 90] 25 | GAMMA: 0.1 26 | 27 | WARMUP_FACTOR: 0.01 28 | WARMUP_ITERS: 10 29 | 30 | EVAL_PERIOD: 10 31 | 32 | TEST: 33 | IMS_PER_BATCH: 512 34 | WEIGHT: "path" 35 | 36 | 37 | 38 | -------------------------------------------------------------------------------- /configs/veriwild_softmax_baseline.yml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | BACKBONE: "resnet50" 3 | GCB: 4 | ratio: 0.0625 5 | 6 | 7 | DATASETS: 8 | NAMES: ("veriwild",) 9 | TEST_NAMES: ("veriwild",) 10 | 11 | DATALOADER: 12 | SAMPLER: 'triplet' 13 | NUM_INSTANCE: 4 14 | 15 | SOLVER: 16 | OPT: "adam" 17 | LOSSTYPE: ('softmax',) 18 | MAX_EPOCHS: 100 19 | BASE_LR: 0.00035 20 | WEIGHT_DECAY: 0.0005 21 | WEIGHT_DECAY_BIAS: 0.0005 22 | 
IMS_PER_BATCH: 256 23 | 24 | STEPS: [40, 80] 25 | GAMMA: 0.1 26 | 27 | WARMUP_FACTOR: 0.01 28 | WARMUP_ITERS: 0 29 | 30 | EVAL_PERIOD: 10 31 | LOG_INTERVAL: 10 32 | 33 | TEST: 34 | IMS_PER_BATCH: 512 35 | WEIGHT: "path" 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /configs/veriwild_softmax_triplet_baseline.yml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | BACKBONE: "resnet50" 3 | GCB: 4 | ratio: 0.0625 5 | 6 | 7 | DATASETS: 8 | NAMES: ("veriwild",) 9 | TEST_NAMES: ("veriwild",) 10 | 11 | DATALOADER: 12 | SAMPLER: 'triplet' 13 | NUM_INSTANCE: 4 14 | 15 | SOLVER: 16 | OPT: "adam" 17 | LOSSTYPE: ('softmax', 'triplet') 18 | MAX_EPOCHS: 120 19 | BASE_LR: 0.00035 20 | WEIGHT_DECAY: 0.0005 21 | WEIGHT_DECAY_BIAS: 0.0005 22 | IMS_PER_BATCH: 256 23 | 24 | STEPS: [40, 90] 25 | GAMMA: 0.1 26 | 27 | WARMUP_FACTOR: 0.01 28 | WARMUP_ITERS: 10 29 | 30 | EVAL_PERIOD: 10 31 | LOG_INTERVAL: 10 32 | 33 | TEST: 34 | IMS_PER_BATCH: 1024 35 | WEIGHT: "path" 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /Experiment-selfgcn-veri.sh: -------------------------------------------------------------------------------- 1 | # Experiment baseline : 256x256-bs32x4-warmup10-erase0_5 2 | # Dataset: veri 3 | # imagesize: 256x256 4 | # batchsize: 32x4 5 | # warmup_step 0 6 | # random erase prob 0 7 | CUDA_VISIBLE_DEVICES='0,1,2,3' python tools/train_selfgcn.py -cfg='configs/softmax_triplet_veri_mask.yml' \ 8 | DATASETS.NAMES '("veri_mask",)' \ 9 | DATASETS.TEST_NAMES '("veri_mask",)' \ 10 | SOLVER.IMS_PER_BATCH '256' \ 11 | SOLVER.OPT 'adam' \ 12 | SOLVER.LOSSTYPE '("softmax", "triplet")' \ 13 | MODEL.BACKBONE 'resnet50' \ 14 | MODEL.PRETRAIN_PATH '/home/liuxinchen3/notespace/project/resnet_pretrain/resnet50-19c8e357.pth' \ 15 | MODEL.NUM_PARTS '10' \ 16 | OUTPUT_DIR 
def mkdir_if_missing(directory):
    """Create `directory` (including parents) unless it already exists."""
    if osp.exists(directory):
        return
    try:
        os.makedirs(directory)
    except OSError as e:
        # a concurrent creator may win the race between the check and
        # makedirs; treat "already exists" as success
        if e.errno != errno.EEXIST:
            raise


def check_isfile(path):
    """Return True if `path` is a regular file; warn (don't raise) otherwise."""
    isfile = osp.isfile(path)
    if not isfile:
        print("=> Warning: no file found at '{}' (ignored)".format(path))
    return isfile


def read_json(fpath):
    """Load and return the JSON object stored at `fpath`."""
    with open(fpath, 'r') as f:
        return json.load(f)


def write_json(obj, fpath):
    """Serialize `obj` as pretty-printed JSON to `fpath`, creating parent dirs."""
    mkdir_if_missing(osp.dirname(fpath))
    with open(fpath, 'w') as f:
        json.dump(obj, f, indent=4, separators=(',', ': '))
Pon_div.clone().fill_(K * Pns) 26 | lnPon = torch.div(Pon, Pon_div) 27 | 28 | # equation 6 in ref. A 29 | lnPmt.log_() 30 | lnPon.log_() 31 | 32 | lnPmtsum = lnPmt.sum(0) 33 | lnPonsum = lnPon.view(-1, 1).sum(0) 34 | 35 | loss = - (lnPmtsum + lnPonsum) / batchSize 36 | 37 | return loss 38 | 39 | -------------------------------------------------------------------------------- /modeling/losses/InsDis/.ipynb_checkpoints/NCECriterion-checkpoint.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | 4 | eps = 1e-7 5 | 6 | class NCECriterion(nn.Module): 7 | 8 | def __init__(self, nLem): 9 | super(NCECriterion, self).__init__() 10 | self.nLem = nLem 11 | 12 | def forward(self, x, targets): 13 | batchSize = x.size(0) 14 | K = x.size(1)-1 15 | Pnt = 1 / float(self.nLem) 16 | Pns = 1 / float(self.nLem) 17 | 18 | # eq 5.1 : P(origin=model) = Pmt / (Pmt + k*Pnt) 19 | Pmt = x.select(1,0) 20 | Pmt_div = Pmt.add(K * Pnt + eps) 21 | lnPmt = torch.div(Pmt, Pmt_div) 22 | 23 | # eq 5.2 : P(origin=noise) = k*Pns / (Pms + k*Pns) 24 | Pon_div = x.narrow(1,1,K).add(K * Pns + eps) 25 | Pon = Pon_div.clone().fill_(K * Pns) 26 | lnPon = torch.div(Pon, Pon_div) 27 | 28 | # equation 6 in ref. 
def weights_init_kaiming(m):
    """Kaiming-initialize Linear/Conv weights and reset BatchNorm affine params.

    Meant to be used via `model.apply(weights_init_kaiming)`; modules whose
    class name matches none of the patterns are left untouched.
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
        # BUG FIX: guard the bias — Linear layers created with bias=False
        # have m.bias is None and the unconditional init crashed
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif classname.find('Conv') != -1:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif classname.find('BatchNorm') != -1:
        if m.affine:
            nn.init.constant_(m.weight, 1.0)
            nn.init.constant_(m.bias, 0.0)


def weights_init_classifier(m):
    """Initialize a classifier head: tiny-std normal weights, zero bias."""
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        nn.init.normal_(m.weight, std=0.001)
        # BUG FIX: `if m.bias:` evaluates tensor truthiness, which raises
        # "Boolean value of Tensor with more than one element is ambiguous"
        # for any real bias vector; test for presence instead
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)


def BN_no_bias(in_features):
    """Return a BatchNorm1d layer whose bias (beta) is frozen at its init value."""
    bn_layer = nn.BatchNorm1d(in_features)
    bn_layer.bias.requires_grad_(False)
    return bn_layer
def make_optimizer(cfg, model):
    """Build the optimizer named by cfg.SOLVER.OPT over per-parameter groups.

    Every trainable parameter gets its own group; parameters whose name
    contains "bias" use BASE_LR * BIAS_LR_FACTOR and WEIGHT_DECAY_BIAS,
    everything else uses BASE_LR and WEIGHT_DECAY.

    Raises:
        NameError: if cfg.SOLVER.OPT is not one of sgd/adam/adamw.
    """
    param_groups = []
    for name, p in model.named_parameters():
        if not p.requires_grad:
            continue  # frozen parameters are excluded entirely
        if "bias" in name:
            group_lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR
            group_wd = cfg.SOLVER.WEIGHT_DECAY_BIAS
        else:
            group_lr = cfg.SOLVER.BASE_LR
            group_wd = cfg.SOLVER.WEIGHT_DECAY
        param_groups.append({"params": [p], "lr": group_lr, "weight_decay": group_wd})

    opt_name = cfg.SOLVER.OPT
    if opt_name == 'sgd':
        return torch.optim.SGD(param_groups, momentum=cfg.SOLVER.MOMENTUM)
    if opt_name == 'adam':
        return torch.optim.Adam(param_groups)
    if opt_name == 'adamw':
        return torch.optim.AdamW(param_groups)
    raise NameError(f'optimizer {cfg.SOLVER.OPT} not support')


def make_lr_scheduler(cfg, optimizer):
    """Wrap `optimizer` in a WarmupMultiStepLR driven by cfg.SOLVER settings."""
    return WarmupMultiStepLR(
        optimizer,
        cfg.SOLVER.STEPS,
        cfg.SOLVER.GAMMA,
        warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
        warmup_iters=cfg.SOLVER.WARMUP_ITERS,
        warmup_method=cfg.SOLVER.WARMUP_METHOD,
    )
19 | """ 20 | def __init__(self, num_classes, epsilon=0.1): 21 | super(CrossEntropyLabelSmooth, self).__init__() 22 | self.num_classes = num_classes 23 | self.epsilon = epsilon 24 | self.logsoftmax = nn.LogSoftmax(dim=1) 25 | 26 | def forward(self, inputs, targets): 27 | """ 28 | Args: 29 | inputs: prediction matrix (before softmax) with shape (batch_size, num_classes) 30 | targets: ground truth labels with shape (num_classes) 31 | """ 32 | log_probs = self.logsoftmax(inputs) 33 | targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).data.cpu(), 1) 34 | targets = targets.to(inputs.device) 35 | targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes 36 | loss = (-targets * log_probs).mean(0).sum() 37 | return loss 38 | -------------------------------------------------------------------------------- /utils/logger.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import logging 8 | import os 9 | import sys 10 | 11 | from .iotools import mkdir_if_missing 12 | 13 | 14 | def setup_logger(name, save_dir, distributed_rank): 15 | logger = logging.getLogger(name) 16 | logger.setLevel(logging.DEBUG) 17 | # don't log results for the non-master process 18 | if distributed_rank > 0: 19 | return logger 20 | ch = logging.StreamHandler(stream=sys.stdout) 21 | ch.setLevel(logging.DEBUG) 22 | formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s") 23 | ch.setFormatter(formatter) 24 | logger.addHandler(ch) 25 | 26 | if save_dir: 27 | fh = logging.FileHandler(os.path.join(save_dir, "log.txt"), mode='w') 28 | fh.setLevel(logging.DEBUG) 29 | fh.setFormatter(formatter) 30 | logger.addHandler(fh) 31 | 32 | return logger 33 | 34 | 35 | class Logger(object): 36 | def __init__(self, fpath=None): 37 | self.console = sys.stdout 38 | self.file = None 39 | if fpath is not None: 40 | 
class CenterLoss(nn.Module):
    """Center loss.

    Reference:
    Wen et al. A Discriminative Feature Learning Approach for Deep Face Recognition. ECCV 2016.

    Args:
        num_classes (int): number of classes.
        feat_dim (int): feature dimension.
        use_gpu (bool): keep the learnable class centers on the GPU.
    """

    def __init__(self, num_classes=751, feat_dim=2048, use_gpu=True):
        super(CenterLoss, self).__init__()
        self.num_classes, self.feat_dim = num_classes, feat_dim

        # one learnable feat_dim-vector per class
        if use_gpu: self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim).cuda())
        else: self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim))

    def forward(self, x, labels):
        """
        Args:
            x: feature matrix with shape (batch_size, feat_dim).
            labels: ground truth labels with shape (batch_size,).
        """
        assert x.size(0) == labels.size(0), "features.size(0) is not equal to labels.size(0)"

        batch_size = x.size(0)
        # squared Euclidean distance ||x_i - c_j||^2 expanded as
        # ||x_i||^2 + ||c_j||^2 - 2 * <x_i, c_j>
        distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \
                  torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t()
        # BUG FIX: the positional addmm_(beta, alpha, mat1, mat2) signature was
        # deprecated and then removed from PyTorch; use keyword beta/alpha
        distmat.addmm_(x, self.centers.t(), beta=1, alpha=-2)

        classes = torch.arange(self.num_classes).long()
        classes = classes.to(x.device)
        labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)
        mask = labels.eq(classes.expand(batch_size, self.num_classes))

        # keep only each sample's distance to its own class center
        dist = distmat * mask.float()
        loss = dist.clamp(min=1e-12, max=1e+12).sum() / batch_size
        return loss
31 | """ 32 | assert x.size(0) == labels.size(0), "features.size(0) is not equal to labels.size(0)" 33 | 34 | batch_size = x.size(0) 35 | distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \ 36 | torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t() 37 | distmat.addmm_(1, -2, x, self.centers.t()) 38 | 39 | classes = torch.arange(self.num_classes).long() 40 | classes = classes.to(x.device) 41 | labels = labels.unsqueeze(1).expand(batch_size, self.num_classes) 42 | mask = labels.eq(classes.expand(batch_size, self.num_classes)) 43 | 44 | dist = distmat * mask.float() 45 | loss = dist.clamp(min=1e-12, max=1e+12).sum() / batch_size 46 | return loss 47 | -------------------------------------------------------------------------------- /modeling/losses/loss.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | from torch import nn 7 | 8 | from .label_smooth import CrossEntropyLabelSmooth 9 | from .triplet_loss import TripletLoss 10 | 11 | __all__ = ['reidLoss'] 12 | 13 | 14 | class reidLoss(object): 15 | def __init__(self, lossType: list, margin: float, num_classes: float): 16 | super().__init__() 17 | self.lossType = lossType 18 | 19 | if 'softmax' in self.lossType: self.ce_loss = nn.CrossEntropyLoss() 20 | if 'softmax_smooth' in self.lossType: self.ce_loss = CrossEntropyLabelSmooth(num_classes) 21 | if 'triplet' in self.lossType: self.triplet_loss = TripletLoss(margin) 22 | # if 'center' in self.lossType: self.center_loss = CenterLoss(num_classes, feat_dim) 23 | 24 | def __call__(self, outputs, labels): 25 | # cls_scores, feats = outputs 26 | loss = {} 27 | if 'softmax' or 'softmax_smooth' in self.lossType: 28 | loss['ce_loss'] = self.ce_loss(outputs[0], labels) 29 | # loss['ce_loss'] = 0 30 | # ce_iter = 0 31 | # for output in outputs[1:]: 32 | # 
loss['ce_loss'] += self.ce_loss(output, labels) 33 | # ce_iter += 1 34 | # loss['ce_loss'] = 2 * loss['ce_loss'] / ce_iter 35 | if 'triplet' in self.lossType: 36 | loss['triplet'] = self.triplet_loss(outputs[1], labels)[0] 37 | # tri_iter = 0 38 | # for output in outputs[:3]: 39 | # loss['triplet'] += self.triplet_loss(output, labels)[0] 40 | # tri_iter += 1 41 | # loss['triplet'] = loss['triplet'] / tri_iter 42 | # loss['triplet'] = self.triplet_loss(feats, labels)[0] 43 | # if 'center' in self.lossType: loss += 0.0005 * self.center_loss(feats, labels) 44 | return loss 45 | -------------------------------------------------------------------------------- /modeling/losses/InsDis/LinearAverage.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.autograd import Function 3 | from torch import nn 4 | import math 5 | 6 | class LinearAverageOp(Function): 7 | @staticmethod 8 | def forward(self, x, y, memory, params): 9 | T = params[0].item() 10 | batchSize = x.size(0) 11 | 12 | # inner product 13 | out = torch.mm(x.data, memory.t()) 14 | out.div_(T) # batchSize * N 15 | 16 | self.save_for_backward(x, memory, y, params) 17 | 18 | return out 19 | 20 | @staticmethod 21 | def backward(self, gradOutput): 22 | x, memory, y, params = self.saved_tensors 23 | batchSize = gradOutput.size(0) 24 | T = params[0].item() 25 | momentum = params[1].item() 26 | 27 | # add temperature 28 | gradOutput.data.div_(T) 29 | 30 | # gradient of linear 31 | gradInput = torch.mm(gradOutput.data, memory) 32 | gradInput.resize_as_(x) 33 | 34 | # update the non-parametric data 35 | weight_pos = memory.index_select(0, y.data.view(-1)).resize_as_(x) 36 | weight_pos.mul_(momentum) 37 | weight_pos.add_(torch.mul(x.data, 1-momentum)) 38 | w_norm = weight_pos.pow(2).sum(1, keepdim=True).pow(0.5) 39 | updated_weight = weight_pos.div(w_norm) 40 | memory.index_copy_(0, y, updated_weight) 41 | 42 | return gradInput, None, None, None 43 | 44 | class 
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
    """Multi-step LR decay preceded by a warmup phase.

    During the first ``warmup_iters`` epochs the base LR is scaled by a factor
    that is either constant or linearly interpolated from ``warmup_factor`` to
    1; afterwards the LR is multiplied by ``gamma`` at each milestone.

    Args:
        optimizer: wrapped optimizer.
        milestones: increasing list of epoch indices at which to decay.
        gamma (float): multiplicative decay factor per milestone.
        warmup_factor (float): starting scale of the warmup.
        warmup_iters (int): number of warmup epochs.
        warmup_method (str): 'constant' or 'linear'.
        last_epoch (int): index of the last epoch (-1 to start fresh).

    Raises:
        ValueError: if ``milestones`` is not sorted or ``warmup_method`` is
            unknown.
    """

    def __init__(
        self,
        optimizer,
        milestones,
        gamma=0.1,
        warmup_factor=1.0 / 3,
        warmup_iters=500,
        warmup_method="linear",
        last_epoch=-1,
    ):
        if not list(milestones) == sorted(milestones):
            # BUG FIX: the original passed the format string and the value as
            # two separate ValueError arguments, so the exception carried an
            # unformatted tuple instead of a readable message.
            raise ValueError(
                "Milestones should be a list of"
                " increasing integers. Got {}".format(milestones)
            )

        if warmup_method not in ("constant", "linear"):
            # BUG FIX: added the missing space before "got" (the two adjacent
            # literals previously concatenated to "acceptedgot").
            raise ValueError(
                "Only 'constant' or 'linear' warmup_method accepted "
                "got {}".format(warmup_method)
            )
        self.milestones = milestones
        self.gamma = gamma
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        """Return the per-group LR for the current epoch."""
        warmup_factor = 1
        if self.last_epoch < self.warmup_iters:
            if self.warmup_method == "constant":
                warmup_factor = self.warmup_factor
            elif self.warmup_method == "linear":
                alpha = self.last_epoch / self.warmup_iters
                warmup_factor = self.warmup_factor * (1 - alpha) + alpha
        # bisect_right counts how many milestones have already passed.
        return [
            base_lr
            * warmup_factor
            * self.gamma ** bisect_right(self.milestones, self.last_epoch)
            for base_lr in self.base_lrs
        ]
def eval_roc(distmat, q_pids, g_pids, q_cmaids, g_camids, t_start=0.1, t_end=0.9):
    """Sweep thresholds over a query/gallery similarity matrix and build a ROC.

    Args:
        distmat (np.ndarray): (num_query, num_gallery) cosine similarities
            (larger = more similar).
        q_pids, g_pids (np.ndarray): query / gallery identity labels.
        q_cmaids, g_camids (np.ndarray): query / gallery camera ids.
        t_start, t_end (float): threshold sweep bounds (step 0.02).

    Returns:
        (fpr, tpr, fps, tps, p, n, thresholds): per-threshold false/true
        positive rates and counts, total positives ``p``, total negatives
        ``n``, and the threshold array.
    """
    # Sort similarity from large to small per query.
    indices = np.argsort(distmat, axis=1)[:, ::-1]
    # 1 where the ranked gallery item shares the query identity.
    matches = (g_pids[indices] == q_pids[:, np.newaxis]).astype(np.int32)

    new_dist = []
    new_matches = []
    # Remove gallery items of the same identity seen by the same camera.
    num_q = distmat.shape[0]
    for q_idx in range(num_q):
        q_pid = q_pids[q_idx]
        q_camid = q_cmaids[q_idx]

        order = indices[q_idx]
        remove = (g_pids[order] == q_pid) & (g_camids[order] == q_camid)
        keep = np.invert(remove)
        new_matches.extend(matches[q_idx][keep].tolist())
        new_dist.extend(distmat[q_idx][order][keep].tolist())

    fpr = []
    tpr = []
    fps = []
    tps = []
    thresholds = np.arange(t_start, t_end, 0.02)

    # Number of positive and negative pairs after filtering.
    p = sum(new_matches)
    n = len(new_matches) - p

    # For each threshold, count pairs whose similarity exceeds it.
    for t in thresholds:
        fp = 0
        tp = 0
        for i in range(len(new_dist)):
            if new_dist[i] > t:
                if new_matches[i] == 1:
                    tp += 1
                else:
                    fp += 1
        # BUG FIX: guard against ZeroDivisionError when the filtered set
        # contains no negatives (n == 0) or no positives (p == 0).
        fpr.append(fp / float(n) if n > 0 else 0.0)
        tpr.append(tp / float(p) if p > 0 else 0.0)
        fps.append(fp)
        tps.append(tp)
    return fpr, tpr, fps, tps, p, n, thresholds
def get_names():
    """Return the identifiers of every registered dataset."""
    return __factory.keys()


def init_dataset(name, *args, **kwargs):
    """Instantiate the dataset class registered under ``name``.

    Args:
        name (str): one of the keys returned by :func:`get_names`.
        *args, **kwargs: forwarded to the dataset constructor.

    Raises:
        KeyError: if ``name`` is not a registered dataset.
    """
    try:
        dataset_cls = __factory[name]
    except KeyError:
        raise KeyError("Unknown datasets: {}".format(name))
    return dataset_cls(*args, **kwargs)
def main():
    """Parse CLI arguments, assemble the config, and launch self-GCN training."""
    parser = argparse.ArgumentParser(description="ReID Model Training")
    parser.add_argument(
        '-cfg', "--config_file",
        default="",
        metavar="FILE",
        help="path to config file",
        type=str
    )
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)

    # GPUs made visible to this process; default to a single device "0".
    visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", '0')
    num_gpus = len([int(d) for d in visible_devices.split(',')])

    cfg.freeze()

    log_save_dir = os.path.join(cfg.OUTPUT_DIR, '-'.join(cfg.DATASETS.TEST_NAMES), cfg.MODEL.VERSION)
    if not os.path.exists(log_save_dir):
        os.makedirs(log_save_dir)

    logger = setup_logger("reid_baseline.train", log_save_dir, 0)
    logger.info("Using {} GPUs.".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    logger.info('start training')
    cudnn.benchmark = True

    # TensorBoard events go under <log_save_dir>/tf.
    writer = SummaryWriter(os.path.join(log_save_dir, 'tf'))
    reid_system = ReidSystem(cfg, logger, writer)
    reid_system.train()
def main():
    """Entry point for plain (image-only) re-id model inference."""
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument('-cfg',
                        "--config_file", default="", help="path to config file", type=str
                        )
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    # GPUs made visible to this process; default to a single device "0".
    visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", '0')
    num_gpus = len([int(d) for d in visible_devices.split(',')])

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # set pretrain = False to avoid loading weights repeatedly
    cfg.MODEL.PRETRAIN = False
    cfg.freeze()

    logger = setup_logger("reid_baseline", False, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    cudnn.benchmark = True

    model = build_model(cfg, 0)
    # Load backbone weights only (classifier head is skipped).
    model.load_params_wo_fc(torch.load(cfg.TEST.WEIGHT))
    if num_gpus > 1:
        model = nn.DataParallel(model)
    model = model.cuda()

    print('prepare test set ...')
    test_dataloader_collection, num_query_collection, _ = get_test_dataloader(cfg)

    inference(cfg, model, test_dataloader_collection, num_query_collection, use_mask=False)
class AliasMethod(object):
    '''
    Walker's alias method: O(1) sampling from a discrete distribution.
    From: https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
    '''
    def __init__(self, probs):
        # Normalise in place when the weights sum to more than one.
        if probs.sum() > 1:
            probs.div_(probs.sum())

        K = len(probs)
        self.prob = torch.zeros(K)
        self.alias = torch.LongTensor([0] * K)

        # Partition outcomes by whether their scaled mass K*p is under 1.
        under = []
        over = []
        for idx, p in enumerate(probs):
            self.prob[idx] = K * p
            if self.prob[idx] < 1.0:
                under.append(idx)
            else:
                over.append(idx)

        # Pair each light outcome with a heavy one; the heavy outcome donates
        # mass until the light bucket is exactly full, then is re-classified.
        while len(under) > 0 and len(over) > 0:
            light = under.pop()
            heavy = over.pop()

            self.alias[light] = heavy
            self.prob[heavy] = (self.prob[heavy] - 1.0) + self.prob[light]

            if self.prob[heavy] < 1.0:
                under.append(heavy)
            else:
                over.append(heavy)

        # Any leftovers hold (numerically) full probability.
        for leftover in under + over:
            self.prob[leftover] = 1

    def cuda(self):
        # Move both lookup tables to the GPU.
        self.prob = self.prob.cuda()
        self.alias = self.alias.cuda()

    def draw(self, N):
        '''
        Draw N samples from the multinomial.
        '''
        K = self.alias.size(0)

        kk = torch.zeros(N, dtype=torch.long, device=self.prob.device).random_(0, K)
        prob = self.prob.index_select(0, kk)
        alias = self.alias.index_select(0, kk)
        # b chooses between the sampled bin (b=1) and its alias (b=0).
        b = torch.bernoulli(prob)
        oq = kk.mul(b.long())
        oj = alias.mul((1 - b).long())

        return oq + oj
def main():
    """Entry point for self-GCN (image + parsing mask) re-id inference."""
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument('-cfg',
                        "--config_file", default="", help="path to config file", type=str
                        )
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    # GPUs made visible to this process; default to a single device "0".
    visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", '0')
    num_gpus = len([int(d) for d in visible_devices.split(',')])

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # set pretrain = False to avoid loading weights repeatedly
    cfg.MODEL.PRETRAIN = False
    cfg.freeze()

    logger = setup_logger("reid_baseline", False, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    cudnn.benchmark = True

    model = build_model_selfgcn(cfg, 0)
    # Load backbone weights only (classifier head is skipped).
    model.load_params_wo_fc(torch.load(cfg.TEST.WEIGHT))
    if num_gpus > 1:
        model = nn.DataParallel(model)
    model = model.cuda()

    print('prepare test set ...')
    test_dataloader_collection, num_query_collection, _ = get_test_dataloader_mask(cfg)

    inference(cfg, model, test_dataloader_collection, num_query_collection, use_mask=True)
def main():
    """Entry point for re-id inference with result visualisation enabled."""
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument('-cfg',
                        "--config_file", default="", help="path to config file", type=str
                        )
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    # GPUs made visible to this process; default to a single device "0".
    visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", '0')
    num_gpus = len([int(d) for d in visible_devices.split(',')])

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # set pretrain = False to avoid loading weights repeatedly
    cfg.MODEL.PRETRAIN = False
    cfg.freeze()

    logger = setup_logger("reid_baseline", False, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    cudnn.benchmark = True

    model = build_model(cfg, 0)
    # Load backbone weights only (classifier head is skipped).
    model.load_params_wo_fc(torch.load(cfg.TEST.WEIGHT))
    if num_gpus > 1:
        model = nn.DataParallel(model)
    model = model.cuda()

    print('prepare test set ...')
    test_dataloader_collection, num_query_collection, test_items_collection = get_test_dataloader(cfg)

    # is_vis=True: inference also dumps ranked visualisations for inspection.
    inference(cfg, model, test_dataloader_collection, num_query_collection, is_vis=True, test_collection=test_items_collection)
class AliasMethod(object):
    """
    Walker's alias method for constant-time multinomial sampling.
    From: https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
    """
    def __init__(self, probs):
        # In-place normalisation when the supplied weights exceed unit mass.
        total = probs.sum()
        if total > 1:
            probs.div_(total)

        K = len(probs)
        self.prob = torch.zeros(K)
        self.alias = torch.LongTensor([0] * K)

        # Scale each probability by K and bucket it as light (<1) or heavy.
        light_bins = []
        heavy_bins = []
        for bin_idx, p in enumerate(probs):
            self.prob[bin_idx] = K * p
            target = light_bins if self.prob[bin_idx] < 1.0 else heavy_bins
            target.append(bin_idx)

        # Repeatedly let a heavy bin top up a light bin; the heavy bin is then
        # re-bucketed according to its remaining mass.
        while light_bins and heavy_bins:
            light = light_bins.pop()
            heavy = heavy_bins.pop()

            self.alias[light] = heavy
            self.prob[heavy] = (self.prob[heavy] - 1.0) + self.prob[light]

            target = light_bins if self.prob[heavy] < 1.0 else heavy_bins
            target.append(heavy)

        # Remaining bins are full up to floating-point error.
        for remaining in light_bins + heavy_bins:
            self.prob[remaining] = 1

    def cuda(self):
        """Move both lookup tables to the GPU."""
        self.prob = self.prob.cuda()
        self.alias = self.alias.cuda()

    def draw(self, N):
        """
        Draw N samples from multinomial
        :param N: number of samples
        :return: samples
        """
        K = self.alias.size(0)

        kk = torch.zeros(N, dtype=torch.long, device=self.prob.device).random_(0, K)
        prob = self.prob.index_select(0, kk)
        alias = self.alias.index_select(0, kk)
        # Bernoulli draw decides: keep the sampled bin, or take its alias.
        b = torch.bernoulli(prob)
        oq = kk.mul(b.long())
        oj = alias.mul((1 - b).long())

        return oq + oj
11 | - [yacs](https://github.com/rbgirshick/yacs) 12 | - Cython (optional to compile evaluation code) 13 | - tensorboard (needed for visualization): `pip install tensorboard` 14 | 15 | ## Data Preparation 16 | 17 | To train a vehicle reid model with parsing, you need the original image datasets like [VeRi](https://github.com/JDAI-CV/VeRidataset) and the parsing masks of all images. 18 | For vehicle parsing models pretrained on the [MVP dataset](https://lxc86739795.github.io/MVP.html) based on PSPNet/DeepLabV3/HRNet, please refer to this [repo](https://github.com/lxc86739795/human_vehicle_parsing_platform). 19 | 20 | ## Training 21 | 22 | You can run the example training script in the `.sh` files. 23 | 24 | ## Main Code 25 | 26 | The main code for GCN can be found in 27 | ```bash 28 | root 29 | engine 30 | trainer_selfgcn.py # training pipeline 31 | modeling 32 | baseline_selfgcn.py # definition of the model 33 | tools 34 | train_selfgcn.py # training preparation 35 | 36 | ``` 37 | 38 | The code for data I/O and the sampler has also been modified for the parsing-based reid method. 39 | 40 | ## License 41 | 42 | PCRNet is released under the [Apache 2.0 license](LICENSE). 
def swap(img, crop):
    """Locally shuffle image tiles (destroy-and-reconstruct augmentation).

    The image is trimmed by a 10-pixel frame, cut into a ``crop[0] x crop[1]``
    grid, neighbouring tiles are shuffled within a small sliding window, and
    the shuffled grid is reassembled and resized back to the original size.

    Args:
        img: input PIL image.
        crop: (columns, rows) of the tile grid.

    Returns:
        A PIL image of the original size with locally permuted tiles.
    """
    def crop_image(image, cropnum):
        # Cut `image` into a row-major list of grid tiles.
        width, high = image.size
        crop_x = [int((width / cropnum[0]) * i) for i in range(cropnum[0] + 1)]
        crop_y = [int((high / cropnum[1]) * i) for i in range(cropnum[1] + 1)]
        return [
            image.crop((crop_x[i], crop_y[j], min(crop_x[i + 1], width), min(crop_y[j + 1], high)))
            for j in range(len(crop_y) - 1)
            for i in range(len(crop_x) - 1)
        ]

    widthcut, highcut = img.size
    # Drop a 10px border before tiling.
    img = img.crop((10, 10, widthcut - 10, highcut - 10))
    tiles = crop_image(img, crop)
    pro = 5
    if pro >= 5:  # NOTE: always true here; the constant branch is kept from the original
        row_buf = []
        rows = []
        col_count = 0
        row_count = 0
        k = 1
        RAN = 2  # width of the local shuffle window
        for t_idx in range(crop[1] * crop[0]):
            row_buf.append(tiles[t_idx])
            col_count += 1
            if len(row_buf) >= k:
                # Shuffle the last RAN tiles of the current row in place.
                window = row_buf[col_count - RAN:col_count]
                random.shuffle(window)
                row_buf[col_count - RAN:col_count] = window
            if col_count == crop[0]:
                rows.append(row_buf)
                col_count = 0
                row_count += 1
                row_buf = []
            if len(rows) >= k:
                # Shuffle the last RAN completed rows in place.
                row_window = rows[row_count - RAN:row_count]
                random.shuffle(row_window)
                rows[row_count - RAN:row_count] = row_window
        shuffled_tiles = []
        for line in rows:
            shuffled_tiles.extend(line)

        # Reassemble the shuffled tiles onto a fresh canvas.
        width, high = img.size
        iw = int(width / crop[0])
        ih = int(high / crop[1])
        toImage = Image.new('RGB', (iw * crop[0], ih * crop[1]))
        x = 0
        y = 0
        for tile in shuffled_tiles:
            tile = tile.resize((iw, ih), Image.ANTIALIAS)
            toImage.paste(tile, (x * iw, y * ih))
            x += 1
            if x == crop[0]:
                x = 0
                y += 1
    else:
        toImage = img
    toImage = toImage.resize((widthcut, highcut))
    return toImage
def build_transforms(cfg, is_train=True):
    """Build the (image_transform, mask_transform) pair for a data pipeline.

    Training transforms resize (bicubic for images, nearest for masks so label
    values stay intact), randomly flip, pad, and randomly crop; test
    transforms only resize. Tensor conversion and normalisation are left to
    the collate/loader stage (the commented-out steps were removed).

    NOTE(review): the image and mask pipelines draw their random flip/crop
    parameters independently — presumably the caller synchronises the RNG
    state so image and mask stay aligned; verify against the dataset loader.

    Returns:
        tuple: (image transform, mask transform).
    """
    if is_train:
        transform = T.Compose([
            T.Resize(cfg.INPUT.SIZE_TRAIN, interpolation=3),  # 3 = bicubic
            T.RandomHorizontalFlip(p=cfg.INPUT.FLIP_PROB),
            T.Pad(cfg.INPUT.PADDING),
            T.RandomCrop(cfg.INPUT.SIZE_TRAIN),
        ])
        mask_transform = T.Compose([
            T.Resize(cfg.INPUT.SIZE_TRAIN, interpolation=0),  # 0 = nearest
            T.RandomHorizontalFlip(p=cfg.INPUT.FLIP_PROB),
            T.Pad(cfg.INPUT.PADDING),
            T.RandomCrop(cfg.INPUT.SIZE_TRAIN),
        ])
    else:
        transform = T.Compose([
            T.Resize(cfg.INPUT.SIZE_TEST),
        ])
        mask_transform = T.Compose([
            T.Resize(cfg.INPUT.SIZE_TEST),
        ])

    return (transform, mask_transform)
res.append(T.ToTensor()) 29 | # return T.Compose(res) 30 | 31 | def build_transforms(cfg, is_train=True): 32 | #normalize_transform = T.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD) 33 | if is_train: 34 | transform = T.Compose([ 35 | T.Resize(cfg.INPUT.SIZE_TRAIN, interpolation = 3), 36 | T.RandomHorizontalFlip(p=cfg.INPUT.FLIP_PROB), 37 | T.Pad(cfg.INPUT.PADDING), 38 | T.RandomCrop(cfg.INPUT.SIZE_TRAIN), 39 | #T.ToTensor(), 40 | #normalize_transform, 41 | #RandomErasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN) 42 | ]) 43 | mask_transform = T.Compose([ 44 | T.Resize(cfg.INPUT.SIZE_TRAIN, interpolation = 0), 45 | T.RandomHorizontalFlip(p=cfg.INPUT.FLIP_PROB), 46 | T.Pad(cfg.INPUT.PADDING), 47 | T.RandomCrop(cfg.INPUT.SIZE_TRAIN), 48 | #T.ToTensor() 49 | #RandomErasing(probability=cfg.INPUT.RE_PROB, mean=[0]) 50 | ]) 51 | 52 | else: 53 | transform = T.Compose([ 54 | T.Resize(cfg.INPUT.SIZE_TEST), 55 | #T.ToTensor(), 56 | #normalize_transform 57 | ]) 58 | mask_transform = T.Compose([ 59 | T.Resize(cfg.INPUT.SIZE_TEST), 60 | #T.ToTensor(), 61 | #normalize_transform 62 | ]) 63 | 64 | return (transform, mask_transform) -------------------------------------------------------------------------------- /data/collate_batch.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | import torch 7 | import numpy as np 8 | import cv2 9 | 10 | def fast_collate_fn(batch): 11 | imgs, pids, camids = zip(*batch) 12 | is_ndarray = isinstance(imgs[0], np.ndarray) 13 | if not is_ndarray: # PIL Image object 14 | w = imgs[0].size[0] 15 | h = imgs[0].size[1] 16 | else: 17 | w = imgs[0].shape[1] 18 | h = imgs[0].shape[0] 19 | tensor = torch.zeros((len(imgs), 3, h, w), dtype=torch.uint8) 20 | for i, img in enumerate(imgs): 21 | if not is_ndarray: 22 | img = np.asarray(img, dtype=np.uint8) 23 | numpy_array = np.rollaxis(img, 2) 24 
def fast_collate_fn_mask(batch):  # Changed by Xinchen Liu
    """Collate (img, mask, pid, camid) samples into uint8 batch tensors.

    Images arrive as HWC uint8 ndarrays or PIL images; they are packed
    channel-first into a (N, 3, H, W) tensor and masks into (N, 1, H, W),
    with no normalization (that is done downstream).

    Returns:
        (img_tensor, mask_tensor, pid_tensor(long), camids_tuple)
    """
    imgs, masks, pids, camids = zip(*batch)
    first = imgs[0]
    is_pil = not isinstance(first, np.ndarray)
    # PIL .size is (W, H); ndarray .shape is (H, W, C).
    h, w = (first.size[1], first.size[0]) if is_pil else first.shape[:2]

    batch_imgs = torch.zeros((len(imgs), 3, h, w), dtype=torch.uint8)
    batch_masks = torch.zeros((len(imgs), 1, h, w), dtype=torch.uint8)
    for i, (img, mask) in enumerate(zip(imgs, masks)):
        if is_pil:
            img = np.asarray(img, dtype=np.uint8)
            mask = np.asarray(mask, dtype=np.uint8)
        # HWC -> CHW for the image; add a singleton channel axis for the mask.
        batch_imgs[i] += torch.from_numpy(np.rollaxis(img, 2))
        batch_masks[i] += torch.from_numpy(mask[np.newaxis, :, :])
    return batch_imgs, batch_masks, torch.tensor(pids).long(), camids
def build_model(cfg, num_classes, use_mask=False) -> nn.Module:
    """Instantiate the re-id model variant selected by the config.

    Selection order:
      1. 'InsDis' in cfg.SOLVER.LOSSTYPE -> Baseline_InsDis (instance discrimination)
      2. use_mask                        -> Baseline_Mask (adds NUM_PARTS mask branch)
      3. otherwise                       -> plain Baseline
    """
    # Trailing constructor arguments shared by every variant.
    shared = (
        cfg.MODEL.LAST_STRIDE,
        cfg.MODEL.WITH_IBN,
        cfg.MODEL.GCB,
        cfg.MODEL.STAGE_WITH_GCB,
        cfg.MODEL.PRETRAIN,
        cfg.MODEL.PRETRAIN_PATH,
    )
    if 'InsDis' in list(cfg.SOLVER.LOSSTYPE):
        print('Baseline Instance Model')
        return Baseline_InsDis(cfg.MODEL.BACKBONE, num_classes, *shared)
    if use_mask:
        print('Baseline with Mask Branch')
        return Baseline_Mask(cfg.MODEL.BACKBONE, num_classes, cfg.MODEL.NUM_PARTS, *shared)
    print('Baseline Model')
    return Baseline(cfg.MODEL.BACKBONE, num_classes, *shared)
class DukeMTMCreID(ImageDataset):
    """DukeMTMC-reID.

    Reference:
        - Ristani et al. Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking. ECCVW 2016.
        - Zheng et al. Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in vitro. ICCV 2017.

    URL: ``_

    Dataset statistics:
        - identities: 1404 (train + query).
        - images:16522 (train) + 2228 (query) + 17661 (gallery).
        - cameras: 8.
    """
    dataset_dir = 'DukeMTMC-reID'
    dataset_url = 'http://vision.cs.duke.edu/DukeMTMC/data/misc/DukeMTMC-reID.zip'

    def __init__(self, root='datasets', **kwargs):
        # self.root = osp.abspath(osp.expanduser(root))
        self.root = root
        # Standard DukeMTMC-reID folder layout under <root>/DukeMTMC-reID/.
        self.dataset_dir = osp.join(self.root, self.dataset_dir)
        self.train_dir = osp.join(self.dataset_dir, 'bounding_box_train')
        self.query_dir = osp.join(self.dataset_dir, 'query')
        self.gallery_dir = osp.join(self.dataset_dir, 'bounding_box_test')

        required_files = [
            self.dataset_dir,
            self.train_dir,
            self.query_dir,
            self.gallery_dir
        ]
        # Fail fast (inherited helper) if any expected directory is missing.
        self.check_before_run(required_files)

        # Only the training split gets contiguous relabeled ids; query/gallery
        # keep their raw ids so cross-split matching stays meaningful.
        train = self.process_dir(self.train_dir, relabel=True)
        query = self.process_dir(self.query_dir, relabel=False)
        gallery = self.process_dir(self.gallery_dir, relabel=False)

        super(DukeMTMCreID, self).__init__(train, query, gallery, **kwargs)

    def process_dir(self, dir_path, relabel=False):
        """Return (img_path, pid, camid) triples parsed from '<pid>_c<cam>...' names."""
        img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
        # '[-\d]+' also admits negative ids; camera id is a single digit (1..8).
        pattern = re.compile(r'([-\d]+)_c(\d)')

        pid_container = set()
        for img_path in img_paths:
            pid, _ = map(int, pattern.search(img_path).groups())
            pid_container.add(pid)
        # Map raw person ids to contiguous labels for classification training.
        pid2label = {pid: label for label, pid in enumerate(pid_container)}

        data = []
        for img_path in img_paths:
            pid, camid = map(int, pattern.search(img_path).groups())
            assert 1 <= camid <= 8
            camid -= 1  # index starts from 0
            if relabel: pid = pid2label[pid]
            data.append((img_path, pid, camid))

        return data
21 | URL: https://vehiclereid.github.io/VeRi/ 22 | 23 | Dataset statistics: 24 | # identities: 775 25 | # images: 37746 (train) + 1678 (query) + 11579 (gallery) 26 | """ 27 | dataset_dir = 'veri' 28 | 29 | def __init__(self, root='/home/liuxinchen3/notespace/data', verbose=True, **kwargs): 30 | self.dataset_dir = osp.join(root, self.dataset_dir) 31 | self.train_dir = osp.join(self.dataset_dir, 'image_train') 32 | self.query_dir = osp.join(self.dataset_dir, 'image_query') 33 | self.gallery_dir = osp.join(self.dataset_dir, 'image_test') 34 | 35 | required_files = [ 36 | self.dataset_dir, 37 | self.train_dir, 38 | self.query_dir, 39 | self.gallery_dir 40 | ] 41 | 42 | self.check_before_run(required_files) 43 | 44 | train = self._process_dir(self.train_dir, relabel=True) 45 | query = self._process_dir(self.query_dir, relabel=False) 46 | gallery = self._process_dir(self.gallery_dir, relabel=False) 47 | 48 | self.train = train 49 | self.query = query 50 | self.gallery = gallery 51 | 52 | super(VeRi, self).__init__(train, query, gallery, **kwargs) 53 | 54 | def _process_dir(self, dir_path, relabel=False): 55 | img_paths = glob.glob(osp.join(dir_path, '*.jpg')) 56 | img_paths.sort() 57 | pattern = re.compile(r'([\d]+)_c(\d\d\d)') 58 | 59 | pid_container = set() 60 | for img_path in img_paths: 61 | pid, _ = map(int, pattern.search(img_path).groups()) 62 | if pid == -1: continue # junk images are just ignored 63 | pid_container.add(pid) 64 | pid2label = {pid: label for label, pid in enumerate(pid_container)} 65 | 66 | dataset = [] 67 | for img_path in img_paths: 68 | pid, camid = map(int, pattern.search(img_path).groups()) 69 | if pid == -1: continue # junk images are just ignored 70 | #print('pid : ', pid, ' camid : ', camid) 71 | assert 1 <= pid <= 776 72 | assert 1 <= camid <= 20 73 | camid -= 1 # index starts from 0 74 | if relabel: pid = pid2label[pid] 75 | dataset.append((img_path, pid, camid)) 76 | 77 | return dataset 78 | 
class VeRi(ImageDataset):
    """
    VeRi (.ipynb_checkpoints copy of data/datasets/veri.py)
    Reference:
    Liu et al. A Deep Learning based Approach for Progressive Vehicle Re-Identification. ECCV 2016.
    URL: https://vehiclereid.github.io/VeRi/

    Dataset statistics:
    # identities: 775
    # images: 37746 (train) + 1678 (query) + 11579 (gallery)
    """
    dataset_dir = 'veri'

    def __init__(self, root='/home/liuxinchen3/notespace/data', verbose=True, **kwargs):
        self.dataset_dir = osp.join(root, self.dataset_dir)
        self.train_dir = osp.join(self.dataset_dir, 'image_train')
        self.query_dir = osp.join(self.dataset_dir, 'image_query')
        self.gallery_dir = osp.join(self.dataset_dir, 'image_test')

        required_files = [
            self.dataset_dir,
            self.train_dir,
            self.query_dir,
            self.gallery_dir
        ]

        # Fail fast (inherited helper) if any expected directory is missing.
        self.check_before_run(required_files)

        # Only the training split is relabeled to contiguous ids.
        train = self._process_dir(self.train_dir, relabel=True)
        query = self._process_dir(self.query_dir, relabel=False)
        gallery = self._process_dir(self.gallery_dir, relabel=False)

        self.train = train
        self.query = query
        self.gallery = gallery

        super(VeRi, self).__init__(train, query, gallery, **kwargs)

    def _process_dir(self, dir_path, relabel=False):
        """Parse VeRi names '<pid>_c<cam>...' into (img_path, pid, camid) triples."""
        img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
        # Sorting makes the pid -> label mapping reproducible across runs.
        img_paths.sort()
        pattern = re.compile(r'([\d]+)_c(\d\d\d)')

        pid_container = set()
        for img_path in img_paths:
            pid, _ = map(int, pattern.search(img_path).groups())
            if pid == -1: continue # junk images are just ignored
            pid_container.add(pid)
        pid2label = {pid: label for label, pid in enumerate(pid_container)}

        dataset = []
        for img_path in img_paths:
            pid, camid = map(int, pattern.search(img_path).groups())
            if pid == -1: continue # junk images are just ignored
            #print('pid : ', pid, ' camid : ', camid)
            assert 1 <= pid <= 776
            assert 1 <= camid <= 20
            camid -= 1 # index starts from 0
            if relabel: pid = pid2label[pid]
            dataset.append((img_path, pid, camid))

        return dataset
    def _process_dir(self, dir_path, relabel=False):
        """Parse AIC19 names '<pid>_c<cam>...' into (img_path, pid, camid) triples.

        When relabel is True, raw vehicle ids are remapped to contiguous
        labels; camid is shifted to start at 0.
        """
        img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
        # NOTE(review): unlike veri.py, img_paths is NOT sorted here, so the
        # pid -> label mapping depends on filesystem order -- confirm intended.
        pattern = re.compile(r'([\d]+)_c(\d\d\d)')

        pid_container = set()
        for img_path in img_paths:
            pid, _ = map(int, pattern.search(img_path).groups())
            if pid == -1: continue # junk images are just ignored
            pid_container.add(pid)
        # Contiguous labels for classification training.
        pid2label = {pid: label for label, pid in enumerate(pid_container)}

        dataset = []
        for img_path in img_paths:
            pid, camid = map(int, pattern.search(img_path).groups())
            if pid == -1: continue # junk images are just ignored
            #print('img_path:', img_path, ' pid : ', pid, ' camid : ', camid)
            #assert 0 <= pid
            #assert 0 <= camid
            camid -= 1 # index starts from 0
            if relabel: pid = pid2label[pid]
            dataset.append((img_path, pid, camid))

        return dataset
def main():
    """CLI entry point: parse args, finalize the global config, and launch training."""
    parser = argparse.ArgumentParser(description="ReID Model Training")
    parser.add_argument(
        '-cfg', "--config_file",
        default="",
        metavar="FILE",
        help="path to config file",
        type=str
    )
    # parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()
    # Config precedence: built-in defaults < YAML file < command-line opts.
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)

    # GPU count comes from CUDA_VISIBLE_DEVICES (default: single GPU '0');
    # here it is only used for logging.
    gpus = os.environ["CUDA_VISIBLE_DEVICES"] if "CUDA_VISIBLE_DEVICES" in os.environ else '0'
    gpus = [int(i) for i in gpus.split(',')]
    num_gpus = len(gpus)

    # cfg.SOLVER.DIST = num_gpus > 1

    # if cfg.SOLVER.DIST:
    #     torch.cuda.set_device(args.local_rank)
    #     torch.distributed.init_process_group(
    #         backend="nccl", init_method="env://"
    #     )
    #     torch.cuda.synchronize()

    # Make the config immutable before anything reads it.
    cfg.freeze()

    # Logs and tensorboard events go to OUTPUT_DIR/<test-dataset-names>/<version>/.
    log_save_dir = os.path.join(cfg.OUTPUT_DIR, '-'.join(cfg.DATASETS.TEST_NAMES), cfg.MODEL.VERSION)
    if not os.path.exists(log_save_dir): os.makedirs(log_save_dir)

    logger = setup_logger("reid_baseline.train", log_save_dir, 0)
    logger.info("Using {} GPUs.".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    logger.info('start training')
    cudnn.benchmark = True

    # Tensorboard writer lives next to the text logs, under 'tf/'.
    writer = SummaryWriter(os.path.join(log_save_dir, 'tf'))
    reid_system = ReidSystem(cfg, logger, writer)
    reid_system.train()

    # TODO: continue training
    # if cfg.MODEL.CHECKPOINT is not '':
    #     state = torch.load(cfg.MODEL.CHECKPOINT)
    #     if set(state.keys()) == {'model', 'opt'}:
    #         model_state = state['model']
    #         learn.model.load_state_dict(model_state)
    #         learn.create_opt(0, 0)
    #         learn.opt.load_state_dict(state['opt'])
    #     else:
    #         learn.model.load_state_dict(state['model'])
    #     logger.info(f'continue training from checkpoint {cfg.MODEL.CHECKPOINT}')


if __name__ == '__main__':
    main()
The speed improvements 11 | can be much bigger when using the real reid data, which contains a larger 12 | amount of query and gallery images. 13 | 14 | Note: you might encounter the following error: 15 | 'AssertionError: Error: all query identities do not appear in gallery'. 16 | This is normal because the inputs are random numbers. Just try again. 17 | """ 18 | 19 | print('*** Compare running time ***') 20 | 21 | setup = ''' 22 | import sys 23 | import os.path as osp 24 | import numpy as np 25 | sys.path.insert(0, osp.dirname(osp.abspath(__file__)) + '/../..') 26 | from data.datasets.eval_reid import evaluate 27 | num_q = 30 28 | num_g = 300 29 | max_rank = 5 30 | distmat = np.random.rand(num_q, num_g) * 20 31 | q_pids = np.random.randint(0, num_q, size=num_q) 32 | g_pids = np.random.randint(0, num_g, size=num_g) 33 | q_camids = np.random.randint(0, 5, size=num_q) 34 | g_camids = np.random.randint(0, 5, size=num_g) 35 | ''' 36 | 37 | print('=> Using market1501\'s metric') 38 | pytime = timeit.timeit('evaluate(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_cython=False)', setup=setup, 39 | number=20) 40 | cytime = timeit.timeit('evaluate(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_cython=True)', setup=setup, 41 | number=20) 42 | print('Python time: {} s'.format(pytime)) 43 | print('Cython time: {} s'.format(cytime)) 44 | print('Cython is {} times faster than python\n'.format(pytime / cytime)) 45 | 46 | print('=> Using cuhk03\'s metric') 47 | pytime = timeit.timeit( 48 | 'evaluate(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_metric_cuhk03=True, use_cython=False)', 49 | setup=setup, number=20) 50 | cytime = timeit.timeit( 51 | 'evaluate(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_metric_cuhk03=True, use_cython=True)', 52 | setup=setup, number=20) 53 | print('Python time: {} s'.format(pytime)) 54 | print('Cython time: {} s'.format(cytime)) 55 | print('Cython is {} times faster than python\n'.format(pytime / 
class RandomErasing(object):
    """ Randomly selects a rectangle region in an image and erases its pixels.
    'Random Erasing Data Augmentation' by Zhong et al.
    See https://arxiv.org/pdf/1708.04896.pdf

    Operates on (H, W, C) or (H, W) uint8 arrays; PIL inputs are converted,
    and a numpy array is returned either way.

    Args:
        probability: The probability that the Random Erasing operation will be performed.
        sl: Minimum proportion of erased area against input image.
        sh: Maximum proportion of erased area against input image.
        r1: Minimum aspect ratio of erased area.
        mean: Erasing value (per-channel fill).
    """

    def __init__(self, probability=0.5, sl=0.02, sh=0.4, r1=0.3, mean=(255*0.59606, 255*0.55814, 255*0.49735)):
        self.probability = probability
        self.mean = mean
        self.sl = sl
        self.sh = sh
        self.r1 = r1

    def __call__(self, img):
        img = np.asarray(img, dtype=np.uint8).copy()
        if random.uniform(0, 1) > self.probability:
            return img

        # Up to 100 attempts to sample a rectangle that fits inside the image.
        for attempt in range(100):
            area = img.shape[0] * img.shape[1]

            target_area = random.uniform(self.sl, self.sh) * area
            aspect_ratio = random.uniform(self.r1, 1 / self.r1)

            h = int(round(math.sqrt(target_area * aspect_ratio)))
            w = int(round(math.sqrt(target_area / aspect_ratio)))

            if w < img.shape[1] and h < img.shape[0]:
                x1 = random.randint(0, img.shape[0] - h)
                y1 = random.randint(0, img.shape[1] - w)
                # BUG FIX: the original tested len(img), which is the image
                # HEIGHT for an ndarray, so the erase branches almost never
                # ran.  Array rank must be tested with img.ndim.
                if img.ndim == 3:
                    if img.shape[2] >= 3:
                        img[x1:x1 + h, y1:y1 + w, 0] = self.mean[0]
                        img[x1:x1 + h, y1:y1 + w, 1] = self.mean[1]
                        img[x1:x1 + h, y1:y1 + w, 2] = self.mean[2]
                    else:
                        img[x1:x1 + h, y1:y1 + w, 0] = self.mean[0]
                elif img.ndim == 2:
                    img[x1:x1 + h, y1:y1 + w] = self.mean[0]
                return img

        # No admissible rectangle found: return the image unmodified.
        return img
class VD2(ImageDataset):
    """
    vehicleid (PKU-VD, VD2 split)
    Reference:
    Ke Yan, Yonghong Tian, Yaowei Wang, Wei Zeng, Tiejun Huang: Exploiting Multi-Grain Ranking Constraints for Precisely Searching Visually-similar Vehicles. ICCV 2017.
    URL: https://pkuml.org/resources/pku-vd.html

    Dataset statistics:
    # identities: 77,963
    # images: 690,518
    """
    dataset_dir = 'PKU-VD'

    def __init__(self, root='/home/liuxinchen3/notespace/data', verbose=True, **kwargs):
        self.image_dir = osp.join(root, self.dataset_dir, 'VD2/image')
        self.train_list = osp.join(root, self.dataset_dir, 'VD2/train_test/trainlist.txt')
        self.test_list = osp.join(root, self.dataset_dir, 'VD2/train_test/testlist.txt')

        required_files = [
            self.image_dir,
            self.train_list,
            self.test_list
        ]

        # Fail fast (inherited helper) if any expected file is missing.
        self.check_before_run(required_files)

        query, gallery = self._process_dir(self.test_list, relabel=False)
        train = self._process_dir(self.train_list, relabel=True)

        self.train = train
        self.query = query
        self.gallery = gallery

        super(VD2, self).__init__(train, query, gallery, **kwargs)

    def _process_dir(self, img_list, relabel=False):
        """Parse an '<imgid> <vid>' list file into (img_path, vid, imgid) triples.

        relabel=True (training): returns the full, shuffled sample list with
        vehicle ids remapped to contiguous labels.
        relabel=False (testing): shuffles, truncates to 100k samples, and
        returns (query, gallery) where query holds the first occurrence of
        each vehicle id and gallery is the whole truncated list (so query is
        a subset of gallery).
        """
        # BUG FIX: close the list file instead of leaking the handle.
        with open(img_list, 'r') as f:
            img_list_lines = f.readlines()

        vid_container = set()
        for line in img_list_lines:
            line = line.strip()
            vid = line.split(' ')[1]
            vid_container.add(vid)
        vid2label = {vid: label for label, vid in enumerate(vid_container)}

        dataset = []
        for line in img_list_lines:
            line = line.strip()
            vid = line.split(' ')[1]
            imgid = line.split(' ')[0]
            if relabel: vid = vid2label[vid]
            img_path = osp.join(self.image_dir, imgid + '.jpg')
            # imgid doubles as the "camera id" slot expected downstream.
            dataset.append((img_path, int(vid), int(imgid)))

        random.shuffle(dataset)
        if relabel:
            return dataset

        dataset = dataset[:100000]
        seen_vids = set()
        query = []
        for sample in dataset:
            if sample[1] not in seen_vids:
                seen_vids.add(sample[1])
                query.append(sample)

        return query, dataset
21 | URL: https://pkuml.org/resources/pku-vd.html 22 | 23 | Dataset statistics: 24 | # identities: 141,756 25 | # images: 846,358 26 | """ 27 | dataset_dir = 'PKU-VD' 28 | 29 | def __init__(self, root='/home/liuxinchen3/notespace/data', verbose=True, **kwargs): 30 | self.image_dir = osp.join(root, self.dataset_dir, 'VD1/image') 31 | self.train_list = osp.join(root, self.dataset_dir, 'VD1/train_test/trainlist.txt') 32 | self.test_list = osp.join(root, self.dataset_dir, 'VD1/train_test/testlist.txt') 33 | 34 | required_files = [ 35 | self.image_dir, 36 | self.train_list, 37 | self.test_list 38 | ] 39 | 40 | self.check_before_run(required_files) 41 | 42 | query, gallery = self._process_dir(self.test_list, relabel=False) 43 | train = self._process_dir(self.train_list, relabel=True) 44 | 45 | self.train = train 46 | self.query = query 47 | self.gallery = gallery 48 | 49 | super(VD1, self).__init__(train, query, gallery, **kwargs) 50 | 51 | def _process_dir(self, img_list, relabel=False): 52 | 53 | vid_container = set() 54 | img_list_lines = open(img_list, 'r').readlines() 55 | for idx, line in enumerate(img_list_lines): 56 | line = line.strip() 57 | vid = line.split(' ')[1] 58 | vid_container.add(vid) 59 | vid2label = {vid: label for label, vid in enumerate(vid_container)} 60 | 61 | dataset = [] 62 | for idx, line in enumerate(img_list_lines): 63 | # if idx < 10: 64 | line = line.strip() 65 | vid = line.split(' ')[1] 66 | imgid = line.split(' ')[0] 67 | if relabel: vid = vid2label[vid] 68 | img_path = osp.join(self.image_dir, line.split(' ')[0] + '.jpg') 69 | dataset.append((img_path, int(vid), int(imgid))) 70 | 71 | # print(dataset) 72 | # assert len(dataset) == len(img_list_lines) 73 | random.shuffle(dataset) 74 | vid_container = set() 75 | if relabel: 76 | return dataset 77 | else: 78 | dataset = dataset[:100000] 79 | query = [] 80 | gallery = [] 81 | for sample in dataset: 82 | if sample[1] not in vid_container: 83 | vid_container.add(sample[1]) 84 | 
class RandomErasing(object):
    """ Randomly selects a rectangle region in an image and erases its pixels.
    'Random Erasing Data Augmentation' by Zhong et al.
    See https://arxiv.org/pdf/1708.04896.pdf
    (.ipynb_checkpoints copy of data/transforms/transforms.py)

    Operates on (H, W, C) or (H, W) uint8 arrays; PIL inputs are converted,
    and a numpy array is returned either way.

    Args:
        probability: The probability that the Random Erasing operation will be performed.
        sl: Minimum proportion of erased area against input image.
        sh: Maximum proportion of erased area against input image.
        r1: Minimum aspect ratio of erased area.
        mean: Erasing value (per-channel fill).
    """

    def __init__(self, probability=0.5, sl=0.02, sh=0.4, r1=0.3, mean=(255*0.59606, 255*0.55814, 255*0.49735)):
        self.probability = probability
        self.mean = mean
        self.sl = sl
        self.sh = sh
        self.r1 = r1

    def __call__(self, img):
        img = np.asarray(img, dtype=np.uint8).copy()
        if random.uniform(0, 1) > self.probability:
            return img

        # Up to 100 attempts to sample a rectangle that fits inside the image.
        for attempt in range(100):
            area = img.shape[0] * img.shape[1]

            target_area = random.uniform(self.sl, self.sh) * area
            aspect_ratio = random.uniform(self.r1, 1 / self.r1)

            h = int(round(math.sqrt(target_area * aspect_ratio)))
            w = int(round(math.sqrt(target_area / aspect_ratio)))

            if w < img.shape[1] and h < img.shape[0]:
                x1 = random.randint(0, img.shape[0] - h)
                y1 = random.randint(0, img.shape[1] - w)
                # BUG FIX: the original tested len(img), which is the image
                # HEIGHT for an ndarray, so the erase branches almost never
                # ran.  Array rank must be tested with img.ndim.
                if img.ndim == 3:
                    if img.shape[2] >= 3:
                        img[x1:x1 + h, y1:y1 + w, 0] = self.mean[0]
                        img[x1:x1 + h, y1:y1 + w, 1] = self.mean[1]
                        img[x1:x1 + h, y1:y1 + w, 2] = self.mean[2]
                    else:
                        img[x1:x1 + h, y1:y1 + w, 0] = self.mean[0]
                elif img.ndim == 2:
                    img[x1:x1 + h, y1:y1 + w] = self.mean[0]
                return img

        # No admissible rectangle found: return the image unmodified.
        return img
74 | self.size = size 75 | 76 | def __call__(self, img): 77 | return swap(img, self.size) 78 | 79 | def __repr__(self): 80 | return self.__class__.__name__ + '(size={0})'.format(self.size) 81 | -------------------------------------------------------------------------------- /data/datasets/.ipynb_checkpoints/vd2-checkpoint.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: Xinchen Liu 4 | @contact: lxc86739795@gmail.com 5 | """ 6 | 7 | import glob 8 | import re 9 | 10 | import os.path as osp 11 | import random 12 | from .bases import ImageDataset 13 | import warnings 14 | 15 | 16 | class VD2(ImageDataset): 17 | """ 18 | vehicleid 19 | Reference: 20 | Ke Yan, Yonghong Tian, Yaowei Wang, Wei Zeng, Tiejun Huang: Exploiting Multi-Grain Ranking Constraints for Precisely Searching Visually-similar Vehicles. ICCV 2017. 21 | URL: https://pkuml.org/resources/pku-vd.html 22 | 23 | Dataset statistics: 24 | # identities: 77,963 25 | # images: 690,518 26 | """ 27 | dataset_dir = 'PKU-VD' 28 | 29 | def __init__(self, root='/home/liuxinchen3/notespace/data', verbose=True, **kwargs): 30 | self.image_dir = osp.join(root, self.dataset_dir, 'VD2/image') 31 | self.train_list = osp.join(root, self.dataset_dir, 'VD2/train_test/trainlist.txt') 32 | self.test_list = osp.join(root, self.dataset_dir, 'VD2/train_test/testlist.txt') 33 | 34 | required_files = [ 35 | self.image_dir, 36 | self.train_list, 37 | self.test_list 38 | ] 39 | 40 | self.check_before_run(required_files) 41 | 42 | query, gallery = self._process_dir(self.test_list, relabel=False) 43 | train = self._process_dir(self.train_list, relabel=True) 44 | 45 | self.train = train 46 | self.query = query 47 | self.gallery = gallery 48 | 49 | super(VD2, self).__init__(train, query, gallery, **kwargs) 50 | 51 | def _process_dir(self, img_list, relabel=False): 52 | 53 | vid_container = set() 54 | img_list_lines = open(img_list, 'r').readlines() 55 | for idx, 
line in enumerate(img_list_lines): 56 | line = line.strip() 57 | vid = line.split(' ')[1] 58 | vid_container.add(vid) 59 | vid2label = {vid: label for label, vid in enumerate(vid_container)} 60 | 61 | dataset = [] 62 | for idx, line in enumerate(img_list_lines): 63 | # if idx < 10: 64 | line = line.strip() 65 | vid = line.split(' ')[1] 66 | imgid = line.split(' ')[0] 67 | if relabel: vid = vid2label[vid] 68 | img_path = osp.join(self.image_dir, line.split(' ')[0] + '.jpg') 69 | dataset.append((img_path, int(vid), int(imgid))) 70 | 71 | # print(dataset) 72 | # assert len(dataset) == len(img_list_lines) 73 | random.shuffle(dataset) 74 | vid_container = set() 75 | if relabel: 76 | return dataset 77 | else: 78 | dataset = dataset[:100000] 79 | query = [] 80 | gallery = [] 81 | for sample in dataset: 82 | if sample[1] not in vid_container: 83 | vid_container.add(sample[1]) 84 | query.append(sample) 85 | 86 | return query, dataset 87 | -------------------------------------------------------------------------------- /data/datasets/.ipynb_checkpoints/vd1-checkpoint.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: Xinchen Liu 4 | @contact: lxc86739795@gmail.com 5 | """ 6 | 7 | import glob 8 | import re 9 | 10 | import os.path as osp 11 | import random 12 | from .bases import ImageDataset 13 | import warnings 14 | 15 | 16 | class VD1(ImageDataset): 17 | """ 18 | vehicleid 19 | Reference: 20 | Ke Yan, Yonghong Tian, Yaowei Wang, Wei Zeng, Tiejun Huang: Exploiting Multi-Grain Ranking Constraints for Precisely Searching Visually-similar Vehicles. ICCV 2017. 
21 | URL: https://pkuml.org/resources/pku-vd.html 22 | 23 | Dataset statistics: 24 | # identities: 141,756 25 | # images: 846,358 26 | """ 27 | dataset_dir = 'PKU-VD' 28 | 29 | def __init__(self, root='/home/liuxinchen3/notespace/data', verbose=True, **kwargs): 30 | self.image_dir = osp.join(root, self.dataset_dir, 'VD1/image') 31 | self.train_list = osp.join(root, self.dataset_dir, 'VD1/train_test/trainlist.txt') 32 | self.test_list = osp.join(root, self.dataset_dir, 'VD1/train_test/testlist.txt') 33 | 34 | required_files = [ 35 | self.image_dir, 36 | self.train_list, 37 | self.test_list 38 | ] 39 | 40 | self.check_before_run(required_files) 41 | 42 | query, gallery = self._process_dir(self.test_list, relabel=False) 43 | train = self._process_dir(self.train_list, relabel=True) 44 | 45 | self.train = train 46 | self.query = query 47 | self.gallery = gallery 48 | 49 | super(VD1, self).__init__(train, query, gallery, **kwargs) 50 | 51 | def _process_dir(self, img_list, relabel=False): 52 | 53 | vid_container = set() 54 | img_list_lines = open(img_list, 'r').readlines() 55 | for idx, line in enumerate(img_list_lines): 56 | line = line.strip() 57 | vid = line.split(' ')[1] 58 | vid_container.add(vid) 59 | vid2label = {vid: label for label, vid in enumerate(vid_container)} 60 | 61 | dataset = [] 62 | for idx, line in enumerate(img_list_lines): 63 | # if idx < 10: 64 | line = line.strip() 65 | vid = line.split(' ')[1] 66 | imgid = line.split(' ')[0] 67 | if relabel: vid = vid2label[vid] 68 | img_path = osp.join(self.image_dir, line.split(' ')[0] + '.jpg') 69 | dataset.append((img_path, int(vid), int(imgid))) 70 | 71 | # print(dataset) 72 | # assert len(dataset) == len(img_list_lines) 73 | random.shuffle(dataset) 74 | vid_container = set() 75 | if relabel: 76 | return dataset 77 | else: 78 | dataset = dataset[:100000] 79 | query = [] 80 | gallery = [] 81 | for sample in dataset: 82 | if sample[1] not in vid_container: 83 | vid_container.add(sample[1]) 84 | 
query.append(sample) 85 | 86 | return query, dataset 87 | -------------------------------------------------------------------------------- /data/datasets/vehicleonem.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: Xinchen Liu 4 | @contact: lxc86739795@gmail.com 5 | """ 6 | 7 | import glob 8 | import re 9 | 10 | import os.path as osp 11 | import random 12 | from .bases import ImageDataset 13 | import warnings 14 | 15 | 16 | class VehicleOneM(ImageDataset): 17 | """ 18 | vehicleid 19 | Reference: 20 | Haiyun Guo, Chaoyang Zhao, Zhiwei Liu, Jinqiao Wang, Hanqing Lu: Learning coarse-to-fine structured feature embedding for vehicle re-identification. AAAI 2018. 21 | URL: http://www.nlpr.ia.ac.cn/iva/homepage/jqwang/Vehicle1M.htm 22 | 23 | Dataset statistics: 24 | # identities: 55527 25 | # images: 936051 26 | """ 27 | dataset_dir = 'Vehicle-1M' 28 | 29 | def __init__(self, root='/home/liuxinchen3/notespace/data', verbose=True, **kwargs): 30 | self.image_dir = osp.join(root, self.dataset_dir, 'image_jpg') 31 | self.train_list = osp.join(root, self.dataset_dir, 'train-test-split/train_list.txt') 32 | self.test_list = osp.join(root, self.dataset_dir, 'train-test-split/test_full.txt') 33 | 34 | required_files = [ 35 | self.image_dir, 36 | self.train_list, 37 | self.test_list 38 | ] 39 | 40 | self.check_before_run(required_files) 41 | 42 | query, gallery = self._process_dir(self.test_list, relabel=False) 43 | train = self._process_dir(self.train_list, relabel=True) 44 | 45 | self.train = train 46 | self.query = query 47 | self.gallery = gallery 48 | 49 | super(VehicleOneM, self).__init__(train, query, gallery, **kwargs) 50 | 51 | def _process_dir(self, img_list, relabel=False): 52 | 53 | vid_container = set() 54 | img_list_lines = open(img_list, 'r').readlines() 55 | for idx, line in enumerate(img_list_lines): 56 | line = line.strip() 57 | vid = line.split(' ')[1] 58 | vid_container.add(vid) 59 | 
vid2label = {vid: label for label, vid in enumerate(vid_container)} 60 | 61 | dataset = [] 62 | for idx, line in enumerate(img_list_lines): 63 | # if idx < 10: 64 | line = line.strip() 65 | vid = line.split(' ')[1] 66 | imgid = line.split(' ')[0].split('/')[1].split('.')[0] 67 | if relabel: vid = vid2label[vid] 68 | img_path = osp.join(self.image_dir, line.split(' ')[0].split('.')[0] + '.jpg') 69 | dataset.append((img_path, int(vid), int(imgid))) 70 | 71 | # print(dataset) 72 | # assert len(dataset) == len(img_list_lines) 73 | random.shuffle(dataset) 74 | vid_container = set() 75 | if relabel: 76 | return dataset 77 | else: 78 | query = [] 79 | gallery = [] 80 | for sample in dataset: 81 | if sample[1] not in vid_container: 82 | vid_container.add(sample[1]) 83 | query.append(sample) 84 | 85 | return query, dataset 86 | -------------------------------------------------------------------------------- /data/datasets/.ipynb_checkpoints/vehicleonem-checkpoint.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: Xinchen Liu 4 | @contact: lxc86739795@gmail.com 5 | """ 6 | 7 | import glob 8 | import re 9 | 10 | import os.path as osp 11 | import random 12 | from .bases import ImageDataset 13 | import warnings 14 | 15 | 16 | class VehicleOneM(ImageDataset): 17 | """ 18 | vehicleid 19 | Reference: 20 | Haiyun Guo, Chaoyang Zhao, Zhiwei Liu, Jinqiao Wang, Hanqing Lu: Learning coarse-to-fine structured feature embedding for vehicle re-identification. AAAI 2018. 
21 | URL: http://www.nlpr.ia.ac.cn/iva/homepage/jqwang/Vehicle1M.htm 22 | 23 | Dataset statistics: 24 | # identities: 55527 25 | # images: 936051 26 | """ 27 | dataset_dir = 'Vehicle-1M' 28 | 29 | def __init__(self, root='/home/liuxinchen3/notespace/data', verbose=True, **kwargs): 30 | self.image_dir = osp.join(root, self.dataset_dir, 'image_jpg') 31 | self.train_list = osp.join(root, self.dataset_dir, 'train-test-split/train_list.txt') 32 | self.test_list = osp.join(root, self.dataset_dir, 'train-test-split/test_full.txt') 33 | 34 | required_files = [ 35 | self.image_dir, 36 | self.train_list, 37 | self.test_list 38 | ] 39 | 40 | self.check_before_run(required_files) 41 | 42 | query, gallery = self._process_dir(self.test_list, relabel=False) 43 | train = self._process_dir(self.train_list, relabel=True) 44 | 45 | self.train = train 46 | self.query = query 47 | self.gallery = gallery 48 | 49 | super(VehicleOneM, self).__init__(train, query, gallery, **kwargs) 50 | 51 | def _process_dir(self, img_list, relabel=False): 52 | 53 | vid_container = set() 54 | img_list_lines = open(img_list, 'r').readlines() 55 | for idx, line in enumerate(img_list_lines): 56 | line = line.strip() 57 | vid = line.split(' ')[1] 58 | vid_container.add(vid) 59 | vid2label = {vid: label for label, vid in enumerate(vid_container)} 60 | 61 | dataset = [] 62 | for idx, line in enumerate(img_list_lines): 63 | # if idx < 10: 64 | line = line.strip() 65 | vid = line.split(' ')[1] 66 | imgid = line.split(' ')[0].split('/')[1].split('.')[0] 67 | if relabel: vid = vid2label[vid] 68 | img_path = osp.join(self.image_dir, line.split(' ')[0].split('.')[0] + '.jpg') 69 | dataset.append((img_path, int(vid), int(imgid))) 70 | 71 | # print(dataset) 72 | # assert len(dataset) == len(img_list_lines) 73 | random.shuffle(dataset) 74 | vid_container = set() 75 | if relabel: 76 | return dataset 77 | else: 78 | query = [] 79 | gallery = [] 80 | for sample in dataset: 81 | if sample[1] not in vid_container: 82 | 
vid_container.add(sample[1]) 83 | query.append(sample) 84 | 85 | return query, dataset 86 | -------------------------------------------------------------------------------- /data/datasets/vehicleid_small.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: Xinchen Liu 4 | @contact: lxc86739795@gmail.com 5 | """ 6 | 7 | import glob 8 | import re 9 | 10 | import os.path as osp 11 | import random 12 | from .bases import ImageDataset 13 | import warnings 14 | 15 | 16 | class VehicleID_Small(ImageDataset): 17 | """ 18 | vehicleid 19 | Reference: 20 | Liu et al. Deep relative distance learning: Tell the difference between similar vehicles. CVPR 2016. 21 | URL: https://pkuml.org/resources/pku-vehicleid.html 22 | 23 | Dataset statistics: 24 | # identities: 26267 25 | # images: 221763 26 | """ 27 | dataset_dir = 'vehicleid' 28 | 29 | def __init__(self, root='/home/liuxinchen3/notespace/data', verbose=True, **kwargs): 30 | #super(vehicleid, self).__init__() 31 | self.dataset_dir = osp.join(root, self.dataset_dir) 32 | self.image_dir = osp.join(self.dataset_dir, 'image') 33 | self.train_list = osp.join(self.dataset_dir, 'train_test_split/train_list_3000.txt') 34 | self.test_list = osp.join(self.dataset_dir, 'train_test_split/test_list_3000.txt') 35 | 36 | # self._check_before_run() 37 | 38 | query, gallery = self._process_dir(self.test_list, relabel=False) 39 | train = self._process_dir(self.train_list, relabel=True) 40 | 41 | required_files = [ 42 | self.dataset_dir, 43 | self.image_dir, 44 | self.train_list, 45 | self.test_list 46 | ] 47 | 48 | self.check_before_run(required_files) 49 | 50 | self.train = train 51 | self.query = query 52 | self.gallery = gallery 53 | 54 | 55 | super(VehicleID_Small, self).__init__(train, query, gallery, **kwargs) 56 | 57 | def _process_dir(self, list_file, relabel=False): 58 | vid_container = set() 59 | img_list_lines = open(list_file, 'r').readlines() 60 | for idx, 
line in enumerate(img_list_lines): 61 | line = line.strip() 62 | vid = line.split(' ')[1] 63 | vid_container.add(vid) 64 | vid2label = {vid: label for label, vid in enumerate(vid_container)} 65 | 66 | dataset = [] 67 | for idx, line in enumerate(img_list_lines): 68 | # if idx < 10: 69 | line = line.strip() 70 | vid = line.split(' ')[1] 71 | imgid = line.split(' ')[0] 72 | if relabel: vid = vid2label[vid] 73 | img_path = osp.join(self.image_dir, imgid + '.jpg') 74 | dataset.append((img_path, int(vid), int(imgid))) 75 | 76 | # print(dataset) 77 | # assert len(dataset) == len(img_list_lines) 78 | random.shuffle(dataset) 79 | vid_container = set() 80 | if relabel: 81 | return dataset 82 | else: 83 | query = [] 84 | gallery = [] 85 | for sample in dataset: 86 | if sample[1] not in vid_container: 87 | vid_container.add(sample[1]) 88 | query.append(sample) 89 | 90 | return query, dataset 91 | -------------------------------------------------------------------------------- /data/datasets/.ipynb_checkpoints/vehicleid_small-checkpoint.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: Xinchen Liu 4 | @contact: lxc86739795@gmail.com 5 | """ 6 | 7 | import glob 8 | import re 9 | 10 | import os.path as osp 11 | import random 12 | from .bases import ImageDataset 13 | import warnings 14 | 15 | 16 | class VehicleID_Small(ImageDataset): 17 | """ 18 | vehicleid 19 | Reference: 20 | Liu et al. Deep relative distance learning: Tell the difference between similar vehicles. CVPR 2016. 
21 | URL: https://pkuml.org/resources/pku-vehicleid.html 22 | 23 | Dataset statistics: 24 | # identities: 26267 25 | # images: 221763 26 | """ 27 | dataset_dir = 'vehicleid' 28 | 29 | def __init__(self, root='/home/liuxinchen3/notespace/data', verbose=True, **kwargs): 30 | #super(vehicleid, self).__init__() 31 | self.dataset_dir = osp.join(root, self.dataset_dir) 32 | self.image_dir = osp.join(self.dataset_dir, 'image') 33 | self.train_list = osp.join(self.dataset_dir, 'train_test_split/train_list_3000.txt') 34 | self.test_list = osp.join(self.dataset_dir, 'train_test_split/test_list_3000.txt') 35 | 36 | # self._check_before_run() 37 | 38 | query, gallery = self._process_dir(self.test_list, relabel=False) 39 | train = self._process_dir(self.train_list, relabel=True) 40 | 41 | required_files = [ 42 | self.dataset_dir, 43 | self.image_dir, 44 | self.train_list, 45 | self.test_list 46 | ] 47 | 48 | self.check_before_run(required_files) 49 | 50 | self.train = train 51 | self.query = query 52 | self.gallery = gallery 53 | 54 | 55 | super(VehicleID_Small, self).__init__(train, query, gallery, **kwargs) 56 | 57 | def _process_dir(self, list_file, relabel=False): 58 | vid_container = set() 59 | img_list_lines = open(list_file, 'r').readlines() 60 | for idx, line in enumerate(img_list_lines): 61 | line = line.strip() 62 | vid = line.split(' ')[1] 63 | vid_container.add(vid) 64 | vid2label = {vid: label for label, vid in enumerate(vid_container)} 65 | 66 | dataset = [] 67 | for idx, line in enumerate(img_list_lines): 68 | # if idx < 10: 69 | line = line.strip() 70 | vid = line.split(' ')[1] 71 | imgid = line.split(' ')[0] 72 | if relabel: vid = vid2label[vid] 73 | img_path = osp.join(self.image_dir, imgid + '.jpg') 74 | dataset.append((img_path, int(vid), int(imgid))) 75 | 76 | # print(dataset) 77 | # assert len(dataset) == len(img_list_lines) 78 | random.shuffle(dataset) 79 | vid_container = set() 80 | if relabel: 81 | return dataset 82 | else: 83 | query = [] 84 | gallery 
= [] 85 | for sample in dataset: 86 | if sample[1] not in vid_container: 87 | vid_container.add(sample[1]) 88 | query.append(sample) 89 | 90 | return query, dataset 91 | -------------------------------------------------------------------------------- /data/datasets/vehicleid_small_mask.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: Xinchen Liu 4 | @contact: lxc86739795@gmail.com 5 | """ 6 | 7 | import glob 8 | import re 9 | 10 | import os.path as osp 11 | import random 12 | from .bases import ImageMaskDataset 13 | import warnings 14 | 15 | 16 | class VehicleID_Small_Mask(ImageMaskDataset): 17 | """ 18 | vehicleid 19 | Reference: 20 | Liu et al. Deep relative distance learning: Tell the difference between similar vehicles. CVPR 2016. 21 | URL: https://pkuml.org/resources/pku-vehicleid.html 22 | 23 | Dataset statistics: 24 | # identities: 26267 25 | # images: 221763 26 | """ 27 | dataset_dir = 'vehicleid' 28 | 29 | def __init__(self, root='/home/liuxinchen3/notespace/data', verbose=True, **kwargs): 30 | self.dataset_dir = osp.join(root, self.dataset_dir) 31 | self.image_dir = osp.join(self.dataset_dir, 'image') 32 | self.mask_dir = osp.join(self.dataset_dir, 'image_mask') 33 | self.train_list = osp.join(self.dataset_dir, 'train_test_split/train_list_3000.txt') 34 | self.test_list = osp.join(self.dataset_dir, 'train_test_split/test_list_3000.txt') 35 | 36 | query, gallery = self._process_dir(self.test_list, relabel=False) 37 | train = self._process_dir(self.train_list, relabel=True) 38 | 39 | required_files = [ 40 | self.dataset_dir, 41 | self.image_dir, 42 | self.mask_dir, 43 | self.train_list, 44 | self.test_list 45 | ] 46 | 47 | self.check_before_run(required_files) 48 | 49 | self.train = train 50 | self.query = query 51 | self.gallery = gallery 52 | 53 | 54 | super(VehicleID_Small_Mask, self).__init__(train, query, gallery, **kwargs) 55 | 56 | def _process_dir(self, list_file, 
relabel=False): 57 | vid_container = set() 58 | img_list_lines = open(list_file, 'r').readlines() 59 | for idx, line in enumerate(img_list_lines): 60 | line = line.strip() 61 | vid = line.split(' ')[1] 62 | vid_container.add(vid) 63 | vid2label = {vid: label for label, vid in enumerate(vid_container)} 64 | 65 | dataset = [] 66 | for idx, line in enumerate(img_list_lines): 67 | # if idx < 10: 68 | line = line.strip() 69 | vid = line.split(' ')[1] 70 | imgid = line.split(' ')[0] 71 | if relabel: vid = vid2label[vid] 72 | img_path = osp.join(self.image_dir, imgid + '.jpg') 73 | mask_path = osp.join(self.mask_dir, imgid + '.png') 74 | dataset.append((img_path, mask_path, int(vid), int(imgid))) 75 | 76 | # print(dataset) 77 | # assert len(dataset) == len(img_list_lines) 78 | random.shuffle(dataset) 79 | vid_container = set() 80 | if relabel: 81 | return dataset 82 | else: 83 | query = [] 84 | gallery = [] 85 | for sample in dataset: 86 | if sample[2] not in vid_container: 87 | vid_container.add(sample[2]) 88 | query.append(sample) 89 | else: 90 | gallery.append(sample) 91 | 92 | return query, gallery 93 | -------------------------------------------------------------------------------- /data/datasets/.ipynb_checkpoints/vehicleid_small_mask-checkpoint.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: Xinchen Liu 4 | @contact: lxc86739795@gmail.com 5 | """ 6 | 7 | import glob 8 | import re 9 | 10 | import os.path as osp 11 | import random 12 | from .bases import ImageMaskDataset 13 | import warnings 14 | 15 | 16 | class VehicleID_Small_Mask(ImageMaskDataset): 17 | """ 18 | vehicleid 19 | Reference: 20 | Liu et al. Deep relative distance learning: Tell the difference between similar vehicles. CVPR 2016. 
21 | URL: https://pkuml.org/resources/pku-vehicleid.html 22 | 23 | Dataset statistics: 24 | # identities: 26267 25 | # images: 221763 26 | """ 27 | dataset_dir = 'vehicleid' 28 | 29 | def __init__(self, root='/home/liuxinchen3/notespace/data', verbose=True, **kwargs): 30 | self.dataset_dir = osp.join(root, self.dataset_dir) 31 | self.image_dir = osp.join(self.dataset_dir, 'image') 32 | self.mask_dir = osp.join(self.dataset_dir, 'image_mask') 33 | self.train_list = osp.join(self.dataset_dir, 'train_test_split/train_list_3000.txt') 34 | self.test_list = osp.join(self.dataset_dir, 'train_test_split/test_list_3000.txt') 35 | 36 | query, gallery = self._process_dir(self.test_list, relabel=False) 37 | train = self._process_dir(self.train_list, relabel=True) 38 | 39 | required_files = [ 40 | self.dataset_dir, 41 | self.image_dir, 42 | self.mask_dir, 43 | self.train_list, 44 | self.test_list 45 | ] 46 | 47 | self.check_before_run(required_files) 48 | 49 | self.train = train 50 | self.query = query 51 | self.gallery = gallery 52 | 53 | 54 | super(VehicleID_Small_Mask, self).__init__(train, query, gallery, **kwargs) 55 | 56 | def _process_dir(self, list_file, relabel=False): 57 | vid_container = set() 58 | img_list_lines = open(list_file, 'r').readlines() 59 | for idx, line in enumerate(img_list_lines): 60 | line = line.strip() 61 | vid = line.split(' ')[1] 62 | vid_container.add(vid) 63 | vid2label = {vid: label for label, vid in enumerate(vid_container)} 64 | 65 | dataset = [] 66 | for idx, line in enumerate(img_list_lines): 67 | # if idx < 10: 68 | line = line.strip() 69 | vid = line.split(' ')[1] 70 | imgid = line.split(' ')[0] 71 | if relabel: vid = vid2label[vid] 72 | img_path = osp.join(self.image_dir, imgid + '.jpg') 73 | mask_path = osp.join(self.mask_dir, imgid + '.png') 74 | dataset.append((img_path, mask_path, int(vid), int(imgid))) 75 | 76 | # print(dataset) 77 | # assert len(dataset) == len(img_list_lines) 78 | random.shuffle(dataset) 79 | vid_container = set() 
80 | if relabel: 81 | return dataset 82 | else: 83 | query = [] 84 | gallery = [] 85 | for sample in dataset: 86 | if sample[2] not in vid_container: 87 | vid_container.add(sample[2]) 88 | query.append(sample) 89 | else: 90 | gallery.append(sample) 91 | 92 | return query, gallery 93 | -------------------------------------------------------------------------------- /data/datasets/vehicleid.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: Xinchen Liu 4 | @contact: lxc86739795@gmail.com 5 | """ 6 | 7 | import glob 8 | import re 9 | 10 | import os.path as osp 11 | import random 12 | from .bases import ImageDataset 13 | import warnings 14 | 15 | 16 | class VehicleID(ImageDataset): 17 | """ 18 | vehicleid 19 | Reference: 20 | Liu et al. Deep relative distance learning: Tell the difference between similar vehicles. CVPR 2016. 21 | URL: https://pkuml.org/resources/pku-vehicleid.html 22 | 23 | Dataset statistics: 24 | # identities: 26267 25 | # images: 221763 26 | """ 27 | dataset_dir = 'vehicleid' 28 | 29 | def __init__(self, root='/home/liuxinchen3/notespace/data', verbose=True, **kwargs): 30 | #super(vehicleid, self).__init__() 31 | self.dataset_dir = osp.join(root, self.dataset_dir) 32 | self.image_dir = osp.join(self.dataset_dir, 'image') 33 | self.train_list = osp.join(self.dataset_dir, 'train_test_split/train_list.txt') 34 | # self.test_list = osp.join(self.dataset_dir, 'train_test_split/test_list_13164.txt') 35 | # self.test_list = osp.join(self.dataset_dir, 'train_test_split/test_list_800.txt') 36 | # self.test_list = osp.join(self.dataset_dir, 'train_test_split/test_list_1600.txt') 37 | self.test_list = osp.join(self.dataset_dir, 'train_test_split/test_list_2400.txt') 38 | 39 | # self._check_before_run() 40 | 41 | query, gallery = self._process_dir(self.test_list, relabel=False) 42 | train = self._process_dir(self.train_list, relabel=True) 43 | 44 | required_files = [ 45 | 
self.dataset_dir, 46 | self.image_dir, 47 | self.train_list, 48 | self.test_list 49 | ] 50 | 51 | self.check_before_run(required_files) 52 | 53 | self.train = train 54 | self.query = query 55 | self.gallery = gallery 56 | 57 | 58 | super(VehicleID, self).__init__(train, query, gallery, **kwargs) 59 | 60 | def _process_dir(self, list_file, relabel=False): 61 | vid_container = set() 62 | img_list_lines = open(list_file, 'r').readlines() 63 | for idx, line in enumerate(img_list_lines): 64 | line = line.strip() 65 | vid = line.split(' ')[1] 66 | vid_container.add(vid) 67 | vid2label = {vid: label for label, vid in enumerate(vid_container)} 68 | 69 | dataset = [] 70 | for idx, line in enumerate(img_list_lines): 71 | # if idx < 10: 72 | line = line.strip() 73 | vid = line.split(' ')[1] 74 | imgid = line.split(' ')[0] 75 | if relabel: vid = vid2label[vid] 76 | img_path = osp.join(self.image_dir, imgid + '.jpg') 77 | dataset.append((img_path, int(vid), int(imgid))) 78 | 79 | # print(dataset) 80 | # assert len(dataset) == len(img_list_lines) 81 | random.shuffle(dataset) 82 | vid_container = set() 83 | if relabel: 84 | return dataset 85 | else: 86 | query = [] 87 | gallery = [] 88 | for sample in dataset: 89 | if sample[1] not in vid_container: 90 | vid_container.add(sample[1]) 91 | query.append(sample) 92 | else: 93 | gallery.append(sample) 94 | 95 | return query, gallery 96 | -------------------------------------------------------------------------------- /data/datasets/.ipynb_checkpoints/vehicleid-checkpoint.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: Xinchen Liu 4 | @contact: lxc86739795@gmail.com 5 | """ 6 | 7 | import glob 8 | import re 9 | 10 | import os.path as osp 11 | import random 12 | from .bases import ImageDataset 13 | import warnings 14 | 15 | 16 | class VehicleID(ImageDataset): 17 | """ 18 | vehicleid 19 | Reference: 20 | Liu et al. 
Deep relative distance learning: Tell the difference between similar vehicles. CVPR 2016. 21 | URL: https://pkuml.org/resources/pku-vehicleid.html 22 | 23 | Dataset statistics: 24 | # identities: 26267 25 | # images: 221763 26 | """ 27 | dataset_dir = 'vehicleid' 28 | 29 | def __init__(self, root='/home/liuxinchen3/notespace/data', verbose=True, **kwargs): 30 | #super(vehicleid, self).__init__() 31 | self.dataset_dir = osp.join(root, self.dataset_dir) 32 | self.image_dir = osp.join(self.dataset_dir, 'image') 33 | self.train_list = osp.join(self.dataset_dir, 'train_test_split/train_list.txt') 34 | # self.test_list = osp.join(self.dataset_dir, 'train_test_split/test_list_13164.txt') 35 | # self.test_list = osp.join(self.dataset_dir, 'train_test_split/test_list_800.txt') 36 | # self.test_list = osp.join(self.dataset_dir, 'train_test_split/test_list_1600.txt') 37 | self.test_list = osp.join(self.dataset_dir, 'train_test_split/test_list_2400.txt') 38 | 39 | # self._check_before_run() 40 | 41 | query, gallery = self._process_dir(self.test_list, relabel=False) 42 | train = self._process_dir(self.train_list, relabel=True) 43 | 44 | required_files = [ 45 | self.dataset_dir, 46 | self.image_dir, 47 | self.train_list, 48 | self.test_list 49 | ] 50 | 51 | self.check_before_run(required_files) 52 | 53 | self.train = train 54 | self.query = query 55 | self.gallery = gallery 56 | 57 | 58 | super(VehicleID, self).__init__(train, query, gallery, **kwargs) 59 | 60 | def _process_dir(self, list_file, relabel=False): 61 | vid_container = set() 62 | img_list_lines = open(list_file, 'r').readlines() 63 | for idx, line in enumerate(img_list_lines): 64 | line = line.strip() 65 | vid = line.split(' ')[1] 66 | vid_container.add(vid) 67 | vid2label = {vid: label for label, vid in enumerate(vid_container)} 68 | 69 | dataset = [] 70 | for idx, line in enumerate(img_list_lines): 71 | # if idx < 10: 72 | line = line.strip() 73 | vid = line.split(' ')[1] 74 | imgid = line.split(' ')[0] 75 | if 
relabel: vid = vid2label[vid] 76 | img_path = osp.join(self.image_dir, imgid + '.jpg') 77 | dataset.append((img_path, int(vid), int(imgid))) 78 | 79 | # print(dataset) 80 | # assert len(dataset) == len(img_list_lines) 81 | random.shuffle(dataset) 82 | vid_container = set() 83 | if relabel: 84 | return dataset 85 | else: 86 | query = [] 87 | gallery = [] 88 | for sample in dataset: 89 | if sample[1] not in vid_container: 90 | vid_container.add(sample[1]) 91 | query.append(sample) 92 | else: 93 | gallery.append(sample) 94 | 95 | return query, gallery 96 | -------------------------------------------------------------------------------- /data/datasets/vehicleid_mask.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: Xinchen Liu 4 | @contact: lxc86739795@gmail.com 5 | """ 6 | 7 | import glob 8 | import re 9 | 10 | import os.path as osp 11 | import random 12 | from .bases import ImageMaskDataset 13 | import warnings 14 | 15 | 16 | class VehicleID_Mask(ImageMaskDataset): 17 | """ 18 | vehicleid 19 | Reference: 20 | Liu et al. Deep relative distance learning: Tell the difference between similar vehicles. CVPR 2016. 
21 | URL: https://pkuml.org/resources/pku-vehicleid.html 22 | 23 | Dataset statistics: 24 | # identities: 26267 25 | # images: 221763 26 | """ 27 | dataset_dir = 'vehicleid' 28 | 29 | def __init__(self, root='/home/liuxinchen3/notespace/data', verbose=True, **kwargs): 30 | self.dataset_dir = osp.join(root, self.dataset_dir) 31 | self.image_dir = osp.join(self.dataset_dir, 'image') 32 | self.mask_dir = osp.join(self.dataset_dir, 'image_mask') 33 | self.train_list = osp.join(self.dataset_dir, 'train_test_split/train_list.txt') 34 | # self.test_list = osp.join(self.dataset_dir, 'train_test_split/test_list_13164.txt') 35 | # self.test_list = osp.join(self.dataset_dir, 'train_test_split/test_list_800.txt') 36 | # self.test_list = osp.join(self.dataset_dir, 'train_test_split/test_list_1600.txt') 37 | self.test_list = osp.join(self.dataset_dir, 'train_test_split/test_list_2400.txt') 38 | 39 | query, gallery = self._process_dir(self.test_list, relabel=False) 40 | train = self._process_dir(self.train_list, relabel=True) 41 | 42 | required_files = [ 43 | self.dataset_dir, 44 | self.image_dir, 45 | self.mask_dir, 46 | self.train_list, 47 | self.test_list 48 | ] 49 | 50 | self.check_before_run(required_files) 51 | 52 | self.train = train 53 | self.query = query 54 | self.gallery = gallery 55 | 56 | 57 | super(VehicleID_Mask, self).__init__(train, query, gallery, **kwargs) 58 | 59 | def _process_dir(self, list_file, relabel=False): 60 | vid_container = set() 61 | img_list_lines = open(list_file, 'r').readlines() 62 | for idx, line in enumerate(img_list_lines): 63 | line = line.strip() 64 | vid = line.split(' ')[1] 65 | vid_container.add(vid) 66 | vid2label = {vid: label for label, vid in enumerate(vid_container)} 67 | 68 | dataset = [] 69 | for idx, line in enumerate(img_list_lines): 70 | # if idx < 10: 71 | line = line.strip() 72 | vid = line.split(' ')[1] 73 | imgid = line.split(' ')[0] 74 | if relabel: vid = vid2label[vid] 75 | img_path = osp.join(self.image_dir, imgid + 
'.jpg') 76 | mask_path = osp.join(self.mask_dir, imgid + '.png') 77 | dataset.append((img_path, mask_path, int(vid), int(imgid))) 78 | 79 | # print(dataset) 80 | # assert len(dataset) == len(img_list_lines) 81 | random.shuffle(dataset) 82 | vid_container = set() 83 | if relabel: 84 | return dataset 85 | else: 86 | query = [] 87 | gallery = [] 88 | for sample in dataset: 89 | if sample[2] not in vid_container: 90 | vid_container.add(sample[2]) 91 | query.append(sample) 92 | else: 93 | gallery.append(sample) 94 | 95 | return query, gallery 96 | -------------------------------------------------------------------------------- /data/datasets/.ipynb_checkpoints/vehicleid_mask-checkpoint.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: Xinchen Liu 4 | @contact: lxc86739795@gmail.com 5 | """ 6 | 7 | import glob 8 | import re 9 | 10 | import os.path as osp 11 | import random 12 | from .bases import ImageMaskDataset 13 | import warnings 14 | 15 | 16 | class VehicleID_Mask(ImageMaskDataset): 17 | """ 18 | vehicleid 19 | Reference: 20 | Liu et al. Deep relative distance learning: Tell the difference between similar vehicles. CVPR 2016. 
class VehicleID_Mask(ImageMaskDataset):
    """VehicleID dataset with per-image part masks.

    Reference:
        Liu et al. Deep relative distance learning: Tell the difference
        between similar vehicles. CVPR 2016.
    URL: https://pkuml.org/resources/pku-vehicleid.html

    Dataset statistics:
        # identities: 26267
        # images: 221763
    """
    dataset_dir = 'vehicleid'

    def __init__(self, root='/home/liuxinchen3/notespace/data', verbose=True, **kwargs):
        self.dataset_dir = osp.join(root, self.dataset_dir)
        self.image_dir = osp.join(self.dataset_dir, 'image')
        self.mask_dir = osp.join(self.dataset_dir, 'image_mask')
        self.train_list = osp.join(self.dataset_dir, 'train_test_split/train_list.txt')
        # Alternative gallery sizes: test_list_800 / test_list_1600 / test_list_13164.
        self.test_list = osp.join(self.dataset_dir, 'train_test_split/test_list_2400.txt')

        required_files = [
            self.dataset_dir,
            self.image_dir,
            self.mask_dir,
            self.train_list,
            self.test_list
        ]
        # BUGFIX: validate the paths BEFORE reading the list files (the
        # original called check_before_run after _process_dir, so a missing
        # dataset surfaced as a raw IOError instead of a clear check error).
        # This also matches the order used by every other dataset class.
        self.check_before_run(required_files)

        query, gallery = self._process_dir(self.test_list, relabel=False)
        train = self._process_dir(self.train_list, relabel=True)

        self.train = train
        self.query = query
        self.gallery = gallery

        super(VehicleID_Mask, self).__init__(train, query, gallery, **kwargs)

    def _process_dir(self, list_file, relabel=False):
        """Parse ``list_file`` (lines of '<imgid> <vid>') into sample tuples.

        Returns the full shuffled list when ``relabel`` is True (training),
        otherwise a (query, gallery) split holding one randomly chosen image
        per vehicle id in the query set and the rest in the gallery.
        """
        # BUGFIX: close the file handle (was an open(...).readlines() leak).
        with open(list_file, 'r') as f:
            img_list_lines = f.readlines()

        # Parse each line exactly once; collect the distinct vehicle ids.
        vid_container = set()
        entries = []  # (imgid, vid) string pairs
        for line in img_list_lines:
            parts = line.strip().split(' ')
            imgid, vid = parts[0], parts[1]
            vid_container.add(vid)
            entries.append((imgid, vid))
        vid2label = {vid: label for label, vid in enumerate(vid_container)}

        dataset = []
        for imgid, vid in entries:
            if relabel:
                vid = vid2label[vid]
            img_path = osp.join(self.image_dir, imgid + '.jpg')
            mask_path = osp.join(self.mask_dir, imgid + '.png')
            dataset.append((img_path, mask_path, int(vid), int(imgid)))

        # Shuffle so the one query image picked per id below is random.
        random.shuffle(dataset)
        if relabel:
            return dataset

        seen_vids = set()
        query = []
        gallery = []
        for sample in dataset:
            if sample[2] not in seen_vids:  # sample[2] is the vehicle id
                seen_vids.add(sample[2])
                query.append(sample)
            else:
                gallery.append(sample)

        return query, gallery
42 | """ 43 | # dataset_dir = 'MSMT17_V2' 44 | dataset_url = None 45 | 46 | def __init__(self, root='datasets', **kwargs): 47 | # self.root = osp.abspath(osp.expanduser(root)) 48 | self.root = root 49 | self.dataset_dir = self.root 50 | 51 | has_main_dir = False 52 | for main_dir in VERSION_DICT: 53 | if osp.exists(osp.join(self.dataset_dir, main_dir)): 54 | train_dir = VERSION_DICT[main_dir][TRAIN_DIR_KEY] 55 | test_dir = VERSION_DICT[main_dir][TEST_DIR_KEY] 56 | has_main_dir = True 57 | break 58 | assert has_main_dir, 'Dataset folder not found' 59 | 60 | self.train_dir = osp.join(self.dataset_dir, main_dir, train_dir) 61 | self.test_dir = osp.join(self.dataset_dir, main_dir, test_dir) 62 | self.list_train_path = osp.join(self.dataset_dir, main_dir, 'list_train.txt') 63 | self.list_val_path = osp.join(self.dataset_dir, main_dir, 'list_val.txt') 64 | self.list_query_path = osp.join(self.dataset_dir, main_dir, 'list_query.txt') 65 | self.list_gallery_path = osp.join(self.dataset_dir, main_dir, 'list_gallery.txt') 66 | 67 | required_files = [ 68 | self.dataset_dir, 69 | self.train_dir, 70 | self.test_dir 71 | ] 72 | self.check_before_run(required_files) 73 | 74 | train = self.process_dir(self.train_dir, self.list_train_path) 75 | val = self.process_dir(self.train_dir, self.list_val_path) 76 | query = self.process_dir(self.test_dir, self.list_query_path) 77 | gallery = self.process_dir(self.test_dir, self.list_gallery_path) 78 | 79 | # Note: to fairly compare with published methods on the conventional ReID setting, 80 | # do not add val images to the training set. 
81 | if 'combineall' in kwargs and kwargs['combineall']: 82 | train += val 83 | 84 | super(MSMT17, self).__init__(train, query, gallery, **kwargs) 85 | 86 | def process_dir(self, dir_path, list_path): 87 | with open(list_path, 'r') as txt: 88 | lines = txt.readlines() 89 | 90 | data = [] 91 | 92 | for img_idx, img_info in enumerate(lines): 93 | img_path, pid = img_info.split(' ') 94 | pid = int(pid) # no need to relabel 95 | camid = int(img_path.split('_')[2]) - 1 # index starts from 0 96 | img_path = osp.join(dir_path, img_path) 97 | data.append((img_path, pid, camid)) 98 | 99 | return data -------------------------------------------------------------------------------- /data/datasets/market1501.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import glob 8 | import re 9 | 10 | import os.path as osp 11 | 12 | from .bases import ImageDataset 13 | import warnings 14 | 15 | 16 | class Market1501(ImageDataset): 17 | """Market1501. 18 | 19 | Reference: 20 | Zheng et al. Scalable Person Re-identification: A Benchmark. ICCV 2015. 21 | 22 | URL: ``_ 23 | 24 | Dataset statistics: 25 | - identities: 1501 (+1 for background). 26 | - images: 12936 (train) + 3368 (query) + 15913 (gallery). 
27 | """ 28 | _junk_pids = [0, -1] 29 | dataset_dir = '' 30 | dataset_url = 'http://188.138.127.15:81/Datasets/Market-1501-v15.09.15.zip' 31 | 32 | def __init__(self, root='/home/liuxinchen3/notespace/data', market1501_500k=False, **kwargs): 33 | # self.root = osp.abspath(osp.expanduser(root)) 34 | self.root = root 35 | self.dataset_dir = osp.join(self.root, self.dataset_dir) 36 | 37 | # allow alternative directory structure 38 | self.data_dir = self.dataset_dir 39 | data_dir = osp.join(self.data_dir, 'Market-1501-v15.09.15') 40 | if osp.isdir(data_dir): 41 | self.data_dir = data_dir 42 | else: 43 | warnings.warn('The current data structure is deprecated. Please ' 44 | 'put data folders such as "bounding_box_train" under ' 45 | '"Market-1501-v15.09.15".') 46 | 47 | self.train_dir = osp.join(self.data_dir, 'bounding_box_train') 48 | self.query_dir = osp.join(self.data_dir, 'query') 49 | self.gallery_dir = osp.join(self.data_dir, 'bounding_box_test') 50 | self.extra_gallery_dir = osp.join(self.data_dir, 'images') 51 | self.market1501_500k = market1501_500k 52 | 53 | required_files = [ 54 | self.data_dir, 55 | self.train_dir, 56 | self.query_dir, 57 | self.gallery_dir 58 | ] 59 | if self.market1501_500k: 60 | required_files.append(self.extra_gallery_dir) 61 | self.check_before_run(required_files) 62 | 63 | train = self.process_dir(self.train_dir, relabel=True) 64 | query = self.process_dir(self.query_dir, relabel=False) 65 | gallery = self.process_dir(self.gallery_dir, relabel=False) 66 | if self.market1501_500k: 67 | gallery += self.process_dir(self.extra_gallery_dir, relabel=False) 68 | 69 | super(Market1501, self).__init__(train, query, gallery, **kwargs) 70 | 71 | def process_dir(self, dir_path, relabel=False): 72 | img_paths = glob.glob(osp.join(dir_path, '*.jpg')) 73 | img_paths.sort() 74 | pattern = re.compile(r'([-\d]+)_c(\d)') 75 | 76 | pid_container = set() 77 | for img_path in img_paths: 78 | pid, _ = map(int, pattern.search(img_path).groups()) 79 | if pid 
== -1: 80 | continue # junk images are just ignored 81 | pid_container.add(pid) 82 | pid2label = {pid: label for label, pid in enumerate(pid_container)} 83 | 84 | data = [] 85 | for img_path in img_paths: 86 | pid, camid = map(int, pattern.search(img_path).groups()) 87 | if pid == -1: 88 | continue # junk images are just ignored 89 | assert 0 <= pid <= 1501 # pid == 0 means background 90 | assert 1 <= camid <= 6 91 | camid -= 1 # index starts from 0 92 | if relabel: 93 | pid = pid2label[pid] 94 | data.append((img_path, pid, camid)) 95 | 96 | return data 97 | 98 | -------------------------------------------------------------------------------- /data/datasets/.ipynb_checkpoints/market1501-checkpoint.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import glob 8 | import re 9 | 10 | import os.path as osp 11 | 12 | from .bases import ImageDataset 13 | import warnings 14 | 15 | 16 | class Market1501(ImageDataset): 17 | """Market1501. 18 | 19 | Reference: 20 | Zheng et al. Scalable Person Re-identification: A Benchmark. ICCV 2015. 21 | 22 | URL: ``_ 23 | 24 | Dataset statistics: 25 | - identities: 1501 (+1 for background). 26 | - images: 12936 (train) + 3368 (query) + 15913 (gallery). 
27 | """ 28 | _junk_pids = [0, -1] 29 | dataset_dir = '' 30 | dataset_url = 'http://188.138.127.15:81/Datasets/Market-1501-v15.09.15.zip' 31 | 32 | def __init__(self, root='/home/liuxinchen3/notespace/data', market1501_500k=False, **kwargs): 33 | # self.root = osp.abspath(osp.expanduser(root)) 34 | self.root = root 35 | self.dataset_dir = osp.join(self.root, self.dataset_dir) 36 | 37 | # allow alternative directory structure 38 | self.data_dir = self.dataset_dir 39 | data_dir = osp.join(self.data_dir, 'Market-1501-v15.09.15') 40 | if osp.isdir(data_dir): 41 | self.data_dir = data_dir 42 | else: 43 | warnings.warn('The current data structure is deprecated. Please ' 44 | 'put data folders such as "bounding_box_train" under ' 45 | '"Market-1501-v15.09.15".') 46 | 47 | self.train_dir = osp.join(self.data_dir, 'bounding_box_train') 48 | self.query_dir = osp.join(self.data_dir, 'query') 49 | self.gallery_dir = osp.join(self.data_dir, 'bounding_box_test') 50 | self.extra_gallery_dir = osp.join(self.data_dir, 'images') 51 | self.market1501_500k = market1501_500k 52 | 53 | required_files = [ 54 | self.data_dir, 55 | self.train_dir, 56 | self.query_dir, 57 | self.gallery_dir 58 | ] 59 | if self.market1501_500k: 60 | required_files.append(self.extra_gallery_dir) 61 | self.check_before_run(required_files) 62 | 63 | train = self.process_dir(self.train_dir, relabel=True) 64 | query = self.process_dir(self.query_dir, relabel=False) 65 | gallery = self.process_dir(self.gallery_dir, relabel=False) 66 | if self.market1501_500k: 67 | gallery += self.process_dir(self.extra_gallery_dir, relabel=False) 68 | 69 | super(Market1501, self).__init__(train, query, gallery, **kwargs) 70 | 71 | def process_dir(self, dir_path, relabel=False): 72 | img_paths = glob.glob(osp.join(dir_path, '*.jpg')) 73 | img_paths.sort() 74 | pattern = re.compile(r'([-\d]+)_c(\d)') 75 | 76 | pid_container = set() 77 | for img_path in img_paths: 78 | pid, _ = map(int, pattern.search(img_path).groups()) 79 | if pid 
== -1: 80 | continue # junk images are just ignored 81 | pid_container.add(pid) 82 | pid2label = {pid: label for label, pid in enumerate(pid_container)} 83 | 84 | data = [] 85 | for img_path in img_paths: 86 | pid, camid = map(int, pattern.search(img_path).groups()) 87 | if pid == -1: 88 | continue # junk images are just ignored 89 | assert 0 <= pid <= 1501 # pid == 0 means background 90 | assert 1 <= camid <= 6 91 | camid -= 1 # index starts from 0 92 | if relabel: 93 | pid = pid2label[pid] 94 | data.append((img_path, pid, camid)) 95 | 96 | return data 97 | 98 | -------------------------------------------------------------------------------- /data/datasets/veri_mask.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: Xinchen Liu 4 | @contact: lxc86739795@gmail.com 5 | """ 6 | 7 | import glob 8 | import re 9 | 10 | import os.path as osp 11 | 12 | from .bases import ImageMaskDataset 13 | import warnings 14 | 15 | # Changed by Xinchen Liu 16 | 17 | class VeRi_Mask(ImageMaskDataset): 18 | """ 19 | VeRi 20 | Reference: 21 | Liu et al. A Deep Learning based Approach for Progressive Vehicle Re-Identification. ECCV 2016. 
class VeRi_Mask(ImageMaskDataset):
    """VeRi dataset with per-image part masks.

    Reference:
        Liu et al. A Deep Learning based Approach for Progressive Vehicle
        Re-Identification. ECCV 2016.
    URL: https://vehiclereid.github.io/VeRi/

    Dataset statistics:
        # identities: 775
        # images: 37746 (train) + 1678 (query) + 11579 (gallery)
    """
    dataset_dir = 'veri'

    def __init__(self, root='/home/liuxinchen3/notespace/data', verbose=True, **kwargs):
        self.dataset_dir = osp.join(root, self.dataset_dir)
        self.train_dir = osp.join(self.dataset_dir, 'image_train')
        self.query_dir = osp.join(self.dataset_dir, 'image_query')
        self.gallery_dir = osp.join(self.dataset_dir, 'image_test')
        self.train_mask_dir = osp.join(self.dataset_dir, 'image_train_mask')
        self.query_mask_dir = osp.join(self.dataset_dir, 'image_query_mask')
        self.gallery_mask_dir = osp.join(self.dataset_dir, 'image_test_mask')

        required_files = [
            self.dataset_dir,
            self.train_dir,
            self.query_dir,
            self.gallery_dir,
            self.train_mask_dir,
            self.query_mask_dir,
            self.gallery_mask_dir,
        ]
        self.check_before_run(required_files)

        train = self._process_dir(self.train_dir, self.train_mask_dir, relabel=True)
        query = self._process_dir(self.query_dir, self.query_mask_dir, relabel=False)
        gallery = self._process_dir(self.gallery_dir, self.gallery_mask_dir, relabel=False)

        self.train = train
        self.query = query
        self.gallery = gallery

        super(VeRi_Mask, self).__init__(train, query, gallery, **kwargs)

    def _process_dir(self, dir_path, mask_dir, relabel=False):
        """Pair each image with its mask (by sorted order) and build samples."""
        img_paths = sorted(glob.glob(osp.join(dir_path, '*.jpg')))
        mask_paths = sorted(glob.glob(osp.join(mask_dir, '*.png')))
        # Images and masks must correspond one-to-one after sorting.
        assert len(img_paths) == len(mask_paths), f'len(img_paths) = {len(img_paths)}, len(mask_paths) = {len(mask_paths)}'
        pattern = re.compile(r'([\d]+)_c(\d\d\d)')

        known_pids = set()
        for img_path in img_paths:
            pid, _ = map(int, pattern.search(img_path).groups())
            if pid == -1:
                continue  # junk images are just ignored
            known_pids.add(pid)
        pid2label = {pid: label for label, pid in enumerate(known_pids)}

        dataset = []
        for img_path, mask_path in zip(img_paths, mask_paths):
            pid, camid = map(int, pattern.search(img_path).groups())
            if pid == -1:
                continue  # junk images are just ignored
            assert 1 <= pid <= 776
            assert 1 <= camid <= 20
            camid -= 1  # index starts from 0
            if relabel:
                pid = pid2label[pid]
            dataset.append((img_path, mask_path, pid, camid))

        return dataset
class VeRi_Mask(ImageMaskDataset):
    """VeRi dataset with per-image part masks.

    Reference:
        Liu et al. A Deep Learning based Approach for Progressive Vehicle
        Re-Identification. ECCV 2016.
    URL: https://vehiclereid.github.io/VeRi/

    Dataset statistics:
        # identities: 775
        # images: 37746 (train) + 1678 (query) + 11579 (gallery)
    """
    dataset_dir = 'veri'

    def __init__(self, root='/home/liuxinchen3/notespace/data', verbose=True, **kwargs):
        self.dataset_dir = osp.join(root, self.dataset_dir)
        self.train_dir = osp.join(self.dataset_dir, 'image_train')
        self.query_dir = osp.join(self.dataset_dir, 'image_query')
        self.gallery_dir = osp.join(self.dataset_dir, 'image_test')
        self.train_mask_dir = osp.join(self.dataset_dir, 'image_train_mask')
        self.query_mask_dir = osp.join(self.dataset_dir, 'image_query_mask')
        self.gallery_mask_dir = osp.join(self.dataset_dir, 'image_test_mask')

        required_files = [
            self.dataset_dir,
            self.train_dir,
            self.query_dir,
            self.gallery_dir,
            self.train_mask_dir,
            self.query_mask_dir,
            self.gallery_mask_dir,
        ]
        self.check_before_run(required_files)

        train = self._process_dir(self.train_dir, self.train_mask_dir, relabel=True)
        query = self._process_dir(self.query_dir, self.query_mask_dir, relabel=False)
        gallery = self._process_dir(self.gallery_dir, self.gallery_mask_dir, relabel=False)

        self.train = train
        self.query = query
        self.gallery = gallery

        super(VeRi_Mask, self).__init__(train, query, gallery, **kwargs)

    def _process_dir(self, dir_path, mask_dir, relabel=False):
        """Pair each image with its mask (by sorted order) and build samples."""
        img_paths = sorted(glob.glob(osp.join(dir_path, '*.jpg')))
        mask_paths = sorted(glob.glob(osp.join(mask_dir, '*.png')))
        # Images and masks must correspond one-to-one after sorting.
        assert len(img_paths) == len(mask_paths), f'len(img_paths) = {len(img_paths)}, len(mask_paths) = {len(mask_paths)}'
        pattern = re.compile(r'([\d]+)_c(\d\d\d)')

        known_pids = set()
        for img_path in img_paths:
            pid, _ = map(int, pattern.search(img_path).groups())
            if pid == -1:
                continue  # junk images are just ignored
            known_pids.add(pid)
        pid2label = {pid: label for label, pid in enumerate(known_pids)}

        dataset = []
        for img_path, mask_path in zip(img_paths, mask_paths):
            pid, camid = map(int, pattern.search(img_path).groups())
            if pid == -1:
                continue  # junk images are just ignored
            assert 1 <= pid <= 776
            assert 1 <= camid <= 20
            camid -= 1  # index starts from 0
            if relabel:
                pid = pid2label[pid]
            dataset.append((img_path, mask_path, pid, camid))

        return dataset
class VeRiWild_Small(ImageDataset):
    """VeRi-Wild (small train/test split).

    Reference:
        Lou et al. A Large-Scale Dataset for Vehicle Re-Identification in
        the Wild. CVPR 2019.
    URL: https://github.com/PKU-IMRE/VERI-Wild

    Dataset statistics:
        # identities: 40,671
        # images: 416,314
    """
    dataset_dir = 'VERI-Wild'

    def __init__(self, root='/home/liuxinchen3/notespace/data', verbose=True, **kwargs):
        self.image_dir = osp.join(root, self.dataset_dir, 'images')
        self.train_list = osp.join(root, self.dataset_dir, 'train_test_split_small/train_700.txt')
        self.query_list = osp.join(root, self.dataset_dir, 'train_test_split_small/test_300_query.txt')
        self.gallery_list = osp.join(root, self.dataset_dir, 'train_test_split_small/test_300.txt')
        self.vehicle_info = osp.join(root, self.dataset_dir, 'train_test_split/vehicle_info.txt')

        required_files = [
            self.image_dir,
            self.train_list,
            self.query_list,
            self.gallery_list,
            self.vehicle_info
        ]
        self.check_before_run(required_files)

        # Global lookup tables keyed by image id, shared by all splits.
        self.imgid2vid, self.imgid2camid, self.imgid2imgpath = self._process_vehicle(self.vehicle_info)

        query = self._process_dir(self.query_list, relabel=False)
        gallery = self._process_dir(self.gallery_list, relabel=False)
        train = self._process_dir(self.train_list, relabel=True)

        self.train = train
        self.query = query
        self.gallery = gallery

        super(VeRiWild_Small, self).__init__(train, query, gallery, **kwargs)

    def _process_dir(self, img_list, relabel=False):
        """Parse ``img_list`` (lines of '<vid>/<imgid>') into sample tuples."""
        # BUGFIX: close the file handle (was an open(...).readlines() leak).
        with open(img_list, 'r') as f:
            img_list_lines = f.readlines()

        # Parse each line exactly once; collect the distinct vehicle ids.
        vid_container = set()
        entries = []  # (vid, imgid) string pairs
        for line in img_list_lines:
            parts = line.strip().split('/')
            vid, imgid = parts[0], parts[1]
            vid_container.add(vid)
            entries.append((vid, imgid))
        vid2label = {vid: label for label, vid in enumerate(vid_container)}

        dataset = []
        for vid, imgid in entries:
            if relabel:
                vid = vid2label[vid]
            dataset.append((self.imgid2imgpath[imgid], int(vid), int(self.imgid2camid[imgid])))

        assert len(dataset) == len(img_list_lines)
        return dataset

    def _process_vehicle(self, vehicle_info):
        """Parse vehicle_info.txt into imgid -> vid / camid / path dicts."""
        imgid2vid = {}
        imgid2camid = {}
        imgid2imgpath = {}
        # BUGFIX: close the file handle (was an open(...).readlines() leak).
        with open(vehicle_info, 'r') as f:
            vehicle_info_lines = f.readlines()

        # First line is a header; skip it. Strip/split each line only once.
        for line in vehicle_info_lines[1:]:
            line = line.strip()
            vid = line.split('/')[0]
            fields = line.split(';')
            imgid = fields[0].split('/')[1]
            camid = fields[1]
            img_path = osp.join(self.image_dir, vid, imgid + '.jpg')
            imgid2vid[imgid] = vid
            imgid2camid[imgid] = camid
            imgid2imgpath[imgid] = img_path

        assert len(imgid2vid) == len(vehicle_info_lines) - 1
        return imgid2vid, imgid2camid, imgid2imgpath
class VeRiWild_Medium(ImageDataset):
    """VeRi-Wild (medium train/test split).

    Reference:
        Lou et al. A Large-Scale Dataset for Vehicle Re-Identification in
        the Wild. CVPR 2019.
    URL: https://github.com/PKU-IMRE/VERI-Wild

    Dataset statistics:
        # identities: 40,671
        # images: 416,314
    """
    dataset_dir = 'VERI-Wild'

    def __init__(self, root='/home/liuxinchen3/notespace/data', verbose=True, **kwargs):
        self.image_dir = osp.join(root, self.dataset_dir, 'images')
        self.train_list = osp.join(root, self.dataset_dir, 'train_test_split_medium/train_3500.txt')
        self.query_list = osp.join(root, self.dataset_dir, 'train_test_split_medium/test_1500_query.txt')
        self.gallery_list = osp.join(root, self.dataset_dir, 'train_test_split_medium/test_1500.txt')
        self.vehicle_info = osp.join(root, self.dataset_dir, 'train_test_split/vehicle_info.txt')

        required_files = [
            self.image_dir,
            self.train_list,
            self.query_list,
            self.gallery_list,
            self.vehicle_info
        ]
        self.check_before_run(required_files)

        # Global lookup tables keyed by image id, shared by all splits.
        self.imgid2vid, self.imgid2camid, self.imgid2imgpath = self._process_vehicle(self.vehicle_info)

        query = self._process_dir(self.query_list, relabel=False)
        gallery = self._process_dir(self.gallery_list, relabel=False)
        train = self._process_dir(self.train_list, relabel=True)

        self.train = train
        self.query = query
        self.gallery = gallery

        super(VeRiWild_Medium, self).__init__(train, query, gallery, **kwargs)

    def _process_dir(self, img_list, relabel=False):
        """Parse ``img_list`` (lines of '<vid>/<imgid>') into sample tuples."""
        # BUGFIX: close the file handle (was an open(...).readlines() leak).
        with open(img_list, 'r') as f:
            img_list_lines = f.readlines()

        # Parse each line exactly once; collect the distinct vehicle ids.
        vid_container = set()
        entries = []  # (vid, imgid) string pairs
        for line in img_list_lines:
            parts = line.strip().split('/')
            vid, imgid = parts[0], parts[1]
            vid_container.add(vid)
            entries.append((vid, imgid))
        vid2label = {vid: label for label, vid in enumerate(vid_container)}

        dataset = []
        for vid, imgid in entries:
            if relabel:
                vid = vid2label[vid]
            dataset.append((self.imgid2imgpath[imgid], int(vid), int(self.imgid2camid[imgid])))

        assert len(dataset) == len(img_list_lines)
        return dataset

    def _process_vehicle(self, vehicle_info):
        """Parse vehicle_info.txt into imgid -> vid / camid / path dicts."""
        imgid2vid = {}
        imgid2camid = {}
        imgid2imgpath = {}
        # BUGFIX: close the file handle (was an open(...).readlines() leak).
        with open(vehicle_info, 'r') as f:
            vehicle_info_lines = f.readlines()

        # First line is a header; skip it. Strip/split each line only once.
        for line in vehicle_info_lines[1:]:
            line = line.strip()
            vid = line.split('/')[0]
            fields = line.split(';')
            imgid = fields[0].split('/')[1]
            camid = fields[1]
            img_path = osp.join(self.image_dir, vid, imgid + '.jpg')
            imgid2vid[imgid] = vid
            imgid2camid[imgid] = camid
            imgid2imgpath[imgid] = img_path

        assert len(imgid2vid) == len(vehicle_info_lines) - 1
        return imgid2vid, imgid2camid, imgid2imgpath
class VeRiWild_Small(ImageDataset):
    """VeRi-Wild (small train/test split).

    Reference:
        Lou et al. A Large-Scale Dataset for Vehicle Re-Identification in
        the Wild. CVPR 2019.
    URL: https://github.com/PKU-IMRE/VERI-Wild

    Dataset statistics:
        # identities: 40,671
        # images: 416,314
    """
    dataset_dir = 'VERI-Wild'

    def __init__(self, root='/home/liuxinchen3/notespace/data', verbose=True, **kwargs):
        self.image_dir = osp.join(root, self.dataset_dir, 'images')
        self.train_list = osp.join(root, self.dataset_dir, 'train_test_split_small/train_700.txt')
        self.query_list = osp.join(root, self.dataset_dir, 'train_test_split_small/test_300_query.txt')
        self.gallery_list = osp.join(root, self.dataset_dir, 'train_test_split_small/test_300.txt')
        self.vehicle_info = osp.join(root, self.dataset_dir, 'train_test_split/vehicle_info.txt')

        required_files = [
            self.image_dir,
            self.train_list,
            self.query_list,
            self.gallery_list,
            self.vehicle_info
        ]
        self.check_before_run(required_files)

        # Global lookup tables keyed by image id, shared by all splits.
        self.imgid2vid, self.imgid2camid, self.imgid2imgpath = self._process_vehicle(self.vehicle_info)

        query = self._process_dir(self.query_list, relabel=False)
        gallery = self._process_dir(self.gallery_list, relabel=False)
        train = self._process_dir(self.train_list, relabel=True)

        self.train = train
        self.query = query
        self.gallery = gallery

        super(VeRiWild_Small, self).__init__(train, query, gallery, **kwargs)

    def _process_dir(self, img_list, relabel=False):
        """Parse ``img_list`` (lines of '<vid>/<imgid>') into sample tuples."""
        # BUGFIX: close the file handle (was an open(...).readlines() leak).
        with open(img_list, 'r') as f:
            img_list_lines = f.readlines()

        # Parse each line exactly once; collect the distinct vehicle ids.
        vid_container = set()
        entries = []  # (vid, imgid) string pairs
        for line in img_list_lines:
            parts = line.strip().split('/')
            vid, imgid = parts[0], parts[1]
            vid_container.add(vid)
            entries.append((vid, imgid))
        vid2label = {vid: label for label, vid in enumerate(vid_container)}

        dataset = []
        for vid, imgid in entries:
            if relabel:
                vid = vid2label[vid]
            dataset.append((self.imgid2imgpath[imgid], int(vid), int(self.imgid2camid[imgid])))

        assert len(dataset) == len(img_list_lines)
        return dataset

    def _process_vehicle(self, vehicle_info):
        """Parse vehicle_info.txt into imgid -> vid / camid / path dicts."""
        imgid2vid = {}
        imgid2camid = {}
        imgid2imgpath = {}
        # BUGFIX: close the file handle (was an open(...).readlines() leak).
        with open(vehicle_info, 'r') as f:
            vehicle_info_lines = f.readlines()

        # First line is a header; skip it. Strip/split each line only once.
        for line in vehicle_info_lines[1:]:
            line = line.strip()
            vid = line.split('/')[0]
            fields = line.split(';')
            imgid = fields[0].split('/')[1]
            camid = fields[1]
            img_path = osp.join(self.image_dir, vid, imgid + '.jpg')
            imgid2vid[imgid] = vid
            imgid2camid[imgid] = camid
            imgid2imgpath[imgid] = img_path

        assert len(imgid2vid) == len(vehicle_info_lines) - 1
        return imgid2vid, imgid2camid, imgid2imgpath
class VeRiWild_Medium(ImageDataset):
    """VeRi-Wild (medium train/test split).

    Reference:
        Lou et al. A Large-Scale Dataset for Vehicle Re-Identification in
        the Wild. CVPR 2019.
    URL: https://github.com/PKU-IMRE/VERI-Wild

    Dataset statistics:
        # identities: 40,671
        # images: 416,314
    """
    dataset_dir = 'VERI-Wild'

    def __init__(self, root='/home/liuxinchen3/notespace/data', verbose=True, **kwargs):
        self.image_dir = osp.join(root, self.dataset_dir, 'images')
        self.train_list = osp.join(root, self.dataset_dir, 'train_test_split_medium/train_3500.txt')
        self.query_list = osp.join(root, self.dataset_dir, 'train_test_split_medium/test_1500_query.txt')
        self.gallery_list = osp.join(root, self.dataset_dir, 'train_test_split_medium/test_1500.txt')
        self.vehicle_info = osp.join(root, self.dataset_dir, 'train_test_split/vehicle_info.txt')

        required_files = [
            self.image_dir,
            self.train_list,
            self.query_list,
            self.gallery_list,
            self.vehicle_info
        ]
        self.check_before_run(required_files)

        # Global lookup tables keyed by image id, shared by all splits.
        self.imgid2vid, self.imgid2camid, self.imgid2imgpath = self._process_vehicle(self.vehicle_info)

        query = self._process_dir(self.query_list, relabel=False)
        gallery = self._process_dir(self.gallery_list, relabel=False)
        train = self._process_dir(self.train_list, relabel=True)

        self.train = train
        self.query = query
        self.gallery = gallery

        super(VeRiWild_Medium, self).__init__(train, query, gallery, **kwargs)

    def _process_dir(self, img_list, relabel=False):
        """Parse ``img_list`` (lines of '<vid>/<imgid>') into sample tuples."""
        # BUGFIX: close the file handle (was an open(...).readlines() leak).
        with open(img_list, 'r') as f:
            img_list_lines = f.readlines()

        # Parse each line exactly once; collect the distinct vehicle ids.
        vid_container = set()
        entries = []  # (vid, imgid) string pairs
        for line in img_list_lines:
            parts = line.strip().split('/')
            vid, imgid = parts[0], parts[1]
            vid_container.add(vid)
            entries.append((vid, imgid))
        vid2label = {vid: label for label, vid in enumerate(vid_container)}

        dataset = []
        for vid, imgid in entries:
            if relabel:
                vid = vid2label[vid]
            dataset.append((self.imgid2imgpath[imgid], int(vid), int(self.imgid2camid[imgid])))

        assert len(dataset) == len(img_list_lines)
        return dataset

    def _process_vehicle(self, vehicle_info):
        """Parse vehicle_info.txt into imgid -> vid / camid / path dicts."""
        imgid2vid = {}
        imgid2camid = {}
        imgid2imgpath = {}
        # BUGFIX: close the file handle (was an open(...).readlines() leak).
        with open(vehicle_info, 'r') as f:
            vehicle_info_lines = f.readlines()

        # First line is a header; skip it. Strip/split each line only once.
        for line in vehicle_info_lines[1:]:
            line = line.strip()
            vid = line.split('/')[0]
            fields = line.split(';')
            imgid = fields[0].split('/')[1]
            camid = fields[1]
            img_path = osp.join(self.image_dir, vid, imgid + '.jpg')
            imgid2vid[imgid] = vid
            imgid2camid[imgid] = camid
            imgid2imgpath[imgid] = img_path

        assert len(imgid2vid) == len(vehicle_info_lines) - 1
        return imgid2vid, imgid2camid, imgid2imgpath
out = torch.bmm(weight, x.view(batchSize, inputSize, 1)) 30 | #out.div_(T).exp_() # batchSize * self.K+1 31 | out = torch.exp(torch.div(out, T)) 32 | # x.data.resize_(batchSize, inputSize) 33 | 34 | if Z < 0: 35 | params[2] = out.mean() * outputSize 36 | #Z = params[2].item() 37 | Z = params[2].clone().detach().item() 38 | print("normalization constant Z is set to {:.1f}".format(Z)) 39 | 40 | # out.div_(Z).resize_(batchSize, K+1) 41 | out = torch.div(out, Z).squeeze().contiguous() 42 | 43 | self.save_for_backward(x, memory, y, weight, out, params) 44 | 45 | return out 46 | 47 | @staticmethod 48 | def backward(self, gradOutput): 49 | x, memory, y, weight, out, params = self.saved_tensors 50 | K = int(params[0].item()) 51 | T = params[1].item() 52 | Z = params[2].item() 53 | momentum = params[3].item() 54 | batchSize = gradOutput.size(0) 55 | 56 | # gradients d Pm / d linear = exp(linear) / Z 57 | # gradOutput.data.mul_(out.data) 58 | gradOutput = gradOutput.mul(out) 59 | # add temperature 60 | # gradOutput.data.div_(T) 61 | gradOutput = gradOutput.div(T) 62 | 63 | #gradOutput.data.resize_(batchSize, 1, K+1) 64 | gradOutput = gradOutput.view(batchSize, 1, K+1) 65 | 66 | # gradient of linear 67 | # gradInput = torch.bmm(gradOutput.data, weight) 68 | gradInput = torch.bmm(gradOutput, weight) 69 | gradInput.resize_as_(x) 70 | 71 | # update the non-parametric data 72 | weight_pos = weight.select(1, 0).resize_as_(x) 73 | # weight_pos.mul_(momentum) 74 | weight_pos = weight_pos.mul(momentum) 75 | # weight_pos.add_(torch.mul(x.data, 1-momentum)) 76 | weight_pos = weight_pos.add(torch.mul(x, 1-momentum)) 77 | w_norm = weight_pos.pow(2).sum(1, keepdim=True).pow(0.5) 78 | updated_weight = weight_pos.div(w_norm) 79 | memory.index_copy_(0, y, updated_weight) 80 | 81 | return gradInput, None, None, None, None 82 | 83 | class NCEAverage(nn.Module): 84 | 85 | def __init__(self, inputSize, outputSize, K, T=0.07, momentum=0.5, Z=None): 86 | super(NCEAverage, self).__init__() 87 | 
self.nLem = outputSize 88 | self.unigrams = torch.ones(self.nLem) 89 | self.multinomial = AliasMethod(self.unigrams) 90 | self.multinomial.cuda() 91 | self.K = K 92 | 93 | self.register_buffer('params',torch.tensor([K, T, -1, momentum])); 94 | stdv = 1. / math.sqrt(inputSize/3) 95 | self.register_buffer('memory', torch.rand(outputSize, inputSize).mul_(2*stdv).add_(-stdv)) 96 | 97 | def forward(self, x, y): 98 | batchSize = x.size(0) 99 | idx = self.multinomial.draw(batchSize * (self.K+1)).view(batchSize, -1) 100 | out = NCEFunction.apply(x, y, self.memory, idx, self.params) 101 | return out 102 | 103 | -------------------------------------------------------------------------------- /modeling/losses/InsDis/.ipynb_checkpoints/NCEAverage-checkpoint.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.autograd import Function 3 | from torch import nn 4 | from .alias_multinomial import AliasMethod 5 | import math 6 | 7 | class NCEFunction(Function): 8 | @staticmethod 9 | def forward(self, x, y, memory, idx, params): 10 | K = int(params[0].item()) 11 | T = params[1].item() 12 | Z = params[2].item() 13 | momentum = params[3].item() 14 | 15 | batchSize = x.size(0) 16 | outputSize = memory.size(0) 17 | inputSize = memory.size(1) 18 | 19 | # sample positives & negatives 20 | idx.select(1, 0).copy_(y.data) 21 | 22 | # sample correspoinding weights 23 | weight = torch.index_select(memory, 0, idx.view(-1)) 24 | #weight.resize_(batchSize, K+1, inputSize) 25 | weight = weight.view(batchSize, K+1, inputSize) 26 | 27 | # inner product 28 | # out = torch.bmm(weight, x.data.resize_(batchSize, inputSize, 1)) 29 | out = torch.bmm(weight, x.view(batchSize, inputSize, 1)) 30 | #out.div_(T).exp_() # batchSize * self.K+1 31 | out = torch.exp(torch.div(out, T)) 32 | # x.data.resize_(batchSize, inputSize) 33 | 34 | if Z < 0: 35 | params[2] = out.mean() * outputSize 36 | #Z = params[2].item() 37 | Z = 
params[2].clone().detach().item() 38 | print("normalization constant Z is set to {:.1f}".format(Z)) 39 | 40 | # out.div_(Z).resize_(batchSize, K+1) 41 | out = torch.div(out, Z).squeeze().contiguous() 42 | 43 | self.save_for_backward(x, memory, y, weight, out, params) 44 | 45 | return out 46 | 47 | @staticmethod 48 | def backward(self, gradOutput): 49 | x, memory, y, weight, out, params = self.saved_tensors 50 | K = int(params[0].item()) 51 | T = params[1].item() 52 | Z = params[2].item() 53 | momentum = params[3].item() 54 | batchSize = gradOutput.size(0) 55 | 56 | # gradients d Pm / d linear = exp(linear) / Z 57 | # gradOutput.data.mul_(out.data) 58 | gradOutput = gradOutput.mul(out) 59 | # add temperature 60 | # gradOutput.data.div_(T) 61 | gradOutput = gradOutput.div(T) 62 | 63 | #gradOutput.data.resize_(batchSize, 1, K+1) 64 | gradOutput = gradOutput.view(batchSize, 1, K+1) 65 | 66 | # gradient of linear 67 | # gradInput = torch.bmm(gradOutput.data, weight) 68 | gradInput = torch.bmm(gradOutput, weight) 69 | gradInput.resize_as_(x) 70 | 71 | # update the non-parametric data 72 | weight_pos = weight.select(1, 0).resize_as_(x) 73 | # weight_pos.mul_(momentum) 74 | weight_pos = weight_pos.mul(momentum) 75 | # weight_pos.add_(torch.mul(x.data, 1-momentum)) 76 | weight_pos = weight_pos.add(torch.mul(x, 1-momentum)) 77 | w_norm = weight_pos.pow(2).sum(1, keepdim=True).pow(0.5) 78 | updated_weight = weight_pos.div(w_norm) 79 | memory.index_copy_(0, y, updated_weight) 80 | 81 | return gradInput, None, None, None, None 82 | 83 | class NCEAverage(nn.Module): 84 | 85 | def __init__(self, inputSize, outputSize, K, T=0.07, momentum=0.5, Z=None): 86 | super(NCEAverage, self).__init__() 87 | self.nLem = outputSize 88 | self.unigrams = torch.ones(self.nLem) 89 | self.multinomial = AliasMethod(self.unigrams) 90 | self.multinomial.cuda() 91 | self.K = K 92 | 93 | self.register_buffer('params',torch.tensor([K, T, -1, momentum])); 94 | stdv = 1. 
/ math.sqrt(inputSize/3) 95 | self.register_buffer('memory', torch.rand(outputSize, inputSize).mul_(2*stdv).add_(-stdv)) 96 | 97 | def forward(self, x, y): 98 | batchSize = x.size(0) 99 | idx = self.multinomial.draw(batchSize * (self.K+1)).view(batchSize, -1) 100 | out = NCEFunction.apply(x, y, self.memory, idx, self.params) 101 | return out 102 | 103 | -------------------------------------------------------------------------------- /data/datasets/veriwild_small_mask.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: Xinchen Liu 4 | @contact: lxc86739795@gmail.com 5 | """ 6 | 7 | import glob 8 | import re 9 | 10 | import os.path as osp 11 | 12 | from .bases import ImageMaskDataset 13 | import warnings 14 | 15 | # Changed by Xinchen Liu 16 | 17 | class VeRiWild_Small_Mask(ImageMaskDataset): 18 | """ 19 | VeRi-Wild 20 | Reference: 21 | Lou et al. A Large-Scale Dataset for Vehicle Re-Identification in the Wild. CVPR 2019. 
22 | URL: https://github.com/PKU-IMRE/VERI-Wild 23 | 24 | Dataset statistics: 25 | # identities: 40,671 26 | # images: 416,314 27 | """ 28 | dataset_dir = 'VERI-Wild' 29 | 30 | def __init__(self, root='/home/liuxinchen3/notespace/data', verbose=True, **kwargs): 31 | self.image_dir = osp.join(root, self.dataset_dir, 'images') 32 | self.mask_dir = osp.join(root, self.dataset_dir, 'images_mask') 33 | self.train_list = osp.join(root, self.dataset_dir, 'train_test_split_small/train_700.txt') 34 | self.query_list = osp.join(root, self.dataset_dir, 'train_test_split_small/test_300_query.txt') 35 | self.gallery_list = osp.join(root, self.dataset_dir, 'train_test_split_small/test_300.txt') 36 | self.vehicle_info = osp.join(root, self.dataset_dir, 'train_test_split/vehicle_info.txt') 37 | 38 | required_files = [ 39 | self.image_dir, 40 | self.mask_dir, 41 | self.train_list, 42 | self.query_list, 43 | self.gallery_list, 44 | self.vehicle_info 45 | ] 46 | 47 | self.check_before_run(required_files) 48 | 49 | self.imgid2vid, self.imgid2camid, self.imgid2imgpath, self.imgid2maskpath = self._process_vehicle(self.vehicle_info) 50 | 51 | query = self._process_dir(self.query_list, relabel=False) 52 | gallery = self._process_dir(self.gallery_list, relabel=False) 53 | train = self._process_dir(self.train_list, relabel=True) 54 | 55 | self.train = train 56 | self.query = query 57 | self.gallery = gallery 58 | 59 | super(VeRiWild_Small_Mask, self).__init__(train, query, gallery, **kwargs) 60 | 61 | def _process_dir(self, img_list, relabel=False): 62 | 63 | vid_container = set() 64 | img_list_lines = open(img_list, 'r').readlines() 65 | for idx, line in enumerate(img_list_lines): 66 | line = line.strip() 67 | vid = line.split('/')[0] 68 | vid_container.add(vid) 69 | vid2label = {vid: label for label, vid in enumerate(vid_container)} 70 | 71 | dataset = [] 72 | for idx, line in enumerate(img_list_lines): 73 | # if idx < 10: 74 | line = line.strip() 75 | vid = line.split('/')[0] 76 | imgid 
= line.split('/')[1] 77 | if relabel: vid = vid2label[vid] 78 | dataset.append((self.imgid2imgpath[imgid], self.imgid2maskpath[imgid], int(vid), int(self.imgid2camid[imgid]))) 79 | 80 | # print(dataset) 81 | # random.shuffle(dataset) 82 | assert len(dataset) == len(img_list_lines) 83 | 84 | return dataset 85 | 86 | def _process_vehicle(self, vehicle_info): 87 | imgid2vid = {} 88 | imgid2camid = {} 89 | imgid2imgpath = {} 90 | imgid2maskpath = {} 91 | vehicle_info_lines = open(vehicle_info, 'r').readlines() 92 | 93 | for idx, line in enumerate(vehicle_info_lines[1:]): 94 | # if idx < 10: 95 | vid = line.strip().split('/')[0] 96 | imgid = line.strip().split(';')[0].split('/')[1] 97 | camid = line.strip().split(';')[1] 98 | img_path = osp.join(self.image_dir, vid, imgid + '.jpg') 99 | mask_path = osp.join(self.mask_dir, vid, imgid + '.png') 100 | 101 | imgid2vid[imgid] = vid 102 | imgid2camid[imgid] = camid 103 | imgid2imgpath[imgid] = img_path 104 | imgid2maskpath[imgid] = mask_path 105 | # print(idx, vid, imgid, camid, img_path) 106 | 107 | assert len(imgid2vid) == len(vehicle_info_lines)-1 108 | return imgid2vid, imgid2camid, imgid2imgpath, imgid2maskpath 109 | -------------------------------------------------------------------------------- /data/datasets/veriwild_medium_mask.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: Xinchen Liu 4 | @contact: lxc86739795@gmail.com 5 | """ 6 | 7 | import glob 8 | import re 9 | 10 | import os.path as osp 11 | 12 | from .bases import ImageMaskDataset 13 | import warnings 14 | 15 | # Changed by Xinchen Liu 16 | 17 | class VeRiWild_Medium_Mask(ImageMaskDataset): 18 | """ 19 | VeRi-Wild 20 | Reference: 21 | Lou et al. A Large-Scale Dataset for Vehicle Re-Identification in the Wild. CVPR 2019. 
22 | URL: https://github.com/PKU-IMRE/VERI-Wild 23 | 24 | Dataset statistics: 25 | # identities: 40,671 26 | # images: 416,314 27 | """ 28 | dataset_dir = 'VERI-Wild' 29 | 30 | def __init__(self, root='/home/liuxinchen3/notespace/data', verbose=True, **kwargs): 31 | self.image_dir = osp.join(root, self.dataset_dir, 'images') 32 | self.mask_dir = osp.join(root, self.dataset_dir, 'images_mask') 33 | self.train_list = osp.join(root, self.dataset_dir, 'train_test_split_medium/train_3500.txt') 34 | self.query_list = osp.join(root, self.dataset_dir, 'train_test_split_medium/test_1500_query.txt') 35 | self.gallery_list = osp.join(root, self.dataset_dir, 'train_test_split_medium/test_1500.txt') 36 | self.vehicle_info = osp.join(root, self.dataset_dir, 'train_test_split/vehicle_info.txt') 37 | 38 | required_files = [ 39 | self.image_dir, 40 | self.mask_dir, 41 | self.train_list, 42 | self.query_list, 43 | self.gallery_list, 44 | self.vehicle_info 45 | ] 46 | 47 | self.check_before_run(required_files) 48 | 49 | self.imgid2vid, self.imgid2camid, self.imgid2imgpath, self.imgid2maskpath = self._process_vehicle(self.vehicle_info) 50 | 51 | query = self._process_dir(self.query_list, relabel=False) 52 | gallery = self._process_dir(self.gallery_list, relabel=False) 53 | train = self._process_dir(self.train_list, relabel=True) 54 | 55 | self.train = train 56 | self.query = query 57 | self.gallery = gallery 58 | 59 | super(VeRiWild_Medium_Mask, self).__init__(train, query, gallery, **kwargs) 60 | 61 | def _process_dir(self, img_list, relabel=False): 62 | 63 | vid_container = set() 64 | img_list_lines = open(img_list, 'r').readlines() 65 | for idx, line in enumerate(img_list_lines): 66 | line = line.strip() 67 | vid = line.split('/')[0] 68 | vid_container.add(vid) 69 | vid2label = {vid: label for label, vid in enumerate(vid_container)} 70 | 71 | dataset = [] 72 | for idx, line in enumerate(img_list_lines): 73 | # if idx < 10: 74 | line = line.strip() 75 | vid = line.split('/')[0] 76 | 
imgid = line.split('/')[1] 77 | if relabel: vid = vid2label[vid] 78 | dataset.append((self.imgid2imgpath[imgid], self.imgid2maskpath[imgid], int(vid), int(self.imgid2camid[imgid]))) 79 | 80 | # print(dataset) 81 | # random.shuffle(dataset) 82 | assert len(dataset) == len(img_list_lines) 83 | 84 | return dataset 85 | 86 | def _process_vehicle(self, vehicle_info): 87 | imgid2vid = {} 88 | imgid2camid = {} 89 | imgid2imgpath = {} 90 | imgid2maskpath = {} 91 | vehicle_info_lines = open(vehicle_info, 'r').readlines() 92 | 93 | for idx, line in enumerate(vehicle_info_lines[1:]): 94 | # if idx < 10: 95 | vid = line.strip().split('/')[0] 96 | imgid = line.strip().split(';')[0].split('/')[1] 97 | camid = line.strip().split(';')[1] 98 | img_path = osp.join(self.image_dir, vid, imgid + '.jpg') 99 | mask_path = osp.join(self.mask_dir, vid, imgid + '.png') 100 | 101 | imgid2vid[imgid] = vid 102 | imgid2camid[imgid] = camid 103 | imgid2imgpath[imgid] = img_path 104 | imgid2maskpath[imgid] = mask_path 105 | # print(idx, vid, imgid, camid, img_path) 106 | 107 | assert len(imgid2vid) == len(vehicle_info_lines)-1 108 | return imgid2vid, imgid2camid, imgid2imgpath, imgid2maskpath 109 | -------------------------------------------------------------------------------- /data/datasets/.ipynb_checkpoints/veriwild_small_mask-checkpoint.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: Xinchen Liu 4 | @contact: lxc86739795@gmail.com 5 | """ 6 | 7 | import glob 8 | import re 9 | 10 | import os.path as osp 11 | 12 | from .bases import ImageMaskDataset 13 | import warnings 14 | 15 | # Changed by Xinchen Liu 16 | 17 | class VeRiWild_Small_Mask(ImageMaskDataset): 18 | """ 19 | VeRi-Wild 20 | Reference: 21 | Lou et al. A Large-Scale Dataset for Vehicle Re-Identification in the Wild. CVPR 2019. 
22 | URL: https://github.com/PKU-IMRE/VERI-Wild 23 | 24 | Dataset statistics: 25 | # identities: 40,671 26 | # images: 416,314 27 | """ 28 | dataset_dir = 'VERI-Wild' 29 | 30 | def __init__(self, root='/home/liuxinchen3/notespace/data', verbose=True, **kwargs): 31 | self.image_dir = osp.join(root, self.dataset_dir, 'images') 32 | self.mask_dir = osp.join(root, self.dataset_dir, 'images_mask') 33 | self.train_list = osp.join(root, self.dataset_dir, 'train_test_split_small/train_700.txt') 34 | self.query_list = osp.join(root, self.dataset_dir, 'train_test_split_small/test_300_query.txt') 35 | self.gallery_list = osp.join(root, self.dataset_dir, 'train_test_split_small/test_300.txt') 36 | self.vehicle_info = osp.join(root, self.dataset_dir, 'train_test_split/vehicle_info.txt') 37 | 38 | required_files = [ 39 | self.image_dir, 40 | self.mask_dir, 41 | self.train_list, 42 | self.query_list, 43 | self.gallery_list, 44 | self.vehicle_info 45 | ] 46 | 47 | self.check_before_run(required_files) 48 | 49 | self.imgid2vid, self.imgid2camid, self.imgid2imgpath, self.imgid2maskpath = self._process_vehicle(self.vehicle_info) 50 | 51 | query = self._process_dir(self.query_list, relabel=False) 52 | gallery = self._process_dir(self.gallery_list, relabel=False) 53 | train = self._process_dir(self.train_list, relabel=True) 54 | 55 | self.train = train 56 | self.query = query 57 | self.gallery = gallery 58 | 59 | super(VeRiWild_Small_Mask, self).__init__(train, query, gallery, **kwargs) 60 | 61 | def _process_dir(self, img_list, relabel=False): 62 | 63 | vid_container = set() 64 | img_list_lines = open(img_list, 'r').readlines() 65 | for idx, line in enumerate(img_list_lines): 66 | line = line.strip() 67 | vid = line.split('/')[0] 68 | vid_container.add(vid) 69 | vid2label = {vid: label for label, vid in enumerate(vid_container)} 70 | 71 | dataset = [] 72 | for idx, line in enumerate(img_list_lines): 73 | # if idx < 10: 74 | line = line.strip() 75 | vid = line.split('/')[0] 76 | imgid 
= line.split('/')[1] 77 | if relabel: vid = vid2label[vid] 78 | dataset.append((self.imgid2imgpath[imgid], self.imgid2maskpath[imgid], int(vid), int(self.imgid2camid[imgid]))) 79 | 80 | # print(dataset) 81 | # random.shuffle(dataset) 82 | assert len(dataset) == len(img_list_lines) 83 | 84 | return dataset 85 | 86 | def _process_vehicle(self, vehicle_info): 87 | imgid2vid = {} 88 | imgid2camid = {} 89 | imgid2imgpath = {} 90 | imgid2maskpath = {} 91 | vehicle_info_lines = open(vehicle_info, 'r').readlines() 92 | 93 | for idx, line in enumerate(vehicle_info_lines[1:]): 94 | # if idx < 10: 95 | vid = line.strip().split('/')[0] 96 | imgid = line.strip().split(';')[0].split('/')[1] 97 | camid = line.strip().split(';')[1] 98 | img_path = osp.join(self.image_dir, vid, imgid + '.jpg') 99 | mask_path = osp.join(self.mask_dir, vid, imgid + '.png') 100 | 101 | imgid2vid[imgid] = vid 102 | imgid2camid[imgid] = camid 103 | imgid2imgpath[imgid] = img_path 104 | imgid2maskpath[imgid] = mask_path 105 | # print(idx, vid, imgid, camid, img_path) 106 | 107 | assert len(imgid2vid) == len(vehicle_info_lines)-1 108 | return imgid2vid, imgid2camid, imgid2imgpath, imgid2maskpath 109 | --------------------------------------------------------------------------------