├── tools ├── deploy │ ├── Caffe │ │ ├── __init__.py │ │ ├── net.py │ │ ├── caffe_lmdb.py │ │ ├── ReadMe.md │ │ └── caffe_net.py │ ├── run_export.sh │ ├── run_inference.sh │ ├── export2tf.py │ ├── caffe_export.py │ ├── caffe_inference.py │ └── README.md ├── pre_train.py └── train.py ├── fastreid ├── modeling │ ├── Net │ │ └── __init__.py │ ├── __init__.py │ ├── meta_arch │ │ ├── __init__.py │ │ └── build.py │ ├── losses │ │ ├── __init__.py │ │ ├── cross_entroy_loss.py │ │ └── focal_loss.py │ ├── backbones │ │ ├── __init__.py │ │ └── build.py │ └── heads │ │ ├── __init__.py │ │ ├── build.py │ │ ├── linear_head.py │ │ ├── bnneck_head.py │ │ └── reduction_head.py ├── export │ ├── __init__.py │ └── tf_modeling.py ├── utils │ ├── __init__.py │ ├── weight_init.py │ ├── timer.py │ ├── registry.py │ ├── history_buffer.py │ ├── precision_bn.py │ ├── env.py │ ├── summary.py │ └── collect_env.py ├── evaluation │ ├── rank_cylib │ │ ├── __init__.py │ │ ├── Makefile │ │ ├── setup.py │ │ └── test_cython.py │ ├── __init__.py │ ├── roc.py │ ├── query_expansion.py │ ├── testing.py │ └── rerank.py ├── solver │ ├── optim │ │ ├── __init__.py │ │ ├── sgd.py │ │ ├── adam.py │ │ └── lamb.py │ ├── __init__.py │ └── build.py ├── __init__.py ├── config │ └── __init__.py ├── data │ ├── __init__.py │ ├── transforms │ │ ├── __init__.py │ │ └── build.py │ ├── samplers │ │ ├── __init__.py │ │ └── data_sampler.py │ ├── datasets │ │ ├── __init__.py │ │ ├── veri.py │ │ ├── dukemtmcreid.py │ │ ├── hazy_dukemtmcreid.py │ │ ├── market1501.py │ │ ├── hazy_market1501.py │ │ ├── msmt17.py │ │ ├── vehicleid.py │ │ └── veriwild.py │ ├── data_utils.py │ ├── common.py │ └── build.py ├── engine │ ├── __init__.py │ └── launch.py └── layers │ ├── __init__.py │ ├── se_layer.py │ ├── batch_drop.py │ ├── circle.py │ ├── activation.py │ ├── non_local.py │ ├── arcface.py │ ├── pooling.py │ ├── splat.py │ └── context_block.py ├── .gitignore ├── tests ├── __init__.py ├── sampler_test.py ├── lr_scheduler_test.py ├── interp_test.py ├── model_test.py ├── feature_align.py └── dataset_test.py ├── configs ├── MSMT17 │ ├── AGW_R50.yml │ ├── sbs_R50.yml │ ├── bagtricks_R50.yml │ ├── AGW_S50.yml │ ├── sbs_S50.yml │ ├── bagtricks_S50.yml │ ├── AGW_R50-ibn.yml │ ├── mgn_R50-ibn.yml │ ├── sbs_R50-ibn.yml │ ├── AGW_R101-ibn.yml │ ├── bagtricks_R50-ibn.yml │ ├── sbs_R101-ibn.yml │ └── bagtricks_R101-ibn.yml ├── Base-AGW.yml ├── Base-MGN.yml ├── VeRi │ └── sbs_R50-ibn.yml ├── Hazy_DukeMTMC │ ├── server_baseline_bagtricks_R50.yml │ └── server_ism_bagtricks_R50.yml ├── VERIWild │ └── bagtricks_R50-ibn.yml ├── VehicleID │ └── bagtricks_R50-ibn.yml ├── Hazy_Market1501 │ ├── server_ism_bagtricks_R50.yml │ └── server_baseline_bagtricks_R50.yml ├── Base-Strongerbaseline.yml └── Base-bagtricks.yml ├── demo ├── README.md ├── plot_roc_with_pickle.py ├── demo.py ├── modeify-visualize_result.py └── predictor.py ├── train.sh ├── pre-train.sh └── README.md /tools/deploy/Caffe/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /fastreid/modeling/Net/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from .Nets import * -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | __pycache__ 3 | .DS_Store 4 | .vscode 5 | *.so 6 | logs/ 7 | .ipynb_checkpoints 8 | logs 
-------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | -------------------------------------------------------------------------------- /tools/deploy/Caffe/net.py: -------------------------------------------------------------------------------- 1 | raise ImportError('the nn_tools.Caffe.net is no longer used, please use nn_tools.Caffe.caffe_net') -------------------------------------------------------------------------------- /fastreid/export/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ -------------------------------------------------------------------------------- /fastreid/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | -------------------------------------------------------------------------------- /fastreid/evaluation/rank_cylib/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ -------------------------------------------------------------------------------- /fastreid/solver/optim/__init__.py: -------------------------------------------------------------------------------- 1 | from .lamb import Lamb 2 | from .swa import SWA 3 | from .adam import Adam 4 | from .sgd import SGD 5 | 6 | -------------------------------------------------------------------------------- /fastreid/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | 8 | __version__ = "0.1.0" -------------------------------------------------------------------------------- /fastreid/evaluation/rank_cylib/Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | python setup.py build_ext --inplace 3 | rm -rf build 4 | clean: 5 | rm -rf build 6 | rm -f rank_cy.c *.so 7 | -------------------------------------------------------------------------------- /configs/MSMT17/AGW_R50.yml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-AGW.yml" 2 | 3 | DATASETS: 4 | NAMES: ("MSMT17",) 5 | TESTS: ("MSMT17",) 6 | 7 | OUTPUT_DIR: "logs/msmt17/agw_R50" 8 | -------------------------------------------------------------------------------- /fastreid/modeling/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .meta_arch import build_model 8 | -------------------------------------------------------------------------------- /configs/MSMT17/sbs_R50.yml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-Strongerbaseline.yml" 2 | 3 | DATASETS: 4 | NAMES: ("MSMT17",) 5 | TESTS: ("MSMT17",) 6 | 7 | OUTPUT_DIR: "logs/msmt17/sbs_R50" 8 | --------------------------------------------------------------------------------
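The MSMT17 configs in this directory override only the dataset names and output directory; everything else is inherited from the file named in `_BASE_`. A minimal sketch of how such a config would be loaded (assumes fastreid is on `PYTHONPATH` and that `merge_from_file` resolves the `_BASE_` key recursively, as detectron2-style `CfgNode`s do):

```python
from fastreid.config import get_cfg

cfg = get_cfg()  # default config node
cfg.merge_from_file("configs/MSMT17/sbs_R50.yml")  # pulls in Base-Strongerbaseline.yml via _BASE_
cfg.freeze()
print(cfg.DATASETS.NAMES, cfg.OUTPUT_DIR)  # ('MSMT17',) logs/msmt17/sbs_R50
```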
/configs/MSMT17/bagtricks_R50.yml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-bagtricks.yml" 2 | 3 | DATASETS: 4 | NAMES: ("MSMT17",) 5 | TESTS: ("MSMT17",) 6 | 7 | OUTPUT_DIR: "logs/msmt17/bagtricks_R50" 8 | -------------------------------------------------------------------------------- /fastreid/config/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: l1aoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .config import CfgNode, get_cfg 8 | from .defaults import _C as cfg 9 | -------------------------------------------------------------------------------- /fastreid/data/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .build import build_reid_train_loader, build_reid_test_loader, build_reid_paired_train_loader 8 | -------------------------------------------------------------------------------- /configs/MSMT17/AGW_S50.yml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-AGW.yml" 2 | 3 | MODEL: 4 | BACKBONE: 5 | NAME: "build_resnest_backbone" 6 | 7 | DATASETS: 8 | NAMES: ("MSMT17",) 9 | TESTS: ("MSMT17",) 10 | 11 | OUTPUT_DIR: "logs/msmt17/agw_S50" 12 | -------------------------------------------------------------------------------- /fastreid/solver/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | 8 | from .build import build_lr_scheduler, build_optimizer, build_dis_optimizer, build_dis_lr_scheduler, Net_scheduler -------------------------------------------------------------------------------- /fastreid/data/transforms/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | 8 | from .build import build_transforms 9 | from .transforms import * 10 | from .autoaugment import * 11 | -------------------------------------------------------------------------------- /demo/README.md: -------------------------------------------------------------------------------- 1 | # FastReID Demo 2 | 3 | We provide a command line tool to run a simple demo of builtin models. 
4 | 5 | You can run this command to get cosine similarities between different images: 6 | 7 | ```bash 8 | cd demo/ 9 | sh run_demo.sh 10 | ``` -------------------------------------------------------------------------------- /configs/MSMT17/sbs_S50.yml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-Strongerbaseline.yml" 2 | 3 | MODEL: 4 | BACKBONE: 5 | NAME: "build_resnest_backbone" 6 | 7 | DATASETS: 8 | NAMES: ("MSMT17",) 9 | TESTS: ("MSMT17",) 10 | 11 | OUTPUT_DIR: "logs/msmt17/sbs_S50" 12 | -------------------------------------------------------------------------------- /configs/MSMT17/bagtricks_S50.yml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-bagtricks.yml" 2 | 3 | MODEL: 4 | BACKBONE: 5 | NAME: "build_resnest_backbone" 6 | 7 | DATASETS: 8 | NAMES: ("MSMT17",) 9 | TESTS: ("MSMT17",) 10 | 11 | OUTPUT_DIR: "logs/msmt17/bagtricks_S50" 12 | 13 | -------------------------------------------------------------------------------- /fastreid/data/samplers/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .triplet_sampler import BalancedIdentitySampler, NaiveIdentitySampler 8 | from .data_sampler import TrainingSampler, InferenceSampler 9 | -------------------------------------------------------------------------------- /tools/deploy/run_export.sh: -------------------------------------------------------------------------------- 1 | 2 | python caffe_export.py --config-file "/export/home/lxy/cvpalgo-fast-reid/logs/dukemtmc/R34/config.yaml" \ 3 | --name "baseline_R34" \ 4 | --output "logs/caffe_R34" \ 5 | --opts MODEL.WEIGHTS "/export/home/lxy/cvpalgo-fast-reid/logs/dukemtmc/R34/model_final.pth" 6 | -------------------------------------------------------------------------------- /train.sh: -------------------------------------------------------------------------------- 1 | # training ISM on Market-1501->hazy_DukeMTMC 2 | python tools/train.py --info "ISM" --config-file ./configs/Hazy_Market1501/server_ism_bagtricks_R50.yml 3 | # training ISM on DukeMTMC->hazy_Market-1501 4 | python tools/train.py --info "ISM" --config-file ./configs/Hazy_DukeMTMC/server_ism_bagtricks_R50.yml -------------------------------------------------------------------------------- /configs/MSMT17/AGW_R50-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-AGW.yml" 2 | 3 | MODEL: 4 | BACKBONE: 5 | WITH_IBN: True 6 | PRETRAIN_PATH: "/home/liaoxingyu2/lxy/.cache/torch/checkpoints/resnet50_ibn_a.pth.tar" 7 | 8 | DATASETS: 9 | NAMES: ("MSMT17",) 10 | TESTS: ("MSMT17",) 11 | 12 | OUTPUT_DIR: "logs/msmt17/agw_R50-ibn" 13 | -------------------------------------------------------------------------------- /configs/MSMT17/mgn_R50-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-MGN.yml" 2 | 3 | MODEL: 4 | BACKBONE: 5 | WITH_IBN: True 6 | PRETRAIN_PATH: "/home/liaoxingyu2/lxy/.cache/torch/checkpoints/resnet50_ibn_a.pth.tar" 7 | 8 | DATASETS: 9 | NAMES: ("MSMT17",) 10 | TESTS: ("MSMT17",) 11 | 12 | OUTPUT_DIR: "logs/msmt17/mgn_R50-ibn" 13 | -------------------------------------------------------------------------------- /fastreid/modeling/meta_arch/__init__.py: -------------------------------------------------------------------------------- 1 | #
encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .build import META_ARCH_REGISTRY, build_model 8 | 9 | 10 | # import all the meta_arch, so they will be registered 11 | from .baseline import Baseline 12 | from .mgn import MGN 13 | -------------------------------------------------------------------------------- /fastreid/modeling/losses/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: l1aoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .cross_entroy_loss import CrossEntropyLoss 8 | from .focal_loss import FocalLoss 9 | from .metric_loss import * 10 | from .model_loss import Dis_loss, KDLoss, L1Loss, BachDistance_loss 11 | -------------------------------------------------------------------------------- /configs/MSMT17/sbs_R50-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-Strongerbaseline.yml" 2 | 3 | MODEL: 4 | BACKBONE: 5 | WITH_IBN: True 6 | PRETRAIN_PATH: "/home/liaoxingyu2/lxy/.cache/torch/checkpoints/resnet50_ibn_a.pth.tar" 7 | 8 | DATASETS: 9 | NAMES: ("MSMT17",) 10 | TESTS: ("MSMT17",) 11 | 12 | OUTPUT_DIR: "logs/msmt17/sbs_R50-ibn" 13 | -------------------------------------------------------------------------------- /configs/MSMT17/AGW_R101-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-AGW.yml" 2 | 3 | MODEL: 4 | BACKBONE: 5 | DEPTH: 101 6 | WITH_IBN: True 7 | PRETRAIN_PATH: "/home/liaoxingyu2/lxy/.cache/torch/checkpoints/resnet101_ibn_a.pth.tar" 8 | 9 | DATASETS: 10 | NAMES: ("MSMT17",) 11 | TESTS: ("MSMT17",) 12 | 13 | OUTPUT_DIR: "logs/msmt17/agw_R101-ibn" 14 | -------------------------------------------------------------------------------- /configs/MSMT17/bagtricks_R50-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-bagtricks.yml" 2 | 3 | MODEL: 4 | BACKBONE: 5 | WITH_IBN: True 6 | PRETRAIN_PATH: "/home/liaoxingyu2/lxy/.cache/torch/checkpoints/resnet50_ibn_a.pth.tar" 7 | 8 | DATASETS: 9 | NAMES: ("MSMT17",) 10 | TESTS: ("MSMT17",) 11 | 12 | OUTPUT_DIR: "logs/msmt17/bagtricks_R50-ibn" 13 | 14 | -------------------------------------------------------------------------------- /configs/MSMT17/sbs_R101-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-Strongerbaseline.yml" 2 | 3 | MODEL: 4 | BACKBONE: 5 | DEPTH: 101 6 | WITH_IBN: True 7 | PRETRAIN_PATH: "/home/liaoxingyu2/lxy/.cache/torch/checkpoints/resnet101_ibn_a.pth.tar" 8 | 9 | DATASETS: 10 | NAMES: ("MSMT17",) 11 | TESTS: ("MSMT17",) 12 | 13 | OUTPUT_DIR: "logs/msmt17/sbs-R101-ibn" 14 | -------------------------------------------------------------------------------- /configs/MSMT17/bagtricks_R101-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-bagtricks.yml" 2 | 3 | MODEL: 4 | BACKBONE: 5 | DEPTH: 101 6 | WITH_IBN: True 7 | PRETRAIN_PATH: "/home/liaoxingyu2/lxy/.cache/torch/checkpoints/resnet101_ibn_a.pth.tar" 8 | 9 | DATASETS: 10 | NAMES: ("MSMT17",) 11 | TESTS: ("MSMT17",) 12 | 13 | OUTPUT_DIR: "logs/msmt17/bagtricks_R101-ibn" 14 | 15 | -------------------------------------------------------------------------------- /configs/Base-AGW.yml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-bagtricks.yml" 2 | 
3 | MODEL: 4 | BACKBONE: 5 | WITH_NL: True 6 | 7 | HEADS: 8 | POOL_LAYER: "gempool" 9 | 10 | LOSSES: 11 | NAME: ("CrossEntropyLoss", "TripletLoss") 12 | CE: 13 | EPSILON: 0.1 14 | SCALE: 1.0 15 | 16 | TRI: 17 | MARGIN: 0.0 18 | HARD_MINING: False 19 | SCALE: 1.0 20 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .build import build_backbone, BACKBONE_REGISTRY 8 | 9 | from .resnet import build_resnet_backbone 10 | from .osnet import build_osnet_backbone 11 | from .resnest import build_resnest_backbone 12 | from .resnext import build_resnext_backbone -------------------------------------------------------------------------------- /fastreid/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | from .evaluator import DatasetEvaluator, inference_context, inference_on_dataset 3 | from .rank import evaluate_rank 4 | from .reid_evaluation import ReidEvaluator 5 | from .testing import print_csv_format, verify_results 6 | 7 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 8 | -------------------------------------------------------------------------------- /fastreid/modeling/heads/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .build import REID_HEADS_REGISTRY, build_reid_heads 8 | 9 | # import all the meta_arch, so they will be registered 10 | from .linear_head import LinearHead 11 | from .bnneck_head import BNneckHead 12 | from .reduction_head import ReductionHead 13 | -------------------------------------------------------------------------------- /tests/sampler_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import sys 3 | sys.path.append('.') 4 | from fastreid.data.samplers import TrainingSampler 5 | 6 | 7 | class SamplerTestCase(unittest.TestCase): 8 | def test_training_sampler(self): 9 | sampler = TrainingSampler(5) 10 | for i in sampler: 11 | from ipdb import set_trace; set_trace() 12 | print(i) 13 | 14 | 15 | if __name__ == '__main__': 16 | unittest.main() 17 | -------------------------------------------------------------------------------- /fastreid/engine/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | from .train_loop import * 7 | 8 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 9 | 10 | 11 | # prefer to let hooks and defaults live in separate namespaces (therefore not in __all__) 12 | # but still make them available here 13 | from .hooks import * 14 | from .defaults import * 15 | from .launch import * 16 | -------------------------------------------------------------------------------- /fastreid/layers/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .activation import * 8 | from .arcface import Arcface 9 | from .batch_drop 
import BatchDrop 10 | from .batch_norm import * 11 | from .circle import Circle 12 | from .context_block import ContextBlock 13 | from .frn import FRN, TLU 14 | from .non_local import Non_local 15 | from .pooling import * 16 | from .se_layer import SELayer 17 | from .splat import SplAtConv2d 18 | -------------------------------------------------------------------------------- /configs/Base-MGN.yml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-Strongerbaseline.yml" 2 | 3 | MODEL: 4 | META_ARCHITECTURE: 'MGN' 5 | 6 | FREEZE_LAYERS: ["backbone", "b1", "b2", "b3",] 7 | 8 | BACKBONE: 9 | WITH_NL: False 10 | 11 | HEADS: 12 | IN_FEAT: 256 13 | 14 | LOSSES: 15 | NAME: ("CrossEntropyLoss", "TripletLoss",) 16 | CE: 17 | EPSILON: 0.1 18 | SCALE: 0.125 19 | 20 | TRI: 21 | MARGIN: 0.0 22 | HARD_MINING: True 23 | NORM_FEAT: False 24 | SCALE: 0.20 25 | 26 | -------------------------------------------------------------------------------- /pre-train.sh: -------------------------------------------------------------------------------- 1 | # supervised training on Market-1501 2 | python tools/pre_train.py --config-file ./configs/Hazy_Market1501/server_baseline_bagtricks_R50.yml --info "baseline" INPUT.DO_AUTOAUG True INPUT.DO_CJ True OUTPUT_DIR "/home/pj/fast-reid-master/logs/hazy-market1501/bagtricks_R50/baseline/" 3 | # supervised training on DukeMTMC 4 | python tools/pre_train.py --config-file ./configs/Hazy_DukeMTMC/server_baseline_bagtricks_R50.yml --info "baseline" INPUT.DO_AUTOAUG True INPUT.DO_CJ True OUTPUT_DIR "/home/pj/fast-reid-master/logs/hazy-dukemtmc/bagtricks_R50/baseline/" -------------------------------------------------------------------------------- /tools/deploy/run_inference.sh: -------------------------------------------------------------------------------- 1 | 2 | python caffe_inference.py --model-def "logs/caffe_R34/baseline_R34.prototxt" \ 3 | --model-weights "logs/caffe_R34/baseline_R34.caffemodel" \ 4 | --height 256 --width 128 \ 5 | --input \ 6 | '/export/home/DATA/Market-1501-v15.09.15/bounding_box_test/1182_c5s3_015240_04.jpg' \ 7 | '/export/home/DATA/Market-1501-v15.09.15/bounding_box_test/1182_c6s3_038217_01.jpg' \ 8 | '/export/home/DATA/Market-1501-v15.09.15/bounding_box_test/1183_c5s3_006943_05.jpg' \ 9 | '/export/home/DATA/DukeMTMC-reID/bounding_box_train/0728_c4_f0161265.jpg' \ 10 | --output "caffe_R34_output" 11 | -------------------------------------------------------------------------------- /fastreid/export/tf_modeling.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: l1aoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | from torch import nn 7 | from ..modeling.backbones import build_backbone 8 | from ..modeling.heads import build_reid_heads 9 | 10 | 11 | class TfMetaArch(nn.Module): 12 | def __init__(self, cfg): 13 | super().__init__() 14 | self.backbone = build_backbone(cfg) 15 | self.heads = build_reid_heads(cfg) 16 | 17 | def forward(self, x): 18 | global_feat = self.backbone(x) 19 | pred_features = self.heads(global_feat) 20 | return pred_features 21 | -------------------------------------------------------------------------------- /fastreid/evaluation/rank_cylib/setup.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup 2 | from distutils.extension import Extension 3 | 4 | import numpy as np 5 | from Cython.Build import cythonize 6 | 7 | 8 | def 
numpy_include(): 9 | try: 10 | numpy_include = np.get_include() 11 | except AttributeError: 12 | numpy_include = np.get_numpy_include() 13 | return numpy_include 14 | 15 | 16 | ext_modules = [ 17 | Extension( 18 | 'rank_cy', 19 | ['rank_cy.pyx'], 20 | include_dirs=[numpy_include()], 21 | ) 22 | ] 23 | 24 | setup( 25 | name='Cython-based reid evaluation code', 26 | ext_modules=cythonize(ext_modules) 27 | ) 28 | -------------------------------------------------------------------------------- /configs/VeRi/sbs_R50-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-Strongerbaseline.yml" 2 | 3 | INPUT: 4 | SIZE_TRAIN: [256, 256] 5 | SIZE_TEST: [256, 256] 6 | 7 | MODEL: 8 | BACKBONE: 9 | WITH_IBN: True 10 | PRETRAIN_PATH: "/export2/home/zjk/pretrain_models/resnet50_ibn_a.pth.tar" 11 | 12 | HEADS: 13 | NUM_CLASSES: 575 14 | 15 | SOLVER: 16 | OPT: "SGD" 17 | BASE_LR: 0.01 18 | ETA_MIN_LR: 7.7e-5 19 | 20 | IMS_PER_BATCH: 64 21 | MAX_ITER: 60 22 | DELAY_ITERS: 30 23 | WARMUP_ITERS: 10 24 | FREEZE_ITERS: 10 25 | 26 | CHECKPOINT_PERIOD: 20 27 | 28 | DATASETS: 29 | NAMES: ("VeRi",) 30 | TESTS: ("VeRi",) 31 | 32 | TEST: 33 | EVAL_PERIOD: 20 34 | IMS_PER_BATCH: 128 35 | 36 | OUTPUT_DIR: "logs/veri/sbs_R50-ibn" 37 | -------------------------------------------------------------------------------- /configs/Hazy_DukeMTMC/server_baseline_bagtricks_R50.yml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-bagtricks.yml" 2 | 3 | DATASETS: 4 | NAMES: ("Hazy_DukeMTMC",) 5 | TESTS: ("Market1501", "Hazy_Market1501") 6 | 7 | TDATASETS: 8 | NAMES: ("Hazy_Market1501",) 9 | 10 | SOLVER: 11 | D_BASE_LR: (0.0001) 12 | D_OPT: ("Adam") # SGD 13 | 14 | MODEL: 15 | LOSSES: 16 | NAME: ['s_CrossEntropyLoss', 't_CrossEntropyLoss',] 17 | 18 | PARAM: 19 | KD_PARAM: (1.) 20 | KD_TEMP: (10.) 21 | KD_red: ("mean") 22 | BD_param: (1.) 
23 | METRIC: ("euclidean") # "cosine, euclidean" 24 | Dis_mode: "mix" 25 | Dis_iter: 0.99 26 | BASEMODE: True 27 | Dis_net: "cam_Classifier" 28 | 29 | OUTPUT_DIR: "/home/pj/fast-reid-master/logs/hazy-dukemtmc/bagtricks_R50" 30 | 31 | -------------------------------------------------------------------------------- /tests/lr_scheduler_test.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import unittest 3 | 4 | import torch 5 | from torch import nn 6 | 7 | sys.path.append('.') 8 | from solver.lr_scheduler import WarmupMultiStepLR 9 | from solver.build import make_optimizer 10 | from config import cfg 11 | 12 | 13 | class MyTestCase(unittest.TestCase): 14 | def test_something(self): 15 | net = nn.Linear(10, 10) 16 | optimizer = make_optimizer(cfg, net) 17 | lr_scheduler = WarmupMultiStepLR(optimizer, [20, 40], warmup_iters=10) 18 | for i in range(50): 19 | lr_scheduler.step() 20 | for j in range(3): 21 | print(i, lr_scheduler.get_lr()[0]) 22 | optimizer.step() 23 | 24 | 25 | if __name__ == '__main__': 26 | unittest.main() 27 | -------------------------------------------------------------------------------- /tests/interp_test.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from fastai.vision import * 3 | from fastai.basic_data import * 4 | from fastai.layers import * 5 | 6 | import sys 7 | sys.path.append('.') 8 | from engine.interpreter import ReidInterpretation 9 | 10 | from data import get_data_bunch 11 | from modeling import build_model 12 | from config import cfg 13 | cfg.DATASETS.NAMES = ('market1501',) 14 | cfg.DATASETS.TEST_NAMES = 'market1501' 15 | cfg.MODEL.BACKBONE = 'resnet50' 16 | 17 | data_bunch, test_labels, num_query = get_data_bunch(cfg) 18 | 19 | model = build_model(cfg, 10) 20 | model.load_params_wo_fc(torch.load('logs/2019.8.14/market/baseline/models/model_149.pth')['model']) 21 | learn = Learner(data_bunch, model) 22 | 23 | feats, _ = learn.get_preds(DatasetType.Test, activ=Lambda(lambda x: x)) -------------------------------------------------------------------------------- /fastreid/layers/se_layer.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from torch import nn 8 | 9 | 10 | class SELayer(nn.Module): 11 | def __init__(self, channel, reduction=16): 12 | super(SELayer, self).__init__() 13 | self.avg_pool = nn.AdaptiveAvgPool2d(1) 14 | self.fc = nn.Sequential( 15 | nn.Linear(channel, int(channel / reduction), bias=False), 16 | nn.ReLU(inplace=True), 17 | nn.Linear(int(channel / reduction), channel, bias=False), 18 | nn.Sigmoid() 19 | ) 20 | 21 | def forward(self, x): 22 | b, c, _, _ = x.size() 23 | y = self.avg_pool(x).view(b, c) 24 | y = self.fc(y).view(b, c, 1, 1) 25 | return x * y.expand_as(x) 26 | -------------------------------------------------------------------------------- /configs/VERIWild/bagtricks_R50-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-bagtricks.yml" 2 | 3 | INPUT: 4 | SIZE_TRAIN: [256, 256] 5 | SIZE_TEST: [256, 256] 6 | 7 | MODEL: 8 | BACKBONE: 9 | WITH_IBN: True 10 | PRETRAIN_PATH: '/export2/home/zjk/pretrain_models/resnet50_ibn_a.pth.tar' 11 | HEADS: 12 | NUM_CLASSES: 30671 13 | POOL_LAYER: gempool 14 | LOSSES: 15 | TRI: 16 | HARD_MINING: False 17 | MARGIN: 0.0 18 | 19 | DATASETS: 20 | NAMES: ("VeRiWild",) 21 | TESTS: 
("SmallVeRiWild", "MediumVeRiWild", "LargeVeRiWild",) 22 | 23 | SOLVER: 24 | IMS_PER_BATCH: 128 25 | MAX_ITER: 60 26 | STEPS: [30, 50] 27 | WARMUP_ITERS: 10 28 | 29 | CHECKPOINT_PERIOD: 20 30 | 31 | TEST: 32 | EVAL_PERIOD: 20 33 | IMS_PER_BATCH: 128 34 | 35 | OUTPUT_DIR: "logs/veriwild/bagtricks_R50-ibn_4gpu" 36 | -------------------------------------------------------------------------------- /fastreid/modeling/heads/build.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from ...utils.registry import Registry 8 | 9 | REID_HEADS_REGISTRY = Registry("HEADS") 10 | REID_HEADS_REGISTRY.__doc__ = """ 11 | Registry for ROI heads in a generalized R-CNN model. 12 | ROIHeads take feature maps and region proposals, and 13 | perform per-region computation. 14 | The registered object will be called with `obj(cfg, input_shape)`. 15 | The call is expected to return an :class:`ROIHeads`. 16 | """ 17 | 18 | 19 | def build_reid_heads(cfg, in_feat, num_classes, pool_layer): 20 | """ 21 | Build REIDHeads defined by `cfg.MODEL.REID_HEADS.NAME`. 22 | """ 23 | head = cfg.MODEL.HEADS.NAME 24 | return REID_HEADS_REGISTRY.get(head)(cfg, in_feat, num_classes, pool_layer) 25 | -------------------------------------------------------------------------------- /configs/VehicleID/bagtricks_R50-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-bagtricks.yml" 2 | 3 | INPUT: 4 | SIZE_TRAIN: [256, 256] 5 | SIZE_TEST: [256, 256] 6 | 7 | MODEL: 8 | BACKBONE: 9 | WITH_IBN: True 10 | PRETRAIN_PATH: '/export2/home/zjk/pretrain_models/resnet50_ibn_a.pth.tar' 11 | HEADS: 12 | NUM_CLASSES: 13164 13 | POOL_LAYER: gempool 14 | LOSSES: 15 | TRI: 16 | HARD_MINING: False 17 | MARGIN: 0.0 18 | 19 | DATASETS: 20 | NAMES: ("VehicleID",) 21 | TESTS: ("SmallVehicleID", "MediumVehicleID", "LargeVehicleID",) 22 | 23 | SOLVER: 24 | BIAS_LR_FACTOR: 1. 25 | 26 | IMS_PER_BATCH: 512 27 | MAX_ITER: 60 28 | STEPS: [30, 50] 29 | WARMUP_ITERS: 10 30 | 31 | CHECKPOINT_PERIOD: 20 32 | 33 | TEST: 34 | EVAL_PERIOD: 20 35 | IMS_PER_BATCH: 128 36 | 37 | OUTPUT_DIR: "logs/vehicleid/bagtricks_R50-ibn_4gpu" 38 | -------------------------------------------------------------------------------- /configs/Hazy_DukeMTMC/server_ism_bagtricks_R50.yml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-bagtricks.yml" 2 | 3 | DATASETS: 4 | NAMES: ("Hazy_DukeMTMC",) 5 | TESTS: ("Market1501", "Hazy_Market1501") 6 | 7 | TDATASETS: 8 | NAMES: ("Hazy_Market1501",) 9 | 10 | SOLVER: 11 | D_BASE_LR: (0.0001) 12 | D_OPT: ("Adam") # SGD 13 | 14 | MODEL: 15 | WEIGHTS: "/home/pj/fast-reid-master/logs/hazy-dukemtmc/bagtricks_R50/baseline/model_final.pth" 16 | LOSSES: 17 | NAME: ['s_CrossEntropyLoss', 'Dis_loss', 'BachDistance_loss_t', ] 18 | 19 | PARAM: 20 | KD_PARAM: (0.1) 21 | KD_TEMP: (10.) 22 | KD_red: ("mean") 23 | BD_param: (4.) 
24 | METRIC: ("euclidean") # "cosine, euclidean" 25 | Dis_mode: "mix" 26 | Dis_iter: 0.99 27 | BASEMODE: False 28 | Dis_net: "cam_Classifier" 29 | 30 | OUTPUT_DIR: "/home/pj/fast-reid-master/logs/hazy-dukemtmc/bagtricks_R50" 31 | 32 | -------------------------------------------------------------------------------- /fastreid/modeling/meta_arch/build.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | import torch 7 | 8 | from fastreid.utils.registry import Registry 9 | 10 | META_ARCH_REGISTRY = Registry("META_ARCH") # noqa F401 isort:skip 11 | META_ARCH_REGISTRY.__doc__ = """ 12 | Registry for meta-architectures, i.e. the whole model. 13 | The registered object will be called with `obj(cfg)` 14 | and expected to return a `nn.Module` object. 15 | """ 16 | 17 | 18 | def build_model(cfg): 19 | """ 20 | Build the whole model architecture, defined by ``cfg.MODEL.META_ARCHITECTURE``. 21 | Note that it does not load any weights from ``cfg``. 22 | """ 23 | meta_arch = cfg.MODEL.META_ARCHITECTURE 24 | model = META_ARCH_REGISTRY.get(meta_arch)(cfg) 25 | model.to(torch.device(cfg.MODEL.DEVICE)) 26 | return model 27 | -------------------------------------------------------------------------------- /configs/Hazy_Market1501/server_ism_bagtricks_R50.yml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-bagtricks.yml" 2 | 3 | DATASETS: 4 | NAMES: ("Hazy_Market1501",) 5 | TESTS: ("DukeMTMC", "Hazy_DukeMTMC") 6 | 7 | TDATASETS: 8 | NAMES: ("Hazy_DukeMTMC",) 9 | 10 | SOLVER: 11 | D_BASE_LR: (0.0001) 12 | D_OPT: ("Adam") # SGD 13 | 14 | # ISM 15 | MODEL: 16 | WEIGHTS: "/home/pj/fast-reid-master/logs/hazy-market1501/bagtricks_R50/baseline/model_final.pth" 17 | LOSSES: 18 | NAME: ['s_CrossEntropyLoss', 'Dis_loss', 'BachDistance_loss_t',] 19 | 20 | PARAM: 21 | KD_PARAM: (0.1) 22 | KD_TEMP: (10.) 23 | KD_red: ("mean") 24 | BD_param: (4.) 
25 | METRIC: ("euclidean") # "cosine, euclidean" 26 | Dis_mode: "mix" 27 | Dis_iter: 0.99 28 | BASEMODE: False 29 | Dis_net: "cam_Classifier" 30 | 31 | OUTPUT_DIR: "/home/pj/fast-reid-master/logs/hazy-market1501/bagtricks_R50" 32 | -------------------------------------------------------------------------------- /demo/plot_roc_with_pickle.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import matplotlib.pyplot as plt 8 | import sys 9 | 10 | sys.path.append('.') 11 | from fastreid.utils.visualizer import Visualizer 12 | 13 | if __name__ == "__main__": 14 | baseline_res = Visualizer.load_roc_info("logs/duke_vis/roc_info.pickle") 15 | mgn_res = Visualizer.load_roc_info("logs/mgn_duke_vis/roc_info.pickle") 16 | 17 | fig = Visualizer.plot_roc_curve(baseline_res['fpr'], baseline_res['tpr'], name='baseline') 18 | Visualizer.plot_roc_curve(mgn_res['fpr'], mgn_res['tpr'], name='mgn', fig=fig) 19 | plt.savefig('roc.jpg') 20 | 21 | fig = Visualizer.plot_distribution(baseline_res['pos'], baseline_res['neg'], name='baseline') 22 | Visualizer.plot_distribution(mgn_res['pos'], mgn_res['neg'], name='mgn', fig=fig) 23 | plt.savefig('dist.jpg') 24 | -------------------------------------------------------------------------------- /fastreid/data/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from ...utils.registry import Registry 8 | 9 | DATASET_REGISTRY = Registry("DATASET") 10 | DATASET_REGISTRY.__doc__ = """ 11 | Registry for datasets. 12 | The registered object must be a dataset class; it is instantiated to build a dataset. 13 | """ 14 | 15 | from .cuhk03 import CUHK03 16 | from .dukemtmcreid import DukeMTMC 17 | from .hazy_dukemtmcreid import Hazy_DukeMTMC 18 | from .market1501 import Market1501 19 | from .hazy_market1501 import Hazy_Market1501 20 | from .msmt17 import MSMT17 21 | from .veri import VeRi 22 | from .vehicleid import VehicleID, SmallVehicleID, MediumVehicleID, LargeVehicleID 23 | from .veriwild import VeRiWild, SmallVeRiWild, MediumVeRiWild, LargeVeRiWild 24 | 25 | __all__ = [k for k in globals().keys() if "builtin" not in k and not k.startswith("_")] 26 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/build.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from ...utils.registry import Registry 8 | 9 | BACKBONE_REGISTRY = Registry("BACKBONE") 10 | BACKBONE_REGISTRY.__doc__ = """ 11 | Registry for backbones, which extract feature maps from images. 12 | The registered object must be a callable that accepts one argument: 13 | 1. A :class:`fastreid.config.CfgNode` with the full model configuration. 14 | The call is made as ``BACKBONE_REGISTRY.get(name)(cfg)``. 15 | It must return an instance of :class:`Backbone`. 16 | """ 17 | 18 | 19 | def build_backbone(cfg): 20 | """ 21 | Build a backbone from `cfg.MODEL.BACKBONE.NAME`.
22 | Returns: 23 | an instance of :class:`Backbone` 24 | """ 25 | 26 | backbone_name = cfg.MODEL.BACKBONE.NAME 27 | backbone = BACKBONE_REGISTRY.get(backbone_name)(cfg) 28 | return backbone 29 | -------------------------------------------------------------------------------- /fastreid/layers/batch_drop.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import random 8 | 9 | from torch import nn 10 | 11 | 12 | class BatchDrop(nn.Module): 13 | """ref: https://github.com/daizuozhuo/batch-dropblock-network/blob/master/models/networks.py 14 | batch drop mask 15 | """ 16 | 17 | def __init__(self, h_ratio, w_ratio): 18 | super(BatchDrop, self).__init__() 19 | self.h_ratio = h_ratio 20 | self.w_ratio = w_ratio 21 | 22 | def forward(self, x): 23 | if self.training: 24 | h, w = x.size()[-2:] 25 | rh = round(self.h_ratio * h) 26 | rw = round(self.w_ratio * w) 27 | sx = random.randint(0, h - rh) 28 | sy = random.randint(0, w - rw) 29 | mask = x.new_ones(x.size()) 30 | mask[:, :, sx:sx + rh, sy:sy + rw] = 0 31 | x = x * mask 32 | return x 33 | -------------------------------------------------------------------------------- /tools/deploy/Caffe/caffe_lmdb.py: -------------------------------------------------------------------------------- 1 | import lmdb 2 | from Caffe import caffe_pb2 as pb2 3 | import numpy as np 4 | 5 | class Read_Caffe_LMDB(): 6 | def __init__(self,path,dtype=np.uint8): 7 | 8 | self.env=lmdb.open(path, readonly=True) 9 | self.dtype=dtype 10 | self.txn=self.env.begin() 11 | self.cursor=self.txn.cursor() 12 | 13 | @staticmethod 14 | def to_numpy(value,dtype=np.uint8): 15 | datum = pb2.Datum() 16 | datum.ParseFromString(value) 17 | flat_x = np.frombuffer(datum.data, dtype=dtype) # np.fromstring is deprecated for binary data 18 | data = flat_x.reshape(datum.channels, datum.height, datum.width) 19 | label = datum.label 20 | return data,label 21 | 22 | def iterator(self): 23 | while True: 24 | key,value=self.cursor.key(),self.cursor.value() 25 | yield self.to_numpy(value,self.dtype) 26 | if not self.cursor.next(): 27 | return 28 | 29 | def __iter__(self): 30 | self.cursor.first() 31 | it = self.iterator() 32 | return it 33 | 34 | def __len__(self): 35 | return int(self.env.stat()['entries']) 36 | -------------------------------------------------------------------------------- /configs/Base-Strongerbaseline.yml: -------------------------------------------------------------------------------- 1 | _BASE_: "Base-bagtricks.yml" 2 | 3 | MODEL: 4 | FREEZE_LAYERS: ["backbone"] 5 | 6 | BACKBONE: 7 | WITH_NL: True 8 | 9 | HEADS: 10 | NECK_FEAT: "after" 11 | POOL_LAYER: "gempool" 12 | CLS_LAYER: "circle" 13 | SCALE: 64 14 | MARGIN: 0.35 15 | 16 | LOSSES: 17 | NAME: ("CrossEntropyLoss", "TripletLoss",) 18 | CE: 19 | EPSILON: 0.1 20 | SCALE: 1.0 21 | TRI: 22 | MARGIN: 0.0 23 | HARD_MINING: True 24 | NORM_FEAT: False 25 | SCALE: 1.0 26 | 27 | INPUT: 28 | SIZE_TRAIN: [384, 128] 29 | SIZE_TEST: [384, 128] 30 | DO_AUTOAUG: True 31 | 32 | DATALOADER: 33 | NUM_INSTANCE: 16 34 | 35 | SOLVER: 36 | OPT: "Adam" 37 | MAX_ITER: 60 38 | BASE_LR: 0.00035 39 | BIAS_LR_FACTOR: 1.
40 | WEIGHT_DECAY: 0.0005 41 | WEIGHT_DECAY_BIAS: 0.0005 42 | IMS_PER_BATCH: 64 43 | 44 | SCHED: "WarmupCosineAnnealingLR" 45 | DELAY_ITERS: 30 46 | ETA_MIN_LR: 0.00000077 47 | 48 | WARMUP_FACTOR: 0.01 49 | WARMUP_ITERS: 10 50 | FREEZE_ITERS: 10 51 | 52 | CHECKPOINT_PERIOD: 30 53 | 54 | TEST: 55 | EVAL_PERIOD: 30 56 | IMS_PER_BATCH: 128 57 | 58 | CUDNN_BENCHMARK: True 59 | 60 | -------------------------------------------------------------------------------- /fastreid/utils/weight_init.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: liaoxingyu5@jd.com 5 | """ 6 | 7 | import math 8 | from torch import nn 9 | 10 | __all__ = [ 11 | 'weights_init_classifier', 12 | 'weights_init_kaiming', 13 | ] 14 | 15 | 16 | def weights_init_kaiming(m): 17 | classname = m.__class__.__name__ 18 | if classname.find('Linear') != -1: 19 | nn.init.normal_(m.weight, 0, 0.01) 20 | if m.bias is not None: 21 | nn.init.constant_(m.bias, 0.0) 22 | elif classname.find('Conv') != -1: 23 | nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') 24 | if m.bias is not None: 25 | nn.init.constant_(m.bias, 0.0) 26 | elif classname.find('BatchNorm') != -1: 27 | if m.affine: 28 | nn.init.normal_(m.weight, 1.0, 0.02) 29 | nn.init.constant_(m.bias, 0.0) 30 | 31 | 32 | def weights_init_classifier(m): 33 | classname = m.__class__.__name__ 34 | if classname.find('Linear') != -1: 35 | nn.init.normal_(m.weight, std=0.001) 36 | if m.bias is not None: 37 | nn.init.constant_(m.bias, 0.0) 38 | elif classname.find("Arcface") != -1 or classname.find("Circle") != -1: 39 | nn.init.kaiming_uniform_(m.weight, a=math.sqrt(5)) 40 | -------------------------------------------------------------------------------- /tests/model_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import torch 4 | 5 | import sys 6 | sys.path.append('.') 7 | from fastreid.config import cfg 8 | from fastreid.modeling.backbones import build_resnet_backbone 9 | from fastreid.modeling.backbones.resnet_ibn_a import se_resnet101_ibn_a 10 | from torch import nn 11 | 12 | 13 | class MyTestCase(unittest.TestCase): 14 | def test_se_resnet101(self): 15 | cfg.MODEL.BACKBONE.NAME = 'resnet101' 16 | cfg.MODEL.BACKBONE.DEPTH = 101 17 | cfg.MODEL.BACKBONE.WITH_IBN = True 18 | cfg.MODEL.BACKBONE.WITH_SE = True 19 | cfg.MODEL.BACKBONE.PRETRAIN_PATH = '/export/home/lxy/.cache/torch/checkpoints/se_resnet101_ibn_a.pth.tar' 20 | 21 | net1 = build_resnet_backbone(cfg) 22 | net1.cuda() 23 | net2 = nn.DataParallel(se_resnet101_ibn_a()) 24 | res = net2.load_state_dict(torch.load(cfg.MODEL.BACKBONE.PRETRAIN_PATH)['state_dict'], strict=False) 25 | net2.cuda() 26 | x = torch.randn(10, 3, 256, 128).cuda() 27 | y1 = net1(x) 28 | y2 = net2(x) 29 | assert y1.sum() == y2.sum(), 'train mode problem' 30 | net1.eval() 31 | net2.eval() 32 | y1 = net1(x) 33 | y2 = net2(x) 34 | assert y1.sum() == y2.sum(), 'eval mode problem' 35 | 36 | 37 | if __name__ == '__main__': 38 | unittest.main() 39 | -------------------------------------------------------------------------------- /tests/feature_align.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import numpy as np 3 | import os 4 | from glob import glob 5 | 6 | 7 | class TestFeatureAlign(unittest.TestCase): 8 | def test_caffe_pytorch_feat_align(self): 9 | caffe_feat_path = 
"/export/home/lxy/cvpalgo-fast-reid/tools/deploy/caffe_R50_output" 10 | pytorch_feat_path = "/export/home/lxy/cvpalgo-fast-reid/demo/logs/R50_256x128_pytorch_feat_output" 11 | feat_filenames = os.listdir(caffe_feat_path) 12 | for feat_name in feat_filenames: 13 | caffe_feat = np.load(os.path.join(caffe_feat_path, feat_name)) 14 | pytorch_feat = np.load(os.path.join(pytorch_feat_path, feat_name)) 15 | sim = np.dot(caffe_feat, pytorch_feat.transpose())[0][0] 16 | assert sim > 0.97, f"Got similarity {sim} and feature of {feat_name} is not aligned" 17 | 18 | def test_model_performance(self): 19 | caffe_feat_path = "/export/home/lxy/cvpalgo-fast-reid/tools/deploy/caffe_R50_output" 20 | feat_filenames = os.listdir(caffe_feat_path) 21 | feats = [] 22 | for feat_name in feat_filenames: 23 | caffe_feat = np.load(os.path.join(caffe_feat_path, feat_name)) 24 | feats.append(caffe_feat) 25 | from ipdb import set_trace; set_trace() 26 | 27 | 28 | 29 | if __name__ == '__main__': 30 | unittest.main() 31 | -------------------------------------------------------------------------------- /configs/Hazy_Market1501/server_baseline_bagtricks_R50.yml: -------------------------------------------------------------------------------- 1 | _BASE_: "../Base-bagtricks.yml" 2 | 3 | DATASETS: 4 | NAMES: ("Hazy_Market1501",) 5 | TESTS: ("DukeMTMC", "Hazy_DukeMTMC") 6 | 7 | TDATASETS: 8 | NAMES: ("Hazy_DukeMTMC",) 9 | #DATASETS: 10 | # NAMES: ("Hazy_DukeMTMC",) 11 | # TESTS: ("Market1501", "Hazy_Market1501") 12 | # 13 | #TDATASETS: 14 | # NAMES: ("Hazy_Market1501",) 15 | 16 | SOLVER: 17 | D_BASE_LR: (0.0001) 18 | D_OPT: ("Adam") # SGD 19 | 20 | # ISM 21 | #MODEL: 22 | # WEIGHTS: "/home/pj/fast-reid-master/logs/hazy-market1501/bagtricks_R50/baseline/model_final.pth" 23 | # LOSSES: 24 | # NAME: ['s_CrossEntropyLoss', 'Dis_loss', 'BachDistance_loss_t',] 25 | 26 | # Baseline modified 27 | MODEL: 28 | LOSSES: 29 | NAME: ['s_CrossEntropyLoss', 't_CrossEntropyLoss',] 30 | 31 | PARAM: 32 | KD_PARAM: (1.) 33 | KD_TEMP: (10.) 34 | KD_red: ("mean") 35 | BD_param: (1.) 
36 | METRIC: ("euclidean") # "cosine, euclidean" 37 | Dis_mode: "mix" 38 | Dis_iter: 0.99 39 | BASEMODE: True 40 | Dis_net: "cam_Classifier" 41 | 42 | OUTPUT_DIR: "/home/pj/fast-reid-master/logs/hazy-market1501/bagtricks_R50" 43 | #OUTPUT_DIR: "/home/lhf/pj/fast-reid-master/logs/hazy-dukemtmc/bagtricks_R50" 44 | 45 | #MODEL: 46 | # LOSSES: 47 | # NAME: ( Hazy_CrossEntropyLoss, "Tar_KD_loss", "Src_KDLoss") 48 | # NAME: ("CrossEntropyLoss","Hazy_CrossEntropyLoss", 49 | # "Cross_adv" , "KDLoss") 50 | -------------------------------------------------------------------------------- /fastreid/layers/circle.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import torch 8 | import torch.nn as nn 9 | import torch.nn.functional as F 10 | from torch.nn import Parameter 11 | 12 | 13 | class Circle(nn.Module): 14 | def __init__(self, cfg, in_feat, num_classes): 15 | super().__init__() 16 | self.in_feat = in_feat 17 | self._num_classes = num_classes 18 | self._s = cfg.MODEL.HEADS.SCALE 19 | self._m = cfg.MODEL.HEADS.MARGIN 20 | 21 | self.weight = Parameter(torch.Tensor(num_classes, in_feat)) 22 | 23 | def forward(self, features, targets): 24 | sim_mat = F.linear(F.normalize(features), F.normalize(self.weight)) 25 | alpha_p = F.relu(-sim_mat.detach() + 1 + self._m) 26 | alpha_n = F.relu(sim_mat.detach() + self._m) 27 | delta_p = 1 - self._m 28 | delta_n = self._m 29 | 30 | s_p = self._s * alpha_p * (sim_mat - delta_p) 31 | s_n = self._s * alpha_n * (sim_mat - delta_n) 32 | 33 | targets = F.one_hot(targets, num_classes=self._num_classes) 34 | 35 | pred_class_logits = targets * s_p + (1.0 - targets) * s_n 36 | 37 | return pred_class_logits 38 | 39 | def extra_repr(self): 40 | return 'in_features={}, num_classes={}, scale={}, margin={}'.format( 41 | self.in_feat, self._num_classes, self._s, self._m 42 | ) 43 | -------------------------------------------------------------------------------- /tools/deploy/Caffe/ReadMe.md: -------------------------------------------------------------------------------- 1 | # The Caffe module in nn_tools provides some convenient APIs 2 | If there are problems parsing your prototxt or caffemodel, please replace 3 | the caffe.proto with your own version and compile it with the command 4 | `protoc --python_out ./ caffe.proto` 5 | 6 | ## caffe_net.py 7 | Use `from nn_tools.Caffe import caffe_net` to import this module 8 | ### Prototxt 9 | + `net=caffe_net.Prototxt(file_name)` to open a prototxt file 10 | + `net.init_caffemodel(caffe_cmd_path='caffe')` to generate a caffemodel file in the current working directory \ 11 | if the `caffe` command is not in your $PATH, specify its path via the `caffe_cmd_path` kwarg.
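A minimal usage sketch of the Prototxt API above (the file name `deploy.prototxt` is hypothetical; assumes a working `caffe` binary):

```python
from nn_tools.Caffe import caffe_net

# parse an existing prototxt, then let caffe materialize a matching
# (randomly initialized) caffemodel in the current working directory
net = caffe_net.Prototxt('deploy.prototxt')
net.init_caffemodel(caffe_cmd_path='caffe')
```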
12 | ### Caffemodel 13 | + `net=caffe_net.Caffemodel(file_name)` to open a caffemodel 14 | + `net.save_prototxt(path)` to save the caffemodel to a prototxt file (not containing the weight data) 15 | + `net.get_layer_data(layer_name)` returns the numpy ndarray data of the layer 16 | + `net.set_layer_date(layer_name, datas)` sets the data of one layer in the caffemodel. `datas` is normally a list of numpy ndarrays `[weights, bias]` 17 | + `net.save(path)` saves the changed caffemodel 18 | ### Functions for both Prototxt and Caffemodel 19 | + `net.add_layer(layer_params,before='',after='')` adds a new layer with a `Layer_Param` object 20 | + `net.remove_layer_by_name(layer_name)` 21 | + `net.get_layer_by_name(layer_name)` or `net.layer(layer_name)` gets the raw Layer object defined in caffe_pb2 -------------------------------------------------------------------------------- /configs/Base-bagtricks.yml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: "Baseline" 3 | 4 | BACKBONE: 5 | NAME: "build_resnet_backbone" 6 | NORM: "BN" 7 | DEPTH: 50 8 | LAST_STRIDE: 1 9 | WITH_IBN: False 10 | PRETRAIN: True 11 | 12 | HEADS: 13 | NAME: "BNneckHead" 14 | NORM: "BN" 15 | POOL_LAYER: "avgpool" 16 | NECK_FEAT: "before" 17 | CLS_LAYER: "linear" 18 | 19 | LOSSES: 20 | NAME: ( "CrossEntropyLoss", "Hazy_CrossEntropyLoss", "Cross_Adv_Loss") 21 | 22 | CE: 23 | EPSILON: 0.1 24 | SCALE: 1. 25 | 26 | TRI: 27 | MARGIN: 0.3 28 | HARD_MINING: True 29 | NORM_FEAT: False 30 | SCALE: 1. 31 | 32 | INPUT: 33 | SIZE_TRAIN: [256, 128] 34 | SIZE_TEST: [256, 128] 35 | REA: 36 | ENABLED: False 37 | PROB: 0.5 38 | MEAN: [123.675, 116.28, 103.53] 39 | DO_PAD: True 40 | 41 | DATALOADER: 42 | PK_SAMPLER: True 43 | NAIVE_WAY: True 44 | NUM_INSTANCE: 4 45 | NUM_WORKERS: 8 46 | 47 | SOLVER: 48 | D_BASE_LR: 0.00001 49 | D_OPT: "Adam" 50 | D_STEPS: [60] 51 | D_GAMMA: 0.1 52 | LR_POLICY: "multistep" 53 | 54 | OPT: "Adam" 55 | MAX_ITER: 120 56 | BASE_LR: 0.00035 57 | BIAS_LR_FACTOR: 2.
58 | WEIGHT_DECAY: 0.0005 59 | WEIGHT_DECAY_BIAS: 0.0005 60 | IMS_PER_BATCH: 64 61 | 62 | SCHED: "WarmupMultiStepLR" 63 | STEPS: [40, 90] 64 | GAMMA: 0.1 65 | 66 | WARMUP_FACTOR: 0.01 67 | WARMUP_ITERS: 10 68 | 69 | CHECKPOINT_PERIOD: 60 70 | 71 | TEST: 72 | EVAL_PERIOD: 30 73 | IMS_PER_BATCH: 128 74 | 75 | CUDNN_BENCHMARK: True 76 | 77 | -------------------------------------------------------------------------------- /tests/dataset_test.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import sys 8 | sys.path.append('.') 9 | from data import get_dataloader 10 | from config import cfg 11 | import argparse 12 | from data.datasets import init_dataset 13 | # cfg.DATALOADER.SAMPLER = 'triplet' 14 | cfg.DATASETS.NAMES = ("market1501", "dukemtmc", "cuhk03", "msmt17",) 15 | 16 | 17 | if __name__ == '__main__': 18 | parser = argparse.ArgumentParser(description="ReID Baseline Training") 19 | parser.add_argument( 20 | '-cfg', "--config_file", 21 | default="", 22 | metavar="FILE", 23 | help="path to config file", 24 | type=str 25 | ) 26 | # parser.add_argument("--local_rank", type=int, default=0) 27 | parser.add_argument("opts", help="Modify config options using the command-line", default=None, 28 | nargs=argparse.REMAINDER) 29 | args = parser.parse_args() 30 | cfg.merge_from_list(args.opts) 31 | 32 | # dataset = init_dataset('msmt17', combineall=True) 33 | get_dataloader(cfg) 34 | # tng_dataloader, val_dataloader, num_classes, num_query = get_dataloader(cfg) 35 | # def get_ex(): return open_image('datasets/beijingStation/query/000245_c10s2_1561732033722.000000.jpg') 36 | # im = get_ex() 37 | # print(data.train_ds[0]) 38 | # print(data.test_ds[0]) 39 | # a = next(iter(data.train_dl)) 40 | # from IPython import embed; embed() 41 | # from ipdb import set_trace; set_trace() 42 | # im.apply_tfms(crop_pad(size=(300, 300))) 43 | -------------------------------------------------------------------------------- /fastreid/layers/activation.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: liaoxingyu5@jd.com 5 | """ 6 | 7 | import math 8 | 9 | import torch 10 | import torch.nn as nn 11 | import torch.nn.functional as F 12 | 13 | __all__ = [ 14 | 'Mish', 15 | 'Swish', 16 | 'MemoryEfficientSwish', 17 | 'GELU'] 18 | 19 | 20 | class Mish(nn.Module): 21 | def __init__(self): 22 | super().__init__() 23 | 24 | def forward(self, x): 25 | # inlining this saves 1 second per epoch (V100 GPU) vs having a temp x and then returning x(!) 
26 | return x * (torch.tanh(F.softplus(x))) 27 | 28 | 29 | class Swish(nn.Module): 30 | def forward(self, x): 31 | return x * torch.sigmoid(x) 32 | 33 | 34 | class SwishImplementation(torch.autograd.Function): 35 | @staticmethod 36 | def forward(ctx, i): 37 | result = i * torch.sigmoid(i) 38 | ctx.save_for_backward(i) 39 | return result 40 | 41 | @staticmethod 42 | def backward(ctx, grad_output): 43 | i = ctx.saved_variables[0] 44 | sigmoid_i = torch.sigmoid(i) 45 | return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i))) 46 | 47 | 48 | class MemoryEfficientSwish(nn.Module): 49 | def forward(self, x): 50 | return SwishImplementation.apply(x) 51 | 52 | 53 | class GELU(nn.Module): 54 | """ 55 | Paper Section 3.4, last paragraph notice that BERT used the GELU instead of RELU 56 | """ 57 | 58 | def forward(self, x): 59 | return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) 60 | -------------------------------------------------------------------------------- /tools/deploy/export2tf.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import sys 8 | 9 | import torch 10 | sys.path.append('../..') 11 | from fastreid.config import get_cfg 12 | from fastreid.engine import default_argument_parser, default_setup 13 | from fastreid.modeling.meta_arch import build_model 14 | from fastreid.export.tensorflow_export import export_tf_reid_model 15 | from fastreid.export.tf_modeling import TfMetaArch 16 | 17 | 18 | def setup(args): 19 | """ 20 | Create configs and perform basic setups. 21 | """ 22 | cfg = get_cfg() 23 | # cfg.merge_from_file(args.config_file) 24 | cfg.merge_from_list(args.opts) 25 | cfg.freeze() 26 | default_setup(cfg, args) 27 | return cfg 28 | 29 | 30 | if __name__ == "__main__": 31 | args = default_argument_parser().parse_args() 32 | print("Command Line Args:", args) 33 | cfg = setup(args) 34 | cfg.defrost() 35 | cfg.MODEL.BACKBONE.NAME = "build_resnet_backbone" 36 | cfg.MODEL.BACKBONE.DEPTH = 50 37 | cfg.MODEL.BACKBONE.LAST_STRIDE = 1 38 | # If use IBN block in backbone 39 | cfg.MODEL.BACKBONE.WITH_IBN = False 40 | cfg.MODEL.BACKBONE.PRETRAIN = False 41 | 42 | from torchvision.models import resnet50 43 | # model = TfMetaArch(cfg) 44 | model = resnet50(pretrained=False) 45 | # model.load_params_wo_fc(torch.load('logs/bjstation/res50_baseline_v0.4/ckpts/model_epoch80.pth')) 46 | model.eval() 47 | dummy_inputs = torch.randn(1, 3, 256, 128) 48 | export_tf_reid_model(model, dummy_inputs, 'reid_tf.pb') 49 | -------------------------------------------------------------------------------- /fastreid/data/data_utils.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | import numpy as np 7 | from PIL import Image, ImageOps 8 | 9 | from fastreid.utils.file_io import PathManager 10 | 11 | 12 | def read_image(file_name, format=None): 13 | """ 14 | Read an image into the given format. 15 | Will apply rotation and flipping if the image has such exif information. 
16 | Args: 17 | file_name (str): image file path 18 | format (str): one of the supported image modes in PIL, or "BGR" 19 | Returns: 20 | image (PIL.Image.Image): the image converted to the requested format 21 | """ 22 | with PathManager.open(file_name, "rb") as f: 23 | image = Image.open(f) 24 | 25 | # capture and ignore this bug: https://github.com/python-pillow/Pillow/issues/3973 26 | try: 27 | image = ImageOps.exif_transpose(image) 28 | except Exception: 29 | pass 30 | 31 | if format is not None: 32 | # PIL only supports RGB, so convert to RGB and flip channels over below 33 | conversion_format = format 34 | if format == "BGR": 35 | conversion_format = "RGB" 36 | image = image.convert(conversion_format) 37 | image = np.asarray(image) 38 | if format == "BGR": 39 | # flip channels if needed 40 | image = image[:, :, ::-1] 41 | # PIL squeezes out the channel dimension for "L", so make it HWC 42 | if format == "L": 43 | image = np.expand_dims(image, -1) 44 | image = Image.fromarray(image) 45 | return image 46 | -------------------------------------------------------------------------------- /fastreid/evaluation/roc.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: l1aoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import numpy as np 8 | from sklearn import metrics 9 | 10 | 11 | def evaluate_roc(distmat, q_pids, g_pids, q_camids, g_camids): 12 | r"""Evaluation with ROC curve. 13 | Key: for each query identity, its gallery images from the same camera view are discarded. 14 | 15 | Args: 16 | distmat (np.ndarray): cosine distance matrix 17 | """ 18 | num_q, num_g = distmat.shape 19 | 20 | indices = np.argsort(distmat, axis=1) 21 | matches = (g_pids[indices] == q_pids[:, np.newaxis]).astype(np.int32) 22 | 23 | pos = [] 24 | neg = [] 25 | for q_idx in range(num_q): 26 | # get query pid and camid 27 | q_pid = q_pids[q_idx] 28 | q_camid = q_camids[q_idx] 29 | 30 | # Remove gallery samples that have the same pid and camid with query 31 | order = indices[q_idx] 32 | remove = (g_pids[order] == q_pid) & (g_camids[order] == q_camid) 33 | keep = np.invert(remove) 34 | cmc = matches[q_idx][keep] 35 | sort_idx = order[keep] 36 | 37 | q_dist = distmat[q_idx] 38 | ind_pos = np.where(cmc == 1)[0] 39 | pos.extend(q_dist[sort_idx[ind_pos]]) 40 | 41 | ind_neg = np.where(cmc == 0)[0] 42 | neg.extend(q_dist[sort_idx[ind_neg]]) 43 | 44 | scores = np.hstack((pos, neg)) 45 | 46 | labels = np.hstack((np.zeros(len(pos)), np.ones(len(neg)))) 47 | fpr, tpr, thresholds = metrics.roc_curve(labels, scores) 48 | tprs = [] 49 | for i in [1e-4, 1e-3, 1e-2]: 50 | ind = np.argmin(np.abs(fpr-i)) 51 | tprs.append(tpr[ind]) 52 | return tprs 53 | -------------------------------------------------------------------------------- /fastreid/evaluation/query_expansion.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: liaoxingyu5@jd.com 5 | """ 6 | 7 | # based on 8 | # https://github.com/PyRetri/PyRetri/blob/master/pyretri/index/re_ranker/re_ranker_impl/query_expansion.py 9 | 10 | import numpy as np 11 | import torch 12 | import torch.nn.functional as F 13 | 14 | 15 | def aqe(query_feat: torch.tensor, gallery_feat: torch.tensor, 16 | qe_times: int = 1, qe_k: int = 10, alpha: float = 3.0): 17 | """ 18 | Combining the retrieved topk nearest neighbors with the original query and doing another retrieval. 19 | c.f.
https://www.robots.ox.ac.uk/~vgg/publications/papers/chum07b.pdf 20 | Args : 21 | query_feat (torch.tensor): 22 | gallery_feat (torch.tensor): 23 | qe_times (int): number of query expansion times. 24 | qe_k (int): number of the neighbors to be combined. 25 | alpha (float): 26 | """ 27 | num_query = query_feat.shape[0] 28 | all_feat = torch.cat((query_feat, gallery_feat), dim=0) 29 | norm_feat = F.normalize(all_feat, p=2, dim=1) 30 | 31 | all_feat = all_feat.numpy() 32 | for i in range(qe_times): 33 | all_feat_list = [] 34 | sims = torch.mm(norm_feat, norm_feat.t()) 35 | sims = sims.data.cpu().numpy() 36 | for sim in sims: 37 | init_rank = np.argpartition(-sim, range(1, qe_k + 1)) 38 | weights = sim[init_rank[:qe_k]].reshape((-1, 1)) 39 | weights = np.power(weights, alpha) 40 | all_feat_list.append(np.mean(all_feat[init_rank[:qe_k], :] * weights, axis=0)) 41 | all_feat = np.stack(all_feat_list, axis=0) 42 | norm_feat = F.normalize(torch.from_numpy(all_feat), p=2, dim=1) 43 | 44 | query_feat = torch.from_numpy(all_feat[:num_query]) 45 | gallery_feat = torch.from_numpy(all_feat[num_query:]) 46 | return query_feat, gallery_feat 47 | -------------------------------------------------------------------------------- /fastreid/modeling/heads/linear_head.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from fastreid.layers import * 8 | from fastreid.modeling.losses import * 9 | from .build import REID_HEADS_REGISTRY 10 | from fastreid.utils.weight_init import weights_init_classifier 11 | 12 | 13 | @REID_HEADS_REGISTRY.register() 14 | class LinearHead(nn.Module): 15 | def __init__(self, cfg, in_feat, num_classes, pool_layer): 16 | super().__init__() 17 | self.pool_layer = pool_layer 18 | 19 | # identity classification layer 20 | cls_type = cfg.MODEL.HEADS.CLS_LAYER 21 | if cls_type == 'linear': self.classifier = nn.Linear(in_feat, num_classes, bias=False) 22 | elif cls_type == 'arcface': self.classifier = Arcface(cfg, in_feat, num_classes) 23 | elif cls_type == 'circle': self.classifier = Circle(cfg, in_feat, num_classes) 24 | else: 25 | raise KeyError(f"{cls_type} is invalid, please choose from " 26 | f"'linear', 'arcface' and 'circle'.") 27 | 28 | self.classifier.apply(weights_init_classifier) 29 | 30 | def forward(self, features, targets=None): 31 | """ 32 | See :class:`ReIDHeads.forward`. 
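At inference time this head returns the pooled global feature; during
training it returns (cls_outputs, global_feat) for the loss functions.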
33 | """ 34 | global_feat = self.pool_layer(features) 35 | global_feat = global_feat[..., 0, 0] 36 | 37 | # Evaluation 38 | if not self.training: return global_feat 39 | 40 | # Training 41 | try: 42 | cls_outputs = self.classifier(global_feat) 43 | pred_class_logits = cls_outputs.detach() 44 | except TypeError: 45 | cls_outputs = self.classifier(global_feat, targets) 46 | pred_class_logits = F.linear(F.normalize(global_feat.detach()), F.normalize(self.classifier.weight.detach())) 47 | # Log prediction accuracy 48 | CrossEntropyLoss.log_accuracy(pred_class_logits, targets) 49 | 50 | return cls_outputs, global_feat 51 | -------------------------------------------------------------------------------- /fastreid/layers/non_local.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | 3 | 4 | import torch 5 | from torch import nn 6 | from .batch_norm import get_norm 7 | 8 | 9 | class Non_local(nn.Module): 10 | def __init__(self, in_channels, bn_norm, num_splits, reduc_ratio=2): 11 | super(Non_local, self).__init__() 12 | 13 | self.in_channels = in_channels 14 | self.inter_channels = reduc_ratio // reduc_ratio 15 | 16 | self.g = nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels, 17 | kernel_size=1, stride=1, padding=0) 18 | 19 | self.W = nn.Sequential( 20 | nn.Conv2d(in_channels=self.inter_channels, out_channels=self.in_channels, 21 | kernel_size=1, stride=1, padding=0), 22 | get_norm(bn_norm, self.in_channels, num_splits), 23 | ) 24 | nn.init.constant_(self.W[1].weight, 0.0) 25 | nn.init.constant_(self.W[1].bias, 0.0) 26 | 27 | self.theta = nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels, 28 | kernel_size=1, stride=1, padding=0) 29 | 30 | self.phi = nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels, 31 | kernel_size=1, stride=1, padding=0) 32 | 33 | def forward(self, x): 34 | ''' 35 | :param x: (b, t, h, w) 36 | :return x: (b, t, h, w) 37 | ''' 38 | batch_size = x.size(0) 39 | g_x = self.g(x).view(batch_size, self.inter_channels, -1) 40 | g_x = g_x.permute(0, 2, 1) 41 | 42 | theta_x = self.theta(x).view(batch_size, self.inter_channels, -1) 43 | theta_x = theta_x.permute(0, 2, 1) 44 | phi_x = self.phi(x).view(batch_size, self.inter_channels, -1) 45 | f = torch.matmul(theta_x, phi_x) 46 | N = f.size(-1) 47 | f_div_C = f / N 48 | 49 | y = torch.matmul(f_div_C, g_x) 50 | y = y.permute(0, 2, 1).contiguous() 51 | y = y.view(batch_size, self.inter_channels, *x.size()[2:]) 52 | W_y = self.W(y) 53 | z = W_y + x 54 | return z 55 | -------------------------------------------------------------------------------- /fastreid/layers/arcface.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import math 8 | 9 | import torch 10 | import torch.nn as nn 11 | import torch.nn.functional as F 12 | from torch.nn import Parameter 13 | 14 | 15 | class Arcface(nn.Module): 16 | def __init__(self, cfg, in_feat, num_classes): 17 | super().__init__() 18 | self.in_feat = in_feat 19 | self._num_classes = num_classes 20 | self._s = cfg.MODEL.HEADS.SCALE 21 | self._m = cfg.MODEL.HEADS.MARGIN 22 | 23 | self.cos_m = math.cos(self._m) 24 | self.sin_m = math.sin(self._m) 25 | self.threshold = math.cos(math.pi - self._m) 26 | self.mm = math.sin(math.pi - self._m) * self._m 27 | 28 | self.weight = Parameter(torch.Tensor(num_classes, in_feat)) 29 | 
self.register_buffer('t', torch.zeros(1)) 30 | 31 | def forward(self, features, targets): 32 | # get cos(theta) 33 | cos_theta = F.linear(F.normalize(features), F.normalize(self.weight)) 34 | cos_theta = cos_theta.clamp(-1, 1) # for numerical stability 35 | 36 | target_logit = cos_theta[torch.arange(0, features.size(0)), targets].view(-1, 1) 37 | 38 | sin_theta = torch.sqrt(1.0 - torch.pow(target_logit, 2)) 39 | cos_theta_m = target_logit * self.cos_m - sin_theta * self.sin_m # cos(target+margin) 40 | mask = cos_theta > cos_theta_m 41 | final_target_logit = torch.where(target_logit > self.threshold, cos_theta_m, target_logit - self.mm) 42 | 43 | hard_example = cos_theta[mask] 44 | with torch.no_grad(): 45 | self.t = target_logit.mean() * 0.01 + (1 - 0.01) * self.t 46 | cos_theta[mask] = hard_example * (self.t + hard_example) 47 | cos_theta.scatter_(1, targets.view(-1, 1).long(), final_target_logit) 48 | pred_class_logits = cos_theta * self._s 49 | return pred_class_logits 50 | 51 | def extra_repr(self): 52 | return 'in_features={}, num_classes={}, scale={}, margin={}'.format( 53 | self.in_feat, self._num_classes, self._s, self._m 54 | ) 55 | -------------------------------------------------------------------------------- /fastreid/data/transforms/build.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import torchvision.transforms as T 8 | 9 | from .transforms import * 10 | from .autoaugment import AutoAugment 11 | 12 | 13 | def build_transforms(cfg, is_train=True): 14 | res = [] 15 | 16 | if is_train: 17 | size_train = cfg.INPUT.SIZE_TRAIN 18 | 19 | # augmix augmentation 20 | do_augmix = cfg.INPUT.DO_AUGMIX 21 | 22 | # auto augmentation 23 | do_autoaug = cfg.INPUT.DO_AUTOAUG 24 | total_iter = cfg.SOLVER.MAX_ITER 25 | 26 | # horizontal filp 27 | do_flip = cfg.INPUT.DO_FLIP 28 | flip_prob = cfg.INPUT.FLIP_PROB 29 | 30 | # padding 31 | do_pad = cfg.INPUT.DO_PAD 32 | padding = cfg.INPUT.PADDING 33 | padding_mode = cfg.INPUT.PADDING_MODE 34 | 35 | # color jitter 36 | do_cj = cfg.INPUT.DO_CJ 37 | 38 | # random erasing 39 | do_rea = cfg.INPUT.REA.ENABLED 40 | rea_prob = cfg.INPUT.REA.PROB 41 | rea_mean = cfg.INPUT.REA.MEAN 42 | # random patch 43 | do_rpt = cfg.INPUT.RPT.ENABLED 44 | rpt_prob = cfg.INPUT.RPT.PROB 45 | 46 | if do_autoaug: 47 | res.append(AutoAugment(total_iter)) 48 | res.append(T.Resize(size_train, interpolation=3)) 49 | if do_flip: 50 | res.append(T.RandomHorizontalFlip(p=flip_prob)) 51 | if do_pad: 52 | res.extend([T.Pad(padding, padding_mode=padding_mode), 53 | T.RandomCrop(size_train)]) 54 | if do_cj: 55 | res.append(T.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0)) 56 | if do_augmix: 57 | res.append(AugMix()) 58 | if do_rea: 59 | res.append(RandomErasing(probability=rea_prob, mean=rea_mean)) 60 | if do_rpt: 61 | res.append(RandomPatch(prob_happen=rpt_prob)) 62 | else: 63 | size_test = cfg.INPUT.SIZE_TEST 64 | res.append(T.Resize(size_test, interpolation=3)) 65 | res.append(ToTensor()) 66 | return T.Compose(res) 67 | -------------------------------------------------------------------------------- /fastreid/utils/timer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
2 | # -*- coding: utf-8 -*- 3 | 4 | from time import perf_counter 5 | from typing import Optional 6 | 7 | 8 | class Timer: 9 | """ 10 | A timer which computes the time elapsed since the start/reset of the timer. 11 | """ 12 | 13 | def __init__(self): 14 | self.reset() 15 | 16 | def reset(self): 17 | """ 18 | Reset the timer. 19 | """ 20 | self._start = perf_counter() 21 | self._paused: Optional[float] = None 22 | self._total_paused = 0 23 | self._count_start = 1 24 | 25 | def pause(self): 26 | """ 27 | Pause the timer. 28 | """ 29 | if self._paused is not None: 30 | raise ValueError("Trying to pause a Timer that is already paused!") 31 | self._paused = perf_counter() 32 | 33 | def is_paused(self) -> bool: 34 | """ 35 | Returns: 36 | bool: whether the timer is currently paused 37 | """ 38 | return self._paused is not None 39 | 40 | def resume(self): 41 | """ 42 | Resume the timer. 43 | """ 44 | if self._paused is None: 45 | raise ValueError("Trying to resume a Timer that is not paused!") 46 | self._total_paused += perf_counter() - self._paused 47 | self._paused = None 48 | self._count_start += 1 49 | 50 | def seconds(self) -> float: 51 | """ 52 | Returns: 53 | (float): the total number of seconds since the start/reset of the 54 | timer, excluding the time when the timer is paused. 55 | """ 56 | if self._paused is not None: 57 | end_time: float = self._paused # type: ignore 58 | else: 59 | end_time = perf_counter() 60 | return end_time - self._start - self._total_paused 61 | 62 | def avg_seconds(self) -> float: 63 | """ 64 | Returns: 65 | (float): the average number of seconds between every start/reset and 66 | pause. 67 | """ 68 | return self.seconds() / self._count_start 69 | -------------------------------------------------------------------------------- /tools/deploy/caffe_export.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: liaoxingyu5@jd.com 5 | """ 6 | 7 | import argparse 8 | 9 | import torch 10 | import sys 11 | sys.path.append('../../') 12 | 13 | import pytorch_to_caffe 14 | from fastreid.config import get_cfg 15 | from fastreid.modeling.meta_arch import build_model 16 | from fastreid.utils.file_io import PathManager 17 | from fastreid.utils.checkpoint import Checkpointer 18 | 19 | 20 | def setup_cfg(args): 21 | cfg = get_cfg() 22 | cfg.merge_from_file(args.config_file) 23 | cfg.merge_from_list(args.opts) 24 | cfg.freeze() 25 | return cfg 26 | 27 | 28 | def get_parser(): 29 | parser = argparse.ArgumentParser(description="Convert Pytorch to Caffe model") 30 | 31 | parser.add_argument( 32 | "--config-file", 33 | metavar="FILE", 34 | help="path to config file", 35 | ) 36 | parser.add_argument( 37 | "--name", 38 | default="baseline", 39 | help="name for converted model" 40 | ) 41 | parser.add_argument( 42 | "--output", 43 | default='caffe_model', 44 | help='path to save converted caffe model' 45 | ) 46 | parser.add_argument( 47 | "--opts", 48 | help="Modify config options using the command-line 'KEY VALUE' pairs", 49 | default=[], 50 | nargs=argparse.REMAINDER, 51 | ) 52 | return parser 53 | 54 | 55 | if __name__ == '__main__': 56 | args = get_parser().parse_args() 57 | cfg = setup_cfg(args) 58 | 59 | cfg.defrost() 60 | cfg.MODEL.BACKBONE.PRETRAIN = False 61 | cfg.MODEL.HEADS.POOL_LAYER = "identity" 62 | cfg.MODEL.BACKBONE.WITH_NL = False 63 | 64 | model = build_model(cfg) 65 | Checkpointer(model).load(cfg.MODEL.WEIGHTS) 66 | model.eval() 67 | print(model) 68 | 69 | inputs = 
torch.randn(1, 3, cfg.INPUT.SIZE_TEST[0], cfg.INPUT.SIZE_TEST[1]).cuda() 70 | PathManager.mkdirs(args.output) 71 | pytorch_to_caffe.trans_net(model, inputs, args.name) 72 | pytorch_to_caffe.save_prototxt(f"{args.output}/{args.name}.prototxt") 73 | pytorch_to_caffe.save_caffemodel(f"{args.output}/{args.name}.caffemodel") 74 | -------------------------------------------------------------------------------- /fastreid/utils/registry.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 3 | 4 | from typing import Dict, Optional 5 | 6 | 7 | class Registry(object): 8 | """ 9 | The registry that provides name -> object mapping, to support third-party 10 | users' custom modules. 11 | To create a registry (e.g. a backbone registry): 12 | .. code-block:: python 13 | BACKBONE_REGISTRY = Registry('BACKBONE') 14 | To register an object: 15 | .. code-block:: python 16 | @BACKBONE_REGISTRY.register() 17 | class MyBackbone(): 18 | ... 19 | Or: 20 | .. code-block:: python 21 | BACKBONE_REGISTRY.register(MyBackbone) 22 | """ 23 | 24 | def __init__(self, name: str) -> None: 25 | """ 26 | Args: 27 | name (str): the name of this registry 28 | """ 29 | self._name: str = name 30 | self._obj_map: Dict[str, object] = {} 31 | 32 | def _do_register(self, name: str, obj: object) -> None: 33 | assert ( 34 | name not in self._obj_map 35 | ), "An object named '{}' was already registered in '{}' registry!".format( 36 | name, self._name 37 | ) 38 | self._obj_map[name] = obj 39 | 40 | def register(self, obj: object = None) -> Optional[object]: 41 | """ 42 | Register the given object under the name `obj.__name__`. 43 | Can be used as either a decorator or not. See docstring of this class for usage. 44 | """ 45 | if obj is None: 46 | # used as a decorator 47 | def deco(func_or_class: object) -> object: 48 | name = func_or_class.__name__ # pyre-ignore 49 | self._do_register(name, func_or_class) 50 | return func_or_class 51 | 52 | return deco 53 | 54 | # used as a function call 55 | name = obj.__name__ # pyre-ignore 56 | self._do_register(name, obj) 57 | 58 | def get(self, name: str) -> object: 59 | ret = self._obj_map.get(name) 60 | if ret is None: 61 | raise KeyError( 62 | "No object named '{}' found in '{}' registry!".format( 63 | name, self._name 64 | ) 65 | ) 66 | return ret 67 | -------------------------------------------------------------------------------- /fastreid/modeling/losses/cross_entroy_loss.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: l1aoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | import torch 7 | import torch.nn.functional as F 8 | 9 | from fastreid.utils.events import get_event_storage 10 | 11 | 12 | class CrossEntropyLoss(object): 13 | """ 14 | A class that stores information and computes losses about outputs of a Baseline head. 15 | """ 16 | 17 | def __init__(self, cfg): 18 | self._num_classes = cfg.MODEL.HEADS.NUM_CLASSES 19 | self._eps = cfg.MODEL.LOSSES.CE.EPSILON 20 | self._alpha = cfg.MODEL.LOSSES.CE.ALPHA 21 | self._scale = cfg.MODEL.LOSSES.CE.SCALE 22 | 23 | @staticmethod 24 | def log_accuracy(pred_class_logits, gt_classes, topk=(1,)): 25 | """ 26 | Log the accuracy metrics to EventStorage.
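Args:
    pred_class_logits (torch.Tensor): (batch_size, num_classes) classification logits.
    gt_classes (torch.Tensor): (batch_size,) ground-truth identity labels.
    topk (tuple): the k values for which top-k accuracy is computed;
        only the first entry is written to the event storage.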
27 | """ 28 | bsz = pred_class_logits.size(0) 29 | maxk = max(topk) 30 | _, pred_class = pred_class_logits.topk(maxk, 1, True, True) 31 | pred_class = pred_class.t() 32 | correct = pred_class.eq(gt_classes.view(1, -1).expand_as(pred_class)) 33 | 34 | ret = [] 35 | for k in topk: 36 | correct_k = correct[:k].view(-1).float().sum(dim=0, keepdim=True) 37 | ret.append(correct_k.mul_(1. / bsz)) 38 | 39 | storage = get_event_storage() 40 | storage.put_scalar("cls_accuracy", ret[0]) 41 | 42 | def __call__(self, pred_class_logits, gt_classes): 43 | """ 44 | Compute the softmax cross entropy loss for box classification. 45 | Returns: 46 | scalar Tensor 47 | """ 48 | if self._eps >= 0: 49 | smooth_param = self._eps 50 | else: 51 | # adaptive lsr 52 | soft_label = F.softmax(pred_class_logits, dim=1) 53 | smooth_param = self._alpha * soft_label[torch.arange(soft_label.size(0)), gt_classes].unsqueeze(1) 54 | 55 | log_probs = F.log_softmax(pred_class_logits, dim=1) 56 | with torch.no_grad(): 57 | targets = torch.ones_like(log_probs) 58 | targets *= smooth_param / (self._num_classes - 1) 59 | targets.scatter_(1, gt_classes.data.unsqueeze(1), (1 - smooth_param)) 60 | 61 | loss = (-targets * log_probs).mean(0).sum() 62 | return loss * self._scale 63 | -------------------------------------------------------------------------------- /fastreid/data/datasets/veri.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: Jinkai Zheng 4 | @contact: 1315673509@qq.com 5 | """ 6 | 7 | import glob 8 | import os.path as osp 9 | import re 10 | 11 | from .bases import ImageDataset 12 | from ..datasets import DATASET_REGISTRY 13 | 14 | 15 | @DATASET_REGISTRY.register() 16 | class VeRi(ImageDataset): 17 | """VeRi. 18 | 19 | Reference: 20 | Liu et al. A Deep Learning based Approach for Progressive Vehicle Re-Identification. ECCV 2016. 21 | 22 | URL: ``_ 23 | 24 | Dataset statistics: 25 | - identities: 775. 26 | - images: 37778 (train) + 1678 (query) + 11579 (gallery). 
27 | """ 28 | dataset_dir = "veri" 29 | dataset_name = "veri" 30 | 31 | def __init__(self, root='datasets', **kwargs): 32 | self.dataset_dir = osp.join(root, self.dataset_dir) 33 | 34 | self.train_dir = osp.join(self.dataset_dir, 'image_train') 35 | self.query_dir = osp.join(self.dataset_dir, 'image_query') 36 | self.gallery_dir = osp.join(self.dataset_dir, 'image_test') 37 | 38 | required_files = [ 39 | self.dataset_dir, 40 | self.train_dir, 41 | self.query_dir, 42 | self.gallery_dir, 43 | ] 44 | self.check_before_run(required_files) 45 | 46 | train = self.process_dir(self.train_dir) 47 | query = self.process_dir(self.query_dir, is_train=False) 48 | gallery = self.process_dir(self.gallery_dir, is_train=False) 49 | 50 | super(VeRi, self).__init__(train, query, gallery, **kwargs) 51 | 52 | def process_dir(self, dir_path, is_train=True): 53 | img_paths = glob.glob(osp.join(dir_path, '*.jpg')) 54 | pattern = re.compile(r'([\d]+)_c(\d\d\d)') 55 | 56 | data = [] 57 | for img_path in img_paths: 58 | pid, camid = map(int, pattern.search(img_path).groups()) 59 | if pid == -1: continue # junk images are just ignored 60 | assert 1 <= pid <= 776 61 | assert 1 <= camid <= 20 62 | camid -= 1 # index starts from 0 63 | if is_train: 64 | pid = self.dataset_name + "_" + str(pid) 65 | data.append((img_path, pid, camid)) 66 | 67 | return data 68 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | # HAZY RE-ID: AN INTERFERENCE SUPPRESSION MODEL FOR DOMAIN ADAPTATION PERSON RE-IDENTIFICATION UNDER INCLEMENT WEATHER CONDITION. 3 | 4 | This package contains the source code which is associated with the following paper: 5 | 6 | Jian Pang, Dacheng Zhang, Huafeng Li, Weifeng Liu, Zhengtao Yu, “HAZY RE-ID: AN INTERFERENCE SUPPRESSION MODEL FOR DOMAIN ADAPTATION PERSON RE-IDENTIFICATION UNDER INCLEMENT WEATHER CONDITION.” accepted as oral on ICME 2021. 7 | 8 | Edited by Jian Pang 9 | 10 | Usage of this code is free for research purposes only. 11 | 12 | Thank you. 13 | 14 | # Requirements: 15 | CUDA 10.2 16 | Python 3.8 17 | Pytorch 1.6.0 18 | torchvision 0.2.2 19 | numpy 1.19.0 20 | 21 | # Get Started 22 | ## 1.Install: 23 | download the code 24 | git clone https://github.com/PangJian123/ISM-ReID.git 25 | cd ISM-ReID 26 | 27 | ## 2.Datasets and the pre-trained 28 | - Prepare datasets and the pre-trained model please refer to https://github.com/PangJian123/fast-reid 29 | - Download the synthetic hazy datasets through the links below: 30 | *Hazy-DukeMTMC-reID*:[Baidu Pan](https://pan.baidu.com/s/12engFwDf---c9QSFdyF3eg)(password:7ec8) 31 | *Hazy-Market1501*:[Baidu Pan](https://pan.baidu.com/s/1QFYbfOYgKkLokxXpPHr4uA) (password:xs52) 32 | 33 | ## 3.Run the training file: 34 | sh pre_train.sh (Supervised training on source domain) 35 | sh train.sh (Training ISM) 36 | ## Hazy image generation: 37 | - The hazy image is obtained by combining clear image and the corresponding depth map through the method of [hazyimage_generator](https://github.com/phoenixtreesky7/hazyimage_generator). 38 | - The clear image comes from the original dataset(Market-1501 or DukeMTMC-ReID). 39 | - The depth map is generated by [monodepth2](https://github.com/nianticlabs/monodepth2). Note that, we add 'disp = torch.exp(-disp)' after 'disp = outputs[("disp, 0")]' in test_simple.py to obtain reversed depth map. 40 | 41 | 42 | # Contact: 43 | Don't hesitate to contact me if you meet any problems when using this code. 
44 | 45 | Jian Pang 46 | Faculty of Information Engineering and Automation 47 | Kunming University of Science and Technology 48 | Email: pangjian@stu.kust.edu.cn 49 | 50 | # Acknowledgements 51 | Our code is based on https://github.com/JDAI-CV/fast-reid,https://github.com/nianticlabs/monodepth2 and https://github.com/phoenixtreesky7/hazyimage_generator. 52 | -------------------------------------------------------------------------------- /fastreid/utils/history_buffer.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 3 | 4 | import numpy as np 5 | from typing import List, Tuple 6 | 7 | 8 | class HistoryBuffer: 9 | """ 10 | Track a series of scalar values and provide access to smoothed values over a 11 | window or the global average of the series. 12 | """ 13 | 14 | def __init__(self, max_length: int = 1000000): 15 | """ 16 | Args: 17 | max_length: maximal number of values that can be stored in the 18 | buffer. When the capacity of the buffer is exhausted, old 19 | values will be removed. 20 | """ 21 | self._max_length: int = max_length 22 | self._data: List[Tuple[float, float]] = [] # (value, iteration) pairs 23 | self._count: int = 0 24 | self._global_avg: float = 0 25 | 26 | def update(self, value: float, iteration: float = None): 27 | """ 28 | Add a new scalar value produced at certain iteration. If the length 29 | of the buffer exceeds self._max_length, the oldest element will be 30 | removed from the buffer. 31 | """ 32 | if iteration is None: 33 | iteration = self._count 34 | if len(self._data) == self._max_length: 35 | self._data.pop(0) 36 | self._data.append((value, iteration)) 37 | 38 | self._count += 1 39 | self._global_avg += (value - self._global_avg) / self._count 40 | 41 | def latest(self): 42 | """ 43 | Return the latest scalar value added to the buffer. 44 | """ 45 | return self._data[-1][0] 46 | 47 | def median(self, window_size: int): 48 | """ 49 | Return the median of the latest `window_size` values in the buffer. 50 | """ 51 | return np.median([x[0] for x in self._data[-window_size:]]) 52 | 53 | def avg(self, window_size: int): 54 | """ 55 | Return the mean of the latest `window_size` values in the buffer. 56 | """ 57 | return np.mean([x[0] for x in self._data[-window_size:]]) 58 | 59 | def global_avg(self): 60 | """ 61 | Return the mean of all the elements in the buffer. Note that this 62 | includes those getting removed due to limited buffer storage. 63 | """ 64 | return self._global_avg 65 | 66 | def values(self): 67 | """ 68 | Returns: 69 | list[(number, iteration)]: content of the current buffer. 70 | """ 71 | return self._data 72 | -------------------------------------------------------------------------------- /fastreid/data/datasets/dukemtmcreid.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: liaoxingyu2@jd.com 5 | """ 6 | 7 | import glob 8 | import os.path as osp 9 | import re 10 | 11 | from .bases import ImageDataset 12 | from ..datasets import DATASET_REGISTRY 13 | 14 | 15 | @DATASET_REGISTRY.register() 16 | class DukeMTMC(ImageDataset): 17 | """DukeMTMC-reID. 18 | 19 | Reference: 20 | - Ristani et al. Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking. ECCVW 2016. 21 | - Zheng et al. Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in vitro. ICCV 2017. 
22 | 23 | URL: ``_ 24 | 25 | Dataset statistics: 26 | - identities: 1404 (train + query). 27 | - images:16522 (train) + 2228 (query) + 17661 (gallery). 28 | - cameras: 8. 29 | """ 30 | dataset_dir = 'DukeMTMC-reID' 31 | dataset_url = 'http://vision.cs.duke.edu/DukeMTMC/data/misc/DukeMTMC-reID.zip' 32 | dataset_name = "dukemtmc" 33 | 34 | def __init__(self, root='datasets', **kwargs): 35 | # self.root = osp.abspath(osp.expanduser(root)) 36 | self.root = root 37 | self.dataset_dir = osp.join(self.root, self.dataset_dir) 38 | self.train_dir = osp.join(self.dataset_dir, 'bounding_box_train') 39 | self.query_dir = osp.join(self.dataset_dir, 'query') 40 | self.gallery_dir = osp.join(self.dataset_dir, 'bounding_box_test') 41 | 42 | required_files = [ 43 | self.dataset_dir, 44 | self.train_dir, 45 | self.query_dir, 46 | self.gallery_dir, 47 | ] 48 | self.check_before_run(required_files) 49 | 50 | train = self.process_dir(self.train_dir) 51 | query = self.process_dir(self.query_dir, is_train=False) 52 | gallery = self.process_dir(self.gallery_dir, is_train=False) 53 | 54 | super(DukeMTMC, self).__init__(train, query, gallery, **kwargs) 55 | 56 | def process_dir(self, dir_path, is_train=True): 57 | img_paths = glob.glob(osp.join(dir_path, '*.jpg')) 58 | pattern = re.compile(r'([-\d]+)_c(\d)') 59 | 60 | data = [] 61 | for img_path in img_paths: 62 | pid, camid = map(int, pattern.search(img_path).groups()) 63 | assert 1 <= camid <= 8 64 | camid -= 1 # index starts from 0 65 | if is_train: 66 | pid = self.dataset_name + "_" + str(pid) 67 | data.append((img_path, pid, camid)) 68 | 69 | return data 70 | -------------------------------------------------------------------------------- /demo/demo.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import argparse 8 | import glob 9 | import os 10 | import sys 11 | 12 | import cv2 13 | import numpy as np 14 | import tqdm 15 | from torch.backends import cudnn 16 | 17 | sys.path.append('..') 18 | 19 | from fastreid.config import get_cfg 20 | from fastreid.utils.file_io import PathManager 21 | from predictor import FeatureExtractionDemo 22 | 23 | # import some modules added in project like this below 24 | # from projects.PartialReID.partialreid import * 25 | 26 | cudnn.benchmark = True 27 | 28 | 29 | def setup_cfg(args): 30 | # load config from file and command-line arguments 31 | cfg = get_cfg() 32 | # add_partialreid_config(cfg) 33 | cfg.merge_from_file(args.config_file) 34 | cfg.merge_from_list(args.opts) 35 | cfg.freeze() 36 | return cfg 37 | 38 | 39 | def get_parser(): 40 | parser = argparse.ArgumentParser(description="Feature extraction with reid models") 41 | parser.add_argument( 42 | "--config-file", 43 | metavar="FILE", 44 | help="path to config file", 45 | ) 46 | parser.add_argument( 47 | "--parallel", 48 | action='store_true', 49 | help='If use multiprocess for feature extraction.' 
50 | ) 51 | parser.add_argument( 52 | "--input", 53 | nargs="+", 54 | help="A list of space separated input images; " 55 | "or a single glob pattern such as 'directory/*.jpg'", 56 | ) 57 | parser.add_argument( 58 | "--output", 59 | default='demo_output', 60 | help='path to save features' 61 | ) 62 | parser.add_argument( 63 | "--opts", 64 | help="Modify config options using the command-line 'KEY VALUE' pairs", 65 | default=[], 66 | nargs=argparse.REMAINDER, 67 | ) 68 | return parser 69 | 70 | 71 | if __name__ == '__main__': 72 | args = get_parser().parse_args() 73 | cfg = setup_cfg(args) 74 | demo = FeatureExtractionDemo(cfg, parallel=args.parallel) 75 | 76 | PathManager.mkdirs(args.output) 77 | if args.input: 78 | if PathManager.isdir(args.input[0]): 79 | args.input = glob.glob(os.path.expanduser(args.input[0])) 80 | assert args.input, "The input path(s) was not found" 81 | for path in tqdm.tqdm(args.input): 82 | img = cv2.imread(path) 83 | feat = demo.run_on_image(img) 84 | feat = feat.numpy() 85 | np.save(os.path.join(args.output, path.replace('.jpg', '.npy').split('/')[-1]), feat) 86 | -------------------------------------------------------------------------------- /fastreid/evaluation/testing.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | import logging 3 | import pprint 4 | import sys 5 | from collections import Mapping, OrderedDict 6 | 7 | import numpy as np 8 | 9 | 10 | def print_csv_format(results): 11 | """ 12 | Print main metrics in a format similar to Detectron, 13 | so that they are easy to copypaste into a spreadsheet. 14 | Args: 15 | results (OrderedDict[dict]): task_name -> {metric -> score} 16 | """ 17 | 18 | 19 | assert isinstance(results, OrderedDict), results # unordered results cannot be properly printed 20 | task = list(results.keys())[0] 21 | metrics = [k for k in results[task]] 22 | logger = logging.getLogger(__name__) 23 | 24 | logger.info('----------------------------------------') 25 | logger.info("Evaluation results in csv format:") 26 | logger.info("Metric: " + ", ".join([k for k in metrics])) 27 | for task, res in results.items(): 28 | logger.info(f"{task}: " + ", ".join(["{:.1%}".format(v) for v in res.values()])) 29 | 30 | logger.info('----------------------------------------') 31 | 32 | 33 | def verify_results(cfg, results): 34 | """ 35 | Args: 36 | results (OrderedDict[dict]): task_name -> {metric -> score} 37 | Returns: 38 | bool: whether the verification succeeds or not 39 | """ 40 | expected_results = cfg.TEST.EXPECTED_RESULTS 41 | if not len(expected_results): 42 | return True 43 | 44 | ok = True 45 | for task, metric, expected, tolerance in expected_results: 46 | actual = results[task][metric] 47 | if not np.isfinite(actual): 48 | ok = False 49 | diff = abs(actual - expected) 50 | if diff > tolerance: 51 | ok = False 52 | 53 | logger = logging.getLogger(__name__) 54 | if not ok: 55 | logger.error("Result verification failed!") 56 | logger.error("Expected Results: " + str(expected_results)) 57 | logger.error("Actual Results: " + pprint.pformat(results)) 58 | 59 | sys.exit(1) 60 | else: 61 | logger.info("Results verification passed.") 62 | return ok 63 | 64 | 65 | def flatten_results_dict(results): 66 | """ 67 | Expand a hierarchical dict of scalars into a flat dict of scalars. 68 | If results[k1][k2][k3] = v, the returned dict will have the entry 69 | {"k1/k2/k3": v}. 
70 | Args: 71 | results (dict): 72 | """ 73 | r = {} 74 | for k, v in results.items(): 75 | if isinstance(v, Mapping): 76 | v = flatten_results_dict(v) 77 | for kk, vv in v.items(): 78 | r[k + "/" + kk] = vv 79 | else: 80 | r[k] = v 81 | return r 82 | -------------------------------------------------------------------------------- /fastreid/modeling/heads/bnneck_head.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from fastreid.layers import * 8 | from fastreid.modeling.losses import * 9 | from fastreid.utils.weight_init import weights_init_kaiming, weights_init_classifier 10 | from .build import REID_HEADS_REGISTRY 11 | 12 | 13 | @REID_HEADS_REGISTRY.register() 14 | class BNneckHead(nn.Module): 15 | def __init__(self, cfg, in_feat, num_classes, pool_layer): 16 | super().__init__() 17 | self.neck_feat = cfg.MODEL.HEADS.NECK_FEAT 18 | self.pool_layer = pool_layer 19 | 20 | self.bnneck = get_norm(cfg.MODEL.HEADS.NORM, in_feat, cfg.MODEL.HEADS.NORM_SPLIT, bias_freeze=True) 21 | self.bnneck.apply(weights_init_kaiming) 22 | 23 | # identity classification layer 24 | cls_type = cfg.MODEL.HEADS.CLS_LAYER 25 | if cls_type == 'linear': self.classifier = nn.Linear(in_feat, num_classes, bias=False) 26 | elif cls_type == 'arcface': self.classifier = Arcface(cfg, in_feat, num_classes) 27 | elif cls_type == 'circle': self.classifier = Circle(cfg, in_feat, num_classes) 28 | else: raise KeyError(f"{cls_type} is invalid, please choose from " 29 | f"'linear', 'arcface' and 'circle'.") 30 | 31 | self.classifier.apply(weights_init_classifier) 32 | 33 | def forward(self, features, targets=None, t_data=False): 34 | """ 35 | See :class:`ReIDHeads.forward`. 36 | """ 37 | global_feat = self.pool_layer(features) 38 | bn_feat = self.bnneck(global_feat) 39 | bn_feat = bn_feat[..., 0, 0] 40 | 41 | # Evaluation 42 | if not self.training: return bn_feat 43 | 44 | if t_data: 45 | if self.neck_feat == "before": 46 | feat = global_feat[..., 0, 0] 47 | elif self.neck_feat == "after": 48 | feat = bn_feat 49 | return feat 50 | # Training 51 | try: 52 | cls_outputs = self.classifier(bn_feat) 53 | pred_class_logits = cls_outputs.detach() 54 | except TypeError: 55 | cls_outputs = self.classifier(bn_feat, targets) 56 | pred_class_logits = F.linear(F.normalize(bn_feat.detach()), F.normalize(self.classifier.weight.detach())) 57 | # Log prediction accuracy 58 | CrossEntropyLoss.log_accuracy(pred_class_logits, targets) 59 | 60 | if self.neck_feat == "before": 61 | feat = global_feat[..., 0, 0] 62 | elif self.neck_feat == "after": 63 | feat = bn_feat 64 | else: 65 | raise KeyError("MODEL.HEADS.NECK_FEAT value is invalid, must choose from ('after' & 'before')") 66 | return cls_outputs, feat 67 | -------------------------------------------------------------------------------- /fastreid/layers/pooling.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: l1aoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import torch 8 | import torch.nn.functional as F 9 | from torch import nn 10 | 11 | 12 | class Flatten(nn.Module): 13 | def forward(self, input): 14 | return input.view(input.size(0), -1) 15 | 16 | 17 | class GeneralizedMeanPooling(nn.Module): 18 | r"""Applies a 2D power-average adaptive pooling over an input signal composed of several input planes. 
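This is the generalized-mean (GeM) pooling commonly used in image retrieval and re-identification.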
19 | The function computed is: :math:`f(X) = pow(mean(pow(X, p)), 1/p)` 20 | - At p = infinity, one gets Max Pooling 21 | - At p = 1, one gets Average Pooling 22 | The output is of size H x W, for any input size. 23 | The number of output features is equal to the number of input planes. 24 | Args: 25 | output_size: the target output size of the image of the form H x W. 26 | Can be a tuple (H, W) or a single H for a square image H x H 27 | H and W can be either an ``int``, or ``None`` which means the size will 28 | be the same as that of the input. 29 | """ 30 | 31 | def __init__(self, norm, output_size=1, eps=1e-6): 32 | super(GeneralizedMeanPooling, self).__init__() 33 | assert norm > 0 34 | self.p = float(norm) 35 | self.output_size = output_size 36 | self.eps = eps 37 | 38 | def forward(self, x): 39 | x = x.clamp(min=self.eps).pow(self.p) 40 | return torch.nn.functional.adaptive_avg_pool2d(x, self.output_size).pow(1. / self.p) 41 | 42 | def __repr__(self): 43 | return self.__class__.__name__ + '(' \ 44 | + str(self.p) + ', ' \ 45 | + 'output_size=' + str(self.output_size) + ')' 46 | 47 | 48 | class GeneralizedMeanPoolingP(GeneralizedMeanPooling): 49 | """ Same, but norm is trainable 50 | """ 51 | 52 | def __init__(self, norm=3, output_size=1, eps=1e-6): 53 | super(GeneralizedMeanPoolingP, self).__init__(norm, output_size, eps) 54 | self.p = nn.Parameter(torch.ones(1) * norm) 55 | 56 | 57 | class AdaptiveAvgMaxPool2d(nn.Module): 58 | def __init__(self): 59 | super(AdaptiveAvgMaxPool2d, self).__init__() 60 | self.avgpool = FastGlobalAvgPool2d() 61 | 62 | def forward(self, x): 63 | x_avg = self.avgpool(x) 64 | x_max = F.adaptive_max_pool2d(x, 1) 65 | x = x_max + x_avg 66 | return x 67 | 68 | 69 | class FastGlobalAvgPool2d(nn.Module): 70 | def __init__(self, flatten=False): 71 | super(FastGlobalAvgPool2d, self).__init__() 72 | self.flatten = flatten 73 | 74 | def forward(self, x): 75 | if self.flatten: 76 | in_size = x.size() 77 | return x.view((in_size[0], in_size[1], -1)).mean(dim=2) 78 | else: 79 | return x.view(x.size(0), x.size(1), -1).mean(-1).view(x.size(0), x.size(1), 1, 1) 80 | -------------------------------------------------------------------------------- /fastreid/modeling/heads/reduction_head.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from fastreid.layers import * 8 | from fastreid.modeling.losses import * 9 | from fastreid.utils.weight_init import weights_init_kaiming, weights_init_classifier 10 | from .build import REID_HEADS_REGISTRY 11 | 12 | 13 | @REID_HEADS_REGISTRY.register() 14 | class ReductionHead(nn.Module): 15 | def __init__(self, cfg, in_feat, num_classes, pool_layer): 16 | super().__init__() 17 | self._cfg = cfg 18 | reduction_dim = cfg.MODEL.HEADS.REDUCTION_DIM 19 | self.neck_feat = cfg.MODEL.HEADS.NECK_FEAT 20 | 21 | self.pool_layer = pool_layer 22 | 23 | self.bottleneck = nn.Sequential( 24 | nn.Conv2d(in_feat, reduction_dim, 1, 1, bias=False), 25 | get_norm(cfg.MODEL.HEADS.NORM, reduction_dim, cfg.MODEL.HEADS.NORM_SPLIT), 26 | nn.LeakyReLU(0.1, inplace=True), 27 | ) 28 | 29 | self.bnneck = get_norm(cfg.MODEL.HEADS.NORM, reduction_dim, cfg.MODEL.HEADS.NORM_SPLIT, bias_freeze=True) 30 | 31 | self.bottleneck.apply(weights_init_kaiming) 32 | self.bnneck.apply(weights_init_kaiming) 33 | 34 | # identity classification layer 35 | cls_type = cfg.MODEL.HEADS.CLS_LAYER 36 | if cls_type == 'linear':
self.classifier = nn.Linear(reduction_dim, num_classes, bias=False) 37 | elif cls_type == 'arcface': self.classifier = Arcface(cfg, reduction_dim, num_classes) 38 | elif cls_type == 'circle': self.classifier = Circle(cfg, reduction_dim, num_classes) 39 | else: 40 | raise KeyError(f"{cls_type} is invalid, please choose from " 41 | f"'linear', 'arcface' and 'circle'.") 42 | 43 | self.classifier.apply(weights_init_classifier) 44 | 45 | def forward(self, features, targets=None): 46 | """ 47 | See :class:`ReIDHeads.forward`. 48 | """ 49 | features = self.pool_layer(features) 50 | global_feat = self.bottleneck(features) 51 | bn_feat = self.bnneck(global_feat) 52 | bn_feat = bn_feat[..., 0, 0] 53 | 54 | # Evaluation 55 | if not self.training: return bn_feat 56 | 57 | # Training 58 | try: 59 | cls_outputs = self.classifier(bn_feat) 60 | pred_class_logits = cls_outputs.detach() 61 | except TypeError: 62 | cls_outputs = self.classifier(bn_feat, targets) 63 | pred_class_logits = F.linear(F.normalize(bn_feat.detach()), F.normalize(self.classifier.weight.detach())) 64 | # Log prediction accuracy 65 | CrossEntropyLoss.log_accuracy(pred_class_logits, targets) 66 | 67 | if self.neck_feat == "before": feat = global_feat[..., 0, 0] 68 | elif self.neck_feat == "after": feat = bn_feat 69 | else: 70 | raise KeyError("MODEL.HEADS.NECK_FEAT value is invalid, must choose from ('after' & 'before')") 71 | 72 | return cls_outputs, feat 73 | 74 | -------------------------------------------------------------------------------- /fastreid/data/samplers/data_sampler.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: l1aoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | import itertools 7 | from typing import Optional 8 | 9 | import numpy as np 10 | from torch.utils.data import Sampler 11 | 12 | from fastreid.utils import comm 13 | 14 | 15 | class TrainingSampler(Sampler): 16 | """ 17 | In training, we only care about the "infinite stream" of training data. 18 | So this sampler produces an infinite stream of indices and 19 | all workers cooperate to correctly shuffle the indices and sample different indices. 20 | The samplers in each worker effectively produces `indices[worker_id::num_workers]` 21 | where `indices` is an infinite stream of indices consisting of 22 | `shuffle(range(size)) + shuffle(range(size)) + ...` (if shuffle is True) 23 | or `range(size) + range(size) + ...` (if shuffle is False) 24 | """ 25 | 26 | def __init__(self, size: int, shuffle: bool = True, seed: Optional[int] = None): 27 | """ 28 | Args: 29 | size (int): the total number of data of the underlying dataset to sample from 30 | shuffle (bool): whether to shuffle the indices or not 31 | seed (int): the initial seed of the shuffle. Must be the same 32 | across all workers. If None, will use a random seed shared 33 | among workers (require synchronization among all workers). 
34 | """ 35 | self._size = size 36 | assert size > 0 37 | self._shuffle = shuffle 38 | if seed is None: 39 | seed = comm.shared_random_seed() 40 | self._seed = int(seed) 41 | 42 | self._rank = comm.get_rank() 43 | self._world_size = comm.get_world_size() 44 | 45 | def __iter__(self): 46 | start = self._rank 47 | yield from itertools.islice(self._infinite_indices(), start, None, self._world_size) 48 | 49 | def _infinite_indices(self): 50 | np.random.seed(self._seed) 51 | while True: 52 | if self._shuffle: 53 | yield from np.random.permutation(self._size) 54 | else: 55 | yield from np.arange(self._size) 56 | 57 | 58 | class InferenceSampler(Sampler): 59 | """ 60 | Produce indices for inference. 61 | Inference needs to run on the __exact__ set of samples, 62 | therefore when the total number of samples is not divisible by the number of workers, 63 | this sampler produces different number of samples on different workers. 64 | """ 65 | 66 | def __init__(self, size: int): 67 | """ 68 | Args: 69 | size (int): the total number of data of the underlying dataset to sample from 70 | """ 71 | self._size = size 72 | assert size > 0 73 | 74 | begin = 0 75 | end = self._size 76 | self._local_indices = range(begin, end) 77 | 78 | def __iter__(self): 79 | yield from self._local_indices 80 | 81 | def __len__(self): 82 | return len(self._local_indices) 83 | -------------------------------------------------------------------------------- /fastreid/data/common.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import torch 8 | from torch.utils.data import Dataset 9 | 10 | from .data_utils import read_image 11 | 12 | 13 | class CommDataset(Dataset): 14 | """Image Person ReID Dataset""" 15 | 16 | def __init__(self, img_items, transform=None, relabel=True): 17 | self.img_items = img_items 18 | self.transform = transform 19 | self.relabel = relabel 20 | 21 | self.pid_dict = {} 22 | if self.relabel: 23 | pids = list() 24 | for i, item in enumerate(img_items): 25 | if item[1] in pids: continue 26 | pids.append(item[1]) 27 | self.pids = pids 28 | self.pid_dict = dict([(p, i) for i, p in enumerate(self.pids)]) 29 | 30 | def __len__(self): 31 | return len(self.img_items) 32 | 33 | def __getitem__(self, index): 34 | img_path, pid, camid = self.img_items[index] 35 | img = read_image(img_path) 36 | if self.transform is not None: img = self.transform(img) 37 | if self.relabel: pid = self.pid_dict[pid] 38 | return { 39 | "images": img, 40 | "targets": pid, 41 | "camid": camid, 42 | "img_path": img_path 43 | } 44 | 45 | @property 46 | def num_classes(self): 47 | return len(self.pids) 48 | 49 | class PairDataset(Dataset): 50 | """Image Person ReID Dataset""" 51 | 52 | def __init__(self, img_items, hazy_img_items, transform=None, relabel=True): 53 | self.img_items = img_items 54 | self.hazy_img_items = hazy_img_items 55 | self.transform = transform 56 | self.relabel = relabel 57 | 58 | self.pid_dict = {} 59 | if self.relabel: 60 | pids = list() 61 | for i, item in enumerate(img_items): 62 | if item[1] in pids: continue 63 | pids.append(item[1]) 64 | self.pids = pids 65 | self.pid_dict = dict([(p, i) for i, p in enumerate(self.pids)]) 66 | 67 | def __len__(self): 68 | return len(self.img_items) 69 | 70 | def __getitem__(self, index): 71 | img_path, pid, camid = self.img_items[index] 72 | hazy_img_path, hazy_pid, hazy_camid = self.hazy_img_items[index] 73 | img = read_image(img_path) 74 | 
hazy_img = read_image(hazy_img_path) 75 | if self.transform is not None: img = self.transform(img) 76 | if self.transform is not None: hazy_img = self.transform(hazy_img) 77 | if self.relabel: pid = self.pid_dict[pid] 78 | if self.relabel: hazy_pid = self.pid_dict[hazy_pid] 79 | return { 80 | "images": img, 81 | "targets": pid, 82 | "camid": camid, 83 | "img_path": img_path, 84 | "hazy_images": hazy_img, 85 | "hazy_targets": hazy_pid, 86 | "hazy_camid": hazy_camid, 87 | "hazy_img_path": hazy_img_path 88 | } 89 | 90 | @property 91 | def num_classes(self): 92 | return len(self.pids) 93 | -------------------------------------------------------------------------------- /tools/pre_train.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | """ 4 | @author: sherlock 5 | @contact: sherlockliao01@gmail.com 6 | """ 7 | 8 | import logging 9 | import os 10 | import sys 11 | 12 | sys.path.append('.') 13 | 14 | from fastreid.config import get_cfg 15 | from fastreid.engine import DefaultTrainer, default_argument_parser, default_setup, launch, Hazytrainer 16 | from fastreid.utils.checkpoint import Checkpointer 17 | from fastreid.engine import hooks 18 | from fastreid.evaluation import ReidEvaluator 19 | 20 | 21 | class H_Trainer(Hazytrainer): 22 | @classmethod 23 | def build_evaluator(cls, cfg, num_query, output_folder=None): 24 | if output_folder is None: 25 | output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") 26 | return ReidEvaluator(cfg, num_query) 27 | 28 | class BaseTrainer(DefaultTrainer): 29 | @classmethod 30 | def build_evaluator(cls, cfg, num_query, output_folder=None): 31 | if output_folder is None: 32 | output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") 33 | return ReidEvaluator(cfg, num_query) 34 | 35 | def setup(args): 36 | """ 37 | Create configs and perform basic setups. 38 | """ 39 | cfg = get_cfg() 40 | cfg.merge_from_file(args.config_file) 41 | cfg.merge_from_list(args.opts) 42 | cfg.freeze() 43 | default_setup(cfg, args) 44 | return cfg 45 | 46 | 47 | def main(args): 48 | cfg = setup(args) 49 | 50 | if args.eval_only: 51 | logger = logging.getLogger("fastreid.trainer") 52 | cfg.defrost() 53 | cfg.MODEL.BACKBONE.PRETRAIN = False 54 | model = H_Trainer.build_model(cfg) 55 | 56 | Checkpointer(model).load(cfg.MODEL.WEIGHTS) # load trained model 57 | 58 | if cfg.TEST.PRECISE_BN.ENABLED and hooks.get_bn_modules(model): 59 | prebn_cfg = cfg.clone() 60 | prebn_cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN 61 | prebn_cfg.DATASETS.NAMES = tuple([cfg.TEST.PRECISE_BN.DATASET]) # set dataset name for PreciseBN 62 | logger.info("Prepare precise BN dataset") 63 | hooks.PreciseBN( 64 | # Run at the same freq as (but before) evaluation. 
65 | model, 66 | # Build a new data loader to not affect training 67 | H_Trainer.build_train_loader(prebn_cfg), 68 | cfg.TEST.PRECISE_BN.NUM_ITER, 69 | ).update_stats() 70 | res = H_Trainer.test(cfg, model) 71 | return res 72 | 73 | trainer = H_Trainer(cfg) 74 | trainer.resume_or_load(resume=args.resume) 75 | return trainer.train() 76 | 77 | 78 | if __name__ == "__main__": 79 | args = default_argument_parser() 80 | args.add_argument("--info", type=str, default="test", help="information of parameters and losses") 81 | args = args.parse_args() 82 | print("Command Line Args:", args) 83 | launch( 84 | main, 85 | args.num_gpus, 86 | num_machines=args.num_machines, 87 | machine_rank=args.machine_rank, 88 | dist_url=args.dist_url, 89 | args=(args,), 90 | ) 91 | 92 | -------------------------------------------------------------------------------- /fastreid/data/datasets/hazy_dukemtmcreid.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: liaoxingyu2@jd.com 5 | """ 6 | 7 | import glob 8 | import os.path as osp 9 | import re 10 | 11 | from .bases import ImageDataset, ImagePairDataset 12 | from ..datasets import DATASET_REGISTRY 13 | 14 | 15 | @DATASET_REGISTRY.register() 16 | class Hazy_DukeMTMC(ImagePairDataset): 17 | """DukeMTMC-reID. 18 | 19 | Reference: 20 | - Ristani et al. Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking. ECCVW 2016. 21 | - Zheng et al. Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in vitro. ICCV 2017. 22 | 23 | URL: ``_ 24 | 25 | Dataset statistics: 26 | - identities: 1404 (train + query). 27 | - images:16522 (train) + 2228 (query) + 17661 (gallery). 28 | - cameras: 8. 29 | """ 30 | dataset_dir = 'DukeMTMC-reID' 31 | dataset_url = 'http://vision.cs.duke.edu/DukeMTMC/data/misc/DukeMTMC-reID.zip' 32 | dataset_name = "dukemtmc" 33 | 34 | def __init__(self, root='datasets', **kwargs): 35 | # self.root = osp.abspath(osp.expanduser(root)) 36 | self.root = root 37 | self.dataset_dir = osp.join(self.root, 'Hazy_DukeMTMC-reID') 38 | self.train_dir = osp.join(self.dataset_dir, 'bounding_box_train') 39 | self.hazy_train_dir = osp.join(self.dataset_dir, 'hazy_bounding_box_train') 40 | self.query_dir = osp.join(self.dataset_dir, 'query') 41 | # self.hazy_query_dir = osp.join(self.dataset_dir, 'hazy_query') 42 | self.gallery_dir = osp.join(self.dataset_dir, 'bounding_box_test') 43 | # self.hazy_gallery_dir = osp.join(self.dataset_dir, 'hazy_bounding_box_test') 44 | 45 | required_files = [ 46 | self.dataset_dir, 47 | self.train_dir, 48 | self.hazy_train_dir, 49 | self.query_dir, 50 | self.gallery_dir, 51 | ] 52 | self.check_before_run(required_files) 53 | 54 | train = self.process_dir(self.train_dir) 55 | query = self.process_dir(self.query_dir, is_train=False) 56 | gallery = self.process_dir(self.gallery_dir, is_train=False) 57 | hazy_train = self.process_dir(self.hazy_train_dir) 58 | # hazy_query = self.process_dir(self.hazy_query_dir, is_train=False) 59 | # hazy_gallery = self.process_dir(self.hazy_gallery_dir, is_train=False) 60 | 61 | super(Hazy_DukeMTMC, self).__init__(train, query, gallery, hazy_train, **kwargs) 62 | 63 | def process_dir(self, dir_path, is_train=True): 64 | img_paths = glob.glob(osp.join(dir_path, '*.jpg')) 65 | pattern = re.compile(r'([-\d]+)_c(\d)') 66 | 67 | data = [] 68 | for img_path in img_paths: 69 | pid, camid = map(int, pattern.search(img_path).groups()) 70 | assert 1 <= camid <= 8 71 | camid -= 1 
# index starts from 0 72 | if is_train: 73 | pid = self.dataset_name + "_" + str(pid) 74 | data.append((img_path, pid, camid)) 75 | 76 | return data 77 | -------------------------------------------------------------------------------- /fastreid/evaluation/rank_cylib/test_cython.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import numpy as np 3 | import timeit 4 | import os.path as osp 5 | 6 | 7 | sys.path.insert(0, osp.dirname(osp.abspath(__file__)) + '/../../..') 8 | 9 | """ 10 | Test the speed of cython-based evaluation code. The speed improvements 11 | can be much bigger when using the real reid data, which contains a larger 12 | amount of query and gallery images. 13 | Note: you might encounter the following error: 14 | 'AssertionError: Error: all query identities do not appear in gallery'. 15 | This is normal because the inputs are random numbers. Just try again. 16 | """ 17 | 18 | print('*** Compare running time ***') 19 | 20 | setup = ''' 21 | import sys 22 | import os.path as osp 23 | import numpy as np 24 | sys.path.insert(0, osp.dirname(osp.abspath(__file__)) + '/../../..') 25 | from fastreid import evaluation 26 | num_q = 30 27 | num_g = 300 28 | max_rank = 5 29 | distmat = np.random.rand(num_q, num_g) * 20 30 | q_pids = np.random.randint(0, num_q, size=num_q) 31 | g_pids = np.random.randint(0, num_g, size=num_g) 32 | q_camids = np.random.randint(0, 5, size=num_q) 33 | g_camids = np.random.randint(0, 5, size=num_g) 34 | ''' 35 | 36 | # print('=> Using market1501\'s metric') 37 | # pytime = timeit.timeit( 38 | # 'evaluation.evaluate_rank(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_cython=False)', 39 | # setup=setup, 40 | # number=20 41 | # ) 42 | # cytime = timeit.timeit( 43 | # 'evaluation.evaluate_rank(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_cython=True)', 44 | # setup=setup, 45 | # number=20 46 | # ) 47 | # print('Python time: {} s'.format(pytime)) 48 | # print('Cython time: {} s'.format(cytime)) 49 | # print('Cython is {} times faster than python\n'.format(pytime / cytime)) 50 | # 51 | # print('=> Using cuhk03\'s metric') 52 | # pytime = timeit.timeit( 53 | # 'evaluation.evaluate_rank(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_metric_cuhk03=True, use_cython=False)', 54 | # setup=setup, 55 | # number=20 56 | # ) 57 | # cytime = timeit.timeit( 58 | # 'evaluation.evaluate_rank(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_metric_cuhk03=True, use_cython=True)', 59 | # setup=setup, 60 | # number=20 61 | # ) 62 | # print('Python time: {} s'.format(pytime)) 63 | # print('Cython time: {} s'.format(cytime)) 64 | # print('Cython is {} times faster than python\n'.format(pytime / cytime)) 65 | 66 | from fastreid.evaluation import evaluate_rank 67 | print("=> Check precision") 68 | num_q = 30 69 | num_g = 300 70 | max_rank = 5 71 | distmat = np.random.rand(num_q, num_g) * 20 72 | q_pids = np.random.randint(0, num_q, size=num_q) 73 | g_pids = np.random.randint(0, num_g, size=num_g) 74 | q_camids = np.random.randint(0, 5, size=num_q) 75 | g_camids = np.random.randint(0, 5, size=num_g) 76 | cmc, mAP, mINP = evaluate_rank(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_cython=False) 77 | print("Python:\nmAP = {} \ncmc = {}\nmINP = {}".format(mAP, cmc, mINP)) 78 | cmc, mAP, mINP = evaluate_rank(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_cython=True) 79 | print("Cython:\nmAP = {} \ncmc = {}\nmINP = {}".format(mAP, cmc, mINP)) 80 | 
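# A minimal sketch of an automated consistency check, assuming both code paths
# are importable; it compares the two results instead of eyeballing the output:
#   py = evaluate_rank(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_cython=False)
#   cy = evaluate_rank(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_cython=True)
#   for a, b in zip(py, cy):
#       np.testing.assert_allclose(a, b, rtol=1e-5)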
-------------------------------------------------------------------------------- /tools/deploy/caffe_inference.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: liaoxingyu5@jd.com 5 | """ 6 | 7 | import caffe 8 | import tqdm 9 | import glob 10 | import os 11 | import cv2 12 | import numpy as np 13 | 14 | caffe.set_mode_gpu() 15 | 16 | import argparse 17 | 18 | 19 | def get_parser(): 20 | parser = argparse.ArgumentParser(description="Caffe model inference") 21 | 22 | parser.add_argument( 23 | "--model-def", 24 | default="logs/test_caffe/baseline_R50.prototxt", 25 | help="caffe model prototxt" 26 | ) 27 | parser.add_argument( 28 | "--model-weights", 29 | default="logs/test_caffe/baseline_R50.caffemodel", 30 | help="caffe model weights" 31 | ) 32 | parser.add_argument( 33 | "--input", 34 | nargs="+", 35 | help="A list of space separated input images; " 36 | "or a single glob pattern such as 'directory/*.jpg'", 37 | ) 38 | parser.add_argument( 39 | "--output", 40 | default='caffe_output', 41 | help='path to save converted caffe model' 42 | ) 43 | parser.add_argument( 44 | "--height", 45 | type=int, 46 | default=384, 47 | help="height of image" 48 | ) 49 | parser.add_argument( 50 | "--width", 51 | type=int, 52 | default=128, 53 | help="width of image" 54 | ) 55 | return parser 56 | 57 | 58 | def preprocess(image_path, image_height, image_width): 59 | original_image = cv2.imread(image_path) 60 | # the model expects RGB inputs 61 | original_image = original_image[:, :, ::-1] 62 | 63 | # Apply pre-processing to image. 64 | image = cv2.resize(original_image, (image_width, image_height), interpolation=cv2.INTER_CUBIC) 65 | image = image.astype("float32").transpose(2, 0, 1)[np.newaxis] # (1, 3, h, w) 66 | image = (image - np.array([0.485 * 255, 0.456 * 255, 0.406 * 255]).reshape((1, -1, 1, 1))) / np.array( 67 | [0.229 * 255, 0.224 * 255, 0.225 * 255]).reshape((1, -1, 1, 1)) 68 | return image 69 | 70 | 71 | def normalize(nparray, order=2, axis=-1): 72 | """Normalize a N-D numpy array along the specified axis.""" 73 | norm = np.linalg.norm(nparray, ord=order, axis=axis, keepdims=True) 74 | return nparray / (norm + np.finfo(np.float32).eps) 75 | 76 | 77 | if __name__ == "__main__": 78 | args = get_parser().parse_args() 79 | 80 | net = caffe.Net(args.model_def, args.model_weights, caffe.TEST) 81 | net.blobs['blob1'].reshape(1, 3, args.height, args.width) 82 | 83 | if not os.path.exists(args.output): os.makedirs(args.output) 84 | 85 | if args.input: 86 | if os.path.isdir(args.input[0]): 87 | args.input = glob.glob(os.path.expanduser(args.input[0])) 88 | assert args.input, "The input path(s) was not found" 89 | for path in tqdm.tqdm(args.input): 90 | image = preprocess(path, args.height, args.width) 91 | net.blobs['blob1'].data[...] 
= image 92 | feat = net.forward()['output'] 93 | feat = normalize(feat[..., 0, 0], axis=1) 94 | np.save(os.path.join(args.output, path.replace('.jpg', '.npy').split('/')[-1]), feat) 95 | 96 | -------------------------------------------------------------------------------- /tools/train.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | """ 4 | @author: sherlock 5 | @contact: sherlockliao01@gmail.com 6 | """ 7 | 8 | import logging 9 | import os 10 | import sys 11 | 12 | sys.path.append('.') 13 | 14 | from fastreid.config import get_cfg 15 | from fastreid.engine import DefaultTrainer, default_argument_parser, default_setup, launch, Hazytrainer 16 | from fastreid.utils.checkpoint import Checkpointer 17 | from fastreid.engine import hooks 18 | from fastreid.evaluation import ReidEvaluator 19 | 20 | 21 | class H_Trainer(Hazytrainer): 22 | @classmethod 23 | def build_evaluator(cls, cfg, num_query, output_folder=None): 24 | if output_folder is None: 25 | output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") 26 | return ReidEvaluator(cfg, num_query) 27 | 28 | class BaseTrainer(DefaultTrainer): 29 | @classmethod 30 | def build_evaluator(cls, cfg, num_query, output_folder=None): 31 | if output_folder is None: 32 | output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") 33 | return ReidEvaluator(cfg, num_query) 34 | 35 | def setup(args): 36 | """ 37 | Create configs and perform basic setups. 38 | """ 39 | cfg = get_cfg() 40 | cfg.merge_from_file(args.config_file) 41 | cfg.merge_from_list(args.opts) 42 | cfg.freeze() 43 | default_setup(cfg, args) 44 | return cfg 45 | 46 | 47 | def main(args): 48 | cfg = setup(args) 49 | 50 | 51 | if args.eval_only: 52 | logger = logging.getLogger("fastreid.trainer") 53 | cfg.defrost() 54 | cfg.MODEL.BACKBONE.PRETRAIN = False 55 | model = H_Trainer.build_model(cfg) 56 | 57 | Checkpointer(model).load(cfg.MODEL.WEIGHTS) # load trained model 58 | 59 | if cfg.TEST.PRECISE_BN.ENABLED and hooks.get_bn_modules(model): 60 | prebn_cfg = cfg.clone() 61 | prebn_cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN 62 | prebn_cfg.DATASETS.NAMES = tuple([cfg.TEST.PRECISE_BN.DATASET]) # set dataset name for PreciseBN 63 | logger.info("Prepare precise BN dataset") 64 | hooks.PreciseBN( 65 | # Run at the same freq as (but before) evaluation. 
66 | model, 67 | # Build a new data loader to not affect training 68 | H_Trainer.build_train_loader(prebn_cfg), 69 | cfg.TEST.PRECISE_BN.NUM_ITER, 70 | ).update_stats() 71 | res = H_Trainer.test(cfg, model) 72 | return res 73 | 74 | trainer = H_Trainer(cfg) 75 | args.resume = True 76 | trainer.resume_or_load(resume=args.resume) 77 | # trainer.test(cfg, trainer.model) 78 | # Checkpointer().load(cfg.MODEL.WEIGHTS) 79 | return trainer.train() 80 | 81 | 82 | if __name__ == "__main__": 83 | args = default_argument_parser() 84 | args.add_argument("--info", type=str, default="Test", help="information of parameters and losses") 85 | args = args.parse_args() 86 | print("Command Line Args:", args) 87 | launch( 88 | main, 89 | args.num_gpus, 90 | num_machines=args.num_machines, 91 | machine_rank=args.machine_rank, 92 | dist_url=args.dist_url, 93 | args=(args,), 94 | ) 95 | 96 | -------------------------------------------------------------------------------- /fastreid/data/datasets/market1501.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import glob 8 | import os.path as osp 9 | import re 10 | import warnings 11 | 12 | from .bases import ImageDataset 13 | from ..datasets import DATASET_REGISTRY 14 | 15 | 16 | @DATASET_REGISTRY.register() 17 | class Market1501(ImageDataset): 18 | """Market1501. 19 | 20 | Reference: 21 | Zheng et al. Scalable Person Re-identification: A Benchmark. ICCV 2015. 22 | 23 | URL: ``_ 24 | 25 | Dataset statistics: 26 | - identities: 1501 (+1 for background). 27 | - images: 12936 (train) + 3368 (query) + 15913 (gallery). 28 | """ 29 | _junk_pids = [0, -1] 30 | dataset_dir = '' 31 | dataset_url = 'http://188.138.127.15:81/Datasets/Market-1501-v15.09.15.zip' 32 | dataset_name = "market1501" 33 | 34 | def __init__(self, root='', market1501_500k=False, **kwargs): 35 | # self.root = osp.abspath(osp.expanduser(root)) 36 | self.root = root 37 | self.dataset_dir = osp.join(self.root, self.dataset_dir) 38 | 39 | # allow alternative directory structure 40 | self.data_dir = self.dataset_dir 41 | data_dir = osp.join(self.data_dir, 'Market-1501-v15.09.15') 42 | if osp.isdir(data_dir): 43 | self.data_dir = data_dir 44 | else: 45 | warnings.warn('The current data structure is deprecated. 
Please ' 46 | 'put data folders such as "bounding_box_train" under ' 47 | '"Market-1501-v15.09.15".') 48 | 49 | self.train_dir = osp.join(self.data_dir, 'bounding_box_train') 50 | self.query_dir = osp.join(self.data_dir, 'query') 51 | self.gallery_dir = osp.join(self.data_dir, 'bounding_box_test') 52 | self.extra_gallery_dir = osp.join(self.data_dir, 'images') 53 | self.market1501_500k = market1501_500k 54 | 55 | required_files = [ 56 | self.data_dir, 57 | self.train_dir, 58 | self.query_dir, 59 | self.gallery_dir, 60 | ] 61 | if self.market1501_500k: 62 | required_files.append(self.extra_gallery_dir) 63 | self.check_before_run(required_files) 64 | 65 | train = self.process_dir(self.train_dir) 66 | query = self.process_dir(self.query_dir, is_train=False) 67 | gallery = self.process_dir(self.gallery_dir, is_train=False) 68 | if self.market1501_500k: 69 | gallery += self.process_dir(self.extra_gallery_dir, is_train=False) 70 | 71 | super(Market1501, self).__init__(train, query, gallery, **kwargs) 72 | 73 | def process_dir(self, dir_path, is_train=True): 74 | img_paths = glob.glob(osp.join(dir_path, '*.jpg')) 75 | pattern = re.compile(r'([-\d]+)_c(\d)') 76 | 77 | data = [] 78 | for img_path in img_paths: 79 | pid, camid = map(int, pattern.search(img_path).groups()) 80 | if pid == -1: 81 | continue # junk images are just ignored 82 | assert 0 <= pid <= 1501 # pid == 0 means background 83 | assert 1 <= camid <= 6 84 | camid -= 1 # index starts from 0 85 | if is_train: 86 | pid = self.dataset_name + "_" + str(pid) 87 | data.append((img_path, pid, camid)) 88 | 89 | return data 90 | -------------------------------------------------------------------------------- /fastreid/evaluation/rerank.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | 3 | # based on: 4 | # https://github.com/zhunzhong07/person-re-ranking 5 | 6 | __all__ = ['re_ranking'] 7 | 8 | import numpy as np 9 | 10 | 11 | def re_ranking(q_g_dist, q_q_dist, g_g_dist, k1: int = 20, k2: int = 6, lambda_value: float = 0.3): 12 | original_dist = np.concatenate( 13 | [np.concatenate([q_q_dist, q_g_dist], axis=1), 14 | np.concatenate([q_g_dist.T, g_g_dist], axis=1)], 15 | axis=0) 16 | original_dist = np.power(original_dist, 2).astype(np.float32) 17 | original_dist = np.transpose(1. * original_dist / np.max(original_dist, axis=0)) 18 | V = np.zeros_like(original_dist).astype(np.float32) 19 | initial_rank = np.argsort(original_dist).astype(np.int32) 20 | 21 | query_num = q_g_dist.shape[0] 22 | gallery_num = q_g_dist.shape[0] + q_g_dist.shape[1] 23 | all_num = gallery_num 24 | 25 | for i in range(all_num): 26 | # k-reciprocal neighbors 27 | forward_k_neigh_index = initial_rank[i, :k1 + 1] 28 | backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1] 29 | fi = np.where(backward_k_neigh_index == i)[0] 30 | k_reciprocal_index = forward_k_neigh_index[fi] 31 | k_reciprocal_expansion_index = k_reciprocal_index 32 | for j in range(len(k_reciprocal_index)): 33 | candidate = k_reciprocal_index[j] 34 | candidate_forward_k_neigh_index = initial_rank[candidate, 35 | :int(np.around(k1 / 2.)) + 1] 36 | candidate_backward_k_neigh_index = initial_rank[candidate_forward_k_neigh_index, 37 | :int(np.around(k1 / 2.)) + 1] 38 | fi_candidate = np.where(candidate_backward_k_neigh_index == candidate)[0] 39 | candidate_k_reciprocal_index = candidate_forward_k_neigh_index[fi_candidate] 40 | if len(np.intersect1d(candidate_k_reciprocal_index, k_reciprocal_index)) > 2. 
/ 3 * len(
41 |                     candidate_k_reciprocal_index):
42 |                 k_reciprocal_expansion_index = np.append(k_reciprocal_expansion_index, candidate_k_reciprocal_index)
43 |
44 |         k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)
45 |         weight = np.exp(-original_dist[i, k_reciprocal_expansion_index])
46 |         V[i, k_reciprocal_expansion_index] = 1. * weight / np.sum(weight)
47 |     original_dist = original_dist[:query_num, ]
48 |     if k2 != 1:
49 |         V_qe = np.zeros_like(V, dtype=np.float32)
50 |         for i in range(all_num):
51 |             V_qe[i, :] = np.mean(V[initial_rank[i, :k2], :], axis=0)
52 |         V = V_qe
53 |         del V_qe
54 |     del initial_rank
55 |     invIndex = []
56 |     for i in range(gallery_num):
57 |         invIndex.append(np.where(V[:, i] != 0)[0])
58 |
59 |     jaccard_dist = np.zeros_like(original_dist, dtype=np.float32)
60 |
61 |     for i in range(query_num):
62 |         temp_min = np.zeros(shape=[1, gallery_num], dtype=np.float32)
63 |         indNonZero = np.where(V[i, :] != 0)[0]
64 |         indImages = [invIndex[ind] for ind in indNonZero]
65 |         for j in range(len(indNonZero)):
66 |             temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + np.minimum(V[i, indNonZero[j]],
67 |                                                                                V[indImages[j], indNonZero[j]])
68 |         jaccard_dist[i] = 1 - temp_min / (2. - temp_min)
69 |
70 |     final_dist = jaccard_dist * (1 - lambda_value) + original_dist * lambda_value
71 |     del original_dist, V, jaccard_dist
72 |     final_dist = final_dist[:query_num, query_num:]
73 |     return final_dist
74 |
-------------------------------------------------------------------------------- /fastreid/solver/build.py: --------------------------------------------------------------------------------
1 | # encoding: utf-8
2 | """
3 | @author: liaoxingyu
4 | @contact: sherlockliao01@gmail.com
5 | """
6 |
7 | from . import lr_scheduler
8 | from . import optim
9 | from .lr_scheduler import Net_scheduler
10 |
11 |
12 | def build_optimizer(cfg, model, heads=None):
13 |     params = []
14 |     for key, value in model.named_parameters():
15 |         if not value.requires_grad:
16 |             continue
17 |         lr = cfg.SOLVER.BASE_LR
18 |         weight_decay = cfg.SOLVER.WEIGHT_DECAY
19 |         if "heads" in key:
20 |             lr *= cfg.SOLVER.HEADS_LR_FACTOR
21 |         if "bias" in key:
22 |             lr *= cfg.SOLVER.BIAS_LR_FACTOR
23 |             weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS
24 |         params += [{"name": key, "params": [value], "lr": lr, "weight_decay": weight_decay, "freeze": False}]
25 |
26 |     if heads:
27 |         for key, value in heads.named_parameters():
28 |             if not value.requires_grad:
29 |                 continue
30 |             lr = cfg.SOLVER.BASE_LR
31 |             weight_decay = cfg.SOLVER.WEIGHT_DECAY
32 |             if "heads" in key:
33 |                 lr *= cfg.SOLVER.HEADS_LR_FACTOR
34 |             if "bias" in key:
35 |                 lr *= cfg.SOLVER.BIAS_LR_FACTOR
36 |                 weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS
37 |             params += [{"name": key, "params": [value], "lr": lr, "weight_decay": weight_decay, "freeze": False}]
38 |
39 |     solver_opt = cfg.SOLVER.OPT
40 |     if hasattr(optim, solver_opt):
41 |         if solver_opt == "SGD":
42 |             opt_fns = getattr(optim, solver_opt)(params, momentum=cfg.SOLVER.MOMENTUM)
43 |         else:
44 |             opt_fns = getattr(optim, solver_opt)(params)
45 |     else:
46 |         raise NameError("optimizer {} not supported".format(cfg.SOLVER.OPT))
47 |     return opt_fns
48 |
49 |
50 | def build_dis_optimizer(cfg, model):
51 |     params = []
52 |     for key, value in model.named_parameters():
53 |         if not value.requires_grad:
54 |             continue
55 |         lr = cfg.SOLVER.D_BASE_LR
56 |         weight_decay = cfg.SOLVER.WEIGHT_DECAY
57 |         if "heads" in key:
58 |             lr *= cfg.SOLVER.HEADS_LR_FACTOR
59 |         if "bias" in key:
60 |             lr *= cfg.SOLVER.BIAS_LR_FACTOR
61 |             weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS
62 |         params += [{"name": key, "params": [value], "lr": lr, "weight_decay": weight_decay, "freeze": False}]
63 |
64 |     solver_opt = cfg.SOLVER.D_OPT
65 |     if hasattr(optim, solver_opt):
66 |         if solver_opt == "SGD":
67 |             opt_fns = getattr(optim, solver_opt)(params, momentum=cfg.SOLVER.MOMENTUM)
68 |         else:
69 |             opt_fns = getattr(optim, solver_opt)(params)
70 |     else:
71 |         raise NameError("optimizer {} not supported".format(solver_opt))  # report the D_OPT value that failed, not cfg.SOLVER.OPT
72 |     return opt_fns
73 |
74 |
75 | def build_lr_scheduler(cfg, optimizer):
76 |     scheduler_args = {
77 |         "optimizer": optimizer,
78 |
79 |         # warmup options
80 |         "warmup_factor": cfg.SOLVER.WARMUP_FACTOR,
81 |         "warmup_iters": cfg.SOLVER.WARMUP_ITERS,
82 |         "warmup_method": cfg.SOLVER.WARMUP_METHOD,
83 |
84 |         # multi-step lr scheduler options
85 |         "milestones": cfg.SOLVER.STEPS,
86 |         "gamma": cfg.SOLVER.GAMMA,
87 |
88 |         # cosine annealing lr scheduler options
89 |         "max_iters": cfg.SOLVER.MAX_ITER,
90 |         "delay_iters": cfg.SOLVER.DELAY_ITERS,
91 |         "eta_min_lr": cfg.SOLVER.ETA_MIN_LR,
92 |
93 |     }
94 |     return getattr(lr_scheduler, cfg.SOLVER.SCHED)(**scheduler_args)
95 |
96 | def build_dis_lr_scheduler(cfg, optimizer):
97 |
98 |     return Net_scheduler(optimizer, cfg)
99 |
-------------------------------------------------------------------------------- /fastreid/layers/splat.py: --------------------------------------------------------------------------------
1 | # encoding: utf-8
2 | """
3 | @author: xingyu liao
4 | @contact: liaoxingyu5@jd.com
5 | """
6 |
7 | import torch
8 | import torch.nn.functional as F
9 | from torch import nn
10 | from torch.nn import Conv2d, ReLU
11 | from torch.nn.modules.utils import _pair
12 | from fastreid.layers import get_norm
13 |
14 |
15 | class SplAtConv2d(nn.Module):
16 |     """Split-Attention Conv2d
17 |     """
18 |
19 |     def __init__(self, in_channels, channels, kernel_size, stride=(1, 1), padding=(0, 0),
20 |                  dilation=(1, 1), groups=1, bias=True,
21 |                  radix=2, reduction_factor=4,
22 |                  rectify=False, rectify_avg=False, norm_layer=None, num_splits=1,
23 |                  dropblock_prob=0.0, **kwargs):
24 |         super(SplAtConv2d, self).__init__()
25 |         padding = _pair(padding)
26 |         self.rectify = rectify and (padding[0] > 0 or padding[1] > 0)
27 |         self.rectify_avg = rectify_avg
28 |         inter_channels = max(in_channels * radix // reduction_factor, 32)
29 |         self.radix = radix
30 |         self.cardinality = groups
31 |         self.channels = channels
32 |         self.dropblock_prob = dropblock_prob
33 |         if self.rectify:
34 |             from rfconv import RFConv2d
35 |             self.conv = RFConv2d(in_channels, channels * radix, kernel_size, stride, padding, dilation,
36 |                                  groups=groups * radix, bias=bias, average_mode=rectify_avg, **kwargs)
37 |         else:
38 |             self.conv = Conv2d(in_channels, channels * radix, kernel_size, stride, padding, dilation,
39 |                                groups=groups * radix, bias=bias, **kwargs)
40 |         self.use_bn = norm_layer is not None
41 |         if self.use_bn:
42 |             self.bn0 = get_norm(norm_layer, channels * radix, num_splits)
43 |         self.relu = ReLU(inplace=True)
44 |         self.fc1 = Conv2d(channels, inter_channels, 1, groups=self.cardinality)
45 |         if self.use_bn:
46 |             self.bn1 = get_norm(norm_layer, inter_channels, num_splits)
47 |         self.fc2 = Conv2d(inter_channels, channels * radix, 1, groups=self.cardinality)
48 |
49 |         self.rsoftmax = rSoftMax(radix, groups)
50 |
51 |     def forward(self, x):
52 |         x = self.conv(x)
53 |         if self.use_bn:
54 |             x = self.bn0(x)
55 |         if self.dropblock_prob > 0.0:
56 |             x = self.dropblock(x)  # NOTE: self.dropblock is never defined in this class, so dropblock_prob must stay 0.0
57 |         x = self.relu(x)
58 |
59 |         batch, rchannel = x.shape[:2]
60 |         if self.radix > 1:
61 |             splited = torch.split(x, rchannel // self.radix, dim=1)
62 |             gap = sum(splited)
63 |         else:
64 |             gap = x
65 |         gap = F.adaptive_avg_pool2d(gap, 1)
66 |         gap = self.fc1(gap)
67 |
68 |         if self.use_bn:
69 |             gap = self.bn1(gap)
70 |         gap = self.relu(gap)
71 |
72 |         atten = self.fc2(gap)
73 |         atten = self.rsoftmax(atten).view(batch, -1, 1, 1)
74 |
75 |         if self.radix > 1:
76 |             attens = torch.split(atten, rchannel // self.radix, dim=1)
77 |             out = sum([att * split for (att, split) in zip(attens, splited)])
78 |         else:
79 |             out = atten * x
80 |         return out.contiguous()
81 |
82 |
83 | class rSoftMax(nn.Module):
84 |     def __init__(self, radix, cardinality):
85 |         super().__init__()
86 |         self.radix = radix
87 |         self.cardinality = cardinality
88 |
89 |     def forward(self, x):
90 |         batch = x.size(0)
91 |         if self.radix > 1:
92 |             x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2)
93 |             x = F.softmax(x, dim=1)
94 |             x = x.reshape(batch, -1)
95 |         else:
96 |             x = torch.sigmoid(x)
97 |         return x
98 |
-------------------------------------------------------------------------------- /tools/deploy/README.md: --------------------------------------------------------------------------------
1 | # Deployment
2 |
3 | This directory contains:
4 |
5 | 1. A script that converts a fastreid model to Caffe format.
6 |
7 | 2. An example that loads an R50 baseline model in Caffe and runs inference.
8 |
9 | ## Tutorial
10 |
11 | This is a tiny example for converting the fastreid baseline in `meta_arch` to a Caffe model. If you want to convert a more complex architecture, you will need to customize more of the steps below.
12 |
13 | 1. Change `preprocess_image` in `fastreid/modeling/meta_arch/baseline.py` as below
14 |
15 | ```python
16 | def preprocess_image(self, batched_inputs):
17 |     """
18 |     Normalize and batch the input images.
19 |     """
20 |     # images = [x["images"] for x in batched_inputs]
21 |     # images = batched_inputs["images"]
22 |     images = batched_inputs
23 |     images.sub_(self.pixel_mean).div_(self.pixel_std)
24 |     return images
25 | ```
26 |
27 | 2. Run `caffe_export.py` to get the converted Caffe model:
28 |
29 | ```bash
30 | python caffe_export.py --config-file "/export/home/lxy/fast-reid/logs/market1501/bagtricks_R50/config.yaml" --name "baseline_R50" --output "logs/caffe_model" --opts MODEL.WEIGHTS "/export/home/lxy/fast-reid/logs/market1501/bagtricks_R50/model_final.pth"
31 | ```
32 |
33 | Then you can check the Caffe model and prototxt in `logs/caffe_model`.
34 |
35 | 3. Change the `prototxt` with the following three steps:
36 |
37 | 1) Edit `max_pooling` in `baseline_R50.prototxt` like this
38 |
39 | ```prototxt
40 | layer {
41 |     name: "max_pool1"
42 |     type: "Pooling"
43 |     bottom: "relu_blob1"
44 |     top: "max_pool_blob1"
45 |     pooling_param {
46 |         pool: MAX
47 |         kernel_size: 3
48 |         stride: 2
49 |         pad: 0 # 1
50 |         # ceil_mode: false
51 |     }
52 | }
53 | ```
54 |
55 | 2) Add `avg_pooling` at the right place in `baseline_R50.prototxt`
56 |
57 | ```prototxt
58 | layer {
59 |     name: "avgpool1"
60 |     type: "Pooling"
61 |     bottom: "relu_blob49"
62 |     top: "avgpool_blob1"
63 |     pooling_param {
64 |         pool: AVE
65 |         global_pooling: true
66 |     }
67 | }
68 | ```
69 |
70 | 3) Change the last layer `top` name to `output`
71 |
72 | ```prototxt
73 | layer {
74 |     name: "bn_scale54"
75 |     type: "Scale"
76 |     bottom: "batch_norm_blob54"
77 |     top: "output" # bn_norm_blob54
78 |     scale_param {
79 |         bias_term: true
80 |     }
81 | }
82 | ```
83 |
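After hand-editing the `prototxt`, it is worth a quick sanity check that it still parses and that the renamed blob exists. A minimal sketch (editorial addition, assuming the paths produced in step 2):

```python
import caffe

# Loading the prototxt alone (no weights) is enough to catch syntax errors.
net = caffe.Net("logs/caffe_model/baseline_R50.prototxt", caffe.TEST)
assert "output" in net.blobs, "step 3) should rename the last top to 'output'"
print({name: blob.data.shape for name, blob in net.blobs.items()})
```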
84 | 4. (optional) You can open [Netscope](https://ethereon.github.io/netscope/quickstart.html), then enter your network `prototxt` to visualize the network.
85 |
86 | 5. Run `caffe_inference.py` to save the Caffe model's features for some input images
87 |
88 | ```bash
89 | python caffe_inference.py --model-def "logs/caffe_model/baseline_R50.prototxt" \
90 | --model-weights "logs/caffe_model/baseline_R50.caffemodel" \
91 | --input \
92 | '/export/home/DATA/Market-1501-v15.09.15/bounding_box_test/1182_c5s3_015240_04.jpg' \
93 | '/export/home/DATA/Market-1501-v15.09.15/bounding_box_test/1182_c6s3_038217_01.jpg' \
94 | '/export/home/DATA/Market-1501-v15.09.15/bounding_box_test/1183_c5s3_006943_05.jpg' \
95 | --output "caffe_R34_output"
96 | ```
97 |
98 | 6. Run `demo/demo.py` to get the fastreid model's features for the same input images, then compute the cosine similarity between the two models' features to verify that the Caffe model was converted successfully.
99 |
100 | ## Acknowledgements
101 |
102 | Thanks to [CPFLAME](https://github.com/CPFLAME), [gcong18](https://github.com/gcong18), [YuxiangJohn](https://github.com/YuxiangJohn) and [wiggin66](https://github.com/wiggin66) at the JDAI Model Acceleration Group for their help with PyTorch-to-Caffe model conversion.
103 |
-------------------------------------------------------------------------------- /fastreid/utils/precision_bn.py: --------------------------------------------------------------------------------
1 | # encoding: utf-8
2 | """
3 | @author: liaoxingyu
4 | @contact: sherlockliao01@gmail.com
5 | """
6 |
7 | import itertools
8 |
9 | import torch
10 |
11 | BN_MODULE_TYPES = (
12 |     torch.nn.BatchNorm1d,
13 |     torch.nn.BatchNorm2d,
14 |     torch.nn.BatchNorm3d,
15 |     torch.nn.SyncBatchNorm,
16 | )
17 |
18 |
19 | @torch.no_grad()
20 | def update_bn_stats(model, data_loader, num_iters: int = 200):
21 |     """
22 |     Recompute and update the batch norm stats to make them more precise. During
23 |     training both BN stats and the weights are changing after every iteration, so
24 |     the running average cannot precisely reflect the actual stats of the
25 |     current model.
26 |     In this function, the BN stats are recomputed with fixed weights, to make
27 |     the running average more precise. Specifically, it computes the true average
28 |     of per-batch mean/variance instead of the running average.
29 |     Args:
30 |         model (nn.Module): the model whose bn stats will be recomputed.
31 |             Note that:
32 |             1. This function will not alter the training mode of the given model.
33 |                Users are responsible for setting the layers that need
34 |                precise-BN to training mode, prior to calling this function.
35 |             2. Be careful if your models contain other stateful layers in
36 |                addition to BN, i.e. layers whose state can change in forward
37 |                iterations. This function will alter their state. If you wish
38 |                them unchanged, you need to either pass in a submodule without
39 |                those layers, or backup the states.
40 |         data_loader (iterator): an iterator. Produce data as inputs to the model.
41 |         num_iters (int): number of iterations to compute the stats.
42 |     """
43 |     bn_layers = get_bn_modules(model)
44 |     if len(bn_layers) == 0:
45 |         return
46 |
47 |     # In order to make the running stats only reflect the current batch, the
48 |     # momentum is disabled.
49 |     # bn.running_mean = (1 - momentum) * bn.running_mean + momentum * batch_mean
50 |     # Setting the momentum to 1.0 to compute the stats without momentum.
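# (Editorial note, an added clarification) With momentum = 1.0 the update above
# reduces to bn.running_mean = batch_mean, so after each forward pass the buffers
# hold pure per-batch statistics, which the loop below then averages itself.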
51 | momentum_actual = [bn.momentum for bn in bn_layers] 52 | for bn in bn_layers: 53 | bn.momentum = 1.0 54 | 55 | # Note that running_var actually means "running average of variance" 56 | running_mean = [torch.zeros_like(bn.running_mean) for bn in bn_layers] 57 | running_var = [torch.zeros_like(bn.running_var) for bn in bn_layers] 58 | 59 | for ind, inputs in enumerate(itertools.islice(data_loader, num_iters)): 60 | inputs['targets'].fill_(-1) 61 | with torch.no_grad(): # No need to backward 62 | model(inputs) 63 | for i, bn in enumerate(bn_layers): 64 | # Accumulates the bn stats. 65 | running_mean[i] += (bn.running_mean - running_mean[i]) / (ind + 1) 66 | running_var[i] += (bn.running_var - running_var[i]) / (ind + 1) 67 | # We compute the "average of variance" across iterations. 68 | assert ind == num_iters - 1, ( 69 | "update_bn_stats is meant to run for {} iterations, " 70 | "but the dataloader stops at {} iterations.".format(num_iters, ind) 71 | ) 72 | 73 | for i, bn in enumerate(bn_layers): 74 | # Sets the precise bn stats. 75 | bn.running_mean = running_mean[i] 76 | bn.running_var = running_var[i] 77 | bn.momentum = momentum_actual[i] 78 | 79 | 80 | def get_bn_modules(model): 81 | """ 82 | Find all BatchNorm (BN) modules that are in training mode. See 83 | fvcore.precise_bn.BN_MODULE_TYPES for a list of all modules that are 84 | included in this search. 85 | Args: 86 | model (nn.Module): a model possibly containing BN modules. 87 | Returns: 88 | list[nn.Module]: all BN modules in the model. 89 | """ 90 | # Finds all the bn layers. 91 | bn_layers = [ 92 | m for m in model.modules() if m.training and isinstance(m, BN_MODULE_TYPES) 93 | ] 94 | return bn_layers 95 | -------------------------------------------------------------------------------- /fastreid/data/datasets/hazy_market1501.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import glob 8 | import os.path as osp 9 | import re 10 | import warnings 11 | 12 | from .bases import ImageDataset, ImagePairDataset 13 | from ..datasets import DATASET_REGISTRY 14 | 15 | 16 | @DATASET_REGISTRY.register() 17 | class Hazy_Market1501(ImagePairDataset): 18 | """Market1501. 19 | 20 | Reference: 21 | Zheng et al. Scalable Person Re-identification: A Benchmark. ICCV 2015. 22 | 23 | URL: ``_ 24 | 25 | Dataset statistics: 26 | - identities: 1501 (+1 for background). 27 | - images: 12936 (train) + 3368 (query) + 15913 (gallery). 28 | """ 29 | _junk_pids = [0, -1] 30 | dataset_dir = 'Hazy_Market-1501-v15.09.15' 31 | dataset_url = 'http://188.138.127.15:81/Datasets/Market-1501-v15.09.15.zip' 32 | dataset_name = "market1501" 33 | 34 | def __init__(self, root='', market1501_500k=False, **kwargs): 35 | # self.root = osp.abspath(osp.expanduser(root)) 36 | self.root = root 37 | self.dataset_dir = osp.join(self.root, self.dataset_dir) 38 | 39 | # allow alternative directory structure 40 | self.data_dir = self.dataset_dir 41 | data_dir = osp.join(self.data_dir, 'Hazy_Market-1501-v15.09.15') 42 | if osp.isdir(data_dir): 43 | self.data_dir = data_dir 44 | else: 45 | warnings.warn('The current data structure is deprecated. 
Please '
46 |                           'put data folders such as "bounding_box_train" under '
47 |                           '"Hazy_Market-1501-v15.09.15".')  # the directory checked above, not the plain Market-1501 one
48 |
49 |         self.train_dir = osp.join(self.data_dir, 'bounding_box_train')
50 |         self.hazy_train_dir = osp.join(self.data_dir, 'hazy_bounding_box_train')
51 |         self.query_dir = osp.join(self.data_dir, 'query')
52 |         # self.hazy_query_dir = osp.join(self.data_dir, 'hazy_query')
53 |         self.gallery_dir = osp.join(self.data_dir, 'bounding_box_test')
54 |         # self.hazy_gallery_dir = osp.join(self.data_dir, 'hazy_bounding_box_test')
55 |         self.extra_gallery_dir = osp.join(self.data_dir, 'images')
56 |         self.market1501_500k = market1501_500k
57 |
58 |         required_files = [
59 |             self.data_dir,
60 |             self.train_dir,
61 |             self.hazy_train_dir,
62 |             self.query_dir,
63 |             # self.hazy_query_dir,
64 |             self.gallery_dir,
65 |             # self.hazy_gallery_dir,
66 |         ]
67 |         if self.market1501_500k:
68 |             required_files.append(self.extra_gallery_dir)
69 |         self.check_before_run(required_files)
70 |
71 |         train = self.process_dir(self.train_dir)
72 |         hazy_train = self.process_dir(self.hazy_train_dir)
73 |         query = self.process_dir(self.query_dir, is_train=False)
74 |         # hazy_query = self.process_dir(self.hazy_query_dir, is_train=False)
75 |         gallery = self.process_dir(self.gallery_dir, is_train=False)
76 |         # hazy_gallery = self.process_dir(self.hazy_gallery_dir, is_train=False)
77 |         if self.market1501_500k:
78 |             gallery += self.process_dir(self.extra_gallery_dir, is_train=False)
79 |
80 |         super(Hazy_Market1501, self).__init__(train, query, gallery, hazy_train, **kwargs)
81 |
82 |     def process_dir(self, dir_path, is_train=True):
83 |         img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
84 |         pattern = re.compile(r'([-\d]+)_c(\d)')
85 |
86 |         data = []
87 |         for img_path in img_paths:
88 |             pid, camid = map(int, pattern.search(img_path).groups())
89 |             if pid == -1:
90 |                 continue  # junk images are just ignored
91 |             assert 0 <= pid <= 1501  # pid == 0 means background
92 |             assert 1 <= camid <= 6
93 |             camid -= 1  # index starts from 0
94 |             if is_train:
95 |                 pid = self.dataset_name + "_" + str(pid)
96 |             data.append((img_path, pid, camid))
97 |
98 |         return data
99 |
-------------------------------------------------------------------------------- /fastreid/engine/launch.py: --------------------------------------------------------------------------------
1 | # encoding: utf-8
2 | """
3 | @author: xingyu liao
4 | @contact: liaoxingyu5@jd.com
5 | """
6 |
7 | # based on:
8 | # https://github.com/facebookresearch/detectron2/blob/master/detectron2/engine/launch.py
9 |
10 |
11 | import logging
12 |
13 | import torch
14 | import torch.distributed as dist
15 | import torch.multiprocessing as mp
16 |
17 | from fastreid.utils import comm
18 |
19 | __all__ = ["launch"]
20 |
21 |
22 | def _find_free_port():
23 |     import socket
24 |
25 |     sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
26 |     # Binding to port 0 will cause the OS to find an available port for us
27 |     sock.bind(("", 0))
28 |     port = sock.getsockname()[1]
29 |     sock.close()
30 |     # NOTE: there is still a chance the port could be taken by other processes.
31 |     return port
32 |
33 |
34 | def launch(main_func, num_gpus_per_machine, num_machines=1, machine_rank=0, dist_url=None, args=()):
35 |     """
36 |     Launch multi-gpu or distributed training.
37 |     This function must be called on all machines involved in the training.
38 |     It will spawn child processes (defined by ``num_gpus_per_machine``) on each machine.
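    Example (an editorial sketch mirroring the call at the bottom of tools/train.py):
        launch(main, args.num_gpus, num_machines=args.num_machines,
               machine_rank=args.machine_rank, dist_url=args.dist_url, args=(args,))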
39 | Args: 40 | main_func: a function that will be called by `main_func(*args)` 41 | num_gpus_per_machine (int): number of GPUs per machine 42 | num_machines (int): the total number of machines 43 | machine_rank (int): the rank of this machine 44 | dist_url (str): url to connect to for distributed jobs, including protocol 45 | e.g. "tcp://127.0.0.1:8686". 46 | Can be set to "auto" to automatically select a free port on localhost 47 | args (tuple): arguments passed to main_func 48 | """ 49 | world_size = num_machines * num_gpus_per_machine 50 | if world_size > 1: 51 | # https://github.com/pytorch/pytorch/pull/14391 52 | # TODO prctl in spawned processes 53 | 54 | if dist_url == "auto": 55 | assert num_machines == 1, "dist_url=auto not supported in multi-machine jobs." 56 | port = _find_free_port() 57 | dist_url = f"tcp://127.0.0.1:{port}" 58 | if num_machines > 1 and dist_url.startswith("file://"): 59 | logger = logging.getLogger(__name__) 60 | logger.warning( 61 | "file:// is not a reliable init_method in multi-machine jobs. Prefer tcp://" 62 | ) 63 | 64 | mp.spawn( 65 | _distributed_worker, 66 | nprocs=num_gpus_per_machine, 67 | args=(main_func, world_size, num_gpus_per_machine, machine_rank, dist_url, args), 68 | daemon=False, 69 | ) 70 | else: 71 | main_func(*args) 72 | 73 | 74 | def _distributed_worker( 75 | local_rank, main_func, world_size, num_gpus_per_machine, machine_rank, dist_url, args 76 | ): 77 | assert torch.cuda.is_available(), "cuda is not available. Please check your installation." 78 | global_rank = machine_rank * num_gpus_per_machine + local_rank 79 | try: 80 | dist.init_process_group( 81 | backend="NCCL", init_method=dist_url, world_size=world_size, rank=global_rank 82 | ) 83 | except Exception as e: 84 | logger = logging.getLogger(__name__) 85 | logger.error("Process group URL: {}".format(dist_url)) 86 | raise e 87 | # synchronize is needed here to prevent a possible timeout after calling init_process_group 88 | # See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172 89 | comm.synchronize() 90 | 91 | assert num_gpus_per_machine <= torch.cuda.device_count() 92 | torch.cuda.set_device(local_rank) 93 | 94 | # Setup the local process group (which contains ranks within the same machine) 95 | assert comm._LOCAL_PROCESS_GROUP is None 96 | num_machines = world_size // num_gpus_per_machine 97 | for i in range(num_machines): 98 | ranks_on_i = list(range(i * num_gpus_per_machine, (i + 1) * num_gpus_per_machine)) 99 | pg = dist.new_group(ranks_on_i) 100 | if i == machine_rank: 101 | comm._LOCAL_PROCESS_GROUP = pg 102 | 103 | main_func(*args) 104 | -------------------------------------------------------------------------------- /fastreid/utils/env.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | import importlib 3 | import importlib.util 4 | import logging 5 | import numpy as np 6 | import os 7 | import random 8 | import sys 9 | from datetime import datetime 10 | import torch 11 | 12 | __all__ = ["seed_all_rng"] 13 | 14 | 15 | TORCH_VERSION = tuple(int(x) for x in torch.__version__.split(".")[:2]) 16 | """ 17 | PyTorch version as a tuple of 2 ints. Useful for comparison. 18 | """ 19 | 20 | 21 | def seed_all_rng(seed=None): 22 | """ 23 | Set the random seed for the RNG in torch, numpy and python. 24 | Args: 25 | seed (int): if None, will use a strong random seed. 
26 | """ 27 | if seed is None: 28 | seed = ( 29 | os.getpid() 30 | + int(datetime.now().strftime("%S%f")) 31 | + int.from_bytes(os.urandom(2), "big") 32 | ) 33 | logger = logging.getLogger(__name__) 34 | logger.info("Using a generated random seed {}".format(seed)) 35 | np.random.seed(seed) 36 | torch.set_rng_state(torch.manual_seed(seed).get_state()) 37 | random.seed(seed) 38 | 39 | 40 | # from https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path 41 | def _import_file(module_name, file_path, make_importable=False): 42 | spec = importlib.util.spec_from_file_location(module_name, file_path) 43 | module = importlib.util.module_from_spec(spec) 44 | spec.loader.exec_module(module) 45 | if make_importable: 46 | sys.modules[module_name] = module 47 | return module 48 | 49 | 50 | def _configure_libraries(): 51 | """ 52 | Configurations for some libraries. 53 | """ 54 | # An environment option to disable `import cv2` globally, 55 | # in case it leads to negative performance impact 56 | disable_cv2 = int(os.environ.get("DETECTRON2_DISABLE_CV2", False)) 57 | if disable_cv2: 58 | sys.modules["cv2"] = None 59 | else: 60 | # Disable opencl in opencv since its interaction with cuda often has negative effects 61 | # This envvar is supported after OpenCV 3.4.0 62 | os.environ["OPENCV_OPENCL_RUNTIME"] = "disabled" 63 | try: 64 | import cv2 65 | 66 | if int(cv2.__version__.split(".")[0]) >= 3: 67 | cv2.ocl.setUseOpenCL(False) 68 | except ImportError: 69 | pass 70 | 71 | def get_version(module, digit=2): 72 | return tuple(map(int, module.__version__.split(".")[:digit])) 73 | 74 | # fmt: off 75 | assert get_version(torch) >= (1, 4), "Requires torch>=1.4" 76 | import yaml 77 | assert get_version(yaml) >= (5, 1), "Requires pyyaml>=5.1" 78 | # fmt: on 79 | 80 | 81 | _ENV_SETUP_DONE = False 82 | 83 | 84 | def setup_environment(): 85 | """Perform environment setup work. The default setup is a no-op, but this 86 | function allows the user to specify a Python source file or a module in 87 | the $DETECTRON2_ENV_MODULE environment variable, that performs 88 | custom setup work that may be necessary to their computing environment. 89 | """ 90 | global _ENV_SETUP_DONE 91 | if _ENV_SETUP_DONE: 92 | return 93 | _ENV_SETUP_DONE = True 94 | 95 | _configure_libraries() 96 | 97 | custom_module_path = os.environ.get("FASTREID_ENV_MODULE") 98 | 99 | if custom_module_path: 100 | setup_custom_environment(custom_module_path) 101 | else: 102 | # The default setup is a no-op 103 | pass 104 | 105 | 106 | def setup_custom_environment(custom_module): 107 | """ 108 | Load custom environment setup by importing a Python source file or a 109 | module, and run the setup function. 110 | """ 111 | if custom_module.endswith(".py"): 112 | module = _import_file("fastreid.utils.env.custom_module", custom_module) 113 | else: 114 | module = importlib.import_module(custom_module) 115 | assert hasattr(module, "setup_environment") and callable(module.setup_environment), ( 116 | "Custom environment module defined in {} does not have the " 117 | "required callable attribute 'setup_environment'." 
118 | ).format(custom_module) 119 | module.setup_environment() -------------------------------------------------------------------------------- /fastreid/data/datasets/msmt17.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: l1aoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import sys 8 | import os 9 | import os.path as osp 10 | 11 | from .bases import ImageDataset 12 | from ..datasets import DATASET_REGISTRY 13 | ##### Log ##### 14 | # 22.01.2019 15 | # - add v2 16 | # - v1 and v2 differ in dir names 17 | # - note that faces in v2 are blurred 18 | TRAIN_DIR_KEY = 'train_dir' 19 | TEST_DIR_KEY = 'test_dir' 20 | VERSION_DICT = { 21 | 'MSMT17_V1': { 22 | TRAIN_DIR_KEY: 'train', 23 | TEST_DIR_KEY: 'test', 24 | }, 25 | 'MSMT17_V2': { 26 | TRAIN_DIR_KEY: 'mask_train_v2', 27 | TEST_DIR_KEY: 'mask_test_v2', 28 | } 29 | } 30 | 31 | 32 | @DATASET_REGISTRY.register() 33 | class MSMT17(ImageDataset): 34 | """MSMT17. 35 | Reference: 36 | Wei et al. Person Transfer GAN to Bridge Domain Gap for Person Re-Identification. CVPR 2018. 37 | URL: ``_ 38 | 39 | Dataset statistics: 40 | - identities: 4101. 41 | - images: 32621 (train) + 11659 (query) + 82161 (gallery). 42 | - cameras: 15. 43 | """ 44 | # dataset_dir = 'MSMT17_V2' 45 | dataset_url = None 46 | dataset_name = 'msmt17' 47 | 48 | def __init__(self, root='datasets', **kwargs): 49 | self.root = root 50 | self.dataset_dir = self.root 51 | 52 | has_main_dir = False 53 | for main_dir in VERSION_DICT: 54 | if osp.exists(osp.join(self.dataset_dir, main_dir)): 55 | train_dir = VERSION_DICT[main_dir][TRAIN_DIR_KEY] 56 | test_dir = VERSION_DICT[main_dir][TEST_DIR_KEY] 57 | has_main_dir = True 58 | break 59 | assert has_main_dir, 'Dataset folder not found' 60 | 61 | self.train_dir = osp.join(self.dataset_dir, main_dir, train_dir) 62 | self.test_dir = osp.join(self.dataset_dir, main_dir, test_dir) 63 | self.list_train_path = osp.join(self.dataset_dir, main_dir, 'list_train.txt') 64 | self.list_val_path = osp.join(self.dataset_dir, main_dir, 'list_val.txt') 65 | self.list_query_path = osp.join(self.dataset_dir, main_dir, 'list_query.txt') 66 | self.list_gallery_path = osp.join(self.dataset_dir, main_dir, 'list_gallery.txt') 67 | 68 | required_files = [ 69 | self.dataset_dir, 70 | self.train_dir, 71 | self.test_dir 72 | ] 73 | self.check_before_run(required_files) 74 | 75 | train = self.process_dir(self.train_dir, self.list_train_path) 76 | val = self.process_dir(self.train_dir, self.list_val_path) 77 | query = self.process_dir(self.test_dir, self.list_query_path, is_train=False) 78 | gallery = self.process_dir(self.test_dir, self.list_gallery_path, is_train=False) 79 | 80 | num_train_pids = self.get_num_pids(train) 81 | query_tmp = [] 82 | for img_path, pid, camid in query: 83 | query_tmp.append((img_path, pid+num_train_pids, camid)) 84 | del query 85 | query = query_tmp 86 | 87 | gallery_temp = [] 88 | for img_path, pid, camid in gallery: 89 | gallery_temp.append((img_path, pid+num_train_pids, camid)) 90 | del gallery 91 | gallery = gallery_temp 92 | 93 | # Note: to fairly compare with published methods on the conventional ReID setting, 94 | # do not add val images to the training set. 
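# (Editorial note) 'combineall' arrives via **kwargs and defaults to off, so the
# val split is only folded into train when a caller explicitly asks for it.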
95 | if 'combineall' in kwargs and kwargs['combineall']: 96 | train += val 97 | 98 | super(MSMT17, self).__init__(train, query, gallery, **kwargs) 99 | 100 | def process_dir(self, dir_path, list_path, is_train=True): 101 | with open(list_path, 'r') as txt: 102 | lines = txt.readlines() 103 | 104 | data = [] 105 | 106 | for img_idx, img_info in enumerate(lines): 107 | img_path, pid = img_info.split(' ') 108 | pid = int(pid) # no need to relabel 109 | camid = int(img_path.split('_')[2]) - 1 # index starts from 0 110 | img_path = osp.join(dir_path, img_path) 111 | if is_train: 112 | pid = self.dataset_name + "_" + str(pid) 113 | data.append((img_path, pid, camid)) 114 | 115 | return data -------------------------------------------------------------------------------- /fastreid/modeling/losses/focal_loss.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import torch 8 | import torch.nn.functional as F 9 | 10 | 11 | # based on: 12 | # https://github.com/kornia/kornia/blob/master/kornia/losses/focal.py 13 | 14 | def focal_loss( 15 | input: torch.Tensor, 16 | target: torch.Tensor, 17 | alpha: float, 18 | gamma: float = 2.0, 19 | reduction: str = 'mean', ) -> torch.Tensor: 20 | r"""Function that computes Focal loss. 21 | See :class:`fastreid.modeling.losses.FocalLoss` for details. 22 | """ 23 | if not torch.is_tensor(input): 24 | raise TypeError("Input type is not a torch.Tensor. Got {}" 25 | .format(type(input))) 26 | 27 | if not len(input.shape) >= 2: 28 | raise ValueError("Invalid input shape, we expect BxCx*. Got: {}" 29 | .format(input.shape)) 30 | 31 | if input.size(0) != target.size(0): 32 | raise ValueError('Expected input batch_size ({}) to match target batch_size ({}).' 33 | .format(input.size(0), target.size(0))) 34 | 35 | n = input.size(0) 36 | out_size = (n,) + input.size()[2:] 37 | if target.size()[1:] != input.size()[2:]: 38 | raise ValueError('Expected target size {}, got {}'.format( 39 | out_size, target.size())) 40 | 41 | if not input.device == target.device: 42 | raise ValueError( 43 | "input and target must be in the same device. Got: {}".format( 44 | input.device, target.device)) 45 | 46 | # compute softmax over the classes axis 47 | input_soft = F.softmax(input, dim=1) 48 | 49 | # create the labels one hot tensor 50 | target_one_hot = F.one_hot(target, num_classes=input.shape[1]) 51 | 52 | # compute the actual focal loss 53 | weight = torch.pow(-input_soft + 1., gamma) 54 | 55 | focal = -alpha * weight * torch.log(input_soft) 56 | loss_tmp = torch.sum(target_one_hot * focal, dim=1) 57 | 58 | if reduction == 'none': 59 | loss = loss_tmp 60 | elif reduction == 'mean': 61 | loss = torch.mean(loss_tmp) 62 | elif reduction == 'sum': 63 | loss = torch.sum(loss_tmp) 64 | else: 65 | raise NotImplementedError("Invalid reduction mode: {}" 66 | .format(reduction)) 67 | return loss 68 | 69 | 70 | class FocalLoss(object): 71 | r"""Criterion that computes Focal loss. 72 | According to [1], the Focal loss is computed as follows: 73 | .. math:: 74 | \text{FL}(p_t) = -\alpha_t (1 - p_t)^{\gamma} \, \text{log}(p_t) 75 | where: 76 | - :math:`p_t` is the model's estimated probability for each class. 77 | Arguments: 78 | alpha (float): Weighting factor :math:`\alpha \in [0, 1]`. 79 | gamma (float): Focusing parameter :math:`\gamma >= 0`. 80 | reduction (str, optional): Specifies the reduction to apply to the 81 | output: ‘none’ | ‘mean’ | ‘sum’. 
‘none’: no reduction will be applied, 82 | ‘mean’: the sum of the output will be divided by the number of elements 83 | in the output, ‘sum’: the output will be summed. Default: ‘none’. 84 | Shape: 85 | - Input: :math:`(N, C, *)` where C = number of classes. 86 | - Target: :math:`(N, *)` where each value is 87 | :math:`0 ≤ targets[i] ≤ C−1`. 88 | Examples: 89 | >>> N = 5 # num_classes 90 | >>> loss = FocalLoss(cfg) 91 | >>> input = torch.randn(1, N, 3, 5, requires_grad=True) 92 | >>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N) 93 | >>> output = loss(input, target) 94 | >>> output.backward() 95 | References: 96 | [1] https://arxiv.org/abs/1708.02002 97 | """ 98 | 99 | # def __init__(self, alpha: float, gamma: float = 2.0, 100 | # reduction: str = 'none') -> None: 101 | def __init__(self, cfg): 102 | self._alpha: float = cfg.MODEL.LOSSES.FL.ALPHA 103 | self._gamma: float = cfg.MODEL.LOSSES.FL.GAMMA 104 | self._scale: float = cfg.MODEL.LOSSES.FL.SCALE 105 | 106 | def __call__(self, pred_class_logits: torch.Tensor, _, gt_classes: torch.Tensor) -> dict: 107 | loss = focal_loss(pred_class_logits, gt_classes, self._alpha, self._gamma) 108 | return { 109 | 'loss_focal': loss * self._scale, 110 | } 111 | -------------------------------------------------------------------------------- /fastreid/data/datasets/vehicleid.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: Jinkai Zheng 4 | @contact: 1315673509@qq.com 5 | """ 6 | 7 | import os.path as osp 8 | import random 9 | 10 | from .bases import ImageDataset 11 | from ..datasets import DATASET_REGISTRY 12 | 13 | 14 | @DATASET_REGISTRY.register() 15 | class VehicleID(ImageDataset): 16 | """VehicleID. 17 | 18 | Reference: 19 | Liu et al. Deep relative distance learning: Tell the difference between similar vehicles. CVPR 2016. 20 | 21 | URL: ``_ 22 | 23 | Train dataset statistics: 24 | - identities: 13164. 25 | - images: 113346. 
26 | """ 27 | dataset_dir = "vehicleid" 28 | dataset_name = "vehicleid" 29 | 30 | def __init__(self, root='datasets', test_list='', **kwargs): 31 | self.dataset_dir = osp.join(root, self.dataset_dir) 32 | 33 | self.image_dir = osp.join(self.dataset_dir, 'image') 34 | self.train_list = osp.join(self.dataset_dir, 'train_test_split/train_list.txt') 35 | if test_list: 36 | self.test_list = test_list 37 | else: 38 | self.test_list = osp.join(self.dataset_dir, 'train_test_split/test_list_13164.txt') 39 | 40 | required_files = [ 41 | self.dataset_dir, 42 | self.image_dir, 43 | self.train_list, 44 | self.test_list, 45 | ] 46 | self.check_before_run(required_files) 47 | 48 | train = self.process_dir(self.train_list, is_train=True) 49 | query, gallery = self.process_dir(self.test_list, is_train=False) 50 | 51 | super(VehicleID, self).__init__(train, query, gallery, **kwargs) 52 | 53 | def process_dir(self, list_file, is_train=True): 54 | img_list_lines = open(list_file, 'r').readlines() 55 | 56 | dataset = [] 57 | for idx, line in enumerate(img_list_lines): 58 | line = line.strip() 59 | vid = int(line.split(' ')[1]) 60 | imgid = line.split(' ')[0] 61 | img_path = osp.join(self.image_dir, imgid + '.jpg') 62 | if is_train: 63 | vid = self.dataset_name + "_" + str(vid) 64 | dataset.append((img_path, vid, int(imgid))) 65 | 66 | if is_train: return dataset 67 | else: 68 | random.shuffle(dataset) 69 | vid_container = set() 70 | query = [] 71 | gallery = [] 72 | for sample in dataset: 73 | if sample[1] not in vid_container: 74 | vid_container.add(sample[1]) 75 | gallery.append(sample) 76 | else: 77 | query.append(sample) 78 | 79 | return query, gallery 80 | 81 | 82 | @DATASET_REGISTRY.register() 83 | class SmallVehicleID(VehicleID): 84 | """VehicleID. 85 | Small test dataset statistics: 86 | - identities: 800. 87 | - images: 6493. 88 | """ 89 | 90 | def __init__(self, root='datasets', **kwargs): 91 | self.dataset_dir = osp.join(root, self.dataset_dir) 92 | self.test_list = osp.join(self.dataset_dir, 'train_test_split/test_list_800.txt') 93 | 94 | super(SmallVehicleID, self).__init__(root, self.test_list, **kwargs) 95 | 96 | 97 | @DATASET_REGISTRY.register() 98 | class MediumVehicleID(VehicleID): 99 | """VehicleID. 100 | Medium test dataset statistics: 101 | - identities: 1600. 102 | - images: 13377. 103 | """ 104 | 105 | def __init__(self, root='datasets', **kwargs): 106 | self.dataset_dir = osp.join(root, self.dataset_dir) 107 | self.test_list = osp.join(self.dataset_dir, 'train_test_split/test_list_1600.txt') 108 | 109 | super(MediumVehicleID, self).__init__(root, self.test_list, **kwargs) 110 | 111 | 112 | @DATASET_REGISTRY.register() 113 | class LargeVehicleID(VehicleID): 114 | """VehicleID. 115 | Large test dataset statistics: 116 | - identities: 2400. 117 | - images: 19777. 
118 | """ 119 | 120 | def __init__(self, root='datasets', **kwargs): 121 | self.dataset_dir = osp.join(root, self.dataset_dir) 122 | self.test_list = osp.join(self.dataset_dir, 'train_test_split/test_list_2400.txt') 123 | 124 | super(LargeVehicleID, self).__init__(root, self.test_list, **kwargs) 125 | -------------------------------------------------------------------------------- /fastreid/solver/optim/sgd.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: liaoxingyu5@jd.com 5 | """ 6 | 7 | 8 | import torch 9 | from torch.optim.optimizer import Optimizer, required 10 | 11 | 12 | class SGD(Optimizer): 13 | r"""Implements stochastic gradient descent (optionally with momentum). 14 | 15 | Nesterov momentum is based on the formula from 16 | `On the importance of initialization and momentum in deep learning`__. 17 | 18 | Args: 19 | params (iterable): iterable of parameters to optimize or dicts defining 20 | parameter groups 21 | lr (float): learning rate 22 | momentum (float, optional): momentum factor (default: 0) 23 | weight_decay (float, optional): weight decay (L2 penalty) (default: 0) 24 | dampening (float, optional): dampening for momentum (default: 0) 25 | nesterov (bool, optional): enables Nesterov momentum (default: False) 26 | 27 | Example: 28 | >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9) 29 | >>> optimizer.zero_grad() 30 | >>> loss_fn(model(input), target).backward() 31 | >>> optimizer.step() 32 | 33 | __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf 34 | 35 | .. note:: 36 | The implementation of SGD with Momentum/Nesterov subtly differs from 37 | Sutskever et. al. and implementations in some other frameworks. 38 | 39 | Considering the specific case of Momentum, the update can be written as 40 | 41 | .. math:: 42 | v = \rho * v + g \\ 43 | p = p - lr * v 44 | 45 | where p, g, v and :math:`\rho` denote the parameters, gradient, 46 | velocity, and momentum respectively. 47 | 48 | This is in contrast to Sutskever et. al. and 49 | other frameworks which employ an update of the form 50 | 51 | .. math:: 52 | v = \rho * v + lr * g \\ 53 | p = p - v 54 | 55 | The Nesterov version is analogously modified. 56 | """ 57 | 58 | def __init__(self, params, lr=required, momentum=0, dampening=0, 59 | weight_decay=0, nesterov=False): 60 | if lr is not required and lr < 0.0: 61 | raise ValueError("Invalid learning rate: {}".format(lr)) 62 | if momentum < 0.0: 63 | raise ValueError("Invalid momentum value: {}".format(momentum)) 64 | if weight_decay < 0.0: 65 | raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) 66 | 67 | defaults = dict(lr=lr, momentum=momentum, dampening=dampening, 68 | weight_decay=weight_decay, nesterov=nesterov) 69 | if nesterov and (momentum <= 0 or dampening != 0): 70 | raise ValueError("Nesterov momentum requires a momentum and zero dampening") 71 | super(SGD, self).__init__(params, defaults) 72 | 73 | def __setstate__(self, state): 74 | super(SGD, self).__setstate__(state) 75 | for group in self.param_groups: 76 | group.setdefault('nesterov', False) 77 | 78 | def step(self, closure=None): 79 | """Performs a single optimization step. 80 | 81 | Arguments: 82 | closure (callable, optional): A closure that reevaluates the model 83 | and returns the loss. 
84 | """ 85 | loss = None 86 | if closure is not None: 87 | loss = closure() 88 | 89 | for group in self.param_groups: 90 | weight_decay = group['weight_decay'] 91 | momentum = group['momentum'] 92 | dampening = group['dampening'] 93 | nesterov = group['nesterov'] 94 | 95 | for p in group['params']: 96 | if p.grad is None or group['freeze']: 97 | continue 98 | d_p = p.grad.data 99 | if weight_decay != 0: 100 | d_p.add_(weight_decay, p.data) 101 | if momentum != 0: 102 | param_state = self.state[p] 103 | if 'momentum_buffer' not in param_state: 104 | buf = param_state['momentum_buffer'] = torch.clone(d_p).detach() 105 | else: 106 | buf = param_state['momentum_buffer'] 107 | buf.mul_(momentum).add_(1 - dampening, d_p) 108 | if nesterov: 109 | d_p = d_p.add(momentum, buf) 110 | else: 111 | d_p = buf 112 | 113 | p.data.add_(-group['lr'], d_p) 114 | 115 | return loss -------------------------------------------------------------------------------- /fastreid/layers/context_block.py: -------------------------------------------------------------------------------- 1 | # copy from https://github.com/xvjiarui/GCNet/blob/master/mmdet/ops/gcb/context_block.py 2 | 3 | import torch 4 | from torch import nn 5 | 6 | __all__ = ['ContextBlock'] 7 | 8 | 9 | def last_zero_init(m): 10 | if isinstance(m, nn.Sequential): 11 | nn.init.constant_(m[-1].weight, val=0) 12 | if hasattr(m[-1], 'bias') and m[-1].bias is not None: 13 | nn.init.constant_(m[-1].bias, 0) 14 | else: 15 | nn.init.constant_(m.weight, val=0) 16 | if hasattr(m, 'bias') and m.bias is not None: 17 | nn.init.constant_(m.bias, 0) 18 | 19 | 20 | class ContextBlock(nn.Module): 21 | 22 | def __init__(self, 23 | inplanes, 24 | ratio, 25 | pooling_type='att', 26 | fusion_types=('channel_add',)): 27 | super(ContextBlock, self).__init__() 28 | assert pooling_type in ['avg', 'att'] 29 | assert isinstance(fusion_types, (list, tuple)) 30 | valid_fusion_types = ['channel_add', 'channel_mul'] 31 | assert all([f in valid_fusion_types for f in fusion_types]) 32 | assert len(fusion_types) > 0, 'at least one fusion should be used' 33 | self.inplanes = inplanes 34 | self.ratio = ratio 35 | self.planes = int(inplanes * ratio) 36 | self.pooling_type = pooling_type 37 | self.fusion_types = fusion_types 38 | if pooling_type == 'att': 39 | self.conv_mask = nn.Conv2d(inplanes, 1, kernel_size=1) 40 | self.softmax = nn.Softmax(dim=2) 41 | else: 42 | self.avg_pool = nn.AdaptiveAvgPool2d(1) 43 | if 'channel_add' in fusion_types: 44 | self.channel_add_conv = nn.Sequential( 45 | nn.Conv2d(self.inplanes, self.planes, kernel_size=1), 46 | nn.LayerNorm([self.planes, 1, 1]), 47 | nn.ReLU(inplace=True), # yapf: disable 48 | nn.Conv2d(self.planes, self.inplanes, kernel_size=1)) 49 | else: 50 | self.channel_add_conv = None 51 | if 'channel_mul' in fusion_types: 52 | self.channel_mul_conv = nn.Sequential( 53 | nn.Conv2d(self.inplanes, self.planes, kernel_size=1), 54 | nn.LayerNorm([self.planes, 1, 1]), 55 | nn.ReLU(inplace=True), # yapf: disable 56 | nn.Conv2d(self.planes, self.inplanes, kernel_size=1)) 57 | else: 58 | self.channel_mul_conv = None 59 | self.reset_parameters() 60 | 61 | def reset_parameters(self): 62 | if self.pooling_type == 'att': 63 | nn.init.kaiming_normal_(self.conv_mask.weight, a=0, mode='fan_in', nonlinearity='relu') 64 | if hasattr(self.conv_mask, 'bias') and self.conv_mask.bias is not None: 65 | nn.init.constant_(self.conv_mask.bias, 0) 66 | self.conv_mask.inited = True 67 | 68 | if self.channel_add_conv is not None: 69 | last_zero_init(self.channel_add_conv) 70 
| if self.channel_mul_conv is not None: 71 | last_zero_init(self.channel_mul_conv) 72 | 73 | def spatial_pool(self, x): 74 | batch, channel, height, width = x.size() 75 | if self.pooling_type == 'att': 76 | input_x = x 77 | # [N, C, H * W] 78 | input_x = input_x.view(batch, channel, height * width) 79 | # [N, 1, C, H * W] 80 | input_x = input_x.unsqueeze(1) 81 | # [N, 1, H, W] 82 | context_mask = self.conv_mask(x) 83 | # [N, 1, H * W] 84 | context_mask = context_mask.view(batch, 1, height * width) 85 | # [N, 1, H * W] 86 | context_mask = self.softmax(context_mask) 87 | # [N, 1, H * W, 1] 88 | context_mask = context_mask.unsqueeze(-1) 89 | # [N, 1, C, 1] 90 | context = torch.matmul(input_x, context_mask) 91 | # [N, C, 1, 1] 92 | context = context.view(batch, channel, 1, 1) 93 | else: 94 | # [N, C, 1, 1] 95 | context = self.avg_pool(x) 96 | 97 | return context 98 | 99 | def forward(self, x): 100 | # [N, C, 1, 1] 101 | context = self.spatial_pool(x) 102 | 103 | out = x 104 | if self.channel_mul_conv is not None: 105 | # [N, C, 1, 1] 106 | channel_mul_term = torch.sigmoid(self.channel_mul_conv(context)) 107 | out = out * channel_mul_term 108 | if self.channel_add_conv is not None: 109 | # [N, C, 1, 1] 110 | channel_add_term = self.channel_add_conv(context) 111 | out = out + channel_add_term 112 | 113 | return out 114 | -------------------------------------------------------------------------------- /demo/modeify-visualize_result.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import argparse 8 | import logging 9 | import sys 10 | 11 | import numpy as np 12 | import torch 13 | import tqdm 14 | from torch.backends import cudnn 15 | 16 | sys.path.append('.') 17 | 18 | from fastreid.evaluation import evaluate_rank 19 | from fastreid.config import get_cfg 20 | from fastreid.utils.logger import setup_logger 21 | from fastreid.data import build_reid_test_loader 22 | from predictor import FeatureExtractionDemo  # plain import: this file runs as a script, so a relative import would fail 23 | from fastreid.utils.visualizer import Visualizer 24 | 25 | cudnn.benchmark = True 26 | logger = logging.getLogger('fastreid.visualize_result') 27 | 28 | 29 | def setup_cfg(args): 30 | # load config from file and command-line arguments 31 | cfg = get_cfg() 32 | cfg.merge_from_file(args.config_file) 33 | cfg.merge_from_list(args.opts) 34 | cfg.freeze() 35 | return cfg 36 | 37 | 38 | def get_parser(): 39 | parser = argparse.ArgumentParser(description="Feature extraction with reid models") 40 | parser.add_argument( 41 | "--config-file", 42 | metavar="FILE", 43 | default='/home/pj/fast-reid-master/configs/Hazy_Market1501/load_bagtricks_R50.yml', 44 | help="path to config file", 45 | ) 46 | parser.add_argument( 47 | '--parallel', 48 | default=True, 49 | # action='store_true', 50 | help='whether to use multiprocessing for feature extraction.' 51 | ) 52 | parser.add_argument( 53 | "--dataset-name", 54 | default='Hazy_DukeMTMC', 55 | help="a test dataset name for visualizing ranking list."
56 | ) 57 | parser.add_argument( 58 | "--output", 59 | default="./vis_rank_list", 60 | help="a file or directory to save ranking list results.", 61 | 62 | ) 63 | parser.add_argument( 64 | "--vis-label", 65 | default=True, 66 | # action='store_true', 67 | help="whether to visualize the label of each query instance" 68 | ) 69 | parser.add_argument( 70 | "--num-vis", 71 | type=int, default=100, 72 | help="number of query images to be visualized", 73 | ) 74 | parser.add_argument( 75 | "--rank-sort", 76 | default="ascending", 77 | help="rank order of visualization images by AP metric", 78 | ) 79 | parser.add_argument( 80 | "--label-sort", 81 | default="ascending", 82 | help="label order of visualization images by cosine similarity metric", 83 | ) 84 | parser.add_argument( 85 | "--max-rank", 86 | type=int, default=10, 87 | help="maximum length of each rank list to be visualized", 88 | ) 89 | parser.add_argument( 90 | "--opts", 91 | help="Modify config options using command-line 'KEY VALUE' pairs", 92 | default=[], 93 | nargs=argparse.REMAINDER, 94 | ) 95 | return parser 96 | 97 | 98 | if __name__ == '__main__': 99 | args = get_parser().parse_args() 100 | logger = setup_logger() 101 | cfg = setup_cfg(args) 102 | test_loader, num_query = build_reid_test_loader(cfg, args.dataset_name) 103 | demo = FeatureExtractionDemo(cfg, parallel=args.parallel) 104 | 105 | logger.info("Start extracting image features") 106 | feats = [] 107 | pids = [] 108 | camids = [] 109 | for (feat, pid, camid) in tqdm.tqdm(demo.run_on_loader(test_loader), total=len(test_loader)): 110 | feats.append(feat) 111 | pids.extend(pid) 112 | camids.extend(camid) 113 | 114 | feats = torch.cat(feats, dim=0) 115 | q_feat = feats[:num_query] 116 | g_feat = feats[num_query:] 117 | q_pids = np.asarray(pids[:num_query]) 118 | g_pids = np.asarray(pids[num_query:]) 119 | q_camids = np.asarray(camids[:num_query]) 120 | g_camids = np.asarray(camids[num_query:]) 121 | 122 | # compute cosine distance 123 | distmat = 1 - torch.mm(q_feat, g_feat.t()) 124 | distmat = distmat.numpy() 125 | 126 | logger.info("Computing APs for all query images ...") 127 | cmc, all_ap, all_inp = evaluate_rank(distmat, q_feat, g_feat, q_pids, g_pids, q_camids, g_camids) 128 | 129 | visualizer = Visualizer(test_loader.dataset) 130 | visualizer.get_model_output(all_ap, distmat, q_pids, g_pids, q_camids, g_camids) 131 | 132 | logger.info("Saving ROC curve ...") 133 | fpr, tpr, pos, neg = visualizer.vis_roc_curve(args.output) 134 | visualizer.save_roc_info(args.output, fpr, tpr, pos, neg) 135 | 136 | logger.info("Saving rank list result ...") 137 | query_indices = visualizer.vis_rank_list(args.output, args.vis_label, args.num_vis, 138 | args.rank_sort, args.label_sort, args.max_rank) -------------------------------------------------------------------------------- /fastreid/utils/summary.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import torch 8 | import torch.nn as nn 9 | from torch.autograd import Variable 10 | 11 | from collections import OrderedDict 12 | import numpy as np 13 | 14 | 15 | def summary(model, input_size, batch_size=-1, device="cuda"): 16 | def register_hook(module): 17 | 18 | def hook(module, input, output): 19 | class_name = str(module.__class__).split(".")[-1].split("'")[0] 20 | module_idx = len(summary) 21 | 22 | m_key = "%s-%i" % (class_name, module_idx + 1) 23 | summary[m_key] = OrderedDict() 24 | summary[m_key]["input_shape"] =
list(input[0].size()) 25 | summary[m_key]["input_shape"][0] = batch_size 26 | if isinstance(output, (list, tuple)): 27 | summary[m_key]["output_shape"] = [ 28 | [-1] + list(o.size())[1:] for o in output 29 | ] 30 | else: 31 | summary[m_key]["output_shape"] = list(output.size()) 32 | summary[m_key]["output_shape"][0] = batch_size 33 | 34 | params = 0 35 | if hasattr(module, "weight") and hasattr(module.weight, "size"): 36 | params += torch.prod(torch.LongTensor(list(module.weight.size()))) 37 | summary[m_key]["trainable"] = module.weight.requires_grad 38 | if hasattr(module, "bias") and hasattr(module.bias, "size"): 39 | params += torch.prod(torch.LongTensor(list(module.bias.size()))) 40 | summary[m_key]["nb_params"] = params 41 | 42 | if ( 43 | not isinstance(module, nn.Sequential) 44 | and not isinstance(module, nn.ModuleList) 45 | and not (module == model) 46 | ): 47 | hooks.append(module.register_forward_hook(hook)) 48 | 49 | device = device.lower() 50 | assert device in [ 51 | "cuda", 52 | "cpu", 53 | ], "Input device is not valid, please specify 'cuda' or 'cpu'" 54 | 55 | if device == "cuda" and torch.cuda.is_available(): 56 | dtype = torch.cuda.FloatTensor 57 | else: 58 | dtype = torch.FloatTensor 59 | 60 | # multiple inputs to the network 61 | if isinstance(input_size, tuple): 62 | input_size = [input_size] 63 | 64 | # batch_size of 2 for batchnorm 65 | x = [torch.rand(2, *in_size).type(dtype) for in_size in input_size] 66 | # print(type(x[0])) 67 | 68 | # create properties 69 | summary = OrderedDict() 70 | hooks = [] 71 | 72 | # register hook 73 | model.apply(register_hook) 74 | 75 | # make a forward pass 76 | # print(x.shape) 77 | model(*x) 78 | 79 | # remove these hooks 80 | for h in hooks: 81 | h.remove() 82 | 83 | print("----------------------------------------------------------------") 84 | line_new = "{:>20} {:>25} {:>15}".format("Layer (type)", "Output Shape", "Param #") 85 | print(line_new) 86 | print("================================================================") 87 | total_params = 0 88 | total_output = 0 89 | trainable_params = 0 90 | for layer in summary: 91 | # input_shape, output_shape, trainable, nb_params 92 | line_new = "{:>20} {:>25} {:>15}".format( 93 | layer, 94 | str(summary[layer]["output_shape"]), 95 | "{0:,}".format(summary[layer]["nb_params"]), 96 | ) 97 | total_params += summary[layer]["nb_params"] 98 | total_output += np.prod(summary[layer]["output_shape"]) 99 | if "trainable" in summary[layer]: 100 | if summary[layer]["trainable"] == True: 101 | trainable_params += summary[layer]["nb_params"] 102 | print(line_new) 103 | 104 | # assume 4 bytes/number (float on cuda). 105 | total_input_size = abs(np.prod(input_size) * batch_size * 4. / (1024 ** 2.)) 106 | total_output_size = abs(2. * total_output * 4. / (1024 ** 2.)) # x2 for gradients 107 | total_params_size = abs(total_params.numpy() * 4. 
/ (1024 ** 2.)) 108 | total_size = total_params_size + total_output_size + total_input_size 109 | 110 | print("================================================================") 111 | print("Total params: {0:,}".format(total_params)) 112 | print("Trainable params: {0:,}".format(trainable_params)) 113 | print("Non-trainable params: {0:,}".format(total_params - trainable_params)) 114 | print("----------------------------------------------------------------") 115 | print("Input size (MB): %0.2f" % total_input_size) 116 | print("Forward/backward pass size (MB): %0.2f" % total_output_size) 117 | print("Params size (MB): %0.2f" % total_params_size) 118 | print("Estimated Total Size (MB): %0.2f" % total_size) 119 | print("----------------------------------------------------------------") 120 | # return summary 121 | -------------------------------------------------------------------------------- /fastreid/solver/optim/adam.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: liaoxingyu5@jd.com 5 | """ 6 | 7 | import torch 8 | import math 9 | from torch.optim.optimizer import Optimizer 10 | 11 | 12 | class Adam(Optimizer): 13 | r"""Implements Adam algorithm. 14 | 15 | It has been proposed in `Adam: A Method for Stochastic Optimization`_. 16 | 17 | Arguments: 18 | params (iterable): iterable of parameters to optimize or dicts defining 19 | parameter groups 20 | lr (float, optional): learning rate (default: 1e-3) 21 | betas (Tuple[float, float], optional): coefficients used for computing 22 | running averages of gradient and its square (default: (0.9, 0.999)) 23 | eps (float, optional): term added to the denominator to improve 24 | numerical stability (default: 1e-8) 25 | weight_decay (float, optional): weight decay (L2 penalty) (default: 0) 26 | amsgrad (boolean, optional): whether to use the AMSGrad variant of this 27 | algorithm from the paper `On the Convergence of Adam and Beyond`_ 28 | (default: False) 29 | 30 | .. _Adam\: A Method for Stochastic Optimization: 31 | https://arxiv.org/abs/1412.6980 32 | .. _On the Convergence of Adam and Beyond: 33 | https://openreview.net/forum?id=ryQu7f-RZ 34 | """ 35 | 36 | def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, 37 | weight_decay=0, amsgrad=False): 38 | if not 0.0 <= lr: 39 | raise ValueError("Invalid learning rate: {}".format(lr)) 40 | if not 0.0 <= eps: 41 | raise ValueError("Invalid epsilon value: {}".format(eps)) 42 | if not 0.0 <= betas[0] < 1.0: 43 | raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) 44 | if not 0.0 <= betas[1] < 1.0: 45 | raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) 46 | defaults = dict(lr=lr, betas=betas, eps=eps, 47 | weight_decay=weight_decay, amsgrad=amsgrad) 48 | super(Adam, self).__init__(params, defaults) 49 | 50 | def __setstate__(self, state): 51 | super(Adam, self).__setstate__(state) 52 | for group in self.param_groups: 53 | group.setdefault('amsgrad', False) 54 | 55 | def step(self, closure=None): 56 | """Performs a single optimization step. 57 | 58 | Arguments: 59 | closure (callable, optional): A closure that reevaluates the model 60 | and returns the loss. 
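            A minimal closure sketch (all names here are illustrative, not part
            of this repository); note each param group is also expected to carry
            a 'freeze' flag, since the loop below reads group['freeze']:

                def closure():
                    optimizer.zero_grad()
                    loss = loss_fn(model(inputs), targets)
                    loss.backward()
                    return loss

                loss = optimizer.step(closure)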
61 | """ 62 | loss = None 63 | if closure is not None: 64 | loss = closure() 65 | 66 | for group in self.param_groups: 67 | for p in group['params']: 68 | if p.grad is None or group['freeze']: 69 | continue 70 | grad = p.grad.data 71 | if grad.is_sparse: 72 | raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') 73 | amsgrad = group['amsgrad'] 74 | 75 | state = self.state[p] 76 | 77 | # State initialization 78 | if len(state) == 0: 79 | state['step'] = 0 80 | # Exponential moving average of gradient values 81 | state['exp_avg'] = torch.zeros_like(p.data) 82 | # Exponential moving average of squared gradient values 83 | state['exp_avg_sq'] = torch.zeros_like(p.data) 84 | if amsgrad: 85 | # Maintains max of all exp. moving avg. of sq. grad. values 86 | state['max_exp_avg_sq'] = torch.zeros_like(p.data) 87 | 88 | exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] 89 | if amsgrad: 90 | max_exp_avg_sq = state['max_exp_avg_sq'] 91 | beta1, beta2 = group['betas'] 92 | 93 | state['step'] += 1 94 | 95 | if group['weight_decay'] != 0: 96 | grad.add_(group['weight_decay'], p.data) 97 | 98 | # Decay the first and second moment running average coefficient 99 | exp_avg.mul_(beta1).add_(1 - beta1, grad) 100 | exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) 101 | if amsgrad: 102 | # Maintains the maximum of all 2nd moment running avg. till now 103 | torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) 104 | # Use the max. for normalizing running avg. of gradient 105 | denom = max_exp_avg_sq.sqrt().add_(group['eps']) 106 | else: 107 | denom = exp_avg_sq.sqrt().add_(group['eps']) 108 | 109 | bias_correction1 = 1 - beta1 ** state['step'] 110 | bias_correction2 = 1 - beta2 ** state['step'] 111 | step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1 112 | 113 | p.data.addcdiv_(-step_size, exp_avg, denom) 114 | 115 | return loss 116 | -------------------------------------------------------------------------------- /tools/deploy/Caffe/caffe_net.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from . import caffe_pb2 as pb 3 | import google.protobuf.text_format as text_format 4 | import numpy as np 5 | from .layer_param import Layer_param 6 | 7 | class _Net(object): 8 | def __init__(self): 9 | self.net=pb.NetParameter() 10 | 11 | def layer_index(self,layer_name): 12 | # find a layer's index by name. if the layer was found, return the layer position in the net, else return -1. 
13 | for i, layer in enumerate(self.net.layer): 14 | if layer.name == layer_name: 15 | return i 16 | return -1 17 | def add_layer(self,layer_params,before='',after=''): 18 | # find the position of the 'before' or 'after' layer 19 | index = -1 20 | if after != '': 21 | index = self.layer_index(after) + 1 22 | if before != '': 23 | index = self.layer_index(before) 24 | new_layer = pb.LayerParameter() 25 | new_layer.CopyFrom(layer_params.param) 26 | # insert the layer into the layer proto list 27 | if index != -1: 28 | self.net.layer.add() 29 | for i in range(len(self.net.layer) - 1, index, -1): 30 | self.net.layer[i].CopyFrom(self.net.layer[i - 1]) 31 | self.net.layer[index].CopyFrom(new_layer) 32 | else: 33 | self.net.layer.extend([new_layer]) 34 | 35 | def remove_layer_by_name(self,layer_name): 36 | for i,layer in enumerate(self.net.layer): 37 | if layer.name == layer_name: 38 | del self.net.layer[i] 39 | return 40 | raise AttributeError("cannot find layer %s" % str(layer_name)) 41 | 42 | def get_layer_by_name(self, layer_name): 43 | # get the layer by layer_name 44 | for layer in self.net.layer: 45 | if layer.name == layer_name: 46 | return layer 47 | raise AttributeError("cannot find layer %s" % str(layer_name)) 48 | 49 | def save_prototxt(self,path): 50 | prototxt=pb.NetParameter() 51 | prototxt.CopyFrom(self.net) 52 | for layer in prototxt.layer: 53 | del layer.blobs[:] 54 | with open(path,'w') as f: 55 | f.write(text_format.MessageToString(prototxt)) 56 | 57 | def layer(self,layer_name): 58 | return self.get_layer_by_name(layer_name) 59 | 60 | def layers(self): 61 | return list(self.net.layer) 62 | 63 | 64 | 65 | class Prototxt(_Net): 66 | def __init__(self,file_name=''): 67 | super(Prototxt,self).__init__() 68 | self.file_name=file_name 69 | if file_name!='': 70 | with open(file_name,'r') as f: 71 | text_format.Parse(f.read(), self.net) 72 | 73 | 74 | def init_caffemodel(self,caffe_cmd_path='caffe'): 75 | """ 76 | :param caffe_cmd_path: The shell command of caffe, normally at /build/tools/caffe 77 | """ 78 | s=pb.SolverParameter() 79 | s.train_net=self.file_name 80 | s.max_iter=0 81 | s.base_lr=1 82 | s.solver_mode = pb.SolverParameter.CPU 83 | s.snapshot_prefix='./nn' 84 | with open('/tmp/nn_tools_solver.prototxt','w') as f: 85 | f.write(str(s)) 86 | import os 87 | os.system('%s train --solver /tmp/nn_tools_solver.prototxt'%caffe_cmd_path) 88 | 89 | class Caffemodel(_Net): 90 | def __init__(self, file_name=''): 91 | super(Caffemodel,self).__init__() 92 | # load from an existing caffemodel file 93 | if file_name!='': 94 | f = open(file_name,'rb') 95 | self.net.ParseFromString(f.read()) 96 | f.close() 97 | 98 | def save(self, path): 99 | with open(path,'wb') as f: 100 | f.write(self.net.SerializeToString()) 101 | 102 | def add_layer_with_data(self,layer_params,datas, before='', after=''): 103 | """ 104 | Args: 105 | layer_params: a Layer_param object 106 | datas: a list of numpy arrays with fixed dimensions 107 | after: put the layer after a specified layer 108 | before: put the layer before a specified layer 109 | """ 110 | self.add_layer(layer_params,before,after) 111 | new_layer = self.layer(layer_params.name) 112 | 113 | # process blobs 114 | del new_layer.blobs[:] 115 | for data in datas: 116 | new_blob=new_layer.blobs.add() 117 | for dim in data.shape: 118 | new_blob.shape.dim.append(dim) 119 | new_blob.data.extend(data.flatten().astype(float)) 120 | 121 | def get_layer_data(self,layer_name): 122 | layer=self.layer(layer_name) 123 | datas=[] 124 | for blob in layer.blobs: 125 | shape=list(blob.shape.dim) 126 |
data=np.array(blob.data).reshape(shape) 127 | datas.append(data) 128 | return datas 129 | 130 | def set_layer_data(self,layer_name,datas): 131 | # datas is normally a list of [weights,bias] 132 | layer=self.layer(layer_name) 133 | for blob,data in zip(layer.blobs,datas): 134 | blob.data[:]=data.flatten() 135 | pass 136 | 137 | class Net(): 138 | def __init__(self,*args,**kwargs): 139 | raise(TypeError,'the class Net is no longer used, please use Caffemodel or Prototxt instead') -------------------------------------------------------------------------------- /fastreid/utils/collect_env.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: liaoxingyu5@jd.com 5 | """ 6 | 7 | # based on 8 | # https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/collect_env.py 9 | import importlib 10 | import os 11 | import re 12 | import subprocess 13 | import sys 14 | from collections import defaultdict 15 | 16 | import PIL 17 | import numpy as np 18 | import torch 19 | import torchvision 20 | from tabulate import tabulate 21 | 22 | __all__ = ["collect_env_info"] 23 | 24 | 25 | def collect_torch_env(): 26 | try: 27 | import torch.__config__ 28 | 29 | return torch.__config__.show() 30 | except ImportError: 31 | # compatible with older versions of pytorch 32 | from torch.utils.collect_env import get_pretty_env_info 33 | 34 | return get_pretty_env_info() 35 | 36 | 37 | def get_env_module(): 38 | var_name = "DETECTRON2_ENV_MODULE" 39 | return var_name, os.environ.get(var_name, "") 40 | 41 | 42 | def detect_compute_compatibility(CUDA_HOME, so_file): 43 | try: 44 | cuobjdump = os.path.join(CUDA_HOME, "bin", "cuobjdump") 45 | if os.path.isfile(cuobjdump): 46 | output = subprocess.check_output( 47 | "'{}' --list-elf '{}'".format(cuobjdump, so_file), shell=True 48 | ) 49 | output = output.decode("utf-8").strip().split("\n") 50 | sm = [] 51 | for line in output: 52 | line = re.findall(r"\.sm_[0-9]*\.", line)[0] 53 | sm.append(line.strip(".")) 54 | sm = sorted(set(sm)) 55 | return ", ".join(sm) 56 | else: 57 | return so_file + "; cannot find cuobjdump" 58 | except Exception: 59 | # unhandled failure 60 | return so_file 61 | 62 | 63 | def collect_env_info(): 64 | has_gpu = torch.cuda.is_available() # true for both CUDA & ROCM 65 | torch_version = torch.__version__ 66 | 67 | # NOTE: the use of CUDA_HOME and ROCM_HOME requires the CUDA/ROCM build deps, though in 68 | # theory detectron2 should be made runnable with only the corresponding runtimes 69 | from torch.utils.cpp_extension import CUDA_HOME 70 | 71 | has_rocm = False 72 | if tuple(map(int, torch_version.split(".")[:2])) >= (1, 5): 73 | from torch.utils.cpp_extension import ROCM_HOME 74 | 75 | if (getattr(torch.version, "hip", None) is not None) and (ROCM_HOME is not None): 76 | has_rocm = True 77 | has_cuda = has_gpu and (not has_rocm) 78 | 79 | data = [] 80 | data.append(("sys.platform", sys.platform)) 81 | data.append(("Python", sys.version.replace("\n", ""))) 82 | data.append(("numpy", np.__version__)) 83 | 84 | try: 85 | import fastreid # noqa 86 | 87 | data.append( 88 | ("fastreid", fastreid.__version__ + " @" + os.path.dirname(fastreid.__file__)) 89 | ) 90 | except ImportError: 91 | data.append(("fastreid", "failed to import")) 92 | 93 | data.append(get_env_module()) 94 | data.append(("PyTorch", torch_version + " @" + os.path.dirname(torch.__file__))) 95 | data.append(("PyTorch debug build", torch.version.debug)) 96 | 97 | 
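    # NOTE (added annotation): torch.version.hip is only set on ROCm builds, so
    # has_cuda above is true only for genuine CUDA devices; the ROCm case is
    # reported separately via ROCM_HOME below.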
data.append(("GPU available", has_gpu)) 98 | if has_gpu: 99 | devices = defaultdict(list) 100 | for k in range(torch.cuda.device_count()): 101 | devices[torch.cuda.get_device_name(k)].append(str(k)) 102 | for name, devids in devices.items(): 103 | data.append(("GPU " + ",".join(devids), name)) 104 | 105 | if has_rocm: 106 | data.append(("ROCM_HOME", str(ROCM_HOME))) 107 | else: 108 | data.append(("CUDA_HOME", str(CUDA_HOME))) 109 | 110 | cuda_arch_list = os.environ.get("TORCH_CUDA_ARCH_LIST", None) 111 | if cuda_arch_list: 112 | data.append(("TORCH_CUDA_ARCH_LIST", cuda_arch_list)) 113 | data.append(("Pillow", PIL.__version__)) 114 | 115 | try: 116 | data.append( 117 | ( 118 | "torchvision", 119 | str(torchvision.__version__) + " @" + os.path.dirname(torchvision.__file__), 120 | ) 121 | ) 122 | if has_cuda: 123 | try: 124 | torchvision_C = importlib.util.find_spec("torchvision._C").origin 125 | msg = detect_compute_compatibility(CUDA_HOME, torchvision_C) 126 | data.append(("torchvision arch flags", msg)) 127 | except ImportError: 128 | data.append(("torchvision._C", "failed to find")) 129 | except AttributeError: 130 | data.append(("torchvision", "unknown")) 131 | 132 | try: 133 | import fvcore 134 | 135 | data.append(("fvcore", fvcore.__version__)) 136 | except ImportError: 137 | pass 138 | 139 | try: 140 | import cv2 141 | 142 | data.append(("cv2", cv2.__version__)) 143 | except ImportError: 144 | pass 145 | env_str = tabulate(data) + "\n" 146 | env_str += collect_torch_env() 147 | return env_str 148 | 149 | 150 | if __name__ == "__main__": 151 | try: 152 | import detectron2 # noqa 153 | except ImportError: 154 | print(collect_env_info()) 155 | else: 156 | from fastreid.utils.collect_env import collect_env_info 157 | 158 | print(collect_env_info()) 159 | -------------------------------------------------------------------------------- /fastreid/data/datasets/veriwild.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: Jinkai Zheng 4 | @contact: 1315673509@qq.com 5 | """ 6 | 7 | import os.path as osp 8 | 9 | from .bases import ImageDataset 10 | from ..datasets import DATASET_REGISTRY 11 | 12 | 13 | @DATASET_REGISTRY.register() 14 | class VeRiWild(ImageDataset): 15 | """VeRi-Wild. 16 | 17 | Reference: 18 | Lou et al. A Large-Scale Dataset for Vehicle Re-Identification in the Wild. CVPR 2019. 19 | 20 | URL: ``_ 21 | 22 | Train dataset statistics: 23 | - identities: 30671. 24 | - images: 277797. 
25 | """ 26 | dataset_dir = "VERI-Wild" 27 | dataset_name = "veriwild" 28 | 29 | def __init__(self, root='datasets', query_list='', gallery_list='', **kwargs): 30 | self.dataset_dir = osp.join(root, self.dataset_dir) 31 | 32 | self.image_dir = osp.join(self.dataset_dir, 'images') 33 | self.train_list = osp.join(self.dataset_dir, 'train_test_split/train_list.txt') 34 | self.vehicle_info = osp.join(self.dataset_dir, 'train_test_split/vehicle_info.txt') 35 | if query_list and gallery_list: 36 | self.query_list = query_list 37 | self.gallery_list = gallery_list 38 | else: 39 | self.query_list = osp.join(self.dataset_dir, 'train_test_split/test_10000_query.txt') 40 | self.gallery_list = osp.join(self.dataset_dir, 'train_test_split/test_10000.txt') 41 | 42 | required_files = [ 43 | self.image_dir, 44 | self.train_list, 45 | self.query_list, 46 | self.gallery_list, 47 | self.vehicle_info, 48 | ] 49 | self.check_before_run(required_files) 50 | 51 | self.imgid2vid, self.imgid2camid, self.imgid2imgpath = self.process_vehicle(self.vehicle_info) 52 | 53 | train = self.process_dir(self.train_list) 54 | query = self.process_dir(self.query_list, is_train=False) 55 | gallery = self.process_dir(self.gallery_list, is_train=False) 56 | 57 | super(VeRiWild, self).__init__(train, query, gallery, **kwargs) 58 | 59 | def process_dir(self, img_list, is_train=True): 60 | img_list_lines = open(img_list, 'r').readlines() 61 | 62 | dataset = [] 63 | for idx, line in enumerate(img_list_lines): 64 | line = line.strip() 65 | vid = int(line.split('/')[0]) 66 | imgid = line.split('/')[1] 67 | if is_train: 68 | vid = self.dataset_name + "_" + str(vid) 69 | dataset.append((self.imgid2imgpath[imgid], vid, int(self.imgid2camid[imgid]))) 70 | 71 | assert len(dataset) == len(img_list_lines) 72 | return dataset 73 | 74 | def process_vehicle(self, vehicle_info): 75 | imgid2vid = {} 76 | imgid2camid = {} 77 | imgid2imgpath = {} 78 | vehicle_info_lines = open(vehicle_info, 'r').readlines() 79 | 80 | for idx, line in enumerate(vehicle_info_lines[1:]): 81 | vid = line.strip().split('/')[0] 82 | imgid = line.strip().split(';')[0].split('/')[1] 83 | camid = line.strip().split(';')[1] 84 | img_path = osp.join(self.image_dir, vid, imgid + '.jpg') 85 | imgid2vid[imgid] = vid 86 | imgid2camid[imgid] = camid 87 | imgid2imgpath[imgid] = img_path 88 | 89 | assert len(imgid2vid) == len(vehicle_info_lines) - 1 90 | return imgid2vid, imgid2camid, imgid2imgpath 91 | 92 | 93 | @DATASET_REGISTRY.register() 94 | class SmallVeRiWild(VeRiWild): 95 | """VeRi-Wild. 96 | Small test dataset statistics: 97 | - identities: 3000. 98 | - images: 41861. 99 | """ 100 | 101 | def __init__(self, root='datasets', **kwargs): 102 | self.dataset_dir = osp.join(root, self.dataset_dir) 103 | self.query_list = osp.join(self.dataset_dir, 'train_test_split/test_3000_query.txt') 104 | self.gallery_list = osp.join(self.dataset_dir, 'train_test_split/test_3000.txt') 105 | 106 | super(SmallVeRiWild, self).__init__(root, self.query_list, self.gallery_list, **kwargs) 107 | 108 | 109 | @DATASET_REGISTRY.register() 110 | class MediumVeRiWild(VeRiWild): 111 | """VeRi-Wild. 112 | Medium test dataset statistics: 113 | - identities: 5000. 114 | - images: 69389. 
115 | """ 116 | 117 | def __init__(self, root='datasets', **kwargs): 118 | self.dataset_dir = osp.join(root, self.dataset_dir) 119 | self.query_list = osp.join(self.dataset_dir, 'train_test_split/test_5000_query.txt') 120 | self.gallery_list = osp.join(self.dataset_dir, 'train_test_split/test_5000.txt') 121 | 122 | super(MediumVeRiWild, self).__init__(root, self.query_list, self.gallery_list, **kwargs) 123 | 124 | 125 | @DATASET_REGISTRY.register() 126 | class LargeVeRiWild(VeRiWild): 127 | """VeRi-Wild. 128 | Large test dataset statistics: 129 | - identities: 10000. 130 | - images: 138517. 131 | """ 132 | 133 | def __init__(self, root='datasets', **kwargs): 134 | self.dataset_dir = osp.join(root, self.dataset_dir) 135 | self.query_list = osp.join(self.dataset_dir, 'train_test_split/test_10000_query.txt') 136 | self.gallery_list = osp.join(self.dataset_dir, 'train_test_split/test_10000.txt') 137 | 138 | super(LargeVeRiWild, self).__init__(root, self.query_list, self.gallery_list, **kwargs) 139 | -------------------------------------------------------------------------------- /fastreid/solver/optim/lamb.py: -------------------------------------------------------------------------------- 1 | #### 2 | # CODE TAKEN FROM https://github.com/mgrankin/over9000 3 | #### 4 | 5 | import collections 6 | 7 | import torch 8 | from torch.optim.optimizer import Optimizer 9 | from torch.utils.tensorboard import SummaryWriter 10 | 11 | 12 | def log_lamb_rs(optimizer: Optimizer, event_writer: SummaryWriter, token_count: int): 13 | """Log a histogram of trust ratio scalars in across layers.""" 14 | results = collections.defaultdict(list) 15 | for group in optimizer.param_groups: 16 | for p in group['params']: 17 | state = optimizer.state[p] 18 | for i in ('weight_norm', 'adam_norm', 'trust_ratio'): 19 | if i in state: 20 | results[i].append(state[i]) 21 | 22 | for k, v in results.items(): 23 | event_writer.add_histogram(f'lamb/{k}', torch.tensor(v), token_count) 24 | 25 | 26 | class Lamb(Optimizer): 27 | r"""Implements Lamb algorithm. 28 | It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_. 29 | Arguments: 30 | params (iterable): iterable of parameters to optimize or dicts defining 31 | parameter groups 32 | lr (float, optional): learning rate (default: 1e-3) 33 | betas (Tuple[float, float], optional): coefficients used for computing 34 | running averages of gradient and its square (default: (0.9, 0.999)) 35 | eps (float, optional): term added to the denominator to improve 36 | numerical stability (default: 1e-8) 37 | weight_decay (float, optional): weight decay (L2 penalty) (default: 0) 38 | adam (bool, optional): always use trust ratio = 1, which turns this into 39 | Adam. Useful for comparison purposes. 40 | .. 
_Large Batch Optimization for Deep Learning: Training BERT in 76 minutes: 41 | https://arxiv.org/abs/1904.00962 42 | """ 43 | 44 | def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, 45 | weight_decay=0, adam=False): 46 | if not 0.0 <= lr: 47 | raise ValueError("Invalid learning rate: {}".format(lr)) 48 | if not 0.0 <= eps: 49 | raise ValueError("Invalid epsilon value: {}".format(eps)) 50 | if not 0.0 <= betas[0] < 1.0: 51 | raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) 52 | if not 0.0 <= betas[1] < 1.0: 53 | raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) 54 | defaults = dict(lr=lr, betas=betas, eps=eps, 55 | weight_decay=weight_decay) 56 | self.adam = adam 57 | super(Lamb, self).__init__(params, defaults) 58 | 59 | def step(self, closure=None): 60 | """Performs a single optimization step. 61 | Arguments: 62 | closure (callable, optional): A closure that reevaluates the model 63 | and returns the loss. 64 | """ 65 | loss = None 66 | if closure is not None: 67 | loss = closure() 68 | 69 | for group in self.param_groups: 70 | for p in group['params']: 71 | if p.grad is None or group['freeze']: 72 | continue 73 | grad = p.grad.data 74 | if grad.is_sparse: 75 | raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instad.') 76 | 77 | state = self.state[p] 78 | 79 | # State initialization 80 | if len(state) == 0: 81 | state['step'] = 0 82 | # Exponential moving average of gradient values 83 | state['exp_avg'] = torch.zeros_like(p.data) 84 | # Exponential moving average of squared gradient values 85 | state['exp_avg_sq'] = torch.zeros_like(p.data) 86 | 87 | exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] 88 | beta1, beta2 = group['betas'] 89 | 90 | state['step'] += 1 91 | 92 | # Decay the first and second moment running average coefficient 93 | # m_t 94 | exp_avg.mul_(beta1).add_(1 - beta1, grad) 95 | # v_t 96 | exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) 97 | 98 | # Paper v3 does not use debiasing. 99 | # bias_correction1 = 1 - beta1 ** state['step'] 100 | # bias_correction2 = 1 - beta2 ** state['step'] 101 | # Apply bias to lr to avoid broadcast. 102 | step_size = group['lr'] # * math.sqrt(bias_correction2) / bias_correction1 103 | 104 | weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10) 105 | 106 | adam_step = exp_avg / exp_avg_sq.sqrt().add(group['eps']) 107 | if group['weight_decay'] != 0: 108 | adam_step.add_(group['weight_decay'], p.data) 109 | 110 | adam_norm = adam_step.pow(2).sum().sqrt() 111 | if weight_norm == 0 or adam_norm == 0: 112 | trust_ratio = 1 113 | else: 114 | trust_ratio = weight_norm / adam_norm 115 | state['weight_norm'] = weight_norm 116 | state['adam_norm'] = adam_norm 117 | state['trust_ratio'] = trust_ratio 118 | if self.adam: 119 | trust_ratio = 1 120 | 121 | p.data.add_(-step_size * trust_ratio, adam_step) 122 | 123 | return loss 124 | -------------------------------------------------------------------------------- /fastreid/data/build.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: l1aoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import os 8 | import torch 9 | from torch._six import container_abcs, string_classes, int_classes 10 | from torch.utils.data import DataLoader 11 | from fastreid.utils import comm 12 | 13 | from . 
import samplers 14 | from .common import CommDataset, PairDataset 15 | from .datasets import DATASET_REGISTRY 16 | from .transforms import build_transforms 17 | 18 | _root = os.getenv("FASTREID_DATASETS", "datasets") 19 | 20 | 21 | def build_reid_paired_train_loader(cfg, target=False): 22 | train_transforms = build_transforms(cfg, is_train=True) 23 | 24 | train_items = list() 25 | hazy_items = list() 26 | if target: 27 | for d in cfg.TDATASETS.NAMES: 28 | dataset = DATASET_REGISTRY.get(d)(root=_root, combineall=cfg.DATASETS.COMBINEALL) 29 | if comm.is_main_process(): 30 | dataset.show_train() 31 | train_items.extend(dataset.train) 32 | hazy_items.extend(dataset.hazy_train) 33 | elif not target: 34 | for d in cfg.DATASETS.NAMES: 35 | dataset = DATASET_REGISTRY.get(d)(root=_root, combineall=cfg.DATASETS.COMBINEALL) 36 | if comm.is_main_process(): 37 | dataset.show_train() 38 | train_items.extend(dataset.train) 39 | hazy_items.extend(dataset.hazy_train) 40 | 41 | train_set = PairDataset(train_items, hazy_items, train_transforms, relabel=True) 42 | 43 | num_workers = cfg.DATALOADER.NUM_WORKERS 44 | mini_batch_size = cfg.SOLVER.IMS_PER_BATCH 45 | num_instance = cfg.DATALOADER.NUM_INSTANCE 46 | global_batch_size = mini_batch_size * comm.get_world_size() 47 | 48 | if cfg.DATALOADER.PK_SAMPLER: 49 | if cfg.DATALOADER.NAIVE_WAY: 50 | data_sampler = samplers.NaiveIdentitySampler(train_set.img_items, 51 | global_batch_size, num_instance) 52 | else: 53 | data_sampler = samplers.BalancedIdentitySampler(train_set.img_items, 54 | global_batch_size, num_instance) 55 | else: 56 | data_sampler = samplers.TrainingSampler(len(train_set)) 57 | batch_sampler = torch.utils.data.sampler.BatchSampler(data_sampler, mini_batch_size, True) 58 | 59 | train_loader = torch.utils.data.DataLoader( 60 | train_set, 61 | num_workers=num_workers, 62 | batch_sampler=batch_sampler, 63 | collate_fn=fast_batch_collator, 64 | ) 65 | return train_loader 66 | 67 | def build_reid_train_loader(cfg): 68 | train_transforms = build_transforms(cfg, is_train=True) 69 | 70 | train_items = list() 71 | for d in cfg.DATASETS.NAMES: 72 | dataset = DATASET_REGISTRY.get(d)(root=_root, combineall=cfg.DATASETS.COMBINEALL) 73 | if comm.is_main_process(): 74 | dataset.show_train() 75 | train_items.extend(dataset.train) 76 | 77 | train_set = CommDataset(train_items, train_transforms, relabel=True) 78 | 79 | num_workers = cfg.DATALOADER.NUM_WORKERS 80 | num_instance = cfg.DATALOADER.NUM_INSTANCE 81 | mini_batch_size = cfg.SOLVER.IMS_PER_BATCH // comm.get_world_size() 82 | 83 | if cfg.DATALOADER.PK_SAMPLER: 84 | if cfg.DATALOADER.NAIVE_WAY: 85 | data_sampler = samplers.NaiveIdentitySampler(train_set.img_items, 86 | cfg.SOLVER.IMS_PER_BATCH, num_instance) 87 | else: 88 | data_sampler = samplers.BalancedIdentitySampler(train_set.img_items, 89 | cfg.SOLVER.IMS_PER_BATCH, num_instance) 90 | else: 91 | data_sampler = samplers.TrainingSampler(len(train_set)) 92 | batch_sampler = torch.utils.data.sampler.BatchSampler(data_sampler, mini_batch_size, True) 93 | 94 | train_loader = torch.utils.data.DataLoader( 95 | train_set, 96 | num_workers=num_workers, 97 | batch_sampler=batch_sampler, 98 | collate_fn=fast_batch_collator, 99 | ) 100 | return train_loader 101 | 102 | 103 | def build_reid_test_loader(cfg, dataset_name): 104 | test_transforms = build_transforms(cfg, is_train=False) 105 | 106 | dataset = DATASET_REGISTRY.get(dataset_name)(root=_root) 107 | if comm.is_main_process(): 108 | dataset.show_test() 109 | test_items = dataset.query + dataset.gallery 110 | 
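    # Query items are deliberately placed before gallery items: together with the
    # returned len(dataset.query), callers can split the extracted feature matrix
    # as feats[:num_query] / feats[num_query:] (see demo/modeify-visualize_result.py).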
111 | test_set = CommDataset(test_items, test_transforms, relabel=False) 112 | 113 | batch_size = cfg.TEST.IMS_PER_BATCH 114 | data_sampler = samplers.InferenceSampler(len(test_set)) 115 | batch_sampler = torch.utils.data.BatchSampler(data_sampler, batch_size, False) 116 | test_loader = DataLoader( 117 | test_set, 118 | batch_sampler=batch_sampler, 119 | num_workers=4, # save some memory 120 | collate_fn=fast_batch_collator) 121 | return test_loader, len(dataset.query) 122 | 123 | 124 | def trivial_batch_collator(batch): 125 | """ 126 | A batch collator that does nothing. 127 | """ 128 | return batch 129 | 130 | 131 | def fast_batch_collator(batched_inputs): 132 | """ 133 | A simple batch collator for most common reid tasks 134 | """ 135 | elem = batched_inputs[0] 136 | if isinstance(elem, torch.Tensor): 137 | out = torch.zeros((len(batched_inputs), *elem.size()), dtype=elem.dtype) 138 | for i, tensor in enumerate(batched_inputs): 139 | out[i] += tensor 140 | return out 141 | 142 | elif isinstance(elem, container_abcs.Mapping): 143 | return {key: fast_batch_collator([d[key] for d in batched_inputs]) for key in elem} 144 | 145 | elif isinstance(elem, float): 146 | return torch.tensor(batched_inputs, dtype=torch.float64) 147 | elif isinstance(elem, int_classes): 148 | return torch.tensor(batched_inputs) 149 | elif isinstance(elem, string_classes): 150 | return batched_inputs 151 | -------------------------------------------------------------------------------- /demo/predictor.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import atexit 8 | import bisect 9 | 10 | import cv2 11 | import torch 12 | import torch.multiprocessing as mp 13 | from collections import deque 14 | 15 | from fastreid.engine import DefaultPredictor 16 | 17 | try: 18 | mp.set_start_method('spawn') 19 | except RuntimeError: 20 | pass 21 | 22 | 23 | class FeatureExtractionDemo(object): 24 | def __init__(self, cfg, parallel=False): 25 | """ 26 | Args: 27 | cfg (CfgNode): 28 | parallel (bool) whether to run the model in different processes from visualization.: 29 | Useful since the visualization logic can be slow. 30 | """ 31 | self.cfg = cfg 32 | self.parallel = parallel 33 | 34 | if parallel: 35 | self.num_gpus = torch.cuda.device_count() 36 | self.predictor = AsyncPredictor(cfg, self.num_gpus) 37 | else: 38 | self.predictor = DefaultPredictor(cfg) 39 | 40 | def run_on_image(self, original_image): 41 | """ 42 | Args: 43 | original_image (np.ndarray): an image of shape (H, W, C) (in BGR order). 44 | This is the format used by OpenCV. 45 | Returns: 46 | predictions (np.ndarray): normalized feature of the model. 47 | """ 48 | # the model expects RGB inputs 49 | original_image = original_image[:, :, ::-1] 50 | # Apply pre-processing to image. 
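        # cfg.INPUT.SIZE_TEST is given as (height, width), while cv2.resize
        # expects (width, height); hence the [::-1] reversal below.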
51 | image = cv2.resize(original_image, tuple(self.cfg.INPUT.SIZE_TEST[::-1]), interpolation=cv2.INTER_CUBIC) 52 | # Make shape with a new batch dimension which is adapted for 53 | # network input 54 | image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))[None] 55 | predictions = self.predictor(image) 56 | return predictions 57 | 58 | def run_on_loader(self, data_loader): 59 | if self.parallel: 60 | buffer_size = self.predictor.default_buffer_size 61 | 62 | batch_data = deque() 63 | 64 | for cnt, batch in enumerate(data_loader): 65 | batch_data.append(batch) 66 | self.predictor.put(batch["images"]) 67 | 68 | if cnt >= buffer_size: 69 | batch = batch_data.popleft() 70 | predictions = self.predictor.get() 71 | yield predictions, batch["targets"].numpy(), batch["camid"].numpy() 72 | 73 | while len(batch_data): 74 | batch = batch_data.popleft() 75 | predictions = self.predictor.get() 76 | yield predictions, batch["targets"].numpy(), batch["camid"].numpy() 77 | else: 78 | for batch in data_loader: 79 | predictions = self.predictor(batch["images"]) 80 | yield predictions, batch["targets"].numpy(), batch["camid"].numpy() 81 | 82 | 83 | class AsyncPredictor: 84 | """ 85 | A predictor that runs the model asynchronously, possibly on >1 GPUs. 86 | Because when the amount of data is large. 87 | """ 88 | 89 | class _StopToken: 90 | pass 91 | 92 | class _PredictWorker(mp.Process): 93 | def __init__(self, cfg, task_queue, result_queue): 94 | self.cfg = cfg 95 | self.task_queue = task_queue 96 | self.result_queue = result_queue 97 | super().__init__() 98 | 99 | def run(self): 100 | predictor = DefaultPredictor(self.cfg) 101 | 102 | while True: 103 | task = self.task_queue.get() 104 | if isinstance(task, AsyncPredictor._StopToken): 105 | break 106 | idx, data = task 107 | result = predictor(data) 108 | self.result_queue.put((idx, result)) 109 | 110 | def __init__(self, cfg, num_gpus: int = 1): 111 | """ 112 | Args: 113 | cfg (CfgNode): 114 | num_gpus (int): if 0, will run on CPU 115 | """ 116 | num_workers = max(num_gpus, 1) 117 | self.task_queue = mp.Queue(maxsize=num_workers * 3) 118 | self.result_queue = mp.Queue(maxsize=num_workers * 3) 119 | self.procs = [] 120 | for gpuid in range(max(num_gpus, 1)): 121 | cfg = cfg.clone() 122 | cfg.defrost() 123 | cfg.MODEL.DEVICE = "cuda:{}".format(gpuid) if num_gpus > 0 else "cpu" 124 | self.procs.append( 125 | AsyncPredictor._PredictWorker(cfg, self.task_queue, self.result_queue) 126 | ) 127 | 128 | self.put_idx = 0 129 | self.get_idx = 0 130 | self.result_rank = [] 131 | self.result_data = [] 132 | 133 | for p in self.procs: 134 | p.start() 135 | 136 | atexit.register(self.shutdown) 137 | 138 | def put(self, image): 139 | self.put_idx += 1 140 | self.task_queue.put((self.put_idx, image)) 141 | 142 | def get(self): 143 | self.get_idx += 1 144 | if len(self.result_rank) and self.result_rank[0] == self.get_idx: 145 | res = self.result_data[0] 146 | del self.result_data[0], self.result_rank[0] 147 | return res 148 | 149 | while True: 150 | # Make sure the results are returned in the correct order 151 | idx, res = self.result_queue.get() 152 | if idx == self.get_idx: 153 | return res 154 | insert = bisect.bisect(self.result_rank, idx) 155 | self.result_rank.insert(insert, idx) 156 | self.result_data.insert(insert, res) 157 | 158 | def __len__(self): 159 | return self.put_idx - self.get_idx 160 | 161 | def __call__(self, image): 162 | self.put(image) 163 | return self.get() 164 | 165 | def shutdown(self): 166 | for _ in self.procs: 167 | 
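            # Send one stop token per worker process; each _PredictWorker breaks
            # out of its run() loop after consuming a single token.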
self.task_queue.put(AsyncPredictor._StopToken()) 168 | 169 | @property 170 | def default_buffer_size(self): 171 | return len(self.procs) * 5 172 | --------------------------------------------------------------------------------
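# ------------------------------------------------------------------------------
# For orientation, a minimal sketch of the single-process extraction path in
# demo/predictor.py. The config choice and the image path below are illustrative
# assumptions, not files guaranteed to work as-is with this repository.
import cv2

from fastreid.config import get_cfg
from predictor import FeatureExtractionDemo  # run from inside demo/

cfg = get_cfg()
cfg.merge_from_file("configs/Base-bagtricks.yml")  # assumed config choice
cfg.freeze()

demo = FeatureExtractionDemo(cfg, parallel=False)
image = cv2.imread("query.jpg")      # BGR image, as loaded by OpenCV (assumed path)
feature = demo.run_on_image(image)   # normalized feature vector for the image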