├── .github ├── FastReID-Logo.png ├── ISSUE_TEMPLATE │ ├── bugs.md │ ├── config.yml │ ├── questions-help-support.md │ └── unexpected-problems-bugs.md ├── wechat_group.png └── workflows │ ├── issue_auto_close.yml │ └── lint_python.yml ├── .gitignore ├── CHANGELOG.md ├── GETTING_STARTED.md ├── INSTALL.md ├── LICENSE ├── MODEL_ZOO.md ├── README.md ├── configs ├── Base-AGW.yml ├── Base-MGN.yml ├── Base-SBS.yml ├── Base-bagtricks.yml ├── DukeMTMC │ ├── AGW_R101-ibn.yml │ ├── AGW_R50-ibn.yml │ ├── AGW_R50.yml │ ├── AGW_S50.yml │ ├── bagtricks_R101-ibn.yml │ ├── bagtricks_R50-ibn.yml │ ├── bagtricks_R50.yml │ ├── bagtricks_S50.yml │ ├── mgn_R50-ibn.yml │ ├── sbs_R101-ibn.yml │ ├── sbs_R50-ibn.yml │ ├── sbs_R50.yml │ └── sbs_S50.yml ├── MSMT17 │ ├── AGW_R101-ibn.yml │ ├── AGW_R50-ibn.yml │ ├── AGW_R50.yml │ ├── AGW_S50.yml │ ├── bagtricks_R101-ibn.yml │ ├── bagtricks_R50-ibn.yml │ ├── bagtricks_R50.yml │ ├── bagtricks_S50.yml │ ├── mgn_R50-ibn.yml │ ├── sbs_R101-ibn.yml │ ├── sbs_R50-ibn.yml │ ├── sbs_R50.yml │ └── sbs_S50.yml ├── Market1501 │ ├── AGW_R101-ibn.yml │ ├── AGW_R50-ibn.yml │ ├── AGW_R50.yml │ ├── AGW_S50.yml │ ├── bagtricks_R101-ibn.yml │ ├── bagtricks_R50-ibn.yml │ ├── bagtricks_R50.yml │ ├── bagtricks_S50.yml │ ├── bagtricks_vit.yml │ ├── mgn_R50-ibn.yml │ ├── sbs_R101-ibn.yml │ ├── sbs_R50-ibn.yml │ ├── sbs_R50.yml │ └── sbs_S50.yml ├── VERIWild │ └── bagtricks_R50-ibn.yml ├── VeRi │ └── sbs_R50-ibn.yml └── VehicleID │ └── bagtricks_R50-ibn.yml ├── datasets └── README.md ├── demo ├── README.md ├── demo.py ├── plot_roc_with_pickle.py ├── predictor.py └── visualize_result.py ├── docker ├── Dockerfile └── README.md ├── docs ├── .gitignore ├── Makefile ├── README.md ├── _static │ └── css │ │ └── custom.css ├── conf.py ├── index.rst ├── modules │ ├── checkpoint.rst │ ├── config.rst │ ├── data.rst │ ├── data_transforms.rst │ ├── engine.rst │ ├── evaluation.rst │ ├── index.rst │ ├── layers.rst │ ├── modeling.rst │ ├── solver.rst │ └── utils.rst └── requirements.txt ├── fastreid ├── __init__.py ├── config │ ├── __init__.py │ ├── config.py │ └── defaults.py ├── data │ ├── __init__.py │ ├── build.py │ ├── common.py │ ├── data_utils.py │ ├── datasets │ │ ├── AirportALERT.py │ │ ├── __init__.py │ │ ├── bases.py │ │ ├── caviara.py │ │ ├── cuhk03.py │ │ ├── cuhk_sysu.py │ │ ├── dukemtmcreid.py │ │ ├── grid.py │ │ ├── iLIDS.py │ │ ├── lpw.py │ │ ├── market1501.py │ │ ├── msmt17.py │ │ ├── pes3d.py │ │ ├── pku.py │ │ ├── prai.py │ │ ├── prid.py │ │ ├── saivt.py │ │ ├── sensereid.py │ │ ├── shinpuhkan.py │ │ ├── sysu_mm.py │ │ ├── thermalworld.py │ │ ├── vehicleid.py │ │ ├── veri.py │ │ ├── veriwild.py │ │ ├── viper.py │ │ └── wildtracker.py │ ├── samplers │ │ ├── __init__.py │ │ ├── data_sampler.py │ │ ├── imbalance_sampler.py │ │ └── triplet_sampler.py │ └── transforms │ │ ├── __init__.py │ │ ├── autoaugment.py │ │ ├── build.py │ │ ├── functional.py │ │ └── transforms.py ├── engine │ ├── __init__.py │ ├── defaults.py │ ├── hooks.py │ ├── launch.py │ └── train_loop.py ├── evaluation │ ├── __init__.py │ ├── clas_evaluator.py │ ├── evaluator.py │ ├── query_expansion.py │ ├── rank.py │ ├── rank_cylib │ │ ├── Makefile │ │ ├── __init__.py │ │ ├── rank_cy.pyx │ │ ├── roc_cy.pyx │ │ ├── setup.py │ │ └── test_cython.py │ ├── reid_evaluation.py │ ├── rerank.py │ ├── roc.py │ └── testing.py ├── layers │ ├── __init__.py │ ├── activation.py │ ├── any_softmax.py │ ├── batch_norm.py │ ├── context_block.py │ ├── drop.py │ ├── frn.py │ ├── gather_layer.py │ ├── helpers.py │ ├── non_local.py │ ├── pooling.py │ ├── 
se_layer.py │ ├── splat.py │ └── weight_init.py ├── modeling │ ├── __init__.py │ ├── backbones │ │ ├── __init__.py │ │ ├── build.py │ │ ├── mobilenet.py │ │ ├── mobilenetv3.py │ │ ├── osnet.py │ │ ├── regnet │ │ │ ├── __init__.py │ │ │ ├── config.py │ │ │ ├── effnet.py │ │ │ ├── effnet │ │ │ │ ├── EN-B0_dds_8gpu.yaml │ │ │ │ ├── EN-B1_dds_8gpu.yaml │ │ │ │ ├── EN-B2_dds_8gpu.yaml │ │ │ │ ├── EN-B3_dds_8gpu.yaml │ │ │ │ ├── EN-B4_dds_8gpu.yaml │ │ │ │ └── EN-B5_dds_8gpu.yaml │ │ │ ├── regnet.py │ │ │ ├── regnetx │ │ │ │ ├── RegNetX-1.6GF_dds_8gpu.yaml │ │ │ │ ├── RegNetX-12GF_dds_8gpu.yaml │ │ │ │ ├── RegNetX-16GF_dds_8gpu.yaml │ │ │ │ ├── RegNetX-200MF_dds_8gpu.yaml │ │ │ │ ├── RegNetX-3.2GF_dds_8gpu.yaml │ │ │ │ ├── RegNetX-32GF_dds_8gpu.yaml │ │ │ │ ├── RegNetX-4.0GF_dds_8gpu.yaml │ │ │ │ ├── RegNetX-400MF_dds_8gpu.yaml │ │ │ │ ├── RegNetX-6.4GF_dds_8gpu.yaml │ │ │ │ ├── RegNetX-600MF_dds_8gpu.yaml │ │ │ │ ├── RegNetX-8.0GF_dds_8gpu.yaml │ │ │ │ └── RegNetX-800MF_dds_8gpu.yaml │ │ │ └── regnety │ │ │ │ ├── RegNetY-1.6GF_dds_8gpu.yaml │ │ │ │ ├── RegNetY-12GF_dds_8gpu.yaml │ │ │ │ ├── RegNetY-16GF_dds_8gpu.yaml │ │ │ │ ├── RegNetY-200MF_dds_8gpu.yaml │ │ │ │ ├── RegNetY-3.2GF_dds_8gpu.yaml │ │ │ │ ├── RegNetY-32GF_dds_8gpu.yaml │ │ │ │ ├── RegNetY-4.0GF_dds_8gpu.yaml │ │ │ │ ├── RegNetY-400MF_dds_8gpu.yaml │ │ │ │ ├── RegNetY-6.4GF_dds_8gpu.yaml │ │ │ │ ├── RegNetY-600MF_dds_8gpu.yaml │ │ │ │ ├── RegNetY-8.0GF_dds_8gpu.yaml │ │ │ │ └── RegNetY-800MF_dds_8gpu.yaml │ │ ├── repvgg.py │ │ ├── resnest.py │ │ ├── resnet.py │ │ ├── resnext.py │ │ ├── shufflenet.py │ │ └── vision_transformer.py │ ├── heads │ │ ├── __init__.py │ │ ├── build.py │ │ ├── clas_head.py │ │ └── embedding_head.py │ ├── losses │ │ ├── __init__.py │ │ ├── circle_loss.py │ │ ├── cross_entroy_loss.py │ │ ├── focal_loss.py │ │ ├── triplet_loss.py │ │ └── utils.py │ └── meta_arch │ │ ├── __init__.py │ │ ├── baseline.py │ │ ├── build.py │ │ ├── distiller.py │ │ ├── mgn.py │ │ └── moco.py ├── solver │ ├── __init__.py │ ├── build.py │ ├── lr_scheduler.py │ └── optim │ │ ├── __init__.py │ │ ├── lamb.py │ │ ├── radam.py │ │ └── swa.py └── utils │ ├── __init__.py │ ├── checkpoint.py │ ├── collect_env.py │ ├── comm.py │ ├── compute_dist.py │ ├── env.py │ ├── events.py │ ├── faiss_utils.py │ ├── file_io.py │ ├── history_buffer.py │ ├── logger.py │ ├── params.py │ ├── precision_bn.py │ ├── registry.py │ ├── summary.py │ ├── timer.py │ └── visualizer.py ├── projects ├── CrossDomainReID │ └── README.md ├── DG-ReID │ └── README.md ├── FastAttr │ ├── README.md │ ├── configs │ │ ├── Base-attribute.yml │ │ ├── dukemtmc.yml │ │ ├── market1501.yml │ │ └── pa100.yml │ ├── fastattr │ │ ├── __init__.py │ │ ├── attr_dataset.py │ │ ├── attr_evaluation.py │ │ ├── config.py │ │ ├── datasets │ │ │ ├── __init__.py │ │ │ ├── bases.py │ │ │ ├── dukemtmcattr.py │ │ │ ├── market1501attr.py │ │ │ └── pa100k.py │ │ └── modeling │ │ │ ├── __init__.py │ │ │ ├── attr_baseline.py │ │ │ ├── attr_head.py │ │ │ └── bce_loss.py │ └── train_net.py ├── FastClas │ ├── README.md │ ├── configs │ │ └── base-clas.yaml │ ├── fastclas │ │ ├── __init__.py │ │ ├── bee_ant.py │ │ ├── dataset.py │ │ └── trainer.py │ └── train_net.py ├── FastDistill │ ├── README.md │ ├── configs │ │ ├── Base-kd.yml │ │ ├── kd-sbs_r101ibn-sbs_r34.yml │ │ ├── sbs_r101ibn.yml │ │ └── sbs_r34.yml │ ├── fastdistill │ │ ├── __init__.py │ │ ├── overhaul.py │ │ └── resnet_distill.py │ └── train_net.py ├── FastFace │ ├── README.md │ ├── configs │ │ ├── face_base.yml │ │ ├── r101_ir.yml │ │ └── r50_ir.yml 
│ ├── fastface │ │ ├── __init__.py │ │ ├── config.py │ │ ├── datasets │ │ │ ├── __init__.py │ │ │ ├── ms1mv2.py │ │ │ └── test_dataset.py │ │ ├── face_data.py │ │ ├── face_evaluator.py │ │ ├── modeling │ │ │ ├── __init__.py │ │ │ ├── face_baseline.py │ │ │ ├── face_head.py │ │ │ ├── iresnet.py │ │ │ └── partial_fc.py │ │ ├── pfc_checkpointer.py │ │ ├── trainer.py │ │ ├── utils_amp.py │ │ └── verification.py │ └── train_net.py ├── FastRT │ ├── .gitignore │ ├── CMakeLists.txt │ ├── README.md │ ├── demo │ │ ├── CMakeLists.txt │ │ └── inference.cpp │ ├── docker │ │ ├── trt7cu100 │ │ │ └── Dockerfile │ │ └── trt7cu102 │ │ │ └── Dockerfile │ ├── fastrt │ │ ├── CMakeLists.txt │ │ ├── backbones │ │ │ ├── CMakeLists.txt │ │ │ └── sbs_resnet.cpp │ │ ├── common │ │ │ ├── calibrator.cpp │ │ │ └── utils.cpp │ │ ├── engine │ │ │ ├── CMakeLists.txt │ │ │ └── InferenceEngine.cpp │ │ ├── factory │ │ │ ├── CMakeLists.txt │ │ │ └── factory.cpp │ │ ├── heads │ │ │ ├── CMakeLists.txt │ │ │ └── embedding_head.cpp │ │ ├── layers │ │ │ ├── CMakeLists.txt │ │ │ ├── layers.cpp │ │ │ ├── poolingLayerRT.cpp │ │ │ └── poolingLayerRT.h │ │ └── meta_arch │ │ │ ├── CMakeLists.txt │ │ │ ├── baseline.cpp │ │ │ └── model.cpp │ ├── include │ │ └── fastrt │ │ │ ├── IPoolingLayerRT.h │ │ │ ├── InferenceEngine.h │ │ │ ├── baseline.h │ │ │ ├── calibrator.h │ │ │ ├── config.h.in │ │ │ ├── cuda_utils.h │ │ │ ├── embedding_head.h │ │ │ ├── factory.h │ │ │ ├── holder.h │ │ │ ├── layers.h │ │ │ ├── logging.h │ │ │ ├── model.h │ │ │ ├── module.h │ │ │ ├── sbs_resnet.h │ │ │ ├── struct.h │ │ │ └── utils.h │ ├── pybind_interface │ │ ├── CMakeLists.txt │ │ ├── docker │ │ │ ├── trt7cu100 │ │ │ │ └── Dockerfile │ │ │ └── trt7cu102_torch160 │ │ │ │ └── Dockerfile │ │ ├── market_benchmark.py │ │ ├── reid.cpp │ │ └── test.py │ ├── third_party │ │ └── cnpy │ │ │ ├── CMakeLists.txt │ │ │ ├── LICENSE │ │ │ ├── README.md │ │ │ ├── cnpy.cpp │ │ │ ├── cnpy.h │ │ │ ├── example1.cpp │ │ │ ├── mat2npz │ │ │ ├── npy2mat │ │ │ └── npz2mat │ └── tools │ │ ├── How_to_Generate.md │ │ └── gen_wts.py ├── FastRetri │ ├── README.md │ ├── configs │ │ ├── base-image_retri.yml │ │ ├── cars.yml │ │ ├── cub.yml │ │ ├── inshop.yml │ │ └── sop.yml │ ├── fastretri │ │ ├── __init__.py │ │ ├── config.py │ │ ├── datasets.py │ │ └── retri_evaluator.py │ └── train_net.py ├── FastTune │ ├── README.md │ ├── autotuner │ │ ├── __init__.py │ │ └── tune_hooks.py │ ├── configs │ │ └── search_trial.yml │ └── tune_net.py ├── HAA │ └── Readme.md ├── NAIC20 │ ├── README.md │ ├── configs │ │ ├── Base-naic.yml │ │ ├── nest101-base.yml │ │ ├── r34-ibn.yml │ │ └── submit.yml │ ├── label.txt │ ├── naic │ │ ├── __init__.py │ │ ├── config.py │ │ ├── naic_dataset.py │ │ └── naic_evaluator.py │ ├── naic20r2_train_list_clean.txt │ ├── train_list_clean.txt │ ├── train_net.py │ ├── val_gallery.txt │ └── val_query.txt ├── PartialReID │ ├── README.md │ ├── configs │ │ └── partial_market.yml │ ├── partialreid │ │ ├── __init__.py │ │ ├── config.py │ │ ├── dsr_distance.py │ │ ├── dsr_evaluation.py │ │ ├── dsr_head.py │ │ ├── partial_dataset.py │ │ └── partialbaseline.py │ └── train_net.py └── README.md ├── tests ├── __init__.py ├── dataset_test.py ├── feature_align.py ├── interp_test.py ├── lr_scheduler_test.py ├── model_test.py ├── sampler_test.py └── test_repvgg.py └── tools ├── deploy ├── Caffe │ ├── ReadMe.md │ ├── __init__.py │ ├── caffe.proto │ ├── caffe_lmdb.py │ ├── caffe_net.py │ ├── caffe_pb2.py │ ├── layer_param.py │ └── net.py ├── README.md ├── caffe_export.py ├── caffe_inference.py ├── 
onnx_export.py ├── onnx_inference.py ├── pytorch_to_caffe.py ├── test_data │ ├── 0022_c6s1_002976_01.jpg │ ├── 0027_c2s2_091032_02.jpg │ ├── 0032_c6s1_002851_01.jpg │ ├── 0048_c1s1_005351_01.jpg │ └── 0065_c6s1_009501_02.jpg ├── trt_calibrator.py ├── trt_export.py └── trt_inference.py ├── plain_train_net.py └── train_net.py /.github/FastReID-Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JDAI-CV/fast-reid/c9bc3ceb2f7a6438b62fb515ea3df6d1e999e95d/.github/FastReID-Logo.png -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bugs.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: "🐛 Bugs" 3 | about: Report bugs in fastreid 4 | title: Please read & provide the following 5 | 6 | --- 7 | 8 | ## Instructions To Reproduce the 🐛 Bug: 9 | 10 | 1. what changes you made (`git diff`) or what code you wrote 11 | ``` 12 | 13 | ``` 14 | 2. what exact command you ran: 15 | 3. what you observed (including __full logs__): 16 | ``` 17 | 18 | ``` 19 | 4. please simplify the steps as much as possible so they do not require additional resources to 20 | run, such as a private dataset. 21 | 22 | ## Expected behavior: 23 | 24 | If there are no obvious errors in "what you observed" provided above, 25 | please tell us the expected behavior. 26 | 27 | ## Environment: 28 | 29 | Provide your environment information using the following command: 30 | 31 | ``` 32 | wget -nc -q https://github.com/facebookresearch/detectron2/raw/master/detectron2/utils/collect_env.py && python collect_env.py 33 | ``` 34 | 35 | If your issue looks like an installation issue / environment issue, 36 | please first try to solve it yourself with the instructions in -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | # require an issue template to be chosen 2 | blank_issues_enabled: false 3 | 4 | # Unexpected behaviors & bugs are split into two templates. 5 | # When they are one template, users think "it's not a bug" and don't choose the template. 6 | # 7 | # But the file name is still "unexpected-problems-bugs.md" so that old references 8 | # to this issue template still work. 9 | # It's OK since this template should be a superset of "bugs.md" (unexpected behaviors are a superset of bugs) -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/questions-help-support.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: "How to do something❓" 3 | about: How to do something using fastreid? What does an API do? 4 | 5 | --- 6 | 7 | ## ❓ How to do something using fastreid 8 | 9 | Describe what you want to do, including: 10 | 11 | 1. what inputs you will provide, if any: 12 | 2. what outputs you are expecting: 13 | 14 | NOTE: 15 | 16 | 1. Only general answers are provided. 17 | If you want to ask about "why X did not work", please use the 18 | [Unexpected behaviors](https://github.com/JDAI-CV/fast-reid/issues/new/choose) issue template. 19 | 20 | 2. For how to implement new models / new dataloaders / new training logic, etc., check the documentation first. 21 | 22 | 3. 
We do not answer general machine learning / computer vision questions that are not specific to fastreid, such as how a model works, how to improve your training/make it converge, or what algorithms/methods can be used to achieve X. 23 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/unexpected-problems-bugs.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: "Unexpected behaviors" 3 | about: Run into unexpected behaviors when using fastreid 4 | title: Please read & provide the following 5 | 6 | --- 7 | 8 | If you do not know the root cause of the problem and would like someone to help, please 9 | post according to this template: 10 | 11 | ## Instructions To Reproduce the Issue: 12 | 13 | Check https://stackoverflow.com/help/minimal-reproducible-example for how to ask good questions. 14 | Simplify the steps to reproduce the issue using suggestions from the above link, and provide them below: 15 | 16 | 1. full code you wrote or full changes you made (`git diff`) 17 | ``` 18 | 19 | ``` 20 | 2. what exact command you ran: 21 | 3. __full logs__ you observed: 22 | ``` 23 | 24 | ``` 25 | 26 | ## Expected behavior: 27 | 28 | If there are no obvious errors in "what you observed" provided above, 29 | please tell us the expected behavior. 30 | 31 | If you expect the model to converge / work better, note that we do not give suggestions 32 | on how to train a new model. 33 | We will only help with it in one of two conditions: 34 | (1) You're unable to reproduce the results in the fastreid model zoo. 35 | (2) It indicates a fastreid bug. 36 | -------------------------------------------------------------------------------- /.github/wechat_group.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JDAI-CV/fast-reid/c9bc3ceb2f7a6438b62fb515ea3df6d1e999e95d/.github/wechat_group.png -------------------------------------------------------------------------------- /.github/workflows/issue_auto_close.yml: -------------------------------------------------------------------------------- 1 | name: Close inactive issues 2 | on: 3 | schedule: 4 | - cron: "30 1 * * *" 5 | 6 | jobs: 7 | close-issues: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/stale@v3 11 | with: 12 | days-before-issue-stale: 30 13 | days-before-issue-close: 14 14 | stale-issue-label: "stale" 15 | stale-issue-message: "This issue is stale because it has been open for 30 days with no activity." 16 | close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale." 17 | days-before-pr-stale: -1 18 | days-before-pr-close: -1 19 | repo-token: ${{ secrets.GITHUB_TOKEN }} -------------------------------------------------------------------------------- /.github/workflows/lint_python.yml: -------------------------------------------------------------------------------- 1 | name: lint_python 2 | on: [pull_request, push] 3 | jobs: 4 | lint_python: 5 | runs-on: ubuntu-latest 6 | steps: 7 | - uses: actions/checkout@v2 8 | - uses: actions/setup-python@v2 9 | - run: pip install black codespell flake8 isort pytest 10 | - run: black --check . || true 11 | - run: codespell --quiet-level=2 || true # --ignore-words-list="" --skip="" 12 | - run: flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics 13 | - run: isort --profile black . || true 14 | - run: pip install -r requirements.txt || true 15 | - run: pytest . 
|| true 16 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | logs 3 | 4 | # compilation and distribution 5 | __pycache__ 6 | _ext 7 | *.pyc 8 | *.pyd 9 | *.so 10 | *.dll 11 | *.egg-info/ 12 | build/ 13 | dist/ 14 | wheels/ 15 | 16 | # pytorch/python/numpy formats 17 | *.pth 18 | *.pkl 19 | *.npy 20 | *.ts 21 | model_ts*.txt 22 | 23 | # ipython/jupyter notebooks 24 | *.ipynb 25 | **/.ipynb_checkpoints/ 26 | 27 | # Editor temporaries 28 | *.swn 29 | *.swo 30 | *.swp 31 | *~ 32 | 33 | # editor settings 34 | .idea 35 | .vscode 36 | _darcs 37 | .DS_Store 38 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ### v1.3 4 | 5 | #### New Features 6 | - Vision Transformer backbone, see config in `configs/Market1501/bagtricks_vit.yml` 7 | - Self-Distillation with EMA update 8 | - Gradient clipping 9 | 10 | #### Improvements 11 | - Faster dataloader with pre-fetch thread and CUDA stream 12 | - Optimize DDP training speed by removing `find_unused_parameters` in DDP 13 | 14 | 15 | ### v1.2 (06/04/2021) 16 | 17 | #### New Features 18 | 19 | - Multiple machine training support 20 | - [RepVGG](https://github.com/DingXiaoH/RepVGG) backbone 21 | - [Partial FC](projects/FastFace) 22 | 23 | #### Improvements 24 | 25 | - Torch2trt pipeline 26 | - Decouple linear transforms and softmax 27 | - Config decorator 28 | 29 | ### v1.1 (29/01/2021) 30 | 31 | #### New Features 32 | 33 | - NAIC20 (ReID track) [1st-place solution](projects/NAIC20) 34 | - Multi-teacher Knowledge Distillation 35 | - TRT network definition APIs in [FastRT](projects/FastRT) 36 | 37 | #### Bug Fixes 38 | 39 | #### Improvements -------------------------------------------------------------------------------- /INSTALL.md: -------------------------------------------------------------------------------- 1 | # Installation 2 | 3 | ## Requirements 4 | 5 | - Linux or macOS with Python ≥ 3.6 6 | - PyTorch ≥ 1.6 7 | - torchvision that matches the PyTorch installation. You can install them together at [pytorch.org](https://pytorch.org/) to make sure of this. 
8 | - [yacs](https://github.com/rbgirshick/yacs) 9 | - Cython (optional, for compiling the evaluation code) 10 | - tensorboard (needed for visualization): `pip install tensorboard` 11 | - gdown (for automatically downloading pre-trained models) 12 | - scikit-learn 13 | - termcolor 14 | - tabulate 15 | - [faiss](https://github.com/facebookresearch/faiss) `pip install faiss-cpu` 16 | 17 | 18 | 19 | # Set up with Conda 20 | ```shell script 21 | conda create -n fastreid python=3.7 22 | conda activate fastreid 23 | conda install pytorch==1.6.0 torchvision tensorboard -c pytorch 24 | pip install -r docs/requirements.txt 25 | ``` 26 | 27 | # Set up with Docker 28 | 29 | Please check the [docker folder](docker) 30 | -------------------------------------------------------------------------------- /configs/Base-AGW.yml: -------------------------------------------------------------------------------- 1 | _BASE_: Base-bagtricks.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | WITH_NL: True 6 | 7 | HEADS: 8 | POOL_LAYER: GeneralizedMeanPooling 9 | 10 | LOSSES: 11 | NAME: ("CrossEntropyLoss", "TripletLoss") 12 | CE: 13 | EPSILON: 0.1 14 | SCALE: 1.0 15 | 16 | TRI: 17 | MARGIN: 0.0 18 | HARD_MINING: False 19 | SCALE: 1.0 20 | -------------------------------------------------------------------------------- /configs/Base-MGN.yml: -------------------------------------------------------------------------------- 1 | _BASE_: Base-SBS.yml 2 | 3 | MODEL: 4 | META_ARCHITECTURE: MGN 5 | 6 | FREEZE_LAYERS: [backbone, b1, b2, b3,] 7 | 8 | BACKBONE: 9 | WITH_NL: False 10 | 11 | HEADS: 12 | EMBEDDING_DIM: 256 13 | -------------------------------------------------------------------------------- /configs/Base-SBS.yml: -------------------------------------------------------------------------------- 1 | _BASE_: Base-bagtricks.yml 2 | 3 | MODEL: 4 | FREEZE_LAYERS: [ backbone ] 5 | 6 | BACKBONE: 7 | WITH_NL: True 8 | 9 | HEADS: 10 | NECK_FEAT: after 11 | POOL_LAYER: GeneralizedMeanPoolingP 12 | CLS_LAYER: CircleSoftmax 13 | SCALE: 64 14 | MARGIN: 0.35 15 | 16 | LOSSES: 17 | NAME: ("CrossEntropyLoss", "TripletLoss",) 18 | CE: 19 | EPSILON: 0.1 20 | SCALE: 1.0 21 | 22 | TRI: 23 | MARGIN: 0.0 24 | HARD_MINING: True 25 | NORM_FEAT: False 26 | SCALE: 1.0 27 | 28 | INPUT: 29 | SIZE_TRAIN: [ 384, 128 ] 30 | SIZE_TEST: [ 384, 128 ] 31 | 32 | AUTOAUG: 33 | ENABLED: True 34 | PROB: 0.1 35 | 36 | DATALOADER: 37 | NUM_INSTANCE: 16 38 | 39 | SOLVER: 40 | AMP: 41 | ENABLED: True 42 | OPT: Adam 43 | MAX_EPOCH: 60 44 | BASE_LR: 0.00035 45 | WEIGHT_DECAY: 0.0005 46 | IMS_PER_BATCH: 64 47 | 48 | SCHED: CosineAnnealingLR 49 | DELAY_EPOCHS: 30 50 | ETA_MIN_LR: 0.0000007 51 | 52 | WARMUP_FACTOR: 0.1 53 | WARMUP_ITERS: 2000 54 | 55 | FREEZE_ITERS: 1000 56 | 57 | CHECKPOINT_PERIOD: 20 58 | 59 | TEST: 60 | EVAL_PERIOD: 10 61 | IMS_PER_BATCH: 128 62 | 63 | CUDNN_BENCHMARK: True 64 | -------------------------------------------------------------------------------- /configs/Base-bagtricks.yml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: Baseline 3 | 4 | BACKBONE: 5 | NAME: build_resnet_backbone 6 | NORM: BN 7 | DEPTH: 50x 8 | LAST_STRIDE: 1 9 | FEAT_DIM: 2048 10 | WITH_IBN: False 11 | PRETRAIN: True 12 | 13 | HEADS: 14 | NAME: EmbeddingHead 15 | NORM: BN 16 | WITH_BNNECK: True 17 | POOL_LAYER: GlobalAvgPool 18 | NECK_FEAT: before 19 | CLS_LAYER: Linear 20 | 21 | LOSSES: 22 | NAME: ("CrossEntropyLoss", "TripletLoss",) 23 | 24 | CE: 25 | EPSILON: 0.1 26 | SCALE: 1. 
27 | 28 | TRI: 29 | MARGIN: 0.3 30 | HARD_MINING: True 31 | NORM_FEAT: False 32 | SCALE: 1. 33 | 34 | INPUT: 35 | SIZE_TRAIN: [ 256, 128 ] 36 | SIZE_TEST: [ 256, 128 ] 37 | 38 | REA: 39 | ENABLED: True 40 | PROB: 0.5 41 | 42 | FLIP: 43 | ENABLED: True 44 | 45 | PADDING: 46 | ENABLED: True 47 | 48 | DATALOADER: 49 | SAMPLER_TRAIN: NaiveIdentitySampler 50 | NUM_INSTANCE: 4 51 | NUM_WORKERS: 8 52 | 53 | SOLVER: 54 | AMP: 55 | ENABLED: True 56 | OPT: Adam 57 | MAX_EPOCH: 120 58 | BASE_LR: 0.00035 59 | WEIGHT_DECAY: 0.0005 60 | WEIGHT_DECAY_NORM: 0.0005 61 | IMS_PER_BATCH: 64 62 | 63 | SCHED: MultiStepLR 64 | STEPS: [ 40, 90 ] 65 | GAMMA: 0.1 66 | 67 | WARMUP_FACTOR: 0.1 68 | WARMUP_ITERS: 2000 69 | 70 | CHECKPOINT_PERIOD: 30 71 | 72 | TEST: 73 | EVAL_PERIOD: 30 74 | IMS_PER_BATCH: 128 75 | 76 | CUDNN_BENCHMARK: True 77 | -------------------------------------------------------------------------------- /configs/DukeMTMC/AGW_R101-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-AGW.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | DEPTH: 101x 6 | WITH_IBN: True 7 | 8 | DATASETS: 9 | NAMES: ("DukeMTMC",) 10 | TESTS: ("DukeMTMC",) 11 | 12 | OUTPUT_DIR: logs/dukemtmc/agw_R101-ibn 13 | -------------------------------------------------------------------------------- /configs/DukeMTMC/AGW_R50-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-AGW.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | WITH_IBN: True 6 | 7 | DATASETS: 8 | NAMES: ("DukeMTMC",) 9 | TESTS: ("DukeMTMC",) 10 | 11 | OUTPUT_DIR: logs/dukemtmc/agw_R50-ibn 12 | -------------------------------------------------------------------------------- /configs/DukeMTMC/AGW_R50.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-AGW.yml 2 | 3 | DATASETS: 4 | NAMES: ("DukeMTMC",) 5 | TESTS: ("DukeMTMC",) 6 | 7 | OUTPUT_DIR: logs/dukemtmc/agw_R50 8 | -------------------------------------------------------------------------------- /configs/DukeMTMC/AGW_S50.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-AGW.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | NAME: build_resnest_backbone 6 | 7 | DATASETS: 8 | NAMES: ("DukeMTMC",) 9 | TESTS: ("DukeMTMC",) 10 | 11 | OUTPUT_DIR: logs/dukemtmc/agw_S50 12 | -------------------------------------------------------------------------------- /configs/DukeMTMC/bagtricks_R101-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-bagtricks.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | DEPTH: 101x 6 | WITH_IBN: True 7 | 8 | DATASETS: 9 | NAMES: ("DukeMTMC",) 10 | TESTS: ("DukeMTMC",) 11 | 12 | OUTPUT_DIR: logs/dukemtmc/bagtricks_R101-ibn 13 | -------------------------------------------------------------------------------- /configs/DukeMTMC/bagtricks_R50-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-bagtricks.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | WITH_IBN: True 6 | 7 | DATASETS: 8 | NAMES: ("DukeMTMC",) 9 | TESTS: ("DukeMTMC",) 10 | 11 | OUTPUT_DIR: logs/dukemtmc/bagtricks_R50-ibn 12 | -------------------------------------------------------------------------------- /configs/DukeMTMC/bagtricks_R50.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-bagtricks.yml 2 | 3 | DATASETS: 4 | NAMES: ("DukeMTMC",) 5 | TESTS: ("DukeMTMC",) 6 | 7 | OUTPUT_DIR: 
logs/dukemtmc/bagtricks_R50 8 | -------------------------------------------------------------------------------- /configs/DukeMTMC/bagtricks_S50.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-bagtricks.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | NAME: build_resnest_backbone 6 | 7 | DATASETS: 8 | NAMES: ("DukeMTMC",) 9 | TESTS: ("DukeMTMC",) 10 | 11 | OUTPUT_DIR: logs/dukemtmc/bagtricks_S50 12 | -------------------------------------------------------------------------------- /configs/DukeMTMC/mgn_R50-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-MGN.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | WITH_IBN: True 6 | 7 | DATASETS: 8 | NAMES: ("DukeMTMC",) 9 | TESTS: ("DukeMTMC",) 10 | 11 | OUTPUT_DIR: logs/dukemtmc/mgn_R50-ibn 12 | -------------------------------------------------------------------------------- /configs/DukeMTMC/sbs_R101-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-SBS.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | DEPTH: 101x 6 | WITH_IBN: True 7 | 8 | DATASETS: 9 | NAMES: ("DukeMTMC",) 10 | TESTS: ("DukeMTMC",) 11 | 12 | OUTPUT_DIR: logs/dukemtmc/sbs_R101-ibn 13 | -------------------------------------------------------------------------------- /configs/DukeMTMC/sbs_R50-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-SBS.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | WITH_IBN: True 6 | 7 | DATASETS: 8 | NAMES: ("DukeMTMC",) 9 | TESTS: ("DukeMTMC",) 10 | 11 | OUTPUT_DIR: logs/dukemtmc/sbs_R50-ibn 12 | -------------------------------------------------------------------------------- /configs/DukeMTMC/sbs_R50.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-SBS.yml 2 | 3 | DATASETS: 4 | NAMES: ("DukeMTMC",) 5 | TESTS: ("DukeMTMC",) 6 | 7 | OUTPUT_DIR: logs/dukemtmc/sbs_R50 8 | -------------------------------------------------------------------------------- /configs/DukeMTMC/sbs_S50.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-SBS.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | NAME: build_resnest_backbone 6 | 7 | DATASETS: 8 | NAMES: ("DukeMTMC",) 9 | TESTS: ("DukeMTMC",) 10 | 11 | OUTPUT_DIR: logs/dukemtmc/sbs_S50 12 | -------------------------------------------------------------------------------- /configs/MSMT17/AGW_R101-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-AGW.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | DEPTH: 101x 6 | WITH_IBN: True 7 | 8 | DATASETS: 9 | NAMES: ("MSMT17",) 10 | TESTS: ("MSMT17",) 11 | 12 | OUTPUT_DIR: logs/msmt17/agw_R101-ibn 13 | -------------------------------------------------------------------------------- /configs/MSMT17/AGW_R50-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-AGW.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | WITH_IBN: True 6 | 7 | DATASETS: 8 | NAMES: ("MSMT17",) 9 | TESTS: ("MSMT17",) 10 | 11 | OUTPUT_DIR: logs/msmt17/agw_R50-ibn 12 | -------------------------------------------------------------------------------- /configs/MSMT17/AGW_R50.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-AGW.yml 2 | 3 | DATASETS: 4 | NAMES: ("MSMT17",) 5 | TESTS: ("MSMT17",) 6 | 7 | OUTPUT_DIR: logs/msmt17/agw_R50 8 | 
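The dataset-specific configs in this directory are thin overrides: each file names a parent via `_BASE_` (e.g. `../Base-AGW.yml`), which in turn inherits from `Base-bagtricks.yml`. The sketch below shows how such a file is typically loaded and tweaked in code. It assumes that `get_cfg()` (exported by `fastreid/config/__init__.py`) returns a yacs-style `CfgNode` pre-filled with the defaults from `fastreid/config/defaults.py`, and that its `merge_from_file` resolves the `_BASE_` chain; treat it as a minimal illustration rather than the canonical entry point.

```python
# Minimal sketch of loading a dataset-specific config (assumptions noted above).
from fastreid.config import get_cfg

cfg = get_cfg()                                      # library defaults
cfg.merge_from_file("configs/MSMT17/AGW_R50.yml")    # Base-bagtricks -> Base-AGW -> this file
cfg.merge_from_list(["SOLVER.IMS_PER_BATCH", 32,     # command-line style overrides (optional)
                     "OUTPUT_DIR", "logs/msmt17/agw_R50_bs32"])
cfg.freeze()
print(cfg.MODEL.BACKBONE.WITH_NL, cfg.DATASETS.NAMES, cfg.OUTPUT_DIR)
```

This mirrors how the demo and training scripts consume `--config-file` plus trailing `--opts` key-value overrides (see `demo/README.md` later in this listing).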
-------------------------------------------------------------------------------- /configs/MSMT17/AGW_S50.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-AGW.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | NAME: build_resnest_backbone 6 | 7 | DATASETS: 8 | NAMES: ("MSMT17",) 9 | TESTS: ("MSMT17",) 10 | 11 | OUTPUT_DIR: logs/msmt17/agw_S50 12 | -------------------------------------------------------------------------------- /configs/MSMT17/bagtricks_R101-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-bagtricks.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | DEPTH: 101x 6 | WITH_IBN: True 7 | 8 | DATASETS: 9 | NAMES: ("MSMT17",) 10 | TESTS: ("MSMT17",) 11 | 12 | OUTPUT_DIR: logs/msmt17/bagtricks_R101-ibn 13 | 14 | -------------------------------------------------------------------------------- /configs/MSMT17/bagtricks_R50-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-bagtricks.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | WITH_IBN: True 6 | 7 | DATASETS: 8 | NAMES: ("MSMT17",) 9 | TESTS: ("MSMT17",) 10 | 11 | OUTPUT_DIR: logs/msmt17/bagtricks_R50-ibn 12 | 13 | -------------------------------------------------------------------------------- /configs/MSMT17/bagtricks_R50.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-bagtricks.yml 2 | 3 | DATASETS: 4 | NAMES: ("MSMT17",) 5 | TESTS: ("MSMT17",) 6 | 7 | OUTPUT_DIR: logs/msmt17/bagtricks_R50 8 | -------------------------------------------------------------------------------- /configs/MSMT17/bagtricks_S50.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-bagtricks.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | NAME: build_resnest_backbone 6 | 7 | DATASETS: 8 | NAMES: ("MSMT17",) 9 | TESTS: ("MSMT17",) 10 | 11 | OUTPUT_DIR: logs/msmt17/bagtricks_S50 12 | 13 | -------------------------------------------------------------------------------- /configs/MSMT17/mgn_R50-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-MGN.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | WITH_IBN: True 6 | 7 | DATASETS: 8 | NAMES: ("MSMT17",) 9 | TESTS: ("MSMT17",) 10 | 11 | OUTPUT_DIR: logs/msmt17/mgn_R50-ibn 12 | -------------------------------------------------------------------------------- /configs/MSMT17/sbs_R101-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-SBS.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | DEPTH: 101x 6 | WITH_IBN: True 7 | 8 | DATASETS: 9 | NAMES: ("MSMT17",) 10 | TESTS: ("MSMT17",) 11 | 12 | OUTPUT_DIR: logs/msmt17/sbs_R101-ibn 13 | -------------------------------------------------------------------------------- /configs/MSMT17/sbs_R50-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-SBS.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | WITH_IBN: True 6 | 7 | DATASETS: 8 | NAMES: ("MSMT17",) 9 | TESTS: ("MSMT17",) 10 | 11 | OUTPUT_DIR: logs/msmt17/sbs_R50-ibn 12 | -------------------------------------------------------------------------------- /configs/MSMT17/sbs_R50.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-SBS.yml 2 | 3 | DATASETS: 4 | NAMES: ("MSMT17",) 5 | TESTS: ("MSMT17",) 6 | 7 | OUTPUT_DIR: logs/msmt17/sbs_R50 8 | 
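`MODEL.META_ARCHITECTURE` in these configs ("Baseline" from `Base-bagtricks.yml`, "MGN" in `Base-MGN.yml`) is a registry key rather than an import path: the name is looked up in `fastreid.modeling.META_ARCH_REGISTRY`, which is documented in `docs/modules/modeling.rst` further down. The sketch below shows that lookup; constructing the class directly from a `cfg` is assumed to work through the `configurable` mechanism exported by `fastreid.config`, so take it as an illustrative sketch rather than the trainer's exact code path.

```python
# Sketch: resolve MODEL.META_ARCHITECTURE through the registry (see assumptions above).
from fastreid.config import get_cfg
from fastreid.modeling import META_ARCH_REGISTRY

cfg = get_cfg()
cfg.merge_from_file("configs/MSMT17/sbs_R50.yml")
cfg.MODEL.BACKBONE.PRETRAIN = False       # skip the pretrained-weight download for this check

meta_arch = META_ARCH_REGISTRY.get(cfg.MODEL.META_ARCHITECTURE)   # e.g. the Baseline class
model = meta_arch(cfg)
print(type(model).__name__)
```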
-------------------------------------------------------------------------------- /configs/MSMT17/sbs_S50.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-SBS.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | NAME: build_resnest_backbone 6 | 7 | DATASETS: 8 | NAMES: ("MSMT17",) 9 | TESTS: ("MSMT17",) 10 | 11 | OUTPUT_DIR: logs/msmt17/sbs_S50 12 | -------------------------------------------------------------------------------- /configs/Market1501/AGW_R101-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-AGW.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | DEPTH: 101x 6 | WITH_IBN: True 7 | 8 | DATASETS: 9 | NAMES: ("Market1501",) 10 | TESTS: ("Market1501",) 11 | 12 | OUTPUT_DIR: logs/market1501/agw_R101-ibn 13 | -------------------------------------------------------------------------------- /configs/Market1501/AGW_R50-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-AGW.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | WITH_IBN: True 6 | 7 | DATASETS: 8 | NAMES: ("Market1501",) 9 | TESTS: ("Market1501",) 10 | 11 | OUTPUT_DIR: logs/market1501/agw_R50-ibn 12 | -------------------------------------------------------------------------------- /configs/Market1501/AGW_R50.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-AGW.yml 2 | 3 | DATASETS: 4 | NAMES: ("Market1501",) 5 | TESTS: ("Market1501",) 6 | 7 | OUTPUT_DIR: logs/market1501/agw_R50 8 | -------------------------------------------------------------------------------- /configs/Market1501/AGW_S50.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-AGW.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | NAME: build_resnest_backbone 6 | 7 | DATASETS: 8 | NAMES: ("Market1501",) 9 | TESTS: ("Market1501",) 10 | 11 | OUTPUT_DIR: logs/market1501/agw_S50 12 | -------------------------------------------------------------------------------- /configs/Market1501/bagtricks_R101-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-bagtricks.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | DEPTH: 101x 6 | WITH_IBN: True 7 | 8 | DATASETS: 9 | NAMES: ("Market1501",) 10 | TESTS: ("Market1501",) 11 | 12 | OUTPUT_DIR: logs/market1501/bagtricks_R101-ibn 13 | -------------------------------------------------------------------------------- /configs/Market1501/bagtricks_R50-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-bagtricks.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | WITH_IBN: True 6 | 7 | DATASETS: 8 | NAMES: ("Market1501",) 9 | TESTS: ("Market1501",) 10 | 11 | OUTPUT_DIR: logs/market1501/bagtricks_R50-ibn 12 | -------------------------------------------------------------------------------- /configs/Market1501/bagtricks_R50.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-bagtricks.yml 2 | 3 | DATASETS: 4 | NAMES: ("Market1501",) 5 | TESTS: ("Market1501",) 6 | 7 | OUTPUT_DIR: logs/market1501/bagtricks_R50 8 | -------------------------------------------------------------------------------- /configs/Market1501/bagtricks_S50.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-bagtricks.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | NAME: build_resnest_backbone 6 | 7 | DATASETS: 8 | NAMES: ("Market1501",) 9 | TESTS: ("Market1501",) 
10 | 11 | OUTPUT_DIR: logs/market1501/bagtricks_S50 12 | -------------------------------------------------------------------------------- /configs/Market1501/bagtricks_vit.yml: -------------------------------------------------------------------------------- 1 | 2 | MODEL: 3 | META_ARCHITECTURE: Baseline 4 | PIXEL_MEAN: [127.5, 127.5, 127.5] 5 | PIXEL_STD: [127.5, 127.5, 127.5] 6 | 7 | BACKBONE: 8 | NAME: build_vit_backbone 9 | DEPTH: base 10 | FEAT_DIM: 768 11 | PRETRAIN: True 12 | PRETRAIN_PATH: /export/home/lxy/.cache/torch/checkpoints/jx_vit_base_p16_224-80ecf9dd.pth 13 | STRIDE_SIZE: (16, 16) 14 | DROP_PATH_RATIO: 0.1 15 | DROP_RATIO: 0.0 16 | ATT_DROP_RATE: 0.0 17 | 18 | HEADS: 19 | NAME: EmbeddingHead 20 | NORM: BN 21 | WITH_BNNECK: True 22 | POOL_LAYER: Identity 23 | NECK_FEAT: before 24 | CLS_LAYER: Linear 25 | 26 | LOSSES: 27 | NAME: ("CrossEntropyLoss", "TripletLoss",) 28 | 29 | CE: 30 | EPSILON: 0. # no smooth 31 | SCALE: 1. 32 | 33 | TRI: 34 | MARGIN: 0.0 35 | HARD_MINING: True 36 | NORM_FEAT: False 37 | SCALE: 1. 38 | 39 | INPUT: 40 | SIZE_TRAIN: [ 256, 128 ] 41 | SIZE_TEST: [ 256, 128 ] 42 | 43 | REA: 44 | ENABLED: True 45 | PROB: 0.5 46 | 47 | FLIP: 48 | ENABLED: True 49 | 50 | PADDING: 51 | ENABLED: True 52 | 53 | DATALOADER: 54 | SAMPLER_TRAIN: NaiveIdentitySampler 55 | NUM_INSTANCE: 4 56 | NUM_WORKERS: 8 57 | 58 | SOLVER: 59 | AMP: 60 | ENABLED: False 61 | OPT: SGD 62 | MAX_EPOCH: 120 63 | BASE_LR: 0.008 64 | WEIGHT_DECAY: 0.0001 65 | IMS_PER_BATCH: 64 66 | 67 | SCHED: CosineAnnealingLR 68 | ETA_MIN_LR: 0.000016 69 | 70 | WARMUP_FACTOR: 0.01 71 | WARMUP_ITERS: 1000 72 | 73 | CLIP_GRADIENTS: 74 | ENABLED: True 75 | 76 | CHECKPOINT_PERIOD: 30 77 | 78 | TEST: 79 | EVAL_PERIOD: 5 80 | IMS_PER_BATCH: 128 81 | 82 | CUDNN_BENCHMARK: True 83 | 84 | DATASETS: 85 | NAMES: ("Market1501",) 86 | TESTS: ("Market1501",) 87 | 88 | OUTPUT_DIR: logs/market1501/sbs_vit_base 89 | -------------------------------------------------------------------------------- /configs/Market1501/mgn_R50-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-MGN.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | WITH_IBN: True 6 | 7 | DATASETS: 8 | NAMES: ("Market1501",) 9 | TESTS: ("Market1501",) 10 | 11 | OUTPUT_DIR: logs/market1501/mgn_R50-ibn 12 | -------------------------------------------------------------------------------- /configs/Market1501/sbs_R101-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-SBS.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | DEPTH: 101x 6 | WITH_IBN: True 7 | 8 | DATASETS: 9 | NAMES: ("Market1501",) 10 | TESTS: ("Market1501",) 11 | 12 | OUTPUT_DIR: logs/market1501/sbs_R101-ibn 13 | -------------------------------------------------------------------------------- /configs/Market1501/sbs_R50-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-SBS.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | WITH_IBN: True 6 | 7 | DATASETS: 8 | NAMES: ("Market1501",) 9 | TESTS: ("Market1501",) 10 | 11 | OUTPUT_DIR: logs/market1501/sbs_R50-ibn 12 | -------------------------------------------------------------------------------- /configs/Market1501/sbs_R50.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-SBS.yml 2 | 3 | DATASETS: 4 | NAMES: ("Market1501",) 5 | TESTS: ("Market1501",) 6 | 7 | OUTPUT_DIR: logs/market1501/sbs_R50 8 | 
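The Market1501 configs above switch backbones purely by name (`build_resnet_backbone`, `build_resnest_backbone`, `build_vit_backbone`): `MODEL.BACKBONE.NAME` is resolved through `fastreid.modeling.BACKBONE_REGISTRY`, documented in `docs/modules/modeling.rst`. Below is a hypothetical sketch of registering a custom backbone so that a config can refer to it by name; the decorator usage mirrors the `DATASET_REGISTRY.register()` pattern in `fastreid/data/datasets/AirportALERT.py`, while the `build_*(cfg) -> nn.Module` builder signature is an assumption.

```python
# Hypothetical example: register a toy backbone under a name a config can reference.
import torch.nn as nn

from fastreid.modeling import BACKBONE_REGISTRY


@BACKBONE_REGISTRY.register()
def build_toy_backbone(cfg):
    feat_dim = cfg.MODEL.BACKBONE.FEAT_DIM    # 2048 in Base-bagtricks.yml
    # stand-in for a real feature extractor; it only has to return an nn.Module
    return nn.Sequential(
        nn.Conv2d(3, feat_dim, kernel_size=7, stride=4, padding=3),
        nn.BatchNorm2d(feat_dim),
        nn.ReLU(inplace=True),
    )

# A config would then select it with:
#   MODEL:
#     BACKBONE:
#       NAME: build_toy_backbone
```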
-------------------------------------------------------------------------------- /configs/Market1501/sbs_S50.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-SBS.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | NAME: build_resnest_backbone 6 | 7 | DATASETS: 8 | NAMES: ("Market1501",) 9 | TESTS: ("Market1501",) 10 | 11 | OUTPUT_DIR: logs/market1501/sbs_S50 12 | -------------------------------------------------------------------------------- /configs/VERIWild/bagtricks_R50-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-bagtricks.yml 2 | 3 | INPUT: 4 | SIZE_TRAIN: [256, 256] 5 | SIZE_TEST: [256, 256] 6 | 7 | MODEL: 8 | BACKBONE: 9 | WITH_IBN: True 10 | 11 | HEADS: 12 | POOL_LAYER: GeneralizedMeanPooling 13 | 14 | LOSSES: 15 | TRI: 16 | HARD_MINING: False 17 | MARGIN: 0.0 18 | 19 | DATASETS: 20 | NAMES: ("VeRiWild",) 21 | TESTS: ("SmallVeRiWild", "MediumVeRiWild", "LargeVeRiWild",) 22 | 23 | SOLVER: 24 | IMS_PER_BATCH: 512 # 512 For 4 GPUs 25 | MAX_EPOCH: 120 26 | STEPS: [30, 70, 90] 27 | WARMUP_ITERS: 5000 28 | 29 | CHECKPOINT_PERIOD: 20 30 | 31 | TEST: 32 | EVAL_PERIOD: 10 33 | IMS_PER_BATCH: 128 34 | 35 | OUTPUT_DIR: logs/veriwild/bagtricks_R50-ibn_4gpu 36 | -------------------------------------------------------------------------------- /configs/VeRi/sbs_R50-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-SBS.yml 2 | 3 | INPUT: 4 | SIZE_TRAIN: [256, 256] 5 | SIZE_TEST: [256, 256] 6 | 7 | MODEL: 8 | BACKBONE: 9 | WITH_IBN: True 10 | WITH_NL: True 11 | 12 | SOLVER: 13 | OPT: SGD 14 | BASE_LR: 0.01 15 | ETA_MIN_LR: 7.7e-5 16 | 17 | IMS_PER_BATCH: 64 18 | MAX_EPOCH: 60 19 | WARMUP_ITERS: 3000 20 | FREEZE_ITERS: 3000 21 | 22 | CHECKPOINT_PERIOD: 10 23 | 24 | DATASETS: 25 | NAMES: ("VeRi",) 26 | TESTS: ("VeRi",) 27 | 28 | DATALOADER: 29 | SAMPLER_TRAIN: BalancedIdentitySampler 30 | 31 | TEST: 32 | EVAL_PERIOD: 10 33 | IMS_PER_BATCH: 256 34 | 35 | OUTPUT_DIR: logs/veri/sbs_R50-ibn 36 | -------------------------------------------------------------------------------- /configs/VehicleID/bagtricks_R50-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../Base-bagtricks.yml 2 | 3 | INPUT: 4 | SIZE_TRAIN: [256, 256] 5 | SIZE_TEST: [256, 256] 6 | 7 | MODEL: 8 | BACKBONE: 9 | WITH_IBN: True 10 | HEADS: 11 | POOL_LAYER: GeneralizedMeanPooling 12 | 13 | LOSSES: 14 | TRI: 15 | HARD_MINING: False 16 | MARGIN: 0.0 17 | 18 | DATASETS: 19 | NAMES: ("VehicleID",) 20 | TESTS: ("SmallVehicleID", "MediumVehicleID", "LargeVehicleID",) 21 | 22 | SOLVER: 23 | BIAS_LR_FACTOR: 1. 24 | 25 | IMS_PER_BATCH: 512 26 | MAX_EPOCH: 60 27 | STEPS: [30, 50] 28 | WARMUP_ITERS: 2000 29 | 30 | CHECKPOINT_PERIOD: 20 31 | 32 | TEST: 33 | EVAL_PERIOD: 20 34 | IMS_PER_BATCH: 128 35 | 36 | OUTPUT_DIR: logs/vehicleid/bagtricks_R50-ibn_4gpu 37 | -------------------------------------------------------------------------------- /datasets/README.md: -------------------------------------------------------------------------------- 1 | # Set up Builtin Datasets 2 | 3 | Fastreid has builtin support for a few datasets. The datasets are assumed to exist in a directory specified by the environment variable `FASTREID_DATASETS`. Under this directory, fastreid expects to find datasets in the structure described below. 4 | 5 | You can set the location for builtin datasets by `export FASTREID_DATASETS=/path/to/datasets/`. 
If left unset, the default is `datasets/` relative to your current working directory. 6 | 7 | The [model zoo](https://github.com/JDAI-CV/fast-reid/blob/master/MODEL_ZOO.md) contains configs and models that use these builtin datasets. 8 | 9 | ## Expected dataset structure for [Market1501](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Zheng_Scalable_Person_Re-Identification_ICCV_2015_paper.pdf) 10 | 11 | 1. Download the dataset to `datasets/` from [Baidu Pan](https://pan.baidu.com/s/1ntIi2Op) or [Google Drive](https://drive.google.com/file/d/0B8-rUzbwVRk0c054eEozWG9COHM/view) 12 | 2. Extract the dataset. The dataset structure should look like: 13 | 14 | ```bash 15 | datasets/ 16 | Market-1501-v15.09.15/ 17 | bounding_box_test/ 18 | bounding_box_train/ 19 | ``` 20 | 21 | ## Expected dataset structure for [DukeMTMC-reID](https://openaccess.thecvf.com/content_ICCV_2017/papers/Zheng_Unlabeled_Samples_Generated_ICCV_2017_paper.pdf) 22 | 23 | 1. Download the dataset to `datasets/` 24 | 2. Extract the dataset. The dataset structure should look like: 25 | 26 | ```bash 27 | datasets/ 28 | DukeMTMC-reID/ 29 | bounding_box_train/ 30 | bounding_box_test/ 31 | ``` 32 | 33 | ## Expected dataset structure for [MSMT17](https://arxiv.org/abs/1711.08565) 34 | 35 | 1. Download the dataset to `datasets/` 36 | 2. Extract the dataset. The dataset structure should look like: 37 | 38 | ```bash 39 | datasets/ 40 | MSMT17_V2/ 41 | mask_train_v2/ 42 | mask_test_v2/ 43 | ``` 44 | -------------------------------------------------------------------------------- /demo/README.md: -------------------------------------------------------------------------------- 1 | # FastReID Demo 2 | 3 | We provide a command-line tool to run a simple demo of builtin models. 4 | 5 | You can run this command to get cosine similarities between different images: 6 | 7 | ```bash 8 | python demo/visualize_result.py --config-file logs/dukemtmc/mgn_R50-ibn/config.yaml \ 9 | --parallel --vis-label --dataset-name DukeMTMC --output logs/mgn_duke_vis \ 10 | --opts MODEL.WEIGHTS logs/dukemtmc/mgn_R50-ibn/model_final.pth 11 | ``` 12 | -------------------------------------------------------------------------------- /demo/plot_roc_with_pickle.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import matplotlib.pyplot as plt 8 | import sys 9 | 10 | sys.path.append('.') 11 | from fastreid.utils.visualizer import Visualizer 12 | 13 | if __name__ == "__main__": 14 | baseline_res = Visualizer.load_roc_info("logs/duke_vis/roc_info.pickle") 15 | mgn_res = Visualizer.load_roc_info("logs/mgn_duke_vis/roc_info.pickle") 16 | 17 | fig = Visualizer.plot_roc_curve(baseline_res['fpr'], baseline_res['tpr'], name='baseline') 18 | Visualizer.plot_roc_curve(mgn_res['fpr'], mgn_res['tpr'], name='mgn', fig=fig) 19 | plt.savefig('roc.jpg') 20 | 21 | fig = Visualizer.plot_distribution(baseline_res['pos'], baseline_res['neg'], name='baseline') 22 | Visualizer.plot_distribution(mgn_res['pos'], mgn_res['neg'], name='mgn', fig=fig) 23 | plt.savefig('dist.jpg') 24 | -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nvidia/cuda:10.1-cudnn7-devel 2 | 3 | # https://github.com/NVIDIA/nvidia-docker/issues/1632 4 | RUN rm /etc/apt/sources.list.d/cuda.list 5 | RUN rm /etc/apt/sources.list.d/nvidia-ml.list 6 | ENV DEBIAN_FRONTEND 
noninteractive 7 | RUN apt-get update && apt-get install -y \ 8 | python3-opencv ca-certificates python3-dev git wget sudo ninja-build 9 | RUN ln -sv /usr/bin/python3 /usr/bin/python 10 | 11 | # create a non-root user 12 | ARG USER_ID=1000 13 | RUN useradd -m --no-log-init --system --uid ${USER_ID} appuser -g sudo 14 | RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers 15 | USER appuser 16 | WORKDIR /home/appuser 17 | 18 | # https://github.com/facebookresearch/detectron2/issues/3933 19 | ENV PATH="/home/appuser/.local/bin:${PATH}" 20 | RUN wget https://bootstrap.pypa.io/pip/3.6/get-pip.py && \ 21 | python3 get-pip.py --user && \ 22 | rm get-pip.py 23 | 24 | # install dependencies 25 | # See https://pytorch.org/ for other options if you use a different version of CUDA 26 | RUN pip install --user tensorboard cmake # cmake from apt-get is too old 27 | RUN pip install --user torch==1.6.0+cu101 torchvision==0.7.0+cu101 -f https://download.pytorch.org/whl/cu101/torch_stable.html 28 | RUN pip install --user -i https://pypi.tuna.tsinghua.edu.cn/simple tensorboard opencv-python cython yacs termcolor scikit-learn tabulate gdown gpustat faiss-gpu ipdb h5py 29 | -------------------------------------------------------------------------------- /docker/README.md: -------------------------------------------------------------------------------- 1 | # Use the container 2 | 3 | ```shell script 4 | cd docker/ 5 | # Build: 6 | docker build -t=fastreid:v0 . 7 | # Launch (requires GPUs) 8 | nvidia-docker run -v server_path:docker_path --name=fastreid --net=host --ipc=host -it fastreid:v0 /bin/sh 9 | ``` 10 | 11 | ## Install new dependencies 12 | 13 | Add the following to the `Dockerfile` to make the changes persistent. 14 | ```shell script 15 | RUN sudo apt-get update && sudo apt-get install -y vim 16 | ``` 17 | 18 | Or run them in the container to make temporary changes. 19 | 20 | ## A more complete Docker container 21 | 22 | If you want to use a more complete Docker container that contains many useful tools, you can check my development environment [Dockerfile](https://github.com/L1aoXingyu/fastreid_docker) -------------------------------------------------------------------------------- /docs/.gitignore: -------------------------------------------------------------------------------- 1 | _build -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SOURCEDIR = . 8 | BUILDDIR = _build 9 | 10 | # Put it first so that "make" without argument is like "make help". 11 | help: 12 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 13 | 14 | .PHONY: help Makefile 15 | 16 | # Catch-all target: route all unknown targets to Sphinx using the new 17 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 18 | %: Makefile 19 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 20 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Read the docs: 2 | 3 | The latest documentation built from this directory is available at [detectron2.readthedocs.io](https://detectron2.readthedocs.io/). 
4 | Documents in this directory are not meant to be read on github. 5 | 6 | # Build the docs: 7 | 8 | 1. Install detectron2 according to [INSTALL.md](https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md). 9 | 2. Install additional libraries required to build docs: 10 | - docutils==0.16 11 | - Sphinx==3.0.0 12 | - recommonmark==0.6.0 13 | - sphinx_rtd_theme 14 | - mock 15 | 16 | 3. Run `make html` from this directory. 17 | -------------------------------------------------------------------------------- /docs/_static/css/custom.css: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) Facebook, Inc. and its affiliates. 3 | * some extra css to make markdown look similar between github/sphinx 4 | */ 5 | 6 | /* 7 | * Below is for install.md: 8 | */ 9 | .rst-content code { 10 | white-space: pre; 11 | border: 0px; 12 | } 13 | 14 | .rst-content th { 15 | border: 1px solid #e1e4e5; 16 | } 17 | 18 | .rst-content th p { 19 | /* otherwise will be default 24px for regular paragraph */ 20 | margin-bottom: 0px; 21 | } 22 | 23 | div.section > details { 24 | padding-bottom: 1em; 25 | } 26 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. fastreid documentation master file, created by 2 | sphinx-quickstart on Sat Sep 21 13:46:45 2019. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to fastreid's documentation! 7 | ====================================== 8 | 9 | .. toctree:: 10 | :maxdepth: 2 11 | 12 | tutorials/index 13 | notes/index 14 | modules/index 15 | -------------------------------------------------------------------------------- /docs/modules/checkpoint.rst: -------------------------------------------------------------------------------- 1 | fastreid.checkpoint 2 | ============================= 3 | 4 | .. automodule:: fastreid.utils.checkpoint 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/modules/config.rst: -------------------------------------------------------------------------------- 1 | fastreid.config 2 | ========================= 3 | 4 | Related tutorials: :doc:`../tutorials/configs`, :doc:`../tutorials/extend`. 5 | 6 | .. automodule:: fastreid.config 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | :inherited-members: 11 | 12 | 13 | Config References 14 | ----------------- 15 | 16 | .. literalinclude:: ../../fastreid/config/defaults.py 17 | :language: python 18 | :linenos: 19 | :lines: 4- 20 | -------------------------------------------------------------------------------- /docs/modules/data_transforms.rst: -------------------------------------------------------------------------------- 1 | fastreid.data.transforms 2 | ==================================== 3 | 4 | 5 | .. automodule:: fastreid.data.transforms 6 | :members: 7 | :undoc-members: 8 | :show-inheritance: 9 | :imported-members: 10 | -------------------------------------------------------------------------------- /docs/modules/engine.rst: -------------------------------------------------------------------------------- 1 | fastreid.engine 2 | ========================= 3 | 4 | .. 
automodule:: fastreid.engine 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | 10 | fastreid.engine.defaults module 11 | --------------------------------- 12 | 13 | .. automodule:: fastreid.engine.defaults 14 | :members: 15 | :undoc-members: 16 | :show-inheritance: 17 | 18 | fastreid.engine.hooks module 19 | --------------------------------- 20 | 21 | .. automodule:: fastreid.engine.hooks 22 | :members: 23 | :undoc-members: 24 | :show-inheritance: 25 | -------------------------------------------------------------------------------- /docs/modules/evaluation.rst: -------------------------------------------------------------------------------- 1 | fastreid.evaluation 2 | ============================= 3 | 4 | .. automodule:: fastreid.evaluation 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/modules/index.rst: -------------------------------------------------------------------------------- 1 | API Documentation 2 | ================== 3 | 4 | .. toctree:: 5 | 6 | checkpoint 7 | config 8 | data 9 | data_transforms 10 | engine 11 | evaluation 12 | layers 13 | model_zoo 14 | modeling 15 | solver 16 | utils 17 | export 18 | -------------------------------------------------------------------------------- /docs/modules/layers.rst: -------------------------------------------------------------------------------- 1 | fastreid.layers 2 | ========================= 3 | 4 | .. automodule:: fastreid.layers 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/modules/modeling.rst: -------------------------------------------------------------------------------- 1 | fastreid.modeling 2 | =========================== 3 | 4 | .. automodule:: fastreid.modeling 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | Model Registries 10 | ----------------- 11 | 12 | These are different registries provided in modeling. 13 | Each registry provides you the ability to replace a component with your own customized one, 14 | without having to modify fastreid's code. 15 | 16 | Note that it is impossible to allow users to customize any line of code directly. 17 | Even just to add one line at some place, 18 | you'll likely need to find out the smallest registry which contains that line, 19 | and register your component with that registry. 20 | 21 | 22 | .. autodata:: fastreid.modeling.BACKBONE_REGISTRY 23 | .. autodata:: fastreid.modeling.META_ARCH_REGISTRY 24 | .. autodata:: fastreid.modeling.REID_HEADS_REGISTRY 25 | -------------------------------------------------------------------------------- /docs/modules/solver.rst: -------------------------------------------------------------------------------- 1 | fastreid.solver 2 | ========================= 3 | 4 | .. automodule:: fastreid.solver 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/modules/utils.rst: -------------------------------------------------------------------------------- 1 | fastreid.utils 2 | ======================== 3 | 4 | fastreid.utils.colormap module 5 | -------------------------------- 6 | 7 | .. automodule:: fastreid.utils.colormap 8 | :members: 9 | :undoc-members: 10 | :show-inheritance: 11 | 12 | fastreid.utils.comm module 13 | ---------------------------- 14 | 15 | .. 
automodule:: fastreid.utils.comm 16 | :members: 17 | :undoc-members: 18 | :show-inheritance: 19 | 20 | 21 | fastreid.utils.events module 22 | ------------------------------ 23 | 24 | .. automodule:: fastreid.utils.events 25 | :members: 26 | :undoc-members: 27 | :show-inheritance: 28 | 29 | 30 | fastreid.utils.logger module 31 | ------------------------------ 32 | 33 | .. automodule:: fastreid.utils.logger 34 | :members: 35 | :undoc-members: 36 | :show-inheritance: 37 | 38 | 39 | fastreid.utils.registry module 40 | -------------------------------- 41 | 42 | .. automodule:: fastreid.utils.registry 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | fastreid.utils.memory module 48 | ---------------------------------- 49 | 50 | .. automodule:: fastreid.utils.memory 51 | :members: 52 | :undoc-members: 53 | :show-inheritance: 54 | 55 | 56 | fastreid.utils.analysis module 57 | ---------------------------------- 58 | 59 | .. automodule:: fastreid.utils.analysis 60 | :members: 61 | :undoc-members: 62 | :show-inheritance: 63 | 64 | 65 | fastreid.utils.visualizer module 66 | ---------------------------------- 67 | 68 | .. automodule:: fastreid.utils.visualizer 69 | :members: 70 | :undoc-members: 71 | :show-inheritance: 72 | 73 | fastreid.utils.video\_visualizer module 74 | ----------------------------------------- 75 | 76 | .. automodule:: fastreid.utils.video_visualizer 77 | :members: 78 | :undoc-members: 79 | :show-inheritance: 80 | 81 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | matplotlib 2 | scipy 3 | Pillow 4 | numpy 5 | prettytable 6 | easydict 7 | scikit-learn 8 | pyyaml 9 | yacs 10 | termcolor 11 | tabulate 12 | tensorboard 13 | opencv-python 14 | pyyaml 15 | yacs 16 | termcolor 17 | scikit-learn 18 | tabulate 19 | gdown 20 | faiss-gpu -------------------------------------------------------------------------------- /fastreid/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | 8 | __version__ = "1.3" 9 | -------------------------------------------------------------------------------- /fastreid/config/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: l1aoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .config import CfgNode, get_cfg, global_cfg, set_global_cfg, configurable 8 | 9 | __all__ = [ 10 | 'CfgNode', 11 | 'get_cfg', 12 | 'global_cfg', 13 | 'set_global_cfg', 14 | 'configurable' 15 | ] 16 | -------------------------------------------------------------------------------- /fastreid/data/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from . import transforms # isort:skip 8 | from .build import ( 9 | build_reid_train_loader, 10 | build_reid_test_loader 11 | ) 12 | from .common import CommDataset 13 | 14 | # ensure the builtin datasets are registered 15 | from . 
import datasets, samplers # isort:skip 16 | 17 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 18 | -------------------------------------------------------------------------------- /fastreid/data/common.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from torch.utils.data import Dataset 8 | 9 | from .data_utils import read_image 10 | 11 | 12 | class CommDataset(Dataset): 13 | """Image Person ReID Dataset""" 14 | 15 | def __init__(self, img_items, transform=None, relabel=True): 16 | self.img_items = img_items 17 | self.transform = transform 18 | self.relabel = relabel 19 | 20 | pid_set = set() 21 | cam_set = set() 22 | for i in img_items: 23 | pid_set.add(i[1]) 24 | cam_set.add(i[2]) 25 | 26 | self.pids = sorted(list(pid_set)) 27 | self.cams = sorted(list(cam_set)) 28 | if relabel: 29 | self.pid_dict = dict([(p, i) for i, p in enumerate(self.pids)]) 30 | self.cam_dict = dict([(p, i) for i, p in enumerate(self.cams)]) 31 | 32 | def __len__(self): 33 | return len(self.img_items) 34 | 35 | def __getitem__(self, index): 36 | img_item = self.img_items[index] 37 | img_path = img_item[0] 38 | pid = img_item[1] 39 | camid = img_item[2] 40 | img = read_image(img_path) 41 | if self.transform is not None: img = self.transform(img) 42 | if self.relabel: 43 | pid = self.pid_dict[pid] 44 | camid = self.cam_dict[camid] 45 | return { 46 | "images": img, 47 | "targets": pid, 48 | "camids": camid, 49 | "img_paths": img_path, 50 | } 51 | 52 | @property 53 | def num_classes(self): 54 | return len(self.pids) 55 | 56 | @property 57 | def num_cameras(self): 58 | return len(self.cams) 59 | -------------------------------------------------------------------------------- /fastreid/data/datasets/AirportALERT.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import os 8 | 9 | from fastreid.data.datasets import DATASET_REGISTRY 10 | from fastreid.data.datasets.bases import ImageDataset 11 | 12 | __all__ = ['AirportALERT', ] 13 | 14 | 15 | @DATASET_REGISTRY.register() 16 | class AirportALERT(ImageDataset): 17 | """Airport 18 | 19 | """ 20 | dataset_dir = "AirportALERT" 21 | dataset_name = "airport" 22 | 23 | def __init__(self, root='datasets', **kwargs): 24 | self.root = root 25 | self.train_path = os.path.join(self.root, self.dataset_dir) 26 | self.train_file = os.path.join(self.root, self.dataset_dir, 'filepath.txt') 27 | 28 | required_files = [self.train_file, self.train_path] 29 | self.check_before_run(required_files) 30 | 31 | train = self.process_train(self.train_path, self.train_file) 32 | 33 | super().__init__(train, [], [], **kwargs) 34 | 35 | def process_train(self, dir_path, train_file): 36 | data = [] 37 | with open(train_file, "r") as f: 38 | img_paths = [line.strip('\n') for line in f.readlines()] 39 | 40 | for path in img_paths: 41 | split_path = path.split('\\') 42 | img_path = '/'.join(split_path) 43 | camid = self.dataset_name + "_" + split_path[0] 44 | pid = self.dataset_name + "_" + split_path[1] 45 | img_path = os.path.join(dir_path, img_path) 46 | # if 11001 <= int(split_path[1]) <= 401999: 47 | if 11001 <= int(split_path[1]): 48 | data.append([img_path, pid, camid]) 49 | 50 | return data 51 | -------------------------------------------------------------------------------- 
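Every dataset module under fastreid/data/datasets follows the pattern shown in AirportALERT.py above: subclass ImageDataset, build a list of (img_path, pid, camid) records with pids/camids prefixed by the dataset name, and decorate the class with @DATASET_REGISTRY.register(). A minimal sketch in that style is shown below; MyToyDataset, the my_toy_dataset folder and its <pid>_<camid>_<idx>.jpg naming scheme are hypothetical and not part of the repository.

import os
from glob import glob

from fastreid.data.datasets import DATASET_REGISTRY
from fastreid.data.datasets.bases import ImageDataset


@DATASET_REGISTRY.register()
class MyToyDataset(ImageDataset):
    """Illustrative only -- not shipped with fastreid."""
    dataset_dir = "my_toy_dataset"   # hypothetical folder under `root`
    dataset_name = "mytoy"

    def __init__(self, root='datasets', **kwargs):
        self.root = root
        self.train_path = os.path.join(self.root, self.dataset_dir)

        required_files = [self.train_path]
        self.check_before_run(required_files)

        train = self.process_train(self.train_path)
        # train-only, no query/gallery split, like most auxiliary datasets here
        super().__init__(train, [], [], **kwargs)

    def process_train(self, train_path):
        data = []
        # assumed file naming: <pid>_<camid>_<idx>.jpg
        for img_path in glob(os.path.join(train_path, "*.jpg")):
            pid_str, cam_str = os.path.basename(img_path).split('_')[:2]
            pid = self.dataset_name + "_" + pid_str
            camid = self.dataset_name + "_" + cam_str
            data.append([img_path, pid, camid])
        return data

Once such a class is imported (the built-in ones are imported from fastreid/data/datasets/__init__.py below), it can be selected by name through the dataset entries of the training config, assuming the standard fastreid config keys.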
/fastreid/data/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from ...utils.registry import Registry 8 | 9 | DATASET_REGISTRY = Registry("DATASET") 10 | DATASET_REGISTRY.__doc__ = """ 11 | Registry for datasets 12 | It must returns an instance of :class:`Backbone`. 13 | """ 14 | 15 | # Person re-id datasets 16 | from .cuhk03 import CUHK03 17 | from .dukemtmcreid import DukeMTMC 18 | from .market1501 import Market1501 19 | from .msmt17 import MSMT17 20 | from .AirportALERT import AirportALERT 21 | from .iLIDS import iLIDS 22 | from .pku import PKU 23 | from .prai import PRAI 24 | from .prid import PRID 25 | from .grid import GRID 26 | from .saivt import SAIVT 27 | from .sensereid import SenseReID 28 | from .sysu_mm import SYSU_mm 29 | from .thermalworld import Thermalworld 30 | from .pes3d import PeS3D 31 | from .caviara import CAVIARa 32 | from .viper import VIPeR 33 | from .lpw import LPW 34 | from .shinpuhkan import Shinpuhkan 35 | from .wildtracker import WildTrackCrop 36 | from .cuhk_sysu import cuhkSYSU 37 | 38 | # Vehicle re-id datasets 39 | from .veri import VeRi 40 | from .vehicleid import VehicleID, SmallVehicleID, MediumVehicleID, LargeVehicleID 41 | from .veriwild import VeRiWild, SmallVeRiWild, MediumVeRiWild, LargeVeRiWild 42 | 43 | __all__ = [k for k in globals().keys() if "builtin" not in k and not k.startswith("_")] 44 | -------------------------------------------------------------------------------- /fastreid/data/datasets/caviara.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import os 8 | from glob import glob 9 | 10 | from fastreid.data.datasets import DATASET_REGISTRY 11 | from fastreid.data.datasets.bases import ImageDataset 12 | 13 | __all__ = ['CAVIARa', ] 14 | 15 | 16 | @DATASET_REGISTRY.register() 17 | class CAVIARa(ImageDataset): 18 | """CAVIARa 19 | """ 20 | dataset_dir = "CAVIARa" 21 | dataset_name = "caviara" 22 | 23 | def __init__(self, root='datasets', **kwargs): 24 | self.root = root 25 | self.train_path = os.path.join(self.root, self.dataset_dir) 26 | 27 | required_files = [self.train_path] 28 | self.check_before_run(required_files) 29 | 30 | train = self.process_train(self.train_path) 31 | 32 | super().__init__(train, [], [], **kwargs) 33 | 34 | def process_train(self, train_path): 35 | data = [] 36 | 37 | img_list = glob(os.path.join(train_path, "*.jpg")) 38 | for img_path in img_list: 39 | img_name = img_path.split('/')[-1] 40 | pid = self.dataset_name + "_" + img_name[:4] 41 | camid = self.dataset_name + "_cam0" 42 | data.append([img_path, pid, camid]) 43 | 44 | return data 45 | -------------------------------------------------------------------------------- /fastreid/data/datasets/cuhk_sysu.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import glob 8 | import os.path as osp 9 | import re 10 | import warnings 11 | 12 | from .bases import ImageDataset 13 | from ..datasets import DATASET_REGISTRY 14 | 15 | 16 | @DATASET_REGISTRY.register() 17 | class cuhkSYSU(ImageDataset): 18 | """CUHK SYSU datasets. 19 | 20 | The dataset is collected from two sources: street snap and movie. 
21 | In street snap, 12,490 images and 6,057 query persons were collected 22 | with movable cameras across hundreds of scenes while 5,694 images and 23 | 2,375 query persons were selected from movies and TV dramas. 24 | 25 | Dataset statistics: 26 | - identities: xxx. 27 | - images: 12936 (train). 28 | """ 29 | dataset_dir = 'cuhk_sysu' 30 | dataset_name = "cuhksysu" 31 | 32 | def __init__(self, root='datasets', **kwargs): 33 | self.root = root 34 | self.dataset_dir = osp.join(self.root, self.dataset_dir) 35 | 36 | self.data_dir = osp.join(self.dataset_dir, "cropped_images") 37 | 38 | required_files = [self.data_dir] 39 | self.check_before_run(required_files) 40 | 41 | train = self.process_dir(self.data_dir) 42 | query = [] 43 | gallery = [] 44 | 45 | super(cuhkSYSU, self).__init__(train, query, gallery, **kwargs) 46 | 47 | def process_dir(self, dir_path): 48 | img_paths = glob.glob(osp.join(dir_path, '*.jpg')) 49 | pattern = re.compile(r'p([-\d]+)_s(\d)') 50 | 51 | data = [] 52 | for img_path in img_paths: 53 | pid, _ = map(int, pattern.search(img_path).groups()) 54 | pid = self.dataset_name + "_" + str(pid) 55 | camid = self.dataset_name + "_0" 56 | data.append((img_path, pid, camid)) 57 | 58 | return data 59 | -------------------------------------------------------------------------------- /fastreid/data/datasets/grid.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import os 8 | from glob import glob 9 | 10 | from fastreid.data.datasets import DATASET_REGISTRY 11 | from fastreid.data.datasets.bases import ImageDataset 12 | 13 | __all__ = ['GRID', ] 14 | 15 | 16 | @DATASET_REGISTRY.register() 17 | class GRID(ImageDataset): 18 | """GRID 19 | """ 20 | dataset_dir = "underground_reid" 21 | dataset_name = 'grid' 22 | 23 | def __init__(self, root='datasets', **kwargs): 24 | self.root = root 25 | self.train_path = os.path.join(self.root, self.dataset_dir, 'images') 26 | 27 | required_files = [self.train_path] 28 | self.check_before_run(required_files) 29 | 30 | train = self.process_train(self.train_path) 31 | 32 | super().__init__(train, [], [], **kwargs) 33 | 34 | def process_train(self, train_path): 35 | data = [] 36 | img_paths = glob(os.path.join(train_path, "*.jpeg")) 37 | 38 | for img_path in img_paths: 39 | img_name = os.path.basename(img_path) 40 | img_info = img_name.split('_') 41 | pid = self.dataset_name + "_" + img_info[0] 42 | camid = self.dataset_name + "_" + img_info[1] 43 | data.append([img_path, pid, camid]) 44 | return data 45 | -------------------------------------------------------------------------------- /fastreid/data/datasets/iLIDS.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import os 8 | from glob import glob 9 | 10 | from fastreid.data.datasets import DATASET_REGISTRY 11 | from fastreid.data.datasets.bases import ImageDataset 12 | 13 | __all__ = ['iLIDS', ] 14 | 15 | 16 | @DATASET_REGISTRY.register() 17 | class iLIDS(ImageDataset): 18 | """iLIDS 19 | """ 20 | dataset_dir = "iLIDS" 21 | dataset_name = "ilids" 22 | 23 | def __init__(self, root='datasets', **kwargs): 24 | self.root = root 25 | self.train_path = os.path.join(self.root, self.dataset_dir) 26 | 27 | required_files = [self.train_path] 28 | self.check_before_run(required_files) 29 | 30 | train = 
self.process_train(self.train_path) 31 | 32 | super().__init__(train, [], [], **kwargs) 33 | 34 | def process_train(self, train_path): 35 | data = [] 36 | file_path = os.listdir(train_path) 37 | for pid_dir in file_path: 38 | img_file = os.path.join(train_path, pid_dir) 39 | img_paths = glob(os.path.join(img_file, "*.png")) 40 | for img_path in img_paths: 41 | split_path = img_path.split('/') 42 | pid = self.dataset_name + "_" + split_path[-2] 43 | camid = self.dataset_name + "_" + split_path[-1].split('_')[0] 44 | data.append([img_path, pid, camid]) 45 | return data 46 | -------------------------------------------------------------------------------- /fastreid/data/datasets/lpw.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import os 8 | from glob import glob 9 | 10 | from fastreid.data.datasets import DATASET_REGISTRY 11 | from fastreid.data.datasets.bases import ImageDataset 12 | 13 | __all__ = ['LPW', ] 14 | 15 | 16 | @DATASET_REGISTRY.register() 17 | class LPW(ImageDataset): 18 | """LPW 19 | """ 20 | dataset_dir = "pep_256x128/data_slim" 21 | dataset_name = "lpw" 22 | 23 | def __init__(self, root='datasets', **kwargs): 24 | self.root = root 25 | self.train_path = os.path.join(self.root, self.dataset_dir) 26 | 27 | required_files = [self.train_path] 28 | self.check_before_run(required_files) 29 | 30 | train = self.process_train(self.train_path) 31 | 32 | super().__init__(train, [], [], **kwargs) 33 | 34 | def process_train(self, train_path): 35 | data = [] 36 | 37 | file_path_list = ['scen1', 'scen2', 'scen3'] 38 | 39 | for scene in file_path_list: 40 | cam_list = os.listdir(os.path.join(train_path, scene)) 41 | for cam in cam_list: 42 | camid = self.dataset_name + "_" + cam 43 | pid_list = os.listdir(os.path.join(train_path, scene, cam)) 44 | for pid_dir in pid_list: 45 | img_paths = glob(os.path.join(train_path, scene, cam, pid_dir, "*.jpg")) 46 | for img_path in img_paths: 47 | pid = self.dataset_name + "_" + scene + "-" + pid_dir 48 | data.append([img_path, pid, camid]) 49 | return data 50 | -------------------------------------------------------------------------------- /fastreid/data/datasets/pes3d.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import os 8 | from glob import glob 9 | 10 | from fastreid.data.datasets import DATASET_REGISTRY 11 | from fastreid.data.datasets.bases import ImageDataset 12 | 13 | __all__ = ['PeS3D',] 14 | 15 | 16 | @DATASET_REGISTRY.register() 17 | class PeS3D(ImageDataset): 18 | """3Dpes 19 | """ 20 | dataset_dir = "3DPeS" 21 | dataset_name = "pes3d" 22 | 23 | def __init__(self, root='datasets', **kwargs): 24 | self.root = root 25 | self.train_path = os.path.join(self.root, self.dataset_dir) 26 | 27 | required_files = [self.train_path] 28 | self.check_before_run(required_files) 29 | 30 | train = self.process_train(self.train_path) 31 | 32 | super().__init__(train, [], [], **kwargs) 33 | 34 | def process_train(self, train_path): 35 | data = [] 36 | 37 | pid_list = os.listdir(train_path) 38 | for pid_dir in pid_list: 39 | pid = self.dataset_name + "_" + pid_dir 40 | img_list = glob(os.path.join(train_path, pid_dir, "*.bmp")) 41 | for img_path in img_list: 42 | camid = self.dataset_name + "_cam0" 43 | data.append([img_path, pid, camid]) 44 | return data 45 | 
-------------------------------------------------------------------------------- /fastreid/data/datasets/pku.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import os 8 | from glob import glob 9 | 10 | from fastreid.data.datasets import DATASET_REGISTRY 11 | from fastreid.data.datasets.bases import ImageDataset 12 | 13 | __all__ = ['PKU', ] 14 | 15 | 16 | @DATASET_REGISTRY.register() 17 | class PKU(ImageDataset): 18 | """PKU 19 | """ 20 | dataset_dir = "PKUv1a_128x48" 21 | dataset_name = 'pku' 22 | 23 | def __init__(self, root='datasets', **kwargs): 24 | self.root = root 25 | self.train_path = os.path.join(self.root, self.dataset_dir) 26 | 27 | required_files = [self.train_path] 28 | self.check_before_run(required_files) 29 | 30 | train = self.process_train(self.train_path) 31 | 32 | super().__init__(train, [], [], **kwargs) 33 | 34 | def process_train(self, train_path): 35 | data = [] 36 | img_paths = glob(os.path.join(train_path, "*.png")) 37 | 38 | for img_path in img_paths: 39 | split_path = img_path.split('/') 40 | img_info = split_path[-1].split('_') 41 | pid = self.dataset_name + "_" + img_info[0] 42 | camid = self.dataset_name + "_" + img_info[1] 43 | data.append([img_path, pid, camid]) 44 | return data 45 | -------------------------------------------------------------------------------- /fastreid/data/datasets/prai.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import os 8 | from glob import glob 9 | 10 | from fastreid.data.datasets import DATASET_REGISTRY 11 | from fastreid.data.datasets.bases import ImageDataset 12 | 13 | __all__ = ['PRAI', ] 14 | 15 | 16 | @DATASET_REGISTRY.register() 17 | class PRAI(ImageDataset): 18 | """PRAI 19 | """ 20 | dataset_dir = "PRAI-1581" 21 | dataset_name = 'prai' 22 | 23 | def __init__(self, root='datasets', **kwargs): 24 | self.root = root 25 | self.train_path = os.path.join(self.root, self.dataset_dir, 'images') 26 | 27 | required_files = [self.train_path] 28 | self.check_before_run(required_files) 29 | 30 | train = self.process_train(self.train_path) 31 | 32 | super().__init__(train, [], [], **kwargs) 33 | 34 | def process_train(self, train_path): 35 | data = [] 36 | img_paths = glob(os.path.join(train_path, "*.jpg")) 37 | for img_path in img_paths: 38 | split_path = img_path.split('/') 39 | img_info = split_path[-1].split('_') 40 | pid = self.dataset_name + "_" + img_info[0] 41 | camid = self.dataset_name + "_" + img_info[1] 42 | data.append([img_path, pid, camid]) 43 | return data 44 | -------------------------------------------------------------------------------- /fastreid/data/datasets/prid.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import os 8 | 9 | from fastreid.data.datasets import DATASET_REGISTRY 10 | from fastreid.data.datasets.bases import ImageDataset 11 | 12 | __all__ = ['PRID', ] 13 | 14 | 15 | @DATASET_REGISTRY.register() 16 | class PRID(ImageDataset): 17 | """PRID 18 | """ 19 | dataset_dir = "prid_2011" 20 | dataset_name = 'prid' 21 | 22 | def __init__(self, root='datasets', **kwargs): 23 | self.root = root 24 | self.train_path = os.path.join(self.root, self.dataset_dir, 'slim_train') 25 | 
26 | required_files = [self.train_path] 27 | self.check_before_run(required_files) 28 | 29 | train = self.process_train(self.train_path) 30 | 31 | super().__init__(train, [], [], **kwargs) 32 | 33 | def process_train(self, train_path): 34 | data = [] 35 | for root, dirs, files in os.walk(train_path): 36 | for img_name in filter(lambda x: x.endswith('.png'), files): 37 | img_path = os.path.join(root, img_name) 38 | pid = self.dataset_name + '_' + root.split('/')[-1].split('_')[1] 39 | camid = self.dataset_name + '_' + img_name.split('_')[0] 40 | data.append([img_path, pid, camid]) 41 | return data 42 | -------------------------------------------------------------------------------- /fastreid/data/datasets/saivt.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import os 8 | from glob import glob 9 | 10 | from fastreid.data.datasets import DATASET_REGISTRY 11 | from fastreid.data.datasets.bases import ImageDataset 12 | 13 | __all__ = ['SAIVT', ] 14 | 15 | 16 | @DATASET_REGISTRY.register() 17 | class SAIVT(ImageDataset): 18 | """SAIVT 19 | """ 20 | dataset_dir = "SAIVT-SoftBio" 21 | dataset_name = "saivt" 22 | 23 | def __init__(self, root='datasets', **kwargs): 24 | self.root = root 25 | self.train_path = os.path.join(self.root, self.dataset_dir) 26 | 27 | required_files = [self.train_path] 28 | self.check_before_run(required_files) 29 | 30 | train = self.process_train(self.train_path) 31 | 32 | super().__init__(train, [], [], **kwargs) 33 | 34 | def process_train(self, train_path): 35 | data = [] 36 | 37 | pid_path = os.path.join(train_path, "cropped_images") 38 | pid_list = os.listdir(pid_path) 39 | 40 | for pid_name in pid_list: 41 | pid = self.dataset_name + '_' + pid_name 42 | img_list = glob(os.path.join(pid_path, pid_name, "*.jpeg")) 43 | for img_path in img_list: 44 | img_name = os.path.basename(img_path) 45 | camid = self.dataset_name + '_' + img_name.split('-')[2] 46 | data.append([img_path, pid, camid]) 47 | return data 48 | -------------------------------------------------------------------------------- /fastreid/data/datasets/sensereid.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import os 8 | from glob import glob 9 | 10 | from fastreid.data.datasets import DATASET_REGISTRY 11 | from fastreid.data.datasets.bases import ImageDataset 12 | 13 | __all__ = ['SenseReID', ] 14 | 15 | 16 | @DATASET_REGISTRY.register() 17 | class SenseReID(ImageDataset): 18 | """Sense reid 19 | """ 20 | dataset_dir = "SenseReID" 21 | dataset_name = "senseid" 22 | 23 | def __init__(self, root='datasets', **kwargs): 24 | self.root = root 25 | self.train_path = os.path.join(self.root, self.dataset_dir) 26 | 27 | required_files = [self.train_path] 28 | self.check_before_run(required_files) 29 | 30 | train = self.process_train(self.train_path) 31 | 32 | super().__init__(train, [], [], **kwargs) 33 | 34 | def process_train(self, train_path): 35 | data = [] 36 | file_path_list = ['test_gallery', 'test_prob'] 37 | 38 | for file_path in file_path_list: 39 | sub_file = os.path.join(train_path, file_path) 40 | img_name = glob(os.path.join(sub_file, "*.jpg")) 41 | for img_path in img_name: 42 | img_name = img_path.split('/')[-1] 43 | img_info = img_name.split('_') 44 | pid = self.dataset_name + "_" + img_info[0] 45 | camid = 
self.dataset_name + "_" + img_info[1].split('.')[0] 46 | data.append([img_path, pid, camid]) 47 | return data 48 | -------------------------------------------------------------------------------- /fastreid/data/datasets/shinpuhkan.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import os 8 | 9 | from fastreid.data.datasets import DATASET_REGISTRY 10 | from fastreid.data.datasets.bases import ImageDataset 11 | 12 | __all__ = ['Shinpuhkan', ] 13 | 14 | 15 | @DATASET_REGISTRY.register() 16 | class Shinpuhkan(ImageDataset): 17 | """shinpuhkan 18 | """ 19 | dataset_dir = "shinpuhkan" 20 | dataset_name = 'shinpuhkan' 21 | 22 | def __init__(self, root='datasets', **kwargs): 23 | self.root = root 24 | self.train_path = os.path.join(self.root, self.dataset_dir) 25 | 26 | required_files = [self.train_path] 27 | self.check_before_run(required_files) 28 | 29 | train = self.process_train(self.train_path) 30 | 31 | super().__init__(train, [], [], **kwargs) 32 | 33 | def process_train(self, train_path): 34 | data = [] 35 | 36 | for root, dirs, files in os.walk(train_path): 37 | img_names = list(filter(lambda x: x.endswith(".jpg"), files)) 38 | # fmt: off 39 | if len(img_names) == 0: continue 40 | # fmt: on 41 | for img_name in img_names: 42 | img_path = os.path.join(root, img_name) 43 | split_path = img_name.split('_') 44 | pid = self.dataset_name + "_" + split_path[0] 45 | camid = self.dataset_name + "_" + split_path[2] 46 | data.append((img_path, pid, camid)) 47 | 48 | return data 49 | -------------------------------------------------------------------------------- /fastreid/data/datasets/sysu_mm.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import os 8 | from glob import glob 9 | 10 | from fastreid.data.datasets import DATASET_REGISTRY 11 | from fastreid.data.datasets.bases import ImageDataset 12 | 13 | __all__ = ['SYSU_mm', ] 14 | 15 | 16 | @DATASET_REGISTRY.register() 17 | class SYSU_mm(ImageDataset): 18 | """sysu mm 19 | """ 20 | dataset_dir = "SYSU-MM01" 21 | dataset_name = "sysumm01" 22 | 23 | def __init__(self, root='datasets', **kwargs): 24 | self.root = root 25 | self.train_path = os.path.join(self.root, self.dataset_dir) 26 | 27 | required_files = [self.train_path] 28 | self.check_before_run(required_files) 29 | 30 | train = self.process_train(self.train_path) 31 | 32 | super().__init__(train, [], [], **kwargs) 33 | 34 | def process_train(self, train_path): 35 | data = [] 36 | 37 | file_path_list = ['cam1', 'cam2', 'cam4', 'cam5'] 38 | 39 | for file_path in file_path_list: 40 | camid = self.dataset_name + "_" + file_path 41 | pid_list = os.listdir(os.path.join(train_path, file_path)) 42 | for pid_dir in pid_list: 43 | pid = self.dataset_name + "_" + pid_dir 44 | img_list = glob(os.path.join(train_path, file_path, pid_dir, "*.jpg")) 45 | for img_path in img_list: 46 | data.append([img_path, pid, camid]) 47 | return data 48 | -------------------------------------------------------------------------------- /fastreid/data/datasets/thermalworld.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import os 8 | from glob import glob 9 | 10 | from fastreid.data.datasets 
import DATASET_REGISTRY 11 | from fastreid.data.datasets.bases import ImageDataset 12 | 13 | __all__ = ['Thermalworld', ] 14 | 15 | 16 | @DATASET_REGISTRY.register() 17 | class Thermalworld(ImageDataset): 18 | """thermal world 19 | """ 20 | dataset_dir = "thermalworld_rgb" 21 | dataset_name = "thermalworld" 22 | 23 | def __init__(self, root='datasets', **kwargs): 24 | self.root = root 25 | self.train_path = os.path.join(self.root, self.dataset_dir) 26 | 27 | required_files = [self.train_path] 28 | self.check_before_run(required_files) 29 | 30 | train = self.process_train(self.train_path) 31 | 32 | super().__init__(train, [], [], **kwargs) 33 | 34 | def process_train(self, train_path): 35 | data = [] 36 | pid_list = os.listdir(train_path) 37 | for pid_dir in pid_list: 38 | pid = self.dataset_name + "_" + pid_dir 39 | img_list = glob(os.path.join(train_path, pid_dir, "*.jpg")) 40 | for img_path in img_list: 41 | camid = self.dataset_name + "_cam0" 42 | data.append([img_path, pid, camid]) 43 | return data 44 | -------------------------------------------------------------------------------- /fastreid/data/datasets/viper.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import os 8 | from glob import glob 9 | 10 | from fastreid.data.datasets import DATASET_REGISTRY 11 | from fastreid.data.datasets.bases import ImageDataset 12 | 13 | __all__ = ['VIPeR', ] 14 | 15 | 16 | @DATASET_REGISTRY.register() 17 | class VIPeR(ImageDataset): 18 | dataset_dir = "VIPeR" 19 | dataset_name = "viper" 20 | 21 | def __init__(self, root='datasets', **kwargs): 22 | self.root = root 23 | self.train_path = os.path.join(self.root, self.dataset_dir) 24 | 25 | required_files = [self.train_path] 26 | self.check_before_run(required_files) 27 | 28 | train = self.process_train(self.train_path) 29 | 30 | super().__init__(train, [], [], **kwargs) 31 | 32 | def process_train(self, train_path): 33 | data = [] 34 | 35 | file_path_list = ['cam_a', 'cam_b'] 36 | 37 | for file_path in file_path_list: 38 | camid = self.dataset_name + "_" + file_path 39 | img_list = glob(os.path.join(train_path, file_path, "*.bmp")) 40 | for img_path in img_list: 41 | img_name = img_path.split('/')[-1] 42 | pid = self.dataset_name + "_" + img_name.split('_')[0] 43 | data.append([img_path, pid, camid]) 44 | 45 | return data 46 | -------------------------------------------------------------------------------- /fastreid/data/samplers/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .triplet_sampler import BalancedIdentitySampler, NaiveIdentitySampler, SetReWeightSampler 8 | from .data_sampler import TrainingSampler, InferenceSampler 9 | from .imbalance_sampler import ImbalancedDatasetSampler 10 | 11 | __all__ = [ 12 | "BalancedIdentitySampler", 13 | "NaiveIdentitySampler", 14 | "SetReWeightSampler", 15 | "TrainingSampler", 16 | "InferenceSampler", 17 | "ImbalancedDatasetSampler", 18 | ] 19 | -------------------------------------------------------------------------------- /fastreid/data/transforms/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .autoaugment import AutoAugment 8 | from .build import 
build_transforms 9 | from .transforms import * 10 | 11 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 12 | -------------------------------------------------------------------------------- /fastreid/engine/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | from .train_loop import * 7 | 8 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 9 | 10 | 11 | # prefer to let hooks and defaults live in separate namespaces (therefore not in __all__) 12 | # but still make them available here 13 | from .hooks import * 14 | from .defaults import * 15 | from .launch import * 16 | -------------------------------------------------------------------------------- /fastreid/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | from .evaluator import DatasetEvaluator, inference_context, inference_on_dataset 2 | from .reid_evaluation import ReidEvaluator 3 | from .clas_evaluator import ClasEvaluator 4 | from .testing import print_csv_format, verify_results 5 | 6 | __all__ = [k for k in globals().keys() if not k.startswith("_")] 7 | -------------------------------------------------------------------------------- /fastreid/evaluation/query_expansion.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | # based on 8 | # https://github.com/PyRetri/PyRetri/blob/master/pyretri/index/re_ranker/re_ranker_impl/query_expansion.py 9 | 10 | import numpy as np 11 | import torch 12 | import torch.nn.functional as F 13 | 14 | 15 | def aqe(query_feat: torch.tensor, gallery_feat: torch.tensor, 16 | qe_times: int = 1, qe_k: int = 10, alpha: float = 3.0): 17 | """ 18 | Combining the retrieved topk nearest neighbors with the original query and doing another retrieval. 19 | c.f. https://www.robots.ox.ac.uk/~vgg/publications/papers/chum07b.pdf 20 | Args : 21 | query_feat (torch.tensor): 22 | gallery_feat (torch.tensor): 23 | qe_times (int): number of query expansion times. 24 | qe_k (int): number of the neighbors to be combined. 
25 | alpha (float): 26 | """ 27 | num_query = query_feat.shape[0] 28 | all_feat = torch.cat((query_feat, gallery_feat), dim=0) 29 | norm_feat = F.normalize(all_feat, p=2, dim=1) 30 | 31 | all_feat = all_feat.numpy() 32 | for i in range(qe_times): 33 | all_feat_list = [] 34 | sims = torch.mm(norm_feat, norm_feat.t()) 35 | sims = sims.data.cpu().numpy() 36 | for sim in sims: 37 | init_rank = np.argpartition(-sim, range(1, qe_k + 1)) 38 | weights = sim[init_rank[:qe_k]].reshape((-1, 1)) 39 | weights = np.power(weights, alpha) 40 | all_feat_list.append(np.mean(all_feat[init_rank[:qe_k], :] * weights, axis=0)) 41 | all_feat = np.stack(all_feat_list, axis=0) 42 | norm_feat = F.normalize(torch.from_numpy(all_feat), p=2, dim=1) 43 | 44 | query_feat = torch.from_numpy(all_feat[:num_query]) 45 | gallery_feat = torch.from_numpy(all_feat[num_query:]) 46 | return query_feat, gallery_feat 47 | -------------------------------------------------------------------------------- /fastreid/evaluation/rank_cylib/Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | python3 setup.py build_ext --inplace 3 | rm -rf build 4 | clean: 5 | rm -rf build 6 | rm -f rank_cy.c *.so 7 | -------------------------------------------------------------------------------- /fastreid/evaluation/rank_cylib/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | 8 | def compile_helper(): 9 | """Compile helper function at runtime. Make sure this 10 | is invoked on a single process.""" 11 | import os 12 | import subprocess 13 | 14 | path = os.path.abspath(os.path.dirname(__file__)) 15 | ret = subprocess.run(["make", "-C", path]) 16 | if ret.returncode != 0: 17 | print("Making cython reid evaluation module failed, exiting.") 18 | import sys 19 | 20 | sys.exit(1) 21 | -------------------------------------------------------------------------------- /fastreid/evaluation/rank_cylib/setup.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup 2 | from distutils.extension import Extension 3 | 4 | import numpy as np 5 | from Cython.Build import cythonize 6 | 7 | 8 | def numpy_include(): 9 | try: 10 | numpy_include = np.get_include() 11 | except AttributeError: 12 | numpy_include = np.get_numpy_include() 13 | return numpy_include 14 | 15 | 16 | ext_modules = [ 17 | Extension( 18 | 'rank_cy', 19 | ['rank_cy.pyx'], 20 | include_dirs=[numpy_include()], 21 | ), 22 | Extension( 23 | 'roc_cy', 24 | ['roc_cy.pyx'], 25 | include_dirs=[numpy_include()], 26 | ) 27 | ] 28 | 29 | setup( 30 | name='Cython-based reid evaluation code', 31 | ext_modules=cythonize(ext_modules) 32 | ) 33 | -------------------------------------------------------------------------------- /fastreid/layers/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .activation import * 8 | from .batch_norm import * 9 | from .context_block import ContextBlock 10 | from .drop import DropPath, DropBlock2d, drop_block_2d, drop_path 11 | from .frn import FRN, TLU 12 | from .gather_layer import GatherLayer 13 | from .helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible 14 | from .non_local import Non_local 15 | from .se_layer import SELayer 16 | from .splat import 
SplAtConv2d, DropBlock2D 17 | from .weight_init import ( 18 | trunc_normal_, variance_scaling_, lecun_normal_, weights_init_kaiming, weights_init_classifier 19 | ) 20 | -------------------------------------------------------------------------------- /fastreid/layers/activation.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import math 8 | 9 | import torch 10 | import torch.nn as nn 11 | import torch.nn.functional as F 12 | 13 | __all__ = [ 14 | 'Mish', 15 | 'Swish', 16 | 'MemoryEfficientSwish', 17 | 'GELU'] 18 | 19 | 20 | class Mish(nn.Module): 21 | def __init__(self): 22 | super().__init__() 23 | 24 | def forward(self, x): 25 | # inlining this saves 1 second per epoch (V100 GPU) vs having a temp x and then returning x(!) 26 | return x * (torch.tanh(F.softplus(x))) 27 | 28 | 29 | class Swish(nn.Module): 30 | def forward(self, x): 31 | return x * torch.sigmoid(x) 32 | 33 | 34 | class SwishImplementation(torch.autograd.Function): 35 | @staticmethod 36 | def forward(ctx, i): 37 | result = i * torch.sigmoid(i) 38 | ctx.save_for_backward(i) 39 | return result 40 | 41 | @staticmethod 42 | def backward(ctx, grad_output): 43 | i = ctx.saved_variables[0] 44 | sigmoid_i = torch.sigmoid(i) 45 | return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i))) 46 | 47 | 48 | class MemoryEfficientSwish(nn.Module): 49 | def forward(self, x): 50 | return SwishImplementation.apply(x) 51 | 52 | 53 | class GELU(nn.Module): 54 | """ 55 | Paper Section 3.4, last paragraph notice that BERT used the GELU instead of RELU 56 | """ 57 | 58 | def forward(self, x): 59 | return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) 60 | -------------------------------------------------------------------------------- /fastreid/layers/gather_layer.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | # based on: https://github.com/open-mmlab/OpenSelfSup/blob/master/openselfsup/models/utils/gather_layer.py 8 | 9 | import torch 10 | import torch.distributed as dist 11 | 12 | 13 | class GatherLayer(torch.autograd.Function): 14 | """Gather tensors from all process, supporting backward propagation. 
15 | """ 16 | 17 | @staticmethod 18 | def forward(ctx, input): 19 | ctx.save_for_backward(input) 20 | output = [torch.zeros_like(input) \ 21 | for _ in range(dist.get_world_size())] 22 | dist.all_gather(output, input) 23 | return tuple(output) 24 | 25 | @staticmethod 26 | def backward(ctx, *grads): 27 | input, = ctx.saved_tensors 28 | grad_out = torch.zeros_like(input) 29 | grad_out[:] = grads[dist.get_rank()] 30 | return grad_out 31 | -------------------------------------------------------------------------------- /fastreid/layers/helpers.py: -------------------------------------------------------------------------------- 1 | """ Layer/Module Helpers 2 | Hacked together by / Copyright 2020 Ross Wightman 3 | """ 4 | import collections.abc 5 | from itertools import repeat 6 | 7 | 8 | # From PyTorch internals 9 | def _ntuple(n): 10 | def parse(x): 11 | if isinstance(x, collections.abc.Iterable): 12 | return x 13 | return tuple(repeat(x, n)) 14 | 15 | return parse 16 | 17 | 18 | to_1tuple = _ntuple(1) 19 | to_2tuple = _ntuple(2) 20 | to_3tuple = _ntuple(3) 21 | to_4tuple = _ntuple(4) 22 | to_ntuple = _ntuple 23 | 24 | 25 | def make_divisible(v, divisor=8, min_value=None): 26 | min_value = min_value or divisor 27 | new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) 28 | # Make sure that round down does not go down by more than 10%. 29 | if new_v < 0.9 * v: 30 | new_v += divisor 31 | return new_v 32 | -------------------------------------------------------------------------------- /fastreid/layers/se_layer.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from torch import nn 8 | 9 | 10 | class SELayer(nn.Module): 11 | def __init__(self, channel, reduction=16): 12 | super(SELayer, self).__init__() 13 | self.avg_pool = nn.AdaptiveAvgPool2d(1) 14 | self.fc = nn.Sequential( 15 | nn.Linear(channel, int(channel / reduction), bias=False), 16 | nn.ReLU(inplace=True), 17 | nn.Linear(int(channel / reduction), channel, bias=False), 18 | nn.Sigmoid() 19 | ) 20 | 21 | def forward(self, x): 22 | b, c, _, _ = x.size() 23 | y = self.avg_pool(x).view(b, c) 24 | y = self.fc(y).view(b, c, 1, 1) 25 | return x * y.expand_as(x) 26 | -------------------------------------------------------------------------------- /fastreid/modeling/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from . 
import losses 8 | from .backbones import ( 9 | BACKBONE_REGISTRY, 10 | build_resnet_backbone, 11 | build_backbone, 12 | ) 13 | from .heads import ( 14 | REID_HEADS_REGISTRY, 15 | build_heads, 16 | EmbeddingHead, 17 | ) 18 | from .meta_arch import ( 19 | build_model, 20 | META_ARCH_REGISTRY, 21 | ) 22 | 23 | __all__ = [k for k in globals().keys() if not k.startswith("_")] -------------------------------------------------------------------------------- /fastreid/modeling/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .build import build_backbone, BACKBONE_REGISTRY 8 | 9 | from .resnet import build_resnet_backbone 10 | from .osnet import build_osnet_backbone 11 | from .resnest import build_resnest_backbone 12 | from .resnext import build_resnext_backbone 13 | from .regnet import build_regnet_backbone, build_effnet_backbone 14 | from .shufflenet import build_shufflenetv2_backbone 15 | from .mobilenet import build_mobilenetv2_backbone 16 | from .mobilenetv3 import build_mobilenetv3_backbone 17 | from .repvgg import build_repvgg_backbone 18 | from .vision_transformer import build_vit_backbone 19 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/build.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from ...utils.registry import Registry 8 | 9 | BACKBONE_REGISTRY = Registry("BACKBONE") 10 | BACKBONE_REGISTRY.__doc__ = """ 11 | Registry for backbones, which extract feature maps from images 12 | The registered object must be a callable that accepts two arguments: 13 | 1. A :class:`fastreid.config.CfgNode` 14 | It must returns an instance of :class:`Backbone`. 15 | """ 16 | 17 | 18 | def build_backbone(cfg): 19 | """ 20 | Build a backbone from `cfg.MODEL.BACKBONE.NAME`. 21 | Returns: 22 | an instance of :class:`Backbone` 23 | """ 24 | 25 | backbone_name = cfg.MODEL.BACKBONE.NAME 26 | backbone = BACKBONE_REGISTRY.get(backbone_name)(cfg) 27 | return backbone 28 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | from .regnet import build_regnet_backbone 4 | from .effnet import build_effnet_backbone 5 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/effnet/EN-B0_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: effnet 3 | NUM_CLASSES: 1000 4 | EN: 5 | STEM_W: 32 6 | STRIDES: [1, 2, 2, 2, 1, 2, 1] 7 | DEPTHS: [1, 2, 2, 3, 3, 4, 1] 8 | WIDTHS: [16, 24, 40, 80, 112, 192, 320] 9 | EXP_RATIOS: [1, 6, 6, 6, 6, 6, 6] 10 | KERNELS: [3, 3, 5, 3, 5, 5, 3] 11 | HEAD_W: 1280 12 | OPTIM: 13 | LR_POLICY: cos 14 | BASE_LR: 0.4 15 | MAX_EPOCH: 100 16 | MOMENTUM: 0.9 17 | WEIGHT_DECAY: 1e-5 18 | TRAIN: 19 | DATASET: imagenet 20 | IM_SIZE: 224 21 | BATCH_SIZE: 256 22 | TEST: 23 | DATASET: imagenet 24 | IM_SIZE: 256 25 | BATCH_SIZE: 200 26 | NUM_GPUS: 8 27 | OUT_DIR: . 
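# Editor's note (annotation only, no values changed; these appear to be pycls-style
# ImageNet configs consumed by the effnet backbone builder): the seven entries in
# STRIDES / DEPTHS / WIDTHS / EXP_RATIOS / KERNELS describe the seven MBConv
# (inverted-bottleneck) stages of EfficientNet-B0, with EXP_RATIOS the expansion
# factors and HEAD_W the width of the final 1x1 conv head. The EN-B1..B5 files that
# follow scale depth, width and the train/test image size in the usual EfficientNet way.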
28 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/effnet/EN-B1_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: effnet 3 | NUM_CLASSES: 1000 4 | EN: 5 | STEM_W: 32 6 | STRIDES: [1, 2, 2, 2, 1, 2, 1] 7 | DEPTHS: [2, 3, 3, 4, 4, 5, 2] 8 | WIDTHS: [16, 24, 40, 80, 112, 192, 320] 9 | EXP_RATIOS: [1, 6, 6, 6, 6, 6, 6] 10 | KERNELS: [3, 3, 5, 3, 5, 5, 3] 11 | HEAD_W: 1280 12 | OPTIM: 13 | LR_POLICY: cos 14 | BASE_LR: 0.4 15 | MAX_EPOCH: 100 16 | MOMENTUM: 0.9 17 | WEIGHT_DECAY: 1e-5 18 | TRAIN: 19 | DATASET: imagenet 20 | IM_SIZE: 240 21 | BATCH_SIZE: 256 22 | TEST: 23 | DATASET: imagenet 24 | IM_SIZE: 274 25 | BATCH_SIZE: 200 26 | NUM_GPUS: 8 27 | OUT_DIR: . 28 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/effnet/EN-B2_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: effnet 3 | NUM_CLASSES: 1000 4 | EN: 5 | STEM_W: 32 6 | STRIDES: [1, 2, 2, 2, 1, 2, 1] 7 | DEPTHS: [2, 3, 3, 4, 4, 5, 2] 8 | WIDTHS: [16, 24, 48, 88, 120, 208, 352] 9 | EXP_RATIOS: [1, 6, 6, 6, 6, 6, 6] 10 | KERNELS: [3, 3, 5, 3, 5, 5, 3] 11 | HEAD_W: 1408 12 | OPTIM: 13 | LR_POLICY: cos 14 | BASE_LR: 0.4 15 | MAX_EPOCH: 100 16 | MOMENTUM: 0.9 17 | WEIGHT_DECAY: 1e-5 18 | TRAIN: 19 | DATASET: imagenet 20 | IM_SIZE: 260 21 | BATCH_SIZE: 256 22 | TEST: 23 | DATASET: imagenet 24 | IM_SIZE: 298 25 | BATCH_SIZE: 200 26 | NUM_GPUS: 8 27 | OUT_DIR: . 28 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/effnet/EN-B3_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: effnet 3 | NUM_CLASSES: 1000 4 | EN: 5 | STEM_W: 40 6 | STRIDES: [1, 2, 2, 2, 1, 2, 1] 7 | DEPTHS: [2, 3, 3, 5, 5, 6, 2] 8 | WIDTHS: [24, 32, 48, 96, 136, 232, 384] 9 | EXP_RATIOS: [1, 6, 6, 6, 6, 6, 6] 10 | KERNELS: [3, 3, 5, 3, 5, 5, 3] 11 | HEAD_W: 1536 12 | OPTIM: 13 | LR_POLICY: cos 14 | BASE_LR: 0.4 15 | MAX_EPOCH: 100 16 | MOMENTUM: 0.9 17 | WEIGHT_DECAY: 1e-5 18 | TRAIN: 19 | DATASET: imagenet 20 | IM_SIZE: 300 21 | BATCH_SIZE: 256 22 | TEST: 23 | DATASET: imagenet 24 | IM_SIZE: 342 25 | BATCH_SIZE: 200 26 | NUM_GPUS: 8 27 | OUT_DIR: . 28 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/effnet/EN-B4_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: effnet 3 | NUM_CLASSES: 1000 4 | EN: 5 | STEM_W: 48 6 | STRIDES: [1, 2, 2, 2, 1, 2, 1] 7 | DEPTHS: [2, 4, 4, 6, 6, 8, 2] 8 | WIDTHS: [24, 32, 56, 112, 160, 272, 448] 9 | EXP_RATIOS: [1, 6, 6, 6, 6, 6, 6] 10 | KERNELS: [3, 3, 5, 3, 5, 5, 3] 11 | HEAD_W: 1792 12 | OPTIM: 13 | LR_POLICY: cos 14 | BASE_LR: 0.2 15 | MAX_EPOCH: 100 16 | MOMENTUM: 0.9 17 | WEIGHT_DECAY: 1e-5 18 | TRAIN: 19 | DATASET: imagenet 20 | IM_SIZE: 380 21 | BATCH_SIZE: 128 22 | TEST: 23 | DATASET: imagenet 24 | IM_SIZE: 434 25 | BATCH_SIZE: 104 26 | NUM_GPUS: 8 27 | OUT_DIR: . 
28 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/effnet/EN-B5_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: effnet 3 | NUM_CLASSES: 1000 4 | EN: 5 | STEM_W: 48 6 | STRIDES: [1, 2, 2, 2, 1, 2, 1] 7 | DEPTHS: [3, 5, 5, 7, 7, 9, 3] 8 | WIDTHS: [24, 40, 64, 128, 176, 304, 512] 9 | EXP_RATIOS: [1, 6, 6, 6, 6, 6, 6] 10 | KERNELS: [3, 3, 5, 3, 5, 5, 3] 11 | HEAD_W: 2048 12 | OPTIM: 13 | LR_POLICY: cos 14 | BASE_LR: 0.1 15 | MAX_EPOCH: 100 16 | MOMENTUM: 0.9 17 | WEIGHT_DECAY: 1e-5 18 | TRAIN: 19 | DATASET: imagenet 20 | IM_SIZE: 456 21 | BATCH_SIZE: 64 22 | TEST: 23 | DATASET: imagenet 24 | IM_SIZE: 522 25 | BATCH_SIZE: 48 26 | NUM_GPUS: 8 27 | OUT_DIR: . 28 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/regnetx/RegNetX-1.6GF_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: regnet 3 | NUM_CLASSES: 1000 4 | REGNET: 5 | DEPTH: 18 6 | W0: 80 7 | WA: 34.01 8 | WM: 2.25 9 | GROUP_W: 24 10 | OPTIM: 11 | LR_POLICY: cos 12 | BASE_LR: 0.8 13 | MAX_EPOCH: 100 14 | MOMENTUM: 0.9 15 | WEIGHT_DECAY: 5e-5 16 | WARMUP_ITERS: 5 17 | TRAIN: 18 | DATASET: imagenet 19 | IM_SIZE: 224 20 | BATCH_SIZE: 1024 21 | TEST: 22 | DATASET: imagenet 23 | IM_SIZE: 256 24 | BATCH_SIZE: 800 25 | NUM_GPUS: 8 26 | OUT_DIR: . 27 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/regnetx/RegNetX-12GF_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: regnet 3 | NUM_CLASSES: 1000 4 | REGNET: 5 | DEPTH: 19 6 | W0: 168 7 | WA: 73.36 8 | WM: 2.37 9 | GROUP_W: 112 10 | OPTIM: 11 | LR_POLICY: cos 12 | BASE_LR: 0.4 13 | MAX_EPOCH: 100 14 | MOMENTUM: 0.9 15 | WEIGHT_DECAY: 5e-5 16 | WARMUP_ITERS: 5 17 | TRAIN: 18 | DATASET: imagenet 19 | IM_SIZE: 224 20 | BATCH_SIZE: 512 21 | TEST: 22 | DATASET: imagenet 23 | IM_SIZE: 256 24 | BATCH_SIZE: 400 25 | NUM_GPUS: 8 26 | OUT_DIR: . 27 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/regnetx/RegNetX-16GF_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: regnet 3 | NUM_CLASSES: 1000 4 | REGNET: 5 | DEPTH: 22 6 | W0: 216 7 | WA: 55.59 8 | WM: 2.1 9 | GROUP_W: 128 10 | OPTIM: 11 | LR_POLICY: cos 12 | BASE_LR: 0.4 13 | MAX_EPOCH: 100 14 | MOMENTUM: 0.9 15 | WEIGHT_DECAY: 5e-5 16 | WARMUP_ITERS: 5 17 | TRAIN: 18 | DATASET: imagenet 19 | IM_SIZE: 224 20 | BATCH_SIZE: 512 21 | TEST: 22 | DATASET: imagenet 23 | IM_SIZE: 256 24 | BATCH_SIZE: 400 25 | NUM_GPUS: 8 26 | OUT_DIR: . 27 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/regnetx/RegNetX-200MF_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: regnet 3 | NUM_CLASSES: 1000 4 | REGNET: 5 | DEPTH: 13 6 | W0: 24 7 | WA: 36.44 8 | WM: 2.49 9 | GROUP_W: 8 10 | OPTIM: 11 | LR_POLICY: cos 12 | BASE_LR: 0.8 13 | MAX_EPOCH: 100 14 | MOMENTUM: 0.9 15 | WEIGHT_DECAY: 5e-5 16 | WARMUP_ITERS: 5 17 | TRAIN: 18 | DATASET: imagenet 19 | IM_SIZE: 224 20 | BATCH_SIZE: 1024 21 | TEST: 22 | DATASET: imagenet 23 | IM_SIZE: 256 24 | BATCH_SIZE: 800 25 | NUM_GPUS: 8 26 | OUT_DIR: . 
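# Editor's note (annotation only, no values changed): in these pycls-style RegNet
# configs, DEPTH/W0/WA/WM/GROUP_W parameterize the model as in "Designing Network
# Design Spaces": per-block widths follow u_j = W0 + WA * j and each is then rounded
# to W0 times a power of WM, while GROUP_W sets the group width of the 3x3 grouped
# convolutions. The file-name suffix (200MF, 1.6GF, ...) is roughly the FLOP regime.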
27 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/regnetx/RegNetX-3.2GF_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: regnet 3 | NUM_CLASSES: 1000 4 | REGNET: 5 | DEPTH: 25 6 | W0: 88 7 | WA: 26.31 8 | WM: 2.25 9 | GROUP_W: 48 10 | OPTIM: 11 | LR_POLICY: cos 12 | BASE_LR: 0.4 13 | MAX_EPOCH: 100 14 | MOMENTUM: 0.9 15 | WEIGHT_DECAY: 5e-5 16 | WARMUP_ITERS: 5 17 | TRAIN: 18 | DATASET: imagenet 19 | IM_SIZE: 224 20 | BATCH_SIZE: 512 21 | TEST: 22 | DATASET: imagenet 23 | IM_SIZE: 256 24 | BATCH_SIZE: 400 25 | NUM_GPUS: 8 26 | OUT_DIR: . 27 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/regnetx/RegNetX-32GF_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: regnet 3 | NUM_CLASSES: 1000 4 | REGNET: 5 | DEPTH: 23 6 | W0: 320 7 | WA: 69.86 8 | WM: 2.0 9 | GROUP_W: 168 10 | OPTIM: 11 | LR_POLICY: cos 12 | BASE_LR: 0.2 13 | MAX_EPOCH: 100 14 | MOMENTUM: 0.9 15 | WEIGHT_DECAY: 5e-5 16 | WARMUP_ITERS: 5 17 | TRAIN: 18 | DATASET: imagenet 19 | IM_SIZE: 224 20 | BATCH_SIZE: 256 21 | TEST: 22 | DATASET: imagenet 23 | IM_SIZE: 256 24 | BATCH_SIZE: 200 25 | NUM_GPUS: 8 26 | OUT_DIR: . 27 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/regnetx/RegNetX-4.0GF_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: regnet 3 | NUM_CLASSES: 1000 4 | REGNET: 5 | DEPTH: 23 6 | W0: 96 7 | WA: 38.65 8 | WM: 2.43 9 | GROUP_W: 40 10 | OPTIM: 11 | LR_POLICY: cos 12 | BASE_LR: 0.4 13 | MAX_EPOCH: 100 14 | MOMENTUM: 0.9 15 | WEIGHT_DECAY: 5e-5 16 | WARMUP_ITERS: 5 17 | TRAIN: 18 | DATASET: imagenet 19 | IM_SIZE: 224 20 | BATCH_SIZE: 512 21 | TEST: 22 | DATASET: imagenet 23 | IM_SIZE: 256 24 | BATCH_SIZE: 400 25 | NUM_GPUS: 8 26 | OUT_DIR: . 27 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/regnetx/RegNetX-400MF_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: regnet 3 | NUM_CLASSES: 1000 4 | REGNET: 5 | DEPTH: 22 6 | W0: 24 7 | WA: 24.48 8 | WM: 2.54 9 | GROUP_W: 16 10 | OPTIM: 11 | LR_POLICY: cos 12 | BASE_LR: 0.8 13 | MAX_EPOCH: 100 14 | MOMENTUM: 0.9 15 | WEIGHT_DECAY: 5e-5 16 | WARMUP_ITERS: 5 17 | TRAIN: 18 | DATASET: imagenet 19 | IM_SIZE: 224 20 | BATCH_SIZE: 1024 21 | TEST: 22 | DATASET: imagenet 23 | IM_SIZE: 256 24 | BATCH_SIZE: 800 25 | NUM_GPUS: 8 26 | OUT_DIR: . 27 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/regnetx/RegNetX-6.4GF_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: regnet 3 | NUM_CLASSES: 1000 4 | REGNET: 5 | DEPTH: 17 6 | W0: 184 7 | WA: 60.83 8 | WM: 2.07 9 | GROUP_W: 56 10 | OPTIM: 11 | LR_POLICY: cos 12 | BASE_LR: 0.4 13 | MAX_EPOCH: 100 14 | MOMENTUM: 0.9 15 | WEIGHT_DECAY: 5e-5 16 | WARMUP_ITERS: 5 17 | TRAIN: 18 | DATASET: imagenet 19 | IM_SIZE: 224 20 | BATCH_SIZE: 512 21 | TEST: 22 | DATASET: imagenet 23 | IM_SIZE: 256 24 | BATCH_SIZE: 400 25 | NUM_GPUS: 8 26 | OUT_DIR: . 
27 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/regnetx/RegNetX-600MF_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: regnet 3 | NUM_CLASSES: 1000 4 | REGNET: 5 | DEPTH: 16 6 | W0: 48 7 | WA: 36.97 8 | WM: 2.24 9 | GROUP_W: 24 10 | OPTIM: 11 | LR_POLICY: cos 12 | BASE_LR: 0.8 13 | MAX_EPOCH: 100 14 | MOMENTUM: 0.9 15 | WEIGHT_DECAY: 5e-5 16 | WARMUP_ITERS: 5 17 | TRAIN: 18 | DATASET: imagenet 19 | IM_SIZE: 224 20 | BATCH_SIZE: 1024 21 | TEST: 22 | DATASET: imagenet 23 | IM_SIZE: 256 24 | BATCH_SIZE: 800 25 | NUM_GPUS: 8 26 | OUT_DIR: . 27 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/regnetx/RegNetX-8.0GF_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: regnet 3 | NUM_CLASSES: 1000 4 | REGNET: 5 | DEPTH: 23 6 | W0: 80 7 | WA: 49.56 8 | WM: 2.88 9 | GROUP_W: 120 10 | OPTIM: 11 | LR_POLICY: cos 12 | BASE_LR: 0.4 13 | MAX_EPOCH: 100 14 | MOMENTUM: 0.9 15 | WEIGHT_DECAY: 5e-5 16 | WARMUP_ITERS: 5 17 | TRAIN: 18 | DATASET: imagenet 19 | IM_SIZE: 224 20 | BATCH_SIZE: 512 21 | TEST: 22 | DATASET: imagenet 23 | IM_SIZE: 256 24 | BATCH_SIZE: 400 25 | NUM_GPUS: 8 26 | OUT_DIR: . 27 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/regnetx/RegNetX-800MF_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: regnet 3 | NUM_CLASSES: 1000 4 | REGNET: 5 | DEPTH: 16 6 | W0: 56 7 | WA: 35.73 8 | WM: 2.28 9 | GROUP_W: 16 10 | OPTIM: 11 | LR_POLICY: cos 12 | BASE_LR: 0.8 13 | MAX_EPOCH: 100 14 | MOMENTUM: 0.9 15 | WEIGHT_DECAY: 5e-5 16 | WARMUP_ITERS: 5 17 | TRAIN: 18 | DATASET: imagenet 19 | IM_SIZE: 224 20 | BATCH_SIZE: 1024 21 | TEST: 22 | DATASET: imagenet 23 | IM_SIZE: 256 24 | BATCH_SIZE: 800 25 | NUM_GPUS: 8 26 | OUT_DIR: . 27 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/regnety/RegNetY-1.6GF_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: regnet 3 | NUM_CLASSES: 1000 4 | REGNET: 5 | SE_ON: True 6 | DEPTH: 27 7 | W0: 48 8 | WA: 20.71 9 | WM: 2.65 10 | GROUP_W: 24 11 | OPTIM: 12 | LR_POLICY: cos 13 | BASE_LR: 0.8 14 | MAX_EPOCH: 100 15 | MOMENTUM: 0.9 16 | WEIGHT_DECAY: 5e-5 17 | WARMUP_ITERS: 5 18 | TRAIN: 19 | DATASET: imagenet 20 | IM_SIZE: 224 21 | BATCH_SIZE: 1024 22 | TEST: 23 | DATASET: imagenet 24 | IM_SIZE: 256 25 | BATCH_SIZE: 800 26 | NUM_GPUS: 8 27 | OUT_DIR: . 28 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/regnety/RegNetY-12GF_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: regnet 3 | NUM_CLASSES: 1000 4 | REGNET: 5 | SE_ON: True 6 | DEPTH: 19 7 | W0: 168 8 | WA: 73.36 9 | WM: 2.37 10 | GROUP_W: 112 11 | OPTIM: 12 | LR_POLICY: cos 13 | BASE_LR: 0.4 14 | MAX_EPOCH: 100 15 | MOMENTUM: 0.9 16 | WEIGHT_DECAY: 5e-5 17 | WARMUP_ITERS: 5 18 | TRAIN: 19 | DATASET: imagenet 20 | IM_SIZE: 224 21 | BATCH_SIZE: 512 22 | TEST: 23 | DATASET: imagenet 24 | IM_SIZE: 256 25 | BATCH_SIZE: 400 26 | NUM_GPUS: 8 27 | OUT_DIR: . 
28 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/regnety/RegNetY-16GF_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: regnet 3 | NUM_CLASSES: 1000 4 | REGNET: 5 | SE_ON: True 6 | DEPTH: 18 7 | W0: 200 8 | WA: 106.23 9 | WM: 2.48 10 | GROUP_W: 112 11 | OPTIM: 12 | LR_POLICY: cos 13 | BASE_LR: 0.2 14 | MAX_EPOCH: 100 15 | MOMENTUM: 0.9 16 | WEIGHT_DECAY: 5e-5 17 | WARMUP_ITERS: 5 18 | TRAIN: 19 | DATASET: imagenet 20 | IM_SIZE: 224 21 | BATCH_SIZE: 256 22 | TEST: 23 | DATASET: imagenet 24 | IM_SIZE: 256 25 | BATCH_SIZE: 200 26 | NUM_GPUS: 8 27 | OUT_DIR: . 28 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/regnety/RegNetY-200MF_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: regnet 3 | NUM_CLASSES: 1000 4 | REGNET: 5 | SE_ON: True 6 | DEPTH: 13 7 | W0: 24 8 | WA: 36.44 9 | WM: 2.49 10 | GROUP_W: 8 11 | OPTIM: 12 | LR_POLICY: cos 13 | BASE_LR: 0.8 14 | MAX_EPOCH: 100 15 | MOMENTUM: 0.9 16 | WEIGHT_DECAY: 5e-5 17 | TRAIN: 18 | DATASET: imagenet 19 | IM_SIZE: 224 20 | BATCH_SIZE: 1024 21 | TEST: 22 | DATASET: imagenet 23 | IM_SIZE: 256 24 | BATCH_SIZE: 800 25 | NUM_GPUS: 8 26 | OUT_DIR: . 27 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/regnety/RegNetY-3.2GF_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: regnet 3 | NUM_CLASSES: 1000 4 | REGNET: 5 | SE_ON: True 6 | DEPTH: 21 7 | W0: 80 8 | WA: 42.63 9 | WM: 2.66 10 | GROUP_W: 24 11 | OPTIM: 12 | LR_POLICY: cos 13 | BASE_LR: 0.4 14 | MAX_EPOCH: 100 15 | MOMENTUM: 0.9 16 | WEIGHT_DECAY: 5e-5 17 | WARMUP_ITERS: 5 18 | TRAIN: 19 | DATASET: imagenet 20 | IM_SIZE: 224 21 | BATCH_SIZE: 512 22 | TEST: 23 | DATASET: imagenet 24 | IM_SIZE: 256 25 | BATCH_SIZE: 400 26 | NUM_GPUS: 8 27 | OUT_DIR: . 28 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/regnety/RegNetY-32GF_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: regnet 3 | NUM_CLASSES: 1000 4 | REGNET: 5 | SE_ON: True 6 | DEPTH: 20 7 | W0: 232 8 | WA: 115.89 9 | WM: 2.53 10 | GROUP_W: 232 11 | OPTIM: 12 | LR_POLICY: cos 13 | BASE_LR: 0.2 14 | MAX_EPOCH: 100 15 | MOMENTUM: 0.9 16 | WEIGHT_DECAY: 5e-5 17 | WARMUP_ITERS: 5 18 | TRAIN: 19 | DATASET: imagenet 20 | IM_SIZE: 224 21 | BATCH_SIZE: 256 22 | TEST: 23 | DATASET: imagenet 24 | IM_SIZE: 256 25 | BATCH_SIZE: 200 26 | NUM_GPUS: 8 27 | OUT_DIR: . 28 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/regnety/RegNetY-4.0GF_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: regnet 3 | NUM_CLASSES: 1000 4 | REGNET: 5 | SE_ON: True 6 | DEPTH: 22 7 | W0: 96 8 | WA: 31.41 9 | WM: 2.24 10 | GROUP_W: 64 11 | OPTIM: 12 | LR_POLICY: cos 13 | BASE_LR: 0.4 14 | MAX_EPOCH: 100 15 | MOMENTUM: 0.9 16 | WEIGHT_DECAY: 5e-5 17 | WARMUP_ITERS: 5 18 | TRAIN: 19 | DATASET: imagenet 20 | IM_SIZE: 224 21 | BATCH_SIZE: 512 22 | TEST: 23 | DATASET: imagenet 24 | IM_SIZE: 256 25 | BATCH_SIZE: 400 26 | NUM_GPUS: 8 27 | OUT_DIR: . 
28 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/regnety/RegNetY-400MF_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: regnet 3 | NUM_CLASSES: 1000 4 | REGNET: 5 | SE_ON: True 6 | DEPTH: 16 7 | W0: 48 8 | WA: 27.89 9 | WM: 2.09 10 | GROUP_W: 8 11 | OPTIM: 12 | LR_POLICY: cos 13 | BASE_LR: 0.8 14 | MAX_EPOCH: 100 15 | MOMENTUM: 0.9 16 | WEIGHT_DECAY: 5e-5 17 | WARMUP_ITERS: 5 18 | TRAIN: 19 | DATASET: imagenet 20 | IM_SIZE: 224 21 | BATCH_SIZE: 1024 22 | TEST: 23 | DATASET: imagenet 24 | IM_SIZE: 256 25 | BATCH_SIZE: 800 26 | NUM_GPUS: 8 27 | OUT_DIR: . 28 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/regnety/RegNetY-6.4GF_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: regnet 3 | NUM_CLASSES: 1000 4 | REGNET: 5 | SE_ON: True 6 | DEPTH: 25 7 | W0: 112 8 | WA: 33.22 9 | WM: 2.27 10 | GROUP_W: 72 11 | OPTIM: 12 | LR_POLICY: cos 13 | BASE_LR: 0.4 14 | MAX_EPOCH: 100 15 | MOMENTUM: 0.9 16 | WEIGHT_DECAY: 5e-5 17 | WARMUP_ITERS: 5 18 | TRAIN: 19 | DATASET: imagenet 20 | IM_SIZE: 224 21 | BATCH_SIZE: 512 22 | TEST: 23 | DATASET: imagenet 24 | IM_SIZE: 256 25 | BATCH_SIZE: 400 26 | NUM_GPUS: 8 27 | OUT_DIR: . 28 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/regnety/RegNetY-600MF_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: regnet 3 | NUM_CLASSES: 1000 4 | REGNET: 5 | SE_ON: True 6 | DEPTH: 15 7 | W0: 48 8 | WA: 32.54 9 | WM: 2.32 10 | GROUP_W: 16 11 | OPTIM: 12 | LR_POLICY: cos 13 | BASE_LR: 0.8 14 | MAX_EPOCH: 100 15 | MOMENTUM: 0.9 16 | WEIGHT_DECAY: 5e-5 17 | WARMUP_ITERS: 5 18 | TRAIN: 19 | DATASET: imagenet 20 | IM_SIZE: 224 21 | BATCH_SIZE: 1024 22 | TEST: 23 | DATASET: imagenet 24 | IM_SIZE: 256 25 | BATCH_SIZE: 800 26 | NUM_GPUS: 8 27 | OUT_DIR: . 28 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/regnety/RegNetY-8.0GF_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: regnet 3 | NUM_CLASSES: 1000 4 | REGNET: 5 | SE_ON: true 6 | DEPTH: 17 7 | W0: 192 8 | WA: 76.82 9 | WM: 2.19 10 | GROUP_W: 56 11 | OPTIM: 12 | LR_POLICY: cos 13 | BASE_LR: 0.4 14 | MAX_EPOCH: 100 15 | MOMENTUM: 0.9 16 | WEIGHT_DECAY: 5e-5 17 | WARMUP_ITERS: 5 18 | TRAIN: 19 | DATASET: imagenet 20 | IM_SIZE: 224 21 | BATCH_SIZE: 512 22 | TEST: 23 | DATASET: imagenet 24 | IM_SIZE: 256 25 | BATCH_SIZE: 400 26 | NUM_GPUS: 8 27 | OUT_DIR: . 28 | -------------------------------------------------------------------------------- /fastreid/modeling/backbones/regnet/regnety/RegNetY-800MF_dds_8gpu.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | TYPE: regnet 3 | NUM_CLASSES: 1000 4 | REGNET: 5 | SE_ON: True 6 | DEPTH: 14 7 | W0: 56 8 | WA: 38.84 9 | WM: 2.4 10 | GROUP_W: 16 11 | OPTIM: 12 | LR_POLICY: cos 13 | BASE_LR: 0.8 14 | MAX_EPOCH: 100 15 | MOMENTUM: 0.9 16 | WEIGHT_DECAY: 5e-5 17 | WARMUP_ITERS: 5 18 | TRAIN: 19 | DATASET: imagenet 20 | IM_SIZE: 224 21 | BATCH_SIZE: 1024 22 | TEST: 23 | DATASET: imagenet 24 | IM_SIZE: 256 25 | BATCH_SIZE: 800 26 | NUM_GPUS: 8 27 | OUT_DIR: . 
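The `REGNET` fields in the configs above (`DEPTH`, `W0`, `WA`, `WM`, `GROUP_W`) encode the quantized linear width rule from the RegNet paper: per-block widths grow linearly from `W0` with slope `WA` and are then snapped to powers of `WM`. Below is a minimal NumPy sketch of that rule for reference; the helper name is illustrative, and the group-width adjustment driven by `GROUP_W` (which makes each stage width divisible by its group width) is omitted.

```python
import numpy as np

def regnet_widths(w0, wa, wm, depth, q=8):
    """Per-block widths from the RegNet quantized linear rule (sketch only)."""
    u = w0 + wa * np.arange(depth)             # continuous widths: u_j = W0 + WA * j
    s = np.round(np.log(u / w0) / np.log(wm))  # quantize each width to a power of WM
    w = w0 * np.power(wm, s)
    w = (np.round(w / q) * q).astype(int)      # snap to multiples of 8
    return w.tolist()

# RegNetY-800MF config above: DEPTH=14, W0=56, WA=38.84, WM=2.4
print(regnet_widths(56, 38.84, 2.4, 14))       # stage widths before group-width adjustment
```

Blocks that end up with the same quantized width form one stage, so `DEPTH` counts blocks while the number of distinct widths gives the number of stages.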
28 | -------------------------------------------------------------------------------- /fastreid/modeling/heads/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .build import REID_HEADS_REGISTRY, build_heads 8 | 9 | # import all the heads, so they will be registered 10 | from .embedding_head import EmbeddingHead 11 | from .clas_head import ClasHead 12 | -------------------------------------------------------------------------------- /fastreid/modeling/heads/build.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from ...utils.registry import Registry 8 | 9 | REID_HEADS_REGISTRY = Registry("HEADS") 10 | REID_HEADS_REGISTRY.__doc__ = """ 11 | Registry for ReID heads in a baseline model. 12 | 13 | ReID heads take backbone feature maps and produce 14 | embeddings and/or classification logits. 15 | The registered object will be called with `obj(cfg)`. 16 | The call is expected to return an :class:`nn.Module`. 17 | """ 18 | 19 | 20 | def build_heads(cfg): 21 | """ 22 | Build REID heads defined by `cfg.MODEL.HEADS.NAME`. 23 | """ 24 | head = cfg.MODEL.HEADS.NAME 25 | return REID_HEADS_REGISTRY.get(head)(cfg) 26 | -------------------------------------------------------------------------------- /fastreid/modeling/heads/clas_head.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import torch.nn.functional as F 8 | 9 | from fastreid.modeling.heads import REID_HEADS_REGISTRY, EmbeddingHead 10 | 11 | 12 | @REID_HEADS_REGISTRY.register() 13 | class ClasHead(EmbeddingHead): 14 | def forward(self, features, targets=None): 15 | """ 16 | See :class:`EmbeddingHead.forward`. 
17 | """ 18 | pool_feat = self.pool_layer(features) 19 | neck_feat = self.bottleneck(pool_feat) 20 | neck_feat = neck_feat.view(neck_feat.size(0), -1) 21 | 22 | if self.cls_layer.__class__.__name__ == 'Linear': 23 | logits = F.linear(neck_feat, self.weight) 24 | else: 25 | logits = F.linear(F.normalize(neck_feat), F.normalize(self.weight)) 26 | 27 | # Evaluation 28 | if not self.training: return logits.mul_(self.cls_layer.s) 29 | 30 | cls_outputs = self.cls_layer(logits.clone(), targets) 31 | 32 | return { 33 | "cls_outputs": cls_outputs, 34 | "pred_class_logits": logits.mul_(self.cls_layer.s), 35 | "features": neck_feat, 36 | } 37 | -------------------------------------------------------------------------------- /fastreid/modeling/losses/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: l1aoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .circle_loss import * 8 | from .cross_entroy_loss import cross_entropy_loss, log_accuracy 9 | from .focal_loss import focal_loss 10 | from .triplet_loss import triplet_loss 11 | 12 | __all__ = [k for k in globals().keys() if not k.startswith("_")] -------------------------------------------------------------------------------- /fastreid/modeling/losses/cross_entroy_loss.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: l1aoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | import torch 7 | import torch.nn.functional as F 8 | 9 | from fastreid.utils.events import get_event_storage 10 | 11 | 12 | def log_accuracy(pred_class_logits, gt_classes, topk=(1,)): 13 | """ 14 | Log the accuracy metrics to EventStorage. 15 | """ 16 | bsz = pred_class_logits.size(0) 17 | maxk = max(topk) 18 | _, pred_class = pred_class_logits.topk(maxk, 1, True, True) 19 | pred_class = pred_class.t() 20 | correct = pred_class.eq(gt_classes.view(1, -1).expand_as(pred_class)) 21 | 22 | ret = [] 23 | for k in topk: 24 | correct_k = correct[:k].view(-1).float().sum(dim=0, keepdim=True) 25 | ret.append(correct_k.mul_(1. 
/ bsz)) 26 | 27 | storage = get_event_storage() 28 | storage.put_scalar("cls_accuracy", ret[0]) 29 | 30 | 31 | def cross_entropy_loss(pred_class_outputs, gt_classes, eps, alpha=0.2): 32 | num_classes = pred_class_outputs.size(1) 33 | 34 | if eps >= 0: 35 | smooth_param = eps 36 | else: 37 | # Adaptive label smooth regularization 38 | soft_label = F.softmax(pred_class_outputs, dim=1) 39 | smooth_param = alpha * soft_label[torch.arange(soft_label.size(0)), gt_classes].unsqueeze(1) 40 | 41 | log_probs = F.log_softmax(pred_class_outputs, dim=1) 42 | with torch.no_grad(): 43 | targets = torch.ones_like(log_probs) 44 | targets *= smooth_param / (num_classes - 1) 45 | targets.scatter_(1, gt_classes.data.unsqueeze(1), (1 - smooth_param)) 46 | 47 | loss = (-targets * log_probs).sum(dim=1) 48 | 49 | with torch.no_grad(): 50 | non_zero_cnt = max(loss.nonzero(as_tuple=False).size(0), 1) 51 | 52 | loss = loss.sum() / non_zero_cnt 53 | 54 | return loss 55 | -------------------------------------------------------------------------------- /fastreid/modeling/losses/utils.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import torch 8 | import torch.nn.functional as F 9 | 10 | 11 | def concat_all_gather(tensor): 12 | """ 13 | Performs all_gather operation on the provided tensors. 14 | *** Warning ***: torch.distributed.all_gather has no gradient. 15 | """ 16 | tensors_gather = [torch.ones_like(tensor) 17 | for _ in range(torch.distributed.get_world_size())] 18 | torch.distributed.all_gather(tensors_gather, tensor, async_op=False) 19 | 20 | output = torch.cat(tensors_gather, dim=0) 21 | return output 22 | 23 | 24 | def normalize(x, axis=-1): 25 | """Normalizing to unit length along the specified dimension. 26 | Args: 27 | x: pytorch Variable 28 | Returns: 29 | x: pytorch Variable, same shape as input 30 | """ 31 | x = 1. 
* x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12) 32 | return x 33 | 34 | 35 | def euclidean_dist(x, y): 36 | m, n = x.size(0), y.size(0) 37 | xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n) 38 | yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t() 39 | dist = xx + yy - 2 * torch.matmul(x, y.t()) 40 | dist = dist.clamp(min=1e-12).sqrt() # for numerical stability 41 | return dist 42 | 43 | 44 | def cosine_dist(x, y): 45 | x = F.normalize(x, dim=1) 46 | y = F.normalize(y, dim=1) 47 | dist = 2 - 2 * torch.mm(x, y.t()) 48 | return dist 49 | -------------------------------------------------------------------------------- /fastreid/modeling/meta_arch/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .build import META_ARCH_REGISTRY, build_model 8 | 9 | 10 | # import all the meta_arch, so they will be registered 11 | from .baseline import Baseline 12 | from .mgn import MGN 13 | from .moco import MoCo 14 | from .distiller import Distiller 15 | -------------------------------------------------------------------------------- /fastreid/modeling/meta_arch/build.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | import torch 7 | 8 | from fastreid.utils.registry import Registry 9 | 10 | META_ARCH_REGISTRY = Registry("META_ARCH") # noqa F401 isort:skip 11 | META_ARCH_REGISTRY.__doc__ = """ 12 | Registry for meta-architectures, i.e. the whole model. 13 | The registered object will be called with `obj(cfg)` 14 | and expected to return a `nn.Module` object. 15 | """ 16 | 17 | 18 | def build_model(cfg): 19 | """ 20 | Build the whole model architecture, defined by ``cfg.MODEL.META_ARCHITECTURE``. 21 | Note that it does not load any weights from ``cfg``. 22 | """ 23 | meta_arch = cfg.MODEL.META_ARCHITECTURE 24 | model = META_ARCH_REGISTRY.get(meta_arch)(cfg) 25 | model.to(torch.device(cfg.MODEL.DEVICE)) 26 | return model 27 | -------------------------------------------------------------------------------- /fastreid/solver/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | 8 | from .build import build_lr_scheduler, build_optimizer -------------------------------------------------------------------------------- /fastreid/solver/optim/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .lamb import Lamb 8 | from .swa import SWA 9 | from .radam import RAdam 10 | from torch.optim import * 11 | -------------------------------------------------------------------------------- /fastreid/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | -------------------------------------------------------------------------------- /projects/FastAttr/README.md: -------------------------------------------------------------------------------- 1 | # FastAttr in FastReID 2 | 3 | This project provides a strong baseline for pedestrian attribute recognition. 
4 | 5 | ## Datasets Preparation 6 | 7 | We use `PA100k` to evaluate the model's performance. 8 | You can download the dataset from [HydraPlus-Net](https://github.com/xh-liu/HydraPlus-Net). 9 | 10 | ## Usage 11 | 12 | The training config files can be found in `projects/FastAttr/configs`, which you can use to reproduce the results of the repo. 13 | 14 | For example: 15 | 16 | ```bash 17 | python3 projects/FastAttr/train_net.py --config-file projects/FastAttr/configs/pa100.yml --num-gpus 4 18 | ``` 19 | 20 | ## Experiment Results 21 | 22 | We refer to [A Strong Baseline of Pedestrian Attribute Recognition](https://github.com/valencebond/Strong_Baseline_of_Pedestrian_Attribute_Recognition/tree/master) as our baseline method and conduct the experiments 23 | with 4 GPUs. 24 | More details can be found in the config file and code. 25 | 26 | ### PA100k 27 | 28 | | Method | Pretrained | mA | Accu | Prec | Recall | F1 | 29 | | :---: | :---: | :---: | :---: | :---: | :---: | :---: | 30 | | attribute baseline | ImageNet | 80.50 | 78.84 | 87.24 | 87.12 | 86.78 | 31 | | FastAttr | ImageNet | 77.57 | 78.03 | 88.39 | 84.98 | 86.65 | 32 | -------------------------------------------------------------------------------- /projects/FastAttr/configs/Base-attribute.yml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: AttrBaseline 3 | 4 | BACKBONE: 5 | NAME: build_resnet_backbone 6 | NORM: BN 7 | DEPTH: 50x 8 | LAST_STRIDE: 2 9 | FEAT_DIM: 2048 10 | WITH_IBN: False 11 | PRETRAIN: True 12 | PRETRAIN_PATH: /export/home/lxy/.cache/torch/checkpoints/resnet50-19c8e357.pth 13 | 14 | HEADS: 15 | NAME: AttrHead 16 | WITH_BNNECK: True 17 | POOL_LAYER: FastGlobalAvgPool 18 | CLS_LAYER: Linear 19 | NUM_CLASSES: 26 20 | 21 | LOSSES: 22 | NAME: ("BinaryCrossEntropyLoss",) 23 | 24 | BCE: 25 | WEIGHT_ENABLED: True 26 | SCALE: 1. 27 | 28 | INPUT: 29 | SIZE_TRAIN: [ 256, 192 ] 30 | SIZE_TEST: [ 256, 192 ] 31 | 32 | FLIP: 33 | ENABLED: True 34 | 35 | PADDING: 36 | ENABLED: True 37 | 38 | DATALOADER: 39 | SAMPLER_TRAIN: TrainingSampler 40 | NUM_WORKERS: 8 41 | 42 | SOLVER: 43 | MAX_EPOCH: 30 44 | OPT: SGD 45 | BASE_LR: 0.04 46 | BIAS_LR_FACTOR: 2. 47 | HEADS_LR_FACTOR: 10. 
48 | WEIGHT_DECAY: 0.0005 49 | WEIGHT_DECAY_BIAS: 0.0005 50 | IMS_PER_BATCH: 256 51 | 52 | NESTEROV: False 53 | SCHED: MultiStepLR 54 | STEPS: [ 15, 20, 25 ] 55 | 56 | WARMUP_FACTOR: 0.1 57 | WARMUP_ITERS: 1000 58 | 59 | CHECKPOINT_PERIOD: 10 60 | 61 | TEST: 62 | EVAL_PERIOD: 10 63 | IMS_PER_BATCH: 256 64 | 65 | CUDNN_BENCHMARK: True 66 | 67 | -------------------------------------------------------------------------------- /projects/FastAttr/configs/dukemtmc.yml: -------------------------------------------------------------------------------- 1 | _BASE_: Base-attribute.yml 2 | 3 | DATASETS: 4 | NAMES: ("DukeMTMCAttr",) 5 | TESTS: ("DukeMTMCAttr",) 6 | 7 | MODEL: 8 | HEADS: 9 | NUM_CLASSES: 23 10 | 11 | OUTPUT_DIR: projects/FastAttr/logs/dukemtmc/strong_baseline -------------------------------------------------------------------------------- /projects/FastAttr/configs/market1501.yml: -------------------------------------------------------------------------------- 1 | _BASE_: Base-attribute.yml 2 | 3 | DATASETS: 4 | NAMES: ("Market1501Attr",) 5 | TESTS: ("Market1501Attr",) 6 | 7 | MODEL: 8 | HEADS: 9 | NUM_CLASSES: 27 10 | 11 | OUTPUT_DIR: projects/FastAttr/logs/market1501/strong_baseline -------------------------------------------------------------------------------- /projects/FastAttr/configs/pa100.yml: -------------------------------------------------------------------------------- 1 | _BASE_: Base-attribute.yml 2 | 3 | DATASETS: 4 | NAMES: ("PA100K",) 5 | TESTS: ("PA100K",) 6 | 7 | OUTPUT_DIR: projects/FastAttr/logs/pa100k/strong_baseline -------------------------------------------------------------------------------- /projects/FastAttr/fastattr/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .attr_evaluation import AttrEvaluator 8 | from .config import add_attr_config 9 | from .datasets import * 10 | from .modeling import * 11 | from .attr_dataset import AttrDataset 12 | -------------------------------------------------------------------------------- /projects/FastAttr/fastattr/attr_dataset.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import torch 8 | from torch.utils.data import Dataset 9 | 10 | from fastreid.data.data_utils import read_image 11 | 12 | 13 | class AttrDataset(Dataset): 14 | """Image Person Attribute Dataset""" 15 | 16 | def __init__(self, img_items, transform, attr_dict): 17 | self.img_items = img_items 18 | self.transform = transform 19 | self.attr_dict = attr_dict 20 | 21 | def __len__(self): 22 | return len(self.img_items) 23 | 24 | def __getitem__(self, index): 25 | img_path, labels = self.img_items[index] 26 | img = read_image(img_path) 27 | 28 | if self.transform is not None: img = self.transform(img) 29 | 30 | labels = torch.as_tensor(labels) 31 | 32 | return { 33 | "images": img, 34 | "targets": labels, 35 | "img_paths": img_path, 36 | } 37 | 38 | @property 39 | def num_classes(self): 40 | return len(self.attr_dict) 41 | 42 | @property 43 | def sample_weights(self): 44 | sample_weights = torch.zeros(self.num_classes, dtype=torch.float32) 45 | for _, attr in self.img_items: 46 | sample_weights += torch.as_tensor(attr) 47 | sample_weights /= len(self) 48 | return sample_weights 49 | 
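The `sample_weights` property of `AttrDataset` above accumulates, for each attribute, the fraction of training images that carry it; those positive ratios are what the weighted BCE loss in `bce_loss.py` further down uses to re-weight positives versus negatives. A small self-contained sketch of that computation, with toy annotations (file names and labels are illustrative):

```python
import torch

# toy attribute annotations: 3 samples x 4 binary attributes
img_items = [
    ("a.jpg", [1, 0, 0, 1]),
    ("b.jpg", [1, 1, 0, 0]),
    ("c.jpg", [0, 1, 0, 1]),
]

# same computation as AttrDataset.sample_weights: positive ratio per attribute
ratios = torch.zeros(4, dtype=torch.float32)
for _, attr in img_items:
    ratios += torch.as_tensor(attr, dtype=torch.float32)
ratios /= len(img_items)
print(ratios)  # tensor([0.6667, 0.6667, 0.0000, 0.6667])
```

In `ratio2weight` (see `bce_loss.py` below) a positive label is then weighted by roughly `exp(1 - ratio)` and a negative one by `exp(ratio)`, so rare attributes receive larger positive weights.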
-------------------------------------------------------------------------------- /projects/FastAttr/fastattr/config.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from fastreid.config import CfgNode as CN 8 | 9 | 10 | def add_attr_config(cfg): 11 | _C = cfg 12 | 13 | _C.MODEL.LOSSES.BCE = CN({"WEIGHT_ENABLED": True}) 14 | _C.MODEL.LOSSES.BCE.SCALE = 1. 15 | 16 | _C.TEST.THRES = 0.5 17 | -------------------------------------------------------------------------------- /projects/FastAttr/fastattr/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | # Attributed datasets 8 | from .pa100k import PA100K 9 | from .market1501attr import Market1501Attr 10 | from .dukemtmcattr import DukeMTMCAttr 11 | -------------------------------------------------------------------------------- /projects/FastAttr/fastattr/modeling/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .attr_baseline import AttrBaseline 8 | from .attr_head import AttrHead 9 | from .bce_loss import cross_entropy_sigmoid_loss 10 | -------------------------------------------------------------------------------- /projects/FastAttr/fastattr/modeling/attr_baseline.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from fastreid.modeling.meta_arch.baseline import Baseline 8 | from fastreid.modeling.meta_arch.build import META_ARCH_REGISTRY 9 | from .bce_loss import cross_entropy_sigmoid_loss 10 | 11 | 12 | @META_ARCH_REGISTRY.register() 13 | class AttrBaseline(Baseline): 14 | 15 | @classmethod 16 | def from_config(cls, cfg): 17 | base_res = Baseline.from_config(cfg) 18 | base_res["loss_kwargs"].update({ 19 | 'bce': { 20 | 'scale': cfg.MODEL.LOSSES.BCE.SCALE 21 | } 22 | }) 23 | return base_res 24 | 25 | def losses(self, outputs, gt_labels): 26 | r""" 27 | Compute loss from modeling's outputs, the loss function input arguments 28 | must be the same as the outputs of the model forwarding. 
29 | """ 30 | # model predictions 31 | cls_outputs = outputs["cls_outputs"] 32 | 33 | loss_dict = {} 34 | loss_names = self.loss_kwargs["loss_names"] 35 | 36 | if "BinaryCrossEntropyLoss" in loss_names: 37 | bce_kwargs = self.loss_kwargs.get('bce') 38 | loss_dict["loss_bce"] = cross_entropy_sigmoid_loss( 39 | cls_outputs, 40 | gt_labels, 41 | self.sample_weights, 42 | ) * bce_kwargs.get('scale') 43 | 44 | return loss_dict 45 | -------------------------------------------------------------------------------- /projects/FastAttr/fastattr/modeling/attr_head.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import torch 8 | import torch.nn.functional as F 9 | from torch import nn 10 | 11 | from fastreid.modeling.heads import EmbeddingHead 12 | from fastreid.modeling.heads.build import REID_HEADS_REGISTRY 13 | from fastreid.layers.weight_init import weights_init_kaiming 14 | 15 | 16 | @REID_HEADS_REGISTRY.register() 17 | class AttrHead(EmbeddingHead): 18 | def __init__(self, cfg): 19 | super().__init__(cfg) 20 | num_classes = cfg.MODEL.HEADS.NUM_CLASSES 21 | 22 | self.bnneck = nn.BatchNorm1d(num_classes) 23 | self.bnneck.apply(weights_init_kaiming) 24 | 25 | def forward(self, features, targets=None): 26 | """ 27 | See :class:`ReIDHeads.forward`. 28 | """ 29 | pool_feat = self.pool_layer(features) 30 | neck_feat = self.bottleneck(pool_feat) 31 | neck_feat = neck_feat.view(neck_feat.size(0), -1) 32 | 33 | logits = F.linear(neck_feat, self.weight) 34 | logits = self.bnneck(logits) 35 | 36 | # Evaluation 37 | if not self.training: 38 | cls_outptus = torch.sigmoid(logits) 39 | return cls_outptus 40 | 41 | return { 42 | "cls_outputs": logits, 43 | } 44 | -------------------------------------------------------------------------------- /projects/FastAttr/fastattr/modeling/bce_loss.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import torch 8 | import torch.nn.functional as F 9 | 10 | 11 | def ratio2weight(targets, ratio): 12 | pos_weights = targets * (1 - ratio) 13 | neg_weights = (1 - targets) * ratio 14 | weights = torch.exp(neg_weights + pos_weights) 15 | 16 | weights[targets > 1] = 0.0 17 | return weights 18 | 19 | 20 | def cross_entropy_sigmoid_loss(pred_class_logits, gt_classes, sample_weight=None): 21 | loss = F.binary_cross_entropy_with_logits(pred_class_logits, gt_classes, reduction='none') 22 | 23 | if sample_weight is not None: 24 | targets_mask = torch.where(gt_classes.detach() > 0.5, 25 | torch.ones(1, device="cuda"), torch.zeros(1, device="cuda")) # dtype float32 26 | weight = ratio2weight(targets_mask, sample_weight) 27 | loss = loss * weight 28 | 29 | with torch.no_grad(): 30 | non_zero_cnt = max(loss.nonzero(as_tuple=False).size(0), 1) 31 | 32 | loss = loss.sum() / non_zero_cnt 33 | return loss 34 | -------------------------------------------------------------------------------- /projects/FastClas/README.md: -------------------------------------------------------------------------------- 1 | # FastClas in FastReID 2 | 3 | This project provides a baseline and example for image classification based on fastreid. 4 | 5 | ## Datasets Preparation 6 | 7 | We refer to [pytorch tutorial](https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html) for dataset 8 | preparation. 
This is just an example for building a classification task based on fastreid. You can customize 9 | your own datasets and model. 10 | 11 | ## Usage 12 | 13 | If you want to train models with 4 GPUs, you can run: 14 | ```bash 15 | python3 projects/FastClas/train_net.py --config-file projects/FastClas/configs/base-clas.yaml --num-gpus 4 16 | ``` 17 | -------------------------------------------------------------------------------- /projects/FastClas/configs/base-clas.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: Baseline 3 | 4 | BACKBONE: 5 | NAME: build_resnet_backbone 6 | DEPTH: 18x 7 | NORM: BN 8 | LAST_STRIDE: 2 9 | FEAT_DIM: 512 10 | PRETRAIN: True 11 | 12 | HEADS: 13 | NAME: ClasHead 14 | WITH_BNNECK: False 15 | EMBEDDING_DIM: 0 16 | POOL_LAYER: FastGlobalAvgPool 17 | CLS_LAYER: Linear 18 | NUM_CLASSES: 2 19 | 20 | LOSSES: 21 | NAME: ("CrossEntropyLoss",) 22 | 23 | CE: 24 | EPSILON: 0.1 25 | SCALE: 1. 26 | 27 | INPUT: 28 | SIZE_TRAIN: [0,] # no need for resize when training 29 | SIZE_TEST: [256,] 30 | 31 | CROP: 32 | ENABLED: True 33 | SIZE: [224,] 34 | SCALE: [0.08, 1] 35 | RATIO: [0.75, 1.333333333] 36 | 37 | FLIP: 38 | ENABLED: True 39 | 40 | DATALOADER: 41 | SAMPLER_TRAIN: TrainingSampler 42 | NUM_WORKERS: 8 43 | 44 | SOLVER: 45 | MAX_EPOCH: 100 46 | AMP: 47 | ENABLED: True 48 | 49 | OPT: SGD 50 | SCHED: CosineAnnealingLR 51 | 52 | BASE_LR: 0.001 53 | MOMENTUM: 0.9 54 | NESTEROV: False 55 | 56 | BIAS_LR_FACTOR: 1. 57 | WEIGHT_DECAY: 0.0005 58 | WEIGHT_DECAY_BIAS: 0. 59 | IMS_PER_BATCH: 16 60 | 61 | ETA_MIN_LR: 0.00003 62 | 63 | WARMUP_FACTOR: 0.1 64 | WARMUP_ITERS: 100 65 | 66 | CHECKPOINT_PERIOD: 10 67 | 68 | TEST: 69 | EVAL_PERIOD: 10 70 | IMS_PER_BATCH: 256 71 | 72 | DATASETS: 73 | NAMES: ("Hymenoptera",) 74 | TESTS: ("Hymenoptera",) 75 | 76 | OUTPUT_DIR: projects/FastClas/logs/r18_demo -------------------------------------------------------------------------------- /projects/FastClas/fastclas/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .bee_ant import * 8 | from .distracted_driver import * 9 | from .dataset import ClasDataset 10 | from .trainer import ClasTrainer 11 | -------------------------------------------------------------------------------- /projects/FastClas/fastclas/bee_ant.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import glob 8 | import os 9 | 10 | from fastreid.data.datasets import DATASET_REGISTRY 11 | from fastreid.data.datasets.bases import ImageDataset 12 | 13 | 14 | __all__ = ["Hymenoptera"] 15 | 16 | 17 | @DATASET_REGISTRY.register() 18 | class Hymenoptera(ImageDataset): 19 | """This is a demo dataset for smoke tests; you can refer to 20 | https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html 21 | """ 22 | dataset_dir = 'hymenoptera_data' 23 | dataset_name = "hyt" 24 | 25 | def __init__(self, root='datasets', **kwargs): 26 | self.root = root 27 | self.dataset_dir = os.path.join(self.root, self.dataset_dir) 28 | train_dir = os.path.join(self.dataset_dir, "train") 29 | val_dir = os.path.join(self.dataset_dir, "val") 30 | 31 | required_files = [ 32 | self.dataset_dir, 33 | train_dir, 34 | val_dir, 35 | ] 36 | self.check_before_run(required_files) 37 | 38 | train = 
self.process_dir(train_dir) 39 | val = self.process_dir(val_dir) 40 | 41 | super().__init__(train, val, [], **kwargs) 42 | 43 | def process_dir(self, data_dir): 44 | data = [] 45 | all_dirs = [d.name for d in os.scandir(data_dir) if d.is_dir()] 46 | for dir_name in all_dirs: 47 | all_imgs = glob.glob(os.path.join(data_dir, dir_name, "*.jpg")) 48 | for img_name in all_imgs: 49 | data.append([img_name, dir_name, '0']) 50 | return data 51 | -------------------------------------------------------------------------------- /projects/FastClas/fastclas/dataset.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from torch.utils.data import Dataset 8 | 9 | from fastreid.data.data_utils import read_image 10 | 11 | 12 | class ClasDataset(Dataset): 13 | """Image Person ReID Dataset""" 14 | 15 | def __init__(self, img_items, transform=None, idx_to_class=None): 16 | self.img_items = img_items 17 | self.transform = transform 18 | 19 | if idx_to_class is not None: 20 | self.idx_to_class = idx_to_class 21 | self.class_to_idx = {clas_name: int(i) for i, clas_name in self.idx_to_class.items()} 22 | self.classes = sorted(list(self.idx_to_class.values())) 23 | else: 24 | classes = set() 25 | for i in img_items: 26 | classes.add(i[1]) 27 | 28 | self.classes = sorted(list(classes)) 29 | self.class_to_idx = {cls_name: i for i, cls_name in enumerate(self.classes)} 30 | self.idx_to_class = {idx: clas for clas, idx in self.class_to_idx.items()} 31 | 32 | def __len__(self): 33 | return len(self.img_items) 34 | 35 | def __getitem__(self, index): 36 | img_item = self.img_items[index] 37 | img_path = img_item[0] 38 | label = self.class_to_idx[img_item[1]] 39 | img = read_image(img_path) 40 | if self.transform is not None: img = self.transform(img) 41 | 42 | return { 43 | "images": img, 44 | "targets": label, 45 | "img_paths": img_path, 46 | } 47 | 48 | @property 49 | def num_classes(self): 50 | return len(self.classes) 51 | -------------------------------------------------------------------------------- /projects/FastDistill/README.md: -------------------------------------------------------------------------------- 1 | # FastDistill in FastReID 2 | 3 | This project provides a strong distillation method for both embedding and classification training. 4 | The feature distillation comes from [overhaul-distillation](https://github.com/clovaai/overhaul-distillation/tree/master/ImageNet). 
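The rows labelled "JS Div" in the results below refer to logit-level distillation with a Jensen-Shannon divergence between the teacher's and the student's softened class predictions. A minimal sketch of that idea follows; the temperature, scaling and names are illustrative, and the actual loss is the one defined by the `Distiller` meta-architecture in this project, not this snippet.

```python
import torch
import torch.nn.functional as F

def js_div_loss(student_logits, teacher_logits, t=4.0):
    """Symmetric JS divergence between temperature-softened distributions (sketch)."""
    p_s = F.softmax(student_logits / t, dim=1)
    p_t = F.softmax(teacher_logits / t, dim=1)
    m = 0.5 * (p_s + p_t)                      # mixture distribution
    kl = lambda p, q: (p * (p.clamp_min(1e-8).log() - q.clamp_min(1e-8).log())).sum(dim=1)
    return (0.5 * (kl(p_s, m) + kl(p_t, m))).mean() * (t * t)

student = torch.randn(8, 702)                  # DukeMTMC-reID has 702 training identities
teacher = torch.randn(8, 702)
loss = js_div_loss(student, teacher.detach())  # teacher is frozen during distillation
```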
5 | 6 | 7 | ## Datasets Preparation 8 | - DukeMTMC-reID 9 | 10 | 11 | ## Train and Evaluation 12 | ```shell 13 | # teacher model training 14 | python3 projects/FastDistill/train_net.py \ 15 | --config-file projects/FastDistill/configs/sbs_r101ibn.yml \ 16 | --num-gpus 4 17 | 18 | # loss distillation 19 | python3 projects/FastDistill/train_net.py \ 20 | --config-file projects/FastDistill/configs/kd-sbs_r101ibn-sbs_r34.yml \ 21 | --num-gpus 4 \ 22 | MODEL.META_ARCHITECTURE Distiller \ 23 | KD.MODEL_CONFIG '("projects/FastDistill/logs/dukemtmc/r101_ibn/config.yaml",)' \ 24 | KD.MODEL_WEIGHTS '("projects/FastDistill/logs/dukemtmc/r101_ibn/model_best.pth",)' 25 | 26 | # loss+overhaul distillation 27 | python3 projects/FastDistill/train_net.py \ 28 | --config-file projects/FastDistill/configs/kd-sbs_r101ibn-sbs_r34.yml \ 29 | --num-gpus 4 \ 30 | MODEL.META_ARCHITECTURE DistillerOverhaul \ 31 | KD.MODEL_CONFIG '("projects/FastDistill/logs/dukemtmc/r101_ibn/config.yaml",)' \ 32 | KD.MODEL_WEIGHTS '("projects/FastDistill/logs/dukemtmc/r101_ibn/model_best.pth",)' 33 | ``` 34 | 35 | ## Experimental Results 36 | 37 | ### Settings 38 | 39 | All the experiments are conducted with 4 V100 GPUs. 40 | 41 | 42 | ### DukeMTMC-reID 43 | 44 | | Model | Rank@1 | mAP | 45 | | --- | --- | --- | 46 | | R101_ibn (teacher) | 90.66 | 81.14 | 47 | | R34 (student) | 86.31 | 73.28 | 48 | | JS Div | 88.60 | 77.80 | 49 | | JS Div + Overhaul | 88.73 | 78.25 | 50 | 51 | ## Contact 52 | This project is conducted by [Xingyu Liao](https://github.com/L1aoXingyu) and [Guan'an Wang](https://wangguanan.github.io/) (guan.wang0706@gmail.com). 53 | -------------------------------------------------------------------------------- /projects/FastDistill/configs/Base-kd.yml: -------------------------------------------------------------------------------- 1 | _BASE_: ../../../configs/Base-SBS.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | NAME: build_resnet_backbone_distill 6 | WITH_IBN: False 7 | WITH_NL: False 8 | PRETRAIN: True 9 | 10 | INPUT: 11 | SIZE_TRAIN: [ 256, 128 ] 12 | SIZE_TEST: [ 256, 128 ] 13 | 14 | SOLVER: 15 | MAX_EPOCH: 60 16 | BASE_LR: 0.0007 17 | IMS_PER_BATCH: 256 18 | 19 | DELAY_EPOCHS: 30 20 | FREEZE_ITERS: 500 21 | 22 | CHECKPOINT_PERIOD: 20 23 | 24 | TEST: 25 | EVAL_PERIOD: 20 26 | IMS_PER_BATCH: 128 27 | 28 | CUDNN_BENCHMARK: True 29 | -------------------------------------------------------------------------------- /projects/FastDistill/configs/kd-sbs_r101ibn-sbs_r34.yml: -------------------------------------------------------------------------------- 1 | _BASE_: Base-kd.yml 2 | 3 | MODEL: 4 | META_ARCHITECTURE: Distiller 5 | BACKBONE: 6 | DEPTH: 34x 7 | FEAT_DIM: 512 8 | WITH_IBN: False 9 | 10 | KD: 11 | MODEL_CONFIG: ("projects/FastDistill/logs/dukemtmc/r101_ibn/config.yaml",) 12 | MODEL_WEIGHTS: ("projects/FastDistill/logs/dukemtmc/r101_ibn/model_best.pth",) 13 | 14 | DATASETS: 15 | NAMES: ("DukeMTMC",) 16 | TESTS: ("DukeMTMC",) 17 | 18 | OUTPUT_DIR: projects/FastDistill/logs/dukemtmc/kd-r34-r101_ibn -------------------------------------------------------------------------------- /projects/FastDistill/configs/sbs_r101ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: Base-kd.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | WITH_IBN: True 6 | DEPTH: 101x 7 | 8 | DATASETS: 9 | NAMES: ("DukeMTMC",) 10 | TESTS: ("DukeMTMC",) 11 | 12 | OUTPUT_DIR: projects/FastDistill/logs/dukemtmc/r101_ibn --------------------------------------------------------------------------------
/projects/FastDistill/configs/sbs_r34.yml: -------------------------------------------------------------------------------- 1 | _BASE_: Base-kd.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | DEPTH: 34x 6 | FEAT_DIM: 512 7 | WITH_IBN: False 8 | 9 | DATASETS: 10 | NAMES: ("DukeMTMC",) 11 | TESTS: ("DukeMTMC",) 12 | 13 | OUTPUT_DIR: projects/FastDistill/logs/dukemtmc/r34 -------------------------------------------------------------------------------- /projects/FastDistill/fastdistill/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: l1aoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .overhaul import DistillerOverhaul 8 | from .resnet_distill import build_resnet_backbone_distill 9 | -------------------------------------------------------------------------------- /projects/FastDistill/train_net.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | """ 4 | @author: L1aoXingyu, guan'an wang 5 | @contact: sherlockliao01@gmail.com, guan.wang0706@gmail.com 6 | """ 7 | 8 | import sys 9 | 10 | sys.path.append('.') 11 | from fastreid.config import get_cfg 12 | from fastreid.engine import default_argument_parser, default_setup, DefaultTrainer, launch 13 | from fastreid.utils.checkpoint import Checkpointer 14 | 15 | from fastdistill import * 16 | 17 | def setup(args): 18 | """ 19 | Create configs and perform basic setups. 20 | """ 21 | cfg = get_cfg() 22 | cfg.merge_from_file(args.config_file) 23 | cfg.merge_from_list(args.opts) 24 | cfg.freeze() 25 | default_setup(cfg, args) 26 | return cfg 27 | 28 | 29 | def main(args): 30 | cfg = setup(args) 31 | 32 | if args.eval_only: 33 | model = DefaultTrainer.build_model(cfg) 34 | Checkpointer(model, save_dir=cfg.OUTPUT_DIR).load(cfg.MODEL.WEIGHTS) 35 | res = DefaultTrainer.test(cfg, model) 36 | return res 37 | 38 | trainer = DefaultTrainer(cfg) 39 | 40 | trainer.resume_or_load(resume=args.resume) 41 | return trainer.train() 42 | 43 | 44 | if __name__ == "__main__": 45 | parser = default_argument_parser() 46 | args = parser.parse_args() 47 | 48 | print("Command Line Args:", args) 49 | launch( 50 | main, 51 | args.num_gpus, 52 | num_machines=args.num_machines, 53 | machine_rank=args.machine_rank, 54 | dist_url=args.dist_url, 55 | args=(args,), 56 | ) 57 | -------------------------------------------------------------------------------- /projects/FastFace/README.md: -------------------------------------------------------------------------------- 1 | # FastFace in FastReID 2 | 3 | This project provides a baseline for face recognition. 4 | 5 | ## Datasets Preparation 6 | 7 | | Function | Dataset | 8 | | --- | --- | 9 | | Train | MS-Celeb-1M | 10 | | Test-1 | LFW | 11 | | Test-2 | CPLFW | 12 | | Test-3 | CALFW | 13 | | Test-4 | VGG2_FP | 14 | | Test-5 | AgeDB-30 | 15 | | Test-6 | CFP_FF | 16 | | Test-7 | CFP-FP | 17 | 18 | We do data wrangling following [InsightFace_Pytorch](https://github.com/TreB1eN/InsightFace_Pytorch) instruction. 19 | 20 | ## Dependencies 21 | 22 | - bcolz 23 | - mxnet (optional) if you want to read `.rec` directly 24 | 25 | ## Experiment Results 26 | 27 | We refer to [insightface_pytorch](https://github.com/TreB1eN/InsightFace_Pytorch) as our baseline methods, and on top of it, we use circle loss and cosine lr scheduler. 
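The `face_base.yml` shipped with this project (further down) classifies with a cosine-margin softmax (`CLS_LAYER: CosSoftmax`, `SCALE: 64`, `MARGIN: 0.4`). As a reference for what those two numbers control, here is a minimal CosFace-style sketch; the helper name, shapes and the small class count are illustrative, not the project's exact `CosSoftmax` layer.

```python
import torch
import torch.nn.functional as F

def cos_softmax_logits(features, weight, targets, s=64.0, m=0.4):
    """CosFace-style logits: s * (cos(theta) - m) at the ground-truth class (sketch)."""
    cos = F.linear(F.normalize(features), F.normalize(weight))  # cosine similarities
    margin = torch.zeros_like(cos)
    margin.scatter_(1, targets.view(-1, 1), m)                  # subtract m only at the GT class
    return s * (cos - margin)

feat = torch.randn(4, 512)            # EMBEDDING_DIM: 512 in the config
w = torch.randn(1000, 512)            # small class count for the demo; the config uses NUM_CLASSES: 360232
tgt = torch.randint(0, 1000, (4,))
loss = F.cross_entropy(cos_softmax_logits(feat, w, tgt), tgt)
```

With `m = 0` this reduces to a plain scaled softmax over normalized features and weights.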
28 | 29 | | Method | LFW(%) | CFP-FF(%) | CFP-FP(%)| AgeDB-30(%) | calfw(%) | cplfw(%) | vgg2_fp(%) | 30 | | :---: | :---: | :---: |:---: | :---: | :---: | :---: | :---: | 31 | | [insightface_pytorch](https://github.com/TreB1eN/InsightFace_Pytorch) | 99.52 | 99.62 | 95.04 | 96.22 | 95.57 | 91.07 | 93.86 | 32 | | ir50_se | 99.70 | 99.60 | 96.43 | 97.87 | 95.95 | 91.10 | 94.32 | 33 | | ir100_se | 99.65 | 99.69 | 97.10 | 97.98 | 96.00 | 91.53 | 94.62 | 34 | | ir50_se_0.1 | | | | | | | | 35 | | ir100_se_0.1 | | | | | | | | 36 | -------------------------------------------------------------------------------- /projects/FastFace/configs/face_base.yml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: FaceBaseline 3 | 4 | PIXEL_MEAN: [127.5, 127.5, 127.5] 5 | PIXEL_STD: [127.5, 127.5, 127.5] 6 | 7 | BACKBONE: 8 | NAME: build_iresnet_backbone 9 | 10 | HEADS: 11 | NAME: FaceHead 12 | WITH_BNNECK: True 13 | NORM: BN 14 | NECK_FEAT: after 15 | EMBEDDING_DIM: 512 16 | POOL_LAYER: Flatten 17 | CLS_LAYER: CosSoftmax 18 | SCALE: 64 19 | MARGIN: 0.4 20 | NUM_CLASSES: 360232 21 | 22 | PFC: 23 | ENABLED: False 24 | SAMPLE_RATE: 0.1 25 | 26 | LOSSES: 27 | NAME: ("CrossEntropyLoss",) 28 | 29 | CE: 30 | EPSILON: 0. 31 | SCALE: 1. 32 | 33 | DATASETS: 34 | REC_PATH: /export/home/DATA/Glint360k/train.rec 35 | NAMES: ("MS1MV2",) 36 | TESTS: ("CFP_FP", "AgeDB_30", "LFW") 37 | 38 | INPUT: 39 | SIZE_TRAIN: [0,] # No need of resize 40 | SIZE_TEST: [0,] 41 | 42 | FLIP: 43 | ENABLED: True 44 | PROB: 0.5 45 | 46 | DATALOADER: 47 | SAMPLER_TRAIN: TrainingSampler 48 | NUM_WORKERS: 8 49 | 50 | SOLVER: 51 | MAX_EPOCH: 20 52 | AMP: 53 | ENABLED: True 54 | 55 | OPT: SGD 56 | BASE_LR: 0.05 57 | MOMENTUM: 0.9 58 | 59 | SCHED: MultiStepLR 60 | STEPS: [8, 12, 15, 18] 61 | 62 | BIAS_LR_FACTOR: 1. 63 | WEIGHT_DECAY: 0.0005 64 | WEIGHT_DECAY_BIAS: 0.0005 65 | IMS_PER_BATCH: 256 66 | 67 | WARMUP_FACTOR: 0.1 68 | WARMUP_ITERS: 0 69 | 70 | CHECKPOINT_PERIOD: 1 71 | 72 | TEST: 73 | EVAL_PERIOD: 1 74 | IMS_PER_BATCH: 1024 75 | 76 | CUDNN_BENCHMARK: True -------------------------------------------------------------------------------- /projects/FastFace/configs/r101_ir.yml: -------------------------------------------------------------------------------- 1 | _BASE_: face_base.yml 2 | 3 | MODEL: 4 | 5 | BACKBONE: 6 | NAME: build_resnetIR_backbone 7 | DEPTH: 100x 8 | FEAT_DIM: 25088 # 512x7x7 9 | WITH_SE: True 10 | 11 | HEADS: 12 | PFC: 13 | ENABLED: True 14 | 15 | OUTPUT_DIR: projects/FastFace/logs/ir_se101-ms1mv2-circle 16 | -------------------------------------------------------------------------------- /projects/FastFace/configs/r50_ir.yml: -------------------------------------------------------------------------------- 1 | _BASE_: face_base.yml 2 | 3 | MODEL: 4 | 5 | BACKBONE: 6 | DEPTH: 50x 7 | FEAT_DIM: 25088 # 512x7x7 8 | DROPOUT: 0. 
9 | 10 | HEADS: 11 | PFC: 12 | ENABLED: True 13 | 14 | OUTPUT_DIR: projects/FastFace/logs/pfc0.1_insightface 15 | -------------------------------------------------------------------------------- /projects/FastFace/fastface/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .modeling import * 8 | from .config import add_face_cfg 9 | from .trainer import FaceTrainer 10 | from .datasets import * 11 | -------------------------------------------------------------------------------- /projects/FastFace/fastface/config.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from fastreid.config import CfgNode as CN 8 | 9 | 10 | def add_face_cfg(cfg): 11 | _C = cfg 12 | 13 | _C.DATASETS.REC_PATH = "" 14 | 15 | _C.MODEL.BACKBONE.DROPOUT = 0. 16 | 17 | _C.MODEL.HEADS.PFC = CN({"ENABLED": False}) 18 | _C.MODEL.HEADS.PFC.SAMPLE_RATE = 0.1 19 | -------------------------------------------------------------------------------- /projects/FastFace/fastface/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .ms1mv2 import MS1MV2 8 | from .test_dataset import * 9 | -------------------------------------------------------------------------------- /projects/FastFace/fastface/datasets/ms1mv2.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import glob 8 | import os 9 | 10 | from fastreid.data.datasets import DATASET_REGISTRY 11 | from fastreid.data.datasets.bases import ImageDataset 12 | 13 | 14 | @DATASET_REGISTRY.register() 15 | class MS1MV2(ImageDataset): 16 | dataset_dir = "MS_Celeb_1M" 17 | dataset_name = "ms1mv2" 18 | 19 | def __init__(self, root="datasets", **kwargs): 20 | self.root = root 21 | self.dataset_dir = os.path.join(self.root, self.dataset_dir) 22 | 23 | required_files = [self.dataset_dir] 24 | self.check_before_run(required_files) 25 | 26 | train = self.process_dirs()[:10000] 27 | super().__init__(train, [], [], **kwargs) 28 | 29 | def process_dirs(self): 30 | train_list = [] 31 | 32 | fid_list = os.listdir(self.dataset_dir) 33 | 34 | for fid in fid_list: 35 | all_imgs = glob.glob(os.path.join(self.dataset_dir, fid, "*.jpg")) 36 | for img_path in all_imgs: 37 | train_list.append([img_path, self.dataset_name + '_' + fid, '0']) 38 | 39 | return train_list 40 | -------------------------------------------------------------------------------- /projects/FastFace/fastface/datasets/test_dataset.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import os 8 | 9 | import bcolz 10 | import numpy as np 11 | 12 | from fastreid.data.datasets import DATASET_REGISTRY 13 | from fastreid.data.datasets.bases import ImageDataset 14 | 15 | __all__ = ["CPLFW", "VGG2_FP", "AgeDB_30", "CALFW", "CFP_FF", "CFP_FP", "LFW"] 16 | 17 | 18 | @DATASET_REGISTRY.register() 19 | class CPLFW(ImageDataset): 20 | dataset_dir = "faces_emore_val" 21 | dataset_name = "cplfw" 22 | 23 | def 
__init__(self, root='datasets', **kwargs): 24 | self.root = root 25 | self.dataset_dir = os.path.join(self.root, self.dataset_dir) 26 | 27 | required_files = [self.dataset_dir] 28 | 29 | self.check_before_run(required_files) 30 | 31 | carray = bcolz.carray(rootdir=os.path.join(self.dataset_dir, self.dataset_name), mode='r') 32 | is_same = np.load(os.path.join(self.dataset_dir, "{}_list.npy".format(self.dataset_name))) 33 | 34 | self.carray = carray 35 | self.is_same = is_same 36 | 37 | super().__init__([], [], [], **kwargs) 38 | 39 | 40 | @DATASET_REGISTRY.register() 41 | class VGG2_FP(CPLFW): 42 | dataset_name = "vgg2_fp" 43 | 44 | 45 | @DATASET_REGISTRY.register() 46 | class AgeDB_30(CPLFW): 47 | dataset_name = "agedb_30" 48 | 49 | 50 | @DATASET_REGISTRY.register() 51 | class CALFW(CPLFW): 52 | dataset_name = "calfw" 53 | 54 | 55 | @DATASET_REGISTRY.register() 56 | class CFP_FF(CPLFW): 57 | dataset_name = "cfp_ff" 58 | 59 | 60 | @DATASET_REGISTRY.register() 61 | class CFP_FP(CPLFW): 62 | dataset_name = "cfp_fp" 63 | 64 | 65 | @DATASET_REGISTRY.register() 66 | class LFW(CPLFW): 67 | dataset_name = "lfw" 68 | -------------------------------------------------------------------------------- /projects/FastFace/fastface/modeling/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .partial_fc import PartialFC 8 | from .face_baseline import FaceBaseline 9 | from .face_head import FaceHead 10 | from .iresnet import build_iresnet_backbone 11 | -------------------------------------------------------------------------------- /projects/FastFace/fastface/modeling/face_baseline.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import torch 8 | from fastreid.modeling.meta_arch import Baseline 9 | from fastreid.modeling.meta_arch import META_ARCH_REGISTRY 10 | 11 | 12 | @META_ARCH_REGISTRY.register() 13 | class FaceBaseline(Baseline): 14 | def __init__(self, cfg): 15 | super().__init__(cfg) 16 | self.pfc_enabled = cfg.MODEL.HEADS.PFC.ENABLED 17 | self.amp_enabled = cfg.SOLVER.AMP.ENABLED 18 | 19 | def forward(self, batched_inputs): 20 | if not self.pfc_enabled: 21 | return super().forward(batched_inputs) 22 | 23 | images = self.preprocess_image(batched_inputs) 24 | with torch.cuda.amp.autocast(self.amp_enabled): 25 | features = self.backbone(images) 26 | features = features.float() if self.amp_enabled else features 27 | 28 | if self.training: 29 | assert "targets" in batched_inputs, "Person ID annotation are missing in training!" 30 | targets = batched_inputs["targets"] 31 | 32 | # PreciseBN flag, When do preciseBN on different dataset, the number of classes in new dataset 33 | # may be larger than that in the original dataset, so the circle/arcface will 34 | # throw an error. We just set all the targets to 0 to avoid this problem. 
35 | if targets.sum() < 0: targets.zero_() 36 | 37 | outputs = self.heads(features, targets) 38 | return outputs, targets 39 | else: 40 | outputs = self.heads(features) 41 | return outputs 42 | -------------------------------------------------------------------------------- /projects/FastFace/fastface/modeling/face_head.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from fastreid.config import configurable 8 | from fastreid.modeling.heads import EmbeddingHead 9 | from fastreid.modeling.heads.build import REID_HEADS_REGISTRY 10 | 11 | 12 | @REID_HEADS_REGISTRY.register() 13 | class FaceHead(EmbeddingHead): 14 | def __init__(self, cfg): 15 | super().__init__(cfg) 16 | self.pfc_enabled = False 17 | if cfg.MODEL.HEADS.PFC.ENABLED: 18 | # Delete pre-defined linear weights for partial fc sample 19 | del self.weight 20 | self.pfc_enabled = True 21 | 22 | def forward(self, features, targets=None): 23 | """ 24 | Partial FC forward, which will sample positive weights and part of negative weights, 25 | then compute logits and get the grad of features. 26 | """ 27 | if not self.pfc_enabled: 28 | return super().forward(features, targets) 29 | else: 30 | pool_feat = self.pool_layer(features) 31 | neck_feat = self.bottleneck(pool_feat) 32 | neck_feat = neck_feat[..., 0, 0] 33 | return neck_feat 34 | -------------------------------------------------------------------------------- /projects/FastFace/train_net.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | """ 4 | @author: sherlock 5 | @contact: sherlockliao01@gmail.com 6 | """ 7 | 8 | import sys 9 | 10 | sys.path.append('.') 11 | 12 | from fastreid.config import get_cfg 13 | from fastreid.engine import default_argument_parser, default_setup, launch 14 | from fastreid.utils.checkpoint import Checkpointer 15 | 16 | from fastface import * 17 | 18 | 19 | def setup(args): 20 | """ 21 | Create configs and perform basic setups. 
22 | """ 23 | cfg = get_cfg() 24 | add_face_cfg(cfg) 25 | cfg.merge_from_file(args.config_file) 26 | cfg.merge_from_list(args.opts) 27 | cfg.freeze() 28 | default_setup(cfg, args) 29 | return cfg 30 | 31 | 32 | def main(args): 33 | cfg = setup(args) 34 | 35 | if args.eval_only: 36 | cfg.defrost() 37 | cfg.MODEL.BACKBONE.PRETRAIN = False 38 | model = FaceTrainer.build_model(cfg) 39 | 40 | Checkpointer(model).load(cfg.MODEL.WEIGHTS) # load trained model 41 | 42 | res = FaceTrainer.test(cfg, model) 43 | return res 44 | 45 | trainer = FaceTrainer(cfg) 46 | 47 | trainer.resume_or_load(resume=args.resume) 48 | return trainer.train() 49 | 50 | 51 | if __name__ == "__main__": 52 | args = default_argument_parser().parse_args() 53 | print("Command Line Args:", args) 54 | launch( 55 | main, 56 | args.num_gpus, 57 | num_machines=args.num_machines, 58 | machine_rank=args.machine_rank, 59 | dist_url=args.dist_url, 60 | args=(args,), 61 | ) 62 | -------------------------------------------------------------------------------- /projects/FastRT/.gitignore: -------------------------------------------------------------------------------- 1 | *.wts 2 | 3 | .vscode/ 4 | libs/ 5 | build/ 6 | data/ -------------------------------------------------------------------------------- /projects/FastRT/demo/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | SET(APP_PROJECT_NAME fastrt) 2 | 3 | find_package(CUDA REQUIRED) 4 | # include and link dirs of cuda and tensorrt, you need adapt them if yours are different 5 | # cuda 6 | include_directories(/usr/local/cuda/include) 7 | link_directories(/usr/local/cuda/lib64) 8 | # tensorrt 9 | include_directories(/usr/include/x86_64-linux-gnu/) 10 | link_directories(/usr/lib/x86_64-linux-gnu/) 11 | 12 | include_directories(${SOLUTION_DIR}/include) 13 | add_executable(${APP_PROJECT_NAME} inference.cpp) 14 | 15 | # numpy 16 | if(USE_CNUMPY) 17 | include_directories(${SOLUTION_DIR}/libs/cnpy/include) 18 | SET(CNPY_LIB ${SOLUTION_DIR}/libs/cnpy/lib/libcnpy.so) 19 | else() 20 | SET(CNPY_LIB) 21 | endif() 22 | 23 | # OpenCV 24 | find_package(OpenCV) 25 | target_include_directories(${APP_PROJECT_NAME} 26 | PUBLIC 27 | ${OpenCV_INCLUDE_DIRS} 28 | ) 29 | target_link_libraries(${APP_PROJECT_NAME} 30 | PUBLIC 31 | ${OpenCV_LIBS} 32 | ) 33 | 34 | if(BUILD_FASTRT_ENGINE AND BUILD_DEMO) 35 | SET(FASTRTENGINE_LIB FastRTEngine) 36 | else() 37 | SET(FASTRTENGINE_LIB ${SOLUTION_DIR}/libs/FastRTEngine/libFastRTEngine.so) 38 | endif() 39 | 40 | target_link_libraries(${APP_PROJECT_NAME} 41 | PRIVATE 42 | ${FASTRTENGINE_LIB} 43 | nvinfer 44 | ${CNPY_LIB} 45 | ) -------------------------------------------------------------------------------- /projects/FastRT/docker/trt7cu100/Dockerfile: -------------------------------------------------------------------------------- 1 | # cuda10.0 2 | FROM fineyu/tensorrt7:0.0.1 3 | 4 | RUN add-apt-repository -y ppa:timsc/opencv-3.4 && \ 5 | apt-get update && \ 6 | apt-get install -y cmake \ 7 | libopencv-dev \ 8 | libopencv-dnn-dev \ 9 | libopencv-shape3.4-dbg && \ 10 | apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 11 | -------------------------------------------------------------------------------- /projects/FastRT/docker/trt7cu102/Dockerfile: -------------------------------------------------------------------------------- 1 | # cuda10.2 2 | FROM nvcr.io/nvidia/tensorrt:20.03-py3 3 | 4 | RUN apt-get update && apt-get dist-upgrade -y && \ 5 | apt-get install -y \ 6 | software-properties-common \ 7 | 
build-essential \ 8 | cmake \ 9 | git \ 10 | libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev \ 11 | python-dev python-numpy libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev \ 12 | libdc1394-22-dev libgl1-mesa-glx && \ 13 | apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 14 | 15 | RUN mkdir opencv34 && cd opencv34 && \ 16 | git clone -b 3.4 https://github.com/opencv/opencv && \ 17 | git clone -b 3.4 https://github.com/opencv/opencv_contrib && \ 18 | mkdir build && cd build && \ 19 | cmake -DCMAKE_INSTALL_PREFIX=/usr/local/opencv \ 20 | -DCMAKE_BUILD_TYPE:STRING=RelWithDebInfo \ 21 | -DCMAKE_BUILD_TYPE=RELEASE \ 22 | -DBUILD_opencv_xfeatures2d=OFF \ 23 | -DOPENCV_EXTRA_MODULES_PATH=../opencv_contrib/modules ../opencv && \ 24 | make -j12 && \ 25 | make install && \ 26 | ldconfig && \ 27 | cd ../.. \ 28 | && rm -rf opencv34 29 | -------------------------------------------------------------------------------- /projects/FastRT/fastrt/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | project(FastRTEngine) 2 | 3 | file(GLOB_RECURSE COMMON_SRC_FILES 4 | ${CMAKE_CURRENT_SOURCE_DIR}/common/utils.cpp 5 | ${CMAKE_CURRENT_SOURCE_DIR}/common/calibrator.cpp 6 | ) 7 | 8 | find_package(CUDA REQUIRED) 9 | # include and link dirs of cuda and tensorrt, you need adapt them if yours are different 10 | # cuda 11 | include_directories(/usr/local/cuda/include) 12 | link_directories(/usr/local/cuda/lib64) 13 | # tensorrt 14 | include_directories(/usr/include/x86_64-linux-gnu/) 15 | link_directories(/usr/lib/x86_64-linux-gnu/) 16 | 17 | # build engine as library 18 | add_library(${PROJECT_NAME} ${TARGET} ${COMMON_SRC_FILES}) 19 | 20 | target_include_directories(${PROJECT_NAME} 21 | PUBLIC 22 | ../include 23 | ) 24 | 25 | find_package(OpenCV) 26 | target_include_directories(${PROJECT_NAME} 27 | PUBLIC 28 | ${OpenCV_INCLUDE_DIRS} 29 | ) 30 | 31 | target_link_libraries(${PROJECT_NAME} 32 | nvinfer 33 | cudart 34 | ${OpenCV_LIBS} 35 | ) 36 | 37 | SET_TARGET_PROPERTIES(${PROJECT_NAME} 38 | PROPERTIES 39 | SOVERSION ${LIBARARY_SOVERSION} 40 | VERSION ${LIBARARY_VERSION} 41 | ) 42 | 43 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3") 44 | 45 | install(TARGETS ${PROJECT_NAME} 46 | LIBRARY DESTINATION ${SOLUTION_DIR}/libs/${PROJECT_NAME}) 47 | 48 | add_subdirectory(layers) 49 | add_subdirectory(engine) 50 | add_subdirectory(heads) 51 | add_subdirectory(backbones) 52 | add_subdirectory(meta_arch) 53 | add_subdirectory(factory) -------------------------------------------------------------------------------- /projects/FastRT/fastrt/backbones/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | target_sources(${PROJECT_NAME} 2 | PRIVATE 3 | ${CMAKE_CURRENT_SOURCE_DIR}/sbs_resnet.cpp 4 | ) -------------------------------------------------------------------------------- /projects/FastRT/fastrt/engine/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | target_sources(${PROJECT_NAME} 2 | PRIVATE 3 | ${CMAKE_CURRENT_SOURCE_DIR}/InferenceEngine.cpp 4 | ) -------------------------------------------------------------------------------- /projects/FastRT/fastrt/factory/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | target_sources(${PROJECT_NAME} 2 | PRIVATE 3 | ${CMAKE_CURRENT_SOURCE_DIR}/factory.cpp 4 | ${CMAKE_SOURCE_DIR}/fastrt/layers/poolingLayerRT.h 5 | ) 
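The pooling layers registered a little further down in `poolingLayerRT.cpp` include `GemPool`/`GemPoolP`, TensorRT implementations of generalized-mean (GeM) pooling. As a reference for the operation they reproduce, here is a short PyTorch sketch; the function name and the `p = 3` default are illustrative.

```python
import torch

def gem_pool(x, p=3.0, eps=1e-6):
    """Generalized-mean pooling over the spatial dims: (mean(x^p))^(1/p)."""
    # p = 1 recovers average pooling; large p approaches max pooling.
    return x.clamp(min=eps).pow(p).mean(dim=(-2, -1)).pow(1.0 / p)

feat = torch.rand(2, 2048, 16, 8)   # N x C x H x W feature map
pooled = gem_pool(feat)             # -> shape (2, 2048)
```

`GemPoolP` additionally reads a learned exponent from the weight map (`heads.pool_layer.p`), as shown in the factory and pooling code below.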
-------------------------------------------------------------------------------- /projects/FastRT/fastrt/heads/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | target_sources(${PROJECT_NAME} 2 | PRIVATE 3 | ${CMAKE_CURRENT_SOURCE_DIR}/embedding_head.cpp 4 | ) -------------------------------------------------------------------------------- /projects/FastRT/fastrt/heads/embedding_head.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include "fastrt/utils.h" 3 | #include "fastrt/layers.h" 4 | #include "fastrt/embedding_head.h" 5 | 6 | namespace fastrt { 7 | 8 | embedding_head::embedding_head(FastreidConfig& modelCfg) : 9 | _modelCfg(modelCfg), _layerFactory(make_unique()) {} 10 | 11 | embedding_head::embedding_head(FastreidConfig& modelCfg, 12 | std::unique_ptr layerFactory) : _modelCfg(modelCfg), _layerFactory(std::move(layerFactory)) {} 13 | 14 | ILayer* embedding_head::topology(INetworkDefinition *network, std::map& weightMap, ITensor& input) { 15 | /* 16 | * Reference: https://github.com/JDAI-CV/fast-reid/blob/master/fastreid/modeling/heads/embedding_head.py 17 | */ 18 | 19 | ILayer* pooling = _layerFactory->createPoolingLayer(_modelCfg.pooling)->addPooling(network, weightMap, input); 20 | TRTASSERT(pooling); 21 | 22 | // Hint: It's used to be "heads.bnneck.0" before Sep 10, 2020. (JDAI-CV/fast-reid) 23 | std::string bnneck_lname = "heads.bottleneck.0"; 24 | ILayer* reduction_neck{pooling}; 25 | 26 | if(_modelCfg.embedding_dim > 0) { 27 | Weights emptywts{DataType::kFLOAT, nullptr, 0}; 28 | reduction_neck = network->addConvolutionNd(*pooling->getOutput(0), 29 | _modelCfg.embedding_dim, 30 | DimsHW{1, 1}, 31 | weightMap["heads.bottleneck.0.weight"], 32 | emptywts); 33 | TRTASSERT(reduction_neck); 34 | bnneck_lname[bnneck_lname.size()-1] = '1'; 35 | } 36 | 37 | IScaleLayer* bottleneck = trtxapi::addBatchNorm2d(network, weightMap, *reduction_neck->getOutput(0), bnneck_lname, 1e-5); 38 | TRTASSERT(bottleneck); 39 | return bottleneck; 40 | } 41 | 42 | } -------------------------------------------------------------------------------- /projects/FastRT/fastrt/layers/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | target_sources(${PROJECT_NAME} 2 | PRIVATE 3 | ${CMAKE_CURRENT_SOURCE_DIR}/layers.cpp 4 | ${CMAKE_CURRENT_SOURCE_DIR}/poolingLayerRT.h 5 | ${CMAKE_CURRENT_SOURCE_DIR}/poolingLayerRT.cpp 6 | ) -------------------------------------------------------------------------------- /projects/FastRT/fastrt/layers/poolingLayerRT.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include "fastrt/layers.h" 3 | #include "poolingLayerRT.h" 4 | 5 | namespace fastrt { 6 | 7 | ILayer* MaxPool::addPooling(INetworkDefinition *network, std::map& weightMap, ITensor& input) { 8 | ILayer* pooling = network->addPoolingNd(input, PoolingType::kMAX, DimsHW{input.getDimensions().d[1], input.getDimensions().d[2]}); 9 | auto p = dynamic_cast(pooling); 10 | if(p) p->setStrideNd(DimsHW{input.getDimensions().d[1], input.getDimensions().d[2]}); 11 | else std::cout << "Downcasting failed." 
<< std::endl; 12 | return pooling; 13 | } 14 | 15 | ILayer* AvgPool::addPooling(INetworkDefinition *network, std::map& weightMap, ITensor& input) { 16 | ILayer* pooling = network->addPoolingNd(input, PoolingType::kAVERAGE, DimsHW{input.getDimensions().d[1], input.getDimensions().d[2]}); 17 | auto p = dynamic_cast(pooling); 18 | if(p) p->setStrideNd(DimsHW{input.getDimensions().d[1], input.getDimensions().d[2]}); 19 | else std::cout << "Downcasting failed." << std::endl; 20 | return pooling; 21 | } 22 | 23 | ILayer* GemPool::addPooling(INetworkDefinition *network, std::map& weightMap, ITensor& input) { 24 | return trtxapi::addGeneralizedMeanPooling(network, input); 25 | } 26 | 27 | ILayer* GemPoolP::addPooling(INetworkDefinition *network, std::map& weightMap, ITensor& input) { 28 | return trtxapi::addGeneralizedMeanPooling(network, input, *(float*)weightMap["heads.pool_layer.p"].values); 29 | } 30 | 31 | } -------------------------------------------------------------------------------- /projects/FastRT/fastrt/layers/poolingLayerRT.h: -------------------------------------------------------------------------------- 1 | #include "NvInfer.h" 2 | #include "fastrt/IPoolingLayerRT.h" 3 | using namespace nvinfer1; 4 | 5 | namespace fastrt { 6 | 7 | class MaxPool : public IPoolingLayerRT { 8 | public: 9 | MaxPool() = default; 10 | ~MaxPool() = default; 11 | 12 | ILayer* addPooling(INetworkDefinition *network, 13 | std::map& weightMap, 14 | ITensor& input) override; 15 | }; 16 | 17 | class AvgPool : public IPoolingLayerRT { 18 | public: 19 | AvgPool() = default; 20 | ~AvgPool() = default; 21 | 22 | ILayer* addPooling(INetworkDefinition *network, 23 | std::map& weightMap, 24 | ITensor& input) override; 25 | }; 26 | 27 | class GemPool : public IPoolingLayerRT { 28 | public: 29 | GemPool() = default; 30 | ~GemPool() = default; 31 | 32 | ILayer* addPooling(INetworkDefinition *network, 33 | std::map& weightMap, 34 | ITensor& input) override; 35 | }; 36 | 37 | class GemPoolP : public IPoolingLayerRT { 38 | public: 39 | GemPoolP() = default; 40 | ~GemPoolP() = default; 41 | 42 | ILayer* addPooling(INetworkDefinition *network, 43 | std::map& weightMap, 44 | ITensor& input) override; 45 | }; 46 | } -------------------------------------------------------------------------------- /projects/FastRT/fastrt/meta_arch/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | target_sources(${PROJECT_NAME} 2 | PRIVATE 3 | ${CMAKE_CURRENT_SOURCE_DIR}/model.cpp 4 | ${CMAKE_CURRENT_SOURCE_DIR}/baseline.cpp 5 | ) -------------------------------------------------------------------------------- /projects/FastRT/fastrt/meta_arch/baseline.cpp: -------------------------------------------------------------------------------- 1 | #include "fastrt/layers.h" 2 | #include "fastrt/baseline.h" 3 | 4 | namespace fastrt { 5 | 6 | Baseline::Baseline(const trt::ModelConfig &modelcfg, const std::string input_name, const std::string output_name) 7 | : Model(modelcfg, input_name, output_name) {} 8 | 9 | void Baseline::preprocessing_cpu(const cv::Mat& img, float* const data, const std::size_t stride) { 10 | /* Normalization & BGR->RGB */ 11 | for (std::size_t i = 0; i < stride; ++i) { 12 | data[i] = img.at(i)[2]; 13 | data[i + stride] = img.at(i)[1]; 14 | data[i + (stride<<1)] = img.at(i)[0]; 15 | } 16 | } 17 | 18 | ITensor* Baseline::preprocessing_gpu(INetworkDefinition* network, std::map& weightMap, ITensor* input) { 19 | /* Standardization */ 20 | static const float mean[3] = {123.675, 116.28, 
103.53}; 21 | static const float std[3] = {58.395, 57.120000000000005, 57.375}; 22 | return addMeanStd(network, weightMap, input, "", mean, std, false); // true for div 255 23 | } 24 | 25 | } -------------------------------------------------------------------------------- /projects/FastRT/include/fastrt/IPoolingLayerRT.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include "struct.h" 5 | #include "NvInfer.h" 6 | using namespace nvinfer1; 7 | 8 | namespace fastrt { 9 | 10 | class IPoolingLayerRT { 11 | public: 12 | IPoolingLayerRT() = default; 13 | virtual ~IPoolingLayerRT() = default; 14 | 15 | virtual ILayer* addPooling(INetworkDefinition *network, 16 | std::map& weightMap, 17 | ITensor& input) = 0; 18 | }; 19 | 20 | } -------------------------------------------------------------------------------- /projects/FastRT/include/fastrt/baseline.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "model.h" 4 | #include "struct.h" 5 | #include 6 | #include 7 | using namespace trtxapi; 8 | 9 | namespace fastrt { 10 | 11 | class Baseline : public Model { 12 | public: 13 | Baseline(const trt::ModelConfig &modelcfg, 14 | const std::string input_name = "data", 15 | const std::string output_name = "reid_embd"); 16 | ~Baseline() = default; 17 | 18 | private: 19 | void preprocessing_cpu(const cv::Mat& img, float* const data, const std::size_t stride); 20 | ITensor* preprocessing_gpu(INetworkDefinition* network, 21 | std::map& weightMap, 22 | ITensor* input); 23 | }; 24 | } -------------------------------------------------------------------------------- /projects/FastRT/include/fastrt/calibrator.h: -------------------------------------------------------------------------------- 1 | #ifndef ENTROPY_CALIBRATOR_H 2 | #define ENTROPY_CALIBRATOR_H 3 | 4 | #include "NvInfer.h" 5 | #include 6 | #include 7 | 8 | //! \class Int8EntropyCalibrator2 9 | //! 10 | //! \brief Implements Entropy calibrator 2. 11 | //! CalibrationAlgoType is kENTROPY_CALIBRATION_2. 12 | //! 
13 | class Int8EntropyCalibrator2 : public nvinfer1::IInt8EntropyCalibrator2 14 | { 15 | public: 16 | Int8EntropyCalibrator2(int batchsize, int input_w, int input_h, const char* img_dir, const char* calib_table_name, const char* input_blob_name, bool read_cache = true); 17 | 18 | virtual ~Int8EntropyCalibrator2(); 19 | int getBatchSize() const override; 20 | bool getBatch(void* bindings[], const char* names[], int nbBindings) override; 21 | const void* readCalibrationCache(size_t& length) override; 22 | void writeCalibrationCache(const void* cache, size_t length) override; 23 | 24 | private: 25 | int batchsize_; 26 | int input_w_; 27 | int input_h_; 28 | int img_idx_; 29 | std::string img_dir_; 30 | std::vector img_files_; 31 | size_t input_count_; 32 | std::string calib_table_name_; 33 | const char* input_blob_name_; 34 | bool read_cache_; 35 | void* device_input_; 36 | std::vector calib_cache_; 37 | }; 38 | 39 | #endif // ENTROPY_CALIBRATOR_H 40 | -------------------------------------------------------------------------------- /projects/FastRT/include/fastrt/config.h.in: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #ifdef BUILD_INT8 4 | #include 5 | const std::string INT8_CALIBRATE_DATASET_PATH = "@INT8_CALIBRATE_DATASET_PATH@"; 6 | #endif 7 | 8 | -------------------------------------------------------------------------------- /projects/FastRT/include/fastrt/cuda_utils.h: -------------------------------------------------------------------------------- 1 | #ifndef TRTX_CUDA_UTILS_H_ 2 | #define TRTX_CUDA_UTILS_H_ 3 | 4 | #include 5 | 6 | #ifndef CUDA_CHECK 7 | #define CUDA_CHECK(callstr)\ 8 | {\ 9 | cudaError_t error_code = callstr;\ 10 | if (error_code != cudaSuccess) {\ 11 | std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__;\ 12 | assert(0);\ 13 | }\ 14 | } 15 | #endif // CUDA_CHECK 16 | 17 | #endif // TRTX_CUDA_UTILS_H_ 18 | 19 | -------------------------------------------------------------------------------- /projects/FastRT/include/fastrt/embedding_head.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include "NvInfer.h" 5 | #include "fastrt/module.h" 6 | #include "fastrt/struct.h" 7 | #include "fastrt/factory.h" 8 | using namespace nvinfer1; 9 | 10 | namespace fastrt { 11 | 12 | class embedding_head : public Module { 13 | private: 14 | FastreidConfig& _modelCfg; 15 | std::unique_ptr _layerFactory; 16 | 17 | public: 18 | embedding_head(FastreidConfig& modelCfg); 19 | embedding_head(FastreidConfig& modelCfg, std::unique_ptr layerFactory); 20 | ~embedding_head() = default; 21 | 22 | ILayer* topology(INetworkDefinition *network, 23 | std::map& weightMap, 24 | ITensor& input) override; 25 | }; 26 | 27 | } -------------------------------------------------------------------------------- /projects/FastRT/include/fastrt/factory.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "struct.h" 4 | #include "module.h" 5 | #include "IPoolingLayerRT.h" 6 | 7 | namespace fastrt { 8 | 9 | class ModuleFactory { 10 | public: 11 | ModuleFactory() = default; 12 | ~ModuleFactory() = default; 13 | 14 | std::unique_ptr createBackbone(FastreidConfig& modelCfg); 15 | std::unique_ptr createHead(FastreidConfig& modelCfg); 16 | }; 17 | 18 | class LayerFactory { 19 | public: 20 | LayerFactory() = default; 21 | ~LayerFactory() = default; 22 | 23 | std::unique_ptr 
createPoolingLayer(const FastreidPoolingType& pooltype); 24 | }; 25 | 26 | } -------------------------------------------------------------------------------- /projects/FastRT/include/fastrt/holder.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | template 4 | class TensorRTHolder { 5 | T* holder; 6 | public: 7 | explicit TensorRTHolder(T* holder_) : holder(holder_) {} 8 | ~TensorRTHolder() { 9 | if (holder) 10 | holder->destroy(); 11 | } 12 | TensorRTHolder(const TensorRTHolder&) = delete; 13 | TensorRTHolder& operator=(const TensorRTHolder&) = delete; 14 | TensorRTHolder(TensorRTHolder && rhs) noexcept{ 15 | holder = rhs.holder; 16 | rhs.holder = nullptr; 17 | } 18 | TensorRTHolder& operator=(TensorRTHolder&& rhs) noexcept { 19 | if (this == &rhs) { 20 | return *this; 21 | } 22 | if (holder) holder->destroy(); 23 | holder = rhs.holder; 24 | rhs.holder = nullptr; 25 | return *this; 26 | } 27 | T* operator->() { 28 | return holder; 29 | } 30 | T* get() { return holder; } 31 | explicit operator bool() { return holder != nullptr; } 32 | T& operator*() noexcept { return *holder; } 33 | }; 34 | 35 | template 36 | TensorRTHolder make_holder(T* holder) { 37 | return TensorRTHolder(holder); 38 | } 39 | 40 | template 41 | using TensorRTNonHolder = T*; -------------------------------------------------------------------------------- /projects/FastRT/include/fastrt/module.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include "struct.h" 5 | #include "NvInfer.h" 6 | using namespace nvinfer1; 7 | 8 | namespace fastrt { 9 | 10 | class Module { 11 | public: 12 | Module() = default; 13 | virtual ~Module() = default; 14 | 15 | virtual ILayer* topology(INetworkDefinition *network, 16 | std::map& weightMap, 17 | ITensor& input) = 0; 18 | }; 19 | 20 | } -------------------------------------------------------------------------------- /projects/FastRT/pybind_interface/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | SET(APP_PROJECT_NAME ReID) 2 | 3 | # pybind 4 | find_package(pybind11) 5 | 6 | find_package(CUDA REQUIRED) 7 | # include and link dirs of cuda and tensorrt, you need adapt them if yours are different 8 | # cuda 9 | include_directories(/usr/local/cuda/include) 10 | link_directories(/usr/local/cuda/lib64) 11 | # tensorrt 12 | include_directories(/usr/include/x86_64-linux-gnu/) 13 | link_directories(/usr/lib/x86_64-linux-gnu/) 14 | 15 | include_directories(${SOLUTION_DIR}/include) 16 | 17 | pybind11_add_module(${APP_PROJECT_NAME} ${PROJECT_SOURCE_DIR}/pybind_interface/reid.cpp) 18 | 19 | # OpenCV 20 | find_package(OpenCV) 21 | target_include_directories(${APP_PROJECT_NAME} 22 | PUBLIC 23 | ${OpenCV_INCLUDE_DIRS} 24 | ) 25 | target_link_libraries(${APP_PROJECT_NAME} 26 | PUBLIC 27 | ${OpenCV_LIBS} 28 | ) 29 | 30 | if(BUILD_FASTRT_ENGINE AND BUILD_PYTHON_INTERFACE) 31 | SET(FASTRTENGINE_LIB FastRTEngine) 32 | else() 33 | SET(FASTRTENGINE_LIB ${SOLUTION_DIR}/libs/FastRTEngine/libFastRTEngine.so) 34 | endif() 35 | 36 | target_link_libraries(${APP_PROJECT_NAME} 37 | PRIVATE 38 | ${FASTRTENGINE_LIB} 39 | nvinfer 40 | ) -------------------------------------------------------------------------------- /projects/FastRT/pybind_interface/docker/trt7cu100/Dockerfile: -------------------------------------------------------------------------------- 1 | # cuda10.0 2 | FROM fineyu/tensorrt7:0.0.1 3 | 4 | RUN apt-get update && apt-get 
install -y \ 5 | build-essential \ 6 | software-properties-common \ 7 | cmake \ 8 | wget \ 9 | python3.7-dev python3-pip 10 | 11 | RUN add-apt-repository -y ppa:timsc/opencv-3.4 && \ 12 | apt-get update && \ 13 | apt-get install -y \ 14 | libopencv-dev \ 15 | libopencv-dnn-dev \ 16 | libopencv-shape3.4-dbg && \ 17 | apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 18 | 19 | RUN wget https://bootstrap.pypa.io/get-pip.py && \ 20 | python3 get-pip.py --force-reinstall && \ 21 | rm get-pip.py 22 | 23 | RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.7 1 && \ 24 | update-alternatives --set python3 /usr/bin/python3.7 25 | 26 | RUN pip install pytest opencv-python 27 | 28 | RUN cd /usr/local/src && \ 29 | wget https://github.com/pybind/pybind11/archive/v2.2.3.tar.gz && \ 30 | tar xvf v2.2.3.tar.gz && \ 31 | cd pybind11-2.2.3 && \ 32 | mkdir build && \ 33 | cd build && \ 34 | cmake .. && \ 35 | make -j12 && \ 36 | make install && \ 37 | cd ../.. && \ 38 | rm -rf pybind11-2.2.3 && \ 39 | rm -rf v2.2.3.tar.gz 40 | -------------------------------------------------------------------------------- /projects/FastRT/pybind_interface/docker/trt7cu102_torch160/Dockerfile: -------------------------------------------------------------------------------- 1 | # cuda10.2 2 | FROM darrenhsieh1717/trt7-cu102-cv34:pybind 3 | 4 | RUN pip install torch==1.6.0 torchvision==0.7.0 5 | 6 | RUN pip install opencv-python tensorboard cython yacs termcolor scikit-learn tabulate gdown gpustat ipdb h5py fs faiss-gpu 7 | 8 | RUN git clone https://github.com/NVIDIA/apex && \ 9 | cd apex && \ 10 | python3 setup.py install 11 | -------------------------------------------------------------------------------- /projects/FastRT/pybind_interface/market_benchmark.py: -------------------------------------------------------------------------------- 1 | import random 2 | import numpy as np 3 | import cv2 4 | import fs 5 | import argparse 6 | import io 7 | import sys 8 | import torch 9 | import time 10 | import os 11 | import torchvision.transforms as T 12 | 13 | sys.path.append('../../..') 14 | sys.path.append('../') 15 | from fastreid.config import get_cfg 16 | from fastreid.modeling.meta_arch import build_model 17 | from fastreid.utils.file_io import PathManager 18 | from fastreid.utils.checkpoint import Checkpointer 19 | from fastreid.utils.logger import setup_logger 20 | from fastreid.data import build_reid_train_loader, build_reid_test_loader 21 | from fastreid.evaluation.rank import eval_market1501 22 | 23 | from build.pybind_interface.ReID import ReID 24 | 25 | 26 | FEATURE_DIM = 2048 27 | GPU_ID = 0 28 | 29 | def map(wrapper): 30 | model = wrapper 31 | cfg = get_cfg() 32 | test_loader, num_query = build_reid_test_loader(cfg, "Market1501", T.Compose([])) 33 | 34 | feats = [] 35 | pids = [] 36 | camids = [] 37 | 38 | for batch in test_loader: 39 | for image_path in batch["img_paths"]: 40 | t = torch.Tensor(np.array([model.infer(cv2.imread(image_path))])) 41 | t.to(torch.device(GPU_ID)) 42 | feats.append(t) 43 | pids.extend(batch["targets"].numpy()) 44 | camids.extend(batch["camids"].numpy()) 45 | 46 | feats = torch.cat(feats, dim=0) 47 | q_feat = feats[:num_query] 48 | g_feat = feats[num_query:] 49 | q_pids = np.asarray(pids[:num_query]) 50 | g_pids = np.asarray(pids[num_query:]) 51 | q_camids = np.asarray(camids[:num_query]) 52 | g_camids = np.asarray(camids[num_query:]) 53 | 54 | 55 | distmat = 1 - torch.mm(q_feat, g_feat.t()) 56 | distmat = distmat.numpy() 57 | all_cmc, all_AP, all_INP = 
eval_market1501(distmat, q_pids, g_pids, q_camids, g_camids, 5) 58 | mAP = np.mean(all_AP) 59 | print("mAP {}, rank-1 {}".format(mAP, all_cmc[0])) 60 | 61 | 62 | if __name__ == '__main__': 63 | infer = ReID(GPU_ID) 64 | infer.build("../build/sbs_R50-ibn.engine") 65 | map(infer) 66 | -------------------------------------------------------------------------------- /projects/FastRT/pybind_interface/test.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | sys.path.append("../") 4 | from build.pybind_interface.ReID import ReID 5 | import cv2 6 | import time 7 | 8 | 9 | if __name__ == '__main__': 10 | iter_ = 10 11 | m = ReID(0) 12 | m.build("../build/sbs_R50-ibn.engine") 13 | print("build done") 14 | 15 | frame = cv2.imread("../data/Market-1501-v15.09.15/calib_set/-1_c1s2_009916_03.jpg") 16 | m.infer(frame) 17 | t0 = time.time() 18 | 19 | for i in range(iter_): 20 | m.infer(frame) 21 | 22 | total = time.time() - t0 23 | print("CPP API fps is {:.1f}, avg infer time is {:.2f}ms".format(iter_ / total, total / iter_ * 1000)) -------------------------------------------------------------------------------- /projects/FastRT/third_party/cnpy/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | CMAKE_MINIMUM_REQUIRED(VERSION 3.0 FATAL_ERROR) 2 | if(COMMAND cmake_policy) 3 | cmake_policy(SET CMP0003 NEW) 4 | endif(COMMAND cmake_policy) 5 | 6 | project(CNPY) 7 | 8 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") 9 | 10 | option(ENABLE_STATIC "Build static (.a) library" ON) 11 | 12 | find_package(ZLIB REQUIRED) 13 | 14 | include_directories(${ZLIB_INCLUDE_DIRS}) 15 | 16 | add_library(cnpy SHARED "cnpy.cpp") 17 | target_link_libraries(cnpy ${ZLIB_LIBRARIES}) 18 | install(TARGETS "cnpy" LIBRARY DESTINATION lib PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE) 19 | 20 | if(ENABLE_STATIC) 21 | add_library(cnpy-static STATIC "cnpy.cpp") 22 | set_target_properties(cnpy-static PROPERTIES OUTPUT_NAME "cnpy") 23 | install(TARGETS "cnpy-static" ARCHIVE DESTINATION lib) 24 | endif(ENABLE_STATIC) 25 | 26 | install(FILES "cnpy.h" DESTINATION include) 27 | install(FILES "mat2npz" "npy2mat" "npz2mat" DESTINATION bin PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE) 28 | 29 | add_executable(example1 example1.cpp) 30 | target_link_libraries(example1 cnpy) 31 | -------------------------------------------------------------------------------- /projects/FastRT/third_party/cnpy/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License 2 | 3 | Copyright (c) Carl Rogers, 2011 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /projects/FastRT/third_party/cnpy/mat2npz: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys 4 | from numpy import savez 5 | from scipy.io import loadmat 6 | 7 | assert len(sys.argv) > 1 8 | 9 | files = sys.argv[1:] 10 | 11 | for f in files: 12 | mat_vars = loadmat(f) 13 | mat_vars.pop('__version__') 14 | mat_vars.pop('__header__') 15 | mat_vars.pop('__globals__') 16 | 17 | fn = f.replace('.mat','.npz') 18 | savez(fn,**mat_vars) 19 | -------------------------------------------------------------------------------- /projects/FastRT/third_party/cnpy/npy2mat: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys 4 | from numpy import load 5 | from scipy.io import savemat 6 | 7 | assert len(sys.argv) > 1 8 | 9 | files = sys.argv[1:] 10 | 11 | for f in files: 12 | data = load(f) 13 | fn = f.replace('.npy','') 14 | fn = fn.replace('.','_') 15 | savemat(fn,{fn : data}) 16 | -------------------------------------------------------------------------------- /projects/FastRT/third_party/cnpy/npz2mat: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys 4 | from numpy import load 5 | from scipy.io import savemat 6 | 7 | assert len(sys.argv) > 1 8 | 9 | files = sys.argv[1:] 10 | 11 | for f in files: 12 | data = load(f) 13 | fn = f.replace('.npz','') 14 | fn = fn.replace('.','_') #matlab cant handle dots 15 | savemat(fn,data) 16 | -------------------------------------------------------------------------------- /projects/FastRetri/configs/base-image_retri.yml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: Baseline 3 | 4 | BACKBONE: 5 | NAME: build_resnet_backbone 6 | DEPTH: 50x 7 | NORM: FrozenBN 8 | LAST_STRIDE: 1 9 | FEAT_DIM: 2048 10 | PRETRAIN: True 11 | 12 | HEADS: 13 | NAME: EmbeddingHead 14 | NORM: syncBN 15 | WITH_BNNECK: True 16 | NECK_FEAT: after 17 | EMBEDDING_DIM: 0 18 | POOL_LAYER: GeneralizedMeanPooling 19 | CLS_LAYER: Linear 20 | 21 | LOSSES: 22 | NAME: ("CrossEntropyLoss",) 23 | 24 | CE: 25 | EPSILON: 0.1 26 | SCALE: 1. 27 | 28 | INPUT: 29 | SIZE_TRAIN: [256, 256] 30 | SIZE_TEST: [256, 256] 31 | 32 | CROP: 33 | ENABLED: True 34 | SIZE: [224,] 35 | SCALE: [0.16, 1.] 36 | RATIO: [0.75, 1.33333] 37 | 38 | FLIP: 39 | ENABLED: True 40 | 41 | CJ: 42 | ENABLED: False 43 | BRIGHTNESS: 0.3 44 | CONTRAST: 0.3 45 | SATURATION: 0.1 46 | HUE: 0.1 47 | 48 | 49 | DATALOADER: 50 | SAMPLER_TRAIN: TrainingSampler 51 | NUM_WORKERS: 8 52 | 53 | SOLVER: 54 | MAX_EPOCH: 100 55 | AMP: 56 | ENABLED: True 57 | 58 | OPT: SGD 59 | SCHED: CosineAnnealingLR 60 | 61 | BASE_LR: 0.003 62 | MOMENTUM: 0.99 63 | NESTEROV: True 64 | 65 | BIAS_LR_FACTOR: 1. 66 | WEIGHT_DECAY: 0.0005 67 | WEIGHT_DECAY_BIAS: 0. 
68 | IMS_PER_BATCH: 128 69 | 70 | ETA_MIN_LR: 0.00003 71 | 72 | WARMUP_FACTOR: 0.1 73 | WARMUP_ITERS: 1000 74 | 75 | CHECKPOINT_PERIOD: 10 76 | 77 | CLIP_GRADIENTS: 78 | ENABLED: True 79 | 80 | TEST: 81 | EVAL_PERIOD: 10 82 | IMS_PER_BATCH: 256 83 | 84 | CUDNN_BENCHMARK: True -------------------------------------------------------------------------------- /projects/FastRetri/configs/cars.yml: -------------------------------------------------------------------------------- 1 | _BASE_: base-image_retri.yml 2 | 3 | MODEL: 4 | LOSSES: 5 | CE: 6 | EPSILON: 0.4 7 | 8 | INPUT: 9 | CJ: 10 | ENABLED: True 11 | BRIGHTNESS: 0.3 12 | CONTRAST: 0.3 13 | SATURATION: 0.3 14 | HUE: 0.1 15 | 16 | CROP: 17 | RATIO: (1., 1.) 18 | 19 | SOLVER: 20 | MAX_EPOCH: 100 21 | 22 | BASE_LR: 0.05 23 | ETA_MIN_LR: 0.0005 24 | 25 | NESTEROV: False 26 | MOMENTUM: 0. 27 | 28 | TEST: 29 | RECALLS: [ 1, 2, 4, 8, 16, 32 ] 30 | 31 | DATASETS: 32 | NAMES: ("Cars196",) 33 | TESTS: ("Cars196",) 34 | 35 | OUTPUT_DIR: projects/FastRetri/logs/r50-base_cars 36 | -------------------------------------------------------------------------------- /projects/FastRetri/configs/cub.yml: -------------------------------------------------------------------------------- 1 | _BASE_: base-image_retri.yml 2 | 3 | MODEL: 4 | LOSSES: 5 | CE: 6 | EPSILON: 0.3 7 | 8 | INPUT: 9 | SIZE_TRAIN: [256,] 10 | SIZE_TEST: [256,] 11 | 12 | CJ: 13 | ENABLED: True 14 | BRIGHTNESS: 0.25 15 | CONTRAST: 0.25 16 | SATURATION: 0.25 17 | HUE: 0.0 18 | 19 | SOLVER: 20 | MAX_EPOCH: 30 21 | 22 | BASE_LR: 0.02 23 | ETA_MIN_LR: 0.00002 24 | 25 | NESTEROV: False 26 | MOMENTUM: 0. 27 | 28 | TEST: 29 | RECALLS: [ 1, 2, 4, 8, 16, 32 ] 30 | 31 | DATASETS: 32 | NAMES: ("CUB",) 33 | TESTS: ("CUB",) 34 | 35 | OUTPUT_DIR: projects/FastRetri/logs/r50-base_cub -------------------------------------------------------------------------------- /projects/FastRetri/configs/inshop.yml: -------------------------------------------------------------------------------- 1 | _BASE_: base-image_retri.yml 2 | 3 | INPUT: 4 | SIZE_TRAIN: [0,] 5 | SIZE_TEST: [0,] 6 | 7 | SOLVER: 8 | MAX_EPOCH: 100 9 | 10 | BASE_LR: 0.003 11 | ETA_MIN_LR: 0.00003 12 | 13 | MOMENTUM: 0.99 14 | NESTEROV: True 15 | 16 | TEST: 17 | RECALLS: [ 1, 10, 20, 30, 40, 50 ] 18 | 19 | DATASETS: 20 | NAMES: ("InShop",) 21 | TESTS: ("InShop",) 22 | 23 | OUTPUT_DIR: projects/FastRetri/logs/r50-base_inshop -------------------------------------------------------------------------------- /projects/FastRetri/configs/sop.yml: -------------------------------------------------------------------------------- 1 | _BASE_: base-image_retri.yml 2 | 3 | SOLVER: 4 | MAX_EPOCH: 100 5 | 6 | BASE_LR: 0.003 7 | ETA_MIN_LR: 0.00003 8 | 9 | MOMENTUM: 0.99 10 | NESTEROV: True 11 | 12 | TEST: 13 | RECALLS: [1, 10, 100, 1000] 14 | 15 | DATASETS: 16 | NAMES: ("SOP",) 17 | TESTS: ("SOP",) 18 | 19 | OUTPUT_DIR: projects/FastRetri/logs/r50-base_sop -------------------------------------------------------------------------------- /projects/FastRetri/fastretri/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .config import add_retri_config 8 | from .datasets import * 9 | from .retri_evaluator import RetriEvaluator 10 | -------------------------------------------------------------------------------- /projects/FastRetri/fastretri/config.py: 
-------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | 8 | def add_retri_config(cfg): 9 | _C = cfg 10 | 11 | _C.TEST.RECALLS = [1, 2, 4, 8, 16, 32] 12 | -------------------------------------------------------------------------------- /projects/FastRetri/train_net.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | """ 4 | @author: sherlock 5 | @contact: sherlockliao01@gmail.com 6 | """ 7 | 8 | import sys 9 | 10 | sys.path.append('.') 11 | 12 | from fastreid.config import get_cfg 13 | from fastreid.engine import default_argument_parser, default_setup, launch 14 | from fastreid.utils.checkpoint import Checkpointer 15 | from fastreid.engine.defaults import DefaultTrainer 16 | 17 | from fastretri import * 18 | 19 | 20 | class Trainer(DefaultTrainer): 21 | 22 | @classmethod 23 | def build_evaluator(cls, cfg, dataset_name, output_dir=None): 24 | data_loader, num_query = cls.build_test_loader(cfg, dataset_name) 25 | return data_loader, RetriEvaluator(cfg, num_query, output_dir) 26 | 27 | 28 | def setup(args): 29 | """ 30 | Create configs and perform basic setups. 31 | """ 32 | cfg = get_cfg() 33 | add_retri_config(cfg) 34 | cfg.merge_from_file(args.config_file) 35 | cfg.merge_from_list(args.opts) 36 | cfg.freeze() 37 | default_setup(cfg, args) 38 | return cfg 39 | 40 | 41 | def main(args): 42 | cfg = setup(args) 43 | 44 | if args.eval_only: 45 | cfg.defrost() 46 | cfg.MODEL.BACKBONE.PRETRAIN = False 47 | model = Trainer.build_model(cfg) 48 | 49 | Checkpointer(model).load(cfg.MODEL.WEIGHTS) # load trained model 50 | 51 | res = Trainer.test(cfg, model) 52 | return res 53 | 54 | trainer = Trainer(cfg) 55 | 56 | trainer.resume_or_load(resume=args.resume) 57 | return trainer.train() 58 | 59 | 60 | if __name__ == "__main__": 61 | args = default_argument_parser().parse_args() 62 | print("Command Line Args:", args) 63 | launch( 64 | main, 65 | args.num_gpus, 66 | num_machines=args.num_machines, 67 | machine_rank=args.machine_rank, 68 | dist_url=args.dist_url, 69 | args=(args,), 70 | ) 71 | -------------------------------------------------------------------------------- /projects/FastTune/README.md: -------------------------------------------------------------------------------- 1 | # Hyper-Parameter Optimization in FastReID 2 | 3 | This project includes training reid models with hyper-parameter optimization. 4 | 5 | Install the following 6 | 7 | ```bash 8 | pip install 'ray[tune]' 9 | pip install hpbandster ConfigSpace hyperopt 10 | ``` 11 | 12 | ## Example 13 | 14 | This is an example for tuning `batch_size` and `num_instance` automatically. 
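Concretely, a trial amounts to sampling values for these two options and writing them into the fastreid config before the trainer is built. The sketch below is only an illustration of that idea: the key names (`bsz`, `num_inst`), the candidate values, and the `apply_trial` helper are assumptions made for this example, not necessarily what `tune_net.py` uses.

```python
from ray import tune

# Hypothetical search space over the two options mentioned above.
search_space = {
    "bsz": tune.choice([64, 96, 128, 256]),   # maps to SOLVER.IMS_PER_BATCH
    "num_inst": tune.choice([2, 4, 8, 16]),   # maps to DATALOADER.NUM_INSTANCE
}


def apply_trial(cfg, params):
    # Write the sampled values into a (defrosted) fastreid config
    # before building the trainer for this trial.
    cfg.defrost()
    cfg.SOLVER.IMS_PER_BATCH = params["bsz"]
    cfg.DATALOADER.NUM_INSTANCE = params["num_inst"]
    cfg.freeze()
    return cfg
```

With an identity-based sampler, `IMS_PER_BATCH` should remain a multiple of `NUM_INSTANCE`, so it usually makes sense to constrain the sampled combinations accordingly.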
15 | 16 | To train hyperparameter optimization with BOHB(Bayesian Optimization with HyperBand) search algorithm, run 17 | 18 | ```bash 19 | python3 projects/FastTune/tune_net.py --config-file projects/FastTune/configs/search_trial.yml --srch-algo "bohb" 20 | ``` 21 | 22 | ## Known issues 23 | todo -------------------------------------------------------------------------------- /projects/FastTune/autotuner/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .tune_hooks import TuneReportHook 8 | -------------------------------------------------------------------------------- /projects/FastTune/configs/search_trial.yml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: Baseline 3 | 4 | FREEZE_LAYERS: [ backbone ] 5 | 6 | BACKBONE: 7 | NAME: build_resnet_backbone 8 | DEPTH: 34x 9 | LAST_STRIDE: 1 10 | FEAT_DIM: 512 11 | NORM: BN 12 | WITH_NL: False 13 | WITH_IBN: True 14 | PRETRAIN: True 15 | PRETRAIN_PATH: /export/home/lxy/.cache/torch/checkpoints/resnet34_ibn_a-94bc1577.pth 16 | 17 | HEADS: 18 | NUM_CLASSES: 702 19 | NAME: EmbeddingHead 20 | NORM: BN 21 | NECK_FEAT: after 22 | EMBEDDING_DIM: 0 23 | POOL_LAYER: GeneralizedMeanPooling 24 | CLS_LAYER: CircleSoftmax 25 | SCALE: 64 26 | MARGIN: 0.35 27 | 28 | LOSSES: 29 | NAME: ("CrossEntropyLoss", "TripletLoss",) 30 | 31 | CE: 32 | EPSILON: 0.1 33 | SCALE: 1. 34 | 35 | TRI: 36 | MARGIN: 0.0 37 | HARD_MINING: True 38 | NORM_FEAT: False 39 | SCALE: 1. 40 | 41 | INPUT: 42 | SIZE_TRAIN: [ 256, 128 ] 43 | SIZE_TEST: [ 256, 128 ] 44 | 45 | AUTOAUG: 46 | ENABLED: True 47 | PROB: 0.1 48 | 49 | REA: 50 | ENABLED: True 51 | 52 | CJ: 53 | ENABLED: True 54 | 55 | PADDING: 56 | ENABLED: True 57 | 58 | DATALOADER: 59 | SAMPLER_TRAIN: NaiveIdentitySampler 60 | NUM_INSTANCE: 16 61 | NUM_WORKERS: 8 62 | 63 | SOLVER: 64 | AMP: 65 | ENABLED: False 66 | MAX_EPOCH: 60 67 | OPT: Adam 68 | SCHED: CosineAnnealingLR 69 | BASE_LR: 0.00035 70 | BIAS_LR_FACTOR: 1. 
71 | WEIGHT_DECAY: 0.0005 72 | WEIGHT_DECAY_BIAS: 0.0 73 | IMS_PER_BATCH: 64 74 | 75 | DELAY_EPOCHS: 30 76 | ETA_MIN_LR: 0.00000077 77 | 78 | FREEZE_ITERS: 500 79 | 80 | WARMUP_FACTOR: 0.1 81 | WARMUP_ITERS: 1000 82 | 83 | CHECKPOINT_PERIOD: 100 84 | 85 | TEST: 86 | EVAL_PERIOD: 10 87 | IMS_PER_BATCH: 256 88 | 89 | DATASETS: 90 | NAMES: ("DukeMTMC",) 91 | TESTS: ("DukeMTMC",) 92 | COMBINEALL: False 93 | 94 | CUDNN_BENCHMARK: True 95 | 96 | OUTPUT_DIR: projects/FastTune/logs/trial 97 | -------------------------------------------------------------------------------- /projects/HAA/Readme.md: -------------------------------------------------------------------------------- 1 | # Black Re-ID: A Head-shoulder Descriptor for the Challenging Problem of Person Re-Identification 2 | 3 | ## Training 4 | 5 | To train a model, run 6 | 7 | ```bash 8 | CUDA_VISIBLE_DEVICES=gpus python train_net.py --config-file 9 | ``` 10 | 11 | ## Evaluation 12 | 13 | To evaluate the model in test set, run similarly: 14 | 15 | ```bash 16 | CUDA_VISIBLE_DEVICES=gpus python train_net.py --config-file --eval-only MODEL.WEIGHTS model.pth 17 | ``` 18 | 19 | ## Experimental Results 20 | 21 | ### Market1501 dataset 22 | 23 | | Method | Pretrained | Rank@1 | mAP | 24 | | :---: | :---: | :---: |:---: | 25 | | ResNet50 | ImageNet | 93.3% | 84.6% | 26 | | MGN | ImageNet | 95.7% | 86.9% | 27 | | HAA (ResNet50) | ImageNet | 95% | 87.1% | 28 | | HAA (MGN) | ImageNet | 95.8% | 89.5% | 29 | 30 | ### DukeMTMC dataset 31 | 32 | | Method | Pretrained | Rank@1 | mAP | 33 | | :---: | :---: | :---: |:---: | 34 | | ResNet50 | ImageNet | 86.2% | 75.3% | 35 | | MGN | ImageNet | 88.7% | 78.4% | 36 | | HAA (ResNet50) | ImageNet | 87.7% | 75.7% | 37 | | HAA (MGN) | ImageNet | 89% | 80.4% | 38 | 39 | ### Black-reid black group 40 | 41 | | Method | Pretrained | Rank@1 | mAP | 42 | | :---: | :---: | :---: |:---: | 43 | | ResNet50 | ImageNet | 80.9% | 70.8% | 44 | | MGN | ImageNet | 86.7% | 79.1% | 45 | | HAA (ResNet50) | ImageNet | 86.7% | 79% | 46 | | HAA (MGN) | ImageNet | 91.0% | 83.8% | 47 | 48 | ### White-reid white group 49 | 50 | | Method | Pretrained | Rank@1 | mAP | 51 | | :---: | :---: | :---: |:---: | 52 | | ResNet50 | ImageNet | 89.5% | 75.8% | 53 | | MGN | ImageNet | 94.3% | 85.8% | 54 | | HAA (ResNet50) | ImageNet | 93.5% | 84.4% | 55 | | HSE (MGN) | ImageNet | 95.3% | 88.1% | 56 | 57 | -------------------------------------------------------------------------------- /projects/NAIC20/configs/nest101-base.yml: -------------------------------------------------------------------------------- 1 | _BASE_: Base-naic.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | NAME: build_resnest_backbone 6 | DEPTH: 101x 7 | WITH_IBN: False 8 | PRETRAIN: True 9 | 10 | OUTPUT_DIR: projects/NAIC20/logs/nest101-128x256 -------------------------------------------------------------------------------- /projects/NAIC20/configs/r34-ibn.yml: -------------------------------------------------------------------------------- 1 | _BASE_: Base-naic.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | NAME: build_resnet_backbone 6 | DEPTH: 34x 7 | FEAT_DIM: 512 8 | WITH_IBN: True 9 | PRETRAIN: True 10 | 11 | OUTPUT_DIR: projects/NAIC20/logs/r34_ibn-128x256 -------------------------------------------------------------------------------- /projects/NAIC20/configs/submit.yml: -------------------------------------------------------------------------------- 1 | _BASE_: Base-naic.yml 2 | 3 | MODEL: 4 | BACKBONE: 5 | NAME: build_resnet_backbone 6 | DEPTH: 34x 7 | FEAT_DIM: 512 8 | WITH_IBN: True 9 | 10 | 
WEIGHTS: projects/NAIC20/logs/reproduce/r34-tripletx10/model_best.pth 11 | 12 | DATASETS: 13 | TESTS: ("NAIC20_R2A",) 14 | 15 | TEST: 16 | RERANK: 17 | ENABLED: True 18 | K1: 20 19 | K2: 3 20 | LAMBDA: 0.8 21 | 22 | FLIP: 23 | ENABLED: True 24 | 25 | SAVE_DISTMAT: True 26 | 27 | OUTPUT_DIR: projects/NAIC20/logs/r34_ibn-128x256-submit -------------------------------------------------------------------------------- /projects/NAIC20/naic/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: l1aoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .naic_dataset import * 8 | from .config import add_naic_config 9 | from .naic_evaluator import NaicEvaluator 10 | -------------------------------------------------------------------------------- /projects/NAIC20/naic/config.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | 8 | def add_naic_config(cfg): 9 | _C = cfg 10 | 11 | _C.DATASETS.RM_LT = True 12 | _C.TEST.SAVE_DISTMAT = False 13 | -------------------------------------------------------------------------------- /projects/PartialReID/configs/partial_market.yml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: PartialBaseline 3 | 4 | BACKBONE: 5 | NAME: build_resnet_backbone 6 | NORM: BN 7 | DEPTH: 50x 8 | LAST_STRIDE: 1 9 | FEAT_DIM: 2048 10 | WITH_IBN: True 11 | PRETRAIN: True 12 | 13 | HEADS: 14 | NAME: DSRHead 15 | POOL_LAYER: FastGlobalAvgPool 16 | WITH_BNNECK: True 17 | CLS_LAYER: Linear 18 | 19 | LOSSES: 20 | NAME: ("CrossEntropyLoss", "TripletLoss",) 21 | 22 | CE: 23 | EPSILON: 0.12 24 | SCALE: 1. 25 | 26 | TRI: 27 | MARGIN: 0.3 28 | SCALE: 1.0 29 | HARD_MINING: False 30 | 31 | DATASETS: 32 | NAMES: ("Market1501",) 33 | TESTS: ("PartialREID", "PartialiLIDS", "OccludedREID",) 34 | 35 | INPUT: 36 | SIZE_TRAIN: [384, 128] 37 | SIZE_TEST: [384, 128] 38 | 39 | FLIP: 40 | ENABLED: True 41 | 42 | DATALOADER: 43 | SAMPLER_TRAIN: NaiveIdentitySampler 44 | NUM_INSTANCE: 4 45 | NUM_WORKERS: 8 46 | 47 | SOLVER: 48 | AMP: 49 | ENABLED: False 50 | OPT: Adam 51 | MAX_EPOCH: 60 52 | BASE_LR: 0.0007 53 | BIAS_LR_FACTOR: 1. 
54 | WEIGHT_DECAY: 0.0005 55 | WEIGHT_DECAY_BIAS: 0.0005 56 | IMS_PER_BATCH: 256 57 | 58 | SCHED: CosineAnnealingLR 59 | DELAY_EPOCHS: 20 60 | ETA_MIN_LR: 0.0000007 61 | 62 | WARMUP_FACTOR: 0.1 63 | WARMUP_ITERS: 500 64 | 65 | CHECKPOINT_PERIOD: 20 66 | 67 | TEST: 68 | EVAL_PERIOD: 10 69 | IMS_PER_BATCH: 128 70 | 71 | CUDNN_BENCHMARK: True 72 | 73 | OUTPUT_DIR: projects/PartialReID/logs/test_partial -------------------------------------------------------------------------------- /projects/PartialReID/partialreid/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: xingyu liao 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .partial_dataset import * 8 | from .partialbaseline import PartialBaseline 9 | from .dsr_head import DSRHead 10 | from .config import add_partialreid_config 11 | from .dsr_evaluation import DsrEvaluator 12 | -------------------------------------------------------------------------------- /projects/PartialReID/partialreid/config.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: l1aoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from fastreid.config import CfgNode as CN 8 | 9 | 10 | def add_partialreid_config(cfg): 11 | _C = cfg 12 | 13 | _C.TEST.DSR = CN({"ENABLED": True}) 14 | -------------------------------------------------------------------------------- /projects/PartialReID/partialreid/partialbaseline.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from fastreid.modeling.losses import * 8 | from fastreid.modeling.meta_arch import Baseline 9 | from fastreid.modeling.meta_arch.build import META_ARCH_REGISTRY 10 | 11 | 12 | @META_ARCH_REGISTRY.register() 13 | class PartialBaseline(Baseline): 14 | 15 | def losses(self, outputs, gt_labels): 16 | r""" 17 | Compute loss from modeling's outputs, the loss function input arguments 18 | must be the same as the outputs of the model forwarding. 19 | """ 20 | loss_dict = super().losses(outputs, gt_labels) 21 | 22 | fore_cls_outputs = outputs["fore_cls_outputs"] 23 | fore_feat = outputs["foreground_features"] 24 | 25 | loss_names = self.loss_kwargs['loss_names'] 26 | 27 | if 'CrossEntropyLoss' in loss_names: 28 | ce_kwargs = self.loss_kwargs.get('ce') 29 | loss_dict['loss_fore_cls'] = cross_entropy_loss( 30 | fore_cls_outputs, 31 | gt_labels, 32 | ce_kwargs.get('eps'), 33 | ce_kwargs.get('alpha') 34 | ) * ce_kwargs.get('scale') 35 | 36 | if 'TripletLoss' in loss_names: 37 | tri_kwargs = self.loss_kwargs.get('tri') 38 | loss_dict['loss_fore_triplet'] = triplet_loss( 39 | fore_feat, 40 | gt_labels, 41 | tri_kwargs.get('margin'), 42 | tri_kwargs.get('norm_feat'), 43 | tri_kwargs.get('hard_mining') 44 | ) * tri_kwargs.get('scale') 45 | 46 | return loss_dict 47 | -------------------------------------------------------------------------------- /projects/README.md: -------------------------------------------------------------------------------- 1 | 2 | Here are a few projects that are built on fastreid. 3 | They are examples of how to use fastreid as a library, to make your projects more maintainable. 4 | 5 | # Projects by JDAI 6 | 7 | Note that these are research projects, and therefore may not have the same level of support or stability as fastreid.
8 | 9 | - [Deep Spatial Feature Reconstruction for Partial Person Re-identification](https://github.com/JDAI-CV/fast-reid/tree/master/projects/PartialReID) 10 | - [Black Re-ID: A Head-shoulder Descriptor for the Challenging Problem of Person Re-Identification](https://github.com/JDAI-CV/fast-reid/tree/master/projects/HAA) 11 | - [Image Classification](https://github.com/JDAI-CV/fast-reid/tree/master/projects/FastCls) 12 | - [Face Recognition](https://github.com/JDAI-CV/fast-reid/tree/master/projects/FastFace) 13 | - [Image Retrieval](https://github.com/JDAI-CV/fast-reid/tree/master/projects/FastRetri) 14 | - [Attribute Recognition](https://github.com/JDAI-CV/fast-reid/tree/master/projects/FastAttr) 15 | - [Hyper-Parameters Optimization](https://github.com/JDAI-CV/fast-reid/tree/master/projects/FastTune) 16 | - [Overhaul Distillation](https://github.com/JDAI-CV/fast-reid/tree/master/projects/FastDistill) 17 | - Semi-Supervised Domain Generalizable Person Re-Identification. [code](https://github.com/xiaomingzhid/sskd) and [paper](https://arxiv.org/pdf/2108.05045.pdf) 18 | 19 | # External Projects 20 | 21 | External projects in the community that use fastreid: 22 | 23 | - [FastReID of Interpreter Project (ICCV 2021)](https://github.com/SheldongChen/AMD.github.io) 24 | 25 | # Competitions 26 | 27 | - NAIC20 reid track [1-st solution](https://github.com/JDAI-CV/fast-reid/tree/master/projects/NAIC20) 28 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | -------------------------------------------------------------------------------- /tests/dataset_test.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import sys 8 | sys.path.append('.') 9 | from data import get_dataloader 10 | from config import cfg 11 | import argparse 12 | from data.datasets import init_dataset 13 | # cfg.DATALOADER.SAMPLER = 'triplet' 14 | cfg.DATASETS.NAMES = ("market1501", "dukemtmc", "cuhk03", "msmt17",) 15 | 16 | 17 | if __name__ == '__main__': 18 | parser = argparse.ArgumentParser(description="ReID Baseline Training") 19 | parser.add_argument( 20 | '-cfg', "--config_file", 21 | default="", 22 | metavar="FILE", 23 | help="path to config file", 24 | type=str 25 | ) 26 | # parser.add_argument("--local_rank", type=int, default=0) 27 | parser.add_argument("opts", help="Modify config options using the command-line", default=None, 28 | nargs=argparse.REMAINDER) 29 | args = parser.parse_args() 30 | cfg.merge_from_list(args.opts) 31 | 32 | # dataset = init_dataset('msmt17', combineall=True) 33 | get_dataloader(cfg) 34 | # tng_dataloader, val_dataloader, num_classes, num_query = get_dataloader(cfg) 35 | # def get_ex(): return open_image('datasets/beijingStation/query/000245_c10s2_1561732033722.000000.jpg') 36 | # im = get_ex() 37 | # print(data.train_ds[0]) 38 | # print(data.test_ds[0]) 39 | # a = next(iter(data.train_dl)) 40 | # from IPython import embed; embed() 41 | # from ipdb import set_trace; set_trace() 42 | # im.apply_tfms(crop_pad(size=(300, 300))) 43 | -------------------------------------------------------------------------------- /tests/feature_align.py: -------------------------------------------------------------------------------- 1 | import 
unittest 2 | import numpy as np 3 | import os 4 | from glob import glob 5 | 6 | 7 | class TestFeatureAlign(unittest.TestCase): 8 | def test_caffe_pytorch_feat_align(self): 9 | caffe_feat_path = "/export/home/lxy/cvpalgo-fast-reid/tools/deploy/caffe_R50_output" 10 | pytorch_feat_path = "/export/home/lxy/cvpalgo-fast-reid/demo/logs/R50_256x128_pytorch_feat_output" 11 | feat_filenames = os.listdir(caffe_feat_path) 12 | for feat_name in feat_filenames: 13 | caffe_feat = np.load(os.path.join(caffe_feat_path, feat_name)) 14 | pytorch_feat = np.load(os.path.join(pytorch_feat_path, feat_name)) 15 | sim = np.dot(caffe_feat, pytorch_feat.transpose())[0][0] 16 | assert sim > 0.97, f"Got similarity {sim} and feature of {feat_name} is not aligned" 17 | 18 | def test_model_performance(self): 19 | caffe_feat_path = "/export/home/lxy/cvpalgo-fast-reid/tools/deploy/caffe_R50_output" 20 | feat_filenames = os.listdir(caffe_feat_path) 21 | feats = [] 22 | for feat_name in feat_filenames: 23 | caffe_feat = np.load(os.path.join(caffe_feat_path, feat_name)) 24 | feats.append(caffe_feat) 25 | from ipdb import set_trace; set_trace() 26 | 27 | 28 | 29 | if __name__ == '__main__': 30 | unittest.main() 31 | -------------------------------------------------------------------------------- /tests/interp_test.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from fastai.vision import * 3 | from fastai.basic_data import * 4 | from fastai.layers import * 5 | 6 | import sys 7 | sys.path.append('.') 8 | from engine.interpreter import ReidInterpretation 9 | 10 | from data import get_data_bunch 11 | from modeling import build_model 12 | from config import cfg 13 | cfg.DATASETS.NAMES = ('market1501',) 14 | cfg.DATASETS.TEST_NAMES = 'market1501' 15 | cfg.MODEL.BACKBONE = 'resnet50' 16 | 17 | data_bunch, test_labels, num_query = get_data_bunch(cfg) 18 | 19 | model = build_model(cfg, 10) 20 | model.load_params_wo_fc(torch.load('logs/2019.8.14/market/baseline/models/model_149.pth')['model']) 21 | learn = Learner(data_bunch, model) 22 | 23 | feats, _ = learn.get_preds(DatasetType.Test, activ=Lambda(lambda x: x)) -------------------------------------------------------------------------------- /tests/lr_scheduler_test.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import unittest 3 | 4 | import torch 5 | from torch import nn 6 | 7 | sys.path.append('.') 8 | from solver.lr_scheduler import WarmupMultiStepLR 9 | from solver.build import make_optimizer 10 | from config import cfg 11 | 12 | 13 | class MyTestCase(unittest.TestCase): 14 | def test_something(self): 15 | net = nn.Linear(10, 10) 16 | optimizer = make_optimizer(cfg, net) 17 | lr_scheduler = WarmupMultiStepLR(optimizer, [20, 40], warmup_iters=10) 18 | for i in range(50): 19 | lr_scheduler.step() 20 | for j in range(3): 21 | print(i, lr_scheduler.get_lr()[0]) 22 | optimizer.step() 23 | 24 | 25 | if __name__ == '__main__': 26 | unittest.main() 27 | -------------------------------------------------------------------------------- /tests/model_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import torch 4 | 5 | import sys 6 | sys.path.append('.') 7 | from fastreid.config import cfg 8 | from fastreid.modeling.backbones import build_resnet_backbone 9 | from fastreid.modeling.backbones.resnet_ibn_a import se_resnet101_ibn_a 10 | from torch import nn 11 | 12 | 13 | class MyTestCase(unittest.TestCase): 14 | 
def test_se_resnet101(self): 15 | cfg.MODEL.BACKBONE.NAME = 'resnet101' 16 | cfg.MODEL.BACKBONE.DEPTH = 101 17 | cfg.MODEL.BACKBONE.WITH_IBN = True 18 | cfg.MODEL.BACKBONE.WITH_SE = True 19 | cfg.MODEL.BACKBONE.PRETRAIN_PATH = '/export/home/lxy/.cache/torch/checkpoints/se_resnet101_ibn_a.pth.tar' 20 | 21 | net1 = build_resnet_backbone(cfg) 22 | net1.cuda() 23 | net2 = nn.DataParallel(se_resnet101_ibn_a()) 24 | res = net2.load_state_dict(torch.load(cfg.MODEL.BACKBONE.PRETRAIN_PATH)['state_dict'], strict=False) 25 | net2.cuda() 26 | x = torch.randn(10, 3, 256, 128).cuda() 27 | y1 = net1(x) 28 | y2 = net2(x) 29 | assert y1.sum() == y2.sum(), 'train mode problem' 30 | net1.eval() 31 | net2.eval() 32 | y1 = net1(x) 33 | y2 = net2(x) 34 | assert y1.sum() == y2.sum(), 'eval mode problem' 35 | 36 | 37 | if __name__ == '__main__': 38 | unittest.main() 39 | -------------------------------------------------------------------------------- /tests/sampler_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import sys 3 | sys.path.append('.') 4 | from fastreid.data.samplers import TrainingSampler 5 | 6 | 7 | class SamplerTestCase(unittest.TestCase): 8 | def test_training_sampler(self): 9 | sampler = TrainingSampler(5) 10 | for i in sampler: 11 | from ipdb import set_trace; set_trace() 12 | print(i) 13 | 14 | 15 | if __name__ == '__main__': 16 | unittest.main() 17 | -------------------------------------------------------------------------------- /tests/test_repvgg.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import unittest 3 | 4 | import torch 5 | 6 | sys.path.append('.') 7 | from fastreid.config import get_cfg 8 | from fastreid.modeling.backbones import build_backbone 9 | 10 | 11 | class MyTestCase(unittest.TestCase): 12 | def test_fusebn(self): 13 | cfg = get_cfg() 14 | cfg.defrost() 15 | cfg.MODEL.BACKBONE.NAME = 'build_repvgg_backbone' 16 | cfg.MODEL.BACKBONE.DEPTH = 'B1g2' 17 | cfg.MODEL.BACKBONE.PRETRAIN = False 18 | model = build_backbone(cfg) 19 | model.eval() 20 | 21 | test_inp = torch.randn((1, 3, 256, 128)) 22 | 23 | y = model(test_inp) 24 | 25 | model.deploy(mode=True) 26 | from ipdb import set_trace; set_trace() 27 | fused_y = model(test_inp) 28 | 29 | print("final error :", torch.max(torch.abs(fused_y - y)).item()) 30 | 31 | 32 | if __name__ == '__main__': 33 | unittest.main() 34 | -------------------------------------------------------------------------------- /tools/deploy/Caffe/ReadMe.md: -------------------------------------------------------------------------------- 1 | # The Caffe module in nn_tools provides some convenient APIs 2 | If there are problems parsing your prototxt or caffemodel, please replace 3 | the caffe.proto with your own version and compile it with the command 4 | `protoc --python_out ./ caffe.proto` 5 | 6 | ## caffe_net.py 7 | Use `from nn_tools.Caffe import caffe_net` to import this module 8 | ### Prototxt 9 | + `net=caffe_net.Prototxt(file_name)` to open a prototxt file 10 | + `net.init_caffemodel(caffe_cmd_path='caffe')` to generate a caffemodel file in the current working directory \ 11 | if your `caffe` cmd is not in the $PATH, specify your caffe cmd path with the `caffe_cmd_path` kwarg.
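Putting the two calls above together, a minimal usage sketch looks like this (the prototxt filename and the caffe binary path are placeholders, not files shipped with this repo):

```python
from nn_tools.Caffe import caffe_net

# Parse an existing prototxt, then generate a matching caffemodel in the
# current working directory; point caffe_cmd_path at your caffe binary
# when `caffe` is not on $PATH.
net = caffe_net.Prototxt("deploy.prototxt")
net.init_caffemodel(caffe_cmd_path="/path/to/caffe/build/tools/caffe")
```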
12 | ### Caffemodel 13 | + `net=caffe_net.Caffemodel(file_name)` to open a caffemodel 14 | + `net.save_prototxt(path)` to save the caffemodel to a prototxt file (not containing the weight data) 15 | + `net.get_layer_data(layer_name)` return the numpy ndarray data of the layer 16 | + `net.set_layer_date(layer_name, datas)` specify the data of one layer in the caffemodel .`datas` is normally a list of numpy ndarray `[weights,bias]` 17 | + `net.save(path)` save the changed caffemodel 18 | ### Functions for both Prototxt and Caffemodel 19 | + `net.add_layer(layer_params,before='',after='')` add a new layer with `Layer_Param` object 20 | + `net.remove_layer_by_name(layer_name)` 21 | + `net.get_layer_by_name(layer_name)` or `net.layer(layer_name)` get the raw Layer object defined in caffe_pb2 22 | -------------------------------------------------------------------------------- /tools/deploy/Caffe/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JDAI-CV/fast-reid/c9bc3ceb2f7a6438b62fb515ea3df6d1e999e95d/tools/deploy/Caffe/__init__.py -------------------------------------------------------------------------------- /tools/deploy/Caffe/caffe_lmdb.py: -------------------------------------------------------------------------------- 1 | import lmdb 2 | from Caffe import caffe_pb2 as pb2 3 | import numpy as np 4 | 5 | class Read_Caffe_LMDB(): 6 | def __init__(self,path,dtype=np.uint8): 7 | 8 | self.env=lmdb.open(path, readonly=True) 9 | self.dtype=dtype 10 | self.txn=self.env.begin() 11 | self.cursor=self.txn.cursor() 12 | 13 | @staticmethod 14 | def to_numpy(value,dtype=np.uint8): 15 | datum = pb2.Datum() 16 | datum.ParseFromString(value) 17 | flat_x = np.fromstring(datum.data, dtype=dtype) 18 | data = flat_x.reshape(datum.channels, datum.height, datum.width) 19 | label=flat_x = datum.label 20 | return data,label 21 | 22 | def iterator(self): 23 | while True: 24 | key,value=self.cursor.key(),self.cursor.value() 25 | yield self.to_numpy(value,self.dtype) 26 | if not self.cursor.next(): 27 | return 28 | 29 | def __iter__(self): 30 | self.cursor.first() 31 | it = self.iterator() 32 | return it 33 | 34 | def __len__(self): 35 | return int(self.env.stat()['entries']) 36 | -------------------------------------------------------------------------------- /tools/deploy/Caffe/net.py: -------------------------------------------------------------------------------- 1 | raise ImportError("the nn_tools.Caffe.net is no longer used, please use nn_tools.Caffe.caffe_net") 2 | 3 | -------------------------------------------------------------------------------- /tools/deploy/test_data/0022_c6s1_002976_01.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JDAI-CV/fast-reid/c9bc3ceb2f7a6438b62fb515ea3df6d1e999e95d/tools/deploy/test_data/0022_c6s1_002976_01.jpg -------------------------------------------------------------------------------- /tools/deploy/test_data/0027_c2s2_091032_02.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JDAI-CV/fast-reid/c9bc3ceb2f7a6438b62fb515ea3df6d1e999e95d/tools/deploy/test_data/0027_c2s2_091032_02.jpg -------------------------------------------------------------------------------- /tools/deploy/test_data/0032_c6s1_002851_01.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/JDAI-CV/fast-reid/c9bc3ceb2f7a6438b62fb515ea3df6d1e999e95d/tools/deploy/test_data/0032_c6s1_002851_01.jpg -------------------------------------------------------------------------------- /tools/deploy/test_data/0048_c1s1_005351_01.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JDAI-CV/fast-reid/c9bc3ceb2f7a6438b62fb515ea3df6d1e999e95d/tools/deploy/test_data/0048_c1s1_005351_01.jpg -------------------------------------------------------------------------------- /tools/deploy/test_data/0065_c6s1_009501_02.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JDAI-CV/fast-reid/c9bc3ceb2f7a6438b62fb515ea3df6d1e999e95d/tools/deploy/test_data/0065_c6s1_009501_02.jpg -------------------------------------------------------------------------------- /tools/train_net.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | """ 4 | @author: sherlock 5 | @contact: sherlockliao01@gmail.com 6 | """ 7 | 8 | import sys 9 | 10 | sys.path.append('.') 11 | 12 | from fastreid.config import get_cfg 13 | from fastreid.engine import DefaultTrainer, default_argument_parser, default_setup, launch 14 | from fastreid.utils.checkpoint import Checkpointer 15 | 16 | 17 | def setup(args): 18 | """ 19 | Create configs and perform basic setups. 20 | """ 21 | cfg = get_cfg() 22 | cfg.merge_from_file(args.config_file) 23 | cfg.merge_from_list(args.opts) 24 | cfg.freeze() 25 | default_setup(cfg, args) 26 | return cfg 27 | 28 | 29 | def main(args): 30 | cfg = setup(args) 31 | 32 | if args.eval_only: 33 | cfg.defrost() 34 | cfg.MODEL.BACKBONE.PRETRAIN = False 35 | model = DefaultTrainer.build_model(cfg) 36 | 37 | Checkpointer(model).load(cfg.MODEL.WEIGHTS) # load trained model 38 | 39 | res = DefaultTrainer.test(cfg, model) 40 | return res 41 | 42 | trainer = DefaultTrainer(cfg) 43 | 44 | trainer.resume_or_load(resume=args.resume) 45 | return trainer.train() 46 | 47 | 48 | if __name__ == "__main__": 49 | args = default_argument_parser().parse_args() 50 | print("Command Line Args:", args) 51 | launch( 52 | main, 53 | args.num_gpus, 54 | num_machines=args.num_machines, 55 | machine_rank=args.machine_rank, 56 | dist_url=args.dist_url, 57 | args=(args,), 58 | ) 59 | --------------------------------------------------------------------------------