├── LICENSE ├── README.md ├── config ├── __init__.py └── defaults.py ├── configs ├── MSMT17 │ ├── resnet_base.yml │ ├── vit_base.yml │ └── vit_transreid.yml └── Market │ ├── resnet_base.yml │ ├── vit_base.yml │ └── vit_transreid.yml ├── datasets ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-37.pyc │ └── bases.cpython-37.pyc ├── augmentations │ ├── augmentations.py │ └── augmix.py ├── bases.py ├── cuhk03.py ├── make_dataloader.py ├── market1501.py ├── msmt17.py ├── preprocessing.py ├── sampler.py └── sampler_ddp.py ├── imgs ├── market.png ├── market_c.png ├── overview_masked.png └── thumbnail_cil.png ├── loss ├── __init__.py ├── arcface.py ├── center_loss.py ├── make_loss.py ├── metric_learning.py ├── softmax_loss.py └── triplet_loss.py ├── model ├── __init__.py ├── backbones │ ├── __init__.py │ ├── resnest │ │ ├── __init__.py │ │ ├── ablation.py │ │ ├── build.py │ │ ├── resnest.py │ │ ├── resnet.py │ │ └── splat.py │ ├── resnet.py │ ├── resnext │ │ └── model.py │ ├── sa │ │ ├── functional.py │ │ ├── functions │ │ │ ├── __init__.py │ │ │ ├── aggregation_refpad.py │ │ │ ├── aggregation_zeropad.py │ │ │ ├── subtraction2_refpad.py │ │ │ ├── subtraction2_zeropad.py │ │ │ ├── subtraction_refpad.py │ │ │ ├── subtraction_zeropad.py │ │ │ └── utils.py │ │ └── modules │ │ │ ├── __init__.py │ │ │ ├── aggregation.py │ │ │ ├── subtraction.py │ │ │ └── subtraction2.py │ ├── san.py │ ├── timm │ │ ├── __init__.py │ │ ├── data │ │ │ ├── __init__.py │ │ │ ├── auto_augment.py │ │ │ ├── config.py │ │ │ ├── constants.py │ │ │ ├── dataset.py │ │ │ ├── dataset_factory.py │ │ │ ├── distributed_sampler.py │ │ │ ├── loader.py │ │ │ ├── mixup.py │ │ │ ├── parsers │ │ │ │ ├── __init__.py │ │ │ │ ├── class_map.py │ │ │ │ ├── constants.py │ │ │ │ ├── parser.py │ │ │ │ ├── parser_factory.py │ │ │ │ ├── parser_image_folder.py │ │ │ │ ├── parser_image_in_tar.py │ │ │ │ ├── parser_image_tar.py │ │ │ │ └── parser_tfds.py │ │ │ ├── random_erasing.py │ │ │ ├── real_labels.py │ │ │ ├── tf_preprocessing.py │ │ │ ├── transforms.py │ │ │ └── transforms_factory.py │ │ ├── loss │ │ │ ├── __init__.py │ │ │ ├── asymmetric_loss.py │ │ │ ├── cross_entropy.py │ │ │ └── jsd.py │ │ ├── models │ │ │ ├── __init__.py │ │ │ ├── __pycache__ │ │ │ │ ├── __init__.cpython-38.pyc │ │ │ │ ├── byoanet.cpython-38.pyc │ │ │ │ ├── byobnet.cpython-38.pyc │ │ │ │ ├── cait.cpython-38.pyc │ │ │ │ ├── coat.cpython-38.pyc │ │ │ │ ├── convit.cpython-38.pyc │ │ │ │ ├── cspnet.cpython-38.pyc │ │ │ │ ├── densenet.cpython-38.pyc │ │ │ │ ├── dla.cpython-38.pyc │ │ │ │ ├── dpn.cpython-38.pyc │ │ │ │ ├── efficientnet.cpython-38.pyc │ │ │ │ ├── efficientnet_blocks.cpython-38.pyc │ │ │ │ ├── efficientnet_builder.cpython-38.pyc │ │ │ │ ├── factory.cpython-38.pyc │ │ │ │ ├── features.cpython-38.pyc │ │ │ │ ├── ghostnet.cpython-38.pyc │ │ │ │ ├── gluon_resnet.cpython-38.pyc │ │ │ │ ├── gluon_xception.cpython-38.pyc │ │ │ │ ├── hardcorenas.cpython-38.pyc │ │ │ │ ├── helpers.cpython-38.pyc │ │ │ │ ├── hrnet.cpython-38.pyc │ │ │ │ ├── hub.cpython-38.pyc │ │ │ │ ├── inception_resnet_v2.cpython-38.pyc │ │ │ │ ├── inception_v3.cpython-38.pyc │ │ │ │ ├── inception_v4.cpython-38.pyc │ │ │ │ ├── levit.cpython-38.pyc │ │ │ │ ├── mlp_mixer.cpython-38.pyc │ │ │ │ ├── mobilenetv3.cpython-38.pyc │ │ │ │ ├── nasnet.cpython-38.pyc │ │ │ │ ├── nfnet.cpython-38.pyc │ │ │ │ ├── pit.cpython-38.pyc │ │ │ │ ├── pnasnet.cpython-38.pyc │ │ │ │ ├── registry.cpython-38.pyc │ │ │ │ ├── regnet.cpython-38.pyc │ │ │ │ ├── res2net.cpython-38.pyc │ │ │ │ ├── resnest.cpython-38.pyc │ │ │ │ ├── 
resnet.cpython-38.pyc │ │ │ │ ├── resnetv2.cpython-38.pyc │ │ │ │ ├── rexnet.cpython-38.pyc │ │ │ │ ├── selecsls.cpython-38.pyc │ │ │ │ ├── senet.cpython-38.pyc │ │ │ │ ├── sknet.cpython-38.pyc │ │ │ │ ├── swin_transformer.cpython-38.pyc │ │ │ │ ├── tnt.cpython-38.pyc │ │ │ │ ├── tresnet.cpython-38.pyc │ │ │ │ ├── twins.cpython-38.pyc │ │ │ │ ├── vgg.cpython-38.pyc │ │ │ │ ├── visformer.cpython-38.pyc │ │ │ │ ├── vision_transformer.cpython-38.pyc │ │ │ │ ├── vision_transformer_hybrid.cpython-38.pyc │ │ │ │ ├── vovnet.cpython-38.pyc │ │ │ │ ├── xception.cpython-38.pyc │ │ │ │ └── xception_aligned.cpython-38.pyc │ │ │ ├── byoanet.py │ │ │ ├── byobnet.py │ │ │ ├── cait.py │ │ │ ├── coat.py │ │ │ ├── convit.py │ │ │ ├── cspnet.py │ │ │ ├── densenet.py │ │ │ ├── dla.py │ │ │ ├── dpn.py │ │ │ ├── efficientnet.py │ │ │ ├── efficientnet_blocks.py │ │ │ ├── efficientnet_builder.py │ │ │ ├── factory.py │ │ │ ├── features.py │ │ │ ├── ghostnet.py │ │ │ ├── gluon_resnet.py │ │ │ ├── gluon_xception.py │ │ │ ├── hardcorenas.py │ │ │ ├── helpers.py │ │ │ ├── hrnet.py │ │ │ ├── hub.py │ │ │ ├── inception_resnet_v2.py │ │ │ ├── inception_v3.py │ │ │ ├── inception_v4.py │ │ │ ├── layers │ │ │ │ ├── __init__.py │ │ │ │ ├── __pycache__ │ │ │ │ │ ├── __init__.cpython-38.pyc │ │ │ │ │ ├── activations.cpython-38.pyc │ │ │ │ │ ├── activations_jit.cpython-38.pyc │ │ │ │ │ ├── activations_me.cpython-38.pyc │ │ │ │ │ ├── adaptive_avgmax_pool.cpython-38.pyc │ │ │ │ │ ├── blur_pool.cpython-38.pyc │ │ │ │ │ ├── bottleneck_attn.cpython-38.pyc │ │ │ │ │ ├── cbam.cpython-38.pyc │ │ │ │ │ ├── classifier.cpython-38.pyc │ │ │ │ │ ├── cond_conv2d.cpython-38.pyc │ │ │ │ │ ├── config.cpython-38.pyc │ │ │ │ │ ├── conv2d_same.cpython-38.pyc │ │ │ │ │ ├── conv_bn_act.cpython-38.pyc │ │ │ │ │ ├── create_act.cpython-38.pyc │ │ │ │ │ ├── create_attn.cpython-38.pyc │ │ │ │ │ ├── create_conv2d.cpython-38.pyc │ │ │ │ │ ├── create_norm_act.cpython-38.pyc │ │ │ │ │ ├── create_self_attn.cpython-38.pyc │ │ │ │ │ ├── drop.cpython-38.pyc │ │ │ │ │ ├── eca.cpython-38.pyc │ │ │ │ │ ├── evo_norm.cpython-38.pyc │ │ │ │ │ ├── halo_attn.cpython-38.pyc │ │ │ │ │ ├── helpers.cpython-38.pyc │ │ │ │ │ ├── inplace_abn.cpython-38.pyc │ │ │ │ │ ├── involution.cpython-38.pyc │ │ │ │ │ ├── lambda_layer.cpython-38.pyc │ │ │ │ │ ├── linear.cpython-38.pyc │ │ │ │ │ ├── mixed_conv2d.cpython-38.pyc │ │ │ │ │ ├── mlp.cpython-38.pyc │ │ │ │ │ ├── norm.cpython-38.pyc │ │ │ │ │ ├── norm_act.cpython-38.pyc │ │ │ │ │ ├── padding.cpython-38.pyc │ │ │ │ │ ├── patch_embed.cpython-38.pyc │ │ │ │ │ ├── pool2d_same.cpython-38.pyc │ │ │ │ │ ├── se.cpython-38.pyc │ │ │ │ │ ├── selective_kernel.cpython-38.pyc │ │ │ │ │ ├── separable_conv.cpython-38.pyc │ │ │ │ │ ├── space_to_depth.cpython-38.pyc │ │ │ │ │ ├── split_attn.cpython-38.pyc │ │ │ │ │ ├── split_batchnorm.cpython-38.pyc │ │ │ │ │ ├── std_conv.cpython-38.pyc │ │ │ │ │ ├── swin_attn.cpython-38.pyc │ │ │ │ │ ├── test_time_pool.cpython-38.pyc │ │ │ │ │ └── weight_init.cpython-38.pyc │ │ │ │ ├── activations.py │ │ │ │ ├── activations_jit.py │ │ │ │ ├── activations_me.py │ │ │ │ ├── adaptive_avgmax_pool.py │ │ │ │ ├── blur_pool.py │ │ │ │ ├── bottleneck_attn.py │ │ │ │ ├── cbam.py │ │ │ │ ├── classifier.py │ │ │ │ ├── cond_conv2d.py │ │ │ │ ├── config.py │ │ │ │ ├── conv2d_same.py │ │ │ │ ├── conv_bn_act.py │ │ │ │ ├── create_act.py │ │ │ │ ├── create_attn.py │ │ │ │ ├── create_conv2d.py │ │ │ │ ├── create_norm_act.py │ │ │ │ ├── create_self_attn.py │ │ │ │ ├── drop.py │ │ │ │ ├── eca.py │ │ │ │ ├── evo_norm.py │ │ │ │ ├── 
halo_attn.py │ │ │ │ ├── helpers.py │ │ │ │ ├── inplace_abn.py │ │ │ │ ├── involution.py │ │ │ │ ├── lambda_layer.py │ │ │ │ ├── linear.py │ │ │ │ ├── median_pool.py │ │ │ │ ├── mixed_conv2d.py │ │ │ │ ├── mlp.py │ │ │ │ ├── norm.py │ │ │ │ ├── norm_act.py │ │ │ │ ├── padding.py │ │ │ │ ├── patch_embed.py │ │ │ │ ├── pool2d_same.py │ │ │ │ ├── se.py │ │ │ │ ├── selective_kernel.py │ │ │ │ ├── separable_conv.py │ │ │ │ ├── space_to_depth.py │ │ │ │ ├── split_attn.py │ │ │ │ ├── split_batchnorm.py │ │ │ │ ├── std_conv.py │ │ │ │ ├── swin_attn.py │ │ │ │ ├── test_time_pool.py │ │ │ │ └── weight_init.py │ │ │ ├── levit.py │ │ │ ├── mlp_mixer.py │ │ │ ├── mobilenetv3.py │ │ │ ├── nasnet.py │ │ │ ├── nfnet.py │ │ │ ├── pit.py │ │ │ ├── pnasnet.py │ │ │ ├── pruned │ │ │ │ ├── ecaresnet101d_pruned.txt │ │ │ │ ├── ecaresnet50d_pruned.txt │ │ │ │ ├── efficientnet_b1_pruned.txt │ │ │ │ ├── efficientnet_b2_pruned.txt │ │ │ │ └── efficientnet_b3_pruned.txt │ │ │ ├── registry.py │ │ │ ├── regnet.py │ │ │ ├── res2net.py │ │ │ ├── resnest.py │ │ │ ├── resnet.py │ │ │ ├── resnetv2.py │ │ │ ├── rexnet.py │ │ │ ├── selecsls.py │ │ │ ├── senet.py │ │ │ ├── sknet.py │ │ │ ├── swin_transformer.py │ │ │ ├── tnt.py │ │ │ ├── tresnet.py │ │ │ ├── twins.py │ │ │ ├── vgg.py │ │ │ ├── visformer.py │ │ │ ├── vision_transformer.py │ │ │ ├── vision_transformer_hybrid.py │ │ │ ├── vovnet.py │ │ │ ├── xception.py │ │ │ └── xception_aligned.py │ │ ├── optim │ │ │ ├── __init__.py │ │ │ ├── adabelief.py │ │ │ ├── adafactor.py │ │ │ ├── adahessian.py │ │ │ ├── adamp.py │ │ │ ├── adamw.py │ │ │ ├── lookahead.py │ │ │ ├── nadam.py │ │ │ ├── novograd.py │ │ │ ├── nvnovograd.py │ │ │ ├── optim_factory.py │ │ │ ├── radam.py │ │ │ ├── rmsprop_tf.py │ │ │ └── sgdp.py │ │ ├── scheduler │ │ │ ├── __init__.py │ │ │ ├── cosine_lr.py │ │ │ ├── plateau_lr.py │ │ │ ├── scheduler.py │ │ │ ├── scheduler_factory.py │ │ │ ├── step_lr.py │ │ │ └── tanh_lr.py │ │ ├── utils │ │ │ ├── __init__.py │ │ │ ├── agc.py │ │ │ ├── checkpoint_saver.py │ │ │ ├── clip_grad.py │ │ │ ├── cuda.py │ │ │ ├── distributed.py │ │ │ ├── jit.py │ │ │ ├── log.py │ │ │ ├── metrics.py │ │ │ ├── misc.py │ │ │ ├── model.py │ │ │ ├── model_ema.py │ │ │ ├── random.py │ │ │ └── summary.py │ │ └── version.py │ ├── vision_transformer_linear.py │ └── vit_pytorch.py └── make_model.py ├── processor ├── __init__.py └── processor.py ├── requirements.txt ├── scripts ├── eval_market.sh ├── eval_msmt.sh ├── train_market.sh ├── train_msmt.sh ├── train_vit_base.sh └── train_vit_transreid.sh ├── solver ├── __init__.py ├── cosine_lr.py ├── lr_scheduler.py ├── make_optimizer.py ├── ranger.py ├── scheduler.py └── scheduler_factory.py ├── test.py ├── train.py └── utils ├── __init__.py ├── __pycache__ ├── __init__.cpython-38.pyc ├── iotools.cpython-38.pyc ├── logger.cpython-38.pyc ├── meter.cpython-38.pyc ├── metrics.cpython-38.pyc └── reranking.cpython-38.pyc ├── iotools.py ├── logger.py ├── meter.py ├── metrics.py ├── rank_cylib ├── Makefile ├── __pycache__ │ └── __init__.cpython-36.pyc ├── build │ └── temp.linux-x86_64-3.7 │ │ └── rank_cy.o ├── rank_cy.c ├── rank_cy.cpython-37m-x86_64-linux-gnu.so ├── rank_cy.pyx ├── setup.py ├── setup.sh └── test_cython.py └── reranking.py /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Minghui Chen 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in 
the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /config/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | from .defaults import _C as cfg 8 | from .defaults import _C as cfg_test 9 | -------------------------------------------------------------------------------- /configs/MSMT17/resnet_base.yml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | PRETRAIN_CHOICE: 'imagenet' 3 | PRETRAIN_PATH: '/home/wangzhiqiang/.cache/torch/hub/checkpoints/resnet50-19c8e357.pth' 4 | METRIC_LOSS_TYPE: 'triplet' 5 | NAME: 'resnet50' 6 | NO_MARGIN: True 7 | DEVICE_ID: ('5') 8 | TRANSFORMER_TYPE: '' 9 | STRIDE_SIZE: [12, 12] 10 | SIE_CAMERA: True 11 | SIE_COE: 3.0 12 | JPM: True 13 | LINEAR_BLOCK: False 14 | RE_ARRANGE: True 15 | GEM_POOL: True 16 | WRTRIPLET: False 17 | IF_LABELSMOOTH: 'on' 18 | IF_WITH_CENTER: 'no' 19 | 20 | INPUT: 21 | SIZE_TRAIN: [256, 128] 22 | SIZE_TEST: [256, 128] 23 | PROB: 0.5 # random horizontal flip 24 | RE_PROB: 0.5 # random erasing 25 | ERASING_TYPE: 'normal' 26 | MIXING_COEFF: [1.0, 1.0] 27 | PADDING: 10 28 | PIXEL_MEAN: [0.5, 0.5, 0.5] 29 | PIXEL_STD: [0.5, 0.5, 0.5] 30 | MEAN_FEAT: False 31 | SELF_ID: True 32 | 33 | 34 | DATASETS: 35 | NAMES: ('msmt17') 36 | ROOT_DIR: ('/data/wzq/') 37 | 38 | DATALOADER: 39 | SAMPLER: 'softmax_triplet' 40 | NUM_INSTANCE: 4 41 | NUM_WORKERS: 8 42 | 43 | SOLVER: 44 | OPTIMIZER_NAME: 'SGD' 45 | MAX_EPOCHS: 120 46 | BASE_LR: 0.008 47 | IMS_PER_BATCH: 64 48 | WARMUP_METHOD: 'linear' 49 | LARGE_FC_LR: False 50 | CHECKPOINT_PERIOD: 120 51 | LOG_PERIOD: 50 52 | EVAL_PERIOD: 120 53 | WEIGHT_DECAY: 1e-4 54 | WEIGHT_DECAY_BIAS: 1e-4 55 | BIAS_LR_FACTOR: 2 56 | 57 | TEST: 58 | EVAL: True 59 | IMS_PER_BATCH: 256 60 | RE_RANKING: False 61 | WEIGHT: '' 62 | NECK_FEAT: 'before' 63 | FEAT_NORM: 'yes' 64 | 65 | OUTPUT_DIR: './logs/msmt17_resnet_base' 66 | 67 | 68 | -------------------------------------------------------------------------------- /configs/MSMT17/vit_base.yml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | PRETRAIN_CHOICE: 'imagenet' 3 | PRETRAIN_PATH: '/home/wangzhiqiang/.cache/torch/hub/checkpoints/jx_vit_base_p16_224-80ecf9dd.pth' 4 | METRIC_LOSS_TYPE: 'triplet' 5 | IF_LABELSMOOTH: 'off' 6 | IF_WITH_CENTER: 'no' 7 | NAME: 'transformer' 8 | NO_MARGIN: True 9 | DEVICE_ID: ('4') 10 | TRANSFORMER_TYPE: 'vit_base_patch16_224_TransReID' 11 | STRIDE_SIZE: [16, 
16] 12 | 13 | INPUT: 14 | SIZE_TRAIN: [256, 128] 15 | SIZE_TEST: [256, 128] 16 | PROB: 0.5 # random horizontal flip 17 | RE_PROB: 0.5 # random erasing 18 | PADDING: 10 19 | PIXEL_MEAN: [0.5, 0.5, 0.5] 20 | PIXEL_STD: [0.5, 0.5, 0.5] 21 | 22 | DATASETS: 23 | NAMES: ('msmt17') 24 | ROOT_DIR: ('/data/wzq/') 25 | 26 | DATALOADER: 27 | SAMPLER: 'softmax_triplet' 28 | NUM_INSTANCE: 4 29 | NUM_WORKERS: 8 30 | 31 | SOLVER: 32 | OPTIMIZER_NAME: 'SGD' 33 | MAX_EPOCHS: 120 34 | BASE_LR: 0.008 35 | IMS_PER_BATCH: 64 36 | WARMUP_METHOD: 'linear' 37 | LARGE_FC_LR: False 38 | CHECKPOINT_PERIOD: 120 39 | LOG_PERIOD: 50 40 | EVAL_PERIOD: 120 41 | WEIGHT_DECAY: 1e-4 42 | WEIGHT_DECAY_BIAS: 1e-4 43 | BIAS_LR_FACTOR: 2 44 | 45 | TEST: 46 | EVAL: True 47 | IMS_PER_BATCH: 256 48 | RE_RANKING: False 49 | WEIGHT: '' 50 | NECK_FEAT: 'before' 51 | FEAT_NORM: 'yes' 52 | 53 | OUTPUT_DIR: '../logs/msmt17_vit_base' 54 | 55 | 56 | -------------------------------------------------------------------------------- /configs/MSMT17/vit_transreid.yml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | PRETRAIN_CHOICE: 'imagenet' 3 | PRETRAIN_PATH: '/home/wangzhiqiang/.cache/torch/hub/checkpoints/jx_vit_base_p16_224-80ecf9dd.pth' 4 | METRIC_LOSS_TYPE: 'triplet' 5 | IF_LABELSMOOTH: 'off' 6 | IF_WITH_CENTER: 'no' 7 | NAME: 'transformer' 8 | NO_MARGIN: True 9 | DEVICE_ID: ('3') 10 | TRANSFORMER_TYPE: 'vit_base_patch16_224_TransReID' 11 | STRIDE_SIZE: [16, 16] 12 | SIE_CAMERA: True 13 | SIE_COE: 3.0 14 | JPM: True 15 | RE_ARRANGE: True 16 | 17 | INPUT: 18 | SIZE_TRAIN: [256, 128] 19 | SIZE_TEST: [256, 128] 20 | PROB: 0.5 # random horizontal flip 21 | RE_PROB: 0.5 # random erasing 22 | PADDING: 10 23 | PIXEL_MEAN: [0.5, 0.5, 0.5] 24 | PIXEL_STD: [0.5, 0.5, 0.5] 25 | 26 | DATASETS: 27 | NAMES: ('msmt17') 28 | ROOT_DIR: ('/data/wzq/') 29 | 30 | DATALOADER: 31 | SAMPLER: 'softmax_triplet' 32 | NUM_INSTANCE: 4 33 | NUM_WORKERS: 8 34 | 35 | SOLVER: 36 | OPTIMIZER_NAME: 'SGD' 37 | MAX_EPOCHS: 120 38 | BASE_LR: 0.008 39 | IMS_PER_BATCH: 64 40 | WARMUP_METHOD: 'linear' 41 | LARGE_FC_LR: False 42 | CHECKPOINT_PERIOD: 120 43 | LOG_PERIOD: 50 44 | EVAL_PERIOD: 120 45 | WEIGHT_DECAY: 1e-4 46 | WEIGHT_DECAY_BIAS: 1e-4 47 | BIAS_LR_FACTOR: 2 48 | 49 | TEST: 50 | EVAL: True 51 | IMS_PER_BATCH: 256 52 | RE_RANKING: False 53 | WEIGHT: '' 54 | NECK_FEAT: 'before' 55 | FEAT_NORM: 'yes' 56 | 57 | OUTPUT_DIR: '../logs/msmt17_vit_transreid' 58 | 59 | 60 | -------------------------------------------------------------------------------- /configs/Market/resnet_base.yml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | PRETRAIN_CHOICE: 'imagenet' 3 | PRETRAIN_PATH: '/home/wangzhiqiang/.cache/torch/hub/checkpoints/resnet50-19c8e357.pth' 4 | METRIC_LOSS_TYPE: 'triplet' 5 | NAME: 'resnet50' 6 | NO_MARGIN: True 7 | DEVICE_ID: ('5') 8 | TRANSFORMER_TYPE: '' 9 | STRIDE_SIZE: [12, 12] 10 | SIE_CAMERA: True 11 | SIE_COE: 3.0 12 | JPM: True 13 | LINEAR_BLOCK: False 14 | RE_ARRANGE: True 15 | GEM_POOL: True 16 | WRTRIPLET: False 17 | IF_LABELSMOOTH: 'on' 18 | IF_WITH_CENTER: 'no' 19 | 20 | INPUT: 21 | SIZE_TRAIN: [256, 128] 22 | SIZE_TEST: [256, 128] 23 | PROB: 0.5 # random horizontal flip 24 | RE_PROB: 0.5 # random erasing 25 | ERASING_TYPE: 'normal' 26 | MIXING_COEFF: [1.0, 1.0] 27 | PADDING: 10 28 | PIXEL_MEAN: [0.5, 0.5, 0.5] 29 | PIXEL_STD: [0.5, 0.5, 0.5] 30 | MEAN_FEAT: False 31 | SELF_ID: True 32 | 33 | 34 | DATASETS: 35 | NAMES: ('market1501') 36 | ROOT_DIR: 
('/home/chenminghui/data/ReID_Dataset/') 37 | 38 | DATALOADER: 39 | SAMPLER: 'softmax_triplet' 40 | NUM_INSTANCE: 4 41 | NUM_WORKERS: 8 42 | 43 | SOLVER: 44 | OPTIMIZER_NAME: 'SGD' 45 | MAX_EPOCHS: 120 46 | BASE_LR: 0.008 47 | IMS_PER_BATCH: 64 48 | WARMUP_METHOD: 'linear' 49 | LARGE_FC_LR: False 50 | CHECKPOINT_PERIOD: 120 51 | LOG_PERIOD: 50 52 | EVAL_PERIOD: 120 53 | WEIGHT_DECAY: 1e-4 54 | WEIGHT_DECAY_BIAS: 1e-4 55 | BIAS_LR_FACTOR: 2 56 | 57 | TEST: 58 | EVAL: True 59 | IMS_PER_BATCH: 256 60 | RE_RANKING: False 61 | WEIGHT: '' 62 | NECK_FEAT: 'before' 63 | FEAT_NORM: 'yes' 64 | 65 | OUTPUT_DIR: './logs/market_resnet_base' 66 | 67 | 68 | -------------------------------------------------------------------------------- /configs/Market/vit_base.yml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | PRETRAIN_CHOICE: 'imagenet' 3 | PRETRAIN_PATH: '/home/wangzhiqiang/.cache/torch/hub/checkpoints/jx_vit_base_p16_224-80ecf9dd.pth' 4 | METRIC_LOSS_TYPE: 'triplet' 5 | IF_LABELSMOOTH: 'off' 6 | IF_WITH_CENTER: 'no' 7 | NAME: 'transformer' 8 | NO_MARGIN: True 9 | DEVICE_ID: ('7') 10 | TRANSFORMER_TYPE: 'vit_base_patch16_224_TransReID' 11 | STRIDE_SIZE: [16, 16] 12 | 13 | INPUT: 14 | SIZE_TRAIN: [256, 128] 15 | SIZE_TEST: [256, 128] 16 | PROB: 0.5 # random horizontal flip 17 | RE_PROB: 0.5 # random erasing 18 | PADDING: 10 19 | PIXEL_MEAN: [0.5, 0.5, 0.5] 20 | PIXEL_STD: [0.5, 0.5, 0.5] 21 | 22 | DATASETS: 23 | NAMES: ('market1501') 24 | ROOT_DIR: ('/home/wangzhiqiang/data/ReID_Dataset/') 25 | 26 | DATALOADER: 27 | SAMPLER: 'softmax_triplet' 28 | NUM_INSTANCE: 4 29 | NUM_WORKERS: 8 30 | 31 | SOLVER: 32 | OPTIMIZER_NAME: 'SGD' 33 | MAX_EPOCHS: 120 34 | BASE_LR: 0.008 35 | IMS_PER_BATCH: 64 36 | WARMUP_METHOD: 'linear' 37 | LARGE_FC_LR: False 38 | CHECKPOINT_PERIOD: 120 39 | LOG_PERIOD: 50 40 | EVAL_PERIOD: 120 41 | WEIGHT_DECAY: 1e-4 42 | WEIGHT_DECAY_BIAS: 1e-4 43 | BIAS_LR_FACTOR: 2 44 | 45 | TEST: 46 | EVAL: True 47 | IMS_PER_BATCH: 256 48 | RE_RANKING: False 49 | WEIGHT: '../logs/0321_market_vit_base/transformer_120.pth' 50 | NECK_FEAT: 'before' 51 | FEAT_NORM: 'yes' 52 | 53 | OUTPUT_DIR: '../logs/0321_market_vit_base' 54 | 55 | 56 | -------------------------------------------------------------------------------- /configs/Market/vit_transreid.yml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | PRETRAIN_CHOICE: 'imagenet' 3 | PRETRAIN_PATH: '/home/wangzhiqiang/.cache/torch/hub/checkpoints/jx_vit_base_p16_224-80ecf9dd.pth' 4 | METRIC_LOSS_TYPE: 'triplet' 5 | IF_LABELSMOOTH: 'off' 6 | IF_WITH_CENTER: 'no' 7 | NAME: 'transformer' 8 | NO_MARGIN: True 9 | DEVICE_ID: ('5') 10 | TRANSFORMER_TYPE: 'vit_base_patch16_224_TransReID' 11 | STRIDE_SIZE: [16, 16] 12 | SIE_CAMERA: True 13 | SIE_COE: 3.0 14 | JPM: True 15 | RE_ARRANGE: True 16 | 17 | INPUT: 18 | SIZE_TRAIN: [256, 128] 19 | SIZE_TEST: [256, 128] 20 | PROB: 0.5 # random horizontal flip 21 | RE_PROB: 0.5 # random erasing 22 | PADDING: 10 23 | PIXEL_MEAN: [0.5, 0.5, 0.5] 24 | PIXEL_STD: [0.5, 0.5, 0.5] 25 | 26 | DATASETS: 27 | NAMES: ('market1501') 28 | ROOT_DIR: ('/home/wangzhiqiang/data/ReID_Dataset/') 29 | 30 | DATALOADER: 31 | SAMPLER: 'softmax_triplet' 32 | NUM_INSTANCE: 4 33 | NUM_WORKERS: 8 34 | 35 | SOLVER: 36 | OPTIMIZER_NAME: 'SGD' 37 | MAX_EPOCHS: 120 38 | BASE_LR: 0.008 39 | IMS_PER_BATCH: 64 40 | WARMUP_METHOD: 'linear' 41 | LARGE_FC_LR: False 42 | CHECKPOINT_PERIOD: 120 43 | LOG_PERIOD: 50 44 | EVAL_PERIOD: 120 45 | WEIGHT_DECAY: 1e-4 46 | 
WEIGHT_DECAY_BIAS: 1e-4 47 | BIAS_LR_FACTOR: 2 48 | 49 | TEST: 50 | EVAL: True 51 | IMS_PER_BATCH: 256 52 | RE_RANKING: False 53 | WEIGHT: '../logs/market_vit_transreid/transformer_120.pth' 54 | NECK_FEAT: 'before' 55 | FEAT_NORM: 'yes' 56 | 57 | OUTPUT_DIR: '../logs/market_vit_transreid' 58 | 59 | 60 | -------------------------------------------------------------------------------- /datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from .make_dataloader import make_dataloader -------------------------------------------------------------------------------- /datasets/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MinghuiChen43/CIL-ReID/ac44dfaf4c0ebd47089e785b8a67a270896f6125/datasets/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /datasets/__pycache__/bases.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MinghuiChen43/CIL-ReID/ac44dfaf4c0ebd47089e785b8a67a270896f6125/datasets/__pycache__/bases.cpython-37.pyc -------------------------------------------------------------------------------- /datasets/augmentations/augmix.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # https://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | """Reference implementation of AugMix's data augmentation method in numpy.""" 16 | from datasets.augmentations.augmentations import augmentations 17 | import numpy as np 18 | from PIL import Image 19 | 20 | # CIFAR-10 constants 21 | # MEAN = [0.4914, 0.4822, 0.4465] 22 | # STD = [0.2023, 0.1994, 0.2010] 23 | MEAN = [0.5, 0.5, 0.5] 24 | STD = [0.5, 0.5, 0.5] 25 | 26 | 27 | def normalize(image): 28 | """Normalize input image channel-wise to zero mean and unit variance.""" 29 | image = image.transpose(2, 0, 1) # Switch to channel-first 30 | mean, std = np.array(MEAN), np.array(STD) 31 | image = (image - mean[:, None, None]) / std[:, None, None] 32 | return image.transpose(1, 2, 0) 33 | 34 | 35 | def apply_op(image, op, severity, image_size): 36 | image = np.clip(image * 255., 0, 255).astype(np.uint8) 37 | pil_img = Image.fromarray(image) # Convert to PIL.Image 38 | pil_img = op(pil_img, severity, image_size) 39 | return np.asarray(pil_img) / 255. 40 | 41 | 42 | def augmix(image, severity=3, width=3, depth=-1, alpha=1.): 43 | """Perform AugMix augmentations and compute mixture. 44 | Args: 45 | image: Raw input image as float32 np.ndarray of shape (h, w, c) 46 | severity: Severity of underlying augmentation operators (between 1 and 10). 47 | width: Width of augmentation chain 48 | depth: Depth of augmentation chain. 
-1 enables stochastic depth uniformly 49 | from [1, 3] 50 | alpha: Probability coefficient for Beta and Dirichlet distributions. 51 | Returns: 52 | mixed: Augmented and mixed image. 53 | """ 54 | ws = np.float32(np.random.dirichlet([alpha] * width)) 55 | m = np.float32(np.random.beta(alpha, alpha)) 56 | 57 | mix = np.zeros_like(image) 58 | for i in range(width): 59 | image_aug = image.copy() 60 | d = depth if depth > 0 else np.random.randint(1, 4) 61 | for _ in range(d): 62 | op = np.random.choice(augmentations) 63 | image_aug = apply_op(image_aug, op, severity, 64 | [image.shape[1], image.shape[0]]) 65 | # Preprocessing commutes since all coefficients are convex 66 | # mix += ws[i] * normalize(image_aug) 67 | mix += ws[i] * image_aug 68 | 69 | # mixed = (1 - m) * normalize(image) + m * mix 70 | mixed = (1 - m) * image + m * mix 71 | return mixed 72 | -------------------------------------------------------------------------------- /datasets/market1501.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import glob 8 | import re 9 | 10 | import os.path as osp 11 | 12 | from .bases import BaseImageDataset 13 | from collections import defaultdict 14 | import pickle 15 | class Market1501(BaseImageDataset): 16 | """ 17 | Market1501 18 | Reference: 19 | Zheng et al. Scalable Person Re-identification: A Benchmark. ICCV 2015. 20 | URL: http://www.liangzheng.org/Project/project_reid.html 21 | 22 | Dataset statistics: 23 | # identities: 1501 (+1 for background) 24 | # images: 12936 (train) + 3368 (query) + 15913 (gallery) 25 | """ 26 | dataset_dir = 'market1501' 27 | 28 | def __init__(self, root='', verbose=True, pid_begin = 0, **kwargs): 29 | super(Market1501, self).__init__() 30 | self.dataset_dir = osp.join(root, self.dataset_dir) 31 | self.train_dir = osp.join(self.dataset_dir, 'bounding_box_train') 32 | self.query_dir = osp.join(self.dataset_dir, 'query') 33 | self.gallery_dir = osp.join(self.dataset_dir, 'bounding_box_test') 34 | 35 | self._check_before_run() 36 | self.pid_begin = pid_begin 37 | train = self._process_dir(self.train_dir, relabel=True) 38 | query = self._process_dir(self.query_dir, relabel=False) 39 | gallery = self._process_dir(self.gallery_dir, relabel=False) 40 | 41 | if verbose: 42 | print("=> Market1501 loaded") 43 | self.print_dataset_statistics(train, query, gallery) 44 | 45 | self.train = train 46 | self.query = query 47 | self.gallery = gallery 48 | 49 | self.num_train_pids, self.num_train_imgs, self.num_train_cams, self.num_train_vids = self.get_imagedata_info(self.train) 50 | self.num_query_pids, self.num_query_imgs, self.num_query_cams, self.num_query_vids = self.get_imagedata_info(self.query) 51 | self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams, self.num_gallery_vids = self.get_imagedata_info(self.gallery) 52 | 53 | def _check_before_run(self): 54 | """Check if all files are available before going deeper""" 55 | if not osp.exists(self.dataset_dir): 56 | raise RuntimeError("'{}' is not available".format(self.dataset_dir)) 57 | if not osp.exists(self.train_dir): 58 | raise RuntimeError("'{}' is not available".format(self.train_dir)) 59 | if not osp.exists(self.query_dir): 60 | raise RuntimeError("'{}' is not available".format(self.query_dir)) 61 | if not osp.exists(self.gallery_dir): 62 | raise RuntimeError("'{}' is not available".format(self.gallery_dir)) 63 | 64 | def _process_dir(self, dir_path, 
relabel=False): 65 | img_paths = glob.glob(osp.join(dir_path, '*.jpg')) 66 | pattern = re.compile(r'([-\d]+)_c(\d)') 67 | 68 | pid_container = set() 69 | for img_path in sorted(img_paths): 70 | pid, _ = map(int, pattern.search(img_path).groups()) 71 | if pid == -1: continue # junk images are just ignored 72 | pid_container.add(pid) 73 | pid2label = {pid: label for label, pid in enumerate(pid_container)} 74 | dataset = [] 75 | for img_path in sorted(img_paths): 76 | pid, camid = map(int, pattern.search(img_path).groups()) 77 | if pid == -1: continue # junk images are just ignored 78 | assert 0 <= pid <= 1501 # pid == 0 means background 79 | assert 1 <= camid <= 6 80 | camid -= 1 # index starts from 0 81 | if relabel: pid = pid2label[pid] 82 | 83 | dataset.append((img_path, self.pid_begin + pid, camid, 1)) 84 | return dataset 85 | -------------------------------------------------------------------------------- /datasets/preprocessing.py: -------------------------------------------------------------------------------- 1 | import random 2 | import math 3 | 4 | 5 | class RandomErasing(object): 6 | """ Randomly selects a rectangle region in an image and erases its pixels. 7 | 'Random Erasing Data Augmentation' by Zhong et al. 8 | See https://arxiv.org/pdf/1708.04896.pdf 9 | Args: 10 | probability: The probability that the Random Erasing operation will be performed. 11 | sl: Minimum proportion of erased area against input image. 12 | sh: Maximum proportion of erased area against input image. 13 | r1: Minimum aspect ratio of erased area. 14 | mean: Erasing value. 15 | """ 16 | 17 | def __init__(self, probability=0.5, sl=0.02, sh=0.4, r1=0.3, mean=(0.4914, 0.4822, 0.4465)): 18 | self.probability = probability 19 | self.mean = mean 20 | self.sl = sl 21 | self.sh = sh 22 | self.r1 = r1 23 | 24 | def __call__(self, img): 25 | 26 | if random.uniform(0, 1) >= self.probability: 27 | return img 28 | 29 | for attempt in range(100): 30 | area = img.size()[1] * img.size()[2] 31 | 32 | target_area = random.uniform(self.sl, self.sh) * area 33 | aspect_ratio = random.uniform(self.r1, 1 / self.r1) 34 | 35 | h = int(round(math.sqrt(target_area * aspect_ratio))) 36 | w = int(round(math.sqrt(target_area / aspect_ratio))) 37 | 38 | if w < img.size()[2] and h < img.size()[1]: 39 | x1 = random.randint(0, img.size()[1] - h) 40 | y1 = random.randint(0, img.size()[2] - w) 41 | if img.size()[0] == 3: 42 | img[0, x1:x1 + h, y1:y1 + w] = self.mean[0] 43 | img[1, x1:x1 + h, y1:y1 + w] = self.mean[1] 44 | img[2, x1:x1 + h, y1:y1 + w] = self.mean[2] 45 | else: 46 | img[0, x1:x1 + h, y1:y1 + w] = self.mean[0] 47 | return img 48 | 49 | return img 50 | 51 | -------------------------------------------------------------------------------- /datasets/sampler.py: -------------------------------------------------------------------------------- 1 | from torch.utils.data.sampler import Sampler 2 | from collections import defaultdict 3 | import copy 4 | import random 5 | import numpy as np 6 | 7 | class RandomIdentitySampler(Sampler): 8 | """ 9 | Randomly sample N identities, then for each identity, 10 | randomly sample K instances, therefore batch size is N*K. 11 | Args: 12 | - data_source (list): list of (img_path, pid, camid). 13 | - num_instances (int): number of instances per identity in a batch. 14 | - batch_size (int): number of examples in a batch. 
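Example: with batch_size=64 and num_instances=4, each batch contains 64 // 4 = 16 distinct identities, with 4 instances sampled per identity.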
15 | """ 16 | 17 | def __init__(self, data_source, batch_size, num_instances): 18 | self.data_source = data_source 19 | self.batch_size = batch_size 20 | self.num_instances = num_instances 21 | self.num_pids_per_batch = self.batch_size // self.num_instances 22 | self.index_dic = defaultdict(list) #dict with list value 23 | #{783: [0, 5, 116, 876, 1554, 2041],...,} 24 | for index, (_, pid, _, _) in enumerate(self.data_source): 25 | self.index_dic[pid].append(index) 26 | self.pids = list(self.index_dic.keys()) 27 | 28 | # estimate number of examples in an epoch 29 | self.length = 0 30 | for pid in self.pids: 31 | idxs = self.index_dic[pid] 32 | num = len(idxs) 33 | if num < self.num_instances: 34 | num = self.num_instances 35 | self.length += num - num % self.num_instances 36 | 37 | def __iter__(self): 38 | batch_idxs_dict = defaultdict(list) 39 | 40 | for pid in self.pids: 41 | idxs = copy.deepcopy(self.index_dic[pid]) 42 | if len(idxs) < self.num_instances: 43 | idxs = np.random.choice(idxs, size=self.num_instances, replace=True) 44 | random.shuffle(idxs) 45 | batch_idxs = [] 46 | for idx in idxs: 47 | batch_idxs.append(idx) 48 | if len(batch_idxs) == self.num_instances: 49 | batch_idxs_dict[pid].append(batch_idxs) 50 | batch_idxs = [] 51 | 52 | avai_pids = copy.deepcopy(self.pids) 53 | final_idxs = [] 54 | 55 | while len(avai_pids) >= self.num_pids_per_batch: 56 | selected_pids = random.sample(avai_pids, self.num_pids_per_batch) 57 | for pid in selected_pids: 58 | batch_idxs = batch_idxs_dict[pid].pop(0) 59 | final_idxs.extend(batch_idxs) 60 | if len(batch_idxs_dict[pid]) == 0: 61 | avai_pids.remove(pid) 62 | 63 | return iter(final_idxs) 64 | 65 | def __len__(self): 66 | return self.length 67 | 68 | -------------------------------------------------------------------------------- /imgs/market.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MinghuiChen43/CIL-ReID/ac44dfaf4c0ebd47089e785b8a67a270896f6125/imgs/market.png -------------------------------------------------------------------------------- /imgs/market_c.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MinghuiChen43/CIL-ReID/ac44dfaf4c0ebd47089e785b8a67a270896f6125/imgs/market_c.png -------------------------------------------------------------------------------- /imgs/overview_masked.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MinghuiChen43/CIL-ReID/ac44dfaf4c0ebd47089e785b8a67a270896f6125/imgs/overview_masked.png -------------------------------------------------------------------------------- /imgs/thumbnail_cil.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MinghuiChen43/CIL-ReID/ac44dfaf4c0ebd47089e785b8a67a270896f6125/imgs/thumbnail_cil.png -------------------------------------------------------------------------------- /loss/__init__.py: -------------------------------------------------------------------------------- 1 | from .make_loss import make_loss 2 | from .arcface import ArcFace -------------------------------------------------------------------------------- /loss/arcface.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | from torch.nn import Parameter 5 | import math 6 | 7 | 8 | class ArcFace(nn.Module): 9 | def 
__init__(self, in_features, out_features, s=30.0, m=0.50, bias=False): 10 | super(ArcFace, self).__init__() 11 | self.in_features = in_features 12 | self.out_features = out_features 13 | self.s = s 14 | self.m = m 15 | self.cos_m = math.cos(m) 16 | self.sin_m = math.sin(m) 17 | 18 | self.th = math.cos(math.pi - m) 19 | self.mm = math.sin(math.pi - m) * m 20 | 21 | self.weight = Parameter(torch.Tensor(out_features, in_features)) 22 | if bias: 23 | self.bias = Parameter(torch.Tensor(out_features)) 24 | else: 25 | self.register_parameter('bias', None) 26 | self.reset_parameters() 27 | 28 | def reset_parameters(self): 29 | nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5)) 30 | if self.bias is not None: 31 | fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight) 32 | bound = 1 / math.sqrt(fan_in) 33 | nn.init.uniform_(self.bias, -bound, bound) 34 | 35 | def forward(self, input, label): 36 | cosine = F.linear(F.normalize(input), F.normalize(self.weight)) 37 | sine = torch.sqrt((1.0 - torch.pow(cosine, 2)).clamp(0, 1)) 38 | phi = cosine * self.cos_m - sine * self.sin_m 39 | phi = torch.where(cosine > self.th, phi, cosine - self.mm) 40 | # --------------------------- convert label to one-hot --------------------------- 41 | # one_hot = torch.zeros(cosine.size(), requires_grad=True, device='cuda') 42 | one_hot = torch.zeros(cosine.size(), device=input.device) # use the input's device instead of hard-coding 'cuda' 43 | one_hot.scatter_(1, label.view(-1, 1).long(), 1) 44 | # ------------- torch.where(out_i = x_i if condition_i else y_i) ------------- 45 | output = (one_hot * phi) + ( 46 | (1.0 - one_hot) * cosine) # you can use torch.where if your torch.__version__ is 0.4 47 | output *= self.s 48 | # print(output) 49 | 50 | return output 51 | 52 | class CircleLoss(nn.Module): 53 | def __init__(self, in_features, num_classes, s=256, m=0.25): 54 | super(CircleLoss, self).__init__() 55 | self.weight = Parameter(torch.Tensor(num_classes, in_features)) 56 | self.s = s 57 | self.m = m 58 | self._num_classes = num_classes 59 | self.reset_parameters() 60 | 61 | 62 | def reset_parameters(self): 63 | nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5)) 64 | 65 | def __call__(self, bn_feat, targets): 66 | 67 | sim_mat = F.linear(F.normalize(bn_feat), F.normalize(self.weight)) 68 | alpha_p = torch.clamp_min(-sim_mat.detach() + 1 + self.m, min=0.) 69 | alpha_n = torch.clamp_min(sim_mat.detach() + self.m, min=0.) 70 | delta_p = 1 - self.m 71 | delta_n = self.m 72 | 73 | s_p = self.s * alpha_p * (sim_mat - delta_p) 74 | s_n = self.s * alpha_n * (sim_mat - delta_n) 75 | 76 | targets = F.one_hot(targets, num_classes=self._num_classes) 77 | 78 | pred_class_logits = targets * s_p + (1.0 - targets) * s_n 79 | 80 | return pred_class_logits -------------------------------------------------------------------------------- /loss/center_loss.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import torch 4 | from torch import nn 5 | 6 | 7 | class CenterLoss(nn.Module): 8 | """Center loss. 9 | 10 | Reference: 11 | Wen et al. A Discriminative Feature Learning Approach for Deep Face Recognition. ECCV 2016. 12 | 13 | Args: 14 | num_classes (int): number of classes. 15 | feat_dim (int): feature dimension. 
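Each class keeps a learnable center of dimension feat_dim; the loss is the mean squared Euclidean distance between each feature and the center of its class, which encourages intra-class compactness.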
16 | """ 17 | 18 | def __init__(self, num_classes=751, feat_dim=2048, use_gpu=True): 19 | super(CenterLoss, self).__init__() 20 | self.num_classes = num_classes 21 | self.feat_dim = feat_dim 22 | self.use_gpu = use_gpu 23 | 24 | if self.use_gpu: 25 | self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim).cuda()) 26 | else: 27 | self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim)) 28 | 29 | def forward(self, x, labels): 30 | """ 31 | Args: 32 | x: feature matrix with shape (batch_size, feat_dim). 33 | labels: ground truth labels with shape (batch_size). 34 | """ 35 | assert x.size(0) == labels.size(0), "features.size(0) is not equal to labels.size(0)" 36 | 37 | batch_size = x.size(0) 38 | distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \ 39 | torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t() 40 | distmat.addmm_(x, self.centers.t(), beta=1, alpha=-2) # distmat = distmat - 2 * x @ centers.T (keyword form; the old positional addmm_ signature is deprecated) 41 | 42 | classes = torch.arange(self.num_classes).long() 43 | if self.use_gpu: classes = classes.cuda() 44 | labels = labels.unsqueeze(1).expand(batch_size, self.num_classes) 45 | mask = labels.eq(classes.expand(batch_size, self.num_classes)) 46 | 47 | dist = [] 48 | for i in range(batch_size): 49 | value = distmat[i][mask[i]] 50 | value = value.clamp(min=1e-12, max=1e+12) # for numerical stability 51 | dist.append(value) 52 | dist = torch.cat(dist) 53 | loss = dist.mean() 54 | return loss 55 | 56 | 57 | if __name__ == '__main__': 58 | use_gpu = False 59 | center_loss = CenterLoss(use_gpu=use_gpu) 60 | features = torch.rand(16, 2048) 61 | targets = torch.Tensor([0, 1, 2, 3, 2, 3, 1, 4, 5, 3, 2, 1, 0, 0, 5, 4]).long() 62 | if use_gpu: 63 | features = torch.rand(16, 2048).cuda() 64 | targets = torch.Tensor([0, 1, 2, 3, 2, 3, 1, 4, 5, 3, 2, 1, 0, 0, 5, 4]).cuda() 65 | 66 | loss = center_loss(features, targets) 67 | print(loss) 68 | -------------------------------------------------------------------------------- /loss/softmax_loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.nn import functional as F 4 | class CrossEntropyLabelSmooth(nn.Module): 5 | """Cross entropy loss with label smoothing regularizer. 6 | 7 | Reference: 8 | Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016. 9 | Equation: y = (1 - epsilon) * y + epsilon / K. 10 | 11 | Args: 12 | num_classes (int): number of classes. 13 | epsilon (float): weight. 14 | """ 15 | 16 | def __init__(self, num_classes, epsilon=0.1, use_gpu=True): 17 | super(CrossEntropyLabelSmooth, self).__init__() 18 | self.num_classes = num_classes 19 | self.epsilon = epsilon 20 | self.use_gpu = use_gpu 21 | self.logsoftmax = nn.LogSoftmax(dim=1) 22 | 23 | def forward(self, inputs, targets): 24 | """ 25 | Args: 26 | inputs: prediction matrix (before softmax) with shape (batch_size, num_classes) 27 | targets: ground truth labels with shape (batch_size) 28 | """ 29 | log_probs = self.logsoftmax(inputs) 30 | targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).data.cpu(), 1) 31 | if self.use_gpu: targets = targets.cuda() 32 | targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes 33 | loss = (- targets * log_probs).mean(0).sum() 34 | return loss 35 | 36 | class LabelSmoothingCrossEntropy(nn.Module): 37 | """ 38 | NLL loss with label smoothing. 
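The target class keeps weight confidence = 1 - smoothing, and the smoothing mass is spread uniformly over all classes, so the result is confidence * nll_loss + smoothing * smooth_loss as computed in forward() below.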
39 | """ 40 | def __init__(self, smoothing=0.1): 41 | """ 42 | Constructor for the LabelSmoothing module. 43 | :param smoothing: label smoothing factor 44 | """ 45 | super(LabelSmoothingCrossEntropy, self).__init__() 46 | assert smoothing < 1.0 47 | self.smoothing = smoothing 48 | self.confidence = 1. - smoothing 49 | 50 | def forward(self, x, target): 51 | logprobs = F.log_softmax(x, dim=-1) 52 | nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1)) 53 | nll_loss = nll_loss.squeeze(1) 54 | smooth_loss = -logprobs.mean(dim=-1) 55 | loss = self.confidence * nll_loss + self.smoothing * smooth_loss 56 | return loss.mean() -------------------------------------------------------------------------------- /model/__init__.py: -------------------------------------------------------------------------------- 1 | from .make_model import make_model -------------------------------------------------------------------------------- /model/backbones/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MinghuiChen43/CIL-ReID/ac44dfaf4c0ebd47089e785b8a67a270896f6125/model/backbones/__init__.py -------------------------------------------------------------------------------- /model/backbones/resnest/__init__.py: -------------------------------------------------------------------------------- 1 | from .resnest import * 2 | from .ablation import * 3 | -------------------------------------------------------------------------------- /model/backbones/resnest/build.py: -------------------------------------------------------------------------------- 1 | from fvcore.common.registry import Registry 2 | 3 | RESNEST_MODELS_REGISTRY = Registry('RESNEST_MODELS') 4 | 5 | def get_model(model_name): 6 | return RESNEST_MODELS_REGISTRY.get(model_name) 7 | -------------------------------------------------------------------------------- /model/backbones/sa/functional.py: -------------------------------------------------------------------------------- 1 | from . 
import functions 2 | 3 | 4 | def aggregation(input, weight, kernel_size=3, stride=1, padding=0, dilation=1, pad_mode=1): 5 | assert input.shape[0] == weight.shape[0] and (input.shape[1] % weight.shape[1] == 0) and pad_mode in [0, 1] 6 | if input.is_cuda: 7 | if pad_mode == 0: 8 | out = functions.aggregation_zeropad(input, weight, kernel_size, stride, padding, dilation) 9 | elif pad_mode == 1: 10 | out = functions.aggregation_refpad(input, weight, kernel_size, stride, padding, dilation) 11 | else: 12 | raise NotImplementedError 13 | return out 14 | 15 | 16 | def subtraction(input, kernel_size=3, stride=1, padding=0, dilation=1, pad_mode=1): 17 | assert input.dim() == 4 and pad_mode in [0, 1] 18 | if input.is_cuda: 19 | if pad_mode == 0: 20 | out = functions.subtraction_zeropad(input, kernel_size, stride, padding, dilation) 21 | elif pad_mode == 1: 22 | out = functions.subtraction_refpad(input, kernel_size, stride, padding, dilation) 23 | else: 24 | raise NotImplementedError 25 | return out 26 | 27 | 28 | def subtraction2(input1, input2, kernel_size=3, stride=1, padding=0, dilation=1, pad_mode=1): 29 | assert input1.dim() == 4 and input2.dim() == 4 and pad_mode in [0, 1] 30 | if input1.is_cuda: 31 | if pad_mode == 0: 32 | out = functions.subtraction2_zeropad(input1, input2, kernel_size, stride, padding, dilation) 33 | elif pad_mode == 1: 34 | out = functions.subtraction2_refpad(input1, input2, kernel_size, stride, padding, dilation) 35 | else: 36 | raise NotImplementedError 37 | return out 38 | -------------------------------------------------------------------------------- /model/backbones/sa/functions/__init__.py: -------------------------------------------------------------------------------- 1 | from .aggregation_zeropad import * 2 | from .aggregation_refpad import * 3 | from .subtraction_zeropad import * 4 | from .subtraction_refpad import * 5 | from .subtraction2_zeropad import * 6 | from .subtraction2_refpad import * 7 | from .utils import * 8 | -------------------------------------------------------------------------------- /model/backbones/sa/functions/utils.py: -------------------------------------------------------------------------------- 1 | from collections import namedtuple 2 | from string import Template 3 | import cupy 4 | import torch 5 | 6 | Stream = namedtuple('Stream', ['ptr']) 7 | 8 | 9 | def Dtype(t): 10 | if isinstance(t, torch.cuda.FloatTensor): 11 | return 'float' 12 | elif isinstance(t, torch.cuda.DoubleTensor): 13 | return 'double' 14 | elif isinstance(t, torch.cuda.IntTensor): 15 | return 'int' 16 | elif isinstance(t, torch.cuda.HalfTensor): 17 | return 'double' 18 | else: 19 | print("instance t:", t) 20 | raise ValueError('WIP. 
Check pyinn-issue-#10') 21 | 22 | 23 | @cupy.memoize(for_each_device=True) 24 | def load_kernel(kernel_name, code, **kwargs): 25 | code = Template(code).substitute(**kwargs) 26 | kernel_code = cupy.cuda.compile_with_cache(code) 27 | return kernel_code.get_function(kernel_name) 28 | -------------------------------------------------------------------------------- /model/backbones/sa/modules/__init__.py: -------------------------------------------------------------------------------- 1 | from .aggregation import * 2 | from .subtraction import * 3 | from .subtraction2 import * 4 | -------------------------------------------------------------------------------- /model/backbones/sa/modules/aggregation.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | from torch.nn.modules.utils import _pair 3 | 4 | from .. import functional as F 5 | 6 | 7 | class Aggregation(nn.Module): 8 | 9 | def __init__(self, kernel_size, stride, padding, dilation, pad_mode): 10 | super(Aggregation, self).__init__() 11 | self.kernel_size = _pair(kernel_size) 12 | self.stride = _pair(stride) 13 | self.padding = _pair(padding) 14 | self.dilation = _pair(dilation) 15 | self.pad_mode = pad_mode 16 | 17 | def forward(self, input, weight): 18 | return F.aggregation(input, weight, self.kernel_size, self.stride, self.padding, self.dilation, self.pad_mode) 19 | -------------------------------------------------------------------------------- /model/backbones/sa/modules/subtraction.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | from torch.nn.modules.utils import _pair 3 | 4 | from .. import functional as F 5 | 6 | 7 | class Subtraction(nn.Module): 8 | 9 | def __init__(self, kernel_size, stride, padding, dilation, pad_mode): 10 | super(Subtraction, self).__init__() 11 | self.kernel_size = _pair(kernel_size) 12 | self.stride = _pair(stride) 13 | self.padding = _pair(padding) 14 | self.dilation = _pair(dilation) 15 | self.pad_mode = pad_mode 16 | 17 | def forward(self, input): 18 | return F.subtraction(input, self.kernel_size, self.stride, self.padding, self.dilation, self.pad_mode) 19 | -------------------------------------------------------------------------------- /model/backbones/sa/modules/subtraction2.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | from torch.nn.modules.utils import _pair 3 | 4 | from .. 
import functional as F 5 | 6 | 7 | class Subtraction2(nn.Module): 8 | 9 | def __init__(self, kernel_size, stride, padding, dilation, pad_mode): 10 | super(Subtraction2, self).__init__() 11 | self.kernel_size = _pair(kernel_size) 12 | self.stride = _pair(stride) 13 | self.padding = _pair(padding) 14 | self.dilation = _pair(dilation) 15 | self.pad_mode = pad_mode 16 | 17 | def forward(self, input1, input2): 18 | return F.subtraction2(input1, input2, self.kernel_size, self.stride, self.padding, self.dilation, self.pad_mode) 19 | -------------------------------------------------------------------------------- /model/backbones/timm/__init__.py: -------------------------------------------------------------------------------- 1 | from .version import __version__ 2 | from .models import create_model, list_models, is_model, list_modules, model_entrypoint, \ 3 | is_scriptable, is_exportable, set_scriptable, set_exportable, has_model_default_key, is_model_default_key, \ 4 | get_model_default_value, is_model_pretrained 5 | -------------------------------------------------------------------------------- /model/backbones/timm/data/__init__.py: -------------------------------------------------------------------------------- 1 | from .auto_augment import RandAugment, AutoAugment, rand_augment_ops, auto_augment_policy,\ 2 | rand_augment_transform, auto_augment_transform 3 | from .config import resolve_data_config 4 | from .constants import * 5 | from .dataset import ImageDataset, IterableImageDataset, AugMixDataset 6 | from .dataset_factory import create_dataset 7 | from .loader import create_loader 8 | from .mixup import Mixup, FastCollateMixup 9 | from .parsers import create_parser 10 | from .real_labels import RealLabelsImagenet 11 | from .transforms import * 12 | from .transforms_factory import create_transform -------------------------------------------------------------------------------- /model/backbones/timm/data/config.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from .constants import * 3 | 4 | 5 | _logger = logging.getLogger(__name__) 6 | 7 | 8 | def resolve_data_config(args, default_cfg={}, model=None, use_test_size=False, verbose=False): 9 | new_config = {} 10 | default_cfg = default_cfg 11 | if not default_cfg and model is not None and hasattr(model, 'default_cfg'): 12 | default_cfg = model.default_cfg 13 | 14 | # Resolve input/image size 15 | in_chans = 3 16 | if 'chans' in args and args['chans'] is not None: 17 | in_chans = args['chans'] 18 | 19 | input_size = (in_chans, 224, 224) 20 | if 'input_size' in args and args['input_size'] is not None: 21 | assert isinstance(args['input_size'], (tuple, list)) 22 | assert len(args['input_size']) == 3 23 | input_size = tuple(args['input_size']) 24 | in_chans = input_size[0] # input_size overrides in_chans 25 | elif 'img_size' in args and args['img_size'] is not None: 26 | assert isinstance(args['img_size'], int) 27 | input_size = (in_chans, args['img_size'], args['img_size']) 28 | else: 29 | if use_test_size and 'test_input_size' in default_cfg: 30 | input_size = default_cfg['test_input_size'] 31 | elif 'input_size' in default_cfg: 32 | input_size = default_cfg['input_size'] 33 | new_config['input_size'] = input_size 34 | 35 | # resolve interpolation method 36 | new_config['interpolation'] = 'bicubic' 37 | if 'interpolation' in args and args['interpolation']: 38 | new_config['interpolation'] = args['interpolation'] 39 | elif 'interpolation' in default_cfg: 40 | new_config['interpolation'] 
= default_cfg['interpolation'] 41 | 42 | # resolve dataset + model mean for normalization 43 | new_config['mean'] = IMAGENET_DEFAULT_MEAN 44 | if 'mean' in args and args['mean'] is not None: 45 | mean = tuple(args['mean']) 46 | if len(mean) == 1: 47 | mean = tuple(list(mean) * in_chans) 48 | else: 49 | assert len(mean) == in_chans 50 | new_config['mean'] = mean 51 | elif 'mean' in default_cfg: 52 | new_config['mean'] = default_cfg['mean'] 53 | 54 | # resolve dataset + model std deviation for normalization 55 | new_config['std'] = IMAGENET_DEFAULT_STD 56 | if 'std' in args and args['std'] is not None: 57 | std = tuple(args['std']) 58 | if len(std) == 1: 59 | std = tuple(list(std) * in_chans) 60 | else: 61 | assert len(std) == in_chans 62 | new_config['std'] = std 63 | elif 'std' in default_cfg: 64 | new_config['std'] = default_cfg['std'] 65 | 66 | # resolve default crop percentage 67 | new_config['crop_pct'] = DEFAULT_CROP_PCT 68 | if 'crop_pct' in args and args['crop_pct'] is not None: 69 | new_config['crop_pct'] = args['crop_pct'] 70 | elif 'crop_pct' in default_cfg: 71 | new_config['crop_pct'] = default_cfg['crop_pct'] 72 | 73 | if verbose: 74 | _logger.info('Data processing configuration for current model + dataset:') 75 | for n, v in new_config.items(): 76 | _logger.info('\t%s: %s' % (n, str(v))) 77 | 78 | return new_config 79 | -------------------------------------------------------------------------------- /model/backbones/timm/data/constants.py: -------------------------------------------------------------------------------- 1 | DEFAULT_CROP_PCT = 0.875 2 | IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406) 3 | IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225) 4 | IMAGENET_INCEPTION_MEAN = (0.5, 0.5, 0.5) 5 | IMAGENET_INCEPTION_STD = (0.5, 0.5, 0.5) 6 | IMAGENET_DPN_MEAN = (124 / 255, 117 / 255, 104 / 255) 7 | IMAGENET_DPN_STD = tuple([1 / (.0167 * 255)] * 3) 8 | -------------------------------------------------------------------------------- /model/backbones/timm/data/dataset_factory.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from .dataset import IterableImageDataset, ImageDataset 4 | 5 | 6 | def _search_split(root, split): 7 | # look for sub-folder with name of split in root and use that if it exists 8 | split_name = split.split('[')[0] 9 | try_root = os.path.join(root, split_name) 10 | if os.path.exists(try_root): 11 | return try_root 12 | if split_name == 'validation': 13 | try_root = os.path.join(root, 'val') 14 | if os.path.exists(try_root): 15 | return try_root 16 | return root 17 | 18 | 19 | def create_dataset(name, root, split='validation', search_split=True, is_training=False, batch_size=None, **kwargs): 20 | name = name.lower() 21 | if name.startswith('tfds'): 22 | ds = IterableImageDataset( 23 | root, parser=name, split=split, is_training=is_training, batch_size=batch_size, **kwargs) 24 | else: 25 | # FIXME support more advance split cfg for ImageFolder/Tar datasets in the future 26 | kwargs.pop('repeats', 0) # FIXME currently only Iterable dataset support the repeat multiplier 27 | if search_split and os.path.isdir(root): 28 | root = _search_split(root, split) 29 | ds = ImageDataset(root, parser=name, **kwargs) 30 | return ds 31 | -------------------------------------------------------------------------------- /model/backbones/timm/data/distributed_sampler.py: -------------------------------------------------------------------------------- 1 | import math 2 | import torch 3 | from torch.utils.data import Sampler 4 | 
import torch.distributed as dist 5 | 6 | 7 | class OrderedDistributedSampler(Sampler): 8 | """Sampler that restricts data loading to a subset of the dataset. 9 | It is especially useful in conjunction with 10 | :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each 11 | process can pass a DistributedSampler instance as a DataLoader sampler, 12 | and load a subset of the original dataset that is exclusive to it. 13 | .. note:: 14 | Dataset is assumed to be of constant size. 15 | Arguments: 16 | dataset: Dataset used for sampling. 17 | num_replicas (optional): Number of processes participating in 18 | distributed training. 19 | rank (optional): Rank of the current process within num_replicas. 20 | """ 21 | 22 | def __init__(self, dataset, num_replicas=None, rank=None): 23 | if num_replicas is None: 24 | if not dist.is_available(): 25 | raise RuntimeError("Requires distributed package to be available") 26 | num_replicas = dist.get_world_size() 27 | if rank is None: 28 | if not dist.is_available(): 29 | raise RuntimeError("Requires distributed package to be available") 30 | rank = dist.get_rank() 31 | self.dataset = dataset 32 | self.num_replicas = num_replicas 33 | self.rank = rank 34 | self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas)) 35 | self.total_size = self.num_samples * self.num_replicas 36 | 37 | def __iter__(self): 38 | indices = list(range(len(self.dataset))) 39 | 40 | # add extra samples to make it evenly divisible 41 | indices += indices[:(self.total_size - len(indices))] 42 | assert len(indices) == self.total_size 43 | 44 | # subsample 45 | indices = indices[self.rank:self.total_size:self.num_replicas] 46 | assert len(indices) == self.num_samples 47 | 48 | return iter(indices) 49 | 50 | def __len__(self): 51 | return self.num_samples 52 | -------------------------------------------------------------------------------- /model/backbones/timm/data/parsers/__init__.py: -------------------------------------------------------------------------------- 1 | from .parser_factory import create_parser 2 | -------------------------------------------------------------------------------- /model/backbones/timm/data/parsers/class_map.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | def load_class_map(filename, root=''): 5 | class_map_path = filename 6 | if not os.path.exists(class_map_path): 7 | class_map_path = os.path.join(root, filename) 8 | assert os.path.exists(class_map_path), 'Cannot locate specified class map file (%s)' % filename 9 | class_map_ext = os.path.splitext(filename)[-1].lower() 10 | if class_map_ext == '.txt': 11 | with open(class_map_path) as f: 12 | class_to_idx = {v.strip(): k for k, v in enumerate(f)} 13 | else: 14 | assert False, 'Unsupported class map extension' 15 | return class_to_idx 16 | 17 | -------------------------------------------------------------------------------- /model/backbones/timm/data/parsers/constants.py: -------------------------------------------------------------------------------- 1 | IMG_EXTENSIONS = ('.png', '.jpg', '.jpeg') 2 | -------------------------------------------------------------------------------- /model/backbones/timm/data/parsers/parser.py: -------------------------------------------------------------------------------- 1 | from abc import abstractmethod 2 | 3 | 4 | class Parser: 5 | def __init__(self): 6 | pass 7 | 8 | @abstractmethod 9 | def _filename(self, index, basename=False, absolute=False): 10 | pass 11 | 12 | def 
filename(self, index, basename=False, absolute=False): 13 | return self._filename(index, basename=basename, absolute=absolute) 14 | 15 | def filenames(self, basename=False, absolute=False): 16 | return [self._filename(index, basename=basename, absolute=absolute) for index in range(len(self))] 17 | 18 | -------------------------------------------------------------------------------- /model/backbones/timm/data/parsers/parser_factory.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from .parser_image_folder import ParserImageFolder 4 | from .parser_image_tar import ParserImageTar 5 | from .parser_image_in_tar import ParserImageInTar 6 | 7 | 8 | def create_parser(name, root, split='train', **kwargs): 9 | name = name.lower() 10 | name = name.split('/', 2) 11 | prefix = '' 12 | if len(name) > 1: 13 | prefix = name[0] 14 | name = name[-1] 15 | 16 | # FIXME improve the selection; right now it is just the tfds prefix or the fallback path, 17 | # will need options to explicitly select other parsers shortly 18 | if prefix == 'tfds': 19 | from .parser_tfds import ParserTfds # defer tensorflow import 20 | parser = ParserTfds(root, name, split=split, shuffle=kwargs.pop('shuffle', False), **kwargs) 21 | else: 22 | assert os.path.exists(root) 23 | # default fallback path (backwards compat), use image tar if root is a .tar file, otherwise image folder 24 | # FIXME support split here, in parser? 25 | if os.path.isfile(root) and os.path.splitext(root)[1] == '.tar': 26 | parser = ParserImageInTar(root, **kwargs) 27 | else: 28 | parser = ParserImageFolder(root, **kwargs) 29 | return parser 30 | -------------------------------------------------------------------------------- /model/backbones/timm/data/parsers/parser_image_folder.py: -------------------------------------------------------------------------------- 1 | """ A dataset parser that reads images from folders 2 | 3 | Folders are scanned recursively to find image files. Labels are based 4 | on the folder hierarchy; by default only the leaf folder name is used. 
5 | 6 | Hacked together by / Copyright 2020 Ross Wightman 7 | """ 8 | import os 9 | 10 | from timm.utils.misc import natural_key 11 | 12 | from .parser import Parser 13 | from .class_map import load_class_map 14 | from .constants import IMG_EXTENSIONS 15 | 16 | 17 | def find_images_and_targets(folder, types=IMG_EXTENSIONS, class_to_idx=None, leaf_name_only=True, sort=True): 18 | labels = [] 19 | filenames = [] 20 | for root, subdirs, files in os.walk(folder, topdown=False, followlinks=True): 21 | rel_path = os.path.relpath(root, folder) if (root != folder) else '' 22 | label = os.path.basename(rel_path) if leaf_name_only else rel_path.replace(os.path.sep, '_') 23 | for f in files: 24 | base, ext = os.path.splitext(f) 25 | if ext.lower() in types: 26 | filenames.append(os.path.join(root, f)) 27 | labels.append(label) 28 | if class_to_idx is None: 29 | # building class index 30 | unique_labels = set(labels) 31 | sorted_labels = list(sorted(unique_labels, key=natural_key)) 32 | class_to_idx = {c: idx for idx, c in enumerate(sorted_labels)} 33 | images_and_targets = [(f, class_to_idx[l]) for f, l in zip(filenames, labels) if l in class_to_idx] 34 | if sort: 35 | images_and_targets = sorted(images_and_targets, key=lambda k: natural_key(k[0])) 36 | return images_and_targets, class_to_idx 37 | 38 | 39 | class ParserImageFolder(Parser): 40 | 41 | def __init__( 42 | self, 43 | root, 44 | class_map=''): 45 | super().__init__() 46 | 47 | self.root = root 48 | class_to_idx = None 49 | if class_map: 50 | class_to_idx = load_class_map(class_map, root) 51 | self.samples, self.class_to_idx = find_images_and_targets(root, class_to_idx=class_to_idx) 52 | if len(self.samples) == 0: 53 | raise RuntimeError( 54 | f'Found 0 images in subfolders of {root}. Supported image extensions are {", ".join(IMG_EXTENSIONS)}') 55 | 56 | def __getitem__(self, index): 57 | path, target = self.samples[index] 58 | return open(path, 'rb'), target 59 | 60 | def __len__(self): 61 | return len(self.samples) 62 | 63 | def _filename(self, index, basename=False, absolute=False): 64 | filename = self.samples[index][0] 65 | if basename: 66 | filename = os.path.basename(filename) 67 | elif not absolute: 68 | filename = os.path.relpath(filename, self.root) 69 | return filename 70 | -------------------------------------------------------------------------------- /model/backbones/timm/data/parsers/parser_image_tar.py: -------------------------------------------------------------------------------- 1 | """ A dataset parser that reads single tarfile based datasets 2 | 3 | This parser can read datasets consisting of a single tarfile containing images. 4 | I am planning to deprecate it in favour of ParserImageInTar. 
5 | 6 | Hacked together by / Copyright 2020 Ross Wightman 7 | """ 8 | import os 9 | import tarfile 10 | 11 | from .parser import Parser 12 | from .class_map import load_class_map 13 | from .constants import IMG_EXTENSIONS 14 | from timm.utils.misc import natural_key 15 | 16 | 17 | def extract_tarinfo(tarfile, class_to_idx=None, sort=True): 18 | files = [] 19 | labels = [] 20 | for ti in tarfile.getmembers(): 21 | if not ti.isfile(): 22 | continue 23 | dirname, basename = os.path.split(ti.path) 24 | label = os.path.basename(dirname) 25 | ext = os.path.splitext(basename)[1] 26 | if ext.lower() in IMG_EXTENSIONS: 27 | files.append(ti) 28 | labels.append(label) 29 | if class_to_idx is None: 30 | unique_labels = set(labels) 31 | sorted_labels = list(sorted(unique_labels, key=natural_key)) 32 | class_to_idx = {c: idx for idx, c in enumerate(sorted_labels)} 33 | tarinfo_and_targets = [(f, class_to_idx[l]) for f, l in zip(files, labels) if l in class_to_idx] 34 | if sort: 35 | tarinfo_and_targets = sorted(tarinfo_and_targets, key=lambda k: natural_key(k[0].path)) 36 | return tarinfo_and_targets, class_to_idx 37 | 38 | 39 | class ParserImageTar(Parser): 40 | """ Single tarfile dataset where classes are mapped to folders within tar 41 | NOTE: This class is being deprecated in favour of the more capable ParserImageInTar that can 42 | operate on folders of tars or tars in tars. 43 | """ 44 | def __init__(self, root, class_map=''): 45 | super().__init__() 46 | 47 | class_to_idx = None 48 | if class_map: 49 | class_to_idx = load_class_map(class_map, root) 50 | assert os.path.isfile(root) 51 | self.root = root 52 | 53 | with tarfile.open(root) as tf: # cannot keep this open across processes, reopen later 54 | self.samples, self.class_to_idx = extract_tarinfo(tf, class_to_idx) 55 | self.imgs = self.samples 56 | self.tarfile = None # lazy init in __getitem__ 57 | 58 | def __getitem__(self, index): 59 | if self.tarfile is None: 60 | self.tarfile = tarfile.open(self.root) 61 | tarinfo, target = self.samples[index] 62 | fileobj = self.tarfile.extractfile(tarinfo) 63 | return fileobj, target 64 | 65 | def __len__(self): 66 | return len(self.samples) 67 | 68 | def _filename(self, index, basename=False, absolute=False): 69 | filename = self.samples[index][0].name 70 | if basename: 71 | filename = os.path.basename(filename) 72 | return filename 73 | -------------------------------------------------------------------------------- /model/backbones/timm/data/real_labels.py: -------------------------------------------------------------------------------- 1 | """ Real labels evaluator for ImageNet 2 | Paper: `Are we done with ImageNet?` - https://arxiv.org/abs/2006.07159 3 | Based on Numpy example at https://github.com/google-research/reassessed-imagenet 4 | 5 | Hacked together by / Copyright 2020 Ross Wightman 6 | """ 7 | import os 8 | import json 9 | import numpy as np 10 | 11 | 12 | class RealLabelsImagenet: 13 | 14 | def __init__(self, filenames, real_json='real.json', topk=(1, 5)): 15 | with open(real_json) as real_labels: 16 | real_labels = json.load(real_labels) 17 | real_labels = {f'ILSVRC2012_val_{i + 1:08d}.JPEG': labels for i, labels in enumerate(real_labels)} 18 | self.real_labels = real_labels 19 | self.filenames = filenames 20 | assert len(self.filenames) == len(self.real_labels) 21 | self.topk = topk 22 | self.is_correct = {k: [] for k in topk} 23 | self.sample_idx = 0 24 | 25 | def add_result(self, output): 26 | maxk = max(self.topk) 27 | _, pred_batch = output.topk(maxk, 1, True, True) 28 | pred_batch 
= pred_batch.cpu().numpy() 29 | for pred in pred_batch: 30 | filename = self.filenames[self.sample_idx] 31 | filename = os.path.basename(filename) 32 | if self.real_labels[filename]: 33 | for k in self.topk: 34 | self.is_correct[k].append( 35 | any([p in self.real_labels[filename] for p in pred[:k]])) 36 | self.sample_idx += 1 37 | 38 | def get_accuracy(self, k=None): 39 | if k is None: 40 | return {k: float(np.mean(self.is_correct[k])) * 100 for k in self.topk} 41 | else: 42 | return float(np.mean(self.is_correct[k])) * 100 43 | -------------------------------------------------------------------------------- /model/backbones/timm/loss/__init__.py: -------------------------------------------------------------------------------- 1 | from .cross_entropy import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy 2 | from .jsd import JsdCrossEntropy 3 | from .asymmetric_loss import AsymmetricLossMultiLabel, AsymmetricLossSingleLabel -------------------------------------------------------------------------------- /model/backbones/timm/loss/asymmetric_loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | class AsymmetricLossMultiLabel(nn.Module): 6 | def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-8, disable_torch_grad_focal_loss=False): 7 | super(AsymmetricLossMultiLabel, self).__init__() 8 | 9 | self.gamma_neg = gamma_neg 10 | self.gamma_pos = gamma_pos 11 | self.clip = clip 12 | self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss 13 | self.eps = eps 14 | 15 | def forward(self, x, y): 16 | """ 17 | Parameters 18 | ---------- 19 | x: input logits 20 | y: targets (multi-label binarized vector) 21 | """ 22 | 23 | # Calculating Probabilities 24 | x_sigmoid = torch.sigmoid(x) 25 | xs_pos = x_sigmoid 26 | xs_neg = 1 - x_sigmoid 27 | 28 | # Asymmetric Clipping 29 | if self.clip is not None and self.clip > 0: 30 | xs_neg = (xs_neg + self.clip).clamp(max=1) 31 | 32 | # Basic CE calculation 33 | los_pos = y * torch.log(xs_pos.clamp(min=self.eps)) 34 | los_neg = (1 - y) * torch.log(xs_neg.clamp(min=self.eps)) 35 | loss = los_pos + los_neg 36 | 37 | # Asymmetric Focusing 38 | if self.gamma_neg > 0 or self.gamma_pos > 0: 39 | if self.disable_torch_grad_focal_loss: 40 | torch._C.set_grad_enabled(False) 41 | pt0 = xs_pos * y 42 | pt1 = xs_neg * (1 - y) # pt = p if t > 0 else 1-p 43 | pt = pt0 + pt1 44 | one_sided_gamma = self.gamma_pos * y + self.gamma_neg * (1 - y) 45 | one_sided_w = torch.pow(1 - pt, one_sided_gamma) 46 | if self.disable_torch_grad_focal_loss: 47 | torch._C.set_grad_enabled(True) 48 | loss *= one_sided_w 49 | 50 | return -loss.sum() 51 | 52 | 53 | class AsymmetricLossSingleLabel(nn.Module): 54 | def __init__(self, gamma_pos=1, gamma_neg=4, eps: float = 0.1, reduction='mean'): 55 | super(AsymmetricLossSingleLabel, self).__init__() 56 | 57 | self.eps = eps 58 | self.logsoftmax = nn.LogSoftmax(dim=-1) 59 | self.targets_classes = [] # prevent gpu repeated memory allocation 60 | self.gamma_pos = gamma_pos 61 | self.gamma_neg = gamma_neg 62 | self.reduction = reduction 63 | 64 | def forward(self, inputs, target, reduction=None): 65 | """ 66 | Parameters 67 | ---------- 68 | inputs: input logits 69 | target: targets (class indices, converted to a one-hot matrix below) 70 | """ 71 | 72 | num_classes = inputs.size()[-1] 73 | log_preds = self.logsoftmax(inputs) 74 | self.targets_classes = torch.zeros_like(inputs).scatter_(1, target.long().unsqueeze(1), 1) 75 | 76 | # ASL weights 77 | targets = self.targets_classes 78 | 
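# Editorial comment (not in the original source): for the one-hot `targets`,
# the asymmetric weight computed below evaluates to (1 - p_k)^gamma_pos on the
# target class and p_k^gamma_neg on every other class, i.e. a focal-style
# factor that decays easy predictions, with a harsher exponent on negatives
# (gamma_neg=4 vs gamma_pos=1 by default).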
anti_targets = 1 - targets 79 | xs_pos = torch.exp(log_preds) 80 | xs_neg = 1 - xs_pos 81 | xs_pos = xs_pos * targets 82 | xs_neg = xs_neg * anti_targets 83 | asymmetric_w = torch.pow(1 - xs_pos - xs_neg, 84 | self.gamma_pos * targets + self.gamma_neg * anti_targets) 85 | log_preds = log_preds * asymmetric_w 86 | 87 | if self.eps > 0: # label smoothing 88 | self.targets_classes.mul_(1 - self.eps).add_(self.eps / num_classes) 89 | 90 | # loss calculation 91 | loss = - self.targets_classes.mul(log_preds) 92 | 93 | loss = loss.sum(dim=-1) 94 | if self.reduction == 'mean': 95 | loss = loss.mean() 96 | 97 | return loss 98 | -------------------------------------------------------------------------------- /model/backbones/timm/loss/cross_entropy.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | 6 | class LabelSmoothingCrossEntropy(nn.Module): 7 | """ 8 | NLL loss with label smoothing. 9 | """ 10 | def __init__(self, smoothing=0.1): 11 | """ 12 | Constructor for the LabelSmoothing module. 13 | :param smoothing: label smoothing factor 14 | """ 15 | super(LabelSmoothingCrossEntropy, self).__init__() 16 | assert smoothing < 1.0 17 | self.smoothing = smoothing 18 | self.confidence = 1. - smoothing 19 | 20 | def forward(self, x, target): 21 | logprobs = F.log_softmax(x, dim=-1) 22 | nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1)) 23 | nll_loss = nll_loss.squeeze(1) 24 | smooth_loss = -logprobs.mean(dim=-1) 25 | loss = self.confidence * nll_loss + self.smoothing * smooth_loss 26 | return loss.mean() 27 | 28 | 29 | class SoftTargetCrossEntropy(nn.Module): 30 | 31 | def __init__(self): 32 | super(SoftTargetCrossEntropy, self).__init__() 33 | 34 | def forward(self, x, target): 35 | loss = torch.sum(-target * F.log_softmax(x, dim=-1), dim=-1) 36 | return loss.mean() 37 | -------------------------------------------------------------------------------- /model/backbones/timm/loss/jsd.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | from .cross_entropy import LabelSmoothingCrossEntropy 6 | 7 | 8 | class JsdCrossEntropy(nn.Module): 9 | """ Jensen-Shannon Divergence + Cross-Entropy Loss 10 | 11 | Based on the impl here: https://github.com/google-research/augmix/blob/master/imagenet.py 12 | From the paper 'AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty' - 13 | https://arxiv.org/abs/1912.02781 14 | 15 | Hacked together by / Copyright 2020 Ross Wightman 16 | """ 17 | def __init__(self, num_splits=3, alpha=12, smoothing=0.1): 18 | super().__init__() 19 | self.num_splits = num_splits 20 | self.alpha = alpha 21 | if smoothing is not None and smoothing > 0: 22 | self.cross_entropy_loss = LabelSmoothingCrossEntropy(smoothing) 23 | else: 24 | self.cross_entropy_loss = torch.nn.CrossEntropyLoss() 25 | 26 | def __call__(self, output, target): 27 | split_size = output.shape[0] // self.num_splits 28 | assert split_size * self.num_splits == output.shape[0] 29 | logits_split = torch.split(output, split_size) 30 | 31 | # Cross-entropy is only computed on clean images 32 | loss = self.cross_entropy_loss(logits_split[0], target[:split_size]) 33 | probs = [F.softmax(logits, dim=1) for logits in logits_split] 34 | 35 | # Clamp mixture distribution to avoid exploding KL divergence 36 | logp_mixture = torch.clamp(torch.stack(probs).mean(axis=0), 1e-7, 
1).log() 37 | loss += self.alpha * sum([F.kl_div( 38 | logp_mixture, p_split, reduction='batchmean') for p_split in probs]) / len(probs) 39 | return loss 40 | -------------------------------------------------------------------------------- /model/backbones/timm/models/__init__.py: -------------------------------------------------------------------------------- 1 | from .byoanet import * 2 | from .byobnet import * 3 | from .cait import * 4 | from .coat import * 5 | from .convit import * 6 | from .cspnet import * 7 | from .densenet import * 8 | from .dla import * 9 | from .dpn import * 10 | from .efficientnet import * 11 | from .ghostnet import * 12 | from .gluon_resnet import * 13 | from .gluon_xception import * 14 | from .hardcorenas import * 15 | from .hrnet import * 16 | from .inception_resnet_v2 import * 17 | from .inception_v3 import * 18 | from .inception_v4 import * 19 | from .levit import * 20 | from .mlp_mixer import * 21 | from .mobilenetv3 import * 22 | from .nasnet import * 23 | from .nfnet import * 24 | from .pit import * 25 | from .pnasnet import * 26 | from .regnet import * 27 | from .res2net import * 28 | from .resnest import * 29 | from .resnet import * 30 | from .resnetv2 import * 31 | from .rexnet import * 32 | from .selecsls import * 33 | from .senet import * 34 | from .sknet import * 35 | from .swin_transformer import * 36 | from .tnt import * 37 | from .tresnet import * 38 | from .vgg import * 39 | from .visformer import * 40 | from .vision_transformer import * 41 | from .vision_transformer_hybrid import * 42 | from .vovnet import * 43 | from .xception import * 44 | from .xception_aligned import * 45 | from .twins import * 46 | 47 | from .factory import create_model, split_model_name, safe_model_name 48 | from .helpers import load_checkpoint, resume_checkpoint, model_parameters 49 | from .layers import TestTimePoolHead, apply_test_time_pool 50 | from .layers import convert_splitbn_model 51 | from .layers import is_scriptable, is_exportable, set_scriptable, set_exportable, is_no_jit, set_no_jit 52 | from .registry import register_model, model_entrypoint, list_models, is_model, list_modules, is_model_in_modules,\ 53 | has_model_default_key, is_model_default_key, get_model_default_value, is_model_pretrained 54 | --------------------------------------------------------------------------------
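For orientation, a minimal usage sketch of the registry/factory API re-exported above. It is illustrative only: the vendored import path and the model name are assumptions based on this repository's layout, and num_classes=751 is merely the Market-1501 training-identity count used as an example.

# Hypothetical usage of the factory exported by models/__init__.py above.
from model.backbones.timm.models import create_model, list_models

print(list_models('vit_*')[:5])              # enumerate registered ViT variants
net = create_model('vit_base_patch16_224',   # any name returned by list_models()
                   pretrained=False,
                   num_classes=751)          # e.g. number of Market-1501 train IDs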
/model/backbones/timm/models/layers/__init__.py: -------------------------------------------------------------------------------- 1 | from .activations import * 2 | from .adaptive_avgmax_pool import \ 3 | adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d 4 | from .blur_pool import BlurPool2d 5 | from .classifier import ClassifierHead, create_classifier 6 | from .cond_conv2d import CondConv2d, get_condconv_initializer 7 | from .config import is_exportable, is_scriptable, is_no_jit, set_exportable, set_scriptable, set_no_jit,\ 8 | set_layer_config 9 | from .conv2d_same import Conv2dSame, conv2d_same 10 | from .conv_bn_act import ConvBnAct 11 | from .create_act import create_act_layer, get_act_layer, get_act_fn 12 | from .create_attn import get_attn, create_attn 13 | from .create_conv2d import create_conv2d 14 | from .create_norm_act import get_norm_act_layer, create_norm_act, convert_norm_act 15 | from .create_self_attn import get_self_attn, create_self_attn 16 | from .drop import DropBlock2d, DropPath, drop_block_2d, drop_path 17 | from .eca import EcaModule, CecaModule 18 | from .evo_norm import EvoNormBatch2d, EvoNormSample2d 19 | from .helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible 20 | from .inplace_abn import InplaceAbn 21 | from .involution import Involution 22 | from .linear import Linear 23 | from .mixed_conv2d import MixedConv2d 24 | from .mlp import Mlp, GluMlp, GatedMlp 25 | from .norm import GroupNorm 26 | from .norm_act import BatchNormAct2d, GroupNormAct 27 | from .padding import get_padding, 
get_same_padding, pad_same 28 | from .patch_embed import PatchEmbed 29 | from .pool2d_same import AvgPool2dSame, create_pool2d 30 | from .se import SEModule 31 | from .selective_kernel import SelectiveKernelConv 32 | from .separable_conv import SeparableConv2d, SeparableConvBnAct 33 | from .space_to_depth import SpaceToDepthModule 34 | from .split_attn import SplitAttnConv2d 35 | from .split_batchnorm import SplitBatchNorm2d, convert_splitbn_model 36 | from .std_conv import StdConv2d, StdConv2dSame, ScaledStdConv2d, ScaledStdConv2dSame 37 | from .test_time_pool import TestTimePoolHead, apply_test_time_pool 38 | from .weight_init import trunc_normal_, variance_scaling_, lecun_normal_ 39 | --------------------------------------------------------------------------------
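A short sketch of how the layers re-exported above compose. The block is illustrative, not code from this repository; the vendored import path is an assumption (upstream timm exposes the same names under timm.models.layers).

import torch
import torch.nn as nn
from model.backbones.timm.models.layers import DropPath, Mlp, trunc_normal_  # assumed vendored path

class ToyBlock(nn.Module):
    """Minimal residual MLP block built from the exported layers."""
    def __init__(self, dim=64, drop_path=0.1):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=dim * 4)
        self.drop_path = DropPath(drop_path)   # stochastic depth on the residual branch

    def forward(self, x):
        return x + self.drop_path(self.mlp(self.norm(x)))

block = ToyBlock()
trunc_normal_(block.mlp.fc1.weight, std=.02)  # timm-style truncated-normal init
out = block(torch.randn(2, 16, 64))           # (batch, tokens, dim)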
https://raw.githubusercontent.com/MinghuiChen43/CIL-ReID/ac44dfaf4c0ebd47089e785b8a67a270896f6125/model/backbones/timm/models/layers/__pycache__/std_conv.cpython-38.pyc -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/__pycache__/swin_attn.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MinghuiChen43/CIL-ReID/ac44dfaf4c0ebd47089e785b8a67a270896f6125/model/backbones/timm/models/layers/__pycache__/swin_attn.cpython-38.pyc -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/__pycache__/test_time_pool.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MinghuiChen43/CIL-ReID/ac44dfaf4c0ebd47089e785b8a67a270896f6125/model/backbones/timm/models/layers/__pycache__/test_time_pool.cpython-38.pyc -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/__pycache__/weight_init.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MinghuiChen43/CIL-ReID/ac44dfaf4c0ebd47089e785b8a67a270896f6125/model/backbones/timm/models/layers/__pycache__/weight_init.cpython-38.pyc -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/activations_jit.py: -------------------------------------------------------------------------------- 1 | """ Activations 2 | 3 | A collection of jit-scripted activations fn and modules with a common interface so that they can 4 | easily be swapped. All have an `inplace` arg even if not used. 5 | 6 | All jit scripted activations are lacking in-place variations on purpose, scripted kernel fusion does not 7 | currently work across in-place op boundaries, thus performance is equal to or less than the non-scripted 8 | versions if they contain in-place ops. 9 | 10 | Hacked together by / Copyright 2020 Ross Wightman 11 | """ 12 | 13 | import torch 14 | from torch import nn as nn 15 | from torch.nn import functional as F 16 | 17 | 18 | @torch.jit.script 19 | def swish_jit(x, inplace: bool = False): 20 | """Swish - Described in: https://arxiv.org/abs/1710.05941 21 | """ 22 | return x.mul(x.sigmoid()) 23 | 24 | 25 | @torch.jit.script 26 | def mish_jit(x, _inplace: bool = False): 27 | """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 28 | """ 29 | return x.mul(F.softplus(x).tanh()) 30 | 31 | 32 | class SwishJit(nn.Module): 33 | def __init__(self, inplace: bool = False): 34 | super(SwishJit, self).__init__() 35 | 36 | def forward(self, x): 37 | return swish_jit(x) 38 | 39 | 40 | class MishJit(nn.Module): 41 | def __init__(self, inplace: bool = False): 42 | super(MishJit, self).__init__() 43 | 44 | def forward(self, x): 45 | return mish_jit(x) 46 | 47 | 48 | @torch.jit.script 49 | def hard_sigmoid_jit(x, inplace: bool = False): 50 | # return F.relu6(x + 3.) / 6. 51 | return (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster? 
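# Illustrative sanity check (an added sketch, not part of the original file):
# hard_sigmoid_jit maps -3 -> 0, 0 -> 0.5 and +3 -> 1, saturating outside [-3, 3]:
#   hard_sigmoid_jit(torch.tensor([-4., -3., 0., 3., 4.]))
#   # -> tensor([0.0000, 0.0000, 0.5000, 1.0000, 1.0000])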
52 | 53 | 54 | class HardSigmoidJit(nn.Module): 55 | def __init__(self, inplace: bool = False): 56 | super(HardSigmoidJit, self).__init__() 57 | 58 | def forward(self, x): 59 | return hard_sigmoid_jit(x) 60 | 61 | 62 | @torch.jit.script 63 | def hard_swish_jit(x, inplace: bool = False): 64 | # return x * (F.relu6(x + 3.) / 6) 65 | return x * (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster? 66 | 67 | 68 | class HardSwishJit(nn.Module): 69 | def __init__(self, inplace: bool = False): 70 | super(HardSwishJit, self).__init__() 71 | 72 | def forward(self, x): 73 | return hard_swish_jit(x) 74 | 75 | 76 | @torch.jit.script 77 | def hard_mish_jit(x, inplace: bool = False): 78 | """ Hard Mish 79 | Experimental, based on notes by Mish author Diganta Misra at 80 | https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md 81 | """ 82 | return 0.5 * x * (x + 2).clamp(min=0, max=2) 83 | 84 | 85 | class HardMishJit(nn.Module): 86 | def __init__(self, inplace: bool = False): 87 | super(HardMishJit, self).__init__() 88 | 89 | def forward(self, x): 90 | return hard_mish_jit(x) 91 | -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/blur_pool.py: -------------------------------------------------------------------------------- 1 | """ 2 | BlurPool layer inspired by 3 | - Kornia's Max_BlurPool2d 4 | - Making Convolutional Networks Shift-Invariant Again :cite:`zhang2019shiftinvar` 5 | 6 | Hacked together by Chris Ha and Ross Wightman 7 | """ 8 | 9 | import torch 10 | import torch.nn as nn 11 | import torch.nn.functional as F 12 | import numpy as np 13 | from .padding import get_padding 14 | 15 | 16 | class BlurPool2d(nn.Module): 17 | r"""Creates a module that blurs and downsamples a given feature map. 18 | See :cite:`zhang2019shiftinvar` for more details. 19 | Corresponds to the Downsample class, which does blurring and subsampling. 20 | 21 | Args: 22 | channels (int): Number of input channels 23 | filt_size (int): binomial filter size for blurring. currently supports 3 (default) and 5. 24 | stride (int): downsampling filter stride 25 | 26 | Returns: 27 | torch.Tensor: the transformed tensor.
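Example (an illustrative sketch, assuming an NCHW float tensor):
    >>> pool = BlurPool2d(channels=64, filt_size=3, stride=2)
    >>> pool(torch.randn(2, 64, 56, 56)).shape
    torch.Size([2, 64, 28, 28])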
28 | """ 29 | def __init__(self, channels, filt_size=3, stride=2) -> None: 30 | super(BlurPool2d, self).__init__() 31 | assert filt_size > 1 32 | self.channels = channels 33 | self.filt_size = filt_size 34 | self.stride = stride 35 | self.padding = [get_padding(filt_size, stride, dilation=1)] * 4 36 | coeffs = torch.tensor((np.poly1d((0.5, 0.5)) ** (self.filt_size - 1)).coeffs.astype(np.float32)) 37 | blur_filter = (coeffs[:, None] * coeffs[None, :])[None, None, :, :].repeat(self.channels, 1, 1, 1) 38 | self.register_buffer('filt', blur_filter, persistent=False) 39 | 40 | def forward(self, x: torch.Tensor) -> torch.Tensor: 41 | x = F.pad(x, self.padding, 'reflect') 42 | return F.conv2d(x, self.filt, stride=self.stride, groups=x.shape[1]) 43 | -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/classifier.py: -------------------------------------------------------------------------------- 1 | """ Classifier head and layer factory 2 | 3 | Hacked together by / Copyright 2020 Ross Wightman 4 | """ 5 | from torch import nn as nn 6 | from torch.nn import functional as F 7 | 8 | from .adaptive_avgmax_pool import SelectAdaptivePool2d 9 | from .linear import Linear 10 | 11 | 12 | def _create_pool(num_features, num_classes, pool_type='avg', use_conv=False): 13 | flatten_in_pool = not use_conv # flatten when we use a Linear layer after pooling 14 | if not pool_type: 15 | assert num_classes == 0 or use_conv,\ 16 | 'Pooling can only be disabled if classifier is also removed or conv classifier is used' 17 | flatten_in_pool = False # disable flattening if pooling is pass-through (no pooling) 18 | global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=flatten_in_pool) 19 | num_pooled_features = num_features * global_pool.feat_mult() 20 | return global_pool, num_pooled_features 21 | 22 | 23 | def _create_fc(num_features, num_classes, pool_type='avg', use_conv=False): 24 | if num_classes <= 0: 25 | fc = nn.Identity() # pass-through (no classifier) 26 | elif use_conv: 27 | fc = nn.Conv2d(num_features, num_classes, 1, bias=True) 28 | else: 29 | # NOTE: using my Linear wrapper that fixes AMP + torchscript casting issue 30 | fc = Linear(num_features, num_classes, bias=True) 31 | return fc 32 | 33 | 34 | def create_classifier(num_features, num_classes, pool_type='avg', use_conv=False): 35 | global_pool, num_pooled_features = _create_pool(num_features, num_classes, pool_type, use_conv=use_conv) 36 | fc = _create_fc(num_pooled_features, num_classes, use_conv=use_conv) 37 | return global_pool, fc 38 | 39 | 40 | class ClassifierHead(nn.Module): 41 | """Classifier head w/ configurable global pooling and dropout.""" 42 | 43 | def __init__(self, in_chs, num_classes, pool_type='avg', drop_rate=0., use_conv=False): 44 | super(ClassifierHead, self).__init__() 45 | self.drop_rate = drop_rate 46 | self.global_pool, num_pooled_features = _create_pool(in_chs, num_classes, pool_type, use_conv=use_conv) 47 | self.fc = _create_fc(num_pooled_features, num_classes, use_conv=use_conv) 48 | self.flatten_after_fc = use_conv and pool_type 49 | 50 | def forward(self, x): 51 | x = self.global_pool(x) 52 | if self.drop_rate: 53 | x = F.dropout(x, p=float(self.drop_rate), training=self.training) 54 | x = self.fc(x) 55 | return x 56 | -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/config.py: -------------------------------------------------------------------------------- 1 | """ Model / Layer 
Config singleton state 2 | """ 3 | from typing import Any, Optional 4 | 5 | __all__ = [ 6 | 'is_exportable', 'is_scriptable', 'is_no_jit', 7 | 'set_exportable', 'set_scriptable', 'set_no_jit', 'set_layer_config' 8 | ] 9 | 10 | # Set to True if prefer to have layers with no jit optimization (includes activations) 11 | _NO_JIT = False 12 | 13 | # Set to True if prefer to have activation layers with no jit optimization 14 | # NOTE not currently used as no difference between no_jit and no_activation jit as only layers obeying 15 | # the jit flags so far are activations. This will change as more layers are updated and/or added. 16 | _NO_ACTIVATION_JIT = False 17 | 18 | # Set to True if exporting a model with Same padding via ONNX 19 | _EXPORTABLE = False 20 | 21 | # Set to True if wanting to use torch.jit.script on a model 22 | _SCRIPTABLE = False 23 | 24 | 25 | def is_no_jit(): 26 | return _NO_JIT 27 | 28 | 29 | class set_no_jit: 30 | def __init__(self, mode: bool) -> None: 31 | global _NO_JIT 32 | self.prev = _NO_JIT 33 | _NO_JIT = mode 34 | 35 | def __enter__(self) -> None: 36 | pass 37 | 38 | def __exit__(self, *args: Any) -> bool: 39 | global _NO_JIT 40 | _NO_JIT = self.prev 41 | return False 42 | 43 | 44 | def is_exportable(): 45 | return _EXPORTABLE 46 | 47 | 48 | class set_exportable: 49 | def __init__(self, mode: bool) -> None: 50 | global _EXPORTABLE 51 | self.prev = _EXPORTABLE 52 | _EXPORTABLE = mode 53 | 54 | def __enter__(self) -> None: 55 | pass 56 | 57 | def __exit__(self, *args: Any) -> bool: 58 | global _EXPORTABLE 59 | _EXPORTABLE = self.prev 60 | return False 61 | 62 | 63 | def is_scriptable(): 64 | return _SCRIPTABLE 65 | 66 | 67 | class set_scriptable: 68 | def __init__(self, mode: bool) -> None: 69 | global _SCRIPTABLE 70 | self.prev = _SCRIPTABLE 71 | _SCRIPTABLE = mode 72 | 73 | def __enter__(self) -> None: 74 | pass 75 | 76 | def __exit__(self, *args: Any) -> bool: 77 | global _SCRIPTABLE 78 | _SCRIPTABLE = self.prev 79 | return False 80 | 81 | 82 | class set_layer_config: 83 | """ Layer config context manager that allows setting all layer config flags at once. 84 | If a flag arg is None, it will not change the current value. 
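Illustrative usage (a sketch; `create_model` below stands in for any
model-building code, it is not defined in this file):

    with set_layer_config(scriptable=True, no_jit=True):
        model = create_model('resnet50')  # layers built here obey the flags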
85 | """ 86 | def __init__( 87 | self, 88 | scriptable: Optional[bool] = None, 89 | exportable: Optional[bool] = None, 90 | no_jit: Optional[bool] = None, 91 | no_activation_jit: Optional[bool] = None): 92 | global _SCRIPTABLE 93 | global _EXPORTABLE 94 | global _NO_JIT 95 | global _NO_ACTIVATION_JIT 96 | self.prev = _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT 97 | if scriptable is not None: 98 | _SCRIPTABLE = scriptable 99 | if exportable is not None: 100 | _EXPORTABLE = exportable 101 | if no_jit is not None: 102 | _NO_JIT = no_jit 103 | if no_activation_jit is not None: 104 | _NO_ACTIVATION_JIT = no_activation_jit 105 | 106 | def __enter__(self) -> None: 107 | pass 108 | 109 | def __exit__(self, *args: Any) -> bool: 110 | global _SCRIPTABLE 111 | global _EXPORTABLE 112 | global _NO_JIT 113 | global _NO_ACTIVATION_JIT 114 | _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT = self.prev 115 | return False 116 | -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/conv2d_same.py: -------------------------------------------------------------------------------- 1 | """ Conv2d w/ Same Padding 2 | 3 | Hacked together by / Copyright 2020 Ross Wightman 4 | """ 5 | import torch 6 | import torch.nn as nn 7 | import torch.nn.functional as F 8 | from typing import Tuple, Optional 9 | 10 | from .padding import pad_same, get_padding_value 11 | 12 | 13 | def conv2d_same( 14 | x, weight: torch.Tensor, bias: Optional[torch.Tensor] = None, stride: Tuple[int, int] = (1, 1), 15 | padding: Tuple[int, int] = (0, 0), dilation: Tuple[int, int] = (1, 1), groups: int = 1): 16 | x = pad_same(x, weight.shape[-2:], stride, dilation) 17 | return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups) 18 | 19 | 20 | class Conv2dSame(nn.Conv2d): 21 | """ Tensorflow like 'SAME' convolution wrapper for 2D convolutions 22 | """ 23 | 24 | def __init__(self, in_channels, out_channels, kernel_size, stride=1, 25 | padding=0, dilation=1, groups=1, bias=True): 26 | super(Conv2dSame, self).__init__( 27 | in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias) 28 | 29 | def forward(self, x): 30 | return conv2d_same(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) 31 | 32 | 33 | def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs): 34 | padding = kwargs.pop('padding', '') 35 | kwargs.setdefault('bias', False) 36 | padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs) 37 | if is_dynamic: 38 | return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs) 39 | else: 40 | return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs) 41 | 42 | 43 | -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/conv_bn_act.py: -------------------------------------------------------------------------------- 1 | """ Conv2d + BN + Act 2 | 3 | Hacked together by / Copyright 2020 Ross Wightman 4 | """ 5 | from torch import nn as nn 6 | 7 | from .create_conv2d import create_conv2d 8 | from .create_norm_act import convert_norm_act 9 | 10 | 11 | class ConvBnAct(nn.Module): 12 | def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding='', dilation=1, groups=1, 13 | bias=False, apply_act=True, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, aa_layer=None, 14 | drop_block=None): 15 | super(ConvBnAct, self).__init__() 16 | use_aa = aa_layer is not None 17 | 18 | self.conv = create_conv2d( 19 | in_channels, 
out_channels, kernel_size, stride=1 if use_aa else stride, 20 | padding=padding, dilation=dilation, groups=groups, bias=bias) 21 | 22 | # NOTE for backwards compatibility with models that use separate norm and act layer definitions 23 | norm_act_layer = convert_norm_act(norm_layer, act_layer) 24 | self.bn = norm_act_layer(out_channels, apply_act=apply_act, drop_block=drop_block) 25 | self.aa = aa_layer(channels=out_channels) if stride == 2 and use_aa else None 26 | 27 | @property 28 | def in_channels(self): 29 | return self.conv.in_channels 30 | 31 | @property 32 | def out_channels(self): 33 | return self.conv.out_channels 34 | 35 | def forward(self, x): 36 | x = self.conv(x) 37 | x = self.bn(x) 38 | if self.aa is not None: 39 | x = self.aa(x) 40 | return x 41 | -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/create_attn.py: -------------------------------------------------------------------------------- 1 | """ Select AttentionFactory Method 2 | 3 | Hacked together by / Copyright 2020 Ross Wightman 4 | """ 5 | import torch 6 | from .se import SEModule, EffectiveSEModule 7 | from .eca import EcaModule, CecaModule 8 | from .cbam import CbamModule, LightCbamModule 9 | 10 | 11 | def get_attn(attn_type): 12 | if isinstance(attn_type, torch.nn.Module): 13 | return attn_type 14 | module_cls = None 15 | if attn_type is not None: 16 | if isinstance(attn_type, str): 17 | attn_type = attn_type.lower() 18 | if attn_type == 'se': 19 | module_cls = SEModule 20 | elif attn_type == 'ese': 21 | module_cls = EffectiveSEModule 22 | elif attn_type == 'eca': 23 | module_cls = EcaModule 24 | elif attn_type == 'ceca': 25 | module_cls = CecaModule 26 | elif attn_type == 'cbam': 27 | module_cls = CbamModule 28 | elif attn_type == 'lcbam': 29 | module_cls = LightCbamModule 30 | else: 31 | assert False, "Invalid attn module (%s)" % attn_type 32 | elif isinstance(attn_type, bool): 33 | if attn_type: 34 | module_cls = SEModule 35 | else: 36 | module_cls = attn_type 37 | return module_cls 38 | 39 | 40 | def create_attn(attn_type, channels, **kwargs): 41 | module_cls = get_attn(attn_type) 42 | if module_cls is not None: 43 | # NOTE: it's expected the first (positional) argument of all attention layers is the # input channels 44 | return module_cls(channels, **kwargs) 45 | return None 46 | -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/create_conv2d.py: -------------------------------------------------------------------------------- 1 | """ Create Conv2d Factory Method 2 | 3 | Hacked together by / Copyright 2020 Ross Wightman 4 | """ 5 | 6 | from .mixed_conv2d import MixedConv2d 7 | from .cond_conv2d import CondConv2d 8 | from .conv2d_same import create_conv2d_pad 9 | 10 | 11 | def create_conv2d(in_channels, out_channels, kernel_size, **kwargs): 12 | """ Select a 2d convolution implementation based on arguments 13 | Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d. 14 | 15 | Used extensively by EfficientNet, MobileNetv3 and related networks. 16 | """ 17 | if isinstance(kernel_size, list): 18 | assert 'num_experts' not in kwargs # MixNet + CondConv combo not supported currently 19 | assert 'groups' not in kwargs # MixedConv groups are defined by kernel list 20 | # We're going to use only lists for defining the MixedConv2d kernel groups, 21 | # ints, tuples, other iterables will continue to pass to normal conv and specify h, w. 
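# e.g. (illustrative): create_conv2d(32, 64, [3, 5, 7]) yields a MixedConv2d with
# three kernel groups, while create_conv2d(32, 64, 3) falls through to the branch below.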
22 | m = MixedConv2d(in_channels, out_channels, kernel_size, **kwargs) 23 | else: 24 | depthwise = kwargs.pop('depthwise', False) 25 | # for DW out_channels must be multiple of in_channels as must have out_channels % groups == 0 26 | groups = in_channels if depthwise else kwargs.pop('groups', 1) 27 | if 'num_experts' in kwargs and kwargs['num_experts'] > 0: 28 | m = CondConv2d(in_channels, out_channels, kernel_size, groups=groups, **kwargs) 29 | else: 30 | m = create_conv2d_pad(in_channels, out_channels, kernel_size, groups=groups, **kwargs) 31 | return m 32 | -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/create_self_attn.py: -------------------------------------------------------------------------------- 1 | from .bottleneck_attn import BottleneckAttn 2 | from .halo_attn import HaloAttn 3 | from .involution import Involution 4 | from .lambda_layer import LambdaLayer 5 | from .swin_attn import WindowAttention 6 | 7 | 8 | def get_self_attn(attn_type): 9 | if attn_type == 'bottleneck': 10 | return BottleneckAttn 11 | elif attn_type == 'halo': 12 | return HaloAttn 13 | elif attn_type == 'lambda': 14 | return LambdaLayer 15 | elif attn_type == 'swin': 16 | return WindowAttention 17 | elif attn_type == 'involution': 18 | return Involution 19 | else: 20 | assert False, f"Unknown attn type ({attn_type})" 21 | 22 | 23 | def create_self_attn(attn_type, dim, stride=1, **kwargs): 24 | attn_fn = get_self_attn(attn_type) 25 | return attn_fn(dim, stride=stride, **kwargs) 26 | -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/evo_norm.py: -------------------------------------------------------------------------------- 1 | """EvoNormB0 (Batched) and EvoNormS0 (Sample) in PyTorch 2 | 3 | An attempt at getting decent performing EvoNorms running in PyTorch. 4 | While currently faster than other impl, still quite a ways off the built-in BN 5 | in terms of memory usage and throughput (roughly 5x mem, 1/2 - 1/3x speed). 6 | 7 | Still very much a WIP, fiddling with buffer usage, in-place/jit optimizations, and layouts. 
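Illustrative usage (an added sketch, not from the original file): both layers are
drop-in replacements for nn.BatchNorm2d on NCHW tensors, e.g.
EvoNormBatch2d(num_features=64) or EvoNormSample2d(num_features=64, groups=8).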
8 | 9 | Hacked together by / Copyright 2020 Ross Wightman 10 | """ 11 | 12 | import torch 13 | import torch.nn as nn 14 | 15 | 16 | class EvoNormBatch2d(nn.Module): 17 | def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-5, drop_block=None): 18 | super(EvoNormBatch2d, self).__init__() 19 | self.apply_act = apply_act # apply activation (non-linearity) 20 | self.momentum = momentum 21 | self.eps = eps 22 | param_shape = (1, num_features, 1, 1) 23 | self.weight = nn.Parameter(torch.ones(param_shape), requires_grad=True) 24 | self.bias = nn.Parameter(torch.zeros(param_shape), requires_grad=True) 25 | if apply_act: 26 | self.v = nn.Parameter(torch.ones(param_shape), requires_grad=True) 27 | self.register_buffer('running_var', torch.ones(1, num_features, 1, 1)) 28 | self.reset_parameters() 29 | 30 | def reset_parameters(self): 31 | nn.init.ones_(self.weight) 32 | nn.init.zeros_(self.bias) 33 | if self.apply_act: 34 | nn.init.ones_(self.v) 35 | 36 | def forward(self, x): 37 | assert x.dim() == 4, 'expected 4D input' 38 | x_type = x.dtype 39 | if self.training: 40 | var = x.var(dim=(0, 2, 3), unbiased=False, keepdim=True) 41 | n = x.numel() / x.shape[1] 42 | self.running_var.copy_( 43 | var.detach() * self.momentum * (n / (n - 1)) + self.running_var * (1 - self.momentum)) 44 | else: 45 | var = self.running_var 46 | 47 | if self.apply_act: 48 | v = self.v.to(dtype=x_type) 49 | d = x * v + (x.var(dim=(2, 3), unbiased=False, keepdim=True) + self.eps).sqrt().to(dtype=x_type) 50 | d = d.max((var + self.eps).sqrt().to(dtype=x_type)) 51 | x = x / d 52 | return x * self.weight + self.bias 53 | 54 | 55 | class EvoNormSample2d(nn.Module): 56 | def __init__(self, num_features, apply_act=True, groups=8, eps=1e-5, drop_block=None): 57 | super(EvoNormSample2d, self).__init__() 58 | self.apply_act = apply_act # apply activation (non-linearity) 59 | self.groups = groups 60 | self.eps = eps 61 | param_shape = (1, num_features, 1, 1) 62 | self.weight = nn.Parameter(torch.ones(param_shape), requires_grad=True) 63 | self.bias = nn.Parameter(torch.zeros(param_shape), requires_grad=True) 64 | if apply_act: 65 | self.v = nn.Parameter(torch.ones(param_shape), requires_grad=True) 66 | self.reset_parameters() 67 | 68 | def reset_parameters(self): 69 | nn.init.ones_(self.weight) 70 | nn.init.zeros_(self.bias) 71 | if self.apply_act: 72 | nn.init.ones_(self.v) 73 | 74 | def forward(self, x): 75 | assert x.dim() == 4, 'expected 4D input' 76 | B, C, H, W = x.shape 77 | assert C % self.groups == 0 78 | if self.apply_act: 79 | n = x * (x * self.v).sigmoid() 80 | x = x.reshape(B, self.groups, -1) 81 | x = n.reshape(B, self.groups, -1) / (x.var(dim=-1, unbiased=False, keepdim=True) + self.eps).sqrt() 82 | x = x.reshape(B, C, H, W) 83 | return x * self.weight + self.bias 84 | -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/helpers.py: -------------------------------------------------------------------------------- 1 | """ Layer/Module Helpers 2 | 3 | Hacked together by / Copyright 2020 Ross Wightman 4 | """ 5 | from itertools import repeat 6 | import collections.abc 7 | 8 | 9 | # From PyTorch internals 10 | def _ntuple(n): 11 | def parse(x): 12 | if isinstance(x, collections.abc.Iterable): 13 | return x 14 | return tuple(repeat(x, n)) 15 | return parse 16 | 17 | 18 | to_1tuple = _ntuple(1) 19 | to_2tuple = _ntuple(2) 20 | to_3tuple = _ntuple(3) 21 | to_4tuple = _ntuple(4) 22 | to_ntuple = _ntuple 23 | 24 | 25 | def make_divisible(v, divisor=8, 
min_value=None, round_limit=.9): 26 | min_value = min_value or divisor 27 | new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) 28 | # Make sure that round down does not go down by more than 10%. 29 | if new_v < round_limit * v: 30 | new_v += divisor 31 | return new_v -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/inplace_abn.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn as nn 3 | 4 | try: 5 | from inplace_abn.functions import inplace_abn, inplace_abn_sync 6 | has_iabn = True 7 | except ImportError: 8 | has_iabn = False 9 | 10 | def inplace_abn(x, weight, bias, running_mean, running_var, 11 | training=True, momentum=0.1, eps=1e-05, activation="leaky_relu", activation_param=0.01): 12 | raise ImportError( 13 | "Please install InplaceABN:'pip install git+https://github.com/mapillary/inplace_abn.git@v1.0.12'") 14 | 15 | def inplace_abn_sync(**kwargs): 16 | inplace_abn(**kwargs) 17 | 18 | 19 | class InplaceAbn(nn.Module): 20 | """Activated Batch Normalization 21 | 22 | This gathers a BatchNorm and an activation function in a single module 23 | 24 | Parameters 25 | ---------- 26 | num_features : int 27 | Number of feature channels in the input and output. 28 | eps : float 29 | Small constant to prevent numerical issues. 30 | momentum : float 31 | Momentum factor applied to compute running statistics. 32 | affine : bool 33 | If `True` apply learned scale and shift transformation after normalization. 34 | act_layer : str or nn.Module type 35 | Name or type of the activation functions, one of: `leaky_relu`, `elu` 36 | act_param : float 37 | Negative slope for the `leaky_relu` activation. 38 | """ 39 | 40 | def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, apply_act=True, 41 | act_layer="leaky_relu", act_param=0.01, drop_block=None): 42 | super(InplaceAbn, self).__init__() 43 | self.num_features = num_features 44 | self.affine = affine 45 | self.eps = eps 46 | self.momentum = momentum 47 | if apply_act: 48 | if isinstance(act_layer, str): 49 | assert act_layer in ('leaky_relu', 'elu', 'identity', '') 50 | self.act_name = act_layer if act_layer else 'identity' 51 | else: 52 | # convert act layer passed as type to string 53 | if act_layer == nn.ELU: 54 | self.act_name = 'elu' 55 | elif act_layer == nn.LeakyReLU: 56 | self.act_name = 'leaky_relu' 57 | elif act_layer == nn.Identity: 58 | self.act_name = 'identity' 59 | else: 60 | assert False, f'Invalid act layer {act_layer.__name__} for IABN' 61 | else: 62 | self.act_name = 'identity' 63 | self.act_param = act_param 64 | if self.affine: 65 | self.weight = nn.Parameter(torch.ones(num_features)) 66 | self.bias = nn.Parameter(torch.zeros(num_features)) 67 | else: 68 | self.register_parameter('weight', None) 69 | self.register_parameter('bias', None) 70 | self.register_buffer('running_mean', torch.zeros(num_features)) 71 | self.register_buffer('running_var', torch.ones(num_features)) 72 | self.reset_parameters() 73 | 74 | def reset_parameters(self): 75 | nn.init.constant_(self.running_mean, 0) 76 | nn.init.constant_(self.running_var, 1) 77 | if self.affine: 78 | nn.init.constant_(self.weight, 1) 79 | nn.init.constant_(self.bias, 0) 80 | 81 | def forward(self, x): 82 | output = inplace_abn( 83 | x, self.weight, self.bias, self.running_mean, self.running_var, 84 | self.training, self.momentum, self.eps, self.act_name, self.act_param) 85 | if isinstance(output, tuple): 86 | 
output = output[0] 87 | return output 88 | -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/involution.py: -------------------------------------------------------------------------------- 1 | """ PyTorch Involution Layer 2 | 3 | Official impl: https://github.com/d-li14/involution/blob/main/cls/mmcls/models/utils/involution_naive.py 4 | Paper: `Involution: Inverting the Inherence of Convolution for Visual Recognition` - https://arxiv.org/abs/2103.06255 5 | """ 6 | import torch.nn as nn 7 | from .conv_bn_act import ConvBnAct 8 | from .create_conv2d import create_conv2d 9 | 10 | 11 | class Involution(nn.Module): 12 | 13 | def __init__( 14 | self, 15 | channels, 16 | kernel_size=3, 17 | stride=1, 18 | group_size=16, 19 | reduction_ratio=4, 20 | norm_layer=nn.BatchNorm2d, 21 | act_layer=nn.ReLU, 22 | ): 23 | super(Involution, self).__init__() 24 | self.kernel_size = kernel_size 25 | self.stride = stride 26 | self.channels = channels 27 | self.group_size = group_size 28 | self.groups = self.channels // self.group_size 29 | self.conv1 = ConvBnAct( 30 | in_channels=channels, 31 | out_channels=channels // reduction_ratio, 32 | kernel_size=1, 33 | norm_layer=norm_layer, 34 | act_layer=act_layer) 35 | self.conv2 = self.conv = create_conv2d( 36 | in_channels=channels // reduction_ratio, 37 | out_channels=kernel_size**2 * self.groups, 38 | kernel_size=1, 39 | stride=1) 40 | self.avgpool = nn.AvgPool2d(stride, stride) if stride == 2 else nn.Identity() 41 | self.unfold = nn.Unfold(kernel_size, 1, (kernel_size-1)//2, stride) 42 | 43 | def forward(self, x): 44 | weight = self.conv2(self.conv1(self.avgpool(x))) 45 | B, C, H, W = weight.shape 46 | KK = int(self.kernel_size ** 2) 47 | weight = weight.view(B, self.groups, KK, H, W).unsqueeze(2) 48 | out = self.unfold(x).view(B, self.groups, self.group_size, KK, H, W) 49 | out = (weight * out).sum(dim=3).view(B, self.channels, H, W) 50 | return out 51 | -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/lambda_layer.py: -------------------------------------------------------------------------------- 1 | """ Lambda Layer 2 | 3 | Paper: `LambdaNetworks: Modeling Long-Range Interactions Without Attention` 4 | - https://arxiv.org/abs/2102.08602 5 | 6 | @misc{2102.08602, 7 | Author = {Irwan Bello}, 8 | Title = {LambdaNetworks: Modeling Long-Range Interactions Without Attention}, 9 | Year = {2021}, 10 | } 11 | 12 | Status: 13 | This impl is a WIP. Code snippets in the paper were used as reference but 14 | good chance some details are missing/wrong. 15 | 16 | I've only implemented local lambda conv based pos embeddings. 
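Illustrative shapes (an added sketch based on the code below): with the defaults
(num_heads=4, dim_head=16), LambdaLayer(dim=64) maps (B, 64, H, W) to
(B, 64, H, W); stride=2 appends a 2x2 average pool that halves H and W.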
17 | 18 | For a PyTorch impl that includes other embedding options checkout 19 | https://github.com/lucidrains/lambda-networks 20 | 21 | Hacked together by / Copyright 2021 Ross Wightman 22 | """ 23 | import torch 24 | from torch import nn 25 | import torch.nn.functional as F 26 | 27 | from .weight_init import trunc_normal_ 28 | 29 | 30 | class LambdaLayer(nn.Module): 31 | """Lambda Layer w/ lambda conv position embedding 32 | 33 | Paper: `LambdaNetworks: Modeling Long-Range Interactions Without Attention` 34 | - https://arxiv.org/abs/2102.08602 35 | """ 36 | def __init__( 37 | self, 38 | dim, dim_out=None, stride=1, num_heads=4, dim_head=16, r=7, qkv_bias=False): 39 | super().__init__() 40 | self.dim = dim 41 | self.dim_out = dim_out or dim 42 | self.dim_k = dim_head # query depth 'k' 43 | self.num_heads = num_heads 44 | assert self.dim_out % num_heads == 0, ' should be divided by num_heads' 45 | self.dim_v = self.dim_out // num_heads # value depth 'v' 46 | self.r = r # relative position neighbourhood (lambda conv kernel size) 47 | 48 | self.qkv = nn.Conv2d( 49 | dim, 50 | num_heads * dim_head + dim_head + self.dim_v, 51 | kernel_size=1, bias=qkv_bias) 52 | self.norm_q = nn.BatchNorm2d(num_heads * dim_head) 53 | self.norm_v = nn.BatchNorm2d(self.dim_v) 54 | 55 | # NOTE currently only supporting the local lambda convolutions for positional 56 | self.conv_lambda = nn.Conv3d(1, dim_head, (r, r, 1), padding=(r // 2, r // 2, 0)) 57 | 58 | self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity() 59 | 60 | def reset_parameters(self): 61 | trunc_normal_(self.qkv.weight, std=self.dim ** -0.5) 62 | trunc_normal_(self.conv_lambda.weight, std=self.dim_k ** -0.5) 63 | 64 | def forward(self, x): 65 | B, C, H, W = x.shape 66 | M = H * W 67 | 68 | qkv = self.qkv(x) 69 | q, k, v = torch.split(qkv, [ 70 | self.num_heads * self.dim_k, self.dim_k, self.dim_v], dim=1) 71 | q = self.norm_q(q).reshape(B, self.num_heads, self.dim_k, M).transpose(-1, -2) # B, num_heads, M, K 72 | v = self.norm_v(v).reshape(B, self.dim_v, M).transpose(-1, -2) # B, M, V 73 | k = F.softmax(k.reshape(B, self.dim_k, M), dim=-1) # B, K, M 74 | 75 | content_lam = k @ v # B, K, V 76 | content_out = q @ content_lam.unsqueeze(1) # B, num_heads, M, V 77 | 78 | position_lam = self.conv_lambda(v.reshape(B, 1, H, W, self.dim_v)) # B, H, W, V, K 79 | position_lam = position_lam.reshape(B, 1, self.dim_k, H * W, self.dim_v).transpose(2, 3) # B, 1, M, K, V 80 | position_out = (q.unsqueeze(-2) @ position_lam).squeeze(-2) # B, num_heads, M, V 81 | 82 | out = (content_out + position_out).transpose(3, 1).reshape(B, C, H, W) # B, C (num_heads * V), H, W 83 | out = self.pool(out) 84 | return out 85 | -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/linear.py: -------------------------------------------------------------------------------- 1 | """ Linear layer (alternate definition) 2 | """ 3 | import torch 4 | import torch.nn.functional as F 5 | from torch import nn as nn 6 | 7 | 8 | class Linear(nn.Linear): 9 | r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b` 10 | 11 | Wraps torch.nn.Linear to support AMP + torchscript usage by manually casting 12 | weight & bias to input.dtype to work around an issue w/ torch.addmm in this use case. 
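Example (illustrative):
    >>> fc = Linear(512, 10)
    >>> fc(torch.randn(4, 512)).shape
    torch.Size([4, 10])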
13 | """ 14 | def forward(self, input: torch.Tensor) -> torch.Tensor: 15 | if torch.jit.is_scripting(): 16 | bias = self.bias.to(dtype=input.dtype) if self.bias is not None else None 17 | return F.linear(input, self.weight.to(dtype=input.dtype), bias=bias) 18 | else: 19 | return F.linear(input, self.weight, self.bias) 20 | -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/median_pool.py: -------------------------------------------------------------------------------- 1 | """ Median Pool 2 | Hacked together by / Copyright 2020 Ross Wightman 3 | """ 4 | import torch.nn as nn 5 | import torch.nn.functional as F 6 | from .helpers import to_2tuple, to_4tuple 7 | 8 | 9 | class MedianPool2d(nn.Module): 10 | """ Median pool (usable as median filter when stride=1) module. 11 | 12 | Args: 13 | kernel_size: size of pooling kernel, int or 2-tuple 14 | stride: pool stride, int or 2-tuple 15 | padding: pool padding, int or 4-tuple (l, r, t, b) as in pytorch F.pad 16 | same: override padding and enforce same padding, boolean 17 | """ 18 | def __init__(self, kernel_size=3, stride=1, padding=0, same=False): 19 | super(MedianPool2d, self).__init__() 20 | self.k = to_2tuple(kernel_size) 21 | self.stride = to_2tuple(stride) 22 | self.padding = to_4tuple(padding) # convert to l, r, t, b 23 | self.same = same 24 | 25 | def _padding(self, x): 26 | if self.same: 27 | ih, iw = x.size()[2:] 28 | if ih % self.stride[0] == 0: 29 | ph = max(self.k[0] - self.stride[0], 0) 30 | else: 31 | ph = max(self.k[0] - (ih % self.stride[0]), 0) 32 | if iw % self.stride[1] == 0: 33 | pw = max(self.k[1] - self.stride[1], 0) 34 | else: 35 | pw = max(self.k[1] - (iw % self.stride[1]), 0) 36 | pl = pw // 2 37 | pr = pw - pl 38 | pt = ph // 2 39 | pb = ph - pt 40 | padding = (pl, pr, pt, pb) 41 | else: 42 | padding = self.padding 43 | return padding 44 | 45 | def forward(self, x): 46 | x = F.pad(x, self._padding(x), mode='reflect') 47 | x = x.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1], self.stride[1]) 48 | x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] 49 | return x 50 | -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/mixed_conv2d.py: -------------------------------------------------------------------------------- 1 | """ PyTorch Mixed Convolution 2 | 3 | Paper: MixConv: Mixed Depthwise Convolutional Kernels (https://arxiv.org/abs/1907.09595) 4 | 5 | Hacked together by / Copyright 2020 Ross Wightman 6 | """ 7 | 8 | import torch 9 | from torch import nn as nn 10 | 11 | from .conv2d_same import create_conv2d_pad 12 | 13 | 14 | def _split_channels(num_chan, num_groups): 15 | split = [num_chan // num_groups for _ in range(num_groups)] 16 | split[0] += num_chan - sum(split) 17 | return split 18 | 19 | 20 | class MixedConv2d(nn.ModuleDict): 21 | """ Mixed Grouped Convolution 22 | 23 | Based on MDConv and GroupedConv in MixNet impl: 24 | https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mixnet/custom_layers.py 25 | """ 26 | def __init__(self, in_channels, out_channels, kernel_size=3, 27 | stride=1, padding='', dilation=1, depthwise=False, **kwargs): 28 | super(MixedConv2d, self).__init__() 29 | 30 | kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size] 31 | num_groups = len(kernel_size) 32 | in_splits = _split_channels(in_channels, num_groups) 33 | out_splits = _split_channels(out_channels, num_groups) 34 | self.in_channels = 
sum(in_splits) 35 | self.out_channels = sum(out_splits) 36 | for idx, (k, in_ch, out_ch) in enumerate(zip(kernel_size, in_splits, out_splits)): 37 | conv_groups = in_ch if depthwise else 1 38 | # use add_module to keep key space clean 39 | self.add_module( 40 | str(idx), 41 | create_conv2d_pad( 42 | in_ch, out_ch, k, stride=stride, 43 | padding=padding, dilation=dilation, groups=conv_groups, **kwargs) 44 | ) 45 | self.splits = in_splits 46 | 47 | def forward(self, x): 48 | x_split = torch.split(x, self.splits, 1) 49 | x_out = [c(x_split[i]) for i, c in enumerate(self.values())] 50 | x = torch.cat(x_out, 1) 51 | return x 52 | -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/mlp.py: -------------------------------------------------------------------------------- 1 | """ MLP module w/ dropout and configurable activation layer 2 | 3 | Hacked together by / Copyright 2020 Ross Wightman 4 | """ 5 | from torch import nn as nn 6 | 7 | 8 | class Mlp(nn.Module): 9 | """ MLP as used in Vision Transformer, MLP-Mixer and related networks 10 | """ 11 | def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): 12 | super().__init__() 13 | out_features = out_features or in_features 14 | hidden_features = hidden_features or in_features 15 | self.fc1 = nn.Linear(in_features, hidden_features) 16 | self.act = act_layer() 17 | self.fc2 = nn.Linear(hidden_features, out_features) 18 | self.drop = nn.Dropout(drop) 19 | 20 | def forward(self, x): 21 | x = self.fc1(x) 22 | x = self.act(x) 23 | x = self.drop(x) 24 | x = self.fc2(x) 25 | x = self.drop(x) 26 | return x 27 | 28 | 29 | class GluMlp(nn.Module): 30 | """ MLP w/ GLU style gating 31 | See: https://arxiv.org/abs/1612.08083, https://arxiv.org/abs/2002.05202 32 | """ 33 | def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.Sigmoid, drop=0.): 34 | super().__init__() 35 | out_features = out_features or in_features 36 | hidden_features = hidden_features or in_features 37 | assert hidden_features % 2 == 0 38 | self.fc1 = nn.Linear(in_features, hidden_features) 39 | self.act = act_layer() 40 | self.fc2 = nn.Linear(hidden_features // 2, out_features) 41 | self.drop = nn.Dropout(drop) 42 | 43 | def forward(self, x): 44 | x = self.fc1(x) 45 | x, gates = x.chunk(2, dim=-1) 46 | x = x * self.act(gates) 47 | x = self.drop(x) 48 | x = self.fc2(x) 49 | x = self.drop(x) 50 | return x 51 | 52 | 53 | class GatedMlp(nn.Module): 54 | """ MLP as used in gMLP 55 | """ 56 | def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, 57 | gate_layer=None, drop=0.): 58 | super().__init__() 59 | out_features = out_features or in_features 60 | hidden_features = hidden_features or in_features 61 | self.fc1 = nn.Linear(in_features, hidden_features) 62 | self.act = act_layer() 63 | if gate_layer is not None: 64 | assert hidden_features % 2 == 0 65 | self.gate = gate_layer(hidden_features) 66 | hidden_features = hidden_features // 2 # FIXME base reduction on gate property? 
67 | else: 68 | self.gate = nn.Identity() 69 | self.fc2 = nn.Linear(hidden_features, out_features) 70 | self.drop = nn.Dropout(drop) 71 | 72 | def forward(self, x): 73 | x = self.fc1(x) 74 | x = self.act(x) 75 | x = self.drop(x) 76 | x = self.gate(x) 77 | x = self.fc2(x) 78 | x = self.drop(x) 79 | return x 80 | -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/norm.py: -------------------------------------------------------------------------------- 1 | """ Normalization layers and wrappers 2 | """ 3 | import torch 4 | import torch.nn as nn 5 | import torch.nn.functional as F 6 | 7 | 8 | class GroupNorm(nn.GroupNorm): 9 | def __init__(self, num_channels, num_groups, eps=1e-5, affine=True): 10 | # NOTE num_channels is swapped to first arg for consistency in swapping norm layers with BN 11 | super().__init__(num_groups, num_channels, eps=eps, affine=affine) 12 | 13 | def forward(self, x): 14 | return F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) 15 | -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/padding.py: -------------------------------------------------------------------------------- 1 | """ Padding Helpers 2 | 3 | Hacked together by / Copyright 2020 Ross Wightman 4 | """ 5 | import math 6 | from typing import List, Tuple 7 | 8 | import torch.nn.functional as F 9 | 10 | 11 | # Calculate symmetric padding for a convolution 12 | def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> int: 13 | padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 14 | return padding 15 | 16 | 17 | # Calculate asymmetric TensorFlow-like 'SAME' padding for a convolution 18 | def get_same_padding(x: int, k: int, s: int, d: int): 19 | return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0) 20 | 21 | 22 | # Can SAME padding for given args be done statically? 
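# e.g. (illustrative): k=3, s=1, d=1 is static ('SAME' reduces to a fixed pad of 1 per side),
# while k=3, s=2 depends on the input size, so padding must be applied dynamically.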
23 | def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_): 24 | return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0 25 | 26 | 27 | # Dynamically pad input x with 'SAME' padding for conv with specified args 28 | def pad_same(x, k: List[int], s: List[int], d: List[int] = (1, 1), value: float = 0): 29 | ih, iw = x.size()[-2:] 30 | pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(iw, k[1], s[1], d[1]) 31 | if pad_h > 0 or pad_w > 0: 32 | x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2], value=value) 33 | return x 34 | 35 | 36 | def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]: 37 | dynamic = False 38 | if isinstance(padding, str): 39 | # for any string padding, the padding will be calculated for you, one of three ways 40 | padding = padding.lower() 41 | if padding == 'same': 42 | # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact 43 | if is_static_pad(kernel_size, **kwargs): 44 | # static case, no extra overhead 45 | padding = get_padding(kernel_size, **kwargs) 46 | else: 47 | # dynamic 'SAME' padding, has runtime/GPU memory overhead 48 | padding = 0 49 | dynamic = True 50 | elif padding == 'valid': 51 | # 'VALID' padding, same as padding=0 52 | padding = 0 53 | else: 54 | # Default to PyTorch style 'same'-ish symmetric padding 55 | padding = get_padding(kernel_size, **kwargs) 56 | return padding, dynamic 57 | -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/patch_embed.py: -------------------------------------------------------------------------------- 1 | """ Image to Patch Embedding using Conv2d 2 | 3 | A convolution based approach to patchifying a 2D image w/ embedding projection. 4 | 5 | Based on the impl in https://github.com/google-research/vision_transformer 6 | 7 | Hacked together by / Copyright 2020 Ross Wightman 8 | """ 9 | 10 | from torch import nn as nn 11 | 12 | from .helpers import to_2tuple 13 | 14 | 15 | class PatchEmbed(nn.Module): 16 | """ 2D Image to Patch Embedding 17 | """ 18 | def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True): 19 | super().__init__() 20 | img_size = to_2tuple(img_size) 21 | patch_size = to_2tuple(patch_size) 22 | self.img_size = img_size 23 | self.patch_size = patch_size 24 | self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) 25 | self.num_patches = self.grid_size[0] * self.grid_size[1] 26 | self.flatten = flatten 27 | 28 | self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) 29 | self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() 30 | 31 | def forward(self, x): 32 | B, C, H, W = x.shape 33 | assert H == self.img_size[0] and W == self.img_size[1], \ 34 | f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." 
35 | x = self.proj(x) 36 | if self.flatten: 37 | x = x.flatten(2).transpose(1, 2) # BCHW -> BNC 38 | x = self.norm(x) 39 | return x 40 | -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/pool2d_same.py: -------------------------------------------------------------------------------- 1 | """ AvgPool2d w/ Same Padding 2 | 3 | Hacked together by / Copyright 2020 Ross Wightman 4 | """ 5 | import torch 6 | import torch.nn as nn 7 | import torch.nn.functional as F 8 | from typing import List, Tuple, Optional 9 | 10 | from .helpers import to_2tuple 11 | from .padding import pad_same, get_padding_value 12 | 13 | 14 | def avg_pool2d_same(x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0), 15 | ceil_mode: bool = False, count_include_pad: bool = True): 16 | # FIXME how to deal with count_include_pad vs not for external padding? 17 | x = pad_same(x, kernel_size, stride) 18 | return F.avg_pool2d(x, kernel_size, stride, (0, 0), ceil_mode, count_include_pad) 19 | 20 | 21 | class AvgPool2dSame(nn.AvgPool2d): 22 | """ Tensorflow like 'SAME' wrapper for 2D average pooling 23 | """ 24 | def __init__(self, kernel_size: int, stride=None, padding=0, ceil_mode=False, count_include_pad=True): 25 | kernel_size = to_2tuple(kernel_size) 26 | stride = to_2tuple(stride) 27 | super(AvgPool2dSame, self).__init__(kernel_size, stride, (0, 0), ceil_mode, count_include_pad) 28 | 29 | def forward(self, x): 30 | return avg_pool2d_same( 31 | x, self.kernel_size, self.stride, self.padding, self.ceil_mode, self.count_include_pad) 32 | 33 | 34 | def max_pool2d_same( 35 | x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0), 36 | dilation: List[int] = (1, 1), ceil_mode: bool = False): 37 | x = pad_same(x, kernel_size, stride, value=-float('inf')) 38 | return F.max_pool2d(x, kernel_size, stride, (0, 0), dilation, ceil_mode) 39 | 40 | 41 | class MaxPool2dSame(nn.MaxPool2d): 42 | """ Tensorflow like 'SAME' wrapper for 2D max pooling 43 | """ 44 | def __init__(self, kernel_size: int, stride=None, padding=0, dilation=1, ceil_mode=False): 45 | kernel_size = to_2tuple(kernel_size) 46 | stride = to_2tuple(stride) 47 | dilation = to_2tuple(dilation) 48 | super(MaxPool2dSame, self).__init__(kernel_size, stride, (0, 0), dilation, ceil_mode=ceil_mode) # NOTE the 5th positional arg of nn.MaxPool2d is return_indices, so ceil_mode must be passed by keyword (nn.MaxPool2d also has no count_include_pad) 49 | 50 | def forward(self, x): 51 | return max_pool2d_same(x, self.kernel_size, self.stride, self.padding, self.dilation, self.ceil_mode) 52 | 53 | 54 | def create_pool2d(pool_type, kernel_size, stride=None, **kwargs): 55 | stride = stride or kernel_size 56 | padding = kwargs.pop('padding', '') 57 | padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, **kwargs) 58 | if is_dynamic: 59 | if pool_type == 'avg': 60 | return AvgPool2dSame(kernel_size, stride=stride, **kwargs) 61 | elif pool_type == 'max': 62 | return MaxPool2dSame(kernel_size, stride=stride, **kwargs) 63 | else: 64 | assert False, f'Unsupported pool type {pool_type}' 65 | else: 66 | if pool_type == 'avg': 67 | return nn.AvgPool2d(kernel_size, stride=stride, padding=padding, **kwargs) 68 | elif pool_type == 'max': 69 | return nn.MaxPool2d(kernel_size, stride=stride, padding=padding, **kwargs) 70 | else: 71 | assert False, f'Unsupported pool type {pool_type}' 72 | -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/se.py: -------------------------------------------------------------------------------- 1 |
from torch import nn as nn 2 | import torch.nn.functional as F 3 | 4 | from .create_act import create_act_layer 5 | from .helpers import make_divisible 6 | 7 | 8 | class SEModule(nn.Module): 9 | """ SE Module as defined in original SE-Nets with a few additions 10 | Additions include: 11 | * min_channels can be specified to keep reduced channel count at a minimum (default: 8) 12 | * divisor can be specified to keep channels rounded to specified values (default: 1) 13 | * reduction channels can be specified directly by arg (if reduction_channels is set) 14 | * reduction channels can be specified by float ratio (if reduction_ratio is set) 15 | """ 16 | def __init__(self, channels, reduction=16, act_layer=nn.ReLU, gate_layer='sigmoid', 17 | reduction_ratio=None, reduction_channels=None, min_channels=8, divisor=1): 18 | super(SEModule, self).__init__() 19 | if reduction_channels is not None: 20 | reduction_channels = reduction_channels # direct specification highest priority, no rounding/min done 21 | elif reduction_ratio is not None: 22 | reduction_channels = make_divisible(channels * reduction_ratio, divisor, min_channels) 23 | else: 24 | reduction_channels = make_divisible(channels // reduction, divisor, min_channels) 25 | self.fc1 = nn.Conv2d(channels, reduction_channels, kernel_size=1, bias=True) 26 | self.act = act_layer(inplace=True) 27 | self.fc2 = nn.Conv2d(reduction_channels, channels, kernel_size=1, bias=True) 28 | self.gate = create_act_layer(gate_layer) 29 | 30 | def forward(self, x): 31 | x_se = x.mean((2, 3), keepdim=True) 32 | x_se = self.fc1(x_se) 33 | x_se = self.act(x_se) 34 | x_se = self.fc2(x_se) 35 | return x * self.gate(x_se) 36 | 37 | 38 | class EffectiveSEModule(nn.Module): 39 | """ 'Effective Squeeze-Excitation 40 | From `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 41 | """ 42 | def __init__(self, channels, gate_layer='hard_sigmoid'): 43 | super(EffectiveSEModule, self).__init__() 44 | self.fc = nn.Conv2d(channels, channels, kernel_size=1, padding=0) 45 | self.gate = create_act_layer(gate_layer, inplace=True) 46 | 47 | def forward(self, x): 48 | x_se = x.mean((2, 3), keepdim=True) 49 | x_se = self.fc(x_se) 50 | return x * self.gate(x_se) 51 | -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/separable_conv.py: -------------------------------------------------------------------------------- 1 | """ Depthwise Separable Conv Modules 2 | 3 | Basic DWS convs. Other variations of DWS exist with batch norm or activations between the 4 | DW and PW convs such as the Depthwise modules in MobileNetV2 / EfficientNet and Xception. 
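Illustrative decomposition (an added sketch): SeparableConv2d(32, 64, kernel_size=3)
runs a 3x3 depthwise conv over the 32 input channels, then a 1x1 pointwise conv to
64 channels, approximating a dense 3x3 conv with far fewer parameters.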
5 | 6 | Hacked together by / Copyright 2020 Ross Wightman 7 | """ 8 | from torch import nn as nn 9 | 10 | from .create_conv2d import create_conv2d 11 | from .create_norm_act import convert_norm_act 12 | 13 | 14 | class SeparableConvBnAct(nn.Module): 15 | """ Separable Conv w/ trailing Norm and Activation 16 | """ 17 | def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False, 18 | channel_multiplier=1.0, pw_kernel_size=1, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, 19 | apply_act=True, drop_block=None): 20 | super(SeparableConvBnAct, self).__init__() 21 | 22 | self.conv_dw = create_conv2d( 23 | in_channels, int(in_channels * channel_multiplier), kernel_size, 24 | stride=stride, dilation=dilation, padding=padding, depthwise=True) 25 | 26 | self.conv_pw = create_conv2d( 27 | int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias) 28 | 29 | norm_act_layer = convert_norm_act(norm_layer, act_layer) 30 | self.bn = norm_act_layer(out_channels, apply_act=apply_act, drop_block=drop_block) 31 | 32 | @property 33 | def in_channels(self): 34 | return self.conv_dw.in_channels 35 | 36 | @property 37 | def out_channels(self): 38 | return self.conv_pw.out_channels 39 | 40 | def forward(self, x): 41 | x = self.conv_dw(x) 42 | x = self.conv_pw(x) 43 | if self.bn is not None: 44 | x = self.bn(x) 45 | return x 46 | 47 | 48 | class SeparableConv2d(nn.Module): 49 | """ Separable Conv 50 | """ 51 | def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False, 52 | channel_multiplier=1.0, pw_kernel_size=1): 53 | super(SeparableConv2d, self).__init__() 54 | 55 | self.conv_dw = create_conv2d( 56 | in_channels, int(in_channels * channel_multiplier), kernel_size, 57 | stride=stride, dilation=dilation, padding=padding, depthwise=True) 58 | 59 | self.conv_pw = create_conv2d( 60 | int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias) 61 | 62 | @property 63 | def in_channels(self): 64 | return self.conv_dw.in_channels 65 | 66 | @property 67 | def out_channels(self): 68 | return self.conv_pw.out_channels 69 | 70 | def forward(self, x): 71 | x = self.conv_dw(x) 72 | x = self.conv_pw(x) 73 | return x 74 | -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/space_to_depth.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | class SpaceToDepth(nn.Module): 6 | def __init__(self, block_size=4): 7 | super().__init__() 8 | assert block_size == 4 9 | self.bs = block_size 10 | 11 | def forward(self, x): 12 | N, C, H, W = x.size() 13 | x = x.view(N, C, H // self.bs, self.bs, W // self.bs, self.bs) # (N, C, H//bs, bs, W//bs, bs) 14 | x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs) 15 | x = x.view(N, C * (self.bs ** 2), H // self.bs, W // self.bs) # (N, C*bs^2, H//bs, W//bs) 16 | return x 17 | 18 | 19 | @torch.jit.script 20 | class SpaceToDepthJit(object): 21 | def __call__(self, x: torch.Tensor): 22 | # assuming hard-coded that block_size==4 for acceleration 23 | N, C, H, W = x.size() 24 | x = x.view(N, C, H // 4, 4, W // 4, 4) # (N, C, H//bs, bs, W//bs, bs) 25 | x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs) 26 | x = x.view(N, C * 16, H // 4, W // 4) # (N, C*bs^2, H//bs, W//bs) 27 | return x 28 | 29 | 30 | class SpaceToDepthModule(nn.Module): 31 | def 
__init__(self, no_jit=False): 32 | super().__init__() 33 | if not no_jit: 34 | self.op = SpaceToDepthJit() 35 | else: 36 | self.op = SpaceToDepth() 37 | 38 | def forward(self, x): 39 | return self.op(x) 40 | 41 | 42 | class DepthToSpace(nn.Module): 43 | 44 | def __init__(self, block_size): 45 | super().__init__() 46 | self.bs = block_size 47 | 48 | def forward(self, x): 49 | N, C, H, W = x.size() 50 | x = x.view(N, self.bs, self.bs, C // (self.bs ** 2), H, W) # (N, bs, bs, C//bs^2, H, W) 51 | x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # (N, C//bs^2, H, bs, W, bs) 52 | x = x.view(N, C // (self.bs ** 2), H * self.bs, W * self.bs) # (N, C//bs^2, H * bs, W * bs) 53 | return x 54 | -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/split_attn.py: -------------------------------------------------------------------------------- 1 | """ Split Attention Conv2d (for ResNeSt Models) 2 | 3 | Paper: `ResNeSt: Split-Attention Networks` - /https://arxiv.org/abs/2004.08955 4 | 5 | Adapted from original PyTorch impl at https://github.com/zhanghang1989/ResNeSt 6 | 7 | Modified for torchscript compat, performance, and consistency with timm by Ross Wightman 8 | """ 9 | import torch 10 | import torch.nn.functional as F 11 | from torch import nn 12 | 13 | 14 | class RadixSoftmax(nn.Module): 15 | def __init__(self, radix, cardinality): 16 | super(RadixSoftmax, self).__init__() 17 | self.radix = radix 18 | self.cardinality = cardinality 19 | 20 | def forward(self, x): 21 | batch = x.size(0) 22 | if self.radix > 1: 23 | x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2) 24 | x = F.softmax(x, dim=1) 25 | x = x.reshape(batch, -1) 26 | else: 27 | x = torch.sigmoid(x) 28 | return x 29 | 30 | 31 | class SplitAttnConv2d(nn.Module): 32 | """Split-Attention Conv2d 33 | """ 34 | def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, 35 | dilation=1, groups=1, bias=False, radix=2, reduction_factor=4, 36 | act_layer=nn.ReLU, norm_layer=None, drop_block=None, **kwargs): 37 | super(SplitAttnConv2d, self).__init__() 38 | self.radix = radix 39 | self.drop_block = drop_block 40 | mid_chs = out_channels * radix 41 | attn_chs = max(in_channels * radix // reduction_factor, 32) 42 | 43 | self.conv = nn.Conv2d( 44 | in_channels, mid_chs, kernel_size, stride, padding, dilation, 45 | groups=groups * radix, bias=bias, **kwargs) 46 | self.bn0 = norm_layer(mid_chs) if norm_layer is not None else None 47 | self.act0 = act_layer(inplace=True) 48 | self.fc1 = nn.Conv2d(out_channels, attn_chs, 1, groups=groups) 49 | self.bn1 = norm_layer(attn_chs) if norm_layer is not None else None 50 | self.act1 = act_layer(inplace=True) 51 | self.fc2 = nn.Conv2d(attn_chs, mid_chs, 1, groups=groups) 52 | self.rsoftmax = RadixSoftmax(radix, groups) 53 | 54 | @property 55 | def in_channels(self): 56 | return self.conv.in_channels 57 | 58 | @property 59 | def out_channels(self): 60 | return self.fc1.out_channels 61 | 62 | def forward(self, x): 63 | x = self.conv(x) 64 | if self.bn0 is not None: 65 | x = self.bn0(x) 66 | if self.drop_block is not None: 67 | x = self.drop_block(x) 68 | x = self.act0(x) 69 | 70 | B, RC, H, W = x.shape 71 | if self.radix > 1: 72 | x = x.reshape((B, self.radix, RC // self.radix, H, W)) 73 | x_gap = x.sum(dim=1) 74 | else: 75 | x_gap = x 76 | x_gap = F.adaptive_avg_pool2d(x_gap, 1) 77 | x_gap = self.fc1(x_gap) 78 | if self.bn1 is not None: 79 | x_gap = self.bn1(x_gap) 80 | x_gap = self.act1(x_gap) 81 | x_attn = 
self.fc2(x_gap) 82 | 83 | x_attn = self.rsoftmax(x_attn).view(B, -1, 1, 1) 84 | if self.radix > 1: 85 | out = (x * x_attn.reshape((B, self.radix, RC // self.radix, 1, 1))).sum(dim=1) 86 | else: 87 | out = x * x_attn 88 | return out.contiguous() 89 | -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/test_time_pool.py: -------------------------------------------------------------------------------- 1 | """ Test Time Pooling (Average-Max Pool) 2 | 3 | Hacked together by / Copyright 2020 Ross Wightman 4 | """ 5 | 6 | import logging 7 | from torch import nn 8 | import torch.nn.functional as F 9 | 10 | from .adaptive_avgmax_pool import adaptive_avgmax_pool2d 11 | 12 | 13 | _logger = logging.getLogger(__name__) 14 | 15 | 16 | class TestTimePoolHead(nn.Module): 17 | def __init__(self, base, original_pool=7): 18 | super(TestTimePoolHead, self).__init__() 19 | self.base = base 20 | self.original_pool = original_pool 21 | base_fc = self.base.get_classifier() 22 | if isinstance(base_fc, nn.Conv2d): 23 | self.fc = base_fc 24 | else: 25 | self.fc = nn.Conv2d( 26 | self.base.num_features, self.base.num_classes, kernel_size=1, bias=True) 27 | self.fc.weight.data.copy_(base_fc.weight.data.view(self.fc.weight.size())) 28 | self.fc.bias.data.copy_(base_fc.bias.data.view(self.fc.bias.size())) 29 | self.base.reset_classifier(0) # delete original fc layer 30 | 31 | def forward(self, x): 32 | x = self.base.forward_features(x) 33 | x = F.avg_pool2d(x, kernel_size=self.original_pool, stride=1) 34 | x = self.fc(x) 35 | x = adaptive_avgmax_pool2d(x, 1) 36 | return x.view(x.size(0), -1) 37 | 38 | 39 | def apply_test_time_pool(model, config, use_test_size=True): 40 | test_time_pool = False 41 | if not hasattr(model, 'default_cfg') or not model.default_cfg: 42 | return model, False 43 | if use_test_size and 'test_input_size' in model.default_cfg: 44 | df_input_size = model.default_cfg['test_input_size'] 45 | else: 46 | df_input_size = model.default_cfg['input_size'] 47 | if config['input_size'][-1] > df_input_size[-1] and config['input_size'][-2] > df_input_size[-2]: 48 | _logger.info('Target input size %s > pretrained default %s, using test time pooling' % 49 | (str(config['input_size'][-2:]), str(df_input_size[-2:]))) 50 | model = TestTimePoolHead(model, original_pool=model.default_cfg['pool_size']) 51 | test_time_pool = True 52 | return model, test_time_pool 53 | -------------------------------------------------------------------------------- /model/backbones/timm/models/layers/weight_init.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import math 3 | import warnings 4 | 5 | from torch.nn.init import _calculate_fan_in_and_fan_out 6 | 7 | 8 | def _no_grad_trunc_normal_(tensor, mean, std, a, b): 9 | # Cut & paste from PyTorch official master until it's in a few official releases - RW 10 | # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf 11 | def norm_cdf(x): 12 | # Computes standard normal cumulative distribution function 13 | return (1. + math.erf(x / math.sqrt(2.))) / 2. 14 | 15 | if (mean < a - 2 * std) or (mean > b + 2 * std): 16 | warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " 17 | "The distribution of values may be incorrect.", 18 | stacklevel=2) 19 | 20 | with torch.no_grad(): 21 | # Values are generated by using a truncated uniform distribution and 22 | # then using the inverse CDF for the normal distribution. 
23 | # Get upper and lower cdf values 24 | l = norm_cdf((a - mean) / std) 25 | u = norm_cdf((b - mean) / std) 26 | 27 | # Uniformly fill tensor with values from [l, u], then translate to 28 | # [2l-1, 2u-1]. 29 | tensor.uniform_(2 * l - 1, 2 * u - 1) 30 | 31 | # Use inverse cdf transform for normal distribution to get truncated 32 | # standard normal 33 | tensor.erfinv_() 34 | 35 | # Transform to proper mean, std 36 | tensor.mul_(std * math.sqrt(2.)) 37 | tensor.add_(mean) 38 | 39 | # Clamp to ensure it's in the proper range 40 | tensor.clamp_(min=a, max=b) 41 | return tensor 42 | 43 | 44 | def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.): 45 | # type: (Tensor, float, float, float, float) -> Tensor 46 | r"""Fills the input Tensor with values drawn from a truncated 47 | normal distribution. The values are effectively drawn from the 48 | normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` 49 | with values outside :math:`[a, b]` redrawn until they are within 50 | the bounds. The method used for generating the random values works 51 | best when :math:`a \leq \text{mean} \leq b`. 52 | Args: 53 | tensor: an n-dimensional `torch.Tensor` 54 | mean: the mean of the normal distribution 55 | std: the standard deviation of the normal distribution 56 | a: the minimum cutoff value 57 | b: the maximum cutoff value 58 | Examples: 59 | >>> w = torch.empty(3, 5) 60 | >>> nn.init.trunc_normal_(w) 61 | """ 62 | return _no_grad_trunc_normal_(tensor, mean, std, a, b) 63 | 64 | 65 | def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'): 66 | fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor) 67 | if mode == 'fan_in': 68 | denom = fan_in 69 | elif mode == 'fan_out': 70 | denom = fan_out 71 | elif mode == 'fan_avg': 72 | denom = (fan_in + fan_out) / 2 73 | 74 | variance = scale / denom 75 | 76 | if distribution == "truncated_normal": 77 | # constant is stddev of standard normal truncated to (-2, 2) 78 | trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978) 79 | elif distribution == "normal": 80 | tensor.normal_(std=math.sqrt(variance)) 81 | elif distribution == "uniform": 82 | bound = math.sqrt(3 * variance) 83 | tensor.uniform_(-bound, bound) 84 | else: 85 | raise ValueError(f"invalid distribution {distribution}") 86 | 87 | 88 | def lecun_normal_(tensor): 89 | variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal') 90 | -------------------------------------------------------------------------------- /model/backbones/timm/optim/__init__.py: -------------------------------------------------------------------------------- 1 | from .adamp import AdamP 2 | from .adamw import AdamW 3 | from .adafactor import Adafactor 4 | from .adahessian import Adahessian 5 | from .lookahead import Lookahead 6 | from .nadam import Nadam 7 | from .novograd import NovoGrad 8 | from .nvnovograd import NvNovoGrad 9 | from .radam import RAdam 10 | from .rmsprop_tf import RMSpropTF 11 | from .sgdp import SGDP 12 | from .adabelief import AdaBelief 13 | from .optim_factory import create_optimizer, create_optimizer_v2, optimizer_kwargs 14 | -------------------------------------------------------------------------------- /model/backbones/timm/optim/novograd.py: -------------------------------------------------------------------------------- 1 | """NovoGrad Optimizer. 
2 | Original impl by Masashi Kimura (Convergence Lab): https://github.com/convergence-lab/novograd 3 | Paper: `Stochastic Gradient Methods with Layer-wise Adaptive Moments for Training of Deep Networks` 4 | - https://arxiv.org/abs/1905.11286 5 | """ 6 | 7 | import torch 8 | from torch.optim.optimizer import Optimizer 9 | import math 10 | 11 | 12 | class NovoGrad(Optimizer): 13 | def __init__(self, params, grad_averaging=False, lr=0.1, betas=(0.95, 0.98), eps=1e-8, weight_decay=0): 14 | defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) 15 | super(NovoGrad, self).__init__(params, defaults) 16 | self._lr = lr 17 | self._beta1 = betas[0] 18 | self._beta2 = betas[1] 19 | self._eps = eps 20 | self._wd = weight_decay 21 | self._grad_averaging = grad_averaging 22 | 23 | self._momentum_initialized = False 24 | 25 | def step(self, closure=None): 26 | loss = None 27 | if closure is not None: 28 | loss = closure() 29 | 30 | if not self._momentum_initialized: 31 | for group in self.param_groups: 32 | for p in group['params']: 33 | if p.grad is None: 34 | continue 35 | state = self.state[p] 36 | grad = p.grad.data 37 | if grad.is_sparse: 38 | raise RuntimeError('NovoGrad does not support sparse gradients') 39 | 40 | v = torch.norm(grad)**2 41 | m = grad/(torch.sqrt(v) + self._eps) + self._wd * p.data 42 | state['step'] = 0 43 | state['v'] = v 44 | state['m'] = m 45 | state['grad_ema'] = None 46 | self._momentum_initialized = True 47 | 48 | for group in self.param_groups: 49 | for p in group['params']: 50 | if p.grad is None: 51 | continue 52 | state = self.state[p] 53 | state['step'] += 1 54 | 55 | step, v, m = state['step'], state['v'], state['m'] 56 | grad_ema = state['grad_ema'] 57 | 58 | grad = p.grad.data 59 | g2 = torch.norm(grad)**2 60 | grad_ema = g2 if grad_ema is None else grad_ema * \ 61 | self._beta2 + g2 * (1. - self._beta2) 62 | grad *= 1.0 / (torch.sqrt(grad_ema) + self._eps) 63 | 64 | if self._grad_averaging: 65 | grad *= (1. - self._beta1) 66 | 67 | g2 = torch.norm(grad)**2 68 | v = self._beta2*v + (1. - self._beta2)*g2 69 | m = self._beta1*m + (grad / (torch.sqrt(v) + self._eps) + self._wd * p.data) 70 | bias_correction1 = 1 - self._beta1 ** step 71 | bias_correction2 = 1 - self._beta2 ** step 72 | step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1 73 | 74 | state['v'], state['m'] = v, m 75 | state['grad_ema'] = grad_ema 76 | p.data.add_(-step_size, m) 77 | return loss 78 | -------------------------------------------------------------------------------- /model/backbones/timm/optim/sgdp.py: -------------------------------------------------------------------------------- 1 | """ 2 | SGDP Optimizer Implementation copied from https://github.com/clovaai/AdamP/blob/master/adamp/sgdp.py 3 | 4 | Paper: `Slowing Down the Weight Norm Increase in Momentum-based Optimizers` - https://arxiv.org/abs/2006.08217 5 | Code: https://github.com/clovaai/AdamP 6 | 7 | Copyright (c) 2020-present NAVER Corp. 
8 | MIT license 9 | """ 10 | 11 | import torch 12 | import torch.nn as nn 13 | from torch.optim.optimizer import Optimizer, required 14 | import math 15 | 16 | class SGDP(Optimizer): 17 | def __init__(self, params, lr=required, momentum=0, dampening=0, 18 | weight_decay=0, nesterov=False, eps=1e-8, delta=0.1, wd_ratio=0.1): 19 | defaults = dict(lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, 20 | nesterov=nesterov, eps=eps, delta=delta, wd_ratio=wd_ratio) 21 | super(SGDP, self).__init__(params, defaults) 22 | 23 | def _channel_view(self, x): 24 | return x.view(x.size(0), -1) 25 | 26 | def _layer_view(self, x): 27 | return x.view(1, -1) 28 | 29 | def _cosine_similarity(self, x, y, eps, view_func): 30 | x = view_func(x) 31 | y = view_func(y) 32 | 33 | x_norm = x.norm(dim=1).add_(eps) 34 | y_norm = y.norm(dim=1).add_(eps) 35 | dot = (x * y).sum(dim=1) 36 | 37 | return dot.abs() / x_norm / y_norm 38 | 39 | def _projection(self, p, grad, perturb, delta, wd_ratio, eps): 40 | wd = 1 41 | expand_size = [-1] + [1] * (len(p.shape) - 1) 42 | for view_func in [self._channel_view, self._layer_view]: 43 | 44 | cosine_sim = self._cosine_similarity(grad, p.data, eps, view_func) 45 | 46 | if cosine_sim.max() < delta / math.sqrt(view_func(p.data).size(1)): 47 | p_n = p.data / view_func(p.data).norm(dim=1).view(expand_size).add_(eps) 48 | perturb -= p_n * view_func(p_n * perturb).sum(dim=1).view(expand_size) 49 | wd = wd_ratio 50 | 51 | return perturb, wd 52 | 53 | return perturb, wd 54 | 55 | def step(self, closure=None): 56 | loss = None 57 | if closure is not None: 58 | loss = closure() 59 | 60 | for group in self.param_groups: 61 | weight_decay = group['weight_decay'] 62 | momentum = group['momentum'] 63 | dampening = group['dampening'] 64 | nesterov = group['nesterov'] 65 | 66 | for p in group['params']: 67 | if p.grad is None: 68 | continue 69 | grad = p.grad.data 70 | state = self.state[p] 71 | 72 | # State initialization 73 | if len(state) == 0: 74 | state['momentum'] = torch.zeros_like(p.data) 75 | 76 | # SGD 77 | buf = state['momentum'] 78 | buf.mul_(momentum).add_(1 - dampening, grad) 79 | if nesterov: 80 | d_p = grad + momentum * buf 81 | else: 82 | d_p = buf 83 | 84 | # Projection 85 | wd_ratio = 1 86 | if len(p.shape) > 1: 87 | d_p, wd_ratio = self._projection(p, grad, d_p, group['delta'], group['wd_ratio'], group['eps']) 88 | 89 | # Weight decay 90 | if weight_decay != 0: 91 | p.data.mul_(1 - group['lr'] * group['weight_decay'] * wd_ratio / (1-momentum)) 92 | 93 | # Step 94 | p.data.add_(-group['lr'], d_p) 95 | 96 | return loss 97 | -------------------------------------------------------------------------------- /model/backbones/timm/scheduler/__init__.py: -------------------------------------------------------------------------------- 1 | from .cosine_lr import CosineLRScheduler 2 | from .plateau_lr import PlateauLRScheduler 3 | from .step_lr import StepLRScheduler 4 | from .tanh_lr import TanhLRScheduler 5 | from .scheduler_factory import create_scheduler 6 | -------------------------------------------------------------------------------- /model/backbones/timm/scheduler/scheduler_factory.py: -------------------------------------------------------------------------------- 1 | """ Scheduler Factory 2 | Hacked together by / Copyright 2020 Ross Wightman 3 | """ 4 | from .cosine_lr import CosineLRScheduler 5 | from .tanh_lr import TanhLRScheduler 6 | from .step_lr import StepLRScheduler 7 | from .plateau_lr import PlateauLRScheduler 8 | 9 | 10 | def 
create_scheduler(args, optimizer): 11 | num_epochs = args.epochs 12 | 13 | if getattr(args, 'lr_noise', None) is not None: 14 | lr_noise = getattr(args, 'lr_noise') 15 | if isinstance(lr_noise, (list, tuple)): 16 | noise_range = [n * num_epochs for n in lr_noise] 17 | if len(noise_range) == 1: 18 | noise_range = noise_range[0] 19 | else: 20 | noise_range = lr_noise * num_epochs 21 | else: 22 | noise_range = None 23 | 24 | lr_scheduler = None 25 | if args.sched == 'cosine': 26 | lr_scheduler = CosineLRScheduler( 27 | optimizer, 28 | t_initial=num_epochs, 29 | t_mul=getattr(args, 'lr_cycle_mul', 1.), 30 | lr_min=args.min_lr, 31 | decay_rate=args.decay_rate, 32 | warmup_lr_init=args.warmup_lr, 33 | warmup_t=args.warmup_epochs, 34 | cycle_limit=getattr(args, 'lr_cycle_limit', 1), 35 | t_in_epochs=True, 36 | noise_range_t=noise_range, 37 | noise_pct=getattr(args, 'lr_noise_pct', 0.67), 38 | noise_std=getattr(args, 'lr_noise_std', 1.), 39 | noise_seed=getattr(args, 'seed', 42), 40 | ) 41 | num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs 42 | elif args.sched == 'tanh': 43 | lr_scheduler = TanhLRScheduler( 44 | optimizer, 45 | t_initial=num_epochs, 46 | t_mul=getattr(args, 'lr_cycle_mul', 1.), 47 | lr_min=args.min_lr, 48 | warmup_lr_init=args.warmup_lr, 49 | warmup_t=args.warmup_epochs, 50 | cycle_limit=getattr(args, 'lr_cycle_limit', 1), 51 | t_in_epochs=True, 52 | noise_range_t=noise_range, 53 | noise_pct=getattr(args, 'lr_noise_pct', 0.67), 54 | noise_std=getattr(args, 'lr_noise_std', 1.), 55 | noise_seed=getattr(args, 'seed', 42), 56 | ) 57 | num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs 58 | elif args.sched == 'step': 59 | lr_scheduler = StepLRScheduler( 60 | optimizer, 61 | decay_t=args.decay_epochs, 62 | decay_rate=args.decay_rate, 63 | warmup_lr_init=args.warmup_lr, 64 | warmup_t=args.warmup_epochs, 65 | noise_range_t=noise_range, 66 | noise_pct=getattr(args, 'lr_noise_pct', 0.67), 67 | noise_std=getattr(args, 'lr_noise_std', 1.), 68 | noise_seed=getattr(args, 'seed', 42), 69 | ) 70 | elif args.sched == 'plateau': 71 | mode = 'min' if 'loss' in getattr(args, 'eval_metric', '') else 'max' 72 | lr_scheduler = PlateauLRScheduler( 73 | optimizer, 74 | decay_rate=args.decay_rate, 75 | patience_t=args.patience_epochs, 76 | lr_min=args.min_lr, 77 | mode=mode, 78 | warmup_lr_init=args.warmup_lr, 79 | warmup_t=args.warmup_epochs, 80 | cooldown_t=0, 81 | noise_range_t=noise_range, 82 | noise_pct=getattr(args, 'lr_noise_pct', 0.67), 83 | noise_std=getattr(args, 'lr_noise_std', 1.), 84 | noise_seed=getattr(args, 'seed', 42), 85 | ) 86 | 87 | return lr_scheduler, num_epochs 88 | -------------------------------------------------------------------------------- /model/backbones/timm/scheduler/step_lr.py: -------------------------------------------------------------------------------- 1 | """ Step Scheduler 2 | 3 | Basic step LR schedule with warmup, noise. 
4 | 5 | Hacked together by / Copyright 2020 Ross Wightman 6 | """ 7 | import math 8 | import torch 9 | 10 | from .scheduler import Scheduler 11 | 12 | 13 | class StepLRScheduler(Scheduler): 14 | """ 15 | """ 16 | 17 | def __init__(self, 18 | optimizer: torch.optim.Optimizer, 19 | decay_t: float, 20 | decay_rate: float = 1., 21 | warmup_t=0, 22 | warmup_lr_init=0, 23 | t_in_epochs=True, 24 | noise_range_t=None, 25 | noise_pct=0.67, 26 | noise_std=1.0, 27 | noise_seed=42, 28 | initialize=True, 29 | ) -> None: 30 | super().__init__( 31 | optimizer, param_group_field="lr", 32 | noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, 33 | initialize=initialize) 34 | 35 | self.decay_t = decay_t 36 | self.decay_rate = decay_rate 37 | self.warmup_t = warmup_t 38 | self.warmup_lr_init = warmup_lr_init 39 | self.t_in_epochs = t_in_epochs 40 | if self.warmup_t: 41 | self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] 42 | super().update_groups(self.warmup_lr_init) 43 | else: 44 | self.warmup_steps = [1 for _ in self.base_values] 45 | 46 | def _get_lr(self, t): 47 | if t < self.warmup_t: 48 | lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] 49 | else: 50 | lrs = [v * (self.decay_rate ** (t // self.decay_t)) for v in self.base_values] 51 | return lrs 52 | 53 | def get_epoch_values(self, epoch: int): 54 | if self.t_in_epochs: 55 | return self._get_lr(epoch) 56 | else: 57 | return None 58 | 59 | def get_update_values(self, num_updates: int): 60 | if not self.t_in_epochs: 61 | return self._get_lr(num_updates) 62 | else: 63 | return None 64 | -------------------------------------------------------------------------------- /model/backbones/timm/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .agc import adaptive_clip_grad 2 | from .checkpoint_saver import CheckpointSaver 3 | from .clip_grad import dispatch_clip_grad 4 | from .cuda import ApexScaler, NativeScaler 5 | from .distributed import distribute_bn, reduce_tensor 6 | from .jit import set_jit_legacy 7 | from .log import setup_default_logging, FormatterNoInfo 8 | from .metrics import AverageMeter, accuracy 9 | from .misc import natural_key, add_bool_arg 10 | from .model import unwrap_model, get_state_dict 11 | from .model_ema import ModelEma, ModelEmaV2 12 | from .random import random_seed 13 | from .summary import update_summary, get_outdir 14 | -------------------------------------------------------------------------------- /model/backbones/timm/utils/agc.py: -------------------------------------------------------------------------------- 1 | """ Adaptive Gradient Clipping 2 | 3 | An impl of AGC, as per (https://arxiv.org/abs/2102.06171): 4 | 5 | @article{brock2021high, 6 | author={Andrew Brock and Soham De and Samuel L. 
Smith and Karen Simonyan}, 7 | title={High-Performance Large-Scale Image Recognition Without Normalization}, 8 | journal={arXiv preprint arXiv:2102.06171}, 9 | year={2021} 10 | } 11 | 12 | Code references: 13 | * Official JAX impl (paper authors): https://github.com/deepmind/deepmind-research/tree/master/nfnets 14 | * Phil Wang's PyTorch gist: https://gist.github.com/lucidrains/0d6560077edac419ab5d3aa29e674d5c 15 | 16 | Hacked together by / Copyright 2021 Ross Wightman 17 | """ 18 | import torch 19 | 20 | 21 | def unitwise_norm(x, norm_type=2.0): 22 | if x.ndim <= 1: 23 | return x.norm(norm_type) 24 | else: 25 | # works for nn.ConvNd and nn.Linear where output dim is first in the kernel/weight tensor 26 | # might need special cases for other weights (possibly MHA) where this may not be true 27 | return x.norm(norm_type, dim=tuple(range(1, x.ndim)), keepdim=True) 28 | 29 | 30 | def adaptive_clip_grad(parameters, clip_factor=0.01, eps=1e-3, norm_type=2.0): 31 | if isinstance(parameters, torch.Tensor): 32 | parameters = [parameters] 33 | for p in parameters: 34 | if p.grad is None: 35 | continue 36 | p_data = p.detach() 37 | g_data = p.grad.detach() 38 | max_norm = unitwise_norm(p_data, norm_type=norm_type).clamp_(min=eps).mul_(clip_factor) 39 | grad_norm = unitwise_norm(g_data, norm_type=norm_type) 40 | clipped_grad = g_data * (max_norm / grad_norm.clamp(min=1e-6)) 41 | new_grads = torch.where(grad_norm < max_norm, g_data, clipped_grad) 42 | p.grad.detach().copy_(new_grads) 43 | -------------------------------------------------------------------------------- /model/backbones/timm/utils/clip_grad.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from timm.utils.agc import adaptive_clip_grad 4 | 5 | 6 | def dispatch_clip_grad(parameters, value: float, mode: str = 'norm', norm_type: float = 2.0): 7 | """ Dispatch to gradient clipping method 8 | 9 | Args: 10 | parameters (Iterable): model parameters to clip 11 | value (float): clipping value/factor/norm, mode dependent 12 | mode (str): clipping mode, one of 'norm', 'value', 'agc' 13 | norm_type (float): p-norm, default 2.0 14 | """ 15 | if mode == 'norm': 16 | torch.nn.utils.clip_grad_norm_(parameters, value, norm_type=norm_type) 17 | elif mode == 'value': 18 | torch.nn.utils.clip_grad_value_(parameters, value) 19 | elif mode == 'agc': 20 | adaptive_clip_grad(parameters, value, norm_type=norm_type) 21 | else: 22 | assert False, f"Unknown clip mode ({mode})."
23 | 24 | -------------------------------------------------------------------------------- /model/backbones/timm/utils/cuda.py: -------------------------------------------------------------------------------- 1 | """ CUDA / AMP utils 2 | 3 | Hacked together by / Copyright 2020 Ross Wightman 4 | """ 5 | import torch 6 | 7 | try: 8 | from apex import amp 9 | has_apex = True 10 | except ImportError: 11 | amp = None 12 | has_apex = False 13 | 14 | from .clip_grad import dispatch_clip_grad 15 | 16 | 17 | class ApexScaler: 18 | state_dict_key = "amp" 19 | 20 | def __call__(self, loss, optimizer, clip_grad=None, clip_mode='norm', parameters=None, create_graph=False): 21 | with amp.scale_loss(loss, optimizer) as scaled_loss: 22 | scaled_loss.backward(create_graph=create_graph) 23 | if clip_grad is not None: 24 | dispatch_clip_grad(amp.master_params(optimizer), clip_grad, mode=clip_mode) 25 | optimizer.step() 26 | 27 | def state_dict(self): 28 | if 'state_dict' in amp.__dict__: 29 | return amp.state_dict() 30 | 31 | def load_state_dict(self, state_dict): 32 | if 'load_state_dict' in amp.__dict__: 33 | amp.load_state_dict(state_dict) 34 | 35 | 36 | class NativeScaler: 37 | state_dict_key = "amp_scaler" 38 | 39 | def __init__(self): 40 | self._scaler = torch.cuda.amp.GradScaler() 41 | 42 | def __call__(self, loss, optimizer, clip_grad=None, clip_mode='norm', parameters=None, create_graph=False): 43 | self._scaler.scale(loss).backward(create_graph=create_graph) 44 | if clip_grad is not None: 45 | assert parameters is not None 46 | self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place 47 | dispatch_clip_grad(parameters, clip_grad, mode=clip_mode) 48 | self._scaler.step(optimizer) 49 | self._scaler.update() 50 | 51 | def state_dict(self): 52 | return self._scaler.state_dict() 53 | 54 | def load_state_dict(self, state_dict): 55 | self._scaler.load_state_dict(state_dict) 56 | -------------------------------------------------------------------------------- /model/backbones/timm/utils/distributed.py: -------------------------------------------------------------------------------- 1 | """ Distributed training/validation utils 2 | 3 | Hacked together by / Copyright 2020 Ross Wightman 4 | """ 5 | import torch 6 | from torch import distributed as dist 7 | 8 | from .model import unwrap_model 9 | 10 | 11 | def reduce_tensor(tensor, n): 12 | rt = tensor.clone() 13 | dist.all_reduce(rt, op=dist.ReduceOp.SUM) 14 | rt /= n 15 | return rt 16 | 17 | 18 | def distribute_bn(model, world_size, reduce=False): 19 | # ensure every node has the same running bn stats 20 | for bn_name, bn_buf in unwrap_model(model).named_buffers(recurse=True): 21 | if ('running_mean' in bn_name) or ('running_var' in bn_name): 22 | if reduce: 23 | # average bn stats across whole group 24 | torch.distributed.all_reduce(bn_buf, op=dist.ReduceOp.SUM) 25 | bn_buf /= float(world_size) 26 | else: 27 | # broadcast bn stats from rank 0 to whole group 28 | torch.distributed.broadcast(bn_buf, 0) 29 | -------------------------------------------------------------------------------- /model/backbones/timm/utils/jit.py: -------------------------------------------------------------------------------- 1 | """ JIT scripting/tracing utils 2 | 3 | Hacked together by / Copyright 2020 Ross Wightman 4 | """ 5 | import torch 6 | 7 | 8 | def set_jit_legacy(): 9 | """ Set JIT executor to legacy w/ support for op fusion 10 | This is hopefully a temporary need in 1.5/1.5.1/1.6 to restore performance due to changes 11 | in the JIT 
executor. These APIs are not supported so could change. 12 | """ 13 | # 14 | assert hasattr(torch._C, '_jit_set_profiling_executor'), "Old JIT behavior doesn't exist!" 15 | torch._C._jit_set_profiling_executor(False) 16 | torch._C._jit_set_profiling_mode(False) 17 | torch._C._jit_override_can_fuse_on_gpu(True) 18 | #torch._C._jit_set_texpr_fuser_enabled(True) 19 | -------------------------------------------------------------------------------- /model/backbones/timm/utils/log.py: -------------------------------------------------------------------------------- 1 | """ Logging helpers 2 | 3 | Hacked together by / Copyright 2020 Ross Wightman 4 | """ 5 | import logging 6 | import logging.handlers 7 | 8 | 9 | class FormatterNoInfo(logging.Formatter): 10 | def __init__(self, fmt='%(levelname)s: %(message)s'): 11 | logging.Formatter.__init__(self, fmt) 12 | 13 | def format(self, record): 14 | if record.levelno == logging.INFO: 15 | return str(record.getMessage()) 16 | return logging.Formatter.format(self, record) 17 | 18 | 19 | def setup_default_logging(default_level=logging.INFO, log_path=''): 20 | console_handler = logging.StreamHandler() 21 | console_handler.setFormatter(FormatterNoInfo()) 22 | logging.root.addHandler(console_handler) 23 | logging.root.setLevel(default_level) 24 | if log_path: 25 | file_handler = logging.handlers.RotatingFileHandler(log_path, maxBytes=(1024 ** 2 * 2), backupCount=3) 26 | file_formatter = logging.Formatter("%(asctime)s - %(name)20s: [%(levelname)8s] - %(message)s") 27 | file_handler.setFormatter(file_formatter) 28 | logging.root.addHandler(file_handler) 29 | -------------------------------------------------------------------------------- /model/backbones/timm/utils/metrics.py: -------------------------------------------------------------------------------- 1 | """ Eval metrics and related 2 | 3 | Hacked together by / Copyright 2020 Ross Wightman 4 | """ 5 | 6 | 7 | class AverageMeter: 8 | """Computes and stores the average and current value""" 9 | def __init__(self): 10 | self.reset() 11 | 12 | def reset(self): 13 | self.val = 0 14 | self.avg = 0 15 | self.sum = 0 16 | self.count = 0 17 | 18 | def update(self, val, n=1): 19 | self.val = val 20 | self.sum += val * n 21 | self.count += n 22 | self.avg = self.sum / self.count 23 | 24 | 25 | def accuracy(output, target, topk=(1,)): 26 | """Computes the accuracy over the k top predictions for the specified values of k""" 27 | maxk = max(topk) 28 | batch_size = target.size(0) 29 | _, pred = output.topk(maxk, 1, True, True) 30 | pred = pred.t() 31 | correct = pred.eq(target.reshape(1, -1).expand_as(pred)) 32 | return [correct[:k].reshape(-1).float().sum(0) * 100.
/ batch_size for k in topk] 33 | -------------------------------------------------------------------------------- /model/backbones/timm/utils/misc.py: -------------------------------------------------------------------------------- 1 | """ Misc utils 2 | 3 | Hacked together by / Copyright 2020 Ross Wightman 4 | """ 5 | import re 6 | 7 | 8 | def natural_key(string_): 9 | """See http://www.codinghorror.com/blog/archives/001018.html""" 10 | return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())] 11 | 12 | 13 | def add_bool_arg(parser, name, default=False, help=''): 14 | dest_name = name.replace('-', '_') 15 | group = parser.add_mutually_exclusive_group(required=False) 16 | group.add_argument('--' + name, dest=dest_name, action='store_true', help=help) 17 | group.add_argument('--no-' + name, dest=dest_name, action='store_false', help=help) 18 | parser.set_defaults(**{dest_name: default}) 19 | -------------------------------------------------------------------------------- /model/backbones/timm/utils/model.py: -------------------------------------------------------------------------------- 1 | """ Model / state_dict utils 2 | 3 | Hacked together by / Copyright 2020 Ross Wightman 4 | """ 5 | from .model_ema import ModelEma 6 | import torch 7 | import fnmatch 8 | 9 | def unwrap_model(model): 10 | if isinstance(model, ModelEma): 11 | return unwrap_model(model.ema) 12 | else: 13 | return model.module if hasattr(model, 'module') else model 14 | 15 | 16 | def get_state_dict(model, unwrap_fn=unwrap_model): 17 | return unwrap_fn(model).state_dict() 18 | 19 | 20 | def avg_sq_ch_mean(model, input, output): 21 | "calculate average channel square mean of output activations" 22 | return torch.mean(output.mean(axis=[0,2,3])**2).item() 23 | 24 | 25 | def avg_ch_var(model, input, output): 26 | "calculate average channel variance of output activations" 27 | return torch.mean(output.var(axis=[0,2,3])).item() 28 | 29 | 30 | def avg_ch_var_residual(model, input, output): 31 | "calculate average channel variance of output activations" 32 | return torch.mean(output.var(axis=[0,2,3])).item() 33 | 34 | 35 | class ActivationStatsHook: 36 | """Iterates through each of `model`'s modules and matches modules using unix pattern 37 | matching based on `hook_fn_locs` and registers `hook_fn` to the module if there is 38 | a match. 39 | 40 | Arguments: 41 | model (nn.Module): model from which we will extract the activation stats 42 | hook_fn_locs (List[str]): List of `hook_fn` locations based on Unix type string 43 | matching with the name of model's modules. 44 | hook_fns (List[Callable]): List of hook functions to be registered at every 45 | module in `layer_names`. 46 | 47 | Inspiration from https://docs.fast.ai/callback.hook.html. 48 | 49 | Refer to https://gist.github.com/amaarora/6e56942fcb46e67ba203f3009b30d950 for an example 50 | on how to plot Signal Propagation Plots using `ActivationStatsHook`.
51 | """ 52 | 53 | def __init__(self, model, hook_fn_locs, hook_fns): 54 | self.model = model 55 | self.hook_fn_locs = hook_fn_locs 56 | self.hook_fns = hook_fns 57 | if len(hook_fn_locs) != len(hook_fns): 58 | raise ValueError("Please provide `hook_fns` for each `hook_fn_locs`, \ 59 | their lengths are different.") 60 | self.stats = dict((hook_fn.__name__, []) for hook_fn in hook_fns) 61 | for hook_fn_loc, hook_fn in zip(hook_fn_locs, hook_fns): 62 | self.register_hook(hook_fn_loc, hook_fn) 63 | 64 | def _create_hook(self, hook_fn): 65 | def append_activation_stats(module, input, output): 66 | out = hook_fn(module, input, output) 67 | self.stats[hook_fn.__name__].append(out) 68 | return append_activation_stats 69 | 70 | def register_hook(self, hook_fn_loc, hook_fn): 71 | for name, module in self.model.named_modules(): 72 | if not fnmatch.fnmatch(name, hook_fn_loc): 73 | continue 74 | module.register_forward_hook(self._create_hook(hook_fn)) 75 | 76 | 77 | def extract_spp_stats(model, 78 | hook_fn_locs, 79 | hook_fns, 80 | input_shape=[8, 3, 224, 224]): 81 | """Extract average square channel mean and variance of activations during 82 | forward pass to plot Signal Propogation Plots (SPP). 83 | 84 | Paper: https://arxiv.org/abs/2101.08692 85 | 86 | Example Usage: https://gist.github.com/amaarora/6e56942fcb46e67ba203f3009b30d950 87 | """ 88 | x = torch.normal(0., 1., input_shape) 89 | hook = ActivationStatsHook(model, hook_fn_locs=hook_fn_locs, hook_fns=hook_fns) 90 | _ = model(x) 91 | return hook.stats 92 | -------------------------------------------------------------------------------- /model/backbones/timm/utils/random.py: -------------------------------------------------------------------------------- 1 | import random 2 | import numpy as np 3 | import torch 4 | 5 | 6 | def random_seed(seed=42, rank=0): 7 | torch.manual_seed(seed + rank) 8 | np.random.seed(seed + rank) 9 | random.seed(seed + rank) 10 | -------------------------------------------------------------------------------- /model/backbones/timm/utils/summary.py: -------------------------------------------------------------------------------- 1 | """ Summary utilities 2 | 3 | Hacked together by / Copyright 2020 Ross Wightman 4 | """ 5 | import csv 6 | import os 7 | from collections import OrderedDict 8 | try: 9 | import wandb 10 | except ImportError: 11 | pass 12 | 13 | def get_outdir(path, *paths, inc=False): 14 | outdir = os.path.join(path, *paths) 15 | if not os.path.exists(outdir): 16 | os.makedirs(outdir) 17 | elif inc: 18 | count = 1 19 | outdir_inc = outdir + '-' + str(count) 20 | while os.path.exists(outdir_inc): 21 | count = count + 1 22 | outdir_inc = outdir + '-' + str(count) 23 | assert count < 100 24 | outdir = outdir_inc 25 | os.makedirs(outdir) 26 | return outdir 27 | 28 | 29 | def update_summary(epoch, train_metrics, eval_metrics, filename, write_header=False, log_wandb=False): 30 | rowd = OrderedDict(epoch=epoch) 31 | rowd.update([('train_' + k, v) for k, v in train_metrics.items()]) 32 | rowd.update([('eval_' + k, v) for k, v in eval_metrics.items()]) 33 | if log_wandb: 34 | wandb.log(rowd) 35 | with open(filename, mode='a') as cf: 36 | dw = csv.DictWriter(cf, fieldnames=rowd.keys()) 37 | if write_header: # first iteration (epoch == 1 can't be used) 38 | dw.writeheader() 39 | dw.writerow(rowd) 40 | -------------------------------------------------------------------------------- /model/backbones/timm/version.py: -------------------------------------------------------------------------------- 1 | __version__ = 
'0.4.10' 2 | -------------------------------------------------------------------------------- /processor/__init__.py: -------------------------------------------------------------------------------- 1 | from .processor import do_train, do_inference -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | python=3.7.0 2 | pytorch=1.6.0 3 | torchvision=0.7.0 4 | timm=0.4.9 5 | albumentations=0.5.2 6 | imagecorruptions=1.1.2 7 | h5py=2.10.0 8 | cython=0.29.24 9 | yacs=0.1.6 -------------------------------------------------------------------------------- /scripts/eval_market.sh: -------------------------------------------------------------------------------- 1 | python test.py \ 2 | --config_file configs/Market/resnet_base.yml \ 3 | MODEL.DEVICE_ID "('1')" \ 4 | OUTPUT_DIR "('./logs/market/')" \ 5 | TEST.WEIGHT "('./logs/market/resnet50_120.pth')" 6 | -------------------------------------------------------------------------------- /scripts/eval_msmt.sh: -------------------------------------------------------------------------------- 1 | # python test.py \ 2 | # --config_file configs/MSMT17/resnet_base.yml \ 3 | # MODEL.DEVICE_ID "('2')" \ 4 | # OUTPUT_DIR "('./logs/msmt/')" \ 5 | # TEST.WEIGHT "('./logs/msmt/resnet50_120.pth')" 6 | 7 | python test.py \ 8 | --config_file configs/MSMT17/resnet_base.yml \ 9 | MODEL.DEVICE_ID "('0')" \ 10 | OUTPUT_DIR "('./logs/msmt/official/')" \ 11 | TEST.WEIGHT "('/home/wangzhiqiang/Github/CIL-ReID/logs/final_ckpt/msmt17_resnet50.pth')" 12 | -------------------------------------------------------------------------------- /scripts/train_market.sh: -------------------------------------------------------------------------------- 1 | # settings: augmix + soft erasing + pre part remix 2 | python train.py \ 3 | --config_file configs/Market/resnet_base.yml \ 4 | MODEL.DEVICE_ID "('0')" \ 5 | OUTPUT_DIR "('./logs/market')" INPUT.AUGMIX "(True)" \ 6 | INPUT.ERASING_TYPE "('soft')" \ 7 | INPUT.RE_PROB "(0.5)" \ 8 | INPUT.MIXING_COEFF "([0.5, 1.0])" 9 | -------------------------------------------------------------------------------- /scripts/train_msmt.sh: -------------------------------------------------------------------------------- 1 | # settings: augmix + soft erasing + pre part remix 2 | python train.py \ 3 | --config_file configs/MSMT17/resnet_base.yml \ 4 | MODEL.DEVICE_ID "('2')" \ 5 | OUTPUT_DIR "('./logs/msmt')" INPUT.AUGMIX "(True)" \ 6 | INPUT.ERASING_TYPE "('soft')" \ 7 | INPUT.RE_PROB "(0.5)" \ 8 | INPUT.MIXING_COEFF "([0.5, 1.0])" 9 | -------------------------------------------------------------------------------- /scripts/train_vit_base.sh: -------------------------------------------------------------------------------- 1 | python train.py --config_file configs/Market/vit_base.yml MODEL.DEVICE_ID "('0')"; -------------------------------------------------------------------------------- /scripts/train_vit_transreid.sh: -------------------------------------------------------------------------------- 1 | python train.py --config_file configs/Market/vit_transreid.yml MODEL.DEVICE_ID "('0')"; 2 | -------------------------------------------------------------------------------- /solver/__init__.py: -------------------------------------------------------------------------------- 1 | from .lr_scheduler import WarmupMultiStepLR 2 | from .make_optimizer import make_optimizer 
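The solver package exports exactly two names, so the intended call pattern is easy to reconstruct from the two files that follow. Below is a minimal sketch of wiring them together; `cfg.SOLVER.STEPS` and the milestone values are assumptions for illustration only (train.py in this repo actually builds its schedule with the cosine `create_scheduler` from solver/scheduler_factory.py):

from solver import WarmupMultiStepLR, make_optimizer

# make_optimizer returns one optimizer for the model and one for the center loss
optimizer, optimizer_center = make_optimizer(cfg, model, center_criterion)

scheduler = WarmupMultiStepLR(
    optimizer,
    milestones=cfg.SOLVER.STEPS,  # assumed config key, e.g. (40, 70); must be increasing
    gamma=0.1,                    # multiply the LR by gamma at each milestone
    warmup_factor=1.0 / 3,        # LR starts at base_lr / 3 ...
    warmup_iters=10,              # ... and ramps up linearly over the first 10 epochs
    warmup_method="linear",
)

for epoch in range(cfg.SOLVER.MAX_EPOCHS):
    # ... one training epoch ...
    scheduler.step()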
-------------------------------------------------------------------------------- /solver/lr_scheduler.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | from bisect import bisect_right 7 | import torch 8 | 9 | 10 | # FIXME ideally this would be achieved with a CombinedLRScheduler, 11 | # separating MultiStepLR with WarmupLR 12 | # but the current LRScheduler design doesn't allow it 13 | 14 | class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler): 15 | def __init__( 16 | self, 17 | optimizer, 18 | milestones, # steps 19 | gamma=0.1, 20 | warmup_factor=1.0 / 3, 21 | warmup_iters=500, 22 | warmup_method="linear", 23 | last_epoch=-1, 24 | ): 25 | if not list(milestones) == sorted(milestones): 26 | raise ValueError( 27 | "Milestones should be a list of" 28 | " increasing integers. Got {}".format(milestones) 29 | ) 30 | 31 | if warmup_method not in ("constant", "linear"): 32 | raise ValueError( 33 | "Only 'constant' or 'linear' warmup_method accepted, " 34 | "got {}".format(warmup_method) 35 | ) 36 | self.milestones = milestones 37 | self.gamma = gamma 38 | self.warmup_factor = warmup_factor 39 | self.warmup_iters = warmup_iters 40 | self.warmup_method = warmup_method 41 | super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch) 42 | 43 | def get_lr(self):  # called by _LRScheduler.step() 44 | warmup_factor = 1 45 | if self.last_epoch < self.warmup_iters: 46 | if self.warmup_method == "constant": 47 | warmup_factor = self.warmup_factor 48 | elif self.warmup_method == "linear": 49 | alpha = self.last_epoch / self.warmup_iters 50 | warmup_factor = self.warmup_factor * (1 - alpha) + alpha 51 | return [ 52 | base_lr 53 | * warmup_factor 54 | * self.gamma ** bisect_right(self.milestones, self.last_epoch) 55 | for base_lr in self.base_lrs 56 | ] 57 | -------------------------------------------------------------------------------- /solver/make_optimizer.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from .ranger import Ranger 3 | 4 | 5 | def make_optimizer(cfg, model, center_criterion): 6 | params = [] 7 | for key, value in model.named_parameters(): 8 | if not value.requires_grad: 9 | continue 10 | lr = cfg.SOLVER.BASE_LR 11 | weight_decay = cfg.SOLVER.WEIGHT_DECAY 12 | if "bias" in key: 13 | lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR 14 | weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS 15 | if cfg.SOLVER.LARGE_FC_LR: 16 | if "classifier" in key or "arcface" in key: 17 | lr = cfg.SOLVER.BASE_LR * 2 18 | print('Using two times learning rate for fc ') 19 | 20 | params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}] 21 | 22 | if cfg.SOLVER.OPTIMIZER_NAME == 'SGD': 23 | optimizer = getattr(torch.optim, cfg.SOLVER.OPTIMIZER_NAME)( 24 | params, momentum=cfg.SOLVER.MOMENTUM) 25 | elif cfg.SOLVER.OPTIMIZER_NAME == 'AdamW': 26 | optimizer = torch.optim.AdamW(params, 27 | lr=cfg.SOLVER.BASE_LR, 28 | weight_decay=cfg.SOLVER.WEIGHT_DECAY) 29 | elif cfg.SOLVER.OPTIMIZER_NAME == 'Ranger': 30 | optimizer = Ranger( 31 | params, 32 | lr=cfg.SOLVER.BASE_LR, 33 | ) 34 | else: 35 | optimizer = getattr(torch.optim, cfg.SOLVER.OPTIMIZER_NAME)(params) 36 | optimizer_center = torch.optim.SGD(center_criterion.parameters(), 37 | lr=cfg.SOLVER.CENTER_LR) 38 | 39 | return optimizer, optimizer_center 40 | -------------------------------------------------------------------------------- /solver/scheduler_factory.py:
-------------------------------------------------------------------------------- 1 | """ Scheduler Factory 2 | Hacked together by / Copyright 2020 Ross Wightman 3 | """ 4 | from .cosine_lr import CosineLRScheduler 5 | 6 | 7 | def create_scheduler(cfg, optimizer): 8 | num_epochs = cfg.SOLVER.MAX_EPOCHS 9 | # type 1 10 | # lr_min = 0.01 * cfg.SOLVER.BASE_LR 11 | # warmup_lr_init = 0.001 * cfg.SOLVER.BASE_LR 12 | # type 2 13 | lr_min = 0.002 * cfg.SOLVER.BASE_LR 14 | warmup_lr_init = 0.01 * cfg.SOLVER.BASE_LR 15 | # type 3 16 | # lr_min = 0.001 * cfg.SOLVER.BASE_LR 17 | # warmup_lr_init = 0.01 * cfg.SOLVER.BASE_LR 18 | 19 | warmup_t = cfg.SOLVER.WARMUP_EPOCHS 20 | noise_range = None 21 | 22 | lr_scheduler = CosineLRScheduler( 23 | optimizer, 24 | t_initial=num_epochs, 25 | lr_min=lr_min, 26 | t_mul= 1., 27 | decay_rate=0.1, 28 | warmup_lr_init=warmup_lr_init, 29 | warmup_t=warmup_t, 30 | cycle_limit=1, 31 | t_in_epochs=True, 32 | noise_range_t=noise_range, 33 | noise_pct= 0.67, 34 | noise_std= 1., 35 | noise_seed=42, 36 | ) 37 | 38 | return lr_scheduler 39 | -------------------------------------------------------------------------------- /test.py: -------------------------------------------------------------------------------- 1 | import os 2 | from config import cfg 3 | import argparse 4 | from datasets import make_dataloader 5 | from model import make_model 6 | from processor import do_inference 7 | from utils.logger import setup_logger 8 | 9 | if __name__ == "__main__": 10 | parser = argparse.ArgumentParser(description="ReID Baseline Training") 11 | parser.add_argument("--config_file", 12 | default="", 13 | help="path to config file", 14 | type=str) 15 | 16 | parser.add_argument("opts", 17 | help="Modify config options using the command-line", 18 | default=None, 19 | nargs=argparse.REMAINDER) 20 | 21 | args = parser.parse_args() 22 | 23 | if args.config_file != "": 24 | cfg.merge_from_file(args.config_file) 25 | cfg.merge_from_list(args.opts) 26 | cfg.freeze() 27 | 28 | output_dir = cfg.OUTPUT_DIR 29 | if output_dir and not os.path.exists(output_dir): 30 | os.makedirs(output_dir) 31 | 32 | logger = setup_logger("transreid", output_dir, if_train=False) 33 | logger.info(args) 34 | 35 | if args.config_file != "": 36 | logger.info("Loaded configuration file {}".format(args.config_file)) 37 | with open(args.config_file, 'r') as cf: 38 | config_str = "\n" + cf.read() 39 | logger.info(config_str) 40 | logger.info("Running with config:\n{}".format(cfg)) 41 | 42 | os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID 43 | 44 | train_loader, train_loader_normal, val_loader, corrupted_val_loader, corrupted_query_loader, corrupted_gallery_loader, num_query, num_classes, camera_num, view_num = make_dataloader( 45 | cfg) 46 | 47 | # market: 751 48 | # cuhk: 767 49 | # msmt: 1041 50 | num_classes = 751 51 | 52 | model = make_model(cfg, 53 | num_class=num_classes, 54 | camera_num=camera_num, 55 | view_num=view_num) 56 | model.load_param(cfg.TEST.WEIGHT) 57 | 58 | 59 | for eval_epoch in range(10): 60 | print("Eval epoch ", eval_epoch) 61 | print("=" * 64) 62 | loader_list = [ 63 | val_loader, corrupted_val_loader, corrupted_query_loader, 64 | corrupted_gallery_loader 65 | ] 66 | name = [ 67 | "Clean eval", "Corrupted eval", "Corrupted query", 68 | "Corrupted gallery" 69 | ] 70 | for loader_i in range(4): 71 | print("Evaluating on ", name[loader_i]) 72 | mINP, mAP, rank1, rank5, rank10 = do_inference( 73 | cfg, model, loader_list[loader_i], num_query) 74 | mINP = round(mINP * 100, 2) 75 | mAP = round(mAP 
* 100, 2) 76 | rank1 = round(rank1 * 100, 2) 77 | rank5 = round(rank5 * 100, 2) 78 | rank10 = round(rank10 * 100, 2) 79 | path = cfg.OUTPUT_DIR + '/' + cfg.DATASETS.NAMES + '_eval_info.csv' 80 | import csv 81 | with open(path, 'a+') as f: 82 | csv_write = csv.writer(f) 83 | data_row = [mINP, mAP, rank1, rank5, rank10] 84 | csv_write.writerow(data_row) 85 | -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | from utils.logger import setup_logger 2 | from datasets import make_dataloader 3 | from model import make_model 4 | from solver import make_optimizer 5 | from solver.scheduler_factory import create_scheduler 6 | from loss import make_loss 7 | from processor import do_train 8 | import random 9 | import torch 10 | import numpy as np 11 | import os 12 | import argparse 13 | # from timm.scheduler import create_scheduler 14 | from config import cfg 15 | 16 | 17 | def set_seed(seed): 18 | torch.manual_seed(seed) 19 | torch.cuda.manual_seed(seed) 20 | torch.cuda.manual_seed_all(seed) 21 | np.random.seed(seed) 22 | random.seed(seed) 23 | torch.backends.cudnn.deterministic = True 24 | torch.backends.cudnn.benchmark = True 25 | 26 | 27 | if __name__ == '__main__': 28 | 29 | parser = argparse.ArgumentParser(description="ReID Baseline Training") 30 | parser.add_argument("--config_file", 31 | default="", 32 | help="path to config file", 33 | type=str) 34 | 35 | parser.add_argument("opts", 36 | help="Modify config options using the command-line", 37 | default=None, 38 | nargs=argparse.REMAINDER) 39 | parser.add_argument("--local_rank", default=0, type=int) 40 | args = parser.parse_args() 41 | 42 | if args.config_file != "": 43 | cfg.merge_from_file(args.config_file) 44 | cfg.merge_from_list(args.opts) 45 | cfg.freeze() 46 | 47 | set_seed(cfg.SOLVER.SEED) 48 | 49 | 50 | output_dir = cfg.OUTPUT_DIR 51 | if output_dir and not os.path.exists(output_dir): 52 | os.makedirs(output_dir) 53 | 54 | logger = setup_logger("transreid", output_dir, if_train=True) 55 | logger.info("Saving model in the path :{}".format(cfg.OUTPUT_DIR)) 56 | logger.info(args) 57 | 58 | if args.config_file != "": 59 | logger.info("Loaded configuration file {}".format(args.config_file)) 60 | with open(args.config_file, 'r') as cf: 61 | config_str = "\n" + cf.read() 62 | logger.info(config_str) 63 | logger.info("Running with config:\n{}".format(cfg)) 64 | 65 | 66 | os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID 67 | train_loader, train_loader_normal, val_loader, corrupted_val_loader, corrupted_query_loader, corrupted_gallery_loader, num_query, num_classes, camera_num, view_num = make_dataloader( 68 | cfg) 69 | 70 | model = make_model(cfg, 71 | num_class=num_classes, 72 | camera_num=camera_num, 73 | view_num=view_num) 74 | 75 | loss_func, center_criterion = make_loss(cfg, num_classes=num_classes) 76 | 77 | optimizer, optimizer_center = make_optimizer(cfg, model, center_criterion) 78 | 79 | scheduler = create_scheduler(cfg, optimizer) 80 | 81 | do_train(cfg, model, center_criterion, train_loader, val_loader, optimizer, 82 | optimizer_center, scheduler, loss_func, num_query, 83 | args.local_rank) 84 | -------------------------------------------------------------------------------- /utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MinghuiChen43/CIL-ReID/ac44dfaf4c0ebd47089e785b8a67a270896f6125/utils/__init__.py 
-------------------------------------------------------------------------------- /utils/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MinghuiChen43/CIL-ReID/ac44dfaf4c0ebd47089e785b8a67a270896f6125/utils/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /utils/__pycache__/iotools.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MinghuiChen43/CIL-ReID/ac44dfaf4c0ebd47089e785b8a67a270896f6125/utils/__pycache__/iotools.cpython-38.pyc -------------------------------------------------------------------------------- /utils/__pycache__/logger.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MinghuiChen43/CIL-ReID/ac44dfaf4c0ebd47089e785b8a67a270896f6125/utils/__pycache__/logger.cpython-38.pyc -------------------------------------------------------------------------------- /utils/__pycache__/meter.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MinghuiChen43/CIL-ReID/ac44dfaf4c0ebd47089e785b8a67a270896f6125/utils/__pycache__/meter.cpython-38.pyc -------------------------------------------------------------------------------- /utils/__pycache__/metrics.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MinghuiChen43/CIL-ReID/ac44dfaf4c0ebd47089e785b8a67a270896f6125/utils/__pycache__/metrics.cpython-38.pyc -------------------------------------------------------------------------------- /utils/__pycache__/reranking.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MinghuiChen43/CIL-ReID/ac44dfaf4c0ebd47089e785b8a67a270896f6125/utils/__pycache__/reranking.cpython-38.pyc -------------------------------------------------------------------------------- /utils/iotools.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: sherlock 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | 7 | import errno 8 | import json 9 | import os 10 | 11 | import os.path as osp 12 | 13 | 14 | def mkdir_if_missing(directory): 15 | if not osp.exists(directory): 16 | try: 17 | os.makedirs(directory) 18 | except OSError as e: 19 | if e.errno != errno.EEXIST: 20 | raise 21 | 22 | 23 | def check_isfile(path): 24 | isfile = osp.isfile(path) 25 | if not isfile: 26 | print("=> Warning: no file found at '{}' (ignored)".format(path)) 27 | return isfile 28 | 29 | 30 | def read_json(fpath): 31 | with open(fpath, 'r') as f: 32 | obj = json.load(f) 33 | return obj 34 | 35 | 36 | def write_json(obj, fpath): 37 | mkdir_if_missing(osp.dirname(fpath)) 38 | with open(fpath, 'w') as f: 39 | json.dump(obj, f, indent=4, separators=(',', ': ')) 40 | -------------------------------------------------------------------------------- /utils/logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | import os.path as osp 5 | def setup_logger(name, save_dir, if_train): 6 | logger = logging.getLogger(name) 7 | logger.setLevel(logging.DEBUG) 8 | 9 | ch = logging.StreamHandler(stream=sys.stdout) 10 | ch.setLevel(logging.DEBUG) 11 | 
formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s") 12 | ch.setFormatter(formatter) 13 | logger.addHandler(ch) 14 | 15 | if save_dir: 16 | if not osp.exists(save_dir): 17 | os.makedirs(save_dir) 18 | if if_train: 19 | fh = logging.FileHandler(os.path.join(save_dir, "train_log.txt"), mode='w') 20 | else: 21 | fh = logging.FileHandler(os.path.join(save_dir, "test_log.txt"), mode='w') 22 | fh.setLevel(logging.DEBUG) 23 | fh.setFormatter(formatter) 24 | logger.addHandler(fh) 25 | 26 | return logger -------------------------------------------------------------------------------- /utils/meter.py: -------------------------------------------------------------------------------- 1 | class AverageMeter(object): 2 | """Computes and stores the average and current value""" 3 | 4 | def __init__(self): 5 | self.val = 0 6 | self.avg = 0 7 | self.sum = 0 8 | self.count = 0 9 | 10 | def reset(self): 11 | self.val = 0 12 | self.avg = 0 13 | self.sum = 0 14 | self.count = 0 15 | 16 | def update(self, val, n=1): 17 | self.val = val 18 | self.sum += val * n 19 | self.count += n 20 | self.avg = self.sum / self.count -------------------------------------------------------------------------------- /utils/rank_cylib/Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | $(PYTHON) setup.py build_ext --inplace 3 | rm -rf build 4 | clean: 5 | rm -rf build 6 | rm -f rank_cy.c *.so -------------------------------------------------------------------------------- /utils/rank_cylib/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MinghuiChen43/CIL-ReID/ac44dfaf4c0ebd47089e785b8a67a270896f6125/utils/rank_cylib/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /utils/rank_cylib/build/temp.linux-x86_64-3.7/rank_cy.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MinghuiChen43/CIL-ReID/ac44dfaf4c0ebd47089e785b8a67a270896f6125/utils/rank_cylib/build/temp.linux-x86_64-3.7/rank_cy.o -------------------------------------------------------------------------------- /utils/rank_cylib/rank_cy.cpython-37m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MinghuiChen43/CIL-ReID/ac44dfaf4c0ebd47089e785b8a67a270896f6125/utils/rank_cylib/rank_cy.cpython-37m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /utils/rank_cylib/setup.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup 2 | from distutils.extension import Extension 3 | from Cython.Build import cythonize 4 | import numpy as np 5 | 6 | 7 | def numpy_include(): 8 | try: 9 | numpy_include = np.get_include() 10 | except AttributeError: 11 | numpy_include = np.get_numpy_include() 12 | return numpy_include 13 | 14 | 15 | ext_modules = [ 16 | Extension( 17 | 'rank_cy', 18 | ['rank_cy.pyx'], 19 | include_dirs=[numpy_include()], 20 | ) 21 | ] 22 | 23 | setup(name='Cython-based reid evaluation code', 24 | ext_modules=cythonize(ext_modules)) 25 | -------------------------------------------------------------------------------- /utils/rank_cylib/setup.sh: -------------------------------------------------------------------------------- 1 | python setup.py build_ext --inplace 
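setup.sh above invokes the same in-place build as the Makefile. Once `python setup.py build_ext --inplace` has produced the rank_cy extension, a quick smoke test from inside utils/rank_cylib/ looks like the sketch below. The `evaluate_cy` entry point and its argument order follow torchreid's rank_cylib, from which this folder is adapted, and are an assumption here; as the test script after this notes, random inputs can trip the "all query identities do not appear in gallery" assertion, so simply rerun if that happens.

import numpy as np
import rank_cy  # the freshly built extension in this directory

num_q, num_g, max_rank = 30, 300, 5
distmat = np.random.rand(num_q, num_g).astype(np.float32) * 20
q_pids = np.random.randint(0, num_q, size=num_q)
g_pids = np.random.randint(0, num_g, size=num_g)
q_camids = np.random.randint(0, 5, size=num_q)
g_camids = np.random.randint(0, 5, size=num_g)

# assumed torchreid-style signature:
# evaluate_cy(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_metric_cuhk03)
cmc, mAP = rank_cy.evaluate_cy(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, False)
print('mAP = {:.4f}, rank-1 = {:.4f}'.format(mAP, cmc[0]))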
-------------------------------------------------------------------------------- /utils/rank_cylib/test_cython.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import sys 4 | import os.path as osp 5 | import timeit 6 | import numpy as np 7 | 8 | sys.path.insert(0, osp.dirname(osp.abspath(__file__)) + '/../../..') 9 | from torchreid import metrics 10 | 11 | """ 12 | Test the speed of cython-based evaluation code. The speed improvements 13 | can be much bigger when using the real reid data, which contains a larger 14 | amount of query and gallery images. 15 | 16 | Note: you might encounter the following error: 17 | 'AssertionError: Error: all query identities do not appear in gallery'. 18 | This is normal because the inputs are random numbers. Just try again. 19 | """ 20 | 21 | print('*** Compare running time ***') 22 | 23 | setup = ''' 24 | import sys 25 | import os.path as osp 26 | import numpy as np 27 | sys.path.insert(0, osp.dirname(osp.abspath(__file__)) + '/../../..') 28 | from torchreid import metrics 29 | num_q = 30 30 | num_g = 300 31 | max_rank = 5 32 | distmat = np.random.rand(num_q, num_g) * 20 33 | q_pids = np.random.randint(0, num_q, size=num_q) 34 | g_pids = np.random.randint(0, num_g, size=num_g) 35 | q_camids = np.random.randint(0, 5, size=num_q) 36 | g_camids = np.random.randint(0, 5, size=num_g) 37 | ''' 38 | 39 | print('=> Using market1501\'s metric') 40 | pytime = timeit.timeit('metrics.evaluate_rank(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_cython=False)', setup=setup, number=20) 41 | cytime = timeit.timeit('metrics.evaluate_rank(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_cython=True)', setup=setup, number=20) 42 | print('Python time: {} s'.format(pytime)) 43 | print('Cython time: {} s'.format(cytime)) 44 | print('Cython is {} times faster than python\n'.format(pytime / cytime)) 45 | 46 | print('=> Using cuhk03\'s metric') 47 | pytime = timeit.timeit('metrics.evaluate_rank(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_metric_cuhk03=True, use_cython=False)', setup=setup, number=20) 48 | cytime = timeit.timeit('metrics.evaluate_rank(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_metric_cuhk03=True, use_cython=True)', setup=setup, number=20) 49 | print('Python time: {} s'.format(pytime)) 50 | print('Cython time: {} s'.format(cytime)) 51 | print('Cython is {} times faster than python\n'.format(pytime / cytime)) 52 | 53 | """ 54 | print("=> Check precision") 55 | 56 | num_q = 30 57 | num_g = 300 58 | max_rank = 5 59 | distmat = np.random.rand(num_q, num_g) * 20 60 | q_pids = np.random.randint(0, num_q, size=num_q) 61 | g_pids = np.random.randint(0, num_g, size=num_g) 62 | q_camids = np.random.randint(0, 5, size=num_q) 63 | g_camids = np.random.randint(0, 5, size=num_g) 64 | 65 | cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_cython=False) 66 | print("Python:\nmAP = {} \ncmc = {}\n".format(mAP, cmc)) 67 | cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_cython=True) 68 | print("Cython:\nmAP = {} \ncmc = {}\n".format(mAP, cmc)) 69 | """ --------------------------------------------------------------------------------
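For readers who want to sanity-check what the Cython kernel benchmarked above actually computes, here is a pure-NumPy reference for the single-query case of the market1501-style metric: rank the gallery by distance, discard same-identity/same-camera "junk" entries, then read CMC and average precision off the binary match vector. This is an illustrative sketch of the standard ReID evaluation, not the repo's exact implementation:

import numpy as np

def single_query_cmc_ap(dist, q_pid, q_camid, g_pids, g_camids, max_rank=50):
    order = np.argsort(dist)                       # gallery indices, nearest first
    # drop gallery images of the same person taken by the same camera as the query
    keep = ~((g_pids[order] == q_pid) & (g_camids[order] == q_camid))
    matches = (g_pids[order] == q_pid).astype(np.int32)[keep]
    if not matches.any():
        return None, None                          # this query id never appears in the gallery
    cmc = matches.cumsum()
    cmc[cmc > 1] = 1                               # 1 from the first correct hit onward
    precision = matches.cumsum() / np.arange(1.0, matches.size + 1.0)
    ap = (precision * matches).sum() / matches.sum()   # average precision for this query
    return cmc[:max_rank], ap

The dataset-level numbers reported by test.py (mAP, rank-1/5/10, and mINP in its more involved form) then follow by averaging ap and the cmc curves over all valid queries.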