├── .DS_Store
├── .gitignore
├── README.md
├── conf
│   ├── resnet18.yaml
│   ├── resnet18_multilabel.yaml
│   └── resnet50_multilabel.yaml
├── data
│   ├── .DS_Store
│   ├── data.csv
│   ├── data_mutilabel.csv
│   ├── img
│   │   ├── 0male
│   │   │   ├── 0_1.jpg
│   │   │   ├── 0_3.jpg
│   │   │   └── 1_2.jpg
│   │   └── 1female
│   │       ├── 1_1.jpg
│   │       ├── 1_2.jpg
│   │       ├── 1_4.jpg
│   │       ├── 2_3.jpg
│   │       ├── 2_5.jpg
│   │       └── 2_6.jpg
│   ├── test.csv
│   └── train.csv
├── docs
│   └── Tensorrt_installation_guide_on_Ubuntu1804.md
├── infer_multi_label.py
├── infer_multi_task.py
├── onnx_tensorrt
│   ├── __init__.py
│   ├── backend.py
│   ├── config.py
│   └── tensorrt_engine.py
├── qdnet
│   ├── conf
│   │   ├── config.py
│   │   └── constant.py
│   ├── dataaug
│   │   └── dataaug.py
│   ├── dataset
│   │   ├── dataset.py
│   │   ├── dataset_multi_label.py
│   │   └── dataset_multi_task.py
│   ├── loss
│   │   ├── ce_label_smoothing.py
│   │   ├── focal_loss.py
│   │   ├── loss.py
│   │   └── multilabel_loss.py
│   ├── models
│   │   ├── effnet.py
│   │   ├── metric_strategy.py
│   │   ├── resnest.py
│   │   ├── resnet.py
│   │   ├── se_resnext.py
│   │   └── squeezenet.py
│   └── optimizer
│       └── optimizer.py
├── qdnet_classifier
│   ├── classifier_multi_label.py
│   ├── classifier_multi_task.py
│   └── models
│       ├── __init__.py
│       ├── byobnet.py
│       ├── cspnet.py
│       ├── densenet.py
│       ├── dla.py
│       ├── dpn.py
│       ├── efficientnet.py
│       ├── efficientnet_blocks.py
│       ├── efficientnet_builder.py
│       ├── factory.py
│       ├── features.py
│       ├── gluon_resnet.py
│       ├── gluon_xception.py
│       ├── helpers.py
│       ├── hrnet.py
│       ├── inception_resnet_v2.py
│       ├── inception_v3.py
│       ├── inception_v4.py
│       ├── layers
│       │   ├── __init__.py
│       │   ├── activations.py
│       │   ├── activations_jit.py
│       │   ├── activations_me.py
│       │   ├── adaptive_avgmax_pool.py
│       │   ├── anti_aliasing.py
│       │   ├── blur_pool.py
│       │   ├── cbam.py
│       │   ├── classifier.py
│       │   ├── cond_conv2d.py
│       │   ├── config.py
│       │   ├── conv2d_same.py
│       │   ├── conv_bn_act.py
│       │   ├── create_act.py
│       │   ├── create_attn.py
│       │   ├── create_conv2d.py
│       │   ├── create_norm_act.py
│       │   ├── drop.py
│       │   ├── eca.py
│       │   ├── evo_norm.py
│       │   ├── helpers.py
│       │   ├── inplace_abn.py
│       │   ├── linear.py
│       │   ├── median_pool.py
│       │   ├── mixed_conv2d.py
│       │   ├── norm_act.py
│       │   ├── padding.py
│       │   ├── pool2d_same.py
│       │   ├── se.py
│       │   ├── selective_kernel.py
│       │   ├── separable_conv.py
│       │   ├── space_to_depth.py
│       │   ├── split_attn.py
│       │   ├── split_batchnorm.py
│       │   ├── std_conv.py
│       │   ├── test_time_pool.py
│       │   └── weight_init.py
│       ├── mobilenetv3.py
│       ├── multi_label_model.py
│       ├── nasnet.py
│       ├── nfnet.py
│       ├── pnasnet.py
│       ├── pruned
│       │   ├── ecaresnet101d_pruned.txt
│       │   ├── ecaresnet50d_pruned.txt
│       │   ├── efficientnet_b1_pruned.txt
│       │   ├── efficientnet_b2_pruned.txt
│       │   └── efficientnet_b3_pruned.txt
│       ├── registry.py
│       ├── regnet.py
│       ├── res2net.py
│       ├── resnest.py
│       ├── resnet.py
│       ├── resnetv2.py
│       ├── rexnet.py
│       ├── selecsls.py
│       ├── senet.py
│       ├── sknet.py
│       ├── tresnet.py
│       ├── vgg.py
│       ├── vision_transformer.py
│       ├── vovnet.py
│       ├── xception.py
│       └── xception_aligned.py
├── requirements.txt
├── serving
│   ├── README.md
│   ├── core
│   │   ├── inference.py
│   │   ├── models.py
│   │   └── url2img.py
│   ├── docker-compose.yml
│   ├── dockerfile
│   ├── flask_run.py
│   ├── tests
│   │   ├── post.py
│   │   └── test_qdmodel.py
│   └── utils
│       └── logging.py
├── tools
│   ├── data_clean.py
│   ├── data_preprocess_multi_label.py
│   ├── data_preprocess_multi_task.py
│   ├── generate_csv.py
│   ├── generate_label.py
│   ├── generate_label_tree.py
│   ├── label_json.py
│   ├── onnx_to_tensorrt_multi_task.py
│   ├── pytorch_to_onnx_muliti_task.py
│   ├── search_father.py
│   ├── search_leaf.py
│   └── torch2trt_new.py
├── train_multi_label.py
└── train_multi_task.py
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MachineLP/Pytorch_multi_task_classifier/c2c3686bfe0348bbf61afddc191cde3961bcbcd8/.DS_Store
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *__pycache__
2 | __pycache__
3 | .idea/
4 | output/
5 | inputs/
6 | thumbnails/
7 | .vscode/
8 | venv/
9 | .env
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # Image multi-task classification
4 |
5 | Easy-to-use/Easy-to-deploy/Easy-to-develop
6 |
7 |
8 |
9 |
10 | | *** | options | example |
11 | | :-----------------: | :---------:| :---------:|
12 | | models | (... etc.) | [1](./qdnet_classifier/models/) |
13 | | metric | (Swish/ArcMarginProduct_subcenter/ArcFaceLossAdaptiveMargin/...) | [2](./qdnet/models/metric_strategy.py) |
14 | | data aug | (rotate/flip/..., mixup/cutmix) | [3](./qdnet/dataaug/) |
15 | | loss | (ce_loss/ce_smothing_loss/focal_loss/bce_loss/...) | [4](./qdnet/loss/) |
16 | | deploy | (flask/grpc/BentoML, etc.) | [5](./serving/) |
17 | | onnx/trt | () | [6](./tools/) |
18 |
19 |
20 | #
21 |
22 | ## train/test/deploy
23 | 0. Data format transform
24 | ```
25 | git clone https://github.com/MachineLP/PyTorch_image_classifier
26 | cd PyTorch_image_classifier
27 | pip install -r requirements.txt
28 | python tools/data_preprocess_multi_task.py --data_dir "./data/data.csv" --n_splits 3 --output_dir "./data/train.csv" --random_state 2020
29 | ```
30 |
31 | ## resnet18
32 | 1. Modify the configuration file
33 |
34 | ```
35 | # edit the config in place, or copy it to a new name first to keep the original
36 | vim conf/resnet18.yaml
37 | ```
38 |
39 | 2. Train:
40 |
41 | ```
42 | python train_multi_task.py --config_path conf/resnet18.yaml
43 | ```
44 |
45 | 3. Infer:
46 | ```
47 | python infer_multi_task.py --config_path "conf/resnet18.yaml" --img_path "./data/img/0male/1_2.jpg" --fold "0"
48 | pre>>>>> [0] [0.6254628] [2] [0.8546583]
49 | python infer_multi_task.py --config_path "conf/resnet18.yaml" --img_path "./data/img/1female/2_5.jpg" --fold "1"
50 | ```
51 |
52 |
53 | 4. Model conversion to ONNX/TensorRT ( https://github.com/NVIDIA-AI-IOT/torch2trt ) ([Tensorrt installation guide on Ubuntu1804](./docs/Tensorrt_installation_guide_on_Ubuntu1804.md))
54 |
55 | ```
56 | onnx: python tools/pytorch_to_onnx_multi_task.py --config_path "conf/resnet18.yaml" --img_path "./data/img/0male/1_2.jpg" --batch_size 4 --fold 0 --save_path "lp.onnx"
57 | 
58 | tensorrt: python tools/onnx_to_tensorrt_multi_task.py --config_path "conf/resnet18.yaml" --img_path "./data/img/0male/1_2.jpg" --batch_size 4 --fold 0 --save_path "lp_pp.onnx" --trt_save_path "lp.trt"
59 | ```
60 |
61 |
62 | 5. Deploy the model
63 | [serving](./serving/)
64 |
65 |
66 |
67 | #
68 |
69 | #
70 |
71 | #
72 |
73 | #
74 |
75 | #
76 |
77 | #
78 |
79 | #
80 |
81 | #### ref
82 | ```
83 | (1)https://github.com/haqishen/SIIM-ISIC-Melanoma-Classification-1st-Place-Solution
84 | (2)https://github.com/BADBADBADBOY/pytorchOCR
85 | (3)https://github.com/MachineLP/QDServing
86 | (4)https://github.com/bentoml/BentoML
87 | (5)mixup-cutmix:https://blog.csdn.net/u014365862/article/details/104216086
88 | (6)focalloss:https://blog.csdn.net/u014365862/article/details/104216192
89 | (7)https://blog.csdn.net/u014365862/article/details/106728375 / https://blog.csdn.net/u014365862/article/details/106728402
90 | ```
91 |
92 |
93 |
94 |
95 |
96 |
--------------------------------------------------------------------------------
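Step 4 of the README converts the trained network to ONNX before building a TensorRT engine. The snippet below is only a minimal sketch of that kind of `torch.onnx.export` call, not the repo's conversion script under `tools/`; the toy model, input shape, and opset version are assumptions.

```python
import torch
import torch.nn as nn

def export_to_onnx(model, save_path="lp.onnx", batch_size=4, image_size=156):
    """Trace the model with a dummy batch and write an ONNX graph."""
    model.eval()
    dummy = torch.randn(batch_size, 3, image_size, image_size)
    torch.onnx.export(
        model, dummy, save_path,
        input_names=["input"], output_names=["output"],
        opset_version=11,
    )

if __name__ == "__main__":
    # stand-in network; the real script exports the trained multi-task model
    toy = nn.Sequential(
        nn.Conv2d(3, 8, 3), nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(8, 4)
    )
    export_to_onnx(toy)
```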
/conf/resnet18.yaml:
--------------------------------------------------------------------------------
1 |
2 | data_dir : "./data/"
3 | data_folder : "./data/"
4 | image_size : 156
5 | enet_type : "resnet18" # tf_efficientnet_b4_ns/resnest101/seresnext101
6 | metric_strategy : False
7 | batch_size : 64
8 | num_workers : 12
9 | init_lr : 3e-5
10 | out_dim1 : 4
11 | out_dim2 : 3
12 | n_epochs : 30
13 | drop_nums : 1
14 | loss_type : "ce_loss"   # ce_loss, ce_smothing_loss, focal_loss, bce_loss (multi-label, not yet supported), mlsm_loss (multi-label, not yet supported)
15 | use_amp : False
16 | mixup_cutmix : False
17 | model_dir : "./resnet18/weight/"
18 | log_dir : "./resnet18/logs/"
19 | CUDA_VISIBLE_DEVICES : "0"   # multi-GPU training: "0,1,2,3"
20 | fold : "0,1,2,3,4"
21 | pretrained : True
22 | eval : "best" # "best", "final"
23 | oof_dir : "./resnet18/oofs/"
24 | auc_index : "punch"
--------------------------------------------------------------------------------
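For orientation, a flat config like the one above is read with PyYAML and merged with command-line arguments (see `qdnet/conf/config.py` later in this dump). The snippet below is a minimal standalone sketch of that pattern; the override handling shown here is an assumption, not the repo's exact `merge_config` logic.

```python
import argparse
import yaml

def load_config(path, overrides):
    """Load a flat YAML config and apply any CLI overrides that were given."""
    with open(path, "r", encoding="utf-8") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    for key, value in overrides.items():
        if value is not None and key in config:
            config[key] = value
    return config

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--config_path", default="conf/resnet18.yaml")
    parser.add_argument("--batch_size", type=int, default=None)
    args = parser.parse_args()
    cfg = load_config(args.config_path, {"batch_size": args.batch_size})
    print(cfg["enet_type"], cfg["image_size"], cfg["batch_size"])
```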
/conf/resnet18_multilabel.yaml:
--------------------------------------------------------------------------------
1 |
2 | data_dir : "./data/"
3 | data_folder : "./data/"
4 | image_size : 256
5 | enet_type : "resnet18" # tf_efficientnet_b4_ns/resnest101/seresnext101
6 | metric_strategy : False
7 | batch_size : 64
8 | num_workers : 12
9 | init_lr : 3e-5
10 | out_dim : 509
11 | n_epochs : 30
12 | drop_nums : 1
13 | loss_type : "ce_loss"   # ce_loss, ce_smothing_loss, focal_loss, bce_loss (multi-label, not yet supported), mlsm_loss (multi-label, not yet supported)
14 | use_amp : False
15 | mixup_cutmix : False
16 | model_dir : "./resnet18/weight/"
17 | log_dir : "./resnet18/logs/"
18 | CUDA_VISIBLE_DEVICES : "0"   # multi-GPU training: "0,1,2,3"
19 | fold : "0,1,2,3,4"
20 | pretrained : True
21 | eval : "best" # "best", "final"
22 | oof_dir : "./resnet18/oofs/"
23 | auc_index : "punch"
--------------------------------------------------------------------------------
/conf/resnet50_multilabel.yaml:
--------------------------------------------------------------------------------
1 |
2 | data_dir : "./data/"
3 | data_folder : "./data/"
4 | image_size : 256
5 | enet_type : "resnet50" # tf_efficientnet_b4_ns/resnest101/seresnext101
6 | metric_strategy : False
7 | batch_size : 64
8 | num_workers : 12
9 | init_lr : 3e-5
10 | out_dim : 509
11 | n_epochs : 30
12 | drop_nums : 1
13 | loss_type : "ce_loss"   # ce_loss, ce_smothing_loss, focal_loss, bce_loss (multi-label, not yet supported), mlsm_loss (multi-label, not yet supported)
14 | use_amp : False
15 | mixup_cutmix : False
16 | model_dir : "./resnet50/weight/"
17 | log_dir : "./resnet50/logs/"
18 | CUDA_VISIBLE_DEVICES : "0"   # multi-GPU training: "0,1,2,3"
19 | fold : "0,1,2,3,4"
20 | pretrained : True
21 | eval : "best" # "best", "final"
22 | oof_dir : "./resnet50/oofs/"
23 | auc_index : "punch"
--------------------------------------------------------------------------------
/data/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MachineLP/Pytorch_multi_task_classifier/c2c3686bfe0348bbf61afddc191cde3961bcbcd8/data/.DS_Store
--------------------------------------------------------------------------------
/data/data.csv:
--------------------------------------------------------------------------------
1 | filepath,target1,target2,fold
2 | ./data/img/0male/0_1.jpg,red,kick,0
3 | ./data/img/0male/1_2.jpg,red,kick,0
4 | ./data/img/0male/0_3.jpg,red,kick,0
5 | ./data/img/0male/0_1.jpg,red,kick,0
6 | ./data/img/0male/1_2.jpg,red,kick,0
7 | ./data/img/0male/0_3.jpg,red,kick,0
8 | ./data/img/0male/0_1.jpg,red,kick,0
9 | ./data/img/0male/1_2.jpg,red,kick,0
10 | ./data/img/0male/0_3.jpg,red,kick,0
11 | ./data/img/0male/0_1.jpg,red,kick,0
12 | ./data/img/0male/1_2.jpg,red,kick,0
13 | ./data/img/0male/0_3.jpg,blue,normal,0
14 | ./data/img/0male/0_1.jpg,blue,normal,0
15 | ./data/img/0male/1_2.jpg,blue,normal,0
16 | ./data/img/0male/0_3.jpg,blue,normal,0
17 | ./data/img/0male/0_1.jpg,unclear,normal,0
18 | ./data/img/0male/1_2.jpg,unclear,normal,0
19 | ./data/img/0male/0_3.jpg,blue,normal,0
20 | ./data/img/0male/0_1.jpg,unclear,normal,0
21 | ./data/img/0male/1_2.jpg,blue,normal,0
22 | ./data/img/0male/0_3.jpg,blue,normal,0
23 | ./data/img/0male/0_1.jpg,blue,normal,0
24 | ./data/img/0male/1_2.jpg,blue,normal,0
25 | ./data/img/0male/0_3.jpg,red,punch,0
26 | ./data/img/0male/0_1.jpg,red,punch,0
27 | ./data/img/0male/1_2.jpg,red,punch,0
28 | ./data/img/0male/0_3.jpg,red,punch,0
29 | ./data/img/0male/0_1.jpg,red,punch,0
30 | ./data/img/0male/1_2.jpg,red,punch,0
31 | ./data/img/0male/0_3.jpg,red,punch,0
32 | ./data/img/0male/0_1.jpg,red,punch,0
33 | ./data/img/0male/1_2.jpg,red,punch,0
34 | ./data/img/0male/0_3.jpg,unclear,punch,0
35 | ./data/img/0male/0_1.jpg,blue,punch,0
36 | ./data/img/0male/1_2.jpg,blue,punch,0
37 | ./data/img/0male/0_3.jpg,unclear,punch,0
38 | ./data/img/0male/0_1.jpg,blue,punch,0
39 | ./data/img/0male/1_2.jpg,unclear,punch,0
40 | ./data/img/0male/0_3.jpg,blue,punch,0
41 | ./data/img/0male/0_1.jpg,blue,punch,0
42 | ./data/img/0male/1_2.jpg,error,normal,0
43 | ./data/img/0male/0_3.jpg,error,normal,0
44 | ./data/img/0male/0_1.jpg,unclear,normal,0
45 | ./data/img/0male/1_2.jpg,error,normal,0
46 | ./data/img/0male/0_3.jpg,unclear,normal,0
47 | ./data/img/0male/0_1.jpg,error,normal,0
48 | ./data/img/0male/1_2.jpg,error,normal,0
49 | ./data/img/0male/0_3.jpg,error,normal,0
50 | ./data/img/1female/1_1.jpg,red,kick,0
51 | ./data/img/1female/1_2.jpg,red,kick,0
52 | ./data/img/1female/2_3.jpg,red,kick,0
53 | ./data/img/1female/1_4.jpg,red,kick,0
54 | ./data/img/1female/2_5.jpg,red,kick,0
55 | ./data/img/1female/2_6.jpg,red,kick,0
56 | ./data/img/1female/1_1.jpg,red,kick,0
57 | ./data/img/1female/1_2.jpg,red,kick,0
58 | ./data/img/1female/2_3.jpg,red,kick,0
59 | ./data/img/1female/1_4.jpg,red,kick,0
60 | ./data/img/1female/2_5.jpg,blue,normal,0
61 | ./data/img/1female/2_6.jpg,blue,normal,0
62 | ./data/img/1female/1_1.jpg,blue,normal,0
63 | ./data/img/1female/1_2.jpg,blue,normal,0
64 | ./data/img/1female/2_3.jpg,blue,normal,0
65 | ./data/img/1female/1_4.jpg,blue,normal,0
66 | ./data/img/1female/2_5.jpg,blue,normal,0
67 | ./data/img/1female/2_6.jpg,blue,normal,0
68 | ./data/img/1female/1_1.jpg,blue,normal,0
69 | ./data/img/1female/1_2.jpg,blue,normal,0
70 | ./data/img/1female/2_3.jpg,red,punch,0
71 | ./data/img/1female/1_4.jpg,red,punch,0
72 | ./data/img/1female/2_5.jpg,red,punch,0
73 | ./data/img/1female/2_6.jpg,red,punch,0
74 | ./data/img/1female/1_1.jpg,red,punch,0
75 | ./data/img/1female/1_2.jpg,red,punch,0
76 | ./data/img/1female/2_3.jpg,red,punch,0
77 | ./data/img/1female/1_4.jpg,red,punch,0
78 | ./data/img/1female/2_5.jpg,red,punch,0
79 | ./data/img/1female/2_6.jpg,red,punch,0
80 | ./data/img/1female/1_1.jpg,red,punch,0
81 | ./data/img/1female/1_2.jpg,unclear,punch,0
82 | ./data/img/1female/2_3.jpg,unclear,punch,0
83 | ./data/img/1female/1_4.jpg,unclear,punch,0
84 | ./data/img/1female/2_5.jpg,unclear,punch,0
85 | ./data/img/1female/2_6.jpg,unclear,punch,0
86 | ./data/img/1female/1_1.jpg,unclear,punch,0
87 | ./data/img/1female/1_2.jpg,unclear,punch,0
88 | ./data/img/1female/2_3.jpg,unclear,punch,0
89 | ./data/img/1female/1_4.jpg,unclear,punch,0
90 | ./data/img/1female/2_5.jpg,unclear,punch,0
91 | ./data/img/1female/2_6.jpg,unclear,punch,0
92 | ./data/img/1female/1_1.jpg,unclear,punch,0
93 | ./data/img/1female/1_2.jpg,error,normal,0
94 | ./data/img/1female/2_3.jpg,error,normal,0
95 | ./data/img/1female/1_4.jpg,error,normal,0
96 | ./data/img/1female/2_5.jpg,error,normal,0
97 | ./data/img/1female/2_6.jpg,error,normal,0
98 |
--------------------------------------------------------------------------------
/data/data_mutilabel.csv:
--------------------------------------------------------------------------------
1 | filepath,target1,target2,fold
2 | ./data/img/0male/0_1.jpg,1,0,1,1,0
3 | '''
4 | At training time the label string 1,0,1,1,0 has to be converted to a numpy array:
5 | first use strip().split(',')[1:] to turn it into a list,
6 | then convert that list with numpy.
7 | '''
--------------------------------------------------------------------------------
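The note in the file above describes converting the multi-label string at training time; a minimal sketch of that parsing (the row value is hard-coded here for illustration):

```python
import numpy as np

# one data row from data_mutilabel.csv
line = "./data/img/0male/0_1.jpg,1,0,1,1,0"
parts = line.strip().split(",")           # -> list of strings
filepath, labels = parts[0], parts[1:]    # path, then the multi-label flags
label_vec = np.array(labels, dtype=np.float32)
print(filepath, label_vec)                # ./data/img/0male/0_1.jpg [1. 0. 1. 1. 0.]
```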
/data/img/0male/0_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MachineLP/Pytorch_multi_task_classifier/c2c3686bfe0348bbf61afddc191cde3961bcbcd8/data/img/0male/0_1.jpg
--------------------------------------------------------------------------------
/data/img/0male/0_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MachineLP/Pytorch_multi_task_classifier/c2c3686bfe0348bbf61afddc191cde3961bcbcd8/data/img/0male/0_3.jpg
--------------------------------------------------------------------------------
/data/img/0male/1_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MachineLP/Pytorch_multi_task_classifier/c2c3686bfe0348bbf61afddc191cde3961bcbcd8/data/img/0male/1_2.jpg
--------------------------------------------------------------------------------
/data/img/1female/1_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MachineLP/Pytorch_multi_task_classifier/c2c3686bfe0348bbf61afddc191cde3961bcbcd8/data/img/1female/1_1.jpg
--------------------------------------------------------------------------------
/data/img/1female/1_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MachineLP/Pytorch_multi_task_classifier/c2c3686bfe0348bbf61afddc191cde3961bcbcd8/data/img/1female/1_2.jpg
--------------------------------------------------------------------------------
/data/img/1female/1_4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MachineLP/Pytorch_multi_task_classifier/c2c3686bfe0348bbf61afddc191cde3961bcbcd8/data/img/1female/1_4.jpg
--------------------------------------------------------------------------------
/data/img/1female/2_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MachineLP/Pytorch_multi_task_classifier/c2c3686bfe0348bbf61afddc191cde3961bcbcd8/data/img/1female/2_3.jpg
--------------------------------------------------------------------------------
/data/img/1female/2_5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MachineLP/Pytorch_multi_task_classifier/c2c3686bfe0348bbf61afddc191cde3961bcbcd8/data/img/1female/2_5.jpg
--------------------------------------------------------------------------------
/data/img/1female/2_6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MachineLP/Pytorch_multi_task_classifier/c2c3686bfe0348bbf61afddc191cde3961bcbcd8/data/img/1female/2_6.jpg
--------------------------------------------------------------------------------
/data/test.csv:
--------------------------------------------------------------------------------
1 | filepath,target1,target2,fold
2 | ./data/img/0male/0_1.jpg,red,kick,0
3 | ./data/img/0male/1_2.jpg,red,kick,0
4 | ./data/img/0male/0_3.jpg,red,kick,0
5 | ./data/img/0male/0_1.jpg,red,kick,0
6 | ./data/img/0male/1_2.jpg,red,kick,0
7 | ./data/img/0male/0_3.jpg,red,kick,0
8 | ./data/img/0male/0_1.jpg,red,kick,0
9 | ./data/img/0male/1_2.jpg,red,kick,0
10 | ./data/img/0male/0_3.jpg,red,kick,0
11 | ./data/img/0male/0_1.jpg,red,kick,0
12 | ./data/img/0male/1_2.jpg,red,kick,0
13 | ./data/img/0male/0_3.jpg,blue,normal,0
14 | ./data/img/0male/0_1.jpg,blue,normal,0
15 | ./data/img/0male/1_2.jpg,blue,normal,0
16 | ./data/img/0male/0_3.jpg,blue,normal,0
17 | ./data/img/0male/0_1.jpg,unclear,normal,0
18 | ./data/img/0male/1_2.jpg,unclear,normal,0
19 | ./data/img/0male/0_3.jpg,blue,normal,0
20 | ./data/img/0male/0_1.jpg,unclear,normal,0
21 | ./data/img/0male/1_2.jpg,blue,normal,0
22 | ./data/img/0male/0_3.jpg,blue,normal,0
23 | ./data/img/0male/0_1.jpg,blue,normal,0
24 | ./data/img/0male/1_2.jpg,blue,normal,0
25 | ./data/img/0male/0_3.jpg,red,punch,0
26 | ./data/img/0male/0_1.jpg,red,punch,0
27 | ./data/img/0male/1_2.jpg,red,punch,0
28 | ./data/img/0male/0_3.jpg,red,punch,0
29 | ./data/img/0male/0_1.jpg,red,punch,0
30 | ./data/img/0male/1_2.jpg,red,punch,0
31 | ./data/img/0male/0_3.jpg,red,punch,0
32 | ./data/img/0male/0_1.jpg,red,punch,0
33 | ./data/img/0male/1_2.jpg,red,punch,0
34 | ./data/img/0male/0_3.jpg,unclear,punch,0
35 | ./data/img/0male/0_1.jpg,blue,punch,0
36 | ./data/img/0male/1_2.jpg,blue,punch,0
37 | ./data/img/0male/0_3.jpg,unclear,punch,0
38 | ./data/img/0male/0_1.jpg,blue,punch,0
39 | ./data/img/0male/1_2.jpg,unclear,punch,0
40 | ./data/img/0male/0_3.jpg,blue,punch,0
41 | ./data/img/0male/0_1.jpg,blue,punch,0
42 | ./data/img/0male/1_2.jpg,error,normal,0
43 | ./data/img/0male/0_3.jpg,error,normal,0
44 | ./data/img/0male/0_1.jpg,unclear,normal,0
45 | ./data/img/0male/1_2.jpg,error,normal,0
46 | ./data/img/0male/0_3.jpg,unclear,normal,0
47 | ./data/img/0male/0_1.jpg,error,normal,0
48 | ./data/img/0male/1_2.jpg,error,normal,0
49 | ./data/img/0male/0_3.jpg,error,normal,0
50 | ./data/img/1female/1_1.jpg,red,kick,0
51 | ./data/img/1female/1_2.jpg,red,kick,0
52 | ./data/img/1female/2_3.jpg,red,kick,0
53 | ./data/img/1female/1_4.jpg,red,kick,0
54 | ./data/img/1female/2_5.jpg,red,kick,0
55 | ./data/img/1female/2_6.jpg,red,kick,0
56 | ./data/img/1female/1_1.jpg,red,kick,0
57 | ./data/img/1female/1_2.jpg,red,kick,0
58 | ./data/img/1female/2_3.jpg,red,kick,0
59 | ./data/img/1female/1_4.jpg,red,kick,0
60 | ./data/img/1female/2_5.jpg,blue,normal,0
61 | ./data/img/1female/2_6.jpg,blue,normal,0
62 | ./data/img/1female/1_1.jpg,blue,normal,0
63 | ./data/img/1female/1_2.jpg,blue,normal,0
64 | ./data/img/1female/2_3.jpg,blue,normal,0
65 | ./data/img/1female/1_4.jpg,blue,normal,0
66 | ./data/img/1female/2_5.jpg,blue,normal,0
67 | ./data/img/1female/2_6.jpg,blue,normal,0
68 | ./data/img/1female/1_1.jpg,blue,normal,0
69 | ./data/img/1female/1_2.jpg,blue,normal,0
70 | ./data/img/1female/2_3.jpg,red,punch,0
71 | ./data/img/1female/1_4.jpg,red,punch,0
72 | ./data/img/1female/2_5.jpg,red,punch,0
73 | ./data/img/1female/2_6.jpg,red,punch,0
74 | ./data/img/1female/1_1.jpg,red,punch,0
75 | ./data/img/1female/1_2.jpg,red,punch,0
76 | ./data/img/1female/2_3.jpg,red,punch,0
77 | ./data/img/1female/1_4.jpg,red,punch,0
78 | ./data/img/1female/2_5.jpg,red,punch,0
79 | ./data/img/1female/2_6.jpg,red,punch,0
80 | ./data/img/1female/1_1.jpg,red,punch,0
81 | ./data/img/1female/1_2.jpg,unclear,punch,0
82 | ./data/img/1female/2_3.jpg,unclear,punch,0
83 | ./data/img/1female/1_4.jpg,unclear,punch,0
84 | ./data/img/1female/2_5.jpg,unclear,punch,0
85 | ./data/img/1female/2_6.jpg,unclear,punch,0
86 | ./data/img/1female/1_1.jpg,unclear,punch,0
87 | ./data/img/1female/1_2.jpg,unclear,punch,0
88 | ./data/img/1female/2_3.jpg,unclear,punch,0
89 | ./data/img/1female/1_4.jpg,unclear,punch,0
90 | ./data/img/1female/2_5.jpg,unclear,punch,0
91 | ./data/img/1female/2_6.jpg,unclear,punch,0
92 | ./data/img/1female/1_1.jpg,unclear,punch,0
93 | ./data/img/1female/1_2.jpg,error,normal,0
94 | ./data/img/1female/2_3.jpg,error,normal,0
95 | ./data/img/1female/1_4.jpg,error,normal,0
96 | ./data/img/1female/2_5.jpg,error,normal,0
97 | ./data/img/1female/2_6.jpg,error,normal,0
98 |
--------------------------------------------------------------------------------
/data/train.csv:
--------------------------------------------------------------------------------
1 | filepath,target1,target2,fold
2 | ./data/img/0male/0_1.jpg,red,kick,1
3 | ./data/img/0male/1_2.jpg,red,kick,0
4 | ./data/img/0male/0_3.jpg,red,kick,1
5 | ./data/img/0male/0_1.jpg,red,kick,1
6 | ./data/img/0male/1_2.jpg,red,kick,0
7 | ./data/img/0male/0_3.jpg,red,kick,1
8 | ./data/img/0male/0_1.jpg,red,kick,1
9 | ./data/img/0male/1_2.jpg,red,kick,0
10 | ./data/img/0male/0_3.jpg,red,kick,1
11 | ./data/img/0male/0_1.jpg,red,kick,1
12 | ./data/img/0male/1_2.jpg,red,kick,0
13 | ./data/img/0male/0_3.jpg,blue,normal,1
14 | ./data/img/0male/0_1.jpg,blue,normal,1
15 | ./data/img/0male/1_2.jpg,blue,normal,0
16 | ./data/img/0male/0_3.jpg,blue,normal,1
17 | ./data/img/0male/0_1.jpg,unclear,normal,1
18 | ./data/img/0male/1_2.jpg,unclear,normal,0
19 | ./data/img/0male/0_3.jpg,blue,normal,1
20 | ./data/img/0male/0_1.jpg,unclear,normal,1
21 | ./data/img/0male/1_2.jpg,blue,normal,0
22 | ./data/img/0male/0_3.jpg,blue,normal,1
23 | ./data/img/0male/0_1.jpg,blue,normal,1
24 | ./data/img/0male/1_2.jpg,blue,normal,0
25 | ./data/img/0male/0_3.jpg,red,punch,1
26 | ./data/img/0male/0_1.jpg,red,punch,1
27 | ./data/img/0male/1_2.jpg,red,punch,0
28 | ./data/img/0male/0_3.jpg,red,punch,1
29 | ./data/img/0male/0_1.jpg,red,punch,1
30 | ./data/img/0male/1_2.jpg,red,punch,0
31 | ./data/img/0male/0_3.jpg,red,punch,1
32 | ./data/img/0male/0_1.jpg,red,punch,1
33 | ./data/img/0male/1_2.jpg,red,punch,0
34 | ./data/img/0male/0_3.jpg,unclear,punch,1
35 | ./data/img/0male/0_1.jpg,blue,punch,1
36 | ./data/img/0male/1_2.jpg,blue,punch,0
37 | ./data/img/0male/0_3.jpg,unclear,punch,1
38 | ./data/img/0male/0_1.jpg,blue,punch,1
39 | ./data/img/0male/1_2.jpg,unclear,punch,0
40 | ./data/img/0male/0_3.jpg,blue,punch,1
41 | ./data/img/0male/0_1.jpg,blue,punch,1
42 | ./data/img/0male/1_2.jpg,error,normal,0
43 | ./data/img/0male/0_3.jpg,error,normal,1
44 | ./data/img/0male/0_1.jpg,unclear,normal,1
45 | ./data/img/0male/1_2.jpg,error,normal,0
46 | ./data/img/0male/0_3.jpg,unclear,normal,1
47 | ./data/img/0male/0_1.jpg,error,normal,1
48 | ./data/img/0male/1_2.jpg,error,normal,0
49 | ./data/img/0male/0_3.jpg,error,normal,1
50 | ./data/img/1female/1_1.jpg,red,kick,0
51 | ./data/img/1female/1_2.jpg,red,kick,0
52 | ./data/img/1female/2_3.jpg,red,kick,2
53 | ./data/img/1female/1_4.jpg,red,kick,0
54 | ./data/img/1female/2_5.jpg,red,kick,2
55 | ./data/img/1female/2_6.jpg,red,kick,2
56 | ./data/img/1female/1_1.jpg,red,kick,0
57 | ./data/img/1female/1_2.jpg,red,kick,0
58 | ./data/img/1female/2_3.jpg,red,kick,2
59 | ./data/img/1female/1_4.jpg,red,kick,0
60 | ./data/img/1female/2_5.jpg,blue,normal,2
61 | ./data/img/1female/2_6.jpg,blue,normal,2
62 | ./data/img/1female/1_1.jpg,blue,normal,0
63 | ./data/img/1female/1_2.jpg,blue,normal,0
64 | ./data/img/1female/2_3.jpg,blue,normal,2
65 | ./data/img/1female/1_4.jpg,blue,normal,0
66 | ./data/img/1female/2_5.jpg,blue,normal,2
67 | ./data/img/1female/2_6.jpg,blue,normal,2
68 | ./data/img/1female/1_1.jpg,blue,normal,0
69 | ./data/img/1female/1_2.jpg,blue,normal,0
70 | ./data/img/1female/2_3.jpg,red,punch,2
71 | ./data/img/1female/1_4.jpg,red,punch,0
72 | ./data/img/1female/2_5.jpg,red,punch,2
73 | ./data/img/1female/2_6.jpg,red,punch,2
74 | ./data/img/1female/1_1.jpg,red,punch,0
75 | ./data/img/1female/1_2.jpg,red,punch,0
76 | ./data/img/1female/2_3.jpg,red,punch,2
77 | ./data/img/1female/1_4.jpg,red,punch,0
78 | ./data/img/1female/2_5.jpg,red,punch,2
79 | ./data/img/1female/2_6.jpg,red,punch,2
80 | ./data/img/1female/1_1.jpg,red,punch,0
81 | ./data/img/1female/1_2.jpg,unclear,punch,0
82 | ./data/img/1female/2_3.jpg,unclear,punch,2
83 | ./data/img/1female/1_4.jpg,unclear,punch,0
84 | ./data/img/1female/2_5.jpg,unclear,punch,2
85 | ./data/img/1female/2_6.jpg,unclear,punch,2
86 | ./data/img/1female/1_1.jpg,unclear,punch,0
87 | ./data/img/1female/1_2.jpg,unclear,punch,0
88 | ./data/img/1female/2_3.jpg,unclear,punch,2
89 | ./data/img/1female/1_4.jpg,unclear,punch,0
90 | ./data/img/1female/2_5.jpg,unclear,punch,2
91 | ./data/img/1female/2_6.jpg,unclear,punch,2
92 | ./data/img/1female/1_1.jpg,unclear,punch,0
93 | ./data/img/1female/1_2.jpg,error,normal,0
94 | ./data/img/1female/2_3.jpg,error,normal,2
95 | ./data/img/1female/1_4.jpg,error,normal,0
96 | ./data/img/1female/2_5.jpg,error,normal,2
97 | ./data/img/1female/2_6.jpg,error,normal,2
98 |
--------------------------------------------------------------------------------
/docs/Tensorrt_installation_guide_on_Ubuntu1804.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | Install CUDA 11.1: https://developer.nvidia.com/cuda-11.1.0-download-archive
4 | OR
5 | ```
6 | wget https://developer.download.nvidia.com/compute/cuda/11.1.0/local_installers/cuda_11.1.0_455.23.05_linux.run
7 | sudo sh cuda_11.1.0_455.23.05_linux.run
8 | ```
9 |
10 | Export the environment variables (edit ~/.bashrc):
11 | ```
12 | export PATH=/usr/local/cuda/bin:${PATH}
13 | export LD_LIBRARY_PATH=/usr/local/cuda/lib64:${LD_LIBRARY_PATH}
14 | export LD_LIBRARY_PATH=/usr/local/cuda/lib64:${LD_LIBRARY_PATH}
15 | export CUDA_HOME=/usr/local/cuda
16 | ```
17 |
18 |
19 | Install CUDNN :
20 | ```
21 | wget https://developer.download.nvidia.com/compute/machine-learning/cudnn/secure/8.2.1.32/11.3_06072021/cudnn-11.3-linux-x64-v8.2.1.32.tgz?CglCMsW0l-97IQHomlsL6lsBZ0fRvOCNZYrKZfyQWcmWjUYdtpyhc7SN5_mitLNz8v8drlztiAHqnX_D6Mw5XzanXyobErgAQo7jAY_8sbCALpKccMW26hhkBfVwb4ficnR26cTQF7qhW2QsfIIbjFooFNUWnGz7Z2_4_FaojFXfx-rhRU8msFxAr_Piv6BhzNNWOqie2gC5_eT_6s
22 | mv cudnn-11.3-linux-x64-v8.2.1.32.tgz?CglCMsW0l-97IQHomlsL6lsBZ0fRvOCNZYrKZfyQWc......... cudnn-11.3-linux-x64-v8.2.1.32.tgz
23 | tar zxvf cudnn-11.3-linux-x64-v8.2.1.32.tgz
24 | sudo cp cuda/include/cudnn.h /usr/local/cuda/include/
25 | sudo cp cuda/lib64/libcudnn* /usr/local/cuda/lib64/
26 | sudo chmod a+r /usr/local/cuda/include/cudnn.h
27 | sudo chmod a+r /usr/local/cuda/lib64/libcudnn*
28 | ```
29 |
30 |
31 | Install TENSORRT(TensorRT-7.2.3.4): https://developer.nvidia.com/nvidia-tensorrt-7x-download
32 | ```
33 | export LD_LIBRARY_PATH=/mnt/sdc/machinelp/TensorRT-7.2.3.4/lib:${LD_LIBRARY_PATH}
34 |
35 | tar zxvf TensorRT-7.2.3.4.Ubuntu-18.04.x86_64-gnu.cuda-11.1.cudnn8.1.tar.gz
36 | cd TensorRT-7.2.3.4
37 | cd python
38 | pip install tensorrt-7.2.3.4-cp38-none-linux_x86_64.whl
39 | ```
--------------------------------------------------------------------------------
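A quick way to confirm the wheel installed correctly is to import the Python binding; this check is a suggestion, not part of the original guide.

```python
import tensorrt as trt

print("TensorRT version:", trt.__version__)
# creating a builder exercises the native library as well
builder = trt.Builder(trt.Logger(trt.Logger.WARNING))
print("Builder created:", builder is not None)
```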
/infer_multi_label.py:
--------------------------------------------------------------------------------
1 | # -*- coding:utf-8 -*-
2 | '''
3 | -------------------------------------------------
4 | Description : QDNet infer
5 | Author : machinelp
6 | Date : 2020-10-20
7 | -------------------------------------------------
8 | '''
9 | import os
10 | import time
11 | import random
12 | import cv2
13 | import argparse
14 | import numpy as np
15 | import pandas as pd
16 | from tqdm import tqdm
17 | from sklearn.metrics import roc_auc_score
18 | from sklearn.model_selection import StratifiedKFold
19 | import torch
20 | import torch.nn as nn
21 | import torch.nn.functional as F
22 | import torch.optim as optim
23 | from torch.optim import lr_scheduler
24 | from torch.utils.data.sampler import RandomSampler
25 | from torch.utils.data import DataLoader, SequentialSampler
26 | from torch.optim.lr_scheduler import CosineAnnealingLR
27 |
28 | from qdnet.conf.config import load_yaml
29 | from qdnet.optimizer.optimizer import GradualWarmupSchedulerV2
30 | from qdnet.dataset.dataset import get_df, QDDataset
31 | from qdnet.dataaug.dataaug import get_transforms
32 | from qdnet.models.effnet import Effnet
33 | from qdnet.models.resnest import Resnest
34 | from qdnet.models.se_resnext import SeResnext
35 | from qdnet.models.resnet import Resnet
36 | from qdnet.conf.constant import Constant
37 | from qdnet_classifier.classifier_multi_label import MultiLabelModel
38 |
39 |
40 |
41 | device = torch.device('cuda')
42 |
43 | parser = argparse.ArgumentParser(description='Hyperparams')
44 | parser.add_argument('--config_path', help='config file path')
45 | parser.add_argument('--img_path', help='image file path')
46 | parser.add_argument('--fold', help='fold index')
47 | args = parser.parse_args()
48 | config = load_yaml(args.config_path, args)
49 | Sigmoid_fun = nn.Sigmoid()
50 |
51 |
52 | class QDNetModel():
53 |
54 | def __init__(self, config, fold):
55 |
56 | if config["enet_type"] in Constant.RESNET_LIST:
57 | ModelClass = MultiLabelModel
58 | else:
59 | raise NotImplementedError()
60 |
61 | if config["eval"] == 'best':
62 | model_file = os.path.join(config["model_dir"], f'best_fold{fold}.pth')
63 | if config["eval"] == 'final':
64 | model_file = os.path.join(config["model_dir"], f'final_fold{fold}.pth')
65 | self.model = ModelClass(
66 | config["enet_type"],
67 | config["out_dim"],
68 | pretrained = config["pretrained"] )
69 | self.model = self.model.to(device)
70 |
71 | try: # single GPU model_file
72 | self.model.load_state_dict(torch.load(model_file), strict=True)
73 | except: # multi GPU model_file
74 | state_dict = torch.load(model_file)
75 | state_dict = {k[7:] if k.startswith('module.') else k: state_dict[k] for k in state_dict.keys()}
76 | self.model.load_state_dict(state_dict, strict=True)
77 | self.model.eval()
78 |
79 | _, self.transforms_val = get_transforms(config["image_size"])
80 |
81 |
82 | def predict(self, data):
83 | if os.path.isfile(data):
84 | image = cv2.imread(data)
85 | image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
86 | else:
87 | image = cv2.imread(data)
88 | image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
89 | res = self.transforms_val(image=image)
90 | image = res['image'].astype(np.float32)
91 |
92 | image = image.transpose(2, 0, 1)
93 | data = torch.tensor([image]).float()
94 | probs = self.model( data.to(device) )
95 | return probs
96 |
97 |
98 | if __name__ == '__main__':
99 |
100 | qd_model = QDNetModel(config, args.fold)
101 | start_time = time.time()
102 | for i in range (10):
103 | probs = qd_model.predict(args.img_path)
104 | print ("time>>>>", (time.time() - start_time)/10.0 )
105 | print ("pre>>>>>", Sigmoid_fun(probs))
106 |
--------------------------------------------------------------------------------
/infer_multi_task.py:
--------------------------------------------------------------------------------
1 | # -*- coding:utf-8 -*-
2 | '''
3 | -------------------------------------------------
4 | Description : QDNet infer
5 | Author : machinelp
6 | Date : 2020-10-20
7 | -------------------------------------------------
8 | '''
9 | import os
10 | import time
11 | import random
12 | import cv2
13 | import argparse
14 | import numpy as np
15 | import pandas as pd
16 | from tqdm import tqdm
17 | from sklearn.metrics import roc_auc_score
18 | from sklearn.model_selection import StratifiedKFold
19 | import torch
20 | import torch.nn as nn
21 | import torch.nn.functional as F
22 | import torch.optim as optim
23 | from torch.optim import lr_scheduler
24 | from torch.utils.data.sampler import RandomSampler
25 | from torch.utils.data import DataLoader, SequentialSampler
26 | from torch.optim.lr_scheduler import CosineAnnealingLR
27 |
28 | from qdnet.conf.config import load_yaml
29 | from qdnet.optimizer.optimizer import GradualWarmupSchedulerV2
30 | from qdnet.dataset.dataset import get_df, QDDataset
31 | from qdnet.dataaug.dataaug import get_transforms
32 | from qdnet.models.effnet import Effnet
33 | from qdnet.models.resnest import Resnest
34 | from qdnet.models.se_resnext import SeResnext
35 | from qdnet.models.resnet import Resnet
36 | from qdnet.conf.constant import Constant
37 | from qdnet_classifier.classifier_multi_task import MultiLabelModel
38 |
39 |
40 |
41 | device = torch.device('cuda')
42 |
43 | parser = argparse.ArgumentParser(description='Hyperparams')
44 | parser.add_argument('--config_path', help='config file path')
45 | parser.add_argument('--img_path', help='image file path')
46 | parser.add_argument('--fold', help='fold index')
47 | args = parser.parse_args()
48 | config = load_yaml(args.config_path, args)
49 |
50 |
51 |
52 | class QDNetModel():
53 |
54 | def __init__(self, config, fold):
55 |
56 | if config["enet_type"] in Constant.RESNET_LIST:
57 | ModelClass = MultiLabelModel
58 | else:
59 | raise NotImplementedError()
60 |
61 | if config["eval"] == 'best':
62 | model_file = os.path.join(config["model_dir"], f'best_fold{fold}.pth')
63 | if config["eval"] == 'final':
64 | model_file = os.path.join(config["model_dir"], f'final_fold{fold}.pth')
65 | self.model = ModelClass(
66 | config["enet_type"],
67 | config["out_dim1"],
68 | config["out_dim2"],
69 | pretrained = config["pretrained"] )
70 | self.model = self.model.to(device)
71 |
72 | try: # single GPU model_file
73 | self.model.load_state_dict(torch.load(model_file), strict=True)
74 | except: # multi GPU model_file
75 | state_dict = torch.load(model_file)
76 | state_dict = {k[7:] if k.startswith('module.') else k: state_dict[k] for k in state_dict.keys()}
77 | self.model.load_state_dict(state_dict, strict=True)
78 | self.model.eval()
79 |
80 | _, self.transforms_val = get_transforms(config["image_size"])
81 |
82 |
83 | def predict(self, data):
84 | if os.path.isfile(data):
85 | image = cv2.imread(data)
86 | image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
87 | else:
88 | image = cv2.imread(data)
89 | image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
90 | res = self.transforms_val(image=image)
91 | image = res['image'].astype(np.float32)
92 |
93 | image = image.transpose(2, 0, 1)
94 | data = torch.tensor([image]).float()
95 | probs = self.model( data.to(device) )
96 |
97 | probs_color = F.softmax(probs['color'],dim =1)
98 | probs_color = probs_color.cpu().detach().numpy()
99 | ouputs_color = probs_color.argmax(1)
100 | probs_color = [probs_color[i][ouputs_color[i]] for i in range(len(ouputs_color))]
101 | probs_action = F.softmax(probs['action'],dim =1)
102 | probs_action = probs_action.cpu().detach().numpy()
103 | ouputs_action = probs_action.argmax(1)
104 | probs_action = [probs_action[i][ouputs_action[i]] for i in range(len(ouputs_action))]
105 | return ouputs_color, probs_color, ouputs_action, probs_action
106 |
107 |
108 | if __name__ == '__main__':
109 |
110 | qd_model = QDNetModel(config, args.fold)
111 | start_time = time.time()
112 | for i in range (10):
113 | ouputs_color, probs_color, ouputs_action, probs_action = qd_model.predict(args.img_path)
114 | print ("time>>>>", (time.time() - start_time)/10.0 )
115 | print ("pre>>>>>", ouputs_color, probs_color, ouputs_action, probs_action)
116 |
--------------------------------------------------------------------------------
/onnx_tensorrt/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # Permission is hereby granted, free of charge, to any person obtaining a
4 | # copy of this software and associated documentation files (the "Software"),
5 | # to deal in the Software without restriction, including without limitation
6 | # the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 | # and/or sell copies of the Software, and to permit persons to whom the
8 | # Software is furnished to do so, subject to the following conditions:
9 | #
10 | # The above copyright notice and this permission notice shall be included in
11 | # all copies or substantial portions of the Software.
12 | #
13 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
16 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
19 | # DEALINGS IN THE SOFTWARE.
20 |
21 | from __future__ import absolute_import
22 |
23 | from . import backend
24 |
25 | __version__ = "7.2.1.6.0"
26 |
--------------------------------------------------------------------------------
/onnx_tensorrt/config.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # Permission is hereby granted, free of charge, to any person obtaining a
4 | # copy of this software and associated documentation files (the "Software"),
5 | # to deal in the Software without restriction, including without limitation
6 | # the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 | # and/or sell copies of the Software, and to permit persons to whom the
8 | # Software is furnished to do so, subject to the following conditions:
9 | #
10 | # The above copyright notice and this permission notice shall be included in
11 | # all copies or substantial portions of the Software.
12 | #
13 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
16 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
19 | # DEALINGS IN THE SOFTWARE.
20 |
21 | import tensorrt as trt
22 |
23 | class Config():
24 | def __init__(self):
25 | # If TensorRT major is >= 5, then we use new Python bindings
26 | _tensorrt_version = [int(n) for n in trt.__version__.split('.')]
27 | self.USE_PYBIND = _tensorrt_version[0] >= 5
28 |
29 | def USE_PYBIND(self):
30 | return self.USE_PYBIND
--------------------------------------------------------------------------------
/qdnet/conf/config.py:
--------------------------------------------------------------------------------
1 |
2 | import os
3 | import yaml
4 | import threading
5 | import numpy as np
6 |
7 | MEAN = (0.485, 0.456, 0.406)
8 | STD = (0.229, 0.224, 0.225)
9 |
10 | def merge_config(config,args):
11 | for key_1 in config.keys():
12 | if(isinstance(config[key_1],dict)):
13 | for key_2 in config[key_1].keys():
14 | if(key_2) in dir(args):
15 | config[key_1][key_2] = getattr(args,key_2)
16 | return config
17 |
18 | def load_yaml(yaml_name, args):
19 | config = yaml.load(open(yaml_name, 'r', encoding='utf-8'),Loader=yaml.FullLoader)
20 | config = merge_config(config, args)
21 | return config
22 |
--------------------------------------------------------------------------------
/qdnet/conf/constant.py:
--------------------------------------------------------------------------------
1 | import os
2 | import threading
3 |
4 | class Constant():
5 |
6 | RESNEST_LIST = ['resnest50', 'resnest101', 'resnest200', 'resnest269']
7 | RESNET_LIST = ['resnet18', 'resnet34', 'resnet50']
8 | SERESNEXT_LIST = ['seresnext101']
9 | GEFFNET_LIST = ['GenEfficientNet', 'mnasnet_050', 'mnasnet_075', 'mnasnet_100', 'mnasnet_b1',
10 | 'mnasnet_140', 'semnasnet_050', 'semnasnet_075', 'semnasnet_100', 'mnasnet_a1', 'semnasnet_140',
11 | 'mnasnet_small','mobilenetv2_100', 'mobilenetv2_140', 'mobilenetv2_110d', 'mobilenetv2_120d', 'fbnetc_100',
12 | 'spnasnet_100', 'efficientnet_b0', 'efficientnet_b1', 'efficientnet_b2', 'efficientnet_b3', 'efficientnet_b4', 'efficientnet_b5',
13 | 'efficientnet_b6', 'efficientnet_b7', 'efficientnet_b8', 'efficientnet_l2', 'efficientnet_es', 'efficientnet_em', 'efficientnet_el',
14 | 'efficientnet_cc_b0_4e', 'efficientnet_cc_b0_8e', 'efficientnet_cc_b1_8e', 'efficientnet_lite0', 'efficientnet_lite1', 'efficientnet_lite2',
15 | 'efficientnet_lite3', 'efficientnet_lite4', 'tf_efficientnet_b0', 'tf_efficientnet_b1', 'tf_efficientnet_b2', 'tf_efficientnet_b3',
16 | 'tf_efficientnet_b4', 'tf_efficientnet_b5', 'tf_efficientnet_b6', 'tf_efficientnet_b7', 'tf_efficientnet_b8', 'tf_efficientnet_b0_ap',
17 | 'tf_efficientnet_b1_ap', 'tf_efficientnet_b2_ap', 'tf_efficientnet_b3_ap', 'tf_efficientnet_b4_ap', 'tf_efficientnet_b5_ap',
18 | 'tf_efficientnet_b6_ap', 'tf_efficientnet_b7_ap', 'tf_efficientnet_b8_ap', 'tf_efficientnet_b0_ns', 'tf_efficientnet_b1_ns',
19 | 'tf_efficientnet_b2_ns', 'tf_efficientnet_b3_ns', 'tf_efficientnet_b4_ns', 'tf_efficientnet_b5_ns', 'tf_efficientnet_b6_ns',
20 | 'tf_efficientnet_b7_ns', 'tf_efficientnet_l2_ns', 'tf_efficientnet_l2_ns_475', 'tf_efficientnet_es', 'tf_efficientnet_em',
21 | 'tf_efficientnet_el', 'tf_efficientnet_cc_b0_4e', 'tf_efficientnet_cc_b0_8e', 'tf_efficientnet_cc_b1_8e', 'tf_efficientnet_lite0',
22 | 'tf_efficientnet_lite1', 'tf_efficientnet_lite2', 'tf_efficientnet_lite3', 'tf_efficientnet_lite4', 'mixnet_s', 'mixnet_m', 'mixnet_l',
23 | 'mixnet_xl', 'tf_mixnet_s', 'tf_mixnet_m', 'tf_mixnet_l', 'mobilenetv3_rw',
24 | 'mobilenetv3_large_075', 'mobilenetv3_large_100', 'mobilenetv3_large_minimal_100',
25 | 'mobilenetv3_small_075', 'mobilenetv3_small_100', 'mobilenetv3_small_minimal_100',
26 | 'tf_mobilenetv3_large_075', 'tf_mobilenetv3_large_100', 'tf_mobilenetv3_large_minimal_100',
27 | 'tf_mobilenetv3_small_075', 'tf_mobilenetv3_small_100', 'tf_mobilenetv3_small_minimal_100']
28 |
29 |
30 |
--------------------------------------------------------------------------------
/qdnet/dataaug/dataaug.py:
--------------------------------------------------------------------------------
1 |
2 | import os
3 | import cv2
4 | import numpy as np
5 | import pandas as pd
6 | import albumentations
7 | import torch
8 | from torch.utils.data import Dataset
9 |
10 | from tqdm import tqdm
11 |
12 | def get_transforms_(image_size):
13 |
14 | transforms_train = albumentations.Compose([
15 | albumentations.Transpose(p=0.5),
16 | albumentations.VerticalFlip(p=0.5),
17 | albumentations.HorizontalFlip(p=0.5),
18 | albumentations.RandomBrightness(limit=0.2, p=0.75),
19 | albumentations.RandomContrast(limit=0.2, p=0.75),
20 | albumentations.OneOf([
21 | albumentations.MotionBlur(blur_limit=5),
22 | albumentations.MedianBlur(blur_limit=5),
23 | albumentations.GaussianBlur(blur_limit=5),
24 | albumentations.GaussNoise(var_limit=(5.0, 30.0)),
25 | ], p=0.7),
26 |
27 | albumentations.OneOf([
28 | albumentations.OpticalDistortion(distort_limit=1.0),
29 | albumentations.GridDistortion(num_steps=5, distort_limit=1.),
30 | albumentations.ElasticTransform(alpha=3),
31 | ], p=0.7),
32 |
33 | albumentations.CLAHE(clip_limit=4.0, p=0.7),
34 | albumentations.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=20, val_shift_limit=10, p=0.5),
35 | albumentations.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15, border_mode=0, p=0.85),
36 | albumentations.Resize(image_size, image_size),
37 | albumentations.Cutout(max_h_size=int(image_size * 0.375), max_w_size=int(image_size * 0.375), num_holes=1, p=0.7),
38 | albumentations.Normalize()
39 | ])
40 |
41 | transforms_val = albumentations.Compose([
42 | albumentations.Resize(image_size, image_size),
43 | albumentations.Normalize()
44 | ])
45 |
46 | return transforms_train, transforms_val
47 |
48 |
49 | def get_transforms(image_size):
50 |
51 | transforms_train = albumentations.Compose([
52 | albumentations.HorizontalFlip(p=0.5),
53 | albumentations.ImageCompression(quality_lower=90, quality_upper=100),
54 | albumentations.ShiftScaleRotate(shift_limit=0.2, scale_limit=0.2, rotate_limit=10, border_mode=0, p=0.7),
55 | albumentations.Resize(image_size, image_size),
56 | albumentations.Cutout(max_h_size=int(image_size * 0.4), max_w_size=int(image_size * 0.4), num_holes=1, p=0.5),
57 | albumentations.Normalize()
58 | ])
59 |
60 | transforms_val = albumentations.Compose([
61 | albumentations.Resize(image_size, image_size),
62 | albumentations.Normalize()
63 | ])
64 |
65 | return transforms_train, transforms_val
66 |
67 |
68 |
--------------------------------------------------------------------------------
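A usage sketch for the transforms above (the image path comes from the sample data in this repo; batching is left to the dataset classes that follow):

```python
import cv2
import numpy as np

from qdnet.dataaug.dataaug import get_transforms

transforms_train, transforms_val = get_transforms(156)
image = cv2.imread("./data/img/0male/0_1.jpg")
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = transforms_val(image=image)["image"].astype(np.float32)
print(image.shape)  # (156, 156, 3), resized and normalized
```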
/qdnet/dataset/dataset.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | import numpy as np
4 | import pandas as pd
5 | import albumentations
6 | import torch
7 | from torch.utils.data import Dataset
8 |
9 | from tqdm import tqdm
10 |
11 |
12 | class QDDataset(Dataset):
13 | def __init__(self, csv, mode, transform=None):
14 |
15 | self.csv = csv.reset_index(drop=True)
16 | self.mode = mode
17 | self.transform = transform
18 |
19 | def __len__(self):
20 | return self.csv.shape[0]
21 |
22 | def __getitem__(self, index):
23 |
24 | row = self.csv.iloc[index]
25 |
26 | image = cv2.imread(row.filepath)
27 | image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
28 |
29 | if self.transform is not None:
30 | res = self.transform(image=image)
31 | image = res['image'].astype(np.float32)
32 | else:
33 | image = image.astype(np.float32)
34 |
35 | image = image.transpose(2, 0, 1)
36 | data = torch.tensor(image).float()
37 |
38 | if self.mode == 'test':
39 | return data
40 | else:
41 | return data, torch.tensor(self.csv.iloc[index].target).long()
42 |
43 |
44 |
45 | def get_df(data_dir, auc_index):
46 |
47 | # train data
48 | df_train = pd.read_csv(os.path.join(data_dir, 'train.csv'))
49 | df_train['filepath'] = df_train['filepath']
50 |
51 | # test data
52 | df_test = pd.read_csv(os.path.join(data_dir, 'test.csv'))
53 | df_test['filepath'] = df_test['filepath']
54 |
55 | # class mapping
56 | label2idx = {d: idx for idx, d in enumerate(sorted(df_train.target.unique()))}
57 | print ("label2idx:", label2idx)
58 | df_train['target'] = df_train['target'].map(label2idx)
59 | label_idx = label2idx[auc_index]
60 |
61 | return df_train, df_test, label_idx
62 |
63 |
--------------------------------------------------------------------------------
/qdnet/dataset/dataset_multi_task.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | import numpy as np
4 | import pandas as pd
5 | import albumentations
6 | import torch
7 | from torch.utils.data import Dataset
8 |
9 | from tqdm import tqdm
10 |
11 |
12 | class QDDataset(Dataset):
13 | def __init__(self, csv, mode, transform=None):
14 |
15 | self.csv = csv.reset_index(drop=True)
16 | self.mode = mode
17 | self.transform = transform
18 |
19 | def __len__(self):
20 | return self.csv.shape[0]
21 |
22 | def __getitem__(self, index):
23 |
24 | row = self.csv.iloc[index]
25 |
26 | image = cv2.imread(row.filepath)
27 | image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
28 |
29 | if self.transform is not None:
30 | res = self.transform(image=image)
31 | image = res['image'].astype(np.float32)
32 | else:
33 | image = image.astype(np.float32)
34 |
35 | image = image.transpose(2, 0, 1)
36 | data = torch.tensor(image).float()
37 |
38 | if self.mode == 'test':
39 | return data
40 | else:
41 | return data, {'color': torch.tensor(self.csv.iloc[index].target1).long(), 'action': torch.tensor(self.csv.iloc[index].target2).long()}
42 |
43 |
44 |
45 | def get_df(data_dir):
46 |
47 | # train data
48 | df_train = pd.read_csv(os.path.join(data_dir, 'train.csv'))
49 | df_train['filepath'] = df_train['filepath']
50 |
51 | # test data
52 | df_test = pd.read_csv(os.path.join(data_dir, 'test.csv'))
53 | df_test['filepath'] = df_test['filepath']
54 |
55 | # class mapping
56 | label2idx = {d: idx for idx, d in enumerate(sorted(df_train.target1.unique()))}
57 | print ("target1 label2idx:", label2idx)
58 | df_train['target1'] = df_train['target1'].map(label2idx)
59 |
60 | label2idx = {d: idx for idx, d in enumerate(sorted(df_train.target2.unique()))}
61 | print ("target2 label2idx:", label2idx)
62 | df_train['target2'] = df_train['target2'].map(label2idx)
63 |
64 | return df_train, df_test
65 |
66 |
--------------------------------------------------------------------------------
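How `get_df` and `QDDataset` above plug into a `DataLoader`, roughly as the training script does; batch size and worker count here are arbitrary choices.

```python
from torch.utils.data import DataLoader

from qdnet.dataaug.dataaug import get_transforms
from qdnet.dataset.dataset_multi_task import QDDataset, get_df

df_train, df_test = get_df("./data/")
transforms_train, transforms_val = get_transforms(156)

dataset = QDDataset(df_train, mode="train", transform=transforms_train)
loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=0)

images, targets = next(iter(loader))
print(images.shape, targets["color"].shape, targets["action"].shape)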
/qdnet/loss/ce_label_smoothing.py:
--------------------------------------------------------------------------------
1 |
2 | import torch
3 | from torch import nn
4 | import torch.nn.functional as F
5 | class CrossEntropyLossWithLabelSmoothing(nn.Module):
6 | def __init__(self, n_dim, ls_=0.9):
7 | super().__init__()
8 | self.n_dim = n_dim
9 | self.ls_ = ls_
10 |
11 | def forward(self, x, target):
12 | target = F.one_hot(target, self.n_dim).float()
13 | target *= self.ls_
14 | target += (1 - self.ls_) / self.n_dim
15 |
16 | logprobs = torch.nn.functional.log_softmax(x, dim=-1)
17 | loss = -logprobs * target
18 | loss = loss.sum(-1)
19 | return loss.mean()
20 |
21 |
--------------------------------------------------------------------------------
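A small usage sketch for the smoothed cross-entropy above (class count and batch shapes are arbitrary):

```python
import torch

from qdnet.loss.ce_label_smoothing import CrossEntropyLossWithLabelSmoothing

criterion = CrossEntropyLossWithLabelSmoothing(n_dim=4, ls_=0.9)
logits = torch.randn(8, 4)           # raw model outputs
targets = torch.randint(0, 4, (8,))  # integer class labels
print(criterion(logits, targets))    # scalar loss
```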
/qdnet/loss/focal_loss.py:
--------------------------------------------------------------------------------
1 |
2 | import torch.nn.functional as F
3 | import torch
4 | import torch.nn as nn
5 | from torch.autograd import Variable
6 |
7 | class FocalLoss(nn.Module):
8 |
9 | def __init__(self, class_num=5, alpha=None, gamma=2, size_average=True):
10 | super(FocalLoss, self).__init__()
11 | if alpha is None:
12 | self.alpha = Variable(torch.ones(class_num, 1))
13 | else:
14 | if isinstance(alpha, Variable):
15 | self.alpha = alpha
16 | else:
17 | self.alpha = Variable(alpha)
18 | self.gamma = gamma
19 | self.class_num = class_num
20 | self.size_average = size_average
21 |
22 | def forward(self, inputs, targets):
23 | N = inputs.size(0)
24 | C = inputs.size(1)
25 | P = F.softmax(inputs)
26 |
27 | class_mask = inputs.data.new(N, C).fill_(0)
28 | class_mask = Variable(class_mask)
29 | ids = targets.view(-1, 1)
30 | class_mask.scatter_(1, ids.data, 1.)
31 | #print(class_mask)
32 |
33 |
34 | if inputs.is_cuda and not self.alpha.is_cuda:
35 | self.alpha = self.alpha.cuda()
36 | alpha = self.alpha[ids.data.view(-1)]
37 |
38 | probs = (P*class_mask).sum(1).view(-1,1)
39 |
40 | log_p = probs.log()
41 | #print('probs size= {}'.format(probs.size()))
42 | #print(probs)
43 |
44 | batch_loss = -alpha*(torch.pow((1-probs), self.gamma))*log_p
45 | #print('-----bacth_loss------')
46 | #print(batch_loss)
47 |
48 |
49 | if self.size_average:
50 | loss = batch_loss.mean()
51 | else:
52 | loss = batch_loss.sum()
53 | return loss
54 |
55 |
56 | class FocalLoss_(nn.Module):
57 | def __init__(self, alpha=1, gamma=2, logits=False, reduce=True):
58 |         super(FocalLoss_, self).__init__()
59 | self.alpha = alpha
60 | self.gamma = gamma
61 | self.logits = logits
62 | self.reduce = reduce
63 |
64 | def forward(self, inputs, targets):
65 |         if self.logits:
66 |             BCE_loss = F.binary_cross_entropy_with_logits(inputs, targets)
67 |         else:
68 |             # apply sigmoid only when the loss expects probabilities
69 |             inputs = torch.sigmoid(inputs)
70 |             BCE_loss = F.binary_cross_entropy(inputs, targets)
71 |
72 | pt = torch.exp(-BCE_loss)
73 | F_loss = self.alpha * (1-pt)**self.gamma * BCE_loss
74 |
75 | if self.reduce:
76 | return torch.mean(F_loss)
77 | else:
78 | return F_loss
79 |
--------------------------------------------------------------------------------
/qdnet/loss/loss.py:
--------------------------------------------------------------------------------
1 |
2 | import torch
3 | import numpy as np
4 | import torch.nn as nn
5 | from qdnet.loss.focal_loss import FocalLoss
6 | from qdnet.loss.ce_label_smoothing import CrossEntropyLossWithLabelSmoothing
7 |
8 | class Loss(object):
9 | def __init__(self, out_dim, loss_type="ce_loss", w=None):
10 | # w=torch.tensor([10,2,15,20],dtype=torch.float)
11 | if loss_type == "ce_loss":
12 | self.criterion = nn.CrossEntropyLoss(reduction='mean')
13 | elif loss_type == "ce_smothing_loss":
14 | self.criterion = CrossEntropyLossWithLabelSmoothing(out_dim)
15 | elif loss_type == "focal_loss":
16 | device = torch.device('cuda')
17 | self.criterion = FocalLoss().to(device)
18 | elif loss_type == "bce_loss":
19 | self.criterion = nn.BCEWithLogitsLoss(w)
20 | elif loss_type == "mlsm_loss":
21 | self.criterion = nn.MultiLabelSoftMarginLoss(w)
22 | else:
23 | raise NotImplementedError()
24 |
25 | #def __init__(self):
26 | # self.criterion = nn.CrossEntropyLoss(reduction='mean')
27 | # # self.criterion = FocalLoss().to(self.device)
28 |
29 | def rand_bbox(self, size, lam):
30 | W = size[2]
31 | H = size[3]
32 | cut_rat = np.sqrt(1. - lam)
33 |         cut_w = int(W * cut_rat)  # np.int is removed in recent NumPy versions
34 |         cut_h = int(H * cut_rat)
35 |
36 | # uniform
37 | cx = np.random.randint(W)
38 | cy = np.random.randint(H)
39 |
40 | bbx1 = np.clip(cx - cut_w // 2, 0, W)
41 | bby1 = np.clip(cy - cut_h // 2, 0, H)
42 | bbx2 = np.clip(cx + cut_w // 2, 0, W)
43 | bby2 = np.clip(cy + cut_h // 2, 0, H)
44 |
45 | return bbx1, bby1, bbx2, bby2
46 |
47 |
48 | def cutmix(self, data, targets, alpha):
49 | indices = torch.randperm(data.size(0))
50 | shuffled_data = data[indices]
51 | shuffled_targets = targets[indices]
52 |
53 | lam = np.random.beta(alpha, alpha)
54 | bbx1, bby1, bbx2, bby2 = self.rand_bbox(data.size(), lam)
55 | data[:, :, bbx1:bbx2, bby1:bby2] = data[indices, :, bbx1:bbx2, bby1:bby2]
56 | lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (data.size()[-1] * data.size()[-2]))
57 |
58 | targets = [targets, shuffled_targets, lam]
59 | return data, targets
60 |
61 |
62 | def cutmix_criterion(self, preds, targets):
63 | targets1, targets2, lam = targets[0], targets[1], targets[2]
64 | return lam * self.criterion(preds, targets1) + (1 - lam) * self.criterion(preds, targets2)
65 |
66 |
67 | def mixup(self, data, targets, alpha):
68 | indices = torch.randperm(data.size(0))
69 | shuffled_data = data[indices]
70 | shuffled_targets = targets[indices]
71 |
72 | lam = np.random.beta(alpha, alpha)
73 | data = data * lam + shuffled_data * (1 - lam)
74 | targets1 = [targets, shuffled_targets, lam]
75 | return data, targets1
76 |
77 |
78 | def mixup_criterion(self, preds, targets):
79 | targets1, targets2, lam = targets[0], targets[1], targets[2]
80 | return lam * self.criterion(preds, targets1) + (1 - lam) * self.criterion(preds, targets2)
81 |
82 |
83 | def __call__(self, model, images, targets, mixup_cutmix=False, alpha1=0.2, alpha2=1.0):
84 | if mixup_cutmix:
85 | if np.random.rand()<0.5:
86 | with torch.no_grad():
87 | images_mixup, targets_mixup = self.mixup(images, targets, alpha1)
88 |
89 | outputs_mixup = model(images_mixup)
90 | loss = self.mixup_criterion(outputs_mixup, targets_mixup)
91 |
92 | else:
93 | with torch.no_grad():
94 | images_cutmix, targets_cutmix = self.cutmix(images, targets, alpha2)
95 | outputs_cutmix = model(images_cutmix)
96 | loss = self.cutmix_criterion(outputs_cutmix, targets_cutmix)
97 |
98 | else:
99 | outputs = model(images)
100 | loss = self.criterion(outputs, targets)
101 |
102 | return loss
103 |
104 |
--------------------------------------------------------------------------------
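A usage sketch for the `Loss` wrapper above: it is called with the model itself so that mixup/cutmix can re-run the forward pass on mixed batches. The toy model here is only for illustration.

```python
import torch
import torch.nn as nn

from qdnet.loss.loss import Loss

model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 64 * 64, 4))
criterion = Loss(out_dim=4, loss_type="ce_loss")

images = torch.randn(8, 3, 64, 64)
targets = torch.randint(0, 4, (8,))
loss = criterion(model, images, targets, mixup_cutmix=False)
print(loss)
```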
/qdnet/loss/multilabel_loss.py:
--------------------------------------------------------------------------------
1 |
2 | import torch
3 | from torch import nn
4 |
5 | class WeightedMultilabel(nn.Module):
6 | def __init__(self, weights: torch.Tensor):
7 | super(WeightedMultilabel, self).__init__()
8 |         self.criterion = nn.BCEWithLogitsLoss(reduction='none')
9 | self.weights = weights
10 |
11 | def forward(self, outputs, targets):
12 |         loss = self.criterion(outputs, targets)
13 | return (loss * self.weights).mean()
14 |
15 |
16 | class Loss(object):
17 |
18 | def __init__(self, loss_type="bce", w=None):
19 | # w=torch.tensor([10,2,15,20],dtype=torch.float)
20 | if loss_type == "bce":
21 | self.criterion = nn.BCEWithLogitsLoss(w)
22 | elif loss_type == "wml":
23 | self.criterion = WeightedMultilabel(w)
24 | elif loss_type == "mlsl":
25 | self.criterion = nn.MultiLabelSoftMarginLoss(w)
26 | else:
27 | raise NotImplementedError()
28 |
29 | def __call__(self, model, images, targets):
30 | outputs = model(images)
31 |         loss = self.criterion(outputs, targets)
32 |         return loss
33 |
34 |
35 | # Verify that the three loss functions are equivalent
36 | if __name__ == '__main__':
37 |
38 | x=torch.randn(3,4)
39 | y=torch.randn(3,4)
40 |     # per-class weights for the loss function
41 | w=torch.tensor([10,2,15,20],dtype=torch.float)
42 |     # test the different loss functions
43 | criterion_BCE=nn.BCEWithLogitsLoss(w)
44 | criterion_mult=WeightedMultilabel(w)
45 | criterion_mult2=nn.MultiLabelSoftMarginLoss(w)
46 |
47 | loss1=criterion_BCE(x,y)
48 | loss2=criterion_mult(x,y)
49 | loss3=criterion_mult2(x,y)
50 |
51 | print(loss1)
52 | print(loss2)
53 | print(loss3)
54 |
55 | # tensor(7.8804)
56 | # tensor(7.8804)
57 | # tensor(7.8804)
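The `w` passed to these losses is a per-class weight vector. One common way to build it, assumed here rather than prescribed by this repo, is inverse positive-label frequency over the training targets, so that rare labels contribute more to the loss.

import torch
from qdnet.loss.multilabel_loss import Loss

# labels: multi-hot targets, shape (num_samples, num_classes) -- toy values for illustration
labels = torch.tensor([[1, 0, 1, 0],
                       [1, 1, 0, 0],
                       [1, 0, 0, 0]], dtype=torch.float)

pos_counts = labels.sum(dim=0).clamp(min=1)   # positives per class
w = labels.shape[0] / pos_counts              # rarer classes get larger weights
criterion = Loss(loss_type="wml", w=w)        # wraps WeightedMultilabel(w)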
--------------------------------------------------------------------------------
/qdnet/models/effnet.py:
--------------------------------------------------------------------------------
1 | # -*- coding:utf-8 -*-
2 | '''
3 | -------------------------------------------------
4 | Description : QDNet Model effnet
5 | Author : machinelp
6 | Date : 2020-06-04
7 | -------------------------------------------------
8 | '''
9 |
10 |
11 |
12 | import torch
13 | import torch.nn as nn
14 | import geffnet
15 | from qdnet.models.metric_strategy import Swish_module, ArcMarginProduct_subcenter, ArcFaceLossAdaptiveMargin
16 |
17 |
18 | class Effnet(nn.Module):
19 | '''
20 | '''
21 | def __init__(self, enet_type, out_dim, drop_nums=1, pretrained=False, metric_strategy=False):
22 | super(Effnet, self).__init__()
23 | self.enet = geffnet.create_model(enet_type, pretrained=pretrained)
24 | self.dropouts = nn.ModuleList([ nn.Dropout(0.5) for _ in range(drop_nums) ])
25 | in_ch = self.enet.classifier.in_features
26 | self.fc = nn.Linear(in_ch, 512)
27 | self.swish = Swish_module()
28 | self.metric_classify = ArcMarginProduct_subcenter(512, out_dim)
29 | self.classify = nn.Linear(in_ch, out_dim)
30 | self.enet.classifier = nn.Identity()
31 | self.metric_strategy = metric_strategy
32 |
33 | def extract(self, x):
34 | x = self.enet(x)
35 | return x
36 |
37 | def forward(self, x):
38 | x = self.extract(x).squeeze(-1).squeeze(-1)
39 | if self.metric_strategy:
40 | out = self.metric_classify(self.swish(self.fc(x)))
41 | else:
42 | for i, dropout in enumerate(self.dropouts):
43 | if i == 0:
44 | out = self.classify(dropout(x))
45 | else:
46 | out += self.classify(dropout(x))
47 | out /= len(self.dropouts)
48 | return out
49 |
--------------------------------------------------------------------------------
/qdnet/models/metric_strategy.py:
--------------------------------------------------------------------------------
1 | import math
2 | import numpy as np
3 | import torch
4 | import torch.nn as nn
5 | import torch.nn.functional as F
6 |
7 | class Swish(torch.autograd.Function):
8 |
9 | @staticmethod
10 | def forward(ctx, i):
11 | result = i * torch.sigmoid(i)
12 | ctx.save_for_backward(i)
13 | return result
14 |
15 | @staticmethod
16 | def backward(ctx, grad_output):
17 |         i = ctx.saved_tensors[0]
18 | sigmoid_i = torch.sigmoid(i)
19 | return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
20 |
21 |
22 | class Swish_module(nn.Module):
23 | def forward(self, x):
24 | return Swish.apply(x)
25 |
26 |
27 | class ArcMarginProduct_subcenter(nn.Module):
28 | def __init__(self, in_features, out_features, k=3):
29 | super().__init__()
30 | self.weight = nn.Parameter(torch.FloatTensor(out_features*k, in_features))
31 | self.reset_parameters()
32 | self.k = k
33 | self.out_features = out_features
34 |
35 | def reset_parameters(self):
36 | stdv = 1. / math.sqrt(self.weight.size(1))
37 | self.weight.data.uniform_(-stdv, stdv)
38 |
39 | def forward(self, features):
40 | cosine_all = F.linear(F.normalize(features), F.normalize(self.weight))
41 | cosine_all = cosine_all.view(-1, self.out_features, self.k)
42 | cosine, _ = torch.max(cosine_all, dim=2)
43 | return cosine
44 |
45 |
46 |
47 | class DenseCrossEntropy(nn.Module):
48 | def forward(self, x, target):
49 | x = x.float()
50 | target = target.float()
51 | logprobs = torch.nn.functional.log_softmax(x, dim=-1)
52 |
53 | loss = -logprobs * target
54 | loss = loss.sum(-1)
55 | return loss.mean()
56 |
57 |
58 | class ArcFaceLossAdaptiveMargin(nn.Module):
59 | def __init__(self, margins, s=30.0):
60 | super().__init__()
61 | self.crit = DenseCrossEntropy()
62 | self.s = s
63 | self.margins = margins
64 |
65 | def forward(self, logits, labels, out_dim):
66 | ms = []
67 | ms = self.margins[labels.cpu().numpy()]
68 | cos_m = torch.from_numpy(np.cos(ms)).float().cuda()
69 | sin_m = torch.from_numpy(np.sin(ms)).float().cuda()
70 | th = torch.from_numpy(np.cos(math.pi - ms)).float().cuda()
71 | mm = torch.from_numpy(np.sin(math.pi - ms) * ms).float().cuda()
72 | labels = F.one_hot(labels, out_dim).float()
73 | logits = logits.float()
74 | cosine = logits
75 | sine = torch.sqrt(1.0 - torch.pow(cosine, 2))
76 | phi = cosine * cos_m.view(-1,1) - sine * sin_m.view(-1,1)
77 | phi = torch.where(cosine > th.view(-1,1), phi, cosine - mm.view(-1,1))
78 | output = (labels * phi) + ((1.0 - labels) * cosine)
79 | output *= self.s
80 | loss = self.crit(output, labels)
81 | return loss
82 |
83 |
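A sketch tying the pieces above together. The `margins` array is indexed by class label inside the loss; deriving it from class frequency (rarer classes get larger margins) is one common recipe and is only an assumption here, as are the sizes. The loss moves its margin tensors to CUDA, so the sketch assumes a GPU is available.

import numpy as np
import torch
from qdnet.models.metric_strategy import ArcMarginProduct_subcenter, ArcFaceLossAdaptiveMargin

num_classes = 10
class_counts = np.random.randint(10, 1000, size=num_classes)            # placeholder class frequencies
tmp = class_counts ** (-1.0 / 4.0)
margins = (tmp - tmp.min()) / (tmp.max() - tmp.min()) * 0.45 + 0.05     # per-class margins in [0.05, 0.5]

head = ArcMarginProduct_subcenter(in_features=512, out_features=num_classes).cuda()
arcface = ArcFaceLossAdaptiveMargin(margins, s=30.0)

embeddings = torch.randn(8, 512).cuda()            # stand-in for backbone features
labels = torch.randint(0, num_classes, (8,)).cuda()
logits = head(embeddings)                          # sub-center cosine logits, shape (8, num_classes)
loss = arcface(logits, labels, num_classes)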
--------------------------------------------------------------------------------
/qdnet/models/resnest.py:
--------------------------------------------------------------------------------
1 | # -*- coding:utf-8 -*-
2 | '''
3 | -------------------------------------------------
4 | Description : QDNet Model resnest
5 | Author : machinelp
6 | Date : 2020-06-04
7 | -------------------------------------------------
8 | '''
9 |
10 |
11 |
12 | import torch
13 | import torch.nn as nn
14 | from resnest.torch import resnest50
15 | from resnest.torch import resnest101
16 | from resnest.torch import resnest200
17 | from resnest.torch import resnest269
18 | from qdnet.models.metric_strategy import Swish_module, ArcMarginProduct_subcenter, ArcFaceLossAdaptiveMargin
19 |
20 |
21 | class Resnest(nn.Module):
22 | '''
23 | '''
24 | def __init__(self, enet_type, out_dim, drop_nums=1, pretrained=False, metric_strategy=False):
25 | super(Resnest, self).__init__()
26 | if enet_type in ["resnest50", "resnest101", "resnest200", "resnest269"]:
27 | # self.enet = locals()[enet_type](pretrained=pretrained)
28 | self.enet = eval(enet_type)(pretrained=pretrained)
29 | self.dropouts = nn.ModuleList([ nn.Dropout(0.5) for _ in range(drop_nums) ])
30 | in_ch = self.enet.fc.in_features
31 | self.fc = nn.Linear(in_ch, 512)
32 | self.swish = Swish_module()
33 | self.metric_classify = ArcMarginProduct_subcenter(512, out_dim)
34 | self.classify = nn.Linear(in_ch, out_dim)
35 | self.enet.fc = nn.Identity()
36 | self.metric_strategy = metric_strategy
37 |
38 | def extract(self, x):
39 | x = self.enet(x)
40 | return x
41 |
42 | def forward(self, x):
43 | x = self.extract(x).squeeze(-1).squeeze(-1)
44 | if self.metric_strategy:
45 | out = self.metric_classify(self.swish(self.fc(x)))
46 | else:
47 | for i, dropout in enumerate(self.dropouts):
48 | if i == 0:
49 | out = self.classify(dropout(x))
50 | else:
51 | out += self.classify(dropout(x))
52 | out /= len(self.dropouts)
53 | return out
54 |
55 |
--------------------------------------------------------------------------------
/qdnet/models/resnet.py:
--------------------------------------------------------------------------------
1 | # -*- coding:utf-8 -*-
2 | '''
3 | -------------------------------------------------
4 | Description : QDNet Model resnet
5 | Author : machinelp
6 | Date : 2020-06-04
7 | -------------------------------------------------
8 | '''
9 |
10 |
11 |
12 | import torch
13 | import torch.nn as nn
14 | import torchvision
15 | from qdnet.conf.constant import Constant
16 | from qdnet.models.metric_strategy import Swish_module, ArcMarginProduct_subcenter, ArcFaceLossAdaptiveMargin
17 |
18 | class Resnet(nn.Module):
19 | '''
20 | '''
21 | def __init__(self, enet_type, out_dim, drop_nums=1, pretrained=False, metric_strategy=False):
22 | super(Resnet, self).__init__()
23 | if enet_type == Constant.RESNET_LIST[0]:
24 | self.model = torchvision.models.resnet18(pretrained=pretrained)
25 | self.model.fc = torch.nn.Sequential( torch.nn.Linear( in_features=512, out_features=out_dim ) )
26 | if enet_type == Constant.RESNET_LIST[1]:
27 | self.model = torchvision.models.resnet34(pretrained=pretrained)
28 |             self.model.fc = torch.nn.Sequential( torch.nn.Linear( in_features=512, out_features=out_dim ) )
29 | if enet_type == Constant.RESNET_LIST[2]:
30 | self.model = torchvision.models.resnet50(pretrained=pretrained)
31 | self.model.fc = torch.nn.Sequential( torch.nn.Linear( in_features=2048, out_features=out_dim ) )
32 |
33 |
34 | def forward(self, x):
35 | out = self.model(x)
36 | return out
37 |
38 |
--------------------------------------------------------------------------------
/qdnet/models/se_resnext.py:
--------------------------------------------------------------------------------
1 | # -*- coding:utf-8 -*-
2 | '''
3 | -------------------------------------------------
4 | Description : QDNet Model se_resnext
5 | Author : machinelp
6 | Date : 2020-06-04
7 | -------------------------------------------------
8 | '''
9 |
10 |
11 |
12 | import torch
13 | import torch.nn as nn
14 | from pretrainedmodels import se_resnext101_32x4d
15 | from qdnet.models.metric_strategy import Swish_module, ArcMarginProduct_subcenter, ArcFaceLossAdaptiveMargin
16 |
17 |
18 | class SeResnext(nn.Module):
19 | '''
20 | '''
21 | def __init__(self, enet_type, out_dim, drop_nums=1, pretrained=False, metric_strategy=False):
22 | super(SeResnext, self).__init__()
23 | if pretrained:
24 | self.enet = se_resnext101_32x4d(num_classes=1000, pretrained='imagenet')
25 | else:
26 | self.enet = se_resnext101_32x4d(num_classes=1000, pretrained=None)
27 | self.enet.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
28 | self.dropouts = nn.ModuleList([ nn.Dropout(0.5) for _ in range(drop_nums) ])
29 | in_ch = self.enet.last_linear.in_features
30 | self.fc = nn.Linear(in_ch, 512)
31 | self.swish = Swish_module()
32 | self.metric_classify = ArcMarginProduct_subcenter(512, out_dim)
33 | self.classify = nn.Linear(in_ch, out_dim)
34 |         self.enet.last_linear = nn.Identity()
35 | self.metric_strategy = metric_strategy
36 |
37 | def extract(self, x):
38 | x = self.enet(x)
39 | return x
40 |
41 | def forward(self, x):
42 | x = self.extract(x).squeeze(-1).squeeze(-1)
43 | if self.metric_strategy:
44 | out = self.metric_classify(self.swish(self.fc(x)))
45 | else:
46 | for i, dropout in enumerate(self.dropouts):
47 | if i == 0:
48 | out = self.classify(dropout(x))
49 | else:
50 | out += self.classify(dropout(x))
51 | out /= len(self.dropouts)
52 | return out
53 |
--------------------------------------------------------------------------------
/qdnet/models/squeezenet.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MachineLP/Pytorch_multi_task_classifier/c2c3686bfe0348bbf61afddc191cde3961bcbcd8/qdnet/models/squeezenet.py
--------------------------------------------------------------------------------
/qdnet/optimizer/optimizer.py:
--------------------------------------------------------------------------------
1 | # Fix Warmup Bug
2 | from warmup_scheduler import GradualWarmupScheduler # https://github.com/ildoonet/pytorch-gradual-warmup-lr
3 |
4 |
5 | class GradualWarmupSchedulerV2(GradualWarmupScheduler):
6 | def __init__(self, optimizer, multiplier, total_epoch, after_scheduler=None):
7 | super(GradualWarmupSchedulerV2, self).__init__(optimizer, multiplier, total_epoch, after_scheduler)
8 | def get_lr(self):
9 | if self.last_epoch > self.total_epoch:
10 | if self.after_scheduler:
11 | if not self.finished:
12 | self.after_scheduler.base_lrs = [base_lr * self.multiplier for base_lr in self.base_lrs]
13 | self.finished = True
14 | return self.after_scheduler.get_lr()
15 | return [base_lr * self.multiplier for base_lr in self.base_lrs]
16 | if self.multiplier == 1.0:
17 | return [base_lr * (float(self.last_epoch) / self.total_epoch) for base_lr in self.base_lrs]
18 | else:
19 | return [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.) for base_lr in self.base_lrs]
20 |
21 |
22 |
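A wiring sketch, assumed rather than taken from this repo's training scripts: warm the learning rate up over the first epoch, then hand off to cosine annealing. `train_one_epoch` is a placeholder.

import torch
from torch.optim.lr_scheduler import CosineAnnealingLR
from qdnet.optimizer.optimizer import GradualWarmupSchedulerV2

model = torch.nn.Linear(10, 2)                     # placeholder model
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

n_epochs = 10
scheduler_cosine = CosineAnnealingLR(optimizer, n_epochs - 1)
scheduler_warmup = GradualWarmupSchedulerV2(
    optimizer, multiplier=10, total_epoch=1, after_scheduler=scheduler_cosine)

for epoch in range(1, n_epochs + 1):
    # train_one_epoch(model, optimizer, ...)       # placeholder
    scheduler_warmup.step(epoch - 1)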
--------------------------------------------------------------------------------
/qdnet_classifier/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .byobnet import *
2 | from .cspnet import *
3 | from .densenet import *
4 | from .dla import *
5 | from .dpn import *
6 | from .efficientnet import *
7 | from .gluon_resnet import *
8 | from .gluon_xception import *
9 | from .hrnet import *
10 | from .inception_resnet_v2 import *
11 | from .inception_v3 import *
12 | from .inception_v4 import *
13 | from .mobilenetv3 import *
14 | from .nasnet import *
15 | from .nfnet import *
16 | from .pnasnet import *
17 | from .regnet import *
18 | from .res2net import *
19 | from .resnest import *
20 | from .resnet import *
21 | from .resnetv2 import *
22 | from .rexnet import *
23 | from .selecsls import *
24 | from .senet import *
25 | from .sknet import *
26 | from .tresnet import *
27 | from .vgg import *
28 | from .vision_transformer import *
29 | from .vovnet import *
30 | from .xception import *
31 | from .xception_aligned import *
32 |
33 | from .factory import create_model
34 | from .helpers import load_checkpoint, resume_checkpoint, model_parameters
35 | from .layers import TestTimePoolHead, apply_test_time_pool
36 | from .layers import convert_splitbn_model
37 | from .layers import is_scriptable, is_exportable, set_scriptable, set_exportable, is_no_jit, set_no_jit
38 | from .registry import *
39 |
40 | from .multi_label_model import * # ================================
41 |
--------------------------------------------------------------------------------
/qdnet_classifier/models/factory.py:
--------------------------------------------------------------------------------
1 | from .registry import is_model, is_model_in_modules, model_entrypoint
2 | from .helpers import load_checkpoint
3 | from .layers import set_layer_config
4 |
5 |
6 | def create_model(
7 | model_name,
8 | pretrained=False,
9 | checkpoint_path='',
10 | scriptable=None,
11 | exportable=None,
12 | no_jit=None,
13 | **kwargs):
14 | """Create a model
15 |
16 | Args:
17 | model_name (str): name of model to instantiate
18 | pretrained (bool): load pretrained ImageNet-1k weights if true
19 | checkpoint_path (str): path of checkpoint to load after model is initialized
20 | scriptable (bool): set layer config so that model is jit scriptable (not working for all models yet)
21 | exportable (bool): set layer config so that model is traceable / ONNX exportable (not fully impl/obeyed yet)
22 | no_jit (bool): set layer config so that model doesn't utilize jit scripted layers (so far activations only)
23 |
24 | Keyword Args:
25 | drop_rate (float): dropout rate for training (default: 0.0)
26 | global_pool (str): global pool type (default: 'avg')
27 | **: other kwargs are model specific
28 | """
29 | model_args = dict(pretrained=pretrained)
30 |
31 | # Only EfficientNet and MobileNetV3 models have support for batchnorm params or drop_connect_rate passed as args
32 | is_efficientnet = is_model_in_modules(model_name, ['efficientnet', 'mobilenetv3'])
33 | if not is_efficientnet:
34 | kwargs.pop('bn_tf', None)
35 | kwargs.pop('bn_momentum', None)
36 | kwargs.pop('bn_eps', None)
37 |
38 | # handle backwards compat with drop_connect -> drop_path change
39 | drop_connect_rate = kwargs.pop('drop_connect_rate', None)
40 | if drop_connect_rate is not None and kwargs.get('drop_path_rate', None) is None:
41 | print("WARNING: 'drop_connect' as an argument is deprecated, please use 'drop_path'."
42 | " Setting drop_path to %f." % drop_connect_rate)
43 | kwargs['drop_path_rate'] = drop_connect_rate
44 |
45 | # Parameters that aren't supported by all models or are intended to only override model defaults if set
46 | # should default to None in command line args/cfg. Remove them if they are present and not set so that
47 | # non-supporting models don't break and default args remain in effect.
48 | kwargs = {k: v for k, v in kwargs.items() if v is not None}
49 |
50 | with set_layer_config(scriptable=scriptable, exportable=exportable, no_jit=no_jit):
51 | if is_model(model_name):
52 | create_fn = model_entrypoint(model_name)
53 | model = create_fn(**model_args, **kwargs)
54 | else:
55 | raise RuntimeError('Unknown model (%s)' % model_name)
56 |
57 | if checkpoint_path:
58 | load_checkpoint(model, checkpoint_path)
59 |
60 | return model
61 |
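A minimal sketch of calling the factory. The entrypoint name must be registered in `.registry`; the extra keyword arguments shown here (`num_classes`, `drop_rate`) are simply forwarded to that entrypoint, as the docstring describes.

from qdnet_classifier.models import create_model

model = create_model(
    'resnet50',           # any registered entrypoint name
    pretrained=False,     # True would download ImageNet-1k weights
    num_classes=10,
    drop_rate=0.2)
model.eval()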
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/__init__.py:
--------------------------------------------------------------------------------
1 | from .activations import *
2 | from .adaptive_avgmax_pool import \
3 | adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d
4 | from .anti_aliasing import AntiAliasDownsampleLayer
5 | from .blur_pool import BlurPool2d
6 | from .classifier import ClassifierHead, create_classifier
7 | from .cond_conv2d import CondConv2d, get_condconv_initializer
8 | from .config import is_exportable, is_scriptable, is_no_jit, set_exportable, set_scriptable, set_no_jit,\
9 | set_layer_config
10 | from .conv2d_same import Conv2dSame, conv2d_same
11 | from .conv_bn_act import ConvBnAct
12 | from .create_act import create_act_layer, get_act_layer, get_act_fn
13 | from .create_attn import get_attn, create_attn
14 | from .create_conv2d import create_conv2d
15 | from .create_norm_act import get_norm_act_layer, create_norm_act, convert_norm_act
16 | from .drop import DropBlock2d, DropPath, drop_block_2d, drop_path
17 | from .eca import EcaModule, CecaModule
18 | from .evo_norm import EvoNormBatch2d, EvoNormSample2d
19 | from .helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible
20 | from .inplace_abn import InplaceAbn
21 | from .linear import Linear
22 | from .mixed_conv2d import MixedConv2d
23 | from .norm_act import BatchNormAct2d, GroupNormAct
24 | from .padding import get_padding, get_same_padding, pad_same
25 | from .pool2d_same import AvgPool2dSame, create_pool2d
26 | from .se import SEModule
27 | from .selective_kernel import SelectiveKernelConv
28 | from .separable_conv import SeparableConv2d, SeparableConvBnAct
29 | from .space_to_depth import SpaceToDepthModule
30 | from .split_attn import SplitAttnConv2d
31 | from .split_batchnorm import SplitBatchNorm2d, convert_splitbn_model
32 | from .std_conv import StdConv2d, StdConv2dSame, ScaledStdConv2d, ScaledStdConv2dSame
33 | from .test_time_pool import TestTimePoolHead, apply_test_time_pool
34 | from .weight_init import trunc_normal_
35 |
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/activations.py:
--------------------------------------------------------------------------------
1 | """ Activations
2 |
3 | A collection of activations fn and modules with a common interface so that they can
4 | easily be swapped. All have an `inplace` arg even if not used.
5 |
6 | Hacked together by / Copyright 2020 Ross Wightman
7 | """
8 |
9 | import torch
10 | from torch import nn as nn
11 | from torch.nn import functional as F
12 |
13 |
14 | def swish(x, inplace: bool = False):
15 | """Swish - Described in: https://arxiv.org/abs/1710.05941
16 | """
17 | return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid())
18 |
19 |
20 | class Swish(nn.Module):
21 | def __init__(self, inplace: bool = False):
22 | super(Swish, self).__init__()
23 | self.inplace = inplace
24 |
25 | def forward(self, x):
26 | return swish(x, self.inplace)
27 |
28 |
29 | def mish(x, inplace: bool = False):
30 | """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
31 | NOTE: I don't have a working inplace variant
32 | """
33 | return x.mul(F.softplus(x).tanh())
34 |
35 |
36 | class Mish(nn.Module):
37 | """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
38 | """
39 | def __init__(self, inplace: bool = False):
40 | super(Mish, self).__init__()
41 |
42 | def forward(self, x):
43 | return mish(x)
44 |
45 |
46 | def sigmoid(x, inplace: bool = False):
47 | return x.sigmoid_() if inplace else x.sigmoid()
48 |
49 |
50 | # PyTorch has this, but not with a consistent inplace argument interface
51 | class Sigmoid(nn.Module):
52 | def __init__(self, inplace: bool = False):
53 | super(Sigmoid, self).__init__()
54 | self.inplace = inplace
55 |
56 | def forward(self, x):
57 | return x.sigmoid_() if self.inplace else x.sigmoid()
58 |
59 |
60 | def tanh(x, inplace: bool = False):
61 | return x.tanh_() if inplace else x.tanh()
62 |
63 |
64 | # PyTorch has this, but not with a consistent inplace argument interface
65 | class Tanh(nn.Module):
66 | def __init__(self, inplace: bool = False):
67 | super(Tanh, self).__init__()
68 | self.inplace = inplace
69 |
70 | def forward(self, x):
71 | return x.tanh_() if self.inplace else x.tanh()
72 |
73 |
74 | def hard_swish(x, inplace: bool = False):
75 | inner = F.relu6(x + 3.).div_(6.)
76 | return x.mul_(inner) if inplace else x.mul(inner)
77 |
78 |
79 | class HardSwish(nn.Module):
80 | def __init__(self, inplace: bool = False):
81 | super(HardSwish, self).__init__()
82 | self.inplace = inplace
83 |
84 | def forward(self, x):
85 | return hard_swish(x, self.inplace)
86 |
87 |
88 | def hard_sigmoid(x, inplace: bool = False):
89 | if inplace:
90 | return x.add_(3.).clamp_(0., 6.).div_(6.)
91 | else:
92 | return F.relu6(x + 3.) / 6.
93 |
94 |
95 | class HardSigmoid(nn.Module):
96 | def __init__(self, inplace: bool = False):
97 | super(HardSigmoid, self).__init__()
98 | self.inplace = inplace
99 |
100 | def forward(self, x):
101 | return hard_sigmoid(x, self.inplace)
102 |
103 |
104 | def hard_mish(x, inplace: bool = False):
105 | """ Hard Mish
106 | Experimental, based on notes by Mish author Diganta Misra at
107 | https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md
108 | """
109 | if inplace:
110 | return x.mul_(0.5 * (x + 2).clamp(min=0, max=2))
111 | else:
112 | return 0.5 * x * (x + 2).clamp(min=0, max=2)
113 |
114 |
115 | class HardMish(nn.Module):
116 | def __init__(self, inplace: bool = False):
117 | super(HardMish, self).__init__()
118 | self.inplace = inplace
119 |
120 | def forward(self, x):
121 | return hard_mish(x, self.inplace)
122 |
123 |
124 | class PReLU(nn.PReLU):
125 | """Applies PReLU (w/ dummy inplace arg)
126 | """
127 | def __init__(self, num_parameters: int = 1, init: float = 0.25, inplace: bool = False) -> None:
128 | super(PReLU, self).__init__(num_parameters=num_parameters, init=init)
129 |
130 | def forward(self, input: torch.Tensor) -> torch.Tensor:
131 | return F.prelu(input, self.weight)
132 |
133 |
134 | def gelu(x: torch.Tensor, inplace: bool = False) -> torch.Tensor:
135 | return F.gelu(x)
136 |
137 |
138 | class GELU(nn.Module):
139 | """Applies the Gaussian Error Linear Units function (w/ dummy inplace arg)
140 | """
141 | def __init__(self, inplace: bool = False):
142 | super(GELU, self).__init__()
143 |
144 | def forward(self, input: torch.Tensor) -> torch.Tensor:
145 | return F.gelu(input)
146 |
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/activations_jit.py:
--------------------------------------------------------------------------------
1 | """ Activations
2 |
3 | A collection of jit-scripted activations fn and modules with a common interface so that they can
4 | easily be swapped. All have an `inplace` arg even if not used.
5 |
6 | All jit scripted activations are lacking in-place variations on purpose, scripted kernel fusion does not
7 | currently work across in-place op boundaries, thus performance is equal to or less than the non-scripted
8 | versions if they contain in-place ops.
9 |
10 | Hacked together by / Copyright 2020 Ross Wightman
11 | """
12 |
13 | import torch
14 | from torch import nn as nn
15 | from torch.nn import functional as F
16 |
17 |
18 | @torch.jit.script
19 | def swish_jit(x, inplace: bool = False):
20 | """Swish - Described in: https://arxiv.org/abs/1710.05941
21 | """
22 | return x.mul(x.sigmoid())
23 |
24 |
25 | @torch.jit.script
26 | def mish_jit(x, _inplace: bool = False):
27 | """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
28 | """
29 | return x.mul(F.softplus(x).tanh())
30 |
31 |
32 | class SwishJit(nn.Module):
33 | def __init__(self, inplace: bool = False):
34 | super(SwishJit, self).__init__()
35 |
36 | def forward(self, x):
37 | return swish_jit(x)
38 |
39 |
40 | class MishJit(nn.Module):
41 | def __init__(self, inplace: bool = False):
42 | super(MishJit, self).__init__()
43 |
44 | def forward(self, x):
45 | return mish_jit(x)
46 |
47 |
48 | @torch.jit.script
49 | def hard_sigmoid_jit(x, inplace: bool = False):
50 | # return F.relu6(x + 3.) / 6.
51 | return (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster?
52 |
53 |
54 | class HardSigmoidJit(nn.Module):
55 | def __init__(self, inplace: bool = False):
56 | super(HardSigmoidJit, self).__init__()
57 |
58 | def forward(self, x):
59 | return hard_sigmoid_jit(x)
60 |
61 |
62 | @torch.jit.script
63 | def hard_swish_jit(x, inplace: bool = False):
64 | # return x * (F.relu6(x + 3.) / 6)
65 | return x * (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster?
66 |
67 |
68 | class HardSwishJit(nn.Module):
69 | def __init__(self, inplace: bool = False):
70 | super(HardSwishJit, self).__init__()
71 |
72 | def forward(self, x):
73 | return hard_swish_jit(x)
74 |
75 |
76 | @torch.jit.script
77 | def hard_mish_jit(x, inplace: bool = False):
78 | """ Hard Mish
79 | Experimental, based on notes by Mish author Diganta Misra at
80 | https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md
81 | """
82 | return 0.5 * x * (x + 2).clamp(min=0, max=2)
83 |
84 |
85 | class HardMishJit(nn.Module):
86 | def __init__(self, inplace: bool = False):
87 | super(HardMishJit, self).__init__()
88 |
89 | def forward(self, x):
90 | return hard_mish_jit(x)
91 |
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/adaptive_avgmax_pool.py:
--------------------------------------------------------------------------------
1 | """ PyTorch selectable adaptive pooling
2 | Adaptive pooling with the ability to select the type of pooling from:
3 | * 'avg' - Average pooling
4 | * 'max' - Max pooling
5 | * 'avgmax' - Sum of average and max pooling re-scaled by 0.5
6 |     * 'catavgmax' - Concatenation of average and max pooling along feature dim, doubles feature dim
7 |
8 | Both a functional and a nn.Module version of the pooling is provided.
9 |
10 | Hacked together by / Copyright 2020 Ross Wightman
11 | """
12 | import torch
13 | import torch.nn as nn
14 | import torch.nn.functional as F
15 |
16 |
17 | def adaptive_pool_feat_mult(pool_type='avg'):
18 | if pool_type == 'catavgmax':
19 | return 2
20 | else:
21 | return 1
22 |
23 |
24 | def adaptive_avgmax_pool2d(x, output_size=1):
25 | x_avg = F.adaptive_avg_pool2d(x, output_size)
26 | x_max = F.adaptive_max_pool2d(x, output_size)
27 | return 0.5 * (x_avg + x_max)
28 |
29 |
30 | def adaptive_catavgmax_pool2d(x, output_size=1):
31 | x_avg = F.adaptive_avg_pool2d(x, output_size)
32 | x_max = F.adaptive_max_pool2d(x, output_size)
33 | return torch.cat((x_avg, x_max), 1)
34 |
35 |
36 | def select_adaptive_pool2d(x, pool_type='avg', output_size=1):
37 | """Selectable global pooling function with dynamic input kernel size
38 | """
39 | if pool_type == 'avg':
40 | x = F.adaptive_avg_pool2d(x, output_size)
41 | elif pool_type == 'avgmax':
42 | x = adaptive_avgmax_pool2d(x, output_size)
43 | elif pool_type == 'catavgmax':
44 | x = adaptive_catavgmax_pool2d(x, output_size)
45 | elif pool_type == 'max':
46 | x = F.adaptive_max_pool2d(x, output_size)
47 | else:
48 | assert False, 'Invalid pool type: %s' % pool_type
49 | return x
50 |
51 |
52 | class FastAdaptiveAvgPool2d(nn.Module):
53 | def __init__(self, flatten=False):
54 | super(FastAdaptiveAvgPool2d, self).__init__()
55 | self.flatten = flatten
56 |
57 | def forward(self, x):
58 | return x.mean((2, 3)) if self.flatten else x.mean((2, 3), keepdim=True)
59 |
60 |
61 | class AdaptiveAvgMaxPool2d(nn.Module):
62 | def __init__(self, output_size=1):
63 | super(AdaptiveAvgMaxPool2d, self).__init__()
64 | self.output_size = output_size
65 |
66 | def forward(self, x):
67 | return adaptive_avgmax_pool2d(x, self.output_size)
68 |
69 |
70 | class AdaptiveCatAvgMaxPool2d(nn.Module):
71 | def __init__(self, output_size=1):
72 | super(AdaptiveCatAvgMaxPool2d, self).__init__()
73 | self.output_size = output_size
74 |
75 | def forward(self, x):
76 | return adaptive_catavgmax_pool2d(x, self.output_size)
77 |
78 |
79 | class SelectAdaptivePool2d(nn.Module):
80 | """Selectable global pooling layer with dynamic input kernel size
81 | """
82 | def __init__(self, output_size=1, pool_type='fast', flatten=False):
83 | super(SelectAdaptivePool2d, self).__init__()
84 | self.pool_type = pool_type or '' # convert other falsy values to empty string for consistent TS typing
85 | self.flatten = flatten
86 | if pool_type == '':
87 | self.pool = nn.Identity() # pass through
88 | elif pool_type == 'fast':
89 | assert output_size == 1
90 | self.pool = FastAdaptiveAvgPool2d(self.flatten)
91 | self.flatten = False
92 | elif pool_type == 'avg':
93 | self.pool = nn.AdaptiveAvgPool2d(output_size)
94 | elif pool_type == 'avgmax':
95 | self.pool = AdaptiveAvgMaxPool2d(output_size)
96 | elif pool_type == 'catavgmax':
97 | self.pool = AdaptiveCatAvgMaxPool2d(output_size)
98 | elif pool_type == 'max':
99 | self.pool = nn.AdaptiveMaxPool2d(output_size)
100 | else:
101 | assert False, 'Invalid pool type: %s' % pool_type
102 |
103 | def is_identity(self):
104 | return self.pool_type == ''
105 |
106 | def forward(self, x):
107 | x = self.pool(x)
108 | if self.flatten:
109 | x = x.flatten(1)
110 | return x
111 |
112 | def feat_mult(self):
113 | return adaptive_pool_feat_mult(self.pool_type)
114 |
115 | def __repr__(self):
116 | return self.__class__.__name__ + ' (' \
117 | + 'pool_type=' + self.pool_type \
118 | + ', flatten=' + str(self.flatten) + ')'
119 |
120 |
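A small sketch of the selectable pooling layer: 'avgmax' averages the two pooled maps (so feat_mult() stays 1), while 'catavgmax' concatenates them (feat_mult() becomes 2). Shapes are illustrative.

import torch
from qdnet_classifier.models.layers import SelectAdaptivePool2d

x = torch.randn(2, 64, 7, 7)
pool = SelectAdaptivePool2d(pool_type='avgmax', flatten=True)
y = pool(x)                                   # shape (2, 64)
print(y.shape, pool.feat_mult())              # torch.Size([2, 64]) 1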
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/anti_aliasing.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn.parallel
3 | import torch.nn as nn
4 | import torch.nn.functional as F
5 |
6 |
7 | class AntiAliasDownsampleLayer(nn.Module):
8 | def __init__(self, channels: int = 0, filt_size: int = 3, stride: int = 2, no_jit: bool = False):
9 | super(AntiAliasDownsampleLayer, self).__init__()
10 | if no_jit:
11 | self.op = Downsample(channels, filt_size, stride)
12 | else:
13 | self.op = DownsampleJIT(channels, filt_size, stride)
14 |
15 | # FIXME I should probably override _apply and clear DownsampleJIT filter cache for .cuda(), .half(), etc calls
16 |
17 | def forward(self, x):
18 | return self.op(x)
19 |
20 |
21 | @torch.jit.script
22 | class DownsampleJIT(object):
23 | def __init__(self, channels: int = 0, filt_size: int = 3, stride: int = 2):
24 | self.channels = channels
25 | self.stride = stride
26 | self.filt_size = filt_size
27 | assert self.filt_size == 3
28 | assert stride == 2
29 | self.filt = {} # lazy init by device for DataParallel compat
30 |
31 | def _create_filter(self, like: torch.Tensor):
32 | filt = torch.tensor([1., 2., 1.], dtype=like.dtype, device=like.device)
33 | filt = filt[:, None] * filt[None, :]
34 | filt = filt / torch.sum(filt)
35 | return filt[None, None, :, :].repeat((self.channels, 1, 1, 1))
36 |
37 | def __call__(self, input: torch.Tensor):
38 | input_pad = F.pad(input, (1, 1, 1, 1), 'reflect')
39 | filt = self.filt.get(str(input.device), self._create_filter(input))
40 | return F.conv2d(input_pad, filt, stride=2, padding=0, groups=input.shape[1])
41 |
42 |
43 | class Downsample(nn.Module):
44 | def __init__(self, channels=None, filt_size=3, stride=2):
45 | super(Downsample, self).__init__()
46 | self.channels = channels
47 | self.filt_size = filt_size
48 | self.stride = stride
49 |
50 | assert self.filt_size == 3
51 | filt = torch.tensor([1., 2., 1.])
52 | filt = filt[:, None] * filt[None, :]
53 | filt = filt / torch.sum(filt)
54 |
55 | # self.filt = filt[None, None, :, :].repeat((self.channels, 1, 1, 1))
56 | self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))
57 |
58 | def forward(self, input):
59 | input_pad = F.pad(input, (1, 1, 1, 1), 'reflect')
60 | return F.conv2d(input_pad, self.filt, stride=self.stride, padding=0, groups=input.shape[1])
61 |
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/blur_pool.py:
--------------------------------------------------------------------------------
1 | """
2 | BlurPool layer inspired by
3 | - Kornia's Max_BlurPool2d
4 | - Making Convolutional Networks Shift-Invariant Again :cite:`zhang2019shiftinvar`
5 |
6 | FIXME merge this impl with those in `anti_aliasing.py`
7 |
8 | Hacked together by Chris Ha and Ross Wightman
9 | """
10 |
11 | import torch
12 | import torch.nn as nn
13 | import torch.nn.functional as F
14 | import numpy as np
15 | from typing import Dict
16 | from .padding import get_padding
17 |
18 |
19 | class BlurPool2d(nn.Module):
20 | r"""Creates a module that computes blurs and downsample a given feature map.
21 | See :cite:`zhang2019shiftinvar` for more details.
22 | Corresponds to the Downsample class, which does blurring and subsampling
23 |
24 | Args:
25 | channels = Number of input channels
26 | filt_size (int): binomial filter size for blurring. currently supports 3 (default) and 5.
27 | stride (int): downsampling filter stride
28 |
29 | Returns:
30 | torch.Tensor: the transformed tensor.
31 | """
32 | filt: Dict[str, torch.Tensor]
33 |
34 | def __init__(self, channels, filt_size=3, stride=2) -> None:
35 | super(BlurPool2d, self).__init__()
36 | assert filt_size > 1
37 | self.channels = channels
38 | self.filt_size = filt_size
39 | self.stride = stride
40 | pad_size = [get_padding(filt_size, stride, dilation=1)] * 4
41 | self.padding = nn.ReflectionPad2d(pad_size)
42 | self._coeffs = torch.tensor((np.poly1d((0.5, 0.5)) ** (self.filt_size - 1)).coeffs) # for torchscript compat
43 | self.filt = {} # lazy init by device for DataParallel compat
44 |
45 | def _create_filter(self, like: torch.Tensor):
46 | blur_filter = (self._coeffs[:, None] * self._coeffs[None, :]).to(dtype=like.dtype, device=like.device)
47 | return blur_filter[None, None, :, :].repeat(self.channels, 1, 1, 1)
48 |
49 | def _apply(self, fn):
50 | # override nn.Module _apply, reset filter cache if used
51 | self.filt = {}
52 |         return super(BlurPool2d, self)._apply(fn)
53 |
54 | def forward(self, input_tensor: torch.Tensor) -> torch.Tensor:
55 | C = input_tensor.shape[1]
56 | blur_filt = self.filt.get(str(input_tensor.device), self._create_filter(input_tensor))
57 | return F.conv2d(
58 | self.padding(input_tensor), blur_filt, stride=self.stride, groups=C)
59 |
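A quick sketch of the layer on its own: it reflection-pads, blurs with the binomial filter and downsamples by the stride. Sizes are illustrative.

import torch
from qdnet_classifier.models.layers import BlurPool2d

x = torch.randn(2, 64, 56, 56)
blur = BlurPool2d(channels=64, filt_size=3, stride=2)
y = blur(x)
print(y.shape)    # torch.Size([2, 64, 28, 28])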
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/cbam.py:
--------------------------------------------------------------------------------
1 | """ CBAM (sort-of) Attention
2 |
3 | Experimental impl of CBAM: Convolutional Block Attention Module: https://arxiv.org/abs/1807.06521
4 |
5 | WARNING: Results with these attention layers have been mixed. They can significantly reduce performance on
6 | some tasks, especially fine-grained it seems. I may end up removing this impl.
7 |
8 | Hacked together by / Copyright 2020 Ross Wightman
9 | """
10 |
11 | import torch
12 | from torch import nn as nn
13 | import torch.nn.functional as F
14 | from .conv_bn_act import ConvBnAct
15 |
16 |
17 | class ChannelAttn(nn.Module):
18 | """ Original CBAM channel attention module, currently avg + max pool variant only.
19 | """
20 | def __init__(self, channels, reduction=16, act_layer=nn.ReLU):
21 | super(ChannelAttn, self).__init__()
22 | self.fc1 = nn.Conv2d(channels, channels // reduction, 1, bias=False)
23 | self.act = act_layer(inplace=True)
24 | self.fc2 = nn.Conv2d(channels // reduction, channels, 1, bias=False)
25 |
26 | def forward(self, x):
27 | x_avg = x.mean((2, 3), keepdim=True)
28 | x_max = F.adaptive_max_pool2d(x, 1)
29 | x_avg = self.fc2(self.act(self.fc1(x_avg)))
30 | x_max = self.fc2(self.act(self.fc1(x_max)))
31 | x_attn = x_avg + x_max
32 | return x * x_attn.sigmoid()
33 |
34 |
35 | class LightChannelAttn(ChannelAttn):
36 |     """An experimental 'lightweight' variant that sums avg + max pool first
37 | """
38 | def __init__(self, channels, reduction=16):
39 | super(LightChannelAttn, self).__init__(channels, reduction)
40 |
41 | def forward(self, x):
42 | x_pool = 0.5 * x.mean((2, 3), keepdim=True) + 0.5 * F.adaptive_max_pool2d(x, 1)
43 | x_attn = self.fc2(self.act(self.fc1(x_pool)))
44 | return x * x_attn.sigmoid()
45 |
46 |
47 | class SpatialAttn(nn.Module):
48 | """ Original CBAM spatial attention module
49 | """
50 | def __init__(self, kernel_size=7):
51 | super(SpatialAttn, self).__init__()
52 | self.conv = ConvBnAct(2, 1, kernel_size, act_layer=None)
53 |
54 | def forward(self, x):
55 | x_avg = torch.mean(x, dim=1, keepdim=True)
56 | x_max = torch.max(x, dim=1, keepdim=True)[0]
57 | x_attn = torch.cat([x_avg, x_max], dim=1)
58 | x_attn = self.conv(x_attn)
59 | return x * x_attn.sigmoid()
60 |
61 |
62 | class LightSpatialAttn(nn.Module):
63 | """An experimental 'lightweight' variant that sums avg_pool and max_pool results.
64 | """
65 | def __init__(self, kernel_size=7):
66 | super(LightSpatialAttn, self).__init__()
67 | self.conv = ConvBnAct(1, 1, kernel_size, act_layer=None)
68 |
69 | def forward(self, x):
70 | x_avg = torch.mean(x, dim=1, keepdim=True)
71 | x_max = torch.max(x, dim=1, keepdim=True)[0]
72 | x_attn = 0.5 * x_avg + 0.5 * x_max
73 | x_attn = self.conv(x_attn)
74 | return x * x_attn.sigmoid()
75 |
76 |
77 | class CbamModule(nn.Module):
78 | def __init__(self, channels, spatial_kernel_size=7):
79 | super(CbamModule, self).__init__()
80 | self.channel = ChannelAttn(channels)
81 | self.spatial = SpatialAttn(spatial_kernel_size)
82 |
83 | def forward(self, x):
84 | x = self.channel(x)
85 | x = self.spatial(x)
86 | return x
87 |
88 |
89 | class LightCbamModule(nn.Module):
90 | def __init__(self, channels, spatial_kernel_size=7):
91 | super(LightCbamModule, self).__init__()
92 | self.channel = LightChannelAttn(channels)
93 | self.spatial = LightSpatialAttn(spatial_kernel_size)
94 |
95 | def forward(self, x):
96 | x = self.channel(x)
97 | x = self.spatial(x)
98 | return x
99 |
100 |
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/classifier.py:
--------------------------------------------------------------------------------
1 | """ Classifier head and layer factory
2 |
3 | Hacked together by / Copyright 2020 Ross Wightman
4 | """
5 | from torch import nn as nn
6 | from torch.nn import functional as F
7 |
8 | from .adaptive_avgmax_pool import SelectAdaptivePool2d
9 | from .linear import Linear
10 |
11 |
12 | def _create_pool(num_features, num_classes, pool_type='avg', use_conv=False):
13 | flatten_in_pool = not use_conv # flatten when we use a Linear layer after pooling
14 | if not pool_type:
15 | assert num_classes == 0 or use_conv,\
16 | 'Pooling can only be disabled if classifier is also removed or conv classifier is used'
17 | flatten_in_pool = False # disable flattening if pooling is pass-through (no pooling)
18 | global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=flatten_in_pool)
19 | num_pooled_features = num_features * global_pool.feat_mult()
20 | return global_pool, num_pooled_features
21 |
22 |
23 | def _create_fc(num_features, num_classes, pool_type='avg', use_conv=False):
24 | if num_classes <= 0:
25 | fc = nn.Identity() # pass-through (no classifier)
26 | elif use_conv:
27 | fc = nn.Conv2d(num_features, num_classes, 1, bias=True)
28 | else:
29 | # NOTE: using my Linear wrapper that fixes AMP + torchscript casting issue
30 | fc = Linear(num_features, num_classes, bias=True)
31 | return fc
32 |
33 |
34 | def create_classifier(num_features, num_classes, pool_type='avg', use_conv=False):
35 | global_pool, num_pooled_features = _create_pool(num_features, num_classes, pool_type, use_conv=use_conv)
36 | fc = _create_fc(num_pooled_features, num_classes, use_conv=use_conv)
37 | return global_pool, fc
38 |
39 |
40 | class ClassifierHead(nn.Module):
41 | """Classifier head w/ configurable global pooling and dropout."""
42 |
43 | def __init__(self, in_chs, num_classes, pool_type='avg', drop_rate=0., use_conv=False):
44 | super(ClassifierHead, self).__init__()
45 | self.drop_rate = drop_rate
46 | self.global_pool, num_pooled_features = _create_pool(in_chs, num_classes, pool_type, use_conv=use_conv)
47 | self.fc = _create_fc(num_pooled_features, num_classes, use_conv=use_conv)
48 | self.flatten_after_fc = use_conv and pool_type
49 |
50 | def forward(self, x):
51 | x = self.global_pool(x)
52 | if self.drop_rate:
53 | x = F.dropout(x, p=float(self.drop_rate), training=self.training)
54 | x = self.fc(x)
55 | return x
56 |
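A small sketch of the head on a ResNet-50-sized feature map; the 2048-channel input is only an example.

import torch
from qdnet_classifier.models.layers import ClassifierHead

features = torch.randn(2, 2048, 7, 7)
head = ClassifierHead(in_chs=2048, num_classes=10, pool_type='avg', drop_rate=0.2)
logits = head(features)
print(logits.shape)    # torch.Size([2, 10])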
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/cond_conv2d.py:
--------------------------------------------------------------------------------
1 | """ PyTorch Conditionally Parameterized Convolution (CondConv)
2 |
3 | Paper: CondConv: Conditionally Parameterized Convolutions for Efficient Inference
4 | (https://arxiv.org/abs/1904.04971)
5 |
6 | Hacked together by / Copyright 2020 Ross Wightman
7 | """
8 |
9 | import math
10 | from functools import partial
11 | import numpy as np
12 | import torch
13 | from torch import nn as nn
14 | from torch.nn import functional as F
15 |
16 | from .helpers import to_2tuple
17 | from .conv2d_same import conv2d_same
18 | from .padding import get_padding_value
19 |
20 |
21 | def get_condconv_initializer(initializer, num_experts, expert_shape):
22 | def condconv_initializer(weight):
23 | """CondConv initializer function."""
24 | num_params = np.prod(expert_shape)
25 | if (len(weight.shape) != 2 or weight.shape[0] != num_experts or
26 | weight.shape[1] != num_params):
27 | raise (ValueError(
28 | 'CondConv variables must have shape [num_experts, num_params]'))
29 | for i in range(num_experts):
30 | initializer(weight[i].view(expert_shape))
31 | return condconv_initializer
32 |
33 |
34 | class CondConv2d(nn.Module):
35 | """ Conditionally Parameterized Convolution
36 | Inspired by: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/condconv/condconv_layers.py
37 |
38 | Grouped convolution hackery for parallel execution of the per-sample kernel filters inspired by this discussion:
39 | https://github.com/pytorch/pytorch/issues/17983
40 | """
41 | __constants__ = ['in_channels', 'out_channels', 'dynamic_padding']
42 |
43 | def __init__(self, in_channels, out_channels, kernel_size=3,
44 | stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4):
45 | super(CondConv2d, self).__init__()
46 |
47 | self.in_channels = in_channels
48 | self.out_channels = out_channels
49 | self.kernel_size = to_2tuple(kernel_size)
50 | self.stride = to_2tuple(stride)
51 | padding_val, is_padding_dynamic = get_padding_value(
52 | padding, kernel_size, stride=stride, dilation=dilation)
53 | self.dynamic_padding = is_padding_dynamic # if in forward to work with torchscript
54 | self.padding = to_2tuple(padding_val)
55 | self.dilation = to_2tuple(dilation)
56 | self.groups = groups
57 | self.num_experts = num_experts
58 |
59 | self.weight_shape = (self.out_channels, self.in_channels // self.groups) + self.kernel_size
60 | weight_num_param = 1
61 | for wd in self.weight_shape:
62 | weight_num_param *= wd
63 | self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param))
64 |
65 | if bias:
66 | self.bias_shape = (self.out_channels,)
67 | self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels))
68 | else:
69 | self.register_parameter('bias', None)
70 |
71 | self.reset_parameters()
72 |
73 | def reset_parameters(self):
74 | init_weight = get_condconv_initializer(
75 | partial(nn.init.kaiming_uniform_, a=math.sqrt(5)), self.num_experts, self.weight_shape)
76 | init_weight(self.weight)
77 | if self.bias is not None:
78 | fan_in = np.prod(self.weight_shape[1:])
79 | bound = 1 / math.sqrt(fan_in)
80 | init_bias = get_condconv_initializer(
81 | partial(nn.init.uniform_, a=-bound, b=bound), self.num_experts, self.bias_shape)
82 | init_bias(self.bias)
83 |
84 | def forward(self, x, routing_weights):
85 | B, C, H, W = x.shape
86 | weight = torch.matmul(routing_weights, self.weight)
87 | new_weight_shape = (B * self.out_channels, self.in_channels // self.groups) + self.kernel_size
88 | weight = weight.view(new_weight_shape)
89 | bias = None
90 | if self.bias is not None:
91 | bias = torch.matmul(routing_weights, self.bias)
92 | bias = bias.view(B * self.out_channels)
93 | # move batch elements with channels so each batch element can be efficiently convolved with separate kernel
94 | x = x.view(1, B * C, H, W)
95 | if self.dynamic_padding:
96 | out = conv2d_same(
97 | x, weight, bias, stride=self.stride, padding=self.padding,
98 | dilation=self.dilation, groups=self.groups * B)
99 | else:
100 | out = F.conv2d(
101 | x, weight, bias, stride=self.stride, padding=self.padding,
102 | dilation=self.dilation, groups=self.groups * B)
103 | out = out.permute([1, 0, 2, 3]).view(B, self.out_channels, out.shape[-2], out.shape[-1])
104 |
105 | # Literal port (from TF definition)
106 | # x = torch.split(x, 1, 0)
107 | # weight = torch.split(weight, 1, 0)
108 | # if self.bias is not None:
109 | # bias = torch.matmul(routing_weights, self.bias)
110 | # bias = torch.split(bias, 1, 0)
111 | # else:
112 | # bias = [None] * B
113 | # out = []
114 | # for xi, wi, bi in zip(x, weight, bias):
115 | # wi = wi.view(*self.weight_shape)
116 | # if bi is not None:
117 | # bi = bi.view(*self.bias_shape)
118 | # out.append(self.conv_fn(
119 | # xi, wi, bi, stride=self.stride, padding=self.padding,
120 | # dilation=self.dilation, groups=self.groups))
121 | # out = torch.cat(out, 0)
122 | return out
123 |
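A sketch of the per-sample expert mixing. In a real model the routing weights come from a small gating layer (for example, globally pooled features through a linear layer and a sigmoid); random values are used here only to show the shapes.

import torch
from qdnet_classifier.models.layers import CondConv2d

x = torch.randn(4, 16, 32, 32)
conv = CondConv2d(16, 32, kernel_size=3, stride=1, num_experts=4)
routing_weights = torch.sigmoid(torch.randn(4, 4))   # (batch, num_experts)
y = conv(x, routing_weights)
print(y.shape)    # torch.Size([4, 32, 32, 32])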
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/config.py:
--------------------------------------------------------------------------------
1 | """ Model / Layer Config singleton state
2 | """
3 | from typing import Any, Optional
4 |
5 | __all__ = [
6 | 'is_exportable', 'is_scriptable', 'is_no_jit',
7 | 'set_exportable', 'set_scriptable', 'set_no_jit', 'set_layer_config'
8 | ]
9 |
10 | # Set to True if prefer to have layers with no jit optimization (includes activations)
11 | _NO_JIT = False
12 |
13 | # Set to True if prefer to have activation layers with no jit optimization
14 | # NOTE not currently used as no difference between no_jit and no_activation jit as only layers obeying
15 | # the jit flags so far are activations. This will change as more layers are updated and/or added.
16 | _NO_ACTIVATION_JIT = False
17 |
18 | # Set to True if exporting a model with Same padding via ONNX
19 | _EXPORTABLE = False
20 |
21 | # Set to True if wanting to use torch.jit.script on a model
22 | _SCRIPTABLE = False
23 |
24 |
25 | def is_no_jit():
26 | return _NO_JIT
27 |
28 |
29 | class set_no_jit:
30 | def __init__(self, mode: bool) -> None:
31 | global _NO_JIT
32 | self.prev = _NO_JIT
33 | _NO_JIT = mode
34 |
35 | def __enter__(self) -> None:
36 | pass
37 |
38 | def __exit__(self, *args: Any) -> bool:
39 | global _NO_JIT
40 | _NO_JIT = self.prev
41 | return False
42 |
43 |
44 | def is_exportable():
45 | return _EXPORTABLE
46 |
47 |
48 | class set_exportable:
49 | def __init__(self, mode: bool) -> None:
50 | global _EXPORTABLE
51 | self.prev = _EXPORTABLE
52 | _EXPORTABLE = mode
53 |
54 | def __enter__(self) -> None:
55 | pass
56 |
57 | def __exit__(self, *args: Any) -> bool:
58 | global _EXPORTABLE
59 | _EXPORTABLE = self.prev
60 | return False
61 |
62 |
63 | def is_scriptable():
64 | return _SCRIPTABLE
65 |
66 |
67 | class set_scriptable:
68 | def __init__(self, mode: bool) -> None:
69 | global _SCRIPTABLE
70 | self.prev = _SCRIPTABLE
71 | _SCRIPTABLE = mode
72 |
73 | def __enter__(self) -> None:
74 | pass
75 |
76 | def __exit__(self, *args: Any) -> bool:
77 | global _SCRIPTABLE
78 | _SCRIPTABLE = self.prev
79 | return False
80 |
81 |
82 | class set_layer_config:
83 | """ Layer config context manager that allows setting all layer config flags at once.
84 | If a flag arg is None, it will not change the current value.
85 | """
86 | def __init__(
87 | self,
88 | scriptable: Optional[bool] = None,
89 | exportable: Optional[bool] = None,
90 | no_jit: Optional[bool] = None,
91 | no_activation_jit: Optional[bool] = None):
92 | global _SCRIPTABLE
93 | global _EXPORTABLE
94 | global _NO_JIT
95 | global _NO_ACTIVATION_JIT
96 | self.prev = _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT
97 | if scriptable is not None:
98 | _SCRIPTABLE = scriptable
99 | if exportable is not None:
100 | _EXPORTABLE = exportable
101 | if no_jit is not None:
102 | _NO_JIT = no_jit
103 | if no_activation_jit is not None:
104 | _NO_ACTIVATION_JIT = no_activation_jit
105 |
106 | def __enter__(self) -> None:
107 | pass
108 |
109 | def __exit__(self, *args: Any) -> bool:
110 | global _SCRIPTABLE
111 | global _EXPORTABLE
112 | global _NO_JIT
113 | global _NO_ACTIVATION_JIT
114 | _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT = self.prev
115 | return False
116 |
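A usage sketch of the context manager: flags set on entry are restored on exit, so model construction elsewhere is unaffected.

from qdnet_classifier.models.layers import set_layer_config, is_exportable

with set_layer_config(exportable=True, no_jit=True):
    assert is_exportable()
    # build the model to be exported here, e.g. model = create_model('resnet50')
# on exit the previous flag values are restored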
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/conv2d_same.py:
--------------------------------------------------------------------------------
1 | """ Conv2d w/ Same Padding
2 |
3 | Hacked together by / Copyright 2020 Ross Wightman
4 | """
5 | import torch
6 | import torch.nn as nn
7 | import torch.nn.functional as F
8 | from typing import Tuple, Optional
9 |
10 | from .padding import pad_same, get_padding_value
11 |
12 |
13 | def conv2d_same(
14 | x, weight: torch.Tensor, bias: Optional[torch.Tensor] = None, stride: Tuple[int, int] = (1, 1),
15 | padding: Tuple[int, int] = (0, 0), dilation: Tuple[int, int] = (1, 1), groups: int = 1):
16 | x = pad_same(x, weight.shape[-2:], stride, dilation)
17 | return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups)
18 |
19 |
20 | class Conv2dSame(nn.Conv2d):
21 | """ Tensorflow like 'SAME' convolution wrapper for 2D convolutions
22 | """
23 |
24 | def __init__(self, in_channels, out_channels, kernel_size, stride=1,
25 | padding=0, dilation=1, groups=1, bias=True):
26 | super(Conv2dSame, self).__init__(
27 | in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias)
28 |
29 | def forward(self, x):
30 | return conv2d_same(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
31 |
32 |
33 | def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs):
34 | padding = kwargs.pop('padding', '')
35 | kwargs.setdefault('bias', False)
36 | padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs)
37 | if is_dynamic:
38 | return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs)
39 | else:
40 | return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs)
41 |
42 |
43 |
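A quick sketch of the TF-style padding: the output H/W is ceil(input / stride) regardless of the kernel size, which plain nn.Conv2d with fixed padding does not guarantee for odd input sizes.

import torch
from qdnet_classifier.models.layers import Conv2dSame

x = torch.randn(1, 3, 15, 15)
conv = Conv2dSame(3, 8, kernel_size=3, stride=2)
print(conv(x).shape)    # torch.Size([1, 8, 8, 8])  == ceil(15 / 2)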
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/conv_bn_act.py:
--------------------------------------------------------------------------------
1 | """ Conv2d + BN + Act
2 |
3 | Hacked together by / Copyright 2020 Ross Wightman
4 | """
5 | from torch import nn as nn
6 |
7 | from .create_conv2d import create_conv2d
8 | from .create_norm_act import convert_norm_act
9 |
10 |
11 | class ConvBnAct(nn.Module):
12 | def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding='', dilation=1, groups=1,
13 | bias=False, apply_act=True, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, aa_layer=None,
14 | drop_block=None):
15 | super(ConvBnAct, self).__init__()
16 | use_aa = aa_layer is not None
17 |
18 | self.conv = create_conv2d(
19 | in_channels, out_channels, kernel_size, stride=1 if use_aa else stride,
20 | padding=padding, dilation=dilation, groups=groups, bias=bias)
21 |
22 | # NOTE for backwards compatibility with models that use separate norm and act layer definitions
23 | norm_act_layer = convert_norm_act(norm_layer, act_layer)
24 | self.bn = norm_act_layer(out_channels, apply_act=apply_act, drop_block=drop_block)
25 | self.aa = aa_layer(channels=out_channels) if stride == 2 and use_aa else None
26 |
27 | @property
28 | def in_channels(self):
29 | return self.conv.in_channels
30 |
31 | @property
32 | def out_channels(self):
33 | return self.conv.out_channels
34 |
35 | def forward(self, x):
36 | x = self.conv(x)
37 | x = self.bn(x)
38 | if self.aa is not None:
39 | x = self.aa(x)
40 | return x
41 |
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/create_act.py:
--------------------------------------------------------------------------------
1 | """ Activation Factory
2 | Hacked together by / Copyright 2020 Ross Wightman
3 | """
4 | from .activations import *
5 | from .activations_jit import *
6 | from .activations_me import *
7 | from .config import is_exportable, is_scriptable, is_no_jit
8 |
9 | # PyTorch has an optimized, native 'silu' (aka 'swish') operator as of PyTorch 1.7. This code
10 | # will use native version if present. Eventually, the custom Swish layers will be removed
11 | # and only native 'silu' will be used.
12 | _has_silu = 'silu' in dir(torch.nn.functional)
13 |
14 | _ACT_FN_DEFAULT = dict(
15 | silu=F.silu if _has_silu else swish,
16 | swish=F.silu if _has_silu else swish,
17 | mish=mish,
18 | relu=F.relu,
19 | relu6=F.relu6,
20 | leaky_relu=F.leaky_relu,
21 | elu=F.elu,
22 | celu=F.celu,
23 | selu=F.selu,
24 | gelu=gelu,
25 | sigmoid=sigmoid,
26 | tanh=tanh,
27 | hard_sigmoid=hard_sigmoid,
28 | hard_swish=hard_swish,
29 | hard_mish=hard_mish,
30 | )
31 |
32 | _ACT_FN_JIT = dict(
33 | silu=F.silu if _has_silu else swish_jit,
34 | swish=F.silu if _has_silu else swish_jit,
35 | mish=mish_jit,
36 | hard_sigmoid=hard_sigmoid_jit,
37 | hard_swish=hard_swish_jit,
38 | hard_mish=hard_mish_jit
39 | )
40 |
41 | _ACT_FN_ME = dict(
42 | silu=F.silu if _has_silu else swish_me,
43 | swish=F.silu if _has_silu else swish_me,
44 | mish=mish_me,
45 | hard_sigmoid=hard_sigmoid_me,
46 | hard_swish=hard_swish_me,
47 | hard_mish=hard_mish_me,
48 | )
49 |
50 | _ACT_LAYER_DEFAULT = dict(
51 | silu=nn.SiLU if _has_silu else Swish,
52 | swish=nn.SiLU if _has_silu else Swish,
53 | mish=Mish,
54 | relu=nn.ReLU,
55 | relu6=nn.ReLU6,
56 | leaky_relu=nn.LeakyReLU,
57 | elu=nn.ELU,
58 | prelu=PReLU,
59 | celu=nn.CELU,
60 | selu=nn.SELU,
61 | gelu=GELU,
62 | sigmoid=Sigmoid,
63 | tanh=Tanh,
64 | hard_sigmoid=HardSigmoid,
65 | hard_swish=HardSwish,
66 | hard_mish=HardMish,
67 | )
68 |
69 | _ACT_LAYER_JIT = dict(
70 | silu=nn.SiLU if _has_silu else SwishJit,
71 | swish=nn.SiLU if _has_silu else SwishJit,
72 | mish=MishJit,
73 | hard_sigmoid=HardSigmoidJit,
74 | hard_swish=HardSwishJit,
75 | hard_mish=HardMishJit
76 | )
77 |
78 | _ACT_LAYER_ME = dict(
79 | silu=nn.SiLU if _has_silu else SwishMe,
80 | swish=nn.SiLU if _has_silu else SwishMe,
81 | mish=MishMe,
82 | hard_sigmoid=HardSigmoidMe,
83 | hard_swish=HardSwishMe,
84 | hard_mish=HardMishMe,
85 | )
86 |
87 |
88 | def get_act_fn(name='relu'):
89 | """ Activation Function Factory
90 | Fetching activation fns by name with this function allows export or torch script friendly
91 | functions to be returned dynamically based on current config.
92 | """
93 | if not name:
94 | return None
95 | if not (is_no_jit() or is_exportable() or is_scriptable()):
96 | # If not exporting or scripting the model, first look for a memory-efficient version with
97 | # custom autograd, then fallback
98 | if name in _ACT_FN_ME:
99 | return _ACT_FN_ME[name]
100 | if is_exportable() and name in ('silu', 'swish'):
101 | # FIXME PyTorch SiLU doesn't ONNX export, this is a temp hack
102 | return swish
103 | if not (is_no_jit() or is_exportable()):
104 | if name in _ACT_FN_JIT:
105 | return _ACT_FN_JIT[name]
106 | return _ACT_FN_DEFAULT[name]
107 |
108 |
109 | def get_act_layer(name='relu'):
110 | """ Activation Layer Factory
111 | Fetching activation layers by name with this function allows export or torch script friendly
112 | layers to be returned dynamically based on current config.
113 | """
114 | if not name:
115 | return None
116 | if not (is_no_jit() or is_exportable() or is_scriptable()):
117 | if name in _ACT_LAYER_ME:
118 | return _ACT_LAYER_ME[name]
119 | if is_exportable() and name in ('silu', 'swish'):
120 | # FIXME PyTorch SiLU doesn't ONNX export, this is a temp hack
121 | return Swish
122 | if not (is_no_jit() or is_exportable()):
123 | if name in _ACT_LAYER_JIT:
124 | return _ACT_LAYER_JIT[name]
125 | return _ACT_LAYER_DEFAULT[name]
126 |
127 |
128 | def create_act_layer(name, inplace=False, **kwargs):
129 | act_layer = get_act_layer(name)
130 | if act_layer is not None:
131 | return act_layer(inplace=inplace, **kwargs)
132 | else:
133 | return None
134 |
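A short sketch of the factory in use (assuming torch and the repo package are importable); which implementation comes back depends on the jit/export flags in the local config module:

import torch
from qdnet_classifier.models.layers.create_act import get_act_fn, get_act_layer, create_act_layer

act_fn = get_act_fn('hard_swish')    # memory-efficient custom-autograd impl when not scripting/exporting
act_layer = get_act_layer('swish')   # nn.SiLU on torch >= 1.7, else the memory-efficient SwishMe
act = create_act_layer('relu', inplace=True)

x = torch.randn(4)
print(act_fn(x).shape, act_layer.__name__, act(x).shape)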
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/create_attn.py:
--------------------------------------------------------------------------------
1 | """ Select AttentionFactory Method
2 |
3 | Hacked together by / Copyright 2020 Ross Wightman
4 | """
5 | import torch
6 | from .se import SEModule, EffectiveSEModule
7 | from .eca import EcaModule, CecaModule
8 | from .cbam import CbamModule, LightCbamModule
9 |
10 |
11 | def get_attn(attn_type):
12 | if isinstance(attn_type, torch.nn.Module):
13 | return attn_type
14 | module_cls = None
15 | if attn_type is not None:
16 | if isinstance(attn_type, str):
17 | attn_type = attn_type.lower()
18 | if attn_type == 'se':
19 | module_cls = SEModule
20 | elif attn_type == 'ese':
21 | module_cls = EffectiveSEModule
22 | elif attn_type == 'eca':
23 | module_cls = EcaModule
24 | elif attn_type == 'ceca':
25 | module_cls = CecaModule
26 | elif attn_type == 'cbam':
27 | module_cls = CbamModule
28 | elif attn_type == 'lcbam':
29 | module_cls = LightCbamModule
30 | else:
31 | assert False, "Invalid attn module (%s)" % attn_type
32 | elif isinstance(attn_type, bool):
33 | if attn_type:
34 | module_cls = SEModule
35 | else:
36 | module_cls = attn_type
37 | return module_cls
38 |
39 |
40 | def create_attn(attn_type, channels, **kwargs):
41 | module_cls = get_attn(attn_type)
42 | if module_cls is not None:
43 | # NOTE: it's expected the first (positional) argument of all attention layers is the # input channels
44 | return module_cls(channels, **kwargs)
45 | return None
46 |
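A quick sketch showing the attention factory resolving type strings (assuming torch and the repo package are importable):

import torch
from qdnet_classifier.models.layers.create_attn import create_attn

se = create_attn('se', channels=64)       # SEModule
eca = create_attn('eca', channels=64)     # EcaModule
none = create_attn(None, channels=64)     # no attention requested -> None

x = torch.randn(2, 64, 8, 8)
print(se(x).shape, eca(x).shape, none)    # both attention outputs keep the input shape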
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/create_conv2d.py:
--------------------------------------------------------------------------------
1 | """ Create Conv2d Factory Method
2 |
3 | Hacked together by / Copyright 2020 Ross Wightman
4 | """
5 |
6 | from .mixed_conv2d import MixedConv2d
7 | from .cond_conv2d import CondConv2d
8 | from .conv2d_same import create_conv2d_pad
9 |
10 |
11 | def create_conv2d(in_channels, out_channels, kernel_size, **kwargs):
12 | """ Select a 2d convolution implementation based on arguments
13 | Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d.
14 |
15 | Used extensively by EfficientNet, MobileNetv3 and related networks.
16 | """
17 | if isinstance(kernel_size, list):
18 | assert 'num_experts' not in kwargs # MixNet + CondConv combo not supported currently
19 | assert 'groups' not in kwargs # MixedConv groups are defined by kernel list
20 | # We're going to use only lists for defining the MixedConv2d kernel groups,
21 | # ints, tuples, other iterables will continue to pass to normal conv and specify h, w.
22 | m = MixedConv2d(in_channels, out_channels, kernel_size, **kwargs)
23 | else:
24 | depthwise = kwargs.pop('depthwise', False)
25 | # for DW out_channels must be multiple of in_channels as must have out_channels % groups == 0
26 | groups = in_channels if depthwise else kwargs.pop('groups', 1)
27 | if 'num_experts' in kwargs and kwargs['num_experts'] > 0:
28 | m = CondConv2d(in_channels, out_channels, kernel_size, groups=groups, **kwargs)
29 | else:
30 | m = create_conv2d_pad(in_channels, out_channels, kernel_size, groups=groups, **kwargs)
31 | return m
32 |
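A brief sketch of how the factory dispatches on its arguments (assuming torch and the repo package are importable; the resulting class names follow the timm-derived implementations this repo vendors):

import torch
from qdnet_classifier.models.layers.create_conv2d import create_conv2d

regular = create_conv2d(32, 64, kernel_size=3, stride=2, padding='')          # symmetric padding -> plain Conv2d
same_pad = create_conv2d(32, 64, kernel_size=3, stride=2, padding='same')     # dynamic TF 'SAME' padding
mixed = create_conv2d(32, 64, kernel_size=[3, 5, 7], padding='')              # kernel list -> MixedConv2d
depthwise = create_conv2d(32, 32, kernel_size=3, depthwise=True, padding='')  # groups == in_channels

x = torch.randn(2, 32, 16, 16)
for m in (regular, same_pad, mixed, depthwise):
    print(type(m).__name__, tuple(m(x).shape))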
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/create_norm_act.py:
--------------------------------------------------------------------------------
1 | """ NormAct (Normalizaiton + Activation Layer) Factory
2 |
3 | Create norm + act combo modules that attempt to be backwards compatible with separate norm + act
4 | instances in models. Where these are used it will be possible to swap separate BN + act layers with
5 | combined modules like IABN or EvoNorms.
6 |
7 | Hacked together by / Copyright 2020 Ross Wightman
8 | """
9 | import types
10 | import functools
11 |
12 | import torch
13 | import torch.nn as nn
14 |
15 | from .evo_norm import EvoNormBatch2d, EvoNormSample2d
16 | from .norm_act import BatchNormAct2d, GroupNormAct
17 | from .inplace_abn import InplaceAbn
18 |
19 | _NORM_ACT_TYPES = {BatchNormAct2d, GroupNormAct, EvoNormBatch2d, EvoNormSample2d, InplaceAbn}
20 | _NORM_ACT_REQUIRES_ARG = {BatchNormAct2d, GroupNormAct, InplaceAbn} # requires act_layer arg to define act type
21 |
22 |
23 | def get_norm_act_layer(layer_class):
24 | layer_class = layer_class.replace('_', '').lower()
25 | if layer_class.startswith("batchnorm"):
26 | layer = BatchNormAct2d
27 | elif layer_class.startswith("groupnorm"):
28 | layer = GroupNormAct
29 | elif layer_class == "evonormbatch":
30 | layer = EvoNormBatch2d
31 | elif layer_class == "evonormsample":
32 | layer = EvoNormSample2d
33 | elif layer_class == "iabn" or layer_class == "inplaceabn":
34 | layer = InplaceAbn
35 | else:
36 | assert False, "Invalid norm_act layer (%s)" % layer_class
37 | return layer
38 |
39 |
40 | def create_norm_act(layer_type, num_features, apply_act=True, jit=False, **kwargs):
41 | layer_parts = layer_type.split('-') # e.g. batchnorm-leaky_relu
42 | assert len(layer_parts) in (1, 2)
43 | layer = get_norm_act_layer(layer_parts[0])
44 | #activation_class = layer_parts[1].lower() if len(layer_parts) > 1 else '' # FIXME support string act selection?
45 | layer_instance = layer(num_features, apply_act=apply_act, **kwargs)
46 | if jit:
47 | layer_instance = torch.jit.script(layer_instance)
48 | return layer_instance
49 |
50 |
51 | def convert_norm_act(norm_layer, act_layer):
52 | assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial))
53 | assert act_layer is None or isinstance(act_layer, (type, str, types.FunctionType, functools.partial))
54 | norm_act_kwargs = {}
55 |
56 | # unbind partial fn, so args can be rebound later
57 | if isinstance(norm_layer, functools.partial):
58 | norm_act_kwargs.update(norm_layer.keywords)
59 | norm_layer = norm_layer.func
60 |
61 | if isinstance(norm_layer, str):
62 | norm_act_layer = get_norm_act_layer(norm_layer)
63 | elif norm_layer in _NORM_ACT_TYPES:
64 | norm_act_layer = norm_layer
65 | elif isinstance(norm_layer, types.FunctionType):
66 | # if function type, must be a lambda/fn that creates a norm_act layer
67 | norm_act_layer = norm_layer
68 | else:
69 | type_name = norm_layer.__name__.lower()
70 | if type_name.startswith('batchnorm'):
71 | norm_act_layer = BatchNormAct2d
72 | elif type_name.startswith('groupnorm'):
73 | norm_act_layer = GroupNormAct
74 | else:
75 | assert False, f"No equivalent norm_act layer for {type_name}"
76 |
77 | if norm_act_layer in _NORM_ACT_REQUIRES_ARG:
78 | # pass `act_layer` through for backwards compat where `act_layer=None` implies no activation.
79 | # In the future, may force use of `apply_act` with `act_layer` arg bound to relevant NormAct types
80 | norm_act_kwargs.setdefault('act_layer', act_layer)
81 | if norm_act_kwargs:
82 | norm_act_layer = functools.partial(norm_act_layer, **norm_act_kwargs) # bind/rebind args
83 | return norm_act_layer
84 |
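A small sketch of both entry points (assuming torch and the repo package are importable):

import functools
import torch.nn as nn
from qdnet_classifier.models.layers.create_norm_act import create_norm_act, convert_norm_act

# Build a fused norm + act module directly from a string spec.
bn_act = create_norm_act('batchnorm', num_features=64, act_layer=nn.ReLU)

# Or convert separate norm/act definitions (as ConvBnAct does) into a NormAct class,
# preserving any args bound via functools.partial.
norm_act_cls = convert_norm_act(functools.partial(nn.BatchNorm2d, momentum=0.01), nn.ReLU)
layer = norm_act_cls(64)
print(type(bn_act).__name__, type(layer).__name__, layer.momentum)  # BatchNormAct2d BatchNormAct2d 0.01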
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/eca.py:
--------------------------------------------------------------------------------
1 | """
2 | ECA module from ECAnet
3 |
4 | paper: ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks
5 | https://arxiv.org/abs/1910.03151
6 |
7 | Original ECA model borrowed from https://github.com/BangguWu/ECANet
8 |
9 | Modified circular ECA implementation and adaption for use in classifier package
10 | by Chris Ha https://github.com/VRandme
11 |
12 | Original License:
13 |
14 | MIT License
15 |
16 | Copyright (c) 2019 BangguWu, Qilong Wang
17 |
18 | Permission is hereby granted, free of charge, to any person obtaining a copy
19 | of this software and associated documentation files (the "Software"), to deal
20 | in the Software without restriction, including without limitation the rights
21 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
22 | copies of the Software, and to permit persons to whom the Software is
23 | furnished to do so, subject to the following conditions:
24 |
25 | The above copyright notice and this permission notice shall be included in all
26 | copies or substantial portions of the Software.
27 |
28 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
29 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
30 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
31 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
32 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
33 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 | SOFTWARE.
35 | """
36 | import math
37 | from torch import nn
38 | import torch.nn.functional as F
39 |
40 |
41 | class EcaModule(nn.Module):
42 | """Constructs an ECA module.
43 |
44 | Args:
45 | channels: Number of channels of the input feature map. When given, the kernel size
46 | is derived adaptively from the channel count (default=None).
47 | gamma, beta: Parameters of the adaptive channel-to-kernel-size mapping function;
48 | refer to the original paper https://arxiv.org/pdf/1910.03151.pdf.
49 | Only used when `channels` is given.
50 | kernel_size: Size of the 1d conv kernel (default=3); overridden by the adaptive value when `channels` is given.
51 | """
52 | def __init__(self, channels=None, kernel_size=3, gamma=2, beta=1):
53 | super(EcaModule, self).__init__()
54 | assert kernel_size % 2 == 1
55 | if channels is not None:
56 | t = int(abs(math.log(channels, 2) + beta) / gamma)
57 | kernel_size = max(t if t % 2 else t + 1, 3)
58 |
59 | self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=(kernel_size - 1) // 2, bias=False)
60 |
61 | def forward(self, x):
62 | y = x.mean((2, 3)).view(x.shape[0], 1, -1) # view for 1d conv
63 | y = self.conv(y)
64 | y = y.view(x.shape[0], -1, 1, 1).sigmoid()
65 | return x * y.expand_as(x)
66 |
67 |
68 | class CecaModule(nn.Module):
69 | """Constructs a circular ECA module.
70 |
71 | ECA module where the conv uses circular padding rather than zero padding.
72 | Unlike the spatial dimension, the channels do not have inherent ordering or
73 | locality. Although this module in essence applies such an assumption, there is no reason
74 | to prevent the channels at either "edge" from being circularly adapted to each other.
75 | This increases connectivity and can improve performance metrics
76 | (accuracy, robustness) without significantly impacting resource metrics
77 | (parameter count, throughput, latency, etc.)
78 |
79 | Args:
80 | channels: Number of channels of the input feature map. When given, the kernel size
81 | is derived adaptively from the channel count (default=None).
82 | gamma, beta: Parameters of the adaptive channel-to-kernel-size mapping function;
83 | refer to the original paper https://arxiv.org/pdf/1910.03151.pdf.
84 | Only used when `channels` is given.
85 | kernel_size: Size of the 1d conv kernel (default=3); overridden by the adaptive value when `channels` is given.
86 | """
87 |
88 | def __init__(self, channels=None, kernel_size=3, gamma=2, beta=1):
89 | super(CecaModule, self).__init__()
90 | assert kernel_size % 2 == 1
91 | if channels is not None:
92 | t = int(abs(math.log(channels, 2) + beta) / gamma)
93 | kernel_size = max(t if t % 2 else t + 1, 3)
94 |
95 | # PyTorch circular padding mode is buggy as of pytorch 1.4
96 | # see https://github.com/pytorch/pytorch/pull/17240
97 | # implement manual circular padding
98 | self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=0, bias=False)
99 | self.padding = (kernel_size - 1) // 2
100 |
101 | def forward(self, x):
102 | y = x.mean((2, 3)).view(x.shape[0], 1, -1)
103 | # Manually implement circular padding; F.pad with mode='circular' does not appear to be affected by that bug
104 | y = F.pad(y, (self.padding, self.padding), mode='circular')
105 | y = self.conv(y)
106 | y = y.view(x.shape[0], -1, 1, 1).sigmoid()
107 | return x * y.expand_as(x)
108 |
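A worked example of the adaptive kernel-size rule with the default gamma=2, beta=1, plus a usage sketch (assuming torch and the repo package are importable):

import math
import torch
from qdnet_classifier.models.layers.eca import EcaModule

# kernel_size = max(nearest_odd(|log2(C) + beta| / gamma), 3)
for c in (64, 256, 1024):
    t = int(abs(math.log(c, 2) + 1) / 2)
    print(c, max(t if t % 2 else t + 1, 3))  # 64 -> 3, 256 -> 5, 1024 -> 5

eca = EcaModule(channels=256)
x = torch.randn(2, 256, 14, 14)
print(eca(x).shape)  # channel re-weighting only, shape is unchanged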
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/evo_norm.py:
--------------------------------------------------------------------------------
1 | """EvoNormB0 (Batched) and EvoNormS0 (Sample) in PyTorch
2 |
3 | An attempt at getting decent performing EvoNorms running in PyTorch.
4 | While currently faster than other impl, still quite a ways off the built-in BN
5 | in terms of memory usage and throughput (roughly 5x mem, 1/2 - 1/3x speed).
6 |
7 | Still very much a WIP, fiddling with buffer usage, in-place/jit optimizations, and layouts.
8 |
9 | Hacked together by / Copyright 2020 Ross Wightman
10 | """
11 |
12 | import torch
13 | import torch.nn as nn
14 |
15 |
16 | class EvoNormBatch2d(nn.Module):
17 | def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-5, drop_block=None):
18 | super(EvoNormBatch2d, self).__init__()
19 | self.apply_act = apply_act # apply activation (non-linearity)
20 | self.momentum = momentum
21 | self.eps = eps
22 | param_shape = (1, num_features, 1, 1)
23 | self.weight = nn.Parameter(torch.ones(param_shape), requires_grad=True)
24 | self.bias = nn.Parameter(torch.zeros(param_shape), requires_grad=True)
25 | if apply_act:
26 | self.v = nn.Parameter(torch.ones(param_shape), requires_grad=True)
27 | self.register_buffer('running_var', torch.ones(1, num_features, 1, 1))
28 | self.reset_parameters()
29 |
30 | def reset_parameters(self):
31 | nn.init.ones_(self.weight)
32 | nn.init.zeros_(self.bias)
33 | if self.apply_act:
34 | nn.init.ones_(self.v)
35 |
36 | def forward(self, x):
37 | assert x.dim() == 4, 'expected 4D input'
38 | x_type = x.dtype
39 | if self.training:
40 | var = x.var(dim=(0, 2, 3), unbiased=False, keepdim=True)
41 | n = x.numel() / x.shape[1]
42 | self.running_var.copy_(
43 | var.detach() * self.momentum * (n / (n - 1)) + self.running_var * (1 - self.momentum))
44 | else:
45 | var = self.running_var
46 |
47 | if self.apply_act:
48 | v = self.v.to(dtype=x_type)
49 | d = x * v + (x.var(dim=(2, 3), unbiased=False, keepdim=True) + self.eps).sqrt().to(dtype=x_type)
50 | d = d.max((var + self.eps).sqrt().to(dtype=x_type))
51 | x = x / d
52 | return x * self.weight + self.bias
53 |
54 |
55 | class EvoNormSample2d(nn.Module):
56 | def __init__(self, num_features, apply_act=True, groups=8, eps=1e-5, drop_block=None):
57 | super(EvoNormSample2d, self).__init__()
58 | self.apply_act = apply_act # apply activation (non-linearity)
59 | self.groups = groups
60 | self.eps = eps
61 | param_shape = (1, num_features, 1, 1)
62 | self.weight = nn.Parameter(torch.ones(param_shape), requires_grad=True)
63 | self.bias = nn.Parameter(torch.zeros(param_shape), requires_grad=True)
64 | if apply_act:
65 | self.v = nn.Parameter(torch.ones(param_shape), requires_grad=True)
66 | self.reset_parameters()
67 |
68 | def reset_parameters(self):
69 | nn.init.ones_(self.weight)
70 | nn.init.zeros_(self.bias)
71 | if self.apply_act:
72 | nn.init.ones_(self.v)
73 |
74 | def forward(self, x):
75 | assert x.dim() == 4, 'expected 4D input'
76 | B, C, H, W = x.shape
77 | assert C % self.groups == 0
78 | if self.apply_act:
79 | n = x * (x * self.v).sigmoid()
80 | x = x.reshape(B, self.groups, -1)
81 | x = n.reshape(B, self.groups, -1) / (x.var(dim=-1, unbiased=False, keepdim=True) + self.eps).sqrt()
82 | x = x.reshape(B, C, H, W)
83 | return x * self.weight + self.bias
84 |
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/helpers.py:
--------------------------------------------------------------------------------
1 | """ Layer/Module Helpers
2 |
3 | Hacked together by / Copyright 2020 Ross Wightman
4 | """
5 | from itertools import repeat
6 | import collections.abc
7 |
8 |
9 | # From PyTorch internals
10 | def _ntuple(n):
11 | def parse(x):
12 | if isinstance(x, collections.abc.Iterable):
13 | return x
14 | return tuple(repeat(x, n))
15 | return parse
16 |
17 |
18 | to_1tuple = _ntuple(1)
19 | to_2tuple = _ntuple(2)
20 | to_3tuple = _ntuple(3)
21 | to_4tuple = _ntuple(4)
22 | to_ntuple = _ntuple
23 |
24 |
25 | def make_divisible(v, divisor=8, min_value=None):
26 | min_value = min_value or divisor
27 | new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
28 | # Make sure that round down does not go down by more than 10%.
29 | if new_v < 0.9 * v:
30 | new_v += divisor
31 | return new_v
32 |
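A few worked calls (assuming the repo package is importable); the expected values in the comments follow directly from the code above:

from qdnet_classifier.models.layers.helpers import to_2tuple, make_divisible

print(to_2tuple(3))            # (3, 3) -- scalars are repeated
print(to_2tuple((3, 5)))       # (3, 5) -- iterables pass through unchanged
print(make_divisible(37))      # 40  -> rounded to a multiple of 8 (the default divisor)
print(make_divisible(72, 64))  # 128 -> 64 would undershoot 0.9 * 72, so the round-down guard bumps it up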
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/inplace_abn.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn as nn
3 |
4 | try:
5 | from inplace_abn.functions import inplace_abn, inplace_abn_sync
6 | has_iabn = True
7 | except ImportError:
8 | has_iabn = False
9 |
10 | def inplace_abn(x, weight, bias, running_mean, running_var,
11 | training=True, momentum=0.1, eps=1e-05, activation="leaky_relu", activation_param=0.01):
12 | raise ImportError(
13 | "Please install InplaceABN:'pip install git+https://github.com/mapillary/inplace_abn.git@v1.0.11'")
14 |
15 | def inplace_abn_sync(**kwargs):
16 | inplace_abn(**kwargs)
17 |
18 |
19 | class InplaceAbn(nn.Module):
20 | """Activated Batch Normalization
21 |
22 | This gathers a BatchNorm and an activation function in a single module
23 |
24 | Parameters
25 | ----------
26 | num_features : int
27 | Number of feature channels in the input and output.
28 | eps : float
29 | Small constant to prevent numerical issues.
30 | momentum : float
31 | Momentum factor applied to compute running statistics.
32 | affine : bool
33 | If `True` apply learned scale and shift transformation after normalization.
34 | act_layer : str or nn.Module type
35 | Name or type of the activation functions, one of: `leaky_relu`, `elu`
36 | act_param : float
37 | Negative slope for the `leaky_relu` activation.
38 | """
39 |
40 | def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, apply_act=True,
41 | act_layer="leaky_relu", act_param=0.01, drop_block=None):
42 | super(InplaceAbn, self).__init__()
43 | self.num_features = num_features
44 | self.affine = affine
45 | self.eps = eps
46 | self.momentum = momentum
47 | if apply_act:
48 | if isinstance(act_layer, str):
49 | assert act_layer in ('leaky_relu', 'elu', 'identity', '')
50 | self.act_name = act_layer if act_layer else 'identity'
51 | else:
52 | # convert act layer passed as type to string
53 | if act_layer == nn.ELU:
54 | self.act_name = 'elu'
55 | elif act_layer == nn.LeakyReLU:
56 | self.act_name = 'leaky_relu'
57 | elif act_layer == nn.Identity:
58 | self.act_name = 'identity'
59 | else:
60 | assert False, f'Invalid act layer {act_layer.__name__} for IABN'
61 | else:
62 | self.act_name = 'identity'
63 | self.act_param = act_param
64 | if self.affine:
65 | self.weight = nn.Parameter(torch.ones(num_features))
66 | self.bias = nn.Parameter(torch.zeros(num_features))
67 | else:
68 | self.register_parameter('weight', None)
69 | self.register_parameter('bias', None)
70 | self.register_buffer('running_mean', torch.zeros(num_features))
71 | self.register_buffer('running_var', torch.ones(num_features))
72 | self.reset_parameters()
73 |
74 | def reset_parameters(self):
75 | nn.init.constant_(self.running_mean, 0)
76 | nn.init.constant_(self.running_var, 1)
77 | if self.affine:
78 | nn.init.constant_(self.weight, 1)
79 | nn.init.constant_(self.bias, 0)
80 |
81 | def forward(self, x):
82 | output = inplace_abn(
83 | x, self.weight, self.bias, self.running_mean, self.running_var,
84 | self.training, self.momentum, self.eps, self.act_name, self.act_param)
85 | if isinstance(output, tuple):
86 | output = output[0]
87 | return output
88 |
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/linear.py:
--------------------------------------------------------------------------------
1 | """ Linear layer (alternate definition)
2 | """
3 | import torch
4 | import torch.nn.functional as F
5 | from torch import nn as nn
6 |
7 |
8 | class Linear(nn.Linear):
9 | r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
10 |
11 | Wraps torch.nn.Linear to support AMP + torchscript usage by manually casting
12 | weight & bias to input.dtype to work around an issue w/ torch.addmm in this use case.
13 | """
14 | def forward(self, input: torch.Tensor) -> torch.Tensor:
15 | if torch.jit.is_scripting():
16 | bias = self.bias.to(dtype=input.dtype) if self.bias is not None else None
17 | return F.linear(input, self.weight.to(dtype=input.dtype), bias=bias)
18 | else:
19 | return F.linear(input, self.weight, self.bias)
20 |
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/median_pool.py:
--------------------------------------------------------------------------------
1 | """ Median Pool
2 | Hacked together by / Copyright 2020 Ross Wightman
3 | """
4 | import torch.nn as nn
5 | import torch.nn.functional as F
6 | from .helpers import to_2tuple, to_4tuple
7 |
8 |
9 | class MedianPool2d(nn.Module):
10 | """ Median pool (usable as median filter when stride=1) module.
11 |
12 | Args:
13 | kernel_size: size of pooling kernel, int or 2-tuple
14 | stride: pool stride, int or 2-tuple
15 | padding: pool padding, int or 4-tuple (l, r, t, b) as in pytorch F.pad
16 | same: override padding and enforce same padding, boolean
17 | """
18 | def __init__(self, kernel_size=3, stride=1, padding=0, same=False):
19 | super(MedianPool2d, self).__init__()
20 | self.k = to_2tuple(kernel_size)
21 | self.stride = to_2tuple(stride)
22 | self.padding = to_4tuple(padding) # convert to l, r, t, b
23 | self.same = same
24 |
25 | def _padding(self, x):
26 | if self.same:
27 | ih, iw = x.size()[2:]
28 | if ih % self.stride[0] == 0:
29 | ph = max(self.k[0] - self.stride[0], 0)
30 | else:
31 | ph = max(self.k[0] - (ih % self.stride[0]), 0)
32 | if iw % self.stride[1] == 0:
33 | pw = max(self.k[1] - self.stride[1], 0)
34 | else:
35 | pw = max(self.k[1] - (iw % self.stride[1]), 0)
36 | pl = pw // 2
37 | pr = pw - pl
38 | pt = ph // 2
39 | pb = ph - pt
40 | padding = (pl, pr, pt, pb)
41 | else:
42 | padding = self.padding
43 | return padding
44 |
45 | def forward(self, x):
46 | x = F.pad(x, self._padding(x), mode='reflect')
47 | x = x.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1], self.stride[1])
48 | x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0]
49 | return x
50 |
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/mixed_conv2d.py:
--------------------------------------------------------------------------------
1 | """ PyTorch Mixed Convolution
2 |
3 | Paper: MixConv: Mixed Depthwise Convolutional Kernels (https://arxiv.org/abs/1907.09595)
4 |
5 | Hacked together by / Copyright 2020 Ross Wightman
6 | """
7 |
8 | import torch
9 | from torch import nn as nn
10 |
11 | from .conv2d_same import create_conv2d_pad
12 |
13 |
14 | def _split_channels(num_chan, num_groups):
15 | split = [num_chan // num_groups for _ in range(num_groups)]
16 | split[0] += num_chan - sum(split)
17 | return split
18 |
19 |
20 | class MixedConv2d(nn.ModuleDict):
21 | """ Mixed Grouped Convolution
22 |
23 | Based on MDConv and GroupedConv in MixNet impl:
24 | https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mixnet/custom_layers.py
25 | """
26 | def __init__(self, in_channels, out_channels, kernel_size=3,
27 | stride=1, padding='', dilation=1, depthwise=False, **kwargs):
28 | super(MixedConv2d, self).__init__()
29 |
30 | kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size]
31 | num_groups = len(kernel_size)
32 | in_splits = _split_channels(in_channels, num_groups)
33 | out_splits = _split_channels(out_channels, num_groups)
34 | self.in_channels = sum(in_splits)
35 | self.out_channels = sum(out_splits)
36 | for idx, (k, in_ch, out_ch) in enumerate(zip(kernel_size, in_splits, out_splits)):
37 | conv_groups = in_ch if depthwise else 1
38 | # use add_module to keep key space clean
39 | self.add_module(
40 | str(idx),
41 | create_conv2d_pad(
42 | in_ch, out_ch, k, stride=stride,
43 | padding=padding, dilation=dilation, groups=conv_groups, **kwargs)
44 | )
45 | self.splits = in_splits
46 |
47 | def forward(self, x):
48 | x_split = torch.split(x, self.splits, 1)
49 | x_out = [c(x_split[i]) for i, c in enumerate(self.values())]
50 | x = torch.cat(x_out, 1)
51 | return x
52 |
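A usage sketch showing the channel split per kernel size (assuming torch and the repo package are importable):

import torch
from qdnet_classifier.models.layers.mixed_conv2d import MixedConv2d

conv = MixedConv2d(48, 96, kernel_size=[3, 5, 7], stride=2, depthwise=True)
x = torch.randn(2, 48, 32, 32)
print(conv.splits)    # [16, 16, 16] -- one channel group per kernel size
print(conv(x).shape)  # torch.Size([2, 96, 16, 16])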
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/norm_act.py:
--------------------------------------------------------------------------------
1 | """ Normalization + Activation Layers
2 | """
3 | import torch
4 | from torch import nn as nn
5 | from torch.nn import functional as F
6 |
7 | from .create_act import get_act_layer
8 |
9 |
10 | class BatchNormAct2d(nn.BatchNorm2d):
11 | """BatchNorm + Activation
12 |
13 | This module performs BatchNorm + Activation in a manner that will remain backwards
14 | compatible with weights trained with separate bn, act. This is why we inherit from BN
15 | instead of composing it as a .bn member.
16 | """
17 | def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True,
18 | apply_act=True, act_layer=nn.ReLU, inplace=True, drop_block=None):
19 | super(BatchNormAct2d, self).__init__(
20 | num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats)
21 | if isinstance(act_layer, str):
22 | act_layer = get_act_layer(act_layer)
23 | if act_layer is not None and apply_act:
24 | act_args = dict(inplace=True) if inplace else {}
25 | self.act = act_layer(**act_args)
26 | else:
27 | self.act = nn.Identity()
28 |
29 | def _forward_jit(self, x):
30 | """ A cut & paste of the contents of the PyTorch BatchNorm2d forward function
31 | """
32 | # exponential_average_factor is set to self.momentum
33 | # (when it is available) only so that it gets updated
34 | # in ONNX graph when this node is exported to ONNX.
35 | if self.momentum is None:
36 | exponential_average_factor = 0.0
37 | else:
38 | exponential_average_factor = self.momentum
39 |
40 | if self.training and self.track_running_stats:
41 | # TODO: if statement only here to tell the jit to skip emitting this when it is None
42 | if self.num_batches_tracked is not None:
43 | self.num_batches_tracked += 1
44 | if self.momentum is None: # use cumulative moving average
45 | exponential_average_factor = 1.0 / float(self.num_batches_tracked)
46 | else: # use exponential moving average
47 | exponential_average_factor = self.momentum
48 |
49 | x = F.batch_norm(
50 | x, self.running_mean, self.running_var, self.weight, self.bias,
51 | self.training or not self.track_running_stats,
52 | exponential_average_factor, self.eps)
53 | return x
54 |
55 | @torch.jit.ignore
56 | def _forward_python(self, x):
57 | return super(BatchNormAct2d, self).forward(x)
58 |
59 | def forward(self, x):
60 | # FIXME cannot call parent forward() and maintain jit.script compatibility?
61 | if torch.jit.is_scripting():
62 | x = self._forward_jit(x)
63 | else:
64 | x = self._forward_python(x)
65 | x = self.act(x)
66 | return x
67 |
68 |
69 | class GroupNormAct(nn.GroupNorm):
70 | # NOTE num_channel and num_groups order flipped for easier layer swaps / binding of fixed args
71 | def __init__(self, num_channels, num_groups, eps=1e-5, affine=True,
72 | apply_act=True, act_layer=nn.ReLU, inplace=True, drop_block=None):
73 | super(GroupNormAct, self).__init__(num_groups, num_channels, eps=eps, affine=affine)
74 | if isinstance(act_layer, str):
75 | act_layer = get_act_layer(act_layer)
76 | if act_layer is not None and apply_act:
77 | act_args = dict(inplace=True) if inplace else {}
78 | self.act = act_layer(**act_args)
79 | else:
80 | self.act = nn.Identity()
81 |
82 | def forward(self, x):
83 | x = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps)
84 | x = self.act(x)
85 | return x
86 |
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/padding.py:
--------------------------------------------------------------------------------
1 | """ Padding Helpers
2 |
3 | Hacked together by / Copyright 2020 Ross Wightman
4 | """
5 | import math
6 | from typing import List, Tuple
7 |
8 | import torch.nn.functional as F
9 |
10 |
11 | # Calculate symmetric padding for a convolution
12 | def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> int:
13 | padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
14 | return padding
15 |
16 |
17 | # Calculate asymmetric TensorFlow-like 'SAME' padding for a convolution
18 | def get_same_padding(x: int, k: int, s: int, d: int):
19 | return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0)
20 |
21 |
22 | # Can SAME padding for given args be done statically?
23 | def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_):
24 | return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0
25 |
26 |
27 | # Dynamically pad input x with 'SAME' padding for conv with specified args
28 | def pad_same(x, k: List[int], s: List[int], d: List[int] = (1, 1), value: float = 0):
29 | ih, iw = x.size()[-2:]
30 | pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(iw, k[1], s[1], d[1])
31 | if pad_h > 0 or pad_w > 0:
32 | x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2], value=value)
33 | return x
34 |
35 |
36 | def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]:
37 | dynamic = False
38 | if isinstance(padding, str):
39 | # for any string padding, the padding will be calculated for you, one of three ways
40 | padding = padding.lower()
41 | if padding == 'same':
42 | # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact
43 | if is_static_pad(kernel_size, **kwargs):
44 | # static case, no extra overhead
45 | padding = get_padding(kernel_size, **kwargs)
46 | else:
47 | # dynamic 'SAME' padding, has runtime/GPU memory overhead
48 | padding = 0
49 | dynamic = True
50 | elif padding == 'valid':
51 | # 'VALID' padding, same as padding=0
52 | padding = 0
53 | else:
54 | # Default to PyTorch style 'same'-ish symmetric padding
55 | padding = get_padding(kernel_size, **kwargs)
56 | return padding, dynamic
57 |
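A few worked calls (assuming the repo package is importable); the expected values in the comments follow from the formulas above:

from qdnet_classifier.models.layers.padding import get_padding, get_same_padding, get_padding_value

print(get_padding(3, stride=1))                # 1 -- symmetric padding that preserves spatial size
print(get_same_padding(x=15, k=3, s=2, d=1))   # 2 -- total pad a 15-wide input needs for 'SAME'
print(get_padding_value('same', 3, stride=2))  # (0, True)  -> must pad dynamically at runtime
print(get_padding_value('same', 3, stride=1))  # (1, False) -> static symmetric padding suffices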
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/pool2d_same.py:
--------------------------------------------------------------------------------
1 | """ AvgPool2d w/ Same Padding
2 |
3 | Hacked together by / Copyright 2020 Ross Wightman
4 | """
5 | import torch
6 | import torch.nn as nn
7 | import torch.nn.functional as F
8 | from typing import List, Tuple, Optional
9 |
10 | from .helpers import to_2tuple
11 | from .padding import pad_same, get_padding_value
12 |
13 |
14 | def avg_pool2d_same(x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0),
15 | ceil_mode: bool = False, count_include_pad: bool = True):
16 | # FIXME how to deal with count_include_pad vs not for external padding?
17 | x = pad_same(x, kernel_size, stride)
18 | return F.avg_pool2d(x, kernel_size, stride, (0, 0), ceil_mode, count_include_pad)
19 |
20 |
21 | class AvgPool2dSame(nn.AvgPool2d):
22 | """ Tensorflow like 'SAME' wrapper for 2D average pooling
23 | """
24 | def __init__(self, kernel_size: int, stride=None, padding=0, ceil_mode=False, count_include_pad=True):
25 | kernel_size = to_2tuple(kernel_size)
26 | stride = to_2tuple(stride)
27 | super(AvgPool2dSame, self).__init__(kernel_size, stride, (0, 0), ceil_mode, count_include_pad)
28 |
29 | def forward(self, x):
30 | return avg_pool2d_same(
31 | x, self.kernel_size, self.stride, self.padding, self.ceil_mode, self.count_include_pad)
32 |
33 |
34 | def max_pool2d_same(
35 | x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0),
36 | dilation: List[int] = (1, 1), ceil_mode: bool = False):
37 | x = pad_same(x, kernel_size, stride, value=-float('inf'))
38 | return F.max_pool2d(x, kernel_size, stride, (0, 0), dilation, ceil_mode)
39 |
40 |
41 | class MaxPool2dSame(nn.MaxPool2d):
42 | """ Tensorflow like 'SAME' wrapper for 2D max pooling
43 | """
44 | def __init__(self, kernel_size: int, stride=None, padding=0, dilation=1, ceil_mode=False, count_include_pad=True):
45 | kernel_size = to_2tuple(kernel_size)
46 | stride = to_2tuple(stride)
47 | dilation = to_2tuple(dilation)
48 | super(MaxPool2dSame, self).__init__(kernel_size, stride, (0, 0), dilation, ceil_mode=ceil_mode)  # pass ceil_mode by keyword: nn.MaxPool2d's 5th positional arg is return_indices; count_include_pad does not apply to max pooling and is accepted only for API symmetry
49 |
50 | def forward(self, x):
51 | return max_pool2d_same(x, self.kernel_size, self.stride, self.padding, self.dilation, self.ceil_mode)
52 |
53 |
54 | def create_pool2d(pool_type, kernel_size, stride=None, **kwargs):
55 | stride = stride or kernel_size
56 | padding = kwargs.pop('padding', '')
57 | padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, **kwargs)
58 | if is_dynamic:
59 | if pool_type == 'avg':
60 | return AvgPool2dSame(kernel_size, stride=stride, **kwargs)
61 | elif pool_type == 'max':
62 | return MaxPool2dSame(kernel_size, stride=stride, **kwargs)
63 | else:
64 | assert False, f'Unsupported pool type {pool_type}'
65 | else:
66 | if pool_type == 'avg':
67 | return nn.AvgPool2d(kernel_size, stride=stride, padding=padding, **kwargs)
68 | elif pool_type == 'max':
69 | return nn.MaxPool2d(kernel_size, stride=stride, padding=padding, **kwargs)
70 | else:
71 | assert False, f'Unsupported pool type {pool_type}'
72 |
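A usage sketch (assuming torch and the repo package are importable): with stride 2 the 'same' spec cannot be padded statically, so the factory returns the SAME-padding wrapper:

import torch
from qdnet_classifier.models.layers.pool2d_same import create_pool2d

pool = create_pool2d('max', kernel_size=3, stride=2, padding='same')
x = torch.randn(1, 8, 15, 15)
print(type(pool).__name__, pool(x).shape)  # MaxPool2dSame torch.Size([1, 8, 8, 8])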
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/se.py:
--------------------------------------------------------------------------------
1 | from torch import nn as nn
2 | import torch.nn.functional as F
3 |
4 | from .create_act import create_act_layer
5 | from .helpers import make_divisible
6 |
7 |
8 | class SEModule(nn.Module):
9 | """ SE Module as defined in original SE-Nets with a few additions
10 | Additions include:
11 | * min_channels can be specified to keep reduced channel count at a minimum (default: 8)
12 | * divisor can be specified to keep channels rounded to specified values (default: 1)
13 | * reduction channels can be specified directly by arg (if reduction_channels is set)
14 | * reduction channels can be specified by float ratio (if reduction_ratio is set)
15 | """
16 | def __init__(self, channels, reduction=16, act_layer=nn.ReLU, gate_layer='sigmoid',
17 | reduction_ratio=None, reduction_channels=None, min_channels=8, divisor=1):
18 | super(SEModule, self).__init__()
19 | if reduction_channels is not None:
20 | reduction_channels = reduction_channels # direct specification highest priority, no rounding/min done
21 | elif reduction_ratio is not None:
22 | reduction_channels = make_divisible(channels * reduction_ratio, divisor, min_channels)
23 | else:
24 | reduction_channels = make_divisible(channels // reduction, divisor, min_channels)
25 | self.fc1 = nn.Conv2d(channels, reduction_channels, kernel_size=1, bias=True)
26 | self.act = act_layer(inplace=True)
27 | self.fc2 = nn.Conv2d(reduction_channels, channels, kernel_size=1, bias=True)
28 | self.gate = create_act_layer(gate_layer)
29 |
30 | def forward(self, x):
31 | x_se = x.mean((2, 3), keepdim=True)
32 | x_se = self.fc1(x_se)
33 | x_se = self.act(x_se)
34 | x_se = self.fc2(x_se)
35 | return x * self.gate(x_se)
36 |
37 |
38 | class EffectiveSEModule(nn.Module):
39 | """ 'Effective Squeeze-Excitation
40 | From `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667
41 | """
42 | def __init__(self, channels, gate_layer='hard_sigmoid'):
43 | super(EffectiveSEModule, self).__init__()
44 | self.fc = nn.Conv2d(channels, channels, kernel_size=1, padding=0)
45 | self.gate = create_act_layer(gate_layer, inplace=True)
46 |
47 | def forward(self, x):
48 | x_se = x.mean((2, 3), keepdim=True)
49 | x_se = self.fc(x_se)
50 | return x * self.gate(x_se)
51 |
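A usage sketch (assuming torch and the repo package are importable); the bottleneck width in the comment follows from the defaults above:

import torch
from qdnet_classifier.models.layers.se import SEModule

se = SEModule(256)           # defaults: reduction=16, min_channels=8
print(se.fc1.out_channels)   # 16 -> max(256 // 16, 8)
x = torch.randn(2, 256, 7, 7)
print(se(x).shape)           # torch.Size([2, 256, 7, 7])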
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/selective_kernel.py:
--------------------------------------------------------------------------------
1 | """ Selective Kernel Convolution/Attention
2 |
3 | Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586)
4 |
5 | Hacked together by / Copyright 2020 Ross Wightman
6 | """
7 | import torch
8 | from torch import nn as nn
9 |
10 | from .conv_bn_act import ConvBnAct
11 |
12 |
13 | def _kernel_valid(k):
14 | if isinstance(k, (list, tuple)):
15 | for ki in k:
16 | _kernel_valid(ki)  # validate every entry in the list, not just the first one
17 | return
18 | assert k >= 3 and k % 2
19 |
20 | class SelectiveKernelAttn(nn.Module):
21 | def __init__(self, channels, num_paths=2, attn_channels=32,
22 | act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d):
23 | """ Selective Kernel Attention Module
24 |
25 | Selective Kernel attention mechanism factored out into its own module.
26 |
27 | """
28 | super(SelectiveKernelAttn, self).__init__()
29 | self.num_paths = num_paths
30 | self.fc_reduce = nn.Conv2d(channels, attn_channels, kernel_size=1, bias=False)
31 | self.bn = norm_layer(attn_channels)
32 | self.act = act_layer(inplace=True)
33 | self.fc_select = nn.Conv2d(attn_channels, channels * num_paths, kernel_size=1, bias=False)
34 |
35 | def forward(self, x):
36 | assert x.shape[1] == self.num_paths
37 | x = x.sum(1).mean((2, 3), keepdim=True)
38 | x = self.fc_reduce(x)
39 | x = self.bn(x)
40 | x = self.act(x)
41 | x = self.fc_select(x)
42 | B, C, H, W = x.shape
43 | x = x.view(B, self.num_paths, C // self.num_paths, H, W)
44 | x = torch.softmax(x, dim=1)
45 | return x
46 |
47 |
48 | class SelectiveKernelConv(nn.Module):
49 |
50 | def __init__(self, in_channels, out_channels, kernel_size=None, stride=1, dilation=1, groups=1,
51 | attn_reduction=16, min_attn_channels=32, keep_3x3=True, split_input=False,
52 | drop_block=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None):
53 | """ Selective Kernel Convolution Module
54 |
55 | As described in Selective Kernel Networks (https://arxiv.org/abs/1903.06586) with some modifications.
56 |
57 | Largest change is the input split, which divides the input channels across each convolution path, this can
58 | be viewed as a grouping of sorts, but the output channel counts expand to the module level value. This keeps
59 | the parameter count from ballooning when the convolutions themselves don't have groups, but still provides
60 | a noteworthy increase in performance over similar param count models without this attention layer. -Ross W
61 |
62 | Args:
63 | in_channels (int): module input (feature) channel count
64 | out_channels (int): module output (feature) channel count
65 | kernel_size (int, list): kernel size for each convolution branch
66 | stride (int): stride for convolutions
67 | dilation (int): dilation for module as a whole, impacts dilation of each branch
68 | groups (int): number of groups for each branch
69 | attn_reduction (int, float): reduction factor for attention features
70 | min_attn_channels (int): minimum attention feature channels
71 | keep_3x3 (bool): keep all branch convolution kernels as 3x3, changing larger kernels for dilations
72 | split_input (bool): split input channels evenly across each convolution branch, keeps param count lower,
73 | can be viewed as grouping by path, output expands to module out_channels count
74 | drop_block (nn.Module): drop block module
75 | act_layer (nn.Module): activation layer to use
76 | norm_layer (nn.Module): batchnorm/norm layer to use
77 | """
78 | super(SelectiveKernelConv, self).__init__()
79 | kernel_size = kernel_size or [3, 5] # default to one 3x3 and one 5x5 branch. 5x5 -> 3x3 + dilation
80 | _kernel_valid(kernel_size)
81 | if not isinstance(kernel_size, list):
82 | kernel_size = [kernel_size] * 2
83 | if keep_3x3:
84 | dilation = [dilation * (k - 1) // 2 for k in kernel_size]
85 | kernel_size = [3] * len(kernel_size)
86 | else:
87 | dilation = [dilation] * len(kernel_size)
88 | self.num_paths = len(kernel_size)
89 | self.in_channels = in_channels
90 | self.out_channels = out_channels
91 | self.split_input = split_input
92 | if self.split_input:
93 | assert in_channels % self.num_paths == 0
94 | in_channels = in_channels // self.num_paths
95 | groups = min(out_channels, groups)
96 |
97 | conv_kwargs = dict(
98 | stride=stride, groups=groups, drop_block=drop_block, act_layer=act_layer, norm_layer=norm_layer,
99 | aa_layer=aa_layer)
100 | self.paths = nn.ModuleList([
101 | ConvBnAct(in_channels, out_channels, kernel_size=k, dilation=d, **conv_kwargs)
102 | for k, d in zip(kernel_size, dilation)])
103 |
104 | attn_channels = max(int(out_channels / attn_reduction), min_attn_channels)
105 | self.attn = SelectiveKernelAttn(out_channels, self.num_paths, attn_channels)
106 | self.drop_block = drop_block
107 |
108 | def forward(self, x):
109 | if self.split_input:
110 | x_split = torch.split(x, self.in_channels // self.num_paths, 1)
111 | x_paths = [op(x_split[i]) for i, op in enumerate(self.paths)]
112 | else:
113 | x_paths = [op(x) for op in self.paths]
114 | x = torch.stack(x_paths, dim=1)
115 | x_attn = self.attn(x)
116 | x = x * x_attn
117 | x = torch.sum(x, dim=1)
118 | return x
119 |
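A usage sketch (assuming torch and the repo package are importable):

import torch
from qdnet_classifier.models.layers.selective_kernel import SelectiveKernelConv

# Default kernel_size=None expands to [3, 5]; with keep_3x3=True the 5x5 branch becomes a
# dilated 3x3, and split_input=True feeds each branch half of the input channels.
sk = SelectiveKernelConv(64, 128, stride=2, split_input=True)
x = torch.randn(2, 64, 28, 28)
print(sk(x).shape)  # torch.Size([2, 128, 14, 14])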
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/separable_conv.py:
--------------------------------------------------------------------------------
1 | """ Depthwise Separable Conv Modules
2 |
3 | Basic DWS convs. Other variations of DWS exist with batch norm or activations between the
4 | DW and PW convs such as the Depthwise modules in MobileNetV2 / EfficientNet and Xception.
5 |
6 | Hacked together by / Copyright 2020 Ross Wightman
7 | """
8 | from torch import nn as nn
9 |
10 | from .create_conv2d import create_conv2d
11 | from .create_norm_act import convert_norm_act
12 |
13 |
14 | class SeparableConvBnAct(nn.Module):
15 | """ Separable Conv w/ trailing Norm and Activation
16 | """
17 | def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False,
18 | channel_multiplier=1.0, pw_kernel_size=1, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU,
19 | apply_act=True, drop_block=None):
20 | super(SeparableConvBnAct, self).__init__()
21 |
22 | self.conv_dw = create_conv2d(
23 | in_channels, int(in_channels * channel_multiplier), kernel_size,
24 | stride=stride, dilation=dilation, padding=padding, depthwise=True)
25 |
26 | self.conv_pw = create_conv2d(
27 | int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias)
28 |
29 | norm_act_layer = convert_norm_act(norm_layer, act_layer)
30 | self.bn = norm_act_layer(out_channels, apply_act=apply_act, drop_block=drop_block)
31 |
32 | @property
33 | def in_channels(self):
34 | return self.conv_dw.in_channels
35 |
36 | @property
37 | def out_channels(self):
38 | return self.conv_pw.out_channels
39 |
40 | def forward(self, x):
41 | x = self.conv_dw(x)
42 | x = self.conv_pw(x)
43 | if self.bn is not None:
44 | x = self.bn(x)
45 | return x
46 |
47 |
48 | class SeparableConv2d(nn.Module):
49 | """ Separable Conv
50 | """
51 | def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False,
52 | channel_multiplier=1.0, pw_kernel_size=1):
53 | super(SeparableConv2d, self).__init__()
54 |
55 | self.conv_dw = create_conv2d(
56 | in_channels, int(in_channels * channel_multiplier), kernel_size,
57 | stride=stride, dilation=dilation, padding=padding, depthwise=True)
58 |
59 | self.conv_pw = create_conv2d(
60 | int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias)
61 |
62 | @property
63 | def in_channels(self):
64 | return self.conv_dw.in_channels
65 |
66 | @property
67 | def out_channels(self):
68 | return self.conv_pw.out_channels
69 |
70 | def forward(self, x):
71 | x = self.conv_dw(x)
72 | x = self.conv_pw(x)
73 | return x
74 |
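A usage sketch contrasting the factorized conv with a dense 3x3 of the same width (assuming torch and the repo package are importable):

import torch
import torch.nn as nn
from qdnet_classifier.models.layers.separable_conv import SeparableConv2d

sep = SeparableConv2d(64, 128, kernel_size=3)
dense = nn.Conv2d(64, 128, kernel_size=3, padding=1, bias=False)
print(sum(p.numel() for p in sep.parameters()))    # depthwise 3x3 + pointwise 1x1
print(sum(p.numel() for p in dense.parameters()))  # dense 3x3 -- roughly an order of magnitude more
x = torch.randn(1, 64, 16, 16)
print(sep(x).shape)  # torch.Size([1, 128, 16, 16])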
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/space_to_depth.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
5 | class SpaceToDepth(nn.Module):
6 | def __init__(self, block_size=4):
7 | super().__init__()
8 | assert block_size == 4
9 | self.bs = block_size
10 |
11 | def forward(self, x):
12 | N, C, H, W = x.size()
13 | x = x.view(N, C, H // self.bs, self.bs, W // self.bs, self.bs) # (N, C, H//bs, bs, W//bs, bs)
14 | x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs)
15 | x = x.view(N, C * (self.bs ** 2), H // self.bs, W // self.bs) # (N, C*bs^2, H//bs, W//bs)
16 | return x
17 |
18 |
19 | @torch.jit.script
20 | class SpaceToDepthJit(object):
21 | def __call__(self, x: torch.Tensor):
22 | # assuming hard-coded that block_size==4 for acceleration
23 | N, C, H, W = x.size()
24 | x = x.view(N, C, H // 4, 4, W // 4, 4) # (N, C, H//bs, bs, W//bs, bs)
25 | x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs)
26 | x = x.view(N, C * 16, H // 4, W // 4) # (N, C*bs^2, H//bs, W//bs)
27 | return x
28 |
29 |
30 | class SpaceToDepthModule(nn.Module):
31 | def __init__(self, no_jit=False):
32 | super().__init__()
33 | if not no_jit:
34 | self.op = SpaceToDepthJit()
35 | else:
36 | self.op = SpaceToDepth()
37 |
38 | def forward(self, x):
39 | return self.op(x)
40 |
41 |
42 | class DepthToSpace(nn.Module):
43 |
44 | def __init__(self, block_size):
45 | super().__init__()
46 | self.bs = block_size
47 |
48 | def forward(self, x):
49 | N, C, H, W = x.size()
50 | x = x.view(N, self.bs, self.bs, C // (self.bs ** 2), H, W) # (N, bs, bs, C//bs^2, H, W)
51 | x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # (N, C//bs^2, H, bs, W, bs)
52 | x = x.view(N, C // (self.bs ** 2), H * self.bs, W * self.bs) # (N, C//bs^2, H * bs, W * bs)
53 | return x
54 |
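A worked shape example (assuming torch and the repo package are importable):

import torch
from qdnet_classifier.models.layers.space_to_depth import SpaceToDepth, DepthToSpace

x = torch.randn(2, 3, 32, 32)
y = SpaceToDepth(block_size=4)(x)
print(y.shape)                             # torch.Size([2, 48, 8, 8]) -- 4x4 blocks moved into channels
print(torch.equal(DepthToSpace(4)(y), x))  # True -- DepthToSpace exactly inverts SpaceToDepth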
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/split_attn.py:
--------------------------------------------------------------------------------
1 | """ Split Attention Conv2d (for ResNeSt Models)
2 |
3 | Paper: `ResNeSt: Split-Attention Networks` - https://arxiv.org/abs/2004.08955
4 |
5 | Adapted from original PyTorch impl at https://github.com/zhanghang1989/ResNeSt
6 |
7 | Modified for torchscript compat, performance, and consistency with classifier by Ross Wightman
8 | """
9 | import torch
10 | import torch.nn.functional as F
11 | from torch import nn
12 |
13 |
14 | class RadixSoftmax(nn.Module):
15 | def __init__(self, radix, cardinality):
16 | super(RadixSoftmax, self).__init__()
17 | self.radix = radix
18 | self.cardinality = cardinality
19 |
20 | def forward(self, x):
21 | batch = x.size(0)
22 | if self.radix > 1:
23 | x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2)
24 | x = F.softmax(x, dim=1)
25 | x = x.reshape(batch, -1)
26 | else:
27 | x = torch.sigmoid(x)
28 | return x
29 |
30 |
31 | class SplitAttnConv2d(nn.Module):
32 | """Split-Attention Conv2d
33 | """
34 | def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
35 | dilation=1, groups=1, bias=False, radix=2, reduction_factor=4,
36 | act_layer=nn.ReLU, norm_layer=None, drop_block=None, **kwargs):
37 | super(SplitAttnConv2d, self).__init__()
38 | self.radix = radix
39 | self.drop_block = drop_block
40 | mid_chs = out_channels * radix
41 | attn_chs = max(in_channels * radix // reduction_factor, 32)
42 |
43 | self.conv = nn.Conv2d(
44 | in_channels, mid_chs, kernel_size, stride, padding, dilation,
45 | groups=groups * radix, bias=bias, **kwargs)
46 | self.bn0 = norm_layer(mid_chs) if norm_layer is not None else None
47 | self.act0 = act_layer(inplace=True)
48 | self.fc1 = nn.Conv2d(out_channels, attn_chs, 1, groups=groups)
49 | self.bn1 = norm_layer(attn_chs) if norm_layer is not None else None
50 | self.act1 = act_layer(inplace=True)
51 | self.fc2 = nn.Conv2d(attn_chs, mid_chs, 1, groups=groups)
52 | self.rsoftmax = RadixSoftmax(radix, groups)
53 |
54 | @property
55 | def in_channels(self):
56 | return self.conv.in_channels
57 |
58 | @property
59 | def out_channels(self):
60 | return self.fc1.in_channels  # the block's output channel count; fc1.out_channels is the attention bottleneck width
61 |
62 | def forward(self, x):
63 | x = self.conv(x)
64 | if self.bn0 is not None:
65 | x = self.bn0(x)
66 | if self.drop_block is not None:
67 | x = self.drop_block(x)
68 | x = self.act0(x)
69 |
70 | B, RC, H, W = x.shape
71 | if self.radix > 1:
72 | x = x.reshape((B, self.radix, RC // self.radix, H, W))
73 | x_gap = x.sum(dim=1)
74 | else:
75 | x_gap = x
76 | x_gap = F.adaptive_avg_pool2d(x_gap, 1)
77 | x_gap = self.fc1(x_gap)
78 | if self.bn1 is not None:
79 | x_gap = self.bn1(x_gap)
80 | x_gap = self.act1(x_gap)
81 | x_attn = self.fc2(x_gap)
82 |
83 | x_attn = self.rsoftmax(x_attn).view(B, -1, 1, 1)
84 | if self.radix > 1:
85 | out = (x * x_attn.reshape((B, self.radix, RC // self.radix, 1, 1))).sum(dim=1)
86 | else:
87 | out = x * x_attn
88 | return out.contiguous()
89 |
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/split_batchnorm.py:
--------------------------------------------------------------------------------
1 | """ Split BatchNorm
2 |
3 | A PyTorch BatchNorm layer that splits input batch into N equal parts and passes each through
4 | a separate BN layer. The first split is passed through the parent BN layers with weight/bias
5 | keys the same as the original BN. All other splits pass through BN sub-layers under the '.aux_bn'
6 | namespace.
7 |
8 | This allows easily removing the auxiliary BN layers after training to efficiently
9 | achieve the 'Auxiliary BatchNorm' as described in the AdvProp Paper, section 4.2,
10 | 'Disentangled Learning via An Auxiliary BN'
11 |
12 | Hacked together by / Copyright 2020 Ross Wightman
13 | """
14 | import torch
15 | import torch.nn as nn
16 |
17 |
18 | class SplitBatchNorm2d(torch.nn.BatchNorm2d):
19 |
20 | def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,
21 | track_running_stats=True, num_splits=2):
22 | super().__init__(num_features, eps, momentum, affine, track_running_stats)
23 | assert num_splits > 1, 'Should have at least one aux BN layer (num_splits at least 2)'
24 | self.num_splits = num_splits
25 | self.aux_bn = nn.ModuleList([
26 | nn.BatchNorm2d(num_features, eps, momentum, affine, track_running_stats) for _ in range(num_splits - 1)])
27 |
28 | def forward(self, input: torch.Tensor):
29 | if self.training: # aux BN only relevant while training
30 | split_size = input.shape[0] // self.num_splits
31 | assert input.shape[0] == split_size * self.num_splits, "batch size must be evenly divisible by num_splits"
32 | split_input = input.split(split_size)
33 | x = [super().forward(split_input[0])]
34 | for i, a in enumerate(self.aux_bn):
35 | x.append(a(split_input[i + 1]))
36 | return torch.cat(x, dim=0)
37 | else:
38 | return super().forward(input)
39 |
40 |
41 | def convert_splitbn_model(module, num_splits=2):
42 | """
43 | Recursively traverse module and its children to replace all instances of
44 | ``torch.nn.modules.batchnorm._BatchNorm`` with `SplitBatchNorm2d`.
45 | Args:
46 | module (torch.nn.Module): input module
47 | num_splits: number of separate batchnorm layers to split input across
48 | Example::
49 | >>> # model is an instance of torch.nn.Module
50 | >>> model = classifier.models.convert_splitbn_model(model, num_splits=2)
51 | """
52 | mod = module
53 | if isinstance(module, torch.nn.modules.instancenorm._InstanceNorm):
54 | return module
55 | if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
56 | mod = SplitBatchNorm2d(
57 | module.num_features, module.eps, module.momentum, module.affine,
58 | module.track_running_stats, num_splits=num_splits)
59 | mod.running_mean = module.running_mean
60 | mod.running_var = module.running_var
61 | mod.num_batches_tracked = module.num_batches_tracked
62 | if module.affine:
63 | mod.weight.data = module.weight.data.clone().detach()
64 | mod.bias.data = module.bias.data.clone().detach()
65 | for aux in mod.aux_bn:
66 | aux.running_mean = module.running_mean.clone()
67 | aux.running_var = module.running_var.clone()
68 | aux.num_batches_tracked = module.num_batches_tracked.clone()
69 | if module.affine:
70 | aux.weight.data = module.weight.data.clone().detach()
71 | aux.bias.data = module.bias.data.clone().detach()
72 | for name, child in module.named_children():
73 | mod.add_module(name, convert_splitbn_model(child, num_splits=num_splits))
74 | del module
75 | return mod
76 |
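A usage sketch complementing the docstring example above (assuming torch and the repo package are importable):

import torch
import torch.nn as nn
from qdnet_classifier.models.layers.split_batchnorm import convert_splitbn_model

model = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.BatchNorm2d(8), nn.ReLU())
model = convert_splitbn_model(model, num_splits=2)
print(type(model[1]).__name__)   # SplitBatchNorm2d -- one main BN plus one '.aux_bn' BN

x = torch.randn(4, 3, 16, 16)    # in training mode the batch of 4 is split into two halves
print(model(x).shape)            # torch.Size([4, 8, 16, 16])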
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/test_time_pool.py:
--------------------------------------------------------------------------------
1 | """ Test Time Pooling (Average-Max Pool)
2 |
3 | Hacked together by / Copyright 2020 Ross Wightman
4 | """
5 |
6 | import logging
7 | from torch import nn
8 | import torch.nn.functional as F
9 |
10 | from .adaptive_avgmax_pool import adaptive_avgmax_pool2d
11 |
12 |
13 | _logger = logging.getLogger(__name__)
14 |
15 |
16 | class TestTimePoolHead(nn.Module):
17 | def __init__(self, base, original_pool=7):
18 | super(TestTimePoolHead, self).__init__()
19 | self.base = base
20 | self.original_pool = original_pool
21 | base_fc = self.base.get_classifier()
22 | if isinstance(base_fc, nn.Conv2d):
23 | self.fc = base_fc
24 | else:
25 | self.fc = nn.Conv2d(
26 | self.base.num_features, self.base.num_classes, kernel_size=1, bias=True)
27 | self.fc.weight.data.copy_(base_fc.weight.data.view(self.fc.weight.size()))
28 | self.fc.bias.data.copy_(base_fc.bias.data.view(self.fc.bias.size()))
29 | self.base.reset_classifier(0) # delete original fc layer
30 |
31 | def forward(self, x):
32 | x = self.base.forward_features(x)
33 | x = F.avg_pool2d(x, kernel_size=self.original_pool, stride=1)
34 | x = self.fc(x)
35 | x = adaptive_avgmax_pool2d(x, 1)
36 | return x.view(x.size(0), -1)
37 |
38 |
39 | def apply_test_time_pool(model, config, use_test_size=True):
40 | test_time_pool = False
41 | if not hasattr(model, 'default_cfg') or not model.default_cfg:
42 | return model, False
43 | if use_test_size and 'test_input_size' in model.default_cfg:
44 | df_input_size = model.default_cfg['test_input_size']
45 | else:
46 | df_input_size = model.default_cfg['input_size']
47 | if config['input_size'][-1] > df_input_size[-1] and config['input_size'][-2] > df_input_size[-2]:
48 | _logger.info('Target input size %s > pretrained default %s, using test time pooling' %
49 | (str(config['input_size'][-2:]), str(df_input_size[-2:])))
50 | model = TestTimePoolHead(model, original_pool=model.default_cfg['pool_size'])
51 | test_time_pool = True
52 | return model, test_time_pool
53 |
--------------------------------------------------------------------------------
/qdnet_classifier/models/layers/weight_init.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import math
3 | import warnings
4 |
5 |
6 | def _no_grad_trunc_normal_(tensor, mean, std, a, b):
7 | # Cut & paste from PyTorch official master until it's in a few official releases - RW
8 | # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
9 | def norm_cdf(x):
10 | # Computes standard normal cumulative distribution function
11 | return (1. + math.erf(x / math.sqrt(2.))) / 2.
12 |
13 | if (mean < a - 2 * std) or (mean > b + 2 * std):
14 | warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
15 | "The distribution of values may be incorrect.",
16 | stacklevel=2)
17 |
18 | with torch.no_grad():
19 | # Values are generated by using a truncated uniform distribution and
20 | # then using the inverse CDF for the normal distribution.
21 | # Get upper and lower cdf values
22 | l = norm_cdf((a - mean) / std)
23 | u = norm_cdf((b - mean) / std)
24 |
25 | # Uniformly fill tensor with values from [l, u], then translate to
26 | # [2l-1, 2u-1].
27 | tensor.uniform_(2 * l - 1, 2 * u - 1)
28 |
29 | # Use inverse cdf transform for normal distribution to get truncated
30 | # standard normal
31 | tensor.erfinv_()
32 |
33 | # Transform to proper mean, std
34 | tensor.mul_(std * math.sqrt(2.))
35 | tensor.add_(mean)
36 |
37 | # Clamp to ensure it's in the proper range
38 | tensor.clamp_(min=a, max=b)
39 | return tensor
40 |
41 |
42 | def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
43 | # type: (Tensor, float, float, float, float) -> Tensor
44 | r"""Fills the input Tensor with values drawn from a truncated
45 | normal distribution. The values are effectively drawn from the
46 | normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
47 | with values outside :math:`[a, b]` redrawn until they are within
48 | the bounds. The method used for generating the random values works
49 | best when :math:`a \leq \text{mean} \leq b`.
50 | Args:
51 | tensor: an n-dimensional `torch.Tensor`
52 | mean: the mean of the normal distribution
53 | std: the standard deviation of the normal distribution
54 | a: the minimum cutoff value
55 | b: the maximum cutoff value
56 | Examples:
57 | >>> w = torch.empty(3, 5)
58 | >>> nn.init.trunc_normal_(w)
59 | """
60 | return _no_grad_trunc_normal_(tensor, mean, std, a, b)
61 |
--------------------------------------------------------------------------------
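A short usage sketch for `trunc_normal_` (the layer sizes and `std=0.02` are illustrative, mirroring common ViT-style head initialization, not values taken from this repo's configs):

```python
# Sketch: initialize a classifier head with the truncated normal above.
# Values are drawn from N(0, 0.02^2) and clamped to the default bounds [-2, 2].
import torch
import torch.nn as nn

from qdnet_classifier.models.layers.weight_init import trunc_normal_

head = nn.Linear(768, 1000)
trunc_normal_(head.weight, std=0.02)
nn.init.zeros_(head.bias)
print(head.weight.std().item())   # roughly 0.02
```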
/qdnet_classifier/models/multi_label_model.py:
--------------------------------------------------------------------------------
1 | from sklearn.metrics import balanced_accuracy_score
2 | import torch
3 | import torch.nn as nn
4 | import warnings
5 |
6 | WEIGHT_COLOR = 0.75
7 | WEIGHT_ACTION = 0.25
8 | assert (WEIGHT_COLOR + WEIGHT_ACTION == 1.0)
9 |
10 |
11 | class MultiLabelModel(nn.Module):
12 | def __init__(self, model, n_color_classes, n_action_classes):
13 | super().__init__()
14 | self.base_model = model.as_sequential_for_ML()
15 | last_channel = model.num_features
16 |
17 | self.pool = nn.AdaptiveAvgPool2d((1, 1))
18 |
19 | # create separate classifiers for our outputs
20 | self.color = nn.Sequential(
21 | nn.Dropout(p=0.2),
22 | nn.Linear(in_features=last_channel, out_features=n_color_classes)
23 | )
24 | self.action = nn.Sequential(
25 | nn.Dropout(p=0.2),
26 | nn.Linear(in_features=last_channel, out_features=n_action_classes)
27 | )
28 |
29 | def forward(self, x):
30 | x = self.base_model(x)
31 | x = self.pool(x)
32 | x = torch.flatten(x, 1)
33 |
34 | return {
35 | 'color': self.color(x),
36 | 'action': self.action(x),
37 | }
38 |
39 | @staticmethod
40 | def get_loss(loss_fn, output, target):
41 | loss_color = loss_fn(output['color'], target['color_labels'].cuda())
42 | loss_action = loss_fn(output['action'], target['action_labels'].cuda())
43 |
44 | loss = WEIGHT_COLOR * loss_color + WEIGHT_ACTION * loss_action
45 | return loss
46 |
47 | @staticmethod
48 | def get_accuracy(accuracy, output, target, topk=(1,)):
49 | acc1_color, acc3_color = accuracy(output['color'], target['color_labels'].cuda(), topk=topk)
50 | acc1_action, acc3_action = accuracy(output['action'], target['action_labels'].cuda(), topk=topk)
51 |
52 | acc1 = WEIGHT_COLOR * acc1_color + WEIGHT_ACTION * acc1_action
53 | acc3 = WEIGHT_COLOR * acc3_color + WEIGHT_ACTION * acc3_action
54 | return acc1, acc3, {'color': acc1_color, 'action': acc1_action}
55 |
56 | @staticmethod
57 | def calculate_metrics(output, target):
58 | predicted_color = output['color'].cpu().argmax(1)
59 | gt_color = target['color_labels'].cpu()
60 |
61 | predicted_action = output['action'].cpu().argmax(1)
62 | gt_action = target['action_labels'].cpu()
63 |
64 | with warnings.catch_warnings():
65 | warnings.simplefilter("ignore")
66 | accuracy_color = balanced_accuracy_score(y_true=gt_color.numpy(), y_pred=predicted_color.numpy())
67 | accuracy_action = balanced_accuracy_score(y_true=gt_action.numpy(), y_pred=predicted_action.numpy())
68 |
69 | return accuracy_color, accuracy_action
70 |
--------------------------------------------------------------------------------
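The two heads are blended with fixed weights (0.75 color / 0.25 action). A CPU-only sketch of that weighted sum with made-up logits and labels; `get_loss` itself moves targets to CUDA, so it is not called directly here:

```python
# Sketch with made-up tensors: the weighted two-head loss used by MultiLabelModel.
import torch
import torch.nn as nn

loss_fn = nn.CrossEntropyLoss()
output = {'color': torch.randn(4, 10, requires_grad=True),   # fake color logits
          'action': torch.randn(4, 5, requires_grad=True)}   # fake action logits
target = {'color_labels': torch.randint(0, 10, (4,)),
          'action_labels': torch.randint(0, 5, (4,))}

loss = 0.75 * loss_fn(output['color'], target['color_labels']) \
     + 0.25 * loss_fn(output['action'], target['action_labels'])
loss.backward()
print(loss.item())
```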
/qdnet_classifier/models/pruned/ecaresnet50d_pruned.txt:
--------------------------------------------------------------------------------
1 | conv1.0.weight:[32, 3, 3, 3]***conv1.1.weight:[32]***conv1.3.weight:[32, 32, 3, 3]***conv1.4.weight:[32]***conv1.6.weight:[64, 32, 3, 3]***bn1.weight:[64]***layer1.0.conv1.weight:[47, 64, 1, 1]***layer1.0.bn1.weight:[47]***layer1.0.conv2.weight:[18, 47, 3, 3]***layer1.0.bn2.weight:[18]***layer1.0.conv3.weight:[19, 18, 1, 1]***layer1.0.bn3.weight:[19]***layer1.0.se.conv.weight:[1, 1, 5]***layer1.0.downsample.1.weight:[19, 64, 1, 1]***layer1.0.downsample.2.weight:[19]***layer1.1.conv1.weight:[52, 19, 1, 1]***layer1.1.bn1.weight:[52]***layer1.1.conv2.weight:[22, 52, 3, 3]***layer1.1.bn2.weight:[22]***layer1.1.conv3.weight:[19, 22, 1, 1]***layer1.1.bn3.weight:[19]***layer1.1.se.conv.weight:[1, 1, 5]***layer1.2.conv1.weight:[64, 19, 1, 1]***layer1.2.bn1.weight:[64]***layer1.2.conv2.weight:[35, 64, 3, 3]***layer1.2.bn2.weight:[35]***layer1.2.conv3.weight:[19, 35, 1, 1]***layer1.2.bn3.weight:[19]***layer1.2.se.conv.weight:[1, 1, 5]***layer2.0.conv1.weight:[85, 19, 1, 1]***layer2.0.bn1.weight:[85]***layer2.0.conv2.weight:[37, 85, 3, 3]***layer2.0.bn2.weight:[37]***layer2.0.conv3.weight:[171, 37, 1, 1]***layer2.0.bn3.weight:[171]***layer2.0.se.conv.weight:[1, 1, 5]***layer2.0.downsample.1.weight:[171, 19, 1, 1]***layer2.0.downsample.2.weight:[171]***layer2.1.conv1.weight:[107, 171, 1, 1]***layer2.1.bn1.weight:[107]***layer2.1.conv2.weight:[80, 107, 3, 3]***layer2.1.bn2.weight:[80]***layer2.1.conv3.weight:[171, 80, 1, 1]***layer2.1.bn3.weight:[171]***layer2.1.se.conv.weight:[1, 1, 5]***layer2.2.conv1.weight:[120, 171, 1, 1]***layer2.2.bn1.weight:[120]***layer2.2.conv2.weight:[85, 120, 3, 3]***layer2.2.bn2.weight:[85]***layer2.2.conv3.weight:[171, 85, 1, 1]***layer2.2.bn3.weight:[171]***layer2.2.se.conv.weight:[1, 1, 5]***layer2.3.conv1.weight:[125, 171, 1, 1]***layer2.3.bn1.weight:[125]***layer2.3.conv2.weight:[87, 125, 3, 3]***layer2.3.bn2.weight:[87]***layer2.3.conv3.weight:[171, 87, 1, 1]***layer2.3.bn3.weight:[171]***layer2.3.se.conv.weight:[1, 1, 5]***layer3.0.conv1.weight:[198, 171, 1, 1]***layer3.0.bn1.weight:[198]***layer3.0.conv2.weight:[126, 198, 3, 3]***layer3.0.bn2.weight:[126]***layer3.0.conv3.weight:[818, 126, 1, 1]***layer3.0.bn3.weight:[818]***layer3.0.se.conv.weight:[1, 1, 5]***layer3.0.downsample.1.weight:[818, 171, 1, 1]***layer3.0.downsample.2.weight:[818]***layer3.1.conv1.weight:[255, 818, 1, 1]***layer3.1.bn1.weight:[255]***layer3.1.conv2.weight:[232, 255, 3, 3]***layer3.1.bn2.weight:[232]***layer3.1.conv3.weight:[818, 232, 1, 1]***layer3.1.bn3.weight:[818]***layer3.1.se.conv.weight:[1, 1, 5]***layer3.2.conv1.weight:[256, 818, 1, 1]***layer3.2.bn1.weight:[256]***layer3.2.conv2.weight:[233, 256, 3, 3]***layer3.2.bn2.weight:[233]***layer3.2.conv3.weight:[818, 233, 1, 1]***layer3.2.bn3.weight:[818]***layer3.2.se.conv.weight:[1, 1, 5]***layer3.3.conv1.weight:[253, 818, 1, 1]***layer3.3.bn1.weight:[253]***layer3.3.conv2.weight:[235, 253, 3, 3]***layer3.3.bn2.weight:[235]***layer3.3.conv3.weight:[818, 235, 1, 1]***layer3.3.bn3.weight:[818]***layer3.3.se.conv.weight:[1, 1, 5]***layer3.4.conv1.weight:[256, 818, 1, 1]***layer3.4.bn1.weight:[256]***layer3.4.conv2.weight:[225, 256, 3, 3]***layer3.4.bn2.weight:[225]***layer3.4.conv3.weight:[818, 225, 1, 1]***layer3.4.bn3.weight:[818]***layer3.4.se.conv.weight:[1, 1, 5]***layer3.5.conv1.weight:[256, 818, 1, 1]***layer3.5.bn1.weight:[256]***layer3.5.conv2.weight:[239, 256, 3, 3]***layer3.5.bn2.weight:[239]***layer3.5.conv3.weight:[818, 239, 1, 1]***layer3.5.bn3.weight:[818]***layer3.5.se.conv.weight:[1, 1, 
5]***layer4.0.conv1.weight:[492, 818, 1, 1]***layer4.0.bn1.weight:[492]***layer4.0.conv2.weight:[237, 492, 3, 3]***layer4.0.bn2.weight:[237]***layer4.0.conv3.weight:[2022, 237, 1, 1]***layer4.0.bn3.weight:[2022]***layer4.0.se.conv.weight:[1, 1, 7]***layer4.0.downsample.1.weight:[2022, 818, 1, 1]***layer4.0.downsample.2.weight:[2022]***layer4.1.conv1.weight:[512, 2022, 1, 1]***layer4.1.bn1.weight:[512]***layer4.1.conv2.weight:[500, 512, 3, 3]***layer4.1.bn2.weight:[500]***layer4.1.conv3.weight:[2022, 500, 1, 1]***layer4.1.bn3.weight:[2022]***layer4.1.se.conv.weight:[1, 1, 7]***layer4.2.conv1.weight:[512, 2022, 1, 1]***layer4.2.bn1.weight:[512]***layer4.2.conv2.weight:[490, 512, 3, 3]***layer4.2.bn2.weight:[490]***layer4.2.conv3.weight:[2022, 490, 1, 1]***layer4.2.bn3.weight:[2022]***layer4.2.se.conv.weight:[1, 1, 7]***fc.weight:[1000, 2022]***layer1_2_conv3_M.weight:[256, 19]***layer2_3_conv3_M.weight:[512, 171]***layer3_5_conv3_M.weight:[1024, 818]***layer4_2_conv3_M.weight:[2048, 2022]
--------------------------------------------------------------------------------
/qdnet_classifier/models/registry.py:
--------------------------------------------------------------------------------
1 | """ Model Registry
2 | Hacked together by / Copyright 2020 Ross Wightman
3 | """
4 |
5 | import sys
6 | import re
7 | import fnmatch
8 | from collections import defaultdict
9 |
10 | __all__ = ['list_models', 'is_model', 'model_entrypoint', 'list_modules', 'is_model_in_modules']
11 |
12 | _module_to_models = defaultdict(set) # dict of sets to check membership of model in module
13 | _model_to_module = {} # mapping of model names to module names
14 | _model_entrypoints = {} # mapping of model names to entrypoint fns
15 | _model_has_pretrained = set() # set of model names that have pretrained weight url present
16 |
17 |
18 | def register_model(fn):
19 | # lookup containing module
20 | mod = sys.modules[fn.__module__]
21 | module_name_split = fn.__module__.split('.')
22 | module_name = module_name_split[-1] if len(module_name_split) else ''
23 |
24 | # add model to __all__ in module
25 | model_name = fn.__name__
26 | if hasattr(mod, '__all__'):
27 | mod.__all__.append(model_name)
28 | else:
29 | mod.__all__ = [model_name]
30 |
31 | # add entries to registry dict/sets
32 | _model_entrypoints[model_name] = fn
33 | _model_to_module[model_name] = module_name
34 | _module_to_models[module_name].add(model_name)
35 | has_pretrained = False # check if model has a pretrained url to allow filtering on this
36 | if hasattr(mod, 'default_cfgs') and model_name in mod.default_cfgs:
37 | # this will catch all models that have entrypoint matching cfg key, but miss any aliasing
38 | # entrypoints or non-matching combos
39 | has_pretrained = 'url' in mod.default_cfgs[model_name] and 'http' in mod.default_cfgs[model_name]['url']
40 | if has_pretrained:
41 | _model_has_pretrained.add(model_name)
42 | return fn
43 |
44 |
45 | def _natural_key(string_):
46 | return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
47 |
48 |
49 | def list_models(filter='', module='', pretrained=False, exclude_filters=''):
50 | """ Return list of available model names, sorted alphabetically
51 |
52 | Args:
53 | filter (str) - Wildcard filter string that works with fnmatch
54 | module (str) - Limit model selection to a specific sub-module (ie 'gen_efficientnet')
55 | pretrained (bool) - Include only models with pretrained weights if True
56 | exclude_filters (str or list[str]) - Wildcard filters to exclude models after including them with filter
57 |
58 | Example:
59 |         list_models('gluon_resnet*') -- returns all models starting with 'gluon_resnet'
60 |         list_models('*resnext*', 'resnet') -- returns all models with 'resnext' in the 'resnet' module
61 | """
62 | if module:
63 | models = list(_module_to_models[module])
64 | else:
65 | models = _model_entrypoints.keys()
66 | if filter:
67 | models = fnmatch.filter(models, filter) # include these models
68 | if exclude_filters:
69 | if not isinstance(exclude_filters, list):
70 | exclude_filters = [exclude_filters]
71 | for xf in exclude_filters:
72 | exclude_models = fnmatch.filter(models, xf) # exclude these models
73 | if len(exclude_models):
74 | models = set(models).difference(exclude_models)
75 | if pretrained:
76 | models = _model_has_pretrained.intersection(models)
77 | return list(sorted(models, key=_natural_key))
78 |
79 |
80 | def is_model(model_name):
81 | """ Check if a model name exists
82 | """
83 | return model_name in _model_entrypoints
84 |
85 |
86 | def model_entrypoint(model_name):
87 | """Fetch a model entrypoint for specified model name
88 | """
89 | return _model_entrypoints[model_name]
90 |
91 |
92 | def list_modules():
93 | """ Return list of module names that contain models / model entrypoints
94 | """
95 | modules = _module_to_models.keys()
96 | return list(sorted(modules))
97 |
98 |
99 | def is_model_in_modules(model_name, module_names):
100 | """Check if a model exists within a subset of modules
101 | Args:
102 | model_name (str) - name of model to check
103 | module_names (tuple, list, set) - names of modules to search in
104 | """
105 | assert isinstance(module_names, (tuple, list, set))
106 | return any(model_name in _module_to_models[n] for n in module_names)
107 |
108 |
--------------------------------------------------------------------------------
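A sketch of how the registry is meant to be used: a hypothetical entrypoint named `toy_net` (not a model shipped with this repo) is registered and then discovered through `list_models`:

```python
# Sketch only: register a hypothetical 'toy_net' entrypoint and look it up.
import torch.nn as nn

from qdnet_classifier.models.registry import (
    register_model, list_models, is_model, model_entrypoint)

default_cfgs = {'toy_net': {'url': ''}}   # empty url -> not counted as pretrained

@register_model
def toy_net(pretrained=False, **kwargs):
    return nn.Sequential(nn.Conv2d(3, 8, 3), nn.AdaptiveAvgPool2d(1),
                         nn.Flatten(), nn.Linear(8, 2))

print(is_model('toy_net'))     # True
print(list_models('toy_*'))    # ['toy_net'] (only models registered in this process)
model = model_entrypoint('toy_net')(pretrained=False)
```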
/requirements.txt:
--------------------------------------------------------------------------------
1 | torch==1.5.0
2 | albumentations==0.4.6
3 | resnest==0.0.6b20200823
4 | geffnet==0.9.8
5 | numpy==1.18.1
6 | pandas==0.24.2
7 | apex==0.1
8 | opencv_python==4.4.0.42
9 | pretrainedmodels==0.7.4
10 | tqdm==4.31.1
11 | Pillow==7.2.0
12 | scikit_learn==0.23.2
13 | kaggle==1.5.6
14 | -e git+https://github.com/ildoonet/pytorch-gradual-warmup-lr.git#egg=pytorch-gradual-warmup-lr
15 |
16 | # pip install torch==1.8.0+cu111 torchvision==0.9.0+cu111 torchaudio==0.8.0 -f https://download.pytorch.org/whl/torch_stable.html
17 |
--------------------------------------------------------------------------------
/serving/README.md:
--------------------------------------------------------------------------------
1 |
2 | PyTorch Serving (still needs debugging)
3 |
4 | ```
5 | docker-compose up -d
6 | ```
7 |
8 |
9 | See also: https://github.com/MachineLP/QDServing
10 |
--------------------------------------------------------------------------------
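For reference, a client call matching the contract in `serving/core/inference.py` (POST JSON with a `content` image URL). The port assumes the Flask dev server from `serving/flask_run.py` (9939); the commented gunicorn variant listens on 9949 instead, and the image URL below is a placeholder:

```python
# Sketch of a client request to /infer; mirrors serving/tests/post.py.
import json
import requests

resp = requests.post(
    "http://127.0.0.1:9939/infer",                                       # 9949 if served via gunicorn
    data=json.dumps({"content": "https://example.com/some_image.jpg"}),  # placeholder URL
    headers={"content-type": "application/json"},
)
print(resp.json())   # {"code": "200", "data": [...], "message": ""}
```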
/serving/core/inference.py:
--------------------------------------------------------------------------------
1 | # -*- coding:utf-8 -*-
2 | '''
3 | -------------------------------------------------
4 | Description : QDNetInference
5 | Author : machinelp
6 | Date : 2020-08-10
7 | -------------------------------------------------
8 |
9 | '''
10 |
11 | import os
12 | import sys
13 | import json
14 | import argparse
15 | from utils.logging import logging
16 | from flask_restful import Resource,Api
17 | from core.models import QDNetModel
18 | from qdnet.conf.config import load_yaml
19 | from flask import Flask,abort, make_response, request, jsonify
20 | from serving.core.url2img import url2imgcv2
21 |
22 | parser = argparse.ArgumentParser(description='Hyperparams')
23 | parser.add_argument('--config_path', help='config file path')
24 | parser.add_argument('--fold', help='config file path')
25 | args = parser.parse_args()
26 | config = load_yaml(args.config_path, args)
27 |
28 | qdnet_model = QDNetModel(config, args.fold)
29 |
30 | class QDNetInference(Resource):
31 | def __init__(self):
32 | pass
33 |
34 | def post(self):
35 |
36 | if not request.json or 'content' not in request.json :
37 | res = { "code": "400", "data": {}, "message": "request is not json or content not in json" }
38 | return jsonify ( res )
39 |
40 | else:
41 | logging.info( "[QDNetInference] [post] request.json:{}".format( request.json ) )
42 | url = request.json["content"]
43 | logging.info( "[QDNetInference] [post] url:{}".format( url ) )
44 | data = url2imgcv2(url)
45 | pre = qdnet_model.predict(data)
46 | res = { "code": "200", "data": pre, "message": "" }
47 | return jsonify ( res )
48 |
--------------------------------------------------------------------------------
/serving/core/models.py:
--------------------------------------------------------------------------------
1 | # -*- coding:utf-8 -*-
2 | '''
3 | -------------------------------------------------
4 | Description : models
5 | Author : machinelp
6 | Date : 2020-08-27
7 | -------------------------------------------------
8 |
9 | '''
10 |
11 | import os
12 | import time
13 | import random
14 | import cv2
15 | import argparse
16 | import numpy as np
17 | import pandas as pd
18 | from tqdm import tqdm
19 | import torch
20 | import torch.nn as nn
21 | import torch.nn.functional as F
22 |
23 | from qdnet.conf.config import load_yaml
24 | from qdnet.optimizer.optimizer import GradualWarmupSchedulerV2
25 | from qdnet.dataset.dataset import get_df, QDDataset
26 | from qdnet.dataaug.dataaug import get_transforms
27 | from qdnet.models.effnet import Effnet
28 | from qdnet.models.resnest import Resnest
29 | from qdnet.models.se_resnext import SeResnext
30 | from qdnet.conf.constant import Constant
31 | device = torch.device('cuda')
32 |
33 | class QDNetModel():
34 |
35 | def __init__(self, config, fold):
36 |
37 | if config["enet_type"] in Constant.RESNEST_LIST:
38 | ModelClass = Resnest
39 | elif config["enet_type"] in Constant.SERESNEXT_LIST:
40 | ModelClass = SeResnext
41 | elif config["enet_type"] in Constant.GEFFNET_LIST:
42 | ModelClass = Effnet
43 | else:
44 | raise NotImplementedError()
45 |
46 | if config["eval"] == 'best':
47 | model_file = os.path.join(config["model_dir"], f'best_fold{fold}.pth')
48 | if config["eval"] == 'final':
49 | model_file = os.path.join(config["model_dir"], f'final_fold{fold}.pth')
50 | self.model = ModelClass(
51 | enet_type = config["enet_type"],
52 | out_dim = int(config["out_dim"]),
53 | drop_nums = int(config["drop_nums"]),
54 | metric_strategy = config["metric_strategy"]
55 | )
56 | self.model = self.model.to(device)
57 |
58 | try: # single GPU model_file
59 | self.model.load_state_dict(torch.load(model_file), strict=True)
60 | except: # multi GPU model_file
61 | state_dict = torch.load(model_file)
62 | state_dict = {k[7:] if k.startswith('module.') else k: state_dict[k] for k in state_dict.keys()}
63 | self.model.load_state_dict(state_dict, strict=True)
64 | self.model.eval()
65 |
66 | _, self.transforms_val = get_transforms(config["image_size"])
67 |
68 |
69 | def predict(self, data):
70 | if os.path.isfile(data):
71 | image = cv2.imread(data)
72 | image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
73 |         else:   # data is already a decoded BGR image (e.g. from url2imgcv2)
74 |             image = cv2.cvtColor(data, cv2.COLOR_BGR2RGB)
75 | res = self.transforms_val(image=image)
76 | image = res['image'].astype(np.float32)
77 |
78 | image = image.transpose(2, 0, 1)
79 | data = torch.tensor([image]).float()
80 | probs = self.model( data.to(device) )
81 | probs = F.softmax(probs,dim =1)
82 | probs = probs.cpu().detach().numpy()
83 | return probs.argmax(1)
84 |
85 |
--------------------------------------------------------------------------------
/serving/core/url2img.py:
--------------------------------------------------------------------------------
1 | # coding = utf-8
2 |
3 | import os
4 | import sys
5 | import numpy as np
6 | import cv2
7 | from urllib import request
8 | import time
9 |
10 | # Fetch an image from a URL and decode it into an OpenCV (numpy) array.
11 | def url2imgcv2(url):
12 |     # download the image, convert it to a NumPy array, and then read
13 |     # it into OpenCV format
14 |     start_time = time.time()
15 |     # resp = request.urlopen(url, timeout=5)
16 |     resp = request.urlopen(url, timeout=3)
17 |     try:
18 |         # bytearray returns a new byte array from the raw response;
19 |         # np.asarray copies it into a uint8 ndarray
20 |         image = np.asarray(bytearray(resp.read()), dtype="uint8")
21 |         # cv2.imdecode decodes the buffer into an OpenCV image
22 |         image = cv2.imdecode(image, cv2.IMREAD_UNCHANGED)
23 |         # return the image
24 |         # logger.info("url2imgcv2, time: %fs, img_shape: %d, img_url: %s" % ( time.time()-start_time, image.shape[0], url) )
25 |     except:
26 |         print ('url2imgcv2: failed to fetch/decode the image!')
27 |         return None
28 |     # drop the alpha channel of BGRA images
29 |     if image is not None and image.ndim == 3 and image.shape[2] == 4:
30 |         image = image[:, :, :3]
31 |     return image
32 |
--------------------------------------------------------------------------------
/serving/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2.3'
2 | services:
3 |   qdnet_serving:
4 | build: .
5 | restart: always
6 | ports:
7 | - "9930-9939:9930-9939"
8 | volumes:
9 | - .:/workspace
10 | runtime: nvidia
11 | tty: true
12 | command: python serving/flask_run.py --config_path "conf/effb3_ns.yaml" --fold "0" > run.log
13 | # command: gunicorn -w 3 -b 0.0.0.0:9949 serving/flask_run:app
14 | environment:
15 | - CUDA_VISIBLE_DEVICES=2
16 |
--------------------------------------------------------------------------------
/serving/dockerfile:
--------------------------------------------------------------------------------
1 | FROM tensorflow/tensorflow:2.2.0-gpu
2 | RUN apt-get update
3 | RUN apt-get install -y zsh tmux wget git libsndfile1
4 | RUN pip install --upgrade pip -i https://pypi.tuna.tsinghua.edu.cn/simple && \
5 | pip --default-timeout=600 install torch==1.5.0 -i https://pypi.tuna.tsinghua.edu.cn/simple && \
6 | pip --default-timeout=600 install resnest==0.0.6b20200823 -i https://pypi.tuna.tsinghua.edu.cn/simple && \
7 | pip --default-timeout=600 install geffnet==0.9.8 -i https://pypi.tuna.tsinghua.edu.cn/simple && \
8 | pip --default-timeout=600 install opencv_python==4.4.0.42 -i https://pypi.tuna.tsinghua.edu.cn/simple && \
9 | pip --default-timeout=600 install pretrainedmodels==0.7.4 -i https://pypi.tuna.tsinghua.edu.cn/simple && \
10 | pip --default-timeout=600 install Pillow==7.2.0 -i https://pypi.tuna.tsinghua.edu.cn/simple && \
11 | pip --default-timeout=600 install grpcio-tools==1.31.0 -i https://pypi.tuna.tsinghua.edu.cn/simple && \
12 | pip --default-timeout=600 install ipython -i https://pypi.tuna.tsinghua.edu.cn/simple && \
13 | pip --default-timeout=600 install flask==1.1.2 -i https://pypi.tuna.tsinghua.edu.cn/simple && \
14 | pip --default-timeout=600 install requests==2.23.0 -i https://pypi.tuna.tsinghua.edu.cn/simple && \
15 | pip --default-timeout=600 install gunicorn==19.9.0 -i https://pypi.tuna.tsinghua.edu.cn/simple && \
16 | pip --default-timeout=600 install flask_restful==0.3.8 -i https://pypi.tuna.tsinghua.edu.cn/simple && \
17 | pip --default-timeout=600 install jieba==0.42.1 -i https://pypi.tuna.tsinghua.edu.cn/simple && \
18 | pip --default-timeout=600 install dataclasses==0.6 -i https://pypi.tuna.tsinghua.edu.cn/simple && \
19 | pip --default-timeout=600 install zipp==3.1.0 -i https://pypi.tuna.tsinghua.edu.cn/simple && \
20 | pip --default-timeout=600 install pandas==1.0.5 -i https://pypi.tuna.tsinghua.edu.cn/simple
21 |
22 |
23 | RUN mkdir /workspace
24 | WORKDIR /workspace
25 |
26 | COPY ../ /workspace
27 |
28 |
--------------------------------------------------------------------------------
/serving/flask_run.py:
--------------------------------------------------------------------------------
1 | # -*- coding:utf-8 -*-
2 | '''
3 | -------------------------------------------------
4 | Description : Flask
5 | Author : machinelp
6 | Date : 2020-08-27
7 | -------------------------------------------------
8 |
9 | '''
10 | from core.inference import QDNetInference
11 | from flask_restful import Resource,Api
12 | from flask import Flask,abort, make_response, request, jsonify
13 |
14 |
15 | app = Flask(__name__)
16 | api = Api(app)
17 |
18 | class HelloWorld(Resource):
19 | def get(self):
20 | return {'hello': 'world'}
21 |
22 | api.add_resource(HelloWorld, '/')
23 | api.add_resource(QDNetInference, '/infer')
24 |
25 | if __name__ == '__main__':
26 | app.run(host="0.0.0.0", port=9939, debug=True)
27 |
28 |
29 |
30 |
31 |
--------------------------------------------------------------------------------
/serving/tests/post.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import pandas as pd
3 | import json
4 | import time
5 | import numpy as np
6 | import librosa
7 | import argparse
8 |
9 | def parse_args():
10 | parser = argparse.ArgumentParser()
11 | parser.add_argument('--url', help="update mode", type=str, default="https://st0.dancf.com/www/15018/design/20180521-101156-6.png")
12 | args = parser.parse_args()
13 | return args
14 |
15 | if __name__ == '__main__':
16 | args = parse_args()
17 | url = "http://127.0.0.1:9949/infer"
18 | print ( ">>>>>", args.url )
19 | json_data = json.dumps( {"content": args.url} )
20 | headers = {'content-type': 'application/json'}
21 | start_time = time.time()
22 | respond = requests.request("POST", url, data=json_data, headers=headers)
23 | print ("time>>>>>>>", time.time() - start_time)
24 | pre = np.array( respond.json()["data"] )
25 | print ("pre>>>>>>>", pre)
26 |
27 |
--------------------------------------------------------------------------------
/serving/tests/test_qdmodel.py:
--------------------------------------------------------------------------------
1 | # -*- coding:utf-8 -*-
2 | '''
3 | -------------------------------------------------
4 | Description : models
5 | Author : machinelp
6 | Date : 2020-08-27
7 | -------------------------------------------------
8 |
9 | '''
10 |
11 | import os
12 | import sys
13 | import time
14 | import json
15 | import argparse
16 | import numpy as np
17 | from core.models import QDNetModel
18 | from qdnet.conf.config import load_yaml
19 |
20 | parser = argparse.ArgumentParser(description='Hyperparams')
21 | parser.add_argument('--config_path', help='config file path')
22 | parser.add_argument('--img_path', help='config file path')
23 | parser.add_argument('--fold', help='config file path')
24 | args = parser.parse_args()
25 | config = load_yaml(args.config_path, args)
26 |
27 | if __name__ == '__main__':
28 |
29 | qd_model = QDNetModel(config, args.fold)
30 | pre = qd_model.predict(args.img_path)
31 | print (">>>>>", pre)
32 |
33 |
--------------------------------------------------------------------------------
/serving/utils/logging.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import os
4 | import sys
5 | import json
6 | import time
7 | import logging
8 | import logging.handlers
9 | from conf.config import config
10 |
11 | start_time = time.time()
12 | local_time = time.localtime(start_time)
13 |
14 | exec_day = time.strftime('%Y-%m-%d', local_time) # Execution date
15 | exec_hour = time.strftime('%H', local_time)
16 | exec_minute = time.strftime('%M', local_time)
17 |
18 | fmt_str = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
19 | # logging.basicConfig( level=logging.INFO, format=fmt_str, filename=config.LOG_PATH.format(exec_day) )
20 | log_file_handler = logging.handlers.TimedRotatingFileHandler(config.LOG_PATH, when='D', interval=1, backupCount=3)
21 | log_file_handler.suffix = "%Y%m%d_%H%M%S.log"
22 | log_file_handler.setLevel(logging.INFO)
23 | formatter = logging.Formatter(fmt_str)
24 | log_file_handler.setFormatter(formatter)
25 | logging.getLogger('').addHandler(log_file_handler)
26 | logging.info('exec_day :{}, exec_hour :{}, exec_minute :{}'.format(exec_day, exec_hour, exec_minute))
27 |
--------------------------------------------------------------------------------
/tools/data_clean.py:
--------------------------------------------------------------------------------
1 |
2 | import os
3 | import cv2
4 |
5 |
6 | video_path = "./data/download_images/"
7 | all_labels_path = os.listdir(video_path)
8 |
9 | for per_label_path in all_labels_path:
10 | if 'csv' in per_label_path:
11 | continue
12 | per_all_img_path = os.path.join(video_path, per_label_path)
13 | all_image_path = os.listdir(per_all_img_path)
14 | for per_image_path in all_image_path:
15 | per_img_path = os.path.join(video_path, per_label_path, per_image_path)
16 | try:
17 | img = cv2.imread( per_img_path )
18 | if img is None:
19 |                 os.system( "rm -r '{}'".format(per_img_path) )
20 |                 print ("removed unreadable image:", per_img_path)
21 |         except:
22 |             os.system( "rm -r '{}'".format(per_img_path) )
23 |             print ("removed corrupted image:", per_img_path)
24 | continue
25 |
26 | print ( "Finish!!" )
27 |
--------------------------------------------------------------------------------
/tools/data_preprocess_multi_label.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import argparse
4 | import numpy as np
5 | import pandas as pd
6 | from pandas.core.frame import DataFrame
7 | from sklearn.model_selection import train_test_split
8 | from sklearn.model_selection import KFold, StratifiedKFold, GroupKFold
9 |
10 | parser = argparse.ArgumentParser(description='Hyperparams')
11 | parser.add_argument('--data_dir', help='data path', type=str)
12 | parser.add_argument('--n_splits', help='n_splits', type=int)
13 | parser.add_argument('--output_dir', help='output_dir', type=str)
14 | parser.add_argument('--random_state', help='random_state', type=int)
15 | args = parser.parse_args()
16 |
17 |
18 |
19 | if __name__ == '__main__':
20 |
21 | df_data = pd.read_csv(args.data_dir)
22 | img_path_list = df_data['filepath'].values.tolist()
23 | label_list = df_data['target'].values.tolist()
24 |
25 |
26 | data_label = []
27 | for per_img_path, per_label in zip( img_path_list, label_list ):
28 | data_label.append( [ per_img_path, per_label ] )
29 |
30 |
31 | train_list = []
32 | val_list = []
33 | kf = KFold(n_splits=args.n_splits, shuffle=True, random_state=args.random_state)
34 | for index, (train_index, val_index) in enumerate(kf.split(data_label)):
35 | for i in val_index:
36 | data_label[i].append(index)
37 | data_label = np.array( data_label )
38 | # print (data_label)
39 |
40 |
41 | res = DataFrame()
42 | res['filepath'] = data_label[:,0]
43 | res['target'] = data_label[:,1]
44 | res['fold'] = data_label[:,2]
45 | res[ ['filepath', 'target', 'fold'] ].to_csv(args.output_dir, index=False)
46 |
47 |
48 |
--------------------------------------------------------------------------------
/tools/data_preprocess_multi_task.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import argparse
4 | import numpy as np
5 | import pandas as pd
6 | from pandas.core.frame import DataFrame
7 | from sklearn.model_selection import train_test_split
8 | from sklearn.model_selection import KFold, StratifiedKFold, GroupKFold
9 |
10 | parser = argparse.ArgumentParser(description='Hyperparams')
11 | parser.add_argument('--data_dir', help='data path', type=str)
12 | parser.add_argument('--n_splits', help='n_splits', type=int)
13 | parser.add_argument('--output_dir', help='output_dir', type=str)
14 | parser.add_argument('--random_state', help='random_state', type=int)
15 | args = parser.parse_args()
16 |
17 |
18 |
19 | if __name__ == '__main__':
20 |
21 | df_data = pd.read_csv(args.data_dir)
22 | img_path_list = df_data['filepath'].values.tolist()
23 | label1_list = df_data['target1'].values.tolist()
24 | label2_list = df_data['target2'].values.tolist()
25 |
26 |
27 | data_label = []
28 | video_index = []
29 |     video_dict = {}   # video id -> row indices of its frames
30 | for m, (per_img_path, per_label1, per_label2) in enumerate(zip( img_path_list, label1_list, label2_list )):
31 | data_label.append( [ per_img_path, per_label1, per_label2 ] )
32 | video_index.append( per_img_path.split("_")[-2] )
33 | video_dict.setdefault(per_img_path.split("_")[-2], []).append( m )
34 | video_index = list(set(video_index))
35 |
36 | train_list = []
37 | val_list = []
38 | kf = KFold(n_splits=args.n_splits, shuffle=True, random_state=args.random_state)
39 | for index, (train_index, val_index) in enumerate(kf.split(video_index)):
40 | for i in val_index:
41 | #for j, per_data_label in enumerate(data_label):
42 | # # print(">>>>", per_data_label[-2].split("_")[3])
43 | # if per_data_label[0].split("_")[-2] == video_index[i] :
44 | # data_label[j].append(index)
45 | for per_index in video_dict[video_index[i]]:
46 | data_label[per_index].append(index)
47 |
48 | data_label = np.array( data_label )
49 | # print (data_label)
50 |
51 |
52 | res = DataFrame()
53 | res['filepath'] = data_label[:,0]
54 | res['target1'] = data_label[:,1]
55 | res['target2'] = data_label[:,2]
56 | res['fold'] = data_label[:,3]
57 | res[ ['filepath', 'target1', 'target2', 'fold'] ].to_csv(args.output_dir, index=False)
58 |
59 |
60 |
--------------------------------------------------------------------------------
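The script above assigns folds per video (the `split("_")[-2]` token of the filename), so all frames of one video stay in the same fold. A self-contained sketch of that idea with hypothetical filenames:

```python
# Sketch with made-up filenames: assign folds per video id so frames of the
# same video never end up in different folds.
from sklearn.model_selection import KFold

paths = ["img_a_1.jpg", "img_a_2.jpg", "img_b_1.jpg",
         "img_b_2.jpg", "img_c_1.jpg", "img_c_2.jpg"]
video_ids = sorted(set(p.split("_")[-2] for p in paths))      # ['a', 'b', 'c']

fold_of_video = {}
kf = KFold(n_splits=3, shuffle=True, random_state=42)
for fold, (_, val_idx) in enumerate(kf.split(video_ids)):
    for i in val_idx:
        fold_of_video[video_ids[i]] = fold

print({p: fold_of_video[p.split("_")[-2]] for p in paths})    # frames share their video's fold
```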
/tools/generate_csv.py:
--------------------------------------------------------------------------------
1 |
2 | from pathlib import Path
3 | import os
4 | from unicodedata import category
5 | from pandas.core.frame import DataFrame
6 |
7 | # leaf node
8 | leaf_node_list = ['13314', '11270', '11284', '10273', '10294', '11321', '13369', '13372', '10301', '10302', '13374', '13376', '10306', '10307', '11339', '13388', '13406', '10338', '12387', '13422', '13423', '13426', '10355', '13427', '10356', '13428', '13432', '10362', '10364', '10365', '10366', '13439', '10368', '10369', '13459', '11412', '13469', '13470', '10399', '13472', '13473', '13474', '13475', '10404', '13476', '10405', '10407', '13479', '10410', '10411', '13489', '10418', '10419', '13491', '10421', '10422', '13496', '10431', '10432', '10434', '10435', '10436', '10437', '10438', '10440', '13513', '12494', '10449', '10452', '10453', '13525', '10454', '10456', '13530', '10464', '13536', '10466', '10467', '11491', '10468', '10469', '11493', '11496', '10474', '10476', '10479', '10480', '11504', '10481', '10483', '10486', '10487', '11511', '10488', '10491', '10493', '10494', '13567', '11527', '10504', '10505', '10509', '10511', '13584', '13585', '13586', '10515', '13588', '11541', '13589', '10518', '13590', '10519', '13591', '10520', '13596', '10531', '13606', '10536', '13608', '13609', '13610', '10539', '13611', '13612', '12590', '11567', '10544', '10547', '13620', '10550', '13633', '11589', '10566', '10570', '11597', '11598', '13647', '10578', '11604', '10584', '13656', '13657', '10591', '10598', '10603', '11629', '13680', '10609', '10611', '13686', '10615', '10616', '11642', '10619', '11643', '11644', '11645', '11647', '11648', '11649', '11650', '11651', '10628', '13710', '13714', '13716', '10645', '10648', '13722', '10653', '12701', '13725', '10666', '10668', '13740', '10669', '11695', '10679', '11710', '13760', '10691', '10692', '10693', '11719', '13767', '13768', '11721', '13778', '13779', '13780', '12757', '12766', '12767', '13794', '13797', '12777', '13801', '10746', '12795', '11773', '12797', '10752', '10754', '11780', '10757', '10758', '10759', '11783', '10761', '13834', '11793', '10770', '10776', '10778', '12828', '10782', '10784', '11813', '11814', '13863', '11816', '11818', '11819', '11820', '11825', '12849', '11828', '11831', '11838', '11839', '11840', '12864', '12865', '11842', '12866', '12867', '10829', '11870', '10857', '11883', '13936', '13937', '13938', '13939', '13940', '13941', '13942', '10872', '13945', '10876', '10877', '11901', '12926', '10880', '10881', '10886', '10889', '10890', '10891', '11917', '10899', '12950', '12952', '11929', '10913', '11937', '11939', '11940', '11941', '11943', '10920', '11944', '11945', '11946', '10923', '11948', '10925', '11949', '11963', '11964', '11965', '12997', '11980', '10963', '11987', '11005', '11006', '11008', '11009', '11020', '11022', '11028', '12066', '12067', '13094', '10027', '10032', '10035', '12083', '12084', '10039', '10040', '13116', '10048', '10049', '10050', '10054', '10057', '10058', '10059', '10061', '12110', '12113', '11099', '10080', '10084', '12138', '10092', '10094', '12143', '10096', '10098', '10106', '12161', '10115', '11141', '10118', '10120', '10133', '10135', '12184', '12187', '10147', '10150', '10151', '10153', '10154', '12206', '10159', '13250', '12228', '10182', '10183', '13260', '10191', '10193', '10206', '10209', '13283', '13284', '12261', '13286', '12263', '13289', '13291', '13292', '13293', '10224', '13296', '13299', '13300', '13301', '10238']
9 |
10 |
11 | f = open("./data/category.txt")
12 | lines = f.readlines()
13 | category_dict = {}
14 | for line in lines:
15 | per_line = line.strip().split(' ')
16 | if per_line[0] in leaf_node_list:
17 | category_dict[per_line[2]] = per_line[0]
18 |
19 |
20 |
21 |
22 | video_path = "./data/download_images/"
23 | all_labels_path = os.listdir(video_path)
24 |
25 | all_img_path_list = []
26 | content_label_list = []
27 | fold_list = []
28 | for per_label_path in all_labels_path:
29 | if 'csv' in per_label_path:
30 | continue
31 | per_all_img_path = os.path.join(video_path, per_label_path)
32 | all_image_path = os.listdir(per_all_img_path)
33 | for per_image_path in all_image_path:
34 | per_img_path = os.path.join(video_path, per_label_path, per_image_path)
35 | sub_category = per_label_path.split('ads')[0][:-1]
36 | if sub_category in category_dict.keys():
37 |
38 | all_img_path_list.append( per_img_path )
39 | content_label_list.append( category_dict[ sub_category ] )
40 | fold_list.append( 0 )
41 |
42 |
43 |
44 |
45 | res = DataFrame()
46 | res['filepath'] = list( all_img_path_list )
47 | res['target'] = list( content_label_list )
48 | res['fold'] = list( fold_list )
49 | res[ ['filepath', 'target', 'fold'] ].to_csv('./data/data.csv', index=False)
50 |
51 |
--------------------------------------------------------------------------------
/tools/generate_label.py:
--------------------------------------------------------------------------------
1 |
2 | import os
3 | import cv2
4 | import json
5 | import pandas as pd
6 | import numpy as np
7 |
8 |
9 |
10 | leaf_node = ['13314', '11270', '11284', '10273', '10294', '11321', '13369', '13372', '10301', '10302', '13374', '13376', '10306', '10307', '11339', '13388', '13406', '10338', '12387', '13422', '13423', '13426', '10355', '13427', '10356', '13428', '13432', '10362', '10364', '10365', '10366', '13439', '10368', '10369', '13459', '11412', '13469', '13470', '10399', '13472', '13473', '13474', '13475', '10404', '13476', '10405', '10407', '13479', '10410', '10411', '13489', '10418', '10419', '13491', '10421', '10422', '13496', '10431', '10432', '10434', '10435', '10436', '10437', '10438', '10440', '13513', '12494', '10449', '10452', '10453', '13525', '10454', '10456', '13530', '10464', '13536', '10466', '10467', '11491', '10468', '10469', '11493', '11496', '10474', '10476', '10479', '10480', '11504', '10481', '10483', '10486', '10487', '11511', '10488', '10491', '10493', '10494', '13567', '11527', '10504', '10505', '10509', '10511', '13584', '13585', '13586', '10515', '13588', '11541', '13589', '10518', '13590', '10519', '13591', '10520', '13596', '10531', '13606', '10536', '13608', '13609', '13610', '10539', '13611', '13612', '12590', '11567', '10544', '10547', '13620', '10550', '13633', '11589', '10566', '10570', '11597', '11598', '13647', '10578', '11604', '10584', '13656', '13657', '10591', '10598', '10603', '11629', '13680', '10609', '10611', '13686', '10615', '10616', '11642', '10619', '11643', '11644', '11645', '11647', '11648', '11649', '11650', '11651', '10628', '13710', '13714', '13716', '10645', '10648', '13722', '10653', '12701', '13725', '10666', '10668', '13740', '10669', '11695', '10679', '11710', '13760', '10691', '10692', '10693', '11719', '13767', '13768', '11721', '13778', '13779', '13780', '12757', '12766', '12767', '13794', '13797', '12777', '13801', '10746', '12795', '11773', '12797', '10752', '10754', '11780', '10757', '10758', '10759', '11783', '10761', '13834', '11793', '10770', '10776', '10778', '12828', '10782', '10784', '11813', '11814', '13863', '11816', '11818', '11819', '11820', '11825', '12849', '11828', '11831', '11838', '11839', '11840', '12864', '12865', '11842', '12866', '12867', '10829', '11870', '10857', '11883', '13936', '13937', '13938', '13939', '13940', '13941', '13942', '10872', '13945', '10876', '10877', '11901', '12926', '10880', '10881', '10886', '10889', '10890', '10891', '11917', '10899', '12950', '12952', '11929', '10913', '11937', '11939', '11940', '11941', '11943', '10920', '11944', '11945', '11946', '10923', '11948', '10925', '11949', '11963', '11964', '11965', '12997', '11980', '10963', '11987', '11005', '11006', '11008', '11009', '11020', '11022', '11028', '12066', '12067', '13094', '10027', '10032', '10035', '12083', '12084', '10039', '10040', '13116', '10048', '10049', '10050', '10054', '10057', '10058', '10059', '10061', '12110', '12113', '11099', '10080', '10084', '12138', '10092', '10094', '12143', '10096', '10098', '10106', '12161', '10115', '11141', '10118', '10120', '10133', '10135', '12184', '12187', '10147', '10150', '10151', '10153', '10154', '12206', '10159', '13250', '12228', '10182', '10183', '13260', '10191', '10193', '10206', '10209', '13283', '13284', '12261', '13286', '12263', '13289', '13291', '13292', '13293', '10224', '13296', '13299', '13300', '13301', '10238']
11 |
12 | def get_pid_list(df, id):
13 | tmp_df = df[df['id'] == id]
14 |
15 | if tmp_df.empty or tmp_df['pid'].values[0] is None:
16 | return pd.DataFrame(columns=['id', 'pid'])
17 | else:
18 | return get_pid_list(df, tmp_df['pid'].values[0]).append(tmp_df)
19 |
20 |
21 |
22 | f = open("./data/category.txt")
23 | lines = f.readlines()
24 |
25 | id_list = []
26 | pid_list = []
27 | permission_source = []
28 | for line in lines:
29 | per_line = line.strip().split(' ')
30 | id_list.append( per_line[0])
31 | if per_line[1] != 'null':
32 | pid_list.append( per_line[1] )
33 | else:
34 | pid_list.append( None )
35 |
36 | df = pd.DataFrame(
37 | {
38 | 'id': id_list,
39 | 'pid': pid_list
40 | }
41 | )
42 |
43 | id_list = df['id'].values
44 | father_dict = {}
45 | for i in id_list:
46 |
47 | pid_list = get_pid_list(df, i)['pid'].values.tolist()
48 | # print(i, pid_list)
49 | father_dict[i] = pid_list
50 |
51 | # Build an index of category ids for each level of the hierarchy
52 | label_list_dict = {0:[], 1:[], 2:[], 3:[], 4:[], 5:[]}
53 |
54 | for per_leaf in leaf_node:
55 |     # ancestor chain for this leaf
56 |     # (at most 5 ancestors + the leaf itself)
57 | pid_list = father_dict[per_leaf] + [str(per_leaf)]
58 | for i in range(6):
59 | try:
60 | father_id = pid_list[i]
61 | if father_id not in label_list_dict[i]:
62 | label_list_dict[i].append( father_id )
63 | except:
64 | continue
65 | for label_key, label_value in label_list_dict.items():
66 | # print ( "label_key:{}, label_value:{}".format( label_key, label_value ) )
67 | print ( "label_key num:{}, label_value num :{}".format( label_key, len(label_value) ) )
68 |
69 |
70 | '''
71 | label_key num:0, label_value num :22
72 | label_key num:1, label_value num :146
73 | label_key num:2, label_value num :233
74 | label_key num:3, label_value num :77
75 | label_key num:4, label_value num :20
76 | label_key num:5, label_value num :11
77 | '''
78 | all_num = 0
79 | for label_key, label_value in label_list_dict.items():
80 | all_num += len(label_value)
81 |
82 | label_dict = {}
83 | for label_index, per_leaf in enumerate( leaf_node ):
84 |     # ancestor chain for this leaf
85 |     # (at most 5 ancestors + the leaf itself)
86 | pid_list = father_dict[per_leaf] + [str(per_leaf)]
87 | sum = 0
88 | label = np.zeros([all_num,], dtype=np.float32)
89 | for i in range(6):
90 | try:
91 | father_id = pid_list[i]
92 | label[int(sum+label_list_dict[i].index(father_id))] = 1.0
93 | sum += len( label_list_dict[i] )
94 | except:
95 | continue
96 |
97 | label_dict[per_leaf] = label
98 | # break
99 |
100 | print ( "label_dict>>>>>>", label_dict )
101 |
102 |
103 | # Recover the concrete category from the multi-label output via per-level slices:
104 | # label_dict[13314]
105 | # label_dict[13314][0:22]
106 | # label_dict[13314][22:22+146]
107 | # label_dict[13314][22+146:22+146+233]
108 | # label_dict[13314][22+146+233:22+146+233+77]
109 | # label_dict[13314][22+146+233+77:22+146+233+77+20]
110 | # label_dict[13314][22+146+233+77+20:22+146+233+77+20+11]
111 |
--------------------------------------------------------------------------------
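The trailing comments show how the flat label vector is laid out level by level (22 + 146 + 233 + 77 + 20 + 11 entries). A sketch of decoding such a vector back into its category path; `decode_label` and `LEVEL_SIZES` are names introduced here, and `label_list_dict` is the mapping built by the script above:

```python
# Sketch: recover the per-level category ids from a label vector produced above.
LEVEL_SIZES = [22, 146, 233, 77, 20, 11]           # counts printed by the script

def decode_label(label_vec, label_list_dict):
    """Return the category id chosen at each level (skipping empty levels)."""
    offset, path = 0, []
    for level, size in enumerate(LEVEL_SIZES):
        chunk = label_vec[offset:offset + size]
        if chunk.max() > 0:                        # shallow leaves leave deeper levels at zero
            path.append(label_list_dict[level][int(chunk.argmax())])
        offset += size
    return path

# e.g. decode_label(label_dict['13314'], label_list_dict) -> root ... '13314'
```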
/tools/generate_label_tree.py:
--------------------------------------------------------------------------------
1 |
2 | import os
3 | import cv2
4 | import json
5 |
6 |
7 |
8 | def generate_tree(source, parent):
9 | tree = []
10 | for item in source:
11 | if item["parent"] == parent:
12 | item["child"] = generate_tree(source, item["id"])
13 | tree.append(item)
14 | return tree
15 |
16 |
17 | f = open("./data/category.txt")
18 | lines = f.readlines()
19 |
20 | permission_source = []
21 | for line in lines:
22 | per_line = line.strip().split(' ')
23 | per_item_dict = {}
24 | per_item_dict["id"] = per_line[0]
25 | if per_line[1] != "null":
26 | per_item_dict["parent"] = per_line[1]
27 | else:
28 | per_item_dict["parent"] = 0
29 | per_item_dict["name"] = per_line[2]
30 | permission_source.append( per_item_dict )
31 |
32 |
33 | permission_tree = generate_tree(permission_source, 0)
34 |
35 | print(json.dumps(permission_tree, ensure_ascii=False))
36 |
37 |
38 |
--------------------------------------------------------------------------------
/tools/pytorch_to_onnx_muliti_task.py:
--------------------------------------------------------------------------------
1 | # -*- coding:utf-8 -*-
2 | import os
3 | import sys
4 | sys.path.append('./')
5 | import yaml
6 | import math
7 | import argparse
8 | import torch.nn as nn
9 | import torch
10 | import cv2
11 | import numpy as np
12 | import onnx
13 | import time
14 | import onnxruntime
15 | from PIL import Image
16 | from qdnet.dataaug.dataaug import get_transforms
17 | from qdnet.conf.config import load_yaml
18 |
19 | from qdnet.conf.config import load_yaml
20 | from qdnet.optimizer.optimizer import GradualWarmupSchedulerV2
21 | from qdnet.dataset.dataset import get_df, QDDataset
22 | from qdnet.dataaug.dataaug import get_transforms
23 | from qdnet.models.effnet import Effnet
24 | from qdnet.models.resnest import Resnest
25 | from qdnet.models.se_resnext import SeResnext
26 | from qdnet.loss.loss import Loss
27 | from qdnet.conf.constant import Constant
28 | from qdnet_classifier.classifier_multi_task import MultiLabelModel
29 | parser = argparse.ArgumentParser(description='Hyperparams')
30 | parser.add_argument('--img_path', nargs='?', type=str, default=None)
31 | parser.add_argument('--config_path', help='config file path')
32 | parser.add_argument('--batch_size', nargs='?', type=int, default=None)
33 | parser.add_argument('--fold', help='config file path', type=int)
34 | parser.add_argument('--save_path', help='config file path', type=str)
35 | args = parser.parse_args()
36 | config = load_yaml(args.config_path, args)
37 |
38 |
39 | if config["enet_type"] in Constant.RESNET_LIST:
40 | ModelClass = MultiLabelModel
41 | else:
42 | raise NotImplementedError()
43 |
44 | model = ModelClass(
45 | config["enet_type"],
46 | config["out_dim1"],
47 | config["out_dim2"],
48 | pretrained = config["pretrained"] )
49 | device = torch.device('cuda')
50 | model = model.to(device)
51 |
52 |
53 | def gen_onnx(args):
54 |
55 |
56 | if config["eval"] == 'best':
57 | model_file = os.path.join(config["model_dir"], f'best_fold{args.fold}.pth')
58 | if config["eval"] == 'final':
59 | model_file = os.path.join(config["model_dir"], f'final_fold{args.fold}.pth')
60 |
61 |
62 | try: # single GPU model_file
63 | model.load_state_dict(torch.load(model_file), strict=True)
64 | except: # multi GPU model_file
65 | state_dict = torch.load(model_file)
66 | state_dict = {k[7:] if k.startswith('module.') else k: state_dict[k] for k in state_dict.keys()}
67 | model.load_state_dict(state_dict, strict=True)
68 |
69 | model.eval()
70 |
71 | print('load model ok.....')
72 |
73 |
74 | img = cv2.imread(args.img_path)
75 | transforms_train, transforms_val = get_transforms(config["image_size"])
76 | # img1 = transforms.ToTensor()(img1)
77 | img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
78 | res = transforms_val(image=img)
79 | img1 = res['image'].astype(np.float32)
80 | img1 = img1.transpose(2, 0, 1)
81 | img1 = torch.tensor([img1]).float()
82 |
83 | s = time.time()
84 | with torch.no_grad():
85 | out = model(img1.to(device))
86 | probs = out
87 | print ("probs>>>>>",probs)
88 |
89 | print('cost time:',time.time()-s)
90 |
91 | output_onnx = args.save_path
92 | print("==> Exporting model to ONNX format at '{}'".format(output_onnx))
93 | input_names = ["input"]
94 | # output_names = ["hm" , "wh" , "reg"]
95 | output_names = ["out"]
96 | dynamic_axes = {'input': {0: 'batch'}, 'out': {0: 'batch'}}
97 | inputs = torch.randn(args.batch_size, 3,156,156).cuda()
98 | '''
99 | export_type = torch.onnx.OperatorExportTypes.ONNX
100 | torch_out = torch.onnx._export(model, inputs, output_onnx, export_params=True, verbose=False,do_constant_folding=False,keep_initializers_as_inputs=True,
101 | input_names=input_names, output_names=output_names, operator_export_type=export_type, dynamic_axes=dynamic_axes)
102 | '''
103 | # torch.onnx.export(model, inputs, output_onnx, input_names=input_names, output_names=output_names, dynamic_axes=dynamic_axes)
104 | # torch.onnx.export(model, inputs, output_onnx, verbose=False, export_params=True, training=False, opset_version=10, example_outputs=probs, input_names=input_names, output_names=output_names, dynamic_axes=dynamic_axes)
105 | torch.onnx.export(model, inputs, output_onnx)
106 | onnx_path = args.save_path
107 | session = onnxruntime.InferenceSession(onnx_path)
108 | input_name = session.get_inputs()[0].name
109 |
110 | image = img1.cpu().detach().numpy()
111 | image = image.astype(np.float32)
112 | print (">>>>>", image.shape)
113 | s = time.time()
114 | preds = session.run([], {input_name: image})
115 | print ("preds>>>>>",preds)
116 | preds = preds[0]
117 | print('cost time:', time.time()-s)
118 | if isinstance(preds,dict):
119 | preds = preds['f_score']
120 |
121 | cv2.imwrite('./onnx/onnx_output.jpg',preds[0,0]*255)
122 |
123 | print('error_distance:',np.abs((out.cpu().detach().numpy()-preds)).mean())
124 |
125 | if __name__ == "__main__":
126 | gen_onnx(args)
127 |     os.system("pip install onnx-simplifier")
128 |     os.system("python -m onnxsim {} {}".format(args.save_path, args.save_path.replace(".onnx", "_sim.onnx")))
129 |
130 |
--------------------------------------------------------------------------------
/tools/search_leaf.py:
--------------------------------------------------------------------------------
1 |
2 | import os
3 | import cv2
4 | import json
5 | import pandas as pd
6 |
7 |
8 |
9 | f = open("./data/category.txt")
10 | lines = f.readlines()
11 |
12 | id_list = []
13 | pid_list = []
14 | leaf_node = []
15 | for line in lines:
16 | per_line = line.strip().split(' ')
17 | id_list.append( per_line[0])
18 | if per_line[1] != 'null':
19 | pid_list.append( per_line[1] )
20 | else:
21 | pid_list.append( None )
22 |
23 | for per_id in id_list:
24 | if per_id not in pid_list:
25 | leaf_node.append(per_id)
26 |
27 | print ( "leaf_node>>>>>>", leaf_node )
28 |
29 |
30 | '''
31 | ['13314', '11270', '11284', '10273', '10294', '11321', '13369', '13372', '10301', '10302', '13374', '13376', '10306', '10307', '11339', '13388', '13406', '10338', '12387', '13422', '13423', '13426', '10355', '13427', '10356', '13428', '13432', '10362', '10364', '10365', '10366', '13439', '10368', '10369', '13459', '11412', '13469', '13470', '10399', '13472', '13473', '13474', '13475', '10404', '13476', '10405', '10407', '13479', '10410', '10411', '13489', '10418', '10419', '13491', '10421', '10422', '13496', '10431', '10432', '10434', '10435', '10436', '10437', '10438', '10440', '13513', '12494', '10449', '10452', '10453', '13525', '10454', '10456', '13530', '10464', '13536', '10466', '10467', '11491', '10468', '10469', '11493', '11496', '10474', '10476', '10479', '10480', '11504', '10481', '10483', '10486', '10487', '11511', '10488', '10491', '10493', '10494', '13567', '11527', '10504', '10505', '10509', '10511', '13584', '13585', '13586', '10515', '13588', '11541', '13589', '10518', '13590', '10519', '13591', '10520', '13596', '10531', '13606', '10536', '13608', '13609', '13610', '10539', '13611', '13612', '12590', '11567', '10544', '10547', '13620', '10550', '13633', '11589', '10566', '10570', '11597', '11598', '13647', '10578', '11604', '10584', '13656', '13657', '10591', '10598', '10603', '11629', '13680', '10609', '10611', '13686', '10615', '10616', '11642', '10619', '11643', '11644', '11645', '11647', '11648', '11649', '11650', '11651', '10628', '13710', '13714', '13716', '10645', '10648', '13722', '10653', '12701', '13725', '10666', '10668', '13740', '10669', '11695', '10679', '11710', '13760', '10691', '10692', '10693', '11719', '13767', '13768', '11721', '13778', '13779', '13780', '12757', '12766', '12767', '13794', '13797', '12777', '13801', '10746', '12795', '11773', '12797', '10752', '10754', '11780', '10757', '10758', '10759', '11783', '10761', '13834', '11793', '10770', '10776', '10778', '12828', '10782', '10784', '11813', '11814', '13863', '11816', '11818', '11819', '11820', '11825', '12849', '11828', '11831', '11838', '11839', '11840', '12864', '12865', '11842', '12866', '12867', '10829', '11870', '10857', '11883', '13936', '13937', '13938', '13939', '13940', '13941', '13942', '10872', '13945', '10876', '10877', '11901', '12926', '10880', '10881', '10886', '10889', '10890', '10891', '11917', '10899', '12950', '12952', '11929', '10913', '11937', '11939', '11940', '11941', '11943', '10920', '11944', '11945', '11946', '10923', '11948', '10925', '11949', '11963', '11964', '11965', '12997', '11980', '10963', '11987', '11005', '11006', '11008', '11009', '11020', '11022', '11028', '12066', '12067', '13094', '10027', '10032', '10035', '12083', '12084', '10039', '10040', '13116', '10048', '10049', '10050', '10054', '10057', '10058', '10059', '10061', '12110', '12113', '11099', '10080', '10084', '12138', '10092', '10094', '12143', '10096', '10098', '10106', '12161', '10115', '11141', '10118', '10120', '10133', '10135', '12184', '12187', '10147', '10150', '10151', '10153', '10154', '12206', '10159', '13250', '12228', '10182', '10183', '13260', '10191', '10193', '10206', '10209', '13283', '13284', '12261', '13286', '12263', '13289', '13291', '13292', '13293', '10224', '13296', '13299', '13300', '13301', '10238']
32 | '''
33 |
--------------------------------------------------------------------------------
/tools/torch2trt_new.py:
--------------------------------------------------------------------------------
1 |
2 | """
3 | # conda environment, Python 3.8
4 |
5 | # Install torch, https://pytorch.org/get-started/locally/
6 | python -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu113
7 |
8 | # Install tensorrt, https://docs.nvidia.com/deeplearning/tensorrt/install-guide/index.html#installing-pip
9 | pip install nvidia-pyindex
10 | pip install --upgrade nvidia-tensorrt==8.0.1.6
11 |
12 | # Install torch2trt, https://github.com/NVIDIA-AI-IOT/torch2trt
13 | git clone https://github.com/NVIDIA-AI-IOT/torch2trt
14 | cd torch2trt
15 | python setup.py install
16 |
17 | # Install opencv
18 | pip install opencv-contrib-python
19 |
20 | # Install albumentations
21 | pip install albumentations
22 |
23 | # Install CUDA
24 | https://developer.nvidia.com/cuda-11-4-0-download-archive?target_os=Linux&target_arch=x86_64&Distribution=Ubuntu&target_version=20.04&target_type=runfile_local
25 | """
26 |
27 | import torch
28 | from torch2trt import torch2trt, TRTModule
29 | import tensorrt as trt
30 | import cv2
31 |
32 | from infer import QDNetModel, get_transforms
33 |
34 | def pth2trt(model_pth, torch2trtPath, fp16, image_size=320):
35 | print("torch2trt, may take 1 minute...")
36 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
37 | x = torch.ones(1, 3, image_size, image_size).to(device)
38 | model_pth.float()
39 | model_trt = torch2trt(model_pth, [x], fp16_mode=fp16,
40 | log_level=trt.Logger.INFO,
41 | max_workspace_size=(1 << 32),)
42 |     # a PyTorch model that can later be reloaded via torch2trt.TRTModule
43 | pred = model_trt(x)
44 | torch.save(model_trt.state_dict(), torch2trtPath)
45 |
46 | class QDNetModel_Trt(QDNetModel):
47 | def __init__(self, torch2trtPath, image_size=256):
48 | self.transforms_val = get_transforms(image_size)
49 | self.model = TRTModule()
50 | self.model.load_state_dict(torch.load(torch2trtPath))
51 |
52 | torch2trtPath = "./model1.torch2trt"
53 | infer_obj = QDNetModel()
54 | model_pth = infer_obj.model   # assuming QDNetModel keeps the torch module in .model (as in serving/core/models.py)
55 | # pth2trt(model_pth, torch2trtPath, fp16=1, image_size=256)
56 |
57 | infer_obj_trt = QDNetModel_Trt(torch2trtPath, image_size=256)
58 | img1 = cv2.imread("./finance.png")
59 | l1 = infer_obj.predict(img1)
60 | l2 = infer_obj_trt.predict(img1)
61 | print(l1, l2)
62 |
63 |
--------------------------------------------------------------------------------