├── LICENSE
├── README.md
├── __pycache__
│   ├── engine_multi.cpython-37.pyc
│   ├── engine_multi_mm.cpython-37.pyc
│   └── engine_single.cpython-37.pyc
├── benchmark.py
├── configs
│   ├── .r101_train_multi_mm1.sh.swp
│   ├── r101_eval_multi.sh
│   ├── r101_eval_single.sh
│   ├── r101_train_multi.sh
│   ├── r101_train_single.sh
│   ├── r50_eval_multi.sh
│   ├── r50_eval_single.sh
│   ├── r50_train_multi.sh
│   └── r50_train_single.sh
├── datasets
│   ├── .vid_multi_mm.py.swp
│   ├── __init__.py
│   ├── __pycache__
│   │   ├── __init__.cpython-37.pyc
│   │   ├── coco.cpython-37.pyc
│   │   ├── coco_eval.cpython-37.pyc
│   │   ├── coco_video_parser.cpython-37.pyc
│   │   ├── data_prefetcher_multi.cpython-37.pyc
│   │   ├── data_prefetcher_single.cpython-37.pyc
│   │   ├── panoptic_eval.cpython-37.pyc
│   │   ├── samplers.cpython-37.pyc
│   │   ├── transforms.cpython-37.pyc
│   │   ├── transforms_mm.cpython-37.pyc
│   │   ├── transforms_multi.cpython-37.pyc
│   │   ├── transforms_single.cpython-37.pyc
│   │   ├── vid_multi.cpython-37.pyc
│   │   ├── vid_multi_mm.cpython-37.pyc
│   │   └── vid_single.cpython-37.pyc
│   ├── coco.py
│   ├── coco_eval.py
│   ├── coco_panoptic.py
│   ├── coco_video_parser.py
│   ├── data_prefetcher_multi.py
│   ├── data_prefetcher_single.py
│   ├── panoptic_eval.py
│   ├── parsers
│   │   ├── __init__.py
│   │   └── coco_video_parser.py
│   ├── samplers.py
│   ├── torchvision_datasets
│   │   ├── __init__.py
│   │   ├── __pycache__
│   │   │   ├── __init__.cpython-37.pyc
│   │   │   └── coco.cpython-37.pyc
│   │   └── coco.py
│   ├── transforms_multi.py
│   ├── transforms_single.py
│   ├── vid_multi.py
│   └── vid_single.py
├── docs
│   └── changelog.md
├── engine_multi.py
├── engine_single.py
├── figs
│   └── teaser.png
├── main.py
├── models
│   ├── .deformable_transformer_mm.py.swp
│   ├── __init__.py
│   ├── __pycache__
│   │   ├── __init__.cpython-37.pyc
│   │   ├── backbone.cpython-37.pyc
│   │   ├── deformable_detr.cpython-37.pyc
│   │   ├── deformable_detr_multi.cpython-37.pyc
│   │   ├── deformable_detr_multi_mm.cpython-37.pyc
│   │   ├── deformable_detr_single.cpython-37.pyc
│   │   ├── deformable_transformer.cpython-37.pyc
│   │   ├── deformable_transformer_mm.cpython-37.pyc
│   │   ├── deformable_transformer_multi.cpython-37.pyc
│   │   ├── deformable_transformer_single.cpython-37.pyc
│   │   ├── matcher.cpython-37.pyc
│   │   ├── position_encoding.cpython-37.pyc
│   │   └── segmentation.cpython-37.pyc
│   ├── backbone.py
│   ├── deformable_detr_multi.py
│   ├── deformable_detr_single.py
│   ├── deformable_transformer_multi.py
│   ├── deformable_transformer_single.py
│   ├── matcher.py
│   ├── ops
│   │   ├── MultiScaleDeformableAttention.cpython-37m-x86_64-linux-gnu.so
│   │   ├── MultiScaleDeformableAttention.egg-info
│   │   │   ├── PKG-INFO
│   │   │   ├── SOURCES.txt
│   │   │   ├── dependency_links.txt
│   │   │   └── top_level.txt
│   │   ├── build
│   │   │   ├── lib.linux-x86_64-3.7
│   │   │   │   ├── MultiScaleDeformableAttention.cpython-37m-x86_64-linux-gnu.so
│   │   │   │   ├── functions
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   └── ms_deform_attn_func.py
│   │   │   │   └── modules
│   │   │   │       ├── __init__.py
│   │   │   │       └── ms_deform_attn.py
│   │   │   └── temp.linux-x86_64-3.7
│   │   │       ├── .ninja_deps
│   │   │       ├── .ninja_log
│   │   │       ├── build.ninja
│   │   │       └── mnt
│   │   │           └── lustre
│   │   │               └── zhouqianyu
│   │   │                   └── vod
│   │   │                       └── workspace
│   │   │                           └── TransVOD
│   │   │                               └── models
│   │   │                                   └── ops
│   │   │                                       └── src
│   │   │                                           ├── cpu
│   │   │                                           │   └── ms_deform_attn_cpu.o
│   │   │                                           ├── cuda
│   │   │                                           │   └── ms_deform_attn_cuda.o
│   │   │                                           └── vision.o
│   │   ├── functions
│   │   │   ├── __init__.py
│   │   │   ├── __pycache__
│   │   │   │   ├── __init__.cpython-37.pyc
│   │   │   │   └── ms_deform_attn_func.cpython-37.pyc
│   │   │   └── ms_deform_attn_func.py
│   │   ├── make.sh
│   │   ├── modules
│   │   │   ├── __init__.py
│   │   │   ├── __pycache__
│   │   │   │   ├── __init__.cpython-37.pyc
│   │   │   │   └── ms_deform_attn.cpython-37.pyc
│   │   │   └── ms_deform_attn.py
│   │   ├── setup.py
│   │   ├── src
│   │   │   ├── cpu
│   │   │   │   ├── ms_deform_attn_cpu.cpp
│   │   │   │   └── ms_deform_attn_cpu.h
│   │   │   ├── cuda
│   │   │   │   ├── ms_deform_attn_cuda.cu
│   │   │   │   ├── ms_deform_attn_cuda.h
│   │   │   │   └── ms_deform_im2col_cuda.cuh
│   │   │   ├── ms_deform_attn.h
│   │   │   └── vision.cpp
│   │   └── test.py
│   ├── position_encoding.py
│   └── segmentation.py
├── requirements.txt
├── tools
│   ├── launch.py
│   ├── run_dist_launch.sh
│   └── run_dist_slurm.sh
└── util
    ├── __init__.py
    ├── __pycache__
    │   ├── __init__.cpython-37.pyc
    │   ├── box_ops.cpython-37.pyc
    │   ├── misc.cpython-37.pyc
    │   ├── misc_mm.cpython-37.pyc
    │   └── misc_multi.cpython-37.pyc
    ├── box_ops.py
    ├── misc.py
    ├── misc_multi.py
    └── plot_utils.py
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | [](https://paperswithcode.com/sota/video-object-detection-on-imagenet-vid?p=transvod-end-to-end-video-object-detection)
4 |
5 |
6 | This repository is an official implementation of the paper [End-to-End Video Object Detection with Spatial-Temporal Transformers](https://dlnext.acm.org/doi/10.1145/3474085.3475285).
7 |
8 | ## News: Our new models, TransVOD Lite and TransVOD++, have been accepted by TPAMI 2022. [Paper](https://arxiv.org/abs/2201.05047), [TransVOD Lite code](https://github.com/qianyuzqy/TransVOD_Lite), [TransVOD++ code](https://github.com/qianyuzqy/TransVOD_plusplus)
9 |
10 | # End-to-End Video Object Detection with Spatial-Temporal Transformers (ACM MM 2021)
11 |
12 | ## Introduction
13 |
14 | **TransVOD** is a fully end-to-end video object detection framework based on the Transformer. It directly outputs detection results without any complicated post-processing.
15 |
16 |

18 |
19 |
20 | **Abstract.**
21 | Recently, DETR and Deformable DETR were proposed to eliminate the need for many hand-designed components in object detection while demonstrating performance on par with previous complex hand-crafted detectors. However, their performance on Video Object Detection (VOD) has not been well explored. In this paper, we present TransVOD, an end-to-end video object detection model based on a spatial-temporal Transformer architecture. The goal of this paper is to streamline the VOD pipeline, effectively removing the need for many hand-crafted components for feature aggregation, e.g., optical flow, recurrent neural networks, and relation networks. Moreover, benefiting from the object query design in DETR, our method does not need complicated post-processing such as Seq-NMS or Tubelet rescoring, which keeps the pipeline simple and clean. In particular, we present a temporal Transformer to aggregate both the spatial object queries and the feature memories of each frame. Our temporal Transformer consists of three components: a Temporal Deformable Transformer Encoder (TDTE) to encode multi-frame spatial details, a Temporal Query Encoder (TQE) to fuse object queries, and a Temporal Deformable Transformer Decoder (TDTD) to obtain current-frame detection results. These designs boost the strong Deformable DETR baseline by a significant margin (3%-4% mAP) on the ImageNet VID dataset, and TransVOD yields competitive results on this benchmark. We hope our TransVOD can provide a new perspective for video object detection.
22 |
23 | ## Updates
24 | - (2022/04/03) Code and pretrained weights for TransVOD released.
25 |
26 |
27 | ## Main Results
28 |
29 | | **Method** | **Backbone** | **Frame Numbers** | **AP50** | **URL** |
30 | | :--------: | :---------: | :------------: | :------: | :-----------------------------------------------------------------------------------------: |
31 | | Deformable DETR | ResNet50 | 1 | 76 |[model](https://drive.google.com/drive/folders/1FTRz-O1_-IL_la-2jQzDiZgvI_NLRPme?usp=sharing) |
32 | | Deformable DETR | ResNet101 | 1 | 78.3 |[model](https://drive.google.com/drive/folders/1FTRz-O1_-IL_la-2jQzDiZgvI_NLRPme?usp=sharing) |
33 | | TransVOD | ResNet50 | 15 | 79.9 |[model](https://drive.google.com/drive/folders/1FTRz-O1_-IL_la-2jQzDiZgvI_NLRPme?usp=sharing) |
34 | | TransVOD | ResNet101 | 15 | 81.9 |[model](https://drive.google.com/drive/folders/1FTRz-O1_-IL_la-2jQzDiZgvI_NLRPme?usp=sharing) |
35 |
36 |
37 |
38 | *Note:*
39 | 1. All TransVOD models are trained from weights pre-trained on the COCO dataset.
40 |
41 |
42 | ## Installation
43 |
44 | The codebase is built on top of [Deformable DETR](https://github.com/fundamentalvision/Deformable-DETR).
45 |
46 | ### Requirements
47 |
48 | * Linux, CUDA>=9.2, GCC>=5.4
49 |
50 | * Python>=3.7
51 |
52 | We recommend using Anaconda to create a conda environment:
53 | ```bash
54 | conda create -n TransVOD python=3.7 pip
55 | ```
56 | Then, activate the environment:
57 | ```bash
58 | conda activate TransVOD
59 | ```
60 |
61 | * PyTorch>=1.5.1, torchvision>=0.6.1 (follow the instructions [here](https://pytorch.org/))
62 |
63 | For example, if your CUDA version is 9.2, you could install PyTorch and torchvision as follows:
64 | ```bash
65 | conda install pytorch=1.5.1 torchvision=0.6.1 cudatoolkit=9.2 -c pytorch
66 | ```
67 |
68 | * Other requirements
69 | ```bash
70 | pip install -r requirements.txt
71 | ```
72 |
73 | * Build MultiScaleDeformableAttention
74 | ```bash
75 | cd ./models/ops
76 | sh ./make.sh
77 | ```
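If the extension builds without errors, you can optionally sanity-check the compiled CUDA op with the small test script that ships in the same directory (`models/ops/test.py`, visible in the tree above); this step requires a GPU and is not needed for training:

```bash
cd ./models/ops
python test.py
```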
78 |
79 | ## Usage
80 |
81 | ### Dataset preparation
82 |
83 | 1. Please download the ILSVRC2015 DET and ILSVRC2015 VID datasets from [here](https://image-net.org/challenges/LSVRC/2015/2015-downloads). Then convert the annotations of the two datasets to COCO-style JSON using this [code](https://github.com/open-mmlab/mmtracking/blob/master/tools/convert_datasets/ilsvrc/). A joint [json](https://drive.google.com/drive/folders/1cCXY41IFsLT-P06xlPAGptG7sc-zmGKF?usp=sharing) of the two datasets is also provided. After that, we recommend symlinking the dataset path into the repository (see the sketch after the layout below). The path structure should be as follows:
84 |
85 | ```
86 | code_root/
87 | └── data/
88 | └── vid/
89 | ├── Data
90 | ├── VID/
91 | └── DET/
92 | └── annotations/
93 | ├── imagenet_vid_train.json
94 | ├── imagenet_vid_train_joint_30.json
95 | └── imagenet_vid_val.json
96 |
97 | ```
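For reference, a minimal sketch of the linking step, assuming the extracted ILSVRC2015 images live at `/path/to/ILSVRC2015` and the converted JSON files at `/path/to/vid_annotations` (both paths are placeholders):

```bash
cd code_root
mkdir -p data/vid/annotations
ln -s /path/to/ILSVRC2015/Data data/vid/Data
ln -s /path/to/vid_annotations/imagenet_vid_train.json data/vid/annotations/
ln -s /path/to/vid_annotations/imagenet_vid_train_joint_30.json data/vid/annotations/
ln -s /path/to/vid_annotations/imagenet_vid_val.json data/vid/annotations/
```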
98 |
99 | ### Training
100 | We use ResNet50 and ResNet101 as network backbones. To train TransVOD with a ResNet50 backbone:
101 |
102 | #### Training on single node
103 | 1. Train the SingleBaseline model. You can download COCO-pretrained weights from [Deformable DETR](https://github.com/fundamentalvision/Deformable-DETR).
104 |
105 | ```bash
106 | GPUS_PER_NODE=8 ./tools/run_dist_launch.sh $1 r50 $2 configs/r50_train_single.sh
107 | ```
108 | 2. Train TransVOD, using the SingleBaseline weights from the previous step as the resume checkpoint.
109 |
110 | ```bash
111 | GPUS_PER_NODE=8 ./tools/run_dist_launch.sh $1 r50 $2 configs/r50_train_multi.sh
112 | ```
113 |
114 |
115 | #### Training on slurm cluster
116 | If you are using a slurm cluster, you can simply run the following command to train on 1 node with 8 GPUs:
117 | ```bash
118 | GPUS_PER_NODE=8 ./tools/run_dist_slurm.sh r50 8 configs/r50_train_multi.sh
119 | ```
120 |
121 | ### Evaluation
122 | Download the config files and pretrained models of TransVOD (the links are in the "Main Results" section above), then put the pretrained models into the corresponding folders:
123 | ```
124 | code_root/
125 | └── exps/
126 | └── our_models/
127 | ├── COCO_pretrained_model
128 | ├── exps_single
129 | └── exps_multi
130 | ```
131 | Then run the following command to evaluate on the ImageNet VID validation set:
132 | ```bash
133 | GPUS_PER_NODE=8 ./tools/run_dist_launch.sh $1 eval_r50 $2 configs/r50_eval_multi.sh
134 | ```
135 |
136 |
137 |
138 | ## Citing TransVOD
139 | If you find TransVOD useful in your research, please consider citing:
140 | ```bibtex
141 | @inproceedings{he2021end,
142 | title={End-to-End Video Object Detection with Spatial-Temporal Transformers},
143 | author={He, Lu and Zhou, Qianyu and Li, Xiangtai and Niu, Li and Cheng, Guangliang and Li, Xiao and Liu, Wenxuan and Tong, Yunhai and Ma, Lizhuang and Zhang, Liqing},
144 | booktitle={Proceedings of the 29th ACM International Conference on Multimedia},
145 | pages={1507--1516},
146 | year={2021}
147 | }
148 | @article{zhou2022transvod,
149 | title={TransVOD: End-to-end Video Object Detection with Spatial-Temporal Transformers},
150 | author={Zhou, Qianyu and Li, Xiangtai and He, Lu and Yang, Yibo and Cheng, Guangliang and Tong, Yunhai and Ma, Lizhuang and Tao, Dacheng},
151 | journal={arXiv preprint arXiv:2201.05047},
152 | year={2022}
153 | }
154 | ```
--------------------------------------------------------------------------------
/__pycache__/engine_multi.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/__pycache__/engine_multi.cpython-37.pyc
--------------------------------------------------------------------------------
/__pycache__/engine_multi_mm.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/__pycache__/engine_multi_mm.cpython-37.pyc
--------------------------------------------------------------------------------
/__pycache__/engine_single.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/__pycache__/engine_single.cpython-37.pyc
--------------------------------------------------------------------------------
/benchmark.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------
6 |
7 | """
8 | Benchmark inference speed of Deformable DETR.
9 | """
10 | import os
11 | import time
12 | import argparse
13 |
14 | import torch
15 |
16 | from main import get_args_parser as get_main_args_parser
17 | from models import build_model
18 | from datasets import build_dataset
19 | from util.misc import nested_tensor_from_tensor_list
20 |
21 |
22 | def get_benckmark_arg_parser():
23 | parser = argparse.ArgumentParser('Benchmark inference speed of Deformable DETR.')
24 | parser.add_argument('--num_iters', type=int, default=300, help='total iters to benchmark speed')
25 | parser.add_argument('--warm_iters', type=int, default=5, help='ignore first several iters that are very slow')
26 | parser.add_argument('--batch_size', type=int, default=1, help='batch size in inference')
27 | parser.add_argument('--resume', type=str, help='load the pre-trained checkpoint')
28 | return parser
29 |
30 |
31 | @torch.no_grad()
32 | def measure_average_inference_time(model, inputs, num_iters=100, warm_iters=5):
33 | ts = []
34 | for iter_ in range(num_iters):
35 | torch.cuda.synchronize()
36 | t_ = time.perf_counter()
37 | model(inputs)
38 | torch.cuda.synchronize()
39 | t = time.perf_counter() - t_
40 | if iter_ >= warm_iters:
41 | ts.append(t)
42 | print(ts)
43 | return sum(ts) / len(ts)
44 |
45 |
46 | def benchmark():
47 | args, _ = get_benckmark_arg_parser().parse_known_args()
48 | main_args = get_main_args_parser().parse_args(_)
49 | assert args.warm_iters < args.num_iters and args.num_iters > 0 and args.warm_iters >= 0
50 | assert args.batch_size > 0
51 | assert args.resume is None or os.path.exists(args.resume)
52 | dataset = build_dataset('val', main_args)
53 | model, _, _ = build_model(main_args)
54 | model.cuda()
55 | model.eval()
56 | if args.resume is not None:
57 | ckpt = torch.load(args.resume, map_location=lambda storage, loc: storage)
58 | model.load_state_dict(ckpt['model'])
59 | inputs = nested_tensor_from_tensor_list([dataset.__getitem__(0)[0].cuda() for _ in range(args.batch_size)])
60 | t = measure_average_inference_time(model, inputs, args.num_iters, args.warm_iters)
61 | return 1.0 / t * args.batch_size
62 |
63 |
64 | if __name__ == '__main__':
65 | fps = benchmark()
66 | print(f'Inference Speed: {fps:.1f} FPS')
67 |
68 |
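For reference, a possible invocation of `benchmark.py` is sketched below. The checkpoint path and model flags are illustrative (they mirror `configs/r50_eval_single.sh`); arguments not recognized by the benchmark parser are forwarded to `main.py`'s parser via `parse_known_args`, and the VID data layout from the README is assumed to be in place:

```bash
python benchmark.py --batch_size 1 --num_iters 300 --warm_iters 5 \
    --resume ./exps/our_models/exps_single/r50/checkpoint0009.pth \
    --dataset_file vid_single --num_feature_levels 1 --num_queries 300 \
    --with_box_refine --dilation
```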
--------------------------------------------------------------------------------
/configs/.r101_train_multi_mm1.sh.swp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/configs/.r101_train_multi_mm1.sh.swp
--------------------------------------------------------------------------------
/configs/r101_eval_multi.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -x
4 | T=`date +%m%d%H%M`
5 |
6 | EXP_DIR=exps/our_models/exps_multi/r101_81.7
7 | mkdir -p ${EXP_DIR}
8 | PY_ARGS=${@:1}
9 | python -u main.py \
10 | --backbone resnet101 \
11 | --epochs 7 \
12 | --eval \
13 | --num_feature_levels 1 \
14 | --num_queries 300 \
15 | --dilation \
16 | --batch_size 1 \
17 | --num_ref_frames 14 \
18 | --resume ${EXP_DIR}/checkpoint0006.pth \
19 | --lr_drop_epochs 4 6 \
20 | --num_workers 16 \
21 | --with_box_refine \
22 | --dataset_file vid_multi \
23 | --output_dir ${EXP_DIR} \
24 | ${PY_ARGS} 2>&1 | tee ${EXP_DIR}/log.train.$T
25 |
--------------------------------------------------------------------------------
/configs/r101_eval_single.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -x
4 | T=`date +%m%d%H%M`
5 |
6 | EXP_DIR=exps/our_models/exps_single/r101
7 | mkdir -p ${EXP_DIR}
8 | PY_ARGS=${@:1}
9 | python -u main.py \
10 | --backbone resnet101 \
11 | --epochs 7 \
12 | --eval \
13 | --num_feature_levels 1 \
14 | --num_queries 300 \
15 | --dilation \
16 | --batch_size 1 \
17 | --resume ${EXP_DIR}/r101checkpoint0009.pth \
18 | --lr_drop_epochs 4 6 \
19 | --num_workers 16 \
20 | --with_box_refine \
21 | --dataset_file vid_single \
22 | --output_dir ${EXP_DIR} \
23 | ${PY_ARGS} 2>&1 | tee ${EXP_DIR}/log.train.$T
24 |
--------------------------------------------------------------------------------
/configs/r101_train_multi.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -x
4 | T=`date +%m%d%H%M`
5 |
6 | EXP_DIR=exps/multibaseline/r101_grad/e7_nf1_ld4,6_lr0.0002_nq300_wbox_MEGA_detrNorm_preSingle_nr14_dc5_nql3_filter150_75_40
7 | mkdir -p ${EXP_DIR}
8 | PY_ARGS=${@:1}
9 | python -u main.py \
10 | --backbone resnet101 \
11 | --epochs 7 \
12 | --num_feature_levels 1 \
13 | --num_queries 300 \
14 | --dilation \
15 | --batch_size 1 \
16 | --num_ref_frames 14 \
17 | --resume exps/r101checkpoint0009.pth \
18 | --lr_drop_epochs 4 6 \
19 | --num_workers 16 \
20 | --with_box_refine \
21 | --dataset_file vid_multi \
22 | --output_dir ${EXP_DIR} \
23 | ${PY_ARGS} 2>&1 | tee ${EXP_DIR}/log.train.$T
24 |
--------------------------------------------------------------------------------
/configs/r101_train_single.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -x
4 | T=`date +%m%d%H%M`
5 |
6 | EXP_DIR=exps/singlebaseline/r101_e8_nf4_ld6,7_lr0.0002_nq300_bs4_wbox_joint_MEGA_detrNorm_class31_pretrain_coco_dc5
7 | mkdir -p ${EXP_DIR}
8 | PY_ARGS=${@:1}
9 | python -u main.py \
10 | --backbone resnet101 \
11 | --epochs 10 \
12 | --num_feature_levels 1\
13 | --num_queries 300 \
14 | --dilation \
15 | --batch_size 4 \
16 | --num_workers 8 \
17 | --resume ./exps/our_models/COCO_pretrained_model/r101_deformable_detr_single_scale_bbox_refinement-dc5_checkpoint0049.pth \
18 | --lr_drop_epochs 7 9 \
19 | --with_box_refine \
20 | --coco_pretrain \
21 | --dataset_file vid_single \
22 | --output_dir ${EXP_DIR} \
23 | ${PY_ARGS} 2>&1 | tee ${EXP_DIR}/log.train.$T
24 | # --resume /mnt/lustre/helu/code/vod/video_object_detection/exps/pretrainModel/r50_deformable_detr_single_scale_dc5-checkpoint.pth \
25 |
--------------------------------------------------------------------------------
/configs/r50_eval_multi.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -x
4 | T=`date +%m%d%H%M`
5 |
6 | EXP_DIR=exps/our_models/exps_multi/r50_79.9
7 | mkdir -p ${EXP_DIR}
8 | PY_ARGS=${@:1}
9 | python -u main.py \
10 | --epochs 7 \
11 | --eval \
12 | --num_feature_levels 1 \
13 | --num_queries 300 \
14 | --dilation \
15 | --batch_size 1 \
16 | --num_ref_frames 14 \
17 | --resume ${EXP_DIR}/checkpoint0006.pth \
18 | --lr_drop_epochs 4 6 \
19 | --num_workers 16 \
20 | --with_box_refine \
21 | --dataset_file vid_multi \
22 | --output_dir ${EXP_DIR} \
23 | ${PY_ARGS} 2>&1 | tee ${EXP_DIR}/log.train.$T
24 |
--------------------------------------------------------------------------------
/configs/r50_eval_single.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -x
4 | T=`date +%m%d%H%M`
5 |
6 | EXP_DIR=exps/our_models/exps_single/r50
7 | mkdir -p ${EXP_DIR}
8 | PY_ARGS=${@:1}
9 | python -u main.py \
10 | --epochs 7 \
11 | --eval \
12 | --num_feature_levels 1 \
13 | --num_queries 300 \
14 | --dilation \
15 | --batch_size 1 \
16 | --resume ${EXP_DIR}/checkpoint0009.pth \
17 | --lr_drop_epochs 4 6 \
18 | --num_workers 16 \
19 | --with_box_refine \
20 | --dataset_file vid_single \
21 | --output_dir ${EXP_DIR} \
22 | ${PY_ARGS} 2>&1 | tee ${EXP_DIR}/log.train.$T
23 |
--------------------------------------------------------------------------------
/configs/r50_train_multi.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -x
4 | T=`date +%m%d%H%M`
5 |
6 | EXP_DIR=exps/multibaseline/r50_grad/e7_nf1_ld4,6_lr0.0002_nq300_wbox_MEGA_detrNorm_preSingle_nr14_dc5_nql3_filter80_50_30
7 | mkdir -p ${EXP_DIR}
8 | PY_ARGS=${@:1}
9 | python -u main.py \
10 | --backbone resnet50 \
11 | --epochs 7 \
12 | --num_feature_levels 1 \
13 | --num_queries 300 \
14 | --dilation \
15 | --batch_size 1 \
16 | --num_ref_frames 14 \
17 | --resume ./exps/our_models/exps_single/r50/checkpoint0009.pth \
18 | --lr_drop_epochs 4 6 \
19 | --num_workers 16 \
20 | --with_box_refine \
21 | --dataset_file vid_multi \
22 | --output_dir ${EXP_DIR} \
23 | ${PY_ARGS} 2>&1 | tee ${EXP_DIR}/log.train.$T
24 |
--------------------------------------------------------------------------------
/configs/r50_train_single.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -x
4 | T=`date +%m%d%H%M`
5 |
6 | EXP_DIR=exps/singlebaseline/r50_e8_nf4_ld6,7_lr0.0002_nq300_bs4_wbox_joint_MEGA_detrNorm_class31_pretrain_coco_dc5
7 | mkdir -p ${EXP_DIR}
8 | PY_ARGS=${@:1}
9 | python -u main.py \
10 | --epochs 8 \
11 | --num_feature_levels 1\
12 | --num_queries 300 \
13 | --dilation \
14 | --batch_size 4 \
15 | --num_workers 8 \
16 | --lr_drop_epochs 6 7 \
17 | --with_box_refine \
18 | --dataset_file vid_single \
19 | --output_dir ${EXP_DIR} \
20 | --coco_pretrain \
21 | --resume ./exps/our_models/COCO_pretrained_model/r50_deformable_detr_single_scale_dc5-checkpoint.pth \
22 | ${PY_ARGS} 2>&1 | tee ${EXP_DIR}/log.train.$T
23 | # --resume /mnt/lustre/helu/code/vod/video_object_detection/exps/pretrainModel/r50_deformable_detr_single_scale_dc5-checkpoint.pth \
24 |
--------------------------------------------------------------------------------
/datasets/.vid_multi_mm.py.swp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/datasets/.vid_multi_mm.py.swp
--------------------------------------------------------------------------------
/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------
6 | # Modified from DETR (https://github.com/facebookresearch/detr)
7 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8 | # ------------------------------------------------------------------------
9 |
10 | import torch.utils.data
11 | from .torchvision_datasets import CocoDetection
12 |
13 | from .coco import build as build_coco
14 | from .vid_multi import build as build_vid_multi
15 | from .vid_single import build as build_vid_single
16 |
17 |
18 | def get_coco_api_from_dataset(dataset):
19 | for _ in range(10):
20 | # if isinstance(dataset, torchvision.datasets.CocoDetection):
21 | # break
22 | if isinstance(dataset, torch.utils.data.Subset):
23 | dataset = dataset.dataset
24 | if isinstance(dataset, CocoDetection):
25 | return dataset.coco
26 |
27 |
28 | def build_dataset(image_set, args):
29 | if args.dataset_file == 'coco':
30 | return build_coco(image_set, args)
31 | if args.dataset_file == 'coco_panoptic':
32 | # to avoid making panopticapi required for coco
33 | from .coco_panoptic import build as build_coco_panoptic
34 | return build_coco_panoptic(image_set, args)
35 |
36 | if args.dataset_file == 'vid_single':
37 | return build_vid_single(image_set, args)
38 | if args.dataset_file == "vid_multi":
39 | return build_vid_multi(image_set, args)
40 | raise ValueError(f'dataset {args.dataset_file} not supported')
41 |
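A minimal sketch of how this factory is typically driven from the entry point; the flags mirror the configs above, and the default dataset paths in `main.py`'s parser may still need to point at your local data for this to run:

```python
from main import get_args_parser
from datasets import build_dataset

# Select the single-frame VID dataset, as in configs/r50_train_single.sh.
args = get_args_parser().parse_args(['--dataset_file', 'vid_single'])
dataset_train = build_dataset(image_set='train', args=args)
dataset_val = build_dataset(image_set='val', args=args)
```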
--------------------------------------------------------------------------------
/datasets/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/datasets/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/datasets/__pycache__/coco.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/datasets/__pycache__/coco.cpython-37.pyc
--------------------------------------------------------------------------------
/datasets/__pycache__/coco_eval.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/datasets/__pycache__/coco_eval.cpython-37.pyc
--------------------------------------------------------------------------------
/datasets/__pycache__/coco_video_parser.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/datasets/__pycache__/coco_video_parser.cpython-37.pyc
--------------------------------------------------------------------------------
/datasets/__pycache__/data_prefetcher_multi.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/datasets/__pycache__/data_prefetcher_multi.cpython-37.pyc
--------------------------------------------------------------------------------
/datasets/__pycache__/data_prefetcher_single.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/datasets/__pycache__/data_prefetcher_single.cpython-37.pyc
--------------------------------------------------------------------------------
/datasets/__pycache__/panoptic_eval.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/datasets/__pycache__/panoptic_eval.cpython-37.pyc
--------------------------------------------------------------------------------
/datasets/__pycache__/samplers.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/datasets/__pycache__/samplers.cpython-37.pyc
--------------------------------------------------------------------------------
/datasets/__pycache__/transforms.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/datasets/__pycache__/transforms.cpython-37.pyc
--------------------------------------------------------------------------------
/datasets/__pycache__/transforms_mm.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/datasets/__pycache__/transforms_mm.cpython-37.pyc
--------------------------------------------------------------------------------
/datasets/__pycache__/transforms_multi.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/datasets/__pycache__/transforms_multi.cpython-37.pyc
--------------------------------------------------------------------------------
/datasets/__pycache__/transforms_single.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/datasets/__pycache__/transforms_single.cpython-37.pyc
--------------------------------------------------------------------------------
/datasets/__pycache__/vid_multi.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/datasets/__pycache__/vid_multi.cpython-37.pyc
--------------------------------------------------------------------------------
/datasets/__pycache__/vid_multi_mm.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/datasets/__pycache__/vid_multi_mm.cpython-37.pyc
--------------------------------------------------------------------------------
/datasets/__pycache__/vid_single.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/datasets/__pycache__/vid_single.cpython-37.pyc
--------------------------------------------------------------------------------
/datasets/coco.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------
6 | # Modified from DETR (https://github.com/facebookresearch/detr)
7 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8 | # ------------------------------------------------------------------------
9 |
10 | """
11 | COCO dataset which returns image_id for evaluation.
12 |
13 | Mostly copy-paste from https://github.com/pytorch/vision/blob/13b35ff/references/detection/coco_utils.py
14 | """
15 | from pathlib import Path
16 |
17 | import torch
18 | import torch.utils.data
19 | from pycocotools import mask as coco_mask
20 |
21 | from .torchvision_datasets import CocoDetection as TvCocoDetection
22 | from util.misc import get_local_rank, get_local_size
23 | import datasets.transforms_single as T
24 |
25 |
26 | class CocoDetection(TvCocoDetection):
27 | def __init__(self, img_folder, ann_file, transforms, return_masks, cache_mode=False, local_rank=0, local_size=1):
28 | super(CocoDetection, self).__init__(img_folder, ann_file,
29 | cache_mode=cache_mode, local_rank=local_rank, local_size=local_size)
30 | self._transforms = transforms
31 | self.prepare = ConvertCocoPolysToMask(return_masks)
32 |
33 | def __getitem__(self, idx):
34 | img, target = super(CocoDetection, self).__getitem__(idx)
35 | image_id = self.ids[idx]
36 | target = {'image_id': image_id, 'annotations': target}
37 | img, target = self.prepare(img, target)
38 | if self._transforms is not None:
39 | img, target = self._transforms(img, target)
40 | return img, target
41 |
42 |
43 | def convert_coco_poly_to_mask(segmentations, height, width):
44 | masks = []
45 | for polygons in segmentations:
46 | rles = coco_mask.frPyObjects(polygons, height, width)
47 | mask = coco_mask.decode(rles)
48 | if len(mask.shape) < 3:
49 | mask = mask[..., None]
50 | mask = torch.as_tensor(mask, dtype=torch.uint8)
51 | mask = mask.any(dim=2)
52 | masks.append(mask)
53 | if masks:
54 | masks = torch.stack(masks, dim=0)
55 | else:
56 | masks = torch.zeros((0, height, width), dtype=torch.uint8)
57 | return masks
58 |
59 |
60 | class ConvertCocoPolysToMask(object):
61 | def __init__(self, return_masks=False):
62 | self.return_masks = return_masks
63 |
64 | def __call__(self, image, target):
65 | w, h = image.size
66 |
67 | image_id = target["image_id"]
68 | image_id = torch.tensor([image_id])
69 |
70 | anno = target["annotations"]
71 |
72 | anno = [obj for obj in anno if 'iscrowd' not in obj or obj['iscrowd'] == 0]
73 |
74 | boxes = [obj["bbox"] for obj in anno]
75 | # guard against no boxes via resizing
76 | boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
77 | boxes[:, 2:] += boxes[:, :2]
78 | boxes[:, 0::2].clamp_(min=0, max=w)
79 | boxes[:, 1::2].clamp_(min=0, max=h)
80 |
81 | classes = [obj["category_id"] for obj in anno]
82 | classes = torch.tensor(classes, dtype=torch.int64)
83 |
84 | if self.return_masks:
85 | segmentations = [obj["segmentation"] for obj in anno]
86 | masks = convert_coco_poly_to_mask(segmentations, h, w)
87 |
88 | keypoints = None
89 | if anno and "keypoints" in anno[0]:
90 | keypoints = [obj["keypoints"] for obj in anno]
91 | keypoints = torch.as_tensor(keypoints, dtype=torch.float32)
92 | num_keypoints = keypoints.shape[0]
93 | if num_keypoints:
94 | keypoints = keypoints.view(num_keypoints, -1, 3)
95 |
96 | keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
97 | boxes = boxes[keep]
98 | classes = classes[keep]
99 | if self.return_masks:
100 | masks = masks[keep]
101 | if keypoints is not None:
102 | keypoints = keypoints[keep]
103 |
104 | target = {}
105 | target["boxes"] = boxes
106 | target["labels"] = classes
107 | if self.return_masks:
108 | target["masks"] = masks
109 | target["image_id"] = image_id
110 | if keypoints is not None:
111 | target["keypoints"] = keypoints
112 |
113 | # for conversion to coco api
114 | area = torch.tensor([obj["area"] for obj in anno])
115 | iscrowd = torch.tensor([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno])
116 | target["area"] = area[keep]
117 | target["iscrowd"] = iscrowd[keep]
118 |
119 | target["orig_size"] = torch.as_tensor([int(h), int(w)])
120 | target["size"] = torch.as_tensor([int(h), int(w)])
121 |
122 | return image, target
123 |
124 |
125 | def make_coco_transforms(image_set):
126 |
127 | normalize = T.Compose([
128 | T.ToTensor(),
129 | T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
130 | ])
131 |
132 | scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800]
133 |
134 | if image_set == 'train':
135 | return T.Compose([
136 | T.RandomHorizontalFlip(),
137 | T.RandomSelect(
138 | T.RandomResize(scales, max_size=1333),
139 | T.Compose([
140 | T.RandomResize([400, 500, 600]),
141 | T.RandomSizeCrop(384, 600),
142 | T.RandomResize(scales, max_size=1333),
143 | ])
144 | ),
145 | normalize,
146 | ])
147 |
148 | if image_set == 'val':
149 | return T.Compose([
150 | T.RandomResize([800], max_size=1333),
151 | normalize,
152 | ])
153 |
154 | raise ValueError(f'unknown {image_set}')
155 |
156 |
157 | def build(image_set, args):
158 | root = Path(args.coco_path)
159 | assert root.exists(), f'provided COCO path {root} does not exist'
160 | mode = 'instances'
161 | PATHS = {
162 | "train": (root / "train2017", root / "annotations" / f'{mode}_train2017.json'),
163 | "val": (root / "val2017", root / "annotations" / f'{mode}_val2017.json'),
164 | }
165 |
166 | img_folder, ann_file = PATHS[image_set]
167 | dataset = CocoDetection(img_folder, ann_file, transforms=make_coco_transforms(image_set), return_masks=args.masks,
168 | cache_mode=args.cache_mode, local_rank=get_local_rank(), local_size=get_local_size())
169 | return dataset
170 |
--------------------------------------------------------------------------------
/datasets/coco_eval.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------
6 | # Modified from DETR (https://github.com/facebookresearch/detr)
7 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8 | # ------------------------------------------------------------------------
9 |
10 | """
11 | COCO evaluator that works in distributed mode.
12 |
13 | Mostly copy-paste from https://github.com/pytorch/vision/blob/edfd5a7/references/detection/coco_eval.py
14 | The difference is that there is less copy-pasting from pycocotools
15 | in the end of the file, as python3 can suppress prints with contextlib
16 | """
17 | import os
18 | import contextlib
19 | import copy
20 | import numpy as np
21 | import torch
22 |
23 | from pycocotools.cocoeval import COCOeval
24 | from pycocotools.coco import COCO
25 | import pycocotools.mask as mask_util
26 |
27 | from util.misc import all_gather
28 |
29 |
30 | class CocoEvaluator(object):
31 | def __init__(self, coco_gt, iou_types):
32 | assert isinstance(iou_types, (list, tuple))
33 | coco_gt = copy.deepcopy(coco_gt)
34 | self.coco_gt = coco_gt
35 |
36 | self.iou_types = iou_types
37 | self.coco_eval = {}
38 | for iou_type in iou_types:
39 | self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type)
40 |
41 | self.img_ids = []
42 | self.eval_imgs = {k: [] for k in iou_types}
43 |
44 | def update(self, predictions):
45 | img_ids = list(np.unique(list(predictions.keys())))
46 | self.img_ids.extend(img_ids)
47 |
48 | for iou_type in self.iou_types:
49 | results = self.prepare(predictions, iou_type)
50 |
51 | # suppress pycocotools prints
52 | with open(os.devnull, 'w') as devnull:
53 | with contextlib.redirect_stdout(devnull):
54 | coco_dt = COCO.loadRes(self.coco_gt, results) if results else COCO()
55 | coco_eval = self.coco_eval[iou_type]
56 |
57 | coco_eval.cocoDt = coco_dt
58 | coco_eval.params.imgIds = list(img_ids)
59 | img_ids, eval_imgs = evaluate(coco_eval)
60 |
61 | self.eval_imgs[iou_type].append(eval_imgs)
62 |
63 | def synchronize_between_processes(self):
64 | for iou_type in self.iou_types:
65 | self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)
66 | create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type])
67 |
68 | def accumulate(self):
69 | for coco_eval in self.coco_eval.values():
70 | coco_eval.accumulate()
71 |
72 | def summarize(self):
73 | for iou_type, coco_eval in self.coco_eval.items():
74 | print("IoU metric: {}".format(iou_type))
75 | coco_eval.summarize()
76 |
77 | def prepare(self, predictions, iou_type):
78 | if iou_type == "bbox":
79 | return self.prepare_for_coco_detection(predictions)
80 | elif iou_type == "segm":
81 | return self.prepare_for_coco_segmentation(predictions)
82 | elif iou_type == "keypoints":
83 | return self.prepare_for_coco_keypoint(predictions)
84 | else:
85 | raise ValueError("Unknown iou type {}".format(iou_type))
86 |
87 | def prepare_for_coco_detection(self, predictions):
88 | coco_results = []
89 | for original_id, prediction in predictions.items():
90 | if len(prediction) == 0:
91 | continue
92 |
93 | boxes = prediction["boxes"]
94 | boxes = convert_to_xywh(boxes).tolist()
95 | scores = prediction["scores"].tolist()
96 | labels = prediction["labels"].tolist()
97 |
98 | coco_results.extend(
99 | [
100 | {
101 | "image_id": original_id,
102 | "category_id": labels[k],
103 | "bbox": box,
104 | "score": scores[k],
105 | }
106 | for k, box in enumerate(boxes)
107 | ]
108 | )
109 | return coco_results
110 |
111 | def prepare_for_coco_segmentation(self, predictions):
112 | coco_results = []
113 | for original_id, prediction in predictions.items():
114 | if len(prediction) == 0:
115 | continue
116 |
117 | scores = prediction["scores"]
118 | labels = prediction["labels"]
119 | masks = prediction["masks"]
120 |
121 | masks = masks > 0.5
122 |
123 | scores = prediction["scores"].tolist()
124 | labels = prediction["labels"].tolist()
125 |
126 | rles = [
127 | mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0]
128 | for mask in masks
129 | ]
130 | for rle in rles:
131 | rle["counts"] = rle["counts"].decode("utf-8")
132 |
133 | coco_results.extend(
134 | [
135 | {
136 | "image_id": original_id,
137 | "category_id": labels[k],
138 | "segmentation": rle,
139 | "score": scores[k],
140 | }
141 | for k, rle in enumerate(rles)
142 | ]
143 | )
144 | return coco_results
145 |
146 | def prepare_for_coco_keypoint(self, predictions):
147 | coco_results = []
148 | for original_id, prediction in predictions.items():
149 | if len(prediction) == 0:
150 | continue
151 |
152 | boxes = prediction["boxes"]
153 | boxes = convert_to_xywh(boxes).tolist()
154 | scores = prediction["scores"].tolist()
155 | labels = prediction["labels"].tolist()
156 | keypoints = prediction["keypoints"]
157 | keypoints = keypoints.flatten(start_dim=1).tolist()
158 |
159 | coco_results.extend(
160 | [
161 | {
162 | "image_id": original_id,
163 | "category_id": labels[k],
164 | 'keypoints': keypoint,
165 | "score": scores[k],
166 | }
167 | for k, keypoint in enumerate(keypoints)
168 | ]
169 | )
170 | return coco_results
171 |
172 |
173 | def convert_to_xywh(boxes):
174 | xmin, ymin, xmax, ymax = boxes.unbind(1)
175 | return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1)
176 |
177 |
178 | def merge(img_ids, eval_imgs):
179 | all_img_ids = all_gather(img_ids)
180 | all_eval_imgs = all_gather(eval_imgs)
181 |
182 | merged_img_ids = []
183 | for p in all_img_ids:
184 | merged_img_ids.extend(p)
185 |
186 | merged_eval_imgs = []
187 | for p in all_eval_imgs:
188 | merged_eval_imgs.append(p)
189 |
190 | merged_img_ids = np.array(merged_img_ids)
191 | merged_eval_imgs = np.concatenate(merged_eval_imgs, 2)
192 |
193 | # keep only unique (and in sorted order) images
194 | merged_img_ids, idx = np.unique(merged_img_ids, return_index=True)
195 | merged_eval_imgs = merged_eval_imgs[..., idx]
196 |
197 | return merged_img_ids, merged_eval_imgs
198 |
199 |
200 | def create_common_coco_eval(coco_eval, img_ids, eval_imgs):
201 | img_ids, eval_imgs = merge(img_ids, eval_imgs)
202 | img_ids = list(img_ids)
203 | eval_imgs = list(eval_imgs.flatten())
204 |
205 | coco_eval.evalImgs = eval_imgs
206 | coco_eval.params.imgIds = img_ids
207 | coco_eval._paramsEval = copy.deepcopy(coco_eval.params)
208 |
209 |
210 | #################################################################
211 | # From pycocotools, just removed the prints and fixed
212 | # a Python3 bug about unicode not defined
213 | #################################################################
214 |
215 |
216 | def evaluate(self):
217 | '''
218 | Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
219 | :return: None
220 | '''
221 | # tic = time.time()
222 | # print('Running per image evaluation...')
223 | p = self.params
224 | # add backward compatibility if useSegm is specified in params
225 | if p.useSegm is not None:
226 | p.iouType = 'segm' if p.useSegm == 1 else 'bbox'
227 | print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType))
228 | # print('Evaluate annotation type *{}*'.format(p.iouType))
229 | p.imgIds = list(np.unique(p.imgIds))
230 | if p.useCats:
231 | p.catIds = list(np.unique(p.catIds))
232 | p.maxDets = sorted(p.maxDets)
233 | self.params = p
234 |
235 | self._prepare()
236 | # loop through images, area range, max detection number
237 | catIds = p.catIds if p.useCats else [-1]
238 |
239 | if p.iouType == 'segm' or p.iouType == 'bbox':
240 | computeIoU = self.computeIoU
241 | elif p.iouType == 'keypoints':
242 | computeIoU = self.computeOks
243 | self.ious = {
244 | (imgId, catId): computeIoU(imgId, catId)
245 | for imgId in p.imgIds
246 | for catId in catIds}
247 |
248 | evaluateImg = self.evaluateImg
249 | maxDet = p.maxDets[-1]
250 | evalImgs = [
251 | evaluateImg(imgId, catId, areaRng, maxDet)
252 | for catId in catIds
253 | for areaRng in p.areaRng
254 | for imgId in p.imgIds
255 | ]
256 | # this is NOT in the pycocotools code, but could be done outside
257 | evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds))
258 | self._paramsEval = copy.deepcopy(self.params)
259 | # toc = time.time()
260 | # print('DONE (t={:0.2f}s).'.format(toc-tic))
261 | return p.imgIds, evalImgs
262 |
263 | #################################################################
264 | # end of straight copy from pycocotools, just removing the prints
265 | #################################################################
266 |
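For orientation, a sketch of the evaluator's contract: `update()` consumes a mapping from image id to a dict of `boxes` (xyxy), `scores` and `labels` tensors, as produced by the postprocessing step in the engine files. The annotation path and the toy prediction below are placeholders:

```python
import torch
from pycocotools.coco import COCO
from datasets.coco_eval import CocoEvaluator

coco_gt = COCO('data/vid/annotations/imagenet_vid_val.json')  # ground truth (placeholder path)
evaluator = CocoEvaluator(coco_gt, iou_types=('bbox',))

img_id = coco_gt.getImgIds()[0]
evaluator.update({img_id: {
    'boxes': torch.tensor([[10.0, 20.0, 110.0, 220.0]]),  # one fake xyxy box
    'scores': torch.tensor([0.9]),
    'labels': torch.tensor([1]),
}})
evaluator.synchronize_between_processes()  # merges results across GPUs; no-op outside distributed mode
evaluator.accumulate()
evaluator.summarize()                      # prints the COCO-style AP table
```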
--------------------------------------------------------------------------------
/datasets/coco_panoptic.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------
6 | # Modified from DETR (https://github.com/facebookresearch/detr)
7 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8 | # ------------------------------------------------------------------------
9 |
10 | import json
11 | from pathlib import Path
12 |
13 | import numpy as np
14 | import torch
15 | from PIL import Image
16 |
17 | from panopticapi.utils import rgb2id
18 | from util.box_ops import masks_to_boxes
19 |
20 | from .coco import make_coco_transforms
21 |
22 |
23 | class CocoPanoptic:
24 | def __init__(self, img_folder, ann_folder, ann_file, transforms=None, return_masks=True):
25 | with open(ann_file, 'r') as f:
26 | self.coco = json.load(f)
27 |
28 | # sort 'images' field so that they are aligned with 'annotations'
29 | # i.e., in alphabetical order
30 | self.coco['images'] = sorted(self.coco['images'], key=lambda x: x['id'])
31 | # sanity check
32 | if "annotations" in self.coco:
33 | for img, ann in zip(self.coco['images'], self.coco['annotations']):
34 | assert img['file_name'][:-4] == ann['file_name'][:-4]
35 |
36 | self.img_folder = img_folder
37 | self.ann_folder = ann_folder
38 | self.ann_file = ann_file
39 | self.transforms = transforms
40 | self.return_masks = return_masks
41 |
42 | def __getitem__(self, idx):
43 | ann_info = self.coco['annotations'][idx] if "annotations" in self.coco else self.coco['images'][idx]
44 | img_path = Path(self.img_folder) / ann_info['file_name'].replace('.png', '.jpg')
45 | ann_path = Path(self.ann_folder) / ann_info['file_name']
46 |
47 | img = Image.open(img_path).convert('RGB')
48 | w, h = img.size
49 | if "segments_info" in ann_info:
50 | masks = np.asarray(Image.open(ann_path), dtype=np.uint32)
51 | masks = rgb2id(masks)
52 |
53 | ids = np.array([ann['id'] for ann in ann_info['segments_info']])
54 | masks = masks == ids[:, None, None]
55 |
56 | masks = torch.as_tensor(masks, dtype=torch.uint8)
57 | labels = torch.tensor([ann['category_id'] for ann in ann_info['segments_info']], dtype=torch.int64)
58 |
59 | target = {}
60 | target['image_id'] = torch.tensor([ann_info['image_id'] if "image_id" in ann_info else ann_info["id"]])
61 | if self.return_masks:
62 | target['masks'] = masks
63 | target['labels'] = labels
64 |
65 | target["boxes"] = masks_to_boxes(masks)
66 |
67 | target['size'] = torch.as_tensor([int(h), int(w)])
68 | target['orig_size'] = torch.as_tensor([int(h), int(w)])
69 | if "segments_info" in ann_info:
70 | for name in ['iscrowd', 'area']:
71 | target[name] = torch.tensor([ann[name] for ann in ann_info['segments_info']])
72 |
73 | if self.transforms is not None:
74 | img, target = self.transforms(img, target)
75 |
76 | return img, target
77 |
78 | def __len__(self):
79 | return len(self.coco['images'])
80 |
81 | def get_height_and_width(self, idx):
82 | img_info = self.coco['images'][idx]
83 | height = img_info['height']
84 | width = img_info['width']
85 | return height, width
86 |
87 |
88 | def build(image_set, args):
89 | img_folder_root = Path(args.coco_path)
90 | ann_folder_root = Path(args.coco_panoptic_path)
91 | assert img_folder_root.exists(), f'provided COCO path {img_folder_root} does not exist'
92 | assert ann_folder_root.exists(), f'provided COCO path {ann_folder_root} does not exist'
93 | mode = 'panoptic'
94 | PATHS = {
95 | "train": ("train2017", Path("annotations") / f'{mode}_train2017.json'),
96 | "val": ("val2017", Path("annotations") / f'{mode}_val2017.json'),
97 | }
98 |
99 | img_folder, ann_file = PATHS[image_set]
100 | img_folder_path = img_folder_root / img_folder
101 | ann_folder = ann_folder_root / f'{mode}_{img_folder}'
102 | ann_file = ann_folder_root / ann_file
103 |
104 | dataset = CocoPanoptic(img_folder_path, ann_folder, ann_file,
105 | transforms=make_coco_transforms(image_set), return_masks=args.masks)
106 |
107 | return dataset
108 |
--------------------------------------------------------------------------------
/datasets/coco_video_parser.py:
--------------------------------------------------------------------------------
1 | from collections import defaultdict
2 |
3 | import numpy as np
4 | from pycocotools.coco import COCO, _isArrayLike
5 | import random
6 | class CocoVID(COCO):
7 | """Inherit official COCO class in order to parse the annotations of bbox-
8 | related video tasks.
9 | Args:
10 | annotation_file (str): location of annotation file. Defaults to None.
11 | load_img_as_vid (bool): If True, convert image data to video data,
12 | which means each image is converted to a video. Defaults to False.
13 | """
14 |
15 | def __init__(self, annotation_file=None, load_img_as_vid=False):
16 | assert annotation_file, 'Annotation file must be provided.'
17 | self.load_img_as_vid = load_img_as_vid
18 | super(CocoVID, self).__init__(annotation_file=annotation_file)
19 |
20 | def convert_img_to_vid(self, dataset):
21 | """Convert image data to video data."""
22 | if 'images' in self.dataset:
23 | videos = []
24 | for i, img in enumerate(self.dataset['images']):
25 | videos.append(dict(id=img['id'], name=img['file_name']))
26 | img['video_id'] = img['id']
27 | img['frame_id'] = 0
28 | dataset['videos'] = videos
29 |
30 | if 'annotations' in self.dataset:
31 | for i, ann in enumerate(self.dataset['annotations']):
32 | ann['video_id'] = ann['image_id']
33 | ann['instance_id'] = ann['id']
34 | return dataset
35 |
36 | def createIndex(self):
37 | """Create index."""
38 | print('creating index...')
39 | anns, cats, imgs, vids = {}, {}, {}, {}
40 | (imgToAnns, catToImgs, vidToImgs, vidToInstances,
41 | instancesToImgs) = defaultdict(list), defaultdict(list), defaultdict(
42 | list), defaultdict(list), defaultdict(list)
43 |
44 | if 'videos' not in self.dataset and self.load_img_as_vid:
45 | self.dataset = self.convert_img_to_vid(self.dataset)
46 |
47 | if 'videos' in self.dataset:
48 | for video in self.dataset['videos']:
49 | vids[video['id']] = video
50 |
51 | if 'annotations' in self.dataset:
52 | for ann in self.dataset['annotations']:
53 | imgToAnns[ann['image_id']].append(ann)
54 | anns[ann['id']] = ann
55 | if 'instance_id' in ann:
56 | instancesToImgs[ann['instance_id']].append(ann['image_id'])
57 | if 'video_id' in ann and \
58 | ann['instance_id'] not in \
59 | vidToInstances[ann['video_id']]:
60 | vidToInstances[ann['video_id']].append(
61 | ann['instance_id'])
62 |
63 | if 'images' in self.dataset:
64 | for img in self.dataset['images']:
65 | vidToImgs[img['video_id']].append(img)
66 | imgs[img['id']] = img
67 |
68 | if 'categories' in self.dataset:
69 | for cat in self.dataset['categories']:
70 | cats[cat['id']] = cat
71 |
72 | if 'annotations' in self.dataset and 'categories' in self.dataset:
73 | for ann in self.dataset['annotations']:
74 | catToImgs[ann['category_id']].append(ann['image_id'])
75 |
76 | print('index created!')
77 |
78 | self.anns = anns
79 | self.imgToAnns = imgToAnns
80 | self.catToImgs = catToImgs
81 | self.imgs = imgs
82 | self.cats = cats
83 | self.videos = vids
84 | self.vidToImgs = vidToImgs
85 | self.vidToInstances = vidToInstances
86 | self.instancesToImgs = instancesToImgs
87 |
88 | def get_vid_ids(self, vidIds=[]):
89 | """Get video ids that satisfy given filter conditions.
90 | Default return all video ids.
91 | Args:
92 | vidIds (list[int]): The given video ids. Defaults to [].
93 | Returns:
94 | list[int]: Video ids.
95 | """
96 | vidIds = vidIds if _isArrayLike(vidIds) else [vidIds]
97 |
98 | if len(vidIds) == 0:
99 | ids = self.videos.keys()
100 | else:
101 | ids = set(vidIds)
102 |
103 | return list(ids)
104 |
105 | def get_img_ids_from_vid(self, vidId):
106 | """Get image ids from given video id.
107 | Args:
108 | vidId (int): The given video id.
109 | Returns:
110 | list[int]: Image ids of given video id.
111 | """
112 | img_infos = self.vidToImgs[vidId]
113 | ids = list(np.zeros([len(img_infos)], dtype=np.int))
114 |
115 | for i, img_info in enumerate(img_infos):
116 | ids[i] = img_info["id"]
117 | # for img_info in img_infos:
118 | # ids[img_info['frame_id']] = img_info['id']
119 |
120 | return ids
121 |
122 | def get_ins_ids_from_vid(self, vidId):
123 | """Get instance ids from given video id.
124 | Args:
125 | vidId (int): The given video id.
126 | Returns:
127 | list[int]: Instance ids of given video id.
128 | """
129 | return self.vidToInstances[vidId]
130 |
131 | def get_img_ids_from_ins_id(self, insId):
132 | """Get image ids from given instance id.
133 | Args:
134 | insId (int): The given instance id.
135 | Returns:
136 | list[int]: Image ids of given instance id.
137 | """
138 | return self.instancesToImgs[insId]
139 |
140 | def load_vids(self, ids=[]):
141 | """Get video information of given video ids.
142 | Default return all videos information.
143 | Args:
144 | ids (list[int]): The given video ids. Defaults to [].
145 | Returns:
146 | list[dict]: List of video information.
147 | """
148 | if _isArrayLike(ids):
149 | return [self.videos[id] for id in ids]
150 | elif type(ids) == int:
151 | return [self.videos[ids]]
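As a quick orientation, a sketch of how `CocoVID` is typically queried; the annotation path is a placeholder that follows the layout recommended in the README:

```python
from datasets.coco_video_parser import CocoVID

vid = CocoVID('data/vid/annotations/imagenet_vid_val.json')      # builds the video-aware index
video_ids = vid.get_vid_ids()                                    # all video ids in the file
video_info = vid.load_vids(video_ids[:1])[0]                     # metadata dict of the first video
frame_ids = vid.get_img_ids_from_vid(video_ids[0])               # image ids belonging to that video
first_frame_anns = vid.loadAnns(vid.getAnnIds(imgIds=frame_ids[0]))  # plain COCO API still applies
```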
--------------------------------------------------------------------------------
/datasets/data_prefetcher_multi.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------
6 |
7 | import torch
8 |
9 | # def to_cuda(samples, targets, device):
10 | # samples = samples.to(device, non_blocking=True)
11 | # targets = [{k: v.to(device, non_blocking=True) for k, v in t.items()} for t in targets]
12 | # return samples, targets
13 |
14 | def to_cuda(samples, ref_samples, targets, device):
15 | ref_samples = [ref_sample.to(device, non_blocking=True) for ref_sample in ref_samples]
16 | samples = samples.to(device, non_blocking=True)
17 | targets = [{k: v.to(device, non_blocking=True) for k, v in t.items()} for t in targets]
18 | return samples, ref_samples, targets
19 |
20 | class data_prefetcher():
21 | def __init__(self, loader, device, prefetch=True):
22 | self.loader = iter(loader)
23 | self.prefetch = prefetch
24 | self.device = device
25 | if prefetch:
26 | self.stream = torch.cuda.Stream()
27 | self.preload()
28 |
29 | def preload(self):
30 | try:
31 | self.next_samples, self.next_ref_samples, self.next_targets = next(self.loader)
32 | except StopIteration:
33 | self.next_samples = None
34 | self.next_targets = None
35 | self.next_ref_samples =None
36 | return
37 | # if record_stream() doesn't work, another option is to make sure device inputs are created
38 | # on the main stream.
39 | # self.next_input_gpu = torch.empty_like(self.next_input, device='cuda')
40 | # self.next_target_gpu = torch.empty_like(self.next_target, device='cuda')
41 | # Need to make sure the memory allocated for next_* is not still in use by the main stream
42 | # at the time we start copying to next_*:
43 | # self.stream.wait_stream(torch.cuda.current_stream())
44 | with torch.cuda.stream(self.stream):
45 | self.next_samples, self.next_ref_samples, self.next_targets = to_cuda(self.next_samples, self.next_ref_samples, self.next_targets, self.device)
46 | # more code for the alternative if record_stream() doesn't work:
47 | # copy_ will record the use of the pinned source tensor in this side stream.
48 | # self.next_input_gpu.copy_(self.next_input, non_blocking=True)
49 | # self.next_target_gpu.copy_(self.next_target, non_blocking=True)
50 | # self.next_input = self.next_input_gpu
51 | # self.next_target = self.next_target_gpu
52 |
53 | # With Amp, it isn't necessary to manually convert data to half.
54 | # if args.fp16:
55 | # self.next_input = self.next_input.half()
56 | # else:
57 |
58 | def next(self):
59 | if self.prefetch:
60 | torch.cuda.current_stream().wait_stream(self.stream)
61 | samples = self.next_samples
62 | targets = self.next_targets
63 | if samples is not None:
64 | samples.record_stream(torch.cuda.current_stream())
65 | if targets is not None:
66 | for t in targets:
67 | for k, v in t.items():
68 | v.record_stream(torch.cuda.current_stream())
69 | self.preload()
70 | else:
71 | try:
72 | # nested tensor, list[tensor]
73 | samples, ref_samples, targets = next(self.loader)
 74 |                 assert ref_samples is not None, "the multi-frame loader must yield (samples, ref_samples, targets)"
 75 |                 samples, ref_samples, targets = to_cuda(samples, ref_samples, targets, self.device)
 76 |             except StopIteration:
 77 |                 samples = None
 78 |                 targets = None
 79 |                 ref_samples = None
 80 | 
 81 |         return samples, targets
82 |
--------------------------------------------------------------------------------
/datasets/data_prefetcher_single.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------
6 |
7 | import torch
8 |
9 | def to_cuda(samples, targets, device):
10 | samples = samples.to(device, non_blocking=True)
11 | targets = [{k: v.to(device, non_blocking=True) for k, v in t.items()} for t in targets]
12 | return samples, targets
13 |
14 | class data_prefetcher():
15 | def __init__(self, loader, device, prefetch=True):
16 | self.loader = iter(loader)
17 | self.prefetch = prefetch
18 | self.device = device
19 | if prefetch:
20 | self.stream = torch.cuda.Stream()
21 | self.preload()
22 |
23 | def preload(self):
24 | try:
25 | self.next_samples, self.next_targets = next(self.loader)
26 | except StopIteration:
27 | self.next_samples = None
28 | self.next_targets = None
29 | return
30 | # if record_stream() doesn't work, another option is to make sure device inputs are created
31 | # on the main stream.
32 | # self.next_input_gpu = torch.empty_like(self.next_input, device='cuda')
33 | # self.next_target_gpu = torch.empty_like(self.next_target, device='cuda')
34 | # Need to make sure the memory allocated for next_* is not still in use by the main stream
35 | # at the time we start copying to next_*:
36 | # self.stream.wait_stream(torch.cuda.current_stream())
37 | with torch.cuda.stream(self.stream):
38 | self.next_samples, self.next_targets = to_cuda(self.next_samples, self.next_targets, self.device)
39 | # more code for the alternative if record_stream() doesn't work:
40 | # copy_ will record the use of the pinned source tensor in this side stream.
41 | # self.next_input_gpu.copy_(self.next_input, non_blocking=True)
42 | # self.next_target_gpu.copy_(self.next_target, non_blocking=True)
43 | # self.next_input = self.next_input_gpu
44 | # self.next_target = self.next_target_gpu
45 |
46 | # With Amp, it isn't necessary to manually convert data to half.
47 | # if args.fp16:
48 | # self.next_input = self.next_input.half()
49 | # else:
50 |
51 | def next(self):
52 | if self.prefetch:
53 | torch.cuda.current_stream().wait_stream(self.stream)
54 | samples = self.next_samples
55 | targets = self.next_targets
56 | if samples is not None:
57 | samples.record_stream(torch.cuda.current_stream())
58 | if targets is not None:
59 | for t in targets:
60 | for k, v in t.items():
61 | v.record_stream(torch.cuda.current_stream())
62 | self.preload()
63 | else:
64 | try:
65 | samples, targets = next(self.loader)
66 | samples, targets = to_cuda(samples, targets, self.device)
67 | except StopIteration:
68 | samples = None
69 | targets = None
70 | return samples, targets
71 |
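
For reference, a minimal sketch of the intended calling pattern, mirroring how engine_single.py drives the prefetcher. `model`, `criterion`, `data_loader` and `device` are assumed to be built elsewhere, and prefetch=True requires a CUDA device because preloading happens on a side CUDA stream:

    prefetcher = data_prefetcher(data_loader, device, prefetch=True)
    samples, targets = prefetcher.next()
    while samples is not None:
        outputs = model(samples)              # forward pass on the already-transferred batch
        loss_dict = criterion(outputs, targets)
        # ... backward pass and optimizer step ...
        samples, targets = prefetcher.next()  # returns (None, None) once the loader is exhausted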
--------------------------------------------------------------------------------
/datasets/panoptic_eval.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------
6 | # Modified from DETR (https://github.com/facebookresearch/detr)
7 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8 | # ------------------------------------------------------------------------
9 |
10 | import json
11 | import os
12 |
13 | import util.misc as utils
14 |
15 | try:
16 | from panopticapi.evaluation import pq_compute
17 | except ImportError:
18 | pass
19 |
20 |
21 | class PanopticEvaluator(object):
22 | def __init__(self, ann_file, ann_folder, output_dir="panoptic_eval"):
23 | self.gt_json = ann_file
24 | self.gt_folder = ann_folder
25 | if utils.is_main_process():
26 | if not os.path.exists(output_dir):
27 | os.mkdir(output_dir)
28 | self.output_dir = output_dir
29 | self.predictions = []
30 |
31 | def update(self, predictions):
32 | for p in predictions:
33 | with open(os.path.join(self.output_dir, p["file_name"]), "wb") as f:
34 | f.write(p.pop("png_string"))
35 |
36 | self.predictions += predictions
37 |
38 | def synchronize_between_processes(self):
39 | all_predictions = utils.all_gather(self.predictions)
40 | merged_predictions = []
41 | for p in all_predictions:
42 | merged_predictions += p
43 | self.predictions = merged_predictions
44 |
45 | def summarize(self):
46 | if utils.is_main_process():
47 | json_data = {"annotations": self.predictions}
48 | predictions_json = os.path.join(self.output_dir, "predictions.json")
49 | with open(predictions_json, "w") as f:
50 | f.write(json.dumps(json_data))
51 | return pq_compute(self.gt_json, predictions_json, gt_folder=self.gt_folder, pred_folder=self.output_dir)
52 | return None
53 |
--------------------------------------------------------------------------------
/datasets/parsers/__init__.py:
--------------------------------------------------------------------------------
1 | from .coco_video_parser import CocoVID
2 |
3 | __all__ = ['CocoVID']
4 |
--------------------------------------------------------------------------------
/datasets/parsers/coco_video_parser.py:
--------------------------------------------------------------------------------
1 | from collections import defaultdict
2 |
3 | import numpy as np
4 | from pycocotools.coco import COCO, _isArrayLike
5 |
6 |
7 | class CocoVID(COCO):
8 | """Inherit official COCO class in order to parse the annotations of bbox-
9 | related video tasks.
10 |
11 | Args:
12 | annotation_file (str): location of annotation file. Defaults to None.
13 | load_img_as_vid (bool): If True, convert image data to video data,
14 | which means each image is converted to a video. Defaults to False.
15 | """
16 |
17 | def __init__(self, annotation_file=None, load_img_as_vid=False):
18 | assert annotation_file, 'Annotation file must be provided.'
19 | self.load_img_as_vid = load_img_as_vid
20 | super(CocoVID, self).__init__(annotation_file=annotation_file)
21 |
22 | def convert_img_to_vid(self, dataset):
23 | """Convert image data to video data."""
24 | if 'images' in self.dataset:
25 | videos = []
26 | for i, img in enumerate(self.dataset['images']):
27 | videos.append(dict(id=img['id'], name=img['file_name']))
28 | img['video_id'] = img['id']
29 | img['frame_id'] = 0
30 | dataset['videos'] = videos
31 |
32 | if 'annotations' in self.dataset:
33 | for i, ann in enumerate(self.dataset['annotations']):
34 | ann['video_id'] = ann['image_id']
35 | ann['instance_id'] = ann['id']
36 | return dataset
37 |
38 | def createIndex(self):
39 | """Create index."""
40 | print('creating index...')
41 | anns, cats, imgs, vids = {}, {}, {}, {}
42 | (imgToAnns, catToImgs, vidToImgs, vidToInstances,
43 | instancesToImgs) = defaultdict(list), defaultdict(list), defaultdict(
44 | list), defaultdict(list), defaultdict(list)
45 |
46 | if 'videos' not in self.dataset and self.load_img_as_vid:
47 | self.dataset = self.convert_img_to_vid(self.dataset)
48 |
49 | if 'videos' in self.dataset:
50 | for video in self.dataset['videos']:
51 | vids[video['id']] = video
52 |
53 | if 'annotations' in self.dataset:
54 | for ann in self.dataset['annotations']:
55 | imgToAnns[ann['image_id']].append(ann)
56 | anns[ann['id']] = ann
57 | if 'instance_id' in ann:
58 | instancesToImgs[ann['instance_id']].append(ann['image_id'])
59 | if 'video_id' in ann and \
60 | ann['instance_id'] not in \
61 | vidToInstances[ann['video_id']]:
62 | vidToInstances[ann['video_id']].append(
63 | ann['instance_id'])
64 |
65 | if 'images' in self.dataset:
66 | for img in self.dataset['images']:
67 | vidToImgs[img['video_id']].append(img)
68 | imgs[img['id']] = img
69 |
70 | if 'categories' in self.dataset:
71 | for cat in self.dataset['categories']:
72 | cats[cat['id']] = cat
73 |
74 | if 'annotations' in self.dataset and 'categories' in self.dataset:
75 | for ann in self.dataset['annotations']:
76 | catToImgs[ann['category_id']].append(ann['image_id'])
77 |
78 | print('index created!')
79 |
80 | self.anns = anns
81 | self.imgToAnns = imgToAnns
82 | self.catToImgs = catToImgs
83 | self.imgs = imgs
84 | self.cats = cats
85 | self.videos = vids
86 | self.vidToImgs = vidToImgs
87 | self.vidToInstances = vidToInstances
88 | self.instancesToImgs = instancesToImgs
89 |
90 | def get_vid_ids(self, vidIds=[]):
91 | """Get video ids that satisfy given filter conditions.
92 |
93 | Default return all video ids.
94 |
95 | Args:
96 | vidIds (list[int]): The given video ids. Defaults to [].
97 |
98 | Returns:
99 | list[int]: Video ids.
100 | """
101 | vidIds = vidIds if _isArrayLike(vidIds) else [vidIds]
102 |
103 | if len(vidIds) == 0:
104 | ids = self.videos.keys()
105 | else:
106 | ids = set(vidIds)
107 |
108 | return list(ids)
109 |
110 | def get_img_ids_from_vid(self, vidId):
111 | """Get image ids from given video id.
112 |
113 | Args:
114 | vidId (int): The given video id.
115 |
116 | Returns:
117 | list[int]: Image ids of given video id.
118 | """
119 | img_infos = self.vidToImgs[vidId]
120 |         ids = list(np.zeros([len(img_infos)], dtype=np.int64))
121 | for img_info in img_infos:
122 | ids[img_info['frame_id']] = img_info['id']
123 | return ids
124 |
125 | def get_ins_ids_from_vid(self, vidId):
126 | """Get instance ids from given video id.
127 |
128 | Args:
129 | vidId (int): The given video id.
130 |
131 | Returns:
132 | list[int]: Instance ids of given video id.
133 | """
134 | return self.vidToInstances[vidId]
135 |
136 | def get_img_ids_from_ins_id(self, insId):
137 | """Get image ids from given instance id.
138 |
139 | Args:
140 | insId (int): The given instance id.
141 |
142 | Returns:
143 | list[int]: Image ids of given instance id.
144 | """
145 | return self.instancesToImgs[insId]
146 |
147 | def load_vids(self, ids=[]):
148 | """Get video information of given video ids.
149 |
150 | Default return all videos information.
151 |
152 | Args:
153 | ids (list[int]): The given video ids. Defaults to [].
154 |
155 | Returns:
156 | list[dict]: List of video information.
157 | """
158 | if _isArrayLike(ids):
159 | return [self.videos[id] for id in ids]
160 | elif type(ids) == int:
161 | return [self.videos[ids]]
162 |
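
For reference, a minimal sketch of how this parser is typically driven; the annotation path is a placeholder, and `loadImgs` is inherited from `pycocotools.coco.COCO`:

    from datasets.parsers import CocoVID

    cocovid = CocoVID(annotation_file='annotations/imagenet_vid_val.json')  # placeholder path
    for vid_id in cocovid.get_vid_ids():
        video = cocovid.load_vids([vid_id])[0]
        img_ids = cocovid.get_img_ids_from_vid(vid_id)  # ordered by 'frame_id'
        frames = cocovid.loadImgs(img_ids)              # plain COCO method from the parent class
        print(video['name'], len(frames), 'frames')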
--------------------------------------------------------------------------------
/datasets/samplers.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------
6 | # Modified from codes in torch.utils.data.distributed
7 | # ------------------------------------------------------------------------
8 |
9 | import os
10 | import math
11 | import torch
12 | import torch.distributed as dist
13 | from torch.utils.data.sampler import Sampler
14 |
15 |
16 | class DistributedSampler(Sampler):
17 | """Sampler that restricts data loading to a subset of the dataset.
18 | It is especially useful in conjunction with
19 | :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
20 | process can pass a DistributedSampler instance as a DataLoader sampler,
21 | and load a subset of the original dataset that is exclusive to it.
22 | .. note::
23 | Dataset is assumed to be of constant size.
24 | Arguments:
25 | dataset: Dataset used for sampling.
26 | num_replicas (optional): Number of processes participating in
27 | distributed training.
28 | rank (optional): Rank of the current process within num_replicas.
29 | """
30 |
31 | def __init__(self, dataset, num_replicas=None, rank=None, local_rank=None, local_size=None, shuffle=True):
32 | if num_replicas is None:
33 | if not dist.is_available():
34 | raise RuntimeError("Requires distributed package to be available")
35 | num_replicas = dist.get_world_size()
36 | if rank is None:
37 | if not dist.is_available():
38 | raise RuntimeError("Requires distributed package to be available")
39 | rank = dist.get_rank()
40 | self.dataset = dataset
41 | self.num_replicas = num_replicas
42 | self.rank = rank
43 | self.epoch = 0
44 | self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
45 | self.total_size = self.num_samples * self.num_replicas
46 | self.shuffle = shuffle
47 |
48 | def __iter__(self):
49 | if self.shuffle:
50 | # deterministically shuffle based on epoch
51 | g = torch.Generator()
52 | g.manual_seed(self.epoch)
53 | indices = torch.randperm(len(self.dataset), generator=g).tolist()
54 | else:
55 | indices = torch.arange(len(self.dataset)).tolist()
56 |
57 | # add extra samples to make it evenly divisible
58 | indices += indices[: (self.total_size - len(indices))]
59 | assert len(indices) == self.total_size
60 |
61 | # subsample
62 | offset = self.num_samples * self.rank
63 | indices = indices[offset : offset + self.num_samples]
64 | assert len(indices) == self.num_samples
65 |
66 | return iter(indices)
67 |
68 | def __len__(self):
69 | return self.num_samples
70 |
71 | def set_epoch(self, epoch):
72 | self.epoch = epoch
73 |
74 |
75 | class NodeDistributedSampler(Sampler):
76 | """Sampler that restricts data loading to a subset of the dataset.
77 | It is especially useful in conjunction with
78 | :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
79 | process can pass a DistributedSampler instance as a DataLoader sampler,
80 | and load a subset of the original dataset that is exclusive to it.
81 | .. note::
82 | Dataset is assumed to be of constant size.
83 | Arguments:
84 | dataset: Dataset used for sampling.
85 | num_replicas (optional): Number of processes participating in
86 | distributed training.
87 | rank (optional): Rank of the current process within num_replicas.
88 | """
89 |
90 | def __init__(self, dataset, num_replicas=None, rank=None, local_rank=None, local_size=None, shuffle=True):
91 | if num_replicas is None:
92 | if not dist.is_available():
93 | raise RuntimeError("Requires distributed package to be available")
94 | num_replicas = dist.get_world_size()
95 | if rank is None:
96 | if not dist.is_available():
97 | raise RuntimeError("Requires distributed package to be available")
98 | rank = dist.get_rank()
99 | if local_rank is None:
100 | local_rank = int(os.environ.get('LOCAL_RANK', 0))
101 | if local_size is None:
102 | local_size = int(os.environ.get('LOCAL_SIZE', 1))
103 | self.dataset = dataset
104 | self.shuffle = shuffle
105 | self.num_replicas = num_replicas
106 | self.num_parts = local_size
107 | self.rank = rank
108 | self.local_rank = local_rank
109 | self.epoch = 0
110 | self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
111 | self.total_size = self.num_samples * self.num_replicas
112 |
113 | self.total_size_parts = self.num_samples * self.num_replicas // self.num_parts
114 |
115 | def __iter__(self):
116 | if self.shuffle:
117 | # deterministically shuffle based on epoch
118 | g = torch.Generator()
119 | g.manual_seed(self.epoch)
120 | indices = torch.randperm(len(self.dataset), generator=g).tolist()
121 | else:
122 | indices = torch.arange(len(self.dataset)).tolist()
123 |         indices = [i for i in indices if i % self.num_parts == self.local_rank]  # keep only the samples assigned to this local rank (GPU slot within the node)
124 |
125 | # add extra samples to make it evenly divisible
126 | indices += indices[:(self.total_size_parts - len(indices))]
127 | assert len(indices) == self.total_size_parts
128 |
129 | # subsample
130 |         indices = indices[self.rank // self.num_parts:self.total_size_parts:self.num_replicas // self.num_parts]  # split the local-rank shard across nodes: start at this node's index, step by the number of nodes
131 | assert len(indices) == self.num_samples
132 |
133 | return iter(indices)
134 |
135 | def __len__(self):
136 | return self.num_samples
137 |
138 | def set_epoch(self, epoch):
139 | self.epoch = epoch
140 |
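
A hedged usage sketch: both samplers are drop-in replacements for torch's DistributedSampler, so the only extra requirement is calling set_epoch() before each epoch to reseed the shuffle. The dataset, batch size, epoch count and collate_fn below are assumptions (collate_fn is assumed to be the NestedTensor collate from Deformable DETR's util.misc), and torch.distributed is assumed to be initialized:

    from torch.utils.data import DataLoader
    from datasets.samplers import NodeDistributedSampler
    from util.misc import collate_fn              # assumed: Deformable DETR's NestedTensor collate

    sampler = NodeDistributedSampler(dataset, shuffle=True)   # `dataset` built elsewhere
    loader = DataLoader(dataset, batch_size=2, sampler=sampler,
                        collate_fn=collate_fn, num_workers=2, pin_memory=True)
    for epoch in range(50):        # epoch count is a placeholder
        sampler.set_epoch(epoch)   # deterministic reshuffle, identical across processes
        for samples, targets in loader:
            pass                   # training step goes here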
--------------------------------------------------------------------------------
/datasets/torchvision_datasets/__init__.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------
6 |
7 | from .coco import CocoDetection
8 |
--------------------------------------------------------------------------------
/datasets/torchvision_datasets/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/datasets/torchvision_datasets/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/datasets/torchvision_datasets/__pycache__/coco.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/datasets/torchvision_datasets/__pycache__/coco.cpython-37.pyc
--------------------------------------------------------------------------------
/datasets/torchvision_datasets/coco.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------
6 | # Modified from torchvision
7 | # ------------------------------------------------------------------------
8 |
9 | """
10 | Copy-Paste from torchvision, but add utility of caching images on memory
11 | """
12 | from torchvision.datasets.vision import VisionDataset
13 | from PIL import Image
14 | import os
15 | import os.path
16 | import tqdm
17 | from io import BytesIO
18 |
19 |
20 | class CocoDetection(VisionDataset):
 21 |     """MS Coco Detection Dataset.
22 | Args:
23 | root (string): Root directory where images are downloaded to.
24 | annFile (string): Path to json annotation file.
25 | transform (callable, optional): A function/transform that takes in an PIL image
26 | and returns a transformed version. E.g, ``transforms.ToTensor``
27 | target_transform (callable, optional): A function/transform that takes in the
28 | target and transforms it.
29 | transforms (callable, optional): A function/transform that takes input sample and its target as entry
30 | and returns a transformed version.
31 | """
32 |
33 | def __init__(self, root, annFile, transform=None, target_transform=None, transforms=None,
34 | cache_mode=False, local_rank=0, local_size=1):
35 | super(CocoDetection, self).__init__(root, transforms, transform, target_transform)
36 | from pycocotools.coco import COCO
37 | self.coco = COCO(annFile)
38 | self.ids = list(sorted(self.coco.imgs.keys()))
39 | self.cache_mode = cache_mode
40 | self.local_rank = local_rank
41 | self.local_size = local_size
42 | if cache_mode:
43 | self.cache = {}
44 | self.cache_images()
45 |
46 | def cache_images(self):
47 | self.cache = {}
48 | for index, img_id in zip(tqdm.trange(len(self.ids)), self.ids):
49 | if index % self.local_size != self.local_rank:
50 | continue
51 | path = self.coco.loadImgs(img_id)[0]['file_name']
52 | with open(os.path.join(self.root, path), 'rb') as f:
53 | self.cache[path] = f.read()
54 |
55 | def get_image(self, path):
56 | if self.cache_mode:
57 | if path not in self.cache.keys():
58 | with open(os.path.join(self.root, path), 'rb') as f:
59 | self.cache[path] = f.read()
60 | return Image.open(BytesIO(self.cache[path])).convert('RGB')
61 | return Image.open(os.path.join(self.root, path)).convert('RGB')
62 |
63 | def __getitem__(self, index):
64 | """
65 | Args:
66 | index (int): Index
67 | Returns:
68 | tuple: Tuple (image, target). target is the object returned by ``coco.loadAnns``.
69 | """
70 | coco = self.coco
71 | img_id = self.ids[index]
72 | ann_ids = coco.getAnnIds(imgIds=img_id)
73 | target = coco.loadAnns(ann_ids)
74 |
75 | path = coco.loadImgs(img_id)[0]['file_name']
76 |
77 | img = self.get_image(path)
78 | if self.transforms is not None:
79 | img, target = self.transforms(img, target)
80 |
81 | return img, target
82 |
83 | def __len__(self):
84 | return len(self.ids)
85 |
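
When cache_mode is on, each process keeps the raw (still-encoded) bytes of every sample whose index satisfies index % local_size == local_rank, so processes on one node split the caching work and images are only decoded lazily in get_image. A small sketch with placeholder paths:

    from datasets.torchvision_datasets import CocoDetection

    dataset = CocoDetection(root='data/coco/val2017',                                # placeholder
                            annFile='data/coco/annotations/instances_val2017.json',  # placeholder
                            cache_mode=True, local_rank=0, local_size=1)             # one process caches everything
    img, target = dataset[0]   # PIL image decoded from the in-memory bytes, annotations from coco.loadAnns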
--------------------------------------------------------------------------------
/datasets/transforms_single.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------
6 | # Modified from DETR (https://github.com/facebookresearch/detr)
7 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8 | # ------------------------------------------------------------------------
9 |
10 | """
11 | Transforms and data augmentation for both image + bbox.
12 | """
13 | import random
14 |
15 | import PIL
16 | import torch
17 | import torchvision.transforms as T
18 | import torchvision.transforms.functional as F
19 |
20 | from util.box_ops import box_xyxy_to_cxcywh
21 | from util.misc import interpolate
22 |
23 |
24 | def crop(image, target, region):
25 | cropped_image = F.crop(image, *region)
26 |
27 | target = target.copy()
28 | i, j, h, w = region
29 |
30 | # should we do something wrt the original size?
31 | target["size"] = torch.tensor([h, w])
32 |
33 | fields = ["labels", "area", "iscrowd"]
34 |
35 | if "boxes" in target:
36 | boxes = target["boxes"]
37 | max_size = torch.as_tensor([w, h], dtype=torch.float32)
38 | cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
39 | cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
40 | cropped_boxes = cropped_boxes.clamp(min=0)
41 | area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
42 | target["boxes"] = cropped_boxes.reshape(-1, 4)
43 | target["area"] = area
44 | fields.append("boxes")
45 |
46 | if "masks" in target:
47 | # FIXME should we update the area here if there are no boxes?
48 | target['masks'] = target['masks'][:, i:i + h, j:j + w]
49 | fields.append("masks")
50 |
51 | # remove elements for which the boxes or masks that have zero area
52 | if "boxes" in target or "masks" in target:
53 | # favor boxes selection when defining which elements to keep
54 | # this is compatible with previous implementation
55 | if "boxes" in target:
56 | cropped_boxes = target['boxes'].reshape(-1, 2, 2)
57 | keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
58 | else:
59 | keep = target['masks'].flatten(1).any(1)
60 |
61 | for field in fields:
62 | target[field] = target[field][keep]
63 |
64 | return cropped_image, target
65 |
66 |
67 | def hflip(image, target):
68 | flipped_image = F.hflip(image)
69 |
70 | w, h = image.size
71 |
72 | target = target.copy()
73 | if "boxes" in target:
74 | boxes = target["boxes"]
75 | boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0])
76 | target["boxes"] = boxes
77 |
78 | if "masks" in target:
79 | target['masks'] = target['masks'].flip(-1)
80 |
81 | return flipped_image, target
82 |
83 |
84 | def resize(image, target, size, max_size=None):
85 | # size can be min_size (scalar) or (w, h) tuple
86 |
87 | def get_size_with_aspect_ratio(image_size, size, max_size=None):
88 | w, h = image_size
89 | if max_size is not None:
90 | min_original_size = float(min((w, h)))
91 | max_original_size = float(max((w, h)))
92 | if max_original_size / min_original_size * size > max_size:
93 | size = int(round(max_size * min_original_size / max_original_size))
94 |
95 | if (w <= h and w == size) or (h <= w and h == size):
96 | return (h, w)
97 |
98 | if w < h:
99 | ow = size
100 | oh = int(size * h / w)
101 | else:
102 | oh = size
103 | ow = int(size * w / h)
104 |
105 | return (oh, ow)
106 |
107 | def get_size(image_size, size, max_size=None):
108 | if isinstance(size, (list, tuple)):
109 | return size[::-1]
110 | else:
111 | return get_size_with_aspect_ratio(image_size, size, max_size)
112 |
113 | size = get_size(image.size, size, max_size)
114 | rescaled_image = F.resize(image, size)
115 |
116 | if target is None:
117 | return rescaled_image, None
118 |
119 | ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size))
120 | ratio_width, ratio_height = ratios
121 |
122 | target = target.copy()
123 | if "boxes" in target:
124 | boxes = target["boxes"]
125 | scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])
126 | target["boxes"] = scaled_boxes
127 |
128 | if "area" in target:
129 | area = target["area"]
130 | scaled_area = area * (ratio_width * ratio_height)
131 | target["area"] = scaled_area
132 |
133 | h, w = size
134 | target["size"] = torch.tensor([h, w])
135 |
136 | if "masks" in target:
137 | target['masks'] = interpolate(
138 | target['masks'][:, None].float(), size, mode="nearest")[:, 0] > 0.5
139 |
140 | return rescaled_image, target
141 |
142 |
143 | def pad(image, target, padding):
144 | # assumes that we only pad on the bottom right corners
145 | padded_image = F.pad(image, (0, 0, padding[0], padding[1]))
146 | if target is None:
147 | return padded_image, None
148 | target = target.copy()
149 | # should we do something wrt the original size?
150 |     target["size"] = torch.tensor(padded_image.size[::-1])
151 | if "masks" in target:
152 | target['masks'] = torch.nn.functional.pad(target['masks'], (0, padding[0], 0, padding[1]))
153 | return padded_image, target
154 |
155 |
156 | class RandomCrop(object):
157 | def __init__(self, size):
158 | self.size = size
159 |
160 | def __call__(self, img, target):
161 | region = T.RandomCrop.get_params(img, self.size)
162 | return crop(img, target, region)
163 |
164 |
165 | class RandomSizeCrop(object):
166 | def __init__(self, min_size: int, max_size: int):
167 | self.min_size = min_size
168 | self.max_size = max_size
169 |
170 | def __call__(self, img: PIL.Image.Image, target: dict):
171 | w = random.randint(self.min_size, min(img.width, self.max_size))
172 | h = random.randint(self.min_size, min(img.height, self.max_size))
173 | region = T.RandomCrop.get_params(img, [h, w])
174 | return crop(img, target, region)
175 |
176 |
177 | class CenterCrop(object):
178 | def __init__(self, size):
179 | self.size = size
180 |
181 | def __call__(self, img, target):
182 | image_width, image_height = img.size
183 | crop_height, crop_width = self.size
184 | crop_top = int(round((image_height - crop_height) / 2.))
185 | crop_left = int(round((image_width - crop_width) / 2.))
186 | return crop(img, target, (crop_top, crop_left, crop_height, crop_width))
187 |
188 |
189 | class RandomHorizontalFlip(object):
190 | def __init__(self, p=0.5):
191 | self.p = p
192 |
193 | def __call__(self, img, target):
194 | if random.random() < self.p:
195 | return hflip(img, target)
196 | return img, target
197 |
198 |
199 | class RandomResize(object):
200 | def __init__(self, sizes, max_size=None):
201 | assert isinstance(sizes, (list, tuple))
202 | self.sizes = sizes
203 | self.max_size = max_size
204 |
205 | def __call__(self, img, target=None):
206 | size = random.choice(self.sizes)
207 | return resize(img, target, size, self.max_size)
208 |
209 |
210 | class RandomPad(object):
211 | def __init__(self, max_pad):
212 | self.max_pad = max_pad
213 |
214 | def __call__(self, img, target):
215 | pad_x = random.randint(0, self.max_pad)
216 | pad_y = random.randint(0, self.max_pad)
217 | return pad(img, target, (pad_x, pad_y))
218 |
219 |
220 | class RandomSelect(object):
221 | """
222 | Randomly selects between transforms1 and transforms2,
223 | with probability p for transforms1 and (1 - p) for transforms2
224 | """
225 | def __init__(self, transforms1, transforms2, p=0.5):
226 | self.transforms1 = transforms1
227 | self.transforms2 = transforms2
228 | self.p = p
229 |
230 | def __call__(self, img, target):
231 | if random.random() < self.p:
232 | return self.transforms1(img, target)
233 | return self.transforms2(img, target)
234 |
235 |
236 | class ToTensor(object):
237 | def __call__(self, img, target):
238 | return F.to_tensor(img), target
239 |
240 |
241 | class RandomErasing(object):
242 |
243 | def __init__(self, *args, **kwargs):
244 | self.eraser = T.RandomErasing(*args, **kwargs)
245 |
246 | def __call__(self, img, target):
247 | return self.eraser(img), target
248 |
249 |
250 | class Normalize(object):
251 | def __init__(self, mean, std):
252 | self.mean = mean
253 | self.std = std
254 |
255 | def __call__(self, image, target=None):
256 | image = F.normalize(image, mean=self.mean, std=self.std)
257 | if target is None:
258 | return image, None
259 | target = target.copy()
260 | h, w = image.shape[-2:]
261 | if "boxes" in target:
262 | boxes = target["boxes"]
263 | boxes = box_xyxy_to_cxcywh(boxes)
264 | boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32)
265 | target["boxes"] = boxes
266 | return image, target
267 |
268 |
269 | class Compose(object):
270 | def __init__(self, transforms):
271 | self.transforms = transforms
272 |
273 | def __call__(self, image, target):
274 | for t in self.transforms:
275 | image, target = t(image, target)
276 | return image, target
277 |
278 | def __repr__(self):
279 | format_string = self.__class__.__name__ + "("
280 | for t in self.transforms:
281 | format_string += "\n"
282 | format_string += " {0}".format(t)
283 | format_string += "\n)"
284 | return format_string
285 |
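
Unlike plain torchvision transforms, every callable here takes and returns an (image, target) pair and keeps the boxes consistent with the geometric change; Normalize additionally converts boxes from absolute xyxy to normalized cxcywh. A self-contained sketch with a dummy image and made-up boxes:

    import torch
    from PIL import Image
    import datasets.transforms_single as T

    img = Image.new('RGB', (640, 480))                            # dummy frame
    target = {'boxes': torch.tensor([[10., 20., 200., 220.]]),    # xyxy, absolute pixels
              'labels': torch.tensor([3]),
              'area': torch.tensor([190. * 200.]),
              'iscrowd': torch.tensor([0])}
    tfm = T.Compose([T.RandomHorizontalFlip(),
                     T.RandomResize([600], max_size=1000),
                     T.ToTensor(),
                     T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
    img_t, target_t = tfm(img, target)    # boxes come out as normalized cxcywh in [0, 1]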
--------------------------------------------------------------------------------
/datasets/vid_single.py:
--------------------------------------------------------------------------------
1 | # Modified by Lu He
2 | # ------------------------------------------------------------------------
3 | # Deformable DETR
4 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
5 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6 | # ------------------------------------------------------------------------
7 | # Modified from DETR (https://github.com/facebookresearch/detr)
8 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
9 | # ------------------------------------------------------------------------
10 |
11 | """
12 | COCO dataset which returns image_id for evaluation.
13 |
14 | Mostly copy-paste from https://github.com/pytorch/vision/blob/13b35ff/references/detection/coco_utils.py
15 | """
16 | from pathlib import Path
17 |
18 | import torch
19 | import torch.utils.data
20 | from pycocotools import mask as coco_mask
21 |
22 | from .torchvision_datasets import CocoDetection as TvCocoDetection
23 | from util.misc import get_local_rank, get_local_size
24 | import datasets.transforms_single as T
25 | from torch.utils.data.dataset import ConcatDataset
26 |
27 | class CocoDetection(TvCocoDetection):
28 | def __init__(self, img_folder, ann_file, transforms, return_masks, cache_mode=False, local_rank=0, local_size=1):
29 | super(CocoDetection, self).__init__(img_folder, ann_file,
30 | cache_mode=cache_mode, local_rank=local_rank, local_size=local_size)
31 | self._transforms = transforms
32 | self.prepare = ConvertCocoPolysToMask(return_masks)
33 |
34 | def __getitem__(self, idx):
35 | """
36 | Args:
37 | index (int): Index
38 | Returns:
39 | tuple: Tuple (image, target). target is the object returned by ``coco.loadAnns``.
40 | """
 41 |         # e.g. if idx is 675834, the corresponding img_id is 675835 (img_id = idx + 1)
42 | coco = self.coco
43 | img_id = self.ids[idx]
44 | ann_ids = coco.getAnnIds(imgIds=img_id)
45 | target = coco.loadAnns(ann_ids)
46 |
47 | path = coco.loadImgs(img_id)[0]['file_name']
48 |
49 | img = self.get_image(path)
50 |
51 | image_id = img_id
52 | target = {'image_id': image_id, 'annotations': target}
53 |
54 | img, target = self.prepare(img, target)
55 | if self._transforms is not None:
56 | img, target = self._transforms(img, target)
57 |
58 | return img, target
59 |
60 |
61 | def convert_coco_poly_to_mask(segmentations, height, width):
62 | masks = []
63 | for polygons in segmentations:
64 | rles = coco_mask.frPyObjects(polygons, height, width)
65 | mask = coco_mask.decode(rles)
66 | if len(mask.shape) < 3:
67 | mask = mask[..., None]
68 | mask = torch.as_tensor(mask, dtype=torch.uint8)
69 | mask = mask.any(dim=2)
70 | masks.append(mask)
71 | if masks:
72 | masks = torch.stack(masks, dim=0)
73 | else:
74 | masks = torch.zeros((0, height, width), dtype=torch.uint8)
75 | return masks
76 |
77 |
78 | class ConvertCocoPolysToMask(object):
79 | def __init__(self, return_masks=False):
80 | self.return_masks = return_masks
81 |
82 | def __call__(self, image, target):
83 | w, h = image.size
84 |
85 | image_id = target["image_id"]
86 | image_id = torch.tensor([image_id])
87 |
88 | anno = target["annotations"]
89 |
90 | anno = [obj for obj in anno if 'iscrowd' not in obj or obj['iscrowd'] == 0]
91 |
92 | boxes = [obj["bbox"] for obj in anno]
93 | # guard against no boxes via resizing
94 | boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
95 | boxes[:, 2:] += boxes[:, :2]
96 | boxes[:, 0::2].clamp_(min=0, max=w)
97 | boxes[:, 1::2].clamp_(min=0, max=h)
98 |
99 | classes = [obj["category_id"] for obj in anno]
100 | classes = torch.tensor(classes, dtype=torch.int64)
101 |
102 | if self.return_masks:
103 | segmentations = [obj["segmentation"] for obj in anno]
104 | masks = convert_coco_poly_to_mask(segmentations, h, w)
105 |
106 | keypoints = None
107 | if anno and "keypoints" in anno[0]:
108 | keypoints = [obj["keypoints"] for obj in anno]
109 | keypoints = torch.as_tensor(keypoints, dtype=torch.float32)
110 | num_keypoints = keypoints.shape[0]
111 | if num_keypoints:
112 | keypoints = keypoints.view(num_keypoints, -1, 3)
113 |
114 | keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
115 | boxes = boxes[keep]
116 | classes = classes[keep]
117 | if self.return_masks:
118 | masks = masks[keep]
119 | if keypoints is not None:
120 | keypoints = keypoints[keep]
121 |
122 | target = {}
123 | target["boxes"] = boxes
124 | target["labels"] = classes
125 | if self.return_masks:
126 | target["masks"] = masks
127 | target["image_id"] = image_id
128 | if keypoints is not None:
129 | target["keypoints"] = keypoints
130 |
131 | # for conversion to coco api
132 | area = torch.tensor([obj["area"] for obj in anno])
133 | iscrowd = torch.tensor([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno])
134 | target["area"] = area[keep]
135 | target["iscrowd"] = iscrowd[keep]
136 |
137 | target["orig_size"] = torch.as_tensor([int(h), int(w)])
138 | target["size"] = torch.as_tensor([int(h), int(w)])
139 |
140 | return image, target
141 |
142 |
143 | def make_coco_transforms(image_set):
144 |
145 | normalize = T.Compose([
146 | T.ToTensor(),
147 | T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
148 | ])
149 |
150 | if image_set == 'train_vid' or image_set == "train_det" or image_set == "train_joint":
151 | return T.Compose([
152 | T.RandomHorizontalFlip(),
153 | T.RandomResize([600], max_size=1000),
154 | normalize,
155 | ])
156 |
157 | if image_set == 'val':
158 | return T.Compose([
159 | T.RandomResize([600], max_size=1000),
160 | normalize,
161 | ])
162 |
163 | raise ValueError(f'unknown {image_set}')
164 |
165 |
166 | def build(image_set, args):
167 | root = Path(args.vid_path)
168 | assert root.exists(), f'provided COCO path {root} does not exist'
169 | mode = 'instances'
170 | PATHS = {
171 | # "train_joint": [(root / "Data" / "DET", root / "annotations" / 'imagenet_det_30plus1cls_vid_train.json'), (root / "Data" / "VID", root / "annotations_10true" / 'imagenet_vid_train.json')],
172 | "train_det": [(root / "Data" / "DET", root / "annotations" / 'imagenet_det_30plus1cls_vid_train.json')],
173 | "train_vid": [(root / "Data" / "VID", root / "annotations" / 'imagenet_vid_train.json')],
174 |         "train_joint": [(root / "Data", root / "annotations" / 'imagenet_vid_train_joint_30.json')],
175 | "val": [(root / "Data" / "VID", root / "annotations" / 'imagenet_vid_val.json')],
176 | }
177 | datasets = []
178 | for (img_folder, ann_file) in PATHS[image_set]:
179 | dataset = CocoDetection(img_folder, ann_file, transforms=make_coco_transforms(image_set), return_masks=args.masks, cache_mode=args.cache_mode, local_rank=get_local_rank(), local_size=get_local_size())
180 | datasets.append(dataset)
181 | if len(datasets) == 1:
182 | return datasets[0]
183 | return ConcatDataset(datasets)
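
build() only reads a few fields off the parsed arguments, so it can also be exercised directly; a hedged sketch where SimpleNamespace stands in for the real argparse namespace and the vid_path layout is assumed to match the PATHS table above:

    from types import SimpleNamespace
    from datasets.vid_single import build

    args = SimpleNamespace(vid_path='data/vid',      # expects Data/ and annotations/ underneath
                           masks=False, cache_mode=False)
    dataset_train = build('train_vid', args)
    dataset_val = build('val', args)
    print(len(dataset_train), len(dataset_val))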
184 |
185 |
186 |
--------------------------------------------------------------------------------
/docs/changelog.md:
--------------------------------------------------------------------------------
1 | ## Changelog
2 |
3 | **[2020.12.07]** Fix a bug of sampling offset normalization (see [this issue](https://github.com/fundamentalvision/Deformable-DETR/issues/6)) in the MSDeformAttn module. The final accuracy on COCO is slightly improved. Code and pre-trained models have been updated. This bug only occurs in this released version but not in the original implementation used in our paper.
--------------------------------------------------------------------------------
/engine_multi.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------
6 | # Modified from DETR (https://github.com/facebookresearch/detr)
7 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8 | # ------------------------------------------------------------------------
9 |
10 | """
11 | Train and eval functions used in main.py
12 | """
13 | import math
14 | import os
15 | import sys
16 | from typing import Iterable
17 |
18 | import torch
19 | import util.misc as utils
20 | from datasets.coco_eval import CocoEvaluator
21 | from datasets.panoptic_eval import PanopticEvaluator
22 | from datasets.data_prefetcher_multi import data_prefetcher
23 |
24 | def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
25 | data_loader: Iterable, optimizer: torch.optim.Optimizer,
26 | device: torch.device, epoch: int, max_norm: float = 0):
27 | model.train()
28 | criterion.train()
29 | metric_logger = utils.MetricLogger(delimiter=" ")
30 | metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
31 | metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
32 | metric_logger.add_meter('grad_norm', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
33 | header = 'Epoch: [{}]'.format(epoch)
34 | print_freq = 10
35 |
36 | for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
37 |
38 | samples = samples.to(device)
39 | targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
40 | outputs = model(samples)
41 | loss_dict = criterion(outputs, targets)
42 | weight_dict = criterion.weight_dict
43 | losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
44 |
45 | # reduce losses over all GPUs for logging purposes
46 | loss_dict_reduced = utils.reduce_dict(loss_dict)
47 | loss_dict_reduced_unscaled = {f'{k}_unscaled': v
48 | for k, v in loss_dict_reduced.items()}
49 | loss_dict_reduced_scaled = {k: v * weight_dict[k]
50 | for k, v in loss_dict_reduced.items() if k in weight_dict}
51 | losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
52 |
53 | loss_value = losses_reduced_scaled.item()
54 |
55 | if not math.isfinite(loss_value):
56 | print("Loss is {}, stopping training".format(loss_value))
57 | print(loss_dict_reduced)
58 | sys.exit(1)
59 |
60 | optimizer.zero_grad()
61 | losses.backward()
62 | if max_norm > 0:
63 | grad_total_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
64 | else:
65 | grad_total_norm = utils.get_total_grad_norm(model.parameters(), max_norm)
66 | optimizer.step()
67 |
68 | metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
69 | metric_logger.update(class_error=loss_dict_reduced['class_error'])
70 | metric_logger.update(lr=optimizer.param_groups[0]["lr"])
71 | metric_logger.update(grad_norm=grad_total_norm)
72 |
73 | # gather the stats from all processes
74 | metric_logger.synchronize_between_processes()
75 | print("Averaged stats:", metric_logger)
76 | return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
77 |
78 | @torch.no_grad()
79 | def evaluate(model, criterion, postprocessors, data_loader, base_ds, device, output_dir):
80 | model.eval()
81 | criterion.eval()
82 |
83 | metric_logger = utils.MetricLogger(delimiter=" ")
84 | metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
85 | header = 'Test:'
86 |
87 | iou_types = tuple(k for k in ('segm', 'bbox') if k in postprocessors.keys())
88 | coco_evaluator = CocoEvaluator(base_ds, iou_types)
89 | # coco_evaluator.coco_eval[iou_types[0]].params.iouThrs = [0, 0.1, 0.5, 0.75]
90 |
91 | panoptic_evaluator = None
92 | if 'panoptic' in postprocessors.keys():
93 | panoptic_evaluator = PanopticEvaluator(
94 | data_loader.dataset.ann_file,
95 | data_loader.dataset.ann_folder,
96 | output_dir=os.path.join(output_dir, "panoptic_eval"),
97 | )
98 |
99 | for samples, targets in metric_logger.log_every(data_loader, 10, header):
100 | samples = samples.to(device)
101 | targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
102 |
103 | outputs = model(samples)
104 | loss_dict = criterion(outputs, targets)
105 | weight_dict = criterion.weight_dict
106 |
107 | # reduce losses over all GPUs for logging purposes
108 | loss_dict_reduced = utils.reduce_dict(loss_dict)
109 | loss_dict_reduced_scaled = {k: v * weight_dict[k]
110 | for k, v in loss_dict_reduced.items() if k in weight_dict}
111 | loss_dict_reduced_unscaled = {f'{k}_unscaled': v
112 | for k, v in loss_dict_reduced.items()}
113 | metric_logger.update(loss=sum(loss_dict_reduced_scaled.values()),
114 | **loss_dict_reduced_scaled,
115 | **loss_dict_reduced_unscaled)
116 | metric_logger.update(class_error=loss_dict_reduced['class_error'])
117 |
118 | orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0)
119 | results = postprocessors['bbox'](outputs, orig_target_sizes)
120 | if 'segm' in postprocessors.keys():
121 | target_sizes = torch.stack([t["size"] for t in targets], dim=0)
122 | results = postprocessors['segm'](results, outputs, orig_target_sizes, target_sizes)
123 | res = {target['image_id'].item(): output for target, output in zip(targets, results)}
124 | if coco_evaluator is not None:
125 | coco_evaluator.update(res)
126 |
127 | if panoptic_evaluator is not None:
128 | res_pano = postprocessors["panoptic"](outputs, target_sizes, orig_target_sizes)
129 | for i, target in enumerate(targets):
130 | image_id = target["image_id"].item()
131 | file_name = f"{image_id:012d}.png"
132 | res_pano[i]["image_id"] = image_id
133 | res_pano[i]["file_name"] = file_name
134 |
135 | panoptic_evaluator.update(res_pano)
136 |
137 | # gather the stats from all processes
138 | metric_logger.synchronize_between_processes()
139 | print("Averaged stats:", metric_logger)
140 | if coco_evaluator is not None:
141 | coco_evaluator.synchronize_between_processes()
142 | if panoptic_evaluator is not None:
143 | panoptic_evaluator.synchronize_between_processes()
144 |
145 | # accumulate predictions from all images
146 | if coco_evaluator is not None:
147 | coco_evaluator.accumulate()
148 | coco_evaluator.summarize()
149 | panoptic_res = None
150 | if panoptic_evaluator is not None:
151 | panoptic_res = panoptic_evaluator.summarize()
152 | stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()}
153 | if coco_evaluator is not None:
154 | if 'bbox' in postprocessors.keys():
155 | stats['coco_eval_bbox'] = coco_evaluator.coco_eval['bbox'].stats.tolist()
156 | if 'segm' in postprocessors.keys():
157 | stats['coco_eval_masks'] = coco_evaluator.coco_eval['segm'].stats.tolist()
158 | if panoptic_res is not None:
159 | stats['PQ_all'] = panoptic_res["All"]
160 | stats['PQ_th'] = panoptic_res["Things"]
161 | stats['PQ_st'] = panoptic_res["Stuff"]
162 | return stats, coco_evaluator
163 |
--------------------------------------------------------------------------------
/engine_single.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------
6 | # Modified from DETR (https://github.com/facebookresearch/detr)
7 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8 | # ------------------------------------------------------------------------
9 |
10 | """
11 | Train and eval functions used in main.py
12 | """
13 | import math
14 | import os
15 | import sys
16 | from typing import Iterable
17 |
18 | import torch
19 | import util.misc as utils
20 | from datasets.coco_eval import CocoEvaluator
21 | from datasets.panoptic_eval import PanopticEvaluator
22 | from datasets.data_prefetcher_single import data_prefetcher
23 |
24 | def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
25 | data_loader: Iterable, optimizer: torch.optim.Optimizer,
26 | device: torch.device, epoch: int, max_norm: float = 0):
27 | model.train()
28 | criterion.train()
29 | metric_logger = utils.MetricLogger(delimiter=" ")
30 | metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
31 | metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
32 | metric_logger.add_meter('grad_norm', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
33 | header = 'Epoch: [{}]'.format(epoch)
34 | print_freq = 10
 35 |     # prefetch batches on a side CUDA stream so host-to-device copies overlap with compute
36 | prefetcher = data_prefetcher(data_loader, device, prefetch=True)
37 | samples, targets = prefetcher.next()
38 |
39 | for _ in metric_logger.log_every(range(len(data_loader)), print_freq, header):
40 |
41 | outputs = model(samples)
42 | loss_dict = criterion(outputs, targets)
43 | weight_dict = criterion.weight_dict
44 | losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
45 |
46 | # reduce losses over all GPUs for logging purposes
47 | loss_dict_reduced = utils.reduce_dict(loss_dict)
48 | loss_dict_reduced_unscaled = {f'{k}_unscaled': v
49 | for k, v in loss_dict_reduced.items()}
50 | loss_dict_reduced_scaled = {k: v * weight_dict[k]
51 | for k, v in loss_dict_reduced.items() if k in weight_dict}
52 | losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
53 |
54 | loss_value = losses_reduced_scaled.item()
55 |
56 | if not math.isfinite(loss_value):
57 | print("Loss is {}, stopping training".format(loss_value))
58 | print(loss_dict_reduced)
59 | sys.exit(1)
60 |
61 | optimizer.zero_grad()
62 | losses.backward()
63 | if max_norm > 0:
64 | grad_total_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
65 | else:
66 | grad_total_norm = utils.get_total_grad_norm(model.parameters(), max_norm)
67 | optimizer.step()
68 |
69 | metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
70 | metric_logger.update(class_error=loss_dict_reduced['class_error'])
71 | metric_logger.update(lr=optimizer.param_groups[0]["lr"])
72 | metric_logger.update(grad_norm=grad_total_norm)
73 |
74 | samples, targets = prefetcher.next()
75 | # gather the stats from all processes
76 | metric_logger.synchronize_between_processes()
77 | print("Averaged stats:", metric_logger)
78 | return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
79 |
80 |
81 | @torch.no_grad()
82 | def evaluate(model, criterion, postprocessors, data_loader, base_ds, device, output_dir):
83 | model.eval()
84 | criterion.eval()
85 |
86 | metric_logger = utils.MetricLogger(delimiter=" ")
87 | metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
88 | header = 'Test:'
89 |
90 | iou_types = tuple(k for k in ('segm', 'bbox') if k in postprocessors.keys())
91 | coco_evaluator = CocoEvaluator(base_ds, iou_types)
92 | # coco_evaluator.coco_eval[iou_types[0]].params.iouThrs = [0, 0.1, 0.5, 0.75]
93 |
94 | panoptic_evaluator = None
95 | if 'panoptic' in postprocessors.keys():
96 | panoptic_evaluator = PanopticEvaluator(
97 | data_loader.dataset.ann_file,
98 | data_loader.dataset.ann_folder,
99 | output_dir=os.path.join(output_dir, "panoptic_eval"),
100 | )
101 |
102 | for samples, targets in metric_logger.log_every(data_loader, 10, header):
103 | samples = samples.to(device)
104 | targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
105 |
106 | outputs = model(samples)
107 | loss_dict = criterion(outputs, targets)
108 | weight_dict = criterion.weight_dict
109 |
110 | # reduce losses over all GPUs for logging purposes
111 | loss_dict_reduced = utils.reduce_dict(loss_dict)
112 | loss_dict_reduced_scaled = {k: v * weight_dict[k]
113 | for k, v in loss_dict_reduced.items() if k in weight_dict}
114 | loss_dict_reduced_unscaled = {f'{k}_unscaled': v
115 | for k, v in loss_dict_reduced.items()}
116 | metric_logger.update(loss=sum(loss_dict_reduced_scaled.values()),
117 | **loss_dict_reduced_scaled,
118 | **loss_dict_reduced_unscaled)
119 | metric_logger.update(class_error=loss_dict_reduced['class_error'])
120 |
121 | orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0)
122 | results = postprocessors['bbox'](outputs, orig_target_sizes)
123 | if 'segm' in postprocessors.keys():
124 | target_sizes = torch.stack([t["size"] for t in targets], dim=0)
125 | results = postprocessors['segm'](results, outputs, orig_target_sizes, target_sizes)
126 | res = {target['image_id'].item(): output for target, output in zip(targets, results)}
127 | if coco_evaluator is not None:
128 | coco_evaluator.update(res)
129 |
130 | if panoptic_evaluator is not None:
131 | res_pano = postprocessors["panoptic"](outputs, target_sizes, orig_target_sizes)
132 | for i, target in enumerate(targets):
133 | image_id = target["image_id"].item()
134 | file_name = f"{image_id:012d}.png"
135 | res_pano[i]["image_id"] = image_id
136 | res_pano[i]["file_name"] = file_name
137 |
138 | panoptic_evaluator.update(res_pano)
139 |
140 | # gather the stats from all processes
141 | metric_logger.synchronize_between_processes()
142 | print("Averaged stats:", metric_logger)
143 | if coco_evaluator is not None:
144 | coco_evaluator.synchronize_between_processes()
145 | if panoptic_evaluator is not None:
146 | panoptic_evaluator.synchronize_between_processes()
147 |
148 | # accumulate predictions from all images
149 | if coco_evaluator is not None:
150 | coco_evaluator.accumulate()
151 | coco_evaluator.summarize()
152 | panoptic_res = None
153 | if panoptic_evaluator is not None:
154 | panoptic_res = panoptic_evaluator.summarize()
155 | stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()}
156 | if coco_evaluator is not None:
157 | if 'bbox' in postprocessors.keys():
158 | stats['coco_eval_bbox'] = coco_evaluator.coco_eval['bbox'].stats.tolist()
159 | if 'segm' in postprocessors.keys():
160 | stats['coco_eval_masks'] = coco_evaluator.coco_eval['segm'].stats.tolist()
161 | if panoptic_res is not None:
162 | stats['PQ_all'] = panoptic_res["All"]
163 | stats['PQ_th'] = panoptic_res["Things"]
164 | stats['PQ_st'] = panoptic_res["Stuff"]
165 | return stats, coco_evaluator
166 |
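
main.py is not part of this excerpt, so the following is only a hedged sketch of how these two entry points are usually wired together in Deformable DETR-style training; every name is a placeholder for an object built in main.py:

    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            sampler_train.set_epoch(epoch)
        train_stats = train_one_epoch(model, criterion, data_loader_train,
                                      optimizer, device, epoch, args.clip_max_norm)
        lr_scheduler.step()
        test_stats, coco_evaluator = evaluate(model, criterion, postprocessors,
                                              data_loader_val, base_ds, device, args.output_dir)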
--------------------------------------------------------------------------------
/figs/teaser.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/figs/teaser.png
--------------------------------------------------------------------------------
/models/.deformable_transformer_mm.py.swp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/models/.deformable_transformer_mm.py.swp
--------------------------------------------------------------------------------
/models/__init__.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------
6 | # Modified from DETR (https://github.com/facebookresearch/detr)
7 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8 | # ------------------------------------------------------------------------
9 |
10 | from .deformable_detr_single import build as build_single
11 | from .deformable_detr_multi import build as build_multi
12 |
13 |
14 |
15 | def build_model(args):
16 | if args.dataset_file == "vid_single":
17 | return build_single(args)
18 | else:
19 | return build_multi(args)
20 |
21 |
22 |
--------------------------------------------------------------------------------
/models/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/models/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/models/__pycache__/backbone.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/models/__pycache__/backbone.cpython-37.pyc
--------------------------------------------------------------------------------
/models/__pycache__/deformable_detr.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/models/__pycache__/deformable_detr.cpython-37.pyc
--------------------------------------------------------------------------------
/models/__pycache__/deformable_detr_multi.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/models/__pycache__/deformable_detr_multi.cpython-37.pyc
--------------------------------------------------------------------------------
/models/__pycache__/deformable_detr_multi_mm.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/models/__pycache__/deformable_detr_multi_mm.cpython-37.pyc
--------------------------------------------------------------------------------
/models/__pycache__/deformable_detr_single.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/models/__pycache__/deformable_detr_single.cpython-37.pyc
--------------------------------------------------------------------------------
/models/__pycache__/deformable_transformer.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/models/__pycache__/deformable_transformer.cpython-37.pyc
--------------------------------------------------------------------------------
/models/__pycache__/deformable_transformer_mm.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/models/__pycache__/deformable_transformer_mm.cpython-37.pyc
--------------------------------------------------------------------------------
/models/__pycache__/deformable_transformer_multi.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/models/__pycache__/deformable_transformer_multi.cpython-37.pyc
--------------------------------------------------------------------------------
/models/__pycache__/deformable_transformer_single.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/models/__pycache__/deformable_transformer_single.cpython-37.pyc
--------------------------------------------------------------------------------
/models/__pycache__/matcher.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/models/__pycache__/matcher.cpython-37.pyc
--------------------------------------------------------------------------------
/models/__pycache__/position_encoding.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/models/__pycache__/position_encoding.cpython-37.pyc
--------------------------------------------------------------------------------
/models/__pycache__/segmentation.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/models/__pycache__/segmentation.cpython-37.pyc
--------------------------------------------------------------------------------
/models/backbone.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------
6 | # Modified from DETR (https://github.com/facebookresearch/detr)
7 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8 | # ------------------------------------------------------------------------
9 |
10 | """
11 | Backbone modules.
12 | """
13 | from collections import OrderedDict
14 |
15 | import torch
16 | import torch.nn.functional as F
17 | import torchvision
18 | from torch import nn
19 | from torchvision.models._utils import IntermediateLayerGetter
20 | from typing import Dict, List
21 |
22 | from util.misc import NestedTensor, is_main_process
23 |
24 | from .position_encoding import build_position_encoding
25 |
26 |
27 | class FrozenBatchNorm2d(torch.nn.Module):
28 | """
29 | BatchNorm2d where the batch statistics and the affine parameters are fixed.
30 |
31 |     Copy-paste from torchvision.misc.ops with added eps before rsqrt,
32 | without which any other models than torchvision.models.resnet[18,34,50,101]
33 | produce nans.
34 | """
35 |
36 | def __init__(self, n, eps=1e-5):
37 | super(FrozenBatchNorm2d, self).__init__()
38 | self.register_buffer("weight", torch.ones(n))
39 | self.register_buffer("bias", torch.zeros(n))
40 | self.register_buffer("running_mean", torch.zeros(n))
41 | self.register_buffer("running_var", torch.ones(n))
42 | self.eps = eps
43 |
44 | def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
45 | missing_keys, unexpected_keys, error_msgs):
46 | num_batches_tracked_key = prefix + 'num_batches_tracked'
47 | if num_batches_tracked_key in state_dict:
48 | del state_dict[num_batches_tracked_key]
49 |
50 | super(FrozenBatchNorm2d, self)._load_from_state_dict(
51 | state_dict, prefix, local_metadata, strict,
52 | missing_keys, unexpected_keys, error_msgs)
53 |
54 | def forward(self, x):
55 | # move reshapes to the beginning
56 | # to make it fuser-friendly
57 | w = self.weight.reshape(1, -1, 1, 1)
58 | b = self.bias.reshape(1, -1, 1, 1)
59 | rv = self.running_var.reshape(1, -1, 1, 1)
60 | rm = self.running_mean.reshape(1, -1, 1, 1)
61 | eps = self.eps
62 | scale = w * (rv + eps).rsqrt()
63 | bias = b - rm * scale
64 | return x * scale + bias
65 |
66 |
67 | class BackboneBase(nn.Module):
68 |     # backbone, whether to train the backbone, and whether to return intermediate layers
69 | def __init__(self, backbone: nn.Module, train_backbone: bool, return_interm_layers: bool):
70 | super().__init__()
71 | for name, parameter in backbone.named_parameters():
72 | if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
73 | parameter.requires_grad_(False)
74 | if return_interm_layers:
75 | # return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
76 | return_layers = {"layer2": "0", "layer3": "1", "layer4": "2"}
77 | self.strides = [8, 16, 32]
78 | self.num_channels = [512, 1024, 2048]
79 | else:
80 | return_layers = {'layer4': "0"}
81 | self.strides = [32]
82 | self.num_channels = [2048]
83 | self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
84 |
85 | def forward(self, tensor_list: NestedTensor):
86 |         # run the backbone; the padding mask is downsampled to each feature map's resolution below
87 | xs = self.body(tensor_list.tensors)
88 | out: Dict[str, NestedTensor] = {}
89 | for name, x in xs.items():
90 | m = tensor_list.mask
91 | assert m is not None
92 | mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
93 | out[name] = NestedTensor(x, mask)
94 | return out
95 |
96 |
97 | class Backbone(BackboneBase):
98 | """ResNet backbone with frozen BatchNorm."""
99 | def __init__(self, name: str,
100 | train_backbone: bool,
101 | return_interm_layers: bool,
102 | dilation: bool):
103 | norm_layer = FrozenBatchNorm2d
104 | backbone = getattr(torchvision.models, name)(
105 | replace_stride_with_dilation=[False, False, dilation],
106 | pretrained=is_main_process(), norm_layer=norm_layer)
107 | assert name not in ('resnet18', 'resnet34'), "number of channels are hard coded"
108 | super().__init__(backbone, train_backbone, return_interm_layers)
109 | if dilation:
110 | self.strides[-1] = self.strides[-1] // 2
111 |
112 |
113 | class Joiner(nn.Sequential):
114 | def __init__(self, backbone, position_embedding):
115 | super().__init__(backbone, position_embedding)
116 | self.strides = backbone.strides
117 | self.num_channels = backbone.num_channels
118 |
119 | def forward(self, tensor_list: NestedTensor):
120 | xs = self[0](tensor_list)
121 | out: List[NestedTensor] = []
122 | pos = []
123 | for name, x in sorted(xs.items()):
124 | out.append(x)
125 |
126 | # position encoding
127 | for x in out:
128 | pos.append(self[1](x).to(x.tensors.dtype))
129 |
130 | return out, pos
131 |
132 |
133 | def build_backbone(args):
134 | position_embedding = build_position_encoding(args)
135 | train_backbone = args.lr_backbone > 0
136 | return_interm_layers = args.masks or (args.num_feature_levels > 1 )
137 | backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation)
138 | model = Joiner(backbone, position_embedding)
139 | return model
140 |
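
For orientation, a minimal usage sketch of build_backbone. It assumes position_encoding.py and util.misc follow the upstream Deformable DETR interfaces (build_position_encoding reads args.hidden_dim / args.position_embedding, and nested_tensor_from_tensor_list pads a list of images into one NestedTensor); the argument values are illustrative, not the training defaults.

import torch
from argparse import Namespace

from models.backbone import build_backbone
from util.misc import nested_tensor_from_tensor_list

# Illustrative arguments; the real values come from main.py's parser.
args = Namespace(backbone='resnet50', dilation=False, lr_backbone=2e-5,
                 masks=False, num_feature_levels=4,
                 hidden_dim=256, position_embedding='sine')
backbone = build_backbone(args)

# Two differently sized dummy frames are padded into a single NestedTensor.
samples = nested_tensor_from_tensor_list([torch.rand(3, 480, 640),
                                          torch.rand(3, 512, 512)])
features, pos = backbone(samples)
for feat, p in zip(features, pos):
    # One NestedTensor per returned layer (strides 8/16/32) plus its positional encoding.
    print(feat.tensors.shape, p.shape)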
--------------------------------------------------------------------------------
/models/matcher.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------
6 | # Modified from DETR (https://github.com/facebookresearch/detr)
7 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8 | # ------------------------------------------------------------------------
9 |
10 | """
11 | Modules to compute the matching cost and solve the corresponding LSAP.
12 | """
13 | import torch
14 | from scipy.optimize import linear_sum_assignment
15 | from torch import nn
16 |
17 | from util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou
18 |
19 |
20 | class HungarianMatcher(nn.Module):
21 | """This class computes an assignment between the targets and the predictions of the network
22 |
23 | For efficiency reasons, the targets don't include the no_object. Because of this, in general,
24 | there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
25 | while the others are un-matched (and thus treated as non-objects).
26 | """
27 |
28 | def __init__(self,
29 | cost_class: float = 1,
30 | cost_bbox: float = 1,
31 | cost_giou: float = 1):
32 | """Creates the matcher
33 |
34 | Params:
35 | cost_class: This is the relative weight of the classification error in the matching cost
36 | cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost
37 | cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost
38 | """
39 | super().__init__()
40 | self.cost_class = cost_class
41 | self.cost_bbox = cost_bbox
42 | self.cost_giou = cost_giou
43 |         assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs can't be 0"
44 |
45 | def forward(self, outputs, targets):
46 | """ Performs the matching
47 |
48 | Params:
49 | outputs: This is a dict that contains at least these entries:
50 | "pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
51 | "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates
52 |
53 | targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
54 | "labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
55 | objects in the target) containing the class labels
56 | "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates
57 |
58 | Returns:
59 | A list of size batch_size, containing tuples of (index_i, index_j) where:
60 | - index_i is the indices of the selected predictions (in order)
61 | - index_j is the indices of the corresponding selected targets (in order)
62 | For each batch element, it holds:
63 | len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
64 | """
65 | with torch.no_grad():
66 | bs, num_queries = outputs["pred_logits"].shape[:2]
67 |
68 | # We flatten to compute the cost matrices in a batch
69 | out_prob = outputs["pred_logits"].flatten(0, 1).sigmoid() # [batch_size * num_queries, num_classes]
70 | out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4]
71 |
72 | # Also concat the target labels and boxes
73 | tgt_ids = torch.cat([v["labels"] for v in targets])
74 | # print("tgt_ids_shape", tgt_ids.shape)
75 | tgt_bbox = torch.cat([v["boxes"] for v in targets])
76 |
77 | # Compute the classification cost.
78 | alpha = 0.25
79 | gamma = 2.0
80 | neg_cost_class = (1 - alpha) * (out_prob ** gamma) * (-(1 - out_prob + 1e-8).log())
81 | pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())
82 | #print("pos_cost_class_shape", pos_cost_class.shape)
83 | cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids]
84 | #print("cost_class_shape", cost_class.shape)
85 |
86 | # Compute the L1 cost between boxes
87 | cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)
88 |
89 |             # Compute the giou cost between boxes
90 | cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox),
91 | box_cxcywh_to_xyxy(tgt_bbox))
92 |
93 | # Final cost matrix
94 | C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou
95 | C = C.view(bs, num_queries, -1).cpu()
96 |
97 | sizes = [len(v["boxes"]) for v in targets]
98 | #print("size", sizes)
99 | indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]
100 | return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
101 |
102 |
103 | def build_matcher(args):
104 | return HungarianMatcher(cost_class=args.set_cost_class,
105 | cost_bbox=args.set_cost_bbox,
106 | cost_giou=args.set_cost_giou)
107 |
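
A small self-contained sketch of running the matcher on random predictions (the cost weights and class count below are illustrative; the real values come from the training configuration):

import torch
from models.matcher import HungarianMatcher

matcher = HungarianMatcher(cost_class=2, cost_bbox=5, cost_giou=2)

outputs = {
    "pred_logits": torch.randn(2, 300, 31),   # [batch_size, num_queries, num_classes]
    "pred_boxes":  torch.rand(2, 300, 4),     # normalized (cx, cy, w, h)
}
targets = [
    {"labels": torch.tensor([3, 7]), "boxes": torch.rand(2, 4)},
    {"labels": torch.tensor([12]),   "boxes": torch.rand(1, 4)},
]

indices = matcher(outputs, targets)
# One (pred_idx, tgt_idx) pair per image; each has length equal to the number of GT boxes.
for pred_idx, tgt_idx in indices:
    print(pred_idx, tgt_idx)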
--------------------------------------------------------------------------------
/models/ops/MultiScaleDeformableAttention.cpython-37m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/models/ops/MultiScaleDeformableAttention.cpython-37m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/models/ops/MultiScaleDeformableAttention.egg-info/PKG-INFO:
--------------------------------------------------------------------------------
1 | Metadata-Version: 1.0
2 | Name: MultiScaleDeformableAttention
3 | Version: 1.0
4 | Summary: PyTorch Wrapper for CUDA Functions of Multi-Scale Deformable Attention
5 | Home-page: https://github.com/fundamentalvision/Deformable-DETR
6 | Author: Weijie Su
7 | Author-email: UNKNOWN
8 | License: UNKNOWN
9 | Description: UNKNOWN
10 | Platform: UNKNOWN
11 |
--------------------------------------------------------------------------------
/models/ops/MultiScaleDeformableAttention.egg-info/SOURCES.txt:
--------------------------------------------------------------------------------
1 | setup.py
2 | /mnt/lustre/helu/code/vod/video_object_detection/models/ops/src/vision.cpp
3 | /mnt/lustre/helu/code/vod/video_object_detection/models/ops/src/cpu/ms_deform_attn_cpu.cpp
4 | /mnt/lustre/helu/code/vod/video_object_detection/models/ops/src/cuda/ms_deform_attn_cuda.cu
5 | MultiScaleDeformableAttention.egg-info/PKG-INFO
6 | MultiScaleDeformableAttention.egg-info/SOURCES.txt
7 | MultiScaleDeformableAttention.egg-info/dependency_links.txt
8 | MultiScaleDeformableAttention.egg-info/top_level.txt
9 | functions/__init__.py
10 | functions/ms_deform_attn_func.py
11 | modules/__init__.py
12 | modules/ms_deform_attn.py
--------------------------------------------------------------------------------
/models/ops/MultiScaleDeformableAttention.egg-info/dependency_links.txt:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/models/ops/MultiScaleDeformableAttention.egg-info/top_level.txt:
--------------------------------------------------------------------------------
1 | MultiScaleDeformableAttention
2 | functions
3 | modules
4 |
--------------------------------------------------------------------------------
/models/ops/build/lib.linux-x86_64-3.7/MultiScaleDeformableAttention.cpython-37m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/models/ops/build/lib.linux-x86_64-3.7/MultiScaleDeformableAttention.cpython-37m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/models/ops/build/lib.linux-x86_64-3.7/functions/__init__.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------------------------------
6 | # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
7 | # ------------------------------------------------------------------------------------------------
8 |
9 | from .ms_deform_attn_func import MSDeformAttnFunction
10 |
11 |
--------------------------------------------------------------------------------
/models/ops/build/lib.linux-x86_64-3.7/functions/ms_deform_attn_func.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------------------------------
6 | # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
7 | # ------------------------------------------------------------------------------------------------
8 |
9 | from __future__ import absolute_import
10 | from __future__ import print_function
11 | from __future__ import division
12 |
13 | import torch
14 | import torch.nn.functional as F
15 | from torch.autograd import Function
16 | from torch.autograd.function import once_differentiable
17 |
18 | import MultiScaleDeformableAttention as MSDA
19 |
20 |
21 | class MSDeformAttnFunction(Function):
22 | @staticmethod
23 | def forward(ctx, value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, im2col_step):
24 | ctx.im2col_step = im2col_step
25 | output = MSDA.ms_deform_attn_forward(
26 | value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, ctx.im2col_step)
27 | ctx.save_for_backward(value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights)
28 | return output
29 |
30 | @staticmethod
31 | @once_differentiable
32 | def backward(ctx, grad_output):
33 | value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights = ctx.saved_tensors
34 | grad_value, grad_sampling_loc, grad_attn_weight = \
35 | MSDA.ms_deform_attn_backward(
36 | value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, grad_output, ctx.im2col_step)
37 |
38 | return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
39 |
40 |
41 | def ms_deform_attn_core_pytorch(value, value_spatial_shapes, sampling_locations, attention_weights):
42 | # for debug and test only,
43 | # need to use cuda version instead
44 | N_, S_, M_, D_ = value.shape
45 | _, Lq_, M_, L_, P_, _ = sampling_locations.shape
46 | value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1)
47 | sampling_grids = 2 * sampling_locations - 1
48 | sampling_value_list = []
49 | for lid_, (H_, W_) in enumerate(value_spatial_shapes):
50 | # N_, H_*W_, M_, D_ -> N_, H_*W_, M_*D_ -> N_, M_*D_, H_*W_ -> N_*M_, D_, H_, W_
51 | value_l_ = value_list[lid_].flatten(2).transpose(1, 2).reshape(N_*M_, D_, H_, W_)
52 | # N_, Lq_, M_, P_, 2 -> N_, M_, Lq_, P_, 2 -> N_*M_, Lq_, P_, 2
53 | sampling_grid_l_ = sampling_grids[:, :, :, lid_].transpose(1, 2).flatten(0, 1)
54 | # N_*M_, D_, Lq_, P_
55 | sampling_value_l_ = F.grid_sample(value_l_, sampling_grid_l_,
56 | mode='bilinear', padding_mode='zeros', align_corners=False)
57 | sampling_value_list.append(sampling_value_l_)
58 | # (N_, Lq_, M_, L_, P_) -> (N_, M_, Lq_, L_, P_) -> (N_, M_, 1, Lq_, L_*P_)
59 | attention_weights = attention_weights.transpose(1, 2).reshape(N_*M_, 1, Lq_, L_*P_)
60 | output = (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights).sum(-1).view(N_, M_*D_, Lq_)
61 | return output.transpose(1, 2).contiguous()
62 |
--------------------------------------------------------------------------------
/models/ops/build/lib.linux-x86_64-3.7/modules/__init__.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------------------------------
6 | # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
7 | # ------------------------------------------------------------------------------------------------
8 |
9 | from .ms_deform_attn import MSDeformAttn
10 |
--------------------------------------------------------------------------------
/models/ops/build/lib.linux-x86_64-3.7/modules/ms_deform_attn.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------------------------------
6 | # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
7 | # ------------------------------------------------------------------------------------------------
8 |
9 | from __future__ import absolute_import
10 | from __future__ import print_function
11 | from __future__ import division
12 |
13 | import warnings
14 | import math
15 |
16 | import torch
17 | from torch import nn
18 | import torch.nn.functional as F
19 | from torch.nn.init import xavier_uniform_, constant_
20 |
21 | from ..functions import MSDeformAttnFunction
22 |
23 |
24 | def _is_power_of_2(n):
25 | if (not isinstance(n, int)) or (n < 0):
26 | raise ValueError("invalid input for _is_power_of_2: {} (type: {})".format(n, type(n)))
27 | return (n & (n-1) == 0) and n != 0
28 |
29 |
30 | class MSDeformAttn(nn.Module):
31 | def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4):
32 | """
33 | Multi-Scale Deformable Attention Module
34 | :param d_model hidden dimension
35 | :param n_levels number of feature levels
36 | :param n_heads number of attention heads
37 | :param n_points number of sampling points per attention head per feature level
38 | """
39 | super().__init__()
40 | if d_model % n_heads != 0:
41 | raise ValueError('d_model must be divisible by n_heads, but got {} and {}'.format(d_model, n_heads))
42 | _d_per_head = d_model // n_heads
43 | # you'd better set _d_per_head to a power of 2 which is more efficient in our CUDA implementation
44 | if not _is_power_of_2(_d_per_head):
45 | warnings.warn("You'd better set d_model in MSDeformAttn to make the dimension of each attention head a power of 2 "
46 | "which is more efficient in our CUDA implementation.")
47 |
48 | self.im2col_step = 64
49 |
50 | self.d_model = d_model
51 | self.n_levels = n_levels
52 | self.n_heads = n_heads
53 | self.n_points = n_points
54 |
55 | self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2)
56 | self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points)
57 | self.value_proj = nn.Linear(d_model, d_model)
58 | self.output_proj = nn.Linear(d_model, d_model)
59 |
60 | self._reset_parameters()
61 |
62 | def _reset_parameters(self):
63 | constant_(self.sampling_offsets.weight.data, 0.)
64 | thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads)
65 | grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
66 | grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(self.n_heads, 1, 1, 2).repeat(1, self.n_levels, self.n_points, 1)
67 | for i in range(self.n_points):
68 | grid_init[:, :, i, :] *= i + 1
69 | with torch.no_grad():
70 | self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
71 | constant_(self.attention_weights.weight.data, 0.)
72 | constant_(self.attention_weights.bias.data, 0.)
73 | xavier_uniform_(self.value_proj.weight.data)
74 | constant_(self.value_proj.bias.data, 0.)
75 | xavier_uniform_(self.output_proj.weight.data)
76 | constant_(self.output_proj.bias.data, 0.)
77 |
78 | def forward(self, query, reference_points, input_flatten, input_spatial_shapes, input_level_start_index, input_padding_mask=None):
79 | """
80 | :param query (N, Length_{query}, C)
81 | :param reference_points (N, Length_{query}, n_levels, 2), range in [0, 1], top-left (0,0), bottom-right (1, 1), including padding area
82 | or (N, Length_{query}, n_levels, 4), add additional (w, h) to form reference boxes
83 | :param input_flatten (N, \sum_{l=0}^{L-1} H_l \cdot W_l, C)
84 | :param input_spatial_shapes (n_levels, 2), [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]
85 | :param input_level_start_index (n_levels, ), [0, H_0*W_0, H_0*W_0+H_1*W_1, H_0*W_0+H_1*W_1+H_2*W_2, ..., H_0*W_0+H_1*W_1+...+H_{L-1}*W_{L-1}]
86 | :param input_padding_mask (N, \sum_{l=0}^{L-1} H_l \cdot W_l), True for padding elements, False for non-padding elements
87 |
88 | :return output (N, Length_{query}, C)
89 | """
90 | N, Len_q, _ = query.shape
91 | N, Len_in, _ = input_flatten.shape
92 | assert (input_spatial_shapes[:, 0] * input_spatial_shapes[:, 1]).sum() == Len_in
93 |
94 | value = self.value_proj(input_flatten)
95 | if input_padding_mask is not None:
96 | value = value.masked_fill(input_padding_mask[..., None], float(0))
97 | value = value.view(N, Len_in, self.n_heads, self.d_model // self.n_heads)
98 | sampling_offsets = self.sampling_offsets(query).view(N, Len_q, self.n_heads, self.n_levels, self.n_points, 2)
99 | attention_weights = self.attention_weights(query).view(N, Len_q, self.n_heads, self.n_levels * self.n_points)
100 | attention_weights = F.softmax(attention_weights, -1).view(N, Len_q, self.n_heads, self.n_levels, self.n_points)
101 | # N, Len_q, n_heads, n_levels, n_points, 2
102 | if reference_points.shape[-1] == 2:
103 | offset_normalizer = torch.stack([input_spatial_shapes[..., 1], input_spatial_shapes[..., 0]], -1)
104 | sampling_locations = reference_points[:, :, None, :, None, :] \
105 | + sampling_offsets / offset_normalizer[None, None, None, :, None, :]
106 | elif reference_points.shape[-1] == 4:
107 | sampling_locations = reference_points[:, :, None, :, None, :2] \
108 | + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
109 | else:
110 | raise ValueError(
111 | 'Last dim of reference_points must be 2 or 4, but get {} instead.'.format(reference_points.shape[-1]))
112 | output = MSDeformAttnFunction.apply(
113 | value, input_spatial_shapes, input_level_start_index, sampling_locations, attention_weights, self.im2col_step)
114 | output = self.output_proj(output)
115 | return output
116 |
--------------------------------------------------------------------------------
/models/ops/build/temp.linux-x86_64-3.7/.ninja_deps:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/models/ops/build/temp.linux-x86_64-3.7/.ninja_deps
--------------------------------------------------------------------------------
/models/ops/build/temp.linux-x86_64-3.7/.ninja_log:
--------------------------------------------------------------------------------
1 | # ninja log v5
2 | 39 21554 1611922976000000000 /mnt/lustre/zhouqianyu/vod/workspace/TransVOD/models/ops/build/temp.linux-x86_64-3.7/mnt/lustre/zhouqianyu/vod/workspace/TransVOD/models/ops/src/cpu/ms_deform_attn_cpu.o f6529e61a8c88466
3 | 39 47827 1611923004000000000 /mnt/lustre/zhouqianyu/vod/workspace/TransVOD/models/ops/build/temp.linux-x86_64-3.7/mnt/lustre/zhouqianyu/vod/workspace/TransVOD/models/ops/src/vision.o 208a34d5e4a0f397
4 | 39 142518 1611923099000000000 /mnt/lustre/zhouqianyu/vod/workspace/TransVOD/models/ops/build/temp.linux-x86_64-3.7/mnt/lustre/zhouqianyu/vod/workspace/TransVOD/models/ops/src/cuda/ms_deform_attn_cuda.o a3a712f845c1ccba
5 |
--------------------------------------------------------------------------------
/models/ops/build/temp.linux-x86_64-3.7/build.ninja:
--------------------------------------------------------------------------------
1 | ninja_required_version = 1.3
2 | cxx = c++
3 | nvcc = /mnt/lustre/share/cuda-10.1/bin/nvcc
4 |
5 | cflags = -pthread -B /mnt/lustre/zhouqianyu/miniconda3/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -DWITH_CUDA -I/mnt/lustre/zhouqianyu/vod/workspace/TransVOD/models/ops/src -I/mnt/lustre/zhouqianyu/envs/torch1.5.0/lib/python3.7/site-packages/torch/include -I/mnt/lustre/zhouqianyu/envs/torch1.5.0/lib/python3.7/site-packages/torch/include/torch/csrc/api/include -I/mnt/lustre/zhouqianyu/envs/torch1.5.0/lib/python3.7/site-packages/torch/include/TH -I/mnt/lustre/zhouqianyu/envs/torch1.5.0/lib/python3.7/site-packages/torch/include/THC -I/mnt/lustre/share/cuda-10.1/include -I/mnt/lustre/zhouqianyu/miniconda3/include/python3.7m -c
6 | post_cflags = -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=MultiScaleDeformableAttention -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++14
7 | cuda_cflags = -DWITH_CUDA -I/mnt/lustre/zhouqianyu/vod/workspace/TransVOD/models/ops/src -I/mnt/lustre/zhouqianyu/envs/torch1.5.0/lib/python3.7/site-packages/torch/include -I/mnt/lustre/zhouqianyu/envs/torch1.5.0/lib/python3.7/site-packages/torch/include/torch/csrc/api/include -I/mnt/lustre/zhouqianyu/envs/torch1.5.0/lib/python3.7/site-packages/torch/include/TH -I/mnt/lustre/zhouqianyu/envs/torch1.5.0/lib/python3.7/site-packages/torch/include/THC -I/mnt/lustre/share/cuda-10.1/include -I/mnt/lustre/zhouqianyu/miniconda3/include/python3.7m -c
8 | cuda_post_cflags = -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --expt-relaxed-constexpr --compiler-options '-fPIC' -DCUDA_HAS_FP16=1 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=MultiScaleDeformableAttention -D_GLIBCXX_USE_CXX11_ABI=0 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_50,code=compute_50 -gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -std=c++14
9 | ldflags =
10 |
11 | rule compile
12 | command = $cxx -MMD -MF $out.d $cflags -c $in -o $out $post_cflags
13 | depfile = $out.d
14 | deps = gcc
15 |
16 | rule cuda_compile
17 | command = $nvcc $cuda_cflags -c $in -o $out $cuda_post_cflags
18 |
19 |
20 |
21 | build /mnt/lustre/zhouqianyu/vod/workspace/TransVOD/models/ops/build/temp.linux-x86_64-3.7/mnt/lustre/zhouqianyu/vod/workspace/TransVOD/models/ops/src/vision.o: compile /mnt/lustre/zhouqianyu/vod/workspace/TransVOD/models/ops/src/vision.cpp
22 | build /mnt/lustre/zhouqianyu/vod/workspace/TransVOD/models/ops/build/temp.linux-x86_64-3.7/mnt/lustre/zhouqianyu/vod/workspace/TransVOD/models/ops/src/cpu/ms_deform_attn_cpu.o: compile /mnt/lustre/zhouqianyu/vod/workspace/TransVOD/models/ops/src/cpu/ms_deform_attn_cpu.cpp
23 | build /mnt/lustre/zhouqianyu/vod/workspace/TransVOD/models/ops/build/temp.linux-x86_64-3.7/mnt/lustre/zhouqianyu/vod/workspace/TransVOD/models/ops/src/cuda/ms_deform_attn_cuda.o: cuda_compile /mnt/lustre/zhouqianyu/vod/workspace/TransVOD/models/ops/src/cuda/ms_deform_attn_cuda.cu
24 |
25 |
26 |
27 |
28 |
29 |
--------------------------------------------------------------------------------
/models/ops/build/temp.linux-x86_64-3.7/mnt/lustre/zhouqianyu/vod/workspace/TransVOD/models/ops/src/cpu/ms_deform_attn_cpu.o:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/models/ops/build/temp.linux-x86_64-3.7/mnt/lustre/zhouqianyu/vod/workspace/TransVOD/models/ops/src/cpu/ms_deform_attn_cpu.o
--------------------------------------------------------------------------------
/models/ops/build/temp.linux-x86_64-3.7/mnt/lustre/zhouqianyu/vod/workspace/TransVOD/models/ops/src/cuda/ms_deform_attn_cuda.o:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/models/ops/build/temp.linux-x86_64-3.7/mnt/lustre/zhouqianyu/vod/workspace/TransVOD/models/ops/src/cuda/ms_deform_attn_cuda.o
--------------------------------------------------------------------------------
/models/ops/build/temp.linux-x86_64-3.7/mnt/lustre/zhouqianyu/vod/workspace/TransVOD/models/ops/src/vision.o:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/models/ops/build/temp.linux-x86_64-3.7/mnt/lustre/zhouqianyu/vod/workspace/TransVOD/models/ops/src/vision.o
--------------------------------------------------------------------------------
/models/ops/functions/__init__.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------------------------------
6 | # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
7 | # ------------------------------------------------------------------------------------------------
8 |
9 | from .ms_deform_attn_func import MSDeformAttnFunction
10 |
11 |
--------------------------------------------------------------------------------
/models/ops/functions/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/models/ops/functions/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/models/ops/functions/__pycache__/ms_deform_attn_func.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/models/ops/functions/__pycache__/ms_deform_attn_func.cpython-37.pyc
--------------------------------------------------------------------------------
/models/ops/functions/ms_deform_attn_func.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------------------------------
6 | # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
7 | # ------------------------------------------------------------------------------------------------
8 |
9 | from __future__ import absolute_import
10 | from __future__ import print_function
11 | from __future__ import division
12 |
13 | import torch
14 | import torch.nn.functional as F
15 | from torch.autograd import Function
16 | from torch.autograd.function import once_differentiable
17 |
18 | import MultiScaleDeformableAttention as MSDA
19 |
20 |
21 | class MSDeformAttnFunction(Function):
22 | @staticmethod
23 | def forward(ctx, value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, im2col_step):
24 | ctx.im2col_step = im2col_step
25 | output = MSDA.ms_deform_attn_forward(
26 | value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, ctx.im2col_step)
27 | ctx.save_for_backward(value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights)
28 | return output
29 |
30 | @staticmethod
31 | @once_differentiable
32 | def backward(ctx, grad_output):
33 | value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights = ctx.saved_tensors
34 | grad_value, grad_sampling_loc, grad_attn_weight = \
35 | MSDA.ms_deform_attn_backward(
36 | value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, grad_output, ctx.im2col_step)
37 |
38 | return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
39 |
40 |
41 | def ms_deform_attn_core_pytorch(value, value_spatial_shapes, sampling_locations, attention_weights):
42 | # for debug and test only,
43 | # need to use cuda version instead
44 | N_, S_, M_, D_ = value.shape
45 | _, Lq_, M_, L_, P_, _ = sampling_locations.shape
46 | value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1)
47 | sampling_grids = 2 * sampling_locations - 1
48 | sampling_value_list = []
49 | for lid_, (H_, W_) in enumerate(value_spatial_shapes):
50 | # N_, H_*W_, M_, D_ -> N_, H_*W_, M_*D_ -> N_, M_*D_, H_*W_ -> N_*M_, D_, H_, W_
51 | value_l_ = value_list[lid_].flatten(2).transpose(1, 2).reshape(N_*M_, D_, H_, W_)
52 | # N_, Lq_, M_, P_, 2 -> N_, M_, Lq_, P_, 2 -> N_*M_, Lq_, P_, 2
53 | sampling_grid_l_ = sampling_grids[:, :, :, lid_].transpose(1, 2).flatten(0, 1)
54 | # N_*M_, D_, Lq_, P_
55 | sampling_value_l_ = F.grid_sample(value_l_, sampling_grid_l_,
56 | mode='bilinear', padding_mode='zeros', align_corners=False)
57 | sampling_value_list.append(sampling_value_l_)
58 | # (N_, Lq_, M_, L_, P_) -> (N_, M_, Lq_, L_, P_) -> (N_, M_, 1, Lq_, L_*P_)
59 | attention_weights = attention_weights.transpose(1, 2).reshape(N_*M_, 1, Lq_, L_*P_)
60 | output = (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights).sum(-1).view(N_, M_*D_, Lq_)
61 | return output.transpose(1, 2).contiguous()
62 |
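
A quick shape check of the pure-PyTorch reference path above, with random inputs. Note that importing this module already requires the compiled MultiScaleDeformableAttention extension, and the CUDA autograd Function is what training actually uses; this sketch only illustrates the expected tensor layout.

import torch
import torch.nn.functional as F
from models.ops.functions.ms_deform_attn_func import ms_deform_attn_core_pytorch

N, M, D, Lq, P = 2, 8, 32, 10, 4           # batch, heads, head dim, queries, points
spatial_shapes = [(8, 8), (4, 4)]          # two feature levels
L = len(spatial_shapes)
S = sum(h * w for h, w in spatial_shapes)  # 80 flattened key locations

value = torch.rand(N, S, M, D)
sampling_locations = torch.rand(N, Lq, M, L, P, 2)                          # in [0, 1]
attention_weights = F.softmax(torch.rand(N, Lq, M, L * P), -1).view(N, Lq, M, L, P)

out = ms_deform_attn_core_pytorch(value, spatial_shapes, sampling_locations, attention_weights)
print(out.shape)   # torch.Size([2, 10, 256]) == (N, Lq, M*D)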
--------------------------------------------------------------------------------
/models/ops/make.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | export PATH=/mnt/lustre/share/gcc/gcc-5.4/bin/:$PATH
3 | export LD_LIBRARY_PATH=/mnt/lustre/share/gcc/gmp-4.3.2/lib:/mnt/lustre/share/gcc/mpfr-2.4.2/lib:/mnt/lustre/share/gcc/mpc-0.8.1/lib:$LD_LIBRARY_PATH
4 | export TORCH_CUDA_ARCH_LIST='3.5;5.0+PTX;6.0;7.0'
5 |
6 | partition=$1
7 | srun -p $partition --gres=gpu:1 python setup.py build develop --user
8 |
9 | # ------------------------------------------------------------------------------------------------
10 | # Deformable DETR
11 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
12 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
13 | # ------------------------------------------------------------------------------------------------
14 | # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
15 | # ------------------------------------------------------------------------------------------------
16 |
17 |
--------------------------------------------------------------------------------
/models/ops/modules/__init__.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------------------------------
6 | # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
7 | # ------------------------------------------------------------------------------------------------
8 |
9 | from .ms_deform_attn import MSDeformAttn
10 |
--------------------------------------------------------------------------------
/models/ops/modules/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/models/ops/modules/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/models/ops/modules/__pycache__/ms_deform_attn.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/models/ops/modules/__pycache__/ms_deform_attn.cpython-37.pyc
--------------------------------------------------------------------------------
/models/ops/modules/ms_deform_attn.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------------------------------
6 | # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
7 | # ------------------------------------------------------------------------------------------------
8 |
9 | from __future__ import absolute_import
10 | from __future__ import print_function
11 | from __future__ import division
12 |
13 | import warnings
14 | import math
15 |
16 | import torch
17 | from torch import nn
18 | import torch.nn.functional as F
19 | from torch.nn.init import xavier_uniform_, constant_
20 |
21 | from ..functions import MSDeformAttnFunction
22 |
23 |
24 | def _is_power_of_2(n):
25 | if (not isinstance(n, int)) or (n < 0):
26 | raise ValueError("invalid input for _is_power_of_2: {} (type: {})".format(n, type(n)))
27 | return (n & (n-1) == 0) and n != 0
28 |
29 |
30 | class MSDeformAttn(nn.Module):
31 | def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4):
32 | """
33 | Multi-Scale Deformable Attention Module
34 | :param d_model hidden dimension
35 | :param n_levels number of feature levels
36 | :param n_heads number of attention heads
37 | :param n_points number of sampling points per attention head per feature level
38 | """
39 | super().__init__()
40 | if d_model % n_heads != 0:
41 | raise ValueError('d_model must be divisible by n_heads, but got {} and {}'.format(d_model, n_heads))
42 | _d_per_head = d_model // n_heads
43 | # you'd better set _d_per_head to a power of 2 which is more efficient in our CUDA implementation
44 | if not _is_power_of_2(_d_per_head):
45 | warnings.warn("You'd better set d_model in MSDeformAttn to make the dimension of each attention head a power of 2 "
46 | "which is more efficient in our CUDA implementation.")
47 |
48 | self.im2col_step = 64
49 |
50 | self.d_model = d_model
51 | self.n_levels = n_levels
52 | self.n_heads = n_heads
53 | self.n_points = n_points
54 |
55 | self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2)
56 | self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points)
57 | self.value_proj = nn.Linear(d_model, d_model)
58 | self.output_proj = nn.Linear(d_model, d_model)
59 |
60 | self._reset_parameters()
61 |
62 | def _reset_parameters(self):
63 | constant_(self.sampling_offsets.weight.data, 0.)
64 | thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads)
65 | grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
66 | grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(self.n_heads, 1, 1, 2).repeat(1, self.n_levels, self.n_points, 1)
67 | for i in range(self.n_points):
68 | grid_init[:, :, i, :] *= i + 1
69 | with torch.no_grad():
70 | self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
71 | constant_(self.attention_weights.weight.data, 0.)
72 | constant_(self.attention_weights.bias.data, 0.)
73 | xavier_uniform_(self.value_proj.weight.data)
74 | constant_(self.value_proj.bias.data, 0.)
75 | xavier_uniform_(self.output_proj.weight.data)
76 | constant_(self.output_proj.bias.data, 0.)
77 |
78 | def forward(self, query, reference_points, input_flatten, input_spatial_shapes, input_level_start_index, input_padding_mask=None):
79 | """
80 | :param query (N, Length_{query}, C)
81 | :param reference_points (N, Length_{query}, n_levels, 2), range in [0, 1], top-left (0,0), bottom-right (1, 1), including padding area
82 | or (N, Length_{query}, n_levels, 4), add additional (w, h) to form reference boxes
83 | :param input_flatten (N, \sum_{l=0}^{L-1} H_l \cdot W_l, C)
84 | :param input_spatial_shapes (n_levels, 2), [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]
85 | :param input_level_start_index (n_levels, ), [0, H_0*W_0, H_0*W_0+H_1*W_1, H_0*W_0+H_1*W_1+H_2*W_2, ..., H_0*W_0+H_1*W_1+...+H_{L-1}*W_{L-1}]
86 | :param input_padding_mask (N, \sum_{l=0}^{L-1} H_l \cdot W_l), True for padding elements, False for non-padding elements
87 |
88 | :return output (N, Length_{query}, C)
89 | """
90 | N, Len_q, _ = query.shape
91 | N, Len_in, _ = input_flatten.shape
92 | assert (input_spatial_shapes[:, 0] * input_spatial_shapes[:, 1]).sum() == Len_in
93 |
94 | value = self.value_proj(input_flatten)
95 | if input_padding_mask is not None:
96 | value = value.masked_fill(input_padding_mask[..., None], float(0))
97 | value = value.view(N, Len_in, self.n_heads, self.d_model // self.n_heads)
98 | sampling_offsets = self.sampling_offsets(query).view(N, Len_q, self.n_heads, self.n_levels, self.n_points, 2)
99 | attention_weights = self.attention_weights(query).view(N, Len_q, self.n_heads, self.n_levels * self.n_points)
100 | attention_weights = F.softmax(attention_weights, -1).view(N, Len_q, self.n_heads, self.n_levels, self.n_points)
101 | # N, Len_q, n_heads, n_levels, n_points, 2
102 | if reference_points.shape[-1] == 2:
103 | offset_normalizer = torch.stack([input_spatial_shapes[..., 1], input_spatial_shapes[..., 0]], -1)
104 | # print("shape122", offset_normalizer.shape)
105 | # print(sampling_offsets.shape)
106 | sampling_locations = reference_points[:, :, None, :, None, :] \
107 | + sampling_offsets / offset_normalizer[None, None, None, :, None, :]
108 | elif reference_points.shape[-1] == 4:
109 | sampling_locations = reference_points[:, :, None, :, None, :2] \
110 | + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
111 | else:
112 | raise ValueError(
113 | 'Last dim of reference_points must be 2 or 4, but get {} instead.'.format(reference_points.shape[-1]))
114 | output = MSDeformAttnFunction.apply(
115 | value, input_spatial_shapes, input_level_start_index, sampling_locations, attention_weights, self.im2col_step)
116 | output = self.output_proj(output)
117 | return output
118 |
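
A minimal forward-pass sketch of MSDeformAttn with dummy multi-scale features (requires the compiled CUDA extension and a GPU; all sizes are illustrative). The spatial_shapes / level_start_index construction follows the convention used by the deformable transformer, which flattens and concatenates the per-level feature maps.

import torch
from models.ops.modules.ms_deform_attn import MSDeformAttn

attn = MSDeformAttn(d_model=256, n_levels=2, n_heads=8, n_points=4).cuda()

# Two dummy feature levels of sizes 8x8 and 4x4, flattened and concatenated.
spatial_shapes = torch.as_tensor([[8, 8], [4, 4]], dtype=torch.long, device='cuda')
level_start_index = torch.cat((spatial_shapes.new_zeros((1,)),
                               spatial_shapes.prod(1).cumsum(0)[:-1]))
len_in = int(spatial_shapes.prod(1).sum())                  # 80 flattened key locations

query = torch.rand(2, 100, 256, device='cuda')              # (N, Len_q, C)
input_flatten = torch.rand(2, len_in, 256, device='cuda')   # (N, Len_in, C)
reference_points = torch.rand(2, 100, 2, 2, device='cuda')  # (N, Len_q, n_levels, 2), in [0, 1]

out = attn(query, reference_points, input_flatten, spatial_shapes, level_start_index)
print(out.shape)   # torch.Size([2, 100, 256])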
--------------------------------------------------------------------------------
/models/ops/setup.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------------------------------
6 | # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
7 | # ------------------------------------------------------------------------------------------------
8 |
9 | import os
10 | import glob
11 |
12 | import torch
13 |
14 | from torch.utils.cpp_extension import CUDA_HOME
15 | from torch.utils.cpp_extension import CppExtension
16 | from torch.utils.cpp_extension import CUDAExtension
17 |
18 | from setuptools import find_packages
19 | from setuptools import setup
20 |
21 | requirements = ["torch", "torchvision"]
22 |
23 | def get_extensions():
24 | this_dir = os.path.dirname(os.path.abspath(__file__))
25 | extensions_dir = os.path.join(this_dir, "src")
26 |
27 | main_file = glob.glob(os.path.join(extensions_dir, "*.cpp"))
28 | source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp"))
29 | source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu"))
30 |
31 | sources = main_file + source_cpu
32 | extension = CppExtension
33 | extra_compile_args = {"cxx": []}
34 | define_macros = []
35 |
36 | if torch.cuda.is_available() and CUDA_HOME is not None:
37 | extension = CUDAExtension
38 | sources += source_cuda
39 | define_macros += [("WITH_CUDA", None)]
40 | extra_compile_args["nvcc"] = [
41 | "-DCUDA_HAS_FP16=1",
42 | "-D__CUDA_NO_HALF_OPERATORS__",
43 | "-D__CUDA_NO_HALF_CONVERSIONS__",
44 | "-D__CUDA_NO_HALF2_OPERATORS__",
45 | ]
46 | else:
47 |         raise NotImplementedError('CUDA is not available')
48 |
49 | sources = [os.path.join(extensions_dir, s) for s in sources]
50 | include_dirs = [extensions_dir]
51 | ext_modules = [
52 | extension(
53 | "MultiScaleDeformableAttention",
54 | sources,
55 | include_dirs=include_dirs,
56 | define_macros=define_macros,
57 | extra_compile_args=extra_compile_args,
58 | )
59 | ]
60 | return ext_modules
61 |
62 | setup(
63 | name="MultiScaleDeformableAttention",
64 | version="1.0",
65 | author="Weijie Su",
66 | url="https://github.com/fundamentalvision/Deformable-DETR",
67 | description="PyTorch Wrapper for CUDA Functions of Multi-Scale Deformable Attention",
68 | packages=find_packages(exclude=("configs", "tests",)),
69 | ext_modules=get_extensions(),
70 | cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
71 | )
72 |
--------------------------------------------------------------------------------
/models/ops/src/cpu/ms_deform_attn_cpu.cpp:
--------------------------------------------------------------------------------
1 | /*!
2 | **************************************************************************************************
3 | * Deformable DETR
4 | * Copyright (c) 2020 SenseTime. All Rights Reserved.
5 | * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6 | **************************************************************************************************
7 | * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8 | **************************************************************************************************
9 | */
10 |
11 | #include <vector>
12 |
13 | #include <ATen/ATen.h>
14 | #include <ATen/cuda/CUDAContext.h>
15 |
16 |
17 | at::Tensor
18 | ms_deform_attn_cpu_forward(
19 | const at::Tensor &value,
20 | const at::Tensor &spatial_shapes,
21 | const at::Tensor &level_start_index,
22 | const at::Tensor &sampling_loc,
23 | const at::Tensor &attn_weight,
24 | const int im2col_step)
25 | {
26 |     AT_ERROR("Not implemented on CPU");
27 | }
28 |
29 | std::vector<at::Tensor>
30 | ms_deform_attn_cpu_backward(
31 | const at::Tensor &value,
32 | const at::Tensor &spatial_shapes,
33 | const at::Tensor &level_start_index,
34 | const at::Tensor &sampling_loc,
35 | const at::Tensor &attn_weight,
36 | const at::Tensor &grad_output,
37 | const int im2col_step)
38 | {
39 |     AT_ERROR("Not implemented on CPU");
40 | }
41 |
42 |
--------------------------------------------------------------------------------
/models/ops/src/cpu/ms_deform_attn_cpu.h:
--------------------------------------------------------------------------------
1 | /*!
2 | **************************************************************************************************
3 | * Deformable DETR
4 | * Copyright (c) 2020 SenseTime. All Rights Reserved.
5 | * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6 | **************************************************************************************************
7 | * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8 | **************************************************************************************************
9 | */
10 |
11 | #pragma once
12 | #include <torch/extension.h>
13 |
14 | at::Tensor
15 | ms_deform_attn_cpu_forward(
16 | const at::Tensor &value,
17 | const at::Tensor &spatial_shapes,
18 | const at::Tensor &level_start_index,
19 | const at::Tensor &sampling_loc,
20 | const at::Tensor &attn_weight,
21 | const int im2col_step);
22 |
23 | std::vector<at::Tensor>
24 | ms_deform_attn_cpu_backward(
25 | const at::Tensor &value,
26 | const at::Tensor &spatial_shapes,
27 | const at::Tensor &level_start_index,
28 | const at::Tensor &sampling_loc,
29 | const at::Tensor &attn_weight,
30 | const at::Tensor &grad_output,
31 | const int im2col_step);
32 |
33 |
34 |
--------------------------------------------------------------------------------
/models/ops/src/cuda/ms_deform_attn_cuda.cu:
--------------------------------------------------------------------------------
1 | /*!
2 | **************************************************************************************************
3 | * Deformable DETR
4 | * Copyright (c) 2020 SenseTime. All Rights Reserved.
5 | * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6 | **************************************************************************************************
7 | * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8 | **************************************************************************************************
9 | */
10 |
11 | #include <vector>
12 | #include "cuda/ms_deform_im2col_cuda.cuh"
13 |
14 | #include <ATen/ATen.h>
15 | #include <ATen/cuda/CUDAContext.h>
16 | #include <cuda.h>
17 | #include <cuda_runtime.h>
18 |
19 |
20 | at::Tensor ms_deform_attn_cuda_forward(
21 | const at::Tensor &value,
22 | const at::Tensor &spatial_shapes,
23 | const at::Tensor &level_start_index,
24 | const at::Tensor &sampling_loc,
25 | const at::Tensor &attn_weight,
26 | const int im2col_step)
27 | {
28 | AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous");
29 | AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous");
30 | AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous");
31 | AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous");
32 | AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous");
33 |
34 | AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor");
35 | AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor");
36 | AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor");
37 | AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor");
38 | AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor");
39 |
40 | const int batch = value.size(0);
41 | const int spatial_size = value.size(1);
42 | const int num_heads = value.size(2);
43 | const int channels = value.size(3);
44 |
45 | const int num_levels = spatial_shapes.size(0);
46 |
47 | const int num_query = sampling_loc.size(1);
48 | const int num_point = sampling_loc.size(4);
49 |
50 | const int im2col_step_ = std::min(batch, im2col_step);
51 |
52 | AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must be divisible by im2col_step(%d)", batch, im2col_step_);
53 |
54 | auto output = at::zeros({batch, num_query, num_heads, channels}, value.options());
55 |
56 | const int batch_n = im2col_step_;
57 | auto output_n = output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels});
58 | auto per_value_size = spatial_size * num_heads * channels;
59 | auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2;
60 | auto per_attn_weight_size = num_query * num_heads * num_levels * num_point;
61 | for (int n = 0; n < batch/im2col_step_; ++n)
62 | {
63 | auto columns = output_n.select(0, n);
64 | AT_DISPATCH_FLOATING_TYPES(value.type(), "ms_deform_attn_forward_cuda", ([&] {
65 | ms_deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(),
66 | value.data<scalar_t>() + n * im2col_step_ * per_value_size,
67 | spatial_shapes.data<int64_t>(),
68 | level_start_index.data<int64_t>(),
69 | sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,
70 | attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size,
71 | batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point,
72 | columns.data<scalar_t>());
73 |
74 | }));
75 | }
76 |
77 | output = output.view({batch, num_query, num_heads*channels});
78 |
79 | return output;
80 | }
81 |
82 |
83 | std::vector<at::Tensor> ms_deform_attn_cuda_backward(
84 | const at::Tensor &value,
85 | const at::Tensor &spatial_shapes,
86 | const at::Tensor &level_start_index,
87 | const at::Tensor &sampling_loc,
88 | const at::Tensor &attn_weight,
89 | const at::Tensor &grad_output,
90 | const int im2col_step)
91 | {
92 |
93 | AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous");
94 | AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous");
95 | AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous");
96 | AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous");
97 | AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous");
98 | AT_ASSERTM(grad_output.is_contiguous(), "grad_output tensor has to be contiguous");
99 |
100 | AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor");
101 | AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor");
102 | AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor");
103 | AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor");
104 | AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor");
105 | AT_ASSERTM(grad_output.type().is_cuda(), "grad_output must be a CUDA tensor");
106 |
107 | const int batch = value.size(0);
108 | const int spatial_size = value.size(1);
109 | const int num_heads = value.size(2);
110 | const int channels = value.size(3);
111 |
112 | const int num_levels = spatial_shapes.size(0);
113 |
114 | const int num_query = sampling_loc.size(1);
115 | const int num_point = sampling_loc.size(4);
116 |
117 | const int im2col_step_ = std::min(batch, im2col_step);
118 |
119 | AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must be divisible by im2col_step(%d)", batch, im2col_step_);
120 |
121 | auto grad_value = at::zeros_like(value);
122 | auto grad_sampling_loc = at::zeros_like(sampling_loc);
123 | auto grad_attn_weight = at::zeros_like(attn_weight);
124 |
125 | const int batch_n = im2col_step_;
126 | auto per_value_size = spatial_size * num_heads * channels;
127 | auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2;
128 | auto per_attn_weight_size = num_query * num_heads * num_levels * num_point;
129 | auto grad_output_n = grad_output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels});
130 |
131 | for (int n = 0; n < batch/im2col_step_; ++n)
132 | {
133 | auto grad_output_g = grad_output_n.select(0, n);
134 | AT_DISPATCH_FLOATING_TYPES(value.type(), "ms_deform_attn_backward_cuda", ([&] {
135 | ms_deformable_col2im_cuda(at::cuda::getCurrentCUDAStream(),
136 | grad_output_g.data<scalar_t>(),
137 | value.data<scalar_t>() + n * im2col_step_ * per_value_size,
138 | spatial_shapes.data<int64_t>(),
139 | level_start_index.data<int64_t>(),
140 | sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,
141 | attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size,
142 | batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point,
143 | grad_value.data<scalar_t>() + n * im2col_step_ * per_value_size,
144 | grad_sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,
145 | grad_attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size);
146 |
147 | }));
148 | }
149 |
150 | return {
151 | grad_value, grad_sampling_loc, grad_attn_weight
152 | };
153 | }
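
The wrapper above derives every shape parameter from its tensor arguments, so it is easiest to read with the expected layouts in mind. Below is a minimal Python sketch of that bookkeeping; the sizes are illustrative and the variable names are mine, not part of the extension API.

    # Shape bookkeeping mirrored from ms_deform_attn_cuda_forward above.
    import torch

    N, M, D = 2, 8, 32        # batch, attention heads, channels per head
    Lq, L, P = 300, 4, 4      # queries, feature levels, sampling points per level

    spatial_shapes = torch.as_tensor([(64, 64), (32, 32), (16, 16), (8, 8)], dtype=torch.long)
    level_start_index = torch.cat((spatial_shapes.new_zeros((1,)),
                                   spatial_shapes.prod(1).cumsum(0)[:-1]))
    S = int(spatial_shapes.prod(1).sum())          # total spatial positions across levels

    value = torch.rand(N, S, M, D)                 # value.size(0..3) in the wrapper
    sampling_loc = torch.rand(N, Lq, M, L, P, 2)   # num_query = size(1), num_point = size(4)
    attn_weight = torch.rand(N, Lq, M, L, P)

    im2col_step = min(N, 64)                       # the wrapper asserts batch % im2col_step_ == 0
    assert N % im2col_step == 0
    # The kernel fills an (N, Lq, M, D) buffer, which the wrapper reshapes to (N, Lq, M*D).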
--------------------------------------------------------------------------------
/models/ops/src/cuda/ms_deform_attn_cuda.h:
--------------------------------------------------------------------------------
1 | /*!
2 | **************************************************************************************************
3 | * Deformable DETR
4 | * Copyright (c) 2020 SenseTime. All Rights Reserved.
5 | * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6 | **************************************************************************************************
7 | * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8 | **************************************************************************************************
9 | */
10 |
11 | #pragma once
12 | #include <torch/extension.h>
13 |
14 | at::Tensor ms_deform_attn_cuda_forward(
15 | const at::Tensor &value,
16 | const at::Tensor &spatial_shapes,
17 | const at::Tensor &level_start_index,
18 | const at::Tensor &sampling_loc,
19 | const at::Tensor &attn_weight,
20 | const int im2col_step);
21 |
22 | std::vector<at::Tensor> ms_deform_attn_cuda_backward(
23 | const at::Tensor &value,
24 | const at::Tensor &spatial_shapes,
25 | const at::Tensor &level_start_index,
26 | const at::Tensor &sampling_loc,
27 | const at::Tensor &attn_weight,
28 | const at::Tensor &grad_output,
29 | const int im2col_step);
30 |
31 |
--------------------------------------------------------------------------------
/models/ops/src/ms_deform_attn.h:
--------------------------------------------------------------------------------
1 | /*!
2 | **************************************************************************************************
3 | * Deformable DETR
4 | * Copyright (c) 2020 SenseTime. All Rights Reserved.
5 | * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6 | **************************************************************************************************
7 | * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8 | **************************************************************************************************
9 | */
10 |
11 | #pragma once
12 |
13 | #include "cpu/ms_deform_attn_cpu.h"
14 |
15 | #ifdef WITH_CUDA
16 | #include "cuda/ms_deform_attn_cuda.h"
17 | #endif
18 |
19 |
20 | at::Tensor
21 | ms_deform_attn_forward(
22 | const at::Tensor &value,
23 | const at::Tensor &spatial_shapes,
24 | const at::Tensor &level_start_index,
25 | const at::Tensor &sampling_loc,
26 | const at::Tensor &attn_weight,
27 | const int im2col_step)
28 | {
29 | if (value.type().is_cuda())
30 | {
31 | #ifdef WITH_CUDA
32 | return ms_deform_attn_cuda_forward(
33 | value, spatial_shapes, level_start_index, sampling_loc, attn_weight, im2col_step);
34 | #else
35 | AT_ERROR("Not compiled with GPU support");
36 | #endif
37 | }
38 | AT_ERROR("Not implemented on the CPU");
39 | }
40 |
41 | std::vector<at::Tensor>
42 | ms_deform_attn_backward(
43 | const at::Tensor &value,
44 | const at::Tensor &spatial_shapes,
45 | const at::Tensor &level_start_index,
46 | const at::Tensor &sampling_loc,
47 | const at::Tensor &attn_weight,
48 | const at::Tensor &grad_output,
49 | const int im2col_step)
50 | {
51 | if (value.type().is_cuda())
52 | {
53 | #ifdef WITH_CUDA
54 | return ms_deform_attn_cuda_backward(
55 | value, spatial_shapes, level_start_index, sampling_loc, attn_weight, grad_output, im2col_step);
56 | #else
57 | AT_ERROR("Not compiled with GPU support");
58 | #endif
59 | }
60 | AT_ERROR("Not implemented on the CPU");
61 | }
62 |
63 |
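
The dispatcher above only routes to the CUDA kernels and raises for CPU tensors, so callers are expected to move inputs to the GPU first. A tiny illustrative Python-side guard (not part of the repo) that mirrors this contract:

    import torch

    def ensure_cuda_inputs(*tensors):
        # Mirrors the C++ dispatch: the op has no CPU kernel, so fail early
        # with a clearer message if any input is still on the CPU.
        for t in tensors:
            if not t.is_cuda:
                raise RuntimeError("ms_deform_attn has no CPU implementation; "
                                   "move inputs to a CUDA device first")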
--------------------------------------------------------------------------------
/models/ops/src/vision.cpp:
--------------------------------------------------------------------------------
1 | /*!
2 | **************************************************************************************************
3 | * Deformable DETR
4 | * Copyright (c) 2020 SenseTime. All Rights Reserved.
5 | * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6 | **************************************************************************************************
7 | * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8 | **************************************************************************************************
9 | */
10 |
11 | #include "ms_deform_attn.h"
12 |
13 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
14 | m.def("ms_deform_attn_forward", &ms_deform_attn_forward, "ms_deform_attn_forward");
15 | m.def("ms_deform_attn_backward", &ms_deform_attn_backward, "ms_deform_attn_backward");
16 | }
17 |
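
PYBIND11_MODULE exposes the two ops under whatever TORCH_EXTENSION_NAME the build sets. Assuming the extension is built under the name MultiScaleDeformableAttention (the name used by upstream Deformable DETR's setup.py), a direct call would look roughly like this, with the shapes borrowed from test.py below:

    import torch
    import MultiScaleDeformableAttention as MSDA   # assumed extension name

    N, M, D = 1, 2, 2
    Lq, L, P = 2, 2, 2
    shapes = torch.as_tensor([(6, 4), (3, 2)], dtype=torch.long).cuda()
    level_start_index = torch.cat((shapes.new_zeros((1,)), shapes.prod(1).cumsum(0)[:-1]))
    S = int(shapes.prod(1).sum())

    value = torch.rand(N, S, M, D).cuda()
    sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
    attention_weights = torch.rand(N, Lq, M, L, P).cuda()
    attention_weights /= attention_weights.sum(-1, keepdim=True).sum(-2, keepdim=True)

    out = MSDA.ms_deform_attn_forward(
        value, shapes, level_start_index, sampling_locations, attention_weights, 2)
    print(out.shape)   # torch.Size([1, 2, 4]), i.e. (N, Lq, M*D)

In practice the model goes through MSDeformAttnFunction from functions/ms_deform_attn_func.py (imported by test.py below), which wraps these raw bindings with autograd support.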
--------------------------------------------------------------------------------
/models/ops/test.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------------------------------
6 | # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
7 | # ------------------------------------------------------------------------------------------------
8 |
9 | from __future__ import absolute_import
10 | from __future__ import print_function
11 | from __future__ import division
12 |
13 | import time
14 | import torch
15 | import torch.nn as nn
16 | from torch.autograd import gradcheck
17 |
18 | from functions.ms_deform_attn_func import MSDeformAttnFunction, ms_deform_attn_core_pytorch
19 |
20 |
21 | N, M, D = 1, 2, 2
22 | Lq, L, P = 2, 2, 2
23 | shapes = torch.as_tensor([(6, 4), (3, 2)], dtype=torch.long).cuda()
24 | level_start_index = torch.cat((shapes.new_zeros((1, )), shapes.prod(1).cumsum(0)[:-1]))
25 | S = sum([(H*W).item() for H, W in shapes])
26 |
27 |
28 | torch.manual_seed(3)
29 |
30 |
31 | @torch.no_grad()
32 | def check_forward_equal_with_pytorch_double():
33 | value = torch.rand(N, S, M, D).cuda() * 0.01
34 | sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
35 | attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5
36 | attention_weights /= attention_weights.sum(-1, keepdim=True).sum(-2, keepdim=True)
37 | im2col_step = 2
38 | output_pytorch = ms_deform_attn_core_pytorch(value.double(), shapes, sampling_locations.double(), attention_weights.double()).detach().cpu()
39 | output_cuda = MSDeformAttnFunction.apply(value.double(), shapes, level_start_index, sampling_locations.double(), attention_weights.double(), im2col_step).detach().cpu()
40 | fwdok = torch.allclose(output_cuda, output_pytorch)
41 | max_abs_err = (output_cuda - output_pytorch).abs().max()
42 | max_rel_err = ((output_cuda - output_pytorch).abs() / output_pytorch.abs()).max()
43 |
44 | print(f'* {fwdok} check_forward_equal_with_pytorch_double: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}')
45 |
46 |
47 | @torch.no_grad()
48 | def check_forward_equal_with_pytorch_float():
49 | value = torch.rand(N, S, M, D).cuda() * 0.01
50 | sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
51 | attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5
52 | attention_weights /= attention_weights.sum(-1, keepdim=True).sum(-2, keepdim=True)
53 | im2col_step = 2
54 | output_pytorch = ms_deform_attn_core_pytorch(value, shapes, sampling_locations, attention_weights).detach().cpu()
55 | output_cuda = MSDeformAttnFunction.apply(value, shapes, level_start_index, sampling_locations, attention_weights, im2col_step).detach().cpu()
56 | fwdok = torch.allclose(output_cuda, output_pytorch, rtol=1e-2, atol=1e-3)
57 | max_abs_err = (output_cuda - output_pytorch).abs().max()
58 | max_rel_err = ((output_cuda - output_pytorch).abs() / output_pytorch.abs()).max()
59 |
60 | print(f'* {fwdok} check_forward_equal_with_pytorch_float: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}')
61 |
62 |
63 | def check_gradient_numerical(channels=4, grad_value=True, grad_sampling_loc=True, grad_attn_weight=True):
64 |
65 | value = torch.rand(N, S, M, channels).cuda() * 0.01
66 | sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
67 | attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5
68 | attention_weights /= attention_weights.sum(-1, keepdim=True).sum(-2, keepdim=True)
69 | im2col_step = 2
70 | func = MSDeformAttnFunction.apply
71 |
72 | value.requires_grad = grad_value
73 | sampling_locations.requires_grad = grad_sampling_loc
74 | attention_weights.requires_grad = grad_attn_weight
75 |
76 | gradok = gradcheck(func, (value.double(), shapes, level_start_index, sampling_locations.double(), attention_weights.double(), im2col_step))
77 |
78 | print(f'* {gradok} check_gradient_numerical(D={channels})')
79 |
80 |
81 | if __name__ == '__main__':
82 | check_forward_equal_with_pytorch_double()
83 | check_forward_equal_with_pytorch_float()
84 |
85 | for channels in [30, 32, 64, 71, 1025, 2048, 3096]:
86 | check_gradient_numerical(channels, True, True, True)
87 |
88 |
89 |
90 |
--------------------------------------------------------------------------------
/models/position_encoding.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------
6 | # Modified from DETR (https://github.com/facebookresearch/detr)
7 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8 | # ------------------------------------------------------------------------
9 |
10 | """
11 | Various positional encodings for the transformer.
12 | """
13 | import math
14 | import torch
15 | from torch import nn
16 |
17 | from util.misc import NestedTensor
18 |
19 |
20 | class PositionEmbeddingSine(nn.Module):
21 | """
22 | This is a more standard version of the position embedding, very similar to the one
23 | used by the Attention is all you need paper, generalized to work on images.
24 | """
25 | def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
26 | super().__init__()
27 | self.num_pos_feats = num_pos_feats
28 | self.temperature = temperature
29 | self.normalize = normalize
30 | if scale is not None and normalize is False:
31 | raise ValueError("normalize should be True if scale is passed")
32 | if scale is None:
33 | scale = 2 * math.pi
34 | self.scale = scale
35 |
36 | def forward(self, tensor_list: NestedTensor):
37 | x = tensor_list.tensors
38 | mask = tensor_list.mask
39 | assert mask is not None
40 | not_mask = ~mask
41 | y_embed = not_mask.cumsum(1, dtype=torch.float32)
42 | x_embed = not_mask.cumsum(2, dtype=torch.float32)
43 | if self.normalize:
44 | eps = 1e-6
45 | y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + eps) * self.scale
46 | x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + eps) * self.scale
47 |
48 | dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
49 | dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
50 |
51 | pos_x = x_embed[:, :, :, None] / dim_t
52 | pos_y = y_embed[:, :, :, None] / dim_t
53 | pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
54 | pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
55 | pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
56 | return pos
57 |
58 |
59 | class PositionEmbeddingLearned(nn.Module):
60 | """
61 | Absolute pos embedding, learned.
62 | """
63 | def __init__(self, num_pos_feats=256):
64 | super().__init__()
65 | self.row_embed = nn.Embedding(50, num_pos_feats)
66 | self.col_embed = nn.Embedding(50, num_pos_feats)
67 | self.reset_parameters()
68 |
69 | def reset_parameters(self):
70 | nn.init.uniform_(self.row_embed.weight)
71 | nn.init.uniform_(self.col_embed.weight)
72 |
73 | def forward(self, tensor_list: NestedTensor):
74 | x = tensor_list.tensors
75 | h, w = x.shape[-2:]
76 | i = torch.arange(w, device=x.device)
77 | j = torch.arange(h, device=x.device)
78 | x_emb = self.col_embed(i)
79 | y_emb = self.row_embed(j)
80 | pos = torch.cat([
81 | x_emb.unsqueeze(0).repeat(h, 1, 1),
82 | y_emb.unsqueeze(1).repeat(1, w, 1),
83 | ], dim=-1).permute(2, 0, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1)
84 | return pos
85 |
86 |
87 | def build_position_encoding(args):
88 | N_steps = args.hidden_dim // 2
89 | if args.position_embedding in ('v2', 'sine'):
90 | # TODO find a better way of exposing other arguments
91 | position_embedding = PositionEmbeddingSine(N_steps, normalize=True)
92 | elif args.position_embedding in ('v3', 'learned'):
93 | position_embedding = PositionEmbeddingLearned(N_steps)
94 | else:
95 | raise ValueError(f"not supported {args.position_embedding}")
96 |
97 | return position_embedding
98 |
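
A minimal sketch of driving PositionEmbeddingSine, assuming the repo root is on PYTHONPATH; the model normally passes a util.misc.NestedTensor, approximated here by a small stand-in so the snippet is self-contained.

    import torch
    from models.position_encoding import PositionEmbeddingSine  # assumes repo root on PYTHONPATH

    class _NT:                      # stand-in for util.misc.NestedTensor
        def __init__(self, tensors, mask):
            self.tensors = tensors  # (B, C, H, W) padded feature batch
            self.mask = mask        # (B, H, W) bool, True on padded pixels

    B, C, H, W = 2, 256, 32, 40
    feats = torch.rand(B, C, H, W)
    mask = torch.zeros(B, H, W, dtype=torch.bool)
    mask[1, :, 30:] = True          # pretend the second sample is padded on the right

    pos = PositionEmbeddingSine(num_pos_feats=128, normalize=True)(_NT(feats, mask))
    print(pos.shape)                # torch.Size([2, 256, 32, 40]) = (B, 2*num_pos_feats, H, W)

build_position_encoding selects this variant for the 'sine' setting with N_steps = hidden_dim // 2, so the channel count of the returned map matches the transformer's hidden dimension.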
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | pycocotools
2 | tqdm
3 | cython
4 | scipy
5 |
--------------------------------------------------------------------------------
/tools/launch.py:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # --------------------------------------------------------------------------------------------------------------------------
6 | # Modified from https://github.com/pytorch/pytorch/blob/173f224570017b4b1a3a1a13d0bff280a54d9cd9/torch/distributed/launch.py
7 | # --------------------------------------------------------------------------------------------------------------------------
8 |
9 | r"""
10 | `torch.distributed.launch` is a module that spawns up multiple distributed
11 | training processes on each of the training nodes.
12 | The utility can be used for single-node distributed training, in which one or
13 | more processes per node will be spawned. The utility can be used for either
14 | CPU training or GPU training. If the utility is used for GPU training,
15 | each distributed process will be operating on a single GPU. This can achieve
16 | well-improved single-node training performance. It can also be used in
17 | multi-node distributed training, by spawning up multiple processes on each node
18 | for well-improved multi-node distributed training performance as well.
19 | This will especially be benefitial for systems with multiple Infiniband
20 | interfaces that have direct-GPU support, since all of them can be utilized for
21 | aggregated communication bandwidth.
22 | In both cases of single-node distributed training or multi-node distributed
23 | training, this utility will launch the given number of processes per node
24 | (``--nproc_per_node``). If used for GPU training, this number needs to be less
25 | than or equal to the number of GPUs on the current system (``nproc_per_node``),
26 | and each process will be operating on a single GPU from *GPU 0 to
27 | GPU (nproc_per_node - 1)*.
28 | **How to use this module:**
29 | 1. Single-Node multi-process distributed training
30 | ::
31 | >>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE
32 | YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3 and all other
33 | arguments of your training script)
34 | 2. Multi-Node multi-process distributed training: (e.g. two nodes)
35 | Node 1: *(IP: 192.168.1.1, and has a free port: 1234)*
36 | ::
37 | >>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE
38 | --nnodes=2 --node_rank=0 --master_addr="192.168.1.1"
39 | --master_port=1234 YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3
40 | and all other arguments of your training script)
41 | Node 2:
42 | ::
43 | >>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE
44 | --nnodes=2 --node_rank=1 --master_addr="192.168.1.1"
45 | --master_port=1234 YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3
46 | and all other arguments of your training script)
47 | 3. To look up what optional arguments this module offers:
48 | ::
49 | >>> python -m torch.distributed.launch --help
50 | **Important Notices:**
51 | 1. This utility and multi-process distributed (single-node or
52 | multi-node) GPU training currently only achieve the best performance using
53 | the NCCL distributed backend. Thus the NCCL backend is the recommended backend to
54 | use for GPU training.
55 | 2. In your training program, you must obtain the local process rank
56 | ``LOCAL_PROCESS_RANK``, which this launcher provides (see notice 5 below).
57 | If your training program uses GPUs, you should ensure that your code only
58 | runs on the GPU device of LOCAL_PROCESS_RANK. This can be done by:
59 | Parsing the local_rank argument
60 | ::
61 | >>> import argparse
62 | >>> parser = argparse.ArgumentParser()
63 | >>> parser.add_argument("--local_rank", type=int)
64 | >>> args = parser.parse_args()
65 | Set your device to the local rank using either
66 | ::
67 | >>> torch.cuda.set_device(args.local_rank) # before your code runs
68 | or
69 | ::
70 | >>> with torch.cuda.device(args.local_rank):
71 | >>> # your code to run
72 | 3. In your training program, you are supposed to call the following function
73 | at the beginning to start the distributed backend. You need to make sure that
74 | the init_method uses ``env://``, which is the only supported ``init_method``
75 | by this module.
76 | ::
77 | torch.distributed.init_process_group(backend='YOUR BACKEND',
78 | init_method='env://')
79 | 4. In your training program, you can either use regular distributed functions
80 | or use :func:`torch.nn.parallel.DistributedDataParallel` module. If your
81 | training program uses GPUs for training and you would like to use
82 | :func:`torch.nn.parallel.DistributedDataParallel` module,
83 | here is how to configure it.
84 | ::
85 | model = torch.nn.parallel.DistributedDataParallel(model,
86 | device_ids=[args.local_rank],
87 | output_device=args.local_rank)
88 | Please ensure that the ``device_ids`` argument is set to the only GPU device id
89 | that your code will be operating on. This is generally the local rank of the
90 | process. In other words, ``device_ids`` needs to be ``[args.local_rank]``,
91 | and ``output_device`` needs to be ``args.local_rank`` in order to use this
92 | utility.
93 | 5. This launcher passes ``local_rank`` to the subprocesses via the environment
94 | variable ``LOCAL_RANK`` rather than via a ``--local_rank`` command-line argument.
95 | Your training script should therefore read ``os.environ['LOCAL_RANK']``; the
96 | launcher will not append ``--local_rank`` to the command it runs.
97 | (This differs from the stock ``torch.distributed.launch``, which defaults to ``--local_rank``.)
98 | .. warning::
99 | ``local_rank`` is NOT globally unique: it is only unique per process
100 | on a machine. Thus, don't use it to decide if you should, e.g.,
101 | write to a networked filesystem. See
102 | https://github.com/pytorch/pytorch/issues/12042 for an example of
103 | how things can go wrong if you don't do this correctly.
104 | """
105 |
106 |
107 | import sys
108 | import subprocess
109 | import os
110 | import socket
111 | from argparse import ArgumentParser, REMAINDER
112 |
113 | import torch
114 |
115 |
116 | def parse_args():
117 | """
118 | Helper function parsing the command line options
119 | @retval ArgumentParser
120 | """
121 | parser = ArgumentParser(description="PyTorch distributed training launch "
122 | "helper utilty that will spawn up "
123 | "multiple distributed processes")
124 |
125 | # Optional arguments for the launch helper
126 | parser.add_argument("--nnodes", type=int, default=1,
127 | help="The number of nodes to use for distributed "
128 | "training")
129 | parser.add_argument("--node_rank", type=int, default=0,
130 | help="The rank of the node for multi-node distributed "
131 | "training")
132 | parser.add_argument("--nproc_per_node", type=int, default=1,
133 | help="The number of processes to launch on each node, "
134 | "for GPU training, this is recommended to be set "
135 | "to the number of GPUs in your system so that "
136 | "each process can be bound to a single GPU.")
137 | parser.add_argument("--master_addr", default="127.0.0.1", type=str,
138 | help="Master node (rank 0)'s address, should be either "
139 | "the IP address or the hostname of node 0, for "
140 | "single node multi-proc training, the "
141 | "--master_addr can simply be 127.0.0.1")
142 | parser.add_argument("--master_port", default=29501, type=int,
143 | help="Master node (rank 0)'s free port that needs to "
144 | "be used for communciation during distributed "
145 | "training")
146 |
147 | # positional
148 | parser.add_argument("training_script", type=str,
149 | help="The full path to the single GPU training "
150 | "program/script to be launched in parallel, "
151 | "followed by all the arguments for the "
152 | "training script")
153 |
154 | # rest from the training program
155 | parser.add_argument('training_script_args', nargs=REMAINDER)
156 | return parser.parse_args()
157 |
158 |
159 | def main():
160 | args = parse_args()
161 |
162 | # world size in terms of number of processes
163 | dist_world_size = args.nproc_per_node * args.nnodes
164 |
165 | # set PyTorch distributed related environmental variables
166 | current_env = os.environ.copy()
167 | current_env["MASTER_ADDR"] = args.master_addr
168 | current_env["MASTER_PORT"] = str(args.master_port)
169 | current_env["WORLD_SIZE"] = str(dist_world_size)
170 |
171 | processes = []
172 |
173 | for local_rank in range(0, args.nproc_per_node):
174 | # each process's rank
175 | dist_rank = args.nproc_per_node * args.node_rank + local_rank
176 | current_env["RANK"] = str(dist_rank)
177 | current_env["LOCAL_RANK"] = str(local_rank)
178 |
179 | cmd = [args.training_script] + args.training_script_args
180 |
181 | process = subprocess.Popen(cmd, env=current_env)
182 | processes.append(process)
183 |
184 | for process in processes:
185 | process.wait()
186 | if process.returncode != 0:
187 | raise subprocess.CalledProcessError(returncode=process.returncode,
188 | cmd=process.args)
189 |
190 |
191 | if __name__ == "__main__":
192 | main()
193 |
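
Since main() above exports MASTER_ADDR, MASTER_PORT, WORLD_SIZE, RANK and LOCAL_RANK but never appends --local_rank, a script started by this launcher typically reads its local rank from the environment. A minimal sketch of such a script (illustrative only; the repo's main.py has its own argument handling):

    import os
    import torch
    import torch.distributed as dist

    def main():
        local_rank = int(os.environ["LOCAL_RANK"])
        torch.cuda.set_device(local_rank)
        # env:// picks up MASTER_ADDR, MASTER_PORT, WORLD_SIZE and RANK set by the launcher.
        dist.init_process_group(backend="nccl", init_method="env://")

        model = torch.nn.Linear(10, 10).cuda()
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[local_rank], output_device=local_rank)
        # ... training loop ...

    if __name__ == "__main__":
        main()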
--------------------------------------------------------------------------------
/tools/run_dist_launch.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # ------------------------------------------------------------------------
3 | # Deformable DETR
4 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
5 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6 | # ------------------------------------------------------------------------
7 |
8 | set -x
9 |
10 | GPUS=$1
11 | RUN_COMMAND=${@:2}
12 | if [ $GPUS -lt 8 ]; then
13 | GPUS_PER_NODE=${GPUS_PER_NODE:-$GPUS}
14 | else
15 | GPUS_PER_NODE=${GPUS_PER_NODE:-8}
16 | fi
17 | MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"}
18 | MASTER_PORT=${MASTER_PORT:-"29500"}
19 | NODE_RANK=${NODE_RANK:-0}
20 |
21 | # let "NNODES=GPUS/GPUS_PER_NODE"
22 | NNODES=1
23 | CUDA_VISIBLE_DEVICES=1
24 | # export CUDA_LAUNCH_BLOCKING=0
25 | python ./tools/launch.py \
26 | --nnodes ${NNODES} \
27 | --node_rank ${NODE_RANK} \
28 | --master_addr ${MASTER_ADDR} \
29 | --master_port ${MASTER_PORT} \
30 | --nproc_per_node ${GPUS_PER_NODE} \
31 | ${RUN_COMMAND}
--------------------------------------------------------------------------------
/tools/run_dist_slurm.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # --------------------------------------------------------------------------------------------------------------------------
3 | # Deformable DETR
4 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
5 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6 | # --------------------------------------------------------------------------------------------------------------------------
7 | # Modified from https://github.com/open-mmlab/mmdetection/blob/3b53fe15d87860c6941f3dda63c0f27422da6266/tools/slurm_train.sh
8 | # --------------------------------------------------------------------------------------------------------------------------
9 |
10 | set -x
11 |
12 | PARTITION=$1
13 | JOB_NAME=$2
14 | GPUS=$3
15 | RUN_COMMAND=${@:4}
16 | if [ $GPUS -lt 8 ]; then
17 | GPUS_PER_NODE=${GPUS_PER_NODE:-$GPUS}
18 | else
19 | GPUS_PER_NODE=${GPUS_PER_NODE:-8}
20 | fi
21 | CPUS_PER_TASK=${CPUS_PER_TASK:-4}
22 | SRUN_ARGS=${SRUN_ARGS:-""}
23 |
24 | srun -p ${PARTITION} \
25 | --job-name=${JOB_NAME} \
26 | --gres=gpu:${GPUS_PER_NODE} \
27 | --ntasks=${GPUS} \
28 | --ntasks-per-node=${GPUS_PER_NODE} \
29 | --cpus-per-task=${CPUS_PER_TASK} \
30 | --kill-on-bad-exit=1 \
31 | ${SRUN_ARGS} \
32 | ${RUN_COMMAND}
33 |
34 |
--------------------------------------------------------------------------------
/util/__init__.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------
6 | # Modified from DETR (https://github.com/facebookresearch/detr)
7 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8 | # ------------------------------------------------------------------------
9 |
--------------------------------------------------------------------------------
/util/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/util/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/util/__pycache__/box_ops.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/util/__pycache__/box_ops.cpython-37.pyc
--------------------------------------------------------------------------------
/util/__pycache__/misc.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/util/__pycache__/misc.cpython-37.pyc
--------------------------------------------------------------------------------
/util/__pycache__/misc_mm.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/util/__pycache__/misc_mm.cpython-37.pyc
--------------------------------------------------------------------------------
/util/__pycache__/misc_multi.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SJTU-LuHe/TransVOD/5a4464084b166e40680b8a071d9756f847876acc/util/__pycache__/misc_multi.cpython-37.pyc
--------------------------------------------------------------------------------
/util/box_ops.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------
6 | # Modified from DETR (https://github.com/facebookresearch/detr)
7 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8 | # ------------------------------------------------------------------------
9 |
10 | """
11 | Utilities for bounding box manipulation and GIoU.
12 | """
13 | import torch
14 | from torchvision.ops.boxes import box_area
15 |
16 |
17 | def box_cxcywh_to_xyxy(x):
18 | x_c, y_c, w, h = x.unbind(-1)
19 | b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
20 | (x_c + 0.5 * w), (y_c + 0.5 * h)]
21 | return torch.stack(b, dim=-1)
22 |
23 |
24 | def box_xyxy_to_cxcywh(x):
25 | x0, y0, x1, y1 = x.unbind(-1)
26 | b = [(x0 + x1) / 2, (y0 + y1) / 2,
27 | (x1 - x0), (y1 - y0)]
28 | return torch.stack(b, dim=-1)
29 |
30 |
31 | # modified from torchvision to also return the union
32 | def box_iou(boxes1, boxes2):
33 | area1 = box_area(boxes1)
34 | area2 = box_area(boxes2)
35 |
36 | lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]
37 | rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]
38 |
39 | wh = (rb - lt).clamp(min=0) # [N,M,2]
40 | inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
41 |
42 | union = area1[:, None] + area2 - inter
43 |
44 | iou = inter / union
45 | return iou, union
46 |
47 |
48 | def generalized_box_iou(boxes1, boxes2):
49 | """
50 | Generalized IoU from https://giou.stanford.edu/
51 |
52 | The boxes should be in [x0, y0, x1, y1] format
53 |
54 | Returns a [N, M] pairwise matrix, where N = len(boxes1)
55 | and M = len(boxes2)
56 | """
57 | # degenerate boxes gives inf / nan results
58 | # so do an early check
59 | assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
60 | assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
61 | iou, union = box_iou(boxes1, boxes2)
62 |
63 | lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
64 | rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
65 |
66 | wh = (rb - lt).clamp(min=0) # [N,M,2]
67 | area = wh[:, :, 0] * wh[:, :, 1]
68 |
69 | return iou - (area - union) / area
70 |
71 |
72 | def masks_to_boxes(masks):
73 | """Compute the bounding boxes around the provided masks
74 |
75 | The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.
76 |
77 | Returns a [N, 4] tensors, with the boxes in xyxy format
78 | """
79 | if masks.numel() == 0:
80 | return torch.zeros((0, 4), device=masks.device)
81 |
82 | h, w = masks.shape[-2:]
83 |
84 | y = torch.arange(0, h, dtype=torch.float)
85 | x = torch.arange(0, w, dtype=torch.float)
86 | y, x = torch.meshgrid(y, x)
87 |
88 | x_mask = (masks * x.unsqueeze(0))
89 | x_max = x_mask.flatten(1).max(-1)[0]
90 | x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
91 |
92 | y_mask = (masks * y.unsqueeze(0))
93 | y_max = y_mask.flatten(1).max(-1)[0]
94 | y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
95 |
96 | return torch.stack([x_min, y_min, x_max, y_max], 1)
97 |
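
A small worked example of box_iou and generalized_box_iou on xyxy boxes; the numbers are easy to verify by hand and show how GIoU drops below IoU (and below zero) as the enclosing box picks up empty space.

    import torch
    from util.box_ops import box_iou, generalized_box_iou  # assumes repo root on PYTHONPATH

    boxes1 = torch.tensor([[0., 0., 2., 2.]])
    boxes2 = torch.tensor([[1., 1., 3., 3.],    # partial overlap
                           [3., 3., 4., 4.]])   # disjoint

    iou, union = box_iou(boxes1, boxes2)
    giou = generalized_box_iou(boxes1, boxes2)
    print(iou)    # tensor([[0.1429, 0.0000]])   intersection 1 over union 7, then no overlap
    print(giou)   # tensor([[-0.0794, -0.6875]]) IoU minus (enclosing area - union) / enclosing area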
--------------------------------------------------------------------------------
/util/plot_utils.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------
6 | # Modified from DETR (https://github.com/facebookresearch/detr)
7 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8 | # ------------------------------------------------------------------------
9 |
10 | """
11 | Plotting utilities to visualize training logs.
12 | """
13 | import torch
14 | import pandas as pd
15 | import seaborn as sns
16 | import matplotlib.pyplot as plt
17 |
18 | from pathlib import Path, PurePath
19 |
20 |
21 | def plot_logs(logs, fields=('class_error', 'loss_bbox_unscaled', 'mAP'), ewm_col=0, log_name='log.txt'):
22 | '''
23 | Function to plot specific fields from training log(s). Plots both training and test results.
24 |
25 | :: Inputs - logs = list containing Path objects, each pointing to individual dir with a log file
26 | - fields = which results to plot from each log file - plots both training and test for each field.
27 | - ewm_col = optional, which column to use as the exponential weighted smoothing of the plots
28 | - log_name = optional, name of log file if different than default 'log.txt'.
29 |
30 | :: Outputs - matplotlib plots of results in fields, color coded for each log file.
31 | - solid lines are training results, dashed lines are test results.
32 |
33 | '''
34 | func_name = "plot_utils.py::plot_logs"
35 |
36 | # verify logs is a list of Paths (list[Paths]) or single Pathlib object Path,
37 | # convert single Path to list to avoid 'not iterable' error
38 |
39 | if not isinstance(logs, list):
40 | if isinstance(logs, PurePath):
41 | logs = [logs]
42 | print(f"{func_name} info: logs param expects a list argument, converted to list[Path].")
43 | else:
44 | raise ValueError(f"{func_name} - invalid argument for logs parameter.\n \
45 | Expect list[Path] or single Path obj, received {type(logs)}")
46 |
47 | # verify valid dir(s) and that every item in list is Path object
48 | for i, dir in enumerate(logs):
49 | if not isinstance(dir, PurePath):
50 | raise ValueError(f"{func_name} - non-Path object in logs argument of {type(dir)}: \n{dir}")
51 | if dir.exists():
52 | continue
53 | raise ValueError(f"{func_name} - invalid directory in logs argument:\n{dir}")
54 |
55 | # load log file(s) and plot
56 | dfs = [pd.read_json(Path(p) / log_name, lines=True) for p in logs]
57 |
58 | fig, axs = plt.subplots(ncols=len(fields), figsize=(16, 5))
59 |
60 | for df, color in zip(dfs, sns.color_palette(n_colors=len(logs))):
61 | for j, field in enumerate(fields):
62 | if field == 'mAP':
63 | coco_eval = pd.DataFrame(pd.np.stack(df.test_coco_eval.dropna().values)[:, 1]).ewm(com=ewm_col).mean()
64 | axs[j].plot(coco_eval, c=color)
65 | else:
66 | df.interpolate().ewm(com=ewm_col).mean().plot(
67 | y=[f'train_{field}', f'test_{field}'],
68 | ax=axs[j],
69 | color=[color] * 2,
70 | style=['-', '--']
71 | )
72 | for ax, field in zip(axs, fields):
73 | ax.legend([Path(p).name for p in logs])
74 | ax.set_title(field)
75 |
76 |
77 | def plot_precision_recall(files, naming_scheme='iter'):
78 | if naming_scheme == 'exp_id':
79 | # name becomes exp_id
80 | names = [f.parts[-3] for f in files]
81 | elif naming_scheme == 'iter':
82 | names = [f.stem for f in files]
83 | else:
84 | raise ValueError(f'not supported {naming_scheme}')
85 | fig, axs = plt.subplots(ncols=2, figsize=(16, 5))
86 | for f, color, name in zip(files, sns.color_palette("Blues", n_colors=len(files)), names):
87 | data = torch.load(f)
88 | # precision is n_iou, n_points, n_cat, n_area, max_det
89 | precision = data['precision']
90 | recall = data['params'].recThrs
91 | scores = data['scores']
92 | # take precision for all classes, all areas and 100 detections
93 | precision = precision[0, :, :, 0, -1].mean(1)
94 | scores = scores[0, :, :, 0, -1].mean(1)
95 | prec = precision.mean()
96 | rec = data['recall'][0, :, 0, -1].mean()
97 | print(f'{naming_scheme} {name}: mAP@50={prec * 100: 05.1f}, ' +
98 | f'score={scores.mean():0.3f}, ' +
99 | f'f1={2 * prec * rec / (prec + rec + 1e-8):0.3f}'
100 | )
101 | axs[0].plot(recall, precision, c=color)
102 | axs[1].plot(recall, scores, c=color)
103 |
104 | axs[0].set_title('Precision / Recall')
105 | axs[0].legend(names)
106 | axs[1].set_title('Scores / Recall')
107 | axs[1].legend(names)
108 | return fig, axs
109 |
110 |
111 |
112 |
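
A hedged usage sketch: plot_logs expects a list of Path objects, each pointing at an output directory containing a log.txt in the JSON-lines format read by pd.read_json(..., lines=True) above. The directory names below are illustrative, not paths that ship with the repo.

    from pathlib import Path
    import matplotlib.pyplot as plt
    from util.plot_utils import plot_logs

    log_dirs = [Path('exps/r50_single'), Path('exps/r50_multi')]   # hypothetical output dirs
    plot_logs(log_dirs, fields=('class_error', 'loss_bbox_unscaled', 'mAP'), ewm_col=0)
    plt.show()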
--------------------------------------------------------------------------------