├── README.md ├── TrackEval ├── LICENSE ├── Readme.md ├── docs │ ├── BDD100k-format.txt │ ├── DAVIS-format.txt │ ├── How_To │ │ └── Add_a_new_metric.md │ ├── KITTI-format.txt │ ├── MOTChallenge-Official │ │ └── Readme.md │ ├── MOTChallenge-format.txt │ ├── MOTS-format.txt │ ├── OpenWorldTracking-Official │ │ └── Readme.md │ ├── RobMOTS-Official │ │ └── Readme.md │ ├── TAO-format.txt │ └── YouTube-VIS-format.txt ├── minimum_requirements.txt ├── pyproject.toml ├── requirements.txt ├── scripts │ ├── comparison_plots.py │ ├── eval_mot.py │ ├── run_bdd.py │ ├── run_burst.py │ ├── run_burst_ow.py │ ├── run_davis.py │ ├── run_headtracking_challenge.py │ ├── run_kitti.py │ ├── run_kitti_mots.py │ ├── run_mot_challenge.py │ ├── run_mots_challenge.py │ ├── run_person_path_22.py │ ├── run_rob_mots.py │ ├── run_tao.py │ ├── run_tao_ow.py │ └── run_youtube_vis.py ├── setup.cfg ├── setup.py ├── tests │ ├── test_all_quick.py │ ├── test_davis.py │ ├── test_metrics.py │ ├── test_mot17.py │ └── test_mots.py └── trackeval │ ├── __init__.py │ ├── __pycache__ │ ├── __init__.cpython-37.pyc │ ├── __init__.cpython-38.pyc │ ├── _timing.cpython-37.pyc │ ├── _timing.cpython-38.pyc │ ├── eval.cpython-37.pyc │ ├── eval.cpython-38.pyc │ ├── plotting.cpython-37.pyc │ ├── plotting.cpython-38.pyc │ ├── utils.cpython-37.pyc │ └── utils.cpython-38.pyc │ ├── _timing.py │ ├── baselines │ ├── __init__.py │ ├── baseline_utils.py │ ├── non_overlap.py │ ├── pascal_colormap.py │ ├── stp.py │ ├── thresholder.py │ └── vizualize.py │ ├── datasets │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-37.pyc │ │ ├── __init__.cpython-38.pyc │ │ ├── _base_dataset.cpython-37.pyc │ │ ├── _base_dataset.cpython-38.pyc │ │ ├── bdd100k.cpython-37.pyc │ │ ├── bdd100k.cpython-38.pyc │ │ ├── burst.cpython-37.pyc │ │ ├── burst.cpython-38.pyc │ │ ├── burst_ow.cpython-37.pyc │ │ ├── burst_ow.cpython-38.pyc │ │ ├── davis.cpython-37.pyc │ │ ├── davis.cpython-38.pyc │ │ ├── head_tracking_challenge.cpython-37.pyc │ │ ├── head_tracking_challenge.cpython-38.pyc │ │ ├── kitti_2d_box.cpython-37.pyc │ │ ├── kitti_2d_box.cpython-38.pyc │ │ ├── kitti_mots.cpython-37.pyc │ │ ├── kitti_mots.cpython-38.pyc │ │ ├── mot_challenge_2d_box.cpython-37.pyc │ │ ├── mot_challenge_2d_box.cpython-38.pyc │ │ ├── mots_challenge.cpython-37.pyc │ │ ├── mots_challenge.cpython-38.pyc │ │ ├── person_path_22.cpython-37.pyc │ │ ├── person_path_22.cpython-38.pyc │ │ ├── rob_mots.cpython-37.pyc │ │ ├── rob_mots.cpython-38.pyc │ │ ├── rob_mots_classmap.cpython-37.pyc │ │ ├── rob_mots_classmap.cpython-38.pyc │ │ ├── tao.cpython-37.pyc │ │ ├── tao.cpython-38.pyc │ │ ├── tao_ow.cpython-37.pyc │ │ ├── tao_ow.cpython-38.pyc │ │ ├── youtube_vis.cpython-37.pyc │ │ └── youtube_vis.cpython-38.pyc │ ├── _base_dataset.py │ ├── bdd100k.py │ ├── burst.py │ ├── burst_helpers │ │ ├── BURST_SPECIFIC_ISSUES.md │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── __init__.cpython-38.pyc │ │ │ ├── burst_base.cpython-37.pyc │ │ │ ├── burst_base.cpython-38.pyc │ │ │ ├── burst_ow_base.cpython-37.pyc │ │ │ ├── burst_ow_base.cpython-38.pyc │ │ │ ├── format_converter.cpython-37.pyc │ │ │ └── format_converter.cpython-38.pyc │ │ ├── burst_base.py │ │ ├── burst_ow_base.py │ │ ├── convert_burst_format_to_tao_format.py │ │ ├── format_converter.py │ │ └── tao_categories.json │ ├── burst_ow.py │ ├── davis.py │ ├── head_tracking_challenge.py │ ├── kitti_2d_box.py │ ├── kitti_mots.py │ ├── mot_challenge_2d_box.py │ ├── mots_challenge.py │ ├── person_path_22.py │ ├── rob_mots.py │ 
├── rob_mots_classmap.py │ ├── run_rob_mots.py │ ├── tao.py │ ├── tao_ow.py │ └── youtube_vis.py │ ├── eval.py │ ├── metrics │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-37.pyc │ │ ├── __init__.cpython-38.pyc │ │ ├── _base_metric.cpython-37.pyc │ │ ├── _base_metric.cpython-38.pyc │ │ ├── clear.cpython-37.pyc │ │ ├── clear.cpython-38.pyc │ │ ├── count.cpython-37.pyc │ │ ├── count.cpython-38.pyc │ │ ├── hota.cpython-37.pyc │ │ ├── hota.cpython-38.pyc │ │ ├── identity.cpython-37.pyc │ │ ├── identity.cpython-38.pyc │ │ ├── ideucl.cpython-37.pyc │ │ ├── ideucl.cpython-38.pyc │ │ ├── j_and_f.cpython-37.pyc │ │ ├── j_and_f.cpython-38.pyc │ │ ├── track_map.cpython-37.pyc │ │ ├── track_map.cpython-38.pyc │ │ ├── vace.cpython-37.pyc │ │ └── vace.cpython-38.pyc │ ├── _base_metric.py │ ├── clear.py │ ├── count.py │ ├── hota.py │ ├── identity.py │ ├── ideucl.py │ ├── j_and_f.py │ ├── track_map.py │ └── vace.py │ ├── plotting.py │ └── utils.py ├── checkpoints └── generaltrack_bdd.pth ├── configs ├── BDD100K.py ├── __pycache__ │ └── config_utils.cpython-38.pyc └── config_utils.py ├── core ├── Point2InstanceRelation.py ├── __pycache__ │ ├── Point2InstanceRelation.cpython-38.pyc │ ├── corr.cpython-38.pyc │ ├── extractor.cpython-38.pyc │ ├── raft.cpython-38.pyc │ ├── update.cpython-38.pyc │ └── utils.cpython-38.pyc ├── corr.py ├── extractor.py ├── update.py └── utils.py ├── exps └── example │ ├── bdd100k │ ├── __pycache__ │ │ └── yolox_x.cpython-38.pyc │ └── yolox_x.py │ ├── dancetrack │ └── yolox_x.py │ ├── mot │ ├── yolox_l_mix_det.py │ ├── yolox_m_mix_det.py │ ├── yolox_nano_mix_det.py │ ├── yolox_s_mix_det.py │ ├── yolox_tiny_mix_det.py │ ├── yolox_x_ablation.py │ ├── yolox_x_ch.py │ ├── yolox_x_mix_det.py │ ├── yolox_x_mix_mot20_ch.py │ └── yolox_x_mot17_half.py │ └── sportsmot │ ├── yolox_x_sportsmot.py │ └── yolox_x_sportsmot_mix.py ├── requirements.txt ├── setup.py ├── tools ├── convert_bdd100k_to_coco.py ├── interpolation.py ├── track.py ├── txt2json_down.py ├── txt2json_trackeval.py ├── txt2json_web.py └── visual_json.py └── yolox ├── __init__.py ├── __pycache__ └── __init__.cpython-38.pyc ├── core ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-38.pyc │ ├── launch.cpython-38.pyc │ └── trainer.cpython-38.pyc ├── launch.py └── trainer.py ├── data ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-38.pyc │ ├── data_augment.cpython-38.pyc │ ├── data_prefetcher.cpython-38.pyc │ ├── dataloading.cpython-38.pyc │ └── samplers.cpython-38.pyc ├── data_augment.py ├── data_prefetcher.py ├── dataloading.py ├── datasets │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-38.pyc │ │ ├── datasets_wrapper.cpython-38.pyc │ │ ├── mosaicdetection.cpython-38.pyc │ │ ├── mot.cpython-38.pyc │ │ └── mot_bdd.cpython-38.pyc │ ├── datasets_wrapper.py │ ├── mosaicdetection.py │ ├── mot.py │ └── mot_bdd.py └── samplers.py ├── deepsort_tracker ├── __pycache__ │ ├── deepsort.cpython-38.pyc │ ├── detection.cpython-38.pyc │ ├── iou_matching.cpython-38.pyc │ ├── kalman_filter.cpython-38.pyc │ ├── linear_assignment.cpython-38.pyc │ ├── reid_model.cpython-38.pyc │ └── track.cpython-38.pyc ├── deepsort.py ├── detection.py ├── iou_matching.py ├── kalman_filter.py ├── linear_assignment.py ├── reid_model.py └── track.py ├── evaluators ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-38.pyc │ ├── coco_evaluator.cpython-38.pyc │ └── mot_evaluator.cpython-38.pyc ├── coco_evaluator.py ├── evaluation.py └── mot_evaluator.py ├── exp ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-38.pyc │ ├── 
base_exp.cpython-38.pyc │ ├── build.cpython-38.pyc │ └── yolox_base.cpython-38.pyc ├── base_exp.py ├── build.py └── yolox_base.py ├── layers ├── __init__.py ├── csrc │ ├── cocoeval │ │ ├── cocoeval.cpp │ │ └── cocoeval.h │ └── vision.cpp └── fast_coco_eval_api.py ├── models ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-38.pyc │ ├── darknet.cpython-38.pyc │ ├── losses.cpython-38.pyc │ ├── network_blocks.cpython-38.pyc │ ├── yolo_fpn.cpython-38.pyc │ ├── yolo_head.cpython-38.pyc │ ├── yolo_pafpn.cpython-38.pyc │ └── yolox.cpython-38.pyc ├── darknet.py ├── losses.py ├── network_blocks.py ├── yolo_fpn.py ├── yolo_head.py ├── yolo_pafpn.py └── yolox.py ├── motdt_tracker ├── __pycache__ │ ├── basetrack.cpython-38.pyc │ ├── kalman_filter.cpython-38.pyc │ ├── matching.cpython-38.pyc │ ├── motdt_tracker.cpython-38.pyc │ └── reid_model.cpython-38.pyc ├── basetrack.py ├── kalman_filter.py ├── matching.py ├── motdt_tracker.py └── reid_model.py ├── sort_tracker ├── __pycache__ │ └── sort.cpython-38.pyc └── sort.py ├── tracker ├── __pycache__ │ ├── basetrack.cpython-38.pyc │ ├── byte_tracker_bdd.cpython-38.pyc │ ├── kalman_filter.cpython-38.pyc │ └── matching_bdd.cpython-38.pyc ├── basetrack.py ├── byte_tracker_bdd.py ├── kalman_filter.py └── matching_bdd.py ├── tracking_utils ├── evaluation.py ├── io.py └── timer.py └── utils ├── __init__.py ├── __pycache__ ├── __init__.cpython-38.pyc ├── allreduce_norm.cpython-38.pyc ├── boxes.cpython-38.pyc ├── checkpoint.cpython-38.pyc ├── demo_utils.cpython-38.pyc ├── dist.cpython-38.pyc ├── ema.cpython-38.pyc ├── logger.cpython-38.pyc ├── lr_scheduler.cpython-38.pyc ├── metric.cpython-38.pyc ├── model_utils.cpython-38.pyc ├── setup_env.cpython-38.pyc └── visualize.cpython-38.pyc ├── allreduce_norm.py ├── boxes.py ├── checkpoint.py ├── demo_utils.py ├── dist.py ├── ema.py ├── logger.py ├── lr_scheduler.py ├── metric.py ├── model_utils.py ├── setup_env.py └── visualize.py /README.md: -------------------------------------------------------------------------------- 1 | # GeneralTrack 2 | 3 | > [**Towards Generalizable Multi-Object Tracking**]() 4 | > 5 | > Zheng Qin, Le Wang, Sanping Zhou, Panpan Fu, Gang Hua, Wei Tang 6 | > 7 | 8 | 9 | ## Installation 10 | ### 1. Installing on the host machine 11 | ```shell 12 | git clone 13 | cd GeneralTrack 14 | conda create -n generaltrack python=3.8 -y 15 | conda activate generaltrack 16 | pip install torch==1.10.0+cu111 torchvision==0.11.0+cu111 torchaudio==0.10.0 -f https://download.pytorch.org/whl/torch_stable.html 17 | pip install -r requirements.txt 18 | python setup.py develop 19 | pip install cython 20 | pip install 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI' 21 | pip install cython_bbox 22 | ``` 23 | 24 | 25 | ## Data preparation 26 | 27 | Download the [BDD100k](https://bdd-data.berkeley.edu/) MOT 2020 Labels and MOT 2020 images. Unzip all of them into 28 | ```datasets```. 29 | 30 | Also download the [detections](https://vision.in.tum.de/webshare/u/seidensc/GHOST/detections_GHOST.zip) provided by GHOST and extract them into ```datasets```. 31 | ``` 32 | datasets/ 33 | - bdd100k 34 | - images 35 | - track 36 | - train 37 | - val 38 | - test 39 | - labels 40 | - box_track_20 41 | - train 42 | - val 43 | - detections_GHOST 44 | - bdd100k 45 | - train 46 | - val 47 | - test 48 | ``` 49 | Package the detection results and inference files together:
 50 | ```shell 51 | cd 52 | python3 tools/convert_bdd100k_to_coco.py 53 | ``` 54 | 55 | ## Tracking 56 | 57 | **Evaluation on BDD100K** 58 | * **Validation set** 59 | ```shell 60 | cd 61 | python3 tools/track.py 62 | python3 tools/txt2json_trackeval.py 63 | 64 | # Unzip 'data.zip' (https://drive.google.com/file/d/1ZAemZSiRtJNIL68g2mYViBDfVMt4igL1/view?usp=drive_link) and put the json file into 'TrackEval/data/trackers/bdd100k/bdd100k_val/xxtrack/data' 65 | python3 TrackEval/scripts/run_bdd.py --USE_PARALLEL True --NUM_PARALLEL_CORES 64 66 | ``` 67 | 68 | * **Test set** 69 | 70 | ```shell 71 | cd 72 | python3 tools/track.py --test 73 | python3 tools/txt2json_web.py 74 | ``` 75 | Submit the results to the [BDD server](https://eval.ai/web/challenges/challenge-page/1836/overview) 76 | 77 | 78 | 79 | ## Citation 80 | 81 | ``` 82 | 83 | ``` 84 | 85 | ## Acknowledgement 86 | 87 | A large part of the code and the detection results are borrowed from [ByteTrack](https://github.com/ifzhang/ByteTrack), [RAFT](https://github.com/princeton-vl/RAFT), and [GHOST](https://github.com/dvl-tum/GHOST). Many thanks for their wonderful work. 88 | -------------------------------------------------------------------------------- /TrackEval/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Jonathon Luiten 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /TrackEval/docs/DAVIS-format.txt: -------------------------------------------------------------------------------- 1 | Annotation Format: 2 | 3 | 4 | The annotations in each frame are stored in png format. 5 | This png is stored indexed, i.e. it has a single channel and each pixel has a value from 0 to 254 that corresponds to a color palette attached to the png file. 6 | It is important to take this into account when decoding the png, i.e. the output of decoding should be a single-channel image and it should not be necessary to do any remap from RGB to indexes. 7 | The latter is crucial to preserve the index of each object so it can be matched to the correct object during evaluation. 8 | 9 | Each pixel that belongs to the same object has the same value in this png map through the whole video. 10 | Values start at 1 for the first object, then 2, 3, 4, etc. 11 | The background (not an object) has value 0. 12 | Also note that invalid/void pixels are stored with a 254 value.
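In practice the void label is simply masked out before comparing objects, e.g. (a minimal sketch, assuming the annotation has been loaded into a single-channel numpy array named img as in the reading examples below):

valid = img != 254                                     # ignore invalid/void pixels
first_object_mask = np.logical_and(img == 1, valid)    # binary mask of the object with index 1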
 13 | 14 | 15 | These can be read like this: 16 | 17 | import PIL.Image as Image 18 | img = np.array(Image.open("000005.png")) 19 | 20 | 21 | or like this: 22 | 23 | ann_data = tf.read_file(ann_filename) 24 | ann = tf.image.decode_image(ann_data, dtype=tf.uint8, channels=1) 25 | 26 | 27 | See the code for loading the DAVIS dataset for more details. 28 | 29 | -------------------------------------------------------------------------------- /TrackEval/docs/How_To/Add_a_new_metric.md: -------------------------------------------------------------------------------- 1 | # How to add a new or custom family of evaluation metrics to TrackEval 2 | 3 | - Create your metric's code in ```trackeval/metrics/<your_metric_name>.py```. 4 | - It's probably easiest to start by copying an existing metric's code and editing it, e.g. ```trackeval/metrics/identity.py``` is probably the simplest. 5 | - Your metric should be a class, and it should inherit from the ```trackeval.metrics._base_metric._BaseMetric``` class. 6 | - Define an ```__init__``` function that defines the different ```fields``` (values) that your metric will calculate. See ```trackeval/metrics/_base_metric.py``` for a list of currently used field types. Feel free to add new types. 7 | - Write the code that actually calculates your metric for a single sequence and single class in a function called ```eval_sequence```, which takes a data dictionary as input and returns a results dictionary as output. 8 | - Define functions for how to combine your metric field values a) over sequences (```combine_sequences```), b) over classes (```combine_classes_class_averaged```), and c) over classes weighted by the number of detections (```combine_classes_det_averaged```). 9 | - We find a function such as the ```_compute_final_fields``` function used in the current metrics convenient, because it can be shared between the per-sequence metric calculation and the different metric combinations; however, this is not required. 10 | - Register your new metric by adding it to ```trackeval/metrics/__init__.py```. 11 | - Your new metric can then be used by passing the metric class in the list of metrics which is passed to the evaluator (see the files in ```scripts/*```). 12 | -------------------------------------------------------------------------------- /TrackEval/docs/MOTChallenge-format.txt: -------------------------------------------------------------------------------- 1 | Taken from: https://motchallenge.net/instructions/ 2 | 3 | File Format 4 | 5 | Please submit your results as a single .zip file. The results for each sequence must be stored in a separate .txt file in the archive's root folder. The file name must be exactly like the sequence name (case sensitive). 6 | 7 | The file format should be the same as the ground truth file, which is a CSV text-file containing one object instance per line. Each line must contain 10 values: 8 | 9 | <frame>, <id>, <bb_left>, <bb_top>, <bb_width>, <bb_height>, <conf>, <x>, <y>, <z> 10 | The conf value contains the detection confidence in the det.txt files. For the ground truth, it acts as a flag whether the entry is to be considered. A value of 0 means that this particular instance is ignored in the evaluation, while any other value can be used to mark it as active. For submitted results, all lines in the .txt file are considered. The world coordinates x,y,z are ignored for the 2D challenge and can be filled with -1. Similarly, the bounding boxes are ignored for the 3D challenge. However, each line is still required to contain 10 values. 11 | 12 | All frame numbers, target IDs and bounding boxes are 1-based.
Here is an example: 13 | 14 | Tracking with bounding boxes 15 | (MOT15, MOT16, MOT17, MOT20) 16 | 1, 3, 794.27, 247.59, 71.245, 174.88, -1, -1, -1, -1 17 | 1, 6, 1648.1, 119.61, 66.504, 163.24, -1, -1, -1, -1 18 | 1, 8, 875.49, 399.98, 95.303, 233.93, -1, -1, -1, -1 19 | ... 20 | 21 | Multi Object Tracking & Segmentation 22 | (MOTS Challenge) 23 | Each line of an annotation txt file is structured like this (where rle means run-length encoding from COCO): 24 | 25 | time_frame id class_id img_height img_width rle 26 | An example line from a txt file: 27 | 28 | 52 1005 1 375 1242 WSV:2d;1O10000O10000O1O100O100O1O100O1000000000000000O100O102N5K00O1O1N2O110OO2O001O1NTga3 29 | Meaning: 30 | time frame 52 31 | object id 1005 (meaning class id is 1, i.e. car and instance id is 5) 32 | class id 1 33 | image height 375 34 | image width 1242 35 | rle WSV:2d;1O10000O10000O1O100O100O1O100O1000000000000000O100O...1O1N 36 | 37 | image height, image width, and rle can be used together to decode a mask using cocotools (https://github.com/cocodataset/cocoapi). -------------------------------------------------------------------------------- /TrackEval/docs/MOTS-format.txt: -------------------------------------------------------------------------------- 1 | Taken from: https://www.vision.rwth-aachen.de/page/mots 2 | 3 | 4 | Annotation Format 5 | We provide two alternative and equivalent formats, one encoded as png images, and one encoded as txt files. The txt files are smaller and faster to read, but the cocotools are needed to decode the masks. For code to read the annotations, also see mots_tools/blob/master/mots_common/io.py 6 | 7 | Note that in both formats an id value of 10,000 denotes an ignore region and 0 is background. The class id can be obtained by floor division of the object id by 1000 (class_id = obj_id // 1000) and the instance id can be obtained by the object id modulo 1000 (instance_id = obj_id % 1000). The object ids are consistent over time. 8 | 9 | The class ids are the following: 10 | 11 | car 1 12 | pedestrian 2 13 | png format 14 | The png format has a single color channel with 16 bits and can for example be read like this: 15 | 16 | import PIL.Image as Image 17 | img = np.array(Image.open("000005.png")) 18 | obj_ids = np.unique(img) 19 | # to correctly interpret the id of a single object 20 | obj_id = obj_ids[0] 21 | class_id = obj_id // 1000 22 | obj_instance_id = obj_id % 1000 23 | When using a TensorFlow input pipeline for reading the annotations, you can use 24 | 25 | ann_data = tf.read_file(ann_filename) 26 | ann = tf.image.decode_image(ann_data, dtype=tf.uint16, channels=1) 27 | 28 | 29 | txt format 30 | Each line of an annotation txt file is structured like this (where rle means run-length encoding from COCO): 31 | 32 | time_frame id class_id img_height img_width rle 33 | An example line from a txt file: 34 | 35 | 52 1005 1 375 1242 WSV:2d;1O10000O10000O1O100O100O1O100O1000000000000000O100O102N5K00O1O1N2O110OO2O001O1NTga3 36 | Which means 37 | 38 | time frame 52 39 | object id 1005 (meaning class id is 1, i.e. car and instance id is 5) 40 | class id 1 41 | image height 375 42 | image width 1242 43 | rle WSV:2d;1O10000O10000O1O100O100O1O100O1000000000000000O100O...1O1N 44 | 45 | image height, image width, and rle can be used together to decode a mask using cocotools.
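For example, a single annotation line can be decoded into a binary mask plus class and instance ids as follows (a minimal sketch; it assumes the variable line holds one line read from an annotation txt file):

from pycocotools import mask as rletools

time_frame, obj_id, class_id, img_height, img_width, rle = line.strip().split(' ', 5)
binary_mask = rletools.decode({'size': [int(img_height), int(img_width)], 'counts': rle.encode('utf-8')})  # (h, w) array of 0/1
category_id = int(obj_id) // 1000   # e.g. 1005 -> 1 (car)
instance_id = int(obj_id) % 1000    # e.g. 1005 -> 5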
-------------------------------------------------------------------------------- /TrackEval/docs/OpenWorldTracking-Official/Readme.md: -------------------------------------------------------------------------------- 1 | ![owt](https://user-images.githubusercontent.com/23000532/160293694-6fc0a3da-c177-4776-8472-49ff6ff375a3.jpg) 2 | # Opening Up Open-World Tracking - Official Evaluation Code 3 | 4 | TrackEval now contains the official evaluation code for the task of **Open World Tracking**. 5 | 6 | This is the official code from the following paper: 7 | 8 |
**Opening up Open-World Tracking**
 9 | Yang Liu*, Idil Esen Zulfikar*, Jonathon Luiten*, Achal Dave*, Deva Ramanan, Bastian Leibe, Aljoša Ošep, Laura Leal-Taixé
10 | *Equal contribution
11 | CVPR 2022
 12 | 13 | [Paper](https://arxiv.org/abs/2104.11221) 14 | 15 | [Website](https://openworldtracking.github.io) 16 | 17 | ## Running and understanding the code 18 | 19 | The code can be run with the following script (see the script for its arguments and how to run it): 20 | [TAO-OW run script](https://github.com/JonathonLuiten/TrackEval/blob/master/scripts/run_tao_ow.py) 21 | 22 | To understand how the data is being read and used, see the TAO-OW dataset class: 23 | [TAO-OW dataset class](https://github.com/JonathonLuiten/TrackEval/blob/master/trackeval/datasets/tao_ow.py) 24 | 25 | The implementation of the 'Open World Tracking Accuracy' (OWTA) metric proposed in the paper can be found here: 26 | [OWTA metric](https://github.com/JonathonLuiten/TrackEval/blob/master/trackeval/metrics/hota.py) 27 | 28 | ## Citation 29 | If you work with the code and the benchmark, please cite: 30 | 31 | ***Opening Up Open-World Tracking*** 32 | ``` 33 | @inproceedings{liu2022opening, 34 | title={Opening up Open-World Tracking}, 35 | author={Liu, Yang and Zulfikar, Idil Esen and Luiten, Jonathon and Dave, Achal and Ramanan, Deva and Leibe, Bastian and O{\v{s}}ep, Aljo{\v{s}}a and Leal-Taix{\'e}, Laura}, 36 | booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, 37 | year={2022} 38 | } 39 | ``` 40 | 41 | ***TrackEval*** 42 | ``` 43 | @misc{luiten2020trackeval, 44 | author = {Jonathon Luiten and Arne Hoffhues}, 45 | title = {TrackEval}, 46 | howpublished = {\url{https://github.com/JonathonLuiten/TrackEval}}, 47 | year = {2020} 48 | } 49 | ``` 50 | -------------------------------------------------------------------------------- /TrackEval/docs/TAO-format.txt: -------------------------------------------------------------------------------- 1 | Taken from: https://github.com/TAO-Dataset/tao/blob/master/tao/toolkit/tao/tao.py 2 | 3 | Annotation file format: 4 | { 5 | "info" : info, 6 | "images" : [image], 7 | "videos": [video], 8 | "tracks": [track], 9 | "annotations" : [annotation], 10 | "categories": [category], 11 | "licenses" : [license], 12 | } 13 | info: As in MS COCO 14 | image: { 15 | "id" : int, 16 | "video_id": int, 17 | "file_name" : str, 18 | "license" : int, 19 | # Redundant fields for COCO-compatibility 20 | "width": int, 21 | "height": int, 22 | "frame_index": int 23 | } 24 | video: { 25 | "id": int, 26 | "name": str, 27 | "width" : int, 28 | "height" : int, 29 | "neg_category_ids": [int], 30 | "not_exhaustive_category_ids": [int], 31 | "metadata": dict, # Metadata about the video 32 | } 33 | track: { 34 | "id": int, 35 | "category_id": int, 36 | "video_id": int 37 | } 38 | category: { 39 | "id": int, 40 | "name": str, 41 | "synset": str, # For non-LVIS objects, this is "unknown" 42 | ... [other fields copied from LVIS v0.5 and unused] 43 | } 44 | annotation: { 45 | "image_id": int, 46 | "track_id": int, 47 | "bbox": [x,y,width,height], 48 | "area": float, 49 | # Redundant field for compatibility with COCO scripts 50 | "category_id": int 51 | } 52 | license: { 53 | "id" : int, 54 | "name" : str, 55 | "url" : str, 56 | } 57 | -------------------------------------------------------------------------------- /TrackEval/docs/YouTube-VIS-format.txt: -------------------------------------------------------------------------------- 1 | Taken from: https://competitions.codalab.org/competitions/20128#participate-get-data 2 | 3 | The label file follows MSCOCO's style in json format. We adapt the entry name and label format for video.
The definition of json file is: 4 | 5 | 6 | { 7 | "info" : info, 8 | "videos" : [video], 9 | "annotations" : [annotation], 10 | "categories" : [category], 11 | } 12 | video{ 13 | "id" : int, 14 | "width" : int, 15 | "height" : int, 16 | "length" : int, 17 | "file_names" : [file_name], 18 | } 19 | annotation{ 20 | "id" : int, 21 | "video_id" : int, 22 | "category_id" : int, 23 | "segmentations" : [RLE or [polygon] or None], 24 | "areas" : [float or None], 25 | "bboxes" : [[x,y,width,height] or None], 26 | "iscrowd" : 0 or 1, 27 | } 28 | category{ 29 | "id" : int, 30 | "name" : str, 31 | "supercategory" : str, 32 | } 33 | 34 | The submission file is also in json format. The file should contain a list of predictions: 35 | 36 | 37 | prediction{ 38 | "video_id" : int, 39 | "category_id" : int, 40 | "segmentations" : [RLE or [polygon] or None], 41 | "score" : float, 42 | } 43 | 44 | The submission file should be named as "results.json", and compressed without any subfolder. There is an example "valid_submission_sample.zip" in download links above. The example is generated by our proposed MaskTrack R-CNN algorithm. -------------------------------------------------------------------------------- /TrackEval/minimum_requirements.txt: -------------------------------------------------------------------------------- 1 | scipy==1.4.1 2 | numpy==1.18.1 3 | -------------------------------------------------------------------------------- /TrackEval/pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = [ 3 | "setuptools>=42", 4 | "wheel" 5 | ] 6 | build-backend = "setuptools.build_meta" 7 | -------------------------------------------------------------------------------- /TrackEval/requirements.txt: -------------------------------------------------------------------------------- 1 | numpy==1.18.1 2 | scipy==1.4.1 3 | pycocotools==2.0.2 4 | matplotlib==3.2.1 5 | opencv_python==4.4.0.46 6 | scikit_image==0.16.2 7 | pytest==6.0.1 8 | Pillow==8.1.2 9 | tqdm==4.64.0 10 | tabulate 11 | -------------------------------------------------------------------------------- /TrackEval/scripts/comparison_plots.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | 4 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) 5 | import trackeval # noqa: E402 6 | 7 | plots_folder = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'data', 'plots')) 8 | tracker_folder = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'data', 'trackers')) 9 | 10 | # dataset = os.path.join('kitti', 'kitti_2d_box_train') 11 | # classes = ['cars', 'pedestrian'] 12 | 13 | dataset = os.path.join('mot_challenge', 'MOT17-train') 14 | classes = ['pedestrian'] 15 | 16 | data_fol = os.path.join(tracker_folder, dataset) 17 | trackers = os.listdir(data_fol) 18 | out_loc = os.path.join(plots_folder, dataset) 19 | for cls in classes: 20 | trackeval.plotting.plot_compare_trackers(data_fol, trackers, cls, out_loc) 21 | -------------------------------------------------------------------------------- /TrackEval/scripts/eval_mot.py: -------------------------------------------------------------------------------- 1 | 2 | """ run_mot_challenge.py 3 | 4 | Run example: 5 | run_mot_challenge.py --USE_PARALLEL False --METRICS Hota --TRACKERS_TO_EVAL Lif_T 6 | 7 | Command Line Arguments: Defaults, # Comments 8 | Eval arguments: 9 | 'USE_PARALLEL': False, 10 | 'NUM_PARALLEL_CORES': 
8, 11 | 'BREAK_ON_ERROR': True, 12 | 'PRINT_RESULTS': True, 13 | 'PRINT_ONLY_COMBINED': False, 14 | 'PRINT_CONFIG': True, 15 | 'TIME_PROGRESS': True, 16 | 'OUTPUT_SUMMARY': True, 17 | 'OUTPUT_DETAILED': True, 18 | 'PLOT_CURVES': True, 19 | Dataset arguments: 20 | 'GT_FOLDER': os.path.join(code_path, 'data/gt/mot_challenge/'), # Location of GT data 21 | 'TRACKERS_FOLDER': os.path.join(code_path, 'data/trackers/mot_challenge/'), # Trackers location 22 | 'OUTPUT_FOLDER': None, # Where to save eval results (if None, same as TRACKERS_FOLDER) 23 | 'TRACKERS_TO_EVAL': None, # Filenames of trackers to eval (if None, all in folder) 24 | 'CLASSES_TO_EVAL': ['pedestrian'], # Valid: ['pedestrian'] 25 | 'BENCHMARK': 'MOT17', # Valid: 'MOT17', 'MOT16', 'MOT20', 'MOT15' 26 | 'SPLIT_TO_EVAL': 'train', # Valid: 'train', 'test', 'all' 27 | 'INPUT_AS_ZIP': False, # Whether tracker input files are zipped 28 | 'PRINT_CONFIG': True, # Whether to print current config 29 | 'DO_PREPROC': True, # Whether to perform preprocessing (never done for 2D_MOT_2015) 30 | 'TRACKER_SUB_FOLDER': 'data', # Tracker files are in TRACKER_FOLDER/tracker_name/TRACKER_SUB_FOLDER 31 | 'OUTPUT_SUB_FOLDER': '', # Output files are saved in OUTPUT_FOLDER/tracker_name/OUTPUT_SUB_FOLDER 32 | Metric arguments: 33 | 'METRICS': ['HOTA', 'CLEAR', 'Identity', 'VACE'] 34 | """ 35 | 36 | import sys 37 | import os 38 | 39 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) 40 | import trackeval 41 | 42 | def eval_mot(**kargs): 43 | # Command line interface: 44 | eval_config = trackeval.Evaluator.get_default_eval_config() 45 | eval_config['DISPLAY_LESS_PROGRESS'] = False 46 | dataset_config = trackeval.datasets.MotChallenge2DBox.get_default_dataset_config() 47 | metrics_config = {'METRICS': ['HOTA', 'CLEAR', 'Identity'], 'THRESHOLD': 0.5} 48 | dataset_config.update(kargs) 49 | 50 | # Run code 51 | evaluator = trackeval.Evaluator(eval_config) 52 | dataset_list = [trackeval.datasets.MotChallenge2DBox(dataset_config)] 53 | metrics_list = [] 54 | for metric in [trackeval.metrics.HOTA, trackeval.metrics.CLEAR, trackeval.metrics.Identity, trackeval.metrics.VACE]: 55 | if metric.get_name() in metrics_config['METRICS']: 56 | metrics_list.append(metric(metrics_config)) 57 | if len(metrics_list) == 0: 58 | raise Exception('No metrics selected for evaluation') 59 | evaluator.evaluate(dataset_list, metrics_list) 60 | 61 | if __name__ == '__main__': 62 | eval_mot(TRACKERS_TO_EVAL=['exp_name'], 63 | BENCHMARK='sports', 64 | SPLIT_TO_EVAL='val', 65 | GT_LOC_FORMAT='{gt_folder}/{seq}/gt/gt.txt' # '{gt_folder}/{seq}/gt/gt_val_half.txt' for MOT17 val_half 66 | ) 67 | 68 | -------------------------------------------------------------------------------- /TrackEval/scripts/run_bdd.py: -------------------------------------------------------------------------------- 1 | 2 | """ run_bdd.py 3 | 4 | Run example: 5 | run_bdd.py --USE_PARALLEL False --METRICS Hota --TRACKERS_TO_EVAL qdtrack 6 | 7 | Command Line Arguments: Defaults, # Comments 8 | Eval arguments: 9 | 'USE_PARALLEL': False, 10 | 'NUM_PARALLEL_CORES': 8, 11 | 'BREAK_ON_ERROR': True, 12 | 'PRINT_RESULTS': True, 13 | 'PRINT_ONLY_COMBINED': False, 14 | 'PRINT_CONFIG': True, 15 | 'TIME_PROGRESS': True, 16 | 'OUTPUT_SUMMARY': True, 17 | 'OUTPUT_DETAILED': True, 18 | 'PLOT_CURVES': True, 19 | Dataset arguments: 20 | 'GT_FOLDER': os.path.join(code_path, 'data/gt/bdd100k/bdd100k_val'), # Location of GT data 21 | 'TRACKERS_FOLDER': os.path.join(code_path, 
'data/trackers/bdd100k/bdd100k_val'), # Trackers location 22 | 'OUTPUT_FOLDER': None, # Where to save eval results (if None, same as TRACKERS_FOLDER) 23 | 'TRACKERS_TO_EVAL': None, # Filenames of trackers to eval (if None, all in folder) 24 | 'CLASSES_TO_EVAL': ['pedestrian', 'rider', 'car', 'bus', 'truck', 'train', 'motorcycle', 'bicycle'], 25 | # Valid: ['pedestrian', 'rider', 'car', 'bus', 'truck', 'train', 'motorcycle', 'bicycle'] 26 | 'SPLIT_TO_EVAL': 'val', # Valid: 'training', 'val', 27 | 'INPUT_AS_ZIP': False, # Whether tracker input files are zipped 28 | 'PRINT_CONFIG': True, # Whether to print current config 29 | 'TRACKER_SUB_FOLDER': 'data', # Tracker files are in TRACKER_FOLDER/tracker_name/TRACKER_SUB_FOLDER 30 | 'OUTPUT_SUB_FOLDER': '', # Output files are saved in OUTPUT_FOLDER/tracker_name/OUTPUT_SUB_FOLDER 31 | 'TRACKER_DISPLAY_NAMES': None, # Names of trackers to display, if None: TRACKERS_TO_EVAL 32 | Metric arguments: 33 | 'METRICS': ['Hota','Clear', 'ID', 'Count'] 34 | """ 35 | 36 | import sys 37 | import os 38 | import argparse 39 | from multiprocessing import freeze_support 40 | 41 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) 42 | import trackeval # noqa: E402 43 | 44 | if __name__ == '__main__': 45 | freeze_support() 46 | 47 | # Command line interface: 48 | default_eval_config = trackeval.Evaluator.get_default_eval_config() 49 | default_eval_config['PRINT_ONLY_COMBINED'] = True 50 | default_dataset_config = trackeval.datasets.BDD100K.get_default_dataset_config() 51 | default_metrics_config = {'METRICS': ['HOTA', 'CLEAR', 'Identity']} 52 | config = {**default_eval_config, **default_dataset_config, **default_metrics_config} # Merge default configs 53 | parser = argparse.ArgumentParser() 54 | for setting in config.keys(): 55 | if type(config[setting]) == list or type(config[setting]) == type(None): 56 | parser.add_argument("--" + setting, nargs='+') 57 | else: 58 | parser.add_argument("--" + setting) 59 | args = parser.parse_args().__dict__ 60 | for setting in args.keys(): 61 | if args[setting] is not None: 62 | if type(config[setting]) == type(True): 63 | if args[setting] == 'True': 64 | x = True 65 | elif args[setting] == 'False': 66 | x = False 67 | else: 68 | raise Exception('Command line parameter ' + setting + ' must be True or False') 69 | elif type(config[setting]) == type(1): 70 | x = int(args[setting]) 71 | elif type(args[setting]) == type(None): 72 | x = None 73 | else: 74 | x = args[setting] 75 | config[setting] = x 76 | eval_config = {k: v for k, v in config.items() if k in default_eval_config.keys()} 77 | dataset_config = {k: v for k, v in config.items() if k in default_dataset_config.keys()} 78 | metrics_config = {k: v for k, v in config.items() if k in default_metrics_config.keys()} 79 | 80 | # Run code 81 | evaluator = trackeval.Evaluator(eval_config) 82 | dataset_list = [trackeval.datasets.BDD100K(dataset_config)] 83 | metrics_list = [] 84 | for metric in [trackeval.metrics.HOTA, trackeval.metrics.CLEAR, trackeval.metrics.Identity]: 85 | if metric.get_name() in metrics_config['METRICS']: 86 | metrics_list.append(metric()) 87 | if len(metrics_list) == 0: 88 | raise Exception('No metrics selected for evaluation') 89 | evaluator.evaluate(dataset_list, metrics_list) -------------------------------------------------------------------------------- /TrackEval/scripts/run_kitti.py: -------------------------------------------------------------------------------- 1 | 2 | """ run_kitti.py 3 | 4 | Run example: 5 | 
run_kitti.py --USE_PARALLEL False --METRICS Hota --TRACKERS_TO_EVAL CIWT 6 | 7 | Command Line Arguments: Defaults, # Comments 8 | Eval arguments: 9 | 'USE_PARALLEL': False, 10 | 'NUM_PARALLEL_CORES': 8, 11 | 'BREAK_ON_ERROR': True, 12 | 'PRINT_RESULTS': True, 13 | 'PRINT_ONLY_COMBINED': False, 14 | 'PRINT_CONFIG': True, 15 | 'TIME_PROGRESS': True, 16 | 'OUTPUT_SUMMARY': True, 17 | 'OUTPUT_DETAILED': True, 18 | 'PLOT_CURVES': True, 19 | Dataset arguments: 20 | 'GT_FOLDER': os.path.join(code_path, 'data/gt/kitti/kitti_2d_box_train'), # Location of GT data 21 | 'TRACKERS_FOLDER': os.path.join(code_path, 'data/trackers/kitti/kitti_2d_box_train/'), # Trackers location 22 | 'OUTPUT_FOLDER': None, # Where to save eval results (if None, same as TRACKERS_FOLDER) 23 | 'TRACKERS_TO_EVAL': None, # Filenames of trackers to eval (if None, all in folder) 24 | 'CLASSES_TO_EVAL': ['car', 'pedestrian'], # Valid: ['car', 'pedestrian'] 25 | 'SPLIT_TO_EVAL': 'training', # Valid: 'training', 'val', 'training_minus_val', 'test' 26 | 'INPUT_AS_ZIP': False, # Whether tracker input files are zipped 27 | 'PRINT_CONFIG': True, # Whether to print current config 28 | 'TRACKER_SUB_FOLDER': 'data', # Tracker files are in TRACKER_FOLDER/tracker_name/TRACKER_SUB_FOLDER 29 | 'OUTPUT_SUB_FOLDER': '' # Output files are saved in OUTPUT_FOLDER/tracker_name/OUTPUT_SUB_FOLDER 30 | Metric arguments: 31 | 'METRICS': ['Hota','Clear', 'ID', 'Count'] 32 | """ 33 | 34 | import sys 35 | import os 36 | import argparse 37 | from multiprocessing import freeze_support 38 | 39 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) 40 | import trackeval # noqa: E402 41 | 42 | if __name__ == '__main__': 43 | freeze_support() 44 | 45 | # Command line interface: 46 | default_eval_config = trackeval.Evaluator.get_default_eval_config() 47 | default_eval_config['DISPLAY_LESS_PROGRESS'] = False 48 | default_dataset_config = trackeval.datasets.Kitti2DBox.get_default_dataset_config() 49 | default_metrics_config = {'METRICS': ['HOTA', 'CLEAR', 'Identity']} 50 | config = {**default_eval_config, **default_dataset_config, **default_metrics_config} # Merge default configs 51 | parser = argparse.ArgumentParser() 52 | for setting in config.keys(): 53 | if type(config[setting]) == list or type(config[setting]) == type(None): 54 | parser.add_argument("--" + setting, nargs='+') 55 | else: 56 | parser.add_argument("--" + setting) 57 | args = parser.parse_args().__dict__ 58 | for setting in args.keys(): 59 | if args[setting] is not None: 60 | if type(config[setting]) == type(True): 61 | if args[setting] == 'True': 62 | x = True 63 | elif args[setting] == 'False': 64 | x = False 65 | else: 66 | raise Exception('Command line parameter ' + setting + ' must be True or False') 67 | elif type(config[setting]) == type(1): 68 | x = int(args[setting]) 69 | elif type(args[setting]) == type(None): 70 | x = None 71 | else: 72 | x = args[setting] 73 | config[setting] = x 74 | eval_config = {k: v for k, v in config.items() if k in default_eval_config.keys()} 75 | dataset_config = {k: v for k, v in config.items() if k in default_dataset_config.keys()} 76 | metrics_config = {k: v for k, v in config.items() if k in default_metrics_config.keys()} 77 | 78 | # Run code 79 | evaluator = trackeval.Evaluator(eval_config) 80 | dataset_list = [trackeval.datasets.Kitti2DBox(dataset_config)] 81 | metrics_list = [] 82 | for metric in [trackeval.metrics.HOTA, trackeval.metrics.CLEAR, trackeval.metrics.Identity]: 83 | if metric.get_name() in 
metrics_config['METRICS']: 84 | metrics_list.append(metric()) 85 | if len(metrics_list) == 0: 86 | raise Exception('No metrics selected for evaluation') 87 | evaluator.evaluate(dataset_list, metrics_list) 88 | -------------------------------------------------------------------------------- /TrackEval/scripts/run_tao.py: -------------------------------------------------------------------------------- 1 | """ run_tao.py 2 | 3 | Run example: 4 | run_tao.py --USE_PARALLEL False --METRICS HOTA --TRACKERS_TO_EVAL Tracktor++ 5 | 6 | Command Line Arguments: Defaults, # Comments 7 | Eval arguments: 8 | 'USE_PARALLEL': False, 9 | 'NUM_PARALLEL_CORES': 8, 10 | 'BREAK_ON_ERROR': True, 11 | 'PRINT_RESULTS': True, 12 | 'PRINT_ONLY_COMBINED': False, 13 | 'PRINT_CONFIG': True, 14 | 'TIME_PROGRESS': True, 15 | 'OUTPUT_SUMMARY': True, 16 | 'OUTPUT_DETAILED': True, 17 | 'PLOT_CURVES': True, 18 | Dataset arguments: 19 | 'GT_FOLDER': os.path.join(code_path, 'data/gt/tao/tao_training'), # Location of GT data 20 | 'TRACKERS_FOLDER': os.path.join(code_path, 'data/trackers/tao/tao_training'), # Trackers location 21 | 'OUTPUT_FOLDER': None, # Where to save eval results (if None, same as TRACKERS_FOLDER) 22 | 'TRACKERS_TO_EVAL': None, # Filenames of trackers to eval (if None, all in folder) 23 | 'CLASSES_TO_EVAL': None, # Classes to eval (if None, all classes) 24 | 'SPLIT_TO_EVAL': 'training', # Valid: 'training', 'val' 25 | 'PRINT_CONFIG': True, # Whether to print current config 26 | 'TRACKER_SUB_FOLDER': 'data', # Tracker files are in TRACKER_FOLDER/tracker_name/TRACKER_SUB_FOLDER 27 | 'OUTPUT_SUB_FOLDER': '', # Output files are saved in OUTPUT_FOLDER/tracker_name/OUTPUT_SUB_FOLDER 28 | 'TRACKER_DISPLAY_NAMES': None, # Names of trackers to display, if None: TRACKERS_TO_EVAL 29 | 'MAX_DETECTIONS': 300, # Number of maximal allowed detections per image (0 for unlimited) 30 | Metric arguments: 31 | 'METRICS': ['HOTA', 'CLEAR', 'Identity', 'TrackMAP'] 32 | """ 33 | 34 | import sys 35 | import os 36 | import argparse 37 | from multiprocessing import freeze_support 38 | 39 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) 40 | import trackeval # noqa: E402 41 | 42 | if __name__ == '__main__': 43 | freeze_support() 44 | 45 | # Command line interface: 46 | default_eval_config = trackeval.Evaluator.get_default_eval_config() 47 | # print only combined since TrackMAP is undefined for per sequence breakdowns 48 | default_eval_config['PRINT_ONLY_COMBINED'] = True 49 | default_eval_config['DISPLAY_LESS_PROGRESS'] = True 50 | default_dataset_config = trackeval.datasets.TAO.get_default_dataset_config() 51 | default_metrics_config = {'METRICS': ['HOTA', 'CLEAR', 'Identity', 'TrackMAP']} 52 | config = {**default_eval_config, **default_dataset_config, **default_metrics_config} # Merge default configs 53 | parser = argparse.ArgumentParser() 54 | for setting in config.keys(): 55 | if type(config[setting]) == list or type(config[setting]) == type(None): 56 | parser.add_argument("--" + setting, nargs='+') 57 | else: 58 | parser.add_argument("--" + setting) 59 | args = parser.parse_args().__dict__ 60 | for setting in args.keys(): 61 | if args[setting] is not None: 62 | if type(config[setting]) == type(True): 63 | if args[setting] == 'True': 64 | x = True 65 | elif args[setting] == 'False': 66 | x = False 67 | else: 68 | raise Exception('Command line parameter ' + setting + ' must be True or False') 69 | elif type(config[setting]) == type(1): 70 | x = int(args[setting]) 71 | elif type(args[setting]) == 
type(None): 72 | x = None 73 | else: 74 | x = args[setting] 75 | config[setting] = x 76 | eval_config = {k: v for k, v in config.items() if k in default_eval_config.keys()} 77 | dataset_config = {k: v for k, v in config.items() if k in default_dataset_config.keys()} 78 | metrics_config = {k: v for k, v in config.items() if k in default_metrics_config.keys()} 79 | 80 | # Run code 81 | evaluator = trackeval.Evaluator(eval_config) 82 | dataset_list = [trackeval.datasets.TAO(dataset_config)] 83 | metrics_list = [] 84 | for metric in [trackeval.metrics.TrackMAP, trackeval.metrics.CLEAR, trackeval.metrics.Identity, 85 | trackeval.metrics.HOTA]: 86 | if metric.get_name() in metrics_config['METRICS']: 87 | metrics_list.append(metric()) 88 | if len(metrics_list) == 0: 89 | raise Exception('No metrics selected for evaluation') 90 | evaluator.evaluate(dataset_list, metrics_list) -------------------------------------------------------------------------------- /TrackEval/setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | name = trackeval 3 | version = 1.0.dev1 4 | author = Jonathon Luiten, Arne Hoffhues 5 | author_email = jonoluiten@gmail.com 6 | description = Code for evaluating object tracking 7 | long_description = file: Readme.md 8 | long_description_content_type = text/markdown 9 | url = https://github.com/JonathonLuiten/TrackEval 10 | project_urls = 11 | Bug Tracker = https://github.com/JonathonLuiten/TrackEval/issues 12 | classifiers = 13 | Programming Language :: Python :: 3 14 | Programming Language :: Python :: 3 :: Only 15 | License :: OSI Approved :: MIT License 16 | Operating System :: OS Independent 17 | Topic :: Scientific/Engineering 18 | license_files = LICENSE 19 | 20 | [options] 21 | install_requires = 22 | numpy 23 | scipy 24 | packages = find: 25 | 26 | [options.packages.find] 27 | include = trackeval* 28 | -------------------------------------------------------------------------------- /TrackEval/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | setup() 4 | -------------------------------------------------------------------------------- /TrackEval/tests/test_all_quick.py: -------------------------------------------------------------------------------- 1 | """ Test to ensure that the code is working correctly. 2 | Should test ALL metrics across all datasets and splits currently supported. 3 | Only tests one tracker per dataset/split to give a quick test result. 
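Expected per-sequence values are loaded from the '*_detailed.csv' files under data/tests and compared field-by-field with np.isclose.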
4 | """ 5 | 6 | import sys 7 | import os 8 | import numpy as np 9 | from multiprocessing import freeze_support 10 | 11 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) 12 | import trackeval # noqa: E402 13 | 14 | # Fixes multiprocessing on windows, does nothing otherwise 15 | if __name__ == '__main__': 16 | freeze_support() 17 | 18 | eval_config = {'USE_PARALLEL': False, 19 | 'NUM_PARALLEL_CORES': 8, 20 | } 21 | evaluator = trackeval.Evaluator(eval_config) 22 | metrics_list = [trackeval.metrics.HOTA(), trackeval.metrics.CLEAR(), trackeval.metrics.Identity()] 23 | 24 | tests = [ 25 | {'DATASET': 'Kitti2DBox', 'SPLIT_TO_EVAL': 'training', 'TRACKERS_TO_EVAL': ['CIWT']}, 26 | {'DATASET': 'MotChallenge2DBox', 'BENCHMARK': 'MOT15', 'SPLIT_TO_EVAL': 'train', 'TRACKERS_TO_EVAL': ['MPNTrack']}, 27 | {'DATASET': 'MotChallenge2DBox', 'BENCHMARK': 'MOT16', 'SPLIT_TO_EVAL': 'train', 'TRACKERS_TO_EVAL': ['MPNTrack']}, 28 | {'DATASET': 'MotChallenge2DBox', 'BENCHMARK': 'MOT17', 'SPLIT_TO_EVAL': 'train', 'TRACKERS_TO_EVAL': ['MPNTrack']}, 29 | {'DATASET': 'MotChallenge2DBox', 'BENCHMARK': 'MOT20', 'SPLIT_TO_EVAL': 'train', 'TRACKERS_TO_EVAL': ['MPNTrack']}, 30 | ] 31 | 32 | for dataset_config in tests: 33 | 34 | dataset_name = dataset_config.pop('DATASET') 35 | if dataset_name == 'MotChallenge2DBox': 36 | dataset_list = [trackeval.datasets.MotChallenge2DBox(dataset_config)] 37 | file_loc = os.path.join('mot_challenge', dataset_config['BENCHMARK'] + '-' + dataset_config['SPLIT_TO_EVAL']) 38 | elif dataset_name == 'Kitti2DBox': 39 | dataset_list = [trackeval.datasets.Kitti2DBox(dataset_config)] 40 | file_loc = os.path.join('kitti', 'kitti_2d_box_train') 41 | else: 42 | raise Exception('Dataset %s does not exist.' % dataset_name) 43 | 44 | raw_results, messages = evaluator.evaluate(dataset_list, metrics_list) 45 | 46 | classes = dataset_list[0].config['CLASSES_TO_EVAL'] 47 | tracker = dataset_config['TRACKERS_TO_EVAL'][0] 48 | test_data_loc = os.path.join(os.path.dirname(__file__), '..', 'data', 'tests', file_loc) 49 | 50 | for cls in classes: 51 | results = {seq: raw_results[dataset_name][tracker][seq][cls] for seq in raw_results[dataset_name][tracker].keys()} 52 | current_metrics_list = metrics_list + [trackeval.metrics.Count()] 53 | metric_names = trackeval.utils.validate_metrics_list(current_metrics_list) 54 | 55 | # Load expected results: 56 | test_data = trackeval.utils.load_detail(os.path.join(test_data_loc, tracker, cls + '_detailed.csv')) 57 | 58 | # Do checks 59 | for seq in test_data.keys(): 60 | assert len(test_data[seq].keys()) > 250, len(test_data[seq].keys()) 61 | 62 | details = [] 63 | for metric, metric_name in zip(current_metrics_list, metric_names): 64 | table_res = {seq_key: seq_value[metric_name] for seq_key, seq_value in results.items()} 65 | details.append(metric.detailed_results(table_res)) 66 | res_fields = sum([list(s['COMBINED_SEQ'].keys()) for s in details], []) 67 | res_values = sum([list(s[seq].values()) for s in details], []) 68 | res_dict = dict(zip(res_fields, res_values)) 69 | 70 | for field in test_data[seq].keys(): 71 | assert np.isclose(res_dict[field], test_data[seq][field]), seq + ': ' + cls + ': ' + field 72 | 73 | print('Tracker %s tests passed' % tracker) 74 | print('All tests passed') 75 | 76 | -------------------------------------------------------------------------------- /TrackEval/tests/test_davis.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | import numpy 
as np 4 | from multiprocessing import freeze_support 5 | 6 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) 7 | import trackeval # noqa: E402 8 | 9 | # Fixes multiprocessing on windows, does nothing otherwise 10 | if __name__ == '__main__': 11 | freeze_support() 12 | 13 | 14 | eval_config = {'USE_PARALLEL': False, 15 | 'NUM_PARALLEL_CORES': 8, 16 | 'PRINT_RESULTS': False, 17 | 'PRINT_CONFIG': True, 18 | 'TIME_PROGRESS': True, 19 | 'DISPLAY_LESS_PROGRESS': True, 20 | 'OUTPUT_SUMMARY': False, 21 | 'OUTPUT_EMPTY_CLASSES': False, 22 | 'OUTPUT_DETAILED': False, 23 | 'PLOT_CURVES': False, 24 | } 25 | evaluator = trackeval.Evaluator(eval_config) 26 | metrics_list = [trackeval.metrics.HOTA(), trackeval.metrics.CLEAR(), trackeval.metrics.Identity(), 27 | trackeval.metrics.JAndF()] 28 | 29 | tests = [ 30 | {'SPLIT_TO_EVAL': 'val', 'TRACKERS_TO_EVAL': ['ags']}, 31 | ] 32 | 33 | for dataset_config in tests: 34 | 35 | dataset_list = [trackeval.datasets.DAVIS(dataset_config)] 36 | file_loc = os.path.join('davis', 'davis_unsupervised_' + dataset_config['SPLIT_TO_EVAL']) 37 | 38 | raw_results, messages = evaluator.evaluate(dataset_list, metrics_list) 39 | 40 | classes = dataset_list[0].config['CLASSES_TO_EVAL'] 41 | tracker = dataset_config['TRACKERS_TO_EVAL'][0] 42 | test_data_loc = os.path.join(os.path.dirname(__file__), '..', 'data', 'tests', file_loc) 43 | 44 | for cls in classes: 45 | results = {seq: raw_results['DAVIS'][tracker][seq][cls] for seq in raw_results['DAVIS'][tracker].keys()} 46 | current_metrics_list = metrics_list + [trackeval.metrics.Count()] 47 | metric_names = trackeval.utils.validate_metrics_list(current_metrics_list) 48 | 49 | # Load expected results: 50 | test_data = trackeval.utils.load_detail(os.path.join(test_data_loc, tracker, cls + '_detailed.csv')) 51 | 52 | # Do checks 53 | for seq in test_data.keys(): 54 | assert len(test_data[seq].keys()) > 250, len(test_data[seq].keys()) 55 | 56 | details = [] 57 | for metric, metric_name in zip(current_metrics_list, metric_names): 58 | table_res = {seq_key: seq_value[metric_name] for seq_key, seq_value in results.items()} 59 | details.append(metric.detailed_results(table_res)) 60 | res_fields = sum([list(s['COMBINED_SEQ'].keys()) for s in details], []) 61 | res_values = sum([list(s[seq].values()) for s in details], []) 62 | res_dict = dict(zip(res_fields, res_values)) 63 | 64 | for field in test_data[seq].keys(): 65 | assert np.isclose(res_dict[field], test_data[seq][field]), seq + ': ' + cls + ': ' + field 66 | 67 | print('Tracker %s tests passed' % tracker) 68 | print('All tests passed') -------------------------------------------------------------------------------- /TrackEval/tests/test_mot17.py: -------------------------------------------------------------------------------- 1 | """ Test to ensure that the code is working correctly. 2 | Runs all metrics on 14 trackers for the MOT Challenge MOT17 benchmark. 
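Expected results are loaded from data/tests/mot_challenge/MOT17-train/<tracker>/pedestrian_detailed.csv and compared field-by-field with np.isclose.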
3 | """ 4 | 5 | 6 | import sys 7 | import os 8 | import numpy as np 9 | from multiprocessing import freeze_support 10 | 11 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) 12 | import trackeval # noqa: E402 13 | 14 | # Fixes multiprocessing on windows, does nothing otherwise 15 | if __name__ == '__main__': 16 | freeze_support() 17 | 18 | eval_config = {'USE_PARALLEL': False, 19 | 'NUM_PARALLEL_CORES': 8, 20 | } 21 | evaluator = trackeval.Evaluator(eval_config) 22 | metrics_list = [trackeval.metrics.HOTA(), trackeval.metrics.CLEAR(), trackeval.metrics.Identity()] 23 | test_data_loc = os.path.join(os.path.dirname(__file__), '..', 'data', 'tests', 'mot_challenge', 'MOT17-train') 24 | trackers = [ 25 | 'DPMOT', 26 | 'GNNMatch', 27 | 'IA', 28 | 'ISE_MOT17R', 29 | 'Lif_T', 30 | 'Lif_TsimInt', 31 | 'LPC_MOT', 32 | 'MAT', 33 | 'MIFTv2', 34 | 'MPNTrack', 35 | 'SSAT', 36 | 'TracktorCorr', 37 | 'Tracktorv2', 38 | 'UnsupTrack', 39 | ] 40 | 41 | for tracker in trackers: 42 | # Run code on tracker 43 | dataset_config = {'TRACKERS_TO_EVAL': [tracker], 44 | 'BENCHMARK': 'MOT17'} 45 | dataset_list = [trackeval.datasets.MotChallenge2DBox(dataset_config)] 46 | raw_results, messages = evaluator.evaluate(dataset_list, metrics_list) 47 | 48 | results = {seq: raw_results['MotChallenge2DBox'][tracker][seq]['pedestrian'] for seq in 49 | raw_results['MotChallenge2DBox'][tracker].keys()} 50 | current_metrics_list = metrics_list + [trackeval.metrics.Count()] 51 | metric_names = trackeval.utils.validate_metrics_list(current_metrics_list) 52 | 53 | # Load expected results: 54 | test_data = trackeval.utils.load_detail(os.path.join(test_data_loc, tracker, 'pedestrian_detailed.csv')) 55 | assert len(test_data.keys()) == 22, len(test_data.keys()) 56 | 57 | # Do checks 58 | for seq in test_data.keys(): 59 | assert len(test_data[seq].keys()) > 250, len(test_data[seq].keys()) 60 | 61 | details = [] 62 | for metric, metric_name in zip(current_metrics_list, metric_names): 63 | table_res = {seq_key: seq_value[metric_name] for seq_key, seq_value in results.items()} 64 | details.append(metric.detailed_results(table_res)) 65 | res_fields = sum([list(s['COMBINED_SEQ'].keys()) for s in details], []) 66 | res_values = sum([list(s[seq].values()) for s in details], []) 67 | res_dict = dict(zip(res_fields, res_values)) 68 | 69 | for field in test_data[seq].keys(): 70 | if not np.isclose(res_dict[field], test_data[seq][field]): 71 | print(tracker, seq, res_dict[field], test_data[seq][field], field) 72 | raise AssertionError 73 | 74 | print('Tracker %s tests passed' % tracker) 75 | print('All tests passed') 76 | 77 | -------------------------------------------------------------------------------- /TrackEval/tests/test_mots.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | import numpy as np 4 | from multiprocessing import freeze_support 5 | 6 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) 7 | import trackeval # noqa: E402 8 | 9 | # Fixes multiprocessing on windows, does nothing otherwise 10 | if __name__ == '__main__': 11 | freeze_support() 12 | 13 | eval_config = {'USE_PARALLEL': False, 14 | 'NUM_PARALLEL_CORES': 8, 15 | } 16 | evaluator = trackeval.Evaluator(eval_config) 17 | metrics_list = [trackeval.metrics.HOTA(), trackeval.metrics.CLEAR(), trackeval.metrics.Identity()] 18 | 19 | tests = [ 20 | {'DATASET': 'KittiMOTS', 'SPLIT_TO_EVAL': 'val', 'TRACKERS_TO_EVAL': ['trackrcnn']}, 21 | {'DATASET': 
'MOTSChallenge', 'SPLIT_TO_EVAL': 'train', 'TRACKERS_TO_EVAL': ['TrackRCNN']} 22 | ] 23 | 24 | for dataset_config in tests: 25 | 26 | dataset_name = dataset_config.pop('DATASET') 27 | if dataset_name == 'MOTSChallenge': 28 | dataset_list = [trackeval.datasets.MOTSChallenge(dataset_config)] 29 | file_loc = os.path.join('mot_challenge', 'MOTS-' + dataset_config['SPLIT_TO_EVAL']) 30 | elif dataset_name == 'KittiMOTS': 31 | dataset_list = [trackeval.datasets.KittiMOTS(dataset_config)] 32 | file_loc = os.path.join('kitti', 'kitti_mots_val') 33 | else: 34 | raise Exception('Dataset %s does not exist.' % dataset_name) 35 | 36 | raw_results, messages = evaluator.evaluate(dataset_list, metrics_list) 37 | 38 | classes = dataset_list[0].config['CLASSES_TO_EVAL'] 39 | tracker = dataset_config['TRACKERS_TO_EVAL'][0] 40 | test_data_loc = os.path.join(os.path.dirname(__file__), '..', 'data', 'tests', file_loc) 41 | 42 | for cls in classes: 43 | results = {seq: raw_results[dataset_name][tracker][seq][cls] for seq in raw_results[dataset_name][tracker].keys()} 44 | current_metrics_list = metrics_list + [trackeval.metrics.Count()] 45 | metric_names = trackeval.utils.validate_metrics_list(current_metrics_list) 46 | 47 | # Load expected results: 48 | test_data = trackeval.utils.load_detail(os.path.join(test_data_loc, tracker, cls + '_detailed.csv')) 49 | 50 | # Do checks 51 | for seq in test_data.keys(): 52 | assert len(test_data[seq].keys()) > 250, len(test_data[seq].keys()) 53 | 54 | details = [] 55 | for metric, metric_name in zip(current_metrics_list, metric_names): 56 | table_res = {seq_key: seq_value[metric_name] for seq_key, seq_value in results.items()} 57 | details.append(metric.detailed_results(table_res)) 58 | res_fields = sum([list(s['COMBINED_SEQ'].keys()) for s in details], []) 59 | res_values = sum([list(s[seq].values()) for s in details], []) 60 | res_dict = dict(zip(res_fields, res_values)) 61 | 62 | for field in test_data[seq].keys(): 63 | assert np.isclose(res_dict[field], test_data[seq][field]), seq + ': ' + cls + ': ' + field 64 | 65 | print('Tracker %s tests passed' % tracker) 66 | print('All tests passed') -------------------------------------------------------------------------------- /TrackEval/trackeval/__init__.py: -------------------------------------------------------------------------------- 1 | from .eval import Evaluator 2 | from . import datasets 3 | from . import metrics 4 | from . import plotting 5 | from . 
import utils 6 | -------------------------------------------------------------------------------- /TrackEval/trackeval/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/__pycache__/_timing.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/__pycache__/_timing.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/__pycache__/_timing.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/__pycache__/_timing.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/__pycache__/eval.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/__pycache__/eval.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/__pycache__/eval.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/__pycache__/eval.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/__pycache__/plotting.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/__pycache__/plotting.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/__pycache__/plotting.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/__pycache__/plotting.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/__pycache__/utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/__pycache__/utils.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/__pycache__/utils.cpython-38.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/__pycache__/utils.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/_timing.py: -------------------------------------------------------------------------------- 1 | from functools import wraps 2 | from time import perf_counter 3 | import inspect 4 | 5 | DO_TIMING = False 6 | DISPLAY_LESS_PROGRESS = False 7 | timer_dict = {} 8 | counter = 0 9 | 10 | 11 | def time(f): 12 | @wraps(f) 13 | def wrap(*args, **kw): 14 | if DO_TIMING: 15 | # Run function with timing 16 | ts = perf_counter() 17 | result = f(*args, **kw) 18 | te = perf_counter() 19 | tt = te-ts 20 | 21 | # Get function name 22 | arg_names = inspect.getfullargspec(f)[0] 23 | if arg_names[0] == 'self' and DISPLAY_LESS_PROGRESS: 24 | return result 25 | elif arg_names[0] == 'self': 26 | method_name = type(args[0]).__name__ + '.' + f.__name__ 27 | else: 28 | method_name = f.__name__ 29 | 30 | # Record cumulative time in each function for analysis 31 | if method_name in timer_dict.keys(): 32 | timer_dict[method_name] += tt 33 | else: 34 | timer_dict[method_name] = tt 35 | 36 | # If code is finished, display timing summary 37 | if method_name == "Evaluator.evaluate": 38 | print("") 39 | print("Timing analysis:") 40 | for key, value in timer_dict.items(): 41 | print('%-70s %2.4f sec' % (key, value)) 42 | else: 43 | # Get function argument values for printing special arguments of interest 44 | arg_titles = ['tracker', 'seq', 'cls'] 45 | arg_vals = [] 46 | for i, a in enumerate(arg_names): 47 | if a in arg_titles: 48 | arg_vals.append(args[i]) 49 | arg_text = '(' + ', '.join(arg_vals) + ')' 50 | 51 | # Display methods and functions with different indentation. 52 | if arg_names[0] == 'self': 53 | print('%-74s %2.4f sec' % (' '*4 + method_name + arg_text, tt)) 54 | elif arg_names[0] == 'test': 55 | pass 56 | else: 57 | global counter 58 | counter += 1 59 | print('%i %-70s %2.4f sec' % (counter, method_name + arg_text, tt)) 60 | 61 | return result 62 | else: 63 | # If config["TIME_PROGRESS"] is false, or config["USE_PARALLEL"] is true, run functions normally without timing. 64 | return f(*args, **kw) 65 | return wrap 66 | -------------------------------------------------------------------------------- /TrackEval/trackeval/baselines/__init__.py: -------------------------------------------------------------------------------- 1 | # Relative imports, so the package can be imported as trackeval.baselines (a plain 'import baseline_utils' only resolves with this folder on sys.path) 2 | from . import baseline_utils 3 | from . import stp 4 | from . import non_overlap 5 | from . import pascal_colormap 6 | from . import thresholder 7 | from . import vizualize -------------------------------------------------------------------------------- /TrackEval/trackeval/baselines/non_overlap.py: -------------------------------------------------------------------------------- 1 | """ 2 | Non-Overlap: Code to take in a set of raw detections and produce a set of non-overlapping detections from it.
3 | 4 | Author: Jonathon Luiten 5 | """ 6 | 7 | import os 8 | import sys 9 | from multiprocessing.pool import Pool 10 | from multiprocessing import freeze_support 11 | 12 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))) 13 | from trackeval.baselines import baseline_utils as butils 14 | from trackeval.utils import get_code_path 15 | 16 | code_path = get_code_path() 17 | config = { 18 | 'INPUT_FOL': os.path.join(code_path, 'data/detections/rob_mots/{split}/raw_supplied/data/'), 19 | 'OUTPUT_FOL': os.path.join(code_path, 'data/detections/rob_mots/{split}/non_overlap_supplied/data/'), 20 | 'SPLIT': 'train', # valid: 'train', 'val', 'test'. 21 | 'Benchmarks': None, # If None, all benchmarks in SPLIT. 22 | 23 | 'Num_Parallel_Cores': None, # If None, run without parallel. 24 | 25 | 'THRESHOLD_NMS_MASK_IOU': 0.5, 26 | } 27 | 28 | 29 | def do_sequence(seq_file): 30 | 31 | # Load input data from file (e.g. provided detections) 32 | # data format: data['cls'][t] = {'ids', 'scores', 'im_hs', 'im_ws', 'mask_rles'} 33 | data = butils.load_seq(seq_file) 34 | 35 | # Converts data from a class-separated to a class-combined format. 36 | # data[t] = {'ids', 'scores', 'im_hs', 'im_ws', 'mask_rles', 'cls'} 37 | data = butils.combine_classes(data) 38 | 39 | # Where to accumulate output data for writing out 40 | output_data = [] 41 | 42 | # Run for each timestep. 43 | for timestep, t_data in enumerate(data): 44 | 45 | # Remove redundant masks by performing non-maximum suppression (NMS) 46 | t_data = butils.mask_NMS(t_data, nms_threshold=config['THRESHOLD_NMS_MASK_IOU']) 47 | 48 | # Perform non-overlap, to get non_overlapping masks. 49 | t_data = butils.non_overlap(t_data, already_sorted=True) 50 | 51 | # Save result in output format to write to file later. 52 | # Output Format = [timestep ID class score im_h im_w mask_RLE] 53 | for i in range(len(t_data['ids'])): 54 | row = [timestep, int(t_data['ids'][i]), t_data['cls'][i], t_data['scores'][i], t_data['im_hs'][i], 55 | t_data['im_ws'][i], t_data['mask_rles'][i]] 56 | output_data.append(row) 57 | 58 | # Write results to file 59 | out_file = seq_file.replace(config['INPUT_FOL'].format(split=config['SPLIT']), 60 | config['OUTPUT_FOL'].format(split=config['SPLIT'])) 61 | butils.write_seq(output_data, out_file) 62 | 63 | print('DONE:', seq_file) 64 | 65 | 66 | if __name__ == '__main__': 67 | 68 | # Required to fix bug in multiprocessing on windows. 69 | freeze_support() 70 | 71 | # Obtain list of sequences to run tracker for. 
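# If no explicit benchmark list is configured, fall back to every benchmark available in the chosen split ('waymo' and 'mots_challenge' only exist outside the train split), as the branch below shows.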
72 | if config['Benchmarks']: 73 | benchmarks = config['Benchmarks'] 74 | else: 75 | benchmarks = ['davis_unsupervised', 'kitti_mots', 'youtube_vis', 'ovis', 'bdd_mots', 'tao'] 76 | if config['SPLIT'] != 'train': 77 | benchmarks += ['waymo', 'mots_challenge'] 78 | seqs_todo = [] 79 | for bench in benchmarks: 80 | bench_fol = os.path.join(config['INPUT_FOL'].format(split=config['SPLIT']), bench) 81 | seqs_todo += [os.path.join(bench_fol, seq) for seq in os.listdir(bench_fol)] 82 | 83 | # Run in parallel 84 | if config['Num_Parallel_Cores']: 85 | with Pool(config['Num_Parallel_Cores']) as pool: 86 | results = pool.map(do_sequence, seqs_todo) 87 | 88 | # Run in series 89 | else: 90 | for seq_todo in seqs_todo: 91 | do_sequence(seq_todo) 92 | 93 | -------------------------------------------------------------------------------- /TrackEval/trackeval/baselines/thresholder.py: -------------------------------------------------------------------------------- 1 | """ 2 | Thresholder 3 | 4 | Author: Jonathon Luiten 5 | 6 | Simply reads in a set of detections, thresholds them at a certain score threshold, and writes them out again. 7 | """ 8 | 9 | import os 10 | import sys 11 | from multiprocessing.pool import Pool 12 | from multiprocessing import freeze_support 13 | 14 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))) 15 | from trackeval.baselines import baseline_utils as butils 16 | from trackeval.utils import get_code_path 17 | 18 | THRESHOLD = 0.2 19 | 20 | code_path = get_code_path() 21 | config = { 22 | 'INPUT_FOL': os.path.join(code_path, 'data/detections/rob_mots/{split}/non_overlap_supplied/data/'), 23 | 'OUTPUT_FOL': os.path.join(code_path, 'data/detections/rob_mots/{split}/threshold_' + str(100*THRESHOLD) + '/data/'), 24 | 'SPLIT': 'train', # valid: 'train', 'val', 'test'. 25 | 'Benchmarks': None, # If None, all benchmarks in SPLIT. 26 | 27 | 'Num_Parallel_Cores': None, # If None, run without parallel. 28 | 29 | 'DETECTION_THRESHOLD': THRESHOLD, 30 | } 31 | 32 | 33 | def do_sequence(seq_file): 34 | 35 | # Load input data from file (e.g. provided detections) 36 | # data format: data['cls'][t] = {'ids', 'scores', 'im_hs', 'im_ws', 'mask_rles'} 37 | data = butils.load_seq(seq_file) 38 | 39 | # Where to accumulate output data for writing out 40 | output_data = [] 41 | 42 | # Run for each class. 43 | for cls, cls_data in data.items(): 44 | 45 | # Run for each timestep. 46 | for timestep, t_data in enumerate(cls_data): 47 | 48 | # Threshold detections. 49 | t_data = butils.threshold(t_data, config['DETECTION_THRESHOLD']) 50 | 51 | # Save result in output format to write to file later. 52 | # Output Format = [timestep ID class score im_h im_w mask_RLE] 53 | for i in range(len(t_data['ids'])): 54 | row = [timestep, int(t_data['ids'][i]), cls, t_data['scores'][i], t_data['im_hs'][i], 55 | t_data['im_ws'][i], t_data['mask_rles'][i]] 56 | output_data.append(row) 57 | 58 | # Write results to file 59 | out_file = seq_file.replace(config['INPUT_FOL'].format(split=config['SPLIT']), 60 | config['OUTPUT_FOL'].format(split=config['SPLIT'])) 61 | butils.write_seq(output_data, out_file) 62 | 63 | print('DONE:', seq_file) 64 | 65 | 66 | if __name__ == '__main__': 67 | 68 | # Required to fix bug in multiprocessing on windows. 69 | freeze_support() 70 | 71 | # Obtain list of sequences to run tracker for.
72 | if config['Benchmarks']: 73 | benchmarks = config['Benchmarks'] 74 | else: 75 | benchmarks = ['davis_unsupervised', 'kitti_mots', 'youtube_vis', 'ovis', 'bdd_mots', 'tao'] 76 | if config['SPLIT'] != 'train': 77 | benchmarks += ['waymo', 'mots_challenge'] 78 | seqs_todo = [] 79 | for bench in benchmarks: 80 | bench_fol = os.path.join(config['INPUT_FOL'].format(split=config['SPLIT']), bench) 81 | seqs_todo += [os.path.join(bench_fol, seq) for seq in os.listdir(bench_fol)] 82 | 83 | # Run in parallel 84 | if config['Num_Parallel_Cores']: 85 | with Pool(config['Num_Parallel_Cores']) as pool: 86 | results = pool.map(do_sequence, seqs_todo) 87 | 88 | # Run in series 89 | else: 90 | for seq_todo in seqs_todo: 91 | do_sequence(seq_todo) 92 | 93 | -------------------------------------------------------------------------------- /TrackEval/trackeval/baselines/vizualize.py: -------------------------------------------------------------------------------- 1 | """ 2 | Vizualize: Code which converts .txt rle tracking results into a visual .png format. 3 | 4 | Author: Jonathon Luiten 5 | """ 6 | 7 | import os 8 | import sys 9 | from multiprocessing.pool import Pool 10 | from multiprocessing import freeze_support 11 | 12 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))) 13 | from trackeval.baselines import baseline_utils as butils 14 | from trackeval.utils import get_code_path 15 | from trackeval.datasets.rob_mots_classmap import cls_id_to_name 16 | 17 | code_path = get_code_path() 18 | config = { 19 | # Tracker format: 20 | 'INPUT_FOL': os.path.join(code_path, 'data/trackers/rob_mots/{split}/STP/data/{bench}'), 21 | 'OUTPUT_FOL': os.path.join(code_path, 'data/viz/rob_mots/{split}/STP/data/{bench}'), 22 | # GT format: 23 | # 'INPUT_FOL': os.path.join(code_path, 'data/gt/rob_mots/{split}/{bench}/data/'), 24 | # 'OUTPUT_FOL': os.path.join(code_path, 'data/gt_viz/rob_mots/{split}/{bench}/'), 25 | 'SPLIT': 'train', # valid: 'train', 'val', 'test'. 26 | 'Benchmarks': None, # If None, all benchmarks in SPLIT. 27 | 'Num_Parallel_Cores': None, # If None, run without parallel. 28 | } 29 | 30 | 31 | def do_sequence(seq_file): 32 | bench = os.path.basename(os.path.dirname(seq_file)) # benchmark name derived from the path itself (assumes {bench} is the last folder of INPUT_FOL, as in the tracker format above), rather than relying on the module-level 'bench' loop variable from the __main__ block 33 | # Folder to save resulting visualization in 34 | out_fol = seq_file.replace(config['INPUT_FOL'].format(split=config['SPLIT'], bench=bench), config['OUTPUT_FOL'].format(split=config['SPLIT'], bench=bench)).replace('.txt', '') 35 | 36 | # Load input data from file (e.g. provided detections) 37 | # data format: data['cls'][t] = {'ids', 'scores', 'im_hs', 'im_ws', 'mask_rles'} 38 | data = butils.load_seq(seq_file) 39 | 40 | # Get frame size for visualizing empty frames 41 | im_h, im_w = butils.get_frame_size(data) 42 | 43 | # First run for each class. 44 | for cls, cls_data in data.items(): 45 | 46 | if cls >= 100: 47 | continue 48 | 49 | # Run for each timestep. 50 | for timestep, t_data in enumerate(cls_data): 51 | # Save out visualization 52 | out_file = os.path.join(out_fol, cls_id_to_name[cls], str(timestep).zfill(5) + '.png') 53 | butils.save_as_png(t_data, out_file, im_h, im_w) 54 | 55 | 56 | # Then run for all classes combined 57 | # Converts data from a class-separated to a class-combined format. 58 | data = butils.combine_classes(data) 59 | 60 | # Run for each timestep.
61 | for timestep, t_data in enumerate(data): 62 | # Save out visualization 63 | out_file = os.path.join(out_fol, 'all_classes', str(timestep).zfill(5) + '.png') 64 | butils.save_as_png(t_data, out_file, im_h, im_w) 65 | 66 | print('DONE:', seq_file) 67 | 68 | 69 | if __name__ == '__main__': 70 | 71 | # Required to fix bug in multiprocessing on windows. 72 | freeze_support() 73 | 74 | # Obtain list of sequences to run tracker for. 75 | if config['Benchmarks']: 76 | benchmarks = config['Benchmarks'] 77 | else: 78 | benchmarks = ['davis_unsupervised', 'kitti_mots', 'youtube_vis', 'ovis', 'bdd_mots', 'tao'] 79 | if config['SPLIT'] != 'train': 80 | benchmarks += ['waymo', 'mots_challenge'] 81 | seqs_todo = [] 82 | for bench in benchmarks: 83 | bench_fol = config['INPUT_FOL'].format(split=config['SPLIT'], bench=bench) 84 | seqs_todo += [os.path.join(bench_fol, seq) for seq in os.listdir(bench_fol)] 85 | 86 | # Run in parallel 87 | if config['Num_Parallel_Cores']: 88 | with Pool(config['Num_Parallel_Cores']) as pool: 89 | results = pool.map(do_sequence, seqs_todo) 90 | 91 | # Run in series 92 | else: 93 | for seq_todo in seqs_todo: 94 | do_sequence(seq_todo) 95 | -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from .kitti_2d_box import Kitti2DBox 2 | from .kitti_mots import KittiMOTS 3 | from .mot_challenge_2d_box import MotChallenge2DBox 4 | from .mots_challenge import MOTSChallenge 5 | from .bdd100k import BDD100K 6 | from .davis import DAVIS 7 | from .tao import TAO 8 | from .tao_ow import TAO_OW 9 | try: 10 | from .burst import BURST 11 | from .burst_ow import BURST_OW 12 | except ImportError as err: 13 | print(f"Error importing BURST due to missing underlying dependency: {err}") 14 | from .youtube_vis import YouTubeVIS 15 | from .head_tracking_challenge import HeadTrackingChallenge 16 | from .rob_mots import RobMOTS 17 | from .person_path_22 import PersonPath22 18 | -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/_base_dataset.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/_base_dataset.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/_base_dataset.cpython-38.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/_base_dataset.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/bdd100k.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/bdd100k.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/bdd100k.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/bdd100k.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/burst.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/burst.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/burst.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/burst.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/burst_ow.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/burst_ow.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/burst_ow.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/burst_ow.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/davis.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/davis.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/davis.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/davis.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/head_tracking_challenge.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/head_tracking_challenge.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/head_tracking_challenge.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/head_tracking_challenge.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/kitti_2d_box.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/kitti_2d_box.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/kitti_2d_box.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/kitti_2d_box.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/kitti_mots.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/kitti_mots.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/kitti_mots.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/kitti_mots.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/mot_challenge_2d_box.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/mot_challenge_2d_box.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/mot_challenge_2d_box.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/mot_challenge_2d_box.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/mots_challenge.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/mots_challenge.cpython-37.pyc -------------------------------------------------------------------------------- 
/TrackEval/trackeval/datasets/__pycache__/mots_challenge.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/mots_challenge.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/person_path_22.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/person_path_22.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/person_path_22.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/person_path_22.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/rob_mots.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/rob_mots.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/rob_mots.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/rob_mots.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/rob_mots_classmap.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/rob_mots_classmap.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/rob_mots_classmap.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/rob_mots_classmap.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/tao.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/tao.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/tao.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/tao.cpython-38.pyc 
-------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/tao_ow.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/tao_ow.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/tao_ow.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/tao_ow.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/youtube_vis.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/youtube_vis.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/__pycache__/youtube_vis.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/__pycache__/youtube_vis.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/burst.py: -------------------------------------------------------------------------------- 1 | import os 2 | from .burst_helpers.burst_base import BURSTBase 3 | from .burst_helpers.format_converter import GroundTruthBURSTFormatToTAOFormatConverter, PredictionBURSTFormatToTAOFormatConverter 4 | from .. import utils 5 | 6 | 7 | class BURST(BURSTBase): 8 | """Dataset class for TAO tracking""" 9 | 10 | @staticmethod 11 | def get_default_dataset_config(): 12 | tao_config = BURSTBase.get_default_dataset_config() 13 | code_path = utils.get_code_path() 14 | 15 | # e.g. 'data/gt/tsunami/exemplar_guided/' 16 | tao_config['GT_FOLDER'] = os.path.join( 17 | code_path, 'data/gt/burst/val/') # Location of GT data 18 | # e.g. 
'data/trackers/tsunami/exemplar_guided/mask_guided/validation/' 19 | tao_config['TRACKERS_FOLDER'] = os.path.join( 20 | code_path, 'data/trackers/burst/class-guided/') # Trackers location 21 | # set to True or False 22 | tao_config['EXEMPLAR_GUIDED'] = False 23 | return tao_config 24 | 25 | def _iou_type(self): 26 | return 'mask' 27 | 28 | def _box_or_mask_from_det(self, det): 29 | return det['segmentation'] 30 | 31 | def _calculate_area_for_ann(self, ann): 32 | import pycocotools.mask as cocomask 33 | return cocomask.area(ann["segmentation"]) 34 | 35 | def _calculate_similarities(self, gt_dets_t, tracker_dets_t): 36 | similarity_scores = self._calculate_mask_ious(gt_dets_t, tracker_dets_t, is_encoded=True, do_ioa=False) 37 | return similarity_scores 38 | 39 | def _is_exemplar_guided(self): 40 | exemplar_guided = self.config['EXEMPLAR_GUIDED'] 41 | return exemplar_guided 42 | 43 | def _postproc_ground_truth_data(self, data): 44 | return GroundTruthBURSTFormatToTAOFormatConverter(data).convert() 45 | 46 | def _postproc_prediction_data(self, data): 47 | return PredictionBURSTFormatToTAOFormatConverter( 48 | self.gt_data, data, 49 | exemplar_guided=self._is_exemplar_guided()).convert() 50 | -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/burst_helpers/BURST_SPECIFIC_ISSUES.md: -------------------------------------------------------------------------------- 1 | The track ids in both ground truth and predictions are not globally unique, but 2 | start from 1 for each video. At the moment when converting from Ali format to 3 | TAO format, we remap the ids to be globally unique. It would be better to 4 | directly have this in the data though. 5 | 6 | 7 | Improve setting of EXEMPLAR_GUIDED flag, maybe this can be done automatically. 
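A minimal sketch of the remapping described above, assuming TAO-style annotation dicts that carry 'video_id' and 'track_id' keys (an illustration of the idea only, not the converter's actual implementation):

def remap_track_ids(annotations):
    # Map (video_id, per-video track_id) pairs to globally unique track ids.
    id_map = {}
    for ann in annotations:
        key = (ann['video_id'], ann['track_id'])
        if key not in id_map:
            id_map[key] = len(id_map) + 1  # global ids start at 1
        ann['track_id'] = id_map[key]
    return annotations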
8 | -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/burst_helpers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/burst_helpers/__init__.py -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/burst_helpers/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/burst_helpers/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/burst_helpers/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/burst_helpers/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/burst_helpers/__pycache__/burst_base.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/burst_helpers/__pycache__/burst_base.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/burst_helpers/__pycache__/burst_base.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/burst_helpers/__pycache__/burst_base.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/burst_helpers/__pycache__/burst_ow_base.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/burst_helpers/__pycache__/burst_ow_base.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/burst_helpers/__pycache__/burst_ow_base.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/burst_helpers/__pycache__/burst_ow_base.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/burst_helpers/__pycache__/format_converter.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/burst_helpers/__pycache__/format_converter.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/burst_helpers/__pycache__/format_converter.cpython-38.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/datasets/burst_helpers/__pycache__/format_converter.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/burst_helpers/convert_burst_format_to_tao_format.py: -------------------------------------------------------------------------------- 1 | import json 2 | import argparse 3 | from .format_converter import GroundTruthBURSTFormatToTAOFormatConverter, PredictionBURSTFormatToTAOFormatConverter 4 | 5 | 6 | def main(args): 7 | with open(args.gt_input_file) as f: 8 | ali_format_gt = json.load(f) 9 | tao_format_gt = GroundTruthBURSTFormatToTAOFormatConverter( 10 | ali_format_gt, args.split).convert() 11 | with open(args.gt_output_file, 'w') as f: 12 | json.dump(tao_format_gt, f) 13 | 14 | if args.pred_input_file is None: 15 | return 16 | with open(args.pred_input_file) as f: 17 | ali_format_pred = json.load(f) 18 | tao_format_pred = PredictionBURSTFormatToTAOFormatConverter( 19 | tao_format_gt, ali_format_pred, args.split, 20 | args.exemplar_guided).convert() 21 | with open(args.pred_output_file, 'w') as f: 22 | json.dump(tao_format_pred, f) 23 | 24 | 25 | if __name__ == '__main__': 26 | # Because of the relative import above, run this as a module: python -m trackeval.datasets.burst_helpers.convert_burst_format_to_tao_format 27 | parser = argparse.ArgumentParser() 28 | parser.add_argument( 29 | '--gt_input_file', type=str, 30 | default='../data/gt/tsunami/exemplar_guided/validation_all_annotations.json') 31 | parser.add_argument('--gt_output_file', type=str, 32 | default='/tmp/val_gt.json') 33 | parser.add_argument('--pred_input_file', type=str, 34 | default='../data/trackers/tsunami/exemplar_guided/STCN_off_the_shelf/data/results.json') 35 | parser.add_argument('--pred_output_file', type=str, 36 | default='/tmp/pred.json') 37 | parser.add_argument('--split', type=str, default='validation') 38 | parser.add_argument('--exemplar_guided', type=lambda v: str(v).lower() in ('true', '1', 'yes'), default=True) # argparse's type=bool would treat any non-empty string, including 'False', as True 39 | args_ = parser.parse_args() 40 | main(args_) 41 | -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/burst_ow.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | from .burst_helpers.burst_ow_base import BURST_OW_Base 4 | from .burst_helpers.format_converter import GroundTruthBURSTFormatToTAOFormatConverter, PredictionBURSTFormatToTAOFormatConverter 5 | from ..
import utils 6 | 7 | 8 | class BURST_OW(BURST_OW_Base): 9 | """Dataset class for TAO tracking""" 10 | 11 | @staticmethod 12 | def get_default_dataset_config(): 13 | tao_config = BURST_OW_Base.get_default_dataset_config() 14 | code_path = utils.get_code_path() 15 | tao_config['GT_FOLDER'] = os.path.join( 16 | code_path, 'data/gt/burst/all_classes/val/') # Location of GT data 17 | tao_config['TRACKERS_FOLDER'] = os.path.join( 18 | code_path, 'data/trackers/burst/open-world/val/') # Trackers location 19 | return tao_config 20 | 21 | def _iou_type(self): 22 | return 'mask' 23 | 24 | def _box_or_mask_from_det(self, det): 25 | if "segmentation" in det: 26 | return det["segmentation"] 27 | else: 28 | return det["mask"] 29 | 30 | def _calculate_area_for_ann(self, ann): 31 | import pycocotools.mask as cocomask 32 | seg = self._box_or_mask_from_det(ann) 33 | return cocomask.area(seg) 34 | 35 | def _calculate_similarities(self, gt_dets_t, tracker_dets_t): 36 | similarity_scores = self._calculate_mask_ious(gt_dets_t, tracker_dets_t, is_encoded=True, do_ioa=False) 37 | return similarity_scores 38 | 39 | def _postproc_ground_truth_data(self, data): 40 | return GroundTruthBURSTFormatToTAOFormatConverter(data).convert() 41 | 42 | def _postproc_prediction_data(self, data): 43 | # if it's a list, it's already in TAO format and not in Ali format 44 | # however the image ids do not match and need to be remapped 45 | if isinstance(data, list): 46 | _remap_image_ids(data, self.gt_data) 47 | return data 48 | 49 | return PredictionBURSTFormatToTAOFormatConverter( 50 | self.gt_data, data, 51 | exemplar_guided=False).convert() 52 | 53 | 54 | def _remap_image_ids(pred_data, ali_gt_data): 55 | code_path = utils.get_code_path() 56 | if 'split' in ali_gt_data: 57 | split = ali_gt_data['split'] 58 | else: 59 | split = 'val' 60 | 61 | if split in ('val', 'validation'): 62 | tao_gt_path = os.path.join( 63 | code_path, 'data/gt/tao/tao_validation/gt.json') 64 | else: 65 | tao_gt_path = os.path.join( 66 | code_path, 'data/gt/tao/tao_test/test_without_annotations.json') 67 | 68 | with open(tao_gt_path) as f: 69 | tao_gt = json.load(f) 70 | 71 | tao_img_by_id = {} 72 | for img in tao_gt['images']: 73 | img_id = img['id'] 74 | tao_img_by_id[img_id] = img 75 | 76 | ali_img_id_by_filename = {} 77 | for ali_img in ali_gt_data['images']: 78 | ali_img_id = ali_img['id'] 79 | file_name = ali_img['file_name'].replace("validation", "val") 80 | ali_img_id_by_filename[file_name] = ali_img_id 81 | 82 | ali_img_id_by_tao_img_id = {} 83 | for tao_img_id, tao_img in tao_img_by_id.items(): 84 | file_name = tao_img['file_name'] 85 | ali_img_id = ali_img_id_by_filename[file_name] 86 | ali_img_id_by_tao_img_id[tao_img_id] = ali_img_id 87 | 88 | for det in pred_data: 89 | tao_img_id = det['image_id'] 90 | ali_img_id = ali_img_id_by_tao_img_id[tao_img_id] 91 | det['image_id'] = ali_img_id 92 | -------------------------------------------------------------------------------- /TrackEval/trackeval/datasets/rob_mots_classmap.py: -------------------------------------------------------------------------------- 1 | cls_id_to_name = { 2 | 1: 'person', 3 | 2: 'bicycle', 4 | 3: 'car', 5 | 4: 'motorcycle', 6 | 5: 'airplane', 7 | 6: 'bus', 8 | 7: 'train', 9 | 8: 'truck', 10 | 9: 'boat', 11 | 10: 'traffic light', 12 | 11: 'fire hydrant', 13 | 12: 'stop sign', 14 | 13: 'parking meter', 15 | 14: 'bench', 16 | 15: 'bird', 17 | 16: 'cat', 18 | 17: 'dog', 19 | 18: 'horse', 20 | 19: 'sheep', 21 | 20: 'cow', 22 | 21: 'elephant', 23 | 22: 'bear', 24 | 23: 'zebra', 25 
| 24: 'giraffe', 26 | 25: 'backpack', 27 | 26: 'umbrella', 28 | 27: 'handbag', 29 | 28: 'tie', 30 | 29: 'suitcase', 31 | 30: 'frisbee', 32 | 31: 'skis', 33 | 32: 'snowboard', 34 | 33: 'sports ball', 35 | 34: 'kite', 36 | 35: 'baseball bat', 37 | 36: 'baseball glove', 38 | 37: 'skateboard', 39 | 38: 'surfboard', 40 | 39: 'tennis racket', 41 | 40: 'bottle', 42 | 41: 'wine glass', 43 | 42: 'cup', 44 | 43: 'fork', 45 | 44: 'knife', 46 | 45: 'spoon', 47 | 46: 'bowl', 48 | 47: 'banana', 49 | 48: 'apple', 50 | 49: 'sandwich', 51 | 50: 'orange', 52 | 51: 'broccoli', 53 | 52: 'carrot', 54 | 53: 'hot dog', 55 | 54: 'pizza', 56 | 55: 'donut', 57 | 56: 'cake', 58 | 57: 'chair', 59 | 58: 'couch', 60 | 59: 'potted plant', 61 | 60: 'bed', 62 | 61: 'dining table', 63 | 62: 'toilet', 64 | 63: 'tv', 65 | 64: 'laptop', 66 | 65: 'mouse', 67 | 66: 'remote', 68 | 67: 'keyboard', 69 | 68: 'cell phone', 70 | 69: 'microwave', 71 | 70: 'oven', 72 | 71: 'toaster', 73 | 72: 'sink', 74 | 73: 'refrigerator', 75 | 74: 'book', 76 | 75: 'clock', 77 | 76: 'vase', 78 | 77: 'scissors', 79 | 78: 'teddy bear', 80 | 79: 'hair drier', 81 | 80: 'toothbrush'} -------------------------------------------------------------------------------- /TrackEval/trackeval/metrics/__init__.py: -------------------------------------------------------------------------------- 1 | from .hota import HOTA 2 | from .clear import CLEAR 3 | from .identity import Identity 4 | from .count import Count 5 | from .j_and_f import JAndF 6 | from .track_map import TrackMAP 7 | from .vace import VACE 8 | from .ideucl import IDEucl -------------------------------------------------------------------------------- /TrackEval/trackeval/metrics/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/metrics/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/metrics/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/metrics/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/metrics/__pycache__/_base_metric.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/metrics/__pycache__/_base_metric.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/metrics/__pycache__/_base_metric.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/metrics/__pycache__/_base_metric.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/metrics/__pycache__/clear.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/metrics/__pycache__/clear.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/metrics/__pycache__/clear.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/metrics/__pycache__/clear.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/metrics/__pycache__/count.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/metrics/__pycache__/count.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/metrics/__pycache__/count.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/metrics/__pycache__/count.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/metrics/__pycache__/hota.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/metrics/__pycache__/hota.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/metrics/__pycache__/hota.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/metrics/__pycache__/hota.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/metrics/__pycache__/identity.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/metrics/__pycache__/identity.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/metrics/__pycache__/identity.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/metrics/__pycache__/identity.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/metrics/__pycache__/ideucl.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/metrics/__pycache__/ideucl.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/metrics/__pycache__/ideucl.cpython-38.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/metrics/__pycache__/ideucl.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/metrics/__pycache__/j_and_f.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/metrics/__pycache__/j_and_f.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/metrics/__pycache__/j_and_f.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/metrics/__pycache__/j_and_f.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/metrics/__pycache__/track_map.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/metrics/__pycache__/track_map.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/metrics/__pycache__/track_map.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/metrics/__pycache__/track_map.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/metrics/__pycache__/vace.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/metrics/__pycache__/vace.cpython-37.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/metrics/__pycache__/vace.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/TrackEval/trackeval/metrics/__pycache__/vace.cpython-38.pyc -------------------------------------------------------------------------------- /TrackEval/trackeval/metrics/count.py: -------------------------------------------------------------------------------- 1 | 2 | from ._base_metric import _BaseMetric 3 | from .. 
import _timing 4 | 5 | 6 | class Count(_BaseMetric): 7 | """Class which simply counts the number of tracker and gt detections and ids.""" 8 | def __init__(self, config=None): 9 | super().__init__() 10 | self.integer_fields = ['Dets', 'GT_Dets', 'IDs', 'GT_IDs'] 11 | self.fields = self.integer_fields 12 | self.summary_fields = self.fields 13 | 14 | @_timing.time 15 | def eval_sequence(self, data): 16 | """Returns counts for one sequence""" 17 | # Get results 18 | res = {'Dets': data['num_tracker_dets'], 19 | 'GT_Dets': data['num_gt_dets'], 20 | 'IDs': data['num_tracker_ids'], 21 | 'GT_IDs': data['num_gt_ids'], 22 | 'Frames': data['num_timesteps']} 23 | return res 24 | 25 | def combine_sequences(self, all_res): 26 | """Combines metrics across all sequences""" 27 | res = {} 28 | for field in self.integer_fields: 29 | res[field] = self._combine_sum(all_res, field) 30 | return res 31 | 32 | def combine_classes_class_averaged(self, all_res, ignore_empty_classes=None): 33 | """Combines metrics across all classes by averaging over the class values""" 34 | res = {} 35 | for field in self.integer_fields: 36 | res[field] = self._combine_sum(all_res, field) 37 | return res 38 | 39 | def combine_classes_det_averaged(self, all_res): 40 | """Combines metrics across all classes by averaging over the detection values""" 41 | res = {} 42 | for field in self.integer_fields: 43 | res[field] = self._combine_sum(all_res, field) 44 | return res 45 | -------------------------------------------------------------------------------- /checkpoints/generaltrack_bdd.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/checkpoints/generaltrack_bdd.pth -------------------------------------------------------------------------------- /configs/BDD100K.py: -------------------------------------------------------------------------------- 1 | exp_file = 'exps/example/bdd100k/yolox_x.py' 2 | 3 | # tracking args 4 | track_thresh = 0.35 5 | det_thresh = 0.45 6 | track_buffer = 50 7 | match_thresh = 0.9 8 | min_box_area = 100 9 | byte = True 10 | 11 | # relation args 12 | resize = [720, 1280] 13 | restore_ckpt = 'checkpoints/generaltrack_bdd.pth' 14 | roialign_size = (2, 2) 15 | corr_radius = 4 16 | corr_levels = 4 -------------------------------------------------------------------------------- /configs/__pycache__/config_utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/configs/__pycache__/config_utils.cpython-38.pyc -------------------------------------------------------------------------------- /core/Point2InstanceRelation.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import time 4 | 5 | from core.update import Hierarchical_Relation_Aggregation 6 | from core.extractor import Feature_Extractor 7 | from core.corr import CorrBlock 8 | from core.utils import coords_grid 9 | 10 | try: 11 | autocast = torch.cuda.amp.autocast 12 | except: 13 | class autocast: 14 | def __init__(self, enabled): 15 | pass 16 | 17 | def __enter__(self): 18 | pass 19 | 20 | def __exit__(self, *args): 21 | pass 22 | 23 | class Point2InstanceRelation(nn.Module): 24 | def __init__(self, args): 25 | super(Point2InstanceRelation, self).__init__() 26 | self.args = args 27 | self.hidden_dim = hdim 
= 128
28 | 
29 |         self.fnet = Feature_Extractor(output_dim=256, dropout=args.dropout)
30 |         self.predict_block_similarity = Hierarchical_Relation_Aggregation(self.args)
31 | 
32 |     def freeze_bn(self):
33 |         for m in self.modules():
34 |             if isinstance(m, nn.BatchNorm2d):
35 |                 m.eval()
36 | 
37 |     def init(self, img):
38 |         N, C, H, W = img.shape
39 |         coords = coords_grid(N, H // 8, W // 8, device=img.device)
40 | 
41 |         return coords
42 | 
43 |     def forward(self, image1, image2, detection1, detection2):
44 |         time2 = time.time()
45 |         image1 = (2 * (image1 / 255.0) - 1.0).contiguous()  # normalize RGB to [-1, 1]; .contiguous() makes a copy, so memory is not shared with the input
46 |         image2 = (2 * (image2 / 255.0) - 1.0).contiguous()
47 |         detection1 = detection1.detach()  # detach so no gradients flow back through the raw detections
48 |         detection2 = detection2.detach()
49 |         detection1_ = (detection1.clone()).int().detach()
50 |         detection1_[:, :, 2:4] = torch.max(torch.div(detection1_[:, :, 2:4], 8), torch.tensor([1]).cuda())
51 |         detection1_[:, :, 4:6] = torch.max(torch.div(detection1_[:, :, 4:6], 8), torch.tensor([1]).cuda())
52 | 
53 | 
54 |         # Feature Extraction
55 |         with autocast(enabled=self.args.mixed_precision):
56 |             fmap1, fmap2 = self.fnet([image1, image2])
57 |         time3 = time.time()
58 | 
59 |         # Feature Relation Extraction & Multi-scale
60 |         corr_fn = CorrBlock(fmap1.float(), fmap2.float(), num_levels=self.args.corr_levels, radius=self.args.corr_radius)
61 | 
62 |         # Point-region Relation
63 |         corr = corr_fn(self.init(image1).detach())
64 | 
65 |         # Hierarchical Relation Aggregation
66 |         time4 = time.time()
67 |         with autocast(enabled=self.args.mixed_precision):
68 |             classification_score, label_gt = self.predict_block_similarity(detection1_, corr, detection1, detection2)
69 | 
70 |         time5 = time.time()
71 |         t = [time2, time3, time4, time5]  # per-stage timestamps, kept for profiling
72 |         return classification_score, label_gt
73 | 
--------------------------------------------------------------------------------
/core/__pycache__/Point2InstanceRelation.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/core/__pycache__/Point2InstanceRelation.cpython-38.pyc
--------------------------------------------------------------------------------
/core/__pycache__/corr.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/core/__pycache__/corr.cpython-38.pyc
--------------------------------------------------------------------------------
/core/__pycache__/extractor.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/core/__pycache__/extractor.cpython-38.pyc
--------------------------------------------------------------------------------
/core/__pycache__/raft.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/core/__pycache__/raft.cpython-38.pyc
--------------------------------------------------------------------------------
/core/__pycache__/update.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/core/__pycache__/update.cpython-38.pyc
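Taken together, configs/BDD100K.py (earlier in this dump) and Point2InstanceRelation suggest how the relation model is wired up at inference time. The following is a minimal sketch, not a file from this repository: the `dropout` and `mixed_precision` attributes (which the model reads but BDD100K.py does not define) and the checkpoint key layout are assumptions.

    from argparse import Namespace
    import torch
    from core.Point2InstanceRelation import Point2InstanceRelation

    # Mirror configs/BDD100K.py, plus the two attributes the model reads
    # that the config does not define (assumed defaults).
    args = Namespace(corr_levels=4, corr_radius=4, roialign_size=(2, 2),
                     resize=[720, 1280], dropout=0.0, mixed_precision=False)
    model = Point2InstanceRelation(args).cuda().eval()
    state = torch.load('checkpoints/generaltrack_bdd.pth', map_location='cuda')
    model.load_state_dict(state, strict=False)  # strict=False: the checkpoint's key layout is an assumption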
-------------------------------------------------------------------------------- /core/__pycache__/utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/core/__pycache__/utils.cpython-38.pyc -------------------------------------------------------------------------------- /core/corr.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn.functional as F 3 | from core.utils import bilinear_sampler 4 | 5 | try: 6 | import alt_cuda_corr 7 | except: 8 | pass 9 | 10 | 11 | class CorrBlock: 12 | def __init__(self, fmap1, fmap2, num_levels=4, radius=4): 13 | self.num_levels = num_levels 14 | self.radius = radius 15 | self.corr_pyramid = [] 16 | 17 | # Feature Relation Extractor 18 | corr = CorrBlock.corr(fmap1, fmap2) 19 | 20 | batch, h1, w1, dim, h2, w2 = corr.shape 21 | corr = corr.reshape(batch*h1*w1, dim, h2, w2) 22 | 23 | # Multi-scale Relation 24 | self.corr_pyramid.append(corr) 25 | for i in range(self.num_levels-1): 26 | corr = F.avg_pool2d(corr, 2, stride=2) 27 | self.corr_pyramid.append(corr) 28 | 29 | def __call__(self, coords): 30 | # Point-region Relation 31 | r = self.radius 32 | coords = coords.permute(0, 2, 3, 1) 33 | batch, h1, w1, _ = coords.shape 34 | 35 | out_pyramid = [] 36 | for i in range(self.num_levels): 37 | corr = self.corr_pyramid[i] 38 | dx = torch.linspace(-r, r, 2*r+1, device=coords.device) 39 | dy = torch.linspace(-r, r, 2*r+1, device=coords.device) 40 | delta = torch.stack(torch.meshgrid(dy, dx), axis=-1) 41 | 42 | centroid_lvl = coords.reshape(batch*h1*w1, 1, 1, 2) / 2**i 43 | delta_lvl = delta.view(1, 2*r+1, 2*r+1, 2) 44 | coords_lvl = centroid_lvl + delta_lvl 45 | 46 | corr = bilinear_sampler(corr, coords_lvl) 47 | corr = corr.view(batch, h1, w1, -1) 48 | out_pyramid.append(corr) 49 | 50 | out = torch.cat(out_pyramid, dim=-1) 51 | return out.permute(0, 3, 1, 2).contiguous().float() 52 | 53 | @staticmethod 54 | def corr(fmap1, fmap2): 55 | batch, dim, ht, wd = fmap1.shape 56 | fmap1 = fmap1.view(batch, dim, ht*wd) 57 | fmap2 = fmap2.view(batch, dim, ht*wd) 58 | 59 | corr = torch.matmul(fmap1.transpose(1,2), fmap2) 60 | corr = corr.view(batch, ht, wd, 1, ht, wd) 61 | return corr / torch.sqrt(torch.tensor(dim).float()) 62 | -------------------------------------------------------------------------------- /core/extractor.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | class ResidualBlock(nn.Module): 6 | def __init__(self, in_planes, planes, norm_fn='group', stride=1): 7 | super(ResidualBlock, self).__init__() 8 | 9 | self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, stride=stride) 10 | self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1) 11 | self.relu = nn.ReLU(inplace=True) 12 | 13 | num_groups = planes // 8 14 | 15 | if norm_fn == 'group': 16 | self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) 17 | self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) 18 | if not stride == 1: 19 | self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) 20 | 21 | elif norm_fn == 'batch': 22 | self.norm1 = nn.BatchNorm2d(planes) 23 | self.norm2 = nn.BatchNorm2d(planes) 24 | if not stride == 1: 25 | self.norm3 = nn.BatchNorm2d(planes) 26 | 27 | elif norm_fn == 'instance': 28 | self.norm1 = 
nn.InstanceNorm2d(planes) 29 | self.norm2 = nn.InstanceNorm2d(planes) 30 | if not stride == 1: 31 | self.norm3 = nn.InstanceNorm2d(planes) 32 | 33 | elif norm_fn == 'none': 34 | self.norm1 = nn.Sequential() 35 | self.norm2 = nn.Sequential() 36 | if not stride == 1: 37 | self.norm3 = nn.Sequential() 38 | 39 | if stride == 1: 40 | self.downsample = None 41 | 42 | else: 43 | self.downsample = nn.Sequential( 44 | nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3) 45 | 46 | 47 | def forward(self, x): 48 | y = x 49 | y = self.relu(self.norm1(self.conv1(y))) 50 | y = self.relu(self.norm2(self.conv2(y))) 51 | 52 | if self.downsample is not None: 53 | x = self.downsample(x) 54 | 55 | return self.relu(x+y) 56 | 57 | class Feature_Extractor(nn.Module): 58 | def __init__(self, output_dim=128, dropout=0.0): 59 | super(Feature_Extractor, self).__init__() 60 | self.norm_fn = 'instance' 61 | self.norm1 = nn.InstanceNorm2d(64) 62 | 63 | self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3) 64 | self.relu1 = nn.ReLU(inplace=True) 65 | 66 | self.in_planes = 64 67 | self.layer1 = self._make_layer(64, stride=1) 68 | self.layer2 = self._make_layer(96, stride=2) 69 | self.layer3 = self._make_layer(128, stride=2) 70 | 71 | # output convolution 72 | self.conv2 = nn.Conv2d(128, output_dim, kernel_size=1) 73 | 74 | self.dropout = None 75 | if dropout > 0: 76 | self.dropout = nn.Dropout2d(p=dropout) 77 | 78 | for m in self.modules(): 79 | if isinstance(m, nn.Conv2d): 80 | nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') 81 | elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)): 82 | if m.weight is not None: 83 | nn.init.constant_(m.weight, 1) 84 | if m.bias is not None: 85 | nn.init.constant_(m.bias, 0) 86 | 87 | def _make_layer(self, dim, stride=1): 88 | layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride) 89 | layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1) 90 | layers = (layer1, layer2) 91 | 92 | self.in_planes = dim 93 | return nn.Sequential(*layers) 94 | 95 | 96 | def forward(self, x): 97 | 98 | # if input is list, combine batch dimension 99 | is_list = isinstance(x, tuple) or isinstance(x, list) 100 | if is_list: 101 | batch_dim = x[0].shape[0] 102 | x = torch.cat(x, dim=0) 103 | 104 | x = self.conv1(x) 105 | x = self.norm1(x) 106 | x = self.relu1(x) 107 | 108 | x = self.layer1(x) 109 | x = self.layer2(x) 110 | x = self.layer3(x) 111 | 112 | x = self.conv2(x) 113 | 114 | if self.training and self.dropout is not None: 115 | x = self.dropout(x) 116 | 117 | if is_list: 118 | x = torch.split(x, [batch_dim, batch_dim], dim=0) 119 | 120 | return x -------------------------------------------------------------------------------- /core/utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn.functional as F 3 | import numpy as np 4 | from scipy import interpolate 5 | 6 | 7 | def bilinear_sampler(img, coords, mode='bilinear', mask=False): 8 | H, W = img.shape[-2:] 9 | xgrid, ygrid = coords.split([1, 1], dim=-1) 10 | xgrid = 2 * xgrid / (W - 1) - 1 11 | ygrid = 2 * ygrid / (H - 1) - 1 12 | 13 | grid = torch.cat([xgrid, ygrid], dim=-1) 14 | img = F.grid_sample(img, grid, align_corners=True) 15 | 16 | if mask: 17 | mask = (xgrid > -1) & (ygrid > -1) & (xgrid < 1) & (ygrid < 1) 18 | return img, mask.float() 19 | 20 | return img 21 | 22 | def coords_grid(batch, ht, wd, device): 23 | coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, 
device=device))
24 |     coords = torch.stack(coords[::-1], dim=0).float()
25 |     return coords[None].repeat(batch, 1, 1, 1)
26 | 
27 | def position_encoding(d, output_size, args):
28 |     xyxy1 = d[:, :, :, 2:6]
29 |     xyxy2 = d[:, :, :, 10:14]
30 |     offset = d[:, :, :, 6:8]
31 | 
32 |     xy1_list = torch.zeros(d.shape[0], d.shape[1], d.shape[2], 4, output_size[0], output_size[1])  # offsets are stored in (x, y) order
33 |     xy2_list = torch.zeros(d.shape[0], d.shape[1], d.shape[2], 4, output_size[0], output_size[1])
34 | 
35 |     w1 = (xyxy1[:, :, :, 2] - xyxy1[:, :, :, 0]) / (output_size[1] * 2)
36 |     h1 = (xyxy1[:, :, :, 3] - xyxy1[:, :, :, 1]) / (output_size[0] * 2)
37 |     w2 = (xyxy2[:, :, :, 2] - xyxy2[:, :, :, 0]) / (output_size[1] * 2)
38 |     h2 = (xyxy2[:, :, :, 3] - xyxy2[:, :, :, 1]) / (output_size[0] * 2)
39 | 
40 |     for i in range(output_size[0]):  # height
41 |         for j in range(output_size[1]):  # width
42 |             x1 = xyxy1[:, :, :, 0] + w1 * (j * 2 + 1)
43 |             y1 = xyxy1[:, :, :, 1] + h1 * (i * 2 + 1)
44 |             x2 = xyxy2[:, :, :, 0] + w2 * (j * 2 + 1)
45 |             y2 = xyxy2[:, :, :, 1] + h2 * (i * 2 + 1)
46 | 
47 |             xy1_list[:, :, :, 0, i, j] = x1
48 |             xy1_list[:, :, :, 1, i, j] = y1
49 |             xy1_list[:, :, :, 2, i, j] = 0
50 |             xy1_list[:, :, :, 3, i, j] = 0
51 |             xy2_list[:, :, :, 0, i, j] = x2
52 |             xy2_list[:, :, :, 1, i, j] = y2
53 |             xy2_list[:, :, :, 2, i, j] = offset[:, :, :, 0]
54 |             xy2_list[:, :, :, 3, i, j] = offset[:, :, :, 1]
55 | 
56 | 
57 |     pe = xy2_list - xy1_list
58 |     return pe.cuda()
--------------------------------------------------------------------------------
/exps/example/bdd100k/__pycache__/yolox_x.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/exps/example/bdd100k/__pycache__/yolox_x.cpython-38.pyc
--------------------------------------------------------------------------------
/exps/example/bdd100k/yolox_x.py:
--------------------------------------------------------------------------------
1 | # encoding: utf-8
2 | import os
3 | import random
4 | import torch
5 | import torch.nn as nn
6 | import torch.distributed as dist
7 | 
8 | from yolox.exp import Exp as MyExp
9 | from yolox.data import get_yolox_datadir
10 | 
11 | 
12 | class Exp(MyExp):
13 |     def __init__(self):
14 |         super(Exp, self).__init__()
15 |         self.num_classes = 1
16 |         self.depth = 1.33
17 |         self.width = 1.25
18 |         self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
19 |         self.train_ann = "train.json"
20 |         self.val_ann = "val.json"
21 |         self.test_ann = "test.json"
22 | 
23 |         self.input_size = (800, 1440)
24 |         self.test_size = (800, 1440)
25 |         self.random_size = (18, 32)
26 |         self.max_epoch = 8
27 |         self.print_interval = 20
28 |         self.eval_interval = 5
29 |         self.test_conf = 0.1
30 |         self.nmsthre = 0.7
31 |         self.no_aug_epochs = 1
32 |         self.basic_lr_per_img = 0.001 / 64.0
33 |         self.warmup_epochs = 1
34 | 
35 |     def get_eval_loader(self, args_, batch_size, is_distributed, testdev=False):
36 |         from yolox.data import ValTransform
37 |         from yolox.data.datasets.mot_bdd import MOTDataset
38 | 
39 |         if testdev:
40 |             valdataset = MOTDataset(
41 |                 data_dir='/datasets/bdd100k/images/track',
42 |                 json_file=self.test_ann,
43 |                 img_size=self.test_size,
44 |                 name='test',
45 |                 preproc=ValTransform(
46 |                     rgb_means=(0.485, 0.456, 0.406),
47 |                     std=(0.229, 0.224, 0.225),
48 |                 ),
49 |                 resize=args_.resize
50 |             )
51 |         else:
52 |             valdataset = MOTDataset(
53 |                 data_dir='/datasets/bdd100k/images/track',
54 |                 json_file=self.val_ann,
55 |                 img_size=self.test_size,
56 |                 name='val',
57 | 
preproc=ValTransform( 58 | rgb_means=(0.485, 0.456, 0.406), 59 | std=(0.229, 0.224, 0.225), 60 | ), 61 | resize=args_.resize 62 | ) 63 | 64 | if is_distributed: 65 | batch_size = batch_size // dist.get_world_size() 66 | sampler = torch.utils.data.distributed.DistributedSampler( 67 | valdataset, shuffle=False 68 | ) 69 | else: 70 | sampler = torch.utils.data.SequentialSampler(valdataset) 71 | 72 | dataloader_kwargs = { 73 | "num_workers": self.data_num_workers, 74 | "pin_memory": True, 75 | "sampler": sampler, 76 | } 77 | dataloader_kwargs["batch_size"] = batch_size 78 | val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs) 79 | 80 | return val_loader 81 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # TODO: Update with exact module version 2 | numpy==1.23.4 3 | opencv_python 4 | loguru 5 | scikit-image 6 | tqdm 7 | Pillow 8 | thop 9 | ninja 10 | tabulate 11 | tensorboard 12 | lap 13 | motmetrics 14 | filterpy 15 | h5py 16 | 17 | 18 | # verified versions 19 | onnx==1.8.1 20 | onnxruntime==1.8.0 21 | onnx-simplifier==0.3.5 22 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Copyright (c) Megvii, Inc. and its affiliates. All Rights Reserved 3 | 4 | import re 5 | import setuptools 6 | import glob 7 | from os import path 8 | import torch 9 | from torch.utils.cpp_extension import CppExtension 10 | 11 | torch_ver = [int(x) for x in torch.__version__.split(".")[:2]] 12 | assert torch_ver >= [1, 3], "Requires PyTorch >= 1.3" 13 | 14 | 15 | def get_extensions(): 16 | this_dir = path.dirname(path.abspath(__file__)) 17 | extensions_dir = path.join(this_dir, "yolox", "layers", "csrc") 18 | 19 | main_source = path.join(extensions_dir, "vision.cpp") 20 | sources = glob.glob(path.join(extensions_dir, "**", "*.cpp")) 21 | 22 | sources = [main_source] + sources 23 | extension = CppExtension 24 | 25 | extra_compile_args = {"cxx": ["-O3"]} 26 | define_macros = [] 27 | 28 | include_dirs = [extensions_dir] 29 | 30 | ext_modules = [ 31 | extension( 32 | "yolox._C", 33 | sources, 34 | include_dirs=include_dirs, 35 | define_macros=define_macros, 36 | extra_compile_args=extra_compile_args, 37 | ) 38 | ] 39 | 40 | return ext_modules 41 | 42 | 43 | with open("yolox/__init__.py", "r") as f: 44 | version = re.search( 45 | r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', 46 | f.read(), re.MULTILINE 47 | ).group(1) 48 | 49 | 50 | with open("README.md", "r") as f: 51 | long_description = f.read() 52 | 53 | 54 | setuptools.setup( 55 | name="yolox", 56 | version=version, 57 | author="basedet team", 58 | python_requires=">=3.6", 59 | long_description=long_description, 60 | ext_modules=get_extensions(), 61 | classifiers=["Programming Language :: Python :: 3", "Operating System :: OS Independent"], 62 | cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension}, 63 | packages=setuptools.find_namespace_packages(), 64 | ) 65 | -------------------------------------------------------------------------------- /tools/convert_bdd100k_to_coco.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import json 4 | import cv2 5 | 6 | 7 | # Use the same script for MOT16 8 | DATA_PATH = 'datasets/detections_GHOST/bdd100k' 9 | image_PATH = 
'datasets/bdd100k/images/track'
10 | OUT_PATH = os.path.join('datasets/bdd100k/images/track', 'annotations')
11 | 
12 | # SPLITS = ['train', 'val', 'test']  # --> split training data to train_half and val_half.
13 | SPLITS = ['val']
14 | HALF_VIDEO = True
15 | CREATE_SPLITTED_ANN = True
16 | CREATE_SPLITTED_DET = True
17 | 
18 | class NpEncoder(json.JSONEncoder):
19 |     def default(self, obj):
20 |         if isinstance(obj, np.integer):
21 |             return int(obj)
22 |         elif isinstance(obj, np.floating):
23 |             return float(obj)
24 |         elif isinstance(obj, np.ndarray):
25 |             return obj.tolist()
26 |         else:
27 |             return super(NpEncoder, self).default(obj)
28 | 
29 | 
30 | if __name__ == '__main__':
31 | 
32 |     if not os.path.exists(OUT_PATH):
33 |         os.makedirs(OUT_PATH)
34 | 
35 |     for split in SPLITS:
36 |         data_path = os.path.join(DATA_PATH, split)
37 |         out_path = os.path.join(OUT_PATH, '{}.json'.format(split))
38 |         out = {'images': [], 'annotations': [], 'videos': [],
39 |                'categories': [{'id': 1, 'name': 'pedestrian'}]}
40 |         seqs = os.listdir(data_path)
41 |         image_cnt = 0
42 |         ann_cnt = 0
43 |         video_cnt = 0
44 |         tid_curr = 0
45 |         tid_last = -1
46 |         for seq in sorted(seqs):
47 |             video_cnt += 1  # video sequence number.
48 |             # if video_cnt == 2: break
49 |             out['videos'].append({'id': video_cnt, 'file_name': seq})
50 |             seq_path = os.path.join(image_PATH, split, seq)
51 |             images = sorted(os.listdir(seq_path))
52 |             num_images = len([image for image in images if 'jpg' in image])
53 | 
54 |             # per-sequence detection file (YOLOX detections, one row per box)
55 |             det_path = os.path.join('datasets/detections_GHOST/bdd100k', split, seq, 'det/yolox_dets.txt')
56 |             dets = np.loadtxt(det_path, dtype=np.float32, delimiter=',')
57 |             frame_index = dets[:, 0]
58 |             dets = dets[:, 2:8]
59 |             dets[:, 2:4] += dets[:, :2]
60 | 
61 | 
62 | 
63 | 
64 |             for i in range(num_images):
65 |                 img = cv2.imread(os.path.join(image_PATH, split, '{}/{}'.format(seq, images[i])))
66 |                 height, width = img.shape[:2]
67 |                 # store the detection results in the json file; format: (n, 6) = (x1, y1, x2, y2, score, cls)
68 |                 index = frame_index == (i + 1)
69 |                 det = dets[index]
70 | 
71 |                 image_info = {'file_name': '{}/{}'.format(seq, images[i]),  # image name.
72 |                               'id': image_cnt + i + 1,  # image number in the entire training set.
73 |                               'frame_id': i + 1,  # image number in the video sequence, starting from 1.
74 |                               'prev_image_id': image_cnt + i if i > 0 else -1,  # image number in the entire training set.
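                              # the 'detection' field below keeps this frame's (n, 6) detector output inline,
                              # so downstream code can read boxes straight from the annotation json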
75 |                               'next_image_id': image_cnt + i + 2 if i < num_images - 1 else -1,
76 |                               'video_id': video_cnt,
77 |                               'height': height, 'width': width,
78 |                               'detection': det
79 |                               }
80 | 
81 |                 out['images'].append(image_info)
82 | 
83 | 
84 | 
85 | 
86 | 
87 |             print('{}: {} images'.format(seq, num_images))
88 |             image_cnt += num_images
89 |             print(tid_curr, tid_last)
90 | 
91 | 
92 | 
93 | 
94 |         print('loaded {} for {} images and {} samples'.format(split, len(out['images']), len(out['annotations'])))
95 |         json.dump(out, open(out_path, 'w'), cls=NpEncoder)
--------------------------------------------------------------------------------
/tools/txt2json_down.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | import json
4 | import cv2
5 | 
6 | downsample_factor = 4
7 | 
8 | # Use the same script for MOT16
9 | DATA_PATH = '.../track_results_down4'
10 | video_PATH = '/bdd100k/images/track/val'
11 | OUT_PATH = DATA_PATH + 'jsonclsfusion1'
12 | 
13 | BDD_NAME_MAPPING = {
14 |     1: "pedestrian", 2: "rider", 3: "car", 4: "truck",
15 |     5: "bus", 6: "train", 7: "motorcycle", 8: "bicycle"}
16 | 
17 | if __name__ == '__main__':
18 |     if not os.path.exists(OUT_PATH):
19 |         os.makedirs(OUT_PATH)
20 | 
21 |     seqs = os.listdir(DATA_PATH)
22 |     for seq in sorted(seqs):
23 |         txt_path = os.path.join(DATA_PATH, seq)
24 |         dets = np.loadtxt(txt_path, dtype=np.float32, delimiter=',').astype(float)
25 | 
26 |         # get the frame count, then loop over every frame
27 |         video_name = seq.split('.')[0]
28 |         img_path = os.path.join(video_PATH, video_name)
29 |         imgs_name = sorted(os.listdir(img_path))
30 | 
31 |         if (len(imgs_name) % int(downsample_factor)) == 0:
32 |             frame_num = int(len(imgs_name) / int(downsample_factor))
33 |         else:
34 |             frame_num = int(len(imgs_name) / int(downsample_factor)) + 1
35 | 
36 |         # unify the class label of every track_id here
37 |         track_id = dets[:, 1]
38 |         t_id = list(set(list(track_id.tolist())))
39 |         for id in t_id:
40 |             # all annotations belonging to this id
41 |             a = track_id == id
42 |             ann = dets[a]
43 |             c = ann[:, 7]
44 |             c = np.array(list(c.tolist()), dtype=int)
45 |             count = np.bincount(c)
46 |             c_ = np.argmax(count)  # majority class over the whole track
47 |             if c.max() != c_ or c.min() != c_:
48 |                 print('inconsistent class labels for track id', id)
49 |             dets[a, 7] = c_
50 | 
51 |         out = []
52 |         for i in range(frame_num):
53 |             index = dets[:, 0] == (i + 1)
54 |             det = dets[index]
55 |             labels = []
56 |             for j in range(det.shape[0]):
57 |                 box2d_tlwh = det[j][2:6]
58 |                 box2d_tlwh[2:4] += box2d_tlwh[:2]
59 | 
60 |                 box2d = {'x1': box2d_tlwh[0],
61 |                          'y1': box2d_tlwh[1],
62 |                          'x2': box2d_tlwh[2],
63 |                          'y2': box2d_tlwh[3]
64 |                          }
65 |                 # one dict per object
66 |                 a = int(det[j][7])
67 |                 object_info = {'category': BDD_NAME_MAPPING[1 + int(det[j][7])],
68 |                                'id': int(det[j][1]),
69 |                                'box2d': box2d,
70 |                                'score': det[j][6]
71 |                                }
72 |                 labels.append(object_info)
73 | 
74 |             # pack this frame's info into one dict
75 |             image_info = {'video_name': video_name,
76 |                           'name': video_name + '/' + imgs_name[i * int(downsample_factor)],
77 |                           'index': i,  # image number in the video sequence, starting from 0.
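                          # one record per sampled frame; the BDD100K-style json carries the per-object
                          # category/id/box2d/score entries under 'labels'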
78 |                           'labels': labels
79 |                           }
80 |             # image_info = {
81 |             #     'name': video_name + '/' + imgs_name[i],
82 |             #     'labels': labels
83 |             # }
84 | 
85 | 
86 | 
87 |             out.append(image_info)
88 |         b = os.path.join(OUT_PATH, video_name + '.json')
89 |         json.dump(out, open(os.path.join(OUT_PATH, video_name + '.json'), 'w'))
--------------------------------------------------------------------------------
/tools/txt2json_trackeval.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | import json
4 | import cv2
5 | 
6 | 
7 | # Use the same script for MOT16
8 | DATA_PATH = 'YOLOX_outputs/yolox_x/BDD100K/track_results'
9 | video_PATH = 'datasets/bdd100k/images/track/val'
10 | OUT_PATH = DATA_PATH + 'jsonclsfusion1'
11 | 
12 | BDD_NAME_MAPPING = {
13 |     1: "pedestrian", 2: "rider", 3: "car", 4: "truck",
14 |     5: "bus", 6: "train", 7: "motorcycle", 8: "bicycle"}
15 | 
16 | if __name__ == '__main__':
17 |     if not os.path.exists(OUT_PATH):
18 |         os.makedirs(OUT_PATH)
19 | 
20 |     seqs = os.listdir(DATA_PATH)
21 |     for seq in sorted(seqs):
22 |         txt_path = os.path.join(DATA_PATH, seq)
23 |         dets = np.loadtxt(txt_path, dtype=np.float32, delimiter=',').astype(float)
24 | 
25 |         # get the frame count, then loop over every frame
26 |         video_name = seq.split('.')[0]
27 |         img_path = os.path.join(video_PATH, video_name)
28 |         imgs_name = sorted(os.listdir(img_path))
29 | 
30 |         frame_num = len(imgs_name)
31 | 
32 |         # unify the class label of every track_id here
33 |         track_id = dets[:, 1]
34 |         t_id = list(set(list(track_id.tolist())))
35 |         for id in t_id:
36 |             # all annotations belonging to this id
37 |             a = track_id == id
38 |             ann = dets[a]
39 |             c = ann[:, 7]
40 |             c = np.array(list(c.tolist()), dtype=int)
41 |             count = np.bincount(c)
42 |             c_ = np.argmax(count)  # majority class over the whole track
43 |             if c.max() != c_ or c.min() != c_:
44 |                 print('inconsistent class labels for track id', id)
45 |             dets[a, 7] = c_
46 | 
47 |         out = []
48 |         for i in range(frame_num):
49 |             index = dets[:, 0] == (i + 1)
50 |             det = dets[index]
51 |             labels = []
52 |             for j in range(det.shape[0]):
53 |                 box2d_tlwh = det[j][2:6]
54 |                 box2d_tlwh[2:4] += box2d_tlwh[:2]
55 | 
56 |                 box2d = {'x1': box2d_tlwh[0],
57 |                          'y1': box2d_tlwh[1],
58 |                          'x2': box2d_tlwh[2],
59 |                          'y2': box2d_tlwh[3]
60 |                          }
61 |                 # one dict per object
62 |                 a = int(det[j][7])
63 |                 object_info = {'category': BDD_NAME_MAPPING[1 + int(det[j][7])],
64 |                                'id': int(det[j][1]),
65 |                                'box2d': box2d,
66 |                                'score': det[j][6]
67 |                                }
68 |                 labels.append(object_info)
69 | 
70 |             image_info = {'video_name': video_name,
71 |                           'name': video_name + '/' + imgs_name[i],
72 |                           'index': i,  # image number in the video sequence, starting from 0.
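                          # TrackEval's BDD100K loader presumably matches frames by 'name'/'index';
                          # 'video_name' looks redundant but is kept for convenience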
73 |                           'labels': labels
74 |                           }
75 | 
76 | 
77 | 
78 |             out.append(image_info)
79 |         b = os.path.join(OUT_PATH, video_name + '.json')
80 |         json.dump(out, open(os.path.join(OUT_PATH, video_name + '.json'), 'w'))
--------------------------------------------------------------------------------
/tools/txt2json_web.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | import json
4 | import cv2
5 | 
6 | 
7 | DATA_PATH = 'YOLOX_outputs/yolox_x/BDD100K/track_resultstest'
8 | video_PATH = 'datasets/bdd100k/images/track/test'
9 | OUT_PATH = DATA_PATH + '_json'
10 | 
11 | BDD_NAME_MAPPING = {
12 |     1: "pedestrian", 2: "rider", 3: "car", 4: "truck",
13 |     5: "bus", 6: "train", 7: "motorcycle", 8: "bicycle"}
14 | 
15 | if __name__ == '__main__':
16 |     if not os.path.exists(OUT_PATH):
17 |         os.makedirs(OUT_PATH)
18 | 
19 |     seqs = os.listdir(DATA_PATH)
20 |     for seq in sorted(seqs):
21 |         txt_path = os.path.join(DATA_PATH, seq)
22 |         dets = np.loadtxt(txt_path, dtype=np.float32, delimiter=',').astype(float)
23 | 
24 |         # get the frame count, then loop over every frame
25 |         video_name = seq.split('.')[0]
26 |         img_path = os.path.join(video_PATH, video_name)
27 |         imgs_name = sorted(os.listdir(img_path))
28 | 
29 |         frame_num = len(imgs_name)
30 | 
31 |         # unify the class label of every track_id here
32 |         track_id = dets[:, 1]
33 |         t_id = list(set(list(track_id.tolist())))
34 |         for id in t_id:
35 |             # all annotations belonging to this id
36 |             a = track_id == id
37 |             ann = dets[a]
38 |             c = ann[:, 7]
39 |             c = np.array(list(c.tolist()), dtype=int)
40 |             count = np.bincount(c)
41 |             c_ = np.argmax(count)  # majority class over the whole track
42 |             if c.max() != c_ or c.min() != c_:
43 |                 print('inconsistent class labels for track id', id)
44 |             dets[a, 7] = c_
45 | 
46 |         out = []
47 |         for i in range(frame_num):
48 |             index = dets[:, 0] == (i + 1)
49 |             det = dets[index]
50 |             labels = []
51 |             for j in range(det.shape[0]):
52 |                 box2d_tlwh = det[j][2:6]
53 |                 box2d_tlwh[2:4] += box2d_tlwh[:2]
54 | 
55 |                 box2d = {'x1': box2d_tlwh[0],
56 |                          'y1': box2d_tlwh[1],
57 |                          'x2': box2d_tlwh[2],
58 |                          'y2': box2d_tlwh[3]
59 |                          }
60 | 
61 |                 a = int(det[j][7])
62 |                 object_info = {'category': BDD_NAME_MAPPING[1 + int(det[j][7])],
63 |                                'id': int(det[j][1]),
64 |                                'box2d': box2d,
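                               # 'score' is the tracker confidence from column 6 of the txt row
                               # (frame, id, x, y, w, h, score, cls)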
65 | 'score': det[j][6] 66 | } 67 | labels.append(object_info) 68 | 69 | image_info = { 70 | 'name': video_name + '/' + imgs_name[i], # imgs_name 71 | 'labels': labels 72 | } 73 | 74 | 75 | 76 | out.append(image_info) 77 | b = os.path.join(OUT_PATH, video_name+'.json') 78 | json.dump(out, open(os.path.join(OUT_PATH, video_name+'.json'), 'w')) -------------------------------------------------------------------------------- /yolox/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | 4 | from .utils import configure_module 5 | 6 | configure_module() 7 | 8 | __version__ = "0.1.0" 9 | -------------------------------------------------------------------------------- /yolox/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/core/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | # Copyright (c) Megvii, Inc. and its affiliates. 4 | 5 | from .launch import launch 6 | from .trainer import Trainer 7 | -------------------------------------------------------------------------------- /yolox/core/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/core/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/core/__pycache__/launch.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/core/__pycache__/launch.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/core/__pycache__/trainer.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/core/__pycache__/trainer.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/data/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | # Copyright (c) Megvii, Inc. and its affiliates. 
4 | 
5 | from .data_augment import TrainTransform, ValTransform
6 | from .data_prefetcher import DataPrefetcher
7 | from .dataloading import DataLoader, get_yolox_datadir
8 | from .datasets import *
9 | from .samplers import InfiniteSampler, YoloBatchSampler
10 | 
--------------------------------------------------------------------------------
/yolox/data/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/data/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/data/__pycache__/data_augment.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/data/__pycache__/data_augment.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/data/__pycache__/data_prefetcher.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/data/__pycache__/data_prefetcher.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/data/__pycache__/dataloading.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/data/__pycache__/dataloading.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/data/__pycache__/samplers.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/data/__pycache__/samplers.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/data/data_prefetcher.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) Megvii, Inc. and its affiliates.
4 | 
5 | import torch
6 | import torch.distributed as dist
7 | 
8 | from yolox.utils import synchronize
9 | 
10 | import random
11 | 
12 | 
13 | class DataPrefetcher:
14 |     """
15 |     DataPrefetcher is inspired by the code in the following file:
16 |     https://github.com/NVIDIA/apex/blob/master/examples/imagenet/main_amp.py
17 |     It can speed up your PyTorch dataloader. For more information, please check
18 |     https://github.com/NVIDIA/apex/issues/304#issuecomment-493562789.
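    Each call to next() returns a batch that was already copied to the GPU on a
    side CUDA stream while the previous batch was being consumed.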
19 | """ 20 | 21 | def __init__(self, loader): 22 | self.loader = iter(loader) 23 | self.stream = torch.cuda.Stream() 24 | self.input_cuda = self._input_cuda_for_image 25 | self.record_stream = DataPrefetcher._record_stream_for_image 26 | self.preload() 27 | 28 | def preload(self): 29 | try: 30 | self.next_input, self.next_target, _, _ = next(self.loader) 31 | except StopIteration: 32 | self.next_input = None 33 | self.next_target = None 34 | return 35 | 36 | with torch.cuda.stream(self.stream): 37 | self.input_cuda() 38 | self.next_target = self.next_target.cuda(non_blocking=True) 39 | 40 | def next(self): 41 | torch.cuda.current_stream().wait_stream(self.stream) 42 | input = self.next_input 43 | target = self.next_target 44 | if input is not None: 45 | self.record_stream(input) 46 | if target is not None: 47 | target.record_stream(torch.cuda.current_stream()) 48 | self.preload() 49 | return input, target 50 | 51 | def _input_cuda_for_image(self): 52 | self.next_input = self.next_input.cuda(non_blocking=True) 53 | 54 | @staticmethod 55 | def _record_stream_for_image(input): 56 | input.record_stream(torch.cuda.current_stream()) 57 | 58 | 59 | def random_resize(data_loader, exp, epoch, rank, is_distributed): 60 | tensor = torch.LongTensor(1).cuda() 61 | if is_distributed: 62 | synchronize() 63 | 64 | if rank == 0: 65 | if epoch > exp.max_epoch - 10: 66 | size = exp.input_size 67 | else: 68 | size = random.randint(*exp.random_size) 69 | size = int(32 * size) 70 | tensor.fill_(size) 71 | 72 | if is_distributed: 73 | synchronize() 74 | dist.broadcast(tensor, 0) 75 | 76 | input_size = data_loader.change_input_dim(multiple=tensor.item(), random_range=None) 77 | return input_size 78 | -------------------------------------------------------------------------------- /yolox/data/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | # Copyright (c) Megvii, Inc. and its affiliates. 
4 | 
5 | from .datasets_wrapper import ConcatDataset, Dataset, MixConcatDataset
6 | from .mosaicdetection import MosaicDetection
7 | from .mot import MOTDataset
8 | 
--------------------------------------------------------------------------------
/yolox/data/datasets/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/data/datasets/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/data/datasets/__pycache__/datasets_wrapper.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/data/datasets/__pycache__/datasets_wrapper.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/data/datasets/__pycache__/mosaicdetection.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/data/datasets/__pycache__/mosaicdetection.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/data/datasets/__pycache__/mot.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/data/datasets/__pycache__/mot.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/data/datasets/__pycache__/mot_bdd.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/data/datasets/__pycache__/mot_bdd.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/data/samplers.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) Megvii, Inc. and its affiliates.
4 | 
5 | import torch
6 | import torch.distributed as dist
7 | from torch.utils.data.sampler import BatchSampler as torchBatchSampler
8 | from torch.utils.data.sampler import Sampler
9 | 
10 | import itertools
11 | from typing import Optional
12 | 
13 | 
14 | class YoloBatchSampler(torchBatchSampler):
15 |     """
16 |     This batch sampler will generate mini-batches of (dim, index) tuples from another sampler.
17 |     It works just like the :class:`torch.utils.data.sampler.BatchSampler`,
18 |     but it will prepend a dimension, whilst ensuring it stays the same across one mini-batch.
19 |     """
20 | 
21 |     def __init__(self, *args, input_dimension=None, mosaic=True, **kwargs):
22 |         super().__init__(*args, **kwargs)
23 |         self.input_dim = input_dimension
24 |         self.new_input_dim = None
25 |         self.mosaic = mosaic
26 | 
27 |     def __iter__(self):
28 |         self.__set_input_dim()
29 |         for batch in super().__iter__():
30 |             yield [(self.input_dim, idx, self.mosaic) for idx in batch]
31 |             self.__set_input_dim()
32 | 
33 |     def __set_input_dim(self):
34 |         """ This function randomly changes the input dimension of the dataset.
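        It only takes effect once new_input_dim has been set (e.g. through the
        loader's change_input_dim, as used by random_resize in data_prefetcher.py);
        the check runs after every yielded mini-batch.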
""" 35 | if self.new_input_dim is not None: 36 | self.input_dim = (self.new_input_dim[0], self.new_input_dim[1]) 37 | self.new_input_dim = None 38 | 39 | 40 | class InfiniteSampler(Sampler): 41 | """ 42 | In training, we only care about the "infinite stream" of training data. 43 | So this sampler produces an infinite stream of indices and 44 | all workers cooperate to correctly shuffle the indices and sample different indices. 45 | The samplers in each worker effectively produces `indices[worker_id::num_workers]` 46 | where `indices` is an infinite stream of indices consisting of 47 | `shuffle(range(size)) + shuffle(range(size)) + ...` (if shuffle is True) 48 | or `range(size) + range(size) + ...` (if shuffle is False) 49 | """ 50 | 51 | def __init__( 52 | self, 53 | size: int, 54 | shuffle: bool = True, 55 | seed: Optional[int] = 0, 56 | rank=0, 57 | world_size=1, 58 | ): 59 | """ 60 | Args: 61 | size (int): the total number of data of the underlying dataset to sample from 62 | shuffle (bool): whether to shuffle the indices or not 63 | seed (int): the initial seed of the shuffle. Must be the same 64 | across all workers. If None, will use a random seed shared 65 | among workers (require synchronization among all workers). 66 | """ 67 | self._size = size 68 | assert size > 0 69 | self._shuffle = shuffle 70 | self._seed = int(seed) 71 | 72 | if dist.is_available() and dist.is_initialized(): 73 | self._rank = dist.get_rank() 74 | self._world_size = dist.get_world_size() 75 | else: 76 | self._rank = rank 77 | self._world_size = world_size 78 | 79 | def __iter__(self): 80 | start = self._rank 81 | yield from itertools.islice( 82 | self._infinite_indices(), start, None, self._world_size 83 | ) 84 | 85 | def _infinite_indices(self): 86 | g = torch.Generator() 87 | g.manual_seed(self._seed) 88 | while True: 89 | if self._shuffle: 90 | yield from torch.randperm(self._size, generator=g) 91 | else: 92 | yield from torch.arange(self._size) 93 | 94 | def __len__(self): 95 | return self._size // self._world_size 96 | -------------------------------------------------------------------------------- /yolox/deepsort_tracker/__pycache__/deepsort.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/deepsort_tracker/__pycache__/deepsort.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/deepsort_tracker/__pycache__/detection.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/deepsort_tracker/__pycache__/detection.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/deepsort_tracker/__pycache__/iou_matching.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/deepsort_tracker/__pycache__/iou_matching.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/deepsort_tracker/__pycache__/kalman_filter.cpython-38.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/deepsort_tracker/__pycache__/kalman_filter.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/deepsort_tracker/__pycache__/linear_assignment.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/deepsort_tracker/__pycache__/linear_assignment.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/deepsort_tracker/__pycache__/reid_model.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/deepsort_tracker/__pycache__/reid_model.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/deepsort_tracker/__pycache__/track.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/deepsort_tracker/__pycache__/track.cpython-38.pyc
--------------------------------------------------------------------------------
/yolox/deepsort_tracker/detection.py:
--------------------------------------------------------------------------------
1 | # vim: expandtab:ts=4:sw=4
2 | import numpy as np
3 | 
4 | 
5 | class Detection(object):
6 |     """
7 |     This class represents a bounding box detection in a single image.
8 |     Parameters
9 |     ----------
10 |     tlwh : array_like
11 |         Bounding box in format `(x, y, w, h)`.
12 |     confidence : float
13 |         Detector confidence score.
14 |     feature : array_like
15 |         A feature vector that describes the object contained in this image.
16 |     Attributes
17 |     ----------
18 |     tlwh : ndarray
19 |         Bounding box in format `(top left x, top left y, width, height)`.
20 |     confidence : float
21 |         Detector confidence score.
22 |     feature : ndarray | NoneType
23 |         A feature vector that describes the object contained in this image.
24 |     """
25 | 
26 |     def __init__(self, tlwh, confidence, feature):
27 |         self.tlwh = np.asarray(tlwh, dtype=float)  # np.float was removed in NumPy 1.24; plain float is equivalent
28 |         self.confidence = float(confidence)
29 |         self.feature = np.asarray(feature, dtype=np.float32)
30 | 
31 |     def to_tlbr(self):
32 |         """Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
33 |         `(top left, bottom right)`.
34 |         """
35 |         ret = self.tlwh.copy()
36 |         ret[2:] += ret[:2]
37 |         return ret
38 | 
39 |     def to_xyah(self):
40 |         """Convert bounding box to format `(center x, center y, aspect ratio,
41 |         height)`, where the aspect ratio is `width / height`.
42 |         """
43 |         ret = self.tlwh.copy()
44 |         ret[:2] += ret[2:] / 2
45 |         ret[2] /= ret[3]
46 |         return ret
--------------------------------------------------------------------------------
/yolox/deepsort_tracker/iou_matching.py:
--------------------------------------------------------------------------------
1 | # vim: expandtab:ts=4:sw=4
2 | from __future__ import absolute_import
3 | import numpy as np
4 | from yolox.deepsort_tracker import linear_assignment
5 | 
6 | 
7 | def iou(bbox, candidates):
8 |     """Compute intersection over union.
9 |     Parameters
10 |     ----------
11 |     bbox : ndarray
12 |         A bounding box in format `(top left x, top left y, width, height)`.
13 | candidates : ndarray 14 | A matrix of candidate bounding boxes (one per row) in the same format 15 | as `bbox`. 16 | Returns 17 | ------- 18 | ndarray 19 | The intersection over union in [0, 1] between the `bbox` and each 20 | candidate. A higher score means a larger fraction of the `bbox` is 21 | occluded by the candidate. 22 | """ 23 | bbox_tl, bbox_br = bbox[:2], bbox[:2] + bbox[2:] 24 | candidates_tl = candidates[:, :2] 25 | candidates_br = candidates[:, :2] + candidates[:, 2:] 26 | 27 | tl = np.c_[np.maximum(bbox_tl[0], candidates_tl[:, 0])[:, np.newaxis], 28 | np.maximum(bbox_tl[1], candidates_tl[:, 1])[:, np.newaxis]] 29 | br = np.c_[np.minimum(bbox_br[0], candidates_br[:, 0])[:, np.newaxis], 30 | np.minimum(bbox_br[1], candidates_br[:, 1])[:, np.newaxis]] 31 | wh = np.maximum(0., br - tl) 32 | 33 | area_intersection = wh.prod(axis=1) 34 | area_bbox = bbox[2:].prod() 35 | area_candidates = candidates[:, 2:].prod(axis=1) 36 | return area_intersection / (area_bbox + area_candidates - area_intersection) 37 | 38 | 39 | def iou_cost(tracks, detections, track_indices=None, 40 | detection_indices=None): 41 | """An intersection over union distance metric. 42 | Parameters 43 | ---------- 44 | tracks : List[deep_sort.track.Track] 45 | A list of tracks. 46 | detections : List[deep_sort.detection.Detection] 47 | A list of detections. 48 | track_indices : Optional[List[int]] 49 | A list of indices to tracks that should be matched. Defaults to 50 | all `tracks`. 51 | detection_indices : Optional[List[int]] 52 | A list of indices to detections that should be matched. Defaults 53 | to all `detections`. 54 | Returns 55 | ------- 56 | ndarray 57 | Returns a cost matrix of shape 58 | len(track_indices), len(detection_indices) where entry (i, j) is 59 | `1 - iou(tracks[track_indices[i]], detections[detection_indices[j]])`. 60 | """ 61 | if track_indices is None: 62 | track_indices = np.arange(len(tracks)) 63 | if detection_indices is None: 64 | detection_indices = np.arange(len(detections)) 65 | 66 | cost_matrix = np.zeros((len(track_indices), len(detection_indices))) 67 | for row, track_idx in enumerate(track_indices): 68 | if tracks[track_idx].time_since_update > 1: 69 | cost_matrix[row, :] = linear_assignment.INFTY_COST 70 | continue 71 | 72 | bbox = tracks[track_idx].to_tlwh() 73 | candidates = np.asarray( 74 | [detections[i].tlwh for i in detection_indices]) 75 | cost_matrix[row, :] = 1. - iou(bbox, candidates) 76 | return cost_matrix -------------------------------------------------------------------------------- /yolox/evaluators/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | # Copyright (c) Megvii, Inc. and its affiliates. 
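# Two evaluator front-ends: COCOEvaluator computes detection mAP, while MOTEvaluator runs the multi-object-tracking pipelines.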
4 | 5 | from .coco_evaluator import COCOEvaluator 6 | from .mot_evaluator import MOTEvaluator 7 | -------------------------------------------------------------------------------- /yolox/evaluators/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/evaluators/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/evaluators/__pycache__/coco_evaluator.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/evaluators/__pycache__/coco_evaluator.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/evaluators/__pycache__/mot_evaluator.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/evaluators/__pycache__/mot_evaluator.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/exp/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. 4 | 5 | from .base_exp import BaseExp 6 | from .build import get_exp 7 | from .yolox_base import Exp 8 | -------------------------------------------------------------------------------- /yolox/exp/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/exp/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/exp/__pycache__/base_exp.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/exp/__pycache__/base_exp.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/exp/__pycache__/build.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/exp/__pycache__/build.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/exp/__pycache__/yolox_base.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/exp/__pycache__/yolox_base.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/exp/base_exp.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. 
4 | 
5 | import torch
6 | from torch.nn import Module
7 | 
8 | from yolox.utils import LRScheduler
9 | 
10 | import ast
11 | import pprint
12 | from abc import ABCMeta, abstractmethod
13 | from tabulate import tabulate
14 | from typing import Dict
15 | 
16 | 
17 | class BaseExp(metaclass=ABCMeta):
18 |     """Basic class for any experiment."""
19 | 
20 |     def __init__(self):
21 |         self.seed = None
22 |         self.output_dir = "./YOLOX_outputs"
23 |         self.print_interval = 100
24 |         self.eval_interval = 10
25 | 
26 |     @abstractmethod
27 |     def get_model(self) -> Module:
28 |         pass
29 | 
30 |     @abstractmethod
31 |     def get_data_loader(
32 |         self, batch_size: int, is_distributed: bool
33 |     ) -> Dict[str, torch.utils.data.DataLoader]:
34 |         pass
35 | 
36 |     @abstractmethod
37 |     def get_optimizer(self, batch_size: int) -> torch.optim.Optimizer:
38 |         pass
39 | 
40 |     @abstractmethod
41 |     def get_lr_scheduler(
42 |         self, lr: float, iters_per_epoch: int, **kwargs
43 |     ) -> LRScheduler:
44 |         pass
45 | 
46 |     @abstractmethod
47 |     def get_evaluator(self):
48 |         pass
49 | 
50 |     @abstractmethod
51 |     def eval(self, model, evaluator, weights):
52 |         pass
53 | 
54 |     def __repr__(self):
55 |         table_header = ["keys", "values"]
56 |         exp_table = [
57 |             (str(k), pprint.pformat(v))
58 |             for k, v in vars(self).items()
59 |             if not k.startswith("_")
60 |         ]
61 |         return tabulate(exp_table, headers=table_header, tablefmt="fancy_grid")
62 | 
63 |     def merge(self, cfg_list):
64 |         assert len(cfg_list) % 2 == 0
65 |         for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
66 |             # only update value with same key
67 |             if hasattr(self, k):
68 |                 src_value = getattr(self, k)
69 |                 src_type = type(src_value)
70 |                 if src_value is not None and src_type != type(v):
71 |                     try:
72 |                         v = src_type(v)
73 |                     except Exception:
74 |                         v = ast.literal_eval(v)
75 |                 setattr(self, k, v)
76 | 
--------------------------------------------------------------------------------
/yolox/exp/build.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
4 | 
5 | import importlib
6 | import os
7 | import sys
8 | 
9 | 
10 | def get_exp_by_file(exp_file):
11 |     try:
12 |         sys.path.append(os.path.dirname(exp_file))
13 |         current_exp = importlib.import_module(os.path.basename(exp_file).split(".")[0])
14 |         exp = current_exp.Exp()
15 |     except Exception:
16 |         raise ImportError("{} doesn't contain a class named 'Exp'".format(exp_file))
17 |     return exp
18 | 
19 | 
20 | def get_exp_by_name(exp_name):
21 |     import yolox
22 | 
23 |     yolox_path = os.path.dirname(os.path.dirname(yolox.__file__))
24 |     filedict = {
25 |         "yolox-s": "yolox_s.py",
26 |         "yolox-m": "yolox_m.py",
27 |         "yolox-l": "yolox_l.py",
28 |         "yolox-x": "yolox_x.py",
29 |         "yolox-tiny": "yolox_tiny.py",
30 |         "yolox-nano": "nano.py",
31 |         "yolov3": "yolov3.py",
32 |     }
33 |     filename = filedict[exp_name]
34 |     exp_path = os.path.join(yolox_path, "exps", "default", filename)
35 |     return get_exp_by_file(exp_path)
36 | 
37 | 
38 | def get_exp(exp_file, exp_name):
39 |     """
40 |     get Exp object by file or name. If exp_file and exp_name
41 |     are both provided, get Exp by exp_file.
42 | 
43 |     Args:
44 |         exp_file (str): file path of experiment.
45 |         exp_name (str): name of experiment, e.g. "yolox-s".
46 |     """
47 |     assert (
48 |         exp_file is not None or exp_name is not None
49 |     ), "please provide exp file or exp name."
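    # exp_file takes precedence when both are given (see the docstring above)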
50 | if exp_file is not None: 51 | return get_exp_by_file(exp_file) 52 | else: 53 | return get_exp_by_name(exp_name) 54 | -------------------------------------------------------------------------------- /yolox/layers/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. 4 | 5 | from .fast_coco_eval_api import COCOeval_opt 6 | -------------------------------------------------------------------------------- /yolox/layers/csrc/cocoeval/cocoeval.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | #pragma once 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | namespace py = pybind11; 11 | 12 | namespace COCOeval { 13 | 14 | // Annotation data for a single object instance in an image 15 | struct InstanceAnnotation { 16 | InstanceAnnotation( 17 | uint64_t id, 18 | double score, 19 | double area, 20 | bool is_crowd, 21 | bool ignore) 22 | : id{id}, score{score}, area{area}, is_crowd{is_crowd}, ignore{ignore} {} 23 | uint64_t id; 24 | double score = 0.; 25 | double area = 0.; 26 | bool is_crowd = false; 27 | bool ignore = false; 28 | }; 29 | 30 | // Stores intermediate results for evaluating detection results for a single 31 | // image that has D detected instances and G ground truth instances. This stores 32 | // matches between detected and ground truth instances 33 | struct ImageEvaluation { 34 | // For each of the D detected instances, the id of the matched ground truth 35 | // instance, or 0 if unmatched 36 | std::vector detection_matches; 37 | 38 | // The detection score of each of the D detected instances 39 | std::vector detection_scores; 40 | 41 | // Marks whether or not each of G instances was ignored from evaluation (e.g., 42 | // because it's outside area_range) 43 | std::vector ground_truth_ignores; 44 | 45 | // Marks whether or not each of D instances was ignored from evaluation (e.g., 46 | // because it's outside aRng) 47 | std::vector detection_ignores; 48 | }; 49 | 50 | template 51 | using ImageCategoryInstances = std::vector>>; 52 | 53 | // C++ implementation of COCO API cocoeval.py::COCOeval.evaluateImg(). For each 54 | // combination of image, category, area range settings, and IOU thresholds to 55 | // evaluate, it matches detected instances to ground truth instances and stores 56 | // the results into a vector of ImageEvaluation results, which will be 57 | // interpreted by the COCOeval::Accumulate() function to produce precion-recall 58 | // curves. 
-------------------------------------------------------------------------------- /yolox/layers/csrc/cocoeval/cocoeval.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | #pragma once 3 | 4 | #include <pybind11/numpy.h> 5 | #include <pybind11/pybind11.h> 6 | #include <pybind11/stl.h> 7 | #include <pybind11/stl_bind.h> 8 | #include <vector> 9 | 10 | namespace py = pybind11; 11 | 12 | namespace COCOeval { 13 | 14 | // Annotation data for a single object instance in an image 15 | struct InstanceAnnotation { 16 | InstanceAnnotation( 17 | uint64_t id, 18 | double score, 19 | double area, 20 | bool is_crowd, 21 | bool ignore) 22 | : id{id}, score{score}, area{area}, is_crowd{is_crowd}, ignore{ignore} {} 23 | uint64_t id; 24 | double score = 0.; 25 | double area = 0.; 26 | bool is_crowd = false; 27 | bool ignore = false; 28 | }; 29 | 30 | // Stores intermediate results for evaluating detection results for a single 31 | // image that has D detected instances and G ground truth instances. This stores 32 | // matches between detected and ground truth instances 33 | struct ImageEvaluation { 34 | // For each of the D detected instances, the id of the matched ground truth 35 | // instance, or 0 if unmatched 36 | std::vector<uint64_t> detection_matches; 37 | 38 | // The detection score of each of the D detected instances 39 | std::vector<double> detection_scores; 40 | 41 | // Marks whether or not each of G instances was ignored from evaluation (e.g., 42 | // because it's outside area_range) 43 | std::vector<bool> ground_truth_ignores; 44 | 45 | // Marks whether or not each of D instances was ignored from evaluation (e.g., 46 | // because it's outside aRng) 47 | std::vector<bool> detection_ignores; 48 | }; 49 | 50 | template <typename T> 51 | using ImageCategoryInstances = std::vector<std::vector<std::vector<T>>>; 52 | 53 | // C++ implementation of COCO API cocoeval.py::COCOeval.evaluateImg(). For each 54 | // combination of image, category, area range settings, and IOU thresholds to 55 | // evaluate, it matches detected instances to ground truth instances and stores 56 | // the results into a vector of ImageEvaluation results, which will be 57 | // interpreted by the COCOeval::Accumulate() function to produce precision-recall 58 | // curves. The parameters of nested vectors have the following semantics: 59 | // image_category_ious[i][c][d][g] is the intersection over union of the d'th 60 | // detected instance and g'th ground truth instance of 61 | // category category_ids[c] in image image_ids[i] 62 | // image_category_ground_truth_instances[i][c] is a vector of ground truth 63 | // instances in image image_ids[i] of category category_ids[c] 64 | // image_category_detection_instances[i][c] is a vector of detected 65 | // instances in image image_ids[i] of category category_ids[c] 66 | std::vector<ImageEvaluation> EvaluateImages( 67 | const std::vector<std::array<double, 2>>& area_ranges, // vector of 2-tuples 68 | int max_detections, 69 | const std::vector<double>& iou_thresholds, 70 | const ImageCategoryInstances<std::vector<double>>& image_category_ious, 71 | const ImageCategoryInstances<InstanceAnnotation>& 72 | image_category_ground_truth_instances, 73 | const ImageCategoryInstances<InstanceAnnotation>& 74 | image_category_detection_instances); 75 | 76 | // C++ implementation of COCOeval.accumulate(), which generates precision 77 | // recall curves for each set of category, IOU threshold, detection area range, 78 | // and max number of detections parameters. It is assumed that the parameter 79 | // evaluations is the return value of the function COCOeval::EvaluateImages(), 80 | // which was called with the same parameter settings params 81 | py::dict Accumulate( 82 | const py::object& params, 83 | const std::vector<ImageEvaluation>& evaluations); 84 | 85 | } // namespace COCOeval 86 | -------------------------------------------------------------------------------- /yolox/layers/csrc/vision.cpp: -------------------------------------------------------------------------------- 1 | #include "cocoeval/cocoeval.h" 2 | 3 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 4 | m.def("COCOevalAccumulate", &COCOeval::Accumulate, "COCOeval::Accumulate"); 5 | m.def( 6 | "COCOevalEvaluateImages", 7 | &COCOeval::EvaluateImages, 8 | "COCOeval::EvaluateImages"); 9 | pybind11::class_<COCOeval::InstanceAnnotation>(m, "InstanceAnnotation") 10 | .def(pybind11::init<uint64_t, double, double, bool, bool>()); 11 | pybind11::class_<COCOeval::ImageEvaluation>(m, "ImageEvaluation") 12 | .def(pybind11::init<>()); 13 | } 14 | -------------------------------------------------------------------------------- /yolox/models/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
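The models package re-exports the building blocks imported just below, so a detector can be assembled directly from them. A hedged sketch; the depth/width multipliers and class count here are illustrative, not taken from a shipped config:

```python
import torch
from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead

backbone = YOLOPAFPN(depth=1.33, width=1.25)  # yolox-x scale multipliers
head = YOLOXHead(num_classes=1, width=1.25)   # e.g. a single pedestrian class
model = YOLOX(backbone, head).eval()
with torch.no_grad():
    outputs = model(torch.randn(1, 3, 640, 640))  # decoded predictions in eval mode
```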
4 | 5 | from .darknet import CSPDarknet, Darknet 6 | from .losses import IOUloss 7 | from .yolo_fpn import YOLOFPN 8 | from .yolo_head import YOLOXHead 9 | from .yolo_pafpn import YOLOPAFPN 10 | from .yolox import YOLOX 11 | -------------------------------------------------------------------------------- /yolox/models/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/models/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/models/__pycache__/darknet.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/models/__pycache__/darknet.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/models/__pycache__/losses.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/models/__pycache__/losses.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/models/__pycache__/network_blocks.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/models/__pycache__/network_blocks.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/models/__pycache__/yolo_fpn.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/models/__pycache__/yolo_fpn.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/models/__pycache__/yolo_head.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/models/__pycache__/yolo_head.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/models/__pycache__/yolo_pafpn.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/models/__pycache__/yolo_pafpn.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/models/__pycache__/yolox.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/models/__pycache__/yolox.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/models/losses.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- encoding: utf-8 -*- 3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. 
4 | 5 | import torch 6 | import torch.nn as nn 7 | import torch.nn.functional as F 8 | 9 | 10 | class IOUloss(nn.Module): 11 | def __init__(self, reduction="none", loss_type="iou"): 12 | super(IOUloss, self).__init__() 13 | self.reduction = reduction 14 | self.loss_type = loss_type 15 | 16 | def forward(self, pred, target): 17 | assert pred.shape[0] == target.shape[0] 18 | 19 | pred = pred.view(-1, 4) 20 | target = target.view(-1, 4) 21 | tl = torch.max( 22 | (pred[:, :2] - pred[:, 2:] / 2), (target[:, :2] - target[:, 2:] / 2) 23 | ) 24 | br = torch.min( 25 | (pred[:, :2] + pred[:, 2:] / 2), (target[:, :2] + target[:, 2:] / 2) 26 | ) 27 | 28 | area_p = torch.prod(pred[:, 2:], 1) 29 | area_g = torch.prod(target[:, 2:], 1) 30 | 31 | en = (tl < br).type(tl.type()).prod(dim=1) 32 | area_i = torch.prod(br - tl, 1) * en 33 | iou = (area_i) / (area_p + area_g - area_i + 1e-16) 34 | 35 | if self.loss_type == "iou": 36 | loss = 1 - iou ** 2 37 | elif self.loss_type == "giou": 38 | c_tl = torch.min( 39 | (pred[:, :2] - pred[:, 2:] / 2), (target[:, :2] - target[:, 2:] / 2) 40 | ) 41 | c_br = torch.max( 42 | (pred[:, :2] + pred[:, 2:] / 2), (target[:, :2] + target[:, 2:] / 2) 43 | ) 44 | area_c = torch.prod(c_br - c_tl, 1) 45 | giou = iou - (area_c - area_i) / area_c.clamp(1e-16) 46 | loss = 1 - giou.clamp(min=-1.0, max=1.0) 47 | 48 | if self.reduction == "mean": 49 | loss = loss.mean() 50 | elif self.reduction == "sum": 51 | loss = loss.sum() 52 | 53 | return loss 54 | 55 | 56 | def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2): 57 | """ 58 | Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. 59 | Args: 60 | inputs: A float tensor of arbitrary shape. 61 | The predictions for each example. 62 | targets: A float tensor with the same shape as inputs. Stores the binary 63 | classification label for each element in inputs 64 | (0 for the negative class and 1 for the positive class). 65 | alpha: (optional) Weighting factor in range (0,1) to balance 66 | positive vs negative examples. Default: 0.25; set to -1 to disable weighting. 67 | gamma: Exponent of the modulating factor (1 - p_t) to 68 | balance easy vs hard examples. 69 | Returns: 70 | Loss tensor 71 | """ 72 | prob = inputs.sigmoid() 73 | ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none") 74 | p_t = prob * targets + (1 - prob) * (1 - targets) 75 | loss = ce_loss * ((1 - p_t) ** gamma) 76 | 77 | if alpha >= 0: 78 | alpha_t = alpha * targets + (1 - alpha) * (1 - targets) 79 | loss = alpha_t * loss 80 | #return loss.mean(0).sum() / num_boxes 81 | return loss.sum() / num_boxes
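A hedged usage sketch for the two losses above; boxes are center-format (cx, cy, w, h), and all tensor values are made up for illustration:

```python
import torch

# IOUloss expects center-format boxes; "giou" adds the enclosing-box penalty.
crit = IOUloss(reduction="mean", loss_type="giou")
pred = torch.tensor([[50.0, 50.0, 20.0, 20.0]])
target = torch.tensor([[55.0, 55.0, 20.0, 20.0]])
box_loss = crit(pred, target)

# sigmoid_focal_loss takes raw logits and 0/1 targets, normalized by box count.
logits = torch.randn(4, 80)
labels = torch.zeros(4, 80)
cls_loss = sigmoid_focal_loss(logits, labels, num_boxes=4)
```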
-------------------------------------------------------------------------------- /yolox/models/yolo_fpn.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- encoding: utf-8 -*- 3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. 4 | 5 | import torch 6 | import torch.nn as nn 7 | 8 | from .darknet import Darknet 9 | from .network_blocks import BaseConv 10 | 11 | 12 | class YOLOFPN(nn.Module): 13 | """ 14 | YOLOFPN module. Darknet 53 is the default backbone of this model. 15 | """ 16 | 17 | def __init__( 18 | self, 19 | depth=53, 20 | in_features=["dark3", "dark4", "dark5"], 21 | ): 22 | super().__init__() 23 | 24 | self.backbone = Darknet(depth) 25 | self.in_features = in_features 26 | 27 | # out 1 28 | self.out1_cbl = self._make_cbl(512, 256, 1) 29 | self.out1 = self._make_embedding([256, 512], 512 + 256) 30 | 31 | # out 2 32 | self.out2_cbl = self._make_cbl(256, 128, 1) 33 | self.out2 = self._make_embedding([128, 256], 256 + 128) 34 | 35 | # upsample 36 | self.upsample = nn.Upsample(scale_factor=2, mode="nearest") 37 | 38 | def _make_cbl(self, _in, _out, ks): 39 | return BaseConv(_in, _out, ks, stride=1, act="lrelu") 40 | 41 | def _make_embedding(self, filters_list, in_filters): 42 | m = nn.Sequential( 43 | *[ 44 | self._make_cbl(in_filters, filters_list[0], 1), 45 | self._make_cbl(filters_list[0], filters_list[1], 3), 46 | self._make_cbl(filters_list[1], filters_list[0], 1), 47 | self._make_cbl(filters_list[0], filters_list[1], 3), 48 | self._make_cbl(filters_list[1], filters_list[0], 1), 49 | ] 50 | ) 51 | return m 52 | 53 | def load_pretrained_model(self, filename="./weights/darknet53.mix.pth"): 54 | with open(filename, "rb") as f: 55 | state_dict = torch.load(f, map_location="cpu") 56 | print("loading pretrained weights...") 57 | self.backbone.load_state_dict(state_dict) 58 | 59 | def forward(self, inputs): 60 | """ 61 | Args: 62 | inputs (Tensor): input image. 63 | 64 | Returns: 65 | Tuple[Tensor]: FPN output features. 66 | """ 67 | # backbone 68 | out_features = self.backbone(inputs) 69 | x2, x1, x0 = [out_features[f] for f in self.in_features] 70 | 71 | # yolo branch 1 72 | x1_in = self.out1_cbl(x0) 73 | x1_in = self.upsample(x1_in) 74 | x1_in = torch.cat([x1_in, x1], 1) 75 | out_dark4 = self.out1(x1_in) 76 | 77 | # yolo branch 2 78 | x2_in = self.out2_cbl(out_dark4) 79 | x2_in = self.upsample(x2_in) 80 | x2_in = torch.cat([x2_in, x2], 1) 81 | out_dark3 = self.out2(x2_in) 82 | 83 | outputs = (out_dark3, out_dark4, x0) 84 | return outputs 85 | -------------------------------------------------------------------------------- /yolox/models/yolo_pafpn.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- encoding: utf-8 -*- 3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. 4 | 5 | import torch 6 | import torch.nn as nn 7 | 8 | from .darknet import CSPDarknet 9 | from .network_blocks import BaseConv, CSPLayer, DWConv 10 | 11 | 12 | class YOLOPAFPN(nn.Module): 13 | """ 14 | YOLOPAFPN module. CSPDarknet is the default backbone of this model.
15 | """ 16 | 17 | def __init__( 18 | self, 19 | depth=1.0, 20 | width=1.0, 21 | in_features=("dark3", "dark4", "dark5"), 22 | in_channels=[256, 512, 1024], 23 | depthwise=False, 24 | act="silu", 25 | ): 26 | super().__init__() 27 | self.backbone = CSPDarknet(depth, width, depthwise=depthwise, act=act) 28 | self.in_features = in_features 29 | self.in_channels = in_channels 30 | Conv = DWConv if depthwise else BaseConv 31 | 32 | self.upsample = nn.Upsample(scale_factor=2, mode="nearest") 33 | self.lateral_conv0 = BaseConv( 34 | int(in_channels[2] * width), int(in_channels[1] * width), 1, 1, act=act 35 | ) 36 | self.C3_p4 = CSPLayer( 37 | int(2 * in_channels[1] * width), 38 | int(in_channels[1] * width), 39 | round(3 * depth), 40 | False, 41 | depthwise=depthwise, 42 | act=act, 43 | ) # cat 44 | 45 | self.reduce_conv1 = BaseConv( 46 | int(in_channels[1] * width), int(in_channels[0] * width), 1, 1, act=act 47 | ) 48 | self.C3_p3 = CSPLayer( 49 | int(2 * in_channels[0] * width), 50 | int(in_channels[0] * width), 51 | round(3 * depth), 52 | False, 53 | depthwise=depthwise, 54 | act=act, 55 | ) 56 | 57 | # bottom-up conv 58 | self.bu_conv2 = Conv( 59 | int(in_channels[0] * width), int(in_channels[0] * width), 3, 2, act=act 60 | ) 61 | self.C3_n3 = CSPLayer( 62 | int(2 * in_channels[0] * width), 63 | int(in_channels[1] * width), 64 | round(3 * depth), 65 | False, 66 | depthwise=depthwise, 67 | act=act, 68 | ) 69 | 70 | # bottom-up conv 71 | self.bu_conv1 = Conv( 72 | int(in_channels[1] * width), int(in_channels[1] * width), 3, 2, act=act 73 | ) 74 | self.C3_n4 = CSPLayer( 75 | int(2 * in_channels[1] * width), 76 | int(in_channels[2] * width), 77 | round(3 * depth), 78 | False, 79 | depthwise=depthwise, 80 | act=act, 81 | ) 82 | 83 | def forward(self, input): 84 | """ 85 | Args: 86 | inputs: input images. 87 | 88 | Returns: 89 | Tuple[Tensor]: FPN feature. 90 | """ 91 | 92 | # backbone 93 | out_features = self.backbone(input) 94 | features = [out_features[f] for f in self.in_features] 95 | [x2, x1, x0] = features 96 | 97 | fpn_out0 = self.lateral_conv0(x0) # 1024->512/32 98 | f_out0 = self.upsample(fpn_out0) # 512/16 99 | f_out0 = torch.cat([f_out0, x1], 1) # 512->1024/16 100 | f_out0 = self.C3_p4(f_out0) # 1024->512/16 101 | 102 | fpn_out1 = self.reduce_conv1(f_out0) # 512->256/16 103 | f_out1 = self.upsample(fpn_out1) # 256/8 104 | f_out1 = torch.cat([f_out1, x2], 1) # 256->512/8 105 | pan_out2 = self.C3_p3(f_out1) # 512->256/8 106 | 107 | p_out1 = self.bu_conv2(pan_out2) # 256->256/16 108 | p_out1 = torch.cat([p_out1, fpn_out1], 1) # 256->512/16 109 | pan_out1 = self.C3_n3(p_out1) # 512->512/16 110 | 111 | p_out0 = self.bu_conv1(pan_out1) # 512->512/32 112 | p_out0 = torch.cat([p_out0, fpn_out0], 1) # 512->1024/32 113 | pan_out0 = self.C3_n4(p_out0) # 1024->1024/32 114 | 115 | outputs = (pan_out2, pan_out1, pan_out0) 116 | return outputs 117 | -------------------------------------------------------------------------------- /yolox/models/yolox.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- encoding: utf-8 -*- 3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. 4 | 5 | import torch.nn as nn 6 | 7 | from .yolo_head import YOLOXHead 8 | from .yolo_pafpn import YOLOPAFPN 9 | 10 | 11 | class YOLOX(nn.Module): 12 | """ 13 | YOLOX model module. The module list is defined by create_yolov3_modules function. 14 | The network returns loss values from three YOLO layers during training 15 | and detection results during test. 
16 | """ 17 | 18 | def __init__(self, backbone=None, head=None): 19 | super().__init__() 20 | if backbone is None: 21 | backbone = YOLOPAFPN() 22 | if head is None: 23 | head = YOLOXHead(80) 24 | 25 | self.backbone = backbone 26 | self.head = head 27 | 28 | def forward(self, x, targets=None): 29 | # fpn output content features of [dark3, dark4, dark5] 30 | fpn_outs = self.backbone(x) 31 | 32 | if self.training: 33 | assert targets is not None 34 | loss, iou_loss, conf_loss, cls_loss, l1_loss, num_fg = self.head( 35 | fpn_outs, targets, x 36 | ) 37 | outputs = { 38 | "total_loss": loss, 39 | "iou_loss": iou_loss, 40 | "l1_loss": l1_loss, 41 | "conf_loss": conf_loss, 42 | "cls_loss": cls_loss, 43 | "num_fg": num_fg, 44 | } 45 | else: 46 | outputs = self.head(fpn_outs) 47 | 48 | return outputs 49 | -------------------------------------------------------------------------------- /yolox/motdt_tracker/__pycache__/basetrack.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/motdt_tracker/__pycache__/basetrack.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/motdt_tracker/__pycache__/kalman_filter.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/motdt_tracker/__pycache__/kalman_filter.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/motdt_tracker/__pycache__/matching.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/motdt_tracker/__pycache__/matching.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/motdt_tracker/__pycache__/motdt_tracker.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/motdt_tracker/__pycache__/motdt_tracker.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/motdt_tracker/__pycache__/reid_model.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/motdt_tracker/__pycache__/reid_model.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/motdt_tracker/basetrack.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from collections import OrderedDict 3 | 4 | 5 | class TrackState(object): 6 | New = 0 7 | Tracked = 1 8 | Lost = 2 9 | Removed = 3 10 | Replaced = 4 11 | 12 | 13 | class BaseTrack(object): 14 | _count = 0 15 | 16 | track_id = 0 17 | is_activated = False 18 | state = TrackState.New 19 | 20 | history = OrderedDict() 21 | features = [] 22 | curr_feature = None 23 | score = 0 24 | start_frame = 0 25 | frame_id = 0 26 | time_since_update = 0 27 | 28 | # multi-camera 29 | location = (np.inf, np.inf) 30 | 31 | @property 32 | def end_frame(self): 33 | return self.frame_id 34 | 35 | @staticmethod 36 | def 
next_id(): 37 | BaseTrack._count += 1 38 | return BaseTrack._count 39 | 40 | def activate(self, *args): 41 | raise NotImplementedError 42 | 43 | def predict(self): 44 | raise NotImplementedError 45 | 46 | def update(self, *args, **kwargs): 47 | raise NotImplementedError 48 | 49 | def mark_lost(self): 50 | self.state = TrackState.Lost 51 | 52 | def mark_removed(self): 53 | self.state = TrackState.Removed 54 | 55 | def mark_replaced(self): 56 | self.state = TrackState.Replaced 57 | -------------------------------------------------------------------------------- /yolox/motdt_tracker/matching.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | import lap 4 | from scipy.spatial.distance import cdist 5 | 6 | from cython_bbox import bbox_overlaps as bbox_ious 7 | from yolox.motdt_tracker import kalman_filter 8 | 9 | 10 | def _indices_to_matches(cost_matrix, indices, thresh): 11 | matched_cost = cost_matrix[tuple(zip(*indices))] 12 | matched_mask = (matched_cost <= thresh) 13 | 14 | matches = indices[matched_mask] 15 | unmatched_a = tuple(set(range(cost_matrix.shape[0])) - set(matches[:, 0])) 16 | unmatched_b = tuple(set(range(cost_matrix.shape[1])) - set(matches[:, 1])) 17 | 18 | return matches, unmatched_a, unmatched_b 19 | 20 | 21 | def linear_assignment(cost_matrix, thresh): 22 | if cost_matrix.size == 0: 23 | return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1])) 24 | matches, unmatched_a, unmatched_b = [], [], [] 25 | cost, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh) 26 | for ix, mx in enumerate(x): 27 | if mx >= 0: 28 | matches.append([ix, mx]) 29 | unmatched_a = np.where(x < 0)[0] 30 | unmatched_b = np.where(y < 0)[0] 31 | matches = np.asarray(matches) 32 | return matches, unmatched_a, unmatched_b 33 |
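`linear_assignment()` above wraps `lap.lapjv` with a cost ceiling: pairs whose assignment cost exceeds `thresh` are left unmatched. A hedged sketch with a made-up cost matrix:

```python
import numpy as np

cost = np.array([[0.2, 0.9],
                 [0.8, 0.3]])
matches, u_a, u_b = linear_assignment(cost, thresh=0.7)
# matches -> [[0, 0], [1, 1]]; u_a/u_b hold row/column indices left unmatched
```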
34 | 35 | def ious(atlbrs, btlbrs): 36 | """ 37 | Compute cost based on IoU 38 | :type atlbrs: list[tlbr] | np.ndarray 39 | :type btlbrs: list[tlbr] | np.ndarray 40 | :rtype ious np.ndarray 41 | """ 42 | ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=float) 43 | if ious.size == 0: 44 | return ious 45 | 46 | ious = bbox_ious( 47 | np.ascontiguousarray(atlbrs, dtype=float), 48 | np.ascontiguousarray(btlbrs, dtype=float) 49 | ) 50 | 51 | return ious 52 | 53 | 54 | def iou_distance(atracks, btracks): 55 | """ 56 | Compute cost based on IoU 57 | :type atracks: list[STrack] 58 | :type btracks: list[STrack] 59 | :rtype cost_matrix np.ndarray 60 | """ 61 | atlbrs = [track.tlbr for track in atracks] 62 | btlbrs = [track.tlbr for track in btracks] 63 | _ious = ious(atlbrs, btlbrs) 64 | cost_matrix = 1 - _ious 65 | 66 | return cost_matrix 67 | 68 | 69 | def nearest_reid_distance(tracks, detections, metric='cosine'): 70 | """ 71 | Compute cost based on ReID features 72 | :type tracks: list[STrack] 73 | :type detections: list[BaseTrack] 74 | :rtype cost_matrix np.ndarray 75 | """ 76 | cost_matrix = np.zeros((len(tracks), len(detections)), dtype=float) 77 | if cost_matrix.size == 0: 78 | return cost_matrix 79 | 80 | det_features = np.asarray([track.curr_feature for track in detections], dtype=np.float32) 81 | for i, track in enumerate(tracks): 82 | cost_matrix[i, :] = np.maximum(0.0, cdist(track.features, det_features, metric).min(axis=0)) 83 | 84 | return cost_matrix 85 | 86 | 87 | def mean_reid_distance(tracks, detections, metric='cosine'): 88 | """ 89 | Compute cost based on ReID features 90 | :type tracks: list[STrack] 91 | :type detections: list[BaseTrack] 92 | :type metric: str 93 | :rtype cost_matrix np.ndarray 94 | """ 95 | cost_matrix = np.empty((len(tracks), len(detections)), dtype=float) 96 | if cost_matrix.size == 0: 97 | return cost_matrix 98 | 99 | track_features = np.asarray([track.curr_feature for track in tracks], dtype=np.float32) 100 | det_features = np.asarray([track.curr_feature for track in detections], dtype=np.float32) 101 | cost_matrix = cdist(track_features, det_features, metric) 102 | 103 | return cost_matrix 104 | 105 | 106 | def gate_cost_matrix(kf, cost_matrix, tracks, detections, only_position=False): 107 | if cost_matrix.size == 0: 108 | return cost_matrix 109 | gating_dim = 2 if only_position else 4 110 | gating_threshold = kalman_filter.chi2inv95[gating_dim] 111 | measurements = np.asarray([det.to_xyah() for det in detections]) 112 | for row, track in enumerate(tracks): 113 | gating_distance = kf.gating_distance( 114 | track.mean, track.covariance, measurements, only_position) 115 | cost_matrix[row, gating_distance > gating_threshold] = np.inf 116 | return cost_matrix -------------------------------------------------------------------------------- /yolox/sort_tracker/__pycache__/sort.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/sort_tracker/__pycache__/sort.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/tracker/__pycache__/basetrack.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/tracker/__pycache__/basetrack.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/tracker/__pycache__/byte_tracker_bdd.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/tracker/__pycache__/byte_tracker_bdd.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/tracker/__pycache__/kalman_filter.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/tracker/__pycache__/kalman_filter.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/tracker/__pycache__/matching_bdd.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/tracker/__pycache__/matching_bdd.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/tracker/basetrack.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from collections import OrderedDict 3 | 4 | 5 | class TrackState(object): 6 | New = 0 7 | Tracked = 1 8 | Lost = 2 9 | Removed = 3 10 | 11 | 12 | class BaseTrack(object): 13 | _count = 0 14 | 15 | track_id = 0 16 | is_activated = False 17 | state = TrackState.New 18 | 19 | history = OrderedDict() 20 | features = [] 21 | curr_feature = None 22 | score = 0 23 | start_frame = 0 24
| frame_id = 0 25 | time_since_update = 0 26 | 27 | # multi-camera 28 | location = (np.inf, np.inf) 29 | 30 | @property 31 | def end_frame(self): 32 | return self.frame_id 33 | 34 | @staticmethod 35 | def next_id(): 36 | BaseTrack._count += 1 37 | return BaseTrack._count 38 | 39 | def activate(self, *args): 40 | raise NotImplementedError 41 | 42 | def predict(self): 43 | raise NotImplementedError 44 | 45 | def update(self, *args, **kwargs): 46 | raise NotImplementedError 47 | 48 | def mark_lost(self): 49 | self.state = TrackState.Lost 50 | 51 | def mark_removed(self): 52 | self.state = TrackState.Removed 53 | -------------------------------------------------------------------------------- /yolox/tracking_utils/io.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import Dict 3 | import numpy as np 4 | 5 | 6 | def write_results(filename, results_dict: Dict, data_type: str): 7 | if not filename: 8 | return 9 | path = os.path.dirname(filename) 10 | if not os.path.exists(path): 11 | os.makedirs(path) 12 | 13 | if data_type in ('mot', 'mcmot', 'lab'): 14 | save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n' 15 | elif data_type == 'kitti': 16 | save_format = '{frame} {id} pedestrian -1 -1 -10 {x1} {y1} {x2} {y2} -1 -1 -1 -1000 -1000 -1000 -10 {score}\n' 17 | else: 18 | raise ValueError(data_type) 19 | 20 | with open(filename, 'w') as f: 21 | for frame_id, frame_data in results_dict.items(): 22 | if data_type == 'kitti': 23 | frame_id -= 1 24 | for tlwh, track_id in frame_data: 25 | if track_id < 0: 26 | continue 27 | x1, y1, w, h = tlwh 28 | x2, y2 = x1 + w, y1 + h 29 | line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h, score=1.0) 30 | f.write(line) 31 | 32 | 33 | def read_results(filename, data_type: str, is_gt=False, is_ignore=False): 34 | if data_type in ('mot', 'lab'): 35 | read_fun = read_mot_results 36 | else: 37 | raise ValueError('Unknown data type: {}'.format(data_type)) 38 | 39 | return read_fun(filename, is_gt, is_ignore) 40 | 41 | 42 | """ 43 | labels={'ped', ... % 1 44 | 'person_on_vhcl', ... % 2 45 | 'car', ... % 3 46 | 'bicycle', ... % 4 47 | 'mbike', ... % 5 48 | 'non_mot_vhcl', ... % 6 49 | 'static_person', ... % 7 50 | 'distractor', ... % 8 51 | 'occluder', ... % 9 52 | 'occluder_on_grnd', ... %10 53 | 'occluder_full', ... % 11 54 | 'reflection', ... % 12 55 | 'crowd' ... 
% 13 56 | }; 57 | """ 58 | 59 | 60 | def read_mot_results(filename, is_gt, is_ignore): 61 | valid_labels = {1} 62 | ignore_labels = {2, 7, 8, 12} 63 | results_dict = dict() 64 | if os.path.isfile(filename): 65 | with open(filename, 'r') as f: 66 | for line in f.readlines(): 67 | linelist = line.split(',') 68 | if len(linelist) < 7: 69 | continue 70 | fid = int(linelist[0]) 71 | if fid < 1: 72 | continue 73 | results_dict.setdefault(fid, list()) 74 | 75 | box_size = float(linelist[4]) * float(linelist[5]) 76 | 77 | if is_gt: 78 | if 'MOT16-' in filename or 'MOT17-' in filename: 79 | label = int(float(linelist[7])) 80 | mark = int(float(linelist[6])) 81 | if mark == 0 or label not in valid_labels: 82 | continue 83 | score = 1 84 | elif is_ignore: 85 | if 'MOT16-' in filename or 'MOT17-' in filename: 86 | label = int(float(linelist[7])) 87 | vis_ratio = float(linelist[8]) 88 | if label not in ignore_labels and vis_ratio >= 0: 89 | continue 90 | else: 91 | continue 92 | score = 1 93 | else: 94 | score = float(linelist[6]) 95 | 96 | #if box_size > 7000: 97 | #if box_size <= 7000 or box_size >= 15000: 98 | #if box_size < 15000: 99 | #continue 100 | 101 | tlwh = tuple(map(float, linelist[2:6])) 102 | target_id = int(linelist[1]) 103 | 104 | results_dict[fid].append((tlwh, target_id, score)) 105 | 106 | return results_dict 107 | 108 | 109 | def unzip_objs(objs): 110 | if len(objs) > 0: 111 | tlwhs, ids, scores = zip(*objs) 112 | else: 113 | tlwhs, ids, scores = [], [], [] 114 | tlwhs = np.asarray(tlwhs, dtype=float).reshape(-1, 4) 115 | 116 | return tlwhs, ids, scores -------------------------------------------------------------------------------- /yolox/tracking_utils/timer.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | 4 | class Timer(object): 5 | """A simple timer.""" 6 | def __init__(self): 7 | self.total_time = 0. 8 | self.calls = 0 9 | self.start_time = 0. 10 | self.diff = 0. 11 | self.average_time = 0. 12 | 13 | self.duration = 0. 14 | 15 | def tic(self): 16 | # using time.time instead of time.clock because time.clock 17 | # does not normalize for multithreading 18 | self.start_time = time.time() 19 | 20 | def toc(self, average=True): 21 | self.diff = time.time() - self.start_time 22 | self.total_time += self.diff 23 | self.calls += 1 24 | self.average_time = self.total_time / self.calls 25 | if average: 26 | self.duration = self.average_time 27 | else: 28 | self.duration = self.diff 29 | return self.duration 30 | 31 | def clear(self): 32 | self.total_time = 0. 33 | self.calls = 0 34 | self.start_time = 0. 35 | self.diff = 0. 36 | self.average_time = 0. 37 | self.duration = 0. -------------------------------------------------------------------------------- /yolox/utils/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
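A hedged usage sketch for the `Timer` class above, the way a tracking loop would use it to report a smoothed frame rate; the loop body is a placeholder:

```python
timer = Timer()
for frame_id in range(100):      # hypothetical frame loop
    timer.tic()
    ...                          # run detection + association here
    timer.toc()                  # average=True -> duration is the running mean
print("fps: {:.2f}".format(1.0 / max(timer.average_time, 1e-9)))
```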
4 | 5 | from .allreduce_norm import * 6 | from .boxes import * 7 | from .checkpoint import load_ckpt, save_checkpoint 8 | from .demo_utils import * 9 | from .dist import * 10 | from .ema import ModelEMA 11 | from .logger import setup_logger 12 | from .lr_scheduler import LRScheduler 13 | from .metric import * 14 | from .model_utils import * 15 | from .setup_env import * 16 | from .visualize import * 17 | -------------------------------------------------------------------------------- /yolox/utils/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/utils/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/utils/__pycache__/allreduce_norm.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/utils/__pycache__/allreduce_norm.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/utils/__pycache__/boxes.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/utils/__pycache__/boxes.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/utils/__pycache__/checkpoint.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/utils/__pycache__/checkpoint.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/utils/__pycache__/demo_utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/utils/__pycache__/demo_utils.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/utils/__pycache__/dist.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/utils/__pycache__/dist.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/utils/__pycache__/ema.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/utils/__pycache__/ema.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/utils/__pycache__/logger.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/utils/__pycache__/logger.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/utils/__pycache__/lr_scheduler.cpython-38.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/utils/__pycache__/lr_scheduler.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/utils/__pycache__/metric.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/utils/__pycache__/metric.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/utils/__pycache__/model_utils.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/utils/__pycache__/model_utils.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/utils/__pycache__/setup_env.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/utils/__pycache__/setup_env.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/utils/__pycache__/visualize.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qinzheng2000/GeneralTrack/dbb727bfb63eddd28e97b1f64462cbf7df1413c6/yolox/utils/__pycache__/visualize.cpython-38.pyc -------------------------------------------------------------------------------- /yolox/utils/allreduce_norm.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. 4 | 5 | import torch 6 | from torch import distributed as dist 7 | from torch import nn 8 | 9 | import pickle 10 | from collections import OrderedDict 11 | 12 | from .dist import _get_global_gloo_group, get_world_size 13 | 14 | ASYNC_NORM = ( 15 | nn.BatchNorm1d, 16 | nn.BatchNorm2d, 17 | nn.BatchNorm3d, 18 | nn.InstanceNorm1d, 19 | nn.InstanceNorm2d, 20 | nn.InstanceNorm3d, 21 | ) 22 | 23 | __all__ = [ 24 | "get_async_norm_states", 25 | "pyobj2tensor", 26 | "tensor2pyobj", 27 | "all_reduce", 28 | "all_reduce_norm", 29 | ] 30 | 31 | 32 | def get_async_norm_states(module): 33 | async_norm_states = OrderedDict() 34 | for name, child in module.named_modules(): 35 | if isinstance(child, ASYNC_NORM): 36 | for k, v in child.state_dict().items(): 37 | async_norm_states[".".join([name, k])] = v 38 | return async_norm_states 39 | 40 | 41 | def pyobj2tensor(pyobj, device="cuda"): 42 | """serialize picklable python object to tensor""" 43 | storage = torch.ByteStorage.from_buffer(pickle.dumps(pyobj)) 44 | return torch.ByteTensor(storage).to(device=device) 45 | 46 | 47 | def tensor2pyobj(tensor): 48 | """deserialize tensor to picklable python object""" 49 | return pickle.loads(tensor.cpu().numpy().tobytes()) 50 | 51 | 52 | def _get_reduce_op(op_name): 53 | return { 54 | "sum": dist.ReduceOp.SUM, 55 | "mean": dist.ReduceOp.SUM, 56 | }[op_name.lower()] 57 | 58 | 59 | def all_reduce(py_dict, op="sum", group=None): 60 | """ 61 | Apply all reduce function for python dict object. 62 | NOTE: make sure that every py_dict has the same keys and values are in the same shape. 
63 | 64 | Args: 65 | py_dict (dict): dict to apply all reduce op. 66 | op (str): operator, could be "sum" or "mean". 67 | """ 68 | world_size = get_world_size() 69 | if world_size == 1: 70 | return py_dict 71 | if group is None: 72 | group = _get_global_gloo_group() 73 | if dist.get_world_size(group) == 1: 74 | return py_dict 75 | 76 | # all reduce logic across different devices. 77 | py_key = list(py_dict.keys()) 78 | py_key_tensor = pyobj2tensor(py_key) 79 | dist.broadcast(py_key_tensor, src=0) 80 | py_key = tensor2pyobj(py_key_tensor) 81 | 82 | tensor_shapes = [py_dict[k].shape for k in py_key] 83 | tensor_numels = [py_dict[k].numel() for k in py_key] 84 | 85 | flatten_tensor = torch.cat([py_dict[k].flatten() for k in py_key]) 86 | dist.all_reduce(flatten_tensor, op=_get_reduce_op(op)) 87 | if op == "mean": 88 | flatten_tensor /= world_size 89 | 90 | split_tensors = [ 91 | x.reshape(shape) 92 | for x, shape in zip(torch.split(flatten_tensor, tensor_numels), tensor_shapes) 93 | ] 94 | return OrderedDict({k: v for k, v in zip(py_key, split_tensors)}) 95 | 96 | 97 | def all_reduce_norm(module): 98 | """ 99 | All reduce norm statistics in different devices. 100 | """ 101 | states = get_async_norm_states(module) 102 | states = all_reduce(states, op="mean") 103 | module.load_state_dict(states, strict=False) 104 | -------------------------------------------------------------------------------- /yolox/utils/checkpoint.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. 4 | from loguru import logger 5 | 6 | import torch 7 | 8 | import os 9 | import shutil 10 | 11 | 12 | def load_ckpt(model, ckpt): 13 | model_state_dict = model.state_dict() 14 | load_dict = {} 15 | for key_model, v in model_state_dict.items(): 16 | if key_model not in ckpt: 17 | logger.warning( 18 | "{} is not in the ckpt. Please double check and see if this is desired.".format( 19 | key_model 20 | ) 21 | ) 22 | continue 23 | v_ckpt = ckpt[key_model] 24 | if v.shape != v_ckpt.shape: 25 | logger.warning( 26 | "Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format( 27 | key_model, v_ckpt.shape, key_model, v.shape 28 | ) 29 | ) 30 | continue 31 | load_dict[key_model] = v_ckpt 32 | 33 | model.load_state_dict(load_dict, strict=False) 34 | return model 35 | 36 | 37 | def save_checkpoint(state, is_best, save_dir, model_name=""): 38 | if not os.path.exists(save_dir): 39 | os.makedirs(save_dir) 40 | filename = os.path.join(save_dir, model_name + "_ckpt.pth.tar") 41 | torch.save(state, filename) 42 | if is_best: 43 | best_filename = os.path.join(save_dir, "best_ckpt.pth.tar") 44 | shutil.copyfile(filename, best_filename) 45 | -------------------------------------------------------------------------------- /yolox/utils/demo_utils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. 
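A hedged sketch of the checkpoint helpers above: `load_ckpt()` skips missing or shape-mismatched keys with a warning instead of failing, which is what lets, say, a COCO-pretrained head load into a model with a different class count. The path and the `"model"` key follow the usual YOLOX convention but are hypothetical here, and `model` is assumed to come from `exp.get_model()`:

```python
import torch

ckpt = torch.load("checkpoints/some_ckpt.pth.tar", map_location="cpu")
model = load_ckpt(model, ckpt["model"])   # partial, shape-checked load
save_checkpoint(
    {"model": model.state_dict(), "start_epoch": 1},
    is_best=False,
    save_dir="YOLOX_outputs/exp0",
    model_name="latest",
)
```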
4 | 5 | import numpy as np 6 | 7 | import os 8 | 9 | __all__ = ["mkdir", "nms", "multiclass_nms", "demo_postprocess"] 10 | 11 | 12 | def mkdir(path): 13 | if not os.path.exists(path): 14 | os.makedirs(path) 15 | 16 | 17 | def nms(boxes, scores, nms_thr): 18 | """Single class NMS implemented in Numpy.""" 19 | x1 = boxes[:, 0] 20 | y1 = boxes[:, 1] 21 | x2 = boxes[:, 2] 22 | y2 = boxes[:, 3] 23 | 24 | areas = (x2 - x1 + 1) * (y2 - y1 + 1) 25 | order = scores.argsort()[::-1] 26 | 27 | keep = [] 28 | while order.size > 0: 29 | i = order[0] 30 | keep.append(i) 31 | xx1 = np.maximum(x1[i], x1[order[1:]]) 32 | yy1 = np.maximum(y1[i], y1[order[1:]]) 33 | xx2 = np.minimum(x2[i], x2[order[1:]]) 34 | yy2 = np.minimum(y2[i], y2[order[1:]]) 35 | 36 | w = np.maximum(0.0, xx2 - xx1 + 1) 37 | h = np.maximum(0.0, yy2 - yy1 + 1) 38 | inter = w * h 39 | ovr = inter / (areas[i] + areas[order[1:]] - inter) 40 | 41 | inds = np.where(ovr <= nms_thr)[0] 42 | order = order[inds + 1] 43 | 44 | return keep 45 | 46 | 47 | def multiclass_nms(boxes, scores, nms_thr, score_thr): 48 | """Multiclass NMS implemented in Numpy""" 49 | final_dets = [] 50 | num_classes = scores.shape[1] 51 | for cls_ind in range(num_classes): 52 | cls_scores = scores[:, cls_ind] 53 | valid_score_mask = cls_scores > score_thr 54 | if valid_score_mask.sum() == 0: 55 | continue 56 | else: 57 | valid_scores = cls_scores[valid_score_mask] 58 | valid_boxes = boxes[valid_score_mask] 59 | keep = nms(valid_boxes, valid_scores, nms_thr) 60 | if len(keep) > 0: 61 | cls_inds = np.ones((len(keep), 1)) * cls_ind 62 | dets = np.concatenate( 63 | [valid_boxes[keep], valid_scores[keep, None], cls_inds], 1 64 | ) 65 | final_dets.append(dets) 66 | if len(final_dets) == 0: 67 | return None 68 | return np.concatenate(final_dets, 0) 69 | 70 | 71 | def demo_postprocess(outputs, img_size, p6=False): 72 | 73 | grids = [] 74 | expanded_strides = [] 75 | 76 | if not p6: 77 | strides = [8, 16, 32] 78 | else: 79 | strides = [8, 16, 32, 64] 80 | 81 | hsizes = [img_size[0] // stride for stride in strides] 82 | wsizes = [img_size[1] // stride for stride in strides] 83 | 84 | for hsize, wsize, stride in zip(hsizes, wsizes, strides): 85 | xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize)) 86 | grid = np.stack((xv, yv), 2).reshape(1, -1, 2) 87 | grids.append(grid) 88 | shape = grid.shape[:2] 89 | expanded_strides.append(np.full((*shape, 1), stride)) 90 | 91 | grids = np.concatenate(grids, 1) 92 | expanded_strides = np.concatenate(expanded_strides, 1) 93 | outputs[..., :2] = (outputs[..., :2] + grids) * expanded_strides 94 | outputs[..., 2:4] = np.exp(outputs[..., 2:4]) * expanded_strides 95 | 96 | return outputs 97 | -------------------------------------------------------------------------------- /yolox/utils/ema.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. 4 | import torch 5 | import torch.nn as nn 6 | 7 | import math 8 | from copy import deepcopy 9 | 10 | 11 | def is_parallel(model): 12 | """check if model is in parallel mode.""" 13 | 14 | parallel_type = ( 15 | nn.parallel.DataParallel, 16 | nn.parallel.DistributedDataParallel, 17 | ) 18 | return isinstance(model, parallel_type) 19 | 20 | 21 | def copy_attr(a, b, include=(), exclude=()): 22 | # Copy attributes from b to a, options to only include [...] and to exclude [...] 
23 | for k, v in b.__dict__.items(): 24 | if (len(include) and k not in include) or k.startswith("_") or k in exclude: 25 | continue 26 | else: 27 | setattr(a, k, v) 28 | 29 | 30 | class ModelEMA: 31 | """ 32 | Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models 33 | Keep a moving average of everything in the model state_dict (parameters and buffers). 34 | This is intended to allow functionality like 35 | https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage 36 | A smoothed version of the weights is necessary for some training schemes to perform well. 37 | This class is sensitive to where it is initialized in the sequence of model init, 38 | GPU assignment and distributed training wrappers. 39 | """ 40 | 41 | def __init__(self, model, decay=0.9999, updates=0): 42 | """ 43 | Args: 44 | model (nn.Module): model to apply EMA. 45 | decay (float): EMA decay rate. 46 | updates (int): counter of EMA updates. 47 | """ 48 | # Create EMA(FP32) 49 | self.ema = deepcopy(model.module if is_parallel(model) else model).eval() 50 | self.updates = updates 51 | # decay exponential ramp (to help early epochs) 52 | self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) 53 | for p in self.ema.parameters(): 54 | p.requires_grad_(False) 55 | 56 | def update(self, model): 57 | # Update EMA parameters 58 | with torch.no_grad(): 59 | self.updates += 1 60 | d = self.decay(self.updates) 61 | 62 | msd = ( 63 | model.module.state_dict() if is_parallel(model) else model.state_dict() 64 | ) # model state_dict 65 | for k, v in self.ema.state_dict().items(): 66 | if v.dtype.is_floating_point: 67 | v *= d 68 | v += (1.0 - d) * msd[k].detach() 69 | 70 | def update_attr(self, model, include=(), exclude=("process_group", "reducer")): 71 | # Update EMA attributes 72 | copy_attr(self.ema, model, include, exclude) 73 | -------------------------------------------------------------------------------- /yolox/utils/logger.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. 4 | 5 | from loguru import logger 6 | 7 | import inspect 8 | import os 9 | import sys 10 | 11 | 12 | def get_caller_name(depth=0): 13 | """ 14 | Args: 15 | depth (int): Depth of caller context, use 0 for caller depth. Default value: 0. 16 | 17 | Returns: 18 | str: module name of the caller 19 | """ 20 | # the following logic is a little bit faster than inspect.stack() logic 21 | frame = inspect.currentframe().f_back 22 | for _ in range(depth): 23 | frame = frame.f_back 24 | 25 | return frame.f_globals["__name__"] 26 | 27 | 28 | class StreamToLoguru: 29 | """ 30 | stream object that redirects writes to a logger instance. 31 | """ 32 | 33 | def __init__(self, level="INFO", caller_names=("apex", "pycocotools")): 34 | """ 35 | Args: 36 | level(str): log level string of loguru. Default value: "INFO". 37 | caller_names(tuple): caller names of redirected module. 38 | Default value: (apex, pycocotools).
39 | """ 40 | self.level = level 41 | self.linebuf = "" 42 | self.caller_names = caller_names 43 | 44 | def write(self, buf): 45 | full_name = get_caller_name(depth=1) 46 | module_name = full_name.rsplit(".", maxsplit=-1)[0] 47 | if module_name in self.caller_names: 48 | for line in buf.rstrip().splitlines(): 49 | # use caller level log 50 | logger.opt(depth=2).log(self.level, line.rstrip()) 51 | else: 52 | sys.__stdout__.write(buf) 53 | 54 | def flush(self): 55 | pass 56 | 57 | 58 | def redirect_sys_output(log_level="INFO"): 59 | redirect_logger = StreamToLoguru(log_level) 60 | sys.stderr = redirect_logger 61 | sys.stdout = redirect_logger 62 | 63 | 64 | def setup_logger(save_dir, distributed_rank=0, filename="log.txt", mode="a"): 65 | """setup logger for training and testing. 66 | Args: 67 | save_dir(str): location to save log file 68 | distributed_rank(int): device rank when multi-gpu environment 69 | filename (string): log save name. 70 | mode(str): log file write mode, `append` or `override`. default is `a`. 71 | 72 | Return: 73 | logger instance. 74 | """ 75 | loguru_format = ( 76 | "{time:YYYY-MM-DD HH:mm:ss} | " 77 | "{level: <8} | " 78 | "{name}:{line} - {message}" 79 | ) 80 | 81 | logger.remove() 82 | save_file = os.path.join(save_dir, filename) 83 | if mode == "o" and os.path.exists(save_file): 84 | os.remove(save_file) 85 | # only keep logger in rank0 process 86 | if distributed_rank == 0: 87 | logger.add( 88 | sys.stderr, 89 | format=loguru_format, 90 | level="INFO", 91 | enqueue=True, 92 | ) 93 | logger.add(save_file) 94 | 95 | # redirect stdout/stderr to loguru 96 | redirect_sys_output("INFO") 97 | -------------------------------------------------------------------------------- /yolox/utils/metric.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. 4 | import numpy as np 5 | 6 | import torch 7 | 8 | import functools 9 | import os 10 | import time 11 | from collections import defaultdict, deque 12 | 13 | __all__ = [ 14 | "AverageMeter", 15 | "MeterBuffer", 16 | "get_total_and_free_memory_in_Mb", 17 | "occupy_mem", 18 | "gpu_mem_usage", 19 | ] 20 | 21 | 22 | def get_total_and_free_memory_in_Mb(cuda_device): 23 | devices_info_str = os.popen( 24 | "nvidia-smi --query-gpu=memory.total,memory.used --format=csv,nounits,noheader" 25 | ) 26 | devices_info = devices_info_str.read().strip().split("\n") 27 | total, used = devices_info[int(cuda_device)].split(",") 28 | return int(total), int(used) 29 | 30 | 31 | def occupy_mem(cuda_device, mem_ratio=0.95): 32 | """ 33 | pre-allocate gpu memory for training to avoid memory Fragmentation. 34 | """ 35 | total, used = get_total_and_free_memory_in_Mb(cuda_device) 36 | max_mem = int(total * mem_ratio) 37 | block_mem = max_mem - used 38 | x = torch.cuda.FloatTensor(256, 1024, block_mem) 39 | del x 40 | time.sleep(5) 41 | 42 | 43 | def gpu_mem_usage(): 44 | """ 45 | Compute the GPU memory usage for the current device (MB). 46 | """ 47 | mem_usage_bytes = torch.cuda.max_memory_allocated() 48 | return mem_usage_bytes / (1024 * 1024) 49 | 50 | 51 | class AverageMeter: 52 | """Track a series of values and provide access to smoothed values over a 53 | window or the global series average. 
54 | """ 55 | 56 | def __init__(self, window_size=50): 57 | self._deque = deque(maxlen=window_size) 58 | self._total = 0.0 59 | self._count = 0 60 | 61 | def update(self, value): 62 | self._deque.append(value) 63 | self._count += 1 64 | self._total += value 65 | 66 | @property 67 | def median(self): 68 | d = np.array(list(self._deque)) 69 | return np.median(d) 70 | 71 | @property 72 | def avg(self): 73 | # if deque is empty, nan will be returned. 74 | d = np.array(list(self._deque)) 75 | return d.mean() 76 | 77 | @property 78 | def global_avg(self): 79 | return self._total / max(self._count, 1e-5) 80 | 81 | @property 82 | def latest(self): 83 | return self._deque[-1] if len(self._deque) > 0 else None 84 | 85 | @property 86 | def total(self): 87 | return self._total 88 | 89 | def reset(self): 90 | self._deque.clear() 91 | self._total = 0.0 92 | self._count = 0 93 | 94 | def clear(self): 95 | self._deque.clear() 96 | 97 | 98 | class MeterBuffer(defaultdict): 99 | """Computes and stores the average and current value""" 100 | 101 | def __init__(self, window_size=20): 102 | factory = functools.partial(AverageMeter, window_size=window_size) 103 | super().__init__(factory) 104 | 105 | def reset(self): 106 | for v in self.values(): 107 | v.reset() 108 | 109 | def get_filtered_meter(self, filter_key="time"): 110 | return {k: v for k, v in self.items() if filter_key in k} 111 | 112 | def update(self, values=None, **kwargs): 113 | if values is None: 114 | values = {} 115 | values.update(kwargs) 116 | for k, v in values.items(): 117 | if isinstance(v, torch.Tensor): 118 | v = v.detach() 119 | self[k].update(v) 120 | 121 | def clear_meters(self): 122 | for v in self.values(): 123 | v.clear() 124 | -------------------------------------------------------------------------------- /yolox/utils/model_utils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. 
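A hedged sketch of the metering utilities above: `MeterBuffer` is a `defaultdict` that creates a windowed `AverageMeter` per key on first update, so a trainer can log arbitrary named values without registering them first. The values here are made up:

```python
meters = MeterBuffer(window_size=20)
for step in range(100):                   # hypothetical training loop
    meters.update(iter_time=0.42, data_time=0.05)
print(meters["iter_time"].avg)            # windowed mean
print(meters.get_filtered_meter("time"))  # every meter whose key contains "time"
```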
4 | 5 | import torch 6 | import torch.nn as nn 7 | from thop import profile 8 | 9 | from copy import deepcopy 10 | 11 | __all__ = [ 12 | "fuse_conv_and_bn", 13 | "fuse_model", 14 | "get_model_info", 15 | "replace_module", 16 | ] 17 | 18 | 19 | def get_model_info(model, tsize): 20 | 21 | stride = 64 22 | img = torch.zeros((1, 3, stride, stride), device=next(model.parameters()).device) 23 | flops, params = profile(deepcopy(model), inputs=(img,), verbose=False) 24 | params /= 1e6 25 | flops /= 1e9 26 | flops *= tsize[0] * tsize[1] / stride / stride * 2 # Gflops 27 | info = "Params: {:.2f}M, Gflops: {:.2f}".format(params, flops) 28 | return info 29 | 30 | 31 | def fuse_conv_and_bn(conv, bn): 32 | # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ 33 | fusedconv = ( 34 | nn.Conv2d( 35 | conv.in_channels, 36 | conv.out_channels, 37 | kernel_size=conv.kernel_size, 38 | stride=conv.stride, 39 | padding=conv.padding, 40 | groups=conv.groups, 41 | bias=True, 42 | ) 43 | .requires_grad_(False) 44 | .to(conv.weight.device) 45 | ) 46 | 47 | # prepare filters 48 | w_conv = conv.weight.clone().view(conv.out_channels, -1) 49 | w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) 50 | fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape)) 51 | 52 | # prepare spatial bias 53 | b_conv = ( 54 | torch.zeros(conv.weight.size(0), device=conv.weight.device) 55 | if conv.bias is None 56 | else conv.bias 57 | ) 58 | b_bn = bn.bias - bn.weight.mul(bn.running_mean).div( 59 | torch.sqrt(bn.running_var + bn.eps) 60 | ) 61 | fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) 62 | 63 | return fusedconv 64 | 65 | 66 | def fuse_model(model): 67 | from yolox.models.network_blocks import BaseConv 68 | 69 | for m in model.modules(): 70 | if type(m) is BaseConv and hasattr(m, "bn"): 71 | m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv 72 | delattr(m, "bn") # remove batchnorm 73 | m.forward = m.fuseforward # update forward 74 | return model 75 | 76 | 77 | def replace_module(module, replaced_module_type, new_module_type, replace_func=None): 78 | """ 79 | Replace a given module type in `module` with a new type. Mostly used for deployment. 80 | 81 | Args: 82 | module (nn.Module): model to apply replace operation. 83 | replaced_module_type (Type): module type to be replaced. 84 | new_module_type (Type): new module type. 85 | replace_func (function): python function to describe replace logic. Default value: None. 86 | 87 | Returns: 88 | model (nn.Module): module with the replacement applied. 89 | """ 90 | 91 | def default_replace_func(replaced_module_type, new_module_type): 92 | return new_module_type() 93 | 94 | if replace_func is None: 95 | replace_func = default_replace_func 96 | 97 | model = module 98 | if isinstance(module, replaced_module_type): 99 | model = replace_func(replaced_module_type, new_module_type) 100 | else: # recursively replace 101 | for name, child in module.named_children(): 102 | new_child = replace_module(child, replaced_module_type, new_module_type) 103 | if new_child is not child: # child is already replaced 104 | model.add_module(name, new_child) 105 | 106 | return model 107 | -------------------------------------------------------------------------------- /yolox/utils/setup_env.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
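A hedged deployment sketch for the model utilities above: fold each BatchNorm into its preceding conv, and report parameter count and GFLOPs at the test resolution. The `exp` object is assumed to come from the `get_exp()` config loader earlier:

```python
model = exp.get_model().eval()
print(get_model_info(model, (640, 640)))  # -> "Params: ...M, Gflops: ..."
model = fuse_model(model)                 # folds each BaseConv's BN into its conv
```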
4 | 5 | import cv2 6 | 7 | import os 8 | import subprocess 9 | 10 | __all__ = ["configure_nccl", "configure_module"] 11 | 12 | 13 | def configure_nccl(): 14 | """Configure multi-machine environment variables of NCCL.""" 15 | os.environ["NCCL_LAUNCH_MODE"] = "PARALLEL" 16 | os.environ["NCCL_IB_HCA"] = subprocess.getoutput( 17 | "pushd /sys/class/infiniband/ > /dev/null; for i in mlx5_*; " 18 | "do cat $i/ports/1/gid_attrs/types/* 2>/dev/null " 19 | "| grep v >/dev/null && echo $i ; done; popd > /dev/null" 20 | ) 21 | os.environ["NCCL_IB_GID_INDEX"] = "3" 22 | os.environ["NCCL_IB_TC"] = "106" 23 | 24 | 25 | def configure_module(ulimit_value=8192): 26 | """ 27 | Configure the PyTorch module environment; ulimit and cv2 settings are applied. 28 | 29 | Args: 30 | ulimit_value(int): default open file number on linux. Default value: 8192. 31 | """ 32 | # system setting 33 | try: 34 | import resource 35 | 36 | rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) 37 | resource.setrlimit(resource.RLIMIT_NOFILE, (ulimit_value, rlimit[1])) 38 | except Exception: 39 | # An exception might be raised on Windows, or when rlimit already reaches 40 | # its hard limit; raising the rlimit is not strictly necessary, so ignore it. 41 | pass 42 | 43 | # cv2 44 | # cv2-level multithreading might harm torch dataloader performance 45 | os.environ["OPENCV_OPENCL_RUNTIME"] = "disabled" 46 | try: 47 | cv2.setNumThreads(0) 48 | cv2.ocl.setUseOpenCL(False) 49 | except Exception: 50 | # a cv2 version mismatch might raise exceptions. 51 | pass 52 | --------------------------------------------------------------------------------
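A hedged sketch of how these environment helpers are typically invoked once at process start, before dataloaders or NCCL process groups exist:

```python
from yolox.utils import configure_module, configure_nccl

configure_module(ulimit_value=8192)  # raise RLIMIT_NOFILE, disable cv2 threading
configure_nccl()                     # set NCCL env vars for multi-machine runs
```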