├── .gitattributes
├── LICENSE
├── MOT16_eval
├── eval.sh
├── track_all.gif
└── track_pedestrians.gif
├── README.md
├── __pycache__
└── lane_finding.cpython-38.pyc
├── camera_cal
├── calibration1.jpg
├── calibration10.jpg
├── calibration11.jpg
├── calibration12.jpg
├── calibration13.jpg
├── calibration2.jpg
├── calibration3.jpg
├── calibration4.jpg
├── calibration5.jpg
├── calibration6.jpg
├── calibration7.jpg
├── calibration8.jpg
└── calibration9.jpg
├── data
└── demo.gif
├── deep_sort
├── LICENSE
├── README.md
├── __init__.py
├── __pycache__
│ ├── __init__.cpython-38.pyc
│ └── deep_sort.cpython-38.pyc
├── configs
│ └── deep_sort.yaml
├── deep
│ ├── __init__.py
│ ├── __pycache__
│ │ ├── __init__.cpython-38.pyc
│ │ └── feature_extractor.cpython-38.pyc
│ ├── checkpoint
│ │ └── .gitkeep
│ ├── feature_extractor.py
│ └── reid
│ │ ├── .flake8
│ │ ├── .gitignore
│ │ ├── .isort.cfg
│ │ ├── .style.yapf
│ │ ├── LICENSE
│ │ ├── README.rst
│ │ ├── configs
│ │ ├── im_osnet_ain_x1_0_softmax_256x128_amsgrad_cosine.yaml
│ │ ├── im_osnet_ibn_x1_0_softmax_256x128_amsgrad.yaml
│ │ ├── im_osnet_x0_25_softmax_256x128_amsgrad.yaml
│ │ ├── im_osnet_x0_5_softmax_256x128_amsgrad.yaml
│ │ ├── im_osnet_x0_75_softmax_256x128_amsgrad.yaml
│ │ ├── im_osnet_x1_0_softmax_256x128_amsgrad.yaml
│ │ ├── im_osnet_x1_0_softmax_256x128_amsgrad_cosine.yaml
│ │ ├── im_r50_softmax_256x128_amsgrad.yaml
│ │ └── im_r50fc512_softmax_256x128_amsgrad.yaml
│ │ ├── docs
│ │ ├── AWESOME_REID.md
│ │ ├── MODEL_ZOO.md
│ │ ├── Makefile
│ │ ├── conf.py
│ │ ├── datasets.rst
│ │ ├── evaluation.rst
│ │ ├── figures
│ │ │ ├── actmap.jpg
│ │ │ └── ranking_results.jpg
│ │ ├── index.rst
│ │ ├── pkg
│ │ │ ├── data.rst
│ │ │ ├── engine.rst
│ │ │ ├── losses.rst
│ │ │ ├── metrics.rst
│ │ │ ├── models.rst
│ │ │ ├── optim.rst
│ │ │ └── utils.rst
│ │ ├── requirements.txt
│ │ └── user_guide.rst
│ │ ├── linter.sh
│ │ ├── projects
│ │ ├── DML
│ │ │ ├── README.md
│ │ │ ├── default_config.py
│ │ │ ├── dml.py
│ │ │ ├── im_osnet_x1_0_dml_256x128_amsgrad_cosine.yaml
│ │ │ └── main.py
│ │ ├── OSNet_AIN
│ │ │ ├── README.md
│ │ │ ├── default_config.py
│ │ │ ├── main.py
│ │ │ ├── nas.yaml
│ │ │ ├── osnet_child.py
│ │ │ ├── osnet_search.py
│ │ │ └── softmax_nas.py
│ │ ├── README.md
│ │ └── attribute_recognition
│ │ │ ├── README.md
│ │ │ ├── datasets
│ │ │ ├── __init__.py
│ │ │ ├── dataset.py
│ │ │ └── pa100k.py
│ │ │ ├── default_parser.py
│ │ │ ├── main.py
│ │ │ ├── models
│ │ │ ├── __init__.py
│ │ │ └── osnet.py
│ │ │ └── train.sh
│ │ ├── requirements.txt
│ │ ├── scripts
│ │ ├── default_config.py
│ │ └── main.py
│ │ ├── setup.py
│ │ ├── tools
│ │ ├── compute_mean_std.py
│ │ ├── parse_test_res.py
│ │ └── visualize_actmap.py
│ │ └── torchreid
│ │ ├── __init__.py
│ │ ├── data
│ │ ├── __init__.py
│ │ ├── datamanager.py
│ │ ├── datasets
│ │ │ ├── __init__.py
│ │ │ ├── dataset.py
│ │ │ ├── image
│ │ │ │ ├── __init__.py
│ │ │ │ ├── cuhk01.py
│ │ │ │ ├── cuhk02.py
│ │ │ │ ├── cuhk03.py
│ │ │ │ ├── cuhksysu.py
│ │ │ │ ├── dukemtmcreid.py
│ │ │ │ ├── grid.py
│ │ │ │ ├── ilids.py
│ │ │ │ ├── market1501.py
│ │ │ │ ├── msmt17.py
│ │ │ │ ├── prid.py
│ │ │ │ ├── sensereid.py
│ │ │ │ ├── university1652.py
│ │ │ │ └── viper.py
│ │ │ └── video
│ │ │ │ ├── __init__.py
│ │ │ │ ├── dukemtmcvidreid.py
│ │ │ │ ├── ilidsvid.py
│ │ │ │ ├── mars.py
│ │ │ │ └── prid2011.py
│ │ ├── sampler.py
│ │ └── transforms.py
│ │ ├── engine
│ │ ├── __init__.py
│ │ ├── engine.py
│ │ ├── image
│ │ │ ├── __init__.py
│ │ │ ├── softmax.py
│ │ │ └── triplet.py
│ │ └── video
│ │ │ ├── __init__.py
│ │ │ ├── softmax.py
│ │ │ └── triplet.py
│ │ ├── losses
│ │ ├── __init__.py
│ │ ├── cross_entropy_loss.py
│ │ └── hard_mine_triplet_loss.py
│ │ ├── metrics
│ │ ├── __init__.py
│ │ ├── accuracy.py
│ │ ├── distance.py
│ │ ├── rank.py
│ │ └── rank_cylib
│ │ │ ├── Makefile
│ │ │ ├── __init__.py
│ │ │ ├── rank_cy.pyx
│ │ │ ├── setup.py
│ │ │ └── test_cython.py
│ │ ├── models
│ │ ├── __init__.py
│ │ ├── densenet.py
│ │ ├── hacnn.py
│ │ ├── inceptionresnetv2.py
│ │ ├── inceptionv4.py
│ │ ├── mlfn.py
│ │ ├── mobilenetv2.py
│ │ ├── mudeep.py
│ │ ├── nasnet.py
│ │ ├── osnet.py
│ │ ├── osnet_ain.py
│ │ ├── pcb.py
│ │ ├── resnet.py
│ │ ├── resnet_ibn_a.py
│ │ ├── resnet_ibn_b.py
│ │ ├── resnetmid.py
│ │ ├── senet.py
│ │ ├── shufflenet.py
│ │ ├── shufflenetv2.py
│ │ ├── squeezenet.py
│ │ └── xception.py
│ │ ├── optim
│ │ ├── __init__.py
│ │ ├── lr_scheduler.py
│ │ ├── optimizer.py
│ │ └── radam.py
│ │ └── utils
│ │ ├── GPU-Re-Ranking
│ │ ├── README.md
│ │ ├── extension
│ │ │ ├── adjacency_matrix
│ │ │ │ ├── build_adjacency_matrix.cpp
│ │ │ │ ├── build_adjacency_matrix_kernel.cu
│ │ │ │ └── setup.py
│ │ │ ├── make.sh
│ │ │ └── propagation
│ │ │ │ ├── gnn_propagate.cpp
│ │ │ │ ├── gnn_propagate_kernel.cu
│ │ │ │ └── setup.py
│ │ ├── gnn_reranking.py
│ │ ├── main.py
│ │ └── utils.py
│ │ ├── __init__.py
│ │ ├── avgmeter.py
│ │ ├── feature_extractor.py
│ │ ├── loggers.py
│ │ ├── model_complexity.py
│ │ ├── reidtools.py
│ │ ├── rerank.py
│ │ ├── tools.py
│ │ └── torchtools.py
├── deep_sort.py
├── sort
│ ├── __init__.py
│ ├── __pycache__
│ │ ├── __init__.cpython-38.pyc
│ │ ├── detection.cpython-38.pyc
│ │ ├── iou_matching.cpython-38.pyc
│ │ ├── kalman_filter.cpython-38.pyc
│ │ ├── linear_assignment.cpython-38.pyc
│ │ ├── nn_matching.cpython-38.pyc
│ │ ├── track.cpython-38.pyc
│ │ └── tracker.cpython-38.pyc
│ ├── detection.py
│ ├── iou_matching.py
│ ├── kalman_filter.py
│ ├── linear_assignment.py
│ ├── nn_matching.py
│ ├── preprocessing.py
│ ├── track.py
│ └── tracker.py
└── utils
│ ├── __init__.py
│ ├── __pycache__
│ ├── __init__.cpython-38.pyc
│ └── parser.cpython-38.pyc
│ ├── asserts.py
│ ├── draw.py
│ ├── evaluation.py
│ ├── io.py
│ ├── json_logger.py
│ ├── log.py
│ ├── parser.py
│ └── tools.py
├── lane_finding.py
├── requirements.txt
├── result_output_lane.mp4
├── steering_wheel_image.jpg
├── track.py
├── yolov5
├── CONTRIBUTING.md
├── Dockerfile
├── LICENSE
├── README.md
├── data
│ ├── Argoverse.yaml
│ ├── GlobalWheat2020.yaml
│ ├── Objects365.yaml
│ ├── SKU-110K.yaml
│ ├── VOC.yaml
│ ├── VisDrone.yaml
│ ├── coco.yaml
│ ├── coco128.yaml
│ ├── hyps
│ │ ├── hyp.finetune.yaml
│ │ ├── hyp.finetune_objects365.yaml
│ │ ├── hyp.scratch-high.yaml
│ │ ├── hyp.scratch-low.yaml
│ │ ├── hyp.scratch-med.yaml
│ │ └── hyp.scratch.yaml
│ ├── images
│ │ ├── bus.jpg
│ │ └── zidane.jpg
│ ├── output
│ │ ├── result_output_lane.mp4
│ │ ├── test_sample_result-1.avi
│ │ └── test_sample_result-2.avi
│ ├── scripts
│ │ ├── download_weights.sh
│ │ ├── get_coco.sh
│ │ └── get_coco128.sh
│ ├── video
│ │ └── test_sample.mp4
│ └── xView.yaml
├── detect.py
├── export.py
├── hubconf.py
├── models
│ ├── __init__.py
│ ├── __pycache__
│ │ ├── __init__.cpython-38.pyc
│ │ ├── common.cpython-38.pyc
│ │ ├── experimental.cpython-38.pyc
│ │ └── yolo.cpython-38.pyc
│ ├── common.py
│ ├── experimental.py
│ ├── hub
│ │ ├── anchors.yaml
│ │ ├── yolov3-spp.yaml
│ │ ├── yolov3-tiny.yaml
│ │ ├── yolov3.yaml
│ │ ├── yolov5-bifpn.yaml
│ │ ├── yolov5-fpn.yaml
│ │ ├── yolov5-p2.yaml
│ │ ├── yolov5-p6.yaml
│ │ ├── yolov5-p7.yaml
│ │ ├── yolov5-panet.yaml
│ │ ├── yolov5l6.yaml
│ │ ├── yolov5m6.yaml
│ │ ├── yolov5n6.yaml
│ │ ├── yolov5s-ghost.yaml
│ │ ├── yolov5s-transformer.yaml
│ │ ├── yolov5s6.yaml
│ │ └── yolov5x6.yaml
│ ├── tf.py
│ ├── yolo.py
│ ├── yolov5l.yaml
│ ├── yolov5m.yaml
│ ├── yolov5n.yaml
│ ├── yolov5s.yaml
│ └── yolov5x.yaml
├── requirements.txt
├── setup.cfg
├── test_sample.mp4
├── train.py
├── tutorial.ipynb
├── utils
│ ├── __init__.py
│ ├── __pycache__
│ │ ├── __init__.cpython-38.pyc
│ │ ├── augmentations.cpython-38.pyc
│ │ ├── autoanchor.cpython-38.pyc
│ │ ├── datasets.cpython-38.pyc
│ │ ├── downloads.cpython-38.pyc
│ │ ├── general.cpython-38.pyc
│ │ ├── metrics.cpython-38.pyc
│ │ ├── plots.cpython-38.pyc
│ │ └── torch_utils.cpython-38.pyc
│ ├── activations.py
│ ├── augmentations.py
│ ├── autoanchor.py
│ ├── autobatch.py
│ ├── aws
│ │ ├── __init__.py
│ │ ├── mime.sh
│ │ ├── resume.py
│ │ └── userdata.sh
│ ├── callbacks.py
│ ├── datasets.py
│ ├── downloads.py
│ ├── flask_rest_api
│ │ ├── README.md
│ │ ├── example_request.py
│ │ └── restapi.py
│ ├── general.py
│ ├── google_app_engine
│ │ ├── Dockerfile
│ │ ├── additional_requirements.txt
│ │ └── app.yaml
│ ├── loggers
│ │ ├── __init__.py
│ │ └── wandb
│ │ │ ├── README.md
│ │ │ ├── __init__.py
│ │ │ ├── log_dataset.py
│ │ │ ├── sweep.py
│ │ │ ├── sweep.yaml
│ │ │ └── wandb_utils.py
│ ├── loss.py
│ ├── metrics.py
│ ├── plots.py
│ └── torch_utils.py
└── val.py
└── yolov5x.pt
/.gitattributes:
--------------------------------------------------------------------------------
1 | result_output_lane.mp4 filter=lfs diff=lfs merge=lfs -text
2 | *.avi filter=lfs diff=lfs merge=lfs -text
3 | yolov5x.pt filter=lfs diff=lfs merge=lfs -text
4 |
--------------------------------------------------------------------------------
/MOT16_eval/track_all.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/MOT16_eval/track_all.gif
--------------------------------------------------------------------------------
/MOT16_eval/track_pedestrians.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/MOT16_eval/track_pedestrians.gif
--------------------------------------------------------------------------------
/__pycache__/lane_finding.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/__pycache__/lane_finding.cpython-38.pyc
--------------------------------------------------------------------------------
/camera_cal/calibration1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/camera_cal/calibration1.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration10.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/camera_cal/calibration10.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration11.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/camera_cal/calibration11.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration12.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/camera_cal/calibration12.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration13.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/camera_cal/calibration13.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/camera_cal/calibration2.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/camera_cal/calibration3.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/camera_cal/calibration4.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/camera_cal/calibration5.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/camera_cal/calibration6.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/camera_cal/calibration7.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration8.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/camera_cal/calibration8.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration9.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/camera_cal/calibration9.jpg
--------------------------------------------------------------------------------
/data/demo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/data/demo.gif
--------------------------------------------------------------------------------
/deep_sort/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 Ziqiang
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/deep_sort/__init__.py:
--------------------------------------------------------------------------------
1 | from .deep_sort import DeepSort
2 |
3 |
4 | __all__ = ['DeepSort', 'build_tracker']
5 |
6 |
7 | def build_tracker(cfg, use_cuda):
8 | return DeepSort(cfg.DEEPSORT.REID_CKPT,
9 | max_dist=cfg.DEEPSORT.MAX_DIST, min_confidence=cfg.DEEPSORT.MIN_CONFIDENCE,
10 | nms_max_overlap=cfg.DEEPSORT.NMS_MAX_OVERLAP, max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,
11 | max_age=cfg.DEEPSORT.MAX_AGE, n_init=cfg.DEEPSORT.N_INIT, nn_budget=cfg.DEEPSORT.NN_BUDGET, use_cuda=use_cuda)
12 |
--------------------------------------------------------------------------------
/deep_sort/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/deep_sort/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/deep_sort/__pycache__/deep_sort.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/deep_sort/__pycache__/deep_sort.cpython-38.pyc
--------------------------------------------------------------------------------
/deep_sort/configs/deep_sort.yaml:
--------------------------------------------------------------------------------
1 | DEEPSORT:
2 | MODEL_TYPE: "osnet_x0_25"
3 | MAX_DIST: 0.2 # The matching threshold. Samples with larger distance are considered an invalid match
4 | MAX_IOU_DISTANCE: 0.7 # Gating threshold. Associations with cost larger than this value are disregarded.
5 | MAX_AGE: 30 # Maximum number of consecutive missed detections before a track is deleted
6 | N_INIT: 3 # Number of frames that a track remains in initialization phase
7 | NN_BUDGET: 100 # Maximum size of the appearance descriptors gallery
8 |
9 |
--------------------------------------------------------------------------------
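The parameters above are consumed when the tracker is constructed. Below is a minimal sketch of reading them, assuming only PyYAML is installed; the repository ships its own parser in `deep_sort/utils/parser.py`, so this is an illustration rather than the project's exact loading code.

```python
import yaml  # PyYAML

# Read the DeepSort hyperparameters from the config shown above.
with open("deep_sort/configs/deep_sort.yaml") as f:
    cfg = yaml.safe_load(f)

ds = cfg["DEEPSORT"]
print("ReID model:", ds["MODEL_TYPE"])
print("Appearance gate (MAX_DIST):", ds["MAX_DIST"])
print("IoU gate (MAX_IOU_DISTANCE):", ds["MAX_IOU_DISTANCE"])
print("Delete a track after", ds["MAX_AGE"], "missed frames")
```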
/deep_sort/deep/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/deep_sort/deep/__init__.py
--------------------------------------------------------------------------------
/deep_sort/deep/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/deep_sort/deep/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/deep_sort/deep/__pycache__/feature_extractor.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/deep_sort/deep/__pycache__/feature_extractor.cpython-38.pyc
--------------------------------------------------------------------------------
/deep_sort/deep/checkpoint/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/deep_sort/deep/checkpoint/.gitkeep
--------------------------------------------------------------------------------
/deep_sort/deep/feature_extractor.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torchvision.transforms as transforms
3 | import numpy as np
4 | import cv2
5 | import logging
6 |
7 | import sys
8 | # make the bundled torchreid package (deep_sort/deep/reid) importable
9 | sys.path.append('deep_sort/deep/reid')
10 | from torchreid import models
11 |
12 |
13 | class Extractor(object):
14 | def __init__(self, model_type, use_cuda=True):
15 | self.device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu"
16 | self.input_width = 128
17 | self.input_height = 256
18 |
19 | self.model = models.build_model(name=model_type, num_classes=1000)
20 | self.model.to(self.device)
21 | self.model.eval()
22 |
23 | logger = logging.getLogger("root.tracker")
24 | logger.info("Selected model type: {}".format(model_type))
25 | self.size = (self.input_width, self.input_height)
26 | self.norm = transforms.Compose([
27 | transforms.ToTensor(),
28 | transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
29 | ])
30 |
31 | def _preprocess(self, im_crops):
32 | """
33 | Preprocess a list of image crops:
34 | 1. convert to float and scale pixel values to [0, 1]
35 | 2. resize to self.size, i.e. (128, 256) width x height
36 | 3. stack the crops into one batch
37 | 4. convert to a torch Tensor
38 | 5. normalize with ImageNet statistics
39 | """
40 | def _resize(im, size):
41 | return cv2.resize(im.astype(np.float32) / 255., size)
42 |
43 | im_batch = torch.cat([self.norm(_resize(im, self.size)).unsqueeze(
44 | 0) for im in im_crops], dim=0).float()
45 | return im_batch
46 |
47 | def __call__(self, im_crops):
48 | im_batch = self._preprocess(im_crops)
49 | with torch.no_grad():
50 | im_batch = im_batch.to(self.device)
51 | features = self.model(im_batch)
52 | return features.cpu().numpy()
53 |
54 |
55 | if __name__ == '__main__':
56 | img = cv2.imread("demo.jpg")[:, :, (2, 1, 0)]
57 | extr = Extractor("osnet_x1_0")
58 | feature = extr([img])  # the extractor expects a list of crops
59 | print(feature.shape)
60 |
--------------------------------------------------------------------------------
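To make the role of `Extractor` concrete, here is a hedged usage sketch: the frame path and bounding boxes are hypothetical, only the `Extractor` class above is assumed, and the script is expected to run from the repository root (because of the `sys.path.append` above).

```python
import cv2

from deep_sort.deep.feature_extractor import Extractor

# Hypothetical frame and (x1, y1, x2, y2) detection boxes, for illustration only.
frame = cv2.imread("frame.jpg")
boxes = [(50, 80, 120, 300), (200, 60, 280, 310)]

# Crop each detection and convert BGR -> RGB, as in the __main__ demo above.
crops = [frame[y1:y2, x1:x2][:, :, (2, 1, 0)] for (x1, y1, x2, y2) in boxes]

extractor = Extractor("osnet_x1_0", use_cuda=False)
features = extractor(crops)   # one appearance descriptor per crop
print(features.shape)         # (num_boxes, feature_dim)
```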
/deep_sort/deep/reid/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | ignore =
3 | # At least two spaces before inline comment
4 | E261,
5 | # Line lengths are recommended to be no greater than 79 characters
6 | E501,
7 | # Missing whitespace around arithmetic operator
8 | E226,
9 | # Blank line contains whitespace
10 | W293,
11 | # Do not use bare 'except'
12 | E722,
13 | # Line break after binary operator
14 | W504,
15 | # isort found an import in the wrong position
16 | I001
17 | max-line-length = 79
18 | exclude = __init__.py, build, torchreid/metrics/rank_cylib/
--------------------------------------------------------------------------------
/deep_sort/deep/reid/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | .hypothesis/
51 | .pytest_cache/
52 |
53 | # Translations
54 | *.mo
55 | *.pot
56 |
57 | # Django stuff:
58 | *.log
59 | local_settings.py
60 | db.sqlite3
61 |
62 | # Flask stuff:
63 | instance/
64 | .webassets-cache
65 |
66 | # Scrapy stuff:
67 | .scrapy
68 |
69 | # Sphinx documentation
70 | docs/_build/
71 |
72 | # PyBuilder
73 | target/
74 |
75 | # Jupyter Notebook
76 | .ipynb_checkpoints
77 |
78 | # IPython
79 | profile_default/
80 | ipython_config.py
81 |
82 | # pyenv
83 | .python-version
84 |
85 | # pipenv
86 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
87 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
88 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
89 | # install all needed dependencies.
90 | #Pipfile.lock
91 |
92 | # celery beat schedule file
93 | celerybeat-schedule
94 |
95 | # SageMath parsed files
96 | *.sage.py
97 |
98 | # Environments
99 | .env
100 | .venv
101 | env/
102 | venv/
103 | ENV/
104 | env.bak/
105 | venv.bak/
106 |
107 | # Spyder project settings
108 | .spyderproject
109 | .spyproject
110 |
111 | # Rope project settings
112 | .ropeproject
113 |
114 | # mkdocs documentation
115 | /site
116 |
117 | # mypy
118 | .mypy_cache/
119 | .dmypy.json
120 | dmypy.json
121 |
122 | # Pyre type checker
123 | .pyre/
124 |
125 | # Cython eval code
126 | *.c
127 | *.html
128 |
129 | # OS X
130 | .DS_Store
131 | .Spotlight-V100
132 | .Trashes
133 | ._*
134 |
135 | # ReID
136 | reid-data/
137 | log/
138 | saved-models/
139 | model-zoo/
140 | debug*
141 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/.isort.cfg:
--------------------------------------------------------------------------------
1 | [isort]
2 | line_length=79
3 | multi_line_output=3
4 | length_sort=true
5 | known_standard_library=numpy,setuptools
6 | known_myself=torchreid
7 | known_third_party=matplotlib,cv2,torch,torchvision,PIL,yacs
8 | no_lines_before=STDLIB,THIRDPARTY
9 | sections=FUTURE,STDLIB,THIRDPARTY,myself,FIRSTPARTY,LOCALFOLDER
10 | default_section=FIRSTPARTY
--------------------------------------------------------------------------------
/deep_sort/deep/reid/.style.yapf:
--------------------------------------------------------------------------------
1 | [style]
2 | BASED_ON_STYLE = pep8
3 | BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true
4 | SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true
5 | DEDENT_CLOSING_BRACKETS = true
6 | SPACES_BEFORE_COMMENT = 1
7 | ARITHMETIC_PRECEDENCE_INDICATION = true
--------------------------------------------------------------------------------
/deep_sort/deep/reid/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2018 Kaiyang Zhou
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/configs/im_osnet_ain_x1_0_softmax_256x128_amsgrad_cosine.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | name: 'osnet_ain_x1_0'
3 | pretrained: True
4 |
5 | data:
6 | type: 'image'
7 | sources: ['market1501']
8 | targets: ['market1501', 'dukemtmcreid']
9 | height: 256
10 | width: 128
11 | combineall: False
12 | transforms: ['random_flip', 'color_jitter']
13 | save_dir: 'log/osnet_ain_x1_0_market1501_softmax_cosinelr'
14 |
15 | loss:
16 | name: 'softmax'
17 | softmax:
18 | label_smooth: True
19 |
20 | train:
21 | optim: 'amsgrad'
22 | lr: 0.0015
23 | max_epoch: 100
24 | batch_size: 64
25 | fixbase_epoch: 10
26 | open_layers: ['classifier']
27 | lr_scheduler: 'cosine'
28 |
29 | test:
30 | batch_size: 300
31 | dist_metric: 'cosine'
32 | normalize_feature: False
33 | evaluate: False
34 | eval_freq: -1
35 | rerank: False
--------------------------------------------------------------------------------
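For readers unfamiliar with how Torchreid consumes such a file, the values above map onto its Python API roughly as follows. This is a sketch based on Torchreid's documented interface (the bundled `scripts/main.py` normally drives training from the YAML directly); the dataset root is a placeholder.

```python
import torchreid

# 'data' section: Market1501 source, 256x128 inputs, flip + colour jitter.
datamanager = torchreid.data.ImageDataManager(
    root="reid-data",  # placeholder dataset root
    sources="market1501",
    targets=["market1501", "dukemtmcreid"],
    height=256,
    width=128,
    batch_size_train=64,
    batch_size_test=300,
    transforms=["random_flip", "color_jitter"],
)

# 'model' and 'loss' sections.
model = torchreid.models.build_model(
    name="osnet_ain_x1_0",
    num_classes=datamanager.num_train_pids,
    loss="softmax",
    pretrained=True,
)

# 'train' section: AMSGrad with a cosine learning-rate schedule.
optimizer = torchreid.optim.build_optimizer(model, optim="amsgrad", lr=0.0015)
scheduler = torchreid.optim.build_lr_scheduler(
    optimizer, lr_scheduler="cosine", max_epoch=100
)

engine = torchreid.engine.ImageSoftmaxEngine(
    datamanager, model, optimizer=optimizer, scheduler=scheduler, label_smooth=True
)
engine.run(
    save_dir="log/osnet_ain_x1_0_market1501_softmax_cosinelr",
    max_epoch=100,
    fixbase_epoch=10,
    open_layers="classifier",
)
```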
/deep_sort/deep/reid/configs/im_osnet_ibn_x1_0_softmax_256x128_amsgrad.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | name: 'osnet_ibn_x1_0'
3 | pretrained: True
4 |
5 | data:
6 | type: 'image'
7 | sources: ['market1501']
8 | targets: ['dukemtmcreid']
9 | height: 256
10 | width: 128
11 | combineall: False
12 | transforms: ['random_flip', 'color_jitter']
13 | save_dir: 'log/osnet_ibn_x1_0_market2duke_softmax'
14 |
15 | loss:
16 | name: 'softmax'
17 | softmax:
18 | label_smooth: True
19 |
20 | train:
21 | optim: 'amsgrad'
22 | lr: 0.0015
23 | max_epoch: 150
24 | batch_size: 64
25 | fixbase_epoch: 10
26 | open_layers: ['classifier']
27 | lr_scheduler: 'single_step'
28 | stepsize: [60]
29 |
30 | test:
31 | batch_size: 300
32 | dist_metric: 'euclidean'
33 | normalize_feature: False
34 | evaluate: False
35 | eval_freq: -1
36 | rerank: False
--------------------------------------------------------------------------------
/deep_sort/deep/reid/configs/im_osnet_x0_25_softmax_256x128_amsgrad.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | name: 'osnet_x0_25'
3 | pretrained: True
4 |
5 | data:
6 | type: 'image'
7 | sources: ['market1501']
8 | targets: ['market1501']
9 | height: 256
10 | width: 128
11 | combineall: False
12 | transforms: ['random_flip']
13 | save_dir: 'log/osnet_x0_25_market1501_softmax'
14 |
15 | loss:
16 | name: 'softmax'
17 | softmax:
18 | label_smooth: True
19 |
20 | train:
21 | optim: 'amsgrad'
22 | lr: 0.003
23 | max_epoch: 180
24 | batch_size: 128
25 | fixbase_epoch: 10
26 | open_layers: ['classifier']
27 | lr_scheduler: 'single_step'
28 | stepsize: [80]
29 |
30 | test:
31 | batch_size: 300
32 | dist_metric: 'euclidean'
33 | normalize_feature: False
34 | evaluate: False
35 | eval_freq: -1
36 | rerank: False
--------------------------------------------------------------------------------
/deep_sort/deep/reid/configs/im_osnet_x0_5_softmax_256x128_amsgrad.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | name: 'osnet_x0_5'
3 | pretrained: True
4 |
5 | data:
6 | type: 'image'
7 | sources: ['market1501']
8 | targets: ['market1501']
9 | height: 256
10 | width: 128
11 | combineall: False
12 | transforms: ['random_flip']
13 | save_dir: 'log/osnet_x0_5_market1501_softmax'
14 |
15 | loss:
16 | name: 'softmax'
17 | softmax:
18 | label_smooth: True
19 |
20 | train:
21 | optim: 'amsgrad'
22 | lr: 0.003
23 | max_epoch: 180
24 | batch_size: 128
25 | fixbase_epoch: 10
26 | open_layers: ['classifier']
27 | lr_scheduler: 'single_step'
28 | stepsize: [80]
29 |
30 | test:
31 | batch_size: 300
32 | dist_metric: 'euclidean'
33 | normalize_feature: False
34 | evaluate: False
35 | eval_freq: -1
36 | rerank: False
--------------------------------------------------------------------------------
/deep_sort/deep/reid/configs/im_osnet_x0_75_softmax_256x128_amsgrad.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | name: 'osnet_x0_75'
3 | pretrained: True
4 |
5 | data:
6 | type: 'image'
7 | sources: ['market1501']
8 | targets: ['market1501']
9 | height: 256
10 | width: 128
11 | combineall: False
12 | transforms: ['random_flip']
13 | save_dir: 'log/osnet_x0_75_market1501_softmax'
14 |
15 | loss:
16 | name: 'softmax'
17 | softmax:
18 | label_smooth: True
19 |
20 | train:
21 | optim: 'amsgrad'
22 | lr: 0.0015
23 | max_epoch: 150
24 | batch_size: 64
25 | fixbase_epoch: 10
26 | open_layers: ['classifier']
27 | lr_scheduler: 'single_step'
28 | stepsize: [60]
29 |
30 | test:
31 | batch_size: 300
32 | dist_metric: 'euclidean'
33 | normalize_feature: False
34 | evaluate: False
35 | eval_freq: -1
36 | rerank: False
--------------------------------------------------------------------------------
/deep_sort/deep/reid/configs/im_osnet_x1_0_softmax_256x128_amsgrad.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | name: 'osnet_x1_0'
3 | pretrained: True
4 |
5 | data:
6 | type: 'image'
7 | sources: ['market1501']
8 | targets: ['market1501']
9 | height: 256
10 | width: 128
11 | combineall: False
12 | transforms: ['random_flip']
13 | save_dir: 'log/osnet_x1_0_market1501_softmax'
14 |
15 | loss:
16 | name: 'softmax'
17 | softmax:
18 | label_smooth: True
19 |
20 | train:
21 | optim: 'amsgrad'
22 | lr: 0.0015
23 | max_epoch: 150
24 | batch_size: 64
25 | fixbase_epoch: 10
26 | open_layers: ['classifier']
27 | lr_scheduler: 'single_step'
28 | stepsize: [60]
29 |
30 | test:
31 | batch_size: 300
32 | dist_metric: 'euclidean'
33 | normalize_feature: False
34 | evaluate: False
35 | eval_freq: -1
36 | rerank: False
--------------------------------------------------------------------------------
/deep_sort/deep/reid/configs/im_osnet_x1_0_softmax_256x128_amsgrad_cosine.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | name: 'osnet_x1_0'
3 | pretrained: True
4 |
5 | data:
6 | type: 'image'
7 | sources: ['market1501']
8 | targets: ['market1501']
9 | height: 256
10 | width: 128
11 | combineall: False
12 | transforms: ['random_flip']
13 | save_dir: 'log/osnet_x1_0_market1501_softmax_cosinelr'
14 |
15 | loss:
16 | name: 'softmax'
17 | softmax:
18 | label_smooth: True
19 |
20 | train:
21 | optim: 'amsgrad'
22 | lr: 0.0015
23 | max_epoch: 250
24 | batch_size: 64
25 | fixbase_epoch: 10
26 | open_layers: ['classifier']
27 | lr_scheduler: 'cosine'
28 |
29 | test:
30 | batch_size: 300
31 | dist_metric: 'euclidean'
32 | normalize_feature: False
33 | evaluate: False
34 | eval_freq: -1
35 | rerank: False
--------------------------------------------------------------------------------
/deep_sort/deep/reid/configs/im_r50_softmax_256x128_amsgrad.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | name: 'resnet50_fc512'
3 | pretrained: True
4 |
5 | data:
6 | type: 'image'
7 | sources: ['market1501']
8 | targets: ['market1501']
9 | height: 256
10 | width: 128
11 | combineall: False
12 | transforms: ['random_flip']
13 | save_dir: 'log/resnet50_market1501_softmax'
14 |
15 | loss:
16 | name: 'softmax'
17 | softmax:
18 | label_smooth: True
19 |
20 | train:
21 | optim: 'amsgrad'
22 | lr: 0.0003
23 | max_epoch: 60
24 | batch_size: 32
25 | fixbase_epoch: 5
26 | open_layers: ['classifier']
27 | lr_scheduler: 'single_step'
28 | stepsize: [20]
29 |
30 | test:
31 | batch_size: 100
32 | dist_metric: 'euclidean'
33 | normalize_feature: False
34 | evaluate: False
35 | eval_freq: -1
36 | rerank: False
--------------------------------------------------------------------------------
/deep_sort/deep/reid/configs/im_r50fc512_softmax_256x128_amsgrad.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | name: 'resnet50_fc512'
3 | pretrained: True
4 |
5 | data:
6 | type: 'image'
7 | sources: ['market1501']
8 | targets: ['market1501']
9 | height: 256
10 | width: 128
11 | combineall: False
12 | transforms: ['random_flip']
13 | save_dir: 'log/resnet50_fc512_market1501_softmax'
14 |
15 | loss:
16 | name: 'softmax'
17 | softmax:
18 | label_smooth: True
19 |
20 | train:
21 | optim: 'amsgrad'
22 | lr: 0.0003
23 | max_epoch: 60
24 | batch_size: 32
25 | fixbase_epoch: 5
26 | open_layers: ['fc', 'classifier']
27 | lr_scheduler: 'single_step'
28 | stepsize: [20]
29 |
30 | test:
31 | batch_size: 100
32 | dist_metric: 'euclidean'
33 | normalize_feature: False
34 | evaluate: False
35 | eval_freq: -1
36 | rerank: False
--------------------------------------------------------------------------------
/deep_sort/deep/reid/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | SOURCEDIR = .
8 | BUILDDIR = _build
9 |
10 | # Put it first so that "make" without argument is like "make help".
11 | help:
12 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
13 |
14 | .PHONY: help Makefile
15 |
16 | # Catch-all target: route all unknown targets to Sphinx using the new
17 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
18 | %: Makefile
19 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
--------------------------------------------------------------------------------
/deep_sort/deep/reid/docs/evaluation.rst:
--------------------------------------------------------------------------------
1 | Evaluation
2 | ==========
3 |
4 | Image ReID
5 | -----------
6 | - **Market1501**, **DukeMTMC-reID**, **CUHK03 (767/700 split)** and **MSMT17** have a fixed split, so keeping ``split_id=0`` is fine.
7 | - **CUHK03 (classic split)** has 20 fixed splits, so vary ``split_id`` from 0 to 19.
8 | - **VIPeR** contains 632 identities, each with 2 images captured under two camera views. Evaluation should be done over 10 random splits. Each split randomly divides the 632 identities into 316 train ids (632 images) and 316 test ids (632 images). Note that each random split has two sub-splits, one using camera-A as query and camera-B as gallery, the other using camera-B as query and camera-A as gallery. Thus, 20 splits in total are generated, with ``split_id`` ranging from 0 to 19. Models can be trained on ``split_id=[0, 2, 4, 6, 8, 10, 12, 14, 16, 18]`` (because ``split_id=0`` and ``split_id=1`` share the same train set, and so on). At test time, a model trained on ``split_id=0`` can be directly evaluated on ``split_id=1``, a model trained on ``split_id=2`` on ``split_id=3``, and so on.
9 | - **CUHK01** is similar to VIPeR in the split generation.
10 | - **GRID** , **iLIDS** and **PRID** have 10 random splits, so evaluation should be done by varying ``split_id`` from 0 to 9.
11 | - **SenseReID** has no training images and is used for evaluation only.
12 |
13 |
14 | .. note::
15 | The ``split_id`` argument is defined in ``ImageDataManager`` and ``VideoDataManager``. Please refer to :ref:`torchreid_data`.
16 |
17 |
18 | Video ReID
19 | -----------
20 | - **MARS** and **DukeMTMC-VideoReID** have a single fixed split, so using ``split_id=0`` is fine.
21 | - **iLIDS-VID** and **PRID2011** have 10 predefined splits so evaluation should be done by varying ``split_id`` from 0 to 9.
--------------------------------------------------------------------------------
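As a concrete illustration of the `split_id` convention above, the sketch below builds a data manager for each of GRID's ten splits; it assumes Torchreid's documented `ImageDataManager` signature, and the dataset root is a placeholder.

```python
import torchreid

# GRID has 10 random splits; results should be averaged over all of them.
for split_id in range(10):
    datamanager = torchreid.data.ImageDataManager(
        root="reid-data",  # placeholder dataset root
        sources="grid",
        height=256,
        width=128,
        split_id=split_id,
    )
    # ... train and evaluate a model on this split, then average the 10 results
```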
/deep_sort/deep/reid/docs/figures/actmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/deep_sort/deep/reid/docs/figures/actmap.jpg
--------------------------------------------------------------------------------
/deep_sort/deep/reid/docs/figures/ranking_results.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/deep_sort/deep/reid/docs/figures/ranking_results.jpg
--------------------------------------------------------------------------------
/deep_sort/deep/reid/docs/index.rst:
--------------------------------------------------------------------------------
1 | .. include:: ../README.rst
2 |
3 |
4 | .. toctree::
5 | :hidden:
6 |
7 | user_guide
8 | datasets
9 | evaluation
10 |
11 | .. toctree::
12 | :caption: Package Reference
13 | :hidden:
14 |
15 | pkg/data
16 | pkg/engine
17 | pkg/losses
18 | pkg/metrics
19 | pkg/models
20 | pkg/optim
21 | pkg/utils
22 |
23 | .. toctree::
24 | :caption: Resources
25 | :hidden:
26 |
27 | AWESOME_REID.md
28 | MODEL_ZOO.md
29 |
30 |
31 | Indices and tables
32 | ==================
33 |
34 | * :ref:`genindex`
35 | * :ref:`modindex`
--------------------------------------------------------------------------------
/deep_sort/deep/reid/docs/pkg/data.rst:
--------------------------------------------------------------------------------
1 | .. _torchreid_data:
2 |
3 | torchreid.data
4 | ==============
5 |
6 |
7 | Data Manager
8 | ---------------------------
9 |
10 | .. automodule:: torchreid.data.datamanager
11 | :members:
12 |
13 |
14 | Sampler
15 | -----------------------
16 |
17 | .. automodule:: torchreid.data.sampler
18 | :members:
19 |
20 |
21 | Transforms
22 | ---------------------------
23 |
24 | .. automodule:: torchreid.data.transforms
25 | :members:
26 |
27 |
28 | Dataset
29 | ---------------------------
30 |
31 | .. automodule:: torchreid.data.datasets.dataset
32 | :members:
33 |
34 |
35 | .. automodule:: torchreid.data.datasets.__init__
36 | :members:
37 |
38 |
39 | Image Datasets
40 | ------------------------------
41 |
42 | .. automodule:: torchreid.data.datasets.image.market1501
43 | :members:
44 |
45 | .. automodule:: torchreid.data.datasets.image.cuhk03
46 | :members:
47 |
48 | .. automodule:: torchreid.data.datasets.image.dukemtmcreid
49 | :members:
50 |
51 | .. automodule:: torchreid.data.datasets.image.msmt17
52 | :members:
53 |
54 | .. automodule:: torchreid.data.datasets.image.viper
55 | :members:
56 |
57 | .. automodule:: torchreid.data.datasets.image.grid
58 | :members:
59 |
60 | .. automodule:: torchreid.data.datasets.image.cuhk01
61 | :members:
62 |
63 | .. automodule:: torchreid.data.datasets.image.ilids
64 | :members:
65 |
66 | .. automodule:: torchreid.data.datasets.image.sensereid
67 | :members:
68 |
69 | .. automodule:: torchreid.data.datasets.image.prid
70 | :members:
71 |
72 |
73 | Video Datasets
74 | ------------------------------
75 |
76 | .. automodule:: torchreid.data.datasets.video.mars
77 | :members:
78 |
79 | .. automodule:: torchreid.data.datasets.video.ilidsvid
80 | :members:
81 |
82 | .. automodule:: torchreid.data.datasets.video.prid2011
83 | :members:
84 |
85 | .. automodule:: torchreid.data.datasets.video.dukemtmcvidreid
86 | :members:
--------------------------------------------------------------------------------
/deep_sort/deep/reid/docs/pkg/engine.rst:
--------------------------------------------------------------------------------
1 | .. _torchreid_engine:
2 |
3 | torchreid.engine
4 | ==================
5 |
6 |
7 | Base Engine
8 | ------------
9 |
10 | .. autoclass:: torchreid.engine.engine.Engine
11 | :members:
12 |
13 |
14 | Image Engines
15 | -------------
16 |
17 | .. autoclass:: torchreid.engine.image.softmax.ImageSoftmaxEngine
18 | :members:
19 |
20 |
21 | .. autoclass:: torchreid.engine.image.triplet.ImageTripletEngine
22 | :members:
23 |
24 |
25 | Video Engines
26 | -------------
27 |
28 | .. autoclass:: torchreid.engine.video.softmax.VideoSoftmaxEngine
29 |
30 |
31 | .. autoclass:: torchreid.engine.video.triplet.VideoTripletEngine
--------------------------------------------------------------------------------
/deep_sort/deep/reid/docs/pkg/losses.rst:
--------------------------------------------------------------------------------
1 | .. _torchreid_losses:
2 |
3 | torchreid.losses
4 | =================
5 |
6 |
7 | Softmax
8 | --------
9 |
10 | .. automodule:: torchreid.losses.cross_entropy_loss
11 | :members:
12 |
13 |
14 | Triplet
15 | -------
16 |
17 | .. automodule:: torchreid.losses.hard_mine_triplet_loss
18 | :members:
--------------------------------------------------------------------------------
/deep_sort/deep/reid/docs/pkg/metrics.rst:
--------------------------------------------------------------------------------
1 | .. _torchreid_metrics:
2 |
3 | torchreid.metrics
4 | =================
5 |
6 |
7 | Distance
8 | ---------
9 |
10 | .. automodule:: torchreid.metrics.distance
11 | :members:
12 |
13 |
14 | Accuracy
15 | --------
16 |
17 | .. automodule:: torchreid.metrics.accuracy
18 | :members:
19 |
20 |
21 | Rank
22 | -----
23 |
24 | .. automodule:: torchreid.metrics.rank
25 | :members: evaluate_rank
--------------------------------------------------------------------------------
/deep_sort/deep/reid/docs/pkg/models.rst:
--------------------------------------------------------------------------------
1 | .. _torchreid_models:
2 |
3 | torchreid.models
4 | =================
5 |
6 | Interface
7 | ---------
8 |
9 | .. automodule:: torchreid.models.__init__
10 | :members:
11 |
12 |
13 | ImageNet Classification Models
14 | -------------------------------
15 |
16 | .. autoclass:: torchreid.models.resnet.ResNet
17 | .. autoclass:: torchreid.models.senet.SENet
18 | .. autoclass:: torchreid.models.densenet.DenseNet
19 | .. autoclass:: torchreid.models.inceptionresnetv2.InceptionResNetV2
20 | .. autoclass:: torchreid.models.inceptionv4.InceptionV4
21 | .. autoclass:: torchreid.models.xception.Xception
22 |
23 |
24 | Lightweight Models
25 | ------------------
26 |
27 | .. autoclass:: torchreid.models.nasnet.NASNetAMobile
28 | .. autoclass:: torchreid.models.mobilenetv2.MobileNetV2
29 | .. autoclass:: torchreid.models.shufflenet.ShuffleNet
30 | .. autoclass:: torchreid.models.squeezenet.SqueezeNet
31 | .. autoclass:: torchreid.models.shufflenetv2.ShuffleNetV2
32 |
33 |
34 | ReID-specific Models
35 | --------------------
36 |
37 | .. autoclass:: torchreid.models.mudeep.MuDeep
38 | .. autoclass:: torchreid.models.resnetmid.ResNetMid
39 | .. autoclass:: torchreid.models.hacnn.HACNN
40 | .. autoclass:: torchreid.models.pcb.PCB
41 | .. autoclass:: torchreid.models.mlfn.MLFN
42 | .. autoclass:: torchreid.models.osnet.OSNet
43 | .. autoclass:: torchreid.models.osnet_ain.OSNet
--------------------------------------------------------------------------------
/deep_sort/deep/reid/docs/pkg/optim.rst:
--------------------------------------------------------------------------------
1 | .. _torchreid_optim:
2 |
3 | torchreid.optim
4 | =================
5 |
6 |
7 | Optimizer
8 | ----------
9 |
10 | .. automodule:: torchreid.optim.optimizer
11 | :members: build_optimizer
12 |
13 |
14 | LR Scheduler
15 | -------------
16 |
17 | .. automodule:: torchreid.optim.lr_scheduler
18 | :members: build_lr_scheduler
--------------------------------------------------------------------------------
/deep_sort/deep/reid/docs/pkg/utils.rst:
--------------------------------------------------------------------------------
1 | .. _torchreid_utils:
2 |
3 | torchreid.utils
4 | =================
5 |
6 | Average Meter
7 | --------------
8 |
9 | .. automodule:: torchreid.utils.avgmeter
10 | :members:
11 |
12 |
13 | Loggers
14 | -------
15 |
16 | .. automodule:: torchreid.utils.loggers
17 | :members:
18 |
19 |
20 | Generic Tools
21 | ---------------
22 | .. automodule:: torchreid.utils.tools
23 | :members:
24 |
25 |
26 | ReID Tools
27 | ----------
28 |
29 | .. automodule:: torchreid.utils.reidtools
30 | :members:
31 |
32 |
33 | Torch Tools
34 | ------------
35 |
36 | .. automodule:: torchreid.utils.torchtools
37 | :members:
38 |
39 |
40 | .. automodule:: torchreid.utils.model_complexity
41 | :members:
42 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | sphinx==2.2.0
2 | sphinx-markdown-tables
3 | sphinx-rtd-theme
4 | sphinxcontrib-napoleon
5 | sphinxcontrib-websupport
6 | recommonmark
--------------------------------------------------------------------------------
/deep_sort/deep/reid/linter.sh:
--------------------------------------------------------------------------------
1 | echo "Running isort"
2 | isort -y -sp .
3 | echo "Done"
4 |
5 | echo "Running yapf"
6 | yapf -i -r -vv -e build .
7 | echo "Done"
8 |
9 | echo "Running flake8"
10 | flake8 .
11 | echo "Done"
--------------------------------------------------------------------------------
/deep_sort/deep/reid/projects/DML/README.md:
--------------------------------------------------------------------------------
1 | # Deep mutual learning
2 |
3 | This repo implements [Deep Mutual Learning (CVPR'18)](https://zpascal.net/cvpr2018/Zhang_Deep_Mutual_Learning_CVPR_2018_paper.pdf) (DML) for person re-id.
4 |
5 | We used this code in our [OSNet](https://arxiv.org/pdf/1905.00953.pdf) paper (see Supp. B). The training command to reproduce the result of "triplet + DML" (Table 12f in the paper) is
6 | ```bash
7 | python main.py \
8 | --config-file im_osnet_x1_0_dml_256x128_amsgrad_cosine.yaml \
9 | --root $DATA
10 | ```
11 |
12 | `$DATA` corresponds to the path to your dataset folder.
13 |
14 | Change `model.deploy` to `both` if you want to enable model ensembling.
15 |
16 | If you have any questions, please raise an issue in the Issues area.
--------------------------------------------------------------------------------
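For orientation, deep mutual learning trains two peer networks that, besides their own supervised losses, each mimic the other's softened predictions through a KL term. The snippet below is a minimal, hedged sketch of that mutual term in PyTorch; it is illustrative only and not the exact loss implemented in `dml.py`.

```python
import torch
import torch.nn.functional as F

def mutual_kl(logits_a, logits_b, T=1.0):
    """KL(p_b || p_a): pushes network A towards network B's prediction."""
    log_p_a = F.log_softmax(logits_a / T, dim=1)
    p_b = F.softmax(logits_b / T, dim=1).detach()  # peer output, no gradient
    return F.kl_div(log_p_a, p_b, reduction="batchmean") * (T * T)

# Toy logits from two peer networks: batch of 4 samples, 10 classes.
logits_1 = torch.randn(4, 10, requires_grad=True)
logits_2 = torch.randn(4, 10, requires_grad=True)

# Each network's total loss = its supervised loss + weight_ml * mutual term.
loss_ml_1 = mutual_kl(logits_1, logits_2)  # gradient flows into network 1
loss_ml_2 = mutual_kl(logits_2, logits_1)  # gradient flows into network 2
print(loss_ml_1.item(), loss_ml_2.item())
```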
/deep_sort/deep/reid/projects/DML/im_osnet_x1_0_dml_256x128_amsgrad_cosine.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | name: 'osnet_x1_0'
3 | pretrained: True
4 | deploy: 'model1'
5 |
6 | data:
7 | type: 'image'
8 | sources: ['market1501']
9 | targets: ['market1501']
10 | height: 256
11 | width: 128
12 | combineall: False
13 | transforms: ['random_flip', 'random_erase']
14 | save_dir: 'log/osnet_x1_0_market1501_dml_cosinelr'
15 |
16 | loss:
17 | name: 'triplet'
18 | softmax:
19 | label_smooth: True
20 | triplet:
21 | margin: 0.3
22 | weight_t: 0.5
23 | weight_x: 1.
24 | dml:
25 | weight_ml: 1.
26 |
27 | train:
28 | optim: 'amsgrad'
29 | lr: 0.0015
30 | max_epoch: 250
31 | batch_size: 64
32 | fixbase_epoch: 10
33 | open_layers: ['classifier']
34 | lr_scheduler: 'cosine'
35 |
36 | test:
37 | batch_size: 300
38 | dist_metric: 'cosine'
39 | normalize_feature: False
40 | evaluate: False
41 | eval_freq: -1
42 | rerank: False
--------------------------------------------------------------------------------
/deep_sort/deep/reid/projects/OSNet_AIN/README.md:
--------------------------------------------------------------------------------
1 | # Differentiable NAS for OSNet-AIN
2 |
3 | ## Introduction
4 | This repository contains the neural architecture search (NAS) code (based on [Torchreid](https://arxiv.org/abs/1910.10093)) for [OSNet-AIN](https://arxiv.org/abs/1910.06827), an extension of [OSNet](https://arxiv.org/abs/1905.00953) that achieves strong performance on cross-domain person re-identification (re-ID) benchmarks (*without using any target data*). OSNet-AIN builds on the idea of using [instance normalisation](https://arxiv.org/abs/1607.08022) (IN) layers to eliminate instance-specific contrast in images for domain-generalisable representation learning. This is inspired by [neural style transfer](https://arxiv.org/abs/1703.06868) works that use IN to remove image styles. Though IN naturally suits the cross-domain person re-ID task, it remains unclear where IN should be inserted into a re-ID CNN to maximise the performance gain. To avoid exhaustively evaluating all possible designs, OSNet-AIN learns to search for the optimal OSNet+IN design from data using a differentiable NAS algorithm. For technical details, please refer to our paper at https://arxiv.org/abs/1910.06827.
5 |
6 |
7 |
8 |
9 |
10 | ## Training
11 | Assume the reid data is stored at `$DATA`. Run
12 | ```
13 | python main.py --config-file nas.yaml --root $DATA
14 | ```
15 |
16 | The structure of the found architecture will be shown at the end of training.
17 |
18 | The default config was designed for 8 Tesla V100 32GB GPUs. You can modify the batch size based on your device memory.
19 |
20 | **Note** that the test result obtained at the end of architecture search is not meaningful (due to the stochastic sampling layers). Therefore, do not rely on the result to judge the model performance. Instead, you should construct the found architecture in `osnet_child.py` and re-train and evaluate the model on the reid datasets.
21 |
22 | ## Citation
23 | If you find this code useful to your research, please consider citing the following papers.
24 | ```
25 | @article{zhou2021osnet,
26 | title={Learning Generalisable Omni-Scale Representations for Person Re-Identification},
27 | author={Zhou, Kaiyang and Yang, Yongxin and Cavallaro, Andrea and Xiang, Tao},
28 | journal={TPAMI},
29 | year={2021}
30 | }
31 |
32 | @inproceedings{zhou2019osnet,
33 | title={Omni-Scale Feature Learning for Person Re-Identification},
34 | author={Zhou, Kaiyang and Yang, Yongxin and Cavallaro, Andrea and Xiang, Tao},
35 | booktitle={ICCV},
36 | year={2019}
37 | }
38 | ```
--------------------------------------------------------------------------------
/deep_sort/deep/reid/projects/OSNet_AIN/nas.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | name: 'osnet_nas'
3 | pretrained: False
4 |
5 | nas:
6 | mc_iter: 1
7 | init_lmda: 10.
8 | min_lmda: 1.
9 | lmda_decay_step: 20
10 | lmda_decay_rate: 0.5
11 | fixed_lmda: False
12 |
13 | data:
14 | type: 'image'
15 | sources: ['msmt17']
16 | targets: ['market1501']
17 | height: 256
18 | width: 128
19 | combineall: True
20 | transforms: ['random_flip', 'color_jitter']
21 | save_dir: 'log/osnet_nas'
22 |
23 | loss:
24 | name: 'softmax'
25 | softmax:
26 | label_smooth: True
27 |
28 | train:
29 | optim: 'sgd'
30 | lr: 0.1
31 | max_epoch: 120
32 | batch_size: 512
33 | fixbase_epoch: 0
34 | open_layers: ['classifier']
35 | lr_scheduler: 'cosine'
36 |
37 | test:
38 | batch_size: 300
39 | dist_metric: 'cosine'
40 | normalize_feature: False
41 | evaluate: False
42 | eval_freq: -1
43 | rerank: False
44 | visactmap: False
--------------------------------------------------------------------------------
/deep_sort/deep/reid/projects/OSNet_AIN/softmax_nas.py:
--------------------------------------------------------------------------------
1 | from __future__ import division, print_function, absolute_import
2 |
3 | from torchreid import metrics
4 | from torchreid.engine import Engine
5 | from torchreid.losses import CrossEntropyLoss
6 |
7 |
8 | class ImageSoftmaxNASEngine(Engine):
9 |
10 | def __init__(
11 | self,
12 | datamanager,
13 | model,
14 | optimizer,
15 | scheduler=None,
16 | use_gpu=False,
17 | label_smooth=True,
18 | mc_iter=1,
19 | init_lmda=1.,
20 | min_lmda=1.,
21 | lmda_decay_step=20,
22 | lmda_decay_rate=0.5,
23 | fixed_lmda=False
24 | ):
25 | super(ImageSoftmaxNASEngine, self).__init__(datamanager, use_gpu)
26 | self.mc_iter = mc_iter
27 | self.init_lmda = init_lmda
28 | self.min_lmda = min_lmda
29 | self.lmda_decay_step = lmda_decay_step
30 | self.lmda_decay_rate = lmda_decay_rate
31 | self.fixed_lmda = fixed_lmda
32 |
33 | self.model = model
34 | self.optimizer = optimizer
35 | self.scheduler = scheduler
36 | self.register_model('model', model, optimizer, scheduler)
37 |
38 | self.criterion = CrossEntropyLoss(
39 | num_classes=self.datamanager.num_train_pids,
40 | use_gpu=self.use_gpu,
41 | label_smooth=label_smooth
42 | )
43 |
44 | def forward_backward(self, data):
45 | imgs, pids = self.parse_data_for_train(data)
46 |
47 | if self.use_gpu:
48 | imgs = imgs.cuda()
49 | pids = pids.cuda()
50 |
51 | # softmax temperature
52 | if self.fixed_lmda or self.lmda_decay_step == -1:
53 | lmda = self.init_lmda
54 | else:
55 | lmda = self.init_lmda * self.lmda_decay_rate**(
56 | self.epoch // self.lmda_decay_step
57 | )
58 | if lmda < self.min_lmda:
59 | lmda = self.min_lmda
60 |
61 | for k in range(self.mc_iter):
62 | outputs = self.model(imgs, lmda=lmda)
63 | loss = self.compute_loss(self.criterion, outputs, pids)
64 | self.optimizer.zero_grad()
65 | loss.backward()
66 | self.optimizer.step()
67 |
68 | loss_dict = {
69 | 'loss': loss.item(),
70 | 'acc': metrics.accuracy(outputs, pids)[0].item()
71 | }
72 |
73 | return loss_dict
74 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/projects/README.md:
--------------------------------------------------------------------------------
1 | Here are some research projects built on [Torchreid](https://arxiv.org/abs/1910.10093).
2 |
3 | + `OSNet_AIN`: [Learning Generalisable Omni-Scale Representations for Person Re-Identification](https://arxiv.org/abs/1910.06827)
4 | + `DML`: [Deep Mutual Learning (CVPR'18)](https://arxiv.org/abs/1706.00384)
5 | + `attribute_recognition`: [Omni-Scale Feature Learning for Person Re-Identification (ICCV'19)](https://arxiv.org/abs/1905.00953)
--------------------------------------------------------------------------------
/deep_sort/deep/reid/projects/attribute_recognition/README.md:
--------------------------------------------------------------------------------
1 | # Person Attribute Recognition
2 | This code was developed for the experiment of person attribute recognition in [Omni-Scale Feature Learning for Person Re-Identification (ICCV'19)](https://arxiv.org/abs/1905.00953).
3 |
4 | ## Download data
5 | Download the PA-100K dataset from [https://github.com/xh-liu/HydraPlus-Net](https://github.com/xh-liu/HydraPlus-Net), and extract the file under the folder where you store your data (say $DATASET). The folder structure should look like
6 | ```bash
7 | $DATASET/
8 | pa100k/
9 | data/ # images
10 | annotation/
11 | annotation.mat
12 | ```
13 |
14 | ## Train
15 | The training command is provided in `train.sh`. Run `bash train.sh $DATASET` to start training.
16 |
17 | ## Test
18 | To test a pretrained model, add the following two arguments to `train.sh`: `--load-weights $PATH_TO_WEIGHTS --evaluate`.
--------------------------------------------------------------------------------
/deep_sort/deep/reid/projects/attribute_recognition/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import division, print_function, absolute_import
2 |
3 | from .pa100k import PA100K
4 |
5 | __datasets = {'pa100k': PA100K}
6 |
7 |
8 | def init_dataset(name, **kwargs):
9 | avai_datasets = list(__datasets.keys())
10 | if name not in avai_datasets:
11 | raise ValueError(
12 | 'Invalid dataset name. Received "{}", '
13 | 'but expected to be one of {}'.format(name, avai_datasets)
14 | )
15 | return __datasets[name](**kwargs)
16 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/projects/attribute_recognition/datasets/dataset.py:
--------------------------------------------------------------------------------
1 | from __future__ import division, print_function, absolute_import
2 | import os.path as osp
3 |
4 | from torchreid.utils import read_image
5 |
6 |
7 | class Dataset(object):
8 |
9 | def __init__(
10 | self,
11 | train,
12 | val,
13 | test,
14 | attr_dict,
15 | transform=None,
16 | mode='train',
17 | verbose=True,
18 | **kwargs
19 | ):
20 | self.train = train
21 | self.val = val
22 | self.test = test
23 | self._attr_dict = attr_dict
24 | self._num_attrs = len(self.attr_dict)
25 | self.transform = transform
26 |
27 | if mode == 'train':
28 | self.data = self.train
29 | elif mode == 'val':
30 | self.data = self.val
31 | else:
32 | self.data = self.test
33 |
34 | if verbose:
35 | self.show_summary()
36 |
37 | @property
38 | def num_attrs(self):
39 | return self._num_attrs
40 |
41 | @property
42 | def attr_dict(self):
43 | return self._attr_dict
44 |
45 | def __len__(self):
46 | return len(self.data)
47 |
48 | def __getitem__(self, index):
49 | img_path, attrs = self.data[index]
50 | img = read_image(img_path)
51 | if self.transform is not None:
52 | img = self.transform(img)
53 | return img, attrs, img_path
54 |
55 | def check_before_run(self, required_files):
56 | """Checks if required files exist before going deeper.
57 | Args:
58 | required_files (str or list): string file name(s).
59 | """
60 | if isinstance(required_files, str):
61 | required_files = [required_files]
62 |
63 | for fpath in required_files:
64 | if not osp.exists(fpath):
65 | raise RuntimeError('"{}" is not found'.format(fpath))
66 |
67 | def show_summary(self):
68 | num_train = len(self.train)
69 | num_val = len(self.val)
70 | num_test = len(self.test)
71 | num_total = num_train + num_val + num_test
72 |
73 | print('=> Loaded {}'.format(self.__class__.__name__))
74 | print(" ------------------------------")
75 | print(" subset | # images")
76 | print(" ------------------------------")
77 | print(" train | {:8d}".format(num_train))
78 | print(" val | {:8d}".format(num_val))
79 | print(" test | {:8d}".format(num_test))
80 | print(" ------------------------------")
81 | print(" total | {:8d}".format(num_total))
82 | print(" ------------------------------")
83 | print(" # attributes: {}".format(len(self.attr_dict)))
84 | print(" attributes:")
85 | for label, attr in self.attr_dict.items():
86 | print(' {:3d}: {}'.format(label, attr))
87 | print(" ------------------------------")
88 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/projects/attribute_recognition/datasets/pa100k.py:
--------------------------------------------------------------------------------
1 | from __future__ import division, print_function, absolute_import
2 | import numpy as np
3 | import os.path as osp
4 | from scipy.io import loadmat
5 |
6 | from .dataset import Dataset
7 |
8 |
9 | class PA100K(Dataset):
10 | """Pedestrian attribute dataset.
11 |
12 | 80k training images + 20k test images.
13 |
14 | The folder structure should be:
15 | pa100k/
16 | data/ # images
17 | annotation/
18 | annotation.mat
19 | """
20 | dataset_dir = 'pa100k'
21 |
22 | def __init__(self, root='', **kwargs):
23 | self.root = osp.abspath(osp.expanduser(root))
24 | self.dataset_dir = osp.join(self.root, self.dataset_dir)
25 | self.data_dir = osp.join(self.dataset_dir, 'data')
26 | self.anno_mat_path = osp.join(
27 | self.dataset_dir, 'annotation', 'annotation.mat'
28 | )
29 |
30 | required_files = [self.data_dir, self.anno_mat_path]
31 | self.check_before_run(required_files)
32 |
33 | train, val, test, attr_dict = self.extract_data()
34 | super(PA100K, self).__init__(train, val, test, attr_dict, **kwargs)
35 |
36 | def extract_data(self):
37 | # anno_mat is a dictionary with keys: ['test_images_name', 'val_images_name',
38 | # 'train_images_name', 'val_label', 'attributes', 'test_label', 'train_label']
39 | anno_mat = loadmat(self.anno_mat_path)
40 |
41 | def _extract(key_name, key_label):
42 | names = anno_mat[key_name]
43 | labels = anno_mat[key_label]
44 | num_imgs = names.shape[0]
45 | data = []
46 | for i in range(num_imgs):
47 | name = names[i, 0][0]
48 | attrs = labels[i, :].astype(np.float32)
49 | img_path = osp.join(self.data_dir, name)
50 | data.append((img_path, attrs))
51 | return data
52 |
53 | train = _extract('train_images_name', 'train_label')
54 | val = _extract('val_images_name', 'val_label')
55 | test = _extract('test_images_name', 'test_label')
56 | attrs = anno_mat['attributes']
57 | attr_dict = {i: str(attr[0][0]) for i, attr in enumerate(attrs)}
58 |
59 | return train, val, test, attr_dict
60 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/projects/attribute_recognition/models/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from .osnet import *
4 |
5 | __model_factory = {
6 | 'osnet_avgpool': osnet_avgpool,
7 | 'osnet_maxpool': osnet_maxpool
8 | }
9 |
10 |
11 | def build_model(name, num_classes, pretrained=True, use_gpu=True):
12 | avai_models = list(__model_factory.keys())
13 | if name not in avai_models:
14 | raise KeyError
15 | return __model_factory[name](
16 | num_classes=num_classes, pretrained=pretrained, use_gpu=use_gpu
17 | )
18 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/projects/attribute_recognition/train.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # DATASET points to the directory containing pa100k/
4 | DATASET=$1
5 |
6 | python main.py \
7 | --root ${DATASET} \
8 | -d pa100k \
9 | -a osnet_maxpool \
10 | --max-epoch 50 \
11 | --stepsize 30 40 \
12 | --batch-size 32 \
13 | --lr 0.065 \
14 | --optim sgd \
15 | --weighted-bce \
16 | --save-dir log/pa100k-osnet_maxpool
--------------------------------------------------------------------------------
/deep_sort/deep/reid/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | Cython
3 | h5py
4 | Pillow
5 | six
6 | scipy
7 | opencv-python
8 | matplotlib
9 | tb-nightly
10 | future
11 | yacs
12 | gdown
13 | flake8
14 | yapf
15 | isort==4.3.21
16 | imageio
--------------------------------------------------------------------------------
/deep_sort/deep/reid/setup.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import os.path as osp
3 | from setuptools import setup, find_packages
4 | from distutils.extension import Extension
5 | from Cython.Build import cythonize
6 |
7 |
8 | def readme():
9 | with open('README.rst') as f:
10 | content = f.read()
11 | return content
12 |
13 |
14 | def find_version():
15 | version_file = 'torchreid/__init__.py'
16 | with open(version_file, 'r') as f:
17 | exec(compile(f.read(), version_file, 'exec'))
18 | return locals()['__version__']
19 |
20 |
21 | def numpy_include():
22 | try:
23 | numpy_include = np.get_include()
24 | except AttributeError:
25 | numpy_include = np.get_numpy_include()
26 | return numpy_include
27 |
28 |
29 | ext_modules = [
30 | Extension(
31 | 'torchreid.metrics.rank_cylib.rank_cy',
32 | ['torchreid/metrics/rank_cylib/rank_cy.pyx'],
33 | include_dirs=[numpy_include()],
34 | )
35 | ]
36 |
37 |
38 | def get_requirements(filename='requirements.txt'):
39 | here = osp.dirname(osp.realpath(__file__))
40 | with open(osp.join(here, filename), 'r') as f:
41 | requires = [line.replace('\n', '') for line in f.readlines()]
42 | return requires
43 |
44 |
45 | setup(
46 | name='torchreid',
47 | version=find_version(),
48 | description='A library for deep learning person re-ID in PyTorch',
49 | author='Kaiyang Zhou',
50 | license='MIT',
51 | long_description=readme(),
52 | url='https://github.com/KaiyangZhou/deep-person-reid',
53 | packages=find_packages(),
54 | install_requires=get_requirements(),
55 | keywords=['Person Re-Identification', 'Deep Learning', 'Computer Vision'],
56 | ext_modules=cythonize(ext_modules)
57 | )
58 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/tools/compute_mean_std.py:
--------------------------------------------------------------------------------
1 | """
2 | Compute channel-wise mean and standard deviation of a dataset.
3 |
4 | Usage:
5 | $ python compute_mean_std.py DATASET_ROOT DATASET_KEY
6 |
7 | - The first argument points to the root path where you put the datasets.
8 | - The second argument means the specific dataset key.
9 |
10 | For instance, if your datasets are stored under $DATA and you want to
11 | compute the statistics of Market1501, run
12 | $ python compute_mean_std.py $DATA market1501
13 | """
14 | import argparse
15 |
16 | import torchreid
17 |
18 |
19 | def main():
20 | parser = argparse.ArgumentParser()
21 | parser.add_argument('root', type=str)
22 | parser.add_argument('sources', type=str)
23 | args = parser.parse_args()
24 |
25 | datamanager = torchreid.data.ImageDataManager(
26 | root=args.root,
27 | sources=args.sources,
28 | targets=None,
29 | height=256,
30 | width=128,
31 | batch_size_train=100,
32 | batch_size_test=100,
33 | transforms=None,
34 | norm_mean=[0., 0., 0.],
35 | norm_std=[1., 1., 1.],
36 | train_sampler='SequentialSampler'
37 | )
38 | train_loader = datamanager.train_loader
39 |
40 | print('Computing mean and std ...')
41 | mean = 0.
42 | std = 0.
43 | n_samples = 0.
44 | for data in train_loader:
45 | data = data['img']
46 | batch_size = data.size(0)
47 | data = data.view(batch_size, data.size(1), -1)
48 | mean += data.mean(2).sum(0)
49 | std += data.std(2).sum(0)
50 | n_samples += batch_size
51 |
52 | mean /= n_samples
53 | std /= n_samples
54 | print('Mean: {}'.format(mean))
55 | print('Std: {}'.format(std))
56 |
57 |
58 | if __name__ == '__main__':
59 | main()
60 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function, absolute_import
2 |
3 | from torchreid import data, optim, utils, engine, losses, models, metrics
4 |
5 | __version__ = '1.4.0'
6 | __author__ = 'Kaiyang Zhou'
7 | __homepage__ = 'https://kaiyangzhou.github.io/'
8 | __description__ = 'Deep learning person re-identification in PyTorch'
9 | __url__ = 'https://github.com/KaiyangZhou/deep-person-reid'
10 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/data/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function, absolute_import
2 |
3 | from .datasets import (
4 | Dataset, ImageDataset, VideoDataset, register_image_dataset,
5 | register_video_dataset
6 | )
7 | from .datamanager import ImageDataManager, VideoDataManager
8 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/data/datasets/image/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function, absolute_import
2 |
3 | from .grid import GRID
4 | from .prid import PRID
5 | from .ilids import iLIDS
6 | from .viper import VIPeR
7 | from .cuhk01 import CUHK01
8 | from .cuhk02 import CUHK02
9 | from .cuhk03 import CUHK03
10 | from .msmt17 import MSMT17
11 | from .cuhksysu import CUHKSYSU
12 | from .sensereid import SenseReID
13 | from .market1501 import Market1501
14 | from .dukemtmcreid import DukeMTMCreID
15 | from .university1652 import University1652
16 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/data/datasets/image/cuhksysu.py:
--------------------------------------------------------------------------------
1 | from __future__ import division, print_function, absolute_import
2 | import copy
3 | import glob
4 | import os.path as osp
5 |
6 | from ..dataset import ImageDataset
7 |
8 |
9 | class CUHKSYSU(ImageDataset):
10 | """CUHKSYSU.
11 |
12 | This dataset can only be used for model training.
13 |
14 | Reference:
15 | Xiao et al. End-to-end deep learning for person search.
16 |
17 | URL: ``_
18 |
19 | Dataset statistics:
20 | - identities: 11,934
21 | - images: 34,574
22 | """
23 | _train_only = True
24 | dataset_dir = 'cuhksysu'
25 |
26 | def __init__(self, root='', **kwargs):
27 | self.root = osp.abspath(osp.expanduser(root))
28 | self.dataset_dir = osp.join(self.root, self.dataset_dir)
29 | self.data_dir = osp.join(self.dataset_dir, 'cropped_images')
30 |
31 | # image name format: p11422_s16929_1.jpg
32 | train = self.process_dir(self.data_dir)
33 | query = [copy.deepcopy(train[0])]
34 | gallery = [copy.deepcopy(train[0])]
35 |
36 | super(CUHKSYSU, self).__init__(train, query, gallery, **kwargs)
37 |
38 | def process_dir(self, dirname):
39 | img_paths = glob.glob(osp.join(dirname, '*.jpg'))
40 | # num_imgs = len(img_paths)
41 |
42 | # get all identities:
43 | pid_container = set()
44 | for img_path in img_paths:
45 | img_name = osp.basename(img_path)
46 | pid = img_name.split('_')[0]
47 | pid_container.add(pid)
48 | pid2label = {pid: label for label, pid in enumerate(pid_container)}
49 |
50 | # num_pids = len(pid_container)
51 |
52 | # extract data
53 | data = []
54 | for img_path in img_paths:
55 | img_name = osp.basename(img_path)
56 | pid = img_name.split('_')[0]
57 | label = pid2label[pid]
58 | data.append((img_path, label, 0)) # dummy camera id
59 |
60 | return data
61 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/data/datasets/image/dukemtmcreid.py:
--------------------------------------------------------------------------------
1 | from __future__ import division, print_function, absolute_import
2 | import re
3 | import glob
4 | import os.path as osp
5 |
6 | from ..dataset import ImageDataset
7 |
8 |
9 | class DukeMTMCreID(ImageDataset):
10 | """DukeMTMC-reID.
11 |
12 | Reference:
13 | - Ristani et al. Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking. ECCVW 2016.
14 | - Zheng et al. Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in vitro. ICCV 2017.
15 |
16 | URL: ``_
17 |
18 | Dataset statistics:
19 | - identities: 1404 (train + query).
20 | - images: 16522 (train) + 2228 (query) + 17661 (gallery).
21 | - cameras: 8.
22 | """
23 | dataset_dir = 'dukemtmc-reid'
24 | dataset_url = 'http://vision.cs.duke.edu/DukeMTMC/data/misc/DukeMTMC-reID.zip'
25 |
26 | def __init__(self, root='', **kwargs):
27 | self.root = osp.abspath(osp.expanduser(root))
28 | self.dataset_dir = osp.join(self.root, self.dataset_dir)
29 | self.download_dataset(self.dataset_dir, self.dataset_url)
30 | self.train_dir = osp.join(
31 | self.dataset_dir, 'DukeMTMC-reID/bounding_box_train'
32 | )
33 | self.query_dir = osp.join(self.dataset_dir, 'DukeMTMC-reID/query')
34 | self.gallery_dir = osp.join(
35 | self.dataset_dir, 'DukeMTMC-reID/bounding_box_test'
36 | )
37 |
38 | required_files = [
39 | self.dataset_dir, self.train_dir, self.query_dir, self.gallery_dir
40 | ]
41 | self.check_before_run(required_files)
42 |
43 | train = self.process_dir(self.train_dir, relabel=True)
44 | query = self.process_dir(self.query_dir, relabel=False)
45 | gallery = self.process_dir(self.gallery_dir, relabel=False)
46 |
47 | super(DukeMTMCreID, self).__init__(train, query, gallery, **kwargs)
48 |
49 | def process_dir(self, dir_path, relabel=False):
50 | img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
51 | pattern = re.compile(r'([-\d]+)_c(\d)')
52 |
53 | pid_container = set()
54 | for img_path in img_paths:
55 | pid, _ = map(int, pattern.search(img_path).groups())
56 | pid_container.add(pid)
57 | pid2label = {pid: label for label, pid in enumerate(pid_container)}
58 |
59 | data = []
60 | for img_path in img_paths:
61 | pid, camid = map(int, pattern.search(img_path).groups())
62 | assert 1 <= camid <= 8
63 | camid -= 1 # index starts from 0
64 | if relabel:
65 | pid = pid2label[pid]
66 | data.append((img_path, pid, camid))
67 |
68 | return data
69 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/data/datasets/image/sensereid.py:
--------------------------------------------------------------------------------
1 | from __future__ import division, print_function, absolute_import
2 | import copy
3 | import glob
4 | import os.path as osp
5 |
6 | from ..dataset import ImageDataset
7 |
8 |
9 | class SenseReID(ImageDataset):
10 | """SenseReID.
11 |
12 | This dataset is used for test purpose only.
13 |
14 | Reference:
15 | Zhao et al. Spindle Net: Person Re-identification with Human Body
16 | Region Guided Feature Decomposition and Fusion. CVPR 2017.
17 |
18 | URL: ``_
19 |
20 | Dataset statistics:
21 | - query: 522 ids, 1040 images.
22 | - gallery: 1717 ids, 3388 images.
23 | """
24 | dataset_dir = 'sensereid'
25 | dataset_url = None
26 |
27 | def __init__(self, root='', **kwargs):
28 | self.root = osp.abspath(osp.expanduser(root))
29 | self.dataset_dir = osp.join(self.root, self.dataset_dir)
30 | self.download_dataset(self.dataset_dir, self.dataset_url)
31 |
32 | self.query_dir = osp.join(self.dataset_dir, 'SenseReID', 'test_probe')
33 | self.gallery_dir = osp.join(
34 | self.dataset_dir, 'SenseReID', 'test_gallery'
35 | )
36 |
37 | required_files = [self.dataset_dir, self.query_dir, self.gallery_dir]
38 | self.check_before_run(required_files)
39 |
40 | query = self.process_dir(self.query_dir)
41 | gallery = self.process_dir(self.gallery_dir)
42 |
43 | # relabel
44 | g_pids = set()
45 | for _, pid, _ in gallery:
46 | g_pids.add(pid)
47 | pid2label = {pid: i for i, pid in enumerate(g_pids)}
48 |
49 | query = [
50 | (img_path, pid2label[pid], camid) for img_path, pid, camid in query
51 | ]
52 | gallery = [
53 | (img_path, pid2label[pid], camid)
54 | for img_path, pid, camid in gallery
55 | ]
56 | train = copy.deepcopy(query) + copy.deepcopy(gallery) # dummy variable
57 |
58 | super(SenseReID, self).__init__(train, query, gallery, **kwargs)
59 |
60 | def process_dir(self, dir_path):
61 | img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
62 | data = []
63 |
64 | for img_path in img_paths:
65 | img_name = osp.splitext(osp.basename(img_path))[0]
66 | pid, camid = img_name.split('_')
67 | pid, camid = int(pid), int(camid)
68 | data.append((img_path, pid, camid))
69 |
70 | return data
71 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/data/datasets/video/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function, absolute_import
2 |
3 | from .mars import Mars
4 | from .ilidsvid import iLIDSVID
5 | from .prid2011 import PRID2011
6 | from .dukemtmcvidreid import DukeMTMCVidReID
7 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/data/datasets/video/prid2011.py:
--------------------------------------------------------------------------------
1 | from __future__ import division, print_function, absolute_import
2 | import glob
3 | import os.path as osp
4 |
5 | from torchreid.utils import read_json
6 |
7 | from ..dataset import VideoDataset
8 |
9 |
10 | class PRID2011(VideoDataset):
11 | """PRID2011.
12 |
13 | Reference:
14 | Hirzer et al. Person Re-Identification by Descriptive and
15 | Discriminative Classification. SCIA 2011.
16 |
17 | URL: ``_
18 |
19 | Dataset statistics:
20 | - identities: 200.
21 | - tracklets: 400.
22 | - cameras: 2.
23 | """
24 | dataset_dir = 'prid2011'
25 | dataset_url = None
26 |
27 | def __init__(self, root='', split_id=0, **kwargs):
28 | self.root = osp.abspath(osp.expanduser(root))
29 | self.dataset_dir = osp.join(self.root, self.dataset_dir)
30 | self.download_dataset(self.dataset_dir, self.dataset_url)
31 |
32 | self.split_path = osp.join(self.dataset_dir, 'splits_prid2011.json')
33 | self.cam_a_dir = osp.join(
34 | self.dataset_dir, 'prid_2011', 'multi_shot', 'cam_a'
35 | )
36 | self.cam_b_dir = osp.join(
37 | self.dataset_dir, 'prid_2011', 'multi_shot', 'cam_b'
38 | )
39 |
40 | required_files = [self.dataset_dir, self.cam_a_dir, self.cam_b_dir]
41 | self.check_before_run(required_files)
42 |
43 | splits = read_json(self.split_path)
44 | if split_id >= len(splits):
45 | raise ValueError(
46 | 'split_id exceeds range, received {}, but expected between 0 and {}'
47 | .format(split_id,
48 | len(splits) - 1)
49 | )
50 | split = splits[split_id]
51 | train_dirs, test_dirs = split['train'], split['test']
52 |
53 | train = self.process_dir(train_dirs, cam1=True, cam2=True)
54 | query = self.process_dir(test_dirs, cam1=True, cam2=False)
55 | gallery = self.process_dir(test_dirs, cam1=False, cam2=True)
56 |
57 | super(PRID2011, self).__init__(train, query, gallery, **kwargs)
58 |
59 | def process_dir(self, dirnames, cam1=True, cam2=True):
60 | tracklets = []
61 | dirname2pid = {dirname: i for i, dirname in enumerate(dirnames)}
62 |
63 | for dirname in dirnames:
64 | if cam1:
65 | person_dir = osp.join(self.cam_a_dir, dirname)
66 | img_names = glob.glob(osp.join(person_dir, '*.png'))
67 | assert len(img_names) > 0
68 | img_names = tuple(img_names)
69 | pid = dirname2pid[dirname]
70 | tracklets.append((img_names, pid, 0))
71 |
72 | if cam2:
73 | person_dir = osp.join(self.cam_b_dir, dirname)
74 | img_names = glob.glob(osp.join(person_dir, '*.png'))
75 | assert len(img_names) > 0
76 | img_names = tuple(img_names)
77 | pid = dirname2pid[dirname]
78 | tracklets.append((img_names, pid, 1))
79 |
80 | return tracklets
81 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/engine/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function, absolute_import
2 |
3 | from .image import ImageSoftmaxEngine, ImageTripletEngine
4 | from .video import VideoSoftmaxEngine, VideoTripletEngine
5 | from .engine import Engine
6 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/engine/image/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from .softmax import ImageSoftmaxEngine
4 | from .triplet import ImageTripletEngine
5 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/engine/video/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from .softmax import VideoSoftmaxEngine
4 | from .triplet import VideoTripletEngine
5 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/losses/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import division, print_function, absolute_import
2 |
3 | from .cross_entropy_loss import CrossEntropyLoss
4 | from .hard_mine_triplet_loss import TripletLoss
5 |
6 |
7 | def DeepSupervision(criterion, xs, y):
8 | """DeepSupervision
9 |
10 | Applies criterion to each element in a list.
11 |
12 | Args:
13 | criterion: loss function
14 | xs: tuple of inputs
15 | y: ground truth
16 | """
17 | loss = 0.
18 | for x in xs:
19 | loss += criterion(x, y)
20 | loss /= len(xs)
21 | return loss
22 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/losses/cross_entropy_loss.py:
--------------------------------------------------------------------------------
1 | from __future__ import division, absolute_import
2 | import torch
3 | import torch.nn as nn
4 |
5 |
6 | class CrossEntropyLoss(nn.Module):
7 | r"""Cross entropy loss with label smoothing regularizer.
8 |
9 | Reference:
10 | Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016.
11 |
12 | With label smoothing, the label :math:`y` for a class is computed by
13 |
14 | .. math::
15 | \begin{equation}
16 | (1 - \eps) \times y + \frac{\eps}{K},
17 | \end{equation}
18 |
19 | where :math:`K` denotes the number of classes and :math:`\eps` is a weight. When
20 | :math:`\eps = 0`, the loss function reduces to the normal cross entropy.
21 |
22 | Args:
23 | num_classes (int): number of classes.
24 | eps (float, optional): weight. Default is 0.1.
25 | use_gpu (bool, optional): whether to use gpu devices. Default is True.
26 | label_smooth (bool, optional): whether to apply label smoothing. Default is True.
27 | """
28 |
29 | def __init__(self, num_classes, eps=0.1, use_gpu=True, label_smooth=True):
30 | super(CrossEntropyLoss, self).__init__()
31 | self.num_classes = num_classes
32 | self.eps = eps if label_smooth else 0
33 | self.use_gpu = use_gpu
34 | self.logsoftmax = nn.LogSoftmax(dim=1)
35 |
36 | def forward(self, inputs, targets):
37 | """
38 | Args:
39 | inputs (torch.Tensor): prediction matrix (before softmax) with
40 | shape (batch_size, num_classes).
41 | targets (torch.LongTensor): ground truth labels with shape (batch_size).
42 | Each position contains the label index.
43 | """
44 | log_probs = self.logsoftmax(inputs)
45 | zeros = torch.zeros(log_probs.size())
46 | targets = zeros.scatter_(1, targets.unsqueeze(1).data.cpu(), 1)
47 | if self.use_gpu:
48 | targets = targets.cuda()
49 | targets = (1 - self.eps) * targets + self.eps / self.num_classes
50 | return (-targets * log_probs).mean(0).sum()
51 |
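To make the label-smoothing formula in the docstring concrete, here is a small standalone sketch (illustrative, not part of the file above) that reproduces the target construction in `forward` for `num_classes=5` and the default `eps=0.1`: the ground-truth class receives probability 0.92 and every other class 0.02.

```python
# Illustrative only: smoothed targets as built inside CrossEntropyLoss.forward.
import torch

num_classes, eps = 5, 0.1
labels = torch.tensor([2])                           # one sample, ground-truth class 2
one_hot = torch.zeros(1, num_classes).scatter_(1, labels.unsqueeze(1), 1)
smoothed = (1 - eps) * one_hot + eps / num_classes   # [[0.02, 0.02, 0.92, 0.02, 0.02]]
print(smoothed)
```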
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/losses/hard_mine_triplet_loss.py:
--------------------------------------------------------------------------------
1 | from __future__ import division, absolute_import
2 | import torch
3 | import torch.nn as nn
4 |
5 |
6 | class TripletLoss(nn.Module):
7 | """Triplet loss with hard positive/negative mining.
8 |
9 | Reference:
10 | Hermans et al. In Defense of the Triplet Loss for Person Re-Identification. arXiv:1703.07737.
11 |
12 | Imported from ``_.
13 |
14 | Args:
15 | margin (float, optional): margin for triplet. Default is 0.3.
16 | """
17 |
18 | def __init__(self, margin=0.3):
19 | super(TripletLoss, self).__init__()
20 | self.margin = margin
21 | self.ranking_loss = nn.MarginRankingLoss(margin=margin)
22 |
23 | def forward(self, inputs, targets):
24 | """
25 | Args:
26 | inputs (torch.Tensor): feature matrix with shape (batch_size, feat_dim).
27 | targets (torch.LongTensor): ground truth labels with shape (batch_size).
28 | """
29 | n = inputs.size(0)
30 |
31 | # Compute pairwise distance, replace by the official when merged
32 | dist = torch.pow(inputs, 2).sum(dim=1, keepdim=True).expand(n, n)
33 | dist = dist + dist.t()
34 | dist.addmm_(inputs, inputs.t(), beta=1, alpha=-2)
35 | dist = dist.clamp(min=1e-12).sqrt() # for numerical stability
36 |
37 | # For each anchor, find the hardest positive and negative
38 | mask = targets.expand(n, n).eq(targets.expand(n, n).t())
39 | dist_ap, dist_an = [], []
40 | for i in range(n):
41 | dist_ap.append(dist[i][mask[i]].max().unsqueeze(0))
42 | dist_an.append(dist[i][mask[i] == 0].min().unsqueeze(0))
43 | dist_ap = torch.cat(dist_ap)
44 | dist_an = torch.cat(dist_an)
45 |
46 | # Compute ranking hinge loss
47 | y = torch.ones_like(dist_an)
48 | return self.ranking_loss(dist_an, dist_ap, y)
49 |
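For reference, a minimal toy usage of the hard-mining loss above (illustrative only, not part of the file): the batch must contain at least two samples per identity so every anchor has a positive to mine.

```python
# Toy batch: 8 random embeddings from 4 identities, 2 samples each.
import torch
from torchreid.losses import TripletLoss  # exported by torchreid/losses/__init__.py above

criterion = TripletLoss(margin=0.3)
features = torch.randn(8, 128)                      # (batch_size, feat_dim)
labels = torch.tensor([0, 0, 1, 1, 2, 2, 3, 3])     # identity labels
loss = criterion(features, labels)
print(loss.item())
```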
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/metrics/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from .rank import evaluate_rank
4 | from .accuracy import accuracy
5 | from .distance import compute_distance_matrix
6 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/metrics/accuracy.py:
--------------------------------------------------------------------------------
1 | from __future__ import division, print_function, absolute_import
2 |
3 |
4 | def accuracy(output, target, topk=(1, )):
5 | """Computes the accuracy over the k top predictions for
6 | the specified values of k.
7 |
8 | Args:
9 | output (torch.Tensor): prediction matrix with shape (batch_size, num_classes).
10 | target (torch.LongTensor): ground truth labels with shape (batch_size).
11 | topk (tuple, optional): accuracy at top-k will be computed. For example,
12 | topk=(1, 5) means accuracy at top-1 and top-5 will be computed.
13 |
14 | Returns:
15 | list: accuracy at top-k.
16 |
17 | Examples::
18 | >>> from torchreid import metrics
19 | >>> metrics.accuracy(output, target)
20 | """
21 | maxk = max(topk)
22 | batch_size = target.size(0)
23 |
24 | if isinstance(output, (tuple, list)):
25 | output = output[0]
26 |
27 | _, pred = output.topk(maxk, 1, True, True)
28 | pred = pred.t()
29 | correct = pred.eq(target.view(1, -1).expand_as(pred))
30 |
31 | res = []
32 | for k in topk:
33 | correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
34 | acc = correct_k.mul_(100.0 / batch_size)
35 | res.append(acc)
36 |
37 | return res
38 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/metrics/distance.py:
--------------------------------------------------------------------------------
1 | from __future__ import division, print_function, absolute_import
2 | import torch
3 | from torch.nn import functional as F
4 |
5 |
6 | def compute_distance_matrix(input1, input2, metric='euclidean'):
7 | """A wrapper function for computing distance matrix.
8 |
9 | Args:
10 | input1 (torch.Tensor): 2-D feature matrix.
11 | input2 (torch.Tensor): 2-D feature matrix.
12 | metric (str, optional): "euclidean" or "cosine".
13 | Default is "euclidean".
14 |
15 | Returns:
16 | torch.Tensor: distance matrix.
17 |
18 | Examples::
19 | >>> from torchreid import metrics
20 | >>> input1 = torch.rand(10, 2048)
21 | >>> input2 = torch.rand(100, 2048)
22 | >>> distmat = metrics.compute_distance_matrix(input1, input2)
23 | >>> distmat.size() # (10, 100)
24 | """
25 | # check input
26 | assert isinstance(input1, torch.Tensor)
27 | assert isinstance(input2, torch.Tensor)
28 | assert input1.dim() == 2, 'Expected 2-D tensor, but got {}-D'.format(
29 | input1.dim()
30 | )
31 | assert input2.dim() == 2, 'Expected 2-D tensor, but got {}-D'.format(
32 | input2.dim()
33 | )
34 | assert input1.size(1) == input2.size(1)
35 |
36 | if metric == 'euclidean':
37 | distmat = euclidean_squared_distance(input1, input2)
38 | elif metric == 'cosine':
39 | distmat = cosine_distance(input1, input2)
40 | else:
41 | raise ValueError(
42 | 'Unknown distance metric: {}. '
43 | 'Please choose either "euclidean" or "cosine"'.format(metric)
44 | )
45 |
46 | return distmat
47 |
48 |
49 | def euclidean_squared_distance(input1, input2):
50 | """Computes euclidean squared distance.
51 |
52 | Args:
53 | input1 (torch.Tensor): 2-D feature matrix.
54 | input2 (torch.Tensor): 2-D feature matrix.
55 |
56 | Returns:
57 | torch.Tensor: distance matrix.
58 | """
59 | m, n = input1.size(0), input2.size(0)
60 | mat1 = torch.pow(input1, 2).sum(dim=1, keepdim=True).expand(m, n)
61 | mat2 = torch.pow(input2, 2).sum(dim=1, keepdim=True).expand(n, m).t()
62 | distmat = mat1 + mat2
63 | distmat.addmm_(input1, input2.t(), beta=1, alpha=-2)
64 | return distmat
65 |
66 |
67 | def cosine_distance(input1, input2):
68 | """Computes cosine distance.
69 |
70 | Args:
71 | input1 (torch.Tensor): 2-D feature matrix.
72 | input2 (torch.Tensor): 2-D feature matrix.
73 |
74 | Returns:
75 | torch.Tensor: distance matrix.
76 | """
77 | input1_normed = F.normalize(input1, p=2, dim=1)
78 | input2_normed = F.normalize(input2, p=2, dim=1)
79 | distmat = 1 - torch.mm(input1_normed, input2_normed.t())
80 | return distmat
81 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/metrics/rank_cylib/Makefile:
--------------------------------------------------------------------------------
1 | all:
2 | $(PYTHON) setup.py build_ext --inplace
3 | rm -rf build
4 | clean:
5 | rm -rf build
6 | rm -f rank_cy.c *.so
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/metrics/rank_cylib/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/deep_sort/deep/reid/torchreid/metrics/rank_cylib/__init__.py
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/metrics/rank_cylib/setup.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from distutils.core import setup
3 | from distutils.extension import Extension
4 | from Cython.Build import cythonize
5 |
6 |
7 | def numpy_include():
8 | try:
9 | numpy_include = np.get_include()
10 | except AttributeError:
11 | numpy_include = np.get_numpy_include()
12 | return numpy_include
13 |
14 |
15 | ext_modules = [
16 | Extension(
17 | 'rank_cy',
18 | ['rank_cy.pyx'],
19 | include_dirs=[numpy_include()],
20 | )
21 | ]
22 |
23 | setup(
24 | name='Cython-based reid evaluation code',
25 | ext_modules=cythonize(ext_modules)
26 | )
27 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/metrics/rank_cylib/test_cython.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import sys
3 | import numpy as np
4 | import timeit
5 | import os.path as osp
6 |
7 | from torchreid import metrics
8 |
9 | sys.path.insert(0, osp.dirname(osp.abspath(__file__)) + '/../../..')
10 | """
11 | Test the speed of the Cython-based evaluation code. The speed improvement
12 | can be much larger on real re-ID data, which contains a far greater
13 | number of query and gallery images.
14 |
15 | Note: you might encounter the following error:
16 | 'AssertionError: Error: all query identities do not appear in gallery'.
17 | This is normal because the inputs are random numbers. Just try again.
18 | """
19 |
20 | print('*** Compare running time ***')
21 |
22 | setup = '''
23 | import sys
24 | import os.path as osp
25 | import numpy as np
26 | sys.path.insert(0, osp.dirname(osp.abspath(__file__)) + '/../../..')
27 | from torchreid import metrics
28 | num_q = 30
29 | num_g = 300
30 | max_rank = 5
31 | distmat = np.random.rand(num_q, num_g) * 20
32 | q_pids = np.random.randint(0, num_q, size=num_q)
33 | g_pids = np.random.randint(0, num_g, size=num_g)
34 | q_camids = np.random.randint(0, 5, size=num_q)
35 | g_camids = np.random.randint(0, 5, size=num_g)
36 | '''
37 |
38 | print('=> Using market1501\'s metric')
39 | pytime = timeit.timeit(
40 | 'metrics.evaluate_rank(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_cython=False)',
41 | setup=setup,
42 | number=20
43 | )
44 | cytime = timeit.timeit(
45 | 'metrics.evaluate_rank(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_cython=True)',
46 | setup=setup,
47 | number=20
48 | )
49 | print('Python time: {} s'.format(pytime))
50 | print('Cython time: {} s'.format(cytime))
51 | print('Cython is {} times faster than python\n'.format(pytime / cytime))
52 |
53 | print('=> Using cuhk03\'s metric')
54 | pytime = timeit.timeit(
55 | 'metrics.evaluate_rank(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_metric_cuhk03=True, use_cython=False)',
56 | setup=setup,
57 | number=20
58 | )
59 | cytime = timeit.timeit(
60 | 'metrics.evaluate_rank(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_metric_cuhk03=True, use_cython=True)',
61 | setup=setup,
62 | number=20
63 | )
64 | print('Python time: {} s'.format(pytime))
65 | print('Cython time: {} s'.format(cytime))
66 | print('Cython is {} times faster than python\n'.format(pytime / cytime))
67 | """
68 | print("=> Check precision")
69 |
70 | num_q = 30
71 | num_g = 300
72 | max_rank = 5
73 | distmat = np.random.rand(num_q, num_g) * 20
74 | q_pids = np.random.randint(0, num_q, size=num_q)
75 | g_pids = np.random.randint(0, num_g, size=num_g)
76 | q_camids = np.random.randint(0, 5, size=num_q)
77 | g_camids = np.random.randint(0, 5, size=num_g)
78 |
79 | cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_cython=False)
80 | print("Python:\nmAP = {} \ncmc = {}\n".format(mAP, cmc))
81 | cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_cython=True)
82 | print("Cython:\nmAP = {} \ncmc = {}\n".format(mAP, cmc))
83 | """
84 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/optim/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from .optimizer import build_optimizer
4 | from .lr_scheduler import build_lr_scheduler
5 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/optim/lr_scheduler.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function, absolute_import
2 | import torch
3 |
4 | AVAI_SCH = ['single_step', 'multi_step', 'cosine']
5 |
6 |
7 | def build_lr_scheduler(
8 | optimizer, lr_scheduler='single_step', stepsize=1, gamma=0.1, max_epoch=1
9 | ):
10 | """A function wrapper for building a learning rate scheduler.
11 |
12 | Args:
13 | optimizer (Optimizer): an Optimizer.
14 | lr_scheduler (str, optional): learning rate scheduler method. Default is single_step.
15 | stepsize (int or list, optional): step size to decay learning rate. When ``lr_scheduler``
16 | is "single_step", ``stepsize`` should be an integer. When ``lr_scheduler`` is
17 | "multi_step", ``stepsize`` is a list. Default is 1.
18 | gamma (float, optional): decay rate. Default is 0.1.
19 | max_epoch (int, optional): maximum epoch (for cosine annealing). Default is 1.
20 |
21 | Examples::
22 | >>> # Decay learning rate by every 20 epochs.
23 | >>> scheduler = torchreid.optim.build_lr_scheduler(
24 | >>> optimizer, lr_scheduler='single_step', stepsize=20
25 | >>> )
26 | >>> # Decay learning rate at 30, 50 and 55 epochs.
27 | >>> scheduler = torchreid.optim.build_lr_scheduler(
28 | >>> optimizer, lr_scheduler='multi_step', stepsize=[30, 50, 55]
29 | >>> )
30 | """
31 | if lr_scheduler not in AVAI_SCH:
32 | raise ValueError(
33 | 'Unsupported scheduler: {}. Must be one of {}'.format(
34 | lr_scheduler, AVAI_SCH
35 | )
36 | )
37 |
38 | if lr_scheduler == 'single_step':
39 | if isinstance(stepsize, list):
40 | stepsize = stepsize[-1]
41 |
42 | if not isinstance(stepsize, int):
43 | raise TypeError(
44 | 'For single_step lr_scheduler, stepsize must '
45 | 'be an integer, but got {}'.format(type(stepsize))
46 | )
47 |
48 | scheduler = torch.optim.lr_scheduler.StepLR(
49 | optimizer, step_size=stepsize, gamma=gamma
50 | )
51 |
52 | elif lr_scheduler == 'multi_step':
53 | if not isinstance(stepsize, list):
54 | raise TypeError(
55 | 'For multi_step lr_scheduler, stepsize must '
56 | 'be a list, but got {}'.format(type(stepsize))
57 | )
58 |
59 | scheduler = torch.optim.lr_scheduler.MultiStepLR(
60 | optimizer, milestones=stepsize, gamma=gamma
61 | )
62 |
63 | elif lr_scheduler == 'cosine':
64 | scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
65 | optimizer, float(max_epoch)
66 | )
67 |
68 | return scheduler
69 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/utils/GPU-Re-Ranking/README.md:
--------------------------------------------------------------------------------
1 | # Understanding Image Retrieval Re-Ranking: A Graph Neural Network Perspective
2 |
3 | [[Paper]](https://arxiv.org/abs/2012.07620v2)
4 |
5 | On the Market-1501 dataset, we accelerate the re-ranking process from **89.2s** to **9.4ms** with one K40m GPU, enabling real-time post-processing.
6 | Similarly, we observe that our method achieves comparable or even better retrieval results on the other four image retrieval benchmarks,
7 | i.e., VeRi-776, Oxford-5k, Paris-6k and University-1652, with limited time cost.
8 |
9 | ## Prerequisites
10 |
11 | The code was mainly developed and tested with python 3.7, PyTorch 1.4.1, CUDA 10.2, and CentOS release 6.10.
12 |
13 | The code has been included in `/extension`. To compile it:
14 |
15 | ```shell
16 | cd extension
17 | sh make.sh
18 | ```
19 |
20 | ## Demo
21 |
22 | The demo script `main.py` runs the GNN re-ranking method on the prepared features.
23 |
24 | ```shell
25 | python main.py --data_path PATH_TO_DATA --k1 26 --k2 7
26 | ```
27 |
28 | ## Citation
29 | ```bibtex
30 | @article{zhang2020understanding,
31 | title={Understanding Image Retrieval Re-Ranking: A Graph Neural Network Perspective},
32 | author={Xuanmeng Zhang and Minyue Jiang and Zhedong Zheng and Xiao Tan and Errui Ding and Yi Yang},
33 | journal={arXiv preprint arXiv:2012.07620},
34 | year={2020}
35 | }
36 | ```
37 |
38 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/utils/GPU-Re-Ranking/extension/adjacency_matrix/build_adjacency_matrix.cpp:
--------------------------------------------------------------------------------
1 | #include <torch/extension.h>
2 | #include <iostream>
3 | #include <vector>
4 |
5 | at::Tensor build_adjacency_matrix_forward(torch::Tensor initial_rank);
6 |
7 |
8 | #define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor")
9 | #define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
10 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
11 |
12 | at::Tensor build_adjacency_matrix(at::Tensor initial_rank) {
13 | CHECK_INPUT(initial_rank);
14 | return build_adjacency_matrix_forward(initial_rank);
15 | }
16 |
17 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
18 | m.def("forward", &build_adjacency_matrix, "build_adjacency_matrix (CUDA)");
19 | }
20 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/utils/GPU-Re-Ranking/extension/adjacency_matrix/build_adjacency_matrix_kernel.cu:
--------------------------------------------------------------------------------
1 | #include <torch/extension.h>
2 |
3 | #include <cuda.h>
4 | #include <cuda_runtime.h>
5 | #include <vector>
6 |
7 | #define CUDA_1D_KERNEL_LOOP(i, n) for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
8 |
9 |
10 | __global__ void build_adjacency_matrix_kernel(float* initial_rank, float* A, const int total_num, const int topk, const int nthreads, const int all_num) {
11 | int index = blockIdx.x * blockDim.x + threadIdx.x;
12 | int stride = blockDim.x * gridDim.x;
13 | for (int i = index; i < all_num; i += stride) {
14 | int ii = i / topk;
15 | A[ii * total_num + int(initial_rank[i])] = float(1.0);
16 | }
17 | }
18 |
19 | at::Tensor build_adjacency_matrix_forward(at::Tensor initial_rank) {
20 | const auto total_num = initial_rank.size(0);
21 | const auto topk = initial_rank.size(1);
22 | const auto all_num = total_num * topk;
23 | auto A = torch::zeros({total_num, total_num}, at::device(initial_rank.device()).dtype(at::ScalarType::Float));
24 |
25 | const int threads = 1024;
26 | const int blocks = (all_num + threads - 1) / threads;
27 |
28 | build_adjacency_matrix_kernel<<<blocks, threads>>>(initial_rank.data_ptr<float>(), A.data_ptr<float>(), total_num, topk, threads, all_num);
29 | return A;
30 |
31 | }
32 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/utils/GPU-Re-Ranking/extension/adjacency_matrix/setup.py:
--------------------------------------------------------------------------------
1 | """
2 | Understanding Image Retrieval Re-Ranking: A Graph Neural Network Perspective
3 |
4 | Xuanmeng Zhang, Minyue Jiang, Zhedong Zheng, Xiao Tan, Errui Ding, Yi Yang
5 |
6 | Project Page : https://github.com/Xuanmeng-Zhang/gnn-re-ranking
7 |
8 | Paper: https://arxiv.org/abs/2012.07620v2
9 |
10 | ======================================================================
11 |
12 | On the Market-1501 dataset, we accelerate the re-ranking processing from 89.2s to 9.4ms
13 | with one K40m GPU, facilitating the real-time post-processing. Similarly, we observe
14 | that our method achieves comparable or even better retrieval results on the other four
15 | image retrieval benchmarks, i.e., VeRi-776, Oxford-5k, Paris-6k and University-1652,
16 | with limited time cost.
17 | """
18 |
19 | from setuptools import Extension, setup
20 | import torch
21 | import torch.nn as nn
22 | from torch.autograd import Function
23 | from torch.utils.cpp_extension import CUDAExtension, BuildExtension
24 |
25 | setup(
26 | name='build_adjacency_matrix',
27 | ext_modules=[
28 | CUDAExtension(
29 | 'build_adjacency_matrix', [
30 | 'build_adjacency_matrix.cpp',
31 | 'build_adjacency_matrix_kernel.cu',
32 | ]
33 | ),
34 | ],
35 | cmdclass={'build_ext': BuildExtension}
36 | )
37 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/utils/GPU-Re-Ranking/extension/make.sh:
--------------------------------------------------------------------------------
1 | cd adjacency_matrix
2 | python setup.py install
3 | cd ../propagation
4 | python setup.py install
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/utils/GPU-Re-Ranking/extension/propagation/gnn_propagate.cpp:
--------------------------------------------------------------------------------
1 | #include <torch/extension.h>
2 | #include <iostream>
3 | #include <vector>
4 |
5 | at::Tensor gnn_propagate_forward(at::Tensor A, at::Tensor initial_rank, at::Tensor S);
6 |
7 |
8 | #define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor")
9 | #define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
10 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
11 |
12 | at::Tensor gnn_propagate(at::Tensor A ,at::Tensor initial_rank, at::Tensor S) {
13 | CHECK_INPUT(A);
14 | CHECK_INPUT(initial_rank);
15 | CHECK_INPUT(S);
16 | return gnn_propagate_forward(A, initial_rank, S);
17 | }
18 |
19 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
20 | m.def("forward", &gnn_propagate, "gnn propagate (CUDA)");
21 | }
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/utils/GPU-Re-Ranking/extension/propagation/gnn_propagate_kernel.cu:
--------------------------------------------------------------------------------
1 | #include <torch/extension.h>
2 |
3 | #include <cuda.h>
4 | #include <cuda_runtime.h>
5 | #include <vector>
6 | #include <iostream>
7 |
8 | __global__ void gnn_propagate_forward_kernel(float* initial_rank, float* A, float* A_qe, float* S, const int sample_num, const int topk, const int total_num) {
9 | int index = blockIdx.x * blockDim.x + threadIdx.x;
10 | int stride = blockDim.x * gridDim.x;
11 | for (int i = index; i < total_num; i += stride) {
12 | int fea = i % sample_num;
13 | int sample_index = i / sample_num;
14 | float sum = 0.0;
15 | for (int j = 0; j < topk ; j++) {
16 | int topk_fea_index = int(initial_rank[sample_index*topk+j]) * sample_num + fea;
17 | sum += A[ topk_fea_index] * S[sample_index*topk+j];
18 | }
19 | A_qe[i] = sum;
20 | }
21 | }
22 |
23 | at::Tensor gnn_propagate_forward(at::Tensor A, at::Tensor initial_rank, at::Tensor S) {
24 | const auto sample_num = A.size(0);
25 | const auto topk = initial_rank.size(1);
26 |
27 | const auto total_num = sample_num * sample_num ;
28 | auto A_qe = torch::zeros({sample_num, sample_num}, at::device(initial_rank.device()).dtype(at::ScalarType::Float));
29 |
30 | const int threads = 1024;
31 | const int blocks = (total_num + threads - 1) / threads;
32 |
33 | gnn_propagate_forward_kernel<<<blocks, threads>>>(initial_rank.data_ptr<float>(), A.data_ptr<float>(), A_qe.data_ptr<float>(), S.data_ptr<float>(), sample_num, topk, total_num);
34 | return A_qe;
35 |
36 | }
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/utils/GPU-Re-Ranking/extension/propagation/setup.py:
--------------------------------------------------------------------------------
1 | """
2 | Understanding Image Retrieval Re-Ranking: A Graph Neural Network Perspective
3 |
4 | Xuanmeng Zhang, Minyue Jiang, Zhedong Zheng, Xiao Tan, Errui Ding, Yi Yang
5 |
6 | Project Page : https://github.com/Xuanmeng-Zhang/gnn-re-ranking
7 |
8 | Paper: https://arxiv.org/abs/2012.07620v2
9 |
10 | ======================================================================
11 |
12 | On the Market-1501 dataset, we accelerate the re-ranking processing from 89.2s to 9.4ms
13 | with one K40m GPU, facilitating the real-time post-processing. Similarly, we observe
14 | that our method achieves comparable or even better retrieval results on the other four
15 | image retrieval benchmarks, i.e., VeRi-776, Oxford-5k, Paris-6k and University-1652,
16 | with limited time cost.
17 | """
18 |
19 | from setuptools import Extension, setup
20 | import torch
21 | import torch.nn as nn
22 | from torch.autograd import Function
23 | from torch.utils.cpp_extension import CUDAExtension, BuildExtension
24 |
25 | setup(
26 | name='gnn_propagate',
27 | ext_modules=[
28 | CUDAExtension(
29 | 'gnn_propagate', [
30 | 'gnn_propagate.cpp',
31 | 'gnn_propagate_kernel.cu',
32 | ]
33 | ),
34 | ],
35 | cmdclass={'build_ext': BuildExtension}
36 | )
37 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/utils/GPU-Re-Ranking/gnn_reranking.py:
--------------------------------------------------------------------------------
1 | """
2 | Understanding Image Retrieval Re-Ranking: A Graph Neural Network Perspective
3 |
4 | Xuanmeng Zhang, Minyue Jiang, Zhedong Zheng, Xiao Tan, Errui Ding, Yi Yang
5 |
6 | Project Page : https://github.com/Xuanmeng-Zhang/gnn-re-ranking
7 |
8 | Paper: https://arxiv.org/abs/2012.07620v2
9 |
10 | ======================================================================
11 |
12 | On the Market-1501 dataset, we accelerate the re-ranking processing from 89.2s to 9.4ms
13 | with one K40m GPU, facilitating the real-time post-processing. Similarly, we observe
14 | that our method achieves comparable or even better retrieval results on the other four
15 | image retrieval benchmarks, i.e., VeRi-776, Oxford-5k, Paris-6k and University-1652,
16 | with limited time cost.
17 | """
18 |
19 | import numpy as np
20 | import torch
21 |
22 | import gnn_propagate
23 | import build_adjacency_matrix
24 | from utils import *
25 |
26 |
27 | def gnn_reranking(X_q, X_g, k1, k2):
28 | query_num, gallery_num = X_q.shape[0], X_g.shape[0]
29 |
30 | X_u = torch.cat((X_q, X_g), axis=0)
31 | original_score = torch.mm(X_u, X_u.t())
32 | del X_u, X_q, X_g
33 |
34 | # initial ranking list
35 | S, initial_rank = original_score.topk(
36 | k=k1, dim=-1, largest=True, sorted=True
37 | )
38 |
39 | # stage 1
40 | A = build_adjacency_matrix.forward(initial_rank.float())
41 | S = S * S
42 |
43 | # stage 2
44 | if k2 != 1:
45 | for i in range(2):
46 | A = A + A.T
47 | A = gnn_propagate.forward(
48 | A, initial_rank[:, :k2].contiguous().float(),
49 | S[:, :k2].contiguous().float()
50 | )
51 | A_norm = torch.norm(A, p=2, dim=1, keepdim=True)
52 | A = A.div(A_norm.expand_as(A))
53 |
54 | cosine_similarity = torch.mm(A[:query_num, ], A[query_num:, ].t())
55 | del A, S
56 |
57 | L = torch.sort(-cosine_similarity, dim=1)[1]
58 | L = L.data.cpu().numpy()
59 | return L
60 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/utils/GPU-Re-Ranking/main.py:
--------------------------------------------------------------------------------
1 | """
2 | Understanding Image Retrieval Re-Ranking: A Graph Neural Network Perspective
3 |
4 | Xuanmeng Zhang, Minyue Jiang, Zhedong Zheng, Xiao Tan, Errui Ding, Yi Yang
5 |
6 | Project Page : https://github.com/Xuanmeng-Zhang/gnn-re-ranking
7 |
8 | Paper: https://arxiv.org/abs/2012.07620v2
9 |
10 | ======================================================================
11 |
12 | On the Market-1501 dataset, we accelerate the re-ranking processing from 89.2s to 9.4ms
13 | with one K40m GPU, facilitating the real-time post-processing. Similarly, we observe
14 | that our method achieves comparable or even better retrieval results on the other four
15 | image retrieval benchmarks, i.e., VeRi-776, Oxford-5k, Paris-6k and University-1652,
16 | with limited time cost.
17 | """
18 |
19 | import os
20 | import numpy as np
21 | import argparse
22 | import torch
23 |
24 | from utils import *
25 | from gnn_reranking import *
26 |
27 | parser = argparse.ArgumentParser(description='Reranking_is_GNN')
28 | parser.add_argument(
29 | '--data_path',
30 | type=str,
31 | default='../xm_rerank_gpu_2/features/market_88_test.pkl',
32 | help='path to dataset'
33 | )
34 | parser.add_argument(
35 | '--k1',
36 | type=int,
37 | default=26, # Market-1501
38 | # default=60, # Veri-776
39 | help='parameter k1'
40 | )
41 | parser.add_argument(
42 | '--k2',
43 | type=int,
44 | default=7, # Market-1501
45 | # default=10, # Veri-776
46 | help='parameter k2'
47 | )
48 |
49 | args = parser.parse_args()
50 |
51 |
52 | def main():
53 | data = load_pickle(args.data_path)
54 |
55 | query_cam = data['query_cam']
56 | query_label = data['query_label']
57 | gallery_cam = data['gallery_cam']
58 | gallery_label = data['gallery_label']
59 |
60 | gallery_feature = torch.FloatTensor(data['gallery_f'])
61 | query_feature = torch.FloatTensor(data['query_f'])
62 | query_feature = query_feature.cuda()
63 | gallery_feature = gallery_feature.cuda()
64 |
65 | indices = gnn_reranking(query_feature, gallery_feature, args.k1, args.k2)
66 | evaluate_ranking_list(
67 | indices, query_label, query_cam, gallery_label, gallery_cam
68 | )
69 |
70 |
71 | if __name__ == '__main__':
72 | main()
73 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from .tools import *
4 | from .rerank import re_ranking
5 | from .loggers import *
6 | from .avgmeter import *
7 | from .reidtools import *
8 | from .torchtools import *
9 | from .model_complexity import compute_model_complexity
10 | from .feature_extractor import FeatureExtractor
11 |
--------------------------------------------------------------------------------
/deep_sort/deep/reid/torchreid/utils/avgmeter.py:
--------------------------------------------------------------------------------
1 | from __future__ import division, absolute_import
2 | from collections import defaultdict
3 | import torch
4 |
5 | __all__ = ['AverageMeter', 'MetricMeter']
6 |
7 |
8 | class AverageMeter(object):
9 | """Computes and stores the average and current value.
10 |
11 | Examples::
12 | >>> # Initialize a meter to record loss
13 | >>> losses = AverageMeter()
14 | >>> # Update meter after every minibatch update
15 | >>> losses.update(loss_value, batch_size)
16 | """
17 |
18 | def __init__(self):
19 | self.reset()
20 |
21 | def reset(self):
22 | self.val = 0
23 | self.avg = 0
24 | self.sum = 0
25 | self.count = 0
26 |
27 | def update(self, val, n=1):
28 | self.val = val
29 | self.sum += val * n
30 | self.count += n
31 | self.avg = self.sum / self.count
32 |
33 |
34 | class MetricMeter(object):
35 | """A collection of metrics.
36 |
37 | Source: https://github.com/KaiyangZhou/Dassl.pytorch
38 |
39 | Examples::
40 | >>> # 1. Create an instance of MetricMeter
41 | >>> metric = MetricMeter()
42 | >>> # 2. Update using a dictionary as input
43 | >>> input_dict = {'loss_1': value_1, 'loss_2': value_2}
44 | >>> metric.update(input_dict)
45 | >>> # 3. Convert to string and print
46 | >>> print(str(metric))
47 | """
48 |
49 | def __init__(self, delimiter='\t'):
50 | self.meters = defaultdict(AverageMeter)
51 | self.delimiter = delimiter
52 |
53 | def update(self, input_dict):
54 | if input_dict is None:
55 | return
56 |
57 | if not isinstance(input_dict, dict):
58 | raise TypeError(
59 | 'Input to MetricMeter.update() must be a dictionary'
60 | )
61 |
62 | for k, v in input_dict.items():
63 | if isinstance(v, torch.Tensor):
64 | v = v.item()
65 | self.meters[k].update(v)
66 |
67 | def __str__(self):
68 | output_str = []
69 | for name, meter in self.meters.items():
70 | output_str.append(
71 | '{} {:.4f} ({:.4f})'.format(name, meter.val, meter.avg)
72 | )
73 | return self.delimiter.join(output_str)
74 |
--------------------------------------------------------------------------------
/deep_sort/sort/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/deep_sort/sort/__init__.py
--------------------------------------------------------------------------------
/deep_sort/sort/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/deep_sort/sort/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/deep_sort/sort/__pycache__/detection.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/deep_sort/sort/__pycache__/detection.cpython-38.pyc
--------------------------------------------------------------------------------
/deep_sort/sort/__pycache__/iou_matching.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/deep_sort/sort/__pycache__/iou_matching.cpython-38.pyc
--------------------------------------------------------------------------------
/deep_sort/sort/__pycache__/kalman_filter.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/deep_sort/sort/__pycache__/kalman_filter.cpython-38.pyc
--------------------------------------------------------------------------------
/deep_sort/sort/__pycache__/linear_assignment.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/deep_sort/sort/__pycache__/linear_assignment.cpython-38.pyc
--------------------------------------------------------------------------------
/deep_sort/sort/__pycache__/nn_matching.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/deep_sort/sort/__pycache__/nn_matching.cpython-38.pyc
--------------------------------------------------------------------------------
/deep_sort/sort/__pycache__/track.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/deep_sort/sort/__pycache__/track.cpython-38.pyc
--------------------------------------------------------------------------------
/deep_sort/sort/__pycache__/tracker.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/deep_sort/sort/__pycache__/tracker.cpython-38.pyc
--------------------------------------------------------------------------------
/deep_sort/sort/detection.py:
--------------------------------------------------------------------------------
1 | # vim: expandtab:ts=4:sw=4
2 | import numpy as np
3 |
4 |
5 | class Detection(object):
6 | """
7 | This class represents a bounding box detection in a single image.
8 |
9 | Parameters
10 | ----------
11 | tlwh : array_like
12 | Bounding box in format `(x, y, w, h)`.
13 | confidence : float
14 | Detector confidence score.
15 | feature : array_like
16 | A feature vector that describes the object contained in this image.
17 |
18 | Attributes
19 | ----------
20 | tlwh : ndarray
21 | Bounding box in format `(top left x, top left y, width, height)`.
22 | confidence : ndarray
23 | Detector confidence score.
24 | feature : ndarray | NoneType
25 | A feature vector that describes the object contained in this image.
26 |
27 | """
28 |
29 | def __init__(self, tlwh, confidence, feature):
30 | self.tlwh = np.asarray(tlwh, dtype=float)  # np.float is deprecated in recent NumPy
31 | self.confidence = float(confidence)
32 | self.feature = np.asarray(feature, dtype=np.float32)
33 |
34 | def to_tlbr(self):
35 | """Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
36 | `(top left, bottom right)`.
37 | """
38 | ret = self.tlwh.copy()
39 | ret[2:] += ret[:2]
40 | return ret
41 |
42 | def to_xyah(self):
43 | """Convert bounding box to format `(center x, center y, aspect ratio,
44 | height)`, where the aspect ratio is `width / height`.
45 | """
46 | ret = self.tlwh.copy()
47 | ret[:2] += ret[2:] / 2
48 | ret[2] /= ret[3]
49 | return ret
50 |
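A small usage sketch (illustrative, not part of the repository) of the conversions above; the box values and feature length are arbitrary, and the repository root is assumed to be on the Python path.

import numpy as np
from deep_sort.sort.detection import Detection

det = Detection(tlwh=[10, 20, 30, 60], confidence=0.9, feature=np.zeros(128))
print(det.to_tlbr())  # [10. 20. 40. 80.]  -> (min x, min y, max x, max y)
print(det.to_xyah())  # [25. 50. 0.5 60.]  -> (center x, center y, width/height, height)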
--------------------------------------------------------------------------------
/deep_sort/sort/iou_matching.py:
--------------------------------------------------------------------------------
1 | # vim: expandtab:ts=4:sw=4
2 | from __future__ import absolute_import
3 | import numpy as np
4 | from . import linear_assignment
5 |
6 |
7 | def iou(bbox, candidates):
8 | """Compute intersection over union.
9 |
10 | Parameters
11 | ----------
12 | bbox : ndarray
13 | A bounding box in format `(top left x, top left y, width, height)`.
14 | candidates : ndarray
15 | A matrix of candidate bounding boxes (one per row) in the same format
16 | as `bbox`.
17 |
18 | Returns
19 | -------
20 | ndarray
21 | The intersection over union in [0, 1] between the `bbox` and each
22 | candidate. A higher score means a larger fraction of the `bbox` is
23 | occluded by the candidate.
24 |
25 | """
26 | bbox_tl, bbox_br = bbox[:2], bbox[:2] + bbox[2:]
27 | candidates_tl = candidates[:, :2]
28 | candidates_br = candidates[:, :2] + candidates[:, 2:]
29 |
30 | tl = np.c_[np.maximum(bbox_tl[0], candidates_tl[:, 0])[:, np.newaxis],
31 | np.maximum(bbox_tl[1], candidates_tl[:, 1])[:, np.newaxis]]
32 | br = np.c_[np.minimum(bbox_br[0], candidates_br[:, 0])[:, np.newaxis],
33 | np.minimum(bbox_br[1], candidates_br[:, 1])[:, np.newaxis]]
34 | wh = np.maximum(0., br - tl)
35 |
36 | area_intersection = wh.prod(axis=1)
37 | area_bbox = bbox[2:].prod()
38 | area_candidates = candidates[:, 2:].prod(axis=1)
39 | return area_intersection / (area_bbox + area_candidates - area_intersection)
40 |
41 |
42 | def iou_cost(tracks, detections, track_indices=None,
43 | detection_indices=None):
44 | """An intersection over union distance metric.
45 |
46 | Parameters
47 | ----------
48 | tracks : List[deep_sort.track.Track]
49 | A list of tracks.
50 | detections : List[deep_sort.detection.Detection]
51 | A list of detections.
52 | track_indices : Optional[List[int]]
53 | A list of indices to tracks that should be matched. Defaults to
54 | all `tracks`.
55 | detection_indices : Optional[List[int]]
56 | A list of indices to detections that should be matched. Defaults
57 | to all `detections`.
58 |
59 | Returns
60 | -------
61 | ndarray
62 | Returns a cost matrix of shape
63 | len(track_indices), len(detection_indices) where entry (i, j) is
64 | `1 - iou(tracks[track_indices[i]], detections[detection_indices[j]])`.
65 |
66 | """
67 | if track_indices is None:
68 | track_indices = np.arange(len(tracks))
69 | if detection_indices is None:
70 | detection_indices = np.arange(len(detections))
71 |
72 | cost_matrix = np.zeros((len(track_indices), len(detection_indices)))
73 | for row, track_idx in enumerate(track_indices):
74 | if tracks[track_idx].time_since_update > 1:
75 | cost_matrix[row, :] = linear_assignment.INFTY_COST
76 | continue
77 |
78 | bbox = tracks[track_idx].to_tlwh()
79 | candidates = np.asarray(
80 | [detections[i].tlwh for i in detection_indices])
81 | cost_matrix[row, :] = 1. - iou(bbox, candidates)
82 | return cost_matrix
83 |
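A quick worked example (illustrative, not part of the repository) of iou() with boxes in (top left x, top left y, width, height) format; the repository root is assumed to be on the Python path.

import numpy as np
from deep_sort.sort.iou_matching import iou

bbox = np.array([0., 0., 10., 10.])            # 10x10 box at the origin
candidates = np.array([[5., 5., 10., 10.],     # overlaps bbox in a 5x5 region
                       [20., 20., 10., 10.]])  # disjoint from bbox
print(iou(bbox, candidates))  # [0.14285714 0.], i.e. 25 / (100 + 100 - 25) and 0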
--------------------------------------------------------------------------------
/deep_sort/sort/preprocessing.py:
--------------------------------------------------------------------------------
1 | # vim: expandtab:ts=4:sw=4
2 | import numpy as np
3 | import cv2
4 |
5 |
6 | def non_max_suppression(boxes, max_bbox_overlap, scores=None):
7 | """Suppress overlapping detections.
8 |
9 | Original code from [1]_ has been adapted to include confidence score.
10 |
11 | .. [1] http://www.pyimagesearch.com/2015/02/16/
12 | faster-non-maximum-suppression-python/
13 |
14 | Examples
15 | --------
16 |
17 | >>> boxes = [d.roi for d in detections]
18 | >>> scores = [d.confidence for d in detections]
19 | >>> indices = non_max_suppression(boxes, max_bbox_overlap, scores)
20 | >>> detections = [detections[i] for i in indices]
21 |
22 | Parameters
23 | ----------
24 | boxes : ndarray
25 | Array of ROIs (x, y, width, height).
26 | max_bbox_overlap : float
27 | ROIs that overlap more than this value are suppressed.
28 | scores : Optional[array_like]
29 | Detector confidence score.
30 |
31 | Returns
32 | -------
33 | List[int]
34 | Returns indices of detections that have survived non-maxima suppression.
35 |
36 | """
37 | if len(boxes) == 0:
38 | return []
39 |
40 | boxes = boxes.astype(float)  # np.float is deprecated in recent NumPy
41 | pick = []
42 |
43 | x1 = boxes[:, 0]
44 | y1 = boxes[:, 1]
45 | x2 = boxes[:, 2] + boxes[:, 0]
46 | y2 = boxes[:, 3] + boxes[:, 1]
47 |
48 | area = (x2 - x1 + 1) * (y2 - y1 + 1)
49 | if scores is not None:
50 | idxs = np.argsort(scores)
51 | else:
52 | idxs = np.argsort(y2)
53 |
54 | while len(idxs) > 0:
55 | last = len(idxs) - 1
56 | i = idxs[last]
57 | pick.append(i)
58 |
59 | xx1 = np.maximum(x1[i], x1[idxs[:last]])
60 | yy1 = np.maximum(y1[i], y1[idxs[:last]])
61 | xx2 = np.minimum(x2[i], x2[idxs[:last]])
62 | yy2 = np.minimum(y2[i], y2[idxs[:last]])
63 |
64 | w = np.maximum(0, xx2 - xx1 + 1)
65 | h = np.maximum(0, yy2 - yy1 + 1)
66 |
67 | overlap = (w * h) / area[idxs[:last]]
68 |
69 | idxs = np.delete(
70 | idxs, np.concatenate(
71 | ([last], np.where(overlap > max_bbox_overlap)[0])))
72 |
73 | return pick
74 |
--------------------------------------------------------------------------------
/deep_sort/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/deep_sort/utils/__init__.py
--------------------------------------------------------------------------------
/deep_sort/utils/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/deep_sort/utils/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/deep_sort/utils/__pycache__/parser.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/deep_sort/utils/__pycache__/parser.cpython-38.pyc
--------------------------------------------------------------------------------
/deep_sort/utils/asserts.py:
--------------------------------------------------------------------------------
1 | from os import environ
2 |
3 |
4 | def assert_in(file, files_to_check):
5 | if file not in files_to_check:
6 | raise AssertionError("{} does not exist in the list".format(str(file)))
7 | return True
8 |
9 |
10 | def assert_in_env(check_list: list):
11 | for item in check_list:
12 | assert_in(item, environ.keys())
13 | return True
14 |
--------------------------------------------------------------------------------
/deep_sort/utils/draw.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 |
4 | palette = (2 ** 11 - 1, 2 ** 15 - 1, 2 ** 20 - 1)
5 |
6 |
7 | def compute_color_for_labels(label):
8 | """
9 | Simple function that computes a fixed color for a given class label
10 | """
11 | color = [int((p * (label ** 2 - label + 1)) % 255) for p in palette]
12 | return tuple(color)
13 |
14 |
15 | def draw_boxes(img, bbox, identities=None, offset=(0,0)):
16 | for i,box in enumerate(bbox):
17 | x1,y1,x2,y2 = [int(i) for i in box]
18 | x1 += offset[0]
19 | x2 += offset[0]
20 | y1 += offset[1]
21 | y2 += offset[1]
22 | # box text and bar
23 | id = int(identities[i]) if identities is not None else 0
24 | color = compute_color_for_labels(id)
25 | label = '{}{:d}'.format("", id)
26 | t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 2 , 2)[0]
27 | cv2.rectangle(img,(x1, y1),(x2,y2),color,3)
28 | cv2.rectangle(img,(x1, y1),(x1+t_size[0]+3,y1+t_size[1]+4), color,-1)
29 | cv2.putText(img,label,(x1,y1+t_size[1]+4), cv2.FONT_HERSHEY_PLAIN, 2, [255,255,255], 2)
30 | return img
31 |
32 |
33 |
34 | if __name__ == '__main__':
35 | for i in range(82):
36 | print(compute_color_for_labels(i))
37 |
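A minimal usage sketch (illustrative, not part of the repository) of draw_boxes on a blank frame; the boxes and track ids are made up, and the repository root is assumed to be on the Python path.

import cv2
import numpy as np
from deep_sort.utils.draw import draw_boxes

img = np.zeros((480, 640, 3), dtype=np.uint8)           # blank BGR frame
bbox_xyxy = [[50, 60, 200, 300], [300, 100, 400, 250]]  # boxes as (x1, y1, x2, y2)
identities = [1, 2]                                      # track ids from the tracker
img = draw_boxes(img, bbox_xyxy, identities)
cv2.imwrite('boxes.jpg', img)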
--------------------------------------------------------------------------------
/deep_sort/utils/log.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 |
4 | def get_logger(name='root'):
5 | formatter = logging.Formatter(
6 | # fmt='%(asctime)s [%(levelname)s]: %(filename)s(%(funcName)s:%(lineno)s) >> %(message)s')
7 | fmt='%(asctime)s [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
8 |
9 | handler = logging.StreamHandler()
10 | handler.setFormatter(formatter)
11 |
12 | logger = logging.getLogger(name)
13 | logger.setLevel(logging.INFO)
14 | logger.addHandler(handler)
15 | return logger
16 |
17 |
18 |
--------------------------------------------------------------------------------
/deep_sort/utils/parser.py:
--------------------------------------------------------------------------------
1 | import os
2 | import yaml
3 | from easydict import EasyDict as edict
4 |
5 |
6 | class YamlParser(edict):
7 | """
8 | A YAML parser based on EasyDict.
9 | """
10 |
11 | def __init__(self, cfg_dict=None, config_file=None):
12 | if cfg_dict is None:
13 | cfg_dict = {}
14 |
15 | if config_file is not None:
16 | assert(os.path.isfile(config_file))
17 | with open(config_file, 'r') as fo:
18 | yaml_ = yaml.load(fo.read(), Loader=yaml.FullLoader)
19 | cfg_dict.update(yaml_)
20 |
21 | super(YamlParser, self).__init__(cfg_dict)
22 |
23 | def merge_from_file(self, config_file):
24 | with open(config_file, 'r') as fo:
25 | yaml_ = yaml.load(fo.read(), Loader=yaml.FullLoader)
26 | self.update(yaml_)
27 |
28 | def merge_from_dict(self, config_dict):
29 | self.update(config_dict)
30 |
31 |
32 | def get_config(config_file=None):
33 | return YamlParser(config_file=config_file)
34 |
35 |
36 | if __name__ == "__main__":
37 | cfg = YamlParser(config_file="../configs/yolov3.yaml")
38 | cfg.merge_from_file("../configs/deep_sort.yaml")
39 |
40 | import ipdb
41 | ipdb.set_trace()
42 |
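A short usage sketch (illustrative, not part of the repository) of get_config; the path below points at the tracker config shipped with the repo, the merged key is made up, and the repository root is assumed to be on the Python path.

from deep_sort.utils.parser import get_config

cfg = get_config("deep_sort/configs/deep_sort.yaml")
cfg.merge_from_dict({"extra_flag": True})  # any plain dict can be merged on top
print(cfg)  # EasyDict-style attribute access; available keys depend on the YAML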
--------------------------------------------------------------------------------
/deep_sort/utils/tools.py:
--------------------------------------------------------------------------------
1 | from functools import wraps
2 | from time import time
3 |
4 |
5 | def is_video(ext: str):
6 | """
7 | Returns True if `ext` ends with one of the
8 | allowed video file extensions.
9 |
10 | Args:
11 | ext: file extension or file name to check.
12 |
13 | Returns:
14 | bool: True if `ext` looks like a video file.
15 | """
16 |
17 | allowed_exts = ('.mp4', '.webm', '.ogg', '.avi', '.wmv', '.mkv', '.3gp')
18 | return any((ext.endswith(x) for x in allowed_exts))
19 |
20 |
21 | def tik_tok(func):
22 | """
23 | Decorator that measures and prints the wall-clock time (and FPS) of each call.
24 | Args:
25 | func: the function to be timed.
26 |
27 | Returns:
28 | The wrapped function.
29 | """
30 | @wraps(func)
31 | def _time_it(*args, **kwargs):
32 | start = time()
33 | try:
34 | return func(*args, **kwargs)
35 | finally:
36 | end_ = time()
37 | print("time: {:.03f}s, fps: {:.03f}".format(end_ - start, 1 / (end_ - start)))
38 |
39 | return _time_it
40 |
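A tiny usage sketch (illustrative, not part of the repository) of the tik_tok decorator; the decorated function is made up, and the repository root is assumed to be on the Python path.

from deep_sort.utils.tools import tik_tok

@tik_tok
def process_frame(n=100_000):
    return sum(range(n))

process_frame()  # prints something like "time: 0.003s, fps: 333.333" once the call returns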
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # pip install -r requirements.txt
2 |
3 | # base ----------------------------------------
4 |
5 | matplotlib>=3.2.2
6 | numpy>=1.18.5
7 | opencv-python>=4.1.2
8 | Pillow>=7.1.2
9 | PyYAML>=5.3.1
10 | requests>=2.23.0
11 | scipy>=1.4.1
12 | torch>=1.7.0
13 | torchvision>=0.8.1
14 | tqdm>=4.41.0
15 |
16 | # plotting ------------------------------------
17 |
18 | pandas>=1.1.4
19 | seaborn>=0.11.0
20 |
21 | # deep_sort -----------------------------------
22 |
23 | easydict
24 |
25 | # torchreid
26 |
27 | Cython
28 | h5py
29 | six
30 | tb-nightly
31 | future
32 | yacs
33 | gdown
34 | flake8
35 | yapf
36 | isort==4.3.21
37 | imageio
--------------------------------------------------------------------------------
/result_output_lane.mp4:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:be7ff8959c88176e73423915e6f46318bb05e4ff66de421835b8c3667620f0a4
3 | size 93797981
4 |
--------------------------------------------------------------------------------
/steering_wheel_image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/steering_wheel_image.jpg
--------------------------------------------------------------------------------
/yolov5/Dockerfile:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch
4 | FROM nvcr.io/nvidia/pytorch:21.10-py3
5 |
6 | # Install linux packages
7 | RUN apt update && apt install -y zip htop screen libgl1-mesa-glx
8 |
9 | # Install python dependencies
10 | COPY requirements.txt .
11 | RUN python -m pip install --upgrade pip
12 | RUN pip uninstall -y nvidia-tensorboard nvidia-tensorboard-plugin-dlprof
13 | RUN pip install --no-cache -r requirements.txt coremltools onnx gsutil notebook wandb>=0.12.2
14 | RUN pip install --no-cache -U torch torchvision numpy Pillow
15 | # RUN pip install --no-cache torch==1.10.0+cu113 torchvision==0.11.1+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
16 |
17 | # Create working directory
18 | RUN mkdir -p /usr/src/app
19 | WORKDIR /usr/src/app
20 |
21 | # Copy contents
22 | COPY . /usr/src/app
23 |
24 | # Downloads to user config dir
25 | ADD https://ultralytics.com/assets/Arial.ttf /root/.config/Ultralytics/
26 |
27 | # Set environment variables
28 | # ENV HOME=/usr/src/app
29 |
30 |
31 | # Usage Examples -------------------------------------------------------------------------------------------------------
32 |
33 | # Build and Push
34 | # t=ultralytics/yolov5:latest && sudo docker build -t $t . && sudo docker push $t
35 |
36 | # Pull and Run
37 | # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t
38 |
39 | # Pull and Run with local directory access
40 | # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t
41 |
42 | # Kill all
43 | # sudo docker kill $(sudo docker ps -q)
44 |
45 | # Kill all image-based
46 | # sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest)
47 |
48 | # Bash into running container
49 | # sudo docker exec -it 5a9b5863d93d bash
50 |
51 | # Bash into stopped container
52 | # id=$(sudo docker ps -qa) && sudo docker start $id && sudo docker exec -it $id bash
53 |
54 | # Clean up
55 | # docker system prune -a --volumes
56 |
57 | # Update Ubuntu drivers
58 | # https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/
59 |
60 | # DDP test
61 | # python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3
62 |
63 | # GCP VM from Image
64 | # docker.io/ultralytics/yolov5:latest
65 |
--------------------------------------------------------------------------------
/yolov5/data/Argoverse.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI
3 | # Example usage: python train.py --data Argoverse.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── Argoverse ← downloads here
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/Argoverse # dataset root dir
12 | train: Argoverse-1.1/images/train/ # train images (relative to 'path') 39384 images
13 | val: Argoverse-1.1/images/val/ # val images (relative to 'path') 15062 images
14 | test: Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview
15 |
16 | # Classes
17 | nc: 8 # number of classes
18 | names: ['person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign'] # class names
19 |
20 |
21 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
22 | download: |
23 | import json
24 |
25 | from tqdm import tqdm
26 | from utils.general import download, Path
27 |
28 |
29 | def argoverse2yolo(set):
30 | labels = {}
31 | a = json.load(open(set, "rb"))
32 | for annot in tqdm(a['annotations'], desc=f"Converting {set} to YOLOv5 format..."):
33 | img_id = annot['image_id']
34 | img_name = a['images'][img_id]['name']
35 | img_label_name = img_name[:-3] + "txt"
36 |
37 | cls = annot['category_id'] # instance class id
38 | x_center, y_center, width, height = annot['bbox']
39 | x_center = (x_center + width / 2) / 1920.0 # offset and scale
40 | y_center = (y_center + height / 2) / 1200.0 # offset and scale
41 | width /= 1920.0 # scale
42 | height /= 1200.0 # scale
43 |
44 | img_dir = set.parents[2] / 'Argoverse-1.1' / 'labels' / a['seq_dirs'][a['images'][annot['image_id']]['sid']]
45 | if not img_dir.exists():
46 | img_dir.mkdir(parents=True, exist_ok=True)
47 |
48 | k = str(img_dir / img_label_name)
49 | if k not in labels:
50 | labels[k] = []
51 | labels[k].append(f"{cls} {x_center} {y_center} {width} {height}\n")
52 |
53 | for k in labels:
54 | with open(k, "w") as f:
55 | f.writelines(labels[k])
56 |
57 |
58 | # Download
59 | dir = Path('../datasets/Argoverse') # dataset root dir
60 | urls = ['https://argoverse-hd.s3.us-east-2.amazonaws.com/Argoverse-HD-Full.zip']
61 | download(urls, dir=dir, delete=False)
62 |
63 | # Convert
64 | annotations_dir = 'Argoverse-HD/annotations/'
65 | (dir / 'Argoverse-1.1' / 'tracking').rename(dir / 'Argoverse-1.1' / 'images') # rename 'tracking' to 'images'
66 | for d in "train.json", "val.json":
67 | argoverse2yolo(dir / annotations_dir / d) # convert Argoverse annotations to YOLO labels
68 |
--------------------------------------------------------------------------------
/yolov5/data/GlobalWheat2020.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan
3 | # Example usage: python train.py --data GlobalWheat2020.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── GlobalWheat2020 ← downloads here
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/GlobalWheat2020 # dataset root dir
12 | train: # train images (relative to 'path') 3422 images
13 | - images/arvalis_1
14 | - images/arvalis_2
15 | - images/arvalis_3
16 | - images/ethz_1
17 | - images/rres_1
18 | - images/inrae_1
19 | - images/usask_1
20 | val: # val images (relative to 'path') 748 images (WARNING: train set contains ethz_1)
21 | - images/ethz_1
22 | test: # test images (optional) 1276 images
23 | - images/utokyo_1
24 | - images/utokyo_2
25 | - images/nau_1
26 | - images/uq_1
27 |
28 | # Classes
29 | nc: 1 # number of classes
30 | names: ['wheat_head'] # class names
31 |
32 |
33 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
34 | download: |
35 | from utils.general import download, Path
36 |
37 | # Download
38 | dir = Path(yaml['path']) # dataset root dir
39 | urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip',
40 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/GlobalWheat2020_labels.zip']
41 | download(urls, dir=dir)
42 |
43 | # Make Directories
44 | for p in 'annotations', 'images', 'labels':
45 | (dir / p).mkdir(parents=True, exist_ok=True)
46 |
47 | # Move
48 | for p in 'arvalis_1', 'arvalis_2', 'arvalis_3', 'ethz_1', 'rres_1', 'inrae_1', 'usask_1', \
49 | 'utokyo_1', 'utokyo_2', 'nau_1', 'uq_1':
50 | (dir / p).rename(dir / 'images' / p) # move to /images
51 | f = (dir / p).with_suffix('.json') # json file
52 | if f.exists():
53 | f.rename((dir / 'annotations' / p).with_suffix('.json')) # move to /annotations
54 |
--------------------------------------------------------------------------------
/yolov5/data/SKU-110K.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail
3 | # Example usage: python train.py --data SKU-110K.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── SKU-110K ← downloads here
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/SKU-110K # dataset root dir
12 | train: train.txt # train images (relative to 'path') 8219 images
13 | val: val.txt # val images (relative to 'path') 588 images
14 | test: test.txt # test images (optional) 2936 images
15 |
16 | # Classes
17 | nc: 1 # number of classes
18 | names: ['object'] # class names
19 |
20 |
21 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
22 | download: |
23 | import shutil
24 | from tqdm import tqdm
25 | from utils.general import np, pd, Path, download, xyxy2xywh
26 |
27 | # Download
28 | dir = Path(yaml['path']) # dataset root dir
29 | parent = Path(dir.parent) # download dir
30 | urls = ['http://trax-geometry.s3.amazonaws.com/cvpr_challenge/SKU110K_fixed.tar.gz']
31 | download(urls, dir=parent, delete=False)
32 |
33 | # Rename directories
34 | if dir.exists():
35 | shutil.rmtree(dir)
36 | (parent / 'SKU110K_fixed').rename(dir) # rename dir
37 | (dir / 'labels').mkdir(parents=True, exist_ok=True) # create labels dir
38 |
39 | # Convert labels
40 | names = 'image', 'x1', 'y1', 'x2', 'y2', 'class', 'image_width', 'image_height' # column names
41 | for d in 'annotations_train.csv', 'annotations_val.csv', 'annotations_test.csv':
42 | x = pd.read_csv(dir / 'annotations' / d, names=names).values # annotations
43 | images, unique_images = x[:, 0], np.unique(x[:, 0])
44 | with open((dir / d).with_suffix('.txt').__str__().replace('annotations_', ''), 'w') as f:
45 | f.writelines(f'./images/{s}\n' for s in unique_images)
46 | for im in tqdm(unique_images, desc=f'Converting {dir / d}'):
47 | cls = 0 # single-class dataset
48 | with open((dir / 'labels' / im).with_suffix('.txt'), 'a') as f:
49 | for r in x[images == im]:
50 | w, h = r[6], r[7] # image width, height
51 | xywh = xyxy2xywh(np.array([[r[1] / w, r[2] / h, r[3] / w, r[4] / h]]))[0] # instance
52 | f.write(f"{cls} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label
53 |
--------------------------------------------------------------------------------
/yolov5/data/VisDrone.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University
3 | # Example usage: python train.py --data VisDrone.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── VisDrone ← downloads here
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/VisDrone # dataset root dir
12 | train: VisDrone2019-DET-train/images # train images (relative to 'path') 6471 images
13 | val: VisDrone2019-DET-val/images # val images (relative to 'path') 548 images
14 | test: VisDrone2019-DET-test-dev/images # test images (optional) 1610 images
15 |
16 | # Classes
17 | nc: 10 # number of classes
18 | names: ['pedestrian', 'people', 'bicycle', 'car', 'van', 'truck', 'tricycle', 'awning-tricycle', 'bus', 'motor']
19 |
20 |
21 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
22 | download: |
23 | from utils.general import download, os, Path
24 |
25 | def visdrone2yolo(dir):
26 | from PIL import Image
27 | from tqdm import tqdm
28 |
29 | def convert_box(size, box):
30 | # Convert VisDrone box to YOLO xywh box
31 | dw = 1. / size[0]
32 | dh = 1. / size[1]
33 | return (box[0] + box[2] / 2) * dw, (box[1] + box[3] / 2) * dh, box[2] * dw, box[3] * dh
34 |
35 | (dir / 'labels').mkdir(parents=True, exist_ok=True) # make labels directory
36 | pbar = tqdm((dir / 'annotations').glob('*.txt'), desc=f'Converting {dir}')
37 | for f in pbar:
38 | img_size = Image.open((dir / 'images' / f.name).with_suffix('.jpg')).size
39 | lines = []
40 | with open(f, 'r') as file: # read annotation.txt
41 | for row in [x.split(',') for x in file.read().strip().splitlines()]:
42 | if row[4] == '0': # VisDrone 'ignored regions' class 0
43 | continue
44 | cls = int(row[5]) - 1
45 | box = convert_box(img_size, tuple(map(int, row[:4])))
46 | lines.append(f"{cls} {' '.join(f'{x:.6f}' for x in box)}\n")
47 | with open(str(f).replace(os.sep + 'annotations' + os.sep, os.sep + 'labels' + os.sep), 'w') as fl:
48 | fl.writelines(lines) # write label.txt
49 |
50 |
51 | # Download
52 | dir = Path(yaml['path']) # dataset root dir
53 | urls = ['https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-train.zip',
54 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-val.zip',
55 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip',
56 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-challenge.zip']
57 | download(urls, dir=dir)
58 |
59 | # Convert
60 | for d in 'VisDrone2019-DET-train', 'VisDrone2019-DET-val', 'VisDrone2019-DET-test-dev':
61 | visdrone2yolo(dir / d) # convert VisDrone annotations to YOLO labels
62 |
--------------------------------------------------------------------------------
/yolov5/data/coco.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # COCO 2017 dataset http://cocodataset.org by Microsoft
3 | # Example usage: python train.py --data coco.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── coco ← downloads here
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco # dataset root dir
12 | train: train2017.txt # train images (relative to 'path') 118287 images
13 | val: val2017.txt # val images (relative to 'path') 5000 images
14 | test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
15 |
16 | # Classes
17 | nc: 80 # number of classes
18 | names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
19 | 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
20 | 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
21 | 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
22 | 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
23 | 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
24 | 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
25 | 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
26 | 'hair drier', 'toothbrush'] # class names
27 |
28 |
29 | # Download script/URL (optional)
30 | download: |
31 | from utils.general import download, Path
32 |
33 | # Download labels
34 | segments = False # segment or box labels
35 | dir = Path(yaml['path']) # dataset root dir
36 | url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
37 | urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels
38 | download(urls, dir=dir.parent)
39 |
40 | # Download data
41 | urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images
42 | 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images
43 | 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional)
44 | download(urls, dir=dir / 'images', threads=3)
45 |
--------------------------------------------------------------------------------
/yolov5/data/coco128.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
3 | # Example usage: python train.py --data coco128.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── coco128 ← downloads here
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco128 # dataset root dir
12 | train: images/train2017 # train images (relative to 'path') 128 images
13 | val: images/train2017 # val images (relative to 'path') 128 images
14 | test: # test images (optional)
15 |
16 | # Classes
17 | nc: 80 # number of classes
18 | names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
19 | 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
20 | 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
21 | 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
22 | 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
23 | 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
24 | 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
25 | 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
26 | 'hair drier', 'toothbrush'] # class names
27 |
28 |
29 | # Download script/URL (optional)
30 | download: https://ultralytics.com/assets/coco128.zip
31 |
--------------------------------------------------------------------------------
/yolov5/data/hyps/hyp.finetune.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Hyperparameters for VOC finetuning
3 | # python train.py --batch 64 --weights yolov5m.pt --data VOC.yaml --img 512 --epochs 50
4 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
5 |
6 | # Hyperparameter Evolution Results
7 | # Generations: 306
8 | # P R mAP.5 mAP.5:.95 box obj cls
9 | # Metrics: 0.6 0.936 0.896 0.684 0.0115 0.00805 0.00146
10 |
11 | lr0: 0.0032
12 | lrf: 0.12
13 | momentum: 0.843
14 | weight_decay: 0.00036
15 | warmup_epochs: 2.0
16 | warmup_momentum: 0.5
17 | warmup_bias_lr: 0.05
18 | box: 0.0296
19 | cls: 0.243
20 | cls_pw: 0.631
21 | obj: 0.301
22 | obj_pw: 0.911
23 | iou_t: 0.2
24 | anchor_t: 2.91
25 | # anchors: 3.63
26 | fl_gamma: 0.0
27 | hsv_h: 0.0138
28 | hsv_s: 0.664
29 | hsv_v: 0.464
30 | degrees: 0.373
31 | translate: 0.245
32 | scale: 0.898
33 | shear: 0.602
34 | perspective: 0.0
35 | flipud: 0.00856
36 | fliplr: 0.5
37 | mosaic: 1.0
38 | mixup: 0.243
39 | copy_paste: 0.0
40 |
--------------------------------------------------------------------------------
/yolov5/data/hyps/hyp.finetune_objects365.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | lr0: 0.00258
4 | lrf: 0.17
5 | momentum: 0.779
6 | weight_decay: 0.00058
7 | warmup_epochs: 1.33
8 | warmup_momentum: 0.86
9 | warmup_bias_lr: 0.0711
10 | box: 0.0539
11 | cls: 0.299
12 | cls_pw: 0.825
13 | obj: 0.632
14 | obj_pw: 1.0
15 | iou_t: 0.2
16 | anchor_t: 3.44
17 | anchors: 3.2
18 | fl_gamma: 0.0
19 | hsv_h: 0.0188
20 | hsv_s: 0.704
21 | hsv_v: 0.36
22 | degrees: 0.0
23 | translate: 0.0902
24 | scale: 0.491
25 | shear: 0.0
26 | perspective: 0.0
27 | flipud: 0.0
28 | fliplr: 0.5
29 | mosaic: 1.0
30 | mixup: 0.0
31 | copy_paste: 0.0
32 |
--------------------------------------------------------------------------------
/yolov5/data/hyps/hyp.scratch-high.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Hyperparameters for high-augmentation COCO training from scratch
3 | # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300
4 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
5 |
6 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
7 | lrf: 0.2 # final OneCycleLR learning rate (lr0 * lrf)
8 | momentum: 0.937 # SGD momentum/Adam beta1
9 | weight_decay: 0.0005 # optimizer weight decay 5e-4
10 | warmup_epochs: 3.0 # warmup epochs (fractions ok)
11 | warmup_momentum: 0.8 # warmup initial momentum
12 | warmup_bias_lr: 0.1 # warmup initial bias lr
13 | box: 0.05 # box loss gain
14 | cls: 0.3 # cls loss gain
15 | cls_pw: 1.0 # cls BCELoss positive_weight
16 | obj: 0.7 # obj loss gain (scale with pixels)
17 | obj_pw: 1.0 # obj BCELoss positive_weight
18 | iou_t: 0.20 # IoU training threshold
19 | anchor_t: 4.0 # anchor-multiple threshold
20 | # anchors: 3 # anchors per output layer (0 to ignore)
21 | fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
22 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
23 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
24 | hsv_v: 0.4 # image HSV-Value augmentation (fraction)
25 | degrees: 0.0 # image rotation (+/- deg)
26 | translate: 0.1 # image translation (+/- fraction)
27 | scale: 0.9 # image scale (+/- gain)
28 | shear: 0.0 # image shear (+/- deg)
29 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
30 | flipud: 0.0 # image flip up-down (probability)
31 | fliplr: 0.5 # image flip left-right (probability)
32 | mosaic: 1.0 # image mosaic (probability)
33 | mixup: 0.1 # image mixup (probability)
34 | copy_paste: 0.1 # segment copy-paste (probability)
35 |
--------------------------------------------------------------------------------
/yolov5/data/hyps/hyp.scratch-low.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Hyperparameters for low-augmentation COCO training from scratch
3 | # python train.py --batch 64 --cfg yolov5n6.yaml --weights '' --data coco.yaml --img 640 --epochs 300 --linear
4 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
5 |
6 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
7 | lrf: 0.01 # final OneCycleLR learning rate (lr0 * lrf)
8 | momentum: 0.937 # SGD momentum/Adam beta1
9 | weight_decay: 0.0005 # optimizer weight decay 5e-4
10 | warmup_epochs: 3.0 # warmup epochs (fractions ok)
11 | warmup_momentum: 0.8 # warmup initial momentum
12 | warmup_bias_lr: 0.1 # warmup initial bias lr
13 | box: 0.05 # box loss gain
14 | cls: 0.5 # cls loss gain
15 | cls_pw: 1.0 # cls BCELoss positive_weight
16 | obj: 1.0 # obj loss gain (scale with pixels)
17 | obj_pw: 1.0 # obj BCELoss positive_weight
18 | iou_t: 0.20 # IoU training threshold
19 | anchor_t: 4.0 # anchor-multiple threshold
20 | # anchors: 3 # anchors per output layer (0 to ignore)
21 | fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
22 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
23 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
24 | hsv_v: 0.4 # image HSV-Value augmentation (fraction)
25 | degrees: 0.0 # image rotation (+/- deg)
26 | translate: 0.1 # image translation (+/- fraction)
27 | scale: 0.5 # image scale (+/- gain)
28 | shear: 0.0 # image shear (+/- deg)
29 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
30 | flipud: 0.0 # image flip up-down (probability)
31 | fliplr: 0.5 # image flip left-right (probability)
32 | mosaic: 1.0 # image mosaic (probability)
33 | mixup: 0.0 # image mixup (probability)
34 | copy_paste: 0.0 # segment copy-paste (probability)
35 |
--------------------------------------------------------------------------------
/yolov5/data/hyps/hyp.scratch-med.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Hyperparameters for medium-augmentation COCO training from scratch
3 | # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300
4 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
5 |
6 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
7 | lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf)
8 | momentum: 0.937 # SGD momentum/Adam beta1
9 | weight_decay: 0.0005 # optimizer weight decay 5e-4
10 | warmup_epochs: 3.0 # warmup epochs (fractions ok)
11 | warmup_momentum: 0.8 # warmup initial momentum
12 | warmup_bias_lr: 0.1 # warmup initial bias lr
13 | box: 0.05 # box loss gain
14 | cls: 0.3 # cls loss gain
15 | cls_pw: 1.0 # cls BCELoss positive_weight
16 | obj: 0.7 # obj loss gain (scale with pixels)
17 | obj_pw: 1.0 # obj BCELoss positive_weight
18 | iou_t: 0.20 # IoU training threshold
19 | anchor_t: 4.0 # anchor-multiple threshold
20 | # anchors: 3 # anchors per output layer (0 to ignore)
21 | fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
22 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
23 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
24 | hsv_v: 0.4 # image HSV-Value augmentation (fraction)
25 | degrees: 0.0 # image rotation (+/- deg)
26 | translate: 0.1 # image translation (+/- fraction)
27 | scale: 0.9 # image scale (+/- gain)
28 | shear: 0.0 # image shear (+/- deg)
29 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
30 | flipud: 0.0 # image flip up-down (probability)
31 | fliplr: 0.5 # image flip left-right (probability)
32 | mosaic: 1.0 # image mosaic (probability)
33 | mixup: 0.1 # image mixup (probability)
34 | copy_paste: 0.0 # segment copy-paste (probability)
35 |
--------------------------------------------------------------------------------
/yolov5/data/hyps/hyp.scratch.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Hyperparameters for COCO training from scratch
3 | # python train.py --batch 40 --cfg yolov5m.yaml --weights '' --data coco.yaml --img 640 --epochs 300
4 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
5 |
6 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
7 | lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf)
8 | momentum: 0.937 # SGD momentum/Adam beta1
9 | weight_decay: 0.0005 # optimizer weight decay 5e-4
10 | warmup_epochs: 3.0 # warmup epochs (fractions ok)
11 | warmup_momentum: 0.8 # warmup initial momentum
12 | warmup_bias_lr: 0.1 # warmup initial bias lr
13 | box: 0.05 # box loss gain
14 | cls: 0.5 # cls loss gain
15 | cls_pw: 1.0 # cls BCELoss positive_weight
16 | obj: 1.0 # obj loss gain (scale with pixels)
17 | obj_pw: 1.0 # obj BCELoss positive_weight
18 | iou_t: 0.20 # IoU training threshold
19 | anchor_t: 4.0 # anchor-multiple threshold
20 | # anchors: 3 # anchors per output layer (0 to ignore)
21 | fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
22 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
23 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
24 | hsv_v: 0.4 # image HSV-Value augmentation (fraction)
25 | degrees: 0.0 # image rotation (+/- deg)
26 | translate: 0.1 # image translation (+/- fraction)
27 | scale: 0.5 # image scale (+/- gain)
28 | shear: 0.0 # image shear (+/- deg)
29 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
30 | flipud: 0.0 # image flip up-down (probability)
31 | fliplr: 0.5 # image flip left-right (probability)
32 | mosaic: 1.0 # image mosaic (probability)
33 | mixup: 0.0 # image mixup (probability)
34 | copy_paste: 0.0 # segment copy-paste (probability)
35 |
--------------------------------------------------------------------------------
/yolov5/data/images/bus.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/yolov5/data/images/bus.jpg
--------------------------------------------------------------------------------
/yolov5/data/images/zidane.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/yolov5/data/images/zidane.jpg
--------------------------------------------------------------------------------
/yolov5/data/output/result_output_lane.mp4:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:81d99a9584f19f5625823e3797d9ab67fc0d68972da5e6210b6f742e60613b9e
3 | size 112724008
4 |
--------------------------------------------------------------------------------
/yolov5/data/output/test_sample_result-1.avi:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:0983548b5608e817ee1ab002a0110bb4b8039f23ae2833a1cf8e4873afa5ea51
3 | size 129057114
4 |
--------------------------------------------------------------------------------
/yolov5/data/output/test_sample_result-2.avi:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:f808a63ad6d3ca259cb5c037e887285e8dd9e388403d7b0f49f9e51b3934af42
3 | size 129297990
4 |
--------------------------------------------------------------------------------
/yolov5/data/scripts/download_weights.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
3 | # Download latest models from https://github.com/ultralytics/yolov5/releases
4 | # Example usage: bash path/to/download_weights.sh
5 | # parent
6 | # └── yolov5
7 | # ├── yolov5s.pt ← downloads here
8 | # ├── yolov5m.pt
9 | # └── ...
10 |
11 | python - <<EOF
--------------------------------------------------------------------------------
/yolov5/requirements.txt:
--------------------------------------------------------------------------------
1 | # pip install -r requirements.txt
2 |
3 | # Base ----------------------------------------
4 | matplotlib>=3.2.2
5 | numpy>=1.18.5
6 | opencv-python>=4.1.2
7 | Pillow>=7.1.2
8 | PyYAML>=5.3.1
9 | requests>=2.23.0
10 | scipy>=1.4.1
11 | torch>=1.7.0
12 | torchvision>=0.8.1
13 | tqdm>=4.41.0
14 |
15 | # Logging -------------------------------------
16 | tensorboard>=2.4.1
17 | # wandb
18 |
19 | # Plotting ------------------------------------
20 | pandas>=1.1.4
21 | seaborn>=0.11.0
22 |
23 | # Export --------------------------------------
24 | # coremltools>=4.1 # CoreML export
25 | # onnx>=1.9.0 # ONNX export
26 | # onnx-simplifier>=0.3.6 # ONNX simplifier
27 | # scikit-learn==0.19.2 # CoreML quantization
28 | # tensorflow>=2.4.1 # TFLite export
29 | # tensorflowjs>=3.9.0 # TF.js export
30 | # openvino-dev # OpenVINO export
31 |
32 | # Extras --------------------------------------
33 | # albumentations>=1.0.3
34 | # Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172
35 | # pycocotools>=2.0 # COCO mAP
36 | # roboflow
37 | thop # FLOPs computation
38 |
--------------------------------------------------------------------------------
/yolov5/setup.cfg:
--------------------------------------------------------------------------------
1 | # Project-wide configuration file, can be used for package metadata and other tool configurations
2 | # Example usage: global configuration for PEP8 (via flake8) settings or default pytest arguments
3 |
4 | [metadata]
5 | license_file = LICENSE
6 | description-file = README.md
7 |
8 |
9 | [tool:pytest]
10 | norecursedirs =
11 | .git
12 | dist
13 | build
14 | addopts =
15 | --doctest-modules
16 | --durations=25
17 | --color=yes
18 |
19 |
20 | [flake8]
21 | max-line-length = 120
22 | exclude = .tox,*.egg,build,temp
23 | select = E,W,F
24 | doctests = True
25 | verbose = 2
26 | # https://pep8.readthedocs.io/en/latest/intro.html#error-codes
27 | format = pylint
28 | # see: https://www.flake8rules.com/
29 | ignore =
30 | E731 # Do not assign a lambda expression, use a def
31 | F405
32 | E402
33 | F841
34 | E741
35 | F821
36 | E722
37 | F401
38 | W504
39 | E127
40 | W504
41 | E231
42 | E501
43 | F403
44 | E302
45 | F541
46 |
47 |
48 | [isort]
49 | # https://pycqa.github.io/isort/docs/configuration/options.html
50 | line_length = 120
51 | multi_line_output = 0
52 |
--------------------------------------------------------------------------------
/yolov5/test_sample.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/yolov5/test_sample.mp4
--------------------------------------------------------------------------------
/yolov5/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | utils/initialization
4 | """
5 |
6 |
7 | def notebook_init(verbose=True):
8 | # Check system software and hardware
9 | print('Checking setup...')
10 |
11 | import os
12 | import shutil
13 |
14 | from utils.general import check_requirements, emojis, is_colab
15 | from utils.torch_utils import select_device # imports
16 |
17 | check_requirements(('psutil', 'IPython'))
18 | import psutil
19 | from IPython import display # to display images and clear console output
20 |
21 | if is_colab():
22 | shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory
23 |
24 | if verbose:
25 | # System info
26 | # gb = 1 / 1000 ** 3 # bytes to GB
27 | gib = 1 / 1024 ** 3 # bytes to GiB
28 | ram = psutil.virtual_memory().total
29 | total, used, free = shutil.disk_usage("/")
30 | display.clear_output()
31 | s = f'({os.cpu_count()} CPUs, {ram * gib:.1f} GB RAM, {(total - free) * gib:.1f}/{total * gib:.1f} GB disk)'
32 | else:
33 | s = ''
34 |
35 | select_device(newline=False)
36 | print(emojis(f'Setup complete ✅ {s}'))
37 | return display
38 |
--------------------------------------------------------------------------------
/yolov5/utils/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/yolov5/utils/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/yolov5/utils/__pycache__/augmentations.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/yolov5/utils/__pycache__/augmentations.cpython-38.pyc
--------------------------------------------------------------------------------
/yolov5/utils/__pycache__/autoanchor.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/yolov5/utils/__pycache__/autoanchor.cpython-38.pyc
--------------------------------------------------------------------------------
/yolov5/utils/__pycache__/datasets.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/yolov5/utils/__pycache__/datasets.cpython-38.pyc
--------------------------------------------------------------------------------
/yolov5/utils/__pycache__/downloads.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/yolov5/utils/__pycache__/downloads.cpython-38.pyc
--------------------------------------------------------------------------------
/yolov5/utils/__pycache__/general.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/yolov5/utils/__pycache__/general.cpython-38.pyc
--------------------------------------------------------------------------------
/yolov5/utils/__pycache__/metrics.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/yolov5/utils/__pycache__/metrics.cpython-38.pyc
--------------------------------------------------------------------------------
/yolov5/utils/__pycache__/plots.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/yolov5/utils/__pycache__/plots.cpython-38.pyc
--------------------------------------------------------------------------------
/yolov5/utils/__pycache__/torch_utils.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/yolov5/utils/__pycache__/torch_utils.cpython-38.pyc
--------------------------------------------------------------------------------
/yolov5/utils/autobatch.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Auto-batch utils
4 | """
5 |
6 | from copy import deepcopy
7 |
8 | import numpy as np
9 | import torch
10 | from torch.cuda import amp
11 |
12 | from utils.general import LOGGER, colorstr
13 | from utils.torch_utils import profile
14 |
15 |
16 | def check_train_batch_size(model, imgsz=640):
17 | # Check YOLOv5 training batch size
18 | with amp.autocast():
19 | return autobatch(deepcopy(model).train(), imgsz) # compute optimal batch size
20 |
21 |
22 | def autobatch(model, imgsz=640, fraction=0.9, batch_size=16):
23 | # Automatically estimate best batch size to use `fraction` of available CUDA memory
24 | # Usage:
25 | # import torch
26 | # from utils.autobatch import autobatch
27 | # model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False)
28 | # print(autobatch(model))
29 |
30 | prefix = colorstr('AutoBatch: ')
31 | LOGGER.info(f'{prefix}Computing optimal batch size for --imgsz {imgsz}')
32 | device = next(model.parameters()).device # get model device
33 | if device.type == 'cpu':
34 | LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}')
35 | return batch_size
36 |
37 | d = str(device).upper() # 'CUDA:0'
38 | properties = torch.cuda.get_device_properties(device) # device properties
39 | t = properties.total_memory / 1024 ** 3 # (GiB)
40 | r = torch.cuda.memory_reserved(device) / 1024 ** 3 # (GiB)
41 | a = torch.cuda.memory_allocated(device) / 1024 ** 3 # (GiB)
42 | f = t - (r + a) # free inside reserved
43 | LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free')
44 |
45 | batch_sizes = [1, 2, 4, 8, 16]
46 | try:
47 | img = [torch.zeros(b, 3, imgsz, imgsz) for b in batch_sizes]
48 | y = profile(img, model, n=3, device=device)
 49 |     except Exception as e:
 50 |         LOGGER.warning(f'{prefix}{e}')
 51 |         return batch_size  # profiling failed, fall back to the default batch size
52 | y = [x[2] for x in y if x] # memory [2]
53 | batch_sizes = batch_sizes[:len(y)]
54 | p = np.polyfit(batch_sizes, y, deg=1) # first degree polynomial fit
 55 |     b = int((f * fraction - p[1]) / p[0])  # solve the fit for the batch size that uses `fraction` of free memory
56 | LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%)')
57 | return b
58 |
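The estimate above is a straight-line extrapolation: `profile` measures CUDA memory for a few small batch sizes, `np.polyfit` fits memory as a linear function of batch size, and the fit is inverted at the target fraction of free memory. A minimal sketch of that arithmetic, with made-up memory readings standing in for `profile` output:

```python
import numpy as np

# Illustrative numbers only: pretend these GiB readings came from profile()
batch_sizes = [1, 2, 4, 8, 16]
mem_gib = [1.2, 1.9, 3.3, 6.1, 11.8]

slope, intercept = np.polyfit(batch_sizes, mem_gib, deg=1)  # mem ≈ slope * b + intercept

free_gib, fraction = 14.8, 0.9                          # free CUDA memory and target utilisation
b_opt = int((free_gib * fraction - intercept) / slope)  # invert the fit for b
print(b_opt)                                            # largest batch size expected to fit the budget
```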
--------------------------------------------------------------------------------
/yolov5/utils/aws/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/yolov5/utils/aws/__init__.py
--------------------------------------------------------------------------------
/yolov5/utils/aws/mime.sh:
--------------------------------------------------------------------------------
1 | # AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/
2 | # This script will run on every instance restart, not only on first start
3 | # --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA ---
4 |
5 | Content-Type: multipart/mixed; boundary="//"
6 | MIME-Version: 1.0
7 |
8 | --//
9 | Content-Type: text/cloud-config; charset="us-ascii"
10 | MIME-Version: 1.0
11 | Content-Transfer-Encoding: 7bit
12 | Content-Disposition: attachment; filename="cloud-config.txt"
13 |
14 | #cloud-config
15 | cloud_final_modules:
16 | - [scripts-user, always]
17 |
18 | --//
19 | Content-Type: text/x-shellscript; charset="us-ascii"
20 | MIME-Version: 1.0
21 | Content-Transfer-Encoding: 7bit
22 | Content-Disposition: attachment; filename="userdata.txt"
23 |
24 | #!/bin/bash
25 | # --- paste contents of userdata.sh here ---
26 | --//
27 |
--------------------------------------------------------------------------------
/yolov5/utils/aws/resume.py:
--------------------------------------------------------------------------------
1 | # Resume all interrupted trainings in yolov5/ dir including DDP trainings
2 | # Usage: $ python utils/aws/resume.py
3 |
4 | import os
5 | import sys
6 | from pathlib import Path
7 |
8 | import torch
9 | import yaml
10 |
11 | FILE = Path(__file__).resolve()
12 | ROOT = FILE.parents[2] # YOLOv5 root directory
13 | if str(ROOT) not in sys.path:
14 | sys.path.append(str(ROOT)) # add ROOT to PATH
15 |
16 | port = 0 # --master_port
17 | path = Path('').resolve()
18 | for last in path.rglob('*/**/last.pt'):
19 | ckpt = torch.load(last)
20 | if ckpt['optimizer'] is None:
21 | continue
22 |
23 | # Load opt.yaml
24 | with open(last.parent.parent / 'opt.yaml', errors='ignore') as f:
25 | opt = yaml.safe_load(f)
26 |
27 | # Get device count
28 | d = opt['device'].split(',') # devices
29 | nd = len(d) # number of devices
30 | ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel
31 |
32 | if ddp: # multi-GPU
33 | port += 1
34 | cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}'
35 | else: # single-GPU
36 | cmd = f'python train.py --resume {last}'
37 |
 38 |     cmd += ' > /dev/null 2>&1 &'  # redirect output to /dev/null and run in the background
39 | print(cmd)
40 | os.system(cmd)
41 |
--------------------------------------------------------------------------------
/yolov5/utils/aws/userdata.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html
3 | # This script will run only once on first instance start (for a re-start script see mime.sh)
4 | # /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir
5 | # Use >300 GB SSD
6 |
7 | cd home/ubuntu
8 | if [ ! -d yolov5 ]; then
9 | echo "Running first-time script." # install dependencies, download COCO, pull Docker
10 | git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5
11 | cd yolov5
12 | bash data/scripts/get_coco.sh && echo "COCO done." &
13 | sudo docker pull ultralytics/yolov5:latest && echo "Docker done." &
14 | python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." &
15 | wait && echo "All tasks done." # finish background tasks
16 | else
17 | echo "Running re-start script." # resume interrupted runs
18 | i=0
19 | list=$(sudo docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour'
20 | while IFS= read -r id; do
21 | ((i++))
22 | echo "restarting container $i: $id"
23 | sudo docker start $id
24 | # sudo docker exec -it $id python train.py --resume # single-GPU
25 | sudo docker exec -d $id python utils/aws/resume.py # multi-scenario
26 | done <<<"$list"
27 | fi
28 |
--------------------------------------------------------------------------------
/yolov5/utils/callbacks.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Callback utils
4 | """
5 |
6 |
7 | class Callbacks:
 8 |     """
9 | Handles all registered callbacks for YOLOv5 Hooks
10 | """
11 |
12 | def __init__(self):
13 | # Define the available callbacks
14 | self._callbacks = {
15 | 'on_pretrain_routine_start': [],
16 | 'on_pretrain_routine_end': [],
17 |
18 | 'on_train_start': [],
19 | 'on_train_epoch_start': [],
20 | 'on_train_batch_start': [],
21 | 'optimizer_step': [],
22 | 'on_before_zero_grad': [],
23 | 'on_train_batch_end': [],
24 | 'on_train_epoch_end': [],
25 |
26 | 'on_val_start': [],
27 | 'on_val_batch_start': [],
28 | 'on_val_image_end': [],
29 | 'on_val_batch_end': [],
30 | 'on_val_end': [],
31 |
32 | 'on_fit_epoch_end': [], # fit = train + val
33 | 'on_model_save': [],
34 | 'on_train_end': [],
35 | 'on_params_update': [],
36 | 'teardown': [],
37 | }
38 |
39 | def register_action(self, hook, name='', callback=None):
40 | """
41 | Register a new action to a callback hook
42 |
43 | Args:
44 | hook The callback hook name to register the action to
45 | name The name of the action for later reference
46 | callback The callback to fire
47 | """
48 | assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
49 | assert callable(callback), f"callback '{callback}' is not callable"
50 | self._callbacks[hook].append({'name': name, 'callback': callback})
51 |
52 | def get_registered_actions(self, hook=None):
 53 |         """
54 | Returns all the registered actions by callback hook
55 |
56 | Args:
57 | hook The name of the hook to check, defaults to all
58 | """
59 | if hook:
60 | return self._callbacks[hook]
61 | else:
62 | return self._callbacks
63 |
64 | def run(self, hook, *args, **kwargs):
65 | """
66 | Loop through the registered actions and fire all callbacks
67 |
68 | Args:
 69 |             hook The name of the hook whose registered callbacks should be fired
70 | args Arguments to receive from YOLOv5
71 | kwargs Keyword Arguments to receive from YOLOv5
72 | """
73 |
74 | assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
75 |
76 | for logger in self._callbacks[hook]:
77 | logger['callback'](*args, **kwargs)
78 |
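`Callbacks` is a plain registry: every hook name maps to a list of actions, `register_action` appends to that list, and `run` fires the actions in registration order with whatever arguments the training loop passes. A minimal usage sketch; the handler and its arguments are hypothetical, only the hook name and the class come from the file above:

```python
from utils.callbacks import Callbacks

def log_epoch(epoch, fitness):
    # hypothetical handler: just print what the training loop passed in
    print(f"epoch {epoch} finished, fitness={fitness:.4f}")

callbacks = Callbacks()
callbacks.register_action('on_fit_epoch_end', name='print_logger', callback=log_epoch)

# the training loop would call this once per epoch with its own arguments
callbacks.run('on_fit_epoch_end', 0, 0.1234)
```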
--------------------------------------------------------------------------------
/yolov5/utils/flask_rest_api/README.md:
--------------------------------------------------------------------------------
1 | # Flask REST API
2 |
3 | [REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are
4 | commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API
5 | created using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/).
6 |
7 | ## Requirements
8 |
9 | [Flask](https://palletsprojects.com/p/flask/) is required. Install with:
10 |
11 | ```shell
12 | $ pip install Flask
13 | ```
14 |
15 | ## Run
16 |
17 | After Flask installation run:
18 |
19 | ```shell
20 | $ python3 restapi.py --port 5000
21 | ```
22 |
23 | Then use [curl](https://curl.se/) to perform a request:
24 |
25 | ```shell
26 | $ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s'
27 | ```
28 |
29 | The model inference results are returned as a JSON response:
30 |
31 | ```json
32 | [
33 | {
34 | "class": 0,
35 | "confidence": 0.8900438547,
36 | "height": 0.9318675399,
37 | "name": "person",
38 | "width": 0.3264600933,
39 | "xcenter": 0.7438579798,
40 | "ycenter": 0.5207948685
41 | },
42 | {
43 | "class": 0,
44 | "confidence": 0.8440024257,
45 | "height": 0.7155083418,
46 | "name": "person",
47 | "width": 0.6546785235,
48 | "xcenter": 0.427829951,
49 | "ycenter": 0.6334488392
50 | },
51 | {
52 | "class": 27,
53 | "confidence": 0.3771208823,
54 | "height": 0.3902671337,
55 | "name": "tie",
56 | "width": 0.0696444362,
57 | "xcenter": 0.3675483763,
58 | "ycenter": 0.7991207838
59 | },
60 | {
61 | "class": 27,
62 | "confidence": 0.3527112305,
63 | "height": 0.1540903747,
64 | "name": "tie",
65 | "width": 0.0336618312,
66 | "xcenter": 0.7814827561,
67 | "ycenter": 0.5065554976
68 | }
69 | ]
70 | ```
71 |
 72 | An example Python script that performs inference using [requests](https://docs.python-requests.org/en/master/) is given
 73 | in `example_request.py`.
74 |
--------------------------------------------------------------------------------
/yolov5/utils/flask_rest_api/example_request.py:
--------------------------------------------------------------------------------
1 | """Perform test request"""
2 | import pprint
3 |
4 | import requests
5 |
6 | DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s"
7 | TEST_IMAGE = "zidane.jpg"
8 |
9 | image_data = open(TEST_IMAGE, "rb").read()
10 |
11 | response = requests.post(DETECTION_URL, files={"image": image_data}).json()
12 |
13 | pprint.pprint(response)
14 |
--------------------------------------------------------------------------------
/yolov5/utils/flask_rest_api/restapi.py:
--------------------------------------------------------------------------------
1 | """
 2 | Run a REST API exposing the yolov5s object detection model
3 | """
4 | import argparse
5 | import io
6 |
7 | import torch
8 | from flask import Flask, request
9 | from PIL import Image
10 |
11 | app = Flask(__name__)
12 |
13 | DETECTION_URL = "/v1/object-detection/yolov5s"
14 |
15 |
16 | @app.route(DETECTION_URL, methods=["POST"])
17 | def predict():
18 | if not request.method == "POST":
19 | return
20 |
21 | if request.files.get("image"):
22 | image_file = request.files["image"]
23 | image_bytes = image_file.read()
24 |
25 | img = Image.open(io.BytesIO(image_bytes))
26 |
27 | results = model(img, size=640) # reduce size=320 for faster inference
28 | return results.pandas().xyxy[0].to_json(orient="records")
29 |
30 |
31 | if __name__ == "__main__":
32 | parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model")
33 | parser.add_argument("--port", default=5000, type=int, help="port number")
34 | args = parser.parse_args()
35 |
36 | model = torch.hub.load("ultralytics/yolov5", "yolov5s", force_reload=True) # force_reload to recache
37 | app.run(host="0.0.0.0", port=args.port) # debug=True causes Restarting with stat
38 |
--------------------------------------------------------------------------------
/yolov5/utils/google_app_engine/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM gcr.io/google-appengine/python
2 |
3 | # Create a virtualenv for dependencies. This isolates these packages from
4 | # system-level packages.
5 | # Use -p python3 or -p python3.7 to select python version. Default is version 2.
6 | RUN virtualenv /env -p python3
7 |
 8 | # Setting these environment variables is the same as running
9 | # source /env/bin/activate.
10 | ENV VIRTUAL_ENV /env
11 | ENV PATH /env/bin:$PATH
12 |
13 | RUN apt-get update && apt-get install -y python-opencv
14 |
15 | # Copy the application's requirements.txt and run pip to install all
16 | # dependencies into the virtualenv.
17 | ADD requirements.txt /app/requirements.txt
18 | RUN pip install -r /app/requirements.txt
19 |
20 | # Add the application source code.
21 | ADD . /app
22 |
23 | # Run a WSGI server to serve the application. gunicorn must be declared as
24 | # a dependency in requirements.txt.
25 | CMD gunicorn -b :$PORT main:app
26 |
--------------------------------------------------------------------------------
/yolov5/utils/google_app_engine/additional_requirements.txt:
--------------------------------------------------------------------------------
 1 | # add these requirements to your app on top of the existing ones
2 | pip==21.1
3 | Flask==1.0.2
4 | gunicorn==19.9.0
5 |
--------------------------------------------------------------------------------
/yolov5/utils/google_app_engine/app.yaml:
--------------------------------------------------------------------------------
1 | runtime: custom
2 | env: flex
3 |
4 | service: yolov5app
5 |
6 | liveness_check:
7 | initial_delay_sec: 600
8 |
9 | manual_scaling:
10 | instances: 1
11 | resources:
12 | cpu: 1
13 | memory_gb: 4
14 | disk_size_gb: 20
15 |
--------------------------------------------------------------------------------
/yolov5/utils/loggers/wandb/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nicedaddy/Yolov5_DeepSort_Pytorch_lane_detection/c19f5f94a9026e90a56aeadb2ab79366d82116c9/yolov5/utils/loggers/wandb/__init__.py
--------------------------------------------------------------------------------
/yolov5/utils/loggers/wandb/log_dataset.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | from wandb_utils import WandbLogger
4 |
5 | from utils.general import LOGGER
6 |
7 | WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'
8 |
9 |
10 | def create_dataset_artifact(opt):
11 | logger = WandbLogger(opt, None, job_type='Dataset Creation') # TODO: return value unused
12 | if not logger.wandb:
13 | LOGGER.info("install wandb using `pip install wandb` to log the dataset")
14 |
15 |
16 | if __name__ == '__main__':
17 | parser = argparse.ArgumentParser()
18 | parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path')
19 | parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
20 | parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project')
21 | parser.add_argument('--entity', default=None, help='W&B entity')
22 | parser.add_argument('--name', type=str, default='log dataset', help='name of W&B run')
23 |
24 | opt = parser.parse_args()
25 | opt.resume = False # Explicitly disallow resume check for dataset upload job
26 |
27 | create_dataset_artifact(opt)
28 |
--------------------------------------------------------------------------------
/yolov5/utils/loggers/wandb/sweep.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from pathlib import Path
3 |
4 | import wandb
5 |
6 | FILE = Path(__file__).resolve()
7 | ROOT = FILE.parents[3] # YOLOv5 root directory
8 | if str(ROOT) not in sys.path:
9 | sys.path.append(str(ROOT)) # add ROOT to PATH
10 |
11 | from train import parse_opt, train
12 | from utils.callbacks import Callbacks
13 | from utils.general import increment_path
14 | from utils.torch_utils import select_device
15 |
16 |
17 | def sweep():
18 | wandb.init()
19 | # Get hyp dict from sweep agent
20 | hyp_dict = vars(wandb.config).get("_items")
21 |
22 | # Workaround: get necessary opt args
23 | opt = parse_opt(known=True)
24 | opt.batch_size = hyp_dict.get("batch_size")
25 | opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve))
26 | opt.epochs = hyp_dict.get("epochs")
27 | opt.nosave = True
28 | opt.data = hyp_dict.get("data")
29 | opt.weights = str(opt.weights)
30 | opt.cfg = str(opt.cfg)
31 | opt.data = str(opt.data)
32 | opt.hyp = str(opt.hyp)
33 | opt.project = str(opt.project)
34 | device = select_device(opt.device, batch_size=opt.batch_size)
35 |
36 | # train
37 | train(hyp_dict, opt, device, callbacks=Callbacks())
38 |
39 |
40 | if __name__ == "__main__":
41 | sweep()
42 |
--------------------------------------------------------------------------------
/yolov5/utils/loggers/wandb/sweep.yaml:
--------------------------------------------------------------------------------
1 | # Hyperparameters for training
 2 | # To set a range:
 3 | # provide min and max values as:
 4 | # parameter:
 5 | #
 6 | #   min: scalar
 7 | #   max: scalar
 8 | # OR
 9 | #
 10 | # set a specific list of values to search:
 11 | # parameter:
 12 | #   values: [scalar1, scalar2, scalar3...]
 13 | #
 14 | # You can use grid, random and Bayesian search strategies
 15 | # For more info on configuring sweeps visit https://docs.wandb.ai/guides/sweeps/configuration
16 |
17 | program: utils/loggers/wandb/sweep.py
18 | method: random
19 | metric:
20 | name: metrics/mAP_0.5
21 | goal: maximize
22 |
23 | parameters:
24 | # hyperparameters: set either min, max range or values list
25 | data:
26 | value: "data/coco128.yaml"
27 | batch_size:
28 | values: [64]
29 | epochs:
30 | values: [10]
31 |
32 | lr0:
33 | distribution: uniform
34 | min: 1e-5
35 | max: 1e-1
36 | lrf:
37 | distribution: uniform
38 | min: 0.01
39 | max: 1.0
40 | momentum:
41 | distribution: uniform
42 | min: 0.6
43 | max: 0.98
44 | weight_decay:
45 | distribution: uniform
46 | min: 0.0
47 | max: 0.001
48 | warmup_epochs:
49 | distribution: uniform
50 | min: 0.0
51 | max: 5.0
52 | warmup_momentum:
53 | distribution: uniform
54 | min: 0.0
55 | max: 0.95
56 | warmup_bias_lr:
57 | distribution: uniform
58 | min: 0.0
59 | max: 0.2
60 | box:
61 | distribution: uniform
62 | min: 0.02
63 | max: 0.2
64 | cls:
65 | distribution: uniform
66 | min: 0.2
67 | max: 4.0
68 | cls_pw:
69 | distribution: uniform
70 | min: 0.5
71 | max: 2.0
72 | obj:
73 | distribution: uniform
74 | min: 0.2
75 | max: 4.0
76 | obj_pw:
77 | distribution: uniform
78 | min: 0.5
79 | max: 2.0
80 | iou_t:
81 | distribution: uniform
82 | min: 0.1
83 | max: 0.7
84 | anchor_t:
85 | distribution: uniform
86 | min: 2.0
87 | max: 8.0
88 | fl_gamma:
89 | distribution: uniform
90 | min: 0.0
91 | max: 0.1
92 | hsv_h:
93 | distribution: uniform
94 | min: 0.0
95 | max: 0.1
96 | hsv_s:
97 | distribution: uniform
98 | min: 0.0
99 | max: 0.9
100 | hsv_v:
101 | distribution: uniform
102 | min: 0.0
103 | max: 0.9
104 | degrees:
105 | distribution: uniform
106 | min: 0.0
107 | max: 45.0
108 | translate:
109 | distribution: uniform
110 | min: 0.0
111 | max: 0.9
112 | scale:
113 | distribution: uniform
114 | min: 0.0
115 | max: 0.9
116 | shear:
117 | distribution: uniform
118 | min: 0.0
119 | max: 10.0
120 | perspective:
121 | distribution: uniform
122 | min: 0.0
123 | max: 0.001
124 | flipud:
125 | distribution: uniform
126 | min: 0.0
127 | max: 1.0
128 | fliplr:
129 | distribution: uniform
130 | min: 0.0
131 | max: 1.0
132 | mosaic:
133 | distribution: uniform
134 | min: 0.0
135 | max: 1.0
136 | mixup:
137 | distribution: uniform
138 | min: 0.0
139 | max: 1.0
140 | copy_paste:
141 | distribution: uniform
142 | min: 0.0
143 | max: 1.0
144 |
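This file drives a CLI-style sweep: the `program` key points at `sweep.py` above, and the sweep is launched with the `wandb sweep` / `wandb agent` commands. For illustration only, the same random-search idea expressed with W&B's function-based Python API, using a made-up objective in place of YOLOv5's `train`:

```python
import wandb

# a small subset of the search space above, purely for illustration
sweep_config = {
    "method": "random",
    "metric": {"name": "metrics/mAP_0.5", "goal": "maximize"},
    "parameters": {
        "lr0": {"distribution": "uniform", "min": 1e-5, "max": 1e-1},
        "momentum": {"distribution": "uniform", "min": 0.6, "max": 0.98},
    },
}

def objective():
    run = wandb.init()
    lr0, momentum = wandb.config.lr0, wandb.config.momentum
    # a real objective would train YOLOv5 with these values and log its mAP
    run.log({"metrics/mAP_0.5": 0.5})  # placeholder value for the sketch
    run.finish()

sweep_id = wandb.sweep(sweep_config, project="YOLOv5")
wandb.agent(sweep_id, function=objective, count=3)
```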
--------------------------------------------------------------------------------
/yolov5x.pt:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:89bd0a9a71639a9b607a82c53cfe2fa1272a867726c68fe2977c80a8f4a08abe
3 | size 174025507
4 |
--------------------------------------------------------------------------------