├── utils
│   ├── __init__.py
│   ├── aws
│   │   ├── __init__.py
│   │   ├── mime.sh
│   │   ├── resume.py
│   │   └── userdata.sh
│   ├── wandb_logging
│   │   ├── __init__.py
│   │   ├── log_dataset.py
│   │   └── wandb_utils.py
│   ├── __pycache__
│   │   ├── loss.cpython-39.pyc
│   │   ├── plots.cpython-39.pyc
│   │   ├── __init__.cpython-39.pyc
│   │   ├── datasets.cpython-39.pyc
│   │   ├── general.cpython-39.pyc
│   │   ├── metrics.cpython-39.pyc
│   │   ├── autoanchor.cpython-39.pyc
│   │   ├── google_utils.cpython-39.pyc
│   │   └── torch_utils.cpython-39.pyc
│   ├── google_app_engine
│   │   ├── additional_requirements.txt
│   │   ├── app.yaml
│   │   └── Dockerfile
│   ├── activations.py
│   ├── google_utils.py
│   ├── add_nms.py
│   ├── autoanchor.py
│   ├── metrics.py
│   ├── torch_utils.py
│   ├── plots.py
│   └── general.py
├── models
│   ├── __init__.py
│   ├── __pycache__
│   │   ├── yolo.cpython-39.pyc
│   │   ├── common.cpython-39.pyc
│   │   ├── __init__.cpython-39.pyc
│   │   └── experimental.cpython-39.pyc
│   └── experimental.py
├── __pycache__
│   └── sort.cpython-39.pyc
├── README.md
├── requirements.txt
├── data
│   ├── coco.yaml
│   ├── hyp.scratch.p5.yaml
│   ├── hyp.scratch.p6.yaml
│   ├── hyp.scratch.custom.yaml
│   └── hyp.scratch.tiny.yaml
├── sort.py
├── detect_speed.py
└── LICENSE

/utils/__init__.py:
--------------------------------------------------------------------------------
# init
--------------------------------------------------------------------------------
/models/__init__.py:
--------------------------------------------------------------------------------
# init
--------------------------------------------------------------------------------
/utils/aws/__init__.py:
--------------------------------------------------------------------------------
#init
--------------------------------------------------------------------------------
/utils/wandb_logging/__init__.py:
--------------------------------------------------------------------------------
# init
--------------------------------------------------------------------------------
/__pycache__/sort.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/krisnarengga/vehicle-speed-counting/HEAD/__pycache__/sort.cpython-39.pyc
--------------------------------------------------------------------------------
/models/__pycache__/yolo.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/krisnarengga/vehicle-speed-counting/HEAD/models/__pycache__/yolo.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/loss.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/krisnarengga/vehicle-speed-counting/HEAD/utils/__pycache__/loss.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/plots.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/krisnarengga/vehicle-speed-counting/HEAD/utils/__pycache__/plots.cpython-39.pyc
--------------------------------------------------------------------------------
/models/__pycache__/common.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/krisnarengga/vehicle-speed-counting/HEAD/models/__pycache__/common.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/krisnarengga/vehicle-speed-counting/HEAD/utils/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/datasets.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/krisnarengga/vehicle-speed-counting/HEAD/utils/__pycache__/datasets.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/general.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/krisnarengga/vehicle-speed-counting/HEAD/utils/__pycache__/general.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/metrics.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/krisnarengga/vehicle-speed-counting/HEAD/utils/__pycache__/metrics.cpython-39.pyc
--------------------------------------------------------------------------------
/models/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/krisnarengga/vehicle-speed-counting/HEAD/models/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/autoanchor.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/krisnarengga/vehicle-speed-counting/HEAD/utils/__pycache__/autoanchor.cpython-39.pyc
--------------------------------------------------------------------------------
/models/__pycache__/experimental.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/krisnarengga/vehicle-speed-counting/HEAD/models/__pycache__/experimental.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/google_utils.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/krisnarengga/vehicle-speed-counting/HEAD/utils/__pycache__/google_utils.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/torch_utils.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/krisnarengga/vehicle-speed-counting/HEAD/utils/__pycache__/torch_utils.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/google_app_engine/additional_requirements.txt:
--------------------------------------------------------------------------------
# add these requirements in your app on top of the existing ones
pip==18.1
Flask==1.0.2
gunicorn==19.9.0
--------------------------------------------------------------------------------
/utils/google_app_engine/app.yaml:
--------------------------------------------------------------------------------
runtime: custom
env: flex

service: yolorapp

liveness_check:
  initial_delay_sec: 600

manual_scaling:
  instances: 1
resources:
  cpu: 1
  memory_gb: 4
  disk_size_gb: 20
--------------------------------------------------------------------------------
/utils/aws/mime.sh:
--------------------------------------------------------------------------------
# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/
# This script will run on every instance restart, not only on first start
# --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA ---

Content-Type: multipart/mixed; boundary="//"
MIME-Version: 1.0

--//
Content-Type: text/cloud-config; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment; filename="cloud-config.txt"

#cloud-config
cloud_final_modules:
- [scripts-user, always]

--//
Content-Type: text/x-shellscript; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment; filename="userdata.txt"

#!/bin/bash
# --- paste contents of userdata.sh here ---
--//
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# vehicle-speed-counting
This project demonstrates simple vehicle speed estimation on toll roads and highways.

This app is built on the YOLOv7 object detection engine from the WongKinYiu repository at https://github.com/WongKinYiu/yolov7

Object tracking uses the SORT implementation (see sort.py) from
https://github.com/RizwanMunawar/yolov7-object-tracking/

It is also based on the PySource.com tutorial:
https://www.youtube.com/watch?v=j10j8IuKSBI&t=0s

Run the app with:
python detect_speed.py --weights yolov7-tiny.pt --source traffic.mp4 --view-img --nosave --no-trace

Notes:
You can download the yolov7-tiny or yolov7 weights from the YOLOv7 release assets at https://github.com/WongKinYiu/yolov7/releases

App Demo:
https://www.youtube.com/watch?v=dFxkekwXPCQ

Sample Video:
https://drive.google.com/file/d/119FPTQ3FyK6J_16WZUzQUCD9R0PD9In-/view?usp=sharing
--------------------------------------------------------------------------------
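A hedged usage note: the command above passes YOLOv7 detect.py-style flags through detect_speed.py, so if detect_speed.py keeps detect.py's --source handling (an assumption; detect_speed.py's contents are not included in this section), a webcam index or stream URL should also work as the source:

python detect_speed.py --weights yolov7-tiny.pt --source 0 --view-img --nosave --no-trace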
/utils/wandb_logging/log_dataset.py:
--------------------------------------------------------------------------------
import argparse

import yaml

from wandb_utils import WandbLogger

WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'


def create_dataset_artifact(opt):
    with open(opt.data) as f:
        data = yaml.load(f, Loader=yaml.SafeLoader)  # data dict
    logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation')


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', type=str, default='data/coco.yaml', help='data.yaml path')
    parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
    parser.add_argument('--project', type=str, default='YOLOR', help='name of W&B Project')
    opt = parser.parse_args()
    opt.resume = False  # Explicitly disallow resume check for dataset upload job

    create_dataset_artifact(opt)
--------------------------------------------------------------------------------
/utils/google_app_engine/Dockerfile:
--------------------------------------------------------------------------------
FROM gcr.io/google-appengine/python

# Create a virtualenv for dependencies. This isolates these packages from
# system-level packages.
# Use -p python3 or -p python3.7 to select python version. Default is version 2.
RUN virtualenv /env -p python3

# Setting these environment variables is the same as running
# source /env/bin/activate.
ENV VIRTUAL_ENV /env
ENV PATH /env/bin:$PATH

RUN apt-get update && apt-get install -y python-opencv

# Copy the application's requirements.txt and run pip to install all
# dependencies into the virtualenv.
ADD requirements.txt /app/requirements.txt
RUN pip install -r /app/requirements.txt

# Add the application source code.
ADD . /app

# Run a WSGI server to serve the application. gunicorn must be declared as
# a dependency in requirements.txt.
CMD gunicorn -b :$PORT main:app
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
# Usage: pip install -r requirements.txt

# Base ----------------------------------------
matplotlib>=3.2.2
numpy>=1.18.5
opencv-python>=4.1.1
Pillow>=7.1.2
PyYAML>=5.3.1
requests>=2.23.0
scipy>=1.4.1
torch>=1.7.0,!=1.12.0
torchvision>=0.8.1,!=0.13.0
tqdm>=4.41.0
protobuf<4.21.3

# Tracking ------------------------------------
filterpy
scikit-image

# Logging -------------------------------------
tensorboard>=2.4.1
# wandb

# Plotting ------------------------------------
pandas>=1.1.4
seaborn>=0.11.0

# Export --------------------------------------
# coremltools>=4.1  # CoreML export
# onnx>=1.9.0  # ONNX export
# onnx-simplifier>=0.3.6  # ONNX simplifier
# scikit-learn==0.19.2  # CoreML quantization
# tensorflow>=2.4.1  # TFLite export
# tensorflowjs>=3.9.0  # TF.js export
# openvino-dev  # OpenVINO export

# Extras --------------------------------------
ipython  # interactive notebook
psutil  # system utilization
thop  # FLOPs computation
# albumentations>=1.0.3
# pycocotools>=2.0  # COCO mAP
# roboflow
--------------------------------------------------------------------------------
/utils/aws/resume.py:
--------------------------------------------------------------------------------
# Resume all interrupted trainings in yolor/ dir including DDP trainings
# Usage: $ python utils/aws/resume.py

import os
import sys
from pathlib import Path

import torch
import yaml

sys.path.append('./')  # to run '$ python *.py' files in subdirectories

port = 0  # --master_port
path = Path('').resolve()
for last in path.rglob('*/**/last.pt'):
    ckpt = torch.load(last)
    if ckpt['optimizer'] is None:
        continue

    # Load opt.yaml
    with open(last.parent.parent / 'opt.yaml') as f:
        opt = yaml.load(f, Loader=yaml.SafeLoader)

    # Get device count
    d = opt['device'].split(',')  # devices
    nd = len(d)  # number of devices
    ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1)  # distributed data parallel

    if ddp:  # multi-GPU
        port += 1
        cmd = f'python -m torch.distributed.launch --nproc_per_node {nd} --master_port {port} train.py --resume {last}'
    else:  # single-GPU
        cmd = f'python train.py --resume {last}'

    cmd += ' > /dev/null 2>&1 &'  # redirect output to /dev/null and run in the background
    print(cmd)
    os.system(cmd)
--------------------------------------------------------------------------------
/utils/aws/userdata.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html
# This script will run only once on first instance start (for a re-start script see mime.sh)
# /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir
# Use >300 GB SSD

cd home/ubuntu
if [ ! -d yolor ]; then
  echo "Running first-time script."  # install dependencies, download COCO, pull Docker
  git clone -b paper https://github.com/WongKinYiu/yolor && sudo chmod -R 777 yolor
  cd yolor
  bash data/scripts/get_coco.sh && echo "Data done." &
  sudo docker pull nvcr.io/nvidia/pytorch:21.08-py3 && echo "Docker done." &
  python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." &
  wait && echo "All tasks done."  # finish background tasks
else
  echo "Running re-start script."  # resume interrupted runs
  i=0
  list=$(sudo docker ps -qa)  # container list i.e. $'one\ntwo\nthree\nfour'
  while IFS= read -r id; do
    ((i++))
    echo "restarting container $i: $id"
    sudo docker start $id
    # sudo docker exec -it $id python train.py --resume  # single-GPU
    sudo docker exec -d $id python utils/aws/resume.py  # multi-scenario
  done <<<"$list"
fi
--------------------------------------------------------------------------------
/data/coco.yaml:
--------------------------------------------------------------------------------
# COCO 2017 dataset http://cocodataset.org

# download command/URL (optional)
download: bash ./scripts/get_coco.sh

# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
train: ./coco/train2017.txt  # 118287 images
val: ./coco/val2017.txt  # 5000 images
test: ./coco/test-dev2017.txt  # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794

# number of classes
nc: 80

# class names
names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
         'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
         'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
         'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
         'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
         'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
         'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
         'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
         'hair drier', 'toothbrush' ]
--------------------------------------------------------------------------------
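A hedged sketch of a custom dataset config following the three data formats documented in coco.yaml above; the 'vehicle' paths and four-class list are hypothetical, and the snippet parses the config the same way log_dataset.py and resume.py load YAML:

import yaml

# Hypothetical custom config in the coco.yaml format; paths and names are illustrative only.
custom = yaml.safe_load('''
train: ./vehicle/images/train/   # 1) directory form
val: ./vehicle/val.txt           # 2) file-of-paths form
nc: 4
names: [ 'car', 'motorcycle', 'bus', 'truck' ]
''')
assert custom['nc'] == len(custom['names'])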
/data/hyp.scratch.p5.yaml:
--------------------------------------------------------------------------------
lr0: 0.01  # initial learning rate (SGD=1E-2, Adam=1E-3)
lrf: 0.1  # final OneCycleLR learning rate (lr0 * lrf)
momentum: 0.937  # SGD momentum/Adam beta1
weight_decay: 0.0005  # optimizer weight decay 5e-4
warmup_epochs: 3.0  # warmup epochs (fractions ok)
warmup_momentum: 0.8  # warmup initial momentum
warmup_bias_lr: 0.1  # warmup initial bias lr
box: 0.05  # box loss gain
cls: 0.3  # cls loss gain
cls_pw: 1.0  # cls BCELoss positive_weight
obj: 0.7  # obj loss gain (scale with pixels)
obj_pw: 1.0  # obj BCELoss positive_weight
iou_t: 0.20  # IoU training threshold
anchor_t: 4.0  # anchor-multiple threshold
# anchors: 3  # anchors per output layer (0 to ignore)
fl_gamma: 0.0  # focal loss gamma (efficientDet default gamma=1.5)
hsv_h: 0.015  # image HSV-Hue augmentation (fraction)
hsv_s: 0.7  # image HSV-Saturation augmentation (fraction)
hsv_v: 0.4  # image HSV-Value augmentation (fraction)
degrees: 0.0  # image rotation (+/- deg)
translate: 0.2  # image translation (+/- fraction)
scale: 0.9  # image scale (+/- gain)
shear: 0.0  # image shear (+/- deg)
perspective: 0.0  # image perspective (+/- fraction), range 0-0.001
flipud: 0.0  # image flip up-down (probability)
fliplr: 0.5  # image flip left-right (probability)
mosaic: 1.0  # image mosaic (probability)
mixup: 0.15  # image mixup (probability)
copy_paste: 0.0  # image copy paste (probability)
paste_in: 0.15  # image copy paste (probability), use 0 for faster training
loss_ota: 1  # use ComputeLossOTA, use 0 for faster training
--------------------------------------------------------------------------------
/data/hyp.scratch.p6.yaml:
--------------------------------------------------------------------------------
lr0: 0.01  # initial learning rate (SGD=1E-2, Adam=1E-3)
lrf: 0.2  # final OneCycleLR learning rate (lr0 * lrf)
momentum: 0.937  # SGD momentum/Adam beta1
weight_decay: 0.0005  # optimizer weight decay 5e-4
warmup_epochs: 3.0  # warmup epochs (fractions ok)
warmup_momentum: 0.8  # warmup initial momentum
warmup_bias_lr: 0.1  # warmup initial bias lr
box: 0.05  # box loss gain
cls: 0.3  # cls loss gain
cls_pw: 1.0  # cls BCELoss positive_weight
obj: 0.7  # obj loss gain (scale with pixels)
obj_pw: 1.0  # obj BCELoss positive_weight
iou_t: 0.20  # IoU training threshold
anchor_t: 4.0  # anchor-multiple threshold
# anchors: 3  # anchors per output layer (0 to ignore)
fl_gamma: 0.0  # focal loss gamma (efficientDet default gamma=1.5)
hsv_h: 0.015  # image HSV-Hue augmentation (fraction)
hsv_s: 0.7  # image HSV-Saturation augmentation (fraction)
hsv_v: 0.4  # image HSV-Value augmentation (fraction)
degrees: 0.0  # image rotation (+/- deg)
translate: 0.2  # image translation (+/- fraction)
scale: 0.9  # image scale (+/- gain)
shear: 0.0  # image shear (+/- deg)
perspective: 0.0  # image perspective (+/- fraction), range 0-0.001
flipud: 0.0  # image flip up-down (probability)
fliplr: 0.5  # image flip left-right (probability)
mosaic: 1.0  # image mosaic (probability)
mixup: 0.15  # image mixup (probability)
copy_paste: 0.0  # image copy paste (probability)
paste_in: 0.15  # image copy paste (probability), use 0 for faster training
loss_ota: 1  # use ComputeLossOTA, use 0 for faster training
--------------------------------------------------------------------------------
/data/hyp.scratch.custom.yaml:
--------------------------------------------------------------------------------
lr0: 0.01  # initial learning rate (SGD=1E-2, Adam=1E-3)
lrf: 0.1  # final OneCycleLR learning rate (lr0 * lrf)
momentum: 0.937  # SGD momentum/Adam beta1
weight_decay: 0.0005  # optimizer weight decay 5e-4
warmup_epochs: 3.0  # warmup epochs (fractions ok)
warmup_momentum: 0.8  # warmup initial momentum
warmup_bias_lr: 0.1  # warmup initial bias lr
box: 0.05  # box loss gain
cls: 0.3  # cls loss gain
cls_pw: 1.0  # cls BCELoss positive_weight
obj: 0.7  # obj loss gain (scale with pixels)
obj_pw: 1.0  # obj BCELoss positive_weight
iou_t: 0.20  # IoU training threshold
anchor_t: 4.0  # anchor-multiple threshold
# anchors: 3  # anchors per output layer (0 to ignore)
fl_gamma: 0.0  # focal loss gamma (efficientDet default gamma=1.5)
hsv_h: 0.015  # image HSV-Hue augmentation (fraction)
hsv_s: 0.7  # image HSV-Saturation augmentation (fraction)
hsv_v: 0.4  # image HSV-Value augmentation (fraction)
degrees: 0.0  # image rotation (+/- deg)
translate: 0.2  # image translation (+/- fraction)
scale: 0.5  # image scale (+/- gain)
shear: 0.0  # image shear (+/- deg)
perspective: 0.0  # image perspective (+/- fraction), range 0-0.001
flipud: 0.0  # image flip up-down (probability)
fliplr: 0.5  # image flip left-right (probability)
mosaic: 1.0  # image mosaic (probability)
mixup: 0.0  # image mixup (probability)
copy_paste: 0.0  # image copy paste (probability)
paste_in: 0.0  # image copy paste (probability), use 0 for faster training
loss_ota: 1  # use ComputeLossOTA, use 0 for faster training
--------------------------------------------------------------------------------
/data/hyp.scratch.tiny.yaml:
--------------------------------------------------------------------------------
lr0: 0.01  # initial learning rate (SGD=1E-2, Adam=1E-3)
lrf: 0.01  # final OneCycleLR learning rate (lr0 * lrf)
momentum: 0.937  # SGD momentum/Adam beta1
weight_decay: 0.0005  # optimizer weight decay 5e-4
warmup_epochs: 3.0  # warmup epochs (fractions ok)
warmup_momentum: 0.8  # warmup initial momentum
warmup_bias_lr: 0.1  # warmup initial bias lr
box: 0.05  # box loss gain
cls: 0.5  # cls loss gain
cls_pw: 1.0  # cls BCELoss positive_weight
obj: 1.0  # obj loss gain (scale with pixels)
obj_pw: 1.0  # obj BCELoss positive_weight
iou_t: 0.20  # IoU training threshold
anchor_t: 4.0  # anchor-multiple threshold
# anchors: 3  # anchors per output layer (0 to ignore)
fl_gamma: 0.0  # focal loss gamma (efficientDet default gamma=1.5)
hsv_h: 0.015  # image HSV-Hue augmentation (fraction)
hsv_s: 0.7  # image HSV-Saturation augmentation (fraction)
hsv_v: 0.4  # image HSV-Value augmentation (fraction)
degrees: 0.0  # image rotation (+/- deg)
translate: 0.1  # image translation (+/- fraction)
scale: 0.5  # image scale (+/- gain)
shear: 0.0  # image shear (+/- deg)
perspective: 0.0  # image perspective (+/- fraction), range 0-0.001
flipud: 0.0  # image flip up-down (probability)
fliplr: 0.5  # image flip left-right (probability)
mosaic: 1.0  # image mosaic (probability)
mixup: 0.05  # image mixup (probability)
copy_paste: 0.0  # image copy paste (probability)
paste_in: 0.05  # image copy paste (probability), use 0 for faster training
loss_ota: 1  # use ComputeLossOTA, use 0 for faster training
--------------------------------------------------------------------------------
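The lrf comment in these files means the OneCycleLR schedule ends at lr0 * lrf. A minimal sketch of reading one of these files, mirroring the yaml.load(..., Loader=yaml.SafeLoader) pattern used elsewhere in this repo (log_dataset.py, resume.py):

import yaml

with open('data/hyp.scratch.tiny.yaml') as f:
    hyp = yaml.load(f, Loader=yaml.SafeLoader)

# For hyp.scratch.tiny.yaml: 0.01 * 0.01 = 0.0001
print(f"initial lr = {hyp['lr0']}, final OneCycleLR lr = {hyp['lr0'] * hyp['lrf']}")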
/utils/activations.py:
--------------------------------------------------------------------------------
# Activation functions

import torch
import torch.nn as nn
import torch.nn.functional as F


# SiLU https://arxiv.org/pdf/1606.08415.pdf ----------------------------------------------------------------------------
class SiLU(nn.Module):  # export-friendly version of nn.SiLU()
    @staticmethod
    def forward(x):
        return x * torch.sigmoid(x)


class Hardswish(nn.Module):  # export-friendly version of nn.Hardswish()
    @staticmethod
    def forward(x):
        # return x * F.hardsigmoid(x)  # for torchscript and CoreML
        return x * F.hardtanh(x + 3, 0., 6.) / 6.  # for torchscript, CoreML and ONNX


class MemoryEfficientSwish(nn.Module):
    class F(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            ctx.save_for_backward(x)
            return x * torch.sigmoid(x)

        @staticmethod
        def backward(ctx, grad_output):
            x = ctx.saved_tensors[0]
            sx = torch.sigmoid(x)
            return grad_output * (sx * (1 + x * (1 - sx)))

    def forward(self, x):
        return self.F.apply(x)


# Mish https://github.com/digantamisra98/Mish --------------------------------------------------------------------------
class Mish(nn.Module):
    @staticmethod
    def forward(x):
        return x * F.softplus(x).tanh()


class MemoryEfficientMish(nn.Module):
    class F(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            ctx.save_for_backward(x)
            return x.mul(torch.tanh(F.softplus(x)))  # x * tanh(ln(1 + exp(x)))

        @staticmethod
        def backward(ctx, grad_output):
            x = ctx.saved_tensors[0]
            sx = torch.sigmoid(x)
            fx = F.softplus(x).tanh()
            return grad_output * (fx + x * sx * (1 - fx * fx))

    def forward(self, x):
        return self.F.apply(x)


# FReLU https://arxiv.org/abs/2007.11824 -------------------------------------------------------------------------------
class FReLU(nn.Module):
    def __init__(self, c1, k=3):  # ch_in, kernel
        super().__init__()
        self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False)
        self.bn = nn.BatchNorm2d(c1)

    def forward(self, x):
        return torch.max(x, self.bn(self.conv(x)))
--------------------------------------------------------------------------------
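A quick numerical sanity check (a sketch, assuming utils/activations.py is importable from the repo root): the export-friendly SiLU and Hardswish above should match PyTorch's built-in activations.

import torch
import torch.nn.functional as F
from utils.activations import SiLU, Hardswish

x = torch.randn(64)
assert torch.allclose(SiLU.forward(x), F.silu(x), atol=1e-6)            # x * sigmoid(x)
assert torch.allclose(Hardswish.forward(x), F.hardswish(x), atol=1e-6)  # x * hardtanh(x+3, 0, 6) / 6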
/utils/google_utils.py:
--------------------------------------------------------------------------------
# Google utils: https://cloud.google.com/storage/docs/reference/libraries

import os
import platform
import subprocess
import time
from pathlib import Path

import requests
import torch


def gsutil_getsize(url=''):
    # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du
    s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8')
    return eval(s.split(' ')[0]) if len(s) else 0  # bytes


def attempt_download(file, repo='WongKinYiu/yolov7'):
    # Attempt file download if does not exist
    file = Path(str(file).strip().replace("'", '').lower())

    if not file.exists():
        try:
            response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json()  # github api
            assets = [x['name'] for x in response['assets']]  # release assets
            tag = response['tag_name']  # i.e. 'v1.0'
        except Exception:  # fallback plan
            assets = ['yolov7.pt', 'yolov7-tiny.pt', 'yolov7x.pt', 'yolov7-d6.pt', 'yolov7-e6.pt',
                      'yolov7-e6e.pt', 'yolov7-w6.pt']
            tag = subprocess.check_output('git tag', shell=True).decode().split()[-1]

        name = file.name
        if name in assets:
            msg = f'{file} missing, try downloading from https://github.com/{repo}/releases/'
            redundant = False  # second download option
            try:  # GitHub
                url = f'https://github.com/{repo}/releases/download/{tag}/{name}'
                print(f'Downloading {url} to {file}...')
                torch.hub.download_url_to_file(url, file)
                assert file.exists() and file.stat().st_size > 1E6  # check
            except Exception as e:  # GCP
                print(f'Download error: {e}')
                assert redundant, 'No secondary mirror'
                url = f'https://storage.googleapis.com/{repo}/ckpt/{name}'
                print(f'Downloading {url} to {file}...')
                os.system(f'curl -L {url} -o {file}')  # torch.hub.download_url_to_file(url, weights)
            finally:
                if not file.exists() or file.stat().st_size < 1E6:  # check
                    file.unlink(missing_ok=True)  # remove partial downloads
                    print(f'ERROR: Download failure: {msg}')
                print('')
                return


def gdrive_download(id='', file='tmp.zip'):
    # Downloads a file from Google Drive. from yolov7.utils.google_utils import *; gdrive_download()
    t = time.time()
    file = Path(file)
    cookie = Path('cookie')  # gdrive cookie
    print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... ', end='')
    file.unlink(missing_ok=True)  # remove existing file
    cookie.unlink(missing_ok=True)  # remove existing cookie

    # Attempt file download
    out = "NUL" if platform.system() == "Windows" else "/dev/null"
    os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}')
    if os.path.exists('cookie'):  # large file
        s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}'
    else:  # small file
        s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"'
    r = os.system(s)  # execute, capture return
    cookie.unlink(missing_ok=True)  # remove existing cookie

    # Error check
    if r != 0:
        file.unlink(missing_ok=True)  # remove partial
        print('Download error ')  # raise Exception('Download error')
        return r

    # Unzip if archive
    if file.suffix == '.zip':
        print('unzipping... ', end='')
        os.system(f'unzip -q {file}')  # unzip
        file.unlink()  # remove zip to free space

    print(f'Done ({time.time() - t:.1f}s)')
    return r


def get_token(cookie="./cookie"):
    with open(cookie) as f:
        for line in f:
            if "download" in line:
                return line.split()[-1]
    return ""

# def upload_blob(bucket_name, source_file_name, destination_blob_name):
#     # Uploads a file to a bucket
#     # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python
#
#     storage_client = storage.Client()
#     bucket = storage_client.get_bucket(bucket_name)
#     blob = bucket.blob(destination_blob_name)
#
#     blob.upload_from_filename(source_file_name)
#
#     print('File {} uploaded to {}.'.format(
#         source_file_name,
#         destination_blob_name))
#
#
# def download_blob(bucket_name, source_blob_name, destination_file_name):
#     # Uploads a blob from a bucket
#     storage_client = storage.Client()
#     bucket = storage_client.get_bucket(bucket_name)
#     blob = bucket.blob(source_blob_name)
#
#     blob.download_to_filename(destination_file_name)
#
#     print('Blob {} downloaded to {}.'.format(
#         source_blob_name,
#         destination_file_name))
--------------------------------------------------------------------------------
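A hedged usage sketch of attempt_download above: given a missing weights file whose name matches a YOLOv7 release asset, it is fetched from the latest GitHub release (the filename here is just the one the README uses):

from utils.google_utils import attempt_download

# Downloads yolov7-tiny.pt from the WongKinYiu/yolov7 releases if not already present.
attempt_download('yolov7-tiny.pt')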
/utils/add_nms.py:
--------------------------------------------------------------------------------
import numpy as np
import onnx
from onnx import shape_inference
try:
    import onnx_graphsurgeon as gs
except Exception as e:
    print('Import onnx_graphsurgeon failure: %s' % e)

import logging

LOGGER = logging.getLogger(__name__)

class RegisterNMS(object):
    def __init__(
        self,
        onnx_model_path: str,
        precision: str = "fp32",
    ):

        self.graph = gs.import_onnx(onnx.load(onnx_model_path))
        assert self.graph
        LOGGER.info("ONNX graph created successfully")
        # Fold constants via ONNX-GS that PyTorch2ONNX may have missed
        self.graph.fold_constants()
        self.precision = precision
        self.batch_size = 1

    def infer(self):
        """
        Sanitize the graph by cleaning any unconnected nodes, do a topological resort,
        and fold constant inputs values. When possible, run shape inference on the
        ONNX graph to determine tensor shapes.
        """
        for _ in range(3):
            count_before = len(self.graph.nodes)

            self.graph.cleanup().toposort()
            try:
                for node in self.graph.nodes:
                    for o in node.outputs:
                        o.shape = None
                model = gs.export_onnx(self.graph)
                model = shape_inference.infer_shapes(model)
                self.graph = gs.import_onnx(model)
            except Exception as e:
                LOGGER.info(f"Shape inference could not be performed at this time:\n{e}")
            try:
                self.graph.fold_constants(fold_shapes=True)
            except TypeError as e:
                LOGGER.error(
                    "This version of ONNX GraphSurgeon does not support folding shapes, "
                    f"please upgrade your onnx_graphsurgeon module. Error:\n{e}"
                )
                raise

            count_after = len(self.graph.nodes)
            if count_before == count_after:
                # No new folding occurred in this iteration, so we can stop for now.
                break

    def save(self, output_path):
        """
        Save the ONNX model to the given location.
        Args:
            output_path: Path pointing to the location where to write
                out the updated ONNX model.
        """
        self.graph.cleanup().toposort()
        model = gs.export_onnx(self.graph)
        onnx.save(model, output_path)
        LOGGER.info(f"Saved ONNX model to {output_path}")

    def register_nms(
        self,
        *,
        score_thresh: float = 0.25,
        nms_thresh: float = 0.45,
        detections_per_img: int = 100,
    ):
        """
        Register the ``EfficientNMS_TRT`` plugin node.
        NMS expects these shapes for its input tensors:
            - box_net: [batch_size, number_boxes, 4]
            - class_net: [batch_size, number_boxes, number_labels]
        Args:
            score_thresh (float): The scalar threshold for score (low scoring boxes are removed).
            nms_thresh (float): The scalar threshold for IOU (new boxes that have high IOU
                overlap with previously selected boxes are removed).
            detections_per_img (int): Number of best detections to keep after NMS.
        """

        self.infer()
        # Find the concat node at the end of the network
        op_inputs = self.graph.outputs
        op = "EfficientNMS_TRT"
        attrs = {
            "plugin_version": "1",
            "background_class": -1,  # no background class
            "max_output_boxes": detections_per_img,
            "score_threshold": score_thresh,
            "iou_threshold": nms_thresh,
            "score_activation": False,
            "box_coding": 0,
        }

        if self.precision == "fp32":
            dtype_output = np.float32
        elif self.precision == "fp16":
            dtype_output = np.float16
        else:
            raise NotImplementedError(f"Unsupported precision: {self.precision}")

        # NMS Outputs
        output_num_detections = gs.Variable(
            name="num_dets",
            dtype=np.int32,
            shape=[self.batch_size, 1],
        )  # A scalar indicating the number of valid detections per batch image.
        output_boxes = gs.Variable(
            name="det_boxes",
            dtype=dtype_output,
            shape=[self.batch_size, detections_per_img, 4],
        )
        output_scores = gs.Variable(
            name="det_scores",
            dtype=dtype_output,
            shape=[self.batch_size, detections_per_img],
        )
        output_labels = gs.Variable(
            name="det_classes",
            dtype=np.int32,
            shape=[self.batch_size, detections_per_img],
        )

        op_outputs = [output_num_detections, output_boxes, output_scores, output_labels]

        # Create the NMS Plugin node with the selected inputs. The outputs of the node will also
        # become the final outputs of the graph.
        self.graph.layer(op=op, name="batched_nms", inputs=op_inputs, outputs=op_outputs, attrs=attrs)
        LOGGER.info(f"Created NMS plugin '{op}' with attributes: {attrs}")

        self.graph.outputs = op_outputs

        self.infer()

    def save(self, output_path):
        """
        Save the ONNX model to the given location.
        Args:
            output_path: Path pointing to the location where to write
                out the updated ONNX model.
        """
        self.graph.cleanup().toposort()
        model = gs.export_onnx(self.graph)
        onnx.save(model, output_path)
        LOGGER.info(f"Saved ONNX model to {output_path}")
--------------------------------------------------------------------------------
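A hedged usage sketch of RegisterNMS above: load an exported ONNX model, append the EfficientNMS_TRT plugin node, and save the result; 'yolov7.onnx' and 'yolov7-nms.onnx' are placeholder paths:

from utils.add_nms import RegisterNMS

nms = RegisterNMS('yolov7.onnx', precision='fp32')
nms.register_nms(score_thresh=0.25, nms_thresh=0.45, detections_per_img=100)
nms.save('yolov7-nms.onnx')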
/utils/autoanchor.py:
--------------------------------------------------------------------------------
# Auto-anchor utils

import numpy as np
import torch
import yaml
from scipy.cluster.vq import kmeans
from tqdm import tqdm

from utils.general import colorstr


def check_anchor_order(m):
    # Check anchor order against stride order for YOLO Detect() module m, and correct if necessary
    a = m.anchor_grid.prod(-1).view(-1)  # anchor area
    da = a[-1] - a[0]  # delta a
    ds = m.stride[-1] - m.stride[0]  # delta s
    if da.sign() != ds.sign():  # anchor and stride orders differ
        print('Reversing anchor order')
        m.anchors[:] = m.anchors.flip(0)
        m.anchor_grid[:] = m.anchor_grid.flip(0)


def check_anchors(dataset, model, thr=4.0, imgsz=640):
    # Check anchor fit to data, recompute if necessary
    prefix = colorstr('autoanchor: ')
    print(f'\n{prefix}Analyzing anchors... ', end='')
    m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1]  # Detect()
    shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
    scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1))  # augment scale
    wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float()  # wh

    def metric(k):  # compute metric
        r = wh[:, None] / k[None]
        x = torch.min(r, 1. / r).min(2)[0]  # ratio metric
        best = x.max(1)[0]  # best_x
        aat = (x > 1. / thr).float().sum(1).mean()  # anchors above threshold
        bpr = (best > 1. / thr).float().mean()  # best possible recall
        return bpr, aat

    anchors = m.anchor_grid.clone().cpu().view(-1, 2)  # current anchors
    bpr, aat = metric(anchors)
    print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}', end='')
    if bpr < 0.98:  # threshold to recompute
        print('. Attempting to improve anchors, please wait...')
        na = m.anchor_grid.numel() // 2  # number of anchors
        try:
            anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
        except Exception as e:
            print(f'{prefix}ERROR: {e}')
        new_bpr = metric(anchors)[0]
        if new_bpr > bpr:  # replace anchors
            anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors)
            m.anchor_grid[:] = anchors.clone().view_as(m.anchor_grid)  # for inference
            check_anchor_order(m)
            m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1)  # loss
            print(f'{prefix}New anchors saved to model. Update model *.yaml to use these anchors in the future.')
        else:
            print(f'{prefix}Original anchors better than new anchors. Proceeding with original anchors.')
    print('')  # newline


def kmean_anchors(path='./data/coco.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
    """ Creates kmeans-evolved anchors from training dataset

        Arguments:
            path: path to dataset *.yaml, or a loaded dataset
            n: number of anchors
            img_size: image size used for training
            thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
            gen: generations to evolve anchors using genetic algorithm
            verbose: print all results

        Return:
            k: kmeans evolved anchors

        Usage:
            from utils.autoanchor import *; _ = kmean_anchors()
    """
    thr = 1. / thr
    prefix = colorstr('autoanchor: ')

    def metric(k, wh):  # compute metrics
        r = wh[:, None] / k[None]
        x = torch.min(r, 1. / r).min(2)[0]  # ratio metric
        # x = wh_iou(wh, torch.tensor(k))  # iou metric
        return x, x.max(1)[0]  # x, best_x

    def anchor_fitness(k):  # mutation fitness
        _, best = metric(torch.tensor(k, dtype=torch.float32), wh)
        return (best * (best > thr).float()).mean()  # fitness

    def print_results(k):
        k = k[np.argsort(k.prod(1))]  # sort small to large
        x, best = metric(k, wh0)
        bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n  # best possible recall, anch > thr
        print(f'{prefix}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr')
        print(f'{prefix}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, '
              f'past_thr={x[x > thr].mean():.3f}-mean: ', end='')
        for i, x in enumerate(k):
            print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n')  # use in *.cfg
        return k

    if isinstance(path, str):  # *.yaml file
        with open(path) as f:
            data_dict = yaml.load(f, Loader=yaml.SafeLoader)  # model dict
        from utils.datasets import LoadImagesAndLabels
        dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
    else:
        dataset = path  # dataset

    # Get label wh
    shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
    wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)])  # wh

    # Filter
    i = (wh0 < 3.0).any(1).sum()
    if i:
        print(f'{prefix}WARNING: Extremely small objects found. {i} of {len(wh0)} labels are < 3 pixels in size.')
    wh = wh0[(wh0 >= 2.0).any(1)]  # filter > 2 pixels
    # wh = wh * (np.random.rand(wh.shape[0], 1) * 0.9 + 0.1)  # multiply by random scale 0-1

    # Kmeans calculation
    print(f'{prefix}Running kmeans for {n} anchors on {len(wh)} points...')
    s = wh.std(0)  # sigmas for whitening
    k, dist = kmeans(wh / s, n, iter=30)  # points, mean distance
    assert len(k) == n, print(f'{prefix}ERROR: scipy.cluster.vq.kmeans requested {n} points but returned only {len(k)}')
    k *= s
    wh = torch.tensor(wh, dtype=torch.float32)  # filtered
    wh0 = torch.tensor(wh0, dtype=torch.float32)  # unfiltered
    k = print_results(k)

    # Plot
    # k, d = [None] * 20, [None] * 20
    # for i in tqdm(range(1, 21)):
    #     k[i-1], d[i-1] = kmeans(wh / s, i)  # points, mean distance
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True)
    # ax = ax.ravel()
    # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7))  # plot wh
    # ax[0].hist(wh[wh[:, 0]<100, 0], 400)
    # ax[1].hist(wh[wh[:, 1]<100, 1], 400)
    # fig.savefig('wh.png', dpi=200)

    # Evolve
    npr = np.random
    f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1  # fitness, shape, mutation prob, sigma
    pbar = tqdm(range(gen), desc=f'{prefix}Evolving anchors with Genetic Algorithm:')  # progress bar
    for _ in pbar:
        v = np.ones(sh)
        while (v == 1).all():  # mutate until a change occurs (prevent duplicates)
            v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
        kg = (k.copy() * v).clip(min=2.0)
        fg = anchor_fitness(kg)
        if fg > f:
            f, k = fg, kg.copy()
            pbar.desc = f'{prefix}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}'
            if verbose:
                print_results(k)

    return print_results(k)
--------------------------------------------------------------------------------
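Expanding the Usage line in the kmean_anchors docstring above, a sketch of evolving 9 anchors for a dataset yaml (the dataset paths must exist, as in data/coco.yaml):

from utils.autoanchor import kmean_anchors

# Returns evolved anchors sorted small to large, printed in *.cfg form.
anchors = kmean_anchors(path='./data/coco.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=False)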
/utils/metrics.py:
--------------------------------------------------------------------------------
# Model validation metrics

from pathlib import Path

import matplotlib.pyplot as plt
import numpy as np
import torch

from . import general


def fitness(x):
    # Model fitness as a weighted combination of metrics
    w = [0.0, 0.0, 0.1, 0.9]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
    return (x[:, :4] * w).sum(1)


def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=()):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
    # Arguments
        tp: True positives (nparray, nx1 or nx10).
        conf: Objectness value from 0-1 (nparray).
        pred_cls: Predicted object classes (nparray).
        target_cls: True object classes (nparray).
        plot: Plot precision-recall curve at mAP@0.5
        save_dir: Plot save directory
    # Returns
        The average precision as computed in py-faster-rcnn.
    """

    # Sort by objectness
    i = np.argsort(-conf)
    tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]

    # Find unique classes
    unique_classes = np.unique(target_cls)
    nc = unique_classes.shape[0]  # number of classes

    # Create Precision-Recall curve and compute AP for each class
    px, py = np.linspace(0, 1, 1000), []  # for plotting
    ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000))
    for ci, c in enumerate(unique_classes):
        i = pred_cls == c
        n_l = (target_cls == c).sum()  # number of labels
        n_p = i.sum()  # number of predictions

        if n_p == 0 or n_l == 0:
            continue
        else:
            # Accumulate FPs and TPs
            fpc = (1 - tp[i]).cumsum(0)
            tpc = tp[i].cumsum(0)

            # Recall
            recall = tpc / (n_l + 1e-16)  # recall curve
            r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0)  # negative x, xp because xp decreases

            # Precision
            precision = tpc / (tpc + fpc)  # precision curve
            p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1)  # p at pr_score

            # AP from recall-precision curve
            for j in range(tp.shape[1]):
                ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
                if plot and j == 0:
                    py.append(np.interp(px, mrec, mpre))  # precision at mAP@0.5

    # Compute F1 (harmonic mean of precision and recall)
    f1 = 2 * p * r / (p + r + 1e-16)
    if plot:
        plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names)
        plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1')
        plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision')
        plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall')

    i = f1.mean(0).argmax()  # max F1 index
    return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32')


def compute_ap(recall, precision):
    """ Compute the average precision, given the recall and precision curves
    # Arguments
        recall: The recall curve (list)
        precision: The precision curve (list)
    # Returns
        Average precision, precision curve, recall curve
    """

    # Append sentinel values to beginning and end
    mrec = np.concatenate(([0.], recall, [recall[-1] + 0.01]))
    mpre = np.concatenate(([1.], precision, [0.]))

    # Compute the precision envelope
    mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))

    # Integrate area under curve
    method = 'interp'  # methods: 'continuous', 'interp'
    if method == 'interp':
        x = np.linspace(0, 1, 101)  # 101-point interp (COCO)
        ap = np.trapz(np.interp(x, mrec, mpre), x)  # integrate
    else:  # 'continuous'
        i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x axis (recall) changes
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # area under curve

    return ap, mpre, mrec


class ConfusionMatrix:
    # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix
    def __init__(self, nc, conf=0.25, iou_thres=0.45):
        self.matrix = np.zeros((nc + 1, nc + 1))
        self.nc = nc  # number of classes
        self.conf = conf
        self.iou_thres = iou_thres

    def process_batch(self, detections, labels):
        """
        Update the confusion matrix for a batch of detections and labels.
        Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
        Arguments:
            detections (Array[N, 6]), x1, y1, x2, y2, conf, class
            labels (Array[M, 5]), class, x1, y1, x2, y2
        Returns:
            None, updates confusion matrix accordingly
        """
        detections = detections[detections[:, 4] > self.conf]
        gt_classes = labels[:, 0].int()
        detection_classes = detections[:, 5].int()
        iou = general.box_iou(labels[:, 1:], detections[:, :4])

        x = torch.where(iou > self.iou_thres)
        if x[0].shape[0]:
            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()
            if x[0].shape[0] > 1:
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
        else:
            matches = np.zeros((0, 3))

        n = matches.shape[0] > 0
        m0, m1, _ = matches.transpose().astype(np.int16)
        for i, gc in enumerate(gt_classes):
            j = m0 == i
            if n and sum(j) == 1:
                self.matrix[gc, detection_classes[m1[j]]] += 1  # correct
            else:
                self.matrix[self.nc, gc] += 1  # background FP

        if n:
            for i, dc in enumerate(detection_classes):
                if not any(m1 == i):
                    self.matrix[dc, self.nc] += 1  # background FN

    def matrix(self):
        return self.matrix

    def plot(self, save_dir='', names=()):
        try:
            import seaborn as sn

            array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6)  # normalize
            array[array < 0.005] = np.nan  # don't annotate (would appear as 0.00)

            fig = plt.figure(figsize=(12, 9), tight_layout=True)
            sn.set(font_scale=1.0 if self.nc < 50 else 0.8)  # for label size
            labels = (0 < len(names) < 99) and len(names) == self.nc  # apply names to ticklabels
            sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True,
                       xticklabels=names + ['background FP'] if labels else "auto",
                       yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1))
            fig.axes[0].set_xlabel('True')
            fig.axes[0].set_ylabel('Predicted')
            fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)
        except Exception as e:
            pass

    def print(self):
        for i in range(self.nc + 1):
            print(' '.join(map(str, self.matrix[i])))


# Plots ----------------------------------------------------------------------------------------------------------------

def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()):
    # Precision-recall curve
    fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
    py = np.stack(py, axis=1)

    if 0 < len(names) < 21:  # display per-class legend if < 21 classes
        for i, y in enumerate(py.T):
            ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}')  # plot(recall, precision)
    else:
        ax.plot(px, py, linewidth=1, color='grey')  # plot(recall, precision)

    ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean())
    ax.set_xlabel('Recall')
    ax.set_ylabel('Precision')
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
    fig.savefig(Path(save_dir), dpi=250)


def plot_mc_curve(px, py, save_dir='mc_curve.png', names=(), xlabel='Confidence', ylabel='Metric'):
    # Metric-confidence curve
    fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)

    if 0 < len(names) < 21:  # display per-class legend if < 21 classes
        for i, y in enumerate(py):
            ax.plot(px, y, linewidth=1, label=f'{names[i]}')  # plot(confidence, metric)
    else:
        ax.plot(px, py.T, linewidth=1, color='grey')  # plot(confidence, metric)

    y = py.mean(0)
    ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}')
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
    fig.savefig(Path(save_dir), dpi=250)
--------------------------------------------------------------------------------
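A toy check of compute_ap above (a sketch, assuming utils/metrics.py is importable): a detector with precision 1.0 at every recall level integrates to AP ≈ 1.0 under the 101-point COCO interpolation:

import numpy as np
from utils.metrics import compute_ap

recall = np.linspace(0, 1, 50)
precision = np.ones_like(recall)
ap, mpre, mrec = compute_ap(recall, precision)
print(f'AP = {ap:.3f}')  # ~1.000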
78 | # y = torch.stack(y).max(0)[0] # max ensemble 79 | # y = torch.stack(y).mean(0) # mean ensemble 80 | y = torch.cat(y, 1) # nms ensemble 81 | return y, None # inference, train output 82 | 83 | 84 | 85 | 86 | 87 | class ORT_NMS(torch.autograd.Function): 88 | '''ONNX-Runtime NMS operation''' 89 | @staticmethod 90 | def forward(ctx, 91 | boxes, 92 | scores, 93 | max_output_boxes_per_class=torch.tensor([100]), 94 | iou_threshold=torch.tensor([0.45]), 95 | score_threshold=torch.tensor([0.25])): 96 | device = boxes.device 97 | batch = scores.shape[0] 98 | num_det = random.randint(0, 100) 99 | batches = torch.randint(0, batch, (num_det,)).sort()[0].to(device) 100 | idxs = torch.arange(100, 100 + num_det).to(device) 101 | zeros = torch.zeros((num_det,), dtype=torch.int64).to(device) 102 | selected_indices = torch.cat([batches[None], zeros[None], idxs[None]], 0).T.contiguous() 103 | selected_indices = selected_indices.to(torch.int64) 104 | return selected_indices 105 | 106 | @staticmethod 107 | def symbolic(g, boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold): 108 | return g.op("NonMaxSuppression", boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold) 109 | 110 | 111 | class TRT_NMS(torch.autograd.Function): 112 | '''TensorRT NMS operation''' 113 | @staticmethod 114 | def forward( 115 | ctx, 116 | boxes, 117 | scores, 118 | background_class=-1, 119 | box_coding=1, 120 | iou_threshold=0.45, 121 | max_output_boxes=100, 122 | plugin_version="1", 123 | score_activation=0, 124 | score_threshold=0.25, 125 | ): 126 | batch_size, num_boxes, num_classes = scores.shape 127 | num_det = torch.randint(0, max_output_boxes, (batch_size, 1), dtype=torch.int32) 128 | det_boxes = torch.randn(batch_size, max_output_boxes, 4) 129 | det_scores = torch.randn(batch_size, max_output_boxes) 130 | det_classes = torch.randint(0, num_classes, (batch_size, max_output_boxes), dtype=torch.int32) 131 | return num_det, det_boxes, det_scores, det_classes 132 | 133 | @staticmethod 134 | def symbolic(g, 135 | boxes, 136 | scores, 137 | background_class=-1, 138 | box_coding=1, 139 | iou_threshold=0.45, 140 | max_output_boxes=100, 141 | plugin_version="1", 142 | score_activation=0, 143 | score_threshold=0.25): 144 | out = g.op("TRT::EfficientNMS_TRT", 145 | boxes, 146 | scores, 147 | background_class_i=background_class, 148 | box_coding_i=box_coding, 149 | iou_threshold_f=iou_threshold, 150 | max_output_boxes_i=max_output_boxes, 151 | plugin_version_s=plugin_version, 152 | score_activation_i=score_activation, 153 | score_threshold_f=score_threshold, 154 | outputs=4) 155 | nums, boxes, scores, classes = out 156 | return nums, boxes, scores, classes 157 | 158 | 159 | class ONNX_ORT(nn.Module): 160 | '''onnx module with ONNX-Runtime NMS operation.''' 161 | def __init__(self, max_obj=100, iou_thres=0.45, score_thres=0.25, max_wh=640, device=None): 162 | super().__init__() 163 | self.device = device if device else torch.device("cpu") 164 | self.max_obj = torch.tensor([max_obj]).to(device) 165 | self.iou_threshold = torch.tensor([iou_thres]).to(device) 166 | self.score_threshold = torch.tensor([score_thres]).to(device) 167 | self.max_wh = max_wh # if max_wh != 0 : non-agnostic else : agnostic 168 | self.convert_matrix = torch.tensor([[1, 0, 1, 0], [0, 1, 0, 1], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5]], 169 | dtype=torch.float32, 170 | device=self.device) 171 | 172 | def forward(self, x): 173 | boxes = x[:, :, :4] 174 | conf = x[:, :, 4:5] 175 | scores = x[:, :, 5:] 176 | scores *= conf 177 | 
boxes @= self.convert_matrix
178 |         max_score, category_id = scores.max(2, keepdim=True)
179 |         dis = category_id.float() * self.max_wh
180 |         nmsbox = boxes + dis
181 |         max_score_tp = max_score.transpose(1, 2).contiguous()
182 |         selected_indices = ORT_NMS.apply(nmsbox, max_score_tp, self.max_obj, self.iou_threshold, self.score_threshold)
183 |         X, Y = selected_indices[:, 0], selected_indices[:, 2]
184 |         selected_boxes = boxes[X, Y, :]
185 |         selected_categories = category_id[X, Y, :].float()
186 |         selected_scores = max_score[X, Y, :]
187 |         X = X.unsqueeze(1).float()
188 |         return torch.cat([X, selected_boxes, selected_categories, selected_scores], 1)
189 | 
190 | class ONNX_TRT(nn.Module):
191 |     '''onnx module with TensorRT NMS operation.'''
192 |     def __init__(self, max_obj=100, iou_thres=0.45, score_thres=0.25, max_wh=None, device=None):
193 |         super().__init__()
194 |         assert max_wh is None
195 |         self.device = device if device else torch.device('cpu')
196 |         self.background_class = -1
197 |         self.box_coding = 1
198 |         self.iou_threshold = iou_thres
199 |         self.max_obj = max_obj
200 |         self.plugin_version = '1'
201 |         self.score_activation = 0
202 |         self.score_threshold = score_thres
203 | 
204 |     def forward(self, x):
205 |         boxes = x[:, :, :4]
206 |         conf = x[:, :, 4:5]
207 |         scores = x[:, :, 5:]
208 |         scores *= conf
209 |         num_det, det_boxes, det_scores, det_classes = TRT_NMS.apply(boxes, scores, self.background_class, self.box_coding,
210 |                                                                     self.iou_threshold, self.max_obj,
211 |                                                                     self.plugin_version, self.score_activation,
212 |                                                                     self.score_threshold)
213 |         return num_det, det_boxes, det_scores, det_classes
214 | 
215 | 
216 | class End2End(nn.Module):
217 |     '''export onnx or tensorrt model with NMS operation.'''
218 |     def __init__(self, model, max_obj=100, iou_thres=0.45, score_thres=0.25, max_wh=None, device=None):
219 |         super().__init__()
220 |         device = device if device else torch.device('cpu')
221 |         assert isinstance(max_wh, int) or max_wh is None
222 |         self.model = model.to(device)
223 |         self.model.model[-1].end2end = True
224 |         self.patch_model = ONNX_TRT if max_wh is None else ONNX_ORT
225 |         self.end2end = self.patch_model(max_obj, iou_thres, score_thres, max_wh, device)
226 |         self.end2end.eval()
227 | 
228 |     def forward(self, x):
229 |         x = self.model(x)
230 |         x = self.end2end(x)
231 |         return x
232 | 
233 | 
234 | 
235 | 
236 | 
237 | def attempt_load(weights, map_location=None):
238 |     # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
239 |     model = Ensemble()
240 |     for w in weights if isinstance(weights, list) else [weights]:
241 |         # attempt_download(w)
242 |         ckpt = torch.load(w, map_location=map_location)  # load
243 |         model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval())  # FP32 model
244 | 
245 |     # Compatibility updates
246 |     for m in model.modules():
247 |         if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:
248 |             m.inplace = True  # pytorch 1.7.0 compatibility
249 |         elif type(m) is nn.Upsample:
250 |             m.recompute_scale_factor = None  # torch 1.11.0 compatibility
251 |         elif type(m) is Conv:
252 |             m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility
253 | 
254 |     if len(model) == 1:
255 |         return model[-1]  # return model
256 |     else:
257 |         print('Ensemble created with %s\n' % weights)
258 |         for k in ['names', 'stride']:
259 |             setattr(model, k, getattr(model[-1], k))
260 |         return model  # return ensemble
261 | 
262 | 
263 | 
--------------------------------------------------------------------------------
/sort.py:
-------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import os 4 | import numpy as np 5 | import matplotlib 6 | matplotlib.use('TkAgg') 7 | import matplotlib.pyplot as plt 8 | import matplotlib.patches as patches 9 | from skimage import io 10 | 11 | import glob 12 | import time 13 | import argparse 14 | from filterpy.kalman import KalmanFilter 15 | 16 | np.random.seed(0) 17 | 18 | def linear_assignment(cost_matrix): 19 | try: 20 | import lap #linear assignment problem solver 21 | _, x, y = lap.lapjv(cost_matrix, extend_cost = True) 22 | return np.array([[y[i],i] for i in x if i>=0]) 23 | except ImportError: 24 | from scipy.optimize import linear_sum_assignment 25 | x,y = linear_sum_assignment(cost_matrix) 26 | return np.array(list(zip(x,y))) 27 | 28 | 29 | """From SORT: Computes IOU between two boxes in the form [x1,y1,x2,y2]""" 30 | def iou_batch(bb_test, bb_gt): 31 | 32 | bb_gt = np.expand_dims(bb_gt, 0) 33 | bb_test = np.expand_dims(bb_test, 1) 34 | 35 | xx1 = np.maximum(bb_test[...,0], bb_gt[..., 0]) 36 | yy1 = np.maximum(bb_test[..., 1], bb_gt[..., 1]) 37 | xx2 = np.minimum(bb_test[..., 2], bb_gt[..., 2]) 38 | yy2 = np.minimum(bb_test[..., 3], bb_gt[..., 3]) 39 | w = np.maximum(0., xx2 - xx1) 40 | h = np.maximum(0., yy2 - yy1) 41 | wh = w * h 42 | o = wh / ((bb_test[..., 2] - bb_test[..., 0]) * (bb_test[..., 3] - bb_test[..., 1]) 43 | + (bb_gt[..., 2] - bb_gt[..., 0]) * (bb_gt[..., 3] - bb_gt[..., 1]) - wh) 44 | return(o) 45 | 46 | 47 | """Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form [x,y,s,r] where x,y is the center of the box and s is the scale/area and r is the aspect ratio""" 48 | def convert_bbox_to_z(bbox): 49 | w = bbox[2] - bbox[0] 50 | h = bbox[3] - bbox[1] 51 | x = bbox[0] + w/2. 52 | y = bbox[1] + h/2. 53 | s = w * h 54 | #scale is just area 55 | r = w / float(h) 56 | return np.array([x, y, s, r]).reshape((4, 1)) 57 | 58 | 59 | """Takes a bounding box in the centre form [x,y,s,r] and returns it in the form 60 | [x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right""" 61 | def convert_x_to_bbox(x, score=None): 62 | w = np.sqrt(x[2] * x[3]) 63 | h = x[2] / w 64 | if(score==None): 65 | return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.]).reshape((1,4)) 66 | else: 67 | return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.,score]).reshape((1,5)) 68 | 69 | """This class represents the internal state of individual tracked objects observed as bbox.""" 70 | class KalmanBoxTracker(object): 71 | 72 | count = 0 73 | def __init__(self, bbox): 74 | """ 75 | Initialize a tracker using initial bounding box 76 | 77 | Parameter 'bbox' must have 'detected class' int number at the -1 position. 78 | """ 79 | self.kf = KalmanFilter(dim_x=7, dim_z=4) 80 | self.kf.F = np.array([[1,0,0,0,1,0,0],[0,1,0,0,0,1,0],[0,0,1,0,0,0,1],[0,0,0,1,0,0,0],[0,0,0,0,1,0,0],[0,0,0,0,0,1,0],[0,0,0,0,0,0,1]]) 81 | self.kf.H = np.array([[1,0,0,0,0,0,0],[0,1,0,0,0,0,0],[0,0,1,0,0,0,0],[0,0,0,1,0,0,0]]) 82 | 83 | self.kf.R[2:,2:] *= 10. # R: Covariance matrix of measurement noise (set to high for noisy inputs -> more 'inertia' of boxes') 84 | self.kf.P[4:,4:] *= 1000. #give high uncertainty to the unobservable initial velocities 85 | self.kf.P *= 10. 
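        # State x = [u, v, s, r, u_dot, v_dot, s_dot]: box centre (u, v), area s and
        # aspect ratio r, plus constant velocities for u, v and s (r is assumed not
        # to change).  F above encodes this constant-velocity model; H observes only
        # the first four components [u, v, s, r].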
86 | self.kf.Q[-1,-1] *= 0.5 # Q: Covariance matrix of process noise (set to high for erratically moving things) 87 | self.kf.Q[4:,4:] *= 0.5 88 | 89 | self.kf.x[:4] = convert_bbox_to_z(bbox) # STATE VECTOR 90 | self.time_since_update = 0 91 | self.id = KalmanBoxTracker.count 92 | KalmanBoxTracker.count += 1 93 | self.history = [] 94 | self.hits = 0 95 | self.hit_streak = 0 96 | self.age = 0 97 | self.centroidarr = [] 98 | CX = (bbox[0]+bbox[2])//2 99 | CY = (bbox[1]+bbox[3])//2 100 | self.centroidarr.append((CX,CY)) 101 | 102 | 103 | #keep yolov5 detected class information 104 | self.detclass = bbox[5] 105 | 106 | def update(self, bbox): 107 | """ 108 | Updates the state vector with observed bbox 109 | """ 110 | self.time_since_update = 0 111 | self.history = [] 112 | self.hits += 1 113 | self.hit_streak += 1 114 | self.kf.update(convert_bbox_to_z(bbox)) 115 | self.detclass = bbox[5] 116 | CX = (bbox[0]+bbox[2])//2 117 | CY = (bbox[1]+bbox[3])//2 118 | self.centroidarr.append((CX,CY)) 119 | 120 | def predict(self): 121 | """ 122 | Advances the state vector and returns the predicted bounding box estimate 123 | """ 124 | if((self.kf.x[6]+self.kf.x[2])<=0): 125 | self.kf.x[6] *= 0.0 126 | self.kf.predict() 127 | self.age += 1 128 | if(self.time_since_update>0): 129 | self.hit_streak = 0 130 | self.time_since_update += 1 131 | self.history.append(convert_x_to_bbox(self.kf.x)) 132 | # bbox=self.history[-1] 133 | # CX = (bbox[0]+bbox[2])/2 134 | # CY = (bbox[1]+bbox[3])/2 135 | # self.centroidarr.append((CX,CY)) 136 | 137 | return self.history[-1] 138 | 139 | 140 | def get_state(self): 141 | """ 142 | Returns the current bounding box estimate 143 | # test 144 | arr1 = np.array([[1,2,3,4]]) 145 | arr2 = np.array([0]) 146 | arr3 = np.expand_dims(arr2, 0) 147 | np.concatenate((arr1,arr3), axis=1) 148 | """ 149 | arr_detclass = np.expand_dims(np.array([self.detclass]), 0) 150 | 151 | arr_u_dot = np.expand_dims(self.kf.x[4],0) 152 | arr_v_dot = np.expand_dims(self.kf.x[5],0) 153 | arr_s_dot = np.expand_dims(self.kf.x[6],0) 154 | 155 | return np.concatenate((convert_x_to_bbox(self.kf.x), arr_detclass, arr_u_dot, arr_v_dot, arr_s_dot), axis=1) 156 | 157 | def associate_detections_to_trackers(detections, trackers, iou_threshold = 0.3): 158 | """ 159 | Assigns detections to tracked object (both represented as bounding boxes) 160 | Returns 3 lists of 161 | 1. matches, 162 | 2. unmatched_detections 163 | 3. 
unmatched_trackers
164 |     """
165 |     if(len(trackers)==0):
166 |         return np.empty((0,2),dtype=int), np.arange(len(detections)), np.empty((0,5),dtype=int)
167 | 
168 |     iou_matrix = iou_batch(detections, trackers)
169 | 
170 |     if min(iou_matrix.shape) > 0:
171 |         a = (iou_matrix > iou_threshold).astype(np.int32)
172 |         if a.sum(1).max() == 1 and a.sum(0).max() == 1:
173 |             matched_indices = np.stack(np.where(a), axis=1)
174 |         else:
175 |             matched_indices = linear_assignment(-iou_matrix)
176 |     else:
177 |         matched_indices = np.empty(shape=(0,2))
178 | 
179 |     unmatched_detections = []
180 |     for d, det in enumerate(detections):
181 |         if(d not in matched_indices[:,0]):
182 |             unmatched_detections.append(d)
183 | 
184 |     unmatched_trackers = []
185 |     for t, trk in enumerate(trackers):
186 |         if(t not in matched_indices[:,1]):
187 |             unmatched_trackers.append(t)
188 | 
189 |     #filter out matched with low IOU
190 |     matches = []
191 |     for m in matched_indices:
192 |         if(iou_matrix[m[0], m[1]] < iou_threshold):
193 |             unmatched_detections.append(m[0])
194 |             unmatched_trackers.append(m[1])
195 |         else:
196 |             matches.append(m.reshape(1,2))
197 | 
198 |     if(len(matches)==0):
199 |         matches = np.empty((0,2),dtype=int)
200 |     else:
201 |         matches = np.concatenate(matches,axis=0)
202 | 
203 |     return matches, np.array(unmatched_detections), np.array(unmatched_trackers)
204 | 
205 | 
206 | class Sort(object):
207 |     def __init__(self, max_age=1, min_hits=3, iou_threshold=0.3):
208 |         """
209 |         Sets key parameters for SORT
210 |         """
211 |         self.max_age = max_age
212 |         self.min_hits = min_hits
213 |         self.iou_threshold = iou_threshold
214 |         self.trackers = []
215 |         self.frame_count = 0
216 | 
217 |     def getTrackers(self,):
218 |         return self.trackers
219 | 
220 |     def update(self, dets=np.empty((0, 6))):
221 |         """
222 |         Params:
223 |           dets - a numpy array of detections in the format [[x1,y1,x2,y2,score,class],...]
224 |         Requires: this method must be called once for each frame even with empty
225 |         detections (use np.empty((0, 6)) for frames without detections).
226 |         Returns a similar array, where the last column is the object ID.
227 | 
228 |         NOTE: The number of objects returned may differ from the number of detections provided.
229 |         """
230 |         self.frame_count += 1
231 |         # get predicted locations from existing trackers
232 |         trks = np.zeros((len(self.trackers), 5))
233 |         to_del = []
234 |         ret = []
235 |         for t, trk in enumerate(trks):
236 |             pos = self.trackers[t].predict()[0]
237 |             trk[:] = [pos[0], pos[1], pos[2], pos[3], 0]
238 |             if np.any(np.isnan(pos)):
239 |                 to_del.append(t)
240 |         trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
241 |         for t in reversed(to_del):
242 |             self.trackers.pop(t)
243 | 
244 |         matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(dets, trks, self.iou_threshold)
245 | 
246 |         # update matched trackers with assigned detections
247 |         for m in matched:
248 |             self.trackers[m[1]].update(dets[m[0], :])
249 | 
250 |         # create and initialise new trackers for unmatched detections
251 |         for i in unmatched_dets:
252 |             trk = KalmanBoxTracker(np.hstack(dets[i, :]))
253 |             self.trackers.append(trk)
254 | 
255 |         # emit confirmed tracks as [x1,y1,x2,y2,class,u_dot,v_dot,s_dot,track_id]
256 |         i = len(self.trackers)
257 |         for trk in reversed(self.trackers):
258 |             d = trk.get_state()[0]
259 |             if (trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits):
260 |                 ret.append(np.concatenate((d, [trk.id+1])).reshape(1,-1)) #+1'd because MOT benchmark requires positive value
261 |             i -= 1
262 |             #remove dead tracklet
263 |             if(trk.time_since_update > self.max_age):
264 |                 self.trackers.pop(i)
265 |         if(len(ret) > 0):
266 |             return np.concatenate(ret)
267 |         return np.empty((0,6))
268 | 
269 | def parse_args():
270 |     """Parse input arguments."""
271 |     parser = argparse.ArgumentParser(description='SORT demo')
272 |     parser.add_argument('--display', dest='display', help='Display online tracker output (slow) [False]', action='store_true')
273 |     parser.add_argument("--seq_path", help="Path to detections.", type=str, default='data')
274 |     parser.add_argument("--phase", help="Subdirectory in seq_path.", type=str, default='train')
275 |     parser.add_argument("--max_age",
276 |                         help="Maximum number of frames to keep alive a track without associated detections.",
277 |                         type=int, default=1)
278 |     parser.add_argument("--min_hits",
279 |                         help="Minimum number of associated detections before track is initialised.",
280 |                         type=int, default=3)
281 |     parser.add_argument("--iou_threshold", help="Minimum IOU for match.", type=float, default=0.3)
282 |     args = parser.parse_args()
283 |     return args
284 | 
285 | if __name__ == '__main__':
286 |     # all train
287 |     args = parse_args()
288 |     display = args.display
289 |     phase = args.phase
290 |     total_time = 0.0
291 |     total_frames = 0
292 |     colours = np.random.rand(32, 3) #used only for display
293 |     if(display):
294 |         if not os.path.exists('mot_benchmark'):
295 |             print('\n\tERROR: mot_benchmark link not found!\n\n    Create a symbolic link to the MOT benchmark\n    (https://motchallenge.net/data/2D_MOT_2015/#download). 
E.g.:\n\n    $ ln -s /path/to/MOT2015_challenge/2DMOT2015 mot_benchmark\n\n')
296 |             exit()
297 |         plt.ion()
298 |         fig = plt.figure()
299 |         ax1 = fig.add_subplot(111, aspect='equal')
300 | 
301 |     if not os.path.exists('output'):
302 |         os.makedirs('output')
303 |     pattern = os.path.join(args.seq_path, phase, '*', 'det', 'det.txt')
304 |     for seq_dets_fn in glob.glob(pattern):
305 |         mot_tracker = Sort(max_age=args.max_age,
306 |                            min_hits=args.min_hits,
307 |                            iou_threshold=args.iou_threshold) #create instance of the SORT tracker
308 |         seq_dets = np.loadtxt(seq_dets_fn, delimiter=',')
309 |         seq = seq_dets_fn[pattern.find('*'):].split(os.path.sep)[0]
310 | 
311 |         with open(os.path.join('output', '%s.txt'%(seq)),'w') as out_file:
312 |             print("Processing %s."%(seq))
313 |             for frame in range(int(seq_dets[:,0].max())):
314 |                 frame += 1 #detection and frame numbers begin at 1
315 |                 dets = seq_dets[seq_dets[:, 0]==frame, 2:7]
316 |                 dets[:, 2:4] += dets[:, 0:2] #convert from [x1,y1,w,h] to [x1,y1,x2,y2]
317 |                 total_frames += 1
318 | 
319 |                 if(display):
320 |                     fn = os.path.join('mot_benchmark', phase, seq, 'img1', '%06d.jpg'%(frame))
321 |                     im = io.imread(fn)
322 |                     ax1.imshow(im)
323 |                     plt.title(seq + ' Tracked Targets')
324 | 
325 |                 start_time = time.time()
326 |                 trackers = mot_tracker.update(dets)
327 |                 cycle_time = time.time() - start_time
328 |                 total_time += cycle_time
329 | 
330 |                 for d in trackers:
331 |                     print('%d,%d,%.2f,%.2f,%.2f,%.2f,1,-1,-1,-1'%(frame,d[4],d[0],d[1],d[2]-d[0],d[3]-d[1]),file=out_file)
332 |                     if(display):
333 |                         d = d.astype(np.int32)
334 |                         ax1.add_patch(patches.Rectangle((d[0],d[1]),d[2]-d[0],d[3]-d[1],fill=False,lw=3,ec=colours[d[4]%32,:]))
335 | 
336 |                 if(display):
337 |                     fig.canvas.flush_events()
338 |                     plt.draw()
339 |                     ax1.cla()
340 | 
341 |     print("Total Tracking took: %.3f seconds for %d frames or %.1f FPS" % (total_time, total_frames, total_frames / total_time))
342 | 
343 |     if(display):
344 |         print("Note: to get real runtime results run without the option: --display")
345 | 
--------------------------------------------------------------------------------
/utils/torch_utils.py:
--------------------------------------------------------------------------------
1 | # YOLOR PyTorch utils
2 | 
3 | import datetime
4 | import logging
5 | import math
6 | import os
7 | import platform
8 | import subprocess
9 | import time
10 | from contextlib import contextmanager
11 | from copy import deepcopy
12 | from pathlib import Path
13 | 
14 | import torch
15 | import torch.backends.cudnn as cudnn
16 | import torch.nn as nn
17 | import torch.nn.functional as F
18 | import torchvision
19 | 
20 | try:
21 |     import thop  # for FLOPS computation
22 | except ImportError:
23 |     thop = None
24 | logger = logging.getLogger(__name__)
25 | 
26 | 
27 | @contextmanager
28 | def torch_distributed_zero_first(local_rank: int):
29 |     """
30 |     Context manager that makes all processes in distributed training wait for the local master to do something.
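    Typical use (a sketch; `create_dataset` is a placeholder, not part of this module):
        with torch_distributed_zero_first(local_rank):
            dataset = create_dataset(...)  # rank 0 builds the cache first, the other ranks wait and then reuse it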
31 | """ 32 | if local_rank not in [-1, 0]: 33 | torch.distributed.barrier() 34 | yield 35 | if local_rank == 0: 36 | torch.distributed.barrier() 37 | 38 | 39 | def init_torch_seeds(seed=0): 40 | # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html 41 | torch.manual_seed(seed) 42 | if seed == 0: # slower, more reproducible 43 | cudnn.benchmark, cudnn.deterministic = False, True 44 | else: # faster, less reproducible 45 | cudnn.benchmark, cudnn.deterministic = True, False 46 | 47 | 48 | def date_modified(path=__file__): 49 | # return human-readable file modification date, i.e. '2021-3-26' 50 | t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime) 51 | return f'{t.year}-{t.month}-{t.day}' 52 | 53 | 54 | def git_describe(path=Path(__file__).parent): # path must be a directory 55 | # return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe 56 | s = f'git -C {path} describe --tags --long --always' 57 | try: 58 | return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1] 59 | except subprocess.CalledProcessError as e: 60 | return '' # not a git repository 61 | 62 | 63 | def select_device(device='', batch_size=None): 64 | # device = 'cpu' or '0' or '0,1,2,3' 65 | s = f'YOLOR 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string 66 | cpu = device.lower() == 'cpu' 67 | if cpu: 68 | os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False 69 | elif device: # non-cpu device requested 70 | os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable 71 | assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability 72 | 73 | cuda = not cpu and torch.cuda.is_available() 74 | if cuda: 75 | n = torch.cuda.device_count() 76 | if n > 1 and batch_size: # check that batch_size is compatible with device_count 77 | assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' 78 | space = ' ' * len(s) 79 | for i, d in enumerate(device.split(',') if device else range(n)): 80 | p = torch.cuda.get_device_properties(i) 81 | s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n" # bytes to MB 82 | else: 83 | s += 'CPU\n' 84 | 85 | logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe 86 | return torch.device('cuda:0' if cuda else 'cpu') 87 | 88 | 89 | def time_synchronized(): 90 | # pytorch-accurate time 91 | if torch.cuda.is_available(): 92 | torch.cuda.synchronize() 93 | return time.time() 94 | 95 | 96 | def profile(x, ops, n=100, device=None): 97 | # profile a pytorch module or list of modules. 
Example usage: 98 | # x = torch.randn(16, 3, 640, 640) # input 99 | # m1 = lambda x: x * torch.sigmoid(x) 100 | # m2 = nn.SiLU() 101 | # profile(x, [m1, m2], n=100) # profile speed over 100 iterations 102 | 103 | device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') 104 | x = x.to(device) 105 | x.requires_grad = True 106 | print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '') 107 | print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}") 108 | for m in ops if isinstance(ops, list) else [ops]: 109 | m = m.to(device) if hasattr(m, 'to') else m # device 110 | m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m # type 111 | dtf, dtb, t = 0., 0., [0., 0., 0.] # dt forward, backward 112 | try: 113 | flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPS 114 | except: 115 | flops = 0 116 | 117 | for _ in range(n): 118 | t[0] = time_synchronized() 119 | y = m(x) 120 | t[1] = time_synchronized() 121 | try: 122 | _ = y.sum().backward() 123 | t[2] = time_synchronized() 124 | except: # no backward method 125 | t[2] = float('nan') 126 | dtf += (t[1] - t[0]) * 1000 / n # ms per op forward 127 | dtb += (t[2] - t[1]) * 1000 / n # ms per op backward 128 | 129 | s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' 130 | s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list' 131 | p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters 132 | print(f'{p:12}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}') 133 | 134 | 135 | def is_parallel(model): 136 | return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) 137 | 138 | 139 | def intersect_dicts(da, db, exclude=()): 140 | # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values 141 | return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} 142 | 143 | 144 | def initialize_weights(model): 145 | for m in model.modules(): 146 | t = type(m) 147 | if t is nn.Conv2d: 148 | pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') 149 | elif t is nn.BatchNorm2d: 150 | m.eps = 1e-3 151 | m.momentum = 0.03 152 | elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]: 153 | m.inplace = True 154 | 155 | 156 | def find_modules(model, mclass=nn.Conv2d): 157 | # Finds layer indices matching module class 'mclass' 158 | return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)] 159 | 160 | 161 | def sparsity(model): 162 | # Return global model sparsity 163 | a, b = 0., 0. 164 | for p in model.parameters(): 165 | a += p.numel() 166 | b += (p == 0).sum() 167 | return b / a 168 | 169 | 170 | def prune(model, amount=0.3): 171 | # Prune model to requested global sparsity 172 | import torch.nn.utils.prune as prune 173 | print('Pruning model... 
', end='') 174 | for name, m in model.named_modules(): 175 | if isinstance(m, nn.Conv2d): 176 | prune.l1_unstructured(m, name='weight', amount=amount) # prune 177 | prune.remove(m, 'weight') # make permanent 178 | print(' %.3g global sparsity' % sparsity(model)) 179 | 180 | 181 | def fuse_conv_and_bn(conv, bn): 182 | # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ 183 | fusedconv = nn.Conv2d(conv.in_channels, 184 | conv.out_channels, 185 | kernel_size=conv.kernel_size, 186 | stride=conv.stride, 187 | padding=conv.padding, 188 | groups=conv.groups, 189 | bias=True).requires_grad_(False).to(conv.weight.device) 190 | 191 | # prepare filters 192 | w_conv = conv.weight.clone().view(conv.out_channels, -1) 193 | w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) 194 | fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape)) 195 | 196 | # prepare spatial bias 197 | b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias 198 | b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) 199 | fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) 200 | 201 | return fusedconv 202 | 203 | 204 | def model_info(model, verbose=False, img_size=640): 205 | # Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320] 206 | n_p = sum(x.numel() for x in model.parameters()) # number parameters 207 | n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients 208 | if verbose: 209 | print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma')) 210 | for i, (name, p) in enumerate(model.named_parameters()): 211 | name = name.replace('module_list.', '') 212 | print('%5g %40s %9s %12g %20s %10.3g %10.3g' % 213 | (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) 214 | 215 | try: # FLOPS 216 | from thop import profile 217 | stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 218 | img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input 219 | flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPS 220 | img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float 221 | fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPS 222 | except (ImportError, Exception): 223 | fs = '' 224 | 225 | logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") 226 | 227 | 228 | def load_classifier(name='resnet101', n=2): 229 | # Loads a pretrained model reshaped to n-class output 230 | model = torchvision.models.__dict__[name](pretrained=True) 231 | 232 | # ResNet model properties 233 | # input_size = [3, 224, 224] 234 | # input_space = 'RGB' 235 | # input_range = [0, 1] 236 | # mean = [0.485, 0.456, 0.406] 237 | # std = [0.229, 0.224, 0.225] 238 | 239 | # Reshape output to n classes 240 | filters = model.fc.weight.shape[1] 241 | model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True) 242 | model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True) 243 | model.fc.out_features = n 244 | return model 245 | 246 | 247 | def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) 248 | # scales img(bs,3,y,x) by ratio constrained to gs-multiple 249 | if 
ratio == 1.0: 250 | return img 251 | else: 252 | h, w = img.shape[2:] 253 | s = (int(h * ratio), int(w * ratio)) # new size 254 | img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize 255 | if not same_shape: # pad/crop img 256 | h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)] 257 | return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean 258 | 259 | 260 | def copy_attr(a, b, include=(), exclude=()): 261 | # Copy attributes from b to a, options to only include [...] and to exclude [...] 262 | for k, v in b.__dict__.items(): 263 | if (len(include) and k not in include) or k.startswith('_') or k in exclude: 264 | continue 265 | else: 266 | setattr(a, k, v) 267 | 268 | 269 | class ModelEMA: 270 | """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models 271 | Keep a moving average of everything in the model state_dict (parameters and buffers). 272 | This is intended to allow functionality like 273 | https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage 274 | A smoothed version of the weights is necessary for some training schemes to perform well. 275 | This class is sensitive where it is initialized in the sequence of model init, 276 | GPU assignment and distributed training wrappers. 277 | """ 278 | 279 | def __init__(self, model, decay=0.9999, updates=0): 280 | # Create EMA 281 | self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA 282 | # if next(model.parameters()).device.type != 'cpu': 283 | # self.ema.half() # FP16 EMA 284 | self.updates = updates # number of EMA updates 285 | self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs) 286 | for p in self.ema.parameters(): 287 | p.requires_grad_(False) 288 | 289 | def update(self, model): 290 | # Update EMA parameters 291 | with torch.no_grad(): 292 | self.updates += 1 293 | d = self.decay(self.updates) 294 | 295 | msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict 296 | for k, v in self.ema.state_dict().items(): 297 | if v.dtype.is_floating_point: 298 | v *= d 299 | v += (1. - d) * msd[k].detach() 300 | 301 | def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): 302 | # Update EMA attributes 303 | copy_attr(self.ema, model, include, exclude) 304 | 305 | 306 | class BatchNormXd(torch.nn.modules.batchnorm._BatchNorm): 307 | def _check_input_dim(self, input): 308 | # The only difference between BatchNorm1d, BatchNorm2d, BatchNorm3d, etc 309 | # is this method that is overwritten by the sub-class 310 | # This original goal of this method was for tensor sanity checks 311 | # If you're ok bypassing those sanity checks (eg. 
if you trust your inference 312 | # to provide the right dimensional inputs), then you can just use this method 313 | # for easy conversion from SyncBatchNorm 314 | # (unfortunately, SyncBatchNorm does not store the original class - if it did 315 | # we could return the one that was originally created) 316 | return 317 | 318 | def revert_sync_batchnorm(module): 319 | # this is very similar to the function that it is trying to revert: 320 | # https://github.com/pytorch/pytorch/blob/c8b3686a3e4ba63dc59e5dcfe5db3430df256833/torch/nn/modules/batchnorm.py#L679 321 | module_output = module 322 | if isinstance(module, torch.nn.modules.batchnorm.SyncBatchNorm): 323 | new_cls = BatchNormXd 324 | module_output = BatchNormXd(module.num_features, 325 | module.eps, module.momentum, 326 | module.affine, 327 | module.track_running_stats) 328 | if module.affine: 329 | with torch.no_grad(): 330 | module_output.weight = module.weight 331 | module_output.bias = module.bias 332 | module_output.running_mean = module.running_mean 333 | module_output.running_var = module.running_var 334 | module_output.num_batches_tracked = module.num_batches_tracked 335 | if hasattr(module, "qconfig"): 336 | module_output.qconfig = module.qconfig 337 | for name, child in module.named_children(): 338 | module_output.add_module(name, revert_sync_batchnorm(child)) 339 | del module 340 | return module_output 341 | 342 | 343 | class TracedModel(nn.Module): 344 | 345 | def __init__(self, model=None, device=None, img_size=(640,640)): 346 | super(TracedModel, self).__init__() 347 | 348 | print(" Convert model to Traced-model... ") 349 | self.stride = model.stride 350 | self.names = model.names 351 | self.model = model 352 | 353 | self.model = revert_sync_batchnorm(self.model) 354 | self.model.to('cpu') 355 | self.model.eval() 356 | 357 | self.detect_layer = self.model.model[-1] 358 | self.model.traced = True 359 | 360 | rand_example = torch.rand(1, 3, img_size, img_size) 361 | 362 | traced_script_module = torch.jit.trace(self.model, rand_example, strict=False) 363 | #traced_script_module = torch.jit.script(self.model) 364 | traced_script_module.save("traced_model.pt") 365 | print(" traced_script_module saved! ") 366 | self.model = traced_script_module 367 | self.model.to(device) 368 | self.detect_layer.to(device) 369 | print(" model is traced! 
\n") 370 | 371 | def forward(self, x, augment=False, profile=False): 372 | out = self.model(x) 373 | out = self.detect_layer(out) 374 | return out -------------------------------------------------------------------------------- /utils/wandb_logging/wandb_utils.py: -------------------------------------------------------------------------------- 1 | import json 2 | import sys 3 | from pathlib import Path 4 | 5 | import torch 6 | import yaml 7 | from tqdm import tqdm 8 | 9 | sys.path.append(str(Path(__file__).parent.parent.parent)) # add utils/ to path 10 | from utils.datasets import LoadImagesAndLabels 11 | from utils.datasets import img2label_paths 12 | from utils.general import colorstr, xywh2xyxy, check_dataset 13 | 14 | try: 15 | import wandb 16 | from wandb import init, finish 17 | except ImportError: 18 | wandb = None 19 | 20 | WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' 21 | 22 | 23 | def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX): 24 | return from_string[len(prefix):] 25 | 26 | 27 | def check_wandb_config_file(data_config_file): 28 | wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1)) # updated data.yaml path 29 | if Path(wandb_config).is_file(): 30 | return wandb_config 31 | return data_config_file 32 | 33 | 34 | def get_run_info(run_path): 35 | run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX)) 36 | run_id = run_path.stem 37 | project = run_path.parent.stem 38 | model_artifact_name = 'run_' + run_id + '_model' 39 | return run_id, project, model_artifact_name 40 | 41 | 42 | def check_wandb_resume(opt): 43 | process_wandb_config_ddp_mode(opt) if opt.global_rank not in [-1, 0] else None 44 | if isinstance(opt.resume, str): 45 | if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): 46 | if opt.global_rank not in [-1, 0]: # For resuming DDP runs 47 | run_id, project, model_artifact_name = get_run_info(opt.resume) 48 | api = wandb.Api() 49 | artifact = api.artifact(project + '/' + model_artifact_name + ':latest') 50 | modeldir = artifact.download() 51 | opt.weights = str(Path(modeldir) / "last.pt") 52 | return True 53 | return None 54 | 55 | 56 | def process_wandb_config_ddp_mode(opt): 57 | with open(opt.data) as f: 58 | data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict 59 | train_dir, val_dir = None, None 60 | if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): 61 | api = wandb.Api() 62 | train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias) 63 | train_dir = train_artifact.download() 64 | train_path = Path(train_dir) / 'data/images/' 65 | data_dict['train'] = str(train_path) 66 | 67 | if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX): 68 | api = wandb.Api() 69 | val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias) 70 | val_dir = val_artifact.download() 71 | val_path = Path(val_dir) / 'data/images/' 72 | data_dict['val'] = str(val_path) 73 | if train_dir or val_dir: 74 | ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml') 75 | with open(ddp_data_path, 'w') as f: 76 | yaml.dump(data_dict, f) 77 | opt.data = ddp_data_path 78 | 79 | 80 | class WandbLogger(): 81 | def __init__(self, opt, name, run_id, data_dict, job_type='Training'): 82 | # Pre-training routine -- 83 | self.job_type = job_type 84 | self.wandb, self.wandb_run, self.data_dict = wandb, None if not wandb else wandb.run, data_dict 85 | # It's more elegant to stick to 1 wandb.init call, but useful config data is 
overwritten in the WandbLogger's wandb.init call 86 | if isinstance(opt.resume, str): # checks resume from artifact 87 | if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): 88 | run_id, project, model_artifact_name = get_run_info(opt.resume) 89 | model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name 90 | assert wandb, 'install wandb to resume wandb runs' 91 | # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config 92 | self.wandb_run = wandb.init(id=run_id, project=project, resume='allow') 93 | opt.resume = model_artifact_name 94 | elif self.wandb: 95 | self.wandb_run = wandb.init(config=opt, 96 | resume="allow", 97 | project='YOLOR' if opt.project == 'runs/train' else Path(opt.project).stem, 98 | name=name, 99 | job_type=job_type, 100 | id=run_id) if not wandb.run else wandb.run 101 | if self.wandb_run: 102 | if self.job_type == 'Training': 103 | if not opt.resume: 104 | wandb_data_dict = self.check_and_upload_dataset(opt) if opt.upload_dataset else data_dict 105 | # Info useful for resuming from artifacts 106 | self.wandb_run.config.opt = vars(opt) 107 | self.wandb_run.config.data_dict = wandb_data_dict 108 | self.data_dict = self.setup_training(opt, data_dict) 109 | if self.job_type == 'Dataset Creation': 110 | self.data_dict = self.check_and_upload_dataset(opt) 111 | else: 112 | prefix = colorstr('wandb: ') 113 | print(f"{prefix}Install Weights & Biases for YOLOR logging with 'pip install wandb' (recommended)") 114 | 115 | def check_and_upload_dataset(self, opt): 116 | assert wandb, 'Install wandb to upload dataset' 117 | check_dataset(self.data_dict) 118 | config_path = self.log_dataset_artifact(opt.data, 119 | opt.single_cls, 120 | 'YOLOR' if opt.project == 'runs/train' else Path(opt.project).stem) 121 | print("Created dataset config file ", config_path) 122 | with open(config_path) as f: 123 | wandb_data_dict = yaml.load(f, Loader=yaml.SafeLoader) 124 | return wandb_data_dict 125 | 126 | def setup_training(self, opt, data_dict): 127 | self.log_dict, self.current_epoch, self.log_imgs = {}, 0, 16 # Logging Constants 128 | self.bbox_interval = opt.bbox_interval 129 | if isinstance(opt.resume, str): 130 | modeldir, _ = self.download_model_artifact(opt) 131 | if modeldir: 132 | self.weights = Path(modeldir) / "last.pt" 133 | config = self.wandb_run.config 134 | opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp = str( 135 | self.weights), config.save_period, config.total_batch_size, config.bbox_interval, config.epochs, \ 136 | config.opt['hyp'] 137 | data_dict = dict(self.wandb_run.config.data_dict) # eliminates the need for config file to resume 138 | if 'val_artifact' not in self.__dict__: # If --upload_dataset is set, use the existing artifact, don't download 139 | self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(data_dict.get('train'), 140 | opt.artifact_alias) 141 | self.val_artifact_path, self.val_artifact = self.download_dataset_artifact(data_dict.get('val'), 142 | opt.artifact_alias) 143 | self.result_artifact, self.result_table, self.val_table, self.weights = None, None, None, None 144 | if self.train_artifact_path is not None: 145 | train_path = Path(self.train_artifact_path) / 'data/images/' 146 | data_dict['train'] = str(train_path) 147 | if self.val_artifact_path is not None: 148 | val_path = Path(self.val_artifact_path) / 'data/images/' 149 | data_dict['val'] = str(val_path) 150 | self.val_table = self.val_artifact.get("val") 151 | self.map_val_table_path() 152 | if 
self.val_artifact is not None: 153 | self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") 154 | self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"]) 155 | if opt.bbox_interval == -1: 156 | self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1 157 | return data_dict 158 | 159 | def download_dataset_artifact(self, path, alias): 160 | if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX): 161 | dataset_artifact = wandb.use_artifact(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) 162 | assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" 163 | datadir = dataset_artifact.download() 164 | return datadir, dataset_artifact 165 | return None, None 166 | 167 | def download_model_artifact(self, opt): 168 | if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): 169 | model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest") 170 | assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist' 171 | modeldir = model_artifact.download() 172 | epochs_trained = model_artifact.metadata.get('epochs_trained') 173 | total_epochs = model_artifact.metadata.get('total_epochs') 174 | assert epochs_trained < total_epochs, 'training to %g epochs is finished, nothing to resume.' % ( 175 | total_epochs) 176 | return modeldir, model_artifact 177 | return None, None 178 | 179 | def log_model(self, path, opt, epoch, fitness_score, best_model=False): 180 | model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={ 181 | 'original_url': str(path), 182 | 'epochs_trained': epoch + 1, 183 | 'save period': opt.save_period, 184 | 'project': opt.project, 185 | 'total_epochs': opt.epochs, 186 | 'fitness_score': fitness_score 187 | }) 188 | model_artifact.add_file(str(path / 'last.pt'), name='last.pt') 189 | wandb.log_artifact(model_artifact, 190 | aliases=['latest', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) 191 | print("Saving model artifact on epoch ", epoch + 1) 192 | 193 | def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False): 194 | with open(data_file) as f: 195 | data = yaml.load(f, Loader=yaml.SafeLoader) # data dict 196 | nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names']) 197 | names = {k: v for k, v in enumerate(names)} # to index dictionary 198 | self.train_artifact = self.create_dataset_table(LoadImagesAndLabels( 199 | data['train']), names, name='train') if data.get('train') else None 200 | self.val_artifact = self.create_dataset_table(LoadImagesAndLabels( 201 | data['val']), names, name='val') if data.get('val') else None 202 | if data.get('train'): 203 | data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train') 204 | if data.get('val'): 205 | data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val') 206 | path = data_file if overwrite_config else '_wandb.'.join(data_file.rsplit('.', 1)) # updated data.yaml path 207 | data.pop('download', None) 208 | with open(path, 'w') as f: 209 | yaml.dump(data, f) 210 | 211 | if self.job_type == 'Training': # builds correct artifact pipeline graph 212 | self.wandb_run.use_artifact(self.val_artifact) 213 | self.wandb_run.use_artifact(self.train_artifact) 214 | self.val_artifact.wait() 215 | self.val_table = self.val_artifact.get('val') 216 | self.map_val_table_path() 217 | else: 218 | self.wandb_run.log_artifact(self.train_artifact) 219 | 
self.wandb_run.log_artifact(self.val_artifact) 220 | return path 221 | 222 | def map_val_table_path(self): 223 | self.val_table_map = {} 224 | print("Mapping dataset") 225 | for i, data in enumerate(tqdm(self.val_table.data)): 226 | self.val_table_map[data[3]] = data[0] 227 | 228 | def create_dataset_table(self, dataset, class_to_id, name='dataset'): 229 | # TODO: Explore multiprocessing to slpit this loop parallely| This is essential for speeding up the the logging 230 | artifact = wandb.Artifact(name=name, type="dataset") 231 | img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None 232 | img_files = tqdm(dataset.img_files) if not img_files else img_files 233 | for img_file in img_files: 234 | if Path(img_file).is_dir(): 235 | artifact.add_dir(img_file, name='data/images') 236 | labels_path = 'labels'.join(dataset.path.rsplit('images', 1)) 237 | artifact.add_dir(labels_path, name='data/labels') 238 | else: 239 | artifact.add_file(img_file, name='data/images/' + Path(img_file).name) 240 | label_file = Path(img2label_paths([img_file])[0]) 241 | artifact.add_file(str(label_file), 242 | name='data/labels/' + label_file.name) if label_file.exists() else None 243 | table = wandb.Table(columns=["id", "train_image", "Classes", "name"]) 244 | class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()]) 245 | for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)): 246 | height, width = shapes[0] 247 | labels[:, 2:] = (xywh2xyxy(labels[:, 2:].view(-1, 4))) * torch.Tensor([width, height, width, height]) 248 | box_data, img_classes = [], {} 249 | for cls, *xyxy in labels[:, 1:].tolist(): 250 | cls = int(cls) 251 | box_data.append({"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, 252 | "class_id": cls, 253 | "box_caption": "%s" % (class_to_id[cls]), 254 | "scores": {"acc": 1}, 255 | "domain": "pixel"}) 256 | img_classes[cls] = class_to_id[cls] 257 | boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space 258 | table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), json.dumps(img_classes), 259 | Path(paths).name) 260 | artifact.add(table, name) 261 | return artifact 262 | 263 | def log_training_progress(self, predn, path, names): 264 | if self.val_table and self.result_table: 265 | class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) 266 | box_data = [] 267 | total_conf = 0 268 | for *xyxy, conf, cls in predn.tolist(): 269 | if conf >= 0.25: 270 | box_data.append( 271 | {"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, 272 | "class_id": int(cls), 273 | "box_caption": "%s %.3f" % (names[cls], conf), 274 | "scores": {"class_score": conf}, 275 | "domain": "pixel"}) 276 | total_conf = total_conf + conf 277 | boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space 278 | id = self.val_table_map[Path(path).name] 279 | self.result_table.add_data(self.current_epoch, 280 | id, 281 | wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set), 282 | total_conf / max(1, len(box_data)) 283 | ) 284 | 285 | def log(self, log_dict): 286 | if self.wandb_run: 287 | for key, value in log_dict.items(): 288 | self.log_dict[key] = value 289 | 290 | def end_epoch(self, best_result=False): 291 | if self.wandb_run: 292 | wandb.log(self.log_dict) 293 | self.log_dict = {} 294 | if self.result_artifact: 295 | train_results = 
wandb.JoinedTable(self.val_table, self.result_table, "id") 296 | self.result_artifact.add(train_results, 'result') 297 | wandb.log_artifact(self.result_artifact, aliases=['latest', 'epoch ' + str(self.current_epoch), 298 | ('best' if best_result else '')]) 299 | self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"]) 300 | self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") 301 | 302 | def finish_run(self): 303 | if self.wandb_run: 304 | if self.log_dict: 305 | wandb.log(self.log_dict) 306 | wandb.run.finish() 307 | -------------------------------------------------------------------------------- /detect_speed.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import time 3 | from pathlib import Path 4 | import cv2 5 | import torch 6 | import torch.backends.cudnn as cudnn 7 | from numpy import random 8 | 9 | from models.experimental import attempt_load 10 | from utils.datasets import LoadStreams, LoadImages 11 | from utils.general import check_img_size, check_requirements, \ 12 | check_imshow, non_max_suppression, apply_classifier, \ 13 | scale_coords, xyxy2xywh, strip_optimizer, set_logging, \ 14 | increment_path 15 | from utils.plots import plot_one_box 16 | from utils.torch_utils import select_device, load_classifier, time_synchronized, TracedModel 17 | 18 | #For SORT tracking 19 | import skimage 20 | from sort import * 21 | 22 | import time 23 | 24 | #............................... Tracker Functions ............................ 25 | """ Random created palette""" 26 | palette = (2 ** 11 - 1, 2 ** 15 - 1, 2 ** 20 - 1) 27 | 28 | area1_pointA = (560,350) 29 | area1_pointB = (865,350) 30 | area1_pointC = (560,400) 31 | area1_pointD = (920,400) 32 | 33 | start_area = [(560,250),(760,250),(760,300),(560,300)] 34 | stop_area = [(560,350),(920,350),(920,400),(560,400)] 35 | 36 | #vehicles total counting variables 37 | array_ids = [] 38 | counting = 0 39 | modulo_counting = 0 40 | 41 | #Tracking vehicles 42 | vehicles_entering = {} 43 | vehicles_elapsed_time = {} 44 | distance = 25 #meters (assumption) 45 | speed_array = [] 46 | 47 | """" Calculates the relative bounding box from absolute pixel values. 
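Given a box as (x1, y1, x2, y2) tensor coordinates, it returns (x_center, y_center, width, height) floats, the centre/size layout expected by appearance-based trackers such as DeepSORT.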
""" 48 | def bbox_rel(*xyxy): 49 | bbox_left = min([xyxy[0].item(), xyxy[2].item()]) 50 | bbox_top = min([xyxy[1].item(), xyxy[3].item()]) 51 | bbox_w = abs(xyxy[0].item() - xyxy[2].item()) 52 | bbox_h = abs(xyxy[1].item() - xyxy[3].item()) 53 | x_c = (bbox_left + bbox_w / 2) 54 | y_c = (bbox_top + bbox_h / 2) 55 | w = bbox_w 56 | h = bbox_h 57 | return x_c, y_c, w, h 58 | 59 | """Simple function that adds fixed color depending on the class""" 60 | def compute_color_for_labels(label): 61 | color = [int((p * (label ** 2 - label + 1)) % 255) for p in palette] 62 | return tuple(color) 63 | 64 | """Function to Draw Bounding boxes""" 65 | def draw_boxes(img, bbox, identities=None, categories=None, names=None, offset=(0, 0)): 66 | for i, box in enumerate(bbox): 67 | x1, y1, x2, y2 = [int(i) for i in box] 68 | x1 += offset[0] 69 | x2 += offset[0] 70 | y1 += offset[1] 71 | y2 += offset[1] 72 | cat = int(categories[i]) if categories is not None else 0 73 | id = int(identities[i]) if identities is not None else 0 74 | color = compute_color_for_labels(id) 75 | data = (int((box[0]+box[2])/2),(int((box[1]+box[3])/2))) 76 | label = str(id) + ":"+ names[cat] 77 | (w, h), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 1) 78 | #cv2.rectangle(img, (x1, y1), (x2, y2), (255,144,30), 1) 79 | #cv2.rectangle(img, (x1, y1 - 20), (x1 + w, y1), (255,144,30), -1) 80 | #cv2.putText(img, label, (x1, y1 - 5),cv2.FONT_HERSHEY_SIMPLEX, 0.6, [255, 255, 255], 1) 81 | # cv2.circle(img, data, 6, color,-1) 82 | 83 | #c1, c2 = (int(xyxy[0]), int(xyxy[1])), (int(xyxy[2]), int(xyxy[3])) 84 | midpoint_x = x1+((x2-x1)/2) 85 | midpoint_y = y1+((y2-y1)/2) 86 | center_point = (int(midpoint_x),int(midpoint_y)) 87 | midpoint_color = (0,255,0) 88 | 89 | #check if object on in the start_area box 90 | in_on_start_area = cv2.pointPolygonTest(np.array(start_area,np.int32),(int(midpoint_x),int(midpoint_y)),False) 91 | 92 | if in_on_start_area >= 0: 93 | vehicles_entering[id] = time.time() 94 | 95 | if id in vehicles_entering: 96 | in_on_stop_area = cv2.pointPolygonTest(np.array(stop_area,np.int32),(int(midpoint_x),int(midpoint_y)),False) 97 | 98 | if in_on_stop_area >= 0: 99 | elapsed_time = time.time() - vehicles_entering[id] 100 | 101 | if id not in vehicles_elapsed_time: 102 | vehicles_elapsed_time[id] = elapsed_time 103 | 104 | if id in vehicles_elapsed_time: 105 | elapsed_time = vehicles_elapsed_time[id] 106 | 107 | a_speed_ms = distance / elapsed_time 108 | a_speed_kh = a_speed_ms * 3.6 109 | 110 | cv2.rectangle(img, (x1, y1), (x2, y2), (0,0,255), 1) 111 | cv2.rectangle(img, (x1, y1 - 20), (x1 + w + 100, y1), (255,144,30), -1) 112 | cv2.putText(img, "Speed "+str(int(a_speed_kh))+" km/h", (x1, y1 - 5),cv2.FONT_HERSHEY_SIMPLEX, 0.6, [255, 255, 255], 1) 113 | 114 | if len(speed_array) == 100: 115 | speed_array.clear() 116 | 117 | speed_array.append(int(a_speed_kh)) 118 | # cv2.circle(img, data, 6, color,-1) 119 | ''' 120 | else: 121 | cv2.rectangle(img, (x1, y1), (x2, y2), (255,144,30), 1) 122 | #cv2.rectangle(img, (x1, y1 - 20), (x1 + w, y1), (255,144,30), -1) 123 | cv2.putText(img, label, (x1, y1 - 5),cv2.FONT_HERSHEY_SIMPLEX, 0.6, [255, 255, 255], 1) 124 | # cv2.circle(img, data, 6, color,-1) 125 | ''' 126 | 127 | if (midpoint_x > area1_pointA[0] and midpoint_x < area1_pointD[0]) and (midpoint_y > area1_pointA[1] and midpoint_y < area1_pointD[1]): 128 | 129 | midpoint_color = (0,0,255) 130 | print('Kategori : '+str(cat)) 131 | 132 | #add vehicles counting 133 | if len(array_ids) > 0: 134 | if id not in array_ids: 135 | 
array_ids.append(id) 136 | else: 137 | array_ids.append(id) 138 | 139 | 140 | cv2.circle(img,center_point,radius=1,color=midpoint_color,thickness=2) 141 | 142 | 143 | return img 144 | #.............................................................................. 145 | 146 | 147 | def detect(save_img=False): 148 | source, weights, view_img, save_txt, imgsz, trace = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size, not opt.no_trace 149 | save_img = not opt.nosave and not source.endswith('.txt') # save inference images 150 | webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( 151 | ('rtsp://', 'rtmp://', 'http://', 'https://')) 152 | 153 | 154 | #.... Initialize SORT .... 155 | #......................... 156 | sort_max_age = 5 157 | sort_min_hits = 2 158 | sort_iou_thresh = 0.2 159 | sort_tracker = Sort(max_age=sort_max_age, 160 | min_hits=sort_min_hits, 161 | iou_threshold=sort_iou_thresh) 162 | #......................... 163 | # Directories 164 | save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run 165 | (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir 166 | 167 | # Initialize 168 | set_logging() 169 | device = select_device(opt.device) 170 | half = device.type != 'cpu' # half precision only supported on CUDA 171 | half = False 172 | 173 | # Load model 174 | model = attempt_load(weights, map_location=device) # load FP32 model 175 | stride = int(model.stride.max()) # model stride 176 | imgsz = check_img_size(imgsz, s=stride) # check img_size 177 | 178 | if trace: 179 | model = TracedModel(model, device, opt.img_size) 180 | 181 | if half: 182 | model.half() # to FP16 183 | 184 | # Second-stage classifier 185 | classify = False 186 | if classify: 187 | modelc = load_classifier(name='resnet101', n=2) # initialize 188 | modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']).to(device).eval() 189 | 190 | # Set Dataloader 191 | vid_path, vid_writer = None, None 192 | if webcam: 193 | view_img = check_imshow() 194 | cudnn.benchmark = True # set True to speed up constant image size inference 195 | dataset = LoadStreams(source, img_size=imgsz, stride=stride) 196 | else: 197 | dataset = LoadImages(source, img_size=imgsz, stride=stride) 198 | 199 | # Get names and colors 200 | names = model.module.names if hasattr(model, 'module') else model.names 201 | colors = [[random.randint(0, 255) for _ in range(3)] for _ in names] 202 | 203 | # Run inference 204 | if device.type != 'cpu': 205 | model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once 206 | old_img_w = old_img_h = imgsz 207 | old_img_b = 1 208 | 209 | count_vehicle = 0 210 | 211 | t0 = time.time() 212 | for path, img, im0s, vid_cap in dataset: 213 | img = torch.from_numpy(img).to(device) 214 | img = img.half() if half else img.float() # uint8 to fp16/32 215 | img /= 255.0 # 0 - 255 to 0.0 - 1.0 216 | if img.ndimension() == 3: 217 | img = img.unsqueeze(0) 218 | 219 | # Warmup 220 | if device.type != 'cpu' and (old_img_b != img.shape[0] or old_img_h != img.shape[2] or old_img_w != img.shape[3]): 221 | old_img_b = img.shape[0] 222 | old_img_h = img.shape[2] 223 | old_img_w = img.shape[3] 224 | for i in range(3): 225 | model(img, augment=opt.augment)[0] 226 | 227 | # Inference 228 | t1 = time_synchronized() 229 | pred = model(img, augment=opt.augment)[0] 230 | t2 = time_synchronized() 231 | 232 | # Apply NMS 233 | pred = 
non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms) 234 | t3 = time_synchronized() 235 | 236 | # Apply Classifier 237 | if classify: 238 | pred = apply_classifier(pred, modelc, img, im0s) 239 | 240 | # Process detections 241 | for i, det in enumerate(pred): # detections per image 242 | if webcam: # batch_size >= 1 243 | p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count 244 | else: 245 | p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0) 246 | 247 | p = Path(p) # to Path 248 | save_path = str(save_dir / p.name) # img.jpg 249 | txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt 250 | gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh 251 | if len(det): 252 | # Rescale boxes from img_size to im0 size 253 | det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() 254 | 255 | # Print results 256 | for c in det[:, -1].unique(): 257 | n = (det[:, -1] == c).sum() # detections per class 258 | s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string 259 | 260 | #..................USE TRACK FUNCTION.................... 261 | #pass an empty array to sort 262 | dets_to_sort = np.empty((0,6)) 263 | 264 | # NOTE: We send in detected object class too 265 | for x1,y1,x2,y2,conf,detclass in det.cpu().detach().numpy(): 266 | dets_to_sort = np.vstack((dets_to_sort, 267 | np.array([x1, y1, x2, y2, conf, detclass]))) 268 | 269 | # Run SORT 270 | tracked_dets = sort_tracker.update(dets_to_sort) 271 | tracks =sort_tracker.getTrackers() 272 | 273 | print('Tracked Detections : '+str(len(tracked_dets))) 274 | 275 | #loop over tracks 276 | ''' 277 | for track in tracks: 278 | # color = compute_color_for_labels(id) 279 | #draw tracks 280 | 281 | [cv2.line(im0, (int(track.centroidarr[i][0]), 282 | int(track.centroidarr[i][1])), 283 | (int(track.centroidarr[i+1][0]), 284 | int(track.centroidarr[i+1][1])), 285 | (0,255,0), thickness=1) 286 | for i,_ in enumerate(track.centroidarr) 287 | if i < len(track.centroidarr)-1 ] 288 | ''' 289 | 290 | # draw boxes for visualization 291 | if len(tracked_dets)>0: 292 | bbox_xyxy = tracked_dets[:,:4] 293 | identities = tracked_dets[:, 8] 294 | categories = tracked_dets[:, 4] 295 | draw_boxes(im0, bbox_xyxy, identities, categories, names) 296 | print('Bbox xy count : '+str(len(bbox_xyxy))) 297 | #........................................................ 298 | 299 | # Print time (inference + NMS) 300 | print(f'{s}Done. 
({(1E3 * (t2 - t1)):.1f}ms) Inference, ({(1E3 * (t3 - t2)):.1f}ms) NMS') 301 | 302 | cv2.line(im0,area1_pointA,area1_pointB,(0,255,0),2) 303 | cv2.line(im0,area1_pointC,area1_pointD,(0,255,0),2) 304 | 305 | color = (0,255,0) 306 | thickness = 2 307 | fontScale = 1 308 | font = cv2.FONT_HERSHEY_SIMPLEX 309 | org = (160,570) 310 | 311 | 312 | if (count_vehicle == 0): 313 | counting = len(array_ids) 314 | else: 315 | if (counting < 100): 316 | counting = len(array_ids) 317 | else: 318 | counting = modulo_counting + len(array_ids) 319 | if(len(array_ids)%100 == 0): 320 | modulo_counting = modulo_counting + 100 321 | array_ids.clear() 322 | 323 | cv2.putText(im0, 'Vehicle Counting = '+str(counting), org, font, fontScale, color, thickness, cv2.LINE_AA) 324 | 325 | average_speed = 0 326 | if len(speed_array) != 0: 327 | total_speed = 0 328 | for speed in speed_array: 329 | total_speed = total_speed + speed 330 | 331 | average_speed = int(total_speed / len(speed_array)) 332 | 333 | cv2.putText(im0, 'Average Speed = '+str(int(average_speed))+' km/h', (160,620), font, fontScale, color, thickness, cv2.LINE_AA) 334 | 335 | #draw start_area 336 | cv2.polylines(im0,[np.array(start_area,np.int32)],True,color,2) 337 | 338 | #draw stop_area 339 | cv2.polylines(im0,[np.array(stop_area,np.int32)],True,color,2) 340 | 341 | # Stream results 342 | if view_img: 343 | cv2.imshow(str(p), im0) 344 | cv2.waitKey(1) # 1 millisecond 345 | 346 | # Save results (image with detections) 347 | if save_img: 348 | if dataset.mode == 'image': 349 | cv2.imwrite(save_path, im0) 350 | print(f" The image with the result is saved in: {save_path}") 351 | else: # 'video' or 'stream' 352 | if vid_path != save_path: # new video 353 | vid_path = save_path 354 | if isinstance(vid_writer, cv2.VideoWriter): 355 | vid_writer.release() # release previous video writer 356 | if vid_cap: # video 357 | fps = vid_cap.get(cv2.CAP_PROP_FPS) 358 | w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) 359 | h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) 360 | else: # stream 361 | fps, w, h = 30, im0.shape[1], im0.shape[0] 362 | save_path += '.mp4' 363 | vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) 364 | vid_writer.write(im0) 365 | 366 | if save_txt or save_img: 367 | s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' 368 | #print(f"Results saved to {save_dir}{s}") 369 | 370 | print(f'Done. ({time.time() - t0:.3f}s)') 371 | 372 | 373 | if __name__ == '__main__': 374 | parser = argparse.ArgumentParser() 375 | parser.add_argument('--weights', nargs='+', type=str, default='yolov7.pt', help='model.pt path(s)') 376 | parser.add_argument('--source', type=str, default='inference/images', help='source') # file/folder, 0 for webcam 377 | parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)') 378 | parser.add_argument('--conf-thres', type=float, default=0.65, help='object confidence threshold') 379 | parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS') 380 | parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') 381 | parser.add_argument('--view-img', action='store_true', help='display results') 382 | parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') 383 | parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') 384 | parser.add_argument('--nosave', action='store_true', help='do not save images/videos') 385 | parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') 386 | parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') 387 | parser.add_argument('--augment', action='store_true', help='augmented inference') 388 | parser.add_argument('--update', action='store_true', help='update all models') 389 | parser.add_argument('--project', default='runs/detect', help='save results to project/name') 390 | parser.add_argument('--name', default='object_tracking', help='save results to project/name') 391 | parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') 392 | parser.add_argument('--no-trace', action='store_true', help="don't trace model") 393 | opt = parser.parse_args() 394 | print(opt) 395 | #check_requirements(exclude=('pycocotools', 'thop')) 396 | 397 | with torch.no_grad(): 398 | if opt.update: # update all models (to fix SourceChangeWarning) 399 | for opt.weights in ['yolov7.pt']: 400 | detect() 401 | strip_optimizer(opt.weights) 402 | else: 403 | detect() -------------------------------------------------------------------------------- /utils/plots.py: -------------------------------------------------------------------------------- 1 | # Plotting utils 2 | 3 | import glob 4 | import math 5 | import os 6 | import random 7 | from copy import copy 8 | from pathlib import Path 9 | 10 | import cv2 11 | import matplotlib 12 | import matplotlib.pyplot as plt 13 | import numpy as np 14 | import pandas as pd 15 | import seaborn as sns 16 | import torch 17 | import yaml 18 | from PIL import Image, ImageDraw, ImageFont 19 | from scipy.signal import butter, filtfilt 20 | 21 | from utils.general import xywh2xyxy, xyxy2xywh 22 | from utils.metrics import fitness 23 | 24 | # Settings 25 | matplotlib.rc('font', **{'size': 11}) 26 | matplotlib.use('Agg') # for writing to files only 27 | 28 | 29 | def color_list(): 30 | # Return first 10 plt colors as (r,g,b) https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb 31 | def hex2rgb(h): 32 | return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) 33 | 34 | return [hex2rgb(h) for h in matplotlib.colors.TABLEAU_COLORS.values()] # or BASE_ (8), CSS4_ (148), XKCD_ (949) 35 | 36 | 37 | def hist2d(x, y, n=100): 38 | # 2d histogram used in labels.png and evolve.png 39 | xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) 40 | hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges)) 41 | xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1) 42 | yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1) 43 | return np.log(hist[xidx, yidx]) 44 | 45 | 46 | def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): 47 | # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy 48 | def butter_lowpass(cutoff, fs, order): 49 | nyq = 0.5 * fs 50 | normal_cutoff = cutoff / nyq 51 | return butter(order, normal_cutoff, btype='low', analog=False) 52 | 53 | b, a = butter_lowpass(cutoff, fs, order=order) 54 | return filtfilt(b, a, data) # 
forward-backward filter 55 | 56 | 57 | def plot_one_box(x, img, color=None, label=None, line_thickness=3): 58 | # Plots one bounding box on image img 59 | tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness 60 | color = color or [random.randint(0, 255) for _ in range(3)] 61 | c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3])) 62 | cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA) 63 | if label: 64 | tf = max(tl - 1, 1) # font thickness 65 | t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] 66 | c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3 67 | cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled 68 | cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) 69 | 70 | 71 | def plot_one_box_PIL(box, img, color=None, label=None, line_thickness=None): 72 | img = Image.fromarray(img) 73 | draw = ImageDraw.Draw(img) 74 | line_thickness = line_thickness or max(int(min(img.size) / 200), 2) 75 | draw.rectangle(box, width=line_thickness, outline=tuple(color)) # plot 76 | if label: 77 | fontsize = max(round(max(img.size) / 40), 12) 78 | font = ImageFont.truetype("Arial.ttf", fontsize) 79 | txt_width, txt_height = font.getsize(label) 80 | draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=tuple(color)) 81 | draw.text((box[0], box[1] - txt_height + 1), label, fill=(255, 255, 255), font=font) 82 | return np.asarray(img) 83 | 84 | 85 | def plot_wh_methods(): # from utils.plots import *; plot_wh_methods() 86 | # Compares the two methods for width-height anchor multiplication 87 | # https://github.com/ultralytics/yolov3/issues/168 88 | x = np.arange(-4.0, 4.0, .1) 89 | ya = np.exp(x) 90 | yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2 91 | 92 | fig = plt.figure(figsize=(6, 3), tight_layout=True) 93 | plt.plot(x, ya, '.-', label='YOLOv3') 94 | plt.plot(x, yb ** 2, '.-', label='YOLOR ^2') 95 | plt.plot(x, yb ** 1.6, '.-', label='YOLOR ^1.6') 96 | plt.xlim(left=-4, right=4) 97 | plt.ylim(bottom=0, top=6) 98 | plt.xlabel('input') 99 | plt.ylabel('output') 100 | plt.grid() 101 | plt.legend() 102 | fig.savefig('comparison.png', dpi=200) 103 | 104 | 105 | def output_to_target(output): 106 | # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] 107 | targets = [] 108 | for i, o in enumerate(output): 109 | for *box, conf, cls in o.cpu().numpy(): 110 | targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf]) 111 | return np.array(targets) 112 | 113 | 114 | def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16): 115 | # Plot image grid with labels 116 | 117 | if isinstance(images, torch.Tensor): 118 | images = images.cpu().float().numpy() 119 | if isinstance(targets, torch.Tensor): 120 | targets = targets.cpu().numpy() 121 | 122 | # un-normalise 123 | if np.max(images[0]) <= 1: 124 | images *= 255 125 | 126 | tl = 3 # line thickness 127 | tf = max(tl - 1, 1) # font thickness 128 | bs, _, h, w = images.shape # batch size, _, height, width 129 | bs = min(bs, max_subplots) # limit plot images 130 | ns = np.ceil(bs ** 0.5) # number of subplots (square) 131 | 132 | # Check if we should resize 133 | scale_factor = max_size / max(h, w) 134 | if scale_factor < 1: 135 | h = math.ceil(scale_factor * h) 136 | w = math.ceil(scale_factor * w) 137 | 138 | colors = color_list() # list of colors 139 | mosaic = np.full((int(ns * h), int(ns * w), 3), 255, 
dtype=np.uint8) # init 140 | for i, img in enumerate(images): 141 | if i == max_subplots: # if last batch has fewer images than we expect 142 | break 143 | 144 | block_x = int(w * (i // ns)) 145 | block_y = int(h * (i % ns)) 146 | 147 | img = img.transpose(1, 2, 0) 148 | if scale_factor < 1: 149 | img = cv2.resize(img, (w, h)) 150 | 151 | mosaic[block_y:block_y + h, block_x:block_x + w, :] = img 152 | if len(targets) > 0: 153 | image_targets = targets[targets[:, 0] == i] 154 | boxes = xywh2xyxy(image_targets[:, 2:6]).T 155 | classes = image_targets[:, 1].astype('int') 156 | labels = image_targets.shape[1] == 6 # labels if no conf column 157 | conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred) 158 | 159 | if boxes.shape[1]: 160 | if boxes.max() <= 1.01: # if normalized with tolerance 0.01 161 | boxes[[0, 2]] *= w # scale to pixels 162 | boxes[[1, 3]] *= h 163 | elif scale_factor < 1: # absolute coords need scale if image scales 164 | boxes *= scale_factor 165 | boxes[[0, 2]] += block_x 166 | boxes[[1, 3]] += block_y 167 | for j, box in enumerate(boxes.T): 168 | cls = int(classes[j]) 169 | color = colors[cls % len(colors)] 170 | cls = names[cls] if names else cls 171 | if labels or conf[j] > 0.25: # 0.25 conf thresh 172 | label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j]) 173 | plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl) 174 | 175 | # Draw image filename labels 176 | if paths: 177 | label = Path(paths[i]).name[:40] # trim to 40 char 178 | t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] 179 | cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf, 180 | lineType=cv2.LINE_AA) 181 | 182 | # Image border 183 | cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3) 184 | 185 | if fname: 186 | r = min(1280. 
/ max(h, w) / ns, 1.0) # ratio to limit image size 187 | mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA) 188 | # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save 189 | Image.fromarray(mosaic).save(fname) # PIL save 190 | return mosaic 191 | 192 | 193 | def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): 194 | # Plot LR simulating training for full epochs 195 | optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals 196 | y = [] 197 | for _ in range(epochs): 198 | scheduler.step() 199 | y.append(optimizer.param_groups[0]['lr']) 200 | plt.plot(y, '.-', label='LR') 201 | plt.xlabel('epoch') 202 | plt.ylabel('LR') 203 | plt.grid() 204 | plt.xlim(0, epochs) 205 | plt.ylim(0) 206 | plt.savefig(Path(save_dir) / 'LR.png', dpi=200) 207 | plt.close() 208 | 209 | 210 | def plot_test_txt(): # from utils.plots import *; plot_test() 211 | # Plot test.txt histograms 212 | x = np.loadtxt('test.txt', dtype=np.float32) 213 | box = xyxy2xywh(x[:, :4]) 214 | cx, cy = box[:, 0], box[:, 1] 215 | 216 | fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True) 217 | ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0) 218 | ax.set_aspect('equal') 219 | plt.savefig('hist2d.png', dpi=300) 220 | 221 | fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True) 222 | ax[0].hist(cx, bins=600) 223 | ax[1].hist(cy, bins=600) 224 | plt.savefig('hist1d.png', dpi=200) 225 | 226 | 227 | def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() 228 | # Plot targets.txt histograms 229 | x = np.loadtxt('targets.txt', dtype=np.float32).T 230 | s = ['x targets', 'y targets', 'width targets', 'height targets'] 231 | fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True) 232 | ax = ax.ravel() 233 | for i in range(4): 234 | ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std())) 235 | ax[i].legend() 236 | ax[i].set_title(s[i]) 237 | plt.savefig('targets.jpg', dpi=200) 238 | 239 | 240 | def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_txt() 241 | # Plot study.txt generated by test.py 242 | fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True) 243 | # ax = ax.ravel() 244 | 245 | fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) 246 | # for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolor-p6', 'yolor-w6', 'yolor-e6', 'yolor-d6']]: 247 | for f in sorted(Path(path).glob('study*.txt')): 248 | y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T 249 | x = np.arange(y.shape[1]) if x is None else np.array(x) 250 | s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)'] 251 | # for i in range(7): 252 | # ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) 253 | # ax[i].set_title(s[i]) 254 | 255 | j = y[3].argmax() + 1 256 | ax2.plot(y[6, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8, 257 | label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO')) 258 | 259 | ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], 260 | 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet') 261 | 262 | ax2.grid(alpha=0.2) 263 | ax2.set_yticks(np.arange(20, 60, 5)) 264 | ax2.set_xlim(0, 57) 265 | ax2.set_ylim(30, 55) 266 | ax2.set_xlabel('GPU Speed (ms/img)') 267 | ax2.set_ylabel('COCO AP val') 268 | ax2.legend(loc='lower right') 269 | plt.savefig(str(Path(path).name) + '.png', dpi=300) 270 | 271 | 272 | def 
plot_labels(labels, names=(), save_dir=Path(''), loggers=None): 273 | # plot dataset labels 274 | print('Plotting labels... ') 275 | c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes 276 | nc = int(c.max() + 1) # number of classes 277 | colors = color_list() 278 | x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) 279 | 280 | # seaborn correlogram 281 | sns.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) 282 | plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200) 283 | plt.close() 284 | 285 | # matplotlib labels 286 | matplotlib.use('svg') # faster 287 | ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() 288 | ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) 289 | ax[0].set_ylabel('instances') 290 | if 0 < len(names) < 30: 291 | ax[0].set_xticks(range(len(names))) 292 | ax[0].set_xticklabels(names, rotation=90, fontsize=10) 293 | else: 294 | ax[0].set_xlabel('classes') 295 | sns.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) 296 | sns.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) 297 | 298 | # rectangles 299 | labels[:, 1:3] = 0.5 # center 300 | labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000 301 | img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255) 302 | for cls, *box in labels[:1000]: 303 | ImageDraw.Draw(img).rectangle(box, width=1, outline=colors[int(cls) % 10]) # plot 304 | ax[1].imshow(img) 305 | ax[1].axis('off') 306 | 307 | for a in [0, 1, 2, 3]: 308 | for s in ['top', 'right', 'left', 'bottom']: 309 | ax[a].spines[s].set_visible(False) 310 | 311 | plt.savefig(save_dir / 'labels.jpg', dpi=200) 312 | matplotlib.use('Agg') 313 | plt.close() 314 | 315 | # loggers 316 | for k, v in (loggers or {}).items(): # guard against the default loggers=None 317 | if k == 'wandb' and v: 318 | v.log({"Labels": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]}, commit=False) 319 | 320 | 321 | def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution() 322 | # Plot hyperparameter evolution results in evolve.txt 323 | with open(yaml_file) as f: 324 | hyp = yaml.load(f, Loader=yaml.SafeLoader) 325 | x = np.loadtxt('evolve.txt', ndmin=2) 326 | f = fitness(x) 327 | # weights = (f - f.min()) ** 2 # for weighted results 328 | plt.figure(figsize=(10, 12), tight_layout=True) 329 | matplotlib.rc('font', **{'size': 8}) 330 | for i, (k, v) in enumerate(hyp.items()): 331 | y = x[:, i + 7] 332 | # mu = (y * weights).sum() / weights.sum() # best weighted result 333 | mu = y[f.argmax()] # best single result 334 | plt.subplot(6, 5, i + 1) 335 | plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none') 336 | plt.plot(mu, f.max(), 'k+', markersize=15) 337 | plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters 338 | if i % 5 != 0: 339 | plt.yticks([]) 340 | print('%15s: %.3g' % (k, mu)) 341 | plt.savefig('evolve.png', dpi=200) 342 | print('\nPlot saved as evolve.png') 343 | 344 | 345 | def profile_idetection(start=0, stop=0, labels=(), save_dir=''): 346 | # Plot iDetection '*.txt' per-image logs. 
from utils.plots import *; profile_idetection() 347 | ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() 348 | s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS'] 349 | files = list(Path(save_dir).glob('frames*.txt')) 350 | for fi, f in enumerate(files): 351 | try: 352 | results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows 353 | n = results.shape[1] # number of rows 354 | x = np.arange(start, min(stop, n) if stop else n) 355 | results = results[:, x] 356 | t = (results[0] - results[0].min()) # set t0=0s 357 | results[0] = x 358 | for i, a in enumerate(ax): 359 | if i < len(results): 360 | label = labels[fi] if len(labels) else f.stem.replace('frames_', '') 361 | a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5) 362 | a.set_title(s[i]) 363 | a.set_xlabel('time (s)') 364 | # if fi == len(files) - 1: 365 | # a.set_ylim(bottom=0) 366 | for side in ['top', 'right']: 367 | a.spines[side].set_visible(False) 368 | else: 369 | a.remove() 370 | except Exception as e: 371 | print('Warning: Plotting error for %s; %s' % (f, e)) 372 | 373 | ax[1].legend() 374 | plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) 375 | 376 | 377 | def plot_results_overlay(start=0, stop=0): # from utils.plots import *; plot_results_overlay() 378 | # Plot training 'results*.txt', overlaying train and val losses 379 | s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95'] # legends 380 | t = ['Box', 'Objectness', 'Classification', 'P-R', 'mAP-F1'] # titles 381 | for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')): 382 | results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T 383 | n = results.shape[1] # number of rows 384 | x = range(start, min(stop, n) if stop else n) 385 | fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True) 386 | ax = ax.ravel() 387 | for i in range(5): 388 | for j in [i, i + 5]: 389 | y = results[j, x] 390 | ax[i].plot(x, y, marker='.', label=s[j]) 391 | # y_smooth = butter_lowpass_filtfilt(y) 392 | # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j]) 393 | 394 | ax[i].set_title(t[i]) 395 | ax[i].legend() 396 | ax[i].set_ylabel(f) if i == 0 else None # add filename 397 | fig.savefig(f.replace('.txt', '.png'), dpi=200) 398 | 399 | 400 | def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''): 401 | # Plot training 'results*.txt'. from utils.plots import *; plot_results(save_dir='runs/train/exp') 402 | fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) 403 | ax = ax.ravel() 404 | s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall', 405 | 'val Box', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95'] 406 | if bucket: 407 | # files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id] 408 | files = ['results%g.txt' % x for x in id] 409 | c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id) 410 | os.system(c) 411 | else: 412 | files = list(Path(save_dir).glob('results*.txt')) 413 | assert len(files), 'No results.txt files found in %s, nothing to plot.' 
% os.path.abspath(save_dir) 414 | for fi, f in enumerate(files): 415 | try: 416 | results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T 417 | n = results.shape[1] # number of rows 418 | x = range(start, min(stop, n) if stop else n) 419 | for i in range(10): 420 | y = results[i, x] 421 | if i in [0, 1, 2, 5, 6, 7]: 422 | y[y == 0] = np.nan # don't show zero loss values 423 | # y /= y[0] # normalize 424 | label = labels[fi] if len(labels) else f.stem 425 | ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8) 426 | ax[i].set_title(s[i]) 427 | # if i in [5, 6, 7]: # share train and val loss y axes 428 | # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) 429 | except Exception as e: 430 | print('Warning: Plotting error for %s; %s' % (f, e)) 431 | 432 | ax[1].legend() 433 | fig.savefig(Path(save_dir) / 'results.png', dpi=200) 434 | 435 | 436 | def output_to_keypoint(output): 437 | # Convert model output to target format [batch_id, class_id, x, y, w, h, conf, kpts...] 438 | targets = [] 439 | for i, o in enumerate(output): 440 | kpts = o[:, 6:] 441 | o = o[:, :6] 442 | for index, (*box, conf, cls) in enumerate(o.detach().cpu().numpy()): 443 | targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf, *list(kpts.detach().cpu().numpy()[index])]) 444 | return np.array(targets) 445 | 446 | 447 | def plot_skeleton_kpts(im, kpts, steps, orig_shape=None): 448 | # Plot the skeleton and keypoints for the COCO dataset 449 | palette = np.array([[255, 128, 0], [255, 153, 51], [255, 178, 102], 450 | [230, 230, 0], [255, 153, 255], [153, 204, 255], 451 | [255, 102, 255], [255, 51, 255], [102, 178, 255], 452 | [51, 153, 255], [255, 153, 153], [255, 102, 102], 453 | [255, 51, 51], [153, 255, 153], [102, 255, 102], 454 | [51, 255, 51], [0, 255, 0], [0, 0, 255], [255, 0, 0], 455 | [255, 255, 255]]) 456 | 457 | skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], 458 | [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3], 459 | [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]] 460 | 461 | pose_limb_color = palette[[9, 9, 9, 9, 7, 7, 7, 0, 0, 0, 0, 0, 16, 16, 16, 16, 16, 16, 16]] 462 | pose_kpt_color = palette[[16, 16, 16, 16, 16, 0, 0, 0, 0, 0, 0, 9, 9, 9, 9, 9, 9]] 463 | radius = 5 464 | num_kpts = len(kpts) // steps 465 | 466 | for kid in range(num_kpts): 467 | r, g, b = pose_kpt_color[kid] 468 | x_coord, y_coord = kpts[steps * kid], kpts[steps * kid + 1] 469 | if not (x_coord % 640 == 0 or y_coord % 640 == 0): 470 | if steps == 3: 471 | conf = kpts[steps * kid + 2] 472 | if conf < 0.5: 473 | continue 474 | cv2.circle(im, (int(x_coord), int(y_coord)), radius, (int(r), int(g), int(b)), -1) 475 | 476 | for sk_id, sk in enumerate(skeleton): 477 | r, g, b = pose_limb_color[sk_id] 478 | pos1 = (int(kpts[(sk[0]-1)*steps]), int(kpts[(sk[0]-1)*steps+1])) 479 | pos2 = (int(kpts[(sk[1]-1)*steps]), int(kpts[(sk[1]-1)*steps+1])) 480 | if steps == 3: 481 | conf1 = kpts[(sk[0]-1)*steps+2] 482 | conf2 = kpts[(sk[1]-1)*steps+2] 483 | if conf1 < 0.5 or conf2 < 0.5: 484 | continue 485 | if pos1[0] % 640 == 0 or pos1[1] % 640 == 0 or pos1[0] < 0 or pos1[1] < 0: 486 | continue 487 | if pos2[0] % 640 == 0 or pos2[1] % 640 == 0 or pos2[0] < 0 or pos2[1] < 0: 488 | continue 489 | cv2.line(im, pos1, pos2, (int(r), int(g), int(b)), thickness=2) 490 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 
2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. 
To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 
133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 
196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 
256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 
309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 
360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 
421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 
476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. 
You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 
583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 633 | 634 | 635 | Copyright (C) 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program. If not, see . 649 | 650 | Also add information on how to contact you by electronic and paper mail. 
651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | <program> Copyright (C) <year> <name of author> 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License. Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | <https://www.gnu.org/licenses/>. 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs. If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library. If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License. But first, please read 674 | <https://www.gnu.org/licenses/why-not-lgpl.html>. 675 | -------------------------------------------------------------------------------- /utils/general.py: -------------------------------------------------------------------------------- 1 | # YOLOR general utils 2 | 3 | import glob 4 | import logging 5 | import math 6 | import os 7 | import platform 8 | import random 9 | import re 10 | import subprocess 11 | import time 12 | from pathlib import Path 13 | 14 | import cv2 15 | import numpy as np 16 | import pandas as pd 17 | import torch 18 | import torchvision 19 | import yaml 20 | 21 | from utils.google_utils import gsutil_getsize 22 | from utils.metrics import fitness 23 | from utils.torch_utils import init_torch_seeds 24 | 25 | # Settings 26 | torch.set_printoptions(linewidth=320, precision=5, profile='long') 27 | np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 28 | pd.options.display.max_columns = 10 29 | cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) 30 | os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads 31 | 32 | 33 | def set_logging(rank=-1): 34 | logging.basicConfig( 35 | format="%(message)s", 36 | level=logging.INFO if rank in [-1, 0] else logging.WARN) 37 | 38 | 39 | def init_seeds(seed=0): 40 | # Initialize random number generator (RNG) seeds 41 | random.seed(seed) 42 | np.random.seed(seed) 43 | init_torch_seeds(seed) 44 | 45 | 46 | def get_latest_run(search_dir='.'): 47 | # Return path to most recent 'last.pt' in /runs (i.e.
to --resume from) 48 | last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True) 49 | return max(last_list, key=os.path.getctime) if last_list else '' 50 | 51 | 52 | def isdocker(): 53 | # Is environment a Docker container 54 | return Path('/workspace').exists() # or Path('/.dockerenv').exists() 55 | 56 | 57 | def emojis(str=''): 58 | # Return platform-dependent emoji-safe version of string 59 | return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str 60 | 61 | 62 | def check_online(): 63 | # Check internet connectivity 64 | import socket 65 | try: 66 | socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility 67 | return True 68 | except OSError: 69 | return False 70 | 71 | 72 | def check_git_status(): 73 | # Recommend 'git pull' if code is out of date 74 | print(colorstr('github: '), end='') 75 | try: 76 | assert Path('.git').exists(), 'skipping check (not a git repository)' 77 | assert not isdocker(), 'skipping check (Docker image)' 78 | assert check_online(), 'skipping check (offline)' 79 | 80 | cmd = 'git fetch && git config --get remote.origin.url' 81 | url = subprocess.check_output(cmd, shell=True).decode().strip().rstrip('.git') # github repo url 82 | branch = subprocess.check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out 83 | n = int(subprocess.check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind 84 | if n > 0: 85 | s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. " \ 86 | f"Use 'git pull' to update or 'git clone {url}' to download latest." 87 | else: 88 | s = f'up to date with {url} ✅' 89 | print(emojis(s)) # emoji-safe 90 | except Exception as e: 91 | print(e) 92 | 93 | 94 | def check_requirements(requirements='requirements.txt', exclude=()): 95 | # Check installed dependencies meet requirements (pass *.txt file or list of packages) 96 | import pkg_resources as pkg 97 | prefix = colorstr('red', 'bold', 'requirements:') 98 | if isinstance(requirements, (str, Path)): # requirements.txt file 99 | file = Path(requirements) 100 | if not file.exists(): 101 | print(f"{prefix} {file.resolve()} not found, check failed.") 102 | return 103 | requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude] 104 | else: # list or tuple of packages 105 | requirements = [x for x in requirements if x not in exclude] 106 | 107 | n = 0 # number of package updates 108 | for r in requirements: 109 | try: 110 | pkg.require(r) 111 | except Exception as e: # DistributionNotFound or VersionConflict if requirements not met 112 | n += 1 113 | print(f"{prefix} {e.req} not found and is required by YOLOR, attempting auto-update...") 114 | print(subprocess.check_output(f"pip install '{e.req}'", shell=True).decode()) 115 | 116 | if n: # if packages updated 117 | source = file.resolve() if 'file' in locals() else requirements 118 | s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ 119 | f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" 120 | print(emojis(s)) # emoji-safe 121 | 122 | 123 | def check_img_size(img_size, s=32): 124 | # Verify img_size is a multiple of stride s 125 | new_size = make_divisible(img_size, int(s)) # ceil gs-multiple 126 | if new_size != img_size: 127 | print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size)) 128 | return new_size 129 | 130 | 131 | def check_imshow(): 132 | #
Check if environment supports image displays 133 | try: 134 | assert not isdocker(), 'cv2.imshow() is disabled in Docker environments' 135 | cv2.imshow('test', np.zeros((1, 1, 3))) 136 | cv2.waitKey(1) 137 | cv2.destroyAllWindows() 138 | cv2.waitKey(1) 139 | return True 140 | except Exception as e: 141 | print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') 142 | return False 143 | 144 | 145 | def check_file(file): 146 | # Search for file if not found 147 | if Path(file).is_file() or file == '': 148 | return file 149 | else: 150 | files = glob.glob('./**/' + file, recursive=True) # find file 151 | assert len(files), f'File Not Found: {file}' # assert file was found 152 | assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique 153 | return files[0] # return file 154 | 155 | 156 | def check_dataset(dict): 157 | # Download dataset if not found locally 158 | val, s = dict.get('val'), dict.get('download') 159 | if val and len(val): 160 | val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path 161 | if not all(x.exists() for x in val): 162 | print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()]) 163 | if s and len(s): # download script 164 | print('Downloading %s ...' % s) 165 | if s.startswith('http') and s.endswith('.zip'): # URL 166 | f = Path(s).name # filename 167 | torch.hub.download_url_to_file(s, f) 168 | r = os.system('unzip -q %s -d ../ && rm %s' % (f, f)) # unzip 169 | else: # bash script 170 | r = os.system(s) 171 | print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure')) # analyze return value 172 | else: 173 | raise Exception('Dataset not found.') 174 | 175 | 176 | def make_divisible(x, divisor): 177 | # Returns x evenly divisible by divisor 178 | return math.ceil(x / divisor) * divisor 179 | 180 | 181 | def clean_str(s): 182 | # Cleans a string by replacing special characters with underscore _ 183 | return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s) 184 | 185 | 186 | def one_cycle(y1=0.0, y2=1.0, steps=100): 187 | # lambda function for sinusoidal ramp from y1 to y2 188 | return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 189 | 190 | 191 | def colorstr(*input): 192 | # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. 
colorstr('blue', 'hello world') 193 | *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string 194 | colors = {'black': '\033[30m', # basic colors 195 | 'red': '\033[31m', 196 | 'green': '\033[32m', 197 | 'yellow': '\033[33m', 198 | 'blue': '\033[34m', 199 | 'magenta': '\033[35m', 200 | 'cyan': '\033[36m', 201 | 'white': '\033[37m', 202 | 'bright_black': '\033[90m', # bright colors 203 | 'bright_red': '\033[91m', 204 | 'bright_green': '\033[92m', 205 | 'bright_yellow': '\033[93m', 206 | 'bright_blue': '\033[94m', 207 | 'bright_magenta': '\033[95m', 208 | 'bright_cyan': '\033[96m', 209 | 'bright_white': '\033[97m', 210 | 'end': '\033[0m', # misc 211 | 'bold': '\033[1m', 212 | 'underline': '\033[4m'} 213 | return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] 214 | 215 | 216 | def labels_to_class_weights(labels, nc=80): 217 | # Get class weights (inverse frequency) from training labels 218 | if labels[0] is None: # no labels loaded 219 | return torch.Tensor() 220 | 221 | labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO 222 | classes = labels[:, 0].astype(np.int) # labels = [class xywh] 223 | weights = np.bincount(classes, minlength=nc) # occurrences per class 224 | 225 | # Prepend gridpoint count (for uCE training) 226 | # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image 227 | # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start 228 | 229 | weights[weights == 0] = 1 # replace empty bins with 1 230 | weights = 1 / weights # number of targets per class 231 | weights /= weights.sum() # normalize 232 | return torch.from_numpy(weights) 233 | 234 | 235 | def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)): 236 | # Produces image weights based on class_weights and image contents 237 | class_counts = np.array([np.bincount(x[:, 0].astype(np.int), minlength=nc) for x in labels]) 238 | image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1) 239 | # index = random.choices(range(n), weights=image_weights, k=1) # weight image sample 240 | return image_weights 241 | 242 | 243 | def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) 244 | # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/ 245 | # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n') 246 | # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n') 247 | # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco 248 | # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet 249 | x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 250 | 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 251 | 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90] 252 | return x 253 | 254 | 255 | def xyxy2xywh(x): 256 | # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right 257 | y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) 258 | y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center 259 | y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center 260 | y[:, 2] = x[:, 2] - x[:, 0] # width 261 | y[:, 3] = x[:, 3] - x[:, 1] # height 262 | return y 263 | 264 | 265 | def xywh2xyxy(x): 266 | # Convert nx4 boxes from [x, y, w, h] to 
[x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right 267 | y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) 268 | y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x 269 | y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y 270 | y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x 271 | y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y 272 | return y 273 | 274 | 275 | def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): 276 | # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right 277 | y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) 278 | y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x 279 | y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y 280 | y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x 281 | y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y 282 | return y 283 | 284 | 285 | def xyn2xy(x, w=640, h=640, padw=0, padh=0): 286 | # Convert normalized segments into pixel segments, shape (n,2) 287 | y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) 288 | y[:, 0] = w * x[:, 0] + padw # top left x 289 | y[:, 1] = h * x[:, 1] + padh # top left y 290 | return y 291 | 292 | 293 | def segment2box(segment, width=640, height=640): 294 | # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy) 295 | x, y = segment.T # segment xy 296 | inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height) 297 | x, y = x[inside], y[inside] 298 | return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy 299 | 300 | 301 | def segments2boxes(segments): 302 | # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh) 303 | boxes = [] 304 | for s in segments: 305 | x, y = s.T # segment xy 306 | boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy 307 | return xyxy2xywh(np.array(boxes)) # cls, xywh 308 | 309 | 310 | def resample_segments(segments, n=1000): 311 | # Up-sample an (n,2) segment 312 | for i, s in enumerate(segments): 313 | x = np.linspace(0, len(s) - 1, n) 314 | xp = np.arange(len(s)) 315 | segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy 316 | return segments 317 | 318 | 319 | def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): 320 | # Rescale coords (xyxy) from img1_shape to img0_shape 321 | if ratio_pad is None: # calculate from img0_shape 322 | gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new 323 | pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding 324 | else: 325 | gain = ratio_pad[0][0] 326 | pad = ratio_pad[1] 327 | 328 | coords[:, [0, 2]] -= pad[0] # x padding 329 | coords[:, [1, 3]] -= pad[1] # y padding 330 | coords[:, :4] /= gain 331 | clip_coords(coords, img0_shape) 332 | return coords 333 | 334 | 335 | def clip_coords(boxes, img_shape): 336 | # Clip xyxy bounding boxes to image shape (height, width) 337 | boxes[:, 0].clamp_(0, img_shape[1]) # x1 338 | boxes[:, 1].clamp_(0, img_shape[0]) # y1 339 | boxes[:, 2].clamp_(0, img_shape[1]) # x2 340 | boxes[:, 3].clamp_(0, img_shape[0]) # y2 341 | 342 | 343 | def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): 344 | # Returns the IoU of box1 to box2.
box1 is 4, box2 is nx4 345 | box2 = box2.T 346 | 347 | # Get the coordinates of bounding boxes 348 | if x1y1x2y2: # x1, y1, x2, y2 = box1 349 | b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] 350 | b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] 351 | else: # transform from xywh to xyxy 352 | b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 353 | b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 354 | b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 355 | b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 356 | 357 | # Intersection area 358 | inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ 359 | (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) 360 | 361 | # Union Area 362 | w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps 363 | w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps 364 | union = w1 * h1 + w2 * h2 - inter + eps 365 | 366 | iou = inter / union 367 | 368 | if GIoU or DIoU or CIoU: 369 | cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width 370 | ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height 371 | if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 372 | c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared 373 | rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + 374 | (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared 375 | if DIoU: 376 | return iou - rho2 / c2 # DIoU 377 | elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 378 | v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / (h2 + eps)) - torch.atan(w1 / (h1 + eps)), 2) 379 | with torch.no_grad(): 380 | alpha = v / (v - iou + (1 + eps)) 381 | return iou - (rho2 / c2 + v * alpha) # CIoU 382 | else: # GIoU https://arxiv.org/pdf/1902.09630.pdf 383 | c_area = cw * ch + eps # convex area 384 | return iou - (c_area - union) / c_area # GIoU 385 | else: 386 | return iou # IoU 387 | 388 | 389 | 390 | 391 | def bbox_alpha_iou(box1, box2, x1y1x2y2=False, GIoU=False, DIoU=False, CIoU=False, alpha=2, eps=1e-9): 392 | # Returns the IoU of box1 to box2.
box1 is 4, box2 is nx4 393 | box2 = box2.T 394 | 395 | # Get the coordinates of bounding boxes 396 | if x1y1x2y2: # x1, y1, x2, y2 = box1 397 | b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] 398 | b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] 399 | else: # transform from xywh to xyxy 400 | b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 401 | b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 402 | b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 403 | b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 404 | 405 | # Intersection area 406 | inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ 407 | (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) 408 | 409 | # Union Area 410 | w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps 411 | w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps 412 | union = w1 * h1 + w2 * h2 - inter + eps 413 | 414 | # change iou into pow(iou+eps) 415 | # iou = inter / union 416 | iou = torch.pow(inter/union + eps, alpha) 417 | # beta = 2 * alpha 418 | if GIoU or DIoU or CIoU: 419 | cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width 420 | ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height 421 | if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 422 | c2 = (cw ** 2 + ch ** 2) ** alpha + eps # convex diagonal 423 | rho_x = torch.abs(b2_x1 + b2_x2 - b1_x1 - b1_x2) 424 | rho_y = torch.abs(b2_y1 + b2_y2 - b1_y1 - b1_y2) 425 | rho2 = ((rho_x ** 2 + rho_y ** 2) / 4) ** alpha # center distance 426 | if DIoU: 427 | return iou - rho2 / c2 # DIoU 428 | elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 429 | v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) 430 | with torch.no_grad(): 431 | alpha_ciou = v / ((1 + eps) - inter / union + v) 432 | # return iou - (rho2 / c2 + v * alpha_ciou) # CIoU 433 | return iou - (rho2 / c2 + torch.pow(v * alpha_ciou + eps, alpha)) # CIoU 434 | else: # GIoU https://arxiv.org/pdf/1902.09630.pdf 435 | # c_area = cw * ch + eps # convex area 436 | # return iou - (c_area - union) / c_area # GIoU 437 | c_area = torch.max(cw * ch + eps, union) # convex area 438 | return iou - torch.pow((c_area - union) / c_area + eps, alpha) # GIoU 439 | else: 440 | return iou # torch.log(iou+eps) or iou 441 | 442 | 443 | def box_iou(box1, box2): 444 | # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py 445 | """ 446 | Return intersection-over-union (Jaccard index) of boxes. 447 | Both sets of boxes are expected to be in (x1, y1, x2, y2) format. 448 | Arguments: 449 | box1 (Tensor[N, 4]) 450 | box2 (Tensor[M, 4]) 451 | Returns: 452 | iou (Tensor[N, M]): the NxM matrix containing the pairwise 453 | IoU values for every element in boxes1 and boxes2 454 | """ 455 | 456 | def box_area(box): 457 | # box = 4xn 458 | return (box[2] - box[0]) * (box[3] - box[1]) 459 | 460 | area1 = box_area(box1.T) 461 | area2 = box_area(box2.T) 462 | 463 | # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) 464 | inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) 465 | return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter) 466 | 467 | 468 | def wh_iou(wh1, wh2): 469 | # Returns the nxm IoU matrix. 
wh1 is nx2, wh2 is mx2 470 | wh1 = wh1[:, None] # [N,1,2] 471 | wh2 = wh2[None] # [1,M,2] 472 | inter = torch.min(wh1, wh2).prod(2) # [N,M] 473 | return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter) 474 | 475 | 476 | def box_giou(box1, box2): 477 | """ 478 | Return generalized intersection-over-union (Jaccard index) between two sets of boxes. 479 | Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with 480 | ``0 <= x1 < x2`` and ``0 <= y1 < y2``. 481 | Args: 482 | boxes1 (Tensor[N, 4]): first set of boxes 483 | boxes2 (Tensor[M, 4]): second set of boxes 484 | Returns: 485 | Tensor[N, M]: the NxM matrix containing the pairwise generalized IoU values 486 | for every element in boxes1 and boxes2 487 | """ 488 | 489 | def box_area(box): 490 | # box = 4xn 491 | return (box[2] - box[0]) * (box[3] - box[1]) 492 | 493 | area1 = box_area(box1.T) 494 | area2 = box_area(box2.T) 495 | 496 | inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) 497 | union = (area1[:, None] + area2 - inter) 498 | 499 | iou = inter / union 500 | 501 | lti = torch.min(box1[:, None, :2], box2[:, :2]) 502 | rbi = torch.max(box1[:, None, 2:], box2[:, 2:]) 503 | 504 | whi = (rbi - lti).clamp(min=0) # [N,M,2] 505 | areai = whi[:, :, 0] * whi[:, :, 1] 506 | 507 | return iou - (areai - union) / areai 508 | 509 | 510 | def box_ciou(box1, box2, eps: float = 1e-7): 511 | """ 512 | Return complete intersection-over-union (Jaccard index) between two sets of boxes. 513 | Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with 514 | ``0 <= x1 < x2`` and ``0 <= y1 < y2``. 515 | Args: 516 | boxes1 (Tensor[N, 4]): first set of boxes 517 | boxes2 (Tensor[M, 4]): second set of boxes 518 | eps (float, optional): small number to prevent division by zero. Default: 1e-7 519 | Returns: 520 | Tensor[N, M]: the NxM matrix containing the pairwise complete IoU values 521 | for every element in boxes1 and boxes2 522 | """ 523 | 524 | def box_area(box): 525 | # box = 4xn 526 | return (box[2] - box[0]) * (box[3] - box[1]) 527 | 528 | area1 = box_area(box1.T) 529 | area2 = box_area(box2.T) 530 | 531 | inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) 532 | union = (area1[:, None] + area2 - inter) 533 | 534 | iou = inter / union 535 | 536 | lti = torch.min(box1[:, None, :2], box2[:, :2]) 537 | rbi = torch.max(box1[:, None, 2:], box2[:, 2:]) 538 | 539 | whi = (rbi - lti).clamp(min=0) # [N,M,2] 540 | diagonal_distance_squared = (whi[:, :, 0] ** 2) + (whi[:, :, 1] ** 2) + eps 541 | 542 | # centers of boxes 543 | x_p = (box1[:, None, 0] + box1[:, None, 2]) / 2 544 | y_p = (box1[:, None, 1] + box1[:, None, 3]) / 2 545 | x_g = (box2[:, 0] + box2[:, 2]) / 2 546 | y_g = (box2[:, 1] + box2[:, 3]) / 2 547 | # The distance between boxes' centers squared. 
548 | centers_distance_squared = (x_p - x_g) ** 2 + (y_p - y_g) ** 2 549 | 550 | w_pred = box1[:, None, 2] - box1[:, None, 0] 551 | h_pred = box1[:, None, 3] - box1[:, None, 1] 552 | 553 | w_gt = box2[:, 2] - box2[:, 0] 554 | h_gt = box2[:, 3] - box2[:, 1] 555 | 556 | v = (4 / (torch.pi ** 2)) * torch.pow((torch.atan(w_gt / h_gt) - torch.atan(w_pred / h_pred)), 2) 557 | with torch.no_grad(): 558 | alpha = v / (1 - iou + v + eps) 559 | return iou - (centers_distance_squared / diagonal_distance_squared) - alpha * v 560 | 561 | 562 | def box_diou(box1, box2, eps: float = 1e-7): 563 | """ 564 | Return distance intersection-over-union (Jaccard index) between two sets of boxes. 565 | Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with 566 | ``0 <= x1 < x2`` and ``0 <= y1 < y2``. 567 | Args: 568 | boxes1 (Tensor[N, 4]): first set of boxes 569 | boxes2 (Tensor[M, 4]): second set of boxes 570 | eps (float, optional): small number to prevent division by zero. Default: 1e-7 571 | Returns: 572 | Tensor[N, M]: the NxM matrix containing the pairwise distance IoU values 573 | for every element in boxes1 and boxes2 574 | """ 575 | 576 | def box_area(box): 577 | # box = 4xn 578 | return (box[2] - box[0]) * (box[3] - box[1]) 579 | 580 | area1 = box_area(box1.T) 581 | area2 = box_area(box2.T) 582 | 583 | inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) 584 | union = (area1[:, None] + area2 - inter) 585 | 586 | iou = inter / union 587 | 588 | lti = torch.min(box1[:, None, :2], box2[:, :2]) 589 | rbi = torch.max(box1[:, None, 2:], box2[:, 2:]) 590 | 591 | whi = (rbi - lti).clamp(min=0) # [N,M,2] 592 | diagonal_distance_squared = (whi[:, :, 0] ** 2) + (whi[:, :, 1] ** 2) + eps 593 | 594 | # centers of boxes 595 | x_p = (box1[:, None, 0] + box1[:, None, 2]) / 2 596 | y_p = (box1[:, None, 1] + box1[:, None, 3]) / 2 597 | x_g = (box2[:, 0] + box2[:, 2]) / 2 598 | y_g = (box2[:, 1] + box2[:, 3]) / 2 599 | # The distance between boxes' centers squared. 600 | centers_distance_squared = (x_p - x_g) ** 2 + (y_p - y_g) ** 2 601 | 602 | # The distance IoU is the IoU penalized by a normalized 603 | # distance between boxes' centers squared. 
604 | return iou - (centers_distance_squared / diagonal_distance_squared) 605 | 606 | 607 | def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, 608 | labels=()): 609 | """Runs Non-Maximum Suppression (NMS) on inference results 610 | 611 | Returns: 612 | list of detections, one (n,6) tensor per image [xyxy, conf, cls] 613 | """ 614 | 615 | nc = prediction.shape[2] - 5 # number of classes 616 | xc = prediction[..., 4] > conf_thres # candidates 617 | 618 | # Settings 619 | min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height 620 | max_det = 300 # maximum number of detections per image 621 | max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() 622 | time_limit = 10.0 # seconds to quit after 623 | redundant = True # require redundant detections 624 | multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) 625 | merge = False # use merge-NMS 626 | 627 | t = time.time() 628 | output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0] 629 | for xi, x in enumerate(prediction): # image index, image inference 630 | # Apply constraints 631 | # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height 632 | x = x[xc[xi]] # confidence 633 | 634 | # Cat apriori labels if autolabelling 635 | if labels and len(labels[xi]): 636 | l = labels[xi] 637 | v = torch.zeros((len(l), nc + 5), device=x.device) 638 | v[:, :4] = l[:, 1:5] # box 639 | v[:, 4] = 1.0 # conf 640 | v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls 641 | x = torch.cat((x, v), 0) 642 | 643 | # If none remain process next image 644 | if not x.shape[0]: 645 | continue 646 | 647 | # Compute conf 648 | if nc == 1: 649 | x[:, 5:] = x[:, 4:5] # for models with one class, cls_loss is 0 and cls_conf is always 0.5, 650 | # so there is no need to multiply.
651 | else: 652 | x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf 653 | 654 | # Box (center x, center y, width, height) to (x1, y1, x2, y2) 655 | box = xywh2xyxy(x[:, :4]) 656 | 657 | # Detections matrix nx6 (xyxy, conf, cls) 658 | if multi_label: 659 | i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T 660 | x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) 661 | else: # best class only 662 | conf, j = x[:, 5:].max(1, keepdim=True) 663 | x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres] 664 | 665 | # Filter by class 666 | if classes is not None: 667 | x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] 668 | 669 | # Apply finite constraint 670 | # if not torch.isfinite(x).all(): 671 | # x = x[torch.isfinite(x).all(1)] 672 | 673 | # Check shape 674 | n = x.shape[0] # number of boxes 675 | if not n: # no boxes 676 | continue 677 | elif n > max_nms: # excess boxes 678 | x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence 679 | 680 | # Batched NMS 681 | c = x[:, 5:6] * (0 if agnostic else max_wh) # classes 682 | boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores 683 | i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS 684 | if i.shape[0] > max_det: # limit detections 685 | i = i[:max_det] 686 | if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) 687 | # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) 688 | iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix 689 | weights = iou * scores[None] # box weights 690 | x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes 691 | if redundant: 692 | i = i[iou.sum(1) > 1] # require redundancy 693 | 694 | output[xi] = x[i] 695 | if (time.time() - t) > time_limit: 696 | print(f'WARNING: NMS time limit {time_limit}s exceeded') 697 | break # time limit exceeded 698 | 699 | return output 700 | 701 | 702 | def non_max_suppression_kpt(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, 703 | labels=(), kpt_label=False, nc=None, nkpt=None): 704 | """Runs Non-Maximum Suppression (NMS) on inference results 705 | 706 | Returns: 707 | list of detections, one (n,6) tensor per image [xyxy, conf, cls] 708 | """ 709 | if nc is None: 710 | nc = prediction.shape[2] - 5 if not kpt_label else prediction.shape[2] - 56 # number of classes 711 | xc = prediction[..., 4] > conf_thres # candidates 712 | 713 | # Settings 714 | min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height 715 | max_det = 300 # maximum number of detections per image 716 | max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() 717 | time_limit = 10.0 # seconds to quit after 718 | redundant = True # require redundant detections 719 | multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) 720 | merge = False # use merge-NMS 721 | 722 | t = time.time() 723 | output = [torch.zeros((0,6), device=prediction.device)] * prediction.shape[0] 724 | for xi, x in enumerate(prediction): # image index, image inference 725 | # Apply constraints 726 | # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height 727 | x = x[xc[xi]] # confidence 728 | 729 | # Cat apriori labels if autolabelling 730 | if labels and len(labels[xi]): 731 | l = labels[xi] 732 | v = torch.zeros((len(l), nc + 5), device=x.device) 733 | v[:, :4] = l[:, 1:5] # box 734 | v[:, 4] = 1.0 # conf 735 | v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls 736 | x =
torch.cat((x, v), 0) 737 | 738 | # If none remain process next image 739 | if not x.shape[0]: 740 | continue 741 | 742 | # Compute conf 743 | x[:, 5:5+nc] *= x[:, 4:5] # conf = obj_conf * cls_conf 744 | 745 | # Box (center x, center y, width, height) to (x1, y1, x2, y2) 746 | box = xywh2xyxy(x[:, :4]) 747 | 748 | # Detections matrix nx6 (xyxy, conf, cls) 749 | if multi_label: 750 | i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T 751 | x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) 752 | else: # best class only 753 | if not kpt_label: 754 | conf, j = x[:, 5:].max(1, keepdim=True) 755 | x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres] 756 | else: 757 | kpts = x[:, 6:] 758 | conf, j = x[:, 5:6].max(1, keepdim=True) 759 | x = torch.cat((box, conf, j.float(), kpts), 1)[conf.view(-1) > conf_thres] 760 | 761 | 762 | # Filter by class 763 | if classes is not None: 764 | x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] 765 | 766 | # Apply finite constraint 767 | # if not torch.isfinite(x).all(): 768 | # x = x[torch.isfinite(x).all(1)] 769 | 770 | # Check shape 771 | n = x.shape[0] # number of boxes 772 | if not n: # no boxes 773 | continue 774 | elif n > max_nms: # excess boxes 775 | x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence 776 | 777 | # Batched NMS 778 | c = x[:, 5:6] * (0 if agnostic else max_wh) # classes 779 | boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores 780 | i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS 781 | if i.shape[0] > max_det: # limit detections 782 | i = i[:max_det] 783 | if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) 784 | # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) 785 | iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix 786 | weights = iou * scores[None] # box weights 787 | x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes 788 | if redundant: 789 | i = i[iou.sum(1) > 1] # require redundancy 790 | 791 | output[xi] = x[i] 792 | if (time.time() - t) > time_limit: 793 | print(f'WARNING: NMS time limit {time_limit}s exceeded') 794 | break # time limit exceeded 795 | 796 | return output 797 | 798 | 799 | def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer() 800 | # Strip optimizer from 'f' to finalize training, optionally save as 's' 801 | x = torch.load(f, map_location=torch.device('cpu')) 802 | if x.get('ema'): 803 | x['model'] = x['ema'] # replace model with ema 804 | for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates': # keys 805 | x[k] = None 806 | x['epoch'] = -1 807 | x['model'].half() # to FP16 808 | for p in x['model'].parameters(): 809 | p.requires_grad = False 810 | torch.save(x, s or f) 811 | mb = os.path.getsize(s or f) / 1E6 # filesize 812 | print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB") 813 | 814 | 815 | def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''): 816 | # Print mutation results to evolve.txt (for use with train.py --evolve) 817 | a = '%10s' * len(hyp) % tuple(hyp.keys()) # hyperparam keys 818 | b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values 819 | c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) 820 | print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c)) 821 | 822 | if bucket: 823 | url = 'gs://%s/evolve.txt' % bucket 824 | if gsutil_getsize(url) > 
(os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0): 825 | os.system('gsutil cp %s .' % url) # download evolve.txt if larger than local 826 | 827 | with open('evolve.txt', 'a') as f: # append result 828 | f.write(c + b + '\n') 829 | x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0) # load unique rows 830 | x = x[np.argsort(-fitness(x))] # sort 831 | np.savetxt('evolve.txt', x, '%10.3g') # save sort by fitness 832 | 833 | # Save yaml 834 | for i, k in enumerate(hyp.keys()): 835 | hyp[k] = float(x[0, i + 7]) 836 | with open(yaml_file, 'w') as f: 837 | results = tuple(x[0, :7]) 838 | c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) 839 | f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n') 840 | yaml.dump(hyp, f, sort_keys=False) 841 | 842 | if bucket: 843 | os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket)) # upload 844 | 845 | 846 | def apply_classifier(x, model, img, im0): 847 | # applies a second stage classifier to yolo outputs 848 | im0 = [im0] if isinstance(im0, np.ndarray) else im0 849 | for i, d in enumerate(x): # per image 850 | if d is not None and len(d): 851 | d = d.clone() 852 | 853 | # Reshape and pad cutouts 854 | b = xyxy2xywh(d[:, :4]) # boxes 855 | b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square 856 | b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad 857 | d[:, :4] = xywh2xyxy(b).long() 858 | 859 | # Rescale boxes from img_size to im0 size 860 | scale_coords(img.shape[2:], d[:, :4], im0[i].shape) 861 | 862 | # Classes 863 | pred_cls1 = d[:, 5].long() 864 | ims = [] 865 | for j, a in enumerate(d): # per item 866 | cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])] 867 | im = cv2.resize(cutout, (224, 224)) # BGR 868 | # cv2.imwrite('test%i.jpg' % j, cutout) 869 | 870 | im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 871 | im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32 872 | im /= 255.0 # 0 - 255 to 0.0 - 1.0 873 | ims.append(im) 874 | 875 | pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction 876 | x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections 877 | 878 | return x 879 | 880 | 881 | def increment_path(path, exist_ok=True, sep=''): 882 | # Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc. 883 | path = Path(path) # os-agnostic 884 | if (path.exists() and exist_ok) or (not path.exists()): 885 | return str(path) 886 | else: 887 | dirs = glob.glob(f"{path}{sep}*") # similar paths 888 | matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs] 889 | i = [int(m.groups()[0]) for m in matches if m] # indices 890 | n = max(i) + 1 if i else 2 # increment number 891 | return f"{path}{sep}{n}" # update path 892 | --------------------------------------------------------------------------------
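The utilities in /utils/general.py are easiest to follow end to end. The sketch below is illustrative and not a file from this repository: it assumes the repository root is on the Python path so utils.general is importable, and it substitutes a random tensor for real model output in order to show how non_max_suppression() and scale_coords() are typically chained before boxes are handed to a tracker such as sort.py.

# Illustrative usage sketch (hypothetical, not part of the repository).
# The random tensor stands in for a model forward pass whose output has
# shape (batch, num_boxes, 4 box coords + objectness + class scores).
import torch

from utils.general import non_max_suppression, scale_coords, xyxy2xywh

nc = 80                            # class count, matching data/coco.yaml
pred = torch.rand(1, 100, 5 + nc)  # fake inference output in xywh + obj + cls layout
pred[..., :4] *= 640               # spread the xywh boxes over a 640x640 network input

# Confidence filter plus per-class NMS -> list with one (n, 6) [xyxy, conf, cls] tensor per image
det = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45)[0]

if len(det):
    # Undo the 640x640 letterbox, mapping boxes onto a hypothetical 1080x1920 source frame
    det[:, :4] = scale_coords((640, 640), det[:, :4], (1080, 1920)).round()
    print(len(det), 'boxes kept; first box as xywh:', xyxy2xywh(det[:, :4])[0])

A real pipeline would obtain pred from a model forward pass instead of torch.rand; everything after that line is the same sequence of calls.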