├── .dockerignore
├── .gitattributes
├── .gitignore
├── .pre-commit-config.yaml
├── CONTRIBUTING.md
├── Dockerfile
├── LICENSE
├── OFFICAL_README.md
├── README.md
├── data
│ ├── Argoverse.yaml
│ ├── Fruit.yaml
│ ├── GlobalWheat2020.yaml
│ ├── Objects365.yaml
│ ├── SKU-110K.yaml
│ ├── VOC.yaml
│ ├── VOC2007.yaml
│ ├── VOC_val_500.yaml
│ ├── VisDrone.yaml
│ ├── abnormal_driving.yaml
│ ├── coco.yaml
│ ├── coco128.yaml
│ ├── hyps
│ │ ├── hyp.Objects365.yaml
│ │ ├── hyp.VOC.yaml
│ │ ├── hyp.scratch-high.yaml
│ │ ├── hyp.scratch-low.yaml
│ │ └── hyp.scratch-med.yaml
│ ├── images
│ │ ├── bus.jpg
│ │ └── zidane.jpg
│ ├── scripts
│ │ ├── download_weights.sh
│ │ ├── get_coco.sh
│ │ ├── get_coco128.sh
│ │ └── get_voc.sh
│ └── xView.yaml
├── demo_test.py
├── detect.py
├── export.py
├── hubconf.py
├── maketxt.py
├── models
│ ├── __init__.py
│ ├── abnormal_driving
│ │ ├── yolov5l_bifpn_dd.yaml
│ │ ├── yolov5l_ca_dd.yaml
│ │ ├── yolov5l_dd.yaml
│ │ ├── yolov5l_ghost_bifpn_ca_dd.yaml
│ │ ├── yolov5l_ghost_dd.yaml
│ │ ├── yolov5m_bifpn_dd.yaml
│ │ ├── yolov5m_ca_dd.yaml
│ │ ├── yolov5m_dd.yaml
│ │ ├── yolov5m_ghost_bifpn_ca_dd.yaml
│ │ ├── yolov5m_ghost_dd.yaml
│ │ ├── yolov5n_dd.yaml
│ │ ├── yolov5n_ghost_bifpn_ca_dd.yaml
│ │ ├── yolov5n_ghost_dd.yaml
│ │ ├── yolov5s_bifpn_dd.yaml
│ │ ├── yolov5s_ca_dd.yaml
│ │ ├── yolov5s_dd.yaml
│ │ ├── yolov5s_ghost_bifpn2_ca_dd.yaml
│ │ ├── yolov5s_ghost_bifpn_ca_dd.yaml
│ │ ├── yolov5s_ghost_ca_dd.yaml
│ │ ├── yolov5s_ghost_dd.yaml
│ │ ├── yolov5x_bifpn_dd.yaml
│ │ ├── yolov5x_ca_dd.yaml
│ │ ├── yolov5x_dd.yaml
│ │ ├── yolov5x_ghost_bifpn_ca_dd.yaml
│ │ ├── yolov5x_ghost_ca_dd.yaml
│ │ └── yolov5x_ghost_dd.yaml
│ ├── bishe_voc
│ │ ├── yolov5l_ghost_bifpn_ca_voc.yaml
│ │ ├── yolov5l_voc.yaml
│ │ ├── yolov5m_ghost_bifpn_ca_voc.yaml
│ │ ├── yolov5m_voc.yaml
│ │ ├── yolov5n_ghost_bifpn_ca_voc.yaml
│ │ ├── yolov5n_voc.yaml
│ │ ├── yolov5s_ca_voc.yaml
│ │ ├── yolov5s_ghost_bifpn_ca_voc.yaml
│ │ ├── yolov5s_ghost_bifpn_voc.yaml
│ │ ├── yolov5s_ghost_voc.yaml
│ │ ├── yolov5s_voc.yaml
│ │ ├── yolov5x_ghost_bifpn_ca_voc.yaml
│ │ └── yolov5x_voc.yaml
│ ├── common.py
│ ├── experimental.py
│ ├── hub
│ │ ├── anchors.yaml
│ │ ├── yolov3-spp.yaml
│ │ ├── yolov3-tiny.yaml
│ │ ├── yolov3.yaml
│ │ ├── yolov5-bifpn.yaml
│ │ ├── yolov5-fpn.yaml
│ │ ├── yolov5-p2.yaml
│ │ ├── yolov5-p34.yaml
│ │ ├── yolov5-p6.yaml
│ │ ├── yolov5-p7.yaml
│ │ ├── yolov5-panet.yaml
│ │ ├── yolov5l6.yaml
│ │ ├── yolov5m6.yaml
│ │ ├── yolov5n6.yaml
│ │ ├── yolov5s-ghost.yaml
│ │ └── yolov5s-transformer.yaml
│ ├── tf.py
│ ├── yolo.py
│ ├── yolov5l.yaml
│ ├── yolov5m.yaml
│ ├── yolov5n.yaml
│ ├── yolov5s-bifpn-new.yaml
│ ├── yolov5s-voc
│ │ ├── yolov5s-bifpn-ca-voc.yaml
│ │ ├── yolov5s-bifpn-voc.yaml
│ │ ├── yolov5s-bifpn_new-voc.yaml
│ │ ├── yolov5s-ca-voc.yaml
│ │ ├── yolov5s-cbam-voc.yaml
│ │ ├── yolov5s-ghostbottleneck-ca-voc.yaml
│ │ ├── yolov5s-ghostconv-bifpn-ca-voc.yaml
│ │ ├── yolov5s-ghostconv-bifpn1-ca-voc.yaml
│ │ ├── yolov5s-ghostconv-bifpn4-ca-voc.yaml
│ │ ├── yolov5s-ghostconv-bifpn_new-ca-voc.yaml
│ │ ├── yolov5s-ghostconv-bifpn_new-voc.yaml
│ │ ├── yolov5s-ghostconv-ca-voc.yaml
│ │ ├── yolov5s-ghostconv-voc.yaml
│ │ ├── yolov5s-ghostnet-bifpn-ca-voc.yaml
│ │ ├── yolov5s-ghostnet-bifpn-voc.yaml
│ │ ├── yolov5s-mobilenet-voc.yaml
│ │ ├── yolov5s-p2-p5-voc.yaml
│ │ ├── yolov5s-shufflenet-voc.yaml
│ │ ├── yolov5s-voc.yaml
│ │ ├── yolov5x-ghostconv-bifpn_new-ca-voc.yaml
│ │ └── yolov5x-voc.yaml
│ ├── yolov5s.yaml
│ ├── yolov5s6.yaml
│ ├── yolov5x.yaml
│ └── yolov5x6.yaml
├── readme.md
├── requirements.txt
├── setup.cfg
├── test
│ ├── activation_test.py
│ ├── data_augment_test.py
│ ├── demo.py
│ ├── focus_vs_conv.py
│ ├── gpu_memory.py
│ ├── images
│ │ ├── 1.jpg
│ │ ├── 2.jpg
│ │ ├── 3.jpg
│ │ └── 4.jpg
│ ├── labels
│ │ ├── 1.txt
│ │ ├── 2.txt
│ │ ├── 3.txt
│ │ └── 4.txt
│ ├── model_test.py
│ ├── outputs
│ │ ├── cutmix.jpg
│ │ ├── cutout.jpg
│ │ ├── fliplr.jpg
│ │ ├── flipud.jpg
│ │ ├── hsv.jpg
│ │ ├── mixup.jpg
│ │ ├── mixup_origin.jpg
│ │ ├── mosaic_with_label.jpg
│ │ ├── mosaic_without_label.jpg
│ │ ├── perspective.jpg
│ │ ├── rotation.jpg
│ │ ├── scale.jpg
│ │ ├── shear.jpg
│ │ └── translation.jpg
│ ├── param_metric_test.py
│ ├── spp_vs_sppf.py
│ └── train_test.py
├── train.py
├── tutorial.ipynb
├── utils
│ ├── __init__.py
│ ├── __pycache__
│ │ ├── __init__.cpython-38.pyc
│ │ ├── activations.cpython-38.pyc
│ │ ├── augmentations.cpython-38.pyc
│ │ ├── autoanchor.cpython-38.pyc
│ │ ├── autobatch.cpython-38.pyc
│ │ ├── callbacks.cpython-38.pyc
│ │ ├── datasets.cpython-38.pyc
│ │ ├── downloads.cpython-38.pyc
│ │ ├── general.cpython-38.pyc
│ │ ├── loss.cpython-38.pyc
│ │ ├── metrics.cpython-38.pyc
│ │ ├── plots.cpython-38.pyc
│ │ └── torch_utils.cpython-38.pyc
│ ├── activations.py
│ ├── augmentations.py
│ ├── autoanchor.py
│ ├── autobatch.py
│ ├── aws
│ │ ├── __init__.py
│ │ ├── mime.sh
│ │ ├── resume.py
│ │ └── userdata.sh
│ ├── benchmarks.py
│ ├── callbacks.py
│ ├── datasets.py
│ ├── downloads.py
│ ├── flask_rest_api
│ │ ├── README.md
│ │ ├── example_request.py
│ │ └── restapi.py
│ ├── general.py
│ ├── google_app_engine
│ │ ├── Dockerfile
│ │ ├── additional_requirements.txt
│ │ └── app.yaml
│ ├── loggers
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ └── __init__.cpython-38.pyc
│ │ └── wandb
│ │   ├── README.md
│ │   ├── __init__.py
│ │   ├── __pycache__
│ │   │ ├── __init__.cpython-38.pyc
│ │   │ └── wandb_utils.cpython-38.pyc
│ │   ├── log_dataset.py
│ │   ├── sweep.py
│ │   ├── sweep.yaml
│ │   └── wandb_utils.py
│ ├── loss.py
│ ├── metrics.py
│ ├── plots.py
│ └── torch_utils.py
├── val.py
├── xml2txt.py
└── 改进算法整体框架.png
/.gitattributes:
--------------------------------------------------------------------------------
1 | # this drops notebooks from GitHub language stats
2 | *.ipynb linguist-vendored
3 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | # Define hooks for code formatting
2 | # Applied to updated files on each commit if the user has installed and linked the commit hook
3 |
4 | default_language_version:
5 |   python: python3.8
6 |
7 | # Define bot property if installed via https://github.com/marketplace/pre-commit-ci
8 | ci:
9 |   autofix_prs: true
10 |   autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions'
11 |   autoupdate_schedule: quarterly
12 |   # submodules: true
13 |
14 | repos:
15 |   - repo: https://github.com/pre-commit/pre-commit-hooks
16 |     rev: v4.1.0
17 |     hooks:
18 |       - id: end-of-file-fixer
19 |       - id: trailing-whitespace
20 |       - id: check-case-conflict
21 |       - id: check-yaml
22 |       - id: check-toml
23 |       - id: pretty-format-json
24 |       - id: check-docstring-first
25 |
26 |   - repo: https://github.com/asottile/pyupgrade
27 |     rev: v2.31.0
28 |     hooks:
29 |       - id: pyupgrade
30 |         args: [--py36-plus]
31 |         name: Upgrade code
32 |
33 |   - repo: https://github.com/PyCQA/isort
34 |     rev: 5.10.1
35 |     hooks:
36 |       - id: isort
37 |         name: Sort imports
38 |
39 |   # TODO
40 |   #- repo: https://github.com/pre-commit/mirrors-yapf
41 |   #  rev: v0.31.0
42 |   #  hooks:
43 |   #    - id: yapf
44 |   #      name: formatting
45 |
46 |   # TODO
47 |   #- repo: https://github.com/executablebooks/mdformat
48 |   #  rev: 0.7.7
49 |   #  hooks:
50 |   #    - id: mdformat
51 |   #      additional_dependencies:
52 |   #        - mdformat-gfm
53 |   #        - mdformat-black
54 |   #        - mdformat_frontmatter
55 |
56 |   # TODO
57 |   #- repo: https://github.com/asottile/yesqa
58 |   #  rev: v1.2.3
59 |   #  hooks:
60 |   #    - id: yesqa
61 |
62 |   - repo: https://github.com/PyCQA/flake8
63 |     rev: 4.0.1
64 |     hooks:
65 |       - id: flake8
66 |         name: PEP8
67 |
--------------------------------------------------------------------------------
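To apply the hooks above locally, install the tool once with `pip install pre-commit`, register it with `pre-commit install`, and check the whole tree with `pre-commit run --all-files`; afterwards the hooks run automatically on every `git commit`.
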
/Dockerfile:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 |
3 | # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch
4 | FROM nvcr.io/nvidia/pytorch:21.10-py3
5 |
6 | # Install linux packages
7 | RUN apt update && apt install -y zip htop screen libgl1-mesa-glx
8 |
9 | # Install python dependencies
10 | COPY requirements.txt .
11 | RUN python -m pip install --upgrade pip
12 | RUN pip uninstall -y torch torchvision torchtext
13 | RUN pip install --no-cache -r requirements.txt albumentations wandb gsutil notebook \
14 | torch==1.10.2+cu113 torchvision==0.11.3+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
15 | # RUN pip install --no-cache -U torch torchvision
16 |
17 | # Create working directory
18 | RUN mkdir -p /usr/src/app
19 | WORKDIR /usr/src/app
20 |
21 | # Copy contents
22 | COPY . /usr/src/app
23 |
24 | # Downloads to user config dir
25 | ADD https://ultralytics.com/assets/Arial.ttf /root/.config/Ultralytics/
26 |
27 | # Set environment variables
28 | # ENV HOME=/usr/src/app
29 |
30 |
31 | # Usage Examples -------------------------------------------------------------------------------------------------------
32 |
33 | # Build and Push
34 | # t=ultralytics/yolov5:latest && sudo docker build -t $t . && sudo docker push $t
35 |
36 | # Pull and Run
37 | # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t
38 |
39 | # Pull and Run with local directory access
40 | # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t
41 |
42 | # Kill all
43 | # sudo docker kill $(sudo docker ps -q)
44 |
45 | # Kill all image-based
46 | # sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest)
47 |
48 | # Bash into running container
49 | # sudo docker exec -it 5a9b5863d93d bash
50 |
51 | # Bash into stopped container
52 | # id=$(sudo docker ps -qa) && sudo docker start $id && sudo docker exec -it $id bash
53 |
54 | # Clean up
55 | # docker system prune -a --volumes
56 |
57 | # Update Ubuntu drivers
58 | # https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/
59 |
60 | # DDP test
61 | # python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3
62 |
63 | # GCP VM from Image
64 | # docker.io/ultralytics/yolov5:latest
65 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # YOLOv5-Abnormal-Driving
2 |
3 | > Shandong University undergraduate thesis: Research on deep-learning-based abnormal behavior detection algorithms
4 | >
5 | > Author: 嗜睡的篠龙
6 | >
7 | > Email: 442082944@qq.com
8 |
9 | Four improvements to the YOLOv5 algorithm:
10 |
11 |
12 |
13 |
14 | - Introduce the lightweight GhostConv convolution, greatly reducing the number of model parameters (an illustrative sketch follows this file);
15 | - Introduce BiFPN and make full use of the small- and medium-object detection layers;
16 | - Introduce the CA (Coordinate Attention) mechanism so the model localizes target boxes more precisely;
17 | - Replace CIoU with Alpha-EIoU, which markedly improves detection accuracy without introducing extra parameters.
18 |
19 | More details: https://blog.csdn.net/weixin_43799388?spm=1000.2115.3001.5343
20 |
--------------------------------------------------------------------------------
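The GhostConv module named in the first bullet of the README can be captured in a few lines. What follows is a hedged, self-contained sketch of the GhostNet idea: a regular convolution produces half the output channels, and a cheap depthwise convolution derives the "ghost" half, roughly halving parameters versus a full convolution. It imitates the style of the modules in models/common.py but is illustrative, not this repository's exact implementation.

import torch
import torch.nn as nn


class GhostConv(nn.Module):
    # Illustrative GhostConv (assumed structure, not the repo's exact module)
    def __init__(self, c1, c2, k=1, s=1):
        super().__init__()
        c_ = c2 // 2  # hidden channels
        self.primary = nn.Sequential(  # ordinary conv -> half the channels
            nn.Conv2d(c1, c_, k, s, k // 2, bias=False),
            nn.BatchNorm2d(c_),
            nn.SiLU())
        self.cheap = nn.Sequential(  # depthwise 5x5 -> the "ghost" half
            nn.Conv2d(c_, c_, 5, 1, 2, groups=c_, bias=False),
            nn.BatchNorm2d(c_),
            nn.SiLU())

    def forward(self, x):
        y = self.primary(x)
        return torch.cat([y, self.cheap(y)], dim=1)


x = torch.rand(1, 64, 80, 80)
print(GhostConv(64, 128)(x).shape)  # torch.Size([1, 128, 80, 80])

A plain 1x1 Conv2d(64, 128) here would cost 64*128 = 8192 weights; the sketch uses 64*64 + 64*25 = 5696, and the gap widens for larger kernels.
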
/data/Fruit.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Fruit detection dataset (apple, banana, grape)
3 | # Example usage: python train.py --data Fruit.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | #     └── Fruit
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | # path: ../datasets/Fruit # dataset root dir
12 | train: ../datasets/Fruit/train.txt
13 | val: ../datasets/Fruit/val.txt
14 | test: ../datasets/Fruit/test.txt
15 |
16 | # Classes
17 | nc: 3 # number of classes
18 | names: ['apple', 'banana', 'grape'] # class names
19 |
--------------------------------------------------------------------------------
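For orientation, this is roughly how a dataset YAML like Fruit.yaml is consumed; a minimal sketch only (the real loader in this codebase does far more validation and path resolution, and the file path below assumes you run from the repository root):

import yaml

with open('data/Fruit.yaml', errors='ignore') as f:
    data = yaml.safe_load(f)  # parse the dataset config into a dict

assert data['nc'] == len(data['names']), 'nc must equal len(names)'
for split in ('train', 'val', 'test'):
    # each split may be a directory, a .txt file listing images, or a list of dirs
    print(split, '->', data.get(split))
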
/data/GlobalWheat2020.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan
3 | # Example usage: python train.py --data GlobalWheat2020.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── GlobalWheat2020 ← downloads here
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/GlobalWheat2020 # dataset root dir
12 | train: # train images (relative to 'path') 3422 images
13 | - images/arvalis_1
14 | - images/arvalis_2
15 | - images/arvalis_3
16 | - images/ethz_1
17 | - images/rres_1
18 | - images/inrae_1
19 | - images/usask_1
20 | val: # val images (relative to 'path') 748 images (WARNING: train set contains ethz_1)
21 | - images/ethz_1
22 | test: # test images (optional) 1276 images
23 | - images/utokyo_1
24 | - images/utokyo_2
25 | - images/nau_1
26 | - images/uq_1
27 |
28 | # Classes
29 | nc: 1 # number of classes
30 | names: ['wheat_head'] # class names
31 |
32 |
33 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
34 | download: |
35 |   from utils.general import download, Path
36 |
37 |   # Download
38 |   dir = Path(yaml['path'])  # dataset root dir
39 |   urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip',
40 |           'https://github.com/ultralytics/yolov5/releases/download/v1.0/GlobalWheat2020_labels.zip']
41 |   download(urls, dir=dir)
42 |
43 |   # Make Directories
44 |   for p in 'annotations', 'images', 'labels':
45 |       (dir / p).mkdir(parents=True, exist_ok=True)
46 |
47 |   # Move
48 |   for p in 'arvalis_1', 'arvalis_2', 'arvalis_3', 'ethz_1', 'rres_1', 'inrae_1', 'usask_1', \
49 |            'utokyo_1', 'utokyo_2', 'nau_1', 'uq_1':
50 |       (dir / p).rename(dir / 'images' / p)  # move to /images
51 |       f = (dir / p).with_suffix('.json')  # json file
52 |       if f.exists():
53 |           f.rename((dir / 'annotations' / p).with_suffix('.json'))  # move to /annotations
54 |
--------------------------------------------------------------------------------
/data/VOC.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford
3 | # Example usage: python train.py --data VOC.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── VOC ← downloads here
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/VOC # dataset root dir
12 | train: train.txt # train images (relative to 'path')
13 | val: val.txt # val images (relative to 'path')
14 |
15 | # Classes
16 | nc: 20 # number of classes
17 | names: ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
18 |         'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'] # class names
19 |
--------------------------------------------------------------------------------
/data/VOC2007.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford
3 | # Example usage: python train.py --data VOC2007.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── VOC2007 ← downloads here
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/VOC2007 # dataset root dir
12 | train: train.txt # train images (relative to 'path')
13 | val: val.txt # val images (relative to 'path')
14 | test: test.txt # test images (optional)
15 |
16 | # Classes
17 | nc: 20 # number of classes
18 | names: ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
19 |         'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'] # class names
20 |
--------------------------------------------------------------------------------
/data/VOC_val_500.yaml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/data/VOC_val_500.yaml
--------------------------------------------------------------------------------
/data/abnormal_driving.yaml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/data/abnormal_driving.yaml
--------------------------------------------------------------------------------
/data/coco.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # COCO 2017 dataset http://cocodataset.org by Microsoft
3 | # Example usage: python train.py --data coco.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── coco ← downloads here
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco # dataset root dir
12 | train: train2017.txt # train images (relative to 'path') 118287 images
13 | val: val2017.txt # val images (relative to 'path') 5000 images
14 | test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
15 |
16 | # Classes
17 | nc: 80 # number of classes
18 | names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
19 |         'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
20 |         'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
21 |         'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
22 |         'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
23 |         'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
24 |         'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
25 |         'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
26 |         'hair drier', 'toothbrush'] # class names
27 |
28 |
29 | # Download script/URL (optional)
30 | download: |
31 |   from utils.general import download, Path
32 |
33 |   # Download labels
34 |   segments = False  # segment or box labels
35 |   dir = Path(yaml['path'])  # dataset root dir
36 |   url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
37 |   urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')]  # labels
38 |   download(urls, dir=dir.parent)
39 |
40 |   # Download data
41 |   urls = ['http://images.cocodataset.org/zips/train2017.zip',  # 19G, 118k images
42 |           'http://images.cocodataset.org/zips/val2017.zip',  # 1G, 5k images
43 |           'http://images.cocodataset.org/zips/test2017.zip']  # 7G, 41k images (optional)
44 |   download(urls, dir=dir / 'images', threads=3)
45 |
--------------------------------------------------------------------------------
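The multi-line download: field above is a Python script stored as a YAML string. A hedged sketch of how such a field can be executed (YOLOv5's check_dataset() does something along these lines, binding the parsed dict to the name `yaml`, which is why the script can read yaml['path']; run it from the repository root so utils is importable):

import yaml as pyyaml  # renamed so the embedded script is free to use the name `yaml`

with open('data/coco.yaml', errors='ignore') as f:
    data = pyyaml.safe_load(f)

s = data.get('download')
if isinstance(s, str) and not s.startswith('http'):  # a script, not a plain URL
    exec(s, {'yaml': data})  # run the embedded download script
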
/data/coco128.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
3 | # Example usage: python train.py --data coco128.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── coco128 ← downloads here
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco128 # dataset root dir
12 | train: images/train2017 # train images (relative to 'path') 128 images
13 | val: images/train2017 # val images (relative to 'path') 128 images
14 | test: # test images (optional)
15 |
16 | # Classes
17 | nc: 80 # number of classes
18 | names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
19 |         'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
20 |         'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
21 |         'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
22 |         'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
23 |         'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
24 |         'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
25 |         'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
26 |         'hair drier', 'toothbrush'] # class names
27 |
28 |
29 | # Download script/URL (optional)
30 | download: https://ultralytics.com/assets/coco128.zip
31 |
--------------------------------------------------------------------------------
/data/hyps/hyp.Objects365.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Hyperparameters for Objects365 training
3 | # python train.py --weights yolov5m.pt --data Objects365.yaml --evolve
4 | # See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials
5 |
6 | lr0: 0.00258
7 | lrf: 0.17
8 | momentum: 0.779
9 | weight_decay: 0.00058
10 | warmup_epochs: 1.33
11 | warmup_momentum: 0.86
12 | warmup_bias_lr: 0.0711
13 | box: 0.0539
14 | cls: 0.299
15 | cls_pw: 0.825
16 | obj: 0.632
17 | obj_pw: 1.0
18 | iou_t: 0.2
19 | anchor_t: 3.44
20 | anchors: 3.2
21 | fl_gamma: 0.0
22 | hsv_h: 0.0188
23 | hsv_s: 0.704
24 | hsv_v: 0.36
25 | degrees: 0.0
26 | translate: 0.0902
27 | scale: 0.491
28 | shear: 0.0
29 | perspective: 0.0
30 | flipud: 0.0
31 | fliplr: 0.5
32 | mosaic: 1.0
33 | mixup: 0.0
34 | copy_paste: 0.0
35 |
--------------------------------------------------------------------------------
/data/hyps/hyp.VOC.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Hyperparameters for VOC training
3 | # python train.py --batch 128 --weights yolov5m6.pt --data VOC2007.yaml --epochs 50 --img 512 --hyp hyp.scratch-med.yaml --evolve
4 | # See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials
5 |
6 | # YOLOv5 Hyperparameter Evolution Results
7 | # Best generation: 319
8 | # Last generation: 434
9 | # metrics/precision, metrics/recall, metrics/mAP_0.5, metrics/mAP_0.5:0.95, val/box_loss, val/obj_loss, val/cls_loss
10 | # 0.86236, 0.86184, 0.91274, 0.72647, 0.0077056, 0.0042449, 0.0013846
11 |
12 | lr0: 0.00334
13 | lrf: 0.15135
14 | momentum: 0.74832
15 | weight_decay: 0.00025
16 | warmup_epochs: 3.3835
17 | warmup_momentum: 0.59462
18 | warmup_bias_lr: 0.18657
19 | box: 0.02
20 | cls: 0.21638
21 | cls_pw: 0.5
22 | obj: 0.51728
23 | obj_pw: 0.67198
24 | iou_t: 0.2
25 | anchor_t: 3.3744
26 | fl_gamma: 0.0
27 | hsv_h: 0.01041
28 | hsv_s: 0.54703
29 | hsv_v: 0.27739
30 | degrees: 0.0
31 | translate: 0.04591
32 | scale: 0.75544
33 | shear: 0.0
34 | perspective: 0.0
35 | flipud: 0.0
36 | fliplr: 0.5
37 | mosaic: 0.85834
38 | mixup: 0.04266
39 | copy_paste: 0.0
40 | anchors: 3.412
41 |
--------------------------------------------------------------------------------
/data/hyps/hyp.scratch-high.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Hyperparameters for high-augmentation COCO training from scratch
3 | # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300
4 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
5 |
6 | # 1. train
7 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
8 | lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf)
9 | momentum: 0.937 # SGD momentum/Adam beta1
10 | weight_decay: 0.0005 # optimizer weight decay 5e-4
11 | warmup_epochs: 3.0 # warmup epochs (fractions ok)
12 | warmup_momentum: 0.8 # warmup initial momentum
13 | warmup_bias_lr: 0.1 # warmup initial bias lr
14 |
15 | # 2. loss function
16 | box: 0.05 # box loss gain
17 | cls: 0.3 # cls loss gain
18 | cls_pw: 1.0 # cls BCELoss positive_weight
19 | obj: 0.7 # obj loss gain (scale with pixels)
20 | obj_pw: 1.0 # obj BCELoss positive_weight
21 |
22 | # 3. others
23 | iou_t: 0.20 # IoU training threshold
24 | anchor_t: 4.0 # anchor-multiple threshold
25 | # anchors: 3 # anchors per output layer (0 to ignore)
26 | fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
27 |
28 | # 4. data augmentation
29 | # 1. HSV augmentation gains: hue / saturation / value
30 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
31 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
32 | hsv_v: 0.4 # image HSV-Value augmentation (fraction)
33 |
34 | # 2. random_perspective augmentation gains: rotation / translation / scale / shear / perspective
35 | degrees: 0.0 # image rotation (+/- deg)
36 | translate: 0.1 # image translation (+/- fraction)
37 | scale: 0.9 # image scale (+/- gain)
38 | shear: 0.0 # image shear (+/- deg)
39 | perspective: 0.0 # image perspective (+/- fraction)
40 |
41 | # 3. Image flips: up-down / left-right
42 | flipud: 0.0 # image flip up-down (probability)
43 | fliplr: 0.5 # image flip left-right (probability)
44 |
45 | # 4. Image-level augmentations
46 | mosaic: 1.0 # image mosaic (probability)
47 | mixup: 0.1 # image mixup (probability)
48 | cutout: 0.0 # image cutout (probability)
49 | copy_paste: 0.1 # segment copy-paste (probability)
--------------------------------------------------------------------------------
/data/hyps/hyp.scratch-low.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Hyperparameters for low-augmentation COCO training from scratch
3 | # python train.py --batch 64 --cfg yolov5n6.yaml --weights '' --data coco.yaml --img 640 --epochs 300 --linear
4 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
5 |
6 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
7 | lrf: 0.01 # final OneCycleLR learning rate (lr0 * lrf)
8 | momentum: 0.937 # SGD momentum/Adam beta1
9 | weight_decay: 0.0005 # optimizer weight decay 5e-4
10 | warmup_epochs: 3.0 # warmup epochs (fractions ok)
11 | warmup_momentum: 0.8 # warmup initial momentum
12 | warmup_bias_lr: 0.1 # warmup initial bias lr
13 | box: 0.05 # box loss gain
14 | cls: 0.5 # cls loss gain
15 | cls_pw: 1.0 # cls BCELoss positive_weight
16 | obj: 1.0 # obj loss gain (scale with pixels)
17 | obj_pw: 1.0 # obj BCELoss positive_weight
18 | iou_t: 0.20 # IoU training threshold
19 | anchor_t: 4.0 # anchor-multiple threshold
20 | # anchors: 3 # anchors per output layer (0 to ignore)
21 | fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
22 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
23 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
24 | hsv_v: 0.4 # image HSV-Value augmentation (fraction)
25 | degrees: 0.0 # image rotation (+/- deg)
26 | translate: 0.1 # image translation (+/- fraction)
27 | scale: 0.5 # image scale (+/- gain)
28 | shear: 0.0 # image shear (+/- deg)
29 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
30 | flipud: 0.0 # image flip up-down (probability)
31 | fliplr: 0.5 # image flip left-right (probability)
32 | mosaic: 1.0 # image mosaic (probability)
33 | mixup: 0.0 # image mixup (probability)
34 | copy_paste: 0.0 # segment copy-paste (probability)
35 |
--------------------------------------------------------------------------------
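One concrete reading of lr0/lrf above: the learning rate decays from lr0 toward lr0 * lrf over training. A hedged sketch of the linear schedule that YOLOv5-style code builds from these two values (the exact lambda in train.py may differ slightly):

lr0, lrf, epochs = 0.01, 0.01, 300  # values from hyp.scratch-low.yaml

def lf(epoch):  # multiplicative LR factor, usable with torch.optim.lr_scheduler.LambdaLR
    return (1 - epoch / (epochs - 1)) * (1.0 - lrf) + lrf

for e in (0, 150, 299):
    print(f'epoch {e:3d}: lr = {lr0 * lf(e):.6f}')
# epoch   0: lr = 0.010000
# epoch 150: lr = 0.005033
# epoch 299: lr = 0.000100
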
/data/hyps/hyp.scratch-med.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Hyperparameters for medium-augmentation COCO training from scratch
3 | # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300
4 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
5 |
6 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
7 | lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf)
8 | momentum: 0.937 # SGD momentum/Adam beta1
9 | weight_decay: 0.0005 # optimizer weight decay 5e-4
10 | warmup_epochs: 3.0 # warmup epochs (fractions ok)
11 | warmup_momentum: 0.8 # warmup initial momentum
12 | warmup_bias_lr: 0.1 # warmup initial bias lr
13 | box: 0.05 # box loss gain
14 | cls: 0.3 # cls loss gain
15 | cls_pw: 1.0 # cls BCELoss positive_weight
16 | obj: 0.7 # obj loss gain (scale with pixels)
17 | obj_pw: 1.0 # obj BCELoss positive_weight
18 | iou_t: 0.20 # IoU training threshold
19 | anchor_t: 4.0 # anchor-multiple threshold
20 | # anchors: 3 # anchors per output layer (0 to ignore)
21 | fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
22 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
23 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
24 | hsv_v: 0.4 # image HSV-Value augmentation (fraction)
25 | degrees: 0.0 # image rotation (+/- deg)
26 | translate: 0.1 # image translation (+/- fraction)
27 | scale: 0.9 # image scale (+/- gain)
28 | shear: 0.0 # image shear (+/- deg)
29 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
30 | flipud: 0.0 # image flip up-down (probability)
31 | fliplr: 0.5 # image flip left-right (probability)
32 | mosaic: 1.0 # image mosaic (probability)
33 | mixup: 0.1 # image mixup (probability)
34 | copy_paste: 0.0 # segment copy-paste (probability)
35 |
--------------------------------------------------------------------------------
/data/images/bus.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/data/images/bus.jpg
--------------------------------------------------------------------------------
/data/images/zidane.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/data/images/zidane.jpg
--------------------------------------------------------------------------------
/data/scripts/download_weights.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
3 | # Download latest models from https://github.com/ultralytics/yolov5/releases
4 | # Example usage: bash path/to/download_weights.sh
5 | # parent
6 | # └── yolov5
7 | # ├── yolov5s.pt ← downloads here
8 | # ├── yolov5m.pt
9 | # └── ...
10 |
11 | python - <<EOF
12 | from utils.downloads import attempt_download
13 |
14 | models = ['n', 's', 'm', 'l', 'x', 'n6', 's6', 'm6', 'l6', 'x6']
15 | for x in models:
16 |     attempt_download(f'yolov5{x}.pt')
17 |
18 | EOF
--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
1 | # YOLOv5-Abnormal-Driving
2 |
3 | > Shandong University undergraduate thesis: Research on deep-learning-based abnormal behavior detection algorithms
4 | >
5 | > Author: 任瑞龙
6 | >
7 | > Email: 442082944@qq.com
8 |
9 |
10 |
11 | Four improvements to the YOLOv5 algorithm:
12 |
13 |
14 |
15 | - Introduce the lightweight GhostConv convolution, greatly reducing the number of model parameters;
16 | - Introduce BiFPN and make full use of the small- and medium-object detection layers;
17 | - Introduce the CA (Coordinate Attention) mechanism so the model localizes target boxes more precisely;
18 | - Replace CIoU with Alpha-EIoU, which markedly improves detection accuracy without introducing extra parameters (a hedged sketch of this loss follows this file).
19 |
20 |
21 |
22 | More details: https://blog.csdn.net/weixin_43799388?spm=1000.2115.3001.5343
23 |
24 |
25 |
26 |
--------------------------------------------------------------------------------
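The Alpha-EIoU mentioned in the fourth bullet combines EIoU's penalties (center distance plus separate width and height terms) with the Alpha-IoU power generalization. A hedged, self-contained sketch of the assumed form, with alpha = 3 as the Alpha-IoU paper suggests; this is not necessarily the exact bbox_iou() variant used in utils/metrics.py:

import torch

def alpha_eiou_loss(box1, box2, alpha=3, eps=1e-7):
    # boxes are (..., 4) tensors in (x1, y1, x2, y2) format
    x1 = torch.max(box1[..., 0], box2[..., 0])
    y1 = torch.max(box1[..., 1], box2[..., 1])
    x2 = torch.min(box1[..., 2], box2[..., 2])
    y2 = torch.min(box1[..., 3], box2[..., 3])
    inter = (x2 - x1).clamp(0) * (y2 - y1).clamp(0)

    w1, h1 = box1[..., 2] - box1[..., 0], box1[..., 3] - box1[..., 1]
    w2, h2 = box2[..., 2] - box2[..., 0], box2[..., 3] - box2[..., 1]
    iou = inter / (w1 * h1 + w2 * h2 - inter + eps)

    # smallest enclosing box, squared center distance, squared w/h differences
    cw = torch.max(box1[..., 2], box2[..., 2]) - torch.min(box1[..., 0], box2[..., 0])
    ch = torch.max(box1[..., 3], box2[..., 3]) - torch.min(box1[..., 1], box2[..., 1])
    rho2 = ((box1[..., 0] + box1[..., 2] - box2[..., 0] - box2[..., 2]) ** 2 +
            (box1[..., 1] + box1[..., 3] - box2[..., 1] - box2[..., 3]) ** 2) / 4

    score = (iou ** alpha
             - (rho2 / (cw ** 2 + ch ** 2 + eps)) ** alpha
             - ((w1 - w2) ** 2 / (cw ** 2 + eps)) ** alpha
             - ((h1 - h2) ** 2 / (ch ** 2 + eps)) ** alpha)
    return 1 - score  # loss to minimize

Since every term is built from existing box coordinates, the loss adds no learnable parameters, which matches the bullet's claim.
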
/requirements.txt:
--------------------------------------------------------------------------------
1 | # pip install -r requirements.txt
2 |
3 | # Base ----------------------------------------
4 | matplotlib>=3.2.2
5 | numpy>=1.18.5
6 | opencv-python>=4.1.2
7 | Pillow>=7.1.2
8 | PyYAML>=5.3.1
9 | requests>=2.23.0
10 | scipy>=1.4.1
11 | torch>=1.7.0
12 | torchvision>=0.8.1
13 | tqdm>=4.41.0
14 |
15 | # Logging -------------------------------------
16 | tensorboard>=2.4.1
17 | # wandb
18 |
19 | # Plotting ------------------------------------
20 | pandas>=1.1.4
21 | seaborn>=0.11.0
22 |
23 | # Export --------------------------------------
24 | # coremltools>=4.1 # CoreML export
25 | # onnx>=1.9.0 # ONNX export
26 | # onnx-simplifier>=0.3.6 # ONNX simplifier
27 | # scikit-learn==0.19.2 # CoreML quantization
28 | # tensorflow>=2.4.1 # TFLite export
29 | # tensorflowjs>=3.9.0 # TF.js export
30 | # openvino-dev # OpenVINO export
31 |
32 | # Extras --------------------------------------
33 | # albumentations>=1.0.3
34 | # Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172
35 | # pycocotools>=2.0 # COCO mAP
36 | # roboflow
37 | thop # FLOPs computation
38 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | # Project-wide configuration file, can be used for package metadata and other tool configurations
2 | # Example usage: global configuration for PEP8 (via flake8) setting or default pytest arguments
3 |
4 | [metadata]
5 | license_file = LICENSE
6 | description-file = README.md
7 |
8 |
9 | [tool:pytest]
10 | norecursedirs =
11 |     .git
12 |     dist
13 |     build
14 | addopts =
15 |     --doctest-modules
16 |     --durations=25
17 |     --color=yes
18 |
19 |
20 | [flake8]
21 | max-line-length = 120
22 | exclude = .tox,*.egg,build,temp
23 | select = E,W,F
24 | doctests = True
25 | verbose = 2
26 | # https://pep8.readthedocs.io/en/latest/intro.html#error-codes
27 | format = pylint
28 | # see: https://www.flake8rules.com/
29 | ignore =
30 |     E731  # Do not assign a lambda expression, use a def
31 |     F405  # name may be undefined, or defined from star imports: module
32 |     E402  # module level import not at top of file
33 |     F401  # module imported but unused
34 |     W504  # line break after binary operator
35 |     E127  # continuation line over-indented for visual indent
36 |     E231  # missing whitespace after ',', ';', or ':'
37 |     E501  # line too long
38 |     F403  # 'from module import *' used; unable to detect undefined names
40 |
41 |
42 | [isort]
43 | # https://pycqa.github.io/isort/docs/configuration/options.html
44 | line_length = 120
45 | multi_line_output = 0
46 |
--------------------------------------------------------------------------------
/test/activation_test.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | from matplotlib import pyplot as plt
4 |
5 | relu = nn.ReLU()
6 | leaky_relu = nn.LeakyReLU()
7 | mish = nn.Mish()
8 | silu = nn.SiLU()
9 |
10 | x = torch.linspace(-10, 10, 1000)
11 | y_relu = relu(x)
12 | y_leaky_relu = leaky_relu(x)
13 | y_mish = mish(x)
14 | y_silu = silu(x)
15 |
16 | plt.plot(x, y_relu, 'g-', label='ReLU')
17 | plt.plot(x, y_leaky_relu, 'y-', label='LeakyReLU')
18 | plt.plot(x, y_mish, 'b-', label='Mish')
19 | plt.plot(x, y_silu, 'r-', label='SiLU')
20 | plt.legend(); plt.grid()
21 | plt.show()
22 |
--------------------------------------------------------------------------------
/test/demo.py:
--------------------------------------------------------------------------------
1 | import matplotlib
2 | print(matplotlib.matplotlib_fname())
--------------------------------------------------------------------------------
/test/focus_vs_conv.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | import torch
4 | import torch.nn as nn
5 |
6 |
7 | class Conv(nn.Module):
8 |     def __init__(self):
9 |         super().__init__()
10 |         self.conv = nn.Conv2d(in_channels=3,
11 |                               out_channels=32,
12 |                               kernel_size=6,
13 |                               stride=2,
14 |                               padding=2)
15 |         self.bn = nn.BatchNorm2d(32)
16 |         self.act = nn.SiLU()
17 |
18 |     def forward(self, x):
19 |         return self.act(self.bn(self.conv(x)))
20 |
21 |
22 | class Focus(nn.Module):
23 |     def __init__(self):
24 |         super().__init__()
25 |         self.conv = nn.Sequential(
26 |             nn.Conv2d(in_channels=3 * 4,
27 |                       out_channels=32,
28 |                       kernel_size=3,
29 |                       stride=1,
30 |                       padding=3 // 2),
31 |             nn.BatchNorm2d(32),
32 |             nn.SiLU()
33 |         )
34 |
35 |     def forward(self, x):  # x(b,c,w,h) -> y(b,4c,w/2,h/2)
36 |         # e.g. x = [1,2,3,4,5,6,7,8,9]: x[::2] = [1,3,5,7,9], every other element from index 0
37 |         # x[1::2] = [2,4,6,8], every other element from index 1
38 |         return self.conv(torch.cat([x[..., ::2, ::2],
39 |                                     x[..., 1::2, ::2],
40 |                                     x[..., ::2, 1::2],
41 |                                     x[..., 1::2, 1::2]], 1))
42 |
43 |
44 | if __name__ == '__main__':
45 |     input_data = torch.rand(1, 3, 640, 640)
46 |
47 |     conv = Conv()
48 |     focus = Focus()
49 |
50 |     output1 = conv(input_data)
51 |     output2 = focus(input_data)
52 |
53 |     print(f"output1 size: {output1.size()}")
54 |     print(f"output2 size: {output2.size()}")
55 |     print(torch.equal(output1, output2))
56 |
57 |     # Speed comparison
58 |     t_start = time.time()
59 |     for _ in range(300):
60 |         conv(input_data)
61 |     print(f"6x6 conv time: {time.time() - t_start}")
62 |
63 |     t_start = time.time()
64 |     for _ in range(300):
65 |         focus(input_data)
66 |     print(f"focus time: {time.time() - t_start}")
67 |
68 | '''
69 | output1 size: torch.Size([1, 32, 320, 320])
70 | output2 size: torch.Size([1, 32, 320, 320])
71 | False
72 | 6x6 conv time: 2.4613001346588135
73 | focus time: 2.8948004245758057
74 | '''
75 |
--------------------------------------------------------------------------------
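The False above is expected: a 6x6 stride-2 convolution and Focus followed by a 3x3 convolution are equivalent as families (the space-to-depth slicing can be absorbed into a fixed permutation of the 6x6 kernel's weights), but with two independent random initializations only the output shapes agree; the timing comparison is the actual point of the test.
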
/test/gpu_memory.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | print("torch版本号: ", end="")
4 | print(torch.__version__)
5 |
6 | print("判断torch是否可用: ", end="")
7 | print(torch.cuda.is_available())
8 |
9 | print("gpu数量: ", end="")
10 | print(torch.cuda.device_count())
11 |
12 | print("gpu名字,设备索引默认从0开始: ", end="")
13 | print(torch.cuda.get_device_name(0))
14 | print("现在正在使用的GPU编号: ", end="")
15 | print(torch.cuda.current_device())
--------------------------------------------------------------------------------
/test/images/1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/test/images/1.jpg
--------------------------------------------------------------------------------
/test/images/2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/test/images/2.jpg
--------------------------------------------------------------------------------
/test/images/3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/test/images/3.jpg
--------------------------------------------------------------------------------
/test/images/4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/test/images/4.jpg
--------------------------------------------------------------------------------
/test/labels/1.txt:
--------------------------------------------------------------------------------
1 | 16 0.182 0.534343 0.18 0.288889
2 | 36 0.70079 0.530333 0.18242 0.507859
3 |
--------------------------------------------------------------------------------
/test/labels/2.txt:
--------------------------------------------------------------------------------
1 | 4 0.516492 0.469388 0.912516 0.748282
2 |
--------------------------------------------------------------------------------
/test/labels/3.txt:
--------------------------------------------------------------------------------
1 | 32 0.680102 0.631552 0.012953 0.01867
2 | 0 0.36418 0.679347 0.163484 0.322734
3 | 0 0.274594 0.74064 0.198437 0.272808
4 | 34 0.385367 0.612722 0.099141 0.113128
5 | 35 0.361008 0.702106 0.037734 0.050468
6 | 35 0.887125 0.633781 0.016406 0.01968
7 | 0 0.346883 0.574544 0.039703 0.174261
8 | 0 0.888102 0.593793 0.068953 0.12133
9 | 0 0.125453 0.7 0.142562 0.375271
10 | 0 0.354016 0.457796 0.006906 0.019729
11 |
--------------------------------------------------------------------------------
/test/labels/4.txt:
--------------------------------------------------------------------------------
1 | 14 0.820783 0.56129 0.218633 0.3527
2 | 14 0.293458 0.37634 0.159617 0.33484
3 | 14 0.525983 0.41653 0.166667 0.32554
4 | 14 0.486708 0.66271 0.16165 0.24138
5 | 14 0.267033 0.79969 0.1538 0.3225
6 | 14 0.139517 0.17415 0.159167 0.26742
7 | 14 0.2955 0.60918 0.1904 0.26864
8 | 14 0.859717 0.79617 0.157067 0.30918
9 |
--------------------------------------------------------------------------------
/test/model_test.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import torch
4 | import yaml
5 |
6 | from models.yolo import Model
7 | from torchstat import stat
8 |
9 | os.environ['CUDA_VISIBLE_DEVICES'] = '2'
10 |
11 |
12 | def parse_opt(known=False):
13 |     parser = argparse.ArgumentParser()
14 |     # yolov5s-voc/yolov5s-ghostconv-bifpn1-ca-voc
15 |     parser.add_argument('--cfg', type=str,
16 |                         default='../models/abnormal_driving/yolov5s_dd.yaml',
17 |                         help='model.yaml path')
18 |     parser.add_argument('--hyp', type=str, default='../data/hyps/hyp.scratch-high.yaml',
19 |                         help='hyperparameters path')
20 |
21 |     opt = parser.parse_known_args()[0] if known else parser.parse_args()
22 |     return opt
23 |
24 |
25 | def main(opt):
26 |     with open(opt.hyp, encoding='utf-8', errors='ignore') as f:
27 |         hyp = yaml.safe_load(f)  # load hyps as a dict
28 |
29 |     # if the config file contains Chinese characters, pass an encoding argument when opening it
30 |     with open(opt.cfg, encoding='ascii', errors='ignore') as f:
31 |         cfg = yaml.safe_load(f)  # model dict with every entry of the config file
32 |
33 |     nc = cfg['nc']  # number of classes in the dataset
34 |     device = torch.device('cuda:0') if torch.cuda.is_available() else 'cpu'
35 |     print(f'device: {device}')
36 |     # input_img = torch.zeros(size=(1, 3, 1280, 1280))
37 |     input_img = torch.zeros(size=(1, 3, 640, 640))
38 |     input_img = input_img.to(device, non_blocking=True).float()
39 |     print(f"the model of '{opt.cfg}' is:")
40 |     model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
41 |     output = model(input_img)
42 |
43 |     print(f'number of detect layers: {len(output)}')
44 |     print('Detect head output: ')
45 |     # print(f'P2/4: {output[len(output) - 4].shape}')
46 |     print(f'P3/8: {output[len(output) - 3].shape}')
47 |     print(f'P4/16: {output[len(output) - 2].shape}')
48 |     print(f'P5/32: {output[len(output) - 1].shape}')
49 |
50 |     '''
51 |     3
52 |     torch.Size([1, 3, 80, 80, 85])
53 |     torch.Size([1, 3, 40, 40, 85])
54 |     torch.Size([1, 3, 20, 20, 85])
55 |     '''
56 |
57 |
58 | if __name__ == '__main__':
59 |     opt = parse_opt()
60 |     main(opt)
61 |
--------------------------------------------------------------------------------
/test/outputs/cutmix.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/test/outputs/cutmix.jpg
--------------------------------------------------------------------------------
/test/outputs/cutout.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/test/outputs/cutout.jpg
--------------------------------------------------------------------------------
/test/outputs/fliplr.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/test/outputs/fliplr.jpg
--------------------------------------------------------------------------------
/test/outputs/flipud.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/test/outputs/flipud.jpg
--------------------------------------------------------------------------------
/test/outputs/hsv.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/test/outputs/hsv.jpg
--------------------------------------------------------------------------------
/test/outputs/mixup.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/test/outputs/mixup.jpg
--------------------------------------------------------------------------------
/test/outputs/mixup_origin.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/test/outputs/mixup_origin.jpg
--------------------------------------------------------------------------------
/test/outputs/mosaic_with_label.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/test/outputs/mosaic_with_label.jpg
--------------------------------------------------------------------------------
/test/outputs/mosaic_without_label.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/test/outputs/mosaic_without_label.jpg
--------------------------------------------------------------------------------
/test/outputs/perspective.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/test/outputs/perspective.jpg
--------------------------------------------------------------------------------
/test/outputs/rotation.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/test/outputs/rotation.jpg
--------------------------------------------------------------------------------
/test/outputs/scale.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/test/outputs/scale.jpg
--------------------------------------------------------------------------------
/test/outputs/shear.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/test/outputs/shear.jpg
--------------------------------------------------------------------------------
/test/outputs/translation.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/test/outputs/translation.jpg
--------------------------------------------------------------------------------
/test/param_metric_test.py:
--------------------------------------------------------------------------------
1 | '''
2 | torchstat's stat() computes a PyTorch network's parameter count, memory
3 | footprint, MAdd, FLOPs and other metrics; simple and handy.
4 | For example: inspect some statistics of AlexNet.
5 | '''
6 | from torchstat import stat
7 | import torchvision.models as models
8 |
9 | model = models.alexnet()
10 | stat(model, (3, 224, 224))
11 |
--------------------------------------------------------------------------------
/test/spp_vs_sppf.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | import torch
3 | import time
4 |
5 |
6 | class SPP(nn.Module):
7 |     def __init__(self):
8 |         super().__init__()
9 |         self.maxpool1 = nn.MaxPool2d(kernel_size=5, stride=1, padding=5 // 2)
10 |         self.maxpool2 = nn.MaxPool2d(kernel_size=9, stride=1, padding=9 // 2)
11 |         self.maxpool3 = nn.MaxPool2d(kernel_size=13, stride=1, padding=13 // 2)
12 |
13 |     def forward(self, x):
14 |         p1 = self.maxpool1(x)
15 |         p2 = self.maxpool2(x)
16 |         p3 = self.maxpool3(x)
17 |         return torch.cat([x, p1, p2, p3], dim=1)
18 |
19 |
20 | class SPPF(nn.Module):
21 |     def __init__(self):
22 |         super().__init__()
23 |         self.maxpool = nn.MaxPool2d(kernel_size=5, stride=1, padding=5 // 2)
24 |
25 |     def forward(self, x):
26 |         p1 = self.maxpool(x)
27 |         p2 = self.maxpool(p1)
28 |         p3 = self.maxpool(p2)
29 |         return torch.cat([x, p1, p2, p3], dim=1)
30 |
31 |
32 | if __name__ == '__main__':
33 |     input_data = torch.rand(8, 32, 64, 64)
34 |     spp = SPP()
35 |     sppf = SPPF()
36 |     output1 = spp(input_data)
37 |     output2 = sppf(input_data)
38 |
39 |     print(torch.equal(output1, output2))
40 |
41 |     t_start = time.time()
42 |     for _ in range(100):
43 |         spp(input_data)
44 |     print(f"spp time: {time.time() - t_start}")
45 |
46 |     t_start = time.time()
47 |     for _ in range(100):
48 |         sppf(input_data)
49 |     print(f"sppf time: {time.time() - t_start}")
50 |
51 | '''
52 | True
53 | spp time: 4.364669561386108
54 | sppf time: 1.5898349285125732
55 | '''
56 |
--------------------------------------------------------------------------------
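Why the first line prints True: for max pooling, two cascaded 5x5 stride-1 pools cover the same window as one 9x9 pool and three cover 13x13 (receptive field 1 + 4n), and a max of maxima equals the max over the combined window, so SPPF reproduces SPP's {5, 9, 13} outputs exactly while reusing intermediate results, which is where the roughly 2-3x speedup recorded above comes from.
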
/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | utils/initialization
4 | """
5 |
6 |
7 | def notebook_init(verbose=True):
8 |     # Check system software and hardware
9 |     print('Checking setup...')
10 |
11 |     import os
12 |     import shutil
13 |
14 |     from utils.general import check_requirements, emojis, is_colab
15 |     from utils.torch_utils import select_device  # imports
16 |
17 |     check_requirements(('psutil', 'IPython'))
18 |     import psutil
19 |     from IPython import display  # to display images and clear console output
20 |
21 |     if is_colab():
22 |         shutil.rmtree('/content/sample_data', ignore_errors=True)  # remove colab /sample_data directory
23 |
24 |     if verbose:
25 |         # System info
26 |         # gb = 1 / 1000 ** 3  # bytes to GB
27 |         gib = 1 / 1024 ** 3  # bytes to GiB
28 |         ram = psutil.virtual_memory().total
29 |         total, used, free = shutil.disk_usage("/")
30 |         display.clear_output()
31 |         s = f'({os.cpu_count()} CPUs, {ram * gib:.1f} GB RAM, {(total - free) * gib:.1f}/{total * gib:.1f} GB disk)'
32 |     else:
33 |         s = ''
34 |
35 |     select_device(newline=False)
36 |     print(emojis(f'Setup complete ✅ {s}'))
37 |     return display
38 |
--------------------------------------------------------------------------------
/utils/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/utils/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/activations.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/utils/__pycache__/activations.cpython-38.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/augmentations.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/utils/__pycache__/augmentations.cpython-38.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/autoanchor.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/utils/__pycache__/autoanchor.cpython-38.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/autobatch.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/utils/__pycache__/autobatch.cpython-38.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/callbacks.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/utils/__pycache__/callbacks.cpython-38.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/datasets.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/utils/__pycache__/datasets.cpython-38.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/downloads.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/utils/__pycache__/downloads.cpython-38.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/general.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/utils/__pycache__/general.cpython-38.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/loss.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/utils/__pycache__/loss.cpython-38.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/metrics.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/utils/__pycache__/metrics.cpython-38.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/plots.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/utils/__pycache__/plots.cpython-38.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/torch_utils.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/utils/__pycache__/torch_utils.cpython-38.pyc
--------------------------------------------------------------------------------
/utils/autobatch.py:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | """
3 | Auto-batch utils
4 | """
5 |
6 | from copy import deepcopy
7 |
8 | import numpy as np
9 | import torch
10 | from torch.cuda import amp
11 |
12 | from utils.general import LOGGER, colorstr
13 | from utils.torch_utils import profile
14 |
15 |
16 | def check_train_batch_size(model, imgsz=640):
17 | # Check YOLOv5 training batch size
18 | with amp.autocast():
19 | return autobatch(deepcopy(model).train(), imgsz) # compute optimal batch size
20 |
21 |
22 | def autobatch(model, imgsz=640, fraction=0.9, batch_size=16):
23 | # Automatically estimate best batch size to use `fraction` of available CUDA memory
24 | # Usage:
25 | # import torch
26 | # from utils.autobatch import autobatch
27 | # model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False)
28 | # print(autobatch(model))
29 |
30 | prefix = colorstr('AutoBatch: ')
31 | LOGGER.info(f'{prefix}Computing optimal batch size for --imgsz {imgsz}')
32 | device = next(model.parameters()).device # get model device
33 | if device.type == 'cpu':
34 | LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}')
35 | return batch_size
36 |
37 | d = str(device).upper() # 'CUDA:0'
38 | properties = torch.cuda.get_device_properties(device) # device properties
39 | t = properties.total_memory / 1024 ** 3 # (GiB)
40 | r = torch.cuda.memory_reserved(device) / 1024 ** 3 # (GiB)
41 | a = torch.cuda.memory_allocated(device) / 1024 ** 3 # (GiB)
42 | f = t - (r + a) # free inside reserved
43 | LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free')
44 |
45 | batch_sizes = [1, 2, 4, 8, 16]
46 | try:
47 | img = [torch.zeros(b, 3, imgsz, imgsz) for b in batch_sizes]
48 | y = profile(img, model, n=3, device=device)
49 | except Exception as e:
50 | LOGGER.warning(f'{prefix}{e}')
51 |         return batch_size  # profiling failed, fall back to the default batch size
52 | y = [x[2] for x in y if x] # memory [2]
53 | batch_sizes = batch_sizes[:len(y)]
54 | p = np.polyfit(batch_sizes, y, deg=1) # first degree polynomial fit
55 |     b = int((f * fraction - p[1]) / p[0])  # solve fit for b at the target memory (optimal batch size)
56 | LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%)')
57 | return b
58 |
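The `np.polyfit` step above reduces the whole estimate to one line of algebra: fit measured memory against batch size, then solve the fit for the batch size that fills `fraction` of free memory. A standalone sketch of that arithmetic with made-up numbers (the memory figures and the 14.5 GiB of free memory are illustrative, not measurements):

```python
import numpy as np

batch_sizes = [1, 2, 4, 8, 16]
mem = [1.1, 1.9, 3.4, 6.5, 12.8]  # hypothetical profiled memory (GiB) per batch size
f, fraction = 14.5, 0.9           # hypothetical free memory (GiB) and target utilization

slope, intercept = np.polyfit(batch_sizes, mem, deg=1)  # mem ≈ slope * b + intercept
b = int((f * fraction - intercept) / slope)             # batch size at the memory target
print(b)  # -> 16 with the numbers above
```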
--------------------------------------------------------------------------------
/utils/aws/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/utils/aws/__init__.py
--------------------------------------------------------------------------------
/utils/aws/mime.sh:
--------------------------------------------------------------------------------
1 | # AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/
2 | # This script will run on every instance restart, not only on first start
3 | # --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA ---
4 |
5 | Content-Type: multipart/mixed; boundary="//"
6 | MIME-Version: 1.0
7 |
8 | --//
9 | Content-Type: text/cloud-config; charset="us-ascii"
10 | MIME-Version: 1.0
11 | Content-Transfer-Encoding: 7bit
12 | Content-Disposition: attachment; filename="cloud-config.txt"
13 |
14 | #cloud-config
15 | cloud_final_modules:
16 | - [scripts-user, always]
17 |
18 | --//
19 | Content-Type: text/x-shellscript; charset="us-ascii"
20 | MIME-Version: 1.0
21 | Content-Transfer-Encoding: 7bit
22 | Content-Disposition: attachment; filename="userdata.txt"
23 |
24 | #!/bin/bash
25 | # --- paste contents of userdata.sh here ---
26 | --//
27 |
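For reference, the same multipart user-data document can be generated with Python's standard `email.mime` classes instead of written by hand; a minimal sketch (the embedded shell body is a placeholder, exactly as in the file above):

```python
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

msg = MIMEMultipart(boundary='//')  # multipart/mixed with the '//' boundary used above

cloud_config = MIMEText('#cloud-config\ncloud_final_modules:\n- [scripts-user, always]\n',
                        'cloud-config')  # -> Content-Type: text/cloud-config
cloud_config.add_header('Content-Disposition', 'attachment', filename='cloud-config.txt')
msg.attach(cloud_config)

script = MIMEText('#!/bin/bash\n# --- paste contents of userdata.sh here ---\n',
                  'x-shellscript')  # -> Content-Type: text/x-shellscript
script.add_header('Content-Disposition', 'attachment', filename='userdata.txt')
msg.attach(script)

print(msg.as_string())  # prints a document equivalent to mime.sh above
```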
--------------------------------------------------------------------------------
/utils/aws/resume.py:
--------------------------------------------------------------------------------
1 | # Resume all interrupted training runs in the yolov5/ dir, including DDP runs
2 | # Usage: $ python utils/aws/resume.py
3 |
4 | import os
5 | import sys
6 | from pathlib import Path
7 |
8 | import torch
9 | import yaml
10 |
11 | FILE = Path(__file__).resolve()
12 | ROOT = FILE.parents[2] # YOLOv5 root directory
13 | if str(ROOT) not in sys.path:
14 | sys.path.append(str(ROOT)) # add ROOT to PATH
15 |
16 | port = 0 # --master_port
17 | path = Path('').resolve()
18 | for last in path.rglob('*/**/last.pt'):
19 | ckpt = torch.load(last)
20 | if ckpt['optimizer'] is None:
21 | continue
22 |
23 | # Load opt.yaml
24 | with open(last.parent.parent / 'opt.yaml', errors='ignore') as f:
25 | opt = yaml.safe_load(f)
26 |
27 | # Get device count
28 | d = opt['device'].split(',') # devices
29 | nd = len(d) # number of devices
30 | ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel
31 |
32 | if ddp: # multi-GPU
33 | port += 1
34 | cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}'
35 | else: # single-GPU
36 | cmd = f'python train.py --resume {last}'
37 |
38 |     cmd += ' > /dev/null 2>&1 &'  # redirect output to /dev/null and run in the background
39 | print(cmd)
40 | os.system(cmd)
41 |
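The `if ddp:` branch above is where the resume command is assembled; a standalone sketch of that string construction with illustrative stand-ins (the '0,1' device string and the checkpoint path are made up — resume.py reads them from opt.yaml and `rglob()`):

```python
# illustrative stand-ins for values resume.py reads from opt.yaml and rglob()
device, last, port = '0,1', 'runs/train/exp/weights/last.pt', 1

nd = len(device.split(','))  # 2 devices -> distributed data parallel
if nd > 1:
    cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}'
else:
    cmd = f'python train.py --resume {last}'

print(cmd)
# python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --resume runs/train/exp/weights/last.pt
```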
--------------------------------------------------------------------------------
/utils/aws/userdata.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html
3 | # This script will run only once on first instance start (for a re-start script see mime.sh)
4 | # /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir
5 | # Use >300 GB SSD
6 |
7 | cd home/ubuntu
8 | if [ ! -d yolov5 ]; then
9 | echo "Running first-time script." # install dependencies, download COCO, pull Docker
10 | git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5
11 | cd yolov5
12 | bash data/scripts/get_coco.sh && echo "COCO done." &
13 | sudo docker pull ultralytics/yolov5:latest && echo "Docker done." &
14 | python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." &
15 | wait && echo "All tasks done." # finish background tasks
16 | else
17 | echo "Running re-start script." # resume interrupted runs
18 | i=0
19 | list=$(sudo docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour'
20 | while IFS= read -r id; do
21 | ((i++))
22 | echo "restarting container $i: $id"
23 | sudo docker start $id
24 | # sudo docker exec -it $id python train.py --resume # single-GPU
25 | sudo docker exec -d $id python utils/aws/resume.py # multi-scenario
26 | done <<<"$list"
27 | fi
28 |
--------------------------------------------------------------------------------
/utils/flask_rest_api/README.md:
--------------------------------------------------------------------------------
1 | # Flask REST API
2 |
3 | [REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are
4 | commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API
5 | created using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/).
6 |
7 | ## Requirements
8 |
9 | [Flask](https://palletsprojects.com/p/flask/) is required. Install with:
10 |
11 | ```shell
12 | $ pip install Flask
13 | ```
14 |
15 | ## Run
16 |
17 | After installing Flask, run:
18 |
19 | ```shell
20 | $ python3 restapi.py --port 5000
21 | ```
22 |
23 | Then use [curl](https://curl.se/) to perform a request:
24 |
25 | ```shell
26 | $ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s'
27 | ```
28 |
29 | The model inference results are returned as a JSON response:
30 |
31 | ```json
32 | [
33 | {
34 | "class": 0,
35 | "confidence": 0.8900438547,
36 | "height": 0.9318675399,
37 | "name": "person",
38 | "width": 0.3264600933,
39 | "xcenter": 0.7438579798,
40 | "ycenter": 0.5207948685
41 | },
42 | {
43 | "class": 0,
44 | "confidence": 0.8440024257,
45 | "height": 0.7155083418,
46 | "name": "person",
47 | "width": 0.6546785235,
48 | "xcenter": 0.427829951,
49 | "ycenter": 0.6334488392
50 | },
51 | {
52 | "class": 27,
53 | "confidence": 0.3771208823,
54 | "height": 0.3902671337,
55 | "name": "tie",
56 | "width": 0.0696444362,
57 | "xcenter": 0.3675483763,
58 | "ycenter": 0.7991207838
59 | },
60 | {
61 | "class": 27,
62 | "confidence": 0.3527112305,
63 | "height": 0.1540903747,
64 | "name": "tie",
65 | "width": 0.0336618312,
66 | "xcenter": 0.7814827561,
67 | "ycenter": 0.5065554976
68 | }
69 | ]
70 | ```
71 |
72 | An example Python script that performs inference using [requests](https://docs.python-requests.org/en/master/) is
73 | provided in `example_request.py`.
74 |
--------------------------------------------------------------------------------
/utils/flask_rest_api/example_request.py:
--------------------------------------------------------------------------------
1 | """Perform test request"""
2 | import pprint
3 |
4 | import requests
5 |
6 | DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s"
7 | TEST_IMAGE = "zidane.jpg"
8 |
9 | with open(TEST_IMAGE, "rb") as f:  # read the image as raw bytes, closing the handle promptly
10 |     image_data = f.read()
11 | response = requests.post(DETECTION_URL, files={"image": image_data}).json()
12 |
13 | pprint.pprint(response)
14 |
--------------------------------------------------------------------------------
/utils/flask_rest_api/restapi.py:
--------------------------------------------------------------------------------
1 | """
2 | Run a REST API exposing the yolov5s object detection model
3 | """
4 | import argparse
5 | import io
6 |
7 | import torch
8 | from flask import Flask, request
9 | from PIL import Image
10 |
11 | app = Flask(__name__)
12 |
13 | DETECTION_URL = "/v1/object-detection/yolov5s"
14 |
15 |
16 | @app.route(DETECTION_URL, methods=["POST"])
17 | def predict():
18 |     if request.method != "POST":
19 | return
20 |
21 | if request.files.get("image"):
22 | image_file = request.files["image"]
23 | image_bytes = image_file.read()
24 |
25 | img = Image.open(io.BytesIO(image_bytes))
26 |
27 | results = model(img, size=640) # reduce size=320 for faster inference
28 | return results.pandas().xyxy[0].to_json(orient="records")
29 |
30 |
31 | if __name__ == "__main__":
32 | parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model")
33 | parser.add_argument("--port", default=5000, type=int, help="port number")
34 | args = parser.parse_args()
35 |
36 | model = torch.hub.load("ultralytics/yolov5", "yolov5s", force_reload=True) # force_reload to recache
37 | app.run(host="0.0.0.0", port=args.port) # debug=True causes Restarting with stat
38 |
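On the client side, the JSON records produced by `to_json(orient="records")` can be consumed directly; a small sketch that posts an image and keeps only confident detections (the 0.5 threshold is an arbitrary illustration, and `zidane.jpg` is assumed to sit in the working directory):

```python
import requests

with open("zidane.jpg", "rb") as f:
    resp = requests.post("http://localhost:5000/v1/object-detection/yolov5s",
                         files={"image": f}).json()

# keep only confident detections; 0.5 is an illustrative threshold
for det in (d for d in resp if d["confidence"] >= 0.5):
    print(f'{det["name"]}: {det["confidence"]:.2f}')
```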
--------------------------------------------------------------------------------
/utils/google_app_engine/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM gcr.io/google-appengine/python
2 |
3 | # Create a virtualenv for dependencies. This isolates these packages from
4 | # system-level packages.
5 | # Use -p python3 or -p python3.7 to select python version. Default is version 2.
6 | RUN virtualenv /env -p python3
7 |
8 | # Setting these environment variables is the same as running
9 | # `source /env/bin/activate`.
10 | ENV VIRTUAL_ENV /env
11 | ENV PATH /env/bin:$PATH
12 |
13 | RUN apt-get update && apt-get install -y python-opencv
14 |
15 | # Copy the application's requirements.txt and run pip to install all
16 | # dependencies into the virtualenv.
17 | ADD requirements.txt /app/requirements.txt
18 | RUN pip install -r /app/requirements.txt
19 |
20 | # Add the application source code.
21 | ADD . /app
22 |
23 | # Run a WSGI server to serve the application. gunicorn must be declared as
24 | # a dependency in requirements.txt.
25 | CMD gunicorn -b :$PORT main:app
26 |
--------------------------------------------------------------------------------
/utils/google_app_engine/additional_requirements.txt:
--------------------------------------------------------------------------------
1 | # Add these requirements to your app on top of the existing ones
2 | pip==21.1
3 | Flask==1.0.2
4 | gunicorn==19.9.0
5 |
--------------------------------------------------------------------------------
/utils/google_app_engine/app.yaml:
--------------------------------------------------------------------------------
1 | runtime: custom
2 | env: flex
3 |
4 | service: yolov5app
5 |
6 | liveness_check:
7 | initial_delay_sec: 600
8 |
9 | manual_scaling:
10 | instances: 1
11 | resources:
12 | cpu: 1
13 | memory_gb: 4
14 | disk_size_gb: 20
15 |
--------------------------------------------------------------------------------
/utils/loggers/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/utils/loggers/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/utils/loggers/wandb/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/utils/loggers/wandb/__init__.py
--------------------------------------------------------------------------------
/utils/loggers/wandb/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/utils/loggers/wandb/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/utils/loggers/wandb/__pycache__/wandb_utils.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/utils/loggers/wandb/__pycache__/wandb_utils.cpython-38.pyc
--------------------------------------------------------------------------------
/utils/loggers/wandb/log_dataset.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | from wandb_utils import WandbLogger
4 |
5 | from utils.general import LOGGER
6 |
7 | WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'
8 |
9 |
10 | def create_dataset_artifact(opt):
11 | logger = WandbLogger(opt, None, job_type='Dataset Creation') # TODO: return value unused
12 | if not logger.wandb:
13 | LOGGER.info("install wandb using `pip install wandb` to log the dataset")
14 |
15 |
16 | if __name__ == '__main__':
17 | parser = argparse.ArgumentParser()
18 | parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path')
19 | parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
20 | parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project')
21 | parser.add_argument('--entity', default=None, help='W&B entity')
22 | parser.add_argument('--name', type=str, default='log dataset', help='name of W&B run')
23 |
24 | opt = parser.parse_args()
25 | opt.resume = False # Explicitly disallow resume check for dataset upload job
26 |
27 | create_dataset_artifact(opt)
28 |
--------------------------------------------------------------------------------
/utils/loggers/wandb/sweep.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from pathlib import Path
3 |
4 | import wandb
5 |
6 | FILE = Path(__file__).resolve()
7 | ROOT = FILE.parents[3] # YOLOv5 root directory
8 | if str(ROOT) not in sys.path:
9 | sys.path.append(str(ROOT)) # add ROOT to PATH
10 |
11 | from train import parse_opt, train
12 | from utils.callbacks import Callbacks
13 | from utils.general import increment_path
14 | from utils.torch_utils import select_device
15 |
16 |
17 | def sweep():
18 | wandb.init()
19 | # Get hyp dict from sweep agent
20 | hyp_dict = vars(wandb.config).get("_items")
21 |
22 | # Workaround: get necessary opt args
23 | opt = parse_opt(known=True)
24 | opt.batch_size = hyp_dict.get("batch_size")
25 | opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve))
26 | opt.epochs = hyp_dict.get("epochs")
27 | opt.nosave = True
28 | opt.data = hyp_dict.get("data")
29 | opt.weights = str(opt.weights)
30 | opt.cfg = str(opt.cfg)
31 | opt.data = str(opt.data)
32 | opt.hyp = str(opt.hyp)
33 | opt.project = str(opt.project)
34 | device = select_device(opt.device, batch_size=opt.batch_size)
35 |
36 | # train
37 | train(hyp_dict, opt, device, callbacks=Callbacks())
38 |
39 |
40 | if __name__ == "__main__":
41 | sweep()
42 |
--------------------------------------------------------------------------------
/改进算法整体框架.png: (English: "Overall framework of the improved algorithm")
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xiaolong-RRL/YOLOv5-Abnormal-Driving/276a53a032c937da3457981605880a75c377a659/改进算法整体框架.png
--------------------------------------------------------------------------------