├── ultralytics
├── nn
│ ├── __init__.py
│ └── __pycache__
│ │ ├── tasks.cpython-37.pyc
│ │ ├── __init__.cpython-37.pyc
│ │ ├── modules.cpython-37.pyc
│ │ └── autobackend.cpython-37.pyc
├── yolo
│ ├── engine
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── model.cpython-37.pyc
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ ├── exporter.cpython-37.pyc
│ │ │ ├── predictor.cpython-37.pyc
│ │ │ ├── trainer.cpython-37.pyc
│ │ │ └── validator.cpython-37.pyc
│ │ ├── model.py
│ │ └── validator.py
│ ├── data
│ │ ├── dataloaders
│ │ │ ├── __init__.py
│ │ │ └── __pycache__
│ │ │ │ ├── __init__.cpython-37.pyc
│ │ │ │ ├── v5loader.cpython-37.pyc
│ │ │ │ ├── stream_loaders.cpython-37.pyc
│ │ │ │ └── v5augmentations.cpython-37.pyc
│ │ ├── __pycache__
│ │ │ ├── base.cpython-37.pyc
│ │ │ ├── build.cpython-37.pyc
│ │ │ ├── utils.cpython-37.pyc
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ ├── augment.cpython-37.pyc
│ │ │ ├── dataset.cpython-37.pyc
│ │ │ └── dataset_wrappers.cpython-37.pyc
│ │ ├── __init__.py
│ │ ├── dataset_wrappers.py
│ │ ├── datasets
│ │ │ ├── coco128.yaml
│ │ │ ├── coco128-seg.yaml
│ │ │ └── coco.yaml
│ │ ├── build.py
│ │ └── base.py
│ ├── __init__.py
│ ├── utils
│ │ ├── callbacks
│ │ │ ├── __init__.py
│ │ │ ├── __pycache__
│ │ │ │ ├── wb.cpython-37.pyc
│ │ │ │ ├── base.cpython-37.pyc
│ │ │ │ ├── comet.cpython-37.pyc
│ │ │ │ ├── hub.cpython-37.pyc
│ │ │ │ ├── clearml.cpython-37.pyc
│ │ │ │ ├── __init__.cpython-37.pyc
│ │ │ │ └── tensorboard.cpython-37.pyc
│ │ │ ├── tensorboard.py
│ │ │ ├── comet.py
│ │ │ ├── wb.py
│ │ │ ├── clearml.py
│ │ │ ├── hub.py
│ │ │ └── base.py
│ │ ├── __pycache__
│ │ │ ├── dist.cpython-37.pyc
│ │ │ ├── files.cpython-37.pyc
│ │ │ ├── loss.cpython-37.pyc
│ │ │ ├── ops.cpython-37.pyc
│ │ │ ├── tal.cpython-37.pyc
│ │ │ ├── checks.cpython-37.pyc
│ │ │ ├── metrics.cpython-37.pyc
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ ├── autobatch.cpython-37.pyc
│ │ │ ├── downloads.cpython-37.pyc
│ │ │ ├── instance.cpython-37.pyc
│ │ │ ├── plotting.cpython-37.pyc
│ │ │ └── torch_utils.cpython-37.pyc
│ │ ├── loss.py
│ │ ├── dist.py
│ │ ├── autobatch.py
│ │ ├── files.py
│ │ └── downloads.py
│ ├── __pycache__
│ │ ├── cli.cpython-37.pyc
│ │ └── __init__.cpython-37.pyc
│ ├── v8
│ │ ├── __pycache__
│ │ │ └── __init__.cpython-37.pyc
│ │ ├── detect
│ │ │ ├── __pycache__
│ │ │ │ ├── val.cpython-37.pyc
│ │ │ │ ├── train.cpython-37.pyc
│ │ │ │ ├── __init__.cpython-37.pyc
│ │ │ │ └── predict.cpython-37.pyc
│ │ │ ├── __init__.py
│ │ │ └── predict.py
│ │ ├── classify
│ │ │ ├── __pycache__
│ │ │ │ ├── val.cpython-37.pyc
│ │ │ │ ├── train.cpython-37.pyc
│ │ │ │ ├── __init__.cpython-37.pyc
│ │ │ │ └── predict.cpython-37.pyc
│ │ │ ├── __init__.py
│ │ │ ├── val.py
│ │ │ ├── predict.py
│ │ │ └── train.py
│ │ ├── segment
│ │ │ ├── __pycache__
│ │ │ │ ├── train.cpython-37.pyc
│ │ │ │ ├── val.cpython-37.pyc
│ │ │ │ ├── predict.cpython-37.pyc
│ │ │ │ └── __init__.cpython-37.pyc
│ │ │ ├── __init__.py
│ │ │ ├── predict.py
│ │ │ └── train.py
│ │ ├── __init__.py
│ │ └── models
│ │ │ ├── cls
│ │ │ │ ├── yolov8l-cls.yaml
│ │ │ │ ├── yolov8m-cls.yaml
│ │ │ │ ├── yolov8n-cls.yaml
│ │ │ │ ├── yolov8s-cls.yaml
│ │ │ │ └── yolov8x-cls.yaml
│ │ │ ├── yolov8l.yaml
│ │ │ ├── yolov8m.yaml
│ │ │ ├── yolov8x.yaml
│ │ │ ├── yolov8n.yaml
│ │ │ ├── yolov8s.yaml
│ │ │ ├── seg
│ │ │ │ ├── yolov8l-seg.yaml
│ │ │ │ ├── yolov8m-seg.yaml
│ │ │ │ ├── yolov8x-seg.yaml
│ │ │ │ ├── yolov8n-seg.yaml
│ │ │ │ └── yolov8s-seg.yaml
│ │ │ └── yolov8x6.yaml
│ ├── configs
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ └── hydra_patch.cpython-37.pyc
│ │ ├── __init__.py
│ │ ├── hydra_patch.py
│ │ └── default.yaml
│ └── cli.py
├── __pycache__
│ └── __init__.cpython-37.pyc
├── hub
│ ├── __pycache__
│ │ ├── auth.cpython-37.pyc
│ │ ├── session.cpython-37.pyc
│ │ ├── utils.cpython-37.pyc
│ │ └── __init__.cpython-37.pyc
│ ├── auth.py
│ ├── session.py
│ ├── __init__.py
│ └── utils.py
└── __init__.py
├── Arial.ttf
├── docs
├── SPP.png
├── bus.jpg
├── c2f.png
├── c3.png
├── dfl.png
├── nms.png
├── trt.png
├── SPPF.png
├── cat1.jpg
├── dog1.jpg
├── horse.jpg
├── loss.png
├── sppvs.png
├── v5FPN.png
├── v5head.png
├── v6FPN.png
├── v8FPN.png
├── v8head.png
├── yolov5.png
├── yolov7.png
├── yolov8.png
├── zidane.jpg
├── F1_curve.png
├── PR_curve.png
├── P_curve.png
└── train_log1.png
├── tensorrt
├── yolov8
│ ├── yolov8
│ │ ├── res
│ │ │ ├── bus.jpg
│ │ │ ├── cat1.jpg
│ │ │ ├── dog1.jpg
│ │ │ ├── horse.jpg
│ │ │ └── zidane.jpg
│ │ ├── yolov8.cpp
│ │ ├── yolov8.vcxproj.user
│ │ ├── yolov8.vcxproj.filters
│ │ └── yolov8.vcxproj
│ └── yolov8.sln
├── yolov8_add_nms.py
└── yolov8_add_postprocess.py
├── export.py
├── score_data.yaml
├── yolov8s.yaml
└── inference.py
/ultralytics/nn/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ultralytics/yolo/engine/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ultralytics/yolo/data/dataloaders/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Arial.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/Arial.ttf
--------------------------------------------------------------------------------
/docs/SPP.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/docs/SPP.png
--------------------------------------------------------------------------------
/docs/bus.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/docs/bus.jpg
--------------------------------------------------------------------------------
/docs/c2f.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/docs/c2f.png
--------------------------------------------------------------------------------
/docs/c3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/docs/c3.png
--------------------------------------------------------------------------------
/docs/dfl.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/docs/dfl.png
--------------------------------------------------------------------------------
/docs/nms.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/docs/nms.png
--------------------------------------------------------------------------------
/docs/trt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/docs/trt.png
--------------------------------------------------------------------------------
/docs/SPPF.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/docs/SPPF.png
--------------------------------------------------------------------------------
/docs/cat1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/docs/cat1.jpg
--------------------------------------------------------------------------------
/docs/dog1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/docs/dog1.jpg
--------------------------------------------------------------------------------
/docs/horse.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/docs/horse.jpg
--------------------------------------------------------------------------------
/docs/loss.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/docs/loss.png
--------------------------------------------------------------------------------
/docs/sppvs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/docs/sppvs.png
--------------------------------------------------------------------------------
/docs/v5FPN.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/docs/v5FPN.png
--------------------------------------------------------------------------------
/docs/v5head.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/docs/v5head.png
--------------------------------------------------------------------------------
/docs/v6FPN.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/docs/v6FPN.png
--------------------------------------------------------------------------------
/docs/v8FPN.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/docs/v8FPN.png
--------------------------------------------------------------------------------
/docs/v8head.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/docs/v8head.png
--------------------------------------------------------------------------------
/docs/yolov5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/docs/yolov5.png
--------------------------------------------------------------------------------
/docs/yolov7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/docs/yolov7.png
--------------------------------------------------------------------------------
/docs/yolov8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/docs/yolov8.png
--------------------------------------------------------------------------------
/docs/zidane.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/docs/zidane.jpg
--------------------------------------------------------------------------------
/docs/F1_curve.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/docs/F1_curve.png
--------------------------------------------------------------------------------
/docs/PR_curve.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/docs/PR_curve.png
--------------------------------------------------------------------------------
/docs/P_curve.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/docs/P_curve.png
--------------------------------------------------------------------------------
/docs/train_log1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/docs/train_log1.png
--------------------------------------------------------------------------------
/ultralytics/yolo/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from . import v8
4 |
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/__init__.py:
--------------------------------------------------------------------------------
1 | from .base import add_integration_callbacks, default_callbacks
2 |
--------------------------------------------------------------------------------
/tensorrt/yolov8/yolov8/res/bus.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/tensorrt/yolov8/yolov8/res/bus.jpg
--------------------------------------------------------------------------------
/tensorrt/yolov8/yolov8/res/cat1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/tensorrt/yolov8/yolov8/res/cat1.jpg
--------------------------------------------------------------------------------
/tensorrt/yolov8/yolov8/res/dog1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/tensorrt/yolov8/yolov8/res/dog1.jpg
--------------------------------------------------------------------------------
/tensorrt/yolov8/yolov8/yolov8.cpp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/tensorrt/yolov8/yolov8/yolov8.cpp
--------------------------------------------------------------------------------
/tensorrt/yolov8/yolov8/res/horse.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/tensorrt/yolov8/yolov8/res/horse.jpg
--------------------------------------------------------------------------------
/tensorrt/yolov8/yolov8/res/zidane.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/tensorrt/yolov8/yolov8/res/zidane.jpg
--------------------------------------------------------------------------------
/ultralytics/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/hub/__pycache__/auth.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/hub/__pycache__/auth.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/nn/__pycache__/tasks.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/nn/__pycache__/tasks.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/__pycache__/cli.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/__pycache__/cli.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/hub/__pycache__/session.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/hub/__pycache__/session.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/hub/__pycache__/utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/hub/__pycache__/utils.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/nn/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/nn/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/nn/__pycache__/modules.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/nn/__pycache__/modules.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/hub/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/hub/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/nn/__pycache__/autobackend.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/nn/__pycache__/autobackend.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/data/__pycache__/base.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/data/__pycache__/base.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/data/__pycache__/build.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/data/__pycache__/build.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/data/__pycache__/utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/data/__pycache__/utils.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/__pycache__/dist.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/utils/__pycache__/dist.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/__pycache__/files.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/utils/__pycache__/files.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/__pycache__/loss.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/utils/__pycache__/loss.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/__pycache__/ops.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/utils/__pycache__/ops.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/__pycache__/tal.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/utils/__pycache__/tal.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/v8/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/data/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/data/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/data/__pycache__/augment.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/data/__pycache__/augment.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/data/__pycache__/dataset.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/data/__pycache__/dataset.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/engine/__pycache__/model.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/engine/__pycache__/model.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/__pycache__/checks.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/utils/__pycache__/checks.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/__pycache__/metrics.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/utils/__pycache__/metrics.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/detect/__pycache__/val.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/v8/detect/__pycache__/val.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/configs/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/configs/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/engine/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/engine/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/engine/__pycache__/exporter.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/engine/__pycache__/exporter.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/engine/__pycache__/predictor.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/engine/__pycache__/predictor.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/engine/__pycache__/trainer.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/engine/__pycache__/trainer.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/engine/__pycache__/validator.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/engine/__pycache__/validator.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/utils/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/__pycache__/autobatch.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/utils/__pycache__/autobatch.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/__pycache__/downloads.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/utils/__pycache__/downloads.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/__pycache__/instance.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/utils/__pycache__/instance.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/__pycache__/plotting.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/utils/__pycache__/plotting.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/classify/__pycache__/val.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/v8/classify/__pycache__/val.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/detect/__pycache__/train.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/v8/detect/__pycache__/train.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/segment/__pycache__/train.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/v8/segment/__pycache__/train.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/segment/__pycache__/val.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/v8/segment/__pycache__/val.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/__pycache__/torch_utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/utils/__pycache__/torch_utils.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/__pycache__/wb.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/utils/callbacks/__pycache__/wb.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/classify/__pycache__/train.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/v8/classify/__pycache__/train.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/detect/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/v8/detect/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/detect/__pycache__/predict.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/v8/detect/__pycache__/predict.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/segment/__pycache__/predict.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/v8/segment/__pycache__/predict.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/configs/__pycache__/hydra_patch.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/configs/__pycache__/hydra_patch.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/data/__pycache__/dataset_wrappers.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/data/__pycache__/dataset_wrappers.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/__pycache__/base.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/utils/callbacks/__pycache__/base.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/__pycache__/comet.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/utils/callbacks/__pycache__/comet.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/__pycache__/hub.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/utils/callbacks/__pycache__/hub.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/classify/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/v8/classify/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/classify/__pycache__/predict.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/v8/classify/__pycache__/predict.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/segment/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/v8/segment/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/__pycache__/clearml.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/utils/callbacks/__pycache__/clearml.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/data/dataloaders/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/data/dataloaders/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/data/dataloaders/__pycache__/v5loader.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/data/dataloaders/__pycache__/v5loader.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/utils/callbacks/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/__pycache__/tensorboard.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/utils/callbacks/__pycache__/tensorboard.cpython-37.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/data/dataloaders/__pycache__/stream_loaders.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/data/dataloaders/__pycache__/stream_loaders.cpython-37.pyc
--------------------------------------------------------------------------------
/export.py:
--------------------------------------------------------------------------------
from ultralytics import YOLO

# Load the trained detection weights (last checkpoint of the local run)
# and export them to ONNX. opset 13 keeps the graph compatible with the
# TensorRT tooling used elsewhere in this repo.
model = YOLO("./runs/detect/train/weights/last.pt")  # load trained YOLOv8 weights
model.export(format="onnx", opset=13)  # export the model to ONNX format
--------------------------------------------------------------------------------
/ultralytics/yolo/data/dataloaders/__pycache__/v5augmentations.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/YOLOv8/HEAD/ultralytics/yolo/data/dataloaders/__pycache__/v5augmentations.cpython-37.pyc
--------------------------------------------------------------------------------
/tensorrt/yolov8/yolov8/yolov8.vcxproj.user:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/detect/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from .predict import DetectionPredictor, predict
4 | from .train import DetectionTrainer, train
5 | from .val import DetectionValidator, val
6 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/segment/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from .predict import SegmentationPredictor, predict
4 | from .train import SegmentationTrainer, train
5 | from .val import SegmentationValidator, val
6 |
--------------------------------------------------------------------------------
/ultralytics/yolo/data/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from .base import BaseDataset
4 | from .build import build_classification_dataloader, build_dataloader
5 | from .dataset import ClassificationDataset, SemanticDataset, YOLODataset
6 | from .dataset_wrappers import MixAndRectDataset
7 |
--------------------------------------------------------------------------------
/ultralytics/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | __version__ = "8.0.0"
4 |
5 | from ultralytics.hub import checks
6 | from ultralytics.yolo.engine.model import YOLO
7 | from ultralytics.yolo.utils import ops
8 |
9 | __all__ = ["__version__", "YOLO", "hub", "checks"] # allow simpler import
10 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/classify/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from ultralytics.yolo.v8.classify.predict import ClassificationPredictor, predict
4 | from ultralytics.yolo.v8.classify.train import ClassificationTrainer, train
5 | from ultralytics.yolo.v8.classify.val import ClassificationValidator, val
6 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from pathlib import Path
4 |
5 | from ultralytics.yolo.v8 import classify, detect, segment
6 |
7 | ROOT = Path(__file__).parents[0] # yolov8 ROOT
8 |
9 | __all__ = ["classify", "segment", "detect"]
10 |
11 | from ultralytics.yolo.configs import hydra_patch # noqa (patch hydra cli)
12 |
--------------------------------------------------------------------------------
/score_data.yaml:
--------------------------------------------------------------------------------
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2 | # Custom 4-class "score" detection dataset (person, cat, dog, horse)
3 | # Example usage: python train.py --data score_data.yaml
4 | # parent
5 | # ├── yolov8
6 | # └── dataset
7 | #     └── score  ← custom dataset root (images/ + labels/)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | train: ./dataset/score/images/train # train images
12 | val: ./dataset/score/images/val # val images
13 | #test: ./dataset/score/images/test # test images (optional)
14 |
15 | # Classes
16 | names:
17 | 0: person
18 | 1: cat
19 | 2: dog
20 | 3: horse
21 |
22 |
23 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/models/cls/yolov8l-cls.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 1000 # number of classes
5 | depth_multiple: 1.00 # scales module repeats
6 | width_multiple: 1.00 # scales convolution channels
7 |
8 | # YOLOv8.0n backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [1024, True]]
20 |
21 | # YOLOv8.0n head
22 | head:
23 | - [-1, 1, Classify, [nc]]
24 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/models/cls/yolov8m-cls.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 1000 # number of classes
5 | depth_multiple: 0.67 # scales module repeats
6 | width_multiple: 0.75 # scales convolution channels
7 |
8 | # YOLOv8.0n backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [1024, True]]
20 |
21 | # YOLOv8.0n head
22 | head:
23 | - [-1, 1, Classify, [nc]]
24 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/models/cls/yolov8n-cls.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 1000 # number of classes
5 | depth_multiple: 0.33 # scales module repeats
6 | width_multiple: 0.25 # scales convolution channels
7 |
8 | # YOLOv8.0n backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [1024, True]]
20 |
21 | # YOLOv8.0n head
22 | head:
23 | - [-1, 1, Classify, [nc]]
24 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/models/cls/yolov8s-cls.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 1000 # number of classes
5 | depth_multiple: 0.33 # scales module repeats
6 | width_multiple: 0.50 # scales convolution channels
7 |
8 | # YOLOv8.0n backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [1024, True]]
20 |
21 | # YOLOv8.0n head
22 | head:
23 | - [-1, 1, Classify, [nc]]
24 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/models/cls/yolov8x-cls.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 1000 # number of classes
5 | depth_multiple: 1.00 # scales module repeats
6 | width_multiple: 1.25 # scales convolution channels
7 |
8 | # YOLOv8.0n backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [1024, True]]
20 |
21 | # YOLOv8.0n head
22 | head:
23 | - [-1, 1, Classify, [nc]]
24 |
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/tensorboard.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from torch.utils.tensorboard import SummaryWriter
4 |
5 | writer = None # TensorBoard SummaryWriter instance
6 |
7 |
def _log_scalars(scalars, step=0):
    """Write every (tag, value) pair in `scalars` to TensorBoard at `step`.

    No-ops when the module-level SummaryWriter has not been created yet:
    `writer` stays None until on_pretrain_routine_start runs, and the
    original unguarded loop would raise AttributeError in that window.
    """
    if writer:  # guard against logging before writer initialization
        for k, v in scalars.items():
            writer.add_scalar(k, v, step)
11 |
12 |
def on_pretrain_routine_start(trainer):
    """Create the module-level SummaryWriter logging into trainer.save_dir."""
    # Rebind the module global so _log_scalars (and any later callback)
    # writes into this training run's save directory.
    global writer
    writer = SummaryWriter(str(trainer.save_dir))
16 |
17 |
def on_batch_end(trainer):
    """Send the current batch's training losses to TensorBoard."""
    train_losses = trainer.label_loss_items(trainer.tloss, prefix="train")
    _log_scalars(train_losses, trainer.epoch + 1)
20 |
21 |
def on_fit_epoch_end(trainer):
    """Send the epoch's metrics dict to TensorBoard."""
    epoch_metrics = trainer.metrics
    _log_scalars(epoch_metrics, trainer.epoch + 1)
24 |
25 |
# Hook-name -> handler mapping consumed by the trainer's callback dispatcher.
callbacks = {
    "on_pretrain_routine_start": on_pretrain_routine_start,
    "on_fit_epoch_end": on_fit_epoch_end,
    "on_batch_end": on_batch_end,
}
30 |
--------------------------------------------------------------------------------
/tensorrt/yolov8/yolov8/yolov8.vcxproj.filters:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | {4FC737F1-C7A5-4376-A066-2A32D752A2FF}
6 | cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx
7 |
8 |
9 | {93995380-89BD-4b04-88EB-625FBE52EBFB}
10 | h;hh;hpp;hxx;hm;inl;inc;ipp;xsd
11 |
12 |
13 | {67DA6AB6-F800-4c08-8B7A-83BB121AAD01}
14 | rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms
15 |
16 |
17 |
18 |
19 | 源文件
20 |
21 |
22 |
23 |
24 | 头文件
25 |
26 |
27 |
--------------------------------------------------------------------------------
/yolov8s.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 4 # number of classes
5 | depth_multiple: 0.33 # scales module repeats
6 | width_multiple: 0.50 # scales convolution channels
7 |
8 | # YOLOv8.0s backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [1024, True]]
20 | - [-1, 1, SPPF, [1024, 5]] # 9
21 |
22 | # YOLOv8.0s head
23 | head:
24 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
25 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
26 |   - [-1, 3, C2f, [512]]  # 12
27 |
28 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
29 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
30 |   - [-1, 3, C2f, [256]]  # 15 (P3/8-small)
31 |
32 | - [-1, 1, Conv, [256, 3, 2]]
33 | - [[-1, 12], 1, Concat, [1]] # cat head P4
34 |   - [-1, 3, C2f, [512]]  # 18 (P4/16-medium)
35 |
36 | - [-1, 1, Conv, [512, 3, 2]]
37 | - [[-1, 9], 1, Concat, [1]] # cat head P5
38 |   - [-1, 3, C2f, [1024]]  # 21 (P5/32-large)
39 |
40 | - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5)
41 |
--------------------------------------------------------------------------------
/ultralytics/yolo/configs/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from pathlib import Path
4 | from typing import Dict, Union
5 |
6 | from omegaconf import DictConfig, OmegaConf
7 |
8 | from ultralytics.yolo.configs.hydra_patch import check_config_mismatch
9 |
10 |
def get_config(config: Union[str, DictConfig], overrides: Union[str, Dict] = None):
    """
    Load and merge configuration data from a file or dictionary.

    Args:
        config (Union[str, DictConfig]): Configuration data as a file path or a DictConfig object.
        overrides (Union[str, Dict], optional): Overrides as a file path or a dictionary. Default is None.

    Returns:
        DictConfig: Merged configuration (overrides take precedence over config).
    """
    if overrides is None:
        overrides = {}
    # Normalize both inputs to OmegaConf containers before merging.
    # NOTE: isinstance against the builtin `dict` (the typing.Dict alias form
    # is deprecated for runtime checks); behavior is identical.
    if isinstance(config, (str, Path)):
        config = OmegaConf.load(config)
    elif isinstance(config, dict):
        config = OmegaConf.create(config)
    if isinstance(overrides, str):
        overrides = OmegaConf.load(overrides)
    elif isinstance(overrides, dict):
        overrides = OmegaConf.create(overrides)

    # Fail early on override keys that do not exist in the base config.
    check_config_mismatch(dict(overrides).keys(), dict(config).keys())

    return OmegaConf.merge(config, overrides)
37 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/models/yolov8l.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.00 # scales module repeats
6 | width_multiple: 1.00 # scales convolution channels
7 |
8 | # YOLOv8.0l backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [512, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [512, True]]
20 | - [-1, 1, SPPF, [512, 5]] # 9
21 |
22 | # YOLOv8.0l head
23 | head:
24 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
25 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
26 | - [-1, 3, C2f, [512]] # 13
27 |
28 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
29 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
30 | - [-1, 3, C2f, [256]] # 17 (P3/8-small)
31 |
32 | - [-1, 1, Conv, [256, 3, 2]]
33 | - [[-1, 12], 1, Concat, [1]] # cat head P4
34 | - [-1, 3, C2f, [512]] # 20 (P4/16-medium)
35 |
36 | - [-1, 1, Conv, [512, 3, 2]]
37 | - [[-1, 9], 1, Concat, [1]] # cat head P5
38 | - [-1, 3, C2f, [512]] # 23 (P5/32-large)
39 |
40 | - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5)
41 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/models/yolov8m.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.67 # scales module repeats
6 | width_multiple: 0.75 # scales convolution channels
7 |
8 | # YOLOv8.0m backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [768, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [768, True]]
20 | - [-1, 1, SPPF, [768, 5]] # 9
21 |
22 | # YOLOv8.0m head
23 | head:
24 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
25 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
26 | - [-1, 3, C2f, [512]] # 13
27 |
28 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
29 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
30 | - [-1, 3, C2f, [256]] # 17 (P3/8-small)
31 |
32 | - [-1, 1, Conv, [256, 3, 2]]
33 | - [[-1, 12], 1, Concat, [1]] # cat head P4
34 | - [-1, 3, C2f, [512]] # 20 (P4/16-medium)
35 |
36 | - [-1, 1, Conv, [512, 3, 2]]
37 | - [[-1, 9], 1, Concat, [1]] # cat head P5
38 | - [-1, 3, C2f, [768]] # 23 (P5/32-large)
39 |
40 | - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5)
41 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/models/yolov8x.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.00 # scales module repeats
6 | width_multiple: 1.25 # scales convolution channels
7 |
8 | # YOLOv8.0x backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [512, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [512, True]]
20 | - [-1, 1, SPPF, [512, 5]] # 9
21 |
22 | # YOLOv8.0x head
23 | head:
24 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
25 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
26 | - [-1, 3, C2f, [512]] # 13
27 |
28 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
29 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
30 | - [-1, 3, C2f, [256]] # 17 (P3/8-small)
31 |
32 | - [-1, 1, Conv, [256, 3, 2]]
33 | - [[-1, 12], 1, Concat, [1]] # cat head P4
34 | - [-1, 3, C2f, [512]] # 20 (P4/16-medium)
35 |
36 | - [-1, 1, Conv, [512, 3, 2]]
37 | - [[-1, 9], 1, Concat, [1]] # cat head P5
38 | - [-1, 3, C2f, [512]] # 23 (P5/32-large)
39 |
40 | - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5)
41 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/models/yolov8n.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # scales module repeats
6 | width_multiple: 0.25 # scales convolution channels
7 |
8 | # YOLOv8.0n backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [1024, True]]
20 | - [-1, 1, SPPF, [1024, 5]] # 9
21 |
22 | # YOLOv8.0n head
23 | head:
24 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
25 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
26 | - [-1, 3, C2f, [512]] # 13
27 |
28 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
29 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
30 | - [-1, 3, C2f, [256]] # 17 (P3/8-small)
31 |
32 | - [-1, 1, Conv, [256, 3, 2]]
33 | - [[-1, 12], 1, Concat, [1]] # cat head P4
34 | - [-1, 3, C2f, [512]] # 20 (P4/16-medium)
35 |
36 | - [-1, 1, Conv, [512, 3, 2]]
37 | - [[-1, 9], 1, Concat, [1]] # cat head P5
38 | - [-1, 3, C2f, [1024]] # 23 (P5/32-large)
39 |
40 | - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5)
41 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/models/yolov8s.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # scales module repeats
6 | width_multiple: 0.50 # scales convolution channels
7 |
8 | # YOLOv8.0s backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [1024, True]]
20 | - [-1, 1, SPPF, [1024, 5]] # 9
21 |
22 | # YOLOv8.0s head
23 | head:
24 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
25 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
26 | - [-1, 3, C2f, [512]] # 13
27 |
28 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
29 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
30 | - [-1, 3, C2f, [256]] # 17 (P3/8-small)
31 |
32 | - [-1, 1, Conv, [256, 3, 2]]
33 | - [[-1, 12], 1, Concat, [1]] # cat head P4
34 | - [-1, 3, C2f, [512]] # 20 (P4/16-medium)
35 |
36 | - [-1, 1, Conv, [512, 3, 2]]
37 | - [[-1, 9], 1, Concat, [1]] # cat head P5
38 | - [-1, 3, C2f, [1024]] # 23 (P5/32-large)
39 |
40 | - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5)
41 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/models/seg/yolov8l-seg.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.00 # scales module repeats
6 | width_multiple: 1.00 # scales convolution channels
7 |
8 | # YOLOv8.0l backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [512, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [512, True]]
20 | - [-1, 1, SPPF, [512, 5]] # 9
21 |
22 | # YOLOv8.0l head
23 | head:
24 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
25 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
26 | - [-1, 3, C2f, [512]] # 13
27 |
28 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
29 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
30 | - [-1, 3, C2f, [256]] # 17 (P3/8-small)
31 |
32 | - [-1, 1, Conv, [256, 3, 2]]
33 | - [[-1, 12], 1, Concat, [1]] # cat head P4
34 | - [-1, 3, C2f, [512]] # 20 (P4/16-medium)
35 |
36 | - [-1, 1, Conv, [512, 3, 2]]
37 | - [[-1, 9], 1, Concat, [1]] # cat head P5
38 | - [-1, 3, C2f, [512]] # 23 (P5/32-large)
39 |
40 | - [[15, 18, 21], 1, Segment, [nc, 32, 256]] # Detect(P3, P4, P5)
41 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/models/seg/yolov8m-seg.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.67 # scales module repeats
6 | width_multiple: 0.75 # scales convolution channels
7 |
8 | # YOLOv8.0m backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [768, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [768, True]]
20 | - [-1, 1, SPPF, [768, 5]] # 9
21 |
22 | # YOLOv8.0m head
23 | head:
24 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
25 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
26 | - [-1, 3, C2f, [512]] # 13
27 |
28 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
29 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
30 | - [-1, 3, C2f, [256]] # 17 (P3/8-small)
31 |
32 | - [-1, 1, Conv, [256, 3, 2]]
33 | - [[-1, 12], 1, Concat, [1]] # cat head P4
34 | - [-1, 3, C2f, [512]] # 20 (P4/16-medium)
35 |
36 | - [-1, 1, Conv, [512, 3, 2]]
37 | - [[-1, 9], 1, Concat, [1]] # cat head P5
38 | - [-1, 3, C2f, [768]] # 23 (P5/32-large)
39 |
40 | - [[15, 18, 21], 1, Segment, [nc, 32, 256]] # Detect(P3, P4, P5)
41 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/models/seg/yolov8x-seg.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.00 # scales module repeats
6 | width_multiple: 1.25 # scales convolution channels
7 |
8 | # YOLOv8.0x backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [512, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [512, True]]
20 | - [-1, 1, SPPF, [512, 5]] # 9
21 |
22 | # YOLOv8.0x head
23 | head:
24 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
25 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
26 | - [-1, 3, C2f, [512]] # 13
27 |
28 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
29 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
30 | - [-1, 3, C2f, [256]] # 17 (P3/8-small)
31 |
32 | - [-1, 1, Conv, [256, 3, 2]]
33 | - [[-1, 12], 1, Concat, [1]] # cat head P4
34 | - [-1, 3, C2f, [512]] # 20 (P4/16-medium)
35 |
36 | - [-1, 1, Conv, [512, 3, 2]]
37 | - [[-1, 9], 1, Concat, [1]] # cat head P5
38 | - [-1, 3, C2f, [512]] # 23 (P5/32-large)
39 |
40 | - [[15, 18, 21], 1, Segment, [nc, 32, 256]] # Detect(P3, P4, P5)
41 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/models/seg/yolov8n-seg.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # scales module repeats
6 | width_multiple: 0.25 # scales convolution channels
7 |
8 | # YOLOv8.0n backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [1024, True]]
20 | - [-1, 1, SPPF, [1024, 5]] # 9
21 |
22 | # YOLOv8.0n head
23 | head:
24 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
25 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
26 | - [-1, 3, C2f, [512]] # 13
27 |
28 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
29 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
30 | - [-1, 3, C2f, [256]] # 17 (P3/8-small)
31 |
32 | - [-1, 1, Conv, [256, 3, 2]]
33 | - [[-1, 12], 1, Concat, [1]] # cat head P4
34 | - [-1, 3, C2f, [512]] # 20 (P4/16-medium)
35 |
36 | - [-1, 1, Conv, [512, 3, 2]]
37 | - [[-1, 9], 1, Concat, [1]] # cat head P5
38 | - [-1, 3, C2f, [1024]] # 23 (P5/32-large)
39 |
40 | - [[15, 18, 21], 1, Segment, [nc, 32, 256]] # Detect(P3, P4, P5)
41 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/models/seg/yolov8s-seg.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 0.33 # scales module repeats
6 | width_multiple: 0.50 # scales convolution channels
7 |
8 | # YOLOv8.0s backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [1024, True]]
20 | - [-1, 1, SPPF, [1024, 5]] # 9
21 |
22 | # YOLOv8.0s head
23 | head:
24 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
25 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
26 | - [-1, 3, C2f, [512]] # 13
27 |
28 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
29 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
30 | - [-1, 3, C2f, [256]] # 17 (P3/8-small)
31 |
32 | - [-1, 1, Conv, [256, 3, 2]]
33 | - [[-1, 12], 1, Concat, [1]] # cat head P4
34 | - [-1, 3, C2f, [512]] # 20 (P4/16-medium)
35 |
36 | - [-1, 1, Conv, [512, 3, 2]]
37 | - [[-1, 9], 1, Concat, [1]] # cat head P5
38 | - [-1, 3, C2f, [1024]] # 23 (P5/32-large)
39 |
40 | - [[15, 18, 21], 1, Segment, [nc, 32, 256]] # Detect(P3, P4, P5)
41 |
--------------------------------------------------------------------------------
/ultralytics/yolo/data/dataset_wrappers.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import collections
4 | from copy import deepcopy
5 |
6 | from .augment import LetterBox
7 |
8 |
class MixAndRectDataset:
    """A wrapper of multiple images mixed dataset.

    Args:
        dataset (:obj:`BaseDataset`): The dataset to be mixed.
        transforms (Sequence[dict]): config dict to be composed.
    """

    def __init__(self, dataset):
        self.dataset = dataset
        self.imgsz = dataset.imgsz

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        labels = deepcopy(self.dataset[index])
        for transform in self.dataset.transforms.tolist():
            # Mixing transforms (mosaic/mixup) expose `get_indexes` to pick extra images
            if hasattr(transform, "get_indexes"):
                indexes = transform.get_indexes(self.dataset)
                if not isinstance(indexes, collections.abc.Sequence):
                    indexes = [indexes]
                labels["mix_labels"] = [deepcopy(self.dataset[i]) for i in indexes]
            # In rect mode, LetterBox pads to the shape assigned to this sample's batch
            if self.dataset.rect and isinstance(transform, LetterBox):
                transform.new_shape = self.dataset.batch_shapes[self.dataset.batch[index]]
            labels = transform(labels)
            labels.pop("mix_labels", None)
        return labels
40 |
--------------------------------------------------------------------------------
/tensorrt/yolov8/yolov8.sln:
--------------------------------------------------------------------------------
1 |
2 | Microsoft Visual Studio Solution File, Format Version 12.00
3 | # Visual Studio 15
4 | VisualStudioVersion = 15.0.28307.1169
5 | MinimumVisualStudioVersion = 10.0.40219.1
6 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "yolov8", "yolov8\yolov8.vcxproj", "{4898203C-0C0B-4BC0-9597-6F143C06EDCA}"
7 | EndProject
8 | Global
9 | GlobalSection(SolutionConfigurationPlatforms) = preSolution
10 | Debug|x64 = Debug|x64
11 | Debug|x86 = Debug|x86
12 | Release|x64 = Release|x64
13 | Release|x86 = Release|x86
14 | EndGlobalSection
15 | GlobalSection(ProjectConfigurationPlatforms) = postSolution
16 | {4898203C-0C0B-4BC0-9597-6F143C06EDCA}.Debug|x64.ActiveCfg = Debug|x64
17 | {4898203C-0C0B-4BC0-9597-6F143C06EDCA}.Debug|x64.Build.0 = Debug|x64
18 | {4898203C-0C0B-4BC0-9597-6F143C06EDCA}.Debug|x86.ActiveCfg = Debug|Win32
19 | {4898203C-0C0B-4BC0-9597-6F143C06EDCA}.Debug|x86.Build.0 = Debug|Win32
20 | {4898203C-0C0B-4BC0-9597-6F143C06EDCA}.Release|x64.ActiveCfg = Release|x64
21 | {4898203C-0C0B-4BC0-9597-6F143C06EDCA}.Release|x64.Build.0 = Release|x64
22 | {4898203C-0C0B-4BC0-9597-6F143C06EDCA}.Release|x86.ActiveCfg = Release|Win32
23 | {4898203C-0C0B-4BC0-9597-6F143C06EDCA}.Release|x86.Build.0 = Release|Win32
24 | EndGlobalSection
25 | GlobalSection(SolutionProperties) = preSolution
26 | HideSolutionNode = FALSE
27 | EndGlobalSection
28 | GlobalSection(ExtensibilityGlobals) = postSolution
29 | SolutionGuid = {3DF27D24-2492-49DB-B151-3AE684C89B43}
30 | EndGlobalSection
31 | EndGlobal
32 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/models/yolov8x6.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.00 # scales module repeats
6 | width_multiple: 1.25 # scales convolution channels
7 |
8 | # YOLOv8.0x6 backbone
9 | backbone:
10 | # [from, repeats, module, args]
11 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
12 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
13 | - [-1, 3, C2f, [128, True]]
14 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
15 | - [-1, 6, C2f, [256, True]]
16 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
17 | - [-1, 6, C2f, [512, True]]
18 | - [-1, 1, Conv, [512, 3, 2]] # 7-P5/32
19 | - [-1, 3, C2f, [512, True]]
20 | - [-1, 1, Conv, [512, 3, 2]] # 9-P6/64
21 | - [-1, 3, C2f, [512, True]]
22 | - [-1, 1, SPPF, [512, 5]] # 11
23 |
24 | # YOLOv8.0x6 head
25 | head:
26 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
27 | - [[-1, 8], 1, Concat, [1]] # cat backbone P5
28 | - [-1, 3, C2, [512, False]] # 14
29 |
30 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
31 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
32 | - [-1, 3, C2, [512, False]] # 17
33 |
34 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
35 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
36 | - [-1, 3, C2, [256, False]] # 20 (P3/8-small)
37 |
38 | - [-1, 1, Conv, [256, 3, 2]]
39 | - [[-1, 17], 1, Concat, [1]] # cat head P4
40 | - [-1, 3, C2, [512, False]] # 23 (P4/16-medium)
41 |
42 | - [-1, 1, Conv, [512, 3, 2]]
43 | - [[-1, 14], 1, Concat, [1]] # cat head P5
44 | - [-1, 3, C2, [512, False]] # 26 (P5/32-large)
45 |
46 | - [-1, 1, Conv, [512, 3, 2]]
47 | - [[-1, 11], 1, Concat, [1]] # cat head P6
48 | - [-1, 3, C2, [512, False]] # 29 (P6/64-xlarge)
49 |
50 | - [[20, 23, 26, 29], 1, Detect, [nc]] # Detect(P3, P4, P5, P6)
51 |
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/comet.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from ultralytics.yolo.utils.torch_utils import get_flops, get_num_params
4 |
5 | try:
6 | import comet_ml
7 |
8 | except (ModuleNotFoundError, ImportError):
9 | comet_ml = None
10 |
11 |
def on_pretrain_routine_start(trainer):
    """Create a Comet experiment and log the full training configuration."""
    project = trainer.args.project or "YOLOv8"
    experiment = comet_ml.Experiment(project_name=project)
    experiment.log_parameters(dict(trainer.args))
15 |
16 |
def on_train_epoch_end(trainer):
    """Log per-epoch training losses; on the second epoch also log mosaic batch images."""
    step = trainer.epoch + 1
    experiment = comet_ml.get_global_experiment()
    experiment.log_metrics(trainer.label_loss_items(trainer.tloss, prefix="train"), step=step)
    if trainer.epoch == 1:
        for f in trainer.save_dir.glob('train_batch*.jpg'):
            experiment.log_image(f, name=f.stem, step=step)
23 |
24 |
def on_fit_epoch_end(trainer):
    """Log validation metrics; after the first epoch also log static model info."""
    step = trainer.epoch + 1
    experiment = comet_ml.get_global_experiment()
    experiment.log_metrics(trainer.metrics, step=step)
    if trainer.epoch == 0:
        experiment.log_metrics(
            {
                "model/parameters": get_num_params(trainer.model),
                "model/GFLOPs": round(get_flops(trainer.model), 3),
                "model/speed(ms)": round(trainer.validator.speed[1], 3)},
            step=step)
34 |
35 |
def on_train_end(trainer):
    """Upload the best checkpoint to Comet as the final model."""
    comet_ml.get_global_experiment().log_model(
        "YOLOv8", file_or_folder=trainer.best, file_name="best.pt", overwrite=True)
39 |
40 |
# Register Comet hooks only when the comet_ml import succeeded
callbacks = {} if comet_ml is None else {
    "on_pretrain_routine_start": on_pretrain_routine_start,
    "on_train_epoch_end": on_train_epoch_end,
    "on_fit_epoch_end": on_fit_epoch_end,
    "on_train_end": on_train_end}
46 |
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/wb.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from ultralytics.yolo.utils.torch_utils import get_flops, get_num_params
4 |
5 | try:
6 | import wandb
7 |
8 | assert hasattr(wandb, '__version__')
9 | except (ImportError, AssertionError):
10 | wandb = None
11 |
12 |
def on_pretrain_routine_start(trainer):
    """Initialize a W&B run (at most once) with the trainer's project, name and config.

    The original wrote this as an expression statement whose else-branch
    (`wandb.run`) was a no-op; a plain conditional is clearer and behaves
    identically.
    """
    if not wandb.run:
        wandb.init(project=trainer.args.project or "YOLOv8",
                   name=trainer.args.name,
                   config=dict(trainer.args))
16 |
17 |
def on_fit_epoch_end(trainer):
    """Log validation metrics every epoch; after epoch 0 also log static model info."""
    step = trainer.epoch + 1
    wandb.run.log(trainer.metrics, step=step)
    if trainer.epoch == 0:
        wandb.run.log(
            {
                "model/parameters": get_num_params(trainer.model),
                "model/GFLOPs": round(get_flops(trainer.model), 3),
                "model/speed(ms)": round(trainer.validator.speed[1], 3)},
            step=step)
26 |
27 |
def on_train_epoch_end(trainer):
    """Log training losses and learning rates; on the second epoch log mosaic images."""
    step = trainer.epoch + 1
    wandb.run.log(trainer.label_loss_items(trainer.tloss, prefix="train"), step=step)
    wandb.run.log(trainer.lr, step=step)
    if trainer.epoch == 1:
        images = {f.stem: wandb.Image(str(f)) for f in trainer.save_dir.glob('train_batch*.jpg')}
        wandb.run.log(images, step=step)
35 |
36 |
def on_train_end(trainer):
    """Upload the best checkpoint (when it exists) as a W&B model artifact."""
    artifact = wandb.Artifact(type="model", name=f"run_{wandb.run.id}_model")
    if trainer.best.exists():
        artifact.add_file(trainer.best)
    wandb.run.log_artifact(artifact)
42 |
43 |
# Register W&B hooks only when the wandb import succeeded
callbacks = {} if wandb is None else {
    "on_pretrain_routine_start": on_pretrain_routine_start,
    "on_train_epoch_end": on_train_epoch_end,
    "on_fit_epoch_end": on_fit_epoch_end,
    "on_train_end": on_train_end}
49 |
--------------------------------------------------------------------------------
/ultralytics/yolo/cli.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import shutil
4 | from pathlib import Path
5 |
6 | import hydra
7 |
8 | from ultralytics import hub, yolo
9 | from ultralytics.yolo.utils import DEFAULT_CONFIG, LOGGER, colorstr
10 |
11 | DIR = Path(__file__).parent
12 |
13 |
@hydra.main(version_base=None, config_path=str(DEFAULT_CONFIG.parent.relative_to(DIR)), config_name=DEFAULT_CONFIG.name)
def cli(cfg):
    """
    Run a specified task and mode with the given configuration.

    Args:
        cfg (DictConfig): Configuration for the task and mode.
    """
    # LOGGER.info(f"{colorstr(f'Ultralytics YOLO v{ultralytics.__version__}')}")
    task = cfg.task.lower()
    mode = cfg.mode.lower()

    # Special case: "init" just copies the default config into the working directory
    if task == "init":
        shutil.copy2(DEFAULT_CONFIG, Path.cwd())
        LOGGER.info(f"""
        {colorstr("YOLO:")} configuration saved to {Path.cwd() / DEFAULT_CONFIG.name}.
        To run experiments using custom configuration:
        yolo task='task' mode='mode' --config-name config_file.yaml
        """)
        return

    # Resolve the task module, then the mode function inside it
    task_module_map = {"detect": yolo.v8.detect, "segment": yolo.v8.segment, "classify": yolo.v8.classify}
    module = task_module_map.get(task)
    if module is None:
        raise SyntaxError(f"task not recognized. Choices are {', '.join(task_module_map.keys())}")

    mode_func_map = {
        "train": module.train,
        "val": module.val,
        "predict": module.predict,
        "export": yolo.engine.exporter.export,
        "checks": hub.checks}
    func = mode_func_map.get(mode)
    if func is None:
        raise SyntaxError(f"mode not recognized. Choices are {', '.join(mode_func_map.keys())}")

    func(cfg)
53 |
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/clearml.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from ultralytics.yolo.utils.torch_utils import get_flops, get_num_params
4 |
5 | try:
6 | import clearml
7 | from clearml import Task
8 |
9 | assert hasattr(clearml, '__version__')
10 | except (ImportError, AssertionError):
11 | clearml = None
12 |
13 |
def _log_images(imgs_dict, group="", step=0):
    """Report each name->path pair as an image to the current ClearML task, if one exists."""
    task = Task.current_task()
    if not task:
        return
    logger = task.get_logger()
    for name, path in imgs_dict.items():
        logger.report_image(group, name, step, path)
19 |
20 |
def on_pretrain_routine_start(trainer):
    """Create a fresh ClearML task and attach the training configuration to it."""
    # TODO: reuse existing task
    task = Task.init(
        project_name=trainer.args.project or "YOLOv8",
        task_name=trainer.args.name,
        tags=['YOLOv8'],
        output_uri=True,
        reuse_last_task_id=False,
        auto_connect_frameworks={'pytorch': False})  # checkpoints are uploaded manually in on_train_end
    task.connect(dict(trainer.args), name='General')
30 |
31 |
def on_train_epoch_end(trainer):
    """On the second epoch, log the saved mosaic training-batch images."""
    if trainer.epoch != 1:
        return
    imgs = {f.stem: str(f) for f in trainer.save_dir.glob('train_batch*.jpg')}
    _log_images(imgs, "Mosaic", trainer.epoch)
35 |
36 |
def on_fit_epoch_end(trainer):
    """After the first epoch, attach static model info (params/GFLOPs/speed) to the task."""
    if trainer.epoch != 0:
        return
    model_info = {
        "Parameters": get_num_params(trainer.model),
        "GFLOPs": round(get_flops(trainer.model), 3),
        "Inference speed (ms/img)": round(trainer.validator.speed[1], 3)}
    Task.current_task().connect(model_info, name='Model')
44 |
45 |
def on_train_end(trainer):
    """Register the best checkpoint as the ClearML task's output model."""
    Task.current_task().update_output_model(
        model_path=str(trainer.best), model_name=trainer.args.name, auto_delete_file=False)
50 |
51 |
# Register ClearML hooks only when the clearml import succeeded
callbacks = {} if clearml is None else {
    "on_pretrain_routine_start": on_pretrain_routine_start,
    "on_train_epoch_end": on_train_epoch_end,
    "on_fit_epoch_end": on_fit_epoch_end,
    "on_train_end": on_train_end}
57 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/classify/val.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import hydra
4 |
5 | from ultralytics.yolo.data import build_classification_dataloader
6 | from ultralytics.yolo.engine.validator import BaseValidator
7 | from ultralytics.yolo.utils import DEFAULT_CONFIG
8 | from ultralytics.yolo.utils.metrics import ClassifyMetrics
9 |
10 |
class ClassificationValidator(BaseValidator):
    """Validator that computes top-1 / top-5 accuracy for classification models."""

    def __init__(self, dataloader=None, save_dir=None, pbar=None, logger=None, args=None):
        super().__init__(dataloader, save_dir, pbar, logger, args)
        self.metrics = ClassifyMetrics()

    def get_desc(self):
        # Header row for the progress/results table
        return ('%22s' + '%11s' * 2) % ('classes', 'top1_acc', 'top5_acc')

    def init_metrics(self, model):
        # Accumulators filled by update_metrics() and consumed by get_stats()
        self.pred = []
        self.targets = []

    def preprocess(self, batch):
        device = self.device
        img = batch["img"].to(device, non_blocking=True)
        batch["img"] = img.half() if self.args.half else img.float()
        batch["cls"] = batch["cls"].to(device)
        return batch

    def update_metrics(self, preds, batch):
        # Keep the 5 highest-scoring class indices per sample
        self.pred.append(preds.argsort(1, descending=True)[:, :5])
        self.targets.append(batch["cls"])

    def get_stats(self):
        self.metrics.process(self.targets, self.pred)
        return self.metrics.results_dict

    def get_dataloader(self, dataset_path, batch_size):
        return build_classification_dataloader(path=dataset_path,
                                               imgsz=self.args.imgsz,
                                               batch_size=batch_size,
                                               workers=self.args.workers)

    def print_results(self):
        fmt = '%22s' + '%11.3g' * len(self.metrics.keys)  # print format
        self.logger.info(fmt % ("all", self.metrics.top1, self.metrics.top5))
47 |
48 |
@hydra.main(version_base=None, config_path=str(DEFAULT_CONFIG.parent), config_name=DEFAULT_CONFIG.name)
def val(cfg):
    """CLI entry point: validate a classification model (defaults: imagenette160 / resnet18)."""
    cfg.data = cfg.data or "imagenette160"
    cfg.model = cfg.model or "resnet18"
    ClassificationValidator(args=cfg)(model=cfg.model)


if __name__ == "__main__":
    val()
59 |
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/loss.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import torch
4 | import torch.nn as nn
5 | import torch.nn.functional as F
6 |
7 | from .metrics import bbox_iou
8 | from .tal import bbox2dist
9 |
10 |
class VarifocalLoss(nn.Module):
    """Varifocal loss by Zhang et al. https://arxiv.org/abs/2008.13367"""

    def __init__(self):
        super().__init__()

    def forward(self, pred_score, gt_score, label, alpha=0.75, gamma=2.0):
        # Negatives weighted by the focal term on the predicted score, positives by the target score
        weight = alpha * pred_score.sigmoid().pow(gamma) * (1 - label) + gt_score * label
        # Run BCE in fp32 for numerical stability under AMP
        with torch.cuda.amp.autocast(enabled=False):
            bce = F.binary_cross_entropy_with_logits(pred_score.float(), gt_score.float(), reduction="none")
            loss = (bce * weight).sum()
        return loss
22 |
23 |
class BboxLoss(nn.Module):
    """Box-regression loss: CIoU term plus optional Distribution Focal Loss (DFL)."""

    def __init__(self, reg_max, use_dfl=False):
        super().__init__()
        self.reg_max = reg_max
        self.use_dfl = use_dfl

    def forward(self, pred_dist, pred_bboxes, anchor_points, target_bboxes, target_scores, target_scores_sum, fg_mask):
        # IoU loss on foreground anchors, weighted by the per-anchor target score
        weight = torch.masked_select(target_scores.sum(-1), fg_mask).unsqueeze(-1)
        iou = bbox_iou(pred_bboxes[fg_mask], target_bboxes[fg_mask], xywh=False, CIoU=True)
        loss_iou = ((1.0 - iou) * weight).sum() / target_scores_sum

        if not self.use_dfl:
            return loss_iou, torch.tensor(0.0).to(pred_dist.device)

        # DFL over the discretized left/top/right/bottom distances
        target_ltrb = bbox2dist(anchor_points, target_bboxes, self.reg_max)
        loss_dfl = self._df_loss(pred_dist[fg_mask].view(-1, self.reg_max + 1), target_ltrb[fg_mask]) * weight
        return loss_iou, loss_dfl.sum() / target_scores_sum

    @staticmethod
    def _df_loss(pred_dist, target):
        """Cross-entropy against the two integer bins bracketing `target`, linearly weighted."""
        left = target.long()
        right = left + 1
        weight_left = right - target
        weight_right = 1 - weight_left
        ce_left = F.cross_entropy(pred_dist, left.view(-1), reduction="none").view(left.shape)
        ce_right = F.cross_entropy(pred_dist, right.view(-1), reduction="none").view(left.shape)
        return (ce_left * weight_left + ce_right * weight_right).mean(-1, keepdim=True)
56 |
--------------------------------------------------------------------------------
/ultralytics/yolo/data/datasets/coco128.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
3 | # Example usage: python train.py --data coco128.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── coco128 ← downloads here (7 MB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco128 # dataset root dir
12 | train: images/train2017 # train images (relative to 'path') 128 images
13 | val: images/train2017 # val images (relative to 'path') 128 images
14 | test: # test images (optional)
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | download: https://ultralytics.com/assets/coco128.zip
--------------------------------------------------------------------------------
/ultralytics/yolo/data/datasets/coco128-seg.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # COCO128-seg dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
3 | # Example usage: python train.py --data coco128-seg.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── coco128-seg ← downloads here (7 MB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco128-seg # dataset root dir
12 | train: images/train2017 # train images (relative to 'path') 128 images
13 | val: images/train2017 # val images (relative to 'path') 128 images
14 | test: # test images (optional)
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | download: https://ultralytics.com/assets/coco128-seg.zip
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/dist.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import os
4 | import shutil
5 | import socket
6 | import sys
7 | import tempfile
8 |
9 | from . import USER_CONFIG_DIR
10 |
11 |
def find_free_network_port() -> int:
    # https://github.com/Lightning-AI/lightning/blob/master/src/lightning_lite/plugins/environments/lightning.py
    """Finds a free port on localhost.

    It is useful in single-node training when we don't want to connect to a real main node but have to set the
    `MASTER_PORT` environment variable.
    """
    # Context manager guarantees the socket is closed even if bind() raises
    # (the original leaked the fd on a bind failure).
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind(("", 0))  # port 0 lets the OS pick a free ephemeral port
        return s.getsockname()[1]
24 |
25 |
def generate_ddp_file(trainer):
    """Write a temp python script that rebuilds `trainer` from its config and calls train().

    Used for CLI launches where no .py entry file exists for torch.distributed.run
    to re-execute. Returns the path of the generated file.
    """
    # e.g. "<class 'ultralytics.yolo.engine.trainer.BaseTrainer'>" -> "yolo.engine.trainer"
    import_path = '.'.join(str(trainer.__class__).split(".")[1:-1])

    if not trainer.resume:
        shutil.rmtree(trainer.save_dir)  # remove the save_dir
    content = f'''config = {dict(trainer.args)} \nif __name__ == "__main__":
    from ultralytics.{import_path} import {trainer.__class__.__name__}

    trainer = {trainer.__class__.__name__}(config=config)
    trainer.train()'''
    # NOTE(review): no parents=True — assumes USER_CONFIG_DIR itself already exists
    (USER_CONFIG_DIR / 'DDP').mkdir(exist_ok=True)
    with tempfile.NamedTemporaryFile(prefix="_temp_",
                                     suffix=f"{id(trainer)}.py",  # id(trainer) lets ddp_cleanup() find this file
                                     mode="w+",
                                     encoding='utf-8',
                                     dir=USER_CONFIG_DIR / 'DDP',
                                     delete=False) as file:
        file.write(content)
    return file.name
45 |
46 |
def generate_ddp_command(world_size, trainer):
    """Build the torch.distributed.run command line for `world_size` processes."""
    import __main__  # noqa local import to avoid https://github.com/Lightning-AI/lightning/issues/15218
    file_name = os.path.abspath(sys.argv[0])
    if not file_name.endswith(".py"):
        # CLI launch has no .py entry point; synthesize one from the trainer config
        file_name = generate_ddp_file(trainer)
    cmd = [
        sys.executable, "-m", "torch.distributed.run",
        "--nproc_per_node", f"{world_size}",
        "--master_port", f"{find_free_network_port()}",
        file_name]
    return cmd + sys.argv[1:]
56 |
57 |
def ddp_cleanup(command, trainer):
    """Delete the temp DDP script generated for `trainer`, if `command` references one."""
    suffix = f"{id(trainer)}.py"
    if suffix not in "".join(command):
        return
    for chunk in command:
        if suffix in chunk:
            os.remove(chunk)
            break
66 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/classify/predict.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import hydra
4 | import torch
5 |
6 | from ultralytics.yolo.engine.predictor import BasePredictor
7 | from ultralytics.yolo.utils import DEFAULT_CONFIG
8 | from ultralytics.yolo.utils.checks import check_imgsz
9 | from ultralytics.yolo.utils.plotting import Annotator
10 |
11 |
class ClassificationPredictor(BasePredictor):
    """Predictor for image-classification models: annotates and logs top-5 results per image."""

    def get_annotator(self, img):
        return Annotator(img, example=str(self.model.names), pil=True)

    def preprocess(self, img):
        img = torch.Tensor(img).to(self.model.device)
        img = img.half() if self.model.fp16 else img.float()  # uint8 to fp16/32
        return img

    def write_results(self, idx, preds, batch):
        """Annotate/save the top-5 predictions for image `idx` and return a log string."""
        p, im, im0 = batch
        log_string = ""
        if len(im.shape) == 3:
            im = im[None]  # expand for batch dim
        self.seen += 1
        im0 = im0.copy()
        if self.webcam:  # batch_size >= 1
            log_string += f'{idx}: '
            frame = self.dataset.count  # BUGFIX: was `self.dataset.cound` (AttributeError on stream input)
        else:
            frame = getattr(self.dataset, 'frame', 0)

        self.data_path = p
        # save_path = str(self.save_dir / p.name)  # im.jpg
        self.txt_path = str(self.save_dir / 'labels' / p.stem) + ('' if self.dataset.mode == 'image' else f'_{frame}')
        log_string += '%gx%g ' % im.shape[2:]  # print string
        self.annotator = self.get_annotator(im0)

        prob = preds[idx]
        self.all_outputs.append(prob)
        # Print results
        top5i = prob.argsort(0, descending=True)[:5].tolist()  # top 5 indices
        log_string += f"{', '.join(f'{self.model.names[j]} {prob[j]:.2f}' for j in top5i)}, "

        # write
        text = '\n'.join(f'{prob[j]:.2f} {self.model.names[j]}' for j in top5i)
        if self.args.save or self.args.show:  # Add bbox to image
            self.annotator.text((32, 32), text, txt_color=(255, 255, 255))
        if self.args.save_txt:  # Write to file
            with open(f'{self.txt_path}.txt', 'a') as f:
                f.write(text + '\n')

        return log_string
56 |
57 |
@hydra.main(version_base=None, config_path=str(DEFAULT_CONFIG.parent), config_name=DEFAULT_CONFIG.name)
def predict(cfg):
    """CLI entry point: run classification inference (default model: squeezenet1_0)."""
    cfg.model = cfg.model or "squeezenet1_0"
    cfg.imgsz = check_imgsz(cfg.imgsz, min_dim=2)  # check image size
    ClassificationPredictor(cfg)()


if __name__ == "__main__":
    predict()
68 |
--------------------------------------------------------------------------------
/ultralytics/hub/auth.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import requests
4 |
5 | from ultralytics.hub.utils import HUB_API_ROOT, request_with_credentials
6 | from ultralytics.yolo.utils import is_colab
7 |
8 | API_KEY_PATH = "https://hub.ultralytics.com/settings?tab=api+keys"
9 |
10 |
class Auth:
    """Authentication for Ultralytics HUB via API key or (in Colab) browser cookies."""

    # Class-level defaults; overwritten per-instance on successful auth
    id_token = api_key = model_key = False

    def __init__(self, api_key=None):
        """Authenticate with `api_key` if provided, else attempt cookie-based auth."""
        self.api_key = self._clean_api_key(api_key)
        self.authenticate() if self.api_key else self.auth_with_cookies()

    @staticmethod
    def _clean_api_key(key: str) -> str:
        """Strip model from key if present"""
        separator = "_"
        # Guard falsy keys: the default api_key=None made `separator in key` raise TypeError
        return key.split(separator)[0] if key and separator in key else key

    def authenticate(self) -> bool:
        """Attempt to authenticate with server"""
        try:
            header = self.get_auth_header()
            if header:
                r = requests.post(f"{HUB_API_ROOT}/v1/auth", headers=header)
                if not r.json().get('success', False):
                    raise ConnectionError("Unable to authenticate.")
                return True
            raise ConnectionError("User has not authenticated locally.")
        except ConnectionError:
            self.id_token = self.api_key = False  # reset invalid
            return False

    def auth_with_cookies(self) -> bool:
        """
        Attempt to fetch authentication via cookies and set id_token.
        User must be logged in to HUB and running in a supported browser.
        """
        if not is_colab():
            return False  # Currently only works with Colab
        try:
            authn = request_with_credentials(f"{HUB_API_ROOT}/v1/auth/auto")
            if authn.get("success", False):
                self.id_token = authn.get("data", {}).get("idToken", None)
                self.authenticate()
                return True
            raise ConnectionError("Unable to fetch browser authentication details.")
        except ConnectionError:
            self.id_token = False  # reset invalid
            return False

    def get_auth_header(self):
        """Return the HTTP auth header for the current credentials, or None if unauthenticated."""
        if self.id_token:
            return {"authorization": f"Bearer {self.id_token}"}
        elif self.api_key:
            return {"x-api-key": self.api_key}
        else:
            return None

    def get_state(self) -> bool:
        """Get the authentication state"""
        return self.id_token or self.api_key

    def set_api_key(self, key: str):
        """Set the API key (does not re-authenticate)."""
        self.api_key = key
71 |
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/hub.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import json
4 | from time import time
5 |
6 | import torch
7 |
8 | from ultralytics.hub.utils import PREFIX, sync_analytics
9 | from ultralytics.yolo.utils import LOGGER
10 |
11 |
def on_pretrain_routine_end(trainer):
    """When a HUB session is attached, log the model URL and start upload rate-limit timers."""
    session = getattr(trainer, 'hub_session', None)
    if not session:
        return
    LOGGER.info(f"{PREFIX}View model at https://hub.ultralytics.com/models/{session.model_id} 🚀")
    session.t = {'metrics': time(), 'ckpt': time()}  # start timer on self.rate_limit
18 |
19 |
def on_fit_epoch_end(trainer):
    """Queue epoch metrics as JSON and upload them when the rate limit allows."""
    session = getattr(trainer, 'hub_session', None)
    if not session:
        return
    session.metrics_queue[trainer.epoch] = json.dumps(trainer.metrics)  # json string
    if time() - session.t['metrics'] > session.rate_limits['metrics']:
        session.upload_metrics()
        session.t['metrics'] = time()  # reset timer
        session.metrics_queue = {}  # reset queue
28 |
29 |
def on_model_save(trainer):
    """Upload the last checkpoint to HUB (rate-limited), flagging whether it is the current best."""
    session = getattr(trainer, 'hub_session', None)
    if not session:
        return
    # Upload checkpoints with rate limiting
    is_best = trainer.best_fitness == trainer.fitness
    if time() - session.t['ckpt'] > session.rate_limits['ckpt']:
        LOGGER.info(f"{PREFIX}Uploading checkpoint {session.model_id}")
        session.upload_model(trainer.epoch, trainer.last, is_best)
        session.t['ckpt'] = time()  # reset timer
39 |
40 |
def on_train_end(trainer):
    """Upload the final best model and its mAP to HUB, then stop session heartbeats."""
    session = getattr(trainer, 'hub_session', None)
    if not session:
        return
    # Upload final model and metrics with exponential standoff
    LOGGER.info(f"{PREFIX}Training completed successfully ✅\n"
                f"{PREFIX}Uploading final {session.model_id}")
    session.upload_model(trainer.epoch, trainer.best, map=trainer.metrics['metrics/mAP50-95(B)'], final=True)
    session.alive = False  # stop heartbeats
    LOGGER.info(f"{PREFIX}View model at https://hub.ultralytics.com/models/{session.model_id} 🚀")
50 |
51 |
def on_train_start(trainer):
    """Sync anonymized usage analytics when training starts."""
    sync_analytics(trainer.args)
54 |
55 |
def on_val_start(validator):
    """Sync anonymized usage analytics when validation starts."""
    sync_analytics(validator.args)
58 |
59 |
def on_predict_start(predictor):
    """Sync anonymized usage analytics when prediction starts."""
    sync_analytics(predictor.args)
62 |
63 |
def on_export_start(exporter):
    """Sync anonymized usage analytics when export starts."""
    sync_analytics(exporter.args)
66 |
67 |
# Event name -> handler mapping; consumed by add_integration_callbacks() to
# register the HUB hooks on trainers/validators/predictors/exporters.
callbacks = {
    "on_pretrain_routine_end": on_pretrain_routine_end,
    "on_fit_epoch_end": on_fit_epoch_end,
    "on_model_save": on_model_save,
    "on_train_end": on_train_end,
    "on_train_start": on_train_start,
    "on_val_start": on_val_start,
    "on_predict_start": on_predict_start,
    "on_export_start": on_export_start}
77 |
--------------------------------------------------------------------------------
/ultralytics/yolo/data/datasets/coco.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # COCO 2017 dataset http://cocodataset.org by Microsoft
3 | # Example usage: python train.py --data coco.yaml
4 | # parent
5 | # ├── yolov5
6 | # └── datasets
7 | # └── coco ← downloads here (20.1 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco # dataset root dir
12 | train: train2017.txt # train images (relative to 'path') 118287 images
13 | val: val2017.txt # val images (relative to 'path') 5000 images
14 | test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | download: |
102 | from utils.general import download, Path
103 | # Download labels
104 | segments = True # segment or box labels
105 | dir = Path(yaml['path']) # dataset root dir
106 | url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
107 | urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels
108 | download(urls, dir=dir.parent)
109 | # Download data
110 | urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images
111 | 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images
112 | 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional)
113 | download(urls, dir=dir / 'images', threads=3)
--------------------------------------------------------------------------------
/tensorrt/yolov8_add_nms.py:
--------------------------------------------------------------------------------
1 | '''
2 | xujing
3 |
4 | 把NMSPlugin对应的结点添加到 ONNX
5 |
6 | '''
7 | import onnx_graphsurgeon as gs
8 | import argparse
9 | import onnx
10 | import numpy as np
11 |
def create_and_add_plugin_node(graph, topK, keepTopK):
    """Append a BatchedNMS_TRT plugin node to *graph* and rewire the graph outputs to it."""
    batch_size = graph.inputs[0].shape[0]
    print("The batch size is: ", batch_size)
    input_h = graph.inputs[0].shape[2]  # NOTE: currently unused, kept for reference
    input_w = graph.inputs[0].shape[3]  # NOTE: currently unused, kept for reference

    tensors = graph.tensors()
    boxes_tensor = tensors["boxes"]
    confs_tensor = tensors["label_conf"]

    # Plugin outputs, shaped per the BatchedNMS_TRT output contract
    new_outputs = [
        gs.Variable(name="num_detections").to_variable(dtype=np.int32, shape=[batch_size, 1]),
        gs.Variable(name="nmsed_boxes").to_variable(dtype=np.float32, shape=[batch_size, keepTopK, 4]),
        gs.Variable(name="nmsed_scores").to_variable(dtype=np.float32, shape=[batch_size, keepTopK]),
        gs.Variable(name="nmsed_classes").to_variable(dtype=np.float32, shape=[batch_size, keepTopK]),
    ]

    nms_node = gs.Node(op="BatchedNMS_TRT",
                       attrs=create_attrs(topK, keepTopK),
                       inputs=[boxes_tensor, confs_tensor],
                       outputs=new_outputs)
    graph.nodes.append(nms_node)
    graph.outputs = new_outputs

    return graph.cleanup().toposort()
40 |
41 |
def create_attrs(topK, keepTopK, numClasses=4, scoreThreshold=0.20, iouThreshold=0.45):
    """Build the attribute dict for a TensorRT BatchedNMS_TRT plugin node.

    Args:
        topK (int): number of bounding boxes fed into NMS, e.g. 1000.
        keepTopK (int): bounding boxes kept per image after NMS, e.g. 20.
        numClasses (int, optional): number of object classes. Defaults to 4 (this model).
        scoreThreshold (float, optional): minimum confidence to keep a box. Defaults to 0.20.
        iouThreshold (float, optional): IoU threshold for suppression. Defaults to 0.45.

    Returns:
        dict: plugin attributes understood by the BatchedNMS_TRT parser.
    """
    return {
        "shareLocation": 1,  # boxes are shared across classes
        "backgroundLabelId": -1,  # no background class
        "numClasses": numClasses,
        "topK": topK,
        "keepTopK": keepTopK,
        "scoreThreshold": scoreThreshold,
        "iouThreshold": iouThreshold,
        "isNormalized": 0,  # boxes are in pixel coordinates
        "clipBoxes": 0,
        "scoreBits": 16,
        # '1' is the default plugin version the parser searches for, and therefore can be
        # omitted, but we include it here for illustrative purposes.
        "plugin_version": "1"}
74 |
75 |
def main():
    """CLI entry point: load an ONNX model, inject the NMS plugin node, save '*_nms.onnx'."""
    parser = argparse.ArgumentParser(description="Add batchedNMSPlugin")
    parser.add_argument("-f", "--model", help="Path to the ONNX model generated by export_model.py", default="./last_1.onnx")
    parser.add_argument("-t", "--topK", help="number of bounding boxes for nms", default=1000)
    parser.add_argument("-k", "--keepTopK", help="bounding boxes to be kept per image", default=20)
    args, _ = parser.parse_known_args()

    loaded = onnx.load(args.model)
    graph = create_and_add_plugin_node(gs.import_onnx(loaded), int(args.topK), int(args.keepTopK))
    # strip the '.onnx' suffix and append '_nms.onnx'
    onnx.save(gs.export_onnx(graph), args.model[:-5] + "_nms.onnx")


if __name__ == '__main__':
    main()
93 |
94 |
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/autobatch.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | """
3 | Auto-batch utils
4 | """
5 |
6 | from copy import deepcopy
7 |
8 | import numpy as np
9 | import torch
10 |
11 | from ultralytics.yolo.utils import LOGGER, colorstr
12 | from ultralytics.yolo.utils.torch_utils import profile
13 |
14 |
def check_train_batch_size(model, imgsz=640, amp=True):
    """Return the optimal training batch size for *model* at image size *imgsz*.

    Profiles a deep copy of the model in train mode so the caller's model is untouched.
    """
    with torch.cuda.amp.autocast(amp):
        model_copy = deepcopy(model).train()
        return autobatch(model_copy, imgsz)  # compute optimal batch size
19 |
20 |
def autobatch(model, imgsz=640, fraction=0.7, batch_size=16):
    """Automatically estimate the best batch size to use `fraction` of available CUDA memory.

    Args:
        model (nn.Module): model to profile (should be in train mode for training estimates).
        imgsz (int): training image size.
        fraction (float): target fraction of total CUDA memory to occupy.
        batch_size (int): fallback batch size when estimation is not possible.

    Returns:
        int: estimated optimal batch size (or `batch_size` on CPU/failure).

    Usage:
        import torch
        from utils.autobatch import autobatch
        model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False)
        print(autobatch(model))
    """
    # Check device
    prefix = colorstr('AutoBatch: ')
    LOGGER.info(f'{prefix}Computing optimal batch size for --imgsz {imgsz}')
    device = next(model.parameters()).device  # get model device
    if device.type == 'cpu':
        LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}')
        return batch_size
    if torch.backends.cudnn.benchmark:
        # benchmark mode caches per-shape kernels, which skews memory profiling
        LOGGER.info(f'{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}')
        return batch_size

    # Inspect CUDA memory
    gb = 1 << 30  # bytes to GiB (1024 ** 3)
    d = str(device).upper()  # 'CUDA:0'
    properties = torch.cuda.get_device_properties(device)  # device properties
    t = properties.total_memory / gb  # GiB total
    r = torch.cuda.memory_reserved(device) / gb  # GiB reserved
    a = torch.cuda.memory_allocated(device) / gb  # GiB allocated
    f = t - (r + a)  # GiB free
    LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free')

    # Profile batch sizes
    batch_sizes = [1, 2, 4, 8, 16]
    results = None  # BUGFIX: initialize so a profiling failure cannot raise NameError below
    try:
        img = [torch.empty(b, 3, imgsz, imgsz) for b in batch_sizes]
        results = profile(img, model, n=3, device=device)
    except Exception as e:
        LOGGER.warning(f'{prefix}{e}')
    if results is None:  # profiling failed entirely; fall back to the default
        LOGGER.warning(f'{prefix}WARNING ⚠️ profiling failed, using default batch-size {batch_size}.')
        return batch_size

    # Fit a first-degree polynomial to (batch size, memory) and solve for the target memory
    y = [x[2] for x in results if x]  # memory [2]
    p = np.polyfit(batch_sizes[:len(y)], y, deg=1)  # first degree polynomial fit
    b = int((f * fraction - p[1]) / p[0])  # y intercept (optimal batch size)
    if None in results:  # some sizes failed
        i = results.index(None)  # first fail index
        if b >= batch_sizes[i]:  # y intercept above failure point
            b = batch_sizes[max(i - 1, 0)]  # select prior safe point
    if b < 1 or b > 1024:  # b outside of safe range
        b = batch_size
        LOGGER.warning(f'{prefix}WARNING ⚠️ CUDA anomaly detected, recommend restart environment and retry command.')

    fraction = (np.polyval(p, b) + r + a) / t  # actual fraction predicted
    LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅')
    return b
73 |
--------------------------------------------------------------------------------
/ultralytics/yolo/configs/hydra_patch.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import sys
4 | from difflib import get_close_matches
5 | from textwrap import dedent
6 |
7 | import hydra
8 | from hydra.errors import ConfigCompositionException
9 | from omegaconf import OmegaConf, open_dict # noqa
10 | from omegaconf.errors import ConfigAttributeError, ConfigKeyError, OmegaConfBaseException # noqa
11 |
12 | from ultralytics.yolo.utils import LOGGER, colorstr
13 |
14 |
def override_config(overrides, cfg):
    """Apply Hydra command-line overrides to *cfg* in place.

    Replaces Hydra's ConfigLoaderImpl._apply_overrides_to_config (see the
    monkey-patch at module bottom) so that unknown keys are reported with
    close-match suggestions via check_config_mismatch() before any override
    is applied.

    Args:
        overrides: parsed Hydra Override objects.
        cfg: OmegaConf config to mutate.

    Raises:
        ConfigCompositionException: on group-style overrides, bad deletes/adds,
            or any underlying OmegaConf error.
    """
    override_keys = [override.key_or_group for override in overrides]
    check_config_mismatch(override_keys, cfg.keys())
    for override in overrides:
        if override.package is not None:
            raise ConfigCompositionException(f"Override {override.input_line} looks like a config group"
                                             f" override, but config group '{override.key_or_group}' does not exist.")

        key = override.key_or_group
        value = override.value()
        try:
            if override.is_delete():
                # '~key' / '~key=value': remove the node, optionally verifying its value first
                config_val = OmegaConf.select(cfg, key, throw_on_missing=False)
                if config_val is None:
                    raise ConfigCompositionException(f"Could not delete from config. '{override.key_or_group}'"
                                                     " does not exist.")
                elif value is not None and value != config_val:
                    raise ConfigCompositionException("Could not delete from config. The value of"
                                                     f" '{override.key_or_group}' is {config_val} and not"
                                                     f" {value}.")

                last_dot = key.rfind(".")
                with open_dict(cfg):
                    if last_dot == -1:
                        del cfg[key]
                    else:
                        # delete the leaf from its parent node
                        node = OmegaConf.select(cfg, key[:last_dot])
                        del node[key[last_dot + 1:]]

            elif override.is_add():
                # '+key=value': only valid when the key is new (or a mergeable container)
                if OmegaConf.select(cfg, key, throw_on_missing=False) is None or isinstance(value, (dict, list)):
                    OmegaConf.update(cfg, key, value, merge=True, force_add=True)
                else:
                    assert override.input_line is not None
                    raise ConfigCompositionException(
                        dedent(f"""\
                    Could not append to config. An item is already at '{override.key_or_group}'.
                    Either remove + prefix: '{override.input_line[1:]}'
                    Or add a second + to add or override '{override.key_or_group}': '+{override.input_line}'
                    """))
            elif override.is_force_add():
                # '++key=value': add or override unconditionally
                OmegaConf.update(cfg, key, value, merge=True, force_add=True)
            else:
                # plain 'key=value' override of an existing node
                try:
                    OmegaConf.update(cfg, key, value, merge=True)
                except (ConfigAttributeError, ConfigKeyError) as ex:
                    raise ConfigCompositionException(f"Could not override '{override.key_or_group}'."
                                                     f"\nTo append to your config use +{override.input_line}") from ex
        except OmegaConfBaseException as ex:
            raise ConfigCompositionException(f"Error merging override {override.input_line}").with_traceback(
                sys.exc_info()[2]) from ex
66 |
67 |
def check_config_mismatch(overrides, cfg):
    """Log a suggestion for each override key missing from *cfg* and exit if any exist."""
    mismatched = [key for key in overrides if key not in cfg and 'hydra.' not in key]

    for option in mismatched:
        similar = get_close_matches(option, cfg, 3, 0.6)
        LOGGER.info(f"{colorstr(option)} is not a valid key. Similar keys: {similar}")
    if mismatched:
        exit()
75 |
76 |
77 | hydra._internal.config_loader_impl.ConfigLoaderImpl._apply_overrides_to_config = override_config
78 |
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/base.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | """
3 | Base callbacks
4 | """
5 |
6 |
7 | # Trainer callbacks ----------------------------------------------------------------------------------------------------
def on_pretrain_routine_start(trainer):
    """Called before the pretrain routine (setup) starts."""
    pass


def on_pretrain_routine_end(trainer):
    """Called after the pretrain routine (setup) ends."""
    pass


def on_train_start(trainer):
    """Called when training starts."""
    pass


def on_train_epoch_start(trainer):
    """Called at the start of each training epoch."""
    pass


def on_train_batch_start(trainer):
    """Called at the start of each training batch."""
    pass


def optimizer_step(trainer):
    """Called when the optimizer takes a step."""
    pass


def on_before_zero_grad(trainer):
    """Called before gradients are zeroed."""
    pass


def on_train_batch_end(trainer):
    """Called at the end of each training batch."""
    pass


def on_train_epoch_end(trainer):
    """Called at the end of each training epoch."""
    pass


def on_fit_epoch_end(trainer):
    """Called at the end of each fit epoch (train + val)."""
    pass


def on_model_save(trainer):
    """Called when a model checkpoint is saved."""
    pass


def on_train_end(trainer):
    """Called when training ends."""
    pass


def on_params_update(trainer):
    """Called when model parameters are updated externally."""
    pass


def teardown(trainer):
    """Called when the trainer tears down."""
    pass
62 |
63 |
64 | # Validator callbacks --------------------------------------------------------------------------------------------------
def on_val_start(validator):
    """Called when validation starts."""
    pass


def on_val_batch_start(validator):
    """Called at the start of each validation batch."""
    pass


def on_val_batch_end(validator):
    """Called at the end of each validation batch."""
    pass


def on_val_end(validator):
    """Called when validation ends."""
    pass
79 |
80 |
81 | # Predictor callbacks --------------------------------------------------------------------------------------------------
def on_predict_start(predictor):
    """Called when prediction starts."""
    pass


def on_predict_batch_start(predictor):
    """Called at the start of each prediction batch."""
    pass


def on_predict_batch_end(predictor):
    """Called at the end of each prediction batch."""
    pass


def on_predict_end(predictor):
    """Called when prediction ends."""
    pass
96 |
97 |
98 | # Exporter callbacks ---------------------------------------------------------------------------------------------------
def on_export_start(exporter):
    """Called when model export starts."""
    pass


def on_export_end(exporter):
    """Called when model export ends."""
    pass
105 |
106 |
# Default no-op handler for every supported event. add_integration_callbacks()
# appends integration-specific handlers (ClearML, Comet, HUB, TensorBoard, W&B)
# onto these lists at runtime.
default_callbacks = {
    # Run in trainer
    'on_pretrain_routine_start': on_pretrain_routine_start,
    'on_pretrain_routine_end': on_pretrain_routine_end,
    'on_train_start': on_train_start,
    'on_train_epoch_start': on_train_epoch_start,
    'on_train_batch_start': on_train_batch_start,
    'optimizer_step': optimizer_step,
    'on_before_zero_grad': on_before_zero_grad,
    'on_train_batch_end': on_train_batch_end,
    'on_train_epoch_end': on_train_epoch_end,
    'on_fit_epoch_end': on_fit_epoch_end,  # fit = train + val
    'on_model_save': on_model_save,
    'on_train_end': on_train_end,
    'on_params_update': on_params_update,
    'teardown': teardown,

    # Run in validator
    'on_val_start': on_val_start,
    'on_val_batch_start': on_val_batch_start,
    'on_val_batch_end': on_val_batch_end,
    'on_val_end': on_val_end,

    # Run in predictor
    'on_predict_start': on_predict_start,
    'on_predict_batch_start': on_predict_batch_start,
    'on_predict_batch_end': on_predict_batch_end,
    'on_predict_end': on_predict_end,

    # Run in exporter
    'on_export_start': on_export_start,
    'on_export_end': on_export_end}
139 |
140 |
def add_integration_callbacks(instance):
    """Append every integration's callbacks onto *instance*.callbacks, keyed by event name."""
    from .clearml import callbacks as clearml_callbacks
    from .comet import callbacks as comet_callbacks
    from .hub import callbacks as hub_callbacks
    from .tensorboard import callbacks as tb_callbacks
    from .wb import callbacks as wb_callbacks

    integrations = (clearml_callbacks, comet_callbacks, hub_callbacks, tb_callbacks, wb_callbacks)
    for callback_map in integrations:
        for event, handler in callback_map.items():
            instance.callbacks[event].append(handler)  # callback[name].append(func)
151 |
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/files.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import contextlib
4 | import glob
5 | import os
6 | import urllib
7 | from datetime import datetime
8 | from pathlib import Path
9 | from zipfile import ZipFile
10 |
11 |
class WorkingDirectory(contextlib.ContextDecorator):
    """Temporarily change the working directory.

    Usable as a decorator (@WorkingDirectory(dir)) or as a context manager
    ('with WorkingDirectory(dir):'); the previous cwd is restored on exit.
    """

    def __init__(self, new_dir):
        self.dir = new_dir  # directory to switch into
        self.cwd = Path.cwd().resolve()  # directory restored on exit

    def __enter__(self):
        os.chdir(self.dir)

    def __exit__(self, exc_type, exc_val, exc_tb):
        os.chdir(self.cwd)
23 |
24 |
def increment_path(path, exist_ok=False, sep='', mkdir=False):
    """
    Increments a file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.

    If the path exists and exist_ok is not set to True, the path will be incremented by appending a number and sep to
    the end of the path. If the path is a file, the file extension will be preserved. If the path is a directory, the
    number will be appended directly to the end of the path. If mkdir is set to True, the path will be created as a
    directory if it does not already exist.

    Args:
        path (str or pathlib.Path): Path to increment.
        exist_ok (bool, optional): If True, the path will not be incremented and will be returned as-is. Defaults to False.
        sep (str, optional): Separator to use between the path and the incrementation number. Defaults to an empty string.
        mkdir (bool, optional): If True, the path will be created as a directory if it does not exist. Defaults to False.

    Returns:
        pathlib.Path: Incremented path.
    """
    path = Path(path)  # os-agnostic
    if path.exists() and not exist_ok:
        # split extension off files so 'exp.txt' -> 'exp2.txt', not 'exp.txt2'
        if path.is_file():
            path, suffix = path.with_suffix(''), path.suffix
        else:
            suffix = ''

        # probe n = 2, 3, ... until a free path is found
        for n in range(2, 9999):
            candidate = f'{path}{sep}{n}{suffix}'
            if not os.path.exists(candidate):
                break
        path = Path(candidate)

    if mkdir:
        path.mkdir(parents=True, exist_ok=True)  # make directory

    return path
58 |
59 |
def unzip_file(file, path=None, exclude=('.DS_Store', '__MACOSX')):
    """Extract *file* into *path* (default: the zip's parent directory), skipping
    any archive member whose name contains one of the *exclude* substrings."""
    if path is None:
        path = Path(file).parent  # default path
    with ZipFile(file) as archive:
        for member in archive.namelist():  # all archived filenames in the zip
            if not any(token in member for token in exclude):
                archive.extract(member, path=path)
68 |
69 |
def file_age(path=__file__):
    """Return the whole number of days since *path* was last modified."""
    delta = datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime)
    return delta.days  # + delta.seconds / 86400 for fractional days
74 |
75 |
def file_date(path=__file__):
    """Return the modification date of *path* in human-readable form, e.g. '2021-3-26'."""
    mtime = datetime.fromtimestamp(Path(path).stat().st_mtime)
    return f'{mtime.year}-{mtime.month}-{mtime.day}'
80 |
81 |
def file_size(path):
    """Return the size of the file or directory tree at *path* in MiB (0.0 if absent)."""
    mb = 1 << 20  # bytes per MiB (1024 ** 2)
    path = Path(path)
    if path.is_file():
        return path.stat().st_size / mb
    if path.is_dir():
        # sum the sizes of every file anywhere under the directory
        return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb
    return 0.0
92 |
93 |
def url2file(url):
    """Extract the bare filename from a URL, e.g. https://url.com/file.txt?auth -> file.txt."""
    url = str(Path(url)).replace(':/', '://')  # Pathlib collapses :// to :/ - undo it
    decoded = urllib.parse.unquote(url)  # '%2F' to '/' etc.
    return Path(decoded).name.split('?')[0]  # last component, query string dropped
98 |
99 |
def get_latest_run(search_dir='.'):
    """Return the most recently created 'last*.pt' under *search_dir*, or '' if none exists
    (i.e. the checkpoint to --resume from)."""
    candidates = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
    if not candidates:
        return ''
    return max(candidates, key=os.path.getctime)
104 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/detect/predict.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import hydra
4 | import torch
5 |
6 | from ultralytics.yolo.engine.predictor import BasePredictor
7 | from ultralytics.yolo.utils import DEFAULT_CONFIG, ROOT, ops
8 | from ultralytics.yolo.utils.checks import check_imgsz
9 | from ultralytics.yolo.utils.plotting import Annotator, colors, save_one_box
10 |
11 |
class DetectionPredictor(BasePredictor):
    """Predictor for YOLOv8 detection models: image preprocessing, NMS postprocessing,
    and per-image result writing/annotation."""

    def get_annotator(self, img):
        """Return an Annotator that draws onto *img* using the configured line width."""
        return Annotator(img, line_width=self.args.line_thickness, example=str(self.model.names))

    def preprocess(self, img):
        """Move a numpy image batch onto the model device and scale it to [0, 1] floats."""
        img = torch.from_numpy(img).to(self.model.device)
        img = img.half() if self.model.fp16 else img.float()  # uint8 to fp16/32
        img /= 255  # 0 - 255 to 0.0 - 1.0
        return img

    def postprocess(self, preds, img, orig_img):
        """Run NMS on raw predictions and rescale boxes to the original image size."""
        preds = ops.non_max_suppression(preds,
                                        self.args.conf,
                                        self.args.iou,
                                        agnostic=self.args.agnostic_nms,
                                        max_det=self.args.max_det)

        for i, pred in enumerate(preds):
            # webcam mode batches several source images; otherwise there is one original image
            shape = orig_img[i].shape if self.webcam else orig_img.shape
            pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], shape).round()

        return preds

    def write_results(self, idx, preds, batch):
        """Write/annotate the detections for image *idx* of *batch*; returns a log string.

        Side effects (controlled by self.args flags): appends to self.all_outputs,
        writes label .txt files, draws boxes via the annotator, and saves crops.
        """
        p, im, im0 = batch  # path, preprocessed tensor, original image
        log_string = ""
        if len(im.shape) == 3:
            im = im[None]  # expand for batch dim
        self.seen += 1
        im0 = im0.copy()
        if self.webcam:  # batch_size >= 1
            log_string += f'{idx}: '
            frame = self.dataset.count
        else:
            frame = getattr(self.dataset, 'frame', 0)

        self.data_path = p
        # save_path = str(self.save_dir / p.name)  # im.jpg
        # suffix label files with the frame number for video/stream sources
        self.txt_path = str(self.save_dir / 'labels' / p.stem) + ('' if self.dataset.mode == 'image' else f'_{frame}')
        log_string += '%gx%g ' % im.shape[2:]  # print string
        self.annotator = self.get_annotator(im0)

        det = preds[idx]
        self.all_outputs.append(det)
        if len(det) == 0:
            return log_string
        # per-class counts for the log string
        for c in det[:, 5].unique():
            n = (det[:, 5] == c).sum()  # detections per class
            log_string += f"{n} {self.model.names[int(c)]}{'s' * (n > 1)}, "
        # write
        gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
        for *xyxy, conf, cls in reversed(det):
            if self.args.save_txt:  # Write to file
                xywh = (ops.xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                line = (cls, *xywh, conf) if self.args.save_conf else (cls, *xywh)  # label format
                with open(f'{self.txt_path}.txt', 'a') as f:
                    f.write(('%g ' * len(line)).rstrip() % line + '\n')

            if self.args.save or self.args.save_crop or self.args.show:  # Add bbox to image
                c = int(cls)  # integer class
                label = None if self.args.hide_labels else (
                    self.model.names[c] if self.args.hide_conf else f'{self.model.names[c]} {conf:.2f}')
                self.annotator.box_label(xyxy, label, color=colors(c, True))
            if self.args.save_crop:
                imc = im0.copy()
                save_one_box(xyxy,
                             imc,
                             file=self.save_dir / 'crops' / self.model.model.names[c] / f'{self.data_path.stem}.jpg',
                             BGR=True)

        return log_string
84 |
85 |
@hydra.main(version_base=None, config_path=str(DEFAULT_CONFIG.parent), config_name=DEFAULT_CONFIG.name)
def predict(cfg):
    """Hydra CLI entry point: run detection prediction with the resolved config."""
    cfg.model = cfg.model or "yolov8n.pt"  # default detection weights
    cfg.imgsz = check_imgsz(cfg.imgsz, min_dim=2)  # check image size
    cfg.source = cfg.source or ROOT / "assets"  # default input source
    predictor = DetectionPredictor(cfg)
    predictor()


if __name__ == "__main__":
    predict()
97 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/segment/predict.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import hydra
4 | import torch
5 |
6 | from ultralytics.yolo.utils import DEFAULT_CONFIG, ops
7 | from ultralytics.yolo.utils.checks import check_imgsz
8 | from ultralytics.yolo.utils.plotting import colors, save_one_box
9 |
10 | from ..detect.predict import DetectionPredictor
11 |
12 |
class SegmentationPredictor(DetectionPredictor):
    """Predictor for YOLOv8 segmentation models: adds mask post-processing and
    mask/segment result writing on top of DetectionPredictor."""

    def postprocess(self, preds, img, orig_img):
        """Run NMS (with 32 mask coefficients) and build per-image masks.

        Returns:
            tuple: (list of detection tensors, list of mask tensors); images with
            no detections contribute no entry to the masks list.
        """
        masks = []
        # TODO: filter by classes
        p = ops.non_max_suppression(preds[0],
                                    self.args.conf,
                                    self.args.iou,
                                    agnostic=self.args.agnostic_nms,
                                    max_det=self.args.max_det,
                                    nm=32)
        proto = preds[1][-1]  # mask prototypes from the model's second output
        for i, pred in enumerate(p):
            shape = orig_img[i].shape if self.webcam else orig_img.shape
            if not len(pred):
                continue
            if self.args.retina_masks:
                # scale boxes first, then build full-resolution masks
                pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], shape).round()
                masks.append(ops.process_mask_native(proto[i], pred[:, 6:], pred[:, :4], shape[:2]))  # HWC
            else:
                # build masks at network resolution, then scale boxes
                masks.append(ops.process_mask(proto[i], pred[:, 6:], pred[:, :4], img.shape[2:], upsample=True))  # HWC
                pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], shape).round()

        return (p, masks)

    def write_results(self, idx, preds, batch):
        """Write/annotate detections and masks for image *idx*; returns a log string."""
        p, im, im0 = batch  # path, preprocessed tensor, original image
        log_string = ""
        if len(im.shape) == 3:
            im = im[None]  # expand for batch dim
        self.seen += 1
        if self.webcam:  # batch_size >= 1
            log_string += f'{idx}: '
            frame = self.dataset.count
        else:
            frame = getattr(self.dataset, 'frame', 0)

        self.data_path = p
        self.txt_path = str(self.save_dir / 'labels' / p.stem) + ('' if self.dataset.mode == 'image' else f'_{frame}')
        log_string += '%gx%g ' % im.shape[2:]  # print string
        self.annotator = self.get_annotator(im0)

        preds, masks = preds
        det = preds[idx]
        if len(det) == 0:
            return log_string
        # Segments
        mask = masks[idx]
        if self.args.save_txt:
            # polygon segments, normalized, in reversed mask order
            segments = [
                ops.scale_segments(im0.shape if self.args.retina_masks else im.shape[2:], x, im0.shape, normalize=True)
                for x in reversed(ops.masks2segments(mask))]

        # Print results
        for c in det[:, 5].unique():
            n = (det[:, 5] == c).sum()  # detections per class
            log_string += f"{n} {self.model.names[int(c)]}{'s' * (n > 1)}, "  # add to string

        # Mask plotting
        self.annotator.masks(
            mask,
            colors=[colors(x, True) for x in det[:, 5]],
            im_gpu=torch.as_tensor(im0, dtype=torch.float16).to(self.device).permute(2, 0, 1).flip(0).contiguous() /
            255 if self.args.retina_masks else im[idx])

        det = reversed(det[:, :6])
        self.all_outputs.append([det, mask])

        # Write results
        # NOTE(review): det was already flipped above, so reversed(det[:, :6]) here
        # iterates boxes back in their ORIGINAL order, while `segments` was built from
        # reversed masks — the j-th segment may not correspond to the j-th box.
        # Compare with DetectionPredictor.write_results and verify upstream.
        for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])):
            if self.args.save_txt:  # Write to file
                seg = segments[j].reshape(-1)  # (n,2) to (n*2)
                line = (cls, *seg, conf) if self.args.save_conf else (cls, *seg)  # label format
                with open(f'{self.txt_path}.txt', 'a') as f:
                    f.write(('%g ' * len(line)).rstrip() % line + '\n')

            if self.args.save or self.args.save_crop or self.args.show:
                c = int(cls)  # integer class
                label = None if self.args.hide_labels else (
                    self.model.names[c] if self.args.hide_conf else f'{self.model.names[c]} {conf:.2f}')
                self.annotator.box_label(xyxy, label, color=colors(c, True))
                # annotator.draw.polygon(segments[j], outline=colors(c, True), width=3)
            if self.args.save_crop:
                imc = im0.copy()
                save_one_box(xyxy, imc, file=self.save_dir / 'crops' / self.model.names[c] / f'{p.stem}.jpg', BGR=True)

        return log_string
100 |
101 |
@hydra.main(version_base=None, config_path=str(DEFAULT_CONFIG.parent), config_name=DEFAULT_CONFIG.name)
def predict(cfg):
    """Hydra CLI entry point: run segmentation prediction with the resolved config."""
    cfg.model = cfg.model or "yolov8n-seg.pt"  # default segmentation weights
    cfg.imgsz = check_imgsz(cfg.imgsz, min_dim=2)  # check image size
    predictor = SegmentationPredictor(cfg)
    predictor()


if __name__ == "__main__":
    predict()
112 |
--------------------------------------------------------------------------------
/ultralytics/hub/session.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import signal
4 | import sys
5 | from pathlib import Path
6 | from time import sleep
7 |
8 | import requests
9 |
10 | from ultralytics import __version__
11 | from ultralytics.hub.utils import HUB_API_ROOT, check_dataset_disk_space, smart_request
12 | from ultralytics.yolo.utils import LOGGER, is_colab, threaded
13 |
# Identifies this client to the HUB backend (Colab vs. local Python)
AGENT_NAME = f'python-{__version__}-colab' if is_colab() else f'python-{__version__}-local'

# Module-level training session singleton, checked by the signal handler below
session = None
17 |
18 |
def signal_handler(signum, frame):
    """Handle SIGTERM/SIGINT: stop the HUB heartbeat thread, then exit with the signal number.

    Args:
        signum (int): received signal number.
        frame: current stack frame (unused).
    """
    global session
    LOGGER.info(f'Signal received. {signum} {frame}')
    if isinstance(session, HubTrainingSession):
        # BUGFIX: previous code toggled an undefined global 'hub_logger', which
        # raised NameError on shutdown instead of stopping heartbeats cleanly.
        session.alive = False  # stop the heartbeat loop
        session = None
    sys.exit(signum)
27 |
28 |
29 | signal.signal(signal.SIGTERM, signal_handler)
30 | signal.signal(signal.SIGINT, signal_handler)
31 |
32 |
class HubTrainingSession:
    """Manages one Ultralytics HUB training session: model lookup, metric and
    checkpoint uploads, and a background heartbeat thread."""

    def __init__(self, model_id, auth):
        # auth is expected to provide get_auth_header() — presumably a hub Auth object; verify against caller
        self.agent_id = None  # identifies which instance is communicating with server
        self.model_id = model_id
        self.api_url = f'{HUB_API_ROOT}/v1/models/{model_id}'
        self.auth_header = auth.get_auth_header()
        self.rate_limits = {'metrics': 3.0, 'ckpt': 900.0, 'heartbeat': 300.0}  # rate limits (seconds)
        self.t = {}  # rate limit timers (seconds)
        self.metrics_queue = {}  # metrics queue
        self.alive = True  # for heartbeats
        self.model = self._get_model()
        self._heartbeats()  # start heartbeats (threaded; returns immediately)

    def __del__(self):
        # Class destructor: stop the heartbeat loop when the session is garbage-collected
        self.alive = False

    def upload_metrics(self):
        """POST the queued metrics to the model endpoint."""
        payload = {"metrics": self.metrics_queue.copy(), "type": "metrics"}
        smart_request(f'{self.api_url}', json=payload, headers=self.auth_header, code=2)

    def upload_model(self, epoch, weights, is_best=False, map=0.0, final=False):
        """Upload a model checkpoint to HUB.

        Args:
            epoch (int): current epoch number.
            weights (str | Path): path to the weights file; uploaded only if it exists.
            is_best (bool): whether this checkpoint is the best so far (epoch uploads).
            map (float): final mAP value (final uploads only).
            final (bool): True for the end-of-training upload (longer retry/timeout).
        """
        file = None
        if Path(weights).is_file():
            with open(weights, "rb") as f:
                file = f.read()
        if final:
            smart_request(f'{self.api_url}/upload',
                          data={
                              "epoch": epoch,
                              "type": "final",
                              "map": map},
                          files={"best.pt": file},
                          headers=self.auth_header,
                          retry=10,
                          timeout=3600,
                          code=4)
        else:
            smart_request(f'{self.api_url}/upload',
                          data={
                              "epoch": epoch,
                              "type": "epoch",
                              "isBest": bool(is_best)},
                          headers=self.auth_header,
                          files={"last.pt": file},
                          code=3)

    def _get_model(self):
        # Returns model from database by id (or None when the server sends no data)
        api_url = f"{HUB_API_ROOT}/v1/models/{self.model_id}"
        headers = self.auth_header

        try:
            r = smart_request(api_url, method="get", headers=headers, thread=False, code=0)
            data = r.json().get("data", None)
            if not data:
                return
            assert data['data'], 'ERROR: Dataset may still be processing. Please wait a minute and try again.'  # RF fix
            self.model_id = data["id"]  # server may return a canonical id

            return data
        except requests.exceptions.ConnectionError as e:
            raise ConnectionRefusedError('ERROR: The HUB server is not online. Please try again later.') from e

    def check_disk_space(self):
        """Raise MemoryError if there is not enough disk space for the session dataset."""
        if not check_dataset_disk_space(self.model['data']):
            raise MemoryError("Not enough disk space")

    # COMMENT: Should not be needed as HUB is now considered an integration and is in integrations_callbacks
    # import ultralytics.yolo.utils.callbacks.hub as hub_callbacks
    # @staticmethod
    # def register_callbacks(trainer):
    #     for k, v in hub_callbacks.callbacks.items():
    #         trainer.add_callback(k, v)

    @threaded
    def _heartbeats(self):
        # Background loop: ping the server every rate_limits['heartbeat'] seconds
        # until self.alive is cleared (by on_train_end, __del__ or the signal handler).
        while self.alive:
            r = smart_request(f'{HUB_API_ROOT}/v1/agent/heartbeat/models/{self.model_id}',
                              json={
                                  "agent": AGENT_NAME,
                                  "agentId": self.agent_id},
                              headers=self.auth_header,
                              retry=0,
                              code=5,
                              thread=False)
            self.agent_id = r.json().get('data', {}).get('agentId', None)
            sleep(self.rate_limits['heartbeat'])
--------------------------------------------------------------------------------
/ultralytics/yolo/data/build.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import os
4 | import random
5 |
6 | import numpy as np
7 | import torch
8 | from torch.utils.data import DataLoader, dataloader, distributed
9 |
10 | from ..utils import LOGGER, colorstr
11 | from ..utils.torch_utils import torch_distributed_zero_first
12 | from .dataset import ClassificationDataset, YOLODataset
13 | from .utils import PIN_MEMORY, RANK
14 |
15 |
class InfiniteDataLoader(dataloader.DataLoader):
    """DataLoader that keeps its worker processes alive across epochs.

    Accepts exactly the same arguments as a vanilla DataLoader.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Replace the batch sampler with a never-ending one; use object.__setattr__
        # to bypass DataLoader's attribute guard.
        object.__setattr__(self, "batch_sampler", _RepeatSampler(self.batch_sampler))
        self.iterator = super().__iter__()

    def __len__(self):
        return len(self.batch_sampler.sampler)

    def __iter__(self):
        # One epoch's worth of batches drawn from the single persistent iterator
        n = len(self)
        while n:
            n -= 1
            yield next(self.iterator)
33 |
34 |
35 | class _RepeatSampler:
36 | """Sampler that repeats forever
37 |
38 | Args:
39 | sampler (Sampler)
40 | """
41 |
42 | def __init__(self, sampler):
43 | self.sampler = sampler
44 |
45 | def __iter__(self):
46 | while True:
47 | yield from iter(self.sampler)
48 |
49 |
def seed_worker(worker_id):
    """Seed numpy and random inside a DataLoader worker for reproducibility.

    See https://pytorch.org/docs/stable/notes/randomness.html#dataloader
    """
    seed = torch.initial_seed() % (2 ** 32)  # fold torch's 64-bit seed into numpy's 32-bit range
    np.random.seed(seed)
    random.seed(seed)
55 |
56 |
def build_dataloader(cfg, batch_size, img_path, stride=32, label_path=None, rank=-1, mode="train"):
    """Build a YOLODataset and a matching (distributed-aware) DataLoader.

    Returns:
        (DataLoader, YOLODataset): the loader and the underlying dataset.
    """
    assert mode in ["train", "val"]
    shuffle = mode == "train"
    if cfg.rect and shuffle:
        LOGGER.warning("WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False")
        shuffle = False
    training = mode == "train"
    with torch_distributed_zero_first(rank):  # init dataset *.cache only once if DDP
        dataset = YOLODataset(img_path=img_path,
                              label_path=label_path,
                              imgsz=cfg.imgsz,
                              batch_size=batch_size,
                              augment=training,  # augmentation only while training
                              hyp=cfg,  # TODO: probably add a get_hyps_from_cfg function
                              rect=cfg.rect if training else True,  # rectangular batches
                              cache=cfg.get("cache", None),
                              single_cls=cfg.get("single_cls", False),
                              stride=int(stride),
                              pad=0.0 if training else 0.5,
                              prefix=colorstr(f"{mode}: "),
                              use_segments=cfg.task == "segment",
                              use_keypoints=cfg.task == "keypoint")

    batch_size = min(batch_size, len(dataset))
    device_count = torch.cuda.device_count()  # number of CUDA devices
    max_workers = cfg.workers if training else cfg.workers * 2
    # number of workers: bounded by CPUs per device, batch size and the configured cap
    workers = min(os.cpu_count() // max(device_count, 1), batch_size if batch_size > 1 else 0, max_workers)
    sampler = distributed.DistributedSampler(dataset, shuffle=shuffle) if rank != -1 else None
    # plain DataLoader allows attribute updates (image weighting / closing mosaic mid-run)
    loader_cls = DataLoader if cfg.image_weights or cfg.close_mosaic else InfiniteDataLoader
    generator = torch.Generator()
    generator.manual_seed(6148914691236517205 + RANK)
    return loader_cls(dataset=dataset,
                      batch_size=batch_size,
                      shuffle=shuffle and sampler is None,
                      num_workers=workers,
                      sampler=sampler,
                      pin_memory=PIN_MEMORY,
                      collate_fn=getattr(dataset, "collate_fn", None),
                      worker_init_fn=seed_worker,
                      generator=generator), dataset
97 |
98 |
99 | # build classification
100 | # TODO: using cfg like `build_dataloader`
def build_classification_dataloader(path,
                                    imgsz=224,
                                    batch_size=16,
                                    augment=True,
                                    cache=False,
                                    rank=-1,
                                    workers=8,
                                    shuffle=True):
    """Return an InfiniteDataLoader over a ClassificationDataset (YOLOv5 classifier)."""
    with torch_distributed_zero_first(rank):  # init dataset *.cache only once if DDP
        dataset = ClassificationDataset(root=path, imgsz=imgsz, augment=augment, cache=cache)
    batch_size = min(batch_size, len(dataset))
    device_count = torch.cuda.device_count()
    # workers bounded by CPUs per device, batch size and the configured cap
    num_workers = min(os.cpu_count() // max(device_count, 1), batch_size if batch_size > 1 else 0, workers)
    sampler = distributed.DistributedSampler(dataset, shuffle=shuffle) if rank != -1 else None
    rng = torch.Generator()
    rng.manual_seed(6148914691236517205 + RANK)
    return InfiniteDataLoader(dataset,
                              batch_size=batch_size,
                              shuffle=shuffle and sampler is None,
                              num_workers=num_workers,
                              sampler=sampler,
                              pin_memory=PIN_MEMORY,
                              worker_init_fn=seed_worker,
                              generator=rng)  # or DataLoader(persistent_workers=True)
126 |
--------------------------------------------------------------------------------
/ultralytics/hub/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import os
4 | import shutil
5 |
6 | import psutil
7 | import requests
8 | from IPython import display # to display images and clear console output
9 |
10 | from ultralytics.hub.auth import Auth
11 | from ultralytics.hub.session import HubTrainingSession
12 | from ultralytics.hub.utils import PREFIX, split_key
13 | from ultralytics.yolo.utils import LOGGER, emojis, is_colab
14 | from ultralytics.yolo.utils.torch_utils import select_device
15 | from ultralytics.yolo.v8.detect import DetectionTrainer
16 |
17 |
def checks(verbose=True):
    """Clean the Colab workspace and log a one-line system summary.

    Args:
        verbose (bool): when True, include CPU/RAM/disk details in the log line.
    """
    if is_colab():
        shutil.rmtree('sample_data', ignore_errors=True)  # remove colab /sample_data directory

    if not verbose:
        s = ''
    else:
        # System info
        gib = 1 << 30  # bytes per GiB
        ram = psutil.virtual_memory().total
        total, used, free = shutil.disk_usage("/")
        display.clear_output()
        s = f'({os.cpu_count()} CPUs, {ram / gib:.1f} GB RAM, {(total - free) / gib:.1f}/{total / gib:.1f} GB disk)'

    select_device(newline=False)
    LOGGER.info(f'Setup complete ✅ {s}')
34 |
35 |
def start(key=''):
    """Start training models with Ultralytics HUB.

    Usage: from src.ultralytics import start; start('API_KEY')

    Args:
        key (str): "api_key_model_id" composite key; prompts interactively when empty/invalid.
    """

    def request_api_key(attempts=0):
        """Prompt the user to input their API key, retrying up to max_attempts times."""
        import getpass

        max_attempts = 3
        tries = f"Attempt {str(attempts + 1)} of {max_attempts}" if attempts > 0 else ""
        LOGGER.info(f"{PREFIX}Login. {tries}")
        input_key = getpass.getpass("Enter your Ultralytics HUB API key:\n")
        auth.api_key, model_id = split_key(input_key)
        if not auth.authenticate():
            attempts += 1
            LOGGER.warning(f"{PREFIX}Invalid API key ⚠️\n")
            if attempts < max_attempts:
                return request_api_key(attempts)
            raise ConnectionError(emojis(f"{PREFIX}Failed to authenticate ❌"))
        else:
            return model_id

    try:
        api_key, model_id = split_key(key)
        auth = Auth(api_key)  # attempts cookie login if no api key is present
        attempts = 1 if len(key) else 0
        if not auth.get_state():
            if len(key):
                LOGGER.warning(f"{PREFIX}Invalid API key ⚠️\n")
            model_id = request_api_key(attempts)
        LOGGER.info(f"{PREFIX}Authenticated ✅")
        if not model_id:
            raise ConnectionError(emojis('Connecting with global API key is not currently supported. ❌'))
        session = HubTrainingSession(model_id=model_id, auth=auth)
        session.check_disk_space()

        # TODO: refactor, hardcoded for v8
        args = session.model.copy()
        args.pop("id")
        args.pop("status")
        args.pop("weights")
        args["data"] = "coco128.yaml"
        args["model"] = "yolov8n.yaml"
        args["batch_size"] = 16
        args["imgsz"] = 64

        trainer = DetectionTrainer(overrides=args)
        # BUGFIX: removed `session.register_callbacks(trainer)` — that method is commented
        # out in HubTrainingSession (HUB is now wired in via integration callbacks), so the
        # call raised AttributeError, which the broad except below swallowed and training
        # never started.
        setattr(trainer, 'hub_session', session)
        trainer.train()
    except Exception as e:
        LOGGER.warning(f"{PREFIX}{e}")
86 |
87 |
def reset_model(key=''):
    """Reset a trained HUB model to an untrained state via the model-reset endpoint."""
    api_key, model_id = split_key(key)
    r = requests.post('https://api.ultralytics.com/model-reset', json={"apiKey": api_key, "modelId": model_id})

    if r.status_code != 200:
        LOGGER.warning(f"{PREFIX}model reset failure {r.status_code} {r.reason}")
        return
    LOGGER.info(f"{PREFIX}model reset successfully")
97 |
98 |
def export_model(key='', format='torchscript'):
    """Request a server-side export of a HUB model to the given format."""
    api_key, model_id = split_key(key)
    formats = ('torchscript', 'onnx', 'openvino', 'engine', 'coreml', 'saved_model', 'pb', 'tflite', 'edgetpu', 'tfjs',
               'ultralytics_tflite', 'ultralytics_coreml')
    assert format in formats, f"ERROR: Unsupported export format '{format}' passed, valid formats are {formats}"

    payload = {"apiKey": api_key, "modelId": model_id, "format": format}
    r = requests.post('https://api.ultralytics.com/export', json=payload)
    assert r.status_code == 200, f"{PREFIX}{format} export failure {r.status_code} {r.reason}"
    LOGGER.info(f"{PREFIX}{format} export started ✅")
113 |
114 |
def get_export(key='', format='torchscript'):
    """Return an exported-model dictionary (including download URL) from HUB."""
    api_key, model_id = split_key(key)
    formats = ('torchscript', 'onnx', 'openvino', 'engine', 'coreml', 'saved_model', 'pb', 'tflite', 'edgetpu', 'tfjs',
               'ultralytics_tflite', 'ultralytics_coreml')
    assert format in formats, f"ERROR: Unsupported export format '{format}' passed, valid formats are {formats}"

    payload = {"apiKey": api_key, "modelId": model_id, "format": format}
    r = requests.post('https://api.ultralytics.com/get-export', json=payload)
    assert r.status_code == 200, f"{PREFIX}{format} get_export failure {r.status_code} {r.reason}"
    return r.json()
129 |
130 |
# temp. For checking
if __name__ == "__main__":
    # SECURITY FIX: a live API key was hardcoded here and committed to source control.
    # Read it from the environment instead; an empty key triggers the interactive prompt.
    start(key=os.getenv('ULTRALYTICS_HUB_API_KEY', ''))
134 |
--------------------------------------------------------------------------------
/ultralytics/yolo/configs/default.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # Default training settings and hyperparameters for medium-augmentation COCO training
3 |
4 | task: "detect" # choices=['detect', 'segment', 'classify', 'init'] # init is a special case. Specify task to run.
5 | mode: "train" # choices=['train', 'val', 'predict'] # mode to run task in.
6 |
7 | # Train settings -------------------------------------------------------------------------------------------------------
8 | model: null # i.e. yolov8n.pt, yolov8n.yaml. Path to model file
9 | data: null # i.e. coco128.yaml. Path to data file
10 | epochs: 100 # number of epochs to train for
11 | patience: 50 # TODO: epochs to wait for no observable improvement for early stopping of training
12 | batch: 16 # number of images per batch
13 | imgsz: 640 # size of input images
14 | save: True # save checkpoints
15 | cache: False # True/ram, disk or False. Use cache for data loading
16 | device: null # cuda device, i.e. 0 or 0,1,2,3 or cpu. Device to run on
17 | workers: 8 # number of worker threads for data loading
18 | project: null # project name
19 | name: null # experiment name
20 | exist_ok: False # whether to overwrite existing experiment
21 | pretrained: False # whether to use a pretrained model
22 | optimizer: 'SGD' # optimizer to use, choices=['SGD', 'Adam', 'AdamW', 'RMSProp']
23 | verbose: False # whether to print verbose output
24 | seed: 0 # random seed for reproducibility
25 | deterministic: True # whether to enable deterministic mode
26 | single_cls: False # train multi-class data as single-class
27 | image_weights: False # use weighted image selection for training
28 | rect: False # support rectangular training
29 | cos_lr: False # use cosine learning rate scheduler
30 | close_mosaic: 10 # disable mosaic augmentation for final 10 epochs
31 | resume: False # resume training from last checkpoint
32 | # Segmentation
33 | overlap_mask: True # masks should overlap during training
34 | mask_ratio: 4 # mask downsample ratio
35 | # Classification
dropout: 0.0 # dropout regularization probability (classification training only, 0.0 disables)
37 |
38 | # Val/Test settings ----------------------------------------------------------------------------------------------------
39 | val: True # validate/test during training
40 | save_json: False # save results to JSON file
41 | save_hybrid: False # save hybrid version of labels (labels + additional predictions)
42 | conf: null # object confidence threshold for detection (default 0.25 predict, 0.001 val)
43 | iou: 0.7 # intersection over union (IoU) threshold for NMS
44 | max_det: 300 # maximum number of detections per image
45 | half: False # use half precision (FP16)
46 | dnn: False # use OpenCV DNN for ONNX inference
47 | plots: True # show plots during training
48 |
49 | # Prediction settings --------------------------------------------------------------------------------------------------
50 | source: null # source directory for images or videos
51 | show: False # show results if possible
52 | save_txt: False # save results as .txt file
53 | save_conf: False # save results with confidence scores
54 | save_crop: False # save cropped images with results
55 | hide_labels: False # hide labels
56 | hide_conf: False # hide confidence scores
57 | vid_stride: 1 # video frame-rate stride
58 | line_thickness: 3 # bounding box thickness (pixels)
59 | visualize: False # visualize results
60 | augment: False # apply data augmentation to images
61 | agnostic_nms: False # class-agnostic NMS
62 | retina_masks: False # use retina masks for object detection
63 |
64 | # Export settings ------------------------------------------------------------------------------------------------------
65 | format: torchscript # format to export to
66 | keras: False # use Keras
67 | optimize: False # TorchScript: optimize for mobile
68 | int8: False # CoreML/TF INT8 quantization
69 | dynamic: False # ONNX/TF/TensorRT: dynamic axes
70 | simplify: False # ONNX: simplify model
71 | opset: 17 # ONNX: opset version
72 | workspace: 4 # TensorRT: workspace size (GB)
73 | nms: False # CoreML: add NMS
74 |
75 | # Hyperparameters ------------------------------------------------------------------------------------------------------
76 | lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
77 | lrf: 0.01 # final OneCycleLR learning rate (lr0 * lrf)
78 | momentum: 0.937 # SGD momentum/Adam beta1
79 | weight_decay: 0.0005 # optimizer weight decay 5e-4
80 | warmup_epochs: 3.0 # warmup epochs (fractions ok)
81 | warmup_momentum: 0.8 # warmup initial momentum
82 | warmup_bias_lr: 0.1 # warmup initial bias lr
83 | box: 7.5 # box loss gain
84 | cls: 0.5 # cls loss gain (scale with pixels)
85 | dfl: 1.5 # dfl loss gain
86 | fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
label_smoothing: 0.0 # label smoothing epsilon (fraction)
88 | nbs: 64 # nominal batch size
89 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
90 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
91 | hsv_v: 0.4 # image HSV-Value augmentation (fraction)
92 | degrees: 0.0 # image rotation (+/- deg)
93 | translate: 0.1 # image translation (+/- fraction)
94 | scale: 0.5 # image scale (+/- gain)
95 | shear: 0.0 # image shear (+/- deg)
96 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
97 | flipud: 0.0 # image flip up-down (probability)
98 | fliplr: 0.5 # image flip left-right (probability)
99 | mosaic: 1.0 # image mosaic (probability)
100 | mixup: 0.0 # image mixup (probability)
101 | copy_paste: 0.0 # segment copy-paste (probability)
102 |
103 | # Hydra configs --------------------------------------------------------------------------------------------------------
104 | hydra:
105 | output_subdir: null # disable hydra directory creation
106 | run:
107 | dir: .
108 |
109 | # Debug, do not modify -------------------------------------------------------------------------------------------------
110 | v5loader: False # use legacy YOLOv5 dataloader
111 |
--------------------------------------------------------------------------------
/inference.py:
--------------------------------------------------------------------------------
1 |
2 | from ultralytics import YOLO
3 | from ultralytics.yolo.utils import DEFAULT_CONFIG, ROOT, ops
4 | from ultralytics.nn.tasks import attempt_load_weights
5 | import os
6 | import cv2
7 | import torch
8 | import numpy as np
9 |
# model = YOLO("./runs/detect/train/weights/last.pt")
model = attempt_load_weights("./runs/detect/train/weights/last.pt")  # load trained checkpoint as a raw module

# Network input size (letterboxed square); must match the trained input size
INPUT_W=640
INPUT_H=640

# Class index -> name mapping; order must match the training data config
names = ["person","cat","dog","horse"]
17 |
# Preprocessing, identical to the YOLOv5 letterbox
def preprocess_image(image_path):
    """
    description: Read an image from image path, convert it to RGB,
        resize and pad it to target size, normalize to [0,1],
        transform to NCHW format.
    param:
        image_path: str, image path
    return:
        image: the processed image, float32 NCHW (1, 3, INPUT_H, INPUT_W) in [0,1]
        image_raw: the original BGR image
        h: original height
        w: original width
    """
    image_raw = cv2.imread(image_path)  # 1. read image with OpenCV (BGR)
    h, w, c = image_raw.shape  # 2. record original size
    image = cv2.cvtColor(image_raw, cv2.COLOR_BGR2RGB)  # 3. BGR -> RGB
    # 4. Calculate width and height scale factors to the network input size
    r_w = INPUT_W / w  # INPUT_W = INPUT_H = 640
    r_h = INPUT_H / h
    if r_h > r_w:  # 5. width is the limiting side: scale it to INPUT_W, pad top/bottom
        tw = INPUT_W
        th = int(r_w * h)
        tx1 = tx2 = 0
        ty1 = int((INPUT_H - th) / 2)  # centered vertical padding
        ty2 = INPUT_H - th - ty1
    else:  # height is the limiting side: scale it to INPUT_H, pad left/right
        tw = int(r_h * w)
        th = INPUT_H
        tx1 = int((INPUT_W - tw) / 2)
        tx2 = INPUT_W - tw - tx1
        ty1 = ty2 = 0
    # 6. Resize the image with the long side while maintaining aspect ratio
    image = cv2.resize(image, (tw, th), interpolation=cv2.INTER_LINEAR)
    # 7. Pad the short side with YOLOv5 gray (114, 114, 114).
    # BUGFIX: the color must be passed as the `value` keyword — the 7th positional
    # argument of copyMakeBorder is `dst`, so the previous call silently padded with
    # the default black border instead of (114, 114, 114).
    image = cv2.copyMakeBorder(image, ty1, ty2, tx1, tx2, cv2.BORDER_CONSTANT, value=(114, 114, 114))
    image = image.astype(np.float32)  # 8. uint8 -> float32
    image /= 255.0  # 9. normalize each pixel to [0,1]
    image = np.transpose(image, [2, 0, 1])  # 10. HWC -> CHW
    image = np.expand_dims(image, axis=0)  # 11. CHW -> NCHW
    image = np.ascontiguousarray(image)  # 12. ensure C-contiguous memory for faster downstream use
    return image, image_raw, h, w  # processed image, original image, original h and w
68 |
69 |
# Postprocessing
# output = [1,8,8400] -> [batch, box+class, num_box]  (num_class=4, box=4)
# class probabilities: [:, 4:8, :]
# box: box = xywh2xyxy(box)  # (center_x, center_y, width, height) to (x1, y1, x2, y2)

def postprocess(preds, img, orig_img):
    """Apply NMS to raw predictions; box rescaling is left to the caller."""
    nms_kwargs = dict(conf_thres=0.25, iou_thres=0.45, agnostic=False, max_det=300)
    preds = ops.non_max_suppression(preds, **nms_kwargs)

    # for i, pred in enumerate(preds):
    #     shape = orig_img[i].shape
    #     pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], shape).round()

    return preds
88 |
89 |
90 |
files = os.listdir("./test_img")  # run inference on every image in ./test_img

for file in files:
    print(file)
    img_path = os.path.join("./test_img",file)

    # Letterbox to a 640x640 NCHW float array; keep the raw BGR image for drawing
    image,image_raw,h,w = preprocess_image(img_path)
    input_ = torch.tensor(image)


    preds = model(input_)
    # print(len(preds))
    # print(preds[0].shape)

    # NMS; resulting boxes are still in network (640x640) coordinates
    preds = postprocess(preds, image, image_raw)

    for i, det in enumerate(preds):  # detections per image

        gn = torch.tensor(image_raw.shape)[[1, 0, 1, 0]]  # normalization gain whwh
        if det is not None and len(det):
            # Rescale boxes from img_size to im0 size
            det[:, :4] = ops.scale_boxes(image.shape[2:], det[:, :4], image_raw.shape).round()

            for *xyxy, conf, cls_ in det:  # x1,y1,x2,y2


                # det_count += 1\
                label_text = names[int(cls_)]
                # print(conf.cpu().detach().numpy())
                prob = round(conf.cpu().detach().numpy().item(),2)  # confidence rounded to 2 decimals



                # tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line/font thickness
                # NOTE(review): image is NCHW, so image.shape[0] + image.shape[1] == 1 + 3 and tl is
                # always 1; the commented formula above used a 2-D image shape — confirm intent
                tl = round(0.02 * (image.shape[0] + image.shape[1]) / 2) + 1  # line/font thickness

                color = (255, 255, 0)
                c1, c2 = (int(xyxy[0]), int(xyxy[1])), (int(xyxy[2]), int(xyxy[3]))

                cv2.rectangle(image_raw, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)

                tf = max(tl - 1, 1)  # font thickness
                t_size = cv2.getTextSize(label_text+":"+str(prob), 0, fontScale=tl / 2, thickness=tf)[0]
                c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
                cv2.rectangle(image_raw, c1, c2, color, -1, cv2.LINE_AA)  # filled
                cv2.putText(image_raw, label_text+":"+str(prob), (c1[0], c1[1] - 2), 0, tl / 2, [0, 0, 255],
                            thickness=tf, lineType=cv2.LINE_AA)

    # Save the annotated image alongside others in ./detect_res
    if not os.path.exists("./detect_res"):
        os.makedirs("./detect_res")
    cv2.imwrite("./detect_res/"+file,image_raw)
142 |
143 |
144 |
145 |
146 |
147 |
148 |
149 |
150 |
151 |
152 |
153 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/classify/train.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import hydra
4 | import torch
5 | import torchvision
6 |
7 | from ultralytics.nn.tasks import ClassificationModel, attempt_load_one_weight
8 | from ultralytics.yolo import v8
9 | from ultralytics.yolo.data import build_classification_dataloader
10 | from ultralytics.yolo.engine.trainer import BaseTrainer
11 | from ultralytics.yolo.utils import DEFAULT_CONFIG
12 | from ultralytics.yolo.utils.torch_utils import strip_optimizer
13 |
14 |
class ClassificationTrainer(BaseTrainer):
    """Trainer for image classification (YOLOv8-cls YAMLs, .pt checkpoints, or torchvision architectures)."""

    def __init__(self, config=DEFAULT_CONFIG, overrides=None):
        """Initialize, forcing the task to 'classify'."""
        if overrides is None:
            overrides = {}
        overrides["task"] = "classify"
        super().__init__(config, overrides)

    def set_model_attributes(self):
        # Expose dataset class names on the model for downstream use
        self.model.names = self.data["names"]

    def get_model(self, cfg=None, weights=None, verbose=True):
        """Build a ClassificationModel from `cfg`, optionally loading `weights`.

        Also lowers the default image size from 640 to the classification default of 224.
        """
        model = ClassificationModel(cfg, nc=self.data["nc"])

        pretrained = False
        for m in model.modules():
            if not pretrained and hasattr(m, 'reset_parameters'):
                m.reset_parameters()  # re-initialize weights when not starting from pretrained
            if isinstance(m, torch.nn.Dropout) and self.args.dropout:
                m.p = self.args.dropout  # set dropout
        for p in model.parameters():
            p.requires_grad = True  # for training

        if weights:
            model.load(weights)

        # Update defaults
        if self.args.imgsz == 640:
            self.args.imgsz = 224

        return model

    def setup_model(self):
        """
        load/create/download model for any task
        """
        # classification models require special handling

        if isinstance(self.model, torch.nn.Module):  # if model is loaded beforehand. No setup needed
            return

        model = str(self.model)
        # Load a YOLO model locally, from torchvision, or from Ultralytics assets
        if model.endswith(".pt"):
            self.model, _ = attempt_load_one_weight(model, device='cpu')
        elif model.endswith(".yaml"):
            self.model = self.get_model(cfg=model)
        elif model in torchvision.models.__dict__:
            pretrained = True
            self.model = torchvision.models.__dict__[model](weights='IMAGENET1K_V1' if pretrained else None)
        else:
            # BUGFIX: the exception was previously constructed but never raised, so an
            # unknown model name silently fell through with self.model left as a string
            raise FileNotFoundError(f'ERROR: model={model} not found locally or online. Please check model name.')

        return  # dont return ckpt. Classification doesn't support resume

    def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode="train"):
        """Return a classification dataloader; validation doubles the batch size."""
        return build_classification_dataloader(path=dataset_path,
                                               imgsz=self.args.imgsz,
                                               batch_size=batch_size if mode == "train" else (batch_size * 2),
                                               augment=mode == "train",
                                               rank=rank,
                                               workers=self.args.workers)

    def preprocess_batch(self, batch):
        """Move batch tensors to the training device."""
        batch["img"] = batch["img"].to(self.device)
        batch["cls"] = batch["cls"].to(self.device)
        return batch

    def progress_string(self):
        """Format the training progress header row."""
        return ('\n' + '%11s' * (4 + len(self.loss_names))) % \
            ('Epoch', 'GPU_mem', *self.loss_names, 'Instances', 'Size')

    def get_validator(self):
        self.loss_names = ['loss']
        return v8.classify.ClassificationValidator(self.test_loader, self.save_dir, logger=self.console)

    def criterion(self, preds, batch):
        """Cross-entropy loss normalized by nominal batch size; returns (loss, detached loss)."""
        loss = torch.nn.functional.cross_entropy(preds, batch["cls"], reduction='sum') / self.args.nbs
        loss_items = loss.detach()
        return loss, loss_items

    def label_loss_items(self, loss_items=None, prefix="train"):
        """
        Returns a loss dict with labelled training loss items tensor
        """
        # Not needed for classification but necessary for segmentation & detection.
        # (A stale commented-out duplicate of this method was removed.)
        keys = [f"{prefix}/{x}" for x in self.loss_names]
        if loss_items is not None:
            loss_items = [round(float(loss_items), 5)]
            return dict(zip(keys, loss_items))
        else:
            return keys

    def resume_training(self, ckpt):
        # Classification does not support resume; intentionally a no-op
        pass

    def final_eval(self):
        """Strip optimizer state from the saved checkpoints after training."""
        for f in self.last, self.best:
            if f.exists():
                strip_optimizer(f)  # strip optimizers
                # TODO: validate best.pt after training completes
                # if f is self.best:
                #     self.console.info(f'\nValidating {f}...')
                #     self.validator.args.save_json = True
                #     self.metrics = self.validator(model=f)
                #     self.metrics.pop('fitness', None)
                #     self.run_callbacks('on_fit_epoch_end')
134 |
135 |
@hydra.main(version_base=None, config_path=str(DEFAULT_CONFIG.parent), config_name=DEFAULT_CONFIG.name)
def train(cfg):
    """Hydra CLI entry point: apply classification-friendly defaults and run training."""
    cfg.model = cfg.model or "yolov8n-cls.yaml"  # or "resnet18"
    cfg.data = cfg.data or "mnist160"  # or yolo.ClassificationDataset("mnist")
    # Classification-specific hyperparameter overrides (differ from detection defaults)
    cfg.lr0 = 0.1
    cfg.weight_decay = 5e-5
    cfg.label_smoothing = 0.1
    cfg.warmup_epochs = 0.0
    trainer = ClassificationTrainer(cfg)
    trainer.train()
    # from ultralytics import YOLO
    # model = YOLO(cfg.model)
    # model.train(**cfg)
149 |
150 |
if __name__ == "__main__":
    """
    CLI usage:
    python ultralytics/yolo/v8/classify/train.py model=resnet18 data=imagenette160 epochs=1 imgsz=224

    TODO:
    Direct CLI support, i.e. yolov8 classify_train args.epochs 10
    """
    train()
160 |
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/downloads.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import logging
4 | import os
5 | import subprocess
6 | import urllib
7 | from itertools import repeat
8 | from multiprocessing.pool import ThreadPool
9 | from pathlib import Path
10 | from zipfile import ZipFile
11 |
12 | import requests
13 | import torch
14 |
15 | from ultralytics.yolo.utils import LOGGER
16 |
17 |
def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''):
    # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes
    """Download `url` to `file`, falling back to curl with `url2` (or `url`) on failure.

    Args:
        file: destination path.
        url: primary download URL.
        url2: optional fallback URL for the curl retry.
        min_bytes: downloads smaller than this are treated as failed and removed.
        error_msg: extra message appended to the failure log.

    Note: failures are logged rather than raised; callers must check the file themselves.
    """
    file = Path(file)
    assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}"
    try:  # url1
        LOGGER.info(f'Downloading {url} to {file}...')
        torch.hub.download_url_to_file(url, str(file), progress=LOGGER.level <= logging.INFO)
        assert file.exists() and file.stat().st_size > min_bytes, assert_msg  # check
    except Exception as e:  # url2
        if file.exists():
            file.unlink()  # remove partial downloads
        LOGGER.info(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...')
        # NOTE(review): the URL is interpolated into a shell command — assumes trusted URLs; confirm
        os.system(f"curl -# -L '{url2 or url}' -o '{file}' --retry 3 -C -")  # curl download, retry and resume on fail
    finally:
        if not file.exists() or file.stat().st_size < min_bytes:  # check
            if file.exists():
                file.unlink()  # remove partial downloads
            LOGGER.info(f"ERROR: {assert_msg}\n{error_msg}")
        LOGGER.info('')
37 |
38 |
def is_url(url, check=True):
    """Return True if `url` parses as a URL and (optionally) responds with HTTP 200.

    Args:
        url: any object convertible to str.
        check (bool): when True, also open the URL and require status code 200.

    Returns:
        bool: never raises; any parse or network failure yields False.
    """
    try:
        url = str(url)
        result = urllib.parse.urlparse(url)
        if not (result.scheme and result.netloc):  # not a URL (explicit check: assert vanishes under -O)
            return False
        return (urllib.request.urlopen(url).getcode() == 200) if check else True  # check if exists online
    except Exception:
        # BUGFIX: previously only AssertionError/HTTPError were caught, so URLError
        # (DNS/connection failures) and ValueError (malformed URLs) escaped to the caller.
        return False
48 |
49 |
def attempt_download(file, repo='ultralytics/assets', release='v0.0.0'):
    # Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v6.2', etc.
    """Return `file` as a path string, downloading it first if it does not exist locally.

    Handles direct http(s) URLs and bare GitHub release asset names.
    """

    def github_assets(repository, version='latest'):
        # Return GitHub repo tag and assets (i.e. ['yolov8n.pt', 'yolov8s.pt', ...])
        if version != 'latest':
            version = f'tags/{version}'  # i.e. tags/v6.2
        response = requests.get(f'https://api.github.com/repos/{repository}/releases/{version}').json()  # github api
        return response['tag_name'], [x['name'] for x in response['assets']]  # tag, assets

    file = Path(str(file).strip().replace("'", ''))
    if not file.exists():
        # URL specified
        name = Path(urllib.parse.unquote(str(file))).name  # decode '%2F' to '/' etc.
        if str(file).startswith(('http:/', 'https:/')):  # download
            url = str(file).replace(':/', '://')  # Pathlib turns :// -> :/
            file = name.split('?')[0]  # parse authentication https://url.com/file.txt?auth...
            if Path(file).is_file():
                LOGGER.info(f'Found {url} locally at {file}')  # file already exists
            else:
                safe_download(file=file, url=url, min_bytes=1E5)
            return file

        # GitHub assets (CLEANUP: removed a dead yolov5 assets list that was immediately overwritten)
        assets = [f'yolov8{size}{suffix}.pt' for size in 'nsmlx' for suffix in ('', '6', '-cls', '-seg')]  # default
        try:
            tag, assets = github_assets(repo, release)
        except Exception:
            try:
                tag, assets = github_assets(repo)  # latest release
            except Exception:
                try:
                    tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1]
                except Exception:
                    tag = release

        file.parent.mkdir(parents=True, exist_ok=True)  # make parent dir (if required)
        if name in assets:
            url3 = 'https://drive.google.com/drive/folders/1EFQTEUeXWSFww0luse2jB9M1QNZQGwNl'  # backup gdrive mirror
            safe_download(
                file,
                url=f'https://github.com/{repo}/releases/download/{tag}/{name}',
                min_bytes=1E5,
                error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/{tag} or {url3}')

    return str(file)
98 |
99 |
def download(url, dir=Path.cwd(), unzip=True, delete=True, curl=False, threads=1, retry=3):
    # Multithreaded file download and unzip function, used in data.yaml for autodownload
    """
    Download one or more files (optionally in parallel) and unpack archives.

    Args:
        url (str | Path | iterable): A single URL/path, or an iterable of URLs when threads > 1.
        dir (Path): Destination directory. Defaults to the current working directory.
        unzip (bool): Unpack downloaded .zip/.tar/.gz archives into `dir`/alongside the file.
        delete (bool): Remove the archive after unpacking.
        curl (bool): Use the system `curl` binary instead of torch.hub for downloading.
        threads (int): Number of concurrent download threads.
        retry (int): Number of re-download attempts after a failure.
    """
    def download_one(url, dir):
        # Download 1 file
        success = True
        if Path(url).is_file():
            f = Path(url)  # filename — already local, nothing to download
        else:  # does not exist
            f = dir / Path(url).name
            LOGGER.info(f'Downloading {url} to {f}...')
            for i in range(retry + 1):
                if curl:
                    s = 'sS' if threads > 1 else ''  # silent when multithreaded to avoid interleaved progress bars
                    # NOTE(review): url/f are interpolated into a shell command — injection-prone for untrusted URLs
                    r = os.system(
                        f'curl -# -{s}L "{url}" -o "{f}" --retry 9 -C -')  # curl download with retry, continue
                    success = r == 0  # curl exit status 0 means success
                else:
                    torch.hub.download_url_to_file(url, f, progress=threads == 1)  # torch download
                    success = f.is_file()
                if success:
                    break
                elif i < retry:
                    LOGGER.warning(f'⚠️ Download failure, retrying {i + 1}/{retry} {url}...')
                else:
                    LOGGER.warning(f'❌ Failed to download {url}...')

        # Unpack archives; '.gz' here also covers '.tar.gz' since Path.suffix is the last extension
        if unzip and success and f.suffix in ('.zip', '.tar', '.gz'):
            LOGGER.info(f'Unzipping {f}...')
            if f.suffix == '.zip':
                ZipFile(f).extractall(path=dir)  # unzip
            elif f.suffix == '.tar':
                os.system(f'tar xf {f} --directory {f.parent}')  # unzip
            elif f.suffix == '.gz':
                os.system(f'tar xfz {f} --directory {f.parent}')  # unzip
            if delete:
                f.unlink()  # remove zip

    dir = Path(dir)
    dir.mkdir(parents=True, exist_ok=True)  # make directory
    if threads > 1:
        # NOTE(review): this branch assumes `url` is an iterable of URLs; a bare string
        # would be iterated character-by-character — confirm callers pass a list here.
        pool = ThreadPool(threads)
        pool.imap(lambda x: download_one(*x), zip(url, repeat(dir)))  # multithreaded
        pool.close()
        pool.join()
    else:
        for u in [url] if isinstance(url, (str, Path)) else url:
            download_one(u, dir)
147 |
--------------------------------------------------------------------------------
/ultralytics/hub/utils.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import os
4 | import shutil
5 | import threading
6 | import time
7 |
8 | import requests
9 |
10 | from ultralytics.yolo.utils import DEFAULT_CONFIG_DICT, LOGGER, RANK, SETTINGS, TryExcept, colorstr, emojis
11 |
PREFIX = colorstr('Ultralytics: ')  # colored prefix prepended to all HUB log messages
HELP_MSG = 'If this issue persists please visit https://github.com/ultralytics/hub/issues for assistance.'
HUB_API_ROOT = os.environ.get("ULTRALYTICS_HUB_API", "https://api.ultralytics.com")  # overridable via env var
15 |
16 |
def check_dataset_disk_space(url='https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', sf=2.0):
    """
    Check that the dataset at `url` fits on disk with safety factor `sf`.

    E.g. a 1 GB dataset with sf=2.0 requires 2 GB of free space. Returns True when
    there is sufficient space, otherwise logs a warning and returns False.
    """
    gib = 1 << 30  # bytes per GiB
    data = int(requests.head(url).headers['Content-Length']) / gib  # dataset size (GB), from HTTP HEAD
    total, used, free = (x / gib for x in shutil.disk_usage("/"))  # disk usage of root filesystem, in GiB
    LOGGER.info(f'{PREFIX}{data:.3f} GB dataset, {free:.1f}/{total:.1f} GB free disk space')
    if data * sf >= free:  # not enough headroom
        LOGGER.warning(f'{PREFIX}WARNING: Insufficient free disk space {free:.1f} GB < {data * sf:.3f} GB required, '
                       f'training cancelled ❌. Please free {data * sf - free:.1f} GB additional disk space and try again.')
        return False  # insufficient space
    return True  # sufficient space
28 |
29 |
def request_with_credentials(url: str):
    """
    Make an AJAX request with browser cookies attached (Google Colab only).

    Runs a JavaScript fetch inside the Colab frontend so the user's existing
    browser session credentials are included, then evaluates the result.

    Args:
        url (str): Endpoint that is POSTed to with credentials included.

    Returns:
        The JSON response body evaluated from the browser promise.

    Note:
        The previous signature was annotated `-> any`, which is the builtin
        `any()` function, not `typing.Any`; the bogus annotation was removed.
    """
    from google.colab import output  # noqa
    from IPython import display  # noqa
    display.display(
        display.Javascript("""
            window._hub_tmp = new Promise((resolve, reject) => {
                const timeout = setTimeout(() => reject("Failed authenticating existing browser session"), 5000)
                fetch("%s", {
                    method: 'POST',
                    credentials: 'include'
                })
                    .then((response) => resolve(response.json()))
                    .then((json) => {
                    clearTimeout(timeout);
                    }).catch((err) => {
                    clearTimeout(timeout);
                    reject(err);
                });
            });
            """ % url))
    return output.eval_js("_hub_tmp")
52 |
53 |
# Deprecated TODO: eliminate this function?
def split_key(key=''):
    """
    Verify and split an 'api_key[sep]model_id' string, sep is one of '.' or '_'

    Args:
        key (str): The model key to split. If not provided, the user will be prompted to enter it.

    Returns:
        Tuple[str, str]: A tuple containing the API key and model ID.
    """

    import getpass

    error_string = emojis(f'{PREFIX}Invalid API key ⚠️\n')  # error string
    if not key:
        key = getpass.getpass('Enter model key: ')
    # Pick the first separator actually present in the key, '_' taking precedence over '.'
    sep = next((candidate for candidate in ('_', '.') if candidate in key), None)
    assert sep, error_string
    api_key, model_id = key.split(sep)
    assert len(api_key) and len(model_id), error_string
    return api_key, model_id
76 |
77 |
def smart_request(*args, retry=3, timeout=30, thread=True, code=-1, method="post", verbose=True, **kwargs):
    """
    Makes an HTTP request using the 'requests' library, with exponential backoff retries up to a specified timeout.

    Args:
        *args: Positional arguments to be passed to the requests function specified in method.
        retry (int, optional): Number of retries to attempt before giving up. Default is 3.
        timeout (int, optional): Timeout in seconds after which the function will give up retrying. Default is 30.
        thread (bool, optional): Whether to execute the request in a separate daemon thread. Default is True.
        code (int, optional): An identifier for the request, used for logging purposes. Default is -1.
        method (str, optional): The HTTP method to use for the request. Choices are 'post' and 'get'. Default is 'post'.
        verbose (bool, optional): A flag to determine whether to print out to console or not. Default is True.
        **kwargs: Keyword arguments to be passed to the requests function specified in method.

    Returns:
        requests.Response: The HTTP response object. If the request is executed in a separate thread, returns None.
    """
    retry_codes = (408, 500)  # retry only these codes

    def func(*func_args, **func_kwargs):
        # Worker that performs the request with retries; run inline or on a daemon thread.
        # NOTE(review): if `method` is neither 'post' nor 'get', r stays None and the
        # r.status_code access below raises AttributeError — confirm callers pass only these two.
        r = None  # response
        t0 = time.time()  # initial time for timer
        for i in range(retry + 1):
            if (time.time() - t0) > timeout:
                break  # overall time budget exhausted
            if method == 'post':
                r = requests.post(*func_args, **func_kwargs)  # i.e. post(url, data, json, files)
            elif method == 'get':
                r = requests.get(*func_args, **func_kwargs)  # i.e. get(url, data, json, files)
            if r.status_code == 200:
                break  # success
            try:
                m = r.json().get('message', 'No JSON message.')
            except AttributeError:
                m = 'Unable to read JSON.'
            if i == 0:  # only log / early-return on the first failed attempt
                if r.status_code in retry_codes:
                    m += f' Retrying {retry}x for {timeout}s.' if retry else ''
                elif r.status_code == 429:  # rate limit
                    h = r.headers  # response headers
                    m = f"Rate limit reached ({h['X-RateLimit-Remaining']}/{h['X-RateLimit-Limit']}). " \
                        f"Please retry after {h['Retry-After']}s."
                if verbose:
                    LOGGER.warning(f"{PREFIX}{m} {HELP_MSG} ({r.status_code} #{code})")
                if r.status_code not in retry_codes:
                    return r  # non-retryable status code: give up immediately
            time.sleep(2 ** i)  # exponential backoff: 1s, 2s, 4s, ...
        return r

    if thread:
        # Fire-and-forget: the response is discarded and this call returns None
        threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True).start()
    else:
        return func(*args, **kwargs)
131 |
132 |
@TryExcept()
def sync_analytics(cfg, all_keys=False, enabled=False):
    """
    Sync analytics data if enabled in the global settings.

    Args:
        cfg (DictConfig): Configuration for the task and mode.
        all_keys (bool): Sync all items, not just non-default values.
        enabled (bool): For debugging.
    """
    # Guard clause: only sync from the main process when both the global setting and
    # the debug flag allow it.
    if not (SETTINGS['sync'] and RANK in {-1, 0} and enabled):
        return
    cfg = dict(cfg)  # convert DictConfig to a plain dict
    if not all_keys:
        # Keep only the values that differ from the defaults
        cfg = {key: value for key, value in cfg.items() if value != DEFAULT_CONFIG_DICT.get(key, None)}
    cfg['uuid'] = SETTINGS['uuid']  # attach the device UUID to the payload

    # POST the anonymized usage payload to the HUB API
    smart_request(f'{HUB_API_ROOT}/v1/usage/anonymous', json=cfg, headers=None, code=3, retry=0, verbose=False)
151 |
--------------------------------------------------------------------------------
/tensorrt/yolov8/yolov8/yolov8.vcxproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Debug
6 | Win32
7 |
8 |
9 | Release
10 | Win32
11 |
12 |
13 | Debug
14 | x64
15 |
16 |
17 | Release
18 | x64
19 |
20 |
21 |
22 | 15.0
23 | {4898203C-0C0B-4BC0-9597-6F143C06EDCA}
24 | yolov8
25 | 10.0.17763.0
26 |
27 |
28 |
29 | Application
30 | true
31 | v141
32 | MultiByte
33 |
34 |
35 | Application
36 | false
37 | v141
38 | true
39 | MultiByte
40 |
41 |
42 | Application
43 | true
44 | v141
45 | MultiByte
46 |
47 |
48 | Application
49 | false
50 | v141
51 | true
52 | MultiByte
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 | C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.0\include;D:\libtorch_install\opencv\build\include;D:\libtorch_install\opencv\build\include\opencv2;D:\trt_install\TensorRT-8.2.1.8\include;$(IncludePath)
74 |
75 |
76 |
77 | Level3
78 | Disabled
79 | true
80 | true
81 |
82 |
83 | Console
84 |
85 |
86 |
87 |
88 | Level3
89 | Disabled
90 | true
91 | true
92 |
93 |
94 | Console
95 | C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.0\lib\x64;D:\libtorch_install\opencv\build\x64\vc15\lib;D:\trt_install\TensorRT-8.2.1.8\lib;%(AdditionalLibraryDirectories)
96 | opencv_world452d.lib;cublas.lib;cuda.lib;cudadevrt.lib;cudart.lib;cudart_static.lib;nvinfer.lib;nvinfer_plugin.lib;nvonnxparser.lib;nvparsers.lib;%(AdditionalDependencies)
97 |
98 |
99 |
100 |
101 | Level3
102 | MaxSpeed
103 | true
104 | true
105 | true
106 | true
107 |
108 |
109 | Console
110 | true
111 | true
112 |
113 |
114 |
115 |
116 | Level3
117 | MaxSpeed
118 | true
119 | true
120 | true
121 | true
122 |
123 |
124 | Console
125 | true
126 | true
127 |
128 |
129 |
130 |
131 |
132 |
133 |
134 |
135 |
136 |
137 |
138 |
--------------------------------------------------------------------------------
/tensorrt/yolov8_add_postprocess.py:
--------------------------------------------------------------------------------
1 | '''
2 | xujing
3 |
4 |
5 | yolov8 nms
6 | '''
7 |
8 | import onnx_graphsurgeon as gs
9 | import numpy as np
10 | import onnx
11 |
12 | graph = gs.import_onnx(onnx.load("./last.onnx"))
13 |
14 |
15 | # 添加计算类别概率的结点
16 |
17 | # input
18 | origin_output = [node for node in graph.nodes if node.name == "Concat_307"][0] # Concat_307是output的输入结点,不同的模型需要修改
19 | print(origin_output.outputs)
20 |
21 | output_t = gs.Variable(name="output_t",shape=(1,8400,8),dtype=np.float32)
22 | output_t_node = gs.Node(op="Transpose",inputs=[origin_output.outputs[0]],outputs=[output_t],attrs={"perm":[0,2,1]})
23 |
24 | starts_wh = gs.Constant("starts_wh",values=np.array([0,0,0],dtype=np.int64))
25 | ends_wh = gs.Constant("ends_wh",values=np.array([1,8400,4],dtype=np.int64))
26 | # axes = gs.Constant("axes",values=np.array([2],dtype=np.int64))
27 | # steps = gs.Constant("steps",values=np.array([4,1,80],dtype=np.int64))
28 | # split = gs.Constant("split",values=np.array([4,1,80],dtype=np.int64))
29 |
30 | # starts_object = gs.Constant("starts_object",values=np.array([0,0,4],dtype=np.int64))
31 | # ends_object = gs.Constant("ends_object",values=np.array([1,8400,5],dtype=np.int64))
32 |
33 | starts_conf = gs.Constant("starts_conf",values=np.array([0,0,4],dtype=np.int64))
34 | ends_conf = gs.Constant("ends_conf",values=np.array([1,8400,8],dtype=np.int64))
35 |
36 | # output
37 | box_xywh_0 = gs.Variable(name="box_xywh_0",shape=(1,8400,4),dtype=np.float32)
38 | # object_prob_0 = gs.Variable(name="object_prob_0",shape=(1,8400,1),dtype=np.float32)
39 | label_conf_0 = gs.Variable(name='label_conf_0',shape=(1,8400,4),dtype=np.float32)
40 |
41 | # trt不支持
42 | # split_node = gs.Node(op="Split",inputs=[origin_output.outputs[0],split],outputs= [ box_xywh_0,object_prob_0,label_conf_0] )
43 | # slice
44 | box_xywh_node = gs.Node(op="Slice",inputs=[output_t,starts_wh,ends_wh],outputs= [ box_xywh_0])
45 | #box_prob_node = gs.Node(op="Slice",inputs=[origin_output.outputs[0],starts_object,ends_object],outputs= [ object_prob_0])
46 | box_conf_node = gs.Node(op="Slice",inputs=[output_t,starts_conf,ends_conf],outputs= [ label_conf_0])
47 |
48 |
49 | # identity
50 | box_xywh = gs.Variable(name="box_xywh",shape=(1,8400,4),dtype=np.float32)
51 | #object_prob = gs.Variable(name="object_prob",shape=(1,8400,1),dtype=np.float32)
52 | label_conf = gs.Variable(name='label_conf',shape=(1,8400,4),dtype=np.float32)
53 |
54 | identity_node_wh = gs.Node(op="Identity",inputs=[box_xywh_0],outputs= [ box_xywh] )
55 | #identity_node_prob = gs.Node(op="Identity",inputs=[object_prob_0],outputs= [object_prob] )
56 | identity_node_conf = gs.Node(op="Identity",inputs=[label_conf_0],outputs= [ label_conf] )
57 |
58 |
59 | print(identity_node_wh)
60 |
61 | # graph.nodes.extend([split_node])
62 |
63 | # graph.outputs = [ box_xywh,object_prob,label_conf ]
64 |
65 | # graph.cleanup().toposort()
66 |
67 |
68 |
69 | # onnx.save(gs.export_onnx(graph),"test0.onnx")
70 |
71 |
72 | # #-----------------------重新加载模型-------------
73 |
74 | # graph = gs.import_onnx(onnx.load("./test0.onnx"))
75 |
76 |
77 | # # 添加计算类别概率的结点
78 |
79 | # # input
80 | # origin_output = [node for node in graph.nodes ][-1]
81 | # print(origin_output.outputs)
82 |
83 | # 添加xywh->x1y1x2y2的结点
84 |
85 | # input
86 | starts_1 = gs.Constant("starts_x",values=np.array([0,0,0],dtype=np.int64))
87 | ends_1 = gs.Constant("ends_x",values=np.array([1,8400,1],dtype=np.int64))
88 | # axes_1 = gs.Constant("axes",values=np.array([2],dtype=np.int64))
89 | # steps_1 = gs.Constant("steps",values=np.array([1],dtype=np.int64))
90 |
91 | starts_2 = gs.Constant("starts_y",values=np.array([0,0,1],dtype=np.int64))
92 | ends_2 = gs.Constant("ends_y",values=np.array([1,8400,2],dtype=np.int64))
93 |
94 | starts_3 = gs.Constant("starts_w",values=np.array([0,0,2],dtype=np.int64))
95 | ends_3 = gs.Constant("ends_w",values=np.array([1,8400,3],dtype=np.int64))
96 |
97 | starts_4 = gs.Constant("starts_h",values=np.array([0,0,3],dtype=np.int64))
98 | ends_4 = gs.Constant("ends_h",values=np.array([1,8400,4],dtype=np.int64))
99 |
100 | # output
101 | x = gs.Variable(name="x_center",shape=(1,8400,1),dtype=np.float32)
102 | y = gs.Variable(name="y_center",shape=(1,8400,1),dtype=np.float32)
103 | w = gs.Variable(name="w",shape=(1,8400,1),dtype=np.float32)
104 | h = gs.Variable(name="h",shape=(1,8400,1),dtype=np.float32)
105 |
106 | # xywh_split_node = gs.Node(op="Split",inputs=[box_xywh],outputs= [x,y,w,h] )
107 | x_node = gs.Node(op="Slice",inputs=[box_xywh,starts_1,ends_1],outputs=[x])
108 | y_node = gs.Node(op="Slice",inputs=[box_xywh,starts_2,ends_2],outputs=[y])
109 | w_node = gs.Node(op="Slice",inputs=[box_xywh,starts_3,ends_3],outputs=[w])
110 | h_node = gs.Node(op="Slice",inputs=[box_xywh,starts_4,ends_4],outputs=[h])
111 |
112 |
113 |
114 | # 变换1
115 | # input
116 | div_val = gs.Constant("div_val",values=np.array([2],dtype=np.float32))
117 | div_val_ = gs.Constant("div_val_",values=np.array([-2],dtype=np.float32))
118 | # output
119 | w_ = gs.Variable(name="w_half_",shape=(1,8400,1),dtype=np.float32)
120 | wplus = gs.Variable(name="w_half_plus",shape=(1,8400,1),dtype=np.float32)
121 | h_ = gs.Variable(name="h_half_",shape=(1,8400,1),dtype=np.float32)
122 | hplus = gs.Variable(name="h_half_plus",shape=(1,8400,1),dtype=np.float32)
123 |
124 |
125 | w_node_ = gs.Node(op="Div",inputs=[w,div_val_],outputs= [w_] )
126 | w_node_plus = gs.Node(op="Div",inputs=[w,div_val],outputs= [wplus] )
127 | h_node_ = gs.Node(op="Div",inputs=[h,div_val_],outputs= [h_] )
128 | h_node_plus = gs.Node(op="Div",inputs=[h,div_val],outputs= [hplus] )
129 |
130 |
131 | #变换2
132 | # output
133 | x1 = gs.Variable(name="x1",shape=(1,8400,1),dtype=np.float32)
134 | y1 = gs.Variable(name="y1",shape=(1,8400,1),dtype=np.float32)
135 | x2 = gs.Variable(name="x2",shape=(1,8400,1),dtype=np.float32)
136 | y2 = gs.Variable(name="y2",shape=(1,8400,1),dtype=np.float32)
137 |
138 |
139 | x1_node = gs.Node(op="Add",inputs=[x,w_],outputs= [x1] )
140 | x2_node = gs.Node(op="Add",inputs=[x,wplus],outputs= [x2] )
141 | y1_node = gs.Node(op="Add",inputs=[y,h_],outputs= [y1] )
142 | y2_node= gs.Node(op="Add",inputs=[y,hplus],outputs= [y2] )
143 |
144 |
145 | # concat
146 | # output
147 |
148 | boxes_0 = gs.Variable(name="boxes_0",shape=(1,8400,4),dtype=np.float32)
149 |
150 | # print(help(gs.Node))
151 |
152 | boxes_node_0 = gs.Node(op="Concat",inputs=[x1,y1,x2,y2],outputs= [boxes_0] ,attrs={"axis":2})
153 | # print(boxes_node_0)
154 |
155 | # # Unsqueeze tensorrt不支持
156 | # axis_squeeze = gs.Constant("axes",values=np.array([2],dtype=np.int64))
157 | shapes = gs.Constant("shape",values=np.array([1,8400,1,4],dtype=np.int64))
158 |
159 | # output
160 | boxes = gs.Variable(name="boxes",shape=(1,8400,1,4),dtype=np.float32)
161 |
162 |
163 | # boxes_node = gs.Node(op="Unsqueeze",inputs=[boxes_0,axis_squeeze],outputs= [boxes])
164 | # print(boxes_node)
165 | boxes_node = gs.Node(op="Reshape",inputs=[boxes_0,shapes],outputs= [boxes])
166 |
167 | # #----处理prob
168 | # scores = gs.Variable(name="scores",shape=(1,8400,4),dtype=np.float32)
169 |
170 | # # Mul是矩阵中逐点相乘
171 | # scores_node = gs.Node(op="Mul",inputs=[label_conf,object_prob],outputs=[scores])
172 |
173 |
174 | graph.nodes.extend([output_t_node,box_xywh_node,box_conf_node,identity_node_wh,identity_node_conf,
175 | x_node,y_node,w_node,h_node,
176 | w_node_,w_node_plus,h_node_,h_node_plus,x1_node,x2_node,y1_node,y2_node,boxes_node_0,boxes_node])
177 | # graph.nodes.extend([split_node,xywh_split_node,w_node_,w_node_plus,h_node_,h_node_plus,x1_node,x2_node,y1_node,y2_node,boxes_node,scores_node])
178 |
179 | graph.outputs = [ boxes,label_conf ]
180 |
181 | graph.cleanup().toposort()
182 |
183 |
184 | onnx.save(gs.export_onnx(graph),"./last_1.onnx")
185 |
186 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/segment/train.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from copy import copy
4 |
5 | import hydra
6 | import torch
7 | import torch.nn.functional as F
8 |
9 | from ultralytics.nn.tasks import SegmentationModel
10 | from ultralytics.yolo import v8
11 | from ultralytics.yolo.utils import DEFAULT_CONFIG
12 | from ultralytics.yolo.utils.ops import crop_mask, xyxy2xywh
13 | from ultralytics.yolo.utils.plotting import plot_images, plot_results
14 | from ultralytics.yolo.utils.tal import make_anchors
15 | from ultralytics.yolo.utils.torch_utils import de_parallel
16 |
17 | from ..detect.train import Loss
18 |
19 |
# BaseTrainer python usage
class SegmentationTrainer(v8.detect.DetectionTrainer):
    """Trainer for YOLOv8 segmentation models; extends DetectionTrainer with mask handling."""

    def __init__(self, config=DEFAULT_CONFIG, overrides=None):
        """
        Initialize the trainer with the task fixed to 'segment'.

        Args:
            config: Base training configuration. Defaults to DEFAULT_CONFIG.
            overrides (dict | None): Config overrides. Defaults to None, which creates a
                fresh dict. (The previous `overrides={}` default was mutated below, the
                classic shared-mutable-default-argument bug.)
        """
        if overrides is None:
            overrides = {}
        overrides["task"] = "segment"
        super().__init__(config, overrides)

    def get_model(self, cfg=None, weights=None, verbose=True):
        """Return a SegmentationModel, optionally initialized from `weights`."""
        model = SegmentationModel(cfg, ch=3, nc=self.data["nc"], verbose=verbose)
        if weights:
            model.load(weights)

        return model

    def get_validator(self):
        """Return a SegmentationValidator wired to this trainer's test loader and save dir."""
        self.loss_names = 'box_loss', 'seg_loss', 'cls_loss', 'dfl_loss'
        return v8.segment.SegmentationValidator(self.test_loader,
                                                save_dir=self.save_dir,
                                                logger=self.console,
                                                args=copy(self.args))

    def criterion(self, preds, batch):
        """Compute the segmentation training loss, lazily constructing it on first use."""
        if not hasattr(self, 'compute_loss'):
            self.compute_loss = SegLoss(de_parallel(self.model), overlap=self.args.overlap_mask)
        return self.compute_loss(preds, batch)

    def plot_training_samples(self, batch, ni):
        """Save a mosaic image of training batch `ni` with boxes and masks drawn."""
        images = batch["img"]
        masks = batch["masks"]
        cls = batch["cls"].squeeze(-1)
        bboxes = batch["bboxes"]
        paths = batch["im_file"]
        batch_idx = batch["batch_idx"]
        plot_images(images, batch_idx, cls, bboxes, masks, paths=paths, fname=self.save_dir / f"train_batch{ni}.jpg")

    def plot_metrics(self):
        """Plot training/validation metrics from the CSV log."""
        plot_results(file=self.csv, segment=True)  # save results.png
57 |
58 |
# Criterion class for computing training losses
class SegLoss(Loss):
    """Detection loss extended with an instance-segmentation mask loss term."""

    def __init__(self, model, overlap=True):  # model must be de-paralleled
        """
        Args:
            model: de-paralleled SegmentationModel; its last layer provides `nm`.
            overlap (bool): True when ground-truth masks are stored overlapped in one
                channel with per-instance integer indices (see mask_idx use in __call__).
        """
        super().__init__(model)
        self.nm = model.model[-1].nm  # number of masks
        self.overlap = overlap

    def __call__(self, preds, batch):
        """Return (total_loss * batch_size, detached per-component losses) for one batch."""
        loss = torch.zeros(4, device=self.device)  # box, seg, cls, dfl
        feats, pred_masks, proto = preds if len(preds) == 3 else preds[1]
        batch_size, _, mask_h, mask_w = proto.shape  # batch size, number of masks, mask height, mask width
        pred_distri, pred_scores = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split(
            (self.reg_max * 4, self.nc), 1)

        # b, grids, ..
        pred_scores = pred_scores.permute(0, 2, 1).contiguous()
        pred_distri = pred_distri.permute(0, 2, 1).contiguous()
        pred_masks = pred_masks.permute(0, 2, 1).contiguous()

        dtype = pred_scores.dtype
        imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0]  # image size (h,w)
        anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5)

        # targets
        batch_idx = batch["batch_idx"].view(-1, 1)
        targets = torch.cat((batch_idx, batch["cls"].view(-1, 1), batch["bboxes"]), 1)
        targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]])
        gt_labels, gt_bboxes = targets.split((1, 4), 2)  # cls, xyxy
        mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0)  # a box with any nonzero coord is a real target

        masks = batch["masks"].to(self.device).float()
        if tuple(masks.shape[-2:]) != (mask_h, mask_w):  # downsample GT masks to prototype resolution
            masks = F.interpolate(masks[None], (mask_h, mask_w), mode="nearest")[0]

        # pboxes
        pred_bboxes = self.bbox_decode(anchor_points, pred_distri)  # xyxy, (b, h*w, 4)

        _, target_bboxes, target_scores, fg_mask, target_gt_idx = self.assigner(
            pred_scores.detach().sigmoid(), (pred_bboxes.detach() * stride_tensor).type(gt_bboxes.dtype),
            anchor_points * stride_tensor, gt_labels, gt_bboxes, mask_gt)

        target_scores_sum = target_scores.sum()

        # cls loss
        # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum  # VFL way
        loss[2] = self.bce(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum  # BCE

        # bbox loss
        if fg_mask.sum():
            loss[0], loss[3] = self.bbox_loss(pred_distri, pred_bboxes, anchor_points, target_bboxes / stride_tensor,
                                              target_scores, target_scores_sum, fg_mask)
            # seg loss: accumulate per-image mask BCE over foreground predictions
            for i in range(batch_size):
                if fg_mask[i].sum():
                    mask_idx = target_gt_idx[i][fg_mask[i]] + 1  # +1: index 0 is background in overlapped masks
                    if self.overlap:
                        gt_mask = torch.where(masks[[i]] == mask_idx.view(-1, 1, 1), 1.0, 0.0)
                    else:
                        gt_mask = masks[batch_idx == i][mask_idx]
                    xyxyn = target_bboxes[i][fg_mask[i]] / imgsz[[1, 0, 1, 0]]  # boxes normalized to [0,1]
                    marea = xyxy2xywh(xyxyn)[:, 2:].prod(1)  # normalized box areas
                    mxyxy = xyxyn * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device)
                    loss[1] += self.single_mask_loss(gt_mask, pred_masks[i][fg_mask[i]], proto[i], mxyxy,
                                                     marea)  # seg loss
                # WARNING: Uncomment lines below in case of Multi-GPU DDP unused gradient errors
                # else:
                #     loss[1] += proto.sum() * 0
        # else:
        #     loss[1] += proto.sum() * 0

        loss[0] *= self.hyp.box  # box gain
        loss[1] *= self.hyp.box / batch_size  # seg gain
        loss[2] *= self.hyp.cls  # cls gain
        loss[3] *= self.hyp.dfl  # dfl gain

        return loss.sum() * batch_size, loss.detach()  # loss(box, seg, cls, dfl)

    def single_mask_loss(self, gt_mask, pred, proto, xyxy, area):
        # Mask loss for one image
        pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:])  # (n, 32) @ (32,80,80) -> (n,80,80)
        loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction="none")
        return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean()  # crop to boxes, normalize by box area
141 |
142 |
@hydra.main(version_base=None, config_path=str(DEFAULT_CONFIG.parent), config_name=DEFAULT_CONFIG.name)
def train(cfg):
    """
    Hydra CLI entry point: fill in segmentation defaults and launch training.

    Args:
        cfg (DictConfig): Hydra-resolved training configuration; `model` and `data`
            fall back to segmentation defaults when unset.
    """
    cfg.model = cfg.model or "yolov8n-seg.yaml"  # default segmentation model
    cfg.data = cfg.data or "coco128-seg.yaml"  # or yolo.ClassificationDataset("mnist")
    # Dead commented-out SegmentationTrainer invocation removed; the YOLO wrapper is used instead.
    from ultralytics import YOLO
    model = YOLO(cfg.model)
    model.train(**cfg)
152 |
153 |
if __name__ == "__main__":
    """
    CLI usage:
        python ultralytics/yolo/v8/segment/train.py model=yolov8n-seg.yaml data=coco128-segments epochs=100 imgsz=640

    TODO:
        Direct cli support, i.e, yolov8 classify_train args.epochs 10
    """
    train()  # hydra entry point defined above; reads overrides from the command line
163 |
--------------------------------------------------------------------------------
/ultralytics/yolo/engine/model.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from pathlib import Path
4 |
5 | from ultralytics import yolo # noqa
6 | from ultralytics.nn.tasks import ClassificationModel, DetectionModel, SegmentationModel, attempt_load_one_weight
7 | from ultralytics.yolo.configs import get_config
8 | from ultralytics.yolo.engine.exporter import Exporter
9 | from ultralytics.yolo.utils import DEFAULT_CONFIG, LOGGER, yaml_load
10 | from ultralytics.yolo.utils.checks import check_imgsz, check_yaml
11 | from ultralytics.yolo.utils.torch_utils import guess_task_from_head, smart_inference_mode
12 |
# Map head to model, trainer, validator, and predictor classes.
# NOTE(review): the literal 'TYPE' in each dotted path is a placeholder — presumably
# substituted with the model type (e.g. 'v8') before import; confirm in _guess_ops_from_task.
MODEL_MAP = {
    "classify": [
        ClassificationModel, 'yolo.TYPE.classify.ClassificationTrainer', 'yolo.TYPE.classify.ClassificationValidator',
        'yolo.TYPE.classify.ClassificationPredictor'],
    "detect": [
        DetectionModel, 'yolo.TYPE.detect.DetectionTrainer', 'yolo.TYPE.detect.DetectionValidator',
        'yolo.TYPE.detect.DetectionPredictor'],
    "segment": [
        SegmentationModel, 'yolo.TYPE.segment.SegmentationTrainer', 'yolo.TYPE.segment.SegmentationValidator',
        'yolo.TYPE.segment.SegmentationPredictor']}
24 |
25 |
26 | class YOLO:
27 | """
28 | YOLO
29 |
30 | A python interface which emulates a model-like behaviour by wrapping trainers.
31 | """
32 |
33 | def __init__(self, model='yolov8n.yaml', type="v8") -> None:
34 | """
35 | > Initializes the YOLO object.
36 |
37 | Args:
38 | model (str, Path): model to load or create
39 | type (str): Type/version of models to use. Defaults to "v8".
40 | """
41 | self.type = type
42 | self.ModelClass = None # model class
43 | self.TrainerClass = None # trainer class
44 | self.ValidatorClass = None # validator class
45 | self.PredictorClass = None # predictor class
46 | self.model = None # model object
47 | self.trainer = None # trainer object
48 | self.task = None # task type
49 | self.ckpt = None # if loaded from *.pt
50 | self.cfg = None # if loaded from *.yaml
51 | self.ckpt_path = None
52 | self.overrides = {} # overrides for trainer object
53 |
54 | # Load or create new YOLO model
55 | {'.pt': self._load, '.yaml': self._new}[Path(model).suffix](model)
56 |
    def __call__(self, source, **kwargs):
        """Make the object directly callable; shorthand for `self.predict(source, **kwargs)`."""
        return self.predict(source, **kwargs)
59 |
60 | def _new(self, cfg: str, verbose=True):
61 | """
62 | > Initializes a new model and infers the task type from the model definitions.
63 |
64 | Args:
65 | cfg (str): model configuration file
66 | verbose (bool): display model info on load
67 | """
68 | cfg = check_yaml(cfg) # check YAML
69 | cfg_dict = yaml_load(cfg, append_filename=True) # model dict
70 | self.task = guess_task_from_head(cfg_dict["head"][-1][-2])
71 | self.ModelClass, self.TrainerClass, self.ValidatorClass, self.PredictorClass = \
72 | self._guess_ops_from_task(self.task)
73 | self.model = self.ModelClass(cfg_dict, verbose=verbose) # initialize
74 | self.cfg = cfg
75 |
76 | def _load(self, weights: str):
77 | """
78 | > Initializes a new model and infers the task type from the model head.
79 |
80 | Args:
81 | weights (str): model checkpoint to be loaded
82 | """
83 | self.model, self.ckpt = attempt_load_one_weight(weights)
84 | self.ckpt_path = weights
85 | self.task = self.model.args["task"]
86 | self.overrides = self.model.args
87 | self._reset_ckpt_args(self.overrides)
88 | self.ModelClass, self.TrainerClass, self.ValidatorClass, self.PredictorClass = \
89 | self._guess_ops_from_task(self.task)
90 |
91 | def reset(self):
92 | """
93 | > Resets the model modules.
94 | """
95 | for m in self.model.modules():
96 | if hasattr(m, 'reset_parameters'):
97 | m.reset_parameters()
98 | for p in self.model.parameters():
99 | p.requires_grad = True
100 |
    def info(self, verbose=False):
        """
        > Logs model info.

        Args:
            verbose (bool): Controls verbosity.
        """
        # Delegates entirely to the underlying nn model's info()
        self.model.info(verbose=verbose)
109 |
    def fuse(self):
        """Fuse layers in the underlying model (delegates to `self.model.fuse()`);
        presumably Conv+BN fusion for faster inference — confirm in nn.tasks."""
        self.model.fuse()
112 |
113 | @smart_inference_mode()
114 | def predict(self, source, **kwargs):
115 | """
116 | Visualize prediction.
117 |
118 | Args:
119 | source (str): Accepts all source types accepted by yolo
120 | **kwargs : Any other args accepted by the predictors. To see all args check 'configuration' section in docs
121 | """
122 | overrides = self.overrides.copy()
123 | overrides["conf"] = 0.25
124 | overrides.update(kwargs)
125 | overrides["mode"] = "predict"
126 | overrides["save"] = kwargs.get("save", False) # not save files by default
127 | predictor = self.PredictorClass(overrides=overrides)
128 |
129 | predictor.args.imgsz = check_imgsz(predictor.args.imgsz, min_dim=2) # check image size
130 | predictor.setup(model=self.model, source=source)
131 | return predictor()
132 |
133 | @smart_inference_mode()
134 | def val(self, data=None, **kwargs):
135 | """
136 | > Validate a model on a given dataset .
137 |
138 | Args:
139 | data (str): The dataset to validate on. Accepts all formats accepted by yolo
140 | **kwargs : Any other args accepted by the validators. To see all args check 'configuration' section in docs
141 | """
142 | overrides = self.overrides.copy()
143 | overrides.update(kwargs)
144 | overrides["mode"] = "val"
145 | args = get_config(config=DEFAULT_CONFIG, overrides=overrides)
146 | args.data = data or args.data
147 | args.task = self.task
148 |
149 | validator = self.ValidatorClass(args=args)
150 | validator(model=self.model)
151 |
152 | @smart_inference_mode()
153 | def export(self, **kwargs):
154 | """
155 | > Export model.
156 |
157 | Args:
158 | **kwargs : Any other args accepted by the predictors. To see all args check 'configuration' section in docs
159 | """
160 |
161 | overrides = self.overrides.copy()
162 | overrides.update(kwargs)
163 | args = get_config(config=DEFAULT_CONFIG, overrides=overrides)
164 | args.task = self.task
165 |
166 | exporter = Exporter(overrides=args)
167 | exporter(model=self.model)
168 |
169 | def train(self, **kwargs):
170 | """
171 | > Trains the model on a given dataset.
172 |
173 | Args:
174 | **kwargs (Any): Any number of arguments representing the training configuration. List of all args can be found in 'config' section.
175 | You can pass all arguments as a yaml file in `cfg`. Other args are ignored if `cfg` file is passed
176 | """
177 | overrides = self.overrides.copy()
178 | overrides.update(kwargs)
179 | if kwargs.get("cfg"):
180 | LOGGER.info(f"cfg file passed. Overriding default params with {kwargs['cfg']}.")
181 | overrides = yaml_load(check_yaml(kwargs["cfg"]), append_filename=True)
182 | overrides["task"] = self.task
183 | overrides["mode"] = "train"
184 | if not overrides.get("data"):
185 | raise AttributeError("dataset not provided! Please define `data` in config.yaml or pass as an argument.")
186 | if overrides.get("resume"):
187 | overrides["resume"] = self.ckpt_path
188 |
189 | self.trainer = self.TrainerClass(overrides=overrides)
190 | if not overrides.get("resume"): # manually set model only if not resuming
191 | self.trainer.model = self.trainer.get_model(weights=self.model if self.ckpt else None, cfg=self.model.yaml)
192 | self.model = self.trainer.model
193 | self.trainer.train()
194 |
195 | def to(self, device):
196 | """
197 | > Sends the model to the given device.
198 |
199 | Args:
200 | device (str): device
201 | """
202 | self.model.to(device)
203 |
    def _guess_ops_from_task(self, task):
        """Resolve the model/trainer/validator/predictor classes for a task.

        MODEL_MAP maps each task to a model class plus three string templates
        containing the placeholder "TYPE"; the placeholder is substituted with
        `self.type` and the resulting expression is resolved with `eval` in
        this method's scope.

        Args:
            task (str): Key into MODEL_MAP.

        Returns:
            tuple: (model_class, trainer_class, validator_class, predictor_class).
        """
        model_class, train_lit, val_lit, pred_lit = MODEL_MAP[task]
        # warning: eval is unsafe. Use with caution
        trainer_class = eval(train_lit.replace("TYPE", f"{self.type}"))
        validator_class = eval(val_lit.replace("TYPE", f"{self.type}"))
        predictor_class = eval(pred_lit.replace("TYPE", f"{self.type}"))

        return model_class, trainer_class, validator_class, predictor_class
212 |
213 | @staticmethod
214 | def _reset_ckpt_args(args):
215 | args.pop("device", None)
216 | args.pop("project", None)
217 | args.pop("name", None)
218 | args.pop("batch", None)
219 | args.pop("epochs", None)
220 | args.pop("cache", None)
221 |
--------------------------------------------------------------------------------
/ultralytics/yolo/data/base.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import glob
4 | import math
5 | import os
6 | from multiprocessing.pool import ThreadPool
7 | from pathlib import Path
8 | from typing import Optional
9 |
10 | import cv2
11 | import numpy as np
12 | from torch.utils.data import Dataset
13 | from tqdm import tqdm
14 |
15 | from ..utils import NUM_THREADS, TQDM_BAR_FORMAT
16 | from .utils import HELP_URL, IMG_FORMATS, LOCAL_RANK
17 |
18 |
class BaseDataset(Dataset):
    """Base Dataset.

    Eagerly collects image paths and labels, optionally caches decoded images
    (in RAM or as .npy files on disk), and supports rectangular batching.
    Subclasses must implement `get_labels` and `build_transforms`.

    Args:
        img_path (str): image path (directory, *.txt list file, or a list of either).
        imgsz (int): target long-side image size used when resizing.
        label_path (str): label path, this can also be an ann_file or other custom label path.
        cache (bool | str): cache images in RAM (truthy) or on disk ('disk'); False disables caching.
        augment (bool): whether augmentation transforms are enabled.
        hyp (dict): hyperparameters forwarded to `build_transforms`.
        prefix (str): prefix prepended to log/error messages.
        rect (bool): use rectangular batch shapes (requires `batch_size`).
        batch_size (int): batch size; required when `rect` is True.
        stride (int): model stride used to round rectangular batch shapes.
        pad (float): padding factor applied to rectangular batch shapes.
        single_cls (bool): collapse all labels into a single class.
    """

    def __init__(
        self,
        img_path,
        imgsz=640,
        label_path=None,
        cache=False,
        augment=True,
        hyp=None,
        prefix="",
        rect=False,
        batch_size=None,
        stride=32,
        pad=0.5,
        single_cls=False,
    ):
        super().__init__()
        self.img_path = img_path
        self.imgsz = imgsz
        self.label_path = label_path
        self.augment = augment
        self.single_cls = single_cls
        self.prefix = prefix

        self.im_files = self.get_img_files(self.img_path)
        self.labels = self.get_labels()
        if self.single_cls:
            self.update_labels(include_class=[])  # rewrites every label's cls to 0

        self.ni = len(self.labels)  # number of images

        # rect stuff: rectangular batches need a fixed batch size up-front
        self.rect = rect
        self.batch_size = batch_size
        self.stride = stride
        self.pad = pad
        if self.rect:
            assert self.batch_size is not None
            self.set_rectangle()

        # cache stuff: per-index RAM cache plus .npy sidecar paths for disk cache
        self.ims = [None] * self.ni
        self.npy_files = [Path(f).with_suffix(".npy") for f in self.im_files]
        if cache:
            self.cache_images(cache)

        # transforms
        self.transforms = self.build_transforms(hyp=hyp)

    def get_img_files(self, img_path):
        """Read image files.

        Accepts a directory (searched recursively), a text file listing image
        paths (./-relative entries are resolved against the list file's
        directory), or a list of either. Non-image extensions are filtered out.

        Raises:
            FileNotFoundError: If a path does not exist or no images are found.
        """
        try:
            f = []  # image files
            for p in img_path if isinstance(img_path, list) else [img_path]:
                p = Path(p)  # os-agnostic
                if p.is_dir():  # dir
                    f += glob.glob(str(p / "**" / "*.*"), recursive=True)
                    # f = list(p.rglob('*.*'))  # pathlib
                elif p.is_file():  # file
                    with open(p) as t:
                        t = t.read().strip().splitlines()
                        parent = str(p.parent) + os.sep
                        f += [x.replace("./", parent) if x.startswith("./") else x for x in t]  # local to global path
                        # f += [p.parent / x.lstrip(os.sep) for x in t]  # local to global path (pathlib)
                else:
                    raise FileNotFoundError(f"{self.prefix}{p} does not exist")
            im_files = sorted(x.replace("/", os.sep) for x in f if x.split(".")[-1].lower() in IMG_FORMATS)
            # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS])  # pathlib
            assert im_files, f"{self.prefix}No images found"
        except Exception as e:
            raise FileNotFoundError(f"{self.prefix}Error loading data from {img_path}: {e}\n{HELP_URL}") from e
        return im_files

    def update_labels(self, include_class: Optional[list]):
        """include_class, filter labels to include only these classes (optional)"""
        # NOTE(review): an empty include_class (as passed for single_cls) is falsy,
        # so the filter branch is skipped and only the cls rewrite below runs.
        include_class_array = np.array(include_class).reshape(1, -1)
        for i in range(len(self.labels)):
            if include_class:
                cls = self.labels[i]["cls"]
                bboxes = self.labels[i]["bboxes"]
                segments = self.labels[i]["segments"]
                j = (cls == include_class_array).any(1)  # keep rows whose cls is in include_class
                self.labels[i]["cls"] = cls[j]
                self.labels[i]["bboxes"] = bboxes[j]
                if segments:
                    # NOTE(review): if segments is a plain list (as the get_labels docstring
                    # suggests), boolean-mask indexing segments[j] would raise — confirm
                    # it is a numpy array by the time this runs.
                    self.labels[i]["segments"] = segments[j]
            if self.single_cls:
                self.labels[i]["cls"] = 0

    def load_image(self, i):
        # Loads 1 image from dataset index 'i', returns (im, resized hw)
        im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i]
        if im is None:  # not cached in RAM
            if fn.exists():  # load npy
                im = np.load(fn)
            else:  # read image
                im = cv2.imread(f)  # BGR
                assert im is not None, f"Image Not Found {f}"
            h0, w0 = im.shape[:2]  # orig hw
            r = self.imgsz / max(h0, w0)  # ratio
            if r != 1:  # if sizes are not equal
                # INTER_AREA gives better quality when shrinking, but only at inference time
                interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA
                im = cv2.resize(im, (math.ceil(w0 * r), math.ceil(h0 * r)), interpolation=interp)
            return im, (h0, w0), im.shape[:2]  # im, hw_original, hw_resized
        return self.ims[i], self.im_hw0[i], self.im_hw[i]  # im, hw_original, hw_resized

    def cache_images(self, cache):
        # cache images to memory or disk
        gb = 0  # Gigabytes of cached images
        self.im_hw0, self.im_hw = [None] * self.ni, [None] * self.ni
        # disk caching only writes .npy files; RAM caching keeps decoded arrays
        fcn = self.cache_images_to_disk if cache == "disk" else self.load_image
        results = ThreadPool(NUM_THREADS).imap(fcn, range(self.ni))
        pbar = tqdm(enumerate(results), total=self.ni, bar_format=TQDM_BAR_FORMAT, disable=LOCAL_RANK > 0)
        for i, x in pbar:
            if cache == "disk":
                gb += self.npy_files[i].stat().st_size
            else:  # 'ram'
                self.ims[i], self.im_hw0[i], self.im_hw[i] = x  # im, hw_orig, hw_resized = load_image(self, i)
                gb += self.ims[i].nbytes
            pbar.desc = f"{self.prefix}Caching images ({gb / 1E9:.1f}GB {cache})"
        pbar.close()

    def cache_images_to_disk(self, i):
        # Saves an image as an *.npy file for faster loading
        f = self.npy_files[i]
        if not f.exists():
            np.save(f.as_posix(), cv2.imread(self.im_files[i]))

    def set_rectangle(self):
        """Sort images by aspect ratio and compute one stride-aligned shape per batch."""
        bi = np.floor(np.arange(self.ni) / self.batch_size).astype(int)  # batch index
        nb = bi[-1] + 1  # number of batches

        # NOTE(review): pops "shape" out of each label dict, so labels no longer
        # carry it after this runs — verify downstream code does not need it.
        s = np.array([x.pop("shape") for x in self.labels])  # hw
        ar = s[:, 0] / s[:, 1]  # aspect ratio
        irect = ar.argsort()
        self.im_files = [self.im_files[i] for i in irect]
        self.labels = [self.labels[i] for i in irect]
        ar = ar[irect]

        # Set training image shapes
        shapes = [[1, 1]] * nb
        for i in range(nb):
            ari = ar[bi == i]
            mini, maxi = ari.min(), ari.max()
            if maxi < 1:
                shapes[i] = [maxi, 1]
            elif mini > 1:
                shapes[i] = [1, 1 / mini]

        self.batch_shapes = np.ceil(np.array(shapes) * self.imgsz / self.stride + self.pad).astype(int) * self.stride
        self.batch = bi  # batch index of image

    def __getitem__(self, index):
        """Return the transformed label dict for the given index."""
        return self.transforms(self.get_label_info(index))

    def get_label_info(self, index):
        """Build the per-item label dict: copies the label and attaches the loaded image."""
        label = self.labels[index].copy()
        label["img"], label["ori_shape"], label["resized_shape"] = self.load_image(index)
        label["ratio_pad"] = (
            label["resized_shape"][0] / label["ori_shape"][0],
            label["resized_shape"][1] / label["ori_shape"][1],
        )  # for evaluation
        if self.rect:
            label["rect_shape"] = self.batch_shapes[self.batch[index]]
        label = self.update_labels_info(label)
        return label

    def __len__(self):
        """Number of images in the dataset."""
        return len(self.im_files)

    def update_labels_info(self, label):
        """custom your label format here"""
        return label

    def build_transforms(self, hyp=None):
        """Users can custom augmentations here
        like:
            if self.augment:
                # training transforms
                return Compose([])
            else:
                # val transforms
                return Compose([])
        """
        raise NotImplementedError

    def get_labels(self):
        """Users can custom their own format here.
        Make sure your output is a list with each element like below:
            dict(
                im_file=im_file,
                shape=shape,  # format: (height, width)
                cls=cls,
                bboxes=bboxes, # xywh
                segments=segments,  # xy
                keypoints=keypoints, # xy
                normalized=True, # or False
                bbox_format="xyxy",  # or xywh, ltwh
            )
        """
        raise NotImplementedError
227 |
--------------------------------------------------------------------------------
/ultralytics/yolo/engine/validator.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import json
4 | from collections import defaultdict
5 | from pathlib import Path
6 |
7 | import torch
8 | from omegaconf import OmegaConf # noqa
9 | from tqdm import tqdm
10 |
11 | from ultralytics.nn.autobackend import AutoBackend
12 | from ultralytics.yolo.data.utils import check_dataset, check_dataset_yaml
13 | from ultralytics.yolo.utils import DEFAULT_CONFIG, LOGGER, RANK, SETTINGS, TQDM_BAR_FORMAT, callbacks
14 | from ultralytics.yolo.utils.checks import check_imgsz
15 | from ultralytics.yolo.utils.files import increment_path
16 | from ultralytics.yolo.utils.ops import Profile
17 | from ultralytics.yolo.utils.torch_utils import de_parallel, select_device, smart_inference_mode
18 |
19 |
class BaseValidator:
    """
    BaseValidator

    A base class for creating validators.

    Attributes:
        dataloader (DataLoader): Dataloader to use for validation.
        pbar (tqdm): Progress bar to update during validation.
        logger (logging.Logger): Logger to use for validation.
        args (OmegaConf): Configuration for the validator.
        model (nn.Module): Model to validate.
        data (dict): Data dictionary.
        device (torch.device): Device to use for validation.
        batch_i (int): Current batch index.
        training (bool): Whether the model is in training mode.
        speed (tuple): Per-image (pre-process, inference, loss, post-process) times in ms.
        jdict (list): List accumulating per-prediction JSON records.
        save_dir (Path): Directory to save results.
    """

    def __init__(self, dataloader=None, save_dir=None, pbar=None, logger=None, args=None):
        """
        Initializes a BaseValidator instance.

        Args:
            dataloader (torch.utils.data.DataLoader): Dataloader to be used for validation.
            save_dir (Path): Directory to save results.
            pbar (tqdm.tqdm): Progress bar for displaying progress.
            logger (logging.Logger): Logger to log messages.
            args (OmegaConf): Configuration for the validator.
        """
        self.dataloader = dataloader
        self.pbar = pbar
        self.logger = logger or LOGGER
        self.args = args or OmegaConf.load(DEFAULT_CONFIG)
        self.model = None
        self.data = None
        self.device = None
        self.batch_i = None
        self.training = True
        self.speed = None
        self.jdict = None

        # runs/<task>/<mode>[N] unless project/name are configured explicitly
        project = self.args.project or Path(SETTINGS['runs_dir']) / self.args.task
        name = self.args.name or f"{self.args.mode}"
        self.save_dir = save_dir or increment_path(Path(project) / name,
                                                   exist_ok=self.args.exist_ok if RANK in {-1, 0} else True)
        (self.save_dir / 'labels' if self.args.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True)

        if self.args.conf is None:
            self.args.conf = 0.001  # default conf=0.001

        self.callbacks = defaultdict(list, {k: [v] for k, v in callbacks.default_callbacks.items()})  # add callbacks

    @smart_inference_mode()
    def __call__(self, trainer=None, model=None):
        """
        Supports validation of a pre-trained model if passed or a model being trained
        if trainer is passed (trainer gets priority).

        Returns:
            dict: Metric stats; during training, merged with the validation losses
            and rounded to 5 decimal places.
        """
        self.training = trainer is not None
        if self.training:
            # reuse the trainer's device/data and validate its EMA (or raw) model
            self.device = trainer.device
            self.data = trainer.data
            model = trainer.ema.ema or trainer.model
            self.args.half = self.device.type != 'cpu'  # force FP16 val during training
            model = model.half() if self.args.half else model.float()
            self.model = model
            self.loss = torch.zeros_like(trainer.loss_items, device=trainer.device)
            self.args.plots = trainer.epoch == trainer.epochs - 1  # always plot final epoch
            model.eval()
        else:
            callbacks.add_integration_callbacks(self)
            self.run_callbacks('on_val_start')
            assert model is not None, "Either trainer or model is needed for validation"
            self.device = select_device(self.args.device, self.args.batch)
            self.args.half &= self.device.type != 'cpu'
            model = AutoBackend(model, device=self.device, dnn=self.args.dnn, fp16=self.args.half)
            self.model = model
            stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
            imgsz = check_imgsz(self.args.imgsz, stride=stride)
            if engine:
                self.args.batch = model.batch_size
            else:
                self.device = model.device
                if not pt and not jit:
                    self.args.batch = 1  # export.py models default to batch-size 1
                    self.logger.info(
                        f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')

            if isinstance(self.args.data, str) and self.args.data.endswith(".yaml"):
                self.data = check_dataset_yaml(self.args.data)
            else:
                self.data = check_dataset(self.args.data)

            if self.device.type == 'cpu':
                self.args.workers = 0  # faster CPU val as time dominated by inference, not dataloading
            # BUGFIX: was `self.data.set("test")` — dict has no `set` method, which
            # raised AttributeError whenever the dataset had no "val" split.
            self.dataloader = self.dataloader or \
                self.get_dataloader(self.data.get("val") or self.data.get("test"), self.args.batch)

            model.eval()
            model.warmup(imgsz=(1 if pt else self.args.batch, 3, imgsz, imgsz))  # warmup

        # one Profile per stage: pre-process, inference, loss, post-process
        dt = Profile(), Profile(), Profile(), Profile()
        n_batches = len(self.dataloader)
        desc = self.get_desc()
        # NOTE: keeping `not self.training` in tqdm will eliminate pbar after segmentation evaluation during training,
        # which may affect classification task since this arg is in yolov5/classify/val.py.
        # bar = tqdm(self.dataloader, desc, n_batches, not self.training, bar_format=TQDM_BAR_FORMAT)
        bar = tqdm(self.dataloader, desc, n_batches, bar_format=TQDM_BAR_FORMAT)
        self.init_metrics(de_parallel(model))
        self.jdict = []  # empty before each val
        for batch_i, batch in enumerate(bar):
            self.run_callbacks('on_val_batch_start')
            self.batch_i = batch_i
            # pre-process
            with dt[0]:
                batch = self.preprocess(batch)

            # inference
            with dt[1]:
                preds = model(batch["img"])

            # loss
            with dt[2]:
                if self.training:
                    self.loss += trainer.criterion(preds, batch)[1]

            # pre-process predictions
            with dt[3]:
                preds = self.postprocess(preds)

            self.update_metrics(preds, batch)
            if self.args.plots and batch_i < 3:
                self.plot_val_samples(batch, batch_i)
                self.plot_predictions(batch, preds, batch_i)

            self.run_callbacks('on_val_batch_end')
        stats = self.get_stats()
        self.check_stats(stats)
        self.print_results()
        self.speed = tuple(x.t / len(self.dataloader.dataset) * 1E3 for x in dt)  # speeds per image
        self.run_callbacks('on_val_end')
        if self.training:
            model.float()
            results = {**stats, **trainer.label_loss_items(self.loss.cpu() / len(self.dataloader), prefix="val")}
            return {k: round(float(v), 5) for k, v in results.items()}  # return results as 5 decimal place floats
        else:
            self.logger.info('Speed: %.1fms pre-process, %.1fms inference, %.1fms loss, %.1fms post-process per image' %
                             self.speed)
            if self.args.save_json and self.jdict:
                with open(str(self.save_dir / "predictions.json"), 'w') as f:
                    self.logger.info(f"Saving {f.name}...")
                    json.dump(self.jdict, f)  # flatten and save
                stats = self.eval_json(stats)  # update stats
            return stats

    def run_callbacks(self, event: str):
        """Invoke every callback registered for the given event."""
        for callback in self.callbacks.get(event, []):
            callback(self)

    def get_dataloader(self, dataset_path, batch_size):
        """Subclasses must build and return a dataloader for validation."""
        raise NotImplementedError("get_dataloader function not implemented for this validator")

    def preprocess(self, batch):
        """Hook: transform a batch before inference; identity by default."""
        return batch

    def postprocess(self, preds):
        """Hook: transform raw predictions (e.g. NMS); identity by default."""
        return preds

    def init_metrics(self, model):
        """Hook: initialize metric state before the validation loop."""
        pass

    def update_metrics(self, preds, batch):
        """Hook: accumulate metrics from one batch of predictions."""
        pass

    def get_stats(self):
        """Hook: return the accumulated metric stats as a dict."""
        return {}

    def check_stats(self, stats):
        """Hook: sanity-check the computed stats."""
        pass

    def print_results(self):
        """Hook: print/log the final results."""
        pass

    def get_desc(self):
        """Hook: return the progress-bar description string."""
        pass

    @property
    def metric_keys(self):
        """Names of the metrics this validator produces."""
        return []

    # TODO: may need to put these following functions into callback
    def plot_val_samples(self, batch, ni):
        """Hook: plot ground-truth samples for a batch."""
        pass

    def plot_predictions(self, batch, preds, ni):
        """Hook: plot predictions for a batch."""
        pass

    def pred_to_json(self, preds, batch):
        """Hook: append predictions to self.jdict in JSON-serializable form."""
        pass

    def eval_json(self, stats):
        """Hook: evaluate the saved predictions JSON and update stats."""
        pass
225 |
--------------------------------------------------------------------------------