├── LICENSE
├── README.md
├── config
│   ├── fold.json
│   ├── id.json
│   ├── ip.json
│   └── setting.json
├── main.py
├── requirements.txt
├── ui
│   ├── CustomMessageBox.py
│   ├── UIFunctions.py
│   ├── __pycache__
│   │   ├── CustomMessageBox.cpython-39.pyc
│   │   ├── UIFunctions.cpython-39.pyc
│   │   ├── cmp_res.cpython-39.pyc
│   │   ├── custom_grips.cpython-39.pyc
│   │   ├── home.cpython-39.pyc
│   │   ├── mainwindow.cpython-39.pyc
│   │   └── resources_rc.cpython-39.pyc
│   ├── cmp_res.py
│   ├── custom_grips.py
│   ├── mainwindow.py
│   └── rtsp_dialog.ui
├── ultralytics
│   ├── __init__.py
│   ├── assets
│   │   ├── bus.jpg
│   │   └── zidane.jpg
│   ├── datasets
│   │   ├── Argoverse.yaml
│   │   ├── GlobalWheat2020.yaml
│   │   ├── ImageNet.yaml
│   │   ├── Objects365.yaml
│   │   ├── SKU-110K.yaml
│   │   ├── VOC.yaml
│   │   ├── VisDrone.yaml
│   │   ├── coco-pose.yaml
│   │   ├── coco.yaml
│   │   ├── coco128-seg.yaml
│   │   ├── coco128.yaml
│   │   ├── coco8-pose.yaml
│   │   ├── coco8-seg.yaml
│   │   ├── coco8.yaml
│   │   └── xView.yaml
│   ├── hub
│   │   ├── __init__.py
│   │   ├── __pycache__
│   │   │   ├── __init__.cpython-39.pyc
│   │   │   ├── auth.cpython-39.pyc
│   │   │   ├── session.cpython-39.pyc
│   │   │   └── utils.cpython-39.pyc
│   │   ├── auth.py
│   │   ├── session.py
│   │   └── utils.py
│   ├── models
│   │   ├── v3
│   │   │   ├── yolov3-spp.yaml
│   │   │   ├── yolov3-tiny.yaml
│   │   │   └── yolov3.yaml
│   │   ├── v5
│   │   │   ├── yolov5-p6.yaml
│   │   │   └── yolov5.yaml
│   │   └── v8
│   │       ├── yolov8-cls.yaml
│   │       ├── yolov8-p2.yaml
│   │       ├── yolov8-p6.yaml
│   │       ├── yolov8-pose-p6.yaml
│   │       ├── yolov8-pose.yaml
│   │       ├── yolov8-seg.yaml
│   │       └── yolov8.yaml
│   ├── nn
│   │   ├── __init__.py
│   │   ├── __pycache__
│   │   │   ├── __init__.cpython-39.pyc
│   │   │   ├── autobackend.cpython-39.pyc
│   │   │   ├── autoshape.cpython-39.pyc
│   │   │   ├── modules.cpython-39.pyc
│   │   │   └── tasks.cpython-39.pyc
│   │   ├── autobackend.py
│   │   ├── autoshape.py
│   │   ├── modules.py
│   │   └── tasks.py
│   ├── tracker
│   │   ├── __init__.py
│   │   ├── __pycache__
│   │   │   ├── __init__.cpython-39.pyc
│   │   │   └── track.cpython-39.pyc
│   │   ├── cfg
│   │   │   ├── botsort.yaml
│   │   │   └── bytetrack.yaml
│   │   ├── track.py
│   │   ├── trackers
│   │   │   ├── __init__.py
│   │   │   ├── __pycache__
│   │   │   │   ├── __init__.cpython-39.pyc
│   │   │   │   ├── basetrack.cpython-39.pyc
│   │   │   │   ├── bot_sort.cpython-39.pyc
│   │   │   │   └── byte_tracker.cpython-39.pyc
│   │   │   ├── basetrack.py
│   │   │   ├── bot_sort.py
│   │   │   └── byte_tracker.py
│   │   └── utils
│   │       ├── __init__.py
│   │       ├── __pycache__
│   │       │   ├── __init__.cpython-39.pyc
│   │       │   ├── gmc.cpython-39.pyc
│   │       │   ├── kalman_filter.cpython-39.pyc
│   │       │   └── matching.cpython-39.pyc
│   │       ├── gmc.py
│   │       ├── kalman_filter.py
│   │       └── matching.py
│   └── yolo
│       ├── __init__.py
│       ├── cfg
│       │   ├── __init__.py
│       │   ├── __pycache__
│       │   │   └── __init__.cpython-39.pyc
│       │   └── default.yaml
│       ├── data
│       │   ├── __init__.py
│       │   ├── __pycache__
│       │   │   ├── __init__.cpython-39.pyc
│       │   │   ├── augment.cpython-39.pyc
│       │   │   ├── base.cpython-39.pyc
│       │   │   ├── build.cpython-39.pyc
│       │   │   ├── dataset.cpython-39.pyc
│       │   │   ├── dataset_wrappers.cpython-39.pyc
│       │   │   └── utils.cpython-39.pyc
│       │   ├── augment.py
│       │   ├── base.py
│       │   ├── build.py
│       │   ├── dataloaders
│       │   │   ├── __init__.py
│       │   │   ├── __pycache__
│       │   │   │   ├── __init__.cpython-39.pyc
│       │   │   │   ├── stream_loaders.cpython-39.pyc
│       │   │   │   ├── v5augmentations.cpython-39.pyc
│       │   │   │   └── v5loader.cpython-39.pyc
│       │   │   ├── stream_loaders.py
│       │   │   ├── v5augmentations.py
│       │   │   └── v5loader.py
│       │   ├── dataset.py
│       │   ├── dataset_wrappers.py
│       │   └── utils.py
│       ├── engine
│       │   ├── __init__.py
│       │   ├── __pycache__
│       │   │   ├── __init__.cpython-39.pyc
│       │   │   ├── exporter.cpython-39.pyc
│       │   │   ├── model.cpython-39.pyc
│       │   │   ├── predictor.cpython-39.pyc
│       │   │   ├── results.cpython-39.pyc
│       │   │   ├── trainer.cpython-39.pyc
│       │   │   └── validator.cpython-39.pyc
│       │   ├── exporter.py
│       │   ├── model.py
│       │   ├── predictor.py
│       │   ├── results.py
│       │   ├── trainer.py
│       │   └── validator.py
│       ├── utils
│       │   ├── __init__.py
│       │   ├── __pycache__
│       │   │   ├── __init__.cpython-39.pyc
│       │   │   ├── autobatch.cpython-39.pyc
│       │   │   ├── benchmarks.cpython-39.pyc
│       │   │   ├── checks.cpython-39.pyc
│       │   │   ├── dist.cpython-39.pyc
│       │   │   ├── downloads.cpython-39.pyc
│       │   │   ├── files.cpython-39.pyc
│       │   │   ├── instance.cpython-39.pyc
│       │   │   ├── loss.cpython-39.pyc
│       │   │   ├── metrics.cpython-39.pyc
│       │   │   ├── ops.cpython-39.pyc
│       │   │   ├── plotting.cpython-39.pyc
│       │   │   ├── tal.cpython-39.pyc
│       │   │   └── torch_utils.cpython-39.pyc
│       │   ├── autobatch.py
│       │   ├── benchmarks.py
│       │   ├── callbacks
│       │   │   ├── __init__.py
│       │   │   ├── __pycache__
│       │   │   │   ├── __init__.cpython-39.pyc
│       │   │   │   ├── base.cpython-39.pyc
│       │   │   │   ├── clearml.cpython-39.pyc
│       │   │   │   ├── comet.cpython-39.pyc
│       │   │   │   ├── hub.cpython-39.pyc
│       │   │   │   ├── mlflow.cpython-39.pyc
│       │   │   │   └── tensorboard.cpython-39.pyc
│       │   │   ├── base.py
│       │   │   ├── clearml.py
│       │   │   ├── comet.py
│       │   │   ├── hub.py
│       │   │   ├── mlflow.py
│       │   │   └── tensorboard.py
│       │   ├── checks.py
│       │   ├── dist.py
│       │   ├── downloads.py
│       │   ├── files.py
│       │   ├── instance.py
│       │   ├── loss.py
│       │   ├── metrics.py
│       │   ├── ops.py
│       │   ├── plotting.py
│       │   ├── tal.py
│       │   └── torch_utils.py
│       └── v8
│           ├── __init__.py
│           ├── __pycache__
│           │   └── __init__.cpython-39.pyc
│           ├── classify
│           │   ├── __init__.py
│           │   ├── __pycache__
│           │   │   ├── __init__.cpython-39.pyc
│           │   │   ├── predict.cpython-39.pyc
│           │   │   ├── train.cpython-39.pyc
│           │   │   └── val.cpython-39.pyc
│           │   ├── predict.py
│           │   ├── train.py
│           │   └── val.py
│           ├── detect
│           │   ├── __init__.py
│           │   ├── __pycache__
│           │   │   ├── __init__.cpython-39.pyc
│           │   │   ├── predict.cpython-39.pyc
│           │   │   ├── train.cpython-39.pyc
│           │   │   └── val.cpython-39.pyc
│           │   ├── predict.py
│           │   ├── train.py
│           │   └── val.py
│           ├── pose
│           │   ├── __init__.py
│           │   ├── __pycache__
│           │   │   ├── __init__.cpython-39.pyc
│           │   │   ├── predict.cpython-39.pyc
│           │   │   ├── train.cpython-39.pyc
│           │   │   └── val.cpython-39.pyc
│           │   ├── predict.py
│           │   ├── train.py
│           │   └── val.py
│           └── segment
│               ├── __init__.py
│               ├── __pycache__
│               │   ├── __init__.cpython-39.pyc
│               │   ├── predict.cpython-39.pyc
│               │   ├── train.cpython-39.pyc
│               │   └── val.cpython-39.pyc
│               ├── predict.py
│               ├── train.py
│               └── val.py
├── utils
│   ├── __pycache__
│   │   ├── capnums.cpython-39.pyc
│   │   ├── id_dialog.cpython-39.pyc
│   │   ├── id_win.cpython-39.pyc
│   │   ├── rtsp_dialog.cpython-39.pyc
│   │   └── rtsp_win.cpython-39.pyc
│   ├── capnums.py
│   ├── id_dialog.py
│   ├── id_win.py
│   ├── rtsp_dialog.py
│   ├── rtsp_win.py
│   └── video_transform.py
└── weights
    └── 轻量级模型.pt
/README.md:
--------------------------------------------------------------------------------
1 | # Multi-Object Tracking Intelligent Traffic Monitoring System Based on YOLOv8 and Qt
2 | 
3 | ## 1. Demo Video
4 | https://www.bilibili.com/video/BV1yX4y1m7Fe/?spm_id_from=333.999.0.0
5 | https://youtu.be/_77LrsXaYzM
6 | 
7 | ## 2. Installation
8 | ### First Step: Install Anaconda
9 | 1) Visit the Anaconda official website: https://www.anaconda.com/products/individual
10 | 2) Download the installer for your operating system (the 64-bit version is recommended)
11 | 3) Run the installer and follow the prompts
12 | 4) Create a virtual environment:
13 |    conda create --name your_env_name python=3.9.16
14 | 
15 | ### Second Step: pip install -r requirements.txt
16 | Activate the environment and install the required libraries:
17 |    conda activate your_env_name -> pip install -r requirements.txt
18 | 
19 | This step installs the CPU builds of torch and torchvision. For a better frame rate, install the CUDA builds instead: you need an NVIDIA graphics card; run nvidia-smi to check your CUDA driver version; install a matching CUDA toolkit from the NVIDIA website; then, on the PyTorch website, pick the build for that CUDA toolkit version and install it with conda or pip.
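20 | 
21 | For example, with a CUDA 11.8 driver the install command is typically the following (the CUDA version here is an assumption; verify the exact command for your setup at https://pytorch.org):
22 |    pip install torch torchvision --index-url https://download.pytorch.org/whl/cu118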
23 | 
24 | ## 3. Running
25 | After configuring the environment, run main.py from the working directory that contains it. Alternatively, download the archive from the following link and run the exe file:
26 | https://pan.baidu.com/s/1U9dskWzOouF4y1_s7KnPfg?pwd=Zlad Password: Zlad
27 | 
28 | ## References
29 | https://github.com/Jai-wei/YOLOv8-PySide6-GUI
30 | https://github.com/ultralytics/ultralytics
31 | https://doc.qt.io/qtforpython-6/
--------------------------------------------------------------------------------
/config/fold.json:
--------------------------------------------------------------------------------
1 | {
2 | "open_fold": "D:/"
3 | }
4 |
--------------------------------------------------------------------------------
/config/id.json:
--------------------------------------------------------------------------------
1 | {
2 | "id": "2"
3 | }
--------------------------------------------------------------------------------
/config/ip.json:
--------------------------------------------------------------------------------
1 | {
2 | "ip": "rtsp://admin:admin@192.168.0.103:8554/live"
3 | }
--------------------------------------------------------------------------------
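A quick way to sanity-check an RTSP URL of the form stored in ip.json is with OpenCV (a standalone sketch, not code from this repo):

```python
# Standalone RTSP sanity check using the URL format from config/ip.json.
import json
import cv2

ip = json.load(open('config/ip.json'))['ip']  # e.g. rtsp://admin:admin@192.168.0.103:8554/live
cap = cv2.VideoCapture(ip)
ok, frame = cap.read()                        # grab one frame to confirm the stream is alive
print('opened:', cap.isOpened(), 'frame read:', ok)
cap.release()
```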
/config/setting.json:
--------------------------------------------------------------------------------
1 | {
2 | "iou": 0.45,
3 | "conf": 0.25,
4 | "rate": 10,
5 | "save_res": 2,
6 | "save_txt": 2
7 | }
--------------------------------------------------------------------------------
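These keys likely map to the detector's NMS IoU and confidence thresholds, the UI refresh rate, and the save toggles. A minimal sketch of reading such a file (illustrative only; the actual loading logic lives in main.py, which is not included in this listing):

```python
# Illustrative only: load config/setting.json and unpack its fields.
import json

with open('config/setting.json') as f:
    cfg = json.load(f)

iou, conf = cfg['iou'], cfg['conf']   # NMS IoU and confidence thresholds
rate = cfg['rate']                    # UI playback/refresh rate
# save_res / save_txt look like persisted checkbox states
# (in Qt, CheckState.Checked == 2 and CheckState.Unchecked == 0).
save_res, save_txt = cfg['save_res'], cfg['save_txt']
```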
/requirements.txt:
--------------------------------------------------------------------------------
1 | # Usage: Win+R -> cmd -> cd to the project directory -> pip install -r requirements.txt
2 | matplotlib>=3.2.2
3 | opencv-python>=4.6.0
4 | numpy>=1.24.3
5 | Pillow>=7.1.2
6 | PyYAML>=5.3.1
7 | requests>=2.23.0
8 | scipy>=1.4.1
9 | torch>=1.7.0 # install cuda version for better fps performance
10 | torchvision>=0.8.1 # install cuda version for better fps performance
11 | tqdm>=4.64.0
12 | supervision>=0.6.0
13 | psutil>=5.9.5
14 | thop>=0.1.1
15 | PySide6>=6.4.2
16 | pandas>=2.0.1
17 | lap>=0.4
--------------------------------------------------------------------------------
/ui/CustomMessageBox.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # @Author : CatfishW🚀
3 | # @Time : 2023/5/1
4 | from PySide6.QtCore import QTimer, Qt
5 | from PySide6.QtWidgets import QApplication, QMessageBox
6 | from PySide6.QtGui import QPixmap, QIcon
7 |
8 |
9 | # Single-button dialog box, which disappears automatically after appearing for a specified period of time
10 | class MessageBox(QMessageBox):
11 | def __init__(self, *args, title='提示', count=1, time=1000, auto=False, **kwargs):
12 | super(MessageBox, self).__init__(*args, **kwargs)
13 | self._count = count
14 | self._time = time
15 | self._auto = auto # Whether to close automatically
16 | assert count > 0 # must be greater than 0
17 | assert time >= 500 # Must be >=500 milliseconds
18 | self.setStyleSheet('''
19 | QWidget{color:black;
20 | background-color: qlineargradient(x1:0, y1:1, x2:1, y2:1, stop:0.4 rgb(0,205,102), stop:1 rgb(0,205,102));
21 | font: 13pt "Microsoft YaHei UI";
22 | padding-right: 5px;
23 | padding-top: 14px;
24 | font-weight: light;}
25 | QLabel{
26 | color:white;
27 | background-color: rgba(107, 128, 210, 0);}''')
28 |
29 | self.setWindowTitle(title)
30 |
31 | self.setStandardButtons(QMessageBox.StandardButton.Close) # close button
32 | self.closeBtn = self.button(QMessageBox.StandardButton.Close) # get close button
33 | self.closeBtn.setText('Close')
34 | self.closeBtn.setVisible(False)
35 | self._timer = QTimer(self, timeout=self.doCountDown)
36 | self._timer.start(self._time)
37 |
38 | def doCountDown(self):
39 | self._count -= 1
40 | if self._count <= 0:
41 | self._timer.stop()
42 | if self._auto: # auto close
43 | self.accept()
44 | self.close()
45 |
46 | if __name__ == '__main__':
47 |     app = QApplication([])  # a QApplication must exist before creating widgets
48 |     MessageBox(None, text='123', auto=True).exec()
--------------------------------------------------------------------------------
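Typical use is a transient toast-style notification that dismisses itself; a self-contained sketch (assumes only PySide6 and running from the repo root):

```python
# Minimal demo of the auto-closing MessageBox above.
from PySide6.QtWidgets import QApplication
from ui.CustomMessageBox import MessageBox

app = QApplication([])
# Pops up, counts down once (1 tick x 2000 ms), then closes itself because auto=True.
MessageBox(None, text='Detection finished', time=2000, auto=True).exec()
```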
/ui/UIFunctions.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # @Author : CatfishW🚀
3 | # @Time : 2023/5/1
4 | from main import *
5 | from ui.custom_grips import CustomGrip
6 | from PySide6.QtCore import QPropertyAnimation, QEasingCurve, QEvent, QTimer
7 | from PySide6.QtCore import *
8 | from PySide6.QtGui import *
9 | from PySide6.QtWidgets import *
10 | import time
11 |
12 | GLOBAL_STATE = False # max min flag
13 | GLOBAL_TITLE_BAR = True
14 |
15 |
16 | class UIFuncitons(MainWindow):
17 | # Expand/collapse the left menu
18 | def toggleMenu(self, enable):
19 | if enable:
20 | standard = 68
21 | maxExtend = 180
22 | width = self.LeftMenuBg.width()
23 |
24 | if width == 68:
25 | widthExtended = maxExtend
26 | else:
27 | widthExtended = standard
28 |
29 | # Qt animation
30 | self.animation = QPropertyAnimation(self.LeftMenuBg, b"minimumWidth")
31 | self.animation.setDuration(500) # ms
32 | self.animation.setStartValue(width)
33 | self.animation.setEndValue(widthExtended)
34 | self.animation.setEasingCurve(QEasingCurve.InOutQuint)
35 | self.animation.start()
36 |
37 | # Expand/collapse the right settings panel
38 | def settingBox(self, enable):
39 | if enable:
40 | # Get the current widths
41 | widthRightBox = self.prm_page.width() # right set column width
42 | widthLeftBox = self.LeftMenuBg.width() # left column length
43 | maxExtend = 220
44 | standard = 0
45 |
46 | # Set the maximum width
47 | if widthRightBox == 0:
48 | widthExtended = maxExtend
49 | else:
50 | widthExtended = standard
51 |
52 | # Left menu animation
53 | self.left_box = QPropertyAnimation(self.LeftMenuBg, b"minimumWidth")
54 | self.left_box.setDuration(500)
55 | self.left_box.setStartValue(widthLeftBox)
56 | self.left_box.setEndValue(68)
57 | self.left_box.setEasingCurve(QEasingCurve.InOutQuart)
58 |
59 | # Settings panel animation
60 | self.right_box = QPropertyAnimation(self.prm_page, b"minimumWidth")
61 | self.right_box.setDuration(500)
62 | self.right_box.setStartValue(widthRightBox)
63 | self.right_box.setEndValue(widthExtended)
64 | self.right_box.setEasingCurve(QEasingCurve.InOutQuart)
65 |
66 | # Run both animations in parallel
67 | self.group = QParallelAnimationGroup()
68 | self.group.addAnimation(self.left_box)
69 | self.group.addAnimation(self.right_box)
70 | self.group.start()
71 |
72 | # Maximize/restore the window
73 | def maximize_restore(self):
74 | global GLOBAL_STATE
75 | status = GLOBAL_STATE
76 | if status == False:
77 | GLOBAL_STATE = True
78 | self.showMaximized() # maximize
79 | self.max_sf.setToolTip("Restore")
80 | self.frame_size_grip.hide()
81 | self.left_grip.hide()
82 | self.right_grip.hide()
83 | self.top_grip.hide()
84 | self.bottom_grip.hide()
85 | else:
86 | GLOBAL_STATE = False
87 | self.showNormal() # restore to normal size
88 | self.resize(self.width()+1, self.height()+1)
89 | self.max_sf.setToolTip("Maximize")
90 | self.frame_size_grip.show()
91 | self.left_grip.show()
92 | self.right_grip.show()
93 | self.top_grip.show()
94 | self.bottom_grip.show()
95 |
96 | # window control
97 | def uiDefinitions(self):
98 | # Double-click the title bar to maximize
99 | def doubleClickMaximizeRestore(event):
100 | if event.type() == QEvent.MouseButtonDblClick:
101 | QTimer.singleShot(250, lambda: UIFuncitons.maximize_restore(self))
102 | self.top.mouseDoubleClickEvent = doubleClickMaximizeRestore
103 |
104 | # MOVE WINDOW / MAXIMIZE / RESTORE
105 | def moveWindow(event):
106 | if GLOBAL_STATE: # IF MAXIMIZED CHANGE TO NORMAL
107 | UIFuncitons.maximize_restore(self)
108 | if event.buttons() == Qt.LeftButton: # MOVE
109 | self.move(self.pos() + event.globalPos() - self.dragPos)
110 | self.dragPos = event.globalPos()
111 | self.top.mouseMoveEvent = moveWindow
112 | # CUSTOM GRIPS
113 | self.left_grip = CustomGrip(self, Qt.LeftEdge, True)
114 | self.right_grip = CustomGrip(self, Qt.RightEdge, True)
115 | self.top_grip = CustomGrip(self, Qt.TopEdge, True)
116 | self.bottom_grip = CustomGrip(self, Qt.BottomEdge, True)
117 |
118 | # MINIMIZE
119 | self.min_sf.clicked.connect(lambda: self.showMinimized())
120 | # MAXIMIZE/RESTORE
121 | self.max_sf.clicked.connect(lambda: UIFuncitons.maximize_restore(self))
122 | # CLOSE APPLICATION
123 | self.close_button.clicked.connect(self.close)
124 |
125 | # Control the stretching of the four sides of the window
126 | def resize_grips(self):
127 | self.left_grip.setGeometry(0, 10, 10, self.height())
128 | self.right_grip.setGeometry(self.width() - 10, 10, 10, self.height())
129 | self.top_grip.setGeometry(0, 0, self.width(), 10)
130 | self.bottom_grip.setGeometry(0, self.height() - 10, self.width(), 10)
131 |
132 | # Show module to add shadow
133 | def shadow_style(self, widget, Color):
134 | shadow = QGraphicsDropShadowEffect(self)
135 | shadow.setOffset(8, 8) # offset
136 | shadow.setBlurRadius(38) # shadow radius
137 | shadow.setColor(Color) # shadow color
138 | widget.setGraphicsEffect(shadow)
--------------------------------------------------------------------------------
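The menu animations above all follow the same QPropertyAnimation pattern; here is a standalone, runnable reduction of it (not repo code, PySide6 only):

```python
# Standalone reduction of the QPropertyAnimation pattern used by toggleMenu/settingBox.
from PySide6.QtCore import QPropertyAnimation, QEasingCurve
from PySide6.QtWidgets import QApplication, QFrame

app = QApplication([])
panel = QFrame()
panel.setStyleSheet('background: rgb(0, 205, 102);')
panel.resize(68, 400)
panel.show()

anim = QPropertyAnimation(panel, b'minimumWidth')  # animate the same property as the menus
anim.setDuration(500)                              # ms, matching the repo's setting
anim.setStartValue(68)                             # collapsed width
anim.setEndValue(180)                              # expanded width
anim.setEasingCurve(QEasingCurve.InOutQuint)
anim.start()
app.exec()
```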
/ui/__pycache__/CustomMessageBox.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ui/__pycache__/CustomMessageBox.cpython-39.pyc
--------------------------------------------------------------------------------
/ui/__pycache__/UIFunctions.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ui/__pycache__/UIFunctions.cpython-39.pyc
--------------------------------------------------------------------------------
/ui/__pycache__/cmp_res.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ui/__pycache__/cmp_res.cpython-39.pyc
--------------------------------------------------------------------------------
/ui/__pycache__/custom_grips.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ui/__pycache__/custom_grips.cpython-39.pyc
--------------------------------------------------------------------------------
/ui/__pycache__/home.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ui/__pycache__/home.cpython-39.pyc
--------------------------------------------------------------------------------
/ui/__pycache__/mainwindow.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ui/__pycache__/mainwindow.cpython-39.pyc
--------------------------------------------------------------------------------
/ui/__pycache__/resources_rc.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ui/__pycache__/resources_rc.cpython-39.pyc
--------------------------------------------------------------------------------
/ui/rtsp_dialog.ui:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <ui version="4.0">
3 |  <class>Form</class>
4 |  <widget class="QWidget" name="Form">
5 |   <property name="geometry">
6 |    <rect>
7 |     <x>0</x>
8 |     <y>0</y>
9 |     <width>783</width>
10 |     <height>40</height>
11 |    </rect>
12 |   </property>
13 |   <property name="minimumSize">
14 |    <size>
15 |     <width>0</width>
16 |     <height>40</height>
17 |    </size>
18 |   </property>
19 |   <property name="maximumSize">
20 |    <size>
21 |     <width>16777215</width>
22 |     <height>41</height>
23 |    </size>
24 |   </property>
25 |   <property name="windowTitle">
26 |    <string>Form</string>
27 |   </property>
28 |   <property name="windowIcon">
29 |    <iconset>
30 |     <normaloff>:/img/icon/实时视频流解析.png</normaloff>:/img/icon/实时视频流解析.png</iconset>
31 |   </property>
32 |   <property name="styleSheet">
33 |    <string notr="true">#Form{background:rgba(120,120,120,255)}</string>
34 |   </property>
35 |   <layout class="QHBoxLayout" name="horizontalLayout">
36 |    <property name="spacing">
37 |     <number>5</number>
38 |    </property>
39 |    <property name="leftMargin">
40 |     <number>5</number>
41 |    </property>
42 |    <item>
43 |     <widget class="QLabel" name="label">
44 |      <property name="minimumSize">
45 |       <size>
46 |        <width>0</width>
47 |        <height>30</height>
48 |       </size>
49 |      </property>
50 |      <property name="maximumSize">
51 |       <size>
52 |        <width>16777215</width>
53 |        <height>30</height>
54 |       </size>
55 |      </property>
56 |      <property name="styleSheet">
57 |       <string notr="true">QLabel{font-family: &quot;Microsoft YaHei&quot;;
58 | font-size: 18px;
59 | font-weight: bold;
60 | color:white;}</string>
61 |      </property>
62 |      <property name="text">
63 |       <string>rtsp address:</string>
64 |      </property>
65 |     </widget>
66 |    </item>
67 |    <item>
68 |     <widget class="QLineEdit" name="lineEdit">
69 |      <property name="minimumSize">
70 |       <size>
71 |        <width>0</width>
72 |        <height>31</height>
73 |       </size>
74 |      </property>
75 |      <property name="styleSheet">
76 |       <string notr="true">background-color: rgb(207, 207, 207);</string>
77 |      </property>
78 |     </widget>
79 |    </item>
80 |    <item>
81 |     <widget class="QPushButton" name="pushButton">
82 |      <property name="styleSheet">
83 |       <string notr="true">QPushButton{font-family: &quot;Microsoft YaHei&quot;;
84 | font-size: 18px;
85 | font-weight: bold;
86 | color:white;
87 | text-align: center center;
88 | padding-left: 5px;
89 | padding-right: 5px;
90 | padding-top: 4px;
91 | padding-bottom: 4px;
92 | border-style: solid;
93 | border-width: 0px;
94 | border-color: rgba(255, 255, 255, 255);
95 | border-radius: 3px;
96 | background-color: rgba(255,255,255,30);}
97 | 
98 | QPushButton:focus{outline: none;}
99 | 
100 | QPushButton::pressed{font-family: &quot;Microsoft YaHei&quot;;
101 | font-size: 16px;
102 | font-weight: bold;
103 | color:rgb(200,200,200);
104 | text-align: center center;
105 | padding-left: 5px;
106 | padding-right: 5px;
107 | padding-top: 4px;
108 | padding-bottom: 4px;
109 | border-style: solid;
110 | border-width: 0px;
111 | border-color: rgba(255, 255, 255, 255);
112 | border-radius: 3px;
113 | background-color: rgba(255,255,255,150);}
114 | 
115 | QPushButton::hover {
116 | border-style: solid;
117 | border-width: 0px;
118 | border-radius: 0px;
119 | background-color: rgba(255,255,255,50);}</string>
120 |      </property>
121 |      <property name="text">
122 |       <string>confirm</string>
123 |      </property>
124 |     </widget>
125 |    </item>
126 |   </layout>
127 |  </widget>
128 |  <resources/>
129 |  <connections/>
130 | </ui>
--------------------------------------------------------------------------------
/ultralytics/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | __version__ = '8.0.75'
4 |
5 | from ultralytics.hub import start
6 | from ultralytics.yolo.engine.model import YOLO
7 | from ultralytics.yolo.utils.checks import check_yolo as checks
8 |
9 | __all__ = '__version__', 'YOLO', 'checks', 'start' # allow simpler import
10 |
--------------------------------------------------------------------------------
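As the exports above suggest, the bundled package is used like stock ultralytics 8.0.x; a minimal sketch (the weights file yolov8n.pt is an assumption, not part of this listing):

```python
# Minimal sketch of the exported API (weights file is an assumption).
from ultralytics import YOLO

model = YOLO('yolov8n.pt')                       # load detection weights
results = model.predict('ultralytics/assets/bus.jpg', conf=0.25, iou=0.45)
print(len(results[0].boxes), 'objects detected')
```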
/ultralytics/assets/bus.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/assets/bus.jpg
--------------------------------------------------------------------------------
/ultralytics/assets/zidane.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/assets/zidane.jpg
--------------------------------------------------------------------------------
/ultralytics/datasets/Argoverse.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI
3 | # Example usage: yolo train data=Argoverse.yaml
4 | # parent
5 | # ├── ultralytics
6 | # └── datasets
7 | # └── Argoverse ← downloads here (31.3 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/Argoverse # dataset root dir
12 | train: Argoverse-1.1/images/train/ # train images (relative to 'path') 39384 images
13 | val: Argoverse-1.1/images/val/ # val images (relative to 'path') 15062 images
14 | test: Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: bus
23 | 5: truck
24 | 6: traffic_light
25 | 7: stop_sign
26 |
27 |
28 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
29 | download: |
30 | import json
31 | from tqdm import tqdm
32 | from ultralytics.yolo.utils.downloads import download
33 | from pathlib import Path
34 |
35 | def argoverse2yolo(set):
36 | labels = {}
37 | a = json.load(open(set, "rb"))
38 | for annot in tqdm(a['annotations'], desc=f"Converting {set} to YOLOv5 format..."):
39 | img_id = annot['image_id']
40 | img_name = a['images'][img_id]['name']
41 | img_label_name = f'{img_name[:-3]}txt'
42 |
43 | cls = annot['category_id'] # instance class id
44 | x_center, y_center, width, height = annot['bbox']
45 | x_center = (x_center + width / 2) / 1920.0 # offset and scale
46 | y_center = (y_center + height / 2) / 1200.0 # offset and scale
47 | width /= 1920.0 # scale
48 | height /= 1200.0 # scale
49 |
50 | img_dir = set.parents[2] / 'Argoverse-1.1' / 'labels' / a['seq_dirs'][a['images'][annot['image_id']]['sid']]
51 | if not img_dir.exists():
52 | img_dir.mkdir(parents=True, exist_ok=True)
53 |
54 | k = str(img_dir / img_label_name)
55 | if k not in labels:
56 | labels[k] = []
57 | labels[k].append(f"{cls} {x_center} {y_center} {width} {height}\n")
58 |
59 | for k in labels:
60 | with open(k, "w") as f:
61 | f.writelines(labels[k])
62 |
63 |
64 | # Download
65 | dir = Path(yaml['path']) # dataset root dir
66 | urls = ['https://argoverse-hd.s3.us-east-2.amazonaws.com/Argoverse-HD-Full.zip']
67 | download(urls, dir=dir)
68 |
69 | # Convert
70 | annotations_dir = 'Argoverse-HD/annotations/'
71 | (dir / 'Argoverse-1.1' / 'tracking').rename(dir / 'Argoverse-1.1' / 'images') # rename 'tracking' to 'images'
72 | for d in "train.json", "val.json":
73 | argoverse2yolo(dir / annotations_dir / d) # convert Argoverse annotations to YOLO labels
74 |
--------------------------------------------------------------------------------
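Each converted label line is `class x_center y_center width height`, normalized by the 1920x1200 Argoverse frame; a worked example of the arithmetic in argoverse2yolo (box values made up for illustration):

```python
# Worked example of the normalization in argoverse2yolo (made-up box).
x, y, w, h = 860, 575, 100, 50          # COCO-style bbox: top-left x, y, width, height
x_center = (x + w / 2) / 1920.0         # 0.473958
y_center = (y + h / 2) / 1200.0         # 0.500000
print(f"2 {x_center:.6f} {y_center:.6f} {w / 1920.0:.6f} {h / 1200.0:.6f}")
# -> "2 0.473958 0.500000 0.052083 0.041667"  (class 2 = car)
```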
/ultralytics/datasets/GlobalWheat2020.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan
3 | # Example usage: yolo train data=GlobalWheat2020.yaml
4 | # parent
5 | # ├── ultralytics
6 | # └── datasets
7 | # └── GlobalWheat2020 ← downloads here (7.0 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/GlobalWheat2020 # dataset root dir
12 | train: # train images (relative to 'path') 3422 images
13 | - images/arvalis_1
14 | - images/arvalis_2
15 | - images/arvalis_3
16 | - images/ethz_1
17 | - images/rres_1
18 | - images/inrae_1
19 | - images/usask_1
20 | val: # val images (relative to 'path') 748 images (WARNING: train set contains ethz_1)
21 | - images/ethz_1
22 | test: # test images (optional) 1276 images
23 | - images/utokyo_1
24 | - images/utokyo_2
25 | - images/nau_1
26 | - images/uq_1
27 |
28 | # Classes
29 | names:
30 | 0: wheat_head
31 |
32 |
33 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
34 | download: |
35 | from ultralytics.yolo.utils.downloads import download
36 | from pathlib import Path
37 |
38 | # Download
39 | dir = Path(yaml['path']) # dataset root dir
40 | urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip',
41 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/GlobalWheat2020_labels.zip']
42 | download(urls, dir=dir)
43 |
44 | # Make Directories
45 | for p in 'annotations', 'images', 'labels':
46 | (dir / p).mkdir(parents=True, exist_ok=True)
47 |
48 | # Move
49 | for p in 'arvalis_1', 'arvalis_2', 'arvalis_3', 'ethz_1', 'rres_1', 'inrae_1', 'usask_1', \
50 | 'utokyo_1', 'utokyo_2', 'nau_1', 'uq_1':
51 | (dir / p).rename(dir / 'images' / p) # move to /images
52 | f = (dir / p).with_suffix('.json') # json file
53 | if f.exists():
54 | f.rename((dir / 'annotations' / p).with_suffix('.json')) # move to /annotations
55 |
--------------------------------------------------------------------------------
/ultralytics/datasets/SKU-110K.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail
3 | # Example usage: yolo train data=SKU-110K.yaml
4 | # parent
5 | # ├── ultralytics
6 | # └── datasets
7 | # └── SKU-110K ← downloads here (13.6 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/SKU-110K # dataset root dir
12 | train: train.txt # train images (relative to 'path') 8219 images
13 | val: val.txt # val images (relative to 'path') 588 images
14 | test: test.txt # test images (optional) 2936 images
15 |
16 | # Classes
17 | names:
18 | 0: object
19 |
20 |
21 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
22 | download: |
23 | import shutil
24 | from pathlib import Path
25 |
26 | import numpy as np
27 | import pandas as pd
28 | from tqdm import tqdm
29 |
30 | from ultralytics.yolo.utils.downloads import download
31 | from ultralytics.yolo.utils.ops import xyxy2xywh
32 |
33 | # Download
34 | dir = Path(yaml['path']) # dataset root dir
35 | parent = Path(dir.parent) # download dir
36 | urls = ['http://trax-geometry.s3.amazonaws.com/cvpr_challenge/SKU110K_fixed.tar.gz']
37 | download(urls, dir=parent)
38 |
39 | # Rename directories
40 | if dir.exists():
41 | shutil.rmtree(dir)
42 | (parent / 'SKU110K_fixed').rename(dir) # rename dir
43 | (dir / 'labels').mkdir(parents=True, exist_ok=True) # create labels dir
44 |
45 | # Convert labels
46 | names = 'image', 'x1', 'y1', 'x2', 'y2', 'class', 'image_width', 'image_height' # column names
47 | for d in 'annotations_train.csv', 'annotations_val.csv', 'annotations_test.csv':
48 | x = pd.read_csv(dir / 'annotations' / d, names=names).values # annotations
49 | images, unique_images = x[:, 0], np.unique(x[:, 0])
50 | with open((dir / d).with_suffix('.txt').__str__().replace('annotations_', ''), 'w') as f:
51 | f.writelines(f'./images/{s}\n' for s in unique_images)
52 | for im in tqdm(unique_images, desc=f'Converting {dir / d}'):
53 | cls = 0 # single-class dataset
54 | with open((dir / 'labels' / im).with_suffix('.txt'), 'a') as f:
55 | for r in x[images == im]:
56 | w, h = r[6], r[7] # image width, height
57 | xywh = xyxy2xywh(np.array([[r[1] / w, r[2] / h, r[3] / w, r[4] / h]]))[0] # instance
58 | f.write(f"{cls} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label
59 |
--------------------------------------------------------------------------------
/ultralytics/datasets/VOC.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford
3 | # Example usage: yolo train data=VOC.yaml
4 | # parent
5 | # ├── ultralytics
6 | # └── datasets
7 | # └── VOC ← downloads here (2.8 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/VOC
12 | train: # train images (relative to 'path') 16551 images
13 | - images/train2012
14 | - images/train2007
15 | - images/val2012
16 | - images/val2007
17 | val: # val images (relative to 'path') 4952 images
18 | - images/test2007
19 | test: # test images (optional)
20 | - images/test2007
21 |
22 | # Classes
23 | names:
24 | 0: aeroplane
25 | 1: bicycle
26 | 2: bird
27 | 3: boat
28 | 4: bottle
29 | 5: bus
30 | 6: car
31 | 7: cat
32 | 8: chair
33 | 9: cow
34 | 10: diningtable
35 | 11: dog
36 | 12: horse
37 | 13: motorbike
38 | 14: person
39 | 15: pottedplant
40 | 16: sheep
41 | 17: sofa
42 | 18: train
43 | 19: tvmonitor
44 |
45 |
46 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
47 | download: |
48 | import xml.etree.ElementTree as ET
49 |
50 | from tqdm import tqdm
51 | from ultralytics.yolo.utils.downloads import download
52 | from pathlib import Path
53 |
54 | def convert_label(path, lb_path, year, image_id):
55 | def convert_box(size, box):
56 | dw, dh = 1. / size[0], 1. / size[1]
57 | x, y, w, h = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1, box[1] - box[0], box[3] - box[2]
58 | return x * dw, y * dh, w * dw, h * dh
59 |
60 | in_file = open(path / f'VOC{year}/Annotations/{image_id}.xml')
61 | out_file = open(lb_path, 'w')
62 | tree = ET.parse(in_file)
63 | root = tree.getroot()
64 | size = root.find('size')
65 | w = int(size.find('width').text)
66 | h = int(size.find('height').text)
67 |
68 | names = list(yaml['names'].values()) # names list
69 | for obj in root.iter('object'):
70 | cls = obj.find('name').text
71 | if cls in names and int(obj.find('difficult').text) != 1:
72 | xmlbox = obj.find('bndbox')
73 | bb = convert_box((w, h), [float(xmlbox.find(x).text) for x in ('xmin', 'xmax', 'ymin', 'ymax')])
74 | cls_id = names.index(cls) # class id
75 | out_file.write(" ".join([str(a) for a in (cls_id, *bb)]) + '\n')
76 |
77 |
78 | # Download
79 | dir = Path(yaml['path']) # dataset root dir
80 | url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
81 | urls = [f'{url}VOCtrainval_06-Nov-2007.zip', # 446MB, 5012 images
82 | f'{url}VOCtest_06-Nov-2007.zip', # 438MB, 4953 images
83 | f'{url}VOCtrainval_11-May-2012.zip'] # 1.95GB, 17126 images
84 | download(urls, dir=dir / 'images', curl=True, threads=3)
85 |
86 | # Convert
87 | path = dir / 'images/VOCdevkit'
88 | for year, image_set in ('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test'):
89 | imgs_path = dir / 'images' / f'{image_set}{year}'
90 | lbs_path = dir / 'labels' / f'{image_set}{year}'
91 | imgs_path.mkdir(exist_ok=True, parents=True)
92 | lbs_path.mkdir(exist_ok=True, parents=True)
93 |
94 | with open(path / f'VOC{year}/ImageSets/Main/{image_set}.txt') as f:
95 | image_ids = f.read().strip().split()
96 | for id in tqdm(image_ids, desc=f'{image_set}{year}'):
97 | f = path / f'VOC{year}/JPEGImages/{id}.jpg' # old img path
98 | lb_path = (lbs_path / f.name).with_suffix('.txt') # new label path
99 | f.rename(imgs_path / f.name) # move image
100 | convert_label(path, lb_path, year, id) # convert labels to YOLO format
101 |
--------------------------------------------------------------------------------
/ultralytics/datasets/VisDrone.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University
3 | # Example usage: yolo train data=VisDrone.yaml
4 | # parent
5 | # ├── ultralytics
6 | # └── datasets
7 | # └── VisDrone ← downloads here (2.3 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/VisDrone # dataset root dir
12 | train: VisDrone2019-DET-train/images # train images (relative to 'path') 6471 images
13 | val: VisDrone2019-DET-val/images # val images (relative to 'path') 548 images
14 | test: VisDrone2019-DET-test-dev/images # test images (optional) 1610 images
15 |
16 | # Classes
17 | names:
18 | 0: pedestrian
19 | 1: people
20 | 2: bicycle
21 | 3: car
22 | 4: van
23 | 5: truck
24 | 6: tricycle
25 | 7: awning-tricycle
26 | 8: bus
27 | 9: motor
28 |
29 |
30 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
31 | download: |
32 | import os
33 | from pathlib import Path
34 |
35 | from ultralytics.yolo.utils.downloads import download
36 |
37 | def visdrone2yolo(dir):
38 | from PIL import Image
39 | from tqdm import tqdm
40 |
41 | def convert_box(size, box):
42 | # Convert VisDrone box to YOLO xywh box
43 | dw = 1. / size[0]
44 | dh = 1. / size[1]
45 | return (box[0] + box[2] / 2) * dw, (box[1] + box[3] / 2) * dh, box[2] * dw, box[3] * dh
46 |
47 | (dir / 'labels').mkdir(parents=True, exist_ok=True) # make labels directory
48 | pbar = tqdm((dir / 'annotations').glob('*.txt'), desc=f'Converting {dir}')
49 | for f in pbar:
50 | img_size = Image.open((dir / 'images' / f.name).with_suffix('.jpg')).size
51 | lines = []
52 | with open(f, 'r') as file: # read annotation.txt
53 | for row in [x.split(',') for x in file.read().strip().splitlines()]:
54 | if row[4] == '0': # VisDrone 'ignored regions' class 0
55 | continue
56 | cls = int(row[5]) - 1
57 | box = convert_box(img_size, tuple(map(int, row[:4])))
58 | lines.append(f"{cls} {' '.join(f'{x:.6f}' for x in box)}\n")
59 | with open(str(f).replace(f'{os.sep}annotations{os.sep}', f'{os.sep}labels{os.sep}'), 'w') as fl:
60 | fl.writelines(lines) # write label.txt
61 |
62 |
63 | # Download
64 | dir = Path(yaml['path']) # dataset root dir
65 | urls = ['https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-train.zip',
66 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-val.zip',
67 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip',
68 | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-challenge.zip']
69 | download(urls, dir=dir, curl=True, threads=4)
70 |
71 | # Convert
72 | for d in 'VisDrone2019-DET-train', 'VisDrone2019-DET-val', 'VisDrone2019-DET-test-dev':
73 | visdrone2yolo(dir / d) # convert VisDrone annotations to YOLO labels
74 |
--------------------------------------------------------------------------------
/ultralytics/datasets/coco-pose.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # COCO 2017 dataset http://cocodataset.org by Microsoft
3 | # Example usage: yolo train data=coco-pose.yaml
4 | # parent
5 | # ├── ultralytics
6 | # └── datasets
7 | # └── coco-pose ← downloads here (20.1 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco-pose # dataset root dir
12 | train: train2017.txt # train images (relative to 'path') 118287 images
13 | val: val2017.txt # val images (relative to 'path') 5000 images
14 | test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
15 |
16 | # Keypoints
17 | kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
18 | flip_idx: [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
19 |
20 | # Classes
21 | names:
22 | 0: person
23 |
24 | # Download script/URL (optional)
25 | download: |
26 | from ultralytics.yolo.utils.downloads import download
27 | from pathlib import Path
28 |
29 | # Download labels
30 | dir = Path(yaml['path']) # dataset root dir
31 | url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
32 | urls = [url + 'coco2017labels-pose.zip'] # labels
33 | download(urls, dir=dir.parent)
34 | # Download data
35 | urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images
36 | 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images
37 | 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional)
38 | download(urls, dir=dir / 'images', threads=3)
39 |
--------------------------------------------------------------------------------
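The flip_idx above pairs left/right COCO keypoints so labels stay consistent under horizontal-flip augmentation; an illustrative reduction of how it is applied (the real logic lives in ultralytics/yolo/data/augment.py, not shown here):

```python
# Illustrative: apply flip_idx when an image is mirrored horizontally.
flip_idx = [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
kpts = [(0.25 + 0.01 * i, 0.5) for i in range(17)]            # dummy normalized (x, y)
flipped = [(1.0 - kpts[j][0], kpts[j][1]) for j in flip_idx]  # mirror x, swap L/R pairs
print(flipped[1], flipped[2])  # left/right eye coordinates have traded places
```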
/ultralytics/datasets/coco.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # COCO 2017 dataset http://cocodataset.org by Microsoft
3 | # Example usage: yolo train data=coco.yaml
4 | # parent
5 | # ├── ultralytics
6 | # └── datasets
7 | # └── coco ← downloads here (20.1 GB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco # dataset root dir
12 | train: train2017.txt # train images (relative to 'path') 118287 images
13 | val: val2017.txt # val images (relative to 'path') 5000 images
14 | test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | download: |
102 | from ultralytics.yolo.utils.downloads import download
103 | from pathlib import Path
104 |
105 | # Download labels
106 | segments = True # segment or box labels
107 | dir = Path(yaml['path']) # dataset root dir
108 | url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
109 | urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels
110 | download(urls, dir=dir.parent)
111 | # Download data
112 | urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images
113 | 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images
114 | 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional)
115 | download(urls, dir=dir / 'images', threads=3)
116 |
--------------------------------------------------------------------------------
/ultralytics/datasets/coco128-seg.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # COCO128-seg dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
3 | # Example usage: yolo train data=coco128-seg.yaml
4 | # parent
5 | # ├── ultralytics
6 | # └── datasets
7 | # └── coco128-seg ← downloads here (7 MB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco128-seg # dataset root dir
12 | train: images/train2017 # train images (relative to 'path') 128 images
13 | val: images/train2017 # val images (relative to 'path') 128 images
14 | test: # test images (optional)
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | download: https://ultralytics.com/assets/coco128-seg.zip
102 |
--------------------------------------------------------------------------------
/ultralytics/datasets/coco128.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
3 | # Example usage: yolo train data=coco128.yaml
4 | # parent
5 | # ├── ultralytics
6 | # └── datasets
7 | # └── coco128 ← downloads here (7 MB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco128 # dataset root dir
12 | train: images/train2017 # train images (relative to 'path') 128 images
13 | val: images/train2017 # val images (relative to 'path') 128 images
14 | test: # test images (optional)
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | download: https://ultralytics.com/assets/coco128.zip
102 |
--------------------------------------------------------------------------------
/ultralytics/datasets/coco8-pose.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # COCO8-pose dataset (first 8 images from COCO train2017) by Ultralytics
3 | # Example usage: yolo train data=coco8-pose.yaml
4 | # parent
5 | # ├── ultralytics
6 | # └── datasets
7 | # └── coco8-pose ← downloads here (1 MB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco8-pose # dataset root dir
12 | train: images/train # train images (relative to 'path') 4 images
13 | val: images/val # val images (relative to 'path') 4 images
14 | test: # test images (optional)
15 |
16 | # Keypoints
17 | kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
18 | flip_idx: [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
19 |
20 | # Classes
21 | names:
22 | 0: person
23 |
24 | # Download script/URL (optional)
25 | download: https://ultralytics.com/assets/coco8-pose.zip
26 |
--------------------------------------------------------------------------------
/ultralytics/datasets/coco8-seg.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # COCO8-seg dataset (first 8 images from COCO train2017) by Ultralytics
3 | # Example usage: yolo train data=coco8-seg.yaml
4 | # parent
5 | # ├── ultralytics
6 | # └── datasets
7 | # └── coco8-seg ← downloads here (1 MB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco8-seg # dataset root dir
12 | train: images/train # train images (relative to 'path') 4 images
13 | val: images/val # val images (relative to 'path') 4 images
14 | test: # test images (optional)
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | download: https://ultralytics.com/assets/coco8-seg.zip
102 |
--------------------------------------------------------------------------------
/ultralytics/datasets/coco8.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # COCO8 dataset (first 8 images from COCO train2017) by Ultralytics
3 | # Example usage: yolo train data=coco8.yaml
4 | # parent
5 | # ├── ultralytics
6 | # └── datasets
7 | # └── coco8 ← downloads here (1 MB)
8 |
9 |
10 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11 | path: ../datasets/coco8 # dataset root dir
12 | train: images/train # train images (relative to 'path') 4 images
13 | val: images/val # val images (relative to 'path') 4 images
14 | test: # test images (optional)
15 |
16 | # Classes
17 | names:
18 | 0: person
19 | 1: bicycle
20 | 2: car
21 | 3: motorcycle
22 | 4: airplane
23 | 5: bus
24 | 6: train
25 | 7: truck
26 | 8: boat
27 | 9: traffic light
28 | 10: fire hydrant
29 | 11: stop sign
30 | 12: parking meter
31 | 13: bench
32 | 14: bird
33 | 15: cat
34 | 16: dog
35 | 17: horse
36 | 18: sheep
37 | 19: cow
38 | 20: elephant
39 | 21: bear
40 | 22: zebra
41 | 23: giraffe
42 | 24: backpack
43 | 25: umbrella
44 | 26: handbag
45 | 27: tie
46 | 28: suitcase
47 | 29: frisbee
48 | 30: skis
49 | 31: snowboard
50 | 32: sports ball
51 | 33: kite
52 | 34: baseball bat
53 | 35: baseball glove
54 | 36: skateboard
55 | 37: surfboard
56 | 38: tennis racket
57 | 39: bottle
58 | 40: wine glass
59 | 41: cup
60 | 42: fork
61 | 43: knife
62 | 44: spoon
63 | 45: bowl
64 | 46: banana
65 | 47: apple
66 | 48: sandwich
67 | 49: orange
68 | 50: broccoli
69 | 51: carrot
70 | 52: hot dog
71 | 53: pizza
72 | 54: donut
73 | 55: cake
74 | 56: chair
75 | 57: couch
76 | 58: potted plant
77 | 59: bed
78 | 60: dining table
79 | 61: toilet
80 | 62: tv
81 | 63: laptop
82 | 64: mouse
83 | 65: remote
84 | 66: keyboard
85 | 67: cell phone
86 | 68: microwave
87 | 69: oven
88 | 70: toaster
89 | 71: sink
90 | 72: refrigerator
91 | 73: book
92 | 74: clock
93 | 75: vase
94 | 76: scissors
95 | 77: teddy bear
96 | 78: hair drier
97 | 79: toothbrush
98 |
99 |
100 | # Download script/URL (optional)
101 | download: https://ultralytics.com/assets/coco8.zip
102 |
--------------------------------------------------------------------------------
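The `Example usage` headers in these dataset files refer to the CLI; the Python equivalent for the tiny coco8 set looks like this (a sketch; the yolov8n.pt weights file is an assumption):

```python
# Python equivalent of `yolo train data=coco8.yaml`.
from ultralytics import YOLO

model = YOLO('yolov8n.pt')
model.train(data='coco8.yaml', epochs=3, imgsz=640)  # coco8 downloads automatically
```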
/ultralytics/datasets/xView.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # DIUx xView 2018 Challenge https://challenge.xviewdataset.org by U.S. National Geospatial-Intelligence Agency (NGA)
3 | # -------- DOWNLOAD DATA MANUALLY and jar xf val_images.zip to 'datasets/xView' before running train command! --------
4 | # Example usage: yolo train data=xView.yaml
5 | # parent
6 | # ├── ultralytics
7 | # └── datasets
8 | # └── xView ← downloads here (20.7 GB)
9 |
10 |
11 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12 | path: ../datasets/xView # dataset root dir
13 | train: images/autosplit_train.txt # train images (relative to 'path') 90% of 847 train images
14 | val: images/autosplit_val.txt # val images (relative to 'path') 10% of 847 train images
15 |
16 | # Classes
17 | names:
18 | 0: Fixed-wing Aircraft
19 | 1: Small Aircraft
20 | 2: Cargo Plane
21 | 3: Helicopter
22 | 4: Passenger Vehicle
23 | 5: Small Car
24 | 6: Bus
25 | 7: Pickup Truck
26 | 8: Utility Truck
27 | 9: Truck
28 | 10: Cargo Truck
29 | 11: Truck w/Box
30 | 12: Truck Tractor
31 | 13: Trailer
32 | 14: Truck w/Flatbed
33 | 15: Truck w/Liquid
34 | 16: Crane Truck
35 | 17: Railway Vehicle
36 | 18: Passenger Car
37 | 19: Cargo Car
38 | 20: Flat Car
39 | 21: Tank car
40 | 22: Locomotive
41 | 23: Maritime Vessel
42 | 24: Motorboat
43 | 25: Sailboat
44 | 26: Tugboat
45 | 27: Barge
46 | 28: Fishing Vessel
47 | 29: Ferry
48 | 30: Yacht
49 | 31: Container Ship
50 | 32: Oil Tanker
51 | 33: Engineering Vehicle
52 | 34: Tower crane
53 | 35: Container Crane
54 | 36: Reach Stacker
55 | 37: Straddle Carrier
56 | 38: Mobile Crane
57 | 39: Dump Truck
58 | 40: Haul Truck
59 | 41: Scraper/Tractor
60 | 42: Front loader/Bulldozer
61 | 43: Excavator
62 | 44: Cement Mixer
63 | 45: Ground Grader
64 | 46: Hut/Tent
65 | 47: Shed
66 | 48: Building
67 | 49: Aircraft Hangar
68 | 50: Damaged Building
69 | 51: Facility
70 | 52: Construction Site
71 | 53: Vehicle Lot
72 | 54: Helipad
73 | 55: Storage Tank
74 | 56: Shipping container lot
75 | 57: Shipping Container
76 | 58: Pylon
77 | 59: Tower
78 |
79 |
80 | # Download script/URL (optional) ---------------------------------------------------------------------------------------
81 | download: |
82 | import json
83 | import os
84 | from pathlib import Path
85 |
86 | import numpy as np
87 | from PIL import Image
88 | from tqdm import tqdm
89 |
90 | from ultralytics.yolo.data.dataloaders.v5loader import autosplit
91 | from ultralytics.yolo.utils.ops import xyxy2xywhn
92 |
93 |
94 | def convert_labels(fname=Path('xView/xView_train.geojson')):
95 | # Convert xView geoJSON labels to YOLO format
96 | path = fname.parent
97 | with open(fname) as f:
98 | print(f'Loading {fname}...')
99 | data = json.load(f)
100 |
101 | # Make dirs
102 | labels = Path(path / 'labels' / 'train')
103 | os.system(f'rm -rf {labels}')
104 | labels.mkdir(parents=True, exist_ok=True)
105 |
106 | # xView classes 11-94 to 0-59
107 | xview_class2index = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, -1, 9, 10, 11,
108 | 12, 13, 14, 15, -1, -1, 16, 17, 18, 19, 20, 21, 22, -1, 23, 24, 25, -1, 26, 27, -1, 28, -1,
109 | 29, 30, 31, 32, 33, 34, 35, 36, 37, -1, 38, 39, 40, 41, 42, 43, 44, 45, -1, -1, -1, -1, 46,
110 | 47, 48, 49, -1, 50, 51, -1, 52, -1, -1, -1, 53, 54, -1, 55, -1, -1, 56, -1, 57, -1, 58, 59]
111 |
112 | shapes = {}
113 | for feature in tqdm(data['features'], desc=f'Converting {fname}'):
114 | p = feature['properties']
115 | if p['bounds_imcoords']:
116 | id = p['image_id']
117 | file = path / 'train_images' / id
118 | if file.exists(): # 1395.tif missing
119 | try:
120 | box = np.array([int(num) for num in p['bounds_imcoords'].split(",")])
121 | assert box.shape[0] == 4, f'incorrect box shape {box.shape[0]}'
122 | cls = p['type_id']
123 | cls = xview_class2index[int(cls)] # map xView class 11-94 to 0-59
124 | assert 59 >= cls >= 0, f'incorrect class index {cls}'
125 |
126 | # Write YOLO label
127 | if id not in shapes:
128 | shapes[id] = Image.open(file).size
129 | box = xyxy2xywhn(box[None].astype(float), w=shapes[id][0], h=shapes[id][1], clip=True)  # np.float was removed in NumPy 1.24
130 | with open((labels / id).with_suffix('.txt'), 'a') as f:
131 | f.write(f"{cls} {' '.join(f'{x:.6f}' for x in box[0])}\n") # write label.txt
132 | except Exception as e:
133 | print(f'WARNING: skipping one label for {file}: {e}')
134 |
135 |
136 | # Download manually from https://challenge.xviewdataset.org
137 | dir = Path(yaml['path']) # dataset root dir
138 | # urls = ['https://d307kc0mrhucc3.cloudfront.net/train_labels.zip', # train labels
139 | # 'https://d307kc0mrhucc3.cloudfront.net/train_images.zip', # 15G, 847 train images
140 | # 'https://d307kc0mrhucc3.cloudfront.net/val_images.zip'] # 5G, 282 val images (no labels)
141 | # download(urls, dir=dir)
142 |
143 | # Convert labels
144 | convert_labels(dir / 'xView_train.geojson')
145 |
146 | # Move images
147 | images = Path(dir / 'images')
148 | images.mkdir(parents=True, exist_ok=True)
149 | Path(dir / 'train_images').rename(dir / 'images' / 'train')
150 | Path(dir / 'val_images').rename(dir / 'images' / 'val')
151 |
152 | # Split
153 | autosplit(dir / 'images' / 'train')
154 |
--------------------------------------------------------------------------------
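The conversion script above hinges on xyxy2xywhn, which turns pixel-space corner boxes into the normalized center-x, center-y, width, height format YOLO labels use. A minimal NumPy reference of that transform (a sketch only; the real helper also supports clipping) looks like this:

    import numpy as np

    def xyxy2xywhn_ref(boxes, w, h):
        # boxes: (N, 4) array of [x1, y1, x2, y2] in pixels; w, h: image size
        x1, y1, x2, y2 = boxes.T
        return np.stack([(x1 + x2) / 2 / w,       # normalized x center
                         (y1 + y2) / 2 / h,       # normalized y center
                         (x2 - x1) / w,           # normalized width
                         (y2 - y1) / h], axis=1)  # normalized height

    print(xyxy2xywhn_ref(np.array([[10., 20., 110., 220.]]), w=1000, h=1000))
    # [[0.06 0.12 0.1  0.2 ]]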
/ultralytics/hub/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import requests
4 |
5 | from ultralytics.hub.auth import Auth
6 | from ultralytics.hub.utils import PREFIX
7 | from ultralytics.yolo.utils import LOGGER, SETTINGS, USER_CONFIG_DIR, yaml_save
8 |
9 |
10 | def login(api_key=''):
11 | """
12 | Log in to the Ultralytics HUB API using the provided API key.
13 |
14 | Args:
15 | api_key (str, optional): May be an API key or a combination API key and model ID, i.e. key_id
16 |
17 | Example:
18 | from ultralytics import hub
19 | hub.login('API_KEY')
20 | """
21 | Auth(api_key, verbose=True)
22 |
23 |
24 | def logout():
25 | """
26 | Log out of Ultralytics HUB by removing the API key from the settings file. To log in again, use 'yolo hub login'.
27 |
28 | Example:
29 | from ultralytics import hub
30 | hub.logout()
31 | """
32 | SETTINGS['api_key'] = ''
33 | yaml_save(USER_CONFIG_DIR / 'settings.yaml', SETTINGS)
34 | LOGGER.info(f"{PREFIX}logged out ✅. To log in again, use 'yolo hub login'.")
35 |
36 |
37 | def start(key=''):
38 | """
39 | Start training models with Ultralytics HUB (DEPRECATED).
40 |
41 | Args:
42 | key (str, optional): A string containing either the API key and model ID combination (apikey_modelid),
43 | or the full model URL (https://hub.ultralytics.com/models/apikey_modelid).
44 | """
45 | api_key, model_id = key.split('_')
46 | LOGGER.warning(f"""
47 | WARNING ⚠️ ultralytics.start() is deprecated after 8.0.60. Updated usage for training Ultralytics HUB models is:
48 |
49 | from ultralytics import YOLO, hub
50 |
51 | hub.login('{api_key}')
52 | model = YOLO('https://hub.ultralytics.com/models/{model_id}')
53 | model.train()""")
54 |
55 |
56 | def reset_model(model_id=''):
57 | # Reset a trained model to an untrained state
58 | r = requests.post('https://api.ultralytics.com/model-reset', json={'apiKey': Auth().api_key, 'modelId': model_id})
59 | if r.status_code == 200:
60 | LOGGER.info(f'{PREFIX}Model reset successfully')
61 | return
62 | LOGGER.warning(f'{PREFIX}Model reset failure {r.status_code} {r.reason}')
63 |
64 |
65 | def export_fmts_hub():
66 | # Returns a list of HUB-supported export formats
67 | from ultralytics.yolo.engine.exporter import export_formats
68 | return list(export_formats()['Argument'][1:]) + ['ultralytics_tflite', 'ultralytics_coreml']
69 |
70 |
71 | def export_model(model_id='', format='torchscript'):
72 | # Export a model to the specified format
73 | assert format in export_fmts_hub(), f"Unsupported export format '{format}', valid formats are {export_fmts_hub()}"
74 | r = requests.post('https://api.ultralytics.com/export',
75 | json={
76 | 'apiKey': Auth().api_key,
77 | 'modelId': model_id,
78 | 'format': format})
79 | assert r.status_code == 200, f'{PREFIX}{format} export failure {r.status_code} {r.reason}'
80 | LOGGER.info(f'{PREFIX}{format} export started ✅')
81 |
82 |
83 | def get_export(model_id='', format='torchscript'):
84 | # Get an exported model dictionary with download URL
85 | assert format in export_fmts_hub(), f"Unsupported export format '{format}', valid formats are {export_fmts_hub()}"
86 | r = requests.post('https://api.ultralytics.com/get-export',
87 | json={
88 | 'apiKey': Auth().api_key,
89 | 'modelId': model_id,
90 | 'format': format})
91 | assert r.status_code == 200, f'{PREFIX}{format} get_export failure {r.status_code} {r.reason}'
92 | return r.json()
93 |
94 |
95 | if __name__ == '__main__':
96 | start()
97 |
--------------------------------------------------------------------------------
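Putting the helpers above together, a typical HUB session is: log in once, request a server-side export, then fetch the export metadata with its download URL. 'API_KEY' and 'MODEL_ID' below are placeholders:

    from ultralytics import hub

    hub.login('API_KEY')                                        # authenticate once per session
    hub.export_model(model_id='MODEL_ID', format='onnx')        # kick off a server-side export
    meta = hub.get_export(model_id='MODEL_ID', format='onnx')   # dict containing the download URL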
/ultralytics/hub/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/hub/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/hub/__pycache__/auth.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/hub/__pycache__/auth.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/hub/__pycache__/session.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/hub/__pycache__/session.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/hub/__pycache__/utils.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/hub/__pycache__/utils.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/hub/auth.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import requests
4 |
5 | from ultralytics.hub.utils import HUB_API_ROOT, PREFIX, request_with_credentials
6 | from ultralytics.yolo.utils import LOGGER, SETTINGS, emojis, is_colab, set_settings
7 |
8 | API_KEY_URL = 'https://hub.ultralytics.com/settings?tab=api+keys'
9 |
10 |
11 | class Auth:
12 | id_token = api_key = model_key = False
13 |
14 | def __init__(self, api_key='', verbose=False):
15 | """
16 | Initialize the Auth class with an optional API key.
17 |
18 | Args:
19 | api_key (str, optional): May be an API key or a combination API key and model ID, i.e. key_id
20 | """
21 | # Split the input API key in case it contains a combined key_model and keep only the API key part
22 | api_key = api_key.split('_')[0]
23 |
24 | # Set API key attribute as value passed or SETTINGS API key if none passed
25 | self.api_key = api_key or SETTINGS.get('api_key', '')
26 |
27 | # If an API key is provided
28 | if self.api_key:
29 | # If the provided API key matches the API key in the SETTINGS
30 | if self.api_key == SETTINGS.get('api_key'):
31 | # Log that the user is already logged in
32 | if verbose:
33 | LOGGER.info(f'{PREFIX}Authenticated ✅')
34 | return
35 | else:
36 | # Attempt to authenticate with the provided API key
37 | success = self.authenticate()
38 | # If the API key is not provided and the environment is a Google Colab notebook
39 | elif is_colab():
40 | # Attempt to authenticate using browser cookies
41 | success = self.auth_with_cookies()
42 | else:
43 | # Request an API key
44 | success = self.request_api_key()
45 |
46 | # Update SETTINGS with the new API key after successful authentication
47 | if success:
48 | set_settings({'api_key': self.api_key})
49 | # Log that the new login was successful
50 | if verbose:
51 | LOGGER.info(f'{PREFIX}New authentication successful ✅')
52 | elif verbose:
53 | LOGGER.info(f'{PREFIX}Retrieve API key from {API_KEY_URL}')
54 |
55 | def request_api_key(self, max_attempts=3):
56 | """
57 | Prompt the user to input their API key. Returns True once authentication succeeds.
58 | """
59 | import getpass
60 | for attempts in range(max_attempts):
61 | LOGGER.info(f'{PREFIX}Login. Attempt {attempts + 1} of {max_attempts}')
62 | input_key = getpass.getpass(f'Enter API key from {API_KEY_URL} ')
63 | self.api_key = input_key.split('_')[0] # remove model id if present
64 | if self.authenticate():
65 | return True
66 | raise ConnectionError(emojis(f'{PREFIX}Failed to authenticate ❌'))
67 |
68 | def authenticate(self) -> bool:
69 | """
70 | Attempt to authenticate with the server using either id_token or API key.
71 |
72 | Returns:
73 | bool: True if authentication is successful, False otherwise.
74 | """
75 | try:
76 | header = self.get_auth_header()
77 | if header:
78 | r = requests.post(f'{HUB_API_ROOT}/v1/auth', headers=header)
79 | if not r.json().get('success', False):
80 | raise ConnectionError('Unable to authenticate.')
81 | return True
82 | raise ConnectionError('User has not authenticated locally.')
83 | except ConnectionError:
84 | self.id_token = self.api_key = False # reset invalid
85 | LOGGER.warning(f'{PREFIX}Invalid API key ⚠️')
86 | return False
87 |
88 | def auth_with_cookies(self) -> bool:
89 | """
90 | Attempt to fetch authentication via cookies and set id_token.
91 | User must be logged in to HUB and running in a supported browser.
92 |
93 | Returns:
94 | bool: True if authentication is successful, False otherwise.
95 | """
96 | if not is_colab():
97 | return False # Currently only works with Colab
98 | try:
99 | authn = request_with_credentials(f'{HUB_API_ROOT}/v1/auth/auto')
100 | if authn.get('success', False):
101 | self.id_token = authn.get('data', {}).get('idToken', None)
102 | self.authenticate()
103 | return True
104 | raise ConnectionError('Unable to fetch browser authentication details.')
105 | except ConnectionError:
106 | self.id_token = False # reset invalid
107 | return False
108 |
109 | def get_auth_header(self):
110 | """
111 | Get the authentication header for making API requests.
112 |
113 | Returns:
114 | dict: The authentication header if id_token or API key is set, None otherwise.
115 | """
116 | if self.id_token:
117 | return {'authorization': f'Bearer {self.id_token}'}
118 | elif self.api_key:
119 | return {'x-api-key': self.api_key}
120 | else:
121 | return None
122 |
123 | def get_state(self) -> bool:
124 | """
125 | Get the authentication state.
126 |
127 | Returns:
128 | bool: True if either id_token or API key is set, False otherwise.
129 | """
130 | return bool(self.id_token or self.api_key)  # coerce to bool per the annotated return type
131 |
132 | def set_api_key(self, key: str):
133 | """
134 | Set the API key for authentication.
135 |
136 | Args:
137 | key (str): The API key string.
138 | """
139 | self.api_key = key
140 |
--------------------------------------------------------------------------------
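In practice Auth is driven indirectly via hub.login(), but it can also be used directly. A short sketch with a placeholder key:

    from ultralytics.hub.auth import Auth

    auth = Auth('API_KEY')           # 'API_KEY' is a placeholder; validates against the HUB backend
    header = auth.get_auth_header()  # {'x-api-key': ...}, a Bearer header, or None if unauthenticated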
/ultralytics/models/v3/yolov3-spp.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 |
8 | # darknet53 backbone
9 | backbone:
10 | # [from, number, module, args]
11 | [[-1, 1, Conv, [32, 3, 1]], # 0
12 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2
13 | [-1, 1, Bottleneck, [64]],
14 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4
15 | [-1, 2, Bottleneck, [128]],
16 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8
17 | [-1, 8, Bottleneck, [256]],
18 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16
19 | [-1, 8, Bottleneck, [512]],
20 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32
21 | [-1, 4, Bottleneck, [1024]], # 10
22 | ]
23 |
24 | # YOLOv3-SPP head
25 | head:
26 | [[-1, 1, Bottleneck, [1024, False]],
27 | [-1, 1, SPP, [512, [5, 9, 13]]],
28 | [-1, 1, Conv, [1024, 3, 1]],
29 | [-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large)
31 |
32 | [-2, 1, Conv, [256, 1, 1]],
33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
34 | [[-1, 8], 1, Concat, [1]], # cat backbone P4
35 | [-1, 1, Bottleneck, [512, False]],
36 | [-1, 1, Bottleneck, [512, False]],
37 | [-1, 1, Conv, [256, 1, 1]],
38 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium)
39 |
40 | [-2, 1, Conv, [128, 1, 1]],
41 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
42 | [[-1, 6], 1, Concat, [1]], # cat backbone P3
43 | [-1, 1, Bottleneck, [256, False]],
44 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small)
45 |
46 | [[27, 22, 15], 1, Detect, [nc]], # Detect(P3, P4, P5)
47 | ]
48 |
--------------------------------------------------------------------------------
/ultralytics/models/v3/yolov3-tiny.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 |
8 | # YOLOv3-tiny backbone
9 | backbone:
10 | # [from, number, module, args]
11 | [[-1, 1, Conv, [16, 3, 1]], # 0
12 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2
13 | [-1, 1, Conv, [32, 3, 1]],
14 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4
15 | [-1, 1, Conv, [64, 3, 1]],
16 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8
17 | [-1, 1, Conv, [128, 3, 1]],
18 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16
19 | [-1, 1, Conv, [256, 3, 1]],
20 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32
21 | [-1, 1, Conv, [512, 3, 1]],
22 | [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11
23 | [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12
24 | ]
25 |
26 | # YOLOv3-tiny head
27 | head:
28 | [[-1, 1, Conv, [1024, 3, 1]],
29 | [-1, 1, Conv, [256, 1, 1]],
30 | [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large)
31 |
32 | [-2, 1, Conv, [128, 1, 1]],
33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
34 | [[-1, 8], 1, Concat, [1]], # cat backbone P4
35 | [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium)
36 |
37 | [[19, 15], 1, Detect, [nc]], # Detect(P4, P5)
38 | ]
39 |
--------------------------------------------------------------------------------
/ultralytics/models/v3/yolov3.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | depth_multiple: 1.0 # model depth multiple
6 | width_multiple: 1.0 # layer channel multiple
7 |
8 | # darknet53 backbone
9 | backbone:
10 | # [from, number, module, args]
11 | [[-1, 1, Conv, [32, 3, 1]], # 0
12 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2
13 | [-1, 1, Bottleneck, [64]],
14 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4
15 | [-1, 2, Bottleneck, [128]],
16 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8
17 | [-1, 8, Bottleneck, [256]],
18 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16
19 | [-1, 8, Bottleneck, [512]],
20 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32
21 | [-1, 4, Bottleneck, [1024]], # 10
22 | ]
23 |
24 | # YOLOv3 head
25 | head:
26 | [[-1, 1, Bottleneck, [1024, False]],
27 | [-1, 1, Conv, [512, 1, 1]],
28 | [-1, 1, Conv, [1024, 3, 1]],
29 | [-1, 1, Conv, [512, 1, 1]],
30 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large)
31 |
32 | [-2, 1, Conv, [256, 1, 1]],
33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
34 | [[-1, 8], 1, Concat, [1]], # cat backbone P4
35 | [-1, 1, Bottleneck, [512, False]],
36 | [-1, 1, Bottleneck, [512, False]],
37 | [-1, 1, Conv, [256, 1, 1]],
38 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium)
39 |
40 | [-2, 1, Conv, [128, 1, 1]],
41 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
42 | [[-1, 6], 1, Concat, [1]], # cat backbone P3
43 | [-1, 1, Bottleneck, [256, False]],
44 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small)
45 |
46 | [[27, 22, 15], 1, Detect, [nc]], # Detect(P3, P4, P5)
47 | ]
48 |
--------------------------------------------------------------------------------
/ultralytics/models/v5/yolov5-p6.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | scales: # model compound scaling constants, i.e. 'model=yolov5n-p6.yaml' will call yolov5-p6.yaml with scale 'n'
6 | # [depth, width, max_channels]
7 | n: [0.33, 0.25, 1024]
8 | s: [0.33, 0.50, 1024]
9 | m: [0.67, 0.75, 1024]
10 | l: [1.00, 1.00, 1024]
11 | x: [1.33, 1.25, 1024]
12 |
13 | # YOLOv5 v6.0 backbone
14 | backbone:
15 | # [from, number, module, args]
16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
18 | [-1, 3, C3, [128]],
19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
20 | [-1, 6, C3, [256]],
21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
22 | [-1, 9, C3, [512]],
23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
24 | [-1, 3, C3, [768]],
25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
26 | [-1, 3, C3, [1024]],
27 | [-1, 1, SPPF, [1024, 5]], # 11
28 | ]
29 |
30 | # YOLOv5 v6.0 head
31 | head:
32 | [[-1, 1, Conv, [768, 1, 1]],
33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5
35 | [-1, 3, C3, [768, False]], # 15
36 |
37 | [-1, 1, Conv, [512, 1, 1]],
38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
40 | [-1, 3, C3, [512, False]], # 19
41 |
42 | [-1, 1, Conv, [256, 1, 1]],
43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small)
46 |
47 | [-1, 1, Conv, [256, 3, 2]],
48 | [[-1, 20], 1, Concat, [1]], # cat head P4
49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
50 |
51 | [-1, 1, Conv, [512, 3, 2]],
52 | [[-1, 16], 1, Concat, [1]], # cat head P5
53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large)
54 |
55 | [-1, 1, Conv, [768, 3, 2]],
56 | [[-1, 12], 1, Concat, [1]], # cat head P6
57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
58 |
59 | [[23, 26, 29, 32], 1, Detect, [nc]], # Detect(P3, P4, P5, P6)
60 | ]
61 |
--------------------------------------------------------------------------------
/ultralytics/models/v5/yolov5.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | # Parameters
4 | nc: 80 # number of classes
5 | scales: # model compound scaling constants, i.e. 'model=yolov5n.yaml' will call yolov5.yaml with scale 'n'
6 | # [depth, width, max_channels]
7 | n: [0.33, 0.25, 1024]
8 | s: [0.33, 0.50, 1024]
9 | m: [0.67, 0.75, 1024]
10 | l: [1.00, 1.00, 1024]
11 | x: [1.33, 1.25, 1024]
12 |
13 | # YOLOv5 v6.0 backbone
14 | backbone:
15 | # [from, number, module, args]
16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
18 | [-1, 3, C3, [128]],
19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
20 | [-1, 6, C3, [256]],
21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
22 | [-1, 9, C3, [512]],
23 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
24 | [-1, 3, C3, [1024]],
25 | [-1, 1, SPPF, [1024, 5]], # 9
26 | ]
27 |
28 | # YOLOv5 v6.0 head
29 | head:
30 | [[-1, 1, Conv, [512, 1, 1]],
31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4
33 | [-1, 3, C3, [512, False]], # 13
34 |
35 | [-1, 1, Conv, [256, 1, 1]],
36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']],
37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3
38 | [-1, 3, C3, [256, False]], # 17 (P3/8-small)
39 |
40 | [-1, 1, Conv, [256, 3, 2]],
41 | [[-1, 14], 1, Concat, [1]], # cat head P4
42 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
43 |
44 | [-1, 1, Conv, [512, 3, 2]],
45 | [[-1, 10], 1, Concat, [1]], # cat head P5
46 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
47 |
48 | [[17, 20, 23], 1, Detect, [nc]], # Detect(P3, P4, P5)
49 | ]
50 |
--------------------------------------------------------------------------------
/ultralytics/models/v8/yolov8-cls.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # YOLOv8-cls image classification model. For Usage examples see https://docs.ultralytics.com/tasks/classify
3 |
4 | # Parameters
5 | nc: 1000 # number of classes
6 | scales: # model compound scaling constants, i.e. 'model=yolov8n-cls.yaml' will call yolov8-cls.yaml with scale 'n'
7 | # [depth, width, max_channels]
8 | n: [0.33, 0.25, 1024]
9 | s: [0.33, 0.50, 1024]
10 | m: [0.67, 0.75, 1024]
11 | l: [1.00, 1.00, 1024]
12 | x: [1.00, 1.25, 1024]
13 |
14 | # YOLOv8.0n backbone
15 | backbone:
16 | # [from, repeats, module, args]
17 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
18 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
19 | - [-1, 3, C2f, [128, True]]
20 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
21 | - [-1, 6, C2f, [256, True]]
22 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
23 | - [-1, 6, C2f, [512, True]]
24 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
25 | - [-1, 3, C2f, [1024, True]]
26 |
27 | # YOLOv8.0n head
28 | head:
29 | - [-1, 1, Classify, [nc]] # Classify
30 |
--------------------------------------------------------------------------------
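Building from this config follows the scale-suffix convention described in the scales comment. A minimal classification sketch (the weights file name and image path are assumptions):

    from ultralytics import YOLO

    model = YOLO('yolov8n-cls.pt')     # pretrained classifier; an 'n' suffix on the YAML name selects scale 'n'
    probs = model('bus.jpg')[0].probs  # class probabilities from the Classify head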
/ultralytics/models/v8/yolov8-p2.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # YOLOv8 object detection model with P2-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect
3 |
4 | # Parameters
5 | nc: 80 # number of classes
6 | scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
7 | # [depth, width, max_channels]
8 | n: [0.33, 0.25, 1024]
9 | s: [0.33, 0.50, 1024]
10 | m: [0.67, 0.75, 768]
11 | l: [1.00, 1.00, 512]
12 | x: [1.00, 1.25, 512]
13 |
14 | # YOLOv8.0 backbone
15 | backbone:
16 | # [from, repeats, module, args]
17 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
18 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
19 | - [-1, 3, C2f, [128, True]]
20 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
21 | - [-1, 6, C2f, [256, True]]
22 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
23 | - [-1, 6, C2f, [512, True]]
24 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
25 | - [-1, 3, C2f, [1024, True]]
26 | - [-1, 1, SPPF, [1024, 5]] # 9
27 |
28 | # YOLOv8.0-p2 head
29 | head:
30 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
31 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
32 | - [-1, 3, C2f, [512]] # 12
33 |
34 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
35 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
36 | - [-1, 3, C2f, [256]] # 15 (P3/8-small)
37 |
38 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
39 | - [[-1, 2], 1, Concat, [1]] # cat backbone P2
40 | - [-1, 3, C2f, [128]] # 18 (P2/4-xsmall)
41 |
42 | - [-1, 1, Conv, [128, 3, 2]]
43 | - [[-1, 15], 1, Concat, [1]] # cat head P3
44 | - [-1, 3, C2f, [256]] # 21 (P3/8-small)
45 |
46 | - [-1, 1, Conv, [256, 3, 2]]
47 | - [[-1, 12], 1, Concat, [1]] # cat head P4
48 | - [-1, 3, C2f, [512]] # 24 (P4/16-medium)
49 |
50 | - [-1, 1, Conv, [512, 3, 2]]
51 | - [[-1, 9], 1, Concat, [1]] # cat head P5
52 | - [-1, 3, C2f, [1024]] # 27 (P5/32-large)
53 |
54 | - [[18, 21, 24, 27], 1, Detect, [nc]] # Detect(P2, P3, P4, P5)
55 |
--------------------------------------------------------------------------------
/ultralytics/models/v8/yolov8-p6.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # YOLOv8 object detection model with P3-P6 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect
3 |
4 | # Parameters
5 | nc: 80 # number of classes
6 | scales: # model compound scaling constants, i.e. 'model=yolov8n-p6.yaml' will call yolov8-p6.yaml with scale 'n'
7 | # [depth, width, max_channels]
8 | n: [0.33, 0.25, 1024]
9 | s: [0.33, 0.50, 1024]
10 | m: [0.67, 0.75, 768]
11 | l: [1.00, 1.00, 512]
12 | x: [1.00, 1.25, 512]
13 |
14 | # YOLOv8.0x6 backbone
15 | backbone:
16 | # [from, repeats, module, args]
17 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
18 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
19 | - [-1, 3, C2f, [128, True]]
20 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
21 | - [-1, 6, C2f, [256, True]]
22 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
23 | - [-1, 6, C2f, [512, True]]
24 | - [-1, 1, Conv, [768, 3, 2]] # 7-P5/32
25 | - [-1, 3, C2f, [768, True]]
26 | - [-1, 1, Conv, [1024, 3, 2]] # 9-P6/64
27 | - [-1, 3, C2f, [1024, True]]
28 | - [-1, 1, SPPF, [1024, 5]] # 11
29 |
30 | # YOLOv8.0x6 head
31 | head:
32 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
33 | - [[-1, 8], 1, Concat, [1]] # cat backbone P5
34 | - [-1, 3, C2, [768, False]] # 14
35 |
36 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
37 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
38 | - [-1, 3, C2, [512, False]] # 17
39 |
40 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
41 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
42 | - [-1, 3, C2, [256, False]] # 20 (P3/8-small)
43 |
44 | - [-1, 1, Conv, [256, 3, 2]]
45 | - [[-1, 17], 1, Concat, [1]] # cat head P4
46 | - [-1, 3, C2, [512, False]] # 23 (P4/16-medium)
47 |
48 | - [-1, 1, Conv, [512, 3, 2]]
49 | - [[-1, 14], 1, Concat, [1]] # cat head P5
50 | - [-1, 3, C2, [768, False]] # 26 (P5/32-large)
51 |
52 | - [-1, 1, Conv, [768, 3, 2]]
53 | - [[-1, 11], 1, Concat, [1]] # cat head P6
54 | - [-1, 3, C2, [1024, False]] # 29 (P6/64-xlarge)
55 |
56 | - [[20, 23, 26, 29], 1, Detect, [nc]] # Detect(P3, P4, P5, P6)
57 |
--------------------------------------------------------------------------------
/ultralytics/models/v8/yolov8-pose-p6.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # YOLOv8-pose keypoints/pose estimation model with P3-P6 outputs. For Usage examples see https://docs.ultralytics.com/tasks/pose
3 |
4 | # Parameters
5 | nc: 1 # number of classes
6 | kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
7 | scales: # model compound scaling constants, i.e. 'model=yolov8n-pose-p6.yaml' will call yolov8-pose-p6.yaml with scale 'n'
8 | # [depth, width, max_channels]
9 | n: [0.33, 0.25, 1024]
10 | s: [0.33, 0.50, 1024]
11 | m: [0.67, 0.75, 768]
12 | l: [1.00, 1.00, 512]
13 | x: [1.00, 1.25, 512]
14 |
15 | # YOLOv8.0x6 backbone
16 | backbone:
17 | # [from, repeats, module, args]
18 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
19 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
20 | - [-1, 3, C2f, [128, True]]
21 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
22 | - [-1, 6, C2f, [256, True]]
23 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
24 | - [-1, 6, C2f, [512, True]]
25 | - [-1, 1, Conv, [768, 3, 2]] # 7-P5/32
26 | - [-1, 3, C2f, [768, True]]
27 | - [-1, 1, Conv, [1024, 3, 2]] # 9-P6/64
28 | - [-1, 3, C2f, [1024, True]]
29 | - [-1, 1, SPPF, [1024, 5]] # 11
30 |
31 | # YOLOv8.0x6 head
32 | head:
33 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
34 | - [[-1, 8], 1, Concat, [1]] # cat backbone P5
35 | - [-1, 3, C2, [768, False]] # 14
36 |
37 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
38 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
39 | - [-1, 3, C2, [512, False]] # 17
40 |
41 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
42 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
43 | - [-1, 3, C2, [256, False]] # 20 (P3/8-small)
44 |
45 | - [-1, 1, Conv, [256, 3, 2]]
46 | - [[-1, 17], 1, Concat, [1]] # cat head P4
47 | - [-1, 3, C2, [512, False]] # 23 (P4/16-medium)
48 |
49 | - [-1, 1, Conv, [512, 3, 2]]
50 | - [[-1, 14], 1, Concat, [1]] # cat head P5
51 | - [-1, 3, C2, [768, False]] # 26 (P5/32-large)
52 |
53 | - [-1, 1, Conv, [768, 3, 2]]
54 | - [[-1, 11], 1, Concat, [1]] # cat head P6
55 | - [-1, 3, C2, [1024, False]] # 29 (P6/64-xlarge)
56 |
57 | - [[20, 23, 26, 29], 1, Pose, [nc, kpt_shape]] # Pose(P3, P4, P5, P6)
58 |
--------------------------------------------------------------------------------
/ultralytics/models/v8/yolov8-pose.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # YOLOv8-pose keypoints/pose estimation model. For Usage examples see https://docs.ultralytics.com/tasks/pose
3 |
4 | # Parameters
5 | nc: 1 # number of classes
6 | kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
7 | scales: # model compound scaling constants, i.e. 'model=yolov8n-pose.yaml' will call yolov8-pose.yaml with scale 'n'
8 | # [depth, width, max_channels]
9 | n: [0.33, 0.25, 1024]
10 | s: [0.33, 0.50, 1024]
11 | m: [0.67, 0.75, 768]
12 | l: [1.00, 1.00, 512]
13 | x: [1.00, 1.25, 512]
14 |
15 | # YOLOv8.0n backbone
16 | backbone:
17 | # [from, repeats, module, args]
18 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
19 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
20 | - [-1, 3, C2f, [128, True]]
21 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
22 | - [-1, 6, C2f, [256, True]]
23 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
24 | - [-1, 6, C2f, [512, True]]
25 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
26 | - [-1, 3, C2f, [1024, True]]
27 | - [-1, 1, SPPF, [1024, 5]] # 9
28 |
29 | # YOLOv8.0n head
30 | head:
31 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
32 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
33 | - [-1, 3, C2f, [512]] # 12
34 |
35 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
36 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
37 | - [-1, 3, C2f, [256]] # 15 (P3/8-small)
38 |
39 | - [-1, 1, Conv, [256, 3, 2]]
40 | - [[-1, 12], 1, Concat, [1]] # cat head P4
41 | - [-1, 3, C2f, [512]] # 18 (P4/16-medium)
42 |
43 | - [-1, 1, Conv, [512, 3, 2]]
44 | - [[-1, 9], 1, Concat, [1]] # cat head P5
45 | - [-1, 3, C2f, [1024]] # 21 (P5/32-large)
46 |
47 | - [[15, 18, 21], 1, Pose, [nc, kpt_shape]] # Pose(P3, P4, P5)
48 |
--------------------------------------------------------------------------------
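The kpt_shape parameter above fixes the keypoint tensor layout. With pretrained pose weights (file name assumed), the results expose it directly; a sketch:

    from ultralytics import YOLO

    model = YOLO('yolov8n-pose.pt')  # pretrained pose weights, name assumed
    res = model('bus.jpg')           # any image path works here
    kpts = res[0].keypoints          # per-person keypoints, laid out per kpt_shape [17, 3]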
/ultralytics/models/v8/yolov8-seg.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # YOLOv8-seg instance segmentation model. For Usage examples see https://docs.ultralytics.com/tasks/segment
3 |
4 | # Parameters
5 | nc: 80 # number of classes
6 | scales: # model compound scaling constants, i.e. 'model=yolov8n-seg.yaml' will call yolov8-seg.yaml with scale 'n'
7 | # [depth, width, max_channels]
8 | n: [0.33, 0.25, 1024]
9 | s: [0.33, 0.50, 1024]
10 | m: [0.67, 0.75, 768]
11 | l: [1.00, 1.00, 512]
12 | x: [1.00, 1.25, 512]
13 |
14 | # YOLOv8.0n backbone
15 | backbone:
16 | # [from, repeats, module, args]
17 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
18 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
19 | - [-1, 3, C2f, [128, True]]
20 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
21 | - [-1, 6, C2f, [256, True]]
22 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
23 | - [-1, 6, C2f, [512, True]]
24 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
25 | - [-1, 3, C2f, [1024, True]]
26 | - [-1, 1, SPPF, [1024, 5]] # 9
27 |
28 | # YOLOv8.0n head
29 | head:
30 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
31 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
32 | - [-1, 3, C2f, [512]] # 12
33 |
34 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
35 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
36 | - [-1, 3, C2f, [256]] # 15 (P3/8-small)
37 |
38 | - [-1, 1, Conv, [256, 3, 2]]
39 | - [[-1, 12], 1, Concat, [1]] # cat head P4
40 | - [-1, 3, C2f, [512]] # 18 (P4/16-medium)
41 |
42 | - [-1, 1, Conv, [512, 3, 2]]
43 | - [[-1, 9], 1, Concat, [1]] # cat head P5
44 | - [-1, 3, C2f, [1024]] # 21 (P5/32-large)
45 |
46 | - [[15, 18, 21], 1, Segment, [nc, 32, 256]] # Segment(P3, P4, P5)
47 |
--------------------------------------------------------------------------------
/ultralytics/models/v8/yolov8.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # YOLOv8 object detection model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect
3 |
4 | # Parameters
5 | nc: 80 # number of classes
6 | scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
7 | # [depth, width, max_channels]
8 | n: [0.33, 0.25, 1024] # YOLOv8n summary: 225 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPs
9 | s: [0.33, 0.50, 1024] # YOLOv8s summary: 225 layers, 11166560 parameters, 11166544 gradients, 28.8 GFLOPs
10 | m: [0.67, 0.75, 768] # YOLOv8m summary: 295 layers, 25902640 parameters, 25902624 gradients, 79.3 GFLOPs
11 | l: [1.00, 1.00, 512] # YOLOv8l summary: 365 layers, 43691520 parameters, 43691504 gradients, 165.7 GFLOPs
12 | x: [1.00, 1.25, 512] # YOLOv8x summary: 365 layers, 68229648 parameters, 68229632 gradients, 258.5 GFLOPs
13 |
14 | # YOLOv8.0n backbone
15 | backbone:
16 | # [from, repeats, module, args]
17 | - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
18 | - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
19 | - [-1, 3, C2f, [128, True]]
20 | - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
21 | - [-1, 6, C2f, [256, True]]
22 | - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
23 | - [-1, 6, C2f, [512, True]]
24 | - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
25 | - [-1, 3, C2f, [1024, True]]
26 | - [-1, 1, SPPF, [1024, 5]] # 9
27 |
28 | # YOLOv8.0n head
29 | head:
30 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
31 | - [[-1, 6], 1, Concat, [1]] # cat backbone P4
32 | - [-1, 3, C2f, [512]] # 12
33 |
34 | - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
35 | - [[-1, 4], 1, Concat, [1]] # cat backbone P3
36 | - [-1, 3, C2f, [256]] # 15 (P3/8-small)
37 |
38 | - [-1, 1, Conv, [256, 3, 2]]
39 | - [[-1, 12], 1, Concat, [1]] # cat head P4
40 | - [-1, 3, C2f, [512]] # 18 (P4/16-medium)
41 |
42 | - [-1, 1, Conv, [512, 3, 2]]
43 | - [[-1, 9], 1, Concat, [1]] # cat head P5
44 | - [-1, 3, C2f, [1024]] # 21 (P5/32-large)
45 |
46 | - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5)
47 |
--------------------------------------------------------------------------------
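The scales table is applied per layer: repeat counts are multiplied by the depth constant, channel widths by the width constant, capped at max_channels and kept divisible by 8. A small sketch that approximates this arithmetic (the exact rounding lives in the model parser):

    import math

    def scale_layer(repeats, channels, depth, width, max_channels):
        n = max(round(repeats * depth), 1)                          # scaled repeat count
        c = math.ceil(min(channels, max_channels) * width / 8) * 8  # width-scaled, 8-divisible
        return n, c

    # For scale 'n' (depth=0.33, width=0.25), a [-1, 6, C2f, [256, True]] entry becomes:
    print(scale_layer(6, 256, depth=0.33, width=0.25, max_channels=1024))  # (2, 64)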
/ultralytics/nn/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/nn/__init__.py
--------------------------------------------------------------------------------
/ultralytics/nn/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/nn/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/nn/__pycache__/autobackend.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/nn/__pycache__/autobackend.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/nn/__pycache__/autoshape.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/nn/__pycache__/autoshape.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/nn/__pycache__/modules.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/nn/__pycache__/modules.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/nn/__pycache__/tasks.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/nn/__pycache__/tasks.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/tracker/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from .track import register_tracker
4 | from .trackers import BOTSORT, BYTETracker
5 |
6 | __all__ = 'register_tracker', 'BOTSORT', 'BYTETracker' # allow simpler import
7 |
--------------------------------------------------------------------------------
/ultralytics/tracker/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/tracker/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/tracker/__pycache__/track.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/tracker/__pycache__/track.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/tracker/cfg/botsort.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # Default YOLO tracker settings for BoT-SORT tracker https://github.com/NirAharon/BoT-SORT
3 |
4 | tracker_type: botsort # tracker type, ['botsort', 'bytetrack']
5 | track_high_thresh: 0.5 # threshold for the first association
6 | track_low_thresh: 0.1 # threshold for the second association
7 | new_track_thresh: 0.6 # threshold for initializing a new track if the detection does not match any existing tracks
8 | track_buffer: 30 # buffer to calculate the time when to remove tracks
9 | match_thresh: 0.8 # threshold for matching tracks
10 | # min_box_area: 10 # threshold for min box areas (for tracker evaluation, not used for now)
11 | # mot20: False # for tracker evaluation (not used for now)
12 |
13 | # BoT-SORT settings
14 | cmc_method: sparseOptFlow # method of global motion compensation
15 | # ReID model related thresh (not supported yet)
16 | proximity_thresh: 0.5
17 | appearance_thresh: 0.25
18 | with_reid: False
19 |
--------------------------------------------------------------------------------
/ultralytics/tracker/cfg/bytetrack.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # Default YOLO tracker settings for ByteTrack tracker https://github.com/ifzhang/ByteTrack
3 |
4 | tracker_type: bytetrack # tracker type, ['botsort', 'bytetrack']
5 | track_high_thresh: 0.5 # threshold for the first association
6 | track_low_thresh: 0.1 # threshold for the second association
7 | new_track_thresh: 0.6 # threshold for initializing a new track if the detection does not match any existing tracks
8 | track_buffer: 30 # buffer to calculate the time when to remove tracks
9 | match_thresh: 0.8 # threshold for matching tracks
10 | # min_box_area: 10 # threshold for min box areas (for tracker evaluation, not used for now)
11 | # mot20: False # for tracker evaluation (not used for now)
12 |
--------------------------------------------------------------------------------
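Either tracker config can be passed straight to model.track(); a minimal sketch with an assumed video path:

    from ultralytics import YOLO

    model = YOLO('yolov8n.pt')
    results = model.track(source='video.mp4', tracker='bytetrack.yaml')  # or 'botsort.yaml'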
/ultralytics/tracker/track.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from functools import partial
4 |
5 | import torch
6 |
7 | from ultralytics.yolo.utils import IterableSimpleNamespace, yaml_load
8 | from ultralytics.yolo.utils.checks import check_yaml
9 |
10 | from .trackers import BOTSORT, BYTETracker
11 |
12 | TRACKER_MAP = {'bytetrack': BYTETracker, 'botsort': BOTSORT}
13 |
14 |
15 | def on_predict_start(predictor, persist=False):
16 | """
17 | Initialize trackers for object tracking during prediction.
18 |
19 | Args:
20 | predictor (object): The predictor object to initialize trackers for.
21 | persist (bool, optional): Whether to persist the trackers if they already exist. Defaults to False.
22 |
23 | Raises:
24 | AssertionError: If the tracker_type is not 'bytetrack' or 'botsort'.
25 | """
26 | if hasattr(predictor, 'trackers') and persist:
27 | return
28 | tracker = check_yaml(predictor.args.tracker)
29 | cfg = IterableSimpleNamespace(**yaml_load(tracker))
30 | assert cfg.tracker_type in ['bytetrack', 'botsort'], \
31 | f"Only support 'bytetrack' and 'botsort' for now, but got '{cfg.tracker_type}'"
32 | trackers = []
33 | for _ in range(predictor.dataset.bs):
34 | tracker = TRACKER_MAP[cfg.tracker_type](args=cfg, frame_rate=30)
35 | trackers.append(tracker)
36 | predictor.trackers = trackers
37 |
38 |
39 | def on_predict_postprocess_end(predictor):
40 | bs = predictor.dataset.bs
41 | im0s = predictor.batch[2]
42 | im0s = im0s if isinstance(im0s, list) else [im0s]
43 | for i in range(bs):
44 | det = predictor.results[i].boxes.cpu().numpy()
45 | if len(det) == 0:
46 | continue
47 | tracks = predictor.trackers[i].update(det, im0s[i])
48 | if len(tracks) == 0:
49 | continue
50 | idx = tracks[:, -1].tolist()
51 | predictor.results[i] = predictor.results[i][idx]
52 | predictor.results[i].update(boxes=torch.as_tensor(tracks[:, :-1]))
53 |
54 |
55 | def register_tracker(model, persist):
56 | """
57 | Register tracking callbacks to the model for object tracking during prediction.
58 |
59 | Args:
60 | model (object): The model object to register tracking callbacks for.
61 | persist (bool): Whether to persist the trackers if they already exist.
62 |
63 | """
64 | model.add_callback('on_predict_start', partial(on_predict_start, persist=persist))
65 | model.add_callback('on_predict_postprocess_end', on_predict_postprocess_end)
66 |
--------------------------------------------------------------------------------
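register_tracker is what model.track() invokes under the hood; wiring it up manually looks like this:

    from ultralytics import YOLO
    from ultralytics.tracker import register_tracker

    model = YOLO('yolov8n.pt')
    register_tracker(model, persist=True)  # attaches the two prediction callbacks defined above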
/ultralytics/tracker/trackers/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from .bot_sort import BOTSORT
4 | from .byte_tracker import BYTETracker
5 |
6 | __all__ = 'BOTSORT', 'BYTETracker' # allow simpler import
7 |
--------------------------------------------------------------------------------
/ultralytics/tracker/trackers/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/tracker/trackers/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/tracker/trackers/__pycache__/basetrack.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/tracker/trackers/__pycache__/basetrack.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/tracker/trackers/__pycache__/bot_sort.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/tracker/trackers/__pycache__/bot_sort.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/tracker/trackers/__pycache__/byte_tracker.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/tracker/trackers/__pycache__/byte_tracker.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/tracker/trackers/basetrack.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from collections import OrderedDict
4 |
5 | import numpy as np
6 |
7 |
8 | class TrackState:
9 | New = 0
10 | Tracked = 1
11 | Lost = 2
12 | Removed = 3
13 |
14 |
15 | class BaseTrack:
16 | _count = 0
17 |
18 | track_id = 0
19 | is_activated = False
20 | state = TrackState.New
21 |
22 | history = OrderedDict()
23 | features = []
24 | curr_feature = None
25 | score = 0
26 | start_frame = 0
27 | frame_id = 0
28 | time_since_update = 0
29 |
30 | # multi-camera
31 | location = (np.inf, np.inf)
32 |
33 | @property
34 | def end_frame(self):
35 | return self.frame_id
36 |
37 | @staticmethod
38 | def next_id():
39 | BaseTrack._count += 1
40 | return BaseTrack._count
41 |
42 | def activate(self, *args):
43 | raise NotImplementedError
44 |
45 | def predict(self):
46 | raise NotImplementedError
47 |
48 | def update(self, *args, **kwargs):
49 | raise NotImplementedError
50 |
51 | def mark_lost(self):
52 | self.state = TrackState.Lost
53 |
54 | def mark_removed(self):
55 | self.state = TrackState.Removed
56 |
57 | @staticmethod
58 | def reset_id():
59 | BaseTrack._count = 0
60 |
--------------------------------------------------------------------------------
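Concrete trackers must override the three methods that raise NotImplementedError. A toy subclass, purely to show the contract:

    from ultralytics.tracker.trackers.basetrack import BaseTrack, TrackState

    class DummyTrack(BaseTrack):
        def activate(self, frame_id):
            self.track_id = self.next_id()  # take a fresh global id
            self.state = TrackState.Tracked
            self.start_frame = self.frame_id = frame_id

        def predict(self):
            pass  # no motion model in this sketch

        def update(self, frame_id):
            self.frame_id = frame_id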
/ultralytics/tracker/trackers/bot_sort.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from collections import deque
4 |
5 | import numpy as np
6 |
7 | from ..utils import matching
8 | from ..utils.gmc import GMC
9 | from ..utils.kalman_filter import KalmanFilterXYWH
10 | from .basetrack import TrackState
11 | from .byte_tracker import BYTETracker, STrack
12 |
13 |
14 | class BOTrack(STrack):
15 | shared_kalman = KalmanFilterXYWH()
16 |
17 | def __init__(self, tlwh, score, cls, feat=None, feat_history=50):
18 | super().__init__(tlwh, score, cls)
19 |
20 | self.smooth_feat = None
21 | self.curr_feat = None
22 | self.features = deque([], maxlen=feat_history) # init the deque before update_features, which appends to it
23 | self.alpha = 0.9
24 | if feat is not None:
25 | self.update_features(feat)
26 |
27 | def update_features(self, feat):
28 | feat /= np.linalg.norm(feat)
29 | self.curr_feat = feat
30 | if self.smooth_feat is None:
31 | self.smooth_feat = feat
32 | else:
33 | self.smooth_feat = self.alpha * self.smooth_feat + (1 - self.alpha) * feat
34 | self.features.append(feat)
35 | self.smooth_feat /= np.linalg.norm(self.smooth_feat)
36 |
37 | def predict(self):
38 | mean_state = self.mean.copy()
39 | if self.state != TrackState.Tracked:
40 | mean_state[6] = 0
41 | mean_state[7] = 0
42 |
43 | self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)
44 |
45 | def re_activate(self, new_track, frame_id, new_id=False):
46 | if new_track.curr_feat is not None:
47 | self.update_features(new_track.curr_feat)
48 | super().re_activate(new_track, frame_id, new_id)
49 |
50 | def update(self, new_track, frame_id):
51 | if new_track.curr_feat is not None:
52 | self.update_features(new_track.curr_feat)
53 | super().update(new_track, frame_id)
54 |
55 | @property
56 | def tlwh(self):
57 | """Get current position in bounding box format `(top left x, top left y,
58 | width, height)`.
59 | """
60 | if self.mean is None:
61 | return self._tlwh.copy()
62 | ret = self.mean[:4].copy()
63 | ret[:2] -= ret[2:] / 2
64 | return ret
65 |
66 | @staticmethod
67 | def multi_predict(stracks):
68 | if len(stracks) <= 0:
69 | return
70 | multi_mean = np.asarray([st.mean.copy() for st in stracks])
71 | multi_covariance = np.asarray([st.covariance for st in stracks])
72 | for i, st in enumerate(stracks):
73 | if st.state != TrackState.Tracked:
74 | multi_mean[i][6] = 0
75 | multi_mean[i][7] = 0
76 | multi_mean, multi_covariance = BOTrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
77 | for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
78 | stracks[i].mean = mean
79 | stracks[i].covariance = cov
80 |
81 | def convert_coords(self, tlwh):
82 | return self.tlwh_to_xywh(tlwh)
83 |
84 | @staticmethod
85 | def tlwh_to_xywh(tlwh):
86 | """Convert bounding box to format `(center x, center y, width,
87 | height)`.
88 | """
89 | ret = np.asarray(tlwh).copy()
90 | ret[:2] += ret[2:] / 2
91 | return ret
92 |
93 |
94 | class BOTSORT(BYTETracker):
95 |
96 | def __init__(self, args, frame_rate=30):
97 | super().__init__(args, frame_rate)
98 | # ReID module
99 | self.proximity_thresh = args.proximity_thresh
100 | self.appearance_thresh = args.appearance_thresh
101 |
102 | if args.with_reid:
103 | # BoT-SORT ReID is not supported yet
104 | self.encoder = None
105 | # self.gmc = GMC(method=args.cmc_method, verbose=[args.name, args.ablation])
106 | self.gmc = GMC(method=args.cmc_method)
107 |
108 | def get_kalmanfilter(self):
109 | return KalmanFilterXYWH()
110 |
111 | def init_track(self, dets, scores, cls, img=None):
112 | if len(dets) == 0:
113 | return []
114 | if self.args.with_reid and self.encoder is not None:
115 | features_keep = self.encoder.inference(img, dets)
116 | return [BOTrack(xyxy, s, c, f) for (xyxy, s, c, f) in zip(dets, scores, cls, features_keep)] # detections
117 | else:
118 | return [BOTrack(xyxy, s, c) for (xyxy, s, c) in zip(dets, scores, cls)] # detections
119 |
120 | def get_dists(self, tracks, detections):
121 | dists = matching.iou_distance(tracks, detections)
122 | dists_mask = (dists > self.proximity_thresh)
123 |
124 | # TODO: mot20
125 | # if not self.args.mot20:
126 | dists = matching.fuse_score(dists, detections)
127 |
128 | if self.args.with_reid and self.encoder is not None:
129 | emb_dists = matching.embedding_distance(tracks, detections) / 2.0
130 | emb_dists[emb_dists > self.appearance_thresh] = 1.0
131 | emb_dists[dists_mask] = 1.0
132 | dists = np.minimum(dists, emb_dists)
133 | return dists
134 |
135 | def multi_predict(self, tracks):
136 | BOTrack.multi_predict(tracks)
137 |
--------------------------------------------------------------------------------
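Standalone construction mirrors what on_predict_start in track.py does for each stream:

    from ultralytics.yolo.utils import IterableSimpleNamespace, yaml_load
    from ultralytics.yolo.utils.checks import check_yaml
    from ultralytics.tracker import BOTSORT

    cfg = IterableSimpleNamespace(**yaml_load(check_yaml('botsort.yaml')))
    tracker = BOTSORT(args=cfg, frame_rate=30)  # feed per-frame detections via tracker.update(...)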
/ultralytics/tracker/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/tracker/utils/__init__.py
--------------------------------------------------------------------------------
/ultralytics/tracker/utils/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/tracker/utils/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/tracker/utils/__pycache__/gmc.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/tracker/utils/__pycache__/gmc.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/tracker/utils/__pycache__/kalman_filter.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/tracker/utils/__pycache__/kalman_filter.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/tracker/utils/__pycache__/matching.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/tracker/utils/__pycache__/matching.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from . import v8
4 |
5 | __all__ = 'v8', # tuple or list
6 |
--------------------------------------------------------------------------------
/ultralytics/yolo/cfg/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/cfg/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/cfg/default.yaml:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | # Default training settings and hyperparameters for medium-augmentation COCO training
3 |
4 | task: detect # YOLO task, i.e. detect, segment, classify, pose
5 | mode: train # YOLO mode, i.e. train, val, predict, export, track, benchmark
6 |
7 | # Train settings -------------------------------------------------------------------------------------------------------
8 | model: # path to model file, i.e. yolov8n.pt, yolov8n.yaml
9 | data: # path to data file, i.e. coco128.yaml
10 | epochs: 100 # number of epochs to train for
11 | patience: 50 # epochs to wait for no observable improvement for early stopping of training
12 | batch: 16 # number of images per batch (-1 for AutoBatch)
13 | imgsz: 640 # size of input images as integer or w,h
14 | save: True # save train checkpoints and predict results
15 | save_period: -1 # Save checkpoint every x epochs (disabled if < 1)
16 | cache: False # True/ram, disk or False. Use cache for data loading
17 | device: # device to run on, i.e. cuda device=0 or device=0,1,2,3 or device=cpu
18 | workers: 8 # number of worker threads for data loading (per RANK if DDP)
19 | project: # project name
20 | name: # experiment name, results saved to 'project/name' directory
21 | exist_ok: False # whether to overwrite existing experiment
22 | pretrained: False # whether to use a pretrained model
23 | optimizer: SGD # optimizer to use, choices=['SGD', 'Adam', 'AdamW', 'RMSProp']
24 | verbose: True # whether to print verbose output
25 | seed: 0 # random seed for reproducibility
26 | deterministic: True # whether to enable deterministic mode
27 | single_cls: False # train multi-class data as single-class
28 | image_weights: False # use weighted image selection for training
29 | rect: False # rectangular training if mode='train' or rectangular validation if mode='val'
30 | cos_lr: False # use cosine learning rate scheduler
31 | close_mosaic: 0 # (int) disable mosaic augmentation for final epochs
32 | resume: False # resume training from last checkpoint
33 | amp: True # Automatic Mixed Precision (AMP) training, choices=[True, False], True runs AMP check
34 | # Segmentation
35 | overlap_mask: True # masks should overlap during training (segment train only)
36 | mask_ratio: 4 # mask downsample ratio (segment train only)
37 | # Classification
38 | dropout: 0.0 # use dropout regularization (classify train only)
39 |
40 | # Val/Test settings ----------------------------------------------------------------------------------------------------
41 | val: True # validate/test during training
42 | split: val # dataset split to use for validation, i.e. 'val', 'test' or 'train'
43 | save_json: False # save results to JSON file
44 | save_hybrid: False # save hybrid version of labels (labels + additional predictions)
45 | conf: # object confidence threshold for detection (default 0.25 predict, 0.001 val)
46 | iou: 0.7 # intersection over union (IoU) threshold for NMS
47 | max_det: 300 # maximum number of detections per image
48 | half: False # use half precision (FP16)
49 | dnn: False # use OpenCV DNN for ONNX inference
50 | plots: True # save plots during train/val
51 |
52 | # Prediction settings --------------------------------------------------------------------------------------------------
53 | source: # source directory for images or videos
54 | show: False # show results if possible
55 | save_txt: False # save results as .txt file
56 | save_conf: False # save results with confidence scores
57 | save_crop: False # save cropped images with results
58 | show_labels: True # show object labels in plots
59 | show_conf: True # show object confidence scores in plots
60 | vid_stride: 1 # video frame-rate stride
61 | line_thickness: 3 # bounding box thickness (pixels)
62 | visualize: False # visualize model features
63 | augment: False # apply image augmentation to prediction sources
64 | agnostic_nms: False # class-agnostic NMS
65 | classes: # filter results by class, i.e. class=0, or class=[0,2,3]
66 | retina_masks: False # use high-resolution segmentation masks
67 | boxes: True # Show boxes in segmentation predictions
68 |
69 | # Export settings ------------------------------------------------------------------------------------------------------
70 | format: torchscript # format to export to
71 | keras: False # use Keras
72 | optimize: False # TorchScript: optimize for mobile
73 | int8: False # CoreML/TF INT8 quantization
74 | dynamic: False # ONNX/TF/TensorRT: dynamic axes
75 | simplify: False # ONNX: simplify model
76 | opset: # ONNX: opset version (optional)
77 | workspace: 4 # TensorRT: workspace size (GB)
78 | nms: False # CoreML: add NMS
79 |
80 | # Hyperparameters ------------------------------------------------------------------------------------------------------
81 | lr0: 0.01 # initial learning rate (i.e. SGD=1E-2, Adam=1E-3)
82 | lrf: 0.01 # final learning rate (lr0 * lrf)
83 | momentum: 0.937 # SGD momentum/Adam beta1
84 | weight_decay: 0.0005 # optimizer weight decay 5e-4
85 | warmup_epochs: 3.0 # warmup epochs (fractions ok)
86 | warmup_momentum: 0.8 # warmup initial momentum
87 | warmup_bias_lr: 0.1 # warmup initial bias lr
88 | box: 7.5 # box loss gain
89 | cls: 0.5 # cls loss gain (scale with pixels)
90 | dfl: 1.5 # dfl loss gain
91 | pose: 12.0 # pose loss gain
92 | kobj: 1.0 # keypoint obj loss gain
93 | label_smoothing: 0.0 # label smoothing (fraction)
94 | nbs: 64 # nominal batch size
95 | hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
96 | hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
97 | hsv_v: 0.4 # image HSV-Value augmentation (fraction)
98 | degrees: 0.0 # image rotation (+/- deg)
99 | translate: 0.1 # image translation (+/- fraction)
100 | scale: 0.5 # image scale (+/- gain)
101 | shear: 0.0 # image shear (+/- deg)
102 | perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
103 | flipud: 0.0 # image flip up-down (probability)
104 | fliplr: 0.5 # image flip left-right (probability)
105 | mosaic: 1.0 # image mosaic (probability)
106 | mixup: 0.0 # image mixup (probability)
107 | copy_paste: 0.0 # segment copy-paste (probability)
108 |
109 | # Custom config.yaml ---------------------------------------------------------------------------------------------------
110 | cfg: # for overriding default.yaml
111 |
112 | # Debug, do not modify -------------------------------------------------------------------------------------------------
113 | v5loader: False # use legacy YOLOv5 dataloader
114 |
115 | # Tracker settings ------------------------------------------------------------------------------------------------------
116 | tracker: botsort.yaml # tracker type, ['botsort.yaml', 'bytetrack.yaml']
117 |
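As a quick orientation, every key in this file can be overridden at call time. A minimal sketch follows, assuming the standard `YOLO` API from this repo's `engine/model.py` ('yolov8n.pt' and 'coco128.yaml' are example assets, not shipped defaults):

# Sketch: overriding default.yaml keys per call (kwargs map 1:1 onto the settings above).
from ultralytics import YOLO

model = YOLO('yolov8n.pt')  # example weights
# train-time keys: epochs, batch, imgsz, optimizer, lr0, ... (batch=-1 triggers AutoBatch)
model.train(data='coco128.yaml', epochs=3, batch=16, imgsz=640, optimizer='SGD', lr0=0.01)
# predict-time keys: conf, iou, max_det, ...
model.predict('https://ultralytics.com/images/bus.jpg', conf=0.25, iou=0.7)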
--------------------------------------------------------------------------------
/ultralytics/yolo/data/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from .base import BaseDataset
4 | from .build import build_classification_dataloader, build_dataloader, load_inference_source
5 | from .dataset import ClassificationDataset, SemanticDataset, YOLODataset
6 | from .dataset_wrappers import MixAndRectDataset
7 |
8 | __all__ = ('BaseDataset', 'ClassificationDataset', 'MixAndRectDataset', 'SemanticDataset', 'YOLODataset',
9 | 'build_classification_dataloader', 'build_dataloader', 'load_inference_source')
10 |
--------------------------------------------------------------------------------
/ultralytics/yolo/data/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/data/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/data/__pycache__/augment.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/data/__pycache__/augment.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/data/__pycache__/base.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/data/__pycache__/base.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/data/__pycache__/build.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/data/__pycache__/build.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/data/__pycache__/dataset.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/data/__pycache__/dataset.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/data/__pycache__/dataset_wrappers.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/data/__pycache__/dataset_wrappers.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/data/__pycache__/utils.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/data/__pycache__/utils.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/data/dataloaders/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/data/dataloaders/__init__.py
--------------------------------------------------------------------------------
/ultralytics/yolo/data/dataloaders/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/data/dataloaders/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/data/dataloaders/__pycache__/stream_loaders.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/data/dataloaders/__pycache__/stream_loaders.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/data/dataloaders/__pycache__/v5augmentations.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/data/dataloaders/__pycache__/v5augmentations.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/data/dataloaders/__pycache__/v5loader.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/data/dataloaders/__pycache__/v5loader.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/data/dataset_wrappers.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import collections
4 | from copy import deepcopy
5 |
6 | from .augment import LetterBox
7 |
8 |
9 | class MixAndRectDataset:
10 | """A wrapper of multiple images mixed dataset.
11 |
12 | Args:
13 | dataset (:obj:`BaseDataset`): The dataset to be mixed.
14 | transforms (Sequence[dict]): config dict to be composed.
15 | """
16 |
17 | def __init__(self, dataset):
18 | self.dataset = dataset
19 | self.imgsz = dataset.imgsz
20 |
21 | def __len__(self):
22 | return len(self.dataset)
23 |
24 | def __getitem__(self, index):
25 | labels = deepcopy(self.dataset[index])
26 | for transform in self.dataset.transforms.tolist():
27 | # mosaic and mixup
28 | if hasattr(transform, 'get_indexes'):
29 | indexes = transform.get_indexes(self.dataset)
30 | if not isinstance(indexes, collections.abc.Sequence):
31 | indexes = [indexes]
32 | mix_labels = [deepcopy(self.dataset[index]) for index in indexes]
33 | labels['mix_labels'] = mix_labels
34 | if self.dataset.rect and isinstance(transform, LetterBox):
35 | transform.new_shape = self.dataset.batch_shapes[self.dataset.batch[index]]
36 | labels = transform(labels)
37 | if 'mix_labels' in labels:
38 | labels.pop('mix_labels')
39 | return labels
40 |
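A minimal usage sketch (hypothetical `train_dataset`; any `BaseDataset` subclass exposing `imgsz`, `rect`, `batch`, `batch_shapes` and a `transforms` pipeline fits the wrapper's expectations):

# Sketch: wrapping a dataset so mosaic/mixup transforms receive their extra images.
# 'train_dataset' is a placeholder, e.g. a YOLODataset built elsewhere in this package.
wrapped = MixAndRectDataset(train_dataset)
sample = wrapped[0]  # 'mix_labels' is attached for mosaic/mixup, consumed by the transform, then popped
print(len(wrapped), sorted(sample.keys()))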
--------------------------------------------------------------------------------
/ultralytics/yolo/engine/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/engine/__init__.py
--------------------------------------------------------------------------------
/ultralytics/yolo/engine/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/engine/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/engine/__pycache__/exporter.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/engine/__pycache__/exporter.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/engine/__pycache__/model.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/engine/__pycache__/model.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/engine/__pycache__/predictor.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/engine/__pycache__/predictor.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/engine/__pycache__/results.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/engine/__pycache__/results.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/engine/__pycache__/trainer.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/engine/__pycache__/trainer.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/engine/__pycache__/validator.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/engine/__pycache__/validator.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/utils/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/__pycache__/autobatch.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/utils/__pycache__/autobatch.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/__pycache__/benchmarks.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/utils/__pycache__/benchmarks.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/__pycache__/checks.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/utils/__pycache__/checks.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/__pycache__/dist.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/utils/__pycache__/dist.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/__pycache__/downloads.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/utils/__pycache__/downloads.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/__pycache__/files.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/utils/__pycache__/files.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/__pycache__/instance.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/utils/__pycache__/instance.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/__pycache__/loss.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/utils/__pycache__/loss.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/__pycache__/metrics.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/utils/__pycache__/metrics.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/__pycache__/ops.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/utils/__pycache__/ops.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/__pycache__/plotting.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/utils/__pycache__/plotting.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/__pycache__/tal.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/utils/__pycache__/tal.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/__pycache__/torch_utils.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/utils/__pycache__/torch_utils.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/autobatch.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | """
3 | Functions for estimating the best YOLO batch size to use a fraction of the available CUDA memory in PyTorch.
4 | """
5 |
6 | from copy import deepcopy
7 |
8 | import numpy as np
9 | import torch
10 |
11 | from ultralytics.yolo.utils import LOGGER, colorstr
12 | from ultralytics.yolo.utils.torch_utils import profile
13 |
14 |
15 | def check_train_batch_size(model, imgsz=640, amp=True):
16 | """
17 | Check YOLO training batch size using the autobatch() function.
18 |
19 | Args:
20 | model (torch.nn.Module): YOLO model to check batch size for.
21 | imgsz (int): Image size used for training.
22 | amp (bool): If True, use automatic mixed precision (AMP) for training.
23 |
24 | Returns:
25 | int: Optimal batch size computed using the autobatch() function.
26 | """
27 |
28 | with torch.cuda.amp.autocast(amp):
29 | return autobatch(deepcopy(model).train(), imgsz) # compute optimal batch size
30 |
31 |
32 | def autobatch(model, imgsz=640, fraction=0.67, batch_size=16):
33 | """
34 | Automatically estimate the best YOLO batch size to use a fraction of the available CUDA memory.
35 |
36 | Args:
37 | model: YOLO model to compute batch size for.
38 | imgsz (int, optional): The image size used as input for the YOLO model. Defaults to 640.
39 | fraction (float, optional): The fraction of available CUDA memory to use. Defaults to 0.67.
40 | batch_size (int, optional): The default batch size to use if an error is detected. Defaults to 16.
41 |
42 | Returns:
43 | int: The optimal batch size.
44 | """
45 |
46 | # Check device
47 | prefix = colorstr('AutoBatch: ')
48 | LOGGER.info(f'{prefix}Computing optimal batch size for imgsz={imgsz}')
49 | device = next(model.parameters()).device # get model device
50 | if device.type == 'cpu':
51 | LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}')
52 | return batch_size
53 | if torch.backends.cudnn.benchmark:
54 | LOGGER.info(f'{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}')
55 | return batch_size
56 |
57 | # Inspect CUDA memory
58 | gb = 1 << 30 # bytes to GiB (1024 ** 3)
59 | d = str(device).upper() # 'CUDA:0'
60 | properties = torch.cuda.get_device_properties(device) # device properties
61 | t = properties.total_memory / gb # GiB total
62 | r = torch.cuda.memory_reserved(device) / gb # GiB reserved
63 | a = torch.cuda.memory_allocated(device) / gb # GiB allocated
64 | f = t - (r + a) # GiB free
65 | LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free')
66 |
67 | # Profile batch sizes
68 | batch_sizes = [1, 2, 4, 8, 16]
69 | try:
70 | img = [torch.empty(b, 3, imgsz, imgsz) for b in batch_sizes]
71 | results = profile(img, model, n=3, device=device)
72 |
73 | # Fit a solution
74 | y = [x[2] for x in results if x] # profiled memory usage (GiB) for each batch size
75 | p = np.polyfit(batch_sizes[:len(y)], y, deg=1) # first-degree polynomial fit
76 | b = int((f * fraction - p[1]) / p[0]) # solve the fit for the batch size at the target memory fraction
77 | if None in results: # some sizes failed
78 | i = results.index(None) # first fail index
79 | if b >= batch_sizes[i]: # y intercept above failure point
80 | b = batch_sizes[max(i - 1, 0)] # select prior safe point
81 | if b < 1 or b > 1024: # b outside of safe range
82 | b = batch_size
83 | LOGGER.info(f'{prefix}WARNING ⚠️ CUDA anomaly detected, using default batch-size {batch_size}.')
84 |
85 | fraction = (np.polyval(p, b) + r + a) / t # actual fraction predicted
86 | LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅')
87 | return b
88 | except Exception as e:
89 | LOGGER.warning(f'{prefix}WARNING ⚠️ error detected: {e}, using default batch-size {batch_size}.')
90 | return batch_size
91 |
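This module is the machinery behind `batch: -1` in default.yaml. A usage sketch, assuming a CUDA device and example weights:

# Sketch: estimating a batch size that targets ~67% of free CUDA memory.
from ultralytics import YOLO
from ultralytics.yolo.utils.autobatch import check_train_batch_size

model = YOLO('yolov8n.pt').model.cuda()  # on CPU, autobatch falls back to the default of 16
batch = check_train_batch_size(model, imgsz=640, amp=True)
print(f'AutoBatch suggests batch={batch}')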
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/benchmarks.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | """
3 | Benchmark YOLO model formats for speed and accuracy
4 |
5 | Usage:
6 | from ultralytics.yolo.utils.benchmarks import benchmark
7 | benchmark(model='yolov8n.pt', imgsz=160)
8 |
9 | Format | `format=argument` | Model
10 | --- | --- | ---
11 | PyTorch | - | yolov8n.pt
12 | TorchScript | `torchscript` | yolov8n.torchscript
13 | ONNX | `onnx` | yolov8n.onnx
14 | OpenVINO | `openvino` | yolov8n_openvino_model/
15 | TensorRT | `engine` | yolov8n.engine
16 | CoreML | `coreml` | yolov8n.mlmodel
17 | TensorFlow SavedModel | `saved_model` | yolov8n_saved_model/
18 | TensorFlow GraphDef | `pb` | yolov8n.pb
19 | TensorFlow Lite | `tflite` | yolov8n.tflite
20 | TensorFlow Edge TPU | `edgetpu` | yolov8n_edgetpu.tflite
21 | TensorFlow.js | `tfjs` | yolov8n_web_model/
22 | PaddlePaddle | `paddle` | yolov8n_paddle_model/
23 | """
24 |
25 | import platform
26 | import time
27 | from pathlib import Path
28 |
29 | from ultralytics import YOLO
30 | from ultralytics.yolo.engine.exporter import export_formats
31 | from ultralytics.yolo.utils import LINUX, LOGGER, MACOS, ROOT, SETTINGS
32 | from ultralytics.yolo.utils.checks import check_yolo
33 | from ultralytics.yolo.utils.downloads import download
34 | from ultralytics.yolo.utils.files import file_size
35 | from ultralytics.yolo.utils.torch_utils import select_device
36 |
37 |
38 | def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt', imgsz=160, half=False, device='cpu', hard_fail=False):
39 | import pandas as pd
40 | pd.options.display.max_columns = 10
41 | pd.options.display.width = 120
42 | device = select_device(device, verbose=False)
43 | if isinstance(model, (str, Path)):
44 | model = YOLO(model)
45 |
46 | y = []
47 | t0 = time.time()
48 | for i, (name, format, suffix, cpu, gpu) in export_formats().iterrows(): # index, (name, format, suffix, CPU, GPU)
49 | emoji, filename = '❌', None # export defaults
50 | try:
51 | assert i != 9 or LINUX, 'Edge TPU export only supported on Linux'
52 | if i == 10:
53 | assert MACOS or LINUX, 'TF.js export only supported on macOS and Linux'
54 | if 'cpu' in device.type:
55 | assert cpu, 'inference not supported on CPU'
56 | if 'cuda' in device.type:
57 | assert gpu, 'inference not supported on GPU'
58 |
59 | # Export
60 | if format == '-':
61 | filename = model.ckpt_path or model.cfg
62 | export = model # PyTorch format
63 | else:
64 | filename = model.export(imgsz=imgsz, format=format, half=half, device=device) # all others
65 | export = YOLO(filename, task=model.task)
66 | assert suffix in str(filename), 'export failed'
67 | emoji = '❎' # indicates export succeeded
68 |
69 | # Predict
70 | assert i not in (9, 10), 'inference not supported' # Edge TPU and TF.js are unsupported
71 | assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13' # CoreML
72 | if not (ROOT / 'assets/bus.jpg').exists():
73 | download(url='https://ultralytics.com/images/bus.jpg', dir=ROOT / 'assets')
74 | export.predict(ROOT / 'assets/bus.jpg', imgsz=imgsz, device=device, half=half)
75 |
76 | # Validate
77 | if model.task == 'detect':
78 | data, key = 'coco8.yaml', 'metrics/mAP50-95(B)'
79 | elif model.task == 'segment':
80 | data, key = 'coco8-seg.yaml', 'metrics/mAP50-95(M)'
81 | elif model.task == 'classify':
82 | data, key = 'imagenet100', 'metrics/accuracy_top5'
83 | elif model.task == 'pose':
84 | data, key = 'coco8-pose.yaml', 'metrics/mAP50-95(P)'
85 |
86 | results = export.val(data=data, batch=1, imgsz=imgsz, plots=False, device=device, half=half, verbose=False)
87 | metric, speed = results.results_dict[key], results.speed['inference']
88 | y.append([name, '✅', round(file_size(filename), 1), round(metric, 4), round(speed, 2)])
89 | except Exception as e:
90 | if hard_fail:
91 | assert type(e) is AssertionError, f'Benchmark hard_fail for {name}: {e}'
92 | LOGGER.warning(f'ERROR ❌️ Benchmark failure for {name}: {e}')
93 | y.append([name, emoji, round(file_size(filename), 1), None, None]) # mAP, t_inference
94 |
95 | # Print results
96 | check_yolo(device=device) # print system info
97 | df = pd.DataFrame(y, columns=['Format', 'Status❔', 'Size (MB)', key, 'Inference time (ms/im)'])
98 |
99 | name = Path(model.ckpt_path).name
100 | s = f'\nBenchmarks complete for {name} on {data} at imgsz={imgsz} ({time.time() - t0:.2f}s)\n{df}\n'
101 | LOGGER.info(s)
102 | with open('benchmarks.log', 'a', errors='ignore', encoding='utf-8') as f:
103 | f.write(s)
104 |
105 | if hard_fail and isinstance(hard_fail, float):
106 | metrics = df[key].array # values to compare to floor
107 | floor = hard_fail # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n
108 | assert all(x > floor for x in metrics if pd.notna(x)), f'HARD FAIL: one or more metric(s) < floor {floor}'
109 |
110 | return df
111 |
112 |
113 | if __name__ == '__main__':
114 | benchmark()
115 |
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/__init__.py:
--------------------------------------------------------------------------------
1 | from .base import add_integration_callbacks, default_callbacks, get_default_callbacks
2 |
3 | __all__ = 'add_integration_callbacks', 'default_callbacks', 'get_default_callbacks'
4 |
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/utils/callbacks/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/__pycache__/base.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/utils/callbacks/__pycache__/base.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/__pycache__/clearml.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/utils/callbacks/__pycache__/clearml.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/__pycache__/comet.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/utils/callbacks/__pycache__/comet.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/__pycache__/hub.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/utils/callbacks/__pycache__/hub.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/__pycache__/mlflow.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/utils/callbacks/__pycache__/mlflow.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/__pycache__/tensorboard.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/utils/callbacks/__pycache__/tensorboard.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/base.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | """
3 | Base callbacks
4 | """
5 | from collections import defaultdict
6 | from copy import deepcopy
7 |
8 |
9 | # Trainer callbacks ----------------------------------------------------------------------------------------------------
10 | def on_pretrain_routine_start(trainer):
11 | pass
12 |
13 |
14 | def on_pretrain_routine_end(trainer):
15 | pass
16 |
17 |
18 | def on_train_start(trainer):
19 | pass
20 |
21 |
22 | def on_train_epoch_start(trainer):
23 | pass
24 |
25 |
26 | def on_train_batch_start(trainer):
27 | pass
28 |
29 |
30 | def optimizer_step(trainer):
31 | pass
32 |
33 |
34 | def on_before_zero_grad(trainer):
35 | pass
36 |
37 |
38 | def on_train_batch_end(trainer):
39 | pass
40 |
41 |
42 | def on_train_epoch_end(trainer):
43 | pass
44 |
45 |
46 | def on_fit_epoch_end(trainer):
47 | pass
48 |
49 |
50 | def on_model_save(trainer):
51 | pass
52 |
53 |
54 | def on_train_end(trainer):
55 | pass
56 |
57 |
58 | def on_params_update(trainer):
59 | pass
60 |
61 |
62 | def teardown(trainer):
63 | pass
64 |
65 |
66 | # Validator callbacks --------------------------------------------------------------------------------------------------
67 | def on_val_start(validator):
68 | pass
69 |
70 |
71 | def on_val_batch_start(validator):
72 | pass
73 |
74 |
75 | def on_val_batch_end(validator):
76 | pass
77 |
78 |
79 | def on_val_end(validator):
80 | pass
81 |
82 |
83 | # Predictor callbacks --------------------------------------------------------------------------------------------------
84 | def on_predict_start(predictor):
85 | pass
86 |
87 |
88 | def on_predict_batch_start(predictor):
89 | pass
90 |
91 |
92 | def on_predict_batch_end(predictor):
93 | pass
94 |
95 |
96 | def on_predict_postprocess_end(predictor):
97 | pass
98 |
99 |
100 | def on_predict_end(predictor):
101 | pass
102 |
103 |
104 | # Exporter callbacks ---------------------------------------------------------------------------------------------------
105 | def on_export_start(exporter):
106 | pass
107 |
108 |
109 | def on_export_end(exporter):
110 | pass
111 |
112 |
113 | default_callbacks = {
114 | # Run in trainer
115 | 'on_pretrain_routine_start': [on_pretrain_routine_start],
116 | 'on_pretrain_routine_end': [on_pretrain_routine_end],
117 | 'on_train_start': [on_train_start],
118 | 'on_train_epoch_start': [on_train_epoch_start],
119 | 'on_train_batch_start': [on_train_batch_start],
120 | 'optimizer_step': [optimizer_step],
121 | 'on_before_zero_grad': [on_before_zero_grad],
122 | 'on_train_batch_end': [on_train_batch_end],
123 | 'on_train_epoch_end': [on_train_epoch_end],
124 | 'on_fit_epoch_end': [on_fit_epoch_end], # fit = train + val
125 | 'on_model_save': [on_model_save],
126 | 'on_train_end': [on_train_end],
127 | 'on_params_update': [on_params_update],
128 | 'teardown': [teardown],
129 |
130 | # Run in validator
131 | 'on_val_start': [on_val_start],
132 | 'on_val_batch_start': [on_val_batch_start],
133 | 'on_val_batch_end': [on_val_batch_end],
134 | 'on_val_end': [on_val_end],
135 |
136 | # Run in predictor
137 | 'on_predict_start': [on_predict_start],
138 | 'on_predict_batch_start': [on_predict_batch_start],
139 | 'on_predict_postprocess_end': [on_predict_postprocess_end],
140 | 'on_predict_batch_end': [on_predict_batch_end],
141 | 'on_predict_end': [on_predict_end],
142 |
143 | # Run in exporter
144 | 'on_export_start': [on_export_start],
145 | 'on_export_end': [on_export_end]}
146 |
147 |
148 | def get_default_callbacks():
149 | return defaultdict(list, deepcopy(default_callbacks))
150 |
151 |
152 | def add_integration_callbacks(instance):
153 | from .clearml import callbacks as clearml_callbacks
154 | from .comet import callbacks as comet_callbacks
155 | from .hub import callbacks as hub_callbacks
156 | from .mlflow import callbacks as mf_callbacks
157 | from .tensorboard import callbacks as tb_callbacks
158 |
159 | for x in clearml_callbacks, comet_callbacks, hub_callbacks, tb_callbacks, mf_callbacks:
160 | for k, v in x.items():
161 | if v not in instance.callbacks[k]: # prevent duplicate callbacks addition
162 | instance.callbacks[k].append(v) # callback[name].append(func)
163 |
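Because the registry is a plain mapping of event name to a list of functions, user code can hook any event above. A sketch, assuming the `add_callback` helper on the `YOLO` model class in this repo (appending to `callbacks[event]` directly is equivalent):

# Sketch: registering a custom trainer callback.
from ultralytics import YOLO

def log_epoch(trainer):  # callbacks receive the trainer, like the stubs above
    print(f'epoch {trainer.epoch} finished')

model = YOLO('yolov8n.pt')
model.add_callback('on_train_epoch_end', log_epoch)
model.train(data='coco128.yaml', epochs=1)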
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/clearml.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | import re
3 |
4 | import matplotlib.image as mpimg
5 | import matplotlib.pyplot as plt
6 |
7 | from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING
8 | from ultralytics.yolo.utils.torch_utils import get_flops, get_num_params
9 |
10 | try:
11 | import clearml
12 | from clearml import Task
13 | from clearml.binding.frameworks.pytorch_bind import PatchPyTorchModelIO
14 | from clearml.binding.matplotlib_bind import PatchedMatplotlib
15 |
16 | assert hasattr(clearml, '__version__') # verify package is not directory
17 | assert not TESTS_RUNNING # do not log pytest
18 | except (ImportError, AssertionError):
19 | clearml = None
20 |
21 |
22 | def _log_debug_samples(files, title='Debug Samples'):
23 | """
24 | Log files (images) as debug samples in the ClearML task.
25 |
26 | Args:
27 | files (List[PosixPath]): A list of file paths in PosixPath format.
28 | title (str): A title that groups together images with the same values.
29 | """
30 | task = Task.current_task()
31 | if task:
32 | for f in files:
33 | if f.exists():
34 | it = re.search(r'_batch(\d+)', f.name)
35 | iteration = int(it.groups()[0]) if it else 0
36 | task.get_logger().report_image(title=title,
37 | series=f.name.replace(it.group(), ''),
38 | local_path=str(f),
39 | iteration=iteration)
40 |
41 |
42 | def _log_plot(title, plot_path):
43 | """
44 | Log an image as a plot in the plot section of ClearML.
45 |
46 | Args:
47 | title (str): Title of the plot.
48 | plot_path (PosixPath or str): Path to the saved image file.
49 | """
50 | img = mpimg.imread(plot_path)
51 | fig = plt.figure()
52 | ax = fig.add_axes([0, 0, 1, 1], frameon=False, aspect='auto', xticks=[], yticks=[]) # no ticks
53 | ax.imshow(img)
54 |
55 | Task.current_task().get_logger().report_matplotlib_figure(title, '', figure=fig, report_interactive=False)
56 |
57 |
58 | def on_pretrain_routine_start(trainer):
59 | try:
60 | task = Task.current_task()
61 | if task:
62 | # Make sure the automatic pytorch and matplotlib bindings are disabled!
63 | # We are logging these plots and model files manually in the integration
64 | PatchPyTorchModelIO.update_current_task(None)
65 | PatchedMatplotlib.update_current_task(None)
66 | else:
67 | task = Task.init(project_name=trainer.args.project or 'YOLOv8',
68 | task_name=trainer.args.name,
69 | tags=['YOLOv8'],
70 | output_uri=True,
71 | reuse_last_task_id=False,
72 | auto_connect_frameworks={
73 | 'pytorch': False,
74 | 'matplotlib': False})
75 | LOGGER.warning('ClearML initialized a new task. If you want to run remotely, '
76 | 'please add clearml-init and connect your arguments before initializing YOLO.')
77 | task.connect(vars(trainer.args), name='General')
78 | except Exception as e:
79 | LOGGER.warning(f'WARNING ⚠️ ClearML installed but not initialized correctly, not logging this run. {e}')
80 |
81 |
82 | def on_train_epoch_end(trainer):
83 | if trainer.epoch == 1 and Task.current_task():
84 | _log_debug_samples(sorted(trainer.save_dir.glob('train_batch*.jpg')), 'Mosaic')
85 |
86 |
87 | def on_fit_epoch_end(trainer):
88 | task = Task.current_task()
89 | if task:
90 | # You should have access to the validation bboxes under jdict
91 | task.get_logger().report_scalar(title='Epoch Time',
92 | series='Epoch Time',
93 | value=trainer.epoch_time,
94 | iteration=trainer.epoch)
95 | if trainer.epoch == 0:
96 | model_info = {
97 | 'model/parameters': get_num_params(trainer.model),
98 | 'model/GFLOPs': round(get_flops(trainer.model), 3),
99 | 'model/speed(ms)': round(trainer.validator.speed['inference'], 3)}
100 | for k, v in model_info.items():
101 | task.get_logger().report_single_value(k, v)
102 |
103 |
104 | def on_val_end(validator):
105 | if Task.current_task():
106 | # Log val_labels and val_pred
107 | _log_debug_samples(sorted(validator.save_dir.glob('val*.jpg')), 'Validation')
108 |
109 |
110 | def on_train_end(trainer):
111 | task = Task.current_task()
112 | if task:
113 | # Log final results, CM matrix + PR plots
114 | files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))]
115 | files = [(trainer.save_dir / f) for f in files if (trainer.save_dir / f).exists()] # filter
116 | for f in files:
117 | _log_plot(title=f.stem, plot_path=f)
118 | # Report final metrics
119 | for k, v in trainer.validator.metrics.results_dict.items():
120 | task.get_logger().report_single_value(k, v)
121 | # Log the final model
122 | task.update_output_model(model_path=str(trainer.best), model_name=trainer.args.name, auto_delete_file=False)
123 |
124 |
125 | callbacks = {
126 | 'on_pretrain_routine_start': on_pretrain_routine_start,
127 | 'on_train_epoch_end': on_train_epoch_end,
128 | 'on_fit_epoch_end': on_fit_epoch_end,
129 | 'on_val_end': on_val_end,
130 | 'on_train_end': on_train_end} if clearml else {}
131 |
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/hub.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import json
4 | from time import time
5 |
6 | from ultralytics.hub.utils import PREFIX, traces
7 | from ultralytics.yolo.utils import LOGGER
8 | from ultralytics.yolo.utils.torch_utils import get_flops, get_num_params
9 |
10 |
11 | def on_pretrain_routine_end(trainer):
12 | session = getattr(trainer, 'hub_session', None)
13 | if session:
14 | # Start timer for upload rate limit
15 | LOGGER.info(f'{PREFIX}View model at https://hub.ultralytics.com/models/{session.model_id} 🚀')
16 | session.timers = {'metrics': time(), 'ckpt': time()} # start timer on session.rate_limit
17 |
18 |
19 | def on_fit_epoch_end(trainer):
20 | session = getattr(trainer, 'hub_session', None)
21 | if session:
22 | # Upload metrics after val end
23 | all_plots = {**trainer.label_loss_items(trainer.tloss, prefix='train'), **trainer.metrics}
24 | if trainer.epoch == 0:
25 | model_info = {
26 | 'model/parameters': get_num_params(trainer.model),
27 | 'model/GFLOPs': round(get_flops(trainer.model), 3),
28 | 'model/speed(ms)': round(trainer.validator.speed['inference'], 3)}
29 | all_plots = {**all_plots, **model_info}
30 | session.metrics_queue[trainer.epoch] = json.dumps(all_plots)
31 | if time() - session.timers['metrics'] > session.rate_limits['metrics']:
32 | session.upload_metrics()
33 | session.timers['metrics'] = time() # reset timer
34 | session.metrics_queue = {} # reset queue
35 |
36 |
37 | def on_model_save(trainer):
38 | session = getattr(trainer, 'hub_session', None)
39 | if session:
40 | # Upload checkpoints with rate limiting
41 | is_best = trainer.best_fitness == trainer.fitness
42 | if time() - session.timers['ckpt'] > session.rate_limits['ckpt']:
43 | LOGGER.info(f'{PREFIX}Uploading checkpoint https://hub.ultralytics.com/models/{session.model_id}')
44 | session.upload_model(trainer.epoch, trainer.last, is_best)
45 | session.timers['ckpt'] = time() # reset timer
46 |
47 |
48 | def on_train_end(trainer):
49 | session = getattr(trainer, 'hub_session', None)
50 | if session:
51 | # Upload final model and metrics with exponential backoff
52 | LOGGER.info(f'{PREFIX}Syncing final model...')
53 | session.upload_model(trainer.epoch, trainer.best, map=trainer.metrics.get('metrics/mAP50-95(B)', 0), final=True)
54 | session.alive = False # stop heartbeats
55 | LOGGER.info(f'{PREFIX}Done ✅\n'
56 | f'{PREFIX}View model at https://hub.ultralytics.com/models/{session.model_id} 🚀')
57 |
58 |
59 | def on_train_start(trainer):
60 | traces(trainer.args, traces_sample_rate=1.0)
61 |
62 |
63 | def on_val_start(validator):
64 | traces(validator.args, traces_sample_rate=1.0)
65 |
66 |
67 | def on_predict_start(predictor):
68 | traces(predictor.args, traces_sample_rate=1.0)
69 |
70 |
71 | def on_export_start(exporter):
72 | traces(exporter.args, traces_sample_rate=1.0)
73 |
74 |
75 | callbacks = {
76 | 'on_pretrain_routine_end': on_pretrain_routine_end,
77 | 'on_fit_epoch_end': on_fit_epoch_end,
78 | 'on_model_save': on_model_save,
79 | 'on_train_end': on_train_end,
80 | 'on_train_start': on_train_start,
81 | 'on_val_start': on_val_start,
82 | 'on_predict_start': on_predict_start,
83 | 'on_export_start': on_export_start}
84 |
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/mlflow.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import os
4 | import re
5 | from pathlib import Path
6 |
7 | from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING, colorstr
8 |
9 | try:
10 | import mlflow
11 |
12 | assert not TESTS_RUNNING # do not log pytest
13 | assert hasattr(mlflow, '__version__') # verify package is not directory
14 | except (ImportError, AssertionError):
15 | mlflow = None
16 |
17 |
18 | def on_pretrain_routine_end(trainer):
19 | global mlflow, run, run_id, experiment_name
20 |
21 | if os.environ.get('MLFLOW_TRACKING_URI') is None:
22 | mlflow = None
23 |
24 | if mlflow:
25 | mlflow_location = os.environ['MLFLOW_TRACKING_URI'] # "http://192.168.xxx.xxx:5000"
26 | mlflow.set_tracking_uri(mlflow_location)
27 |
28 | experiment_name = trainer.args.project or '/Shared/YOLOv8'
29 | experiment = mlflow.get_experiment_by_name(experiment_name)
30 | if experiment is None:
31 | mlflow.create_experiment(experiment_name)
32 | mlflow.set_experiment(experiment_name)
33 |
34 | prefix = colorstr('MLFlow: ')
35 | try:
36 | run, active_run = mlflow, mlflow.active_run()
37 | if not active_run:
38 | active_run = mlflow.start_run(experiment_id=experiment.experiment_id)
39 | run_id = active_run.info.run_id
40 | LOGGER.info(f'{prefix}Using run_id({run_id}) at {mlflow_location}')
41 | run.log_params(vars(trainer.model.args))
42 | except Exception as err:
43 | LOGGER.error(f'{prefix}Failed to initialize - {repr(err)}')
44 | LOGGER.warning(f'{prefix}Continuing without MLflow')
45 |
46 |
47 | def on_fit_epoch_end(trainer):
48 | if mlflow:
49 | metrics_dict = {f"{re.sub('[()]', '', k)}": float(v) for k, v in trainer.metrics.items()}
50 | run.log_metrics(metrics=metrics_dict, step=trainer.epoch)
51 |
52 |
53 | def on_model_save(trainer):
54 | if mlflow:
55 | run.log_artifact(trainer.last)
56 |
57 |
58 | def on_train_end(trainer):
59 | if mlflow:
60 | root_dir = Path(__file__).resolve().parents[3]
61 | run.log_artifact(trainer.best)
62 | model_uri = f'runs:/{run_id}/'
63 | run.register_model(model_uri, experiment_name)
64 | run.pyfunc.log_model(artifact_path=experiment_name,
65 | code_path=[str(root_dir)],
66 | artifacts={'model_path': str(trainer.save_dir)},
67 | python_model=run.pyfunc.PythonModel())
68 |
69 |
70 | callbacks = {
71 | 'on_pretrain_routine_end': on_pretrain_routine_end,
72 | 'on_fit_epoch_end': on_fit_epoch_end,
73 | 'on_model_save': on_model_save,
74 | 'on_train_end': on_train_end} if mlflow else {}
75 |
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/callbacks/tensorboard.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING, colorstr
3 |
4 | try:
5 | from torch.utils.tensorboard import SummaryWriter
6 |
7 | assert not TESTS_RUNNING # do not log pytest
8 | except (ImportError, AssertionError):
9 | SummaryWriter = None
10 |
11 | writer = None # TensorBoard SummaryWriter instance
12 |
13 |
14 | def _log_scalars(scalars, step=0):
15 | if writer:
16 | for k, v in scalars.items():
17 | writer.add_scalar(k, v, step)
18 |
19 |
20 | def on_pretrain_routine_start(trainer):
21 | if SummaryWriter:
22 | try:
23 | global writer
24 | writer = SummaryWriter(str(trainer.save_dir))
25 | prefix = colorstr('TensorBoard: ')
26 | LOGGER.info(f"{prefix}Start with 'tensorboard --logdir {trainer.save_dir}', view at http://localhost:6006/")
27 | except Exception as e:
28 | LOGGER.warning(f'WARNING ⚠️ TensorBoard not initialized correctly, not logging this run. {e}')
29 |
30 |
31 | def on_batch_end(trainer):
32 | _log_scalars(trainer.label_loss_items(trainer.tloss, prefix='train'), trainer.epoch + 1)
33 |
34 |
35 | def on_fit_epoch_end(trainer):
36 | _log_scalars(trainer.metrics, trainer.epoch + 1)
37 |
38 |
39 | callbacks = {
40 | 'on_pretrain_routine_start': on_pretrain_routine_start,
41 | 'on_fit_epoch_end': on_fit_epoch_end,
42 | 'on_batch_end': on_batch_end}
43 |
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/dist.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import os
4 | import re
5 | import shutil
6 | import socket
7 | import sys
8 | import tempfile
9 | from pathlib import Path
10 |
11 | from . import USER_CONFIG_DIR
12 | from .torch_utils import TORCH_1_9
13 |
14 |
15 | def find_free_network_port() -> int:
16 | """Finds a free port on localhost.
17 |
18 | It is useful in single-node training when we don't want to connect to a real main node but have to set the
19 | `MASTER_PORT` environment variable.
20 | """
21 | with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
22 | s.bind(('127.0.0.1', 0))
23 | return s.getsockname()[1] # port
24 |
25 |
26 | def generate_ddp_file(trainer):
27 | module, name = f'{trainer.__class__.__module__}.{trainer.__class__.__name__}'.rsplit('.', 1)
28 |
29 | content = f'''cfg = {vars(trainer.args)} \nif __name__ == "__main__":
30 | from {module} import {name}
31 |
32 | trainer = {name}(cfg=cfg)
33 | trainer.train()'''
34 | (USER_CONFIG_DIR / 'DDP').mkdir(exist_ok=True)
35 | with tempfile.NamedTemporaryFile(prefix='_temp_',
36 | suffix=f'{id(trainer)}.py',
37 | mode='w+',
38 | encoding='utf-8',
39 | dir=USER_CONFIG_DIR / 'DDP',
40 | delete=False) as file:
41 | file.write(content)
42 | return file.name
43 |
44 |
45 | def generate_ddp_command(world_size, trainer):
46 | import __main__ # noqa local import to avoid https://github.com/Lightning-AI/lightning/issues/15218
47 | if not trainer.resume:
48 | shutil.rmtree(trainer.save_dir) # remove the save_dir
49 | file = str(Path(sys.argv[0]).resolve())
50 | safe_pattern = re.compile(r'^[a-zA-Z0-9_. /\\-]{1,128}$') # allowed characters and maximum of 128 characters
51 | if not (safe_pattern.match(file) and Path(file).exists() and file.endswith('.py')): # using CLI
52 | file = generate_ddp_file(trainer)
53 | dist_cmd = 'torch.distributed.run' if TORCH_1_9 else 'torch.distributed.launch'
54 | port = find_free_network_port()
55 | exclude_args = ['save_dir']
56 | args = [f'{k}={v}' for k, v in vars(trainer.args).items() if k not in exclude_args]
57 | cmd = [sys.executable, '-m', dist_cmd, '--nproc_per_node', f'{world_size}', '--master_port', f'{port}', file] + args
58 | return cmd, file
59 |
60 |
61 | def ddp_cleanup(trainer, file):
62 | # delete temp file if created
63 | if f'{id(trainer)}.py' in file: # if temp_file suffix in file
64 | os.remove(file)
65 |
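For reference, the launch command assembled by `generate_ddp_command` has roughly this shape (all values below are illustrative placeholders):

# Sketch: the shape of the returned (cmd, file) command list.
import subprocess
import sys

cmd = [sys.executable, '-m', 'torch.distributed.run',
       '--nproc_per_node', '2', '--master_port', '51234',
       'train.py', 'epochs=100', 'batch=16']
# subprocess.run(cmd, check=True)  # would spawn 2 DDP workers running train.py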
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/files.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import contextlib
4 | import glob
5 | import os
6 | from datetime import datetime
7 | from pathlib import Path
8 |
9 |
10 | class WorkingDirectory(contextlib.ContextDecorator):
11 | # Usage: @WorkingDirectory(dir) decorator or 'with WorkingDirectory(dir):' context manager
12 | def __init__(self, new_dir):
13 | self.dir = new_dir # new dir
14 | self.cwd = Path.cwd().resolve() # current dir
15 |
16 | def __enter__(self):
17 | os.chdir(self.dir)
18 |
19 | def __exit__(self, exc_type, exc_val, exc_tb):
20 | os.chdir(self.cwd)
21 |
22 |
23 | def increment_path(path, exist_ok=False, sep='', mkdir=False):
24 | """
25 | Increments a file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.
26 |
27 | If the path exists and exist_ok is not set to True, the path will be incremented by appending a number and sep to
28 | the end of the path. If the path is a file, the file extension will be preserved. If the path is a directory, the
29 | number will be appended directly to the end of the path. If mkdir is set to True, the path will be created as a
30 | directory if it does not already exist.
31 |
32 | Args:
33 | path (str or pathlib.Path): Path to increment.
34 | exist_ok (bool, optional): If True, the path will not be incremented and will be returned as-is. Defaults to False.
35 | sep (str, optional): Separator to use between the path and the incrementation number. Defaults to an empty string.
36 | mkdir (bool, optional): If True, the path will be created as a directory if it does not exist. Defaults to False.
37 |
38 | Returns:
39 | pathlib.Path: Incremented path.
40 | """
41 | path = Path(path) # os-agnostic
42 | if path.exists() and not exist_ok:
43 | path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '')
44 |
45 | # Find the first free incremented path
46 | for n in range(2, 9999):
47 | p = f'{path}{sep}{n}{suffix}' # increment path
48 | if not os.path.exists(p):
49 | break
50 | path = Path(p)
51 |
52 | if mkdir:
53 | path.mkdir(parents=True, exist_ok=True) # make directory
54 |
55 | return path
56 |
57 |
58 | def file_age(path=__file__):
59 | # Return days since last file update
60 | dt = (datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime)) # delta
61 | return dt.days # + dt.seconds / 86400 # fractional days
62 |
63 |
64 | def file_date(path=__file__):
65 | # Return human-readable file modification date, i.e. '2021-3-26'
66 | t = datetime.fromtimestamp(Path(path).stat().st_mtime)
67 | return f'{t.year}-{t.month}-{t.day}'
68 |
69 |
70 | def file_size(path):
71 | # Return file/dir size (MB)
72 | if isinstance(path, (str, Path)):
73 | mb = 1 << 20 # bytes to MiB (1024 ** 2)
74 | path = Path(path)
75 | if path.is_file():
76 | return path.stat().st_size / mb
77 | elif path.is_dir():
78 | return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb
79 | return 0.0
80 |
81 |
82 | def get_latest_run(search_dir='.'):
83 | # Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
84 | last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
85 | return max(last_list, key=os.path.getctime) if last_list else ''
86 |
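A quick sketch of `increment_path` semantics (paths are examples):

# Sketch: run-directory allocation with increment_path.
from ultralytics.yolo.utils.files import increment_path

p1 = increment_path('runs/exp', mkdir=True)  # -> runs/exp (created if free)
p2 = increment_path('runs/exp', mkdir=True)  # -> runs/exp2, the first free increment
print(p1, p2)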
--------------------------------------------------------------------------------
/ultralytics/yolo/utils/loss.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import torch
4 | import torch.nn as nn
5 | import torch.nn.functional as F
6 |
7 | from .metrics import bbox_iou
8 | from .tal import bbox2dist
9 |
10 |
11 | class VarifocalLoss(nn.Module):
12 | # Varifocal loss by Zhang et al. https://arxiv.org/abs/2008.13367
13 | def __init__(self):
14 | super().__init__()
15 |
16 | def forward(self, pred_score, gt_score, label, alpha=0.75, gamma=2.0):
17 | weight = alpha * pred_score.sigmoid().pow(gamma) * (1 - label) + gt_score * label
18 | with torch.cuda.amp.autocast(enabled=False):
19 | loss = (F.binary_cross_entropy_with_logits(pred_score.float(), gt_score.float(), reduction='none') *
20 | weight).sum()
21 | return loss
22 |
23 |
24 | class BboxLoss(nn.Module):
25 |
26 | def __init__(self, reg_max, use_dfl=False):
27 | super().__init__()
28 | self.reg_max = reg_max
29 | self.use_dfl = use_dfl
30 |
31 | def forward(self, pred_dist, pred_bboxes, anchor_points, target_bboxes, target_scores, target_scores_sum, fg_mask):
32 | # IoU loss
33 | weight = torch.masked_select(target_scores.sum(-1), fg_mask).unsqueeze(-1)
34 | iou = bbox_iou(pred_bboxes[fg_mask], target_bboxes[fg_mask], xywh=False, CIoU=True)
35 | loss_iou = ((1.0 - iou) * weight).sum() / target_scores_sum
36 |
37 | # DFL loss
38 | if self.use_dfl:
39 | target_ltrb = bbox2dist(anchor_points, target_bboxes, self.reg_max)
40 | loss_dfl = self._df_loss(pred_dist[fg_mask].view(-1, self.reg_max + 1), target_ltrb[fg_mask]) * weight
41 | loss_dfl = loss_dfl.sum() / target_scores_sum
42 | else:
43 | loss_dfl = torch.tensor(0.0).to(pred_dist.device)
44 |
45 | return loss_iou, loss_dfl
46 |
47 | @staticmethod
48 | def _df_loss(pred_dist, target):
49 | # Return sum of left and right DFL losses
50 | # Distribution Focal Loss (DFL) proposed in Generalized Focal Loss https://ieeexplore.ieee.org/document/9792391
51 | tl = target.long() # target left
52 | tr = tl + 1 # target right
53 | wl = tr - target # weight left
54 | wr = 1 - wl # weight right
55 | return (F.cross_entropy(pred_dist, tl.view(-1), reduction='none').view(tl.shape) * wl +
56 | F.cross_entropy(pred_dist, tr.view(-1), reduction='none').view(tl.shape) * wr).mean(-1, keepdim=True)
57 |
58 |
59 | class KeypointLoss(nn.Module):
60 |
61 | def __init__(self, sigmas) -> None:
62 | super().__init__()
63 | self.sigmas = sigmas
64 |
65 | def forward(self, pred_kpts, gt_kpts, kpt_mask, area):
66 | d = (pred_kpts[..., 0] - gt_kpts[..., 0]) ** 2 + (pred_kpts[..., 1] - gt_kpts[..., 1]) ** 2
67 | kpt_loss_factor = (torch.sum(kpt_mask != 0) + torch.sum(kpt_mask == 0)) / (torch.sum(kpt_mask != 0) + 1e-9)
68 | # e = d / (2 * (area * self.sigmas) ** 2 + 1e-9) # from formula
69 | e = d / (2 * self.sigmas) ** 2 / (area + 1e-9) / 2 # from cocoeval
70 | return kpt_loss_factor * ((1 - torch.exp(-e)) * kpt_mask).mean()
71 |
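As a standalone worked sketch of the weighting inside `BboxLoss._df_loss` (not part of the library): a continuous bin target is supervised against its two neighboring integer bins, each weighted by the target's proximity to that bin.

# Sketch: DFL left/right weighting for a single target, assuming reg_max=15 (16 bins).
import torch
import torch.nn.functional as F

pred_dist = torch.randn(1, 16)  # logits over the 16 distance bins
target = torch.tensor([3.7])    # continuous target in bin units
tl = target.long()              # left bin = 3
wl = (tl + 1) - target          # left weight = 0.3 (target sits 0.7 past bin 3)
wr = 1 - wl                     # right weight = 0.7
loss = (F.cross_entropy(pred_dist, tl, reduction='none') * wl +
        F.cross_entropy(pred_dist, tl + 1, reduction='none') * wr)
print(loss)  # supervision interpolated between bins 3 and 4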
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from ultralytics.yolo.v8 import classify, detect, pose, segment
4 |
5 | __all__ = 'classify', 'segment', 'detect', 'pose'
6 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/v8/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/classify/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from ultralytics.yolo.v8.classify.predict import ClassificationPredictor, predict
4 | from ultralytics.yolo.v8.classify.train import ClassificationTrainer, train
5 | from ultralytics.yolo.v8.classify.val import ClassificationValidator, val
6 |
7 | __all__ = 'ClassificationPredictor', 'predict', 'ClassificationTrainer', 'train', 'ClassificationValidator', 'val'
8 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/classify/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/v8/classify/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/classify/__pycache__/predict.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/v8/classify/__pycache__/predict.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/classify/__pycache__/train.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/v8/classify/__pycache__/train.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/classify/__pycache__/val.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/v8/classify/__pycache__/val.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/classify/predict.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import torch
4 |
5 | from ultralytics.yolo.engine.predictor import BasePredictor
6 | from ultralytics.yolo.engine.results import Results
7 | from ultralytics.yolo.utils import DEFAULT_CFG, ROOT
8 |
9 |
10 | class ClassificationPredictor(BasePredictor):
11 |
12 | def preprocess(self, img):
13 | img = (img if isinstance(img, torch.Tensor) else torch.from_numpy(img)).to(self.model.device)
14 | return img.half() if self.model.fp16 else img.float() # uint8 to fp16/32
15 |
16 | def postprocess(self, preds, img, orig_imgs):
17 | results = []
18 | for i, pred in enumerate(preds):
19 | orig_img = orig_imgs[i] if isinstance(orig_imgs, list) else orig_imgs
20 | path, _, _, _, _ = self.batch
21 | img_path = path[i] if isinstance(path, list) else path
22 | results.append(Results(orig_img=orig_img, path=img_path, names=self.model.names, probs=pred))
23 |
24 | return results
25 |
26 |
27 | def predict(cfg=DEFAULT_CFG, use_python=False):
28 | model = cfg.model or 'yolov8n-cls.pt' # or "resnet18"
29 | source = cfg.source if cfg.source is not None else ROOT / 'assets' if (ROOT / 'assets').exists() \
30 | else 'https://ultralytics.com/images/bus.jpg'
31 |
32 | args = dict(model=model, source=source)
33 | if use_python:
34 | from ultralytics import YOLO
35 | YOLO(model)(**args)
36 | else:
37 | predictor = ClassificationPredictor(overrides=args)
38 | predictor.predict_cli()
39 |
40 |
41 | if __name__ == '__main__':
42 | predict()
43 |
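A minimal usage sketch of the same path predict() takes with use_python=True; the weight name and image URL are the defaults shown above, and the weight file is assumed to be available locally or auto-downloaded by ultralytics.

from ultralytics import YOLO

model = YOLO('yolov8n-cls.pt')
results = model('https://ultralytics.com/images/bus.jpg')
for r in results:
    print(r.probs)  # class probabilities, as stored by Results(probs=pred) above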
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/classify/train.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import torch
4 | import torchvision
5 |
6 | from ultralytics.nn.tasks import ClassificationModel, attempt_load_one_weight
7 | from ultralytics.yolo import v8
8 | from ultralytics.yolo.data import build_classification_dataloader
9 | from ultralytics.yolo.engine.trainer import BaseTrainer
10 | from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, RANK, colorstr
11 | from ultralytics.yolo.utils.torch_utils import is_parallel, strip_optimizer
12 |
13 |
14 | class ClassificationTrainer(BaseTrainer):
15 |
16 | def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
17 | if overrides is None:
18 | overrides = {}
19 | overrides['task'] = 'classify'
20 | super().__init__(cfg, overrides, _callbacks)
21 |
22 | def set_model_attributes(self):
23 | self.model.names = self.data['names']
24 |
25 | def get_model(self, cfg=None, weights=None, verbose=True):
26 | model = ClassificationModel(cfg, nc=self.data['nc'], verbose=verbose and RANK == -1)
27 | if weights:
28 | model.load(weights)
29 |
30 | pretrained = False
31 | for m in model.modules():
32 | if not pretrained and hasattr(m, 'reset_parameters'):
33 | m.reset_parameters()
34 | if isinstance(m, torch.nn.Dropout) and self.args.dropout:
35 | m.p = self.args.dropout # set dropout
36 | for p in model.parameters():
37 | p.requires_grad = True # for training
38 |
39 | # Update defaults
40 | if self.args.imgsz == 640:
41 | self.args.imgsz = 224
42 |
43 | return model
44 |
45 | def setup_model(self):
46 | """
47 | load/create/download model for any task
48 | """
49 | # classification models require special handling
50 |
51 | if isinstance(self.model, torch.nn.Module): # if model is loaded beforehand. No setup needed
52 | return
53 |
54 | model = str(self.model)
55 | # Load a YOLO model locally, from torchvision, or from Ultralytics assets
56 | if model.endswith('.pt'):
57 | self.model, _ = attempt_load_one_weight(model, device='cpu')
58 | for p in self.model.parameters():
59 | p.requires_grad = True # for training
60 | elif model.endswith('.yaml'):
61 | self.model = self.get_model(cfg=model)
62 | elif model in torchvision.models.__dict__:
63 | pretrained = True
64 | self.model = torchvision.models.__dict__[model](weights='IMAGENET1K_V1' if pretrained else None)
65 | else:
66 |             raise FileNotFoundError(f'ERROR: model={model} not found locally or online. Please check model name.')
67 | ClassificationModel.reshape_outputs(self.model, self.data['nc'])
68 |
69 |         return  # don't return ckpt. Classification doesn't support resume
70 |
71 | def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode='train'):
72 | loader = build_classification_dataloader(path=dataset_path,
73 | imgsz=self.args.imgsz,
74 | batch_size=batch_size if mode == 'train' else (batch_size * 2),
75 | augment=mode == 'train',
76 | rank=rank,
77 | workers=self.args.workers)
78 | # Attach inference transforms
79 | if mode != 'train':
80 | if is_parallel(self.model):
81 | self.model.module.transforms = loader.dataset.torch_transforms
82 | else:
83 | self.model.transforms = loader.dataset.torch_transforms
84 | return loader
85 |
86 | def preprocess_batch(self, batch):
87 | batch['img'] = batch['img'].to(self.device)
88 | batch['cls'] = batch['cls'].to(self.device)
89 | return batch
90 |
91 | def progress_string(self):
92 | return ('\n' + '%11s' * (4 + len(self.loss_names))) % \
93 | ('Epoch', 'GPU_mem', *self.loss_names, 'Instances', 'Size')
94 |
95 | def get_validator(self):
96 | self.loss_names = ['loss']
97 | return v8.classify.ClassificationValidator(self.test_loader, self.save_dir)
98 |
99 | def criterion(self, preds, batch):
100 | loss = torch.nn.functional.cross_entropy(preds, batch['cls'], reduction='sum') / self.args.nbs
101 | loss_items = loss.detach()
102 | return loss, loss_items
103 |
104 | # def label_loss_items(self, loss_items=None, prefix="train"):
105 | # """
106 | # Returns a loss dict with labelled training loss items tensor
107 | # """
108 | # # Not needed for classification but necessary for segmentation & detection
109 | # keys = [f"{prefix}/{x}" for x in self.loss_names]
110 | # if loss_items is not None:
111 | # loss_items = [round(float(x), 5) for x in loss_items] # convert tensors to 5 decimal place floats
112 | # return dict(zip(keys, loss_items))
113 | # else:
114 | # return keys
115 |
116 | def label_loss_items(self, loss_items=None, prefix='train'):
117 | """
118 | Returns a loss dict with labelled training loss items tensor
119 | """
120 | # Not needed for classification but necessary for segmentation & detection
121 | keys = [f'{prefix}/{x}' for x in self.loss_names]
122 | if loss_items is None:
123 | return keys
124 | loss_items = [round(float(loss_items), 5)]
125 | return dict(zip(keys, loss_items))
126 |
127 | def resume_training(self, ckpt):
128 | pass
129 |
130 | def final_eval(self):
131 | for f in self.last, self.best:
132 | if f.exists():
133 | strip_optimizer(f) # strip optimizers
134 | # TODO: validate best.pt after training completes
135 | # if f is self.best:
136 | # LOGGER.info(f'\nValidating {f}...')
137 | # self.validator.args.save_json = True
138 | # self.metrics = self.validator(model=f)
139 | # self.metrics.pop('fitness', None)
140 | # self.run_callbacks('on_fit_epoch_end')
141 | LOGGER.info(f"Results saved to {colorstr('bold', self.save_dir)}")
142 |
143 |
144 | def train(cfg=DEFAULT_CFG, use_python=False):
145 | model = cfg.model or 'yolov8n-cls.pt' # or "resnet18"
146 | data = cfg.data or 'mnist160' # or yolo.ClassificationDataset("mnist")
147 | device = cfg.device if cfg.device is not None else ''
148 |
149 | args = dict(model=model, data=data, device=device)
150 | if use_python:
151 | from ultralytics import YOLO
152 | YOLO(model).train(**args)
153 | else:
154 | trainer = ClassificationTrainer(overrides=args)
155 | trainer.train()
156 |
157 |
158 | if __name__ == '__main__':
159 | train()
160 |
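The same training entry point, sketched through the Python API that train(use_python=True) calls above; 'mnist160' is the default dataset name from train() and is assumed to resolve through ultralytics' dataset handling.

from ultralytics import YOLO

model = YOLO('yolov8n-cls.pt')
model.train(data='mnist160')  # same overrides path as ClassificationTrainer(overrides=args).train()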
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/classify/val.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from ultralytics.yolo.data import build_classification_dataloader
4 | from ultralytics.yolo.engine.validator import BaseValidator
5 | from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER
6 | from ultralytics.yolo.utils.metrics import ClassifyMetrics
7 |
8 |
9 | class ClassificationValidator(BaseValidator):
10 |
11 | def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None):
12 | super().__init__(dataloader, save_dir, pbar, args, _callbacks)
13 | self.args.task = 'classify'
14 | self.metrics = ClassifyMetrics()
15 |
16 | def get_desc(self):
17 | return ('%22s' + '%11s' * 2) % ('classes', 'top1_acc', 'top5_acc')
18 |
19 | def init_metrics(self, model):
20 | self.pred = []
21 | self.targets = []
22 |
23 | def preprocess(self, batch):
24 | batch['img'] = batch['img'].to(self.device, non_blocking=True)
25 | batch['img'] = batch['img'].half() if self.args.half else batch['img'].float()
26 | batch['cls'] = batch['cls'].to(self.device)
27 | return batch
28 |
29 | def update_metrics(self, preds, batch):
30 | n5 = min(len(self.model.names), 5)
31 | self.pred.append(preds.argsort(1, descending=True)[:, :n5])
32 | self.targets.append(batch['cls'])
33 |
34 | def finalize_metrics(self, *args, **kwargs):
35 | self.metrics.speed = self.speed
36 | # self.metrics.confusion_matrix = self.confusion_matrix # TODO: classification ConfusionMatrix
37 |
38 | def get_stats(self):
39 | self.metrics.process(self.targets, self.pred)
40 | return self.metrics.results_dict
41 |
42 | def get_dataloader(self, dataset_path, batch_size):
43 | return build_classification_dataloader(path=dataset_path,
44 | imgsz=self.args.imgsz,
45 | batch_size=batch_size,
46 | augment=False,
47 | shuffle=False,
48 | workers=self.args.workers)
49 |
50 | def print_results(self):
51 | pf = '%22s' + '%11.3g' * len(self.metrics.keys) # print format
52 | LOGGER.info(pf % ('all', self.metrics.top1, self.metrics.top5))
53 |
54 |
55 | def val(cfg=DEFAULT_CFG, use_python=False):
56 | model = cfg.model or 'yolov8n-cls.pt' # or "resnet18"
57 | data = cfg.data or 'mnist160'
58 |
59 | args = dict(model=model, data=data)
60 | if use_python:
61 | from ultralytics import YOLO
62 | YOLO(model).val(**args)
63 | else:
64 | validator = ClassificationValidator(args=args)
65 | validator(model=args['model'])
66 |
67 |
68 | if __name__ == '__main__':
69 | val()
70 |
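A minimal validation sketch mirroring val(use_python=True) above; the returned object is assumed to carry the same ClassifyMetrics results that print_results() logs.

from ultralytics import YOLO

model = YOLO('yolov8n-cls.pt')
metrics = model.val(data='mnist160')  # runs ClassificationValidator under the hood
print(metrics.top1, metrics.top5)     # accuracy values computed by ClassifyMetrics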
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/detect/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from .predict import DetectionPredictor, predict
4 | from .train import DetectionTrainer, train
5 | from .val import DetectionValidator, val
6 |
7 | __all__ = 'DetectionPredictor', 'predict', 'DetectionTrainer', 'train', 'DetectionValidator', 'val'
8 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/detect/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/v8/detect/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/detect/__pycache__/predict.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/v8/detect/__pycache__/predict.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/detect/__pycache__/train.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/v8/detect/__pycache__/train.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/detect/__pycache__/val.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/v8/detect/__pycache__/val.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/detect/predict.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import torch
4 |
5 | from ultralytics.yolo.engine.predictor import BasePredictor
6 | from ultralytics.yolo.engine.results import Results
7 | from ultralytics.yolo.utils import DEFAULT_CFG, ROOT, ops
8 |
9 |
10 | class DetectionPredictor(BasePredictor):
11 |
12 | def preprocess(self, img):
13 | img = (img if isinstance(img, torch.Tensor) else torch.from_numpy(img)).to(self.model.device)
14 | img = img.half() if self.model.fp16 else img.float() # uint8 to fp16/32
15 | img /= 255 # 0 - 255 to 0.0 - 1.0
16 | return img
17 |
18 | def postprocess(self, preds, img, orig_imgs):
19 | preds = ops.non_max_suppression(preds,
20 | self.args.conf,
21 | self.args.iou,
22 | agnostic=self.args.agnostic_nms,
23 | max_det=self.args.max_det,
24 | classes=self.args.classes)
25 |
26 | results = []
27 | for i, pred in enumerate(preds):
28 | orig_img = orig_imgs[i] if isinstance(orig_imgs, list) else orig_imgs
29 | if not isinstance(orig_imgs, torch.Tensor):
30 | pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape)
31 | path, _, _, _, _ = self.batch
32 | img_path = path[i] if isinstance(path, list) else path
33 | results.append(Results(orig_img=orig_img, path=img_path, names=self.model.names, boxes=pred))
34 | return results
35 |
36 |
37 | def predict(cfg=DEFAULT_CFG, use_python=False):
38 | model = cfg.model or 'yolov8n.pt'
39 | source = cfg.source if cfg.source is not None else ROOT / 'assets' if (ROOT / 'assets').exists() \
40 | else 'https://ultralytics.com/images/bus.jpg'
41 |
42 | args = dict(model=model, source=source)
43 | if use_python:
44 | from ultralytics import YOLO
45 | YOLO(model)(**args)
46 | else:
47 | predictor = DetectionPredictor(overrides=args)
48 | predictor.predict_cli()
49 |
50 |
51 | if __name__ == '__main__':
52 | predict()
53 |
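A minimal usage sketch of the detection path, mirroring predict(use_python=True) above with its default weights and fallback image URL.

from ultralytics import YOLO

model = YOLO('yolov8n.pt')
results = model('https://ultralytics.com/images/bus.jpg')
for r in results:
    print(r.boxes)  # scaled detections, as stored by Results(boxes=pred) above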
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/pose/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from .predict import PosePredictor, predict
4 | from .train import PoseTrainer, train
5 | from .val import PoseValidator, val
6 |
7 | __all__ = 'PoseTrainer', 'train', 'PoseValidator', 'val', 'PosePredictor', 'predict'
8 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/pose/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/v8/pose/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/pose/__pycache__/predict.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/v8/pose/__pycache__/predict.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/pose/__pycache__/train.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/v8/pose/__pycache__/train.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/pose/__pycache__/val.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/v8/pose/__pycache__/val.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/pose/predict.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from ultralytics.yolo.engine.results import Results
4 | from ultralytics.yolo.utils import DEFAULT_CFG, ROOT, ops
5 | from ultralytics.yolo.v8.detect.predict import DetectionPredictor
6 |
7 |
8 | class PosePredictor(DetectionPredictor):
9 |
10 |     def postprocess(self, preds, img, orig_imgs):
11 | preds = ops.non_max_suppression(preds,
12 | self.args.conf,
13 | self.args.iou,
14 | agnostic=self.args.agnostic_nms,
15 | max_det=self.args.max_det,
16 | classes=self.args.classes,
17 | nc=len(self.model.names))
18 |
19 | results = []
20 | for i, pred in enumerate(preds):
21 |             orig_img = orig_imgs[i] if isinstance(orig_imgs, list) else orig_imgs
22 | shape = orig_img.shape
23 | pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], shape).round()
24 | pred_kpts = pred[:, 6:].view(len(pred), *self.model.kpt_shape) if len(pred) else pred[:, 6:]
25 | pred_kpts = ops.scale_coords(img.shape[2:], pred_kpts, shape)
26 | path, _, _, _, _ = self.batch
27 | img_path = path[i] if isinstance(path, list) else path
28 | results.append(
29 | Results(orig_img=orig_img,
30 | path=img_path,
31 | names=self.model.names,
32 | boxes=pred[:, :6],
33 | keypoints=pred_kpts))
34 | return results
35 |
36 |
37 | def predict(cfg=DEFAULT_CFG, use_python=False):
38 | model = cfg.model or 'yolov8n-pose.pt'
39 | source = cfg.source if cfg.source is not None else ROOT / 'assets' if (ROOT / 'assets').exists() \
40 | else 'https://ultralytics.com/images/bus.jpg'
41 |
42 | args = dict(model=model, source=source)
43 | if use_python:
44 | from ultralytics import YOLO
45 | YOLO(model)(**args)
46 | else:
47 | predictor = PosePredictor(overrides=args)
48 | predictor.predict_cli()
49 |
50 |
51 | if __name__ == '__main__':
52 | predict()
53 |
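A minimal usage sketch of the pose path, with the default weights from predict() above; keypoints come back already rescaled by ops.scale_coords in postprocess().

from ultralytics import YOLO

model = YOLO('yolov8n-pose.pt')
results = model('https://ultralytics.com/images/bus.jpg')
for r in results:
    print(r.keypoints)  # per-person keypoints, as stored by Results(keypoints=pred_kpts) above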
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/pose/train.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from copy import copy
4 |
5 | import torch
6 | import torch.nn as nn
7 |
8 | from ultralytics.nn.tasks import PoseModel
9 | from ultralytics.yolo import v8
10 | from ultralytics.yolo.utils import DEFAULT_CFG
11 | from ultralytics.yolo.utils.loss import KeypointLoss
12 | from ultralytics.yolo.utils.metrics import OKS_SIGMA
13 | from ultralytics.yolo.utils.ops import xyxy2xywh
14 | from ultralytics.yolo.utils.plotting import plot_images, plot_results
15 | from ultralytics.yolo.utils.tal import make_anchors
16 | from ultralytics.yolo.utils.torch_utils import de_parallel
17 | from ultralytics.yolo.v8.detect.train import Loss
18 |
19 |
20 | # BaseTrainer python usage
21 | class PoseTrainer(v8.detect.DetectionTrainer):
22 |
23 | def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
24 | if overrides is None:
25 | overrides = {}
26 | overrides['task'] = 'pose'
27 | super().__init__(cfg, overrides, _callbacks)
28 |
29 | def get_model(self, cfg=None, weights=None, verbose=True):
30 | model = PoseModel(cfg, ch=3, nc=self.data['nc'], data_kpt_shape=self.data['kpt_shape'], verbose=verbose)
31 | if weights:
32 | model.load(weights)
33 |
34 | return model
35 |
36 | def set_model_attributes(self):
37 | super().set_model_attributes()
38 | self.model.kpt_shape = self.data['kpt_shape']
39 |
40 | def get_validator(self):
41 | self.loss_names = 'box_loss', 'pose_loss', 'kobj_loss', 'cls_loss', 'dfl_loss'
42 | return v8.pose.PoseValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args))
43 |
44 | def criterion(self, preds, batch):
45 | if not hasattr(self, 'compute_loss'):
46 | self.compute_loss = PoseLoss(de_parallel(self.model))
47 | return self.compute_loss(preds, batch)
48 |
49 | def plot_training_samples(self, batch, ni):
50 | images = batch['img']
51 | kpts = batch['keypoints']
52 | cls = batch['cls'].squeeze(-1)
53 | bboxes = batch['bboxes']
54 | paths = batch['im_file']
55 | batch_idx = batch['batch_idx']
56 | plot_images(images,
57 | batch_idx,
58 | cls,
59 | bboxes,
60 | kpts=kpts,
61 | paths=paths,
62 | fname=self.save_dir / f'train_batch{ni}.jpg')
63 |
64 | def plot_metrics(self):
65 | plot_results(file=self.csv, pose=True) # save results.png
66 |
67 |
68 | # Criterion class for computing training losses
69 | class PoseLoss(Loss):
70 |
71 | def __init__(self, model): # model must be de-paralleled
72 | super().__init__(model)
73 | self.kpt_shape = model.model[-1].kpt_shape
74 | self.bce_pose = nn.BCEWithLogitsLoss()
75 | is_pose = self.kpt_shape == [17, 3]
76 | nkpt = self.kpt_shape[0] # number of keypoints
77 | sigmas = torch.from_numpy(OKS_SIGMA).to(self.device) if is_pose else torch.ones(nkpt, device=self.device) / nkpt
78 | self.keypoint_loss = KeypointLoss(sigmas=sigmas)
79 |
80 | def __call__(self, preds, batch):
81 |         loss = torch.zeros(5, device=self.device)  # box, pose (kpt_location), kobj (kpt_visibility), cls, dfl
82 | feats, pred_kpts = preds if isinstance(preds[0], list) else preds[1]
83 | pred_distri, pred_scores = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split(
84 | (self.reg_max * 4, self.nc), 1)
85 |
86 | # b, grids, ..
87 | pred_scores = pred_scores.permute(0, 2, 1).contiguous()
88 | pred_distri = pred_distri.permute(0, 2, 1).contiguous()
89 | pred_kpts = pred_kpts.permute(0, 2, 1).contiguous()
90 |
91 | dtype = pred_scores.dtype
92 | imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0] # image size (h,w)
93 | anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5)
94 |
95 | # targets
96 | batch_size = pred_scores.shape[0]
97 | batch_idx = batch['batch_idx'].view(-1, 1)
98 | targets = torch.cat((batch_idx, batch['cls'].view(-1, 1), batch['bboxes']), 1)
99 | targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]])
100 | gt_labels, gt_bboxes = targets.split((1, 4), 2) # cls, xyxy
101 | mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0)
102 |
103 | # pboxes
104 | pred_bboxes = self.bbox_decode(anchor_points, pred_distri) # xyxy, (b, h*w, 4)
105 | pred_kpts = self.kpts_decode(anchor_points, pred_kpts.view(batch_size, -1, *self.kpt_shape)) # (b, h*w, 17, 3)
106 |
107 | _, target_bboxes, target_scores, fg_mask, target_gt_idx = self.assigner(
108 | pred_scores.detach().sigmoid(), (pred_bboxes.detach() * stride_tensor).type(gt_bboxes.dtype),
109 | anchor_points * stride_tensor, gt_labels, gt_bboxes, mask_gt)
110 |
111 | target_scores_sum = max(target_scores.sum(), 1)
112 |
113 | # cls loss
114 | # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum # VFL way
115 | loss[3] = self.bce(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum # BCE
116 |
117 | # bbox loss
118 | if fg_mask.sum():
119 | target_bboxes /= stride_tensor
120 | loss[0], loss[4] = self.bbox_loss(pred_distri, pred_bboxes, anchor_points, target_bboxes, target_scores,
121 | target_scores_sum, fg_mask)
122 | keypoints = batch['keypoints'].to(self.device).float().clone()
123 | keypoints[..., 0] *= imgsz[1]
124 | keypoints[..., 1] *= imgsz[0]
125 | for i in range(batch_size):
126 | if fg_mask[i].sum():
127 | idx = target_gt_idx[i][fg_mask[i]]
128 | gt_kpt = keypoints[batch_idx.view(-1) == i][idx] # (n, 51)
129 | gt_kpt[..., 0] /= stride_tensor[fg_mask[i]]
130 | gt_kpt[..., 1] /= stride_tensor[fg_mask[i]]
131 | area = xyxy2xywh(target_bboxes[i][fg_mask[i]])[:, 2:].prod(1, keepdim=True)
132 | pred_kpt = pred_kpts[i][fg_mask[i]]
133 | kpt_mask = gt_kpt[..., 2] != 0
134 | loss[1] += self.keypoint_loss(pred_kpt, gt_kpt, kpt_mask, area) # pose loss
135 | # kpt_score loss
136 | if pred_kpt.shape[-1] == 3:
137 | loss[2] += self.bce_pose(pred_kpt[..., 2], kpt_mask.float()) # keypoint obj loss
138 |
139 | loss[0] *= self.hyp.box # box gain
140 | loss[1] *= self.hyp.pose / batch_size # pose gain
141 | loss[2] *= self.hyp.kobj / batch_size # kobj gain
142 | loss[3] *= self.hyp.cls # cls gain
143 | loss[4] *= self.hyp.dfl # dfl gain
144 |
145 |         return loss.sum() * batch_size, loss.detach()  # loss(box, pose, kobj, cls, dfl)
146 |
147 | def kpts_decode(self, anchor_points, pred_kpts):
148 | y = pred_kpts.clone()
149 | y[..., :2] *= 2.0
150 | y[..., 0] += anchor_points[:, [0]] - 0.5
151 | y[..., 1] += anchor_points[:, [1]] - 0.5
152 | return y
153 |
154 |
155 | def train(cfg=DEFAULT_CFG, use_python=False):
156 | model = cfg.model or 'yolov8n-pose.yaml'
157 | data = cfg.data or 'coco8-pose.yaml'
158 | device = cfg.device if cfg.device is not None else ''
159 |
160 | args = dict(model=model, data=data, device=device)
161 | if use_python:
162 | from ultralytics import YOLO
163 | YOLO(model).train(**args)
164 | else:
165 | trainer = PoseTrainer(overrides=args)
166 | trainer.train()
167 |
168 |
169 | if __name__ == '__main__':
170 | train()
171 |
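A toy walk-through of kpts_decode above with a single made-up anchor (all values illustrative): raw keypoint offsets are doubled and re-centered on the anchor point.

import torch

anchor_points = torch.tensor([[4.0, 6.0]])      # one anchor, in grid units
pred_kpts = torch.tensor([[[0.25, 0.5, 0.9]]])  # (anchors, keypoints, x/y/visibility)

y = pred_kpts.clone()
y[..., :2] *= 2.0                      # widen the offset range, as in kpts_decode
y[..., 0] += anchor_points[:, [0]] - 0.5
y[..., 1] += anchor_points[:, [1]] - 0.5
print(y)  # tensor([[[4.0000, 6.5000, 0.9000]]]): x = 0.5 + 4 - 0.5, y = 1.0 + 6 - 0.5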
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/segment/__init__.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | from .predict import SegmentationPredictor, predict
4 | from .train import SegmentationTrainer, train
5 | from .val import SegmentationValidator, val
6 |
7 | __all__ = 'SegmentationPredictor', 'predict', 'SegmentationTrainer', 'train', 'SegmentationValidator', 'val'
8 |
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/segment/__pycache__/__init__.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/v8/segment/__pycache__/__init__.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/segment/__pycache__/predict.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/v8/segment/__pycache__/predict.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/segment/__pycache__/train.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/v8/segment/__pycache__/train.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/segment/__pycache__/val.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/ultralytics/yolo/v8/segment/__pycache__/val.cpython-39.pyc
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/segment/predict.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 |
3 | import torch
4 |
5 | from ultralytics.yolo.engine.results import Results
6 | from ultralytics.yolo.utils import DEFAULT_CFG, ROOT, ops
7 | from ultralytics.yolo.v8.detect.predict import DetectionPredictor
8 |
9 |
10 | class SegmentationPredictor(DetectionPredictor):
11 |
12 | def postprocess(self, preds, img, orig_imgs):
13 | # TODO: filter by classes
14 | p = ops.non_max_suppression(preds[0],
15 | self.args.conf,
16 | self.args.iou,
17 | agnostic=self.args.agnostic_nms,
18 | max_det=self.args.max_det,
19 | nc=len(self.model.names),
20 | classes=self.args.classes)
21 | results = []
22 | proto = preds[1][-1] if len(preds[1]) == 3 else preds[1] # second output is len 3 if pt, but only 1 if exported
23 | for i, pred in enumerate(p):
24 | orig_img = orig_imgs[i] if isinstance(orig_imgs, list) else orig_imgs
25 | path, _, _, _, _ = self.batch
26 | img_path = path[i] if isinstance(path, list) else path
27 | if not len(pred): # save empty boxes
28 | results.append(Results(orig_img=orig_img, path=img_path, names=self.model.names, boxes=pred[:, :6]))
29 | continue
30 | if self.args.retina_masks:
31 | if not isinstance(orig_imgs, torch.Tensor):
32 | pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape)
33 | masks = ops.process_mask_native(proto[i], pred[:, 6:], pred[:, :4], orig_img.shape[:2]) # HWC
34 | else:
35 | masks = ops.process_mask(proto[i], pred[:, 6:], pred[:, :4], img.shape[2:], upsample=True) # HWC
36 | if not isinstance(orig_imgs, torch.Tensor):
37 | pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape)
38 | results.append(
39 | Results(orig_img=orig_img, path=img_path, names=self.model.names, boxes=pred[:, :6], masks=masks))
40 | return results
41 |
42 |
43 | def predict(cfg=DEFAULT_CFG, use_python=False):
44 | model = cfg.model or 'yolov8n-seg.pt'
45 | source = cfg.source if cfg.source is not None else ROOT / 'assets' if (ROOT / 'assets').exists() \
46 | else 'https://ultralytics.com/images/bus.jpg'
47 |
48 | args = dict(model=model, source=source)
49 | if use_python:
50 | from ultralytics import YOLO
51 | YOLO(model)(**args)
52 | else:
53 | predictor = SegmentationPredictor(overrides=args)
54 | predictor.predict_cli()
55 |
56 |
57 | if __name__ == '__main__':
58 | predict()
59 |
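A minimal usage sketch of the segmentation path, with the default weights from predict() above; the retina_masks argument selects the native-resolution branch in postprocess().

from ultralytics import YOLO

model = YOLO('yolov8n-seg.pt')
results = model('https://ultralytics.com/images/bus.jpg')
for r in results:
    print(r.boxes, r.masks)  # stored by Results(boxes=pred[:, :6], masks=masks) above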
--------------------------------------------------------------------------------
/ultralytics/yolo/v8/segment/train.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | from copy import copy
3 |
4 | import torch
5 | import torch.nn.functional as F
6 |
7 | from ultralytics.nn.tasks import SegmentationModel
8 | from ultralytics.yolo import v8
9 | from ultralytics.yolo.utils import DEFAULT_CFG, RANK
10 | from ultralytics.yolo.utils.ops import crop_mask, xyxy2xywh
11 | from ultralytics.yolo.utils.plotting import plot_images, plot_results
12 | from ultralytics.yolo.utils.tal import make_anchors
13 | from ultralytics.yolo.utils.torch_utils import de_parallel
14 | from ultralytics.yolo.v8.detect.train import Loss
15 |
16 |
17 | # BaseTrainer python usage
18 | class SegmentationTrainer(v8.detect.DetectionTrainer):
19 |
20 | def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
21 | if overrides is None:
22 | overrides = {}
23 | overrides['task'] = 'segment'
24 | super().__init__(cfg, overrides, _callbacks)
25 |
26 | def get_model(self, cfg=None, weights=None, verbose=True):
27 | model = SegmentationModel(cfg, ch=3, nc=self.data['nc'], verbose=verbose and RANK == -1)
28 | if weights:
29 | model.load(weights)
30 |
31 | return model
32 |
33 | def get_validator(self):
34 | self.loss_names = 'box_loss', 'seg_loss', 'cls_loss', 'dfl_loss'
35 | return v8.segment.SegmentationValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args))
36 |
37 | def criterion(self, preds, batch):
38 | if not hasattr(self, 'compute_loss'):
39 | self.compute_loss = SegLoss(de_parallel(self.model), overlap=self.args.overlap_mask)
40 | return self.compute_loss(preds, batch)
41 |
42 | def plot_training_samples(self, batch, ni):
43 | images = batch['img']
44 | masks = batch['masks']
45 | cls = batch['cls'].squeeze(-1)
46 | bboxes = batch['bboxes']
47 | paths = batch['im_file']
48 | batch_idx = batch['batch_idx']
49 | plot_images(images, batch_idx, cls, bboxes, masks, paths=paths, fname=self.save_dir / f'train_batch{ni}.jpg')
50 |
51 | def plot_metrics(self):
52 | plot_results(file=self.csv, segment=True) # save results.png
53 |
54 |
55 | # Criterion class for computing training losses
56 | class SegLoss(Loss):
57 |
58 | def __init__(self, model, overlap=True): # model must be de-paralleled
59 | super().__init__(model)
60 | self.nm = model.model[-1].nm # number of masks
61 | self.overlap = overlap
62 |
63 | def __call__(self, preds, batch):
64 |         loss = torch.zeros(4, device=self.device)  # box, seg, cls, dfl
65 | feats, pred_masks, proto = preds if len(preds) == 3 else preds[1]
66 | batch_size, _, mask_h, mask_w = proto.shape # batch size, number of masks, mask height, mask width
67 | pred_distri, pred_scores = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split(
68 | (self.reg_max * 4, self.nc), 1)
69 |
70 | # b, grids, ..
71 | pred_scores = pred_scores.permute(0, 2, 1).contiguous()
72 | pred_distri = pred_distri.permute(0, 2, 1).contiguous()
73 | pred_masks = pred_masks.permute(0, 2, 1).contiguous()
74 |
75 | dtype = pred_scores.dtype
76 | imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0] # image size (h,w)
77 | anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5)
78 |
79 | # targets
80 | try:
81 | batch_idx = batch['batch_idx'].view(-1, 1)
82 | targets = torch.cat((batch_idx, batch['cls'].view(-1, 1), batch['bboxes']), 1)
83 | targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]])
84 | gt_labels, gt_bboxes = targets.split((1, 4), 2) # cls, xyxy
85 | mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0)
86 | except RuntimeError as e:
87 | raise TypeError('ERROR ❌ segment dataset incorrectly formatted or not a segment dataset.\n'
88 | "This error can occur when incorrectly training a 'segment' model on a 'detect' dataset, "
89 | "i.e. 'yolo train model=yolov8n-seg.pt data=coco128.yaml'.\nVerify your dataset is a "
90 | "correctly formatted 'segment' dataset using 'data=coco128-seg.yaml' "
91 | 'as an example.\nSee https://docs.ultralytics.com/tasks/segment/ for help.') from e
92 |
93 | # pboxes
94 | pred_bboxes = self.bbox_decode(anchor_points, pred_distri) # xyxy, (b, h*w, 4)
95 |
96 | _, target_bboxes, target_scores, fg_mask, target_gt_idx = self.assigner(
97 | pred_scores.detach().sigmoid(), (pred_bboxes.detach() * stride_tensor).type(gt_bboxes.dtype),
98 | anchor_points * stride_tensor, gt_labels, gt_bboxes, mask_gt)
99 |
100 | target_scores_sum = max(target_scores.sum(), 1)
101 |
102 | # cls loss
103 | # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum # VFL way
104 | loss[2] = self.bce(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum # BCE
105 |
106 | if fg_mask.sum():
107 | # bbox loss
108 | loss[0], loss[3] = self.bbox_loss(pred_distri, pred_bboxes, anchor_points, target_bboxes / stride_tensor,
109 | target_scores, target_scores_sum, fg_mask)
110 | # masks loss
111 | masks = batch['masks'].to(self.device).float()
112 | if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample
113 | masks = F.interpolate(masks[None], (mask_h, mask_w), mode='nearest')[0]
114 |
115 | for i in range(batch_size):
116 | if fg_mask[i].sum():
117 | mask_idx = target_gt_idx[i][fg_mask[i]]
118 | if self.overlap:
119 | gt_mask = torch.where(masks[[i]] == (mask_idx + 1).view(-1, 1, 1), 1.0, 0.0)
120 | else:
121 | gt_mask = masks[batch_idx.view(-1) == i][mask_idx]
122 | xyxyn = target_bboxes[i][fg_mask[i]] / imgsz[[1, 0, 1, 0]]
123 | marea = xyxy2xywh(xyxyn)[:, 2:].prod(1)
124 | mxyxy = xyxyn * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device)
125 | loss[1] += self.single_mask_loss(gt_mask, pred_masks[i][fg_mask[i]], proto[i], mxyxy, marea) # seg
126 |
127 |                 # WARNING: lines below prevent Multi-GPU DDP 'unused gradient' PyTorch errors, do not remove
128 | else:
129 | loss[1] += (proto * 0).sum() + (pred_masks * 0).sum() # inf sums may lead to nan loss
130 |
131 | # WARNING: lines below prevent Multi-GPU DDP 'unused gradient' PyTorch errors, do not remove
132 | else:
133 | loss[1] += (proto * 0).sum() + (pred_masks * 0).sum() # inf sums may lead to nan loss
134 |
135 | loss[0] *= self.hyp.box # box gain
136 | loss[1] *= self.hyp.box / batch_size # seg gain
137 | loss[2] *= self.hyp.cls # cls gain
138 | loss[3] *= self.hyp.dfl # dfl gain
139 |
140 |         return loss.sum() * batch_size, loss.detach()  # loss(box, seg, cls, dfl)
141 |
142 | def single_mask_loss(self, gt_mask, pred, proto, xyxy, area):
143 | # Mask loss for one image
144 | pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:]) # (n, 32) @ (32,80,80) -> (n,80,80)
145 | loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction='none')
146 | return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean()
147 |
148 |
149 | def train(cfg=DEFAULT_CFG, use_python=False):
150 | model = cfg.model or 'yolov8n-seg.pt'
151 | data = cfg.data or 'coco128-seg.yaml' # or yolo.ClassificationDataset("mnist")
152 | device = cfg.device if cfg.device is not None else ''
153 |
154 | args = dict(model=model, data=data, device=device)
155 | if use_python:
156 | from ultralytics import YOLO
157 | YOLO(model).train(**args)
158 | else:
159 | trainer = SegmentationTrainer(overrides=args)
160 | trainer.train()
161 |
162 |
163 | if __name__ == '__main__':
164 | train()
165 |
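A shape-only sketch of the projection inside single_mask_loss above, with random tensors (all sizes illustrative, matching the (n, 32) @ (32, 80, 80) comment):

import torch

nm, h, w, n = 32, 80, 80, 3                # prototype count, proto size, instance count
proto = torch.randn(nm, h, w)              # prototype masks for one image
coeff = torch.randn(n, nm)                 # per-instance mask coefficients
pred_mask = (coeff @ proto.view(nm, -1)).view(-1, h, w)
print(pred_mask.shape)                     # torch.Size([3, 80, 80])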
--------------------------------------------------------------------------------
/utils/__pycache__/capnums.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/utils/__pycache__/capnums.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/id_dialog.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/utils/__pycache__/id_dialog.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/id_win.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/utils/__pycache__/id_win.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/rtsp_dialog.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/utils/__pycache__/rtsp_dialog.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/__pycache__/rtsp_win.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/utils/__pycache__/rtsp_win.cpython-39.pyc
--------------------------------------------------------------------------------
/utils/capnums.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # @Author : CatfishW🚀
3 | # @Time : 2023/5/1
4 | import cv2
5 |
6 |
7 | class Camera:
8 | def __init__(self, cam_preset_num=5):
9 | self.cam_preset_num = cam_preset_num
10 |
11 | def get_cam_num(self):
12 | cnt = 0
13 | devices = []
14 | for device in range(0, self.cam_preset_num):
15 | stream = cv2.VideoCapture(device, cv2.CAP_DSHOW)
16 | grabbed = stream.grab()
17 | stream.release()
18 | if not grabbed:
19 | continue
20 | else:
21 | cnt = cnt + 1
22 | devices.append(device)
23 | return cnt, devices
24 |
25 |
26 | if __name__ == '__main__':
27 | cam = Camera()
28 | cam_num, devices = cam.get_cam_num()
29 | print(cam_num, devices)
30 |
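Note that cv2.CAP_DSHOW is a Windows-only (DirectShow) backend, so get_cam_num() above will not enumerate cameras elsewhere. A portable probe, sketched below, simply omits the flag and lets OpenCV pick a platform default.

import cv2

stream = cv2.VideoCapture(0)  # no backend flag: OpenCV chooses a default backend
print(stream.grab())          # True if a camera answers at index 0
stream.release()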
--------------------------------------------------------------------------------
/utils/id_dialog.py:
--------------------------------------------------------------------------------
1 |
2 | # -*- coding: utf-8 -*-
3 | # @Author : CatfishW🚀
4 | # @Time : 2023/5/1
5 | from PySide6 import QtCore, QtGui, QtWidgets
6 |
7 |
8 | class id_form(object):
9 | def setupUi(self, Form):
10 | Form.setObjectName("Form")
11 | Form.resize(783, 40)
12 | Form.setMinimumSize(QtCore.QSize(0, 40))
13 | Form.setMaximumSize(QtCore.QSize(16777215, 41))
14 | icon = QtGui.QIcon()
15 | icon.addPixmap(QtGui.QPixmap(":/img/None.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
16 | Form.setWindowIcon(icon)
17 | Form.setStyleSheet("#Form{background:rgba(120,120,120,255)}")
18 | self.horizontalLayout = QtWidgets.QHBoxLayout(Form)
19 | self.horizontalLayout.setContentsMargins(-1, 5, -1, 5)
20 | self.horizontalLayout.setObjectName("horizontalLayout")
21 | self.label = QtWidgets.QLabel(Form)
22 | self.label.setMinimumSize(QtCore.QSize(0, 30))
23 | self.label.setMaximumSize(QtCore.QSize(16777215, 30))
24 | self.label.setStyleSheet("QLabel{font-family: \"Microsoft YaHei\";\n"
25 | "font-size: 18px;\n"
26 | "font-weight: bold;\n"
27 | "color:white;}")
28 | self.label.setObjectName("label")
29 | self.horizontalLayout.addWidget(self.label)
30 | self.idEdit = QtWidgets.QLineEdit(Form)
31 | self.idEdit.setMinimumSize(QtCore.QSize(0, 31))
32 | self.idEdit.setStyleSheet("background-color: rgb(207, 207, 207);")
33 | self.idEdit.setObjectName("rtspEdit")
34 | self.horizontalLayout.addWidget(self.idEdit)
35 | self.idButton = QtWidgets.QPushButton(Form)
36 | self.idButton.setStyleSheet("QPushButton{font-family: \"Microsoft YaHei\";\n"
37 | "font-size: 18px;\n"
38 | "font-weight: bold;\n"
39 | "color:white;\n"
40 | "text-align: center center;\n"
41 | "padding-left: 5px;\n"
42 | "padding-right: 5px;\n"
43 | "padding-top: 4px;\n"
44 | "padding-bottom: 4px;\n"
45 | "border-style: solid;\n"
46 | "border-width: 0px;\n"
47 | "border-color: rgba(255, 255, 255, 255);\n"
48 | "border-radius: 3px;\n"
49 | "background-color: rgba(255,255,255,30);}\n"
50 | "\n"
51 | "QPushButton:focus{outline: none;}\n"
52 | "\n"
53 | "QPushButton::pressed{font-family: \"Microsoft YaHei\";\n"
54 | " font-size: 16px;\n"
55 | " font-weight: bold;\n"
56 | " color:rgb(200,200,200);\n"
57 | " text-align: center center;\n"
58 | " padding-left: 5px;\n"
59 | " padding-right: 5px;\n"
60 | " padding-top: 4px;\n"
61 | " padding-bottom: 4px;\n"
62 | " border-style: solid;\n"
63 | " border-width: 0px;\n"
64 | " border-color: rgba(255, 255, 255, 255);\n"
65 | " border-radius: 3px;\n"
66 | " background-color: rgba(255,255,255,150);}\n"
67 | "\n"
68 | "QPushButton::hover {\n"
69 | "border-style: solid;\n"
70 | "border-width: 0px;\n"
71 | "border-radius: 0px;\n"
72 | "background-color: rgba(255,255,255,50);}")
73 | self.idButton.setObjectName("rtspButton")
74 | self.horizontalLayout.addWidget(self.idButton)
75 |
76 | self.retranslateUi(Form)
77 | QtCore.QMetaObject.connectSlotsByName(Form)
78 |
79 | def retranslateUi(self, Form):
80 | _translate = QtCore.QCoreApplication.translate
81 | Form.setWindowTitle(_translate("Form", "ID"))
82 | self.label.setText(_translate("Form", "id:"))
83 |         self.idButton.setText(_translate("Form", "确定"))  # "确定" = "OK"
84 | # import apprcc_rc
85 |
--------------------------------------------------------------------------------
/utils/id_win.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # @Author : CatfishW🚀
3 | # @Time : 2023/5/1
4 | import sys
5 | from PySide6.QtWidgets import QApplication, QWidget
6 | from utils.id_dialog import id_form
7 |
8 |
9 | class id_Window(QWidget, id_form):
10 | def __init__(self):
11 | super(id_Window, self).__init__()
12 | self.setupUi(self)
13 |
14 |
15 | if __name__ == '__main__':
16 | app = QApplication(sys.argv)
17 | window = id_Window()
18 | window.show()
19 | sys.exit(app.exec())
20 |
--------------------------------------------------------------------------------
/utils/rtsp_dialog.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # @Author : CatfishW🚀
3 | # @Time : 2023/5/1
4 | from PySide6 import QtCore, QtGui, QtWidgets
5 |
6 |
7 | class Ui_Form(object):
8 | def setupUi(self, Form):
9 | Form.setObjectName("Form")
10 | Form.resize(783, 40)
11 | Form.setMinimumSize(QtCore.QSize(0, 40))
12 | Form.setMaximumSize(QtCore.QSize(16777215, 41))
13 | icon = QtGui.QIcon()
14 | icon.addPixmap(QtGui.QPixmap(":/img/None.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
15 | Form.setWindowIcon(icon)
16 | Form.setStyleSheet("#Form{background:rgba(120,120,120,255)}")
17 | self.horizontalLayout = QtWidgets.QHBoxLayout(Form)
18 | self.horizontalLayout.setContentsMargins(-1, 5, -1, 5)
19 | self.horizontalLayout.setObjectName("horizontalLayout")
20 | self.label = QtWidgets.QLabel(Form)
21 | self.label.setMinimumSize(QtCore.QSize(0, 30))
22 | self.label.setMaximumSize(QtCore.QSize(16777215, 30))
23 | self.label.setStyleSheet("QLabel{font-family: \"Microsoft YaHei\";\n"
24 | "font-size: 18px;\n"
25 | "font-weight: bold;\n"
26 | "color:white;}")
27 | self.label.setObjectName("label")
28 | self.horizontalLayout.addWidget(self.label)
29 | self.rtspEdit = QtWidgets.QLineEdit(Form)
30 | self.rtspEdit.setMinimumSize(QtCore.QSize(0, 31))
31 | self.rtspEdit.setStyleSheet("background-color: rgb(207, 207, 207);")
32 | self.rtspEdit.setObjectName("rtspEdit")
33 | self.horizontalLayout.addWidget(self.rtspEdit)
34 | self.rtspButton = QtWidgets.QPushButton(Form)
35 | self.rtspButton.setStyleSheet("QPushButton{font-family: \"Microsoft YaHei\";\n"
36 | "font-size: 18px;\n"
37 | "font-weight: bold;\n"
38 | "color:white;\n"
39 | "text-align: center center;\n"
40 | "padding-left: 5px;\n"
41 | "padding-right: 5px;\n"
42 | "padding-top: 4px;\n"
43 | "padding-bottom: 4px;\n"
44 | "border-style: solid;\n"
45 | "border-width: 0px;\n"
46 | "border-color: rgba(255, 255, 255, 255);\n"
47 | "border-radius: 3px;\n"
48 | "background-color: rgba(255,255,255,30);}\n"
49 | "\n"
50 | "QPushButton:focus{outline: none;}\n"
51 | "\n"
52 | "QPushButton::pressed{font-family: \"Microsoft YaHei\";\n"
53 | " font-size: 16px;\n"
54 | " font-weight: bold;\n"
55 | " color:rgb(200,200,200);\n"
56 | " text-align: center center;\n"
57 | " padding-left: 5px;\n"
58 | " padding-right: 5px;\n"
59 | " padding-top: 4px;\n"
60 | " padding-bottom: 4px;\n"
61 | " border-style: solid;\n"
62 | " border-width: 0px;\n"
63 | " border-color: rgba(255, 255, 255, 255);\n"
64 | " border-radius: 3px;\n"
65 | " background-color: rgba(255,255,255,150);}\n"
66 | "\n"
67 | "QPushButton::hover {\n"
68 | "border-style: solid;\n"
69 | "border-width: 0px;\n"
70 | "border-radius: 0px;\n"
71 | "background-color: rgba(255,255,255,50);}")
72 | self.rtspButton.setObjectName("rtspButton")
73 | self.horizontalLayout.addWidget(self.rtspButton)
74 |
75 | self.retranslateUi(Form)
76 | QtCore.QMetaObject.connectSlotsByName(Form)
77 |
78 | def retranslateUi(self, Form):
79 | _translate = QtCore.QCoreApplication.translate
80 | Form.setWindowTitle(_translate("Form", "RTSP"))
81 |         self.label.setText(_translate("Form", "rtsp地址:"))  # "rtsp地址" = "RTSP address"
82 |         self.rtspButton.setText(_translate("Form", "确定"))  # "确定" = "OK"
83 | # import apprcc_rc
84 |
--------------------------------------------------------------------------------
/utils/rtsp_win.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # @Author : CatfishW🚀
3 | # @Time : 2023/5/1
4 | import sys
5 | from PySide6.QtWidgets import QApplication, QWidget
6 | from utils.rtsp_dialog import Ui_Form
7 |
8 |
9 | class Window(QWidget, Ui_Form):
10 | def __init__(self):
11 | super(Window, self).__init__()
12 | self.setupUi(self)
13 |
14 |
15 | if __name__ == '__main__':
16 | app = QApplication(sys.argv)
17 | window = Window()
18 | window.show()
19 | sys.exit(app.exec())
20 |
--------------------------------------------------------------------------------
/utils/video_transform.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # @Author : CatfishW🚀
3 | # @Time : 2023/5/1
4 | # Convert a video into a resized AVI clip
5 | import cv2
6 | video = 'tests/test.mp4'  # input video path
7 | cap = cv2.VideoCapture(video)
8 | video_writer = cv2.VideoWriter('test_4_1.avi', cv2.VideoWriter_fourcc(*'XVID'), 30, (640, 480))  # output format
9 | while cap.isOpened():
10 |     ret, frame = cap.read()
11 |     if not ret:  # end of stream
12 |         break
13 |     frame = cv2.resize(frame, (640, 480))
14 |     video_writer.write(frame)
15 |     if cv2.waitKey(1) & 0xFF == ord('q'):
16 |         break
17 | cap.release()
18 | video_writer.release()
--------------------------------------------------------------------------------
/weights/轻量级模型.pt: ("轻量级模型" = "lightweight model")
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CatfishW/MOT/eb905fad73488077b470de84d7dcf6089d808ce3/weights/轻量级模型.pt
--------------------------------------------------------------------------------