├── CMakeLists.txt
├── LICENSE
├── README.md
├── images
│   ├── DOTA_0032.png
│   ├── bus.jpg
│   ├── bus_out.bmp
│   └── zidane.jpg
├── main.cpp
├── models
│   └── put_model_here
├── rtdetr_onnx.cpp
├── rtdetr_onnx.h
├── yolov8.cpp
├── yolov8.h
├── yolov8_obb.cpp
├── yolov8_obb.h
├── yolov8_obb_onnx.cpp
├── yolov8_obb_onnx.h
├── yolov8_onnx.cpp
├── yolov8_onnx.h
├── yolov8_pose.cpp
├── yolov8_pose.h
├── yolov8_pose_onnx.cpp
├── yolov8_pose_onnx.h
├── yolov8_seg.cpp
├── yolov8_seg.h
├── yolov8_seg_onnx.cpp
├── yolov8_seg_onnx.h
├── yolov8_utils.cpp
└── yolov8_utils.h
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | CMAKE_MINIMUM_REQUIRED(VERSION 3.0.0)
2 | project(YOLOv8)
3 |
4 |
5 | SET (OpenCV_DIR path/to/opencv/build) #opencv root
6 | SET (ONNXRUNTIME_DIR path/to/onnxruntime)
7 |
8 | FIND_PACKAGE(OpenCV REQUIRED)
9 | #include_directories("")
10 | ADD_EXECUTABLE(YOLOv8 yolov8.h yolov8_onnx.h yolov8_seg.h yolov8_seg_onnx.h yolov8_obb.h yolov8_obb_onnx.h yolov8_pose.h yolov8_pose_onnx.h rtdetr_onnx.h yolov8_utils.h
11 | main.cpp yolov8.cpp yolov8_onnx.cpp yolov8_seg.cpp yolov8_seg_onnx.cpp yolov8_obb.cpp yolov8_obb_onnx.cpp yolov8_pose.cpp yolov8_pose_onnx.cpp rtdetr_onnx.cpp yolov8_utils.cpp)
12 |
13 | SET(CMAKE_CXX_STANDARD 14)
14 | SET(CMAKE_CXX_STANDARD_REQUIRED ON)
15 |
16 | TARGET_INCLUDE_DIRECTORIES(YOLOv8 PRIVATE "${ONNXRUNTIME_DIR}/include")
17 |
18 | TARGET_COMPILE_FEATURES(YOLOv8 PRIVATE cxx_std_14)
19 | TARGET_LINK_LIBRARIES(YOLOv8 ${OpenCV_LIBS})
20 |
21 | if (WIN32)
22 | TARGET_LINK_LIBRARIES(YOLOv8 "${ONNXRUNTIME_DIR}/lib/onnxruntime.lib")
23 | endif(WIN32)
24 |
25 | if (UNIX)
26 | TARGET_LINK_LIBRARIES(YOLOv8 "${ONNXRUNTIME_DIR}/lib/libonnxruntime.so")
27 | endif(UNIX)
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # yolov8-opencv-onnxruntime-cpp
2 | ## Deploying YOLOv8 object-detection and instance-segmentation models with OpenCV-dnn and ONNXRuntime
3 | Based on yolov8: https://github.com/ultralytics/ultralytics
4 |
5 | ## requirements for opencv-dnn
6 | 1. > OpenCV>=4.7.0
9 |
10 | 2. export for opencv-dnn:
11 | ```bash
12 | #Note: when exporting for opencv-dnn, it is best to set opset to 12
13 |
14 | yolo export model=path/to/model.pt format=onnx dynamic=False opset=12
15 | ```
16 |
17 | 3. export RT-DETR:
18 | ```bash
19 | #Note: RT-DETR needs opset>=16; both dynamic=False and dynamic=True work
20 |
21 | yolo export model=path/to/rtdetr-l.pt format=onnx opset=16
22 |
23 | ```
24 |
25 | ```python
26 | from ultralytics import YOLO
27 | model = YOLO('./pre_model/yolov8-rtdetr-l.pt')
28 | results = model.export(format='onnx',opset=16)
29 | ```
30 |
31 |
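After export, the ONNX file can be loaded directly by OpenCV's dnn module. A minimal sketch of the CPU path, mirroring what `ReadModel()` in yolov8.cpp does (the model path is a placeholder and the helper name `LoadOnnx` is just for illustration):

```cpp
#include <opencv2/opencv.hpp>

cv::dnn::Net LoadOnnx(const std::string& model_path) {
    cv::dnn::Net net = cv::dnn::readNetFromONNX(model_path); // throws cv::Exception on a bad path or unsupported ops
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_DEFAULT);  // CPU backend
    net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
    return net;
}
```
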
32 | ## requirements for onnxruntime (only yolo*_onnx.h/cpp)
33 | >opencv>=4.5.0
34 | ONNXRuntime>=1.9.0
35 |
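A minimal sketch of how the onnxruntime wrappers are driven, mirroring `yolov8_onnx()` in main.cpp (paths are placeholders; `Yolov8Onnx` is assumed to be the detection wrapper in yolov8_onnx.h, following the naming pattern of the other *Onnx classes):

```cpp
#include <iostream>
#include <opencv2/opencv.hpp>
#include "yolov8_onnx.h"

int main() {
    Yolov8Onnx task;
    std::string model_path = "./models/yolov8s.onnx";
    cv::Mat img = cv::imread("./images/bus.jpg");
    if (!task.ReadModel(model_path, false)) // false = CPU; read the model once
        return -1;
    std::vector<OutputParams> result;
    if (task.OnnxDetect(img, result))
        std::cout << "detected " << result.size() << " objects" << std::endl;
    return 0;
}
```
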
36 | ## Changelog:
37 | #### 2024.04.15 update
38 | + Added yolov8-pose model deployment (https://github.com/UNeedCryDear/yolov8-opencv-onnxruntime-cpp/issues/52)
39 | + Fixed namespace usage issues.
40 |
41 | #### 2024.01.22 update
42 | + Added yolov8-obb model deployment (https://github.com/UNeedCryDear/yolov8-opencv-onnxruntime-cpp/issues/40)
43 | + Fixed some usability issues.
44 | #### 2023.12.05 update
45 | + Added yolov8-RTDETR deployment.
46 | + Optimized parts of the code: values such as the output shape are now read from the network outputs instead of having to be set as parameters beforehand.
47 |
48 | #### 2023.11.09 update
49 | + Fixed some problems mentioned in this PR [https://github.com/UNeedCryDear/yolov8-opencv-onnxruntime-cpp/pull/30]. The bug could leave the mask and the box differing in size by a few pixels, which caused subtle problems if you did not notice it; after this update both are rescaled to the same size.
50 | + Added a video-stream inference demo (see the sketch below), since many beginners were re-reading the model for every single frame of a video.
51 |
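The essential shape of that demo, trimmed from `video_demo()` in main.cpp (here with the segmentation wrapper): construct and read the model once, then reuse it for every frame:

```cpp
Yolov8SegOnnx task;
std::string model_path = "./models/yolov8s-seg.onnx";
if (!task.ReadModel(model_path, false)) return -1; // load the model ONCE, outside the loop
cv::VideoCapture cap(0);
cv::Mat frame;
std::vector<OutputParams> result;
while (cap.read(frame) && !frame.empty()) {
    result.clear();
    task.OnnxDetect(frame, result);   // per frame: inference only, no model reload
    if (cv::waitKey(10) == 27) break; // ESC quits
}
cap.release();
```
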
52 | #### 2023.09.20 update
53 | + 0. Added a model-path check; several issues took ages to debug and turned out to be nothing but a wrong model path.
54 | + 1. Fixed a bug in the mask computation. Previously, for input sizes other than 640 you had to change both the header file and the struct to get correct detections, but most people changed only one of the two; now only the definition in the header needs to be modified. In addition, segHeight and segWidth are now read from the network output, so these two parameters need no change when the mask ratio is not 4x.
55 | + 2. Fixed a possible out-of-bounds access in ```GetMask2()```.
56 |
57 |
58 | #### 2023.02.17 update
59 | + 0. Added support for the old onnxruntime API.
60 | + 1. OpenCV does not support dynamic inference; please export the onnx with dynamic=False and opset=12.
61 | + 2. About line endings: Windows needs CRLF, and uploading to github automatically converts to LF, so simply switch back to CRLF under Windows.
62 |
63 | #### 2023.02.07 update:
64 | + yolov8 inference through opencv-dnn currently requires OpenCV 4.7.0 or above; I have not yet found a way to adapt it to OpenCV 4.5.0. This version requirement has nothing to do with onnxruntime, which only needs OpenCV 4.5.0; any 4.x version should work as long as it can read the model correctly and provides ```cv::dnn::blobFromImages()```. If that function is really missing, you can extract its source code and use it directly, or implement the equivalent yourself.
65 | + The current OpenCV 4.7.0 release has a bug (https://github.com/opencv/opencv/issues/23080): if your CPU does not support the ```AVX2``` instruction set, you need to call ```net.enableWinograd(false);``` before ```net.forward()``` to disable Winograd acceleration; if it does support AVX2, you can keep the (modest) speedup enabled, as shown below.
66 |
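The workaround as it appears in this repo's `ReadModel()` implementations, guarded so it only applies to the affected 4.7.0 release:

```cpp
net = cv::dnn::readNetFromONNX(netPath);
#if CV_VERSION_MAJOR==4 && CV_VERSION_MINOR==7 && CV_VERSION_REVISION==0
net.enableWinograd(false);  // needed on CPUs without AVX2, see opencv issue #23080
//net.enableWinograd(true); // if your CPU supports AVX2, you may keep the speedup
#endif
```
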
67 | As usual, here is the result of running yolov8-seg.onnx at 640x640 with onnxruntime:
68 | 
69 |
--------------------------------------------------------------------------------
/images/DOTA_0032.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UNeedCryDear/yolov8-opencv-onnxruntime-cpp/ddd906cd9e5e459f2bc9788985fc8516476b5aba/images/DOTA_0032.png
--------------------------------------------------------------------------------
/images/bus.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UNeedCryDear/yolov8-opencv-onnxruntime-cpp/ddd906cd9e5e459f2bc9788985fc8516476b5aba/images/bus.jpg
--------------------------------------------------------------------------------
/images/bus_out.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UNeedCryDear/yolov8-opencv-onnxruntime-cpp/ddd906cd9e5e459f2bc9788985fc8516476b5aba/images/bus_out.bmp
--------------------------------------------------------------------------------
/images/zidane.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UNeedCryDear/yolov8-opencv-onnxruntime-cpp/ddd906cd9e5e459f2bc9788985fc8516476b5aba/images/zidane.jpg
--------------------------------------------------------------------------------
/main.cpp:
--------------------------------------------------------------------------------
1 | #include <iostream>
2 | #include <string>
3 |
4 | #include <opencv2/opencv.hpp>
5 | #include "yolov8.h"
6 | #include "yolov8_onnx.h"
7 | #include "yolov8_seg.h"
8 | #include "rtdetr_onnx.h"
9 | #include "yolov8_seg_onnx.h"
10 | #include "yolov8_obb.h"
11 | #include "yolov8_obb_onnx.h"
12 | #include "yolov8_pose.h"
13 | #include "yolov8_pose_onnx.h"
14 | #include <time.h>
15 | //#define VIDEO_OPENCV //if defined, use opencv-dnn for video
16 |
17 | using namespace std;
18 | using namespace cv;
19 | using namespace dnn;
20 |
21 | template<typename _Tp>
22 | int yolov8(_Tp& task, cv::Mat& img, std::string& model_path)
23 | {
24 |
25 |
26 | cv::dnn::Net net;
27 | if (task.ReadModel(net, model_path, false)) {
28 | std::cout << "read net ok!" << std::endl;
29 | }
30 | else {
31 | return -1;
32 | }
33 | //generate random colors
34 | std::vector<cv::Scalar> color;
35 | srand(time(0));
36 | for (int i = 0; i < 80; i++) {
37 | int b = rand() % 256;
38 | int g = rand() % 256;
39 | int r = rand() % 256;
40 | color.push_back(cv::Scalar(b, g, r));
41 | }
42 | std::vector<OutputParams> result;
43 |
44 | bool isPose = false;
45 | if (typeid(task) == typeid(Yolov8Pose)) {
46 | isPose = true;
47 | }
48 | PoseParams poseParams;
49 | if (task.Detect(img, net, result)) {
50 |
51 | if (isPose)
52 | DrawPredPose(img, result, poseParams);
53 | else
54 | DrawPred(img, result, task._className, color);
55 |
56 | }
57 | else {
58 | std::cout << "Detect Failed!" << std::endl;
59 | }
60 | system("pause");
61 | return 0;
62 | }
63 |
64 | template<typename _Tp>
65 | int yolov8_onnx(_Tp& task, cv::Mat& img, std::string& model_path)
66 | {
67 |
68 | if (task.ReadModel(model_path, false)) {
69 | std::cout << "read net ok!" << std::endl;
70 | }
71 | else {
72 | return -1;
73 | }
74 | //generate random colors
75 | std::vector<cv::Scalar> color;
76 | srand(time(0));
77 | for (int i = 0; i < 80; i++) {
78 | int b = rand() % 256;
79 | int g = rand() % 256;
80 | int r = rand() % 256;
81 | color.push_back(cv::Scalar(b, g, r));
82 | }
83 | bool isPose = false;
84 | if (typeid(task) == typeid(Yolov8PoseOnnx)) {
85 | isPose = true;
86 | }
87 | PoseParams poseParams;
88 |
89 | std::vector<OutputParams> result;
90 | if (task.OnnxDetect(img, result)) {
91 | if (isPose)
92 | DrawPredPose(img, result, poseParams);
93 | else
94 | DrawPred(img, result, task._className, color);
95 | }
96 | else {
97 | std::cout << "Detect Failed!" << std::endl;
98 | }
99 | system("pause");
100 | return 0;
101 | }
102 |
103 |
104 | template<typename _Tp>
105 | int video_demo(_Tp& task, std::string& model_path)
106 | {
107 | std::vector<cv::Scalar> color;
108 | srand(time(0));
109 | for (int i = 0; i < 80; i++) {
110 | int b = rand() % 256;
111 | int g = rand() % 256;
112 | int r = rand() % 256;
113 | color.push_back(cv::Scalar(b, g, r));
114 | }
115 | std::vector<OutputParams> result;
116 | cv::VideoCapture cap(0);
117 | if (!cap.isOpened())
118 | {
119 | std::cout << "open capture failured!" << std::endl;
120 | return -1;
121 | }
122 | cv::Mat frame;
123 | bool isPose = false;
124 | PoseParams poseParams;
125 | #ifdef VIDEO_OPENCV
126 | cv::dnn::Net net;
127 | if (typeid(task) == typeid(Yolov8Pose)) {
128 | isPose = true;
129 | }
130 | if (task.ReadModel(net, model_path, true)) {
131 | std::cout << "read net ok!" << std::endl;
132 | }
133 | else {
134 | std::cout << "read net failed!" << std::endl;
135 | return -1;
136 | }
137 |
138 | #else
139 | if (typeid(task) == typeid(Yolov8PoseOnnx)) {
140 | isPose = true;
141 | }
142 | if (task.ReadModel(model_path, true)) {
143 | std::cout << "read net ok!" << std::endl;
144 | }
145 | else {
146 | std::cout << "read net failed!" << std::endl;
147 | return -1;
148 | }
149 |
150 | #endif
151 |
152 | while (true)
153 | {
154 |
155 | cap.read(frame);
156 | if (frame.empty())
157 | {
158 | std::cout << "read to end" << std::endl;
159 | break;
160 | }
161 | result.clear();
162 | #ifdef VIDEO_OPENCV
163 |
164 | if (task.Detect(frame, net, result)) {
165 |
166 | if (isPose)
167 | DrawPredPose(frame, result, poseParams,true);
168 | else
169 | DrawPred(frame, result, task._className, color,true);
170 |
171 | }
172 | #else
173 | if (task.OnnxDetect(frame, result)) {
174 | if (isPose)
175 | DrawPredPose(frame, result, poseParams, true);
176 | else
177 | DrawPred(frame, result, task._className, color, true);
178 | }
179 | #endif
180 | int k = waitKey(10);
181 | if (k == 27) { //esc
182 | break;
183 | }
184 |
185 | }
186 | cap.release();
187 |
188 | system("pause");
189 |
190 | return 0;
191 | }
192 |
193 |
194 | int main() {
195 |
196 | std::string img_path = "./images/bus.jpg";
197 |
198 | std::string model_path_detect = "./models/yolov8s.onnx";
199 | std::string model_path_rtdetr = "./models/rtdetr-l.onnx"; //yolov8-rtdetr
200 | std::string model_path_obb = "./models/yolov8s-obb.onnx";
201 | std::string model_path_seg = "./models/yolov8s-seg.onnx";
202 | std::string model_path_pose = "./models/yolov8s-pose.onnx";
203 |
204 | cv::Mat src = imread(img_path);
205 | cv::Mat img = src.clone();
206 |
207 | Yolov8 task_detect_ocv;
208 | Yolov8Onnx task_detect_ort;
209 |
210 | Yolov8Seg task_segment_ocv;
211 | Yolov8SegOnnx task_segment_ort;
212 |
213 | Yolov8Obb task_obb_ocv;
214 | Yolov8ObbOnnx task_obb_ort;
215 |
216 | Yolov8Pose task_pose_ocv;
217 | Yolov8PoseOnnx task_pose_ort;
218 |
219 | RTDETROnnx task_rtdetr_ort;
220 |
221 | //yolov8(task_detect_ocv,img,model_path_detect); //yolov8 opencv detect
222 | //img = src.clone();
223 | //yolov8_onnx(task_detect_ort,img,model_path_detect); //yolov8 onnxruntime detect
224 | //
225 | //img = src.clone();
226 | //yolov8_onnx(task_rtdetr_ort, img, model_path_rtdetr); //yolov8-rtdetr onnxruntime detect
227 |
228 | //img = src.clone();
229 | //yolov8(task_segment_ocv,img,model_path_seg); //yolov8 opencv segment
230 | //img = src.clone();
231 | //yolov8_onnx(task_segment_ort,img,model_path_seg); //yolov8 onnxruntime segment
232 |
233 |
234 | //img = src.clone();
235 | //yolov8(task_obb_ocv, img, model_path_obb); //yolov8 opencv obb
236 | //img = src.clone();
237 | //yolov8_onnx(task_obb_ort, img, model_path_obb); //yolov8 onnxruntime obb
238 |
239 | //img = src.clone();
240 | //yolov8(task_pose_ocv, img, model_path_pose); //yolov8 opencv pose
241 | img = src.clone();
242 | yolov8_onnx(task_pose_ort, img, model_path_pose); //yolov8 onnxruntime pose
243 |
244 | #ifdef VIDEO_OPENCV
245 | video_demo(task_detect_ocv, model_path_detect);
246 | //video_demo(task_segment_ocv, model_path_seg);
247 | //video_demo(task_pose_ocv, model_path_pose);
248 | #else
249 | //video_demo(task_detect_ort, model_path_detect);
250 | //video_demo(task_rtdetr_ort, model_path_rtdetr);
251 | //video_demo(task_segment_ort, model_path_seg);
252 | //video_demo(task_pose_ort, model_path_pose);
253 | #endif
254 | return 0;
255 | }
256 |
257 |
258 |
--------------------------------------------------------------------------------
/models/put_model_here:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UNeedCryDear/yolov8-opencv-onnxruntime-cpp/ddd906cd9e5e459f2bc9788985fc8516476b5aba/models/put_model_here
--------------------------------------------------------------------------------
/rtdetr_onnx.cpp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UNeedCryDear/yolov8-opencv-onnxruntime-cpp/ddd906cd9e5e459f2bc9788985fc8516476b5aba/rtdetr_onnx.cpp
--------------------------------------------------------------------------------
/rtdetr_onnx.h:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UNeedCryDear/yolov8-opencv-onnxruntime-cpp/ddd906cd9e5e459f2bc9788985fc8516476b5aba/rtdetr_onnx.h
--------------------------------------------------------------------------------
/yolov8.cpp:
--------------------------------------------------------------------------------
1 | #include"yolov8.h"
2 |
3 | //using namespace std;
4 | //using namespace cv;
5 | //using namespace cv::dnn;
6 |
7 | bool Yolov8::ReadModel(cv::dnn::Net& net, std::string& netPath, bool isCuda = false) {
8 | try {
9 | if (!CheckModelPath(netPath))
10 | return false;
11 | #if CV_VERSION_MAJOR==4 &&CV_VERSION_MINOR<7
12 | std::cout << "Yolov8 Need OpenCV Version >=4.7.0" << std::endl;
13 | return false;
14 | #endif
15 | net = cv::dnn::readNetFromONNX(netPath);
16 | #if CV_VERSION_MAJOR==4 &&CV_VERSION_MINOR==7&&CV_VERSION_REVISION==0
17 | net.enableWinograd(false); //bug of opencv4.7.x in AVX only platform ,https://github.com/opencv/opencv/pull/23112 and https://github.com/opencv/opencv/issues/23080
18 | //net.enableWinograd(true); //If your CPU supports AVX2, you can set it true to speed up
19 | #endif
20 | }
21 | catch (const std::exception&) {
22 | return false;
23 | }
24 |
25 | if (isCuda) {
26 | //cuda
27 | net.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA);
28 | net.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA); //or DNN_TARGET_CUDA_FP16
29 | }
30 | else {
31 | //cpu
32 | std::cout << "Inference device: CPU" << std::endl;
33 | net.setPreferableBackend(cv::dnn::DNN_BACKEND_DEFAULT);
34 | net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
35 | }
36 | return true;
37 | }
38 |
39 |
40 | bool Yolov8::Detect(cv::Mat& srcImg, cv::dnn::Net& net, std::vector<OutputParams>& output) {
41 | cv::Mat blob;
42 | output.clear();
43 | int col = srcImg.cols;
44 | int row = srcImg.rows;
45 | cv::Mat netInputImg;
46 | cv::Vec4d params;
47 | LetterBox(srcImg, netInputImg, params, cv::Size(_netWidth, _netHeight));
48 | cv::dnn::blobFromImage(netInputImg, blob, 1 / 255.0, cv::Size(_netWidth, _netHeight), cv::Scalar(0, 0, 0), true, false);
49 | //**************************************************************************************************************************************************/
50 | // If there is no problem with other settings, but results are a lot different from Python-onnx, you can try
51 | // to use the following two lines instead:
52 | //
53 | //$ cv::dnn::blobFromImage(netInputImg, blob, 1 / 255.0, cv::Size(_netWidth, _netHeight), cv::Scalar(104, 117, 123), true, false);
54 | //$ cv::dnn::blobFromImage(netInputImg, blob, 1 / 255.0, cv::Size(_netWidth, _netHeight), cv::Scalar(114, 114,114), true, false);
55 | //****************************************************************************************************************************************************/
56 | net.setInput(blob);
57 | std::vector<cv::Mat> net_output_img;
58 |
59 | net.forward(net_output_img, net.getUnconnectedOutLayersNames()); //get outputs
60 | std::vector<int> class_ids;// res-class_id
61 | std::vector<float> confidences;// res-conf
62 | std::vector<cv::Rect> boxes;// res-box
63 | cv::Mat output0=cv::Mat( cv::Size(net_output_img[0].size[2], net_output_img[0].size[1]), CV_32F, (float*)net_output_img[0].data).t(); //[bs,116,8400]=>[bs,8400,116]
64 | int net_width = output0.cols;
65 | int rows = output0.rows;
66 | int score_array_length = net_width - 4;
67 | float* pdata = (float*)output0.data;
68 | for (int r = 0; r < rows; ++r) {
69 | cv::Mat scores(1, score_array_length, CV_32FC1, pdata + 4);
70 | cv::Point classIdPoint;
71 | double max_class_score;
72 | minMaxLoc(scores, 0, &max_class_score, 0, &classIdPoint);
73 | max_class_score = (float)max_class_score;
74 | if (max_class_score >= _classThreshold) {
75 | //rect [x,y,w,h]
76 | float x = (pdata[0] - params[2]) / params[0];
77 | float y = (pdata[1] - params[3]) / params[1];
78 | float w = pdata[2] / params[0];
79 | float h = pdata[3] / params[1];
80 | int left = MAX(int(x - 0.5 * w + 0.5), 0);
81 | int top = MAX(int(y - 0.5 * h + 0.5), 0);
82 | class_ids.push_back(classIdPoint.x);
83 | confidences.push_back(max_class_score);
84 | boxes.push_back(cv::Rect(left, top, int(w + 0.5), int(h + 0.5)));
85 | }
86 | pdata += net_width;//next line
87 | }
88 | //NMS
89 | std::vector<int> nms_result;
90 | cv::dnn::NMSBoxes(boxes, confidences, _classThreshold, _nmsThreshold, nms_result);
91 | std::vector<std::vector<float>> temp_mask_proposals; //not used for plain detection; kept for parity with the seg version
92 | cv::Rect holeImgRect(0, 0, srcImg.cols, srcImg.rows);
93 | for (int i = 0; i < nms_result.size(); ++i) {
94 | int idx = nms_result[i];
95 | OutputParams result;
96 | result.id = class_ids[idx];
97 | result.confidence = confidences[idx];
98 | result.box = boxes[idx] & holeImgRect;
99 | if (result.box.area() < 1)
100 | continue;
101 | output.push_back(result);
102 | }
103 | if (output.size())
104 | return true;
105 | else
106 | return false;
107 | }
108 |
109 |
--------------------------------------------------------------------------------
/yolov8.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include <iostream>
3 | #include <opencv2/opencv.hpp>
4 | #include "yolov8_utils.h"
5 |
6 | class Yolov8 {
7 | public:
8 | Yolov8() {
9 | }
10 | ~Yolov8() {}
11 |
12 | bool ReadModel(cv::dnn::Net& net, std::string& netPath, bool isCuda);
13 | bool Detect(cv::Mat& srcImg, cv::dnn::Net& net, std::vector<OutputParams>& output);
14 |
15 | int _netWidth = 640; //ONNX model input width
16 | int _netHeight = 640; //ONNX model input height
17 |
18 |
19 | //class names; modify this for your own model
20 | std::vector<std::string> _className = { "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
21 | "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
22 | "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
23 | "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
24 | "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
25 | "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
26 | "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone",
27 | "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
28 | "hair drier", "toothbrush" };
29 | private:
30 | float _classThreshold = 0.25;
31 | float _nmsThreshold = 0.45;
32 | };
33 |
--------------------------------------------------------------------------------
/yolov8_obb.cpp:
--------------------------------------------------------------------------------
1 | #include"yolov8_obb.h"
2 |
3 | //using namespace std;
4 | //using namespace cv;
5 | //using namespace cv::dnn;
6 |
7 | bool Yolov8Obb::ReadModel(cv::dnn::Net& net, std::string& netPath, bool isCuda = false) {
8 | try {
9 | if (!CheckModelPath(netPath))
10 | return false;
11 | #if CV_VERSION_MAJOR==4 &&CV_VERSION_MINOR<7
12 | std::cout << "OBB Need OpenCV Version >=4.7.0" << std::endl;
13 | return false;
14 | #endif
15 | net = cv::dnn::readNetFromONNX(netPath);
16 | #if CV_VERSION_MAJOR==4 &&CV_VERSION_MINOR==7&&CV_VERSION_REVISION==0
17 | net.enableWinograd(false); //bug of opencv4.7.x in AVX only platform ,https://github.com/opencv/opencv/pull/23112 and https://github.com/opencv/opencv/issues/23080
18 | //net.enableWinograd(true); //If your CPU supports AVX2, you can set it true to speed up
19 | #endif
20 | }
21 | catch (const std::exception&) {
22 | return false;
23 | }
24 |
25 | if (isCuda) {
26 | //cuda
27 | net.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA);
28 | net.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA); //or DNN_TARGET_CUDA_FP16
29 | }
30 | else {
31 | //cpu
32 | std::cout << "Inference device: CPU" << std::endl;
33 | net.setPreferableBackend(cv::dnn::DNN_BACKEND_DEFAULT);
34 | net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
35 | }
36 | return true;
37 | }
38 |
39 |
40 | bool Yolov8Obb::Detect(cv::Mat& srcImg, cv::dnn::Net& net, std::vector<OutputParams>& output) {
41 | cv::Mat blob;
42 | output.clear();
43 | int col = srcImg.cols;
44 | int row = srcImg.rows;
45 | cv::Mat netInputImg;
46 | cv::Vec4d params;
47 | LetterBox(srcImg, netInputImg, params, cv::Size(_netWidth, _netHeight));
48 | cv::dnn::blobFromImage(netInputImg, blob, 1 / 255.0, cv::Size(_netWidth, _netHeight), cv::Scalar(0, 0, 0), true, false);
49 | //**************************************************************************************************************************************************/
50 | // If there is no problem with other settings, but results are a lot different from Python-onnx, you can try
51 | // to use the following two lines instead:
52 | //
53 | //$ cv::dnn::blobFromImage(netInputImg, blob, 1 / 255.0, cv::Size(_netWidth, _netHeight), cv::Scalar(104, 117, 123), true, false);
54 | //$ cv::dnn::blobFromImage(netInputImg, blob, 1 / 255.0, cv::Size(_netWidth, _netHeight), cv::Scalar(114, 114,114), true, false);
55 | //****************************************************************************************************************************************************/
56 | net.setInput(blob);
57 | std::vector<cv::Mat> net_output_img;
58 |
59 | net.forward(net_output_img, net.getUnconnectedOutLayersNames()); //get outputs
60 | std::vector<int> class_ids;// res-class_id
61 | std::vector<float> confidences;// res-conf
62 | std::vector<cv::RotatedRect> boxes;// res-box
63 | cv::Mat output0 = cv::Mat(cv::Size(net_output_img[0].size[2], net_output_img[0].size[1]), CV_32F, (float*)net_output_img[0].data).t(); //[bs,20,21504]=>[bs,21504,20]
64 | int net_width = output0.cols;
65 | int rows = output0.rows;
66 | int class_score_length = net_width - 5;
67 | int angle_index = net_width - 1;
68 | float* pdata = (float*)output0.data;
69 | for (int r = 0; r < rows; ++r) {
70 | cv::Mat scores(1, class_score_length, CV_32FC1, pdata + 4);
71 | cv::Point classIdPoint;
72 | double max_class_score;
73 | minMaxLoc(scores, 0, &max_class_score, 0, &classIdPoint);
74 | max_class_score = (float)max_class_score;
75 | if (max_class_score >= _classThreshold) {
76 | //rect [x,y,w,h]
77 | float x = (pdata[0] - params[2]) / params[0];
78 | float y = (pdata[1] - params[3]) / params[1];
79 | float w = pdata[2] / params[0];
80 | float h = pdata[3] / params[1];
81 | float angle = pdata[angle_index] / CV_PI * 180.0;
82 | class_ids.push_back(classIdPoint.x);
83 | confidences.push_back(max_class_score);
84 | //cv::RotatedRect temp_rotated;
85 | //BBox2Obb(x, y, w, h, angle, temp_rotated);
86 | //boxes.push_back(temp_rotated);
87 | boxes.push_back(cv::RotatedRect(cv::Point2f(x, y), cv::Size2f(w, h), angle));
88 | }
89 | pdata += net_width;//next line
90 | }
91 | //NMS
92 | std::vector<int> nms_result;
93 | cv::dnn::NMSBoxes(boxes, confidences, _classThreshold, _nmsThreshold, nms_result);
94 | std::vector<std::vector<float>> temp_mask_proposals; //not used for obb; kept for parity with the seg version
95 | //cv::Rect holeImgRect(0, 0, srcImg.cols, srcImg.rows);
96 | for (int i = 0; i < nms_result.size(); ++i) {
97 | int idx = nms_result[i];
98 | OutputParams result;
99 | result.id = class_ids[idx];
100 | result.confidence = confidences[idx];
101 | result.rotatedBox = boxes[idx];
102 | if (result.rotatedBox.size.width<1|| result.rotatedBox.size.height<1)
103 | continue;
104 | output.push_back(result);
105 | }
106 | if (output.size())
107 | return true;
108 | else
109 | return false;
110 | }
111 |
112 |
--------------------------------------------------------------------------------
/yolov8_obb.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include <iostream>
3 | #include <opencv2/opencv.hpp>
4 | #include "yolov8_utils.h"
5 |
6 | class Yolov8Obb {
7 | public:
8 | Yolov8Obb() {
9 | }
10 | ~Yolov8Obb() {}
11 |
12 | bool ReadModel(cv::dnn::Net& net, std::string& netPath, bool isCuda);
13 | bool Detect(cv::Mat& srcImg, cv::dnn::Net& net, std::vector<OutputParams>& output);
14 |
15 | int _netWidth = 1024; //ONNX model input width
16 | int _netHeight = 1024; //ONNX model input height
17 |
18 |
19 | //class names; modify this for your own model
20 | std::vector<std::string> _className =
21 | { "plane", "ship", "storage tank",
22 | "baseball diamond", "tennis court", "basketball court",
23 | "ground track field", "harbor", "bridge",
24 | "large vehicle", "small vehicle", "helicopter",
25 | "roundabout", "soccer ball field", "swimming pool"
26 | };
27 | private:
28 | float _classThreshold = 0.25;
29 | float _nmsThreshold = 0.45;
30 | };
31 |
--------------------------------------------------------------------------------
/yolov8_obb_onnx.cpp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UNeedCryDear/yolov8-opencv-onnxruntime-cpp/ddd906cd9e5e459f2bc9788985fc8516476b5aba/yolov8_obb_onnx.cpp
--------------------------------------------------------------------------------
/yolov8_obb_onnx.h:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UNeedCryDear/yolov8-opencv-onnxruntime-cpp/ddd906cd9e5e459f2bc9788985fc8516476b5aba/yolov8_obb_onnx.h
--------------------------------------------------------------------------------
/yolov8_onnx.cpp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UNeedCryDear/yolov8-opencv-onnxruntime-cpp/ddd906cd9e5e459f2bc9788985fc8516476b5aba/yolov8_onnx.cpp
--------------------------------------------------------------------------------
/yolov8_onnx.h:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UNeedCryDear/yolov8-opencv-onnxruntime-cpp/ddd906cd9e5e459f2bc9788985fc8516476b5aba/yolov8_onnx.h
--------------------------------------------------------------------------------
/yolov8_pose.cpp:
--------------------------------------------------------------------------------
1 | #include"yolov8_pose.h"
2 |
3 | //using namespace std;
4 | //using namespace cv;
5 | //using namespace cv::dnn;
6 |
7 | bool Yolov8Pose::ReadModel(cv::dnn::Net& net, std::string& netPath, bool isCuda = false) {
8 | try {
9 | if (!CheckModelPath(netPath))
10 | return false;
11 | #if CV_VERSION_MAJOR==4 &&CV_VERSION_MINOR<7
12 | std::cout << "OBB Need OpenCV Version >=4.7.0" << std::endl;
13 | return false;
14 | #endif
15 | net = cv::dnn::readNetFromONNX(netPath);
16 | #if CV_VERSION_MAJOR==4 &&CV_VERSION_MINOR==7&&CV_VERSION_REVISION==0
17 | net.enableWinograd(false); //bug of opencv4.7.x in AVX only platform ,https://github.com/opencv/opencv/pull/23112 and https://github.com/opencv/opencv/issues/23080
18 | //net.enableWinograd(true); //If your CPU supports AVX2, you can set it true to speed up
19 | #endif
20 | }
21 | catch (const std::exception&) {
22 | return false;
23 | }
24 |
25 | if (isCuda) {
26 | //cuda
27 | net.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA);
28 | net.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA); //or DNN_TARGET_CUDA_FP16
29 | }
30 | else {
31 | //cpu
32 | std::cout << "Inference device: CPU" << std::endl;
33 | net.setPreferableBackend(cv::dnn::DNN_BACKEND_DEFAULT);
34 | net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
35 | }
36 | return true;
37 | }
38 |
39 |
40 | bool Yolov8Pose::Detect(cv::Mat& srcImg, cv::dnn::Net& net, std::vector<OutputParams>& output) {
41 | cv::Mat blob;
42 | output.clear();
43 | int col = srcImg.cols;
44 | int row = srcImg.rows;
45 | cv::Mat netInputImg;
46 | cv::Vec4d params;
47 | LetterBox(srcImg, netInputImg, params, cv::Size(_netWidth, _netHeight));
48 | cv::dnn::blobFromImage(netInputImg, blob, 1 / 255.0, cv::Size(_netWidth, _netHeight), cv::Scalar(0, 0, 0), true, false);
49 | //**************************************************************************************************************************************************/
50 | // If there is no problem with other settings, but results are a lot different from Python-onnx, you can try
51 | // to use the following two lines instead:
52 | //
53 | //$ cv::dnn::blobFromImage(netInputImg, blob, 1 / 255.0, cv::Size(_netWidth, _netHeight), cv::Scalar(104, 117, 123), true, false);
54 | //$ cv::dnn::blobFromImage(netInputImg, blob, 1 / 255.0, cv::Size(_netWidth, _netHeight), cv::Scalar(114, 114,114), true, false);
55 | //****************************************************************************************************************************************************/
56 | net.setInput(blob);
57 | std::vector<cv::Mat> net_output_img;
58 | net.forward(net_output_img, net.getUnconnectedOutLayersNames()); //get outputs
59 | std::vector<int> class_ids;// res-class_id
60 | std::vector<float> confidences;// res-conf
61 | std::vector<cv::Rect> boxes;// res-box
62 | std::vector<std::vector<PoseKeyPoint>> pose_key_points; //store the keypoints
63 | cv::Mat output0 = cv::Mat(cv::Size(net_output_img[0].size[2], net_output_img[0].size[1]), CV_32F, (float*)net_output_img[0].data).t(); //[bs,56,8400]=>[bs,8400,56]
64 | int net_width = output0.cols;
65 | int rows = output0.rows;
66 | int key_point_length = net_width - 5;
67 | int key_point_num = 17; //_bodyKeyPoints.size(), shape[x, y, confidence]
68 | if (key_point_num * 3 != key_point_length) {
69 | std::cout << "Pose should be shape [x, y, confidence] with 17-points" << std::endl;
70 | return false;
71 | }
72 |
73 | float* pdata = (float*)output0.data;
74 | for (int r = 0; r < rows; ++r) {
75 | float max_class_score = pdata[4];
76 | if (max_class_score >= _classThreshold) {
77 | //rect [x,y,w,h]
78 | float x = (pdata[0] - params[2]) / params[0];
79 | float y = (pdata[1] - params[3]) / params[1];
80 | float w = pdata[2] / params[0];
81 | float h = pdata[3] / params[1];
82 | class_ids.push_back(0);
83 | confidences.push_back(max_class_score);
84 | int left = MAX(int(x - 0.5 * w + 0.5), 0);
85 | int top = MAX(int(y - 0.5 * h + 0.5), 0);
86 | boxes.push_back(cv::Rect(left, top, int(w + 0.5), int(h + 0.5)));
87 | std::vector<PoseKeyPoint> temp_kpts;
88 | for (int kpt = 0; kpt < key_point_length; kpt += 3) {
89 | PoseKeyPoint temp_kp;
90 | temp_kp.x = (pdata[5 + kpt] - params[2]) / params[0];
91 | temp_kp.y = (pdata[6 + kpt] - params[3]) / params[1];
92 | temp_kp.confidence = pdata[7 + kpt];
93 | temp_kpts.push_back(temp_kp);
94 | }
95 | pose_key_points.push_back(temp_kpts);
96 |
97 | }
98 | pdata += net_width;//next line
99 | }
100 | //NMS
101 | std::vector<int> nms_result;
102 | cv::dnn::NMSBoxes(boxes, confidences, _classThreshold, _nmsThreshold, nms_result);
103 | std::vector<std::vector<float>> temp_mask_proposals; //not used for pose; kept for parity with the seg version
104 | cv::Rect holeImgRect(0, 0, srcImg.cols, srcImg.rows);
105 | for (int i = 0; i < nms_result.size(); ++i) {
106 | int idx = nms_result[i];
107 | OutputParams result;
108 | result.id = class_ids[idx];
109 | result.confidence = confidences[idx];
110 | result.box = boxes[idx]& holeImgRect;
111 | result.keyPoints = pose_key_points[idx];
112 | if (result.box.area() < 1)
113 | continue;
114 | output.push_back(result);
115 | }
116 | if (output.size())
117 | return true;
118 | else
119 | return false;
120 | }
121 |
122 |
--------------------------------------------------------------------------------
/yolov8_pose.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include <iostream>
3 | #include <opencv2/opencv.hpp>
4 | #include "yolov8_utils.h"
5 |
6 | class Yolov8Pose {
7 | public:
8 | Yolov8Pose() {}
9 | ~Yolov8Pose() {}
10 |
11 | bool ReadModel(cv::dnn::Net& net, std::string& netPath, bool isCuda);
12 | bool Detect(cv::Mat& srcImg, cv::dnn::Net& net, std::vector<OutputParams>& output);
13 |
14 | int _netWidth = 640; //ONNX model input width
15 | int _netHeight = 640; //ONNX model input height
16 |
17 |
18 | //class names; modify this for your own model
19 | std::vector<std::string> _className = { "person" };
20 |
21 | private:
22 | float _classThreshold = 0.25;
23 | float _nmsThreshold = 0.45;
24 | //float _keyPointThreshold = 0.5;
25 | };
26 |
--------------------------------------------------------------------------------
/yolov8_pose_onnx.cpp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UNeedCryDear/yolov8-opencv-onnxruntime-cpp/ddd906cd9e5e459f2bc9788985fc8516476b5aba/yolov8_pose_onnx.cpp
--------------------------------------------------------------------------------
/yolov8_pose_onnx.h:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UNeedCryDear/yolov8-opencv-onnxruntime-cpp/ddd906cd9e5e459f2bc9788985fc8516476b5aba/yolov8_pose_onnx.h
--------------------------------------------------------------------------------
/yolov8_seg.cpp:
--------------------------------------------------------------------------------
1 | #include"yolov8_seg.h"
2 | //using namespace std;
3 | //using namespace cv;
4 | //using namespace cv::dnn;
5 |
6 | bool Yolov8Seg::ReadModel(cv::dnn::Net& net, std::string& netPath, bool isCuda = false) {
7 | try {
8 | if (!CheckModelPath(netPath))
9 | return false;
10 | #if CV_VERSION_MAJOR==4 &&CV_VERSION_MINOR<7
11 | std::cout << "Yolov8-seg Need OpenCV Version >=4.7.0" << std::endl;
12 | return false;
13 | #endif
14 | net = cv::dnn::readNetFromONNX(netPath);
15 | #if CV_VERSION_MAJOR==4 &&CV_VERSION_MINOR==7&&CV_VERSION_REVISION==0
16 | net.enableWinograd(false); //bug of opencv4.7.x in AVX only platform ,https://github.com/opencv/opencv/pull/23112 and https://github.com/opencv/opencv/issues/23080
17 | //net.enableWinograd(true); //If your CPU supports AVX2, you can set it true to speed up
18 | #endif
19 | }
20 | catch (const std::exception&) {
21 | return false;
22 | }
23 |
24 | if (isCuda) {
25 | //cuda
26 | net.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA);
27 | net.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA); //or DNN_TARGET_CUDA_FP16
28 | }
29 | else {
30 | //cpu
31 | std::cout << "Inference device: CPU" << std::endl;
32 | net.setPreferableBackend(cv::dnn::DNN_BACKEND_DEFAULT);
33 | net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
34 | }
35 | return true;
36 | }
37 |
38 |
39 | bool Yolov8Seg::Detect(cv::Mat& srcImg, cv::dnn::Net& net, std::vector<OutputParams>& output) {
40 | cv::Mat blob;
41 | output.clear();
42 | int col = srcImg.cols;
43 | int row = srcImg.rows;
44 | cv::Mat netInputImg;
45 | cv::Vec4d params;
46 | LetterBox(srcImg, netInputImg, params, cv::Size(_netWidth, _netHeight));
47 | cv::dnn::blobFromImage(netInputImg, blob, 1 / 255.0, cv::Size(_netWidth, _netHeight), cv::Scalar(0, 0, 0), true, false);
48 | //**************************************************************************************************************************************************/
49 | // If there is no problem with other settings, but results are a lot different from Python-onnx, you can try
50 | // to use the following two lines instead:
51 | //
52 | //$ cv::dnn::blobFromImage(netInputImg, blob, 1 / 255.0, cv::Size(_netWidth, _netHeight), cv::Scalar(104, 117, 123), true, false);
53 | //$ cv::dnn::blobFromImage(netInputImg, blob, 1 / 255.0, cv::Size(_netWidth, _netHeight), cv::Scalar(114, 114,114), true, false);
54 | //****************************************************************************************************************************************************/
55 | net.setInput(blob);
56 | std::vector<cv::Mat> net_output_img;
57 | std::vector<std::string> output_layer_names{ "output0","output1" };
58 | net.forward(net_output_img, output_layer_names); //get outputs
59 | std::vector<int> class_ids;// res-class_id
60 | std::vector<float> confidences;// res-conf
61 | std::vector<cv::Rect> boxes;// res-box
62 | std::vector<std::vector<float>> picked_proposals;  //output0[:,:, 4 + _className.size():net_width]===> for mask
63 | cv::Mat output0 = cv::Mat(cv::Size(net_output_img[0].size[2], net_output_img[0].size[1]), CV_32F, (float*)net_output_img[0].data).t(); //[bs,116,8400]=>[bs,8400,116]
64 | int rows = output0.rows;
65 | int net_width = output0.cols;
66 | int score_array_length = net_width - 4 - net_output_img[1].size[1];
67 | float* pdata = (float*)output0.data;
68 |
69 | for (int r = 0; r < rows; ++r) {
70 | cv::Mat scores(1, score_array_length, CV_32FC1, pdata + 4);
71 | cv::Point classIdPoint;
72 | double max_class_score;
73 | minMaxLoc(scores, 0, &max_class_score, 0, &classIdPoint);
74 | max_class_score = (float)max_class_score;
75 | if (max_class_score >= _classThreshold) {
76 | std::vector<float> temp_proto(pdata + 4 + score_array_length, pdata + net_width);
77 | picked_proposals.push_back(temp_proto);
78 | //rect [x,y,w,h]
79 | float x = (pdata[0] - params[2]) / params[0];
80 | float y = (pdata[1] - params[3]) / params[1];
81 | float w = pdata[2] / params[0];
82 | float h = pdata[3] / params[1];
83 | int left = MAX(int(x - 0.5 * w + 0.5), 0);
84 | int top = MAX(int(y - 0.5 * h + 0.5), 0);
85 | class_ids.push_back(classIdPoint.x);
86 | confidences.push_back(max_class_score);
87 | boxes.push_back(cv::Rect(left, top, int(w + 0.5), int(h + 0.5)));
88 | }
89 | pdata += net_width;//next line
90 | }
91 | //NMS
92 | std::vector<int> nms_result;
93 | cv::dnn::NMSBoxes(boxes, confidences, _classThreshold, _nmsThreshold, nms_result);
94 | std::vector<std::vector<float>> temp_mask_proposals;
95 | cv::Rect holeImgRect(0, 0, srcImg.cols, srcImg.rows);
96 | for (int i = 0; i < nms_result.size(); ++i) {
97 |
98 | int idx = nms_result[i];
99 | OutputParams result;
100 | result.id = class_ids[idx];
101 | result.confidence = confidences[idx];
102 | result.box = boxes[idx] & holeImgRect;
103 | if (result.box.area() < 1)
104 | continue;
105 | temp_mask_proposals.push_back(picked_proposals[idx]);
106 | output.push_back(result);
107 | }
108 | MaskParams mask_params;
109 | mask_params.params = params;
110 | mask_params.srcImgShape = srcImg.size();
111 | mask_params.netHeight = _netHeight;
112 | mask_params.netWidth = _netWidth;
113 | mask_params.maskThreshold = _maskThreshold;
114 | for (int i = 0; i < temp_mask_proposals.size(); ++i) {
115 | GetMask2(cv::Mat(temp_mask_proposals[i]).t(), net_output_img[1], output[i], mask_params);
116 | }
117 |
118 |
119 | //******************** ****************
120 | // Old-version approach: use this if GetMask2() above still keeps reporting errors after enabling the commented part.
121 | //If GetMask2() still reports errors, it is recommended to use GetMask().
122 | //cv::Mat mask_proposals;
123 | //for (int i = 0; i < temp_mask_proposals.size(); ++i) {
124 | // mask_proposals.push_back(cv::Mat(temp_mask_proposals[i]).t());
125 | //}
126 | //GetMask(mask_proposals, net_output_img[1], output, mask_params);
127 | //*****************************************************/
128 |
129 |
130 | if (output.size())
131 | return true;
132 | else
133 | return false;
134 | }
135 |
136 |
--------------------------------------------------------------------------------
/yolov8_seg.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include <iostream>
3 | #include <opencv2/opencv.hpp>
4 | #include "yolov8_utils.h"
5 |
6 | class Yolov8Seg {
7 | public:
8 | Yolov8Seg() {
9 | }
10 | ~Yolov8Seg() {}
11 |
12 | bool ReadModel(cv::dnn::Net& net, std::string& netPath, bool isCuda);
13 | bool Detect(cv::Mat& srcImg, cv::dnn::Net& net, std::vector<OutputParams>& output);
14 |
15 | //class names; modify this for your own model
16 | std::vector<std::string> _className = { "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
17 | "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
18 | "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
19 | "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
20 | "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
21 | "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
22 | "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone",
23 | "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
24 | "hair drier", "toothbrush" };
25 | int _netWidth = 640; //ONNX model input width
26 | int _netHeight = 640; //ONNX model input height
27 |
28 | private:
29 | float _classThreshold = 0.25;
30 | float _nmsThreshold = 0.45;
31 | float _maskThreshold = 0.5;
32 | };
33 |
--------------------------------------------------------------------------------
/yolov8_seg_onnx.cpp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UNeedCryDear/yolov8-opencv-onnxruntime-cpp/ddd906cd9e5e459f2bc9788985fc8516476b5aba/yolov8_seg_onnx.cpp
--------------------------------------------------------------------------------
/yolov8_seg_onnx.h:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UNeedCryDear/yolov8-opencv-onnxruntime-cpp/ddd906cd9e5e459f2bc9788985fc8516476b5aba/yolov8_seg_onnx.h
--------------------------------------------------------------------------------
/yolov8_utils.cpp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UNeedCryDear/yolov8-opencv-onnxruntime-cpp/ddd906cd9e5e459f2bc9788985fc8516476b5aba/yolov8_utils.cpp
--------------------------------------------------------------------------------
/yolov8_utils.h:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UNeedCryDear/yolov8-opencv-onnxruntime-cpp/ddd906cd9e5e459f2bc9788985fc8516476b5aba/yolov8_utils.h
--------------------------------------------------------------------------------