├── .gitignore
├── README.md
├── android
│   ├── AndroidManifest.xml
│   ├── build.gradle
│   ├── gradle
│   │   └── wrapper
│   │       ├── gradle-wrapper.jar
│   │       └── gradle-wrapper.properties
│   ├── gradlew
│   ├── gradlew.bat
│   ├── res
│   │   ├── drawable-hdpi
│   │   │   └── icon.png
│   │   ├── drawable-ldpi
│   │   │   └── icon.png
│   │   ├── drawable-mdpi
│   │   │   └── icon.png
│   │   └── values
│   │       └── libs.xml
│   └── src
│       └── com
│           └── amin
│               └── QtAndroidGallery
│                   └── QtAndroidGallery.java
├── assets
│   ├── ManjaroMix.qss
│   ├── ai.png
│   └── icon.png
├── docs
│   ├── demo1.gif
│   ├── demo2.gif
│   ├── p1.png
│   ├── p2.png
│   └── p3.png
├── main.cpp
├── model
│   └── modules_ncnn.py
├── myqlabel.cpp
├── myqlabel.h
├── myvideosurface.cpp
├── myvideosurface.h
├── pic.qrc
├── widget.cpp
├── widget.h
├── widget.ui
├── yolo.cpp
├── yolo.h
└── yolov8.pro
/.gitignore:
--------------------------------------------------------------------------------
1 | # This file is used to ignore files which are generated
2 | # ----------------------------------------------------------------------------
3 |
4 | *~
5 | *.autosave
6 | *.a
7 | *.core
8 | *.moc
9 | *.o
10 | *.obj
11 | *.orig
12 | *.rej
13 | *.so
14 | *.so.*
15 | *_pch.h.cpp
16 | *_resource.rc
17 | *.qm
18 | .#*
19 | *.*#
20 | core
21 | !core/
22 | tags
23 | .DS_Store
24 | .directory
25 | *.debug
26 | Makefile*
27 | *.prl
28 | *.app
29 | moc_*.cpp
30 | ui_*.h
31 | qrc_*.cpp
32 | Thumbs.db
33 | *.res
34 | *.rc
35 | /.qmake.cache
36 | /.qmake.stash
37 |
38 | # qtcreator generated files
39 | *.pro.user*
40 |
41 | # xemacs temporary files
42 | *.flc
43 |
44 | # Vim temporary files
45 | .*.swp
46 |
47 | # Visual Studio generated files
48 | *.ib_pdb_index
49 | *.idb
50 | *.ilk
51 | *.pdb
52 | *.sln
53 | *.suo
54 | *.vcproj
55 | *vcproj.*.*.user
56 | *.ncb
57 | *.sdf
58 | *.opensdf
59 | *.vcxproj
60 | *vcxproj.*
61 |
62 | # MinGW generated files
63 | *.Debug
64 | *.Release
65 |
66 | # Python byte code
67 | *.pyc
68 |
69 | # Binaries
70 | # --------
71 | *.dll
72 | *.exe
73 |
74 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## YOLOv8 Android Deployment
2 |
3 | This project deploys YOLOv8s to run on a Xiaomi phone using Qt and NCNN.
4 |
5 | ### 1. Model conversion tricks
6 |
7 | + Modify `ultralytics\nn\modules.py`
8 |
9 | This step involves two changes: in the `C2f` block, replace `split` with slicing (**this step is not strictly necessary**), and remove the Detect head (the model can be exported with the Detect head attached, but the ncnn model converted that way has incorrect output dimensions, so the Detect head is instead implemented in ncnn as a custom layer). A sketch of the `C2f` change appears at the end of this section.
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 | + Convert to ONNX
20 |
21 | See: https://github.com/DataXujing/YOLOv8
22 |
23 | + Use onnx2ncnn to generate an ncnn-compatible model
24 |
25 | ```
26 | onnx2ncnn yolov8s.onnx yolov8s.param yolov8s.bin
27 | ```
28 |
29 | + Use ncnnoptimize to generate an FP16 model
30 |
31 | ```
32 | ncnnoptimize.exe yolov8s.param yolov8s.bin yolov8s_opt.param yolov8s_opt.bin 65536
33 | ```
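
For reference, the `C2f.forward` change from the first bullet (the version shipped in `model/modules_ncnn.py`) simply replaces `torch.split` with plain slicing; the rest of the class is unchanged. A minimal before/after sketch:

```
# original C2f.forward (uses split)
def forward(self, x):
    y = list(self.cv1(x).split((self.c, self.c), 1))
    y.extend(m(y[-1]) for m in self.m)
    return self.cv2(torch.cat(y, 1))

# modified forward (slicing only, as in model/modules_ncnn.py)
def forward(self, x):
    x = self.cv1(x)
    x = [x, x[:, self.c:, ...]]         # keep the full tensor and its second half
    x.extend(m(x[-1]) for m in self.m)  # run the bottlenecks on the last chunk
    x.pop(1)                            # drop the duplicated second half before concat
    return self.cv2(torch.cat(x, 1))
```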
34 |
35 | ### 2. Building the NCNN Android app with Qt
36 |
37 | For the Qt-for-Android setup, and for other Android apps built with Qt and NCNN, see the author's other projects, which describe the process in detail:
38 |
39 | + https://github.com/DataXujing/Qt_NCNN_NanoDet
40 | + https://github.com/DataXujing/ncnn_android_yolov6
41 |
42 |
43 |
44 |
45 |
46 | Apart from the model files, all the code for this part is included in this project!
47 |
48 | ### 3. Demo on a Xiaomi phone
49 |
50 |
51 |
52 | | Xiaomi phone Demo 1 | Xiaomi phone Demo 2 |
53 | | :---------------------------: | :-------------: |
54 | | ![](docs/demo1.gif) | ![](docs/demo2.gif) |
55 |
56 |
57 |
58 | ### 4. Download the APK
59 |
60 | + APK download: https://github.com/DataXujing/ncnn_android_yolov8/releases
61 |
--------------------------------------------------------------------------------
/android/AndroidManifest.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/android/build.gradle:
--------------------------------------------------------------------------------
1 | buildscript {
2 | repositories {
3 | google()
4 | jcenter()
5 | }
6 |
7 | dependencies {
8 | classpath 'com.android.tools.build:gradle:3.5.0'
9 | }
10 | }
11 |
12 | repositories {
13 | google()
14 | jcenter()
15 | }
16 |
17 | apply plugin: 'com.android.application'
18 |
19 | dependencies {
20 | implementation fileTree(dir: 'libs', include: ['*.jar', '*.aar'])
21 | }
22 |
23 | android {
24 | /*******************************************************
25 | * The following variables:
26 | * - androidBuildToolsVersion,
27 | * - androidCompileSdkVersion
28 | * - qt5AndroidDir - holds the path to qt android files
29 | * needed to build any Qt application
30 | * on Android.
31 | *
32 | * are defined in gradle.properties file. This file is
33 | * updated by QtCreator and androiddeployqt tools.
34 | * Changing them manually might break the compilation!
35 | *******************************************************/
36 |
37 | compileSdkVersion androidCompileSdkVersion.toInteger()
38 |
39 | buildToolsVersion '28.0.3'
40 |
41 | sourceSets {
42 | main {
43 | manifest.srcFile 'AndroidManifest.xml'
44 | java.srcDirs = [qt5AndroidDir + '/src', 'src', 'java']
45 | aidl.srcDirs = [qt5AndroidDir + '/src', 'src', 'aidl']
46 | res.srcDirs = [qt5AndroidDir + '/res', 'res']
47 | resources.srcDirs = ['resources']
48 | renderscript.srcDirs = ['src']
49 | assets.srcDirs = ['assets']
50 | jniLibs.srcDirs = ['libs']
51 | }
52 | }
53 |
54 | lintOptions {
55 | abortOnError false
56 | }
57 |
58 | // Do not compress Qt binary resources file
59 | aaptOptions {
60 | noCompress 'rcc'
61 | }
62 |
63 | defaultConfig {
64 | resConfigs "en"
65 | }
66 | }
67 |
--------------------------------------------------------------------------------
/android/gradle/wrapper/gradle-wrapper.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/ncnn_android_yolov8/298db901d8520888e133e906ce73445ead3842ba/android/gradle/wrapper/gradle-wrapper.jar
--------------------------------------------------------------------------------
/android/gradle/wrapper/gradle-wrapper.properties:
--------------------------------------------------------------------------------
1 | distributionBase=GRADLE_USER_HOME
2 | distributionPath=wrapper/dists
3 | distributionUrl=https\://services.gradle.org/distributions/gradle-5.5.1-bin.zip
4 | zipStoreBase=GRADLE_USER_HOME
5 | zipStorePath=wrapper/dists
6 |
--------------------------------------------------------------------------------
/android/gradlew:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 |
3 | ##############################################################################
4 | ##
5 | ## Gradle start up script for UN*X
6 | ##
7 | ##############################################################################
8 |
9 | # Attempt to set APP_HOME
10 | # Resolve links: $0 may be a link
11 | PRG="$0"
12 | # Need this for relative symlinks.
13 | while [ -h "$PRG" ] ; do
14 | ls=`ls -ld "$PRG"`
15 | link=`expr "$ls" : '.*-> \(.*\)$'`
16 | if expr "$link" : '/.*' > /dev/null; then
17 | PRG="$link"
18 | else
19 | PRG=`dirname "$PRG"`"/$link"
20 | fi
21 | done
22 | SAVED="`pwd`"
23 | cd "`dirname \"$PRG\"`/" >/dev/null
24 | APP_HOME="`pwd -P`"
25 | cd "$SAVED" >/dev/null
26 |
27 | APP_NAME="Gradle"
28 | APP_BASE_NAME=`basename "$0"`
29 |
30 | # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
31 | DEFAULT_JVM_OPTS=""
32 |
33 | # Use the maximum available, or set MAX_FD != -1 to use that value.
34 | MAX_FD="maximum"
35 |
36 | warn () {
37 | echo "$*"
38 | }
39 |
40 | die () {
41 | echo
42 | echo "$*"
43 | echo
44 | exit 1
45 | }
46 |
47 | # OS specific support (must be 'true' or 'false').
48 | cygwin=false
49 | msys=false
50 | darwin=false
51 | nonstop=false
52 | case "`uname`" in
53 | CYGWIN* )
54 | cygwin=true
55 | ;;
56 | Darwin* )
57 | darwin=true
58 | ;;
59 | MINGW* )
60 | msys=true
61 | ;;
62 | NONSTOP* )
63 | nonstop=true
64 | ;;
65 | esac
66 |
67 | CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
68 |
69 | # Determine the Java command to use to start the JVM.
70 | if [ -n "$JAVA_HOME" ] ; then
71 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
72 | # IBM's JDK on AIX uses strange locations for the executables
73 | JAVACMD="$JAVA_HOME/jre/sh/java"
74 | else
75 | JAVACMD="$JAVA_HOME/bin/java"
76 | fi
77 | if [ ! -x "$JAVACMD" ] ; then
78 | die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
79 |
80 | Please set the JAVA_HOME variable in your environment to match the
81 | location of your Java installation."
82 | fi
83 | else
84 | JAVACMD="java"
85 | which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
86 |
87 | Please set the JAVA_HOME variable in your environment to match the
88 | location of your Java installation."
89 | fi
90 |
91 | # Increase the maximum file descriptors if we can.
92 | if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
93 | MAX_FD_LIMIT=`ulimit -H -n`
94 | if [ $? -eq 0 ] ; then
95 | if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
96 | MAX_FD="$MAX_FD_LIMIT"
97 | fi
98 | ulimit -n $MAX_FD
99 | if [ $? -ne 0 ] ; then
100 | warn "Could not set maximum file descriptor limit: $MAX_FD"
101 | fi
102 | else
103 | warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
104 | fi
105 | fi
106 |
107 | # For Darwin, add options to specify how the application appears in the dock
108 | if $darwin; then
109 | GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
110 | fi
111 |
112 | # For Cygwin, switch paths to Windows format before running java
113 | if $cygwin ; then
114 | APP_HOME=`cygpath --path --mixed "$APP_HOME"`
115 | CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
116 | JAVACMD=`cygpath --unix "$JAVACMD"`
117 |
118 | # We build the pattern for arguments to be converted via cygpath
119 | ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
120 | SEP=""
121 | for dir in $ROOTDIRSRAW ; do
122 | ROOTDIRS="$ROOTDIRS$SEP$dir"
123 | SEP="|"
124 | done
125 | OURCYGPATTERN="(^($ROOTDIRS))"
126 | # Add a user-defined pattern to the cygpath arguments
127 | if [ "$GRADLE_CYGPATTERN" != "" ] ; then
128 | OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
129 | fi
130 | # Now convert the arguments - kludge to limit ourselves to /bin/sh
131 | i=0
132 | for arg in "$@" ; do
133 | CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
134 | CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
135 |
136 | if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
137 | eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
138 | else
139 | eval `echo args$i`="\"$arg\""
140 | fi
141 | i=$((i+1))
142 | done
143 | case $i in
144 | (0) set -- ;;
145 | (1) set -- "$args0" ;;
146 | (2) set -- "$args0" "$args1" ;;
147 | (3) set -- "$args0" "$args1" "$args2" ;;
148 | (4) set -- "$args0" "$args1" "$args2" "$args3" ;;
149 | (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
150 | (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
151 | (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
152 | (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
153 | (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
154 | esac
155 | fi
156 |
157 | # Escape application args
158 | save () {
159 | for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
160 | echo " "
161 | }
162 | APP_ARGS=$(save "$@")
163 |
164 | # Collect all arguments for the java command, following the shell quoting and substitution rules
165 | eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"
166 |
167 | # by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong
168 | if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then
169 | cd "$(dirname "$0")"
170 | fi
171 |
172 | exec "$JAVACMD" "$@"
173 |
--------------------------------------------------------------------------------
/android/gradlew.bat:
--------------------------------------------------------------------------------
1 | @if "%DEBUG%" == "" @echo off
2 | @rem ##########################################################################
3 | @rem
4 | @rem Gradle startup script for Windows
5 | @rem
6 | @rem ##########################################################################
7 |
8 | @rem Set local scope for the variables with windows NT shell
9 | if "%OS%"=="Windows_NT" setlocal
10 |
11 | set DIRNAME=%~dp0
12 | if "%DIRNAME%" == "" set DIRNAME=.
13 | set APP_BASE_NAME=%~n0
14 | set APP_HOME=%DIRNAME%
15 |
16 | @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
17 | set DEFAULT_JVM_OPTS=
18 |
19 | @rem Find java.exe
20 | if defined JAVA_HOME goto findJavaFromJavaHome
21 |
22 | set JAVA_EXE=java.exe
23 | %JAVA_EXE% -version >NUL 2>&1
24 | if "%ERRORLEVEL%" == "0" goto init
25 |
26 | echo.
27 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
28 | echo.
29 | echo Please set the JAVA_HOME variable in your environment to match the
30 | echo location of your Java installation.
31 |
32 | goto fail
33 |
34 | :findJavaFromJavaHome
35 | set JAVA_HOME=%JAVA_HOME:"=%
36 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe
37 |
38 | if exist "%JAVA_EXE%" goto init
39 |
40 | echo.
41 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
42 | echo.
43 | echo Please set the JAVA_HOME variable in your environment to match the
44 | echo location of your Java installation.
45 |
46 | goto fail
47 |
48 | :init
49 | @rem Get command-line arguments, handling Windows variants
50 |
51 | if not "%OS%" == "Windows_NT" goto win9xME_args
52 |
53 | :win9xME_args
54 | @rem Slurp the command line arguments.
55 | set CMD_LINE_ARGS=
56 | set _SKIP=2
57 |
58 | :win9xME_args_slurp
59 | if "x%~1" == "x" goto execute
60 |
61 | set CMD_LINE_ARGS=%*
62 |
63 | :execute
64 | @rem Setup the command line
65 |
66 | set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
67 |
68 | @rem Execute Gradle
69 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
70 |
71 | :end
72 | @rem End local scope for the variables with windows NT shell
73 | if "%ERRORLEVEL%"=="0" goto mainEnd
74 |
75 | :fail
76 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
77 | rem the _cmd.exe /c_ return code!
78 | if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
79 | exit /b 1
80 |
81 | :mainEnd
82 | if "%OS%"=="Windows_NT" endlocal
83 |
84 | :omega
85 |
--------------------------------------------------------------------------------
/android/res/drawable-hdpi/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/ncnn_android_yolov8/298db901d8520888e133e906ce73445ead3842ba/android/res/drawable-hdpi/icon.png
--------------------------------------------------------------------------------
/android/res/drawable-ldpi/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/ncnn_android_yolov8/298db901d8520888e133e906ce73445ead3842ba/android/res/drawable-ldpi/icon.png
--------------------------------------------------------------------------------
/android/res/drawable-mdpi/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/ncnn_android_yolov8/298db901d8520888e133e906ce73445ead3842ba/android/res/drawable-mdpi/icon.png
--------------------------------------------------------------------------------
/android/res/values/libs.xml:
--------------------------------------------------------------------------------
4 | - https://download.qt.io/ministro/android/qt5/qt-5.14
--------------------------------------------------------------------------------
/android/src/com/amin/QtAndroidGallery/QtAndroidGallery.java:
--------------------------------------------------------------------------------
1 | package com.amin.QtAndroidGallery;
2 |
3 | import org.qtproject.qt5.android.bindings.QtApplication;
4 | import org.qtproject.qt5.android.bindings.QtActivity;
5 |
6 | import android.content.Context;
7 | import android.content.Intent;
8 | import android.database.Cursor;
9 | import android.graphics.Bitmap;
10 | import android.graphics.BitmapFactory;
11 | import android.net.Uri;
12 | import android.os.Bundle;
13 | import android.os.Environment;
14 | import android.provider.MediaStore;
15 | import android.util.Log;
16 | import android.os.Build;
17 | import android.os.Build.VERSION;
18 | import android.widget.Toast;
19 |
20 | import java.io.File;
21 | import java.io.IOException;
22 | import java.text.SimpleDateFormat;
23 | import java.util.ArrayList;
24 | import java.util.Date;
25 | import java.util.List;
26 | import android.provider.DocumentsContract;
27 | import android.content.ContentUris;
28 |
29 | public class QtAndroidGallery extends QtActivity
30 | {
31 |
32 | public static native void fileSelected(String fileName);
33 |
34 | static final int REQUEST_OPEN_IMAGE = 1;
35 | public String lastCameraFileUri;
36 | static final int REQUEST_CAPTURE_IMAGE = 2;
37 |
38 | private static QtAndroidGallery m_instance;
39 |
40 | public QtAndroidGallery()
41 | {
42 | m_instance = this;
43 | }
44 |
45 | @Override
46 | public void onCreate(Bundle savedInstanceState)
47 | {
48 | super.onCreate(savedInstanceState);
49 | }
50 |
51 | @Override
52 | protected void onDestroy()
53 | {
54 | super.onDestroy();
55 | }
56 |
57 | static void openAnImage()
58 | {
59 | m_instance.dispatchOpenGallery();
60 | }
61 |
62 | @Override
63 | protected void onActivityResult(int requestCode, int resultCode, Intent data)
64 | {
65 | if (resultCode == RESULT_OK)
66 | {
67 | if(requestCode == REQUEST_OPEN_IMAGE)
68 | {
69 | System.out.println("debug 123456789");
70 | String filePath = getPath(getApplicationContext(), data.getData());
71 | fileSelected(filePath);
72 | }
73 | else if(requestCode == REQUEST_CAPTURE_IMAGE)
74 | {
75 | String filePath = lastCameraFileUri;
76 | fileSelected(filePath);
77 | }
78 | }
79 | else
80 | {
81 | fileSelected(":(");
82 | }
83 |
84 | super.onActivityResult(requestCode, resultCode, data);
85 | }
86 |
87 | private void dispatchOpenGallery()
88 | {
89 | Intent intent = new Intent(Intent.ACTION_GET_CONTENT);
90 | intent.setType("image/*");
91 | startActivityForResult(intent, REQUEST_OPEN_IMAGE);
92 | }
93 |
94 | public String getRealPathFromURI(Context context, Uri contentUri)
95 | {
96 | Cursor cursor = null;
97 | try
98 | {
99 | String[] proj = { MediaStore.Images.Media.DATA };
100 | cursor = context.getContentResolver().query(contentUri, proj, null, null, null);
101 | int column_index = cursor.getColumnIndexOrThrow(MediaStore.Images.Media.DATA);
102 | cursor.moveToFirst();
103 | return cursor.getString(column_index);
104 | }
105 | finally
106 | {
107 | if (cursor != null)
108 | {
109 | cursor.close();
110 | }
111 | }
112 | }
113 |
114 |
115 |
116 | /**
117 | * Get a file path from a Uri. This will get the path for Storage Access
118 | * Framework Documents, as well as the _data field for the MediaStore and
119 | * other file-based ContentProviders.
120 | *
121 | * @param context The context.
122 | * @param uri The Uri to query.
123 | * @author paulburke
124 | */
125 | public static String getPath(final Context context, final Uri uri) {
126 |
127 | final boolean isKitKat = Build.VERSION.SDK_INT >= Build.VERSION_CODES.KITKAT;
128 |
129 | // DocumentProvider
130 | if (isKitKat && DocumentsContract.isDocumentUri(context, uri)) {
131 | // ExternalStorageProvider
132 | if (isExternalStorageDocument(uri)) {
133 | final String docId = DocumentsContract.getDocumentId(uri);
134 | final String[] split = docId.split(":");
135 | final String type = split[0];
136 |
137 | if ("primary".equalsIgnoreCase(type)) {
138 | return Environment.getExternalStorageDirectory() + "/" + split[1];
139 | }
140 |
141 | // TODO handle non-primary volumes
142 | }
143 | // DownloadsProvider
144 | else if (isDownloadsDocument(uri)) {
145 |
146 | final String id = DocumentsContract.getDocumentId(uri);
147 | final Uri contentUri = ContentUris.withAppendedId(
148 | Uri.parse("content://downloads/public_downloads"), Long.valueOf(id));
149 |
150 | return getDataColumn(context, contentUri, null, null);
151 | }
152 | // MediaProvider
153 | else if (isMediaDocument(uri)) {
154 | final String docId = DocumentsContract.getDocumentId(uri);
155 | final String[] split = docId.split(":");
156 | final String type = split[0];
157 |
158 | Uri contentUri = null;
159 | if ("image".equals(type)) {
160 | contentUri = MediaStore.Images.Media.EXTERNAL_CONTENT_URI;
161 | } else if ("video".equals(type)) {
162 | contentUri = MediaStore.Video.Media.EXTERNAL_CONTENT_URI;
163 | } else if ("audio".equals(type)) {
164 | contentUri = MediaStore.Audio.Media.EXTERNAL_CONTENT_URI;
165 | }
166 |
167 | final String selection = "_id=?";
168 | final String[] selectionArgs = new String[] {
169 | split[1]
170 | };
171 |
172 | return getDataColumn(context, contentUri, selection, selectionArgs);
173 | }
174 | }
175 | // MediaStore (and general)
176 | else if ("content".equalsIgnoreCase(uri.getScheme())) {
177 | return getDataColumn(context, uri, null, null);
178 | }
179 | // File
180 | else if ("file".equalsIgnoreCase(uri.getScheme())) {
181 | return uri.getPath();
182 | }
183 |
184 | return null;
185 | }
186 |
187 | /**
188 | * Get the value of the data column for this Uri. This is useful for
189 | * MediaStore Uris, and other file-based ContentProviders.
190 | *
191 | * @param context The context.
192 | * @param uri The Uri to query.
193 | * @param selection (Optional) Filter used in the query.
194 | * @param selectionArgs (Optional) Selection arguments used in the query.
195 | * @return The value of the _data column, which is typically a file path.
196 | */
197 | public static String getDataColumn(Context context, Uri uri, String selection,
198 | String[] selectionArgs) {
199 |
200 | Cursor cursor = null;
201 | final String column = "_data";
202 | final String[] projection = {
203 | column
204 | };
205 |
206 | try {
207 | cursor = context.getContentResolver().query(uri, projection, selection, selectionArgs,
208 | null);
209 | if (cursor != null && cursor.moveToFirst()) {
210 | final int column_index = cursor.getColumnIndexOrThrow(column);
211 | return cursor.getString(column_index);
212 | }
213 | } finally {
214 | if (cursor != null)
215 | cursor.close();
216 | }
217 | return null;
218 | }
219 |
220 |
221 | /**
222 | * @param uri The Uri to check.
223 | * @return Whether the Uri authority is ExternalStorageProvider.
224 | */
225 | public static boolean isExternalStorageDocument(Uri uri) {
226 | return "com.android.externalstorage.documents".equals(uri.getAuthority());
227 | }
228 |
229 | /**
230 | * @param uri The Uri to check.
231 | * @return Whether the Uri authority is DownloadsProvider.
232 | */
233 | public static boolean isDownloadsDocument(Uri uri) {
234 | return "com.android.providers.downloads.documents".equals(uri.getAuthority());
235 | }
236 |
237 | /**
238 | * @param uri The Uri to check.
239 | * @return Whether the Uri authority is MediaProvider.
240 | */
241 | public static boolean isMediaDocument(Uri uri) {
242 | return "com.android.providers.media.documents".equals(uri.getAuthority());
243 | }
244 |
245 |
246 | static void captureAnImage()
247 | {
248 | m_instance.dispatchTakePictureIntent();
249 | }
250 |
251 | private void dispatchTakePictureIntent()
252 | {
253 | Intent takePictureIntent = new Intent(MediaStore.ACTION_IMAGE_CAPTURE);
254 |
255 | // Ensure that there’s a camera activity to handle the intent
256 | if (takePictureIntent.resolveActivity(getPackageManager()) != null)
257 | {
258 | // Create the File where the photo should go
259 | File photoFile = null;
260 | try
261 | {
262 | photoFile = m_instance.createImageFile();
263 | }
264 | catch (IOException ex)
265 | {
266 | // Error occurred while creating the File
267 | Toast.makeText(QtAndroidGallery.this, ex.getLocalizedMessage(), Toast.LENGTH_LONG).show();
268 | Toast.makeText(QtAndroidGallery.this, ex.getMessage(), Toast.LENGTH_LONG).show();
269 | }
270 | // Continue only if the File was successfully created
271 | if (photoFile != null)
272 | {
273 | takePictureIntent.putExtra(MediaStore.EXTRA_OUTPUT, Uri.fromFile(photoFile));
274 |
275 | lastCameraFileUri = photoFile.toString();
276 |
277 | startActivityForResult(takePictureIntent, REQUEST_CAPTURE_IMAGE);
278 | }
279 | }
280 | else
281 | {
282 | Toast.makeText(QtAndroidGallery.this, "Problems with your camera?!", Toast.LENGTH_SHORT).show();
283 | }
284 | }
285 |
286 |
287 | private File createImageFile() throws IOException
288 | {
289 | // Create an image file name
290 | String timeStamp = new SimpleDateFormat("yyyyMMdd_HHmmss").format(new Date());
291 | String imageFileName = "MYAPPTEMP_" + timeStamp + "_";
292 | File storageDir = Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_PICTURES);
293 | File image = File.createTempFile(imageFileName, /* prefix */
294 | ".jpg", /* suffix */
295 | storageDir /* directory */
296 | );
297 |
298 | return image;
299 | }
300 |
301 |
302 | }
303 |
304 |
305 |
--------------------------------------------------------------------------------
/assets/ManjaroMix.qss:
--------------------------------------------------------------------------------
1 | /*
2 | ManjaroMix Style Sheet for QT Applications
3 | Author: Jaime A. Quiroga P.
4 | Company: GTRONICK
5 | Last updated: 25/02/2020, 15:42.
6 | Available at: https://github.com/GTRONICK/QSS/blob/master/ManjaroMix.qss
7 | */
8 | QMainWindow {
9 | background-color:#151a1e;
10 | }
11 | QCalendar {
12 | background-color: #151a1e;
13 | }
14 | QTextEdit {
15 | border-width: 1px;
16 | border-style: solid;
17 | border-color: #4fa08b;
18 | background-color: #222b2e;
19 | color: #d3dae3;
20 | }
21 | QPlainTextEdit {
22 | border-width: 1px;
23 | border-style: solid;
24 | border-color: #4fa08b;
25 | background-color: #222b2e;
26 | color: #d3dae3;
27 | }
28 | QToolButton {
29 | border-style: solid;
30 | border-top-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));
31 | border-right-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(217, 217, 217), stop:1 rgb(227, 227, 227));
32 | border-left-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(227, 227, 227), stop:1 rgb(217, 217, 217));
33 | border-bottom-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));
34 | border-width: 1px;
35 | border-radius: 5px;
36 | color: #d3dae3;
37 | padding: 2px;
38 | background-color: rgb(255,255,255);
39 | }
40 | QToolButton:hover{
41 | border-style: solid;
42 | border-top-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(195, 195, 195), stop:1 rgb(222, 222, 222));
43 | border-right-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(197, 197, 197), stop:1 rgb(227, 227, 227));
44 | border-left-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(227, 227, 227), stop:1 rgb(197, 197, 197));
45 | border-bottom-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(195, 195, 195), stop:1 rgb(222, 222, 222));
46 | border-width: 1px;
47 | border-radius: 5px;
48 | color: rgb(0,0,0);
49 | padding: 2px;
50 | background-color: rgb(255,255,255);
51 | }
52 | QToolButton:pressed{
53 | border-style: solid;
54 | border-top-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));
55 | border-right-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(217, 217, 217), stop:1 rgb(227, 227, 227));
56 | border-left-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(227, 227, 227), stop:1 rgb(217, 217, 217));
57 | border-bottom-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));
58 | border-width: 1px;
59 | border-radius: 5px;
60 | color: rgb(0,0,0);
61 | padding: 2px;
62 | background-color: rgb(142,142,142);
63 | }
64 | QPushButton{
65 | border-style: solid;
66 | border-color: #050a0e;
67 | border-width: 1px;
68 | border-radius: 5px;
69 | color: #d3dae3;
70 | padding: 2px;
71 | background-color: #151a1e;
72 | }
73 | QPushButton::default{
74 | border-style: solid;
75 | border-color: #050a0e;
76 | border-width: 1px;
77 | border-radius: 5px;
78 | color: #FFFFFF;
79 | padding: 2px;
80 | background-color: #151a1e;;
81 | }
82 | QPushButton:hover{
83 | border-style: solid;
84 | border-color: #050a0e;
85 | border-width: 1px;
86 | border-radius: 5px;
87 | color: #d3dae3;
88 | padding: 2px;
89 | background-color: #1c1f1f;
90 | }
91 | QPushButton:pressed{
92 | border-style: solid;
93 | border-color: #050a0e;
94 | border-width: 1px;
95 | border-radius: 5px;
96 | color: #d3dae3;
97 | padding: 2px;
98 | background-color: #2c2f2f;
99 | }
100 | QPushButton:disabled{
101 | border-style: solid;
102 | border-top-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));
103 | border-right-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(217, 217, 217), stop:1 rgb(227, 227, 227));
104 | border-left-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(227, 227, 227), stop:1 rgb(217, 217, 217));
105 | border-bottom-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));
106 | border-width: 1px;
107 | border-radius: 5px;
108 | color: #808086;
109 | padding: 2px;
110 | background-color: rgb(142,142,142);
111 | }
112 | QLineEdit {
113 | border-width: 1px;
114 | border-style: solid;
115 | border-color: #4fa08b;
116 | background-color: #222b2e;
117 | color: #d3dae3;
118 | }
119 | QLabel {
120 | color: #d3dae3;
121 | }
122 | QLCDNumber {
123 | color: #4d9b87;
124 | }
125 | QProgressBar {
126 | text-align: center;
127 | color: #d3dae3;
128 | border-radius: 10px;
129 | border-color: transparent;
130 | border-style: solid;
131 | background-color: #52595d;
132 | }
133 | QProgressBar::chunk {
134 | background-color: #214037 ;
135 | border-radius: 10px;
136 | }
137 | QMenuBar {
138 | background-color: #151a1e;
139 | }
140 | QMenuBar::item {
141 | color: #d3dae3;
142 | spacing: 3px;
143 | padding: 1px 4px;
144 | background-color: #151a1e;
145 | }
146 |
147 | QMenuBar::item:selected {
148 | background-color: #252a2e;
149 | color: #FFFFFF;
150 | }
151 | QMenu {
152 | background-color: #151a1e;
153 | }
154 | QMenu::item:selected {
155 | background-color: #252a2e;
156 | color: #FFFFFF;
157 | }
158 | QMenu::item {
159 | color: #d3dae3;
160 | background-color: #151a1e;
161 | }
162 | QTabWidget {
163 | color:rgb(0,0,0);
164 | background-color:#000000;
165 | }
166 | QTabWidget::pane {
167 | border-color: #050a0e;
168 | background-color: #1e282c;
169 | border-style: solid;
170 | border-width: 1px;
171 | border-bottom-left-radius: 4px;
172 | border-bottom-right-radius: 4px;
173 | }
174 | QTabBar::tab:first {
175 | border-style: solid;
176 | border-left-width:1px;
177 | border-right-width:0px;
178 | border-top-width:1px;
179 | border-bottom-width:0px;
180 | border-top-color: #050a0e;
181 | border-left-color: #050a0e;
182 | border-bottom-color: #050a0e;
183 | border-top-left-radius: 4px;
184 | color: #d3dae3;
185 | padding: 3px;
186 | margin-left:0px;
187 | background-color: #151a1e;
188 | }
189 | QTabBar::tab:last {
190 | border-style: solid;
191 | border-top-width:1px;
192 | border-left-width:1px;
193 | border-right-width:1px;
194 | border-bottom-width:0px;
195 | border-color: #050a0e;
196 | border-top-right-radius: 4px;
197 | color: #d3dae3;
198 | padding: 3px;
199 | margin-left:0px;
200 | background-color: #151a1e;
201 | }
202 | QTabBar::tab {
203 | border-style: solid;
204 | border-top-width:1px;
205 | border-bottom-width:0px;
206 | border-left-width:1px;
207 | border-top-color: #050a0e;
208 | border-left-color: #050a0e;
209 | border-bottom-color: #050a0e;
210 | color: #d3dae3;
211 | padding: 3px;
212 | margin-left:0px;
213 | background-color: #151a1e;
214 | }
215 | QTabBar::tab:selected, QTabBar::tab:last:selected, QTabBar::tab:hover {
216 | border-style: solid;
217 | border-left-width:1px;
218 | border-bottom-width:0px;
219 | border-right-color: transparent;
220 | border-top-color: #050a0e;
221 | border-left-color: #050a0e;
222 | border-bottom-color: #050a0e;
223 | color: #FFFFFF;
224 | padding: 3px;
225 | margin-left:0px;
226 | background-color: #1e282c;
227 | }
228 |
229 | QTabBar::tab:selected, QTabBar::tab:first:selected, QTabBar::tab:hover {
230 | border-style: solid;
231 | border-left-width:1px;
232 | border-bottom-width:0px;
233 | border-top-width:1px;
234 | border-right-color: transparent;
235 | border-top-color: #050a0e;
236 | border-left-color: #050a0e;
237 | border-bottom-color: #050a0e;
238 | color: #FFFFFF;
239 | padding: 3px;
240 | margin-left:0px;
241 | background-color: #1e282c;
242 | }
243 |
244 | QCheckBox {
245 | color: #d3dae3;
246 | padding: 2px;
247 | }
248 | QCheckBox:disabled {
249 | color: #808086;
250 | padding: 2px;
251 | }
252 |
253 | QCheckBox:hover {
254 | border-radius:4px;
255 | border-style:solid;
256 | padding-left: 1px;
257 | padding-right: 1px;
258 | padding-bottom: 1px;
259 | padding-top: 1px;
260 | border-width:1px;
261 | border-color: transparent;
262 | }
263 | QCheckBox::indicator:checked {
264 |
265 | height: 10px;
266 | width: 10px;
267 | border-style:solid;
268 | border-width: 1px;
269 | border-color: #4fa08b;
270 | color: #000000;
271 | background-color: qradialgradient(cx:0.4, cy:0.4, radius: 1.5,fx:0, fy:0, stop:0 #1e282c, stop:0.3 #1e282c, stop:0.4 #4fa08b, stop:0.5 #1e282c, stop:1 #1e282c);
272 | }
273 | QCheckBox::indicator:unchecked {
274 |
275 | height: 10px;
276 | width: 10px;
277 | border-style:solid;
278 | border-width: 1px;
279 | border-color: #4fa08b;
280 | color: #000000;
281 | }
282 | QRadioButton {
283 | color: #d3dae3;
284 | padding: 1px;
285 | }
286 | QRadioButton::indicator:checked {
287 | height: 10px;
288 | width: 10px;
289 | border-style:solid;
290 | border-radius:5px;
291 | border-width: 1px;
292 | border-color: #4fa08b;
293 | color: #a9b7c6;
294 | background-color: qradialgradient(cx:0.5, cy:0.5, radius:0.4,fx:0.5, fy:0.5, stop:0 #4fa08b, stop:1 #1e282c);
295 | }
296 | QRadioButton::indicator:!checked {
297 | height: 10px;
298 | width: 10px;
299 | border-style:solid;
300 | border-radius:5px;
301 | border-width: 1px;
302 | border-color: #4fa08b;
303 | color: #a9b7c6;
304 | background-color: transparent;
305 | }
306 | QStatusBar {
307 | color:#027f7f;
308 | }
309 | QSpinBox {
310 | color: #d3dae3;
311 | background-color: #222b2e;
312 | border-width: 1px;
313 | border-style: solid;
314 | border-color: #4fa08b;
315 | }
316 | QDoubleSpinBox {
317 | color: #d3dae3;
318 | background-color: #222b2e;
319 | border-width: 1px;
320 | border-style: solid;
321 | border-color: #4fa08b;
322 | }
323 | QTimeEdit {
324 | color: #d3dae3;
325 | background-color: #222b2e;
326 | border-width: 1px;
327 | border-style: solid;
328 | border-color: #4fa08b;
329 | }
330 | QDateTimeEdit {
331 | color: #d3dae3;
332 | background-color: #222b2e;
333 | border-width: 1px;
334 | border-style: solid;
335 | border-color: #4fa08b;
336 | }
337 | QDateEdit {
338 | color: #d3dae3;
339 | background-color: #222b2e;
340 | border-width: 1px;
341 | border-style: solid;
342 | border-color: #4fa08b;
343 | }
344 | QFontComboBox {
345 | color: #d3dae3;
346 | background-color: #222b2e;
347 | border-width: 1px;
348 | border-style: solid;
349 | border-color: #4fa08b;
350 | }
351 | QComboBox {
352 | color: #d3dae3;
353 | background-color: #222b2e;
354 | border-width: 1px;
355 | border-style: solid;
356 | border-color: #4fa08b;
357 | }
358 |
359 | QDial {
360 | background: #16a085;
361 | }
362 |
363 | QToolBox {
364 | color: #a9b7c6;
365 | background-color: #222b2e;
366 | }
367 | QToolBox::tab {
368 | color: #a9b7c6;
369 | background-color:#222b2e;
370 | }
371 | QToolBox::tab:selected {
372 | color: #FFFFFF;
373 | background-color:#222b2e;
374 | }
375 | QScrollArea {
376 | color: #FFFFFF;
377 | background-color:#222b2e;
378 | }
379 | QSlider::groove:horizontal {
380 | height: 5px;
381 | background-color: #52595d;
382 | }
383 | QSlider::groove:vertical {
384 | width: 5px;
385 | background-color: #52595d;
386 | }
387 | QSlider::handle:horizontal {
388 | background: #1a2224;
389 | border-style: solid;
390 | border-width: 1px;
391 | border-color: rgb(207,207,207);
392 | width: 12px;
393 | margin: -5px 0;
394 | border-radius: 7px;
395 | }
396 | QSlider::handle:vertical {
397 | background: #1a2224;
398 | border-style: solid;
399 | border-width: 1px;
400 | border-color: rgb(207,207,207);
401 | height: 12px;
402 | margin: 0 -5px;
403 | border-radius: 7px;
404 | }
405 | QSlider::add-page:horizontal {
406 | background: #52595d;
407 | }
408 | QSlider::add-page:vertical {
409 | background: #52595d;
410 | }
411 | QSlider::sub-page:horizontal {
412 | background-color: #15433a;
413 | }
414 | QSlider::sub-page:vertical {
415 | background-color: #15433a;
416 | }
417 | QScrollBar:horizontal {
418 | max-height: 10px;
419 | border: 1px transparent grey;
420 | margin: 0px 20px 0px 20px;
421 | background: transparent;
422 | }
423 | QScrollBar:vertical {
424 | max-width: 10px;
425 | border: 1px transparent grey;
426 | margin: 20px 0px 20px 0px;
427 | background: transparent;
428 | }
429 | QScrollBar::handle:horizontal {
430 | background: #52595d;
431 | border-style: transparent;
432 | border-radius: 4px;
433 | min-width: 25px;
434 | }
435 | QScrollBar::handle:horizontal:hover {
436 | background: #58a492;
437 | border-style: transparent;
438 | border-radius: 4px;
439 | min-width: 25px;
440 | }
441 | QScrollBar::handle:vertical {
442 | background: #52595d;
443 | border-style: transparent;
444 | border-radius: 4px;
445 | min-height: 25px;
446 | }
447 | QScrollBar::handle:vertical:hover {
448 | background: #58a492;
449 | border-style: transparent;
450 | border-radius: 4px;
451 | min-height: 25px;
452 | }
453 | QScrollBar::add-line:horizontal {
454 | border: 2px transparent grey;
455 | border-top-right-radius: 4px;
456 | border-bottom-right-radius: 4px;
457 | background: #15433a;
458 | width: 20px;
459 | subcontrol-position: right;
460 | subcontrol-origin: margin;
461 | }
462 | QScrollBar::add-line:horizontal:pressed {
463 | border: 2px transparent grey;
464 | border-top-right-radius: 4px;
465 | border-bottom-right-radius: 4px;
466 | background: rgb(181,181,181);
467 | width: 20px;
468 | subcontrol-position: right;
469 | subcontrol-origin: margin;
470 | }
471 | QScrollBar::add-line:vertical {
472 | border: 2px transparent grey;
473 | border-bottom-left-radius: 4px;
474 | border-bottom-right-radius: 4px;
475 | background: #15433a;
476 | height: 20px;
477 | subcontrol-position: bottom;
478 | subcontrol-origin: margin;
479 | }
480 | QScrollBar::add-line:vertical:pressed {
481 | border: 2px transparent grey;
482 | border-bottom-left-radius: 4px;
483 | border-bottom-right-radius: 4px;
484 | background: rgb(181,181,181);
485 | height: 20px;
486 | subcontrol-position: bottom;
487 | subcontrol-origin: margin;
488 | }
489 | QScrollBar::sub-line:horizontal {
490 | border: 2px transparent grey;
491 | border-top-left-radius: 4px;
492 | border-bottom-left-radius: 4px;
493 | background: #15433a;
494 | width: 20px;
495 | subcontrol-position: left;
496 | subcontrol-origin: margin;
497 | }
498 | QScrollBar::sub-line:horizontal:pressed {
499 | border: 2px transparent grey;
500 | border-top-left-radius: 4px;
501 | border-bottom-left-radius: 4px;
502 | background: rgb(181,181,181);
503 | width: 20px;
504 | subcontrol-position: left;
505 | subcontrol-origin: margin;
506 | }
507 | QScrollBar::sub-line:vertical {
508 | border: 2px transparent grey;
509 | border-top-left-radius: 4px;
510 | border-top-right-radius: 4px;
511 | background: #15433a;
512 | height: 20px;
513 | subcontrol-position: top;
514 | subcontrol-origin: margin;
515 | }
516 | QScrollBar::sub-line:vertical:pressed {
517 | border: 2px transparent grey;
518 | border-top-left-radius: 4px;
519 | border-top-right-radius: 4px;
520 | background: rgb(181,181,181);
521 | height: 20px;
522 | subcontrol-position: top;
523 | subcontrol-origin: margin;
524 | }
525 |
526 | QScrollBar::add-page:horizontal, QScrollBar::sub-page:horizontal {
527 | background: none;
528 | }
529 | QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {
530 | background: none;
531 | }
532 |
--------------------------------------------------------------------------------
/assets/ai.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/ncnn_android_yolov8/298db901d8520888e133e906ce73445ead3842ba/assets/ai.png
--------------------------------------------------------------------------------
/assets/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/ncnn_android_yolov8/298db901d8520888e133e906ce73445ead3842ba/assets/icon.png
--------------------------------------------------------------------------------
/docs/demo1.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/ncnn_android_yolov8/298db901d8520888e133e906ce73445ead3842ba/docs/demo1.gif
--------------------------------------------------------------------------------
/docs/demo2.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/ncnn_android_yolov8/298db901d8520888e133e906ce73445ead3842ba/docs/demo2.gif
--------------------------------------------------------------------------------
/docs/p1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/ncnn_android_yolov8/298db901d8520888e133e906ce73445ead3842ba/docs/p1.png
--------------------------------------------------------------------------------
/docs/p2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/ncnn_android_yolov8/298db901d8520888e133e906ce73445ead3842ba/docs/p2.png
--------------------------------------------------------------------------------
/docs/p3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DataXujing/ncnn_android_yolov8/298db901d8520888e133e906ce73445ead3842ba/docs/p3.png
--------------------------------------------------------------------------------
/main.cpp:
--------------------------------------------------------------------------------
1 | #include "widget.h"
2 |
3 | #include "yolo.h"
4 |
5 | #include <QApplication>
6 | #include <QFile>
7 |
8 | Yolo *yolov8 = new Yolo();
9 |
10 | int main(int argc, char *argv[])
11 | {
12 | QApplication a(argc, argv);
13 |
14 | // Load the QSS stylesheet
15 |
16 | QFile qss_(":/assets/ManjaroMix.qss");
17 | if( qss_.open(QFile::ReadOnly)){
18 | qDebug("open success");
19 | QString style = QLatin1String(qss_.readAll());
20 | a.setStyleSheet(style);
21 | qss_.close();
22 | }
23 | else{
24 | qDebug("Open failed");
25 | }
26 |
27 | int target_size = 640;
28 | float norm_vals[3] = { 1 / 255.f, 1 / 255.f, 1 / 255.f };
29 | float mean_vals[3] = { 103.53f, 116.28f, 123.675f };
30 |
31 | yolov8->load(target_size, mean_vals, norm_vals); // initialize the global ncnn YOLOv8 detector
32 |
33 |
34 |
35 | Widget w;
36 | w.show();
37 | return a.exec();
38 | }
39 |
--------------------------------------------------------------------------------
/model/modules_ncnn.py:
--------------------------------------------------------------------------------
1 | # Ultralytics YOLO 🚀, GPL-3.0 license
2 | """
3 | Common modules
4 | """
5 |
6 | import math
7 | import warnings
8 | from copy import copy
9 | from pathlib import Path
10 |
11 | import cv2
12 | import numpy as np
13 | import pandas as pd
14 | import requests
15 | import torch
16 | import torch.nn as nn
17 | from PIL import Image, ImageOps
18 | from torch.cuda import amp
19 |
20 | from ultralytics.nn.autobackend import AutoBackend
21 | from ultralytics.yolo.data.augment import LetterBox
22 | from ultralytics.yolo.utils import LOGGER, colorstr
23 | from ultralytics.yolo.utils.files import increment_path
24 | from ultralytics.yolo.utils.ops import Profile, make_divisible, non_max_suppression, scale_boxes, xyxy2xywh
25 | from ultralytics.yolo.utils.plotting import Annotator, colors, save_one_box
26 | from ultralytics.yolo.utils.tal import dist2bbox, make_anchors
27 | from ultralytics.yolo.utils.torch_utils import copy_attr, smart_inference_mode
28 |
29 | # from utils.plots import feature_visualization TODO
30 |
31 |
32 | def autopad(k, p=None, d=1): # kernel, padding, dilation
33 | # Pad to 'same' shape outputs
34 | if d > 1:
35 | k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k] # actual kernel-size
36 | if p is None:
37 | p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
38 | return p
39 |
40 |
41 | class Conv(nn.Module):
42 | # Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation)
43 | default_act = nn.SiLU() # default activation
44 |
45 | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):
46 | super().__init__()
47 | self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False)
48 | self.bn = nn.BatchNorm2d(c2)
49 | self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity()
50 |
51 | def forward(self, x):
52 | return self.act(self.bn(self.conv(x)))
53 |
54 | def forward_fuse(self, x):
55 | return self.act(self.conv(x))
56 |
57 |
58 | class DWConv(Conv):
59 | # Depth-wise convolution
60 | def __init__(self, c1, c2, k=1, s=1, d=1, act=True): # ch_in, ch_out, kernel, stride, dilation, activation
61 | super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), d=d, act=act)
62 |
63 |
64 | class DWConvTranspose2d(nn.ConvTranspose2d):
65 | # Depth-wise transpose convolution
66 | def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0): # ch_in, ch_out, kernel, stride, padding, padding_out
67 | super().__init__(c1, c2, k, s, p1, p2, groups=math.gcd(c1, c2))
68 |
69 |
70 | class ConvTranspose(nn.Module):
71 | # Convolution transpose 2d layer
72 | default_act = nn.SiLU() # default activation
73 |
74 | def __init__(self, c1, c2, k=2, s=2, p=0, bn=True, act=True):
75 | super().__init__()
76 | self.conv_transpose = nn.ConvTranspose2d(c1, c2, k, s, p, bias=not bn)
77 | self.bn = nn.BatchNorm2d(c2) if bn else nn.Identity()
78 | self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity()
79 |
80 | def forward(self, x):
81 | return self.act(self.bn(self.conv_transpose(x)))
82 |
83 |
84 | class DFL(nn.Module):
85 | # DFL module
86 | def __init__(self, c1=16):
87 | super().__init__()
88 | self.conv = nn.Conv2d(c1, 1, 1, bias=False).requires_grad_(False)
89 | x = torch.arange(c1, dtype=torch.float)
90 | self.conv.weight.data[:] = nn.Parameter(x.view(1, c1, 1, 1))
91 | self.c1 = c1
92 |
93 | def forward(self, x):
94 | b, c, a = x.shape # batch, channels, anchors
95 | return self.conv(x.view(b, 4, self.c1, a).transpose(2, 1).softmax(1)).view(b, 4, a)
96 | # return self.conv(x.view(b, self.c1, 4, a).softmax(1)).view(b, 4, a)
97 |
98 |
99 | class TransformerLayer(nn.Module):
100 | # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)
101 | def __init__(self, c, num_heads):
102 | super().__init__()
103 | self.q = nn.Linear(c, c, bias=False)
104 | self.k = nn.Linear(c, c, bias=False)
105 | self.v = nn.Linear(c, c, bias=False)
106 | self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
107 | self.fc1 = nn.Linear(c, c, bias=False)
108 | self.fc2 = nn.Linear(c, c, bias=False)
109 |
110 | def forward(self, x):
111 | x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
112 | x = self.fc2(self.fc1(x)) + x
113 | return x
114 |
115 |
116 | class TransformerBlock(nn.Module):
117 | # Vision Transformer https://arxiv.org/abs/2010.11929
118 | def __init__(self, c1, c2, num_heads, num_layers):
119 | super().__init__()
120 | self.conv = None
121 | if c1 != c2:
122 | self.conv = Conv(c1, c2)
123 | self.linear = nn.Linear(c2, c2) # learnable position embedding
124 | self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers)))
125 | self.c2 = c2
126 |
127 | def forward(self, x):
128 | if self.conv is not None:
129 | x = self.conv(x)
130 | b, _, w, h = x.shape
131 | p = x.flatten(2).permute(2, 0, 1)
132 | return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h)
133 |
134 |
135 | class Bottleneck(nn.Module):
136 | # Standard bottleneck
137 | def __init__(self, c1, c2, shortcut=True, g=1, k=(3, 3), e=0.5): # ch_in, ch_out, shortcut, kernels, groups, expand
138 | super().__init__()
139 | c_ = int(c2 * e) # hidden channels
140 | self.cv1 = Conv(c1, c_, k[0], 1)
141 | self.cv2 = Conv(c_, c2, k[1], 1, g=g)
142 | self.add = shortcut and c1 == c2
143 |
144 | def forward(self, x):
145 | return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
146 |
147 |
148 | class BottleneckCSP(nn.Module):
149 | # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
150 | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
151 | super().__init__()
152 | c_ = int(c2 * e) # hidden channels
153 | self.cv1 = Conv(c1, c_, 1, 1)
154 | self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
155 | self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
156 | self.cv4 = Conv(2 * c_, c2, 1, 1)
157 | self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
158 | self.act = nn.SiLU()
159 | self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
160 |
161 | def forward(self, x):
162 | y1 = self.cv3(self.m(self.cv1(x)))
163 | y2 = self.cv2(x)
164 | return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1))))
165 |
166 |
167 | class C3(nn.Module):
168 | # CSP Bottleneck with 3 convolutions
169 | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
170 | super().__init__()
171 | c_ = int(c2 * e) # hidden channels
172 | self.cv1 = Conv(c1, c_, 1, 1)
173 | self.cv2 = Conv(c1, c_, 1, 1)
174 | self.cv3 = Conv(2 * c_, c2, 1) # optional act=FReLU(c2)
175 | self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
176 |
177 | def forward(self, x):
178 | return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1))
179 |
180 |
181 | class C2(nn.Module):
182 | # CSP Bottleneck with 2 convolutions
183 | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
184 | super().__init__()
185 | self.c = int(c2 * e) # hidden channels
186 | self.cv1 = Conv(c1, 2 * self.c, 1, 1)
187 | self.cv2 = Conv(2 * self.c, c2, 1) # optional act=FReLU(c2)
188 | # self.attention = ChannelAttention(2 * self.c) # or SpatialAttention()
189 | self.m = nn.Sequential(*(Bottleneck(self.c, self.c, shortcut, g, k=((3, 3), (3, 3)), e=1.0) for _ in range(n)))
190 |
191 | def forward(self, x):
192 | a, b = self.cv1(x).split((self.c, self.c), 1)
193 | return self.cv2(torch.cat((self.m(a), b), 1))
194 |
195 |
196 | class C2f(nn.Module):
197 | # CSP Bottleneck with 2 convolutions
198 | def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
199 | super().__init__()
200 | self.c = int(c2 * e) # hidden channels
201 | self.cv1 = Conv(c1, 2 * self.c, 1, 1)
202 | self.cv2 = Conv((2 + n) * self.c, c2, 1) # optional act=FReLU(c2)
203 | self.m = nn.ModuleList(Bottleneck(self.c, self.c, shortcut, g, k=((3, 3), (3, 3)), e=1.0) for _ in range(n))
204 |
205 | # <----- modified for ncnn export: C2f.forward uses slicing instead of split() (see README, section 1)
206 | def forward(self, x):
207 | # y = list(self.cv1(x).split((self.c, self.c), 1))
208 | # y.extend(m(y[-1]) for m in self.m)
209 | # return self.cv2(torch.cat(y, 1))
210 |
211 | x = self.cv1(x)
212 | x = [x, x[:, self.c:, ...]]
213 | x.extend(m(x[-1]) for m in self.m)
214 | x.pop(1)
215 | return self.cv2(torch.cat(x, 1))
216 |
217 |
218 |
219 |
220 | class ChannelAttention(nn.Module):
221 | # Channel-attention module https://github.com/open-mmlab/mmdetection/tree/v3.0.0rc1/configs/rtmdet
222 | def __init__(self, channels: int) -> None:
223 | super().__init__()
224 | self.pool = nn.AdaptiveAvgPool2d(1)
225 | self.fc = nn.Conv2d(channels, channels, 1, 1, 0, bias=True)
226 | self.act = nn.Sigmoid()
227 |
228 | def forward(self, x: torch.Tensor) -> torch.Tensor:
229 | return x * self.act(self.fc(self.pool(x)))
230 |
231 |
232 | class SpatialAttention(nn.Module):
233 | # Spatial-attention module
234 | def __init__(self, kernel_size=7):
235 | super().__init__()
236 | assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
237 | padding = 3 if kernel_size == 7 else 1
238 | self.cv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
239 | self.act = nn.Sigmoid()
240 |
241 | def forward(self, x):
242 | return x * self.act(self.cv1(torch.cat([torch.mean(x, 1, keepdim=True), torch.max(x, 1, keepdim=True)[0]], 1)))
243 |
244 |
245 | class CBAM(nn.Module):
246 | # Convolutional Block Attention Module (CBAM)
247 | def __init__(self, c1, ratio=16, kernel_size=7): # ch_in, ch_out, number, shortcut, groups, expansion
248 | super().__init__()
249 | self.channel_attention = ChannelAttention(c1)
250 | self.spatial_attention = SpatialAttention(kernel_size)
251 |
252 | def forward(self, x):
253 | return self.spatial_attention(self.channel_attention(x))
254 |
255 |
256 | class C1(nn.Module):
257 | # CSP Bottleneck with 1 convolution
258 | def __init__(self, c1, c2, n=1): # ch_in, ch_out, number, shortcut, groups, expansion
259 | super().__init__()
260 | self.cv1 = Conv(c1, c2, 1, 1)
261 | self.m = nn.Sequential(*(Conv(c2, c2, 3) for _ in range(n)))
262 |
263 | def forward(self, x):
264 | y = self.cv1(x)
265 | return self.m(y) + y
266 |
267 |
268 | class C3x(C3):
269 | # C3 module with cross-convolutions
270 | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
271 | super().__init__(c1, c2, n, shortcut, g, e)
272 | self.c_ = int(c2 * e)
273 | self.m = nn.Sequential(*(Bottleneck(self.c_, self.c_, shortcut, g, k=((1, 3), (3, 1)), e=1) for _ in range(n)))
274 |
275 |
276 | class C3TR(C3):
277 | # C3 module with TransformerBlock()
278 | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
279 | super().__init__(c1, c2, n, shortcut, g, e)
280 | c_ = int(c2 * e)
281 | self.m = TransformerBlock(c_, c_, 4, n)
282 |
283 |
284 | class C3Ghost(C3):
285 | # C3 module with GhostBottleneck()
286 | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
287 | super().__init__(c1, c2, n, shortcut, g, e)
288 | c_ = int(c2 * e) # hidden channels
289 | self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n)))
290 |
291 |
292 | class SPP(nn.Module):
293 | # Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729
294 | def __init__(self, c1, c2, k=(5, 9, 13)):
295 | super().__init__()
296 | c_ = c1 // 2 # hidden channels
297 | self.cv1 = Conv(c1, c_, 1, 1)
298 | self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
299 | self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
300 |
301 | def forward(self, x):
302 | x = self.cv1(x)
303 | with warnings.catch_warnings():
304 | warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning
305 | return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
306 |
307 |
308 | class SPPF(nn.Module):
309 | # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
310 | def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13))
311 | super().__init__()
312 | c_ = c1 // 2 # hidden channels
313 | self.cv1 = Conv(c1, c_, 1, 1)
314 | self.cv2 = Conv(c_ * 4, c2, 1, 1)
315 | self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
316 |
317 | def forward(self, x):
318 | x = self.cv1(x)
319 | with warnings.catch_warnings():
320 | warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning
321 | y1 = self.m(x)
322 | y2 = self.m(y1)
323 | return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
324 |
325 |
326 | class Focus(nn.Module):
327 | # Focus wh information into c-space
328 | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
329 | super().__init__()
330 | self.conv = Conv(c1 * 4, c2, k, s, p, g, act=act)
331 | # self.contract = Contract(gain=2)
332 |
333 | def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
334 | return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1))
335 | # return self.conv(self.contract(x))
336 |
337 |
338 | class GhostConv(nn.Module):
339 | # Ghost Convolution https://github.com/huawei-noah/ghostnet
340 | def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups
341 | super().__init__()
342 | c_ = c2 // 2 # hidden channels
343 | self.cv1 = Conv(c1, c_, k, s, None, g, act=act)
344 | self.cv2 = Conv(c_, c_, 5, 1, None, c_, act=act)
345 |
346 | def forward(self, x):
347 | y = self.cv1(x)
348 | return torch.cat((y, self.cv2(y)), 1)
349 |
350 |
351 | class GhostBottleneck(nn.Module):
352 | # Ghost Bottleneck https://github.com/huawei-noah/ghostnet
353 | def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride
354 | super().__init__()
355 | c_ = c2 // 2
356 | self.conv = nn.Sequential(
357 | GhostConv(c1, c_, 1, 1), # pw
358 | DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw
359 | GhostConv(c_, c2, 1, 1, act=False)) # pw-linear
360 | self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1,
361 | act=False)) if s == 2 else nn.Identity()
362 |
363 | def forward(self, x):
364 | return self.conv(x) + self.shortcut(x)
365 |
366 |
367 | class Concat(nn.Module):
368 | # Concatenate a list of tensors along dimension
369 | def __init__(self, dimension=1):
370 | super().__init__()
371 | self.d = dimension
372 |
373 | def forward(self, x):
374 | return torch.cat(x, self.d)
375 |
376 |
377 | class AutoShape(nn.Module):
378 | # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
379 | conf = 0.25 # NMS confidence threshold
380 | iou = 0.45 # NMS IoU threshold
381 | agnostic = False # NMS class-agnostic
382 | multi_label = False # NMS multiple labels per box
383 | classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs
384 | max_det = 1000 # maximum number of detections per image
385 | amp = False # Automatic Mixed Precision (AMP) inference
386 |
387 | def __init__(self, model, verbose=True):
388 | super().__init__()
389 | if verbose:
390 | LOGGER.info('Adding AutoShape... ')
391 | copy_attr(self, model, include=('yaml', 'nc', 'hyp', 'names', 'stride', 'abc'), exclude=()) # copy attributes
392 | self.dmb = isinstance(model, AutoBackend) # DetectMultiBackend() instance
393 | self.pt = not self.dmb or model.pt # PyTorch model
394 | self.model = model.eval()
395 | if self.pt:
396 | m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect()
397 | m.inplace = False # Detect.inplace=False for safe multithread inference
398 | m.export = True # do not output loss values
399 |
400 | def _apply(self, fn):
401 | # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
402 | self = super()._apply(fn)
403 | if self.pt:
404 | m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect()
405 | m.stride = fn(m.stride)
406 | m.grid = list(map(fn, m.grid))
407 | if isinstance(m.anchor_grid, list):
408 | m.anchor_grid = list(map(fn, m.anchor_grid))
409 | return self
410 |
411 | @smart_inference_mode()
412 | def forward(self, ims, size=640, augment=False, profile=False):
413 | # Inference from various sources. For size(height=640, width=1280), RGB images example inputs are:
414 | # file: ims = 'data/images/zidane.jpg' # str or PosixPath
415 | # URI: = 'https://ultralytics.com/images/zidane.jpg'
416 | # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3)
417 | # PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3)
418 | # numpy: = np.zeros((640,1280,3)) # HWC
419 | # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values)
420 | # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images
421 |
422 | dt = (Profile(), Profile(), Profile())
423 | with dt[0]:
424 | if isinstance(size, int): # expand
425 | size = (size, size)
426 | p = next(self.model.parameters()) if self.pt else torch.empty(1, device=self.model.device) # param
427 | autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference
428 | if isinstance(ims, torch.Tensor): # torch
429 | with amp.autocast(autocast):
430 | return self.model(ims.to(p.device).type_as(p), augment=augment) # inference
431 |
432 | # Pre-process
433 | n, ims = (len(ims), list(ims)) if isinstance(ims, (list, tuple)) else (1, [ims]) # number, list of images
434 | shape0, shape1, files = [], [], [] # image and inference shapes, filenames
435 | for i, im in enumerate(ims):
436 | f = f'image{i}' # filename
437 | if isinstance(im, (str, Path)): # filename or uri
438 | im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im
439 | im = np.asarray(ImageOps.exif_transpose(im))
440 | elif isinstance(im, Image.Image): # PIL Image
441 | im, f = np.asarray(ImageOps.exif_transpose(im)), getattr(im, 'filename', f) or f
442 | files.append(Path(f).with_suffix('.jpg').name)
443 | if im.shape[0] < 5: # image in CHW
444 | im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)
445 | im = im[..., :3] if im.ndim == 3 else cv2.cvtColor(im, cv2.COLOR_GRAY2BGR) # enforce 3ch input
446 | s = im.shape[:2] # HWC
447 | shape0.append(s) # image shape
448 | g = max(size) / max(s) # gain
449 | shape1.append([y * g for y in s])
450 | ims[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update
451 | shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)] if self.pt else size # inf shape
452 | x = [LetterBox(shape1, auto=False)(image=im)["img"] for im in ims] # pad
453 | x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW
454 | x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32
455 |
456 | with amp.autocast(autocast):
457 | # Inference
458 | with dt[1]:
459 | y = self.model(x, augment=augment) # forward
460 |
461 | # Post-process
462 | with dt[2]:
463 | y = non_max_suppression(y if self.dmb else y[0],
464 | self.conf,
465 | self.iou,
466 | self.classes,
467 | self.agnostic,
468 | self.multi_label,
469 | max_det=self.max_det) # NMS
470 | for i in range(n):
471 | scale_boxes(shape1, y[i][:, :4], shape0[i])
472 |
473 | return Detections(ims, y, files, dt, self.names, x.shape)
474 |
475 |
476 | class Detections:
477 | # YOLOv5 detections class for inference results
478 | def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None):
479 | super().__init__()
480 | d = pred[0].device # device
481 | gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in ims] # normalizations
482 | self.ims = ims # list of images as numpy arrays
483 | self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)
484 | self.names = names # class names
485 | self.files = files # image filenames
486 | self.times = times # profiling times
487 | self.xyxy = pred # xyxy pixels
488 | self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels
489 | self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized
490 | self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized
491 | self.n = len(self.pred) # number of images (batch size)
492 | self.t = tuple(x.t / self.n * 1E3 for x in times) # timestamps (ms)
493 | self.s = tuple(shape) # inference BCHW shape
494 |
495 | def _run(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')):
496 | s, crops = '', []
497 | for i, (im, pred) in enumerate(zip(self.ims, self.pred)):
498 | s += f'\nimage {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string
499 | if pred.shape[0]:
500 | for c in pred[:, -1].unique():
501 | n = (pred[:, -1] == c).sum() # detections per class
502 | s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string
503 | s = s.rstrip(', ')
504 | if show or save or render or crop:
505 | annotator = Annotator(im, example=str(self.names))
506 | for *box, conf, cls in reversed(pred): # xyxy, confidence, class
507 | label = f'{self.names[int(cls)]} {conf:.2f}'
508 | if crop:
509 | file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None
510 | crops.append({
511 | 'box': box,
512 | 'conf': conf,
513 | 'cls': cls,
514 | 'label': label,
515 | 'im': save_one_box(box, im, file=file, save=save)})
516 | else: # all others
517 | annotator.box_label(box, label if labels else '', color=colors(cls))
518 | im = annotator.im
519 | else:
520 | s += '(no detections)'
521 |
522 | im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np
523 | if show:
524 | im.show(self.files[i]) # show
525 | if save:
526 | f = self.files[i]
527 | im.save(save_dir / f) # save
528 | if i == self.n - 1:
529 | LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}")
530 | if render:
531 | self.ims[i] = np.asarray(im)
532 | if pprint:
533 | s = s.lstrip('\n')
534 | return f'{s}\nSpeed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {self.s}' % self.t
535 | if crop:
536 | if save:
537 | LOGGER.info(f'Saved results to {save_dir}\n')
538 | return crops
539 |
540 | def show(self, labels=True):
541 | self._run(show=True, labels=labels) # show results
542 |
543 | def save(self, labels=True, save_dir='runs/detect/exp', exist_ok=False):
544 | save_dir = increment_path(save_dir, exist_ok, mkdir=True) # increment save_dir
545 | self._run(save=True, labels=labels, save_dir=save_dir) # save results
546 |
547 | def crop(self, save=True, save_dir='runs/detect/exp', exist_ok=False):
548 | save_dir = increment_path(save_dir, exist_ok, mkdir=True) if save else None
549 | return self._run(crop=True, save=save, save_dir=save_dir) # crop results
550 |
551 | def render(self, labels=True):
552 | self._run(render=True, labels=labels) # render results
553 | return self.ims
554 |
555 | def pandas(self):
556 | # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])
557 | new = copy(self) # return copy
558 | ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns
559 | cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns
560 | for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):
561 | a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update
562 | setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])
563 | return new
564 |
565 | def tolist(self):
566 | # return a list of Detections objects, i.e. 'for result in results.tolist():'
567 | r = range(self.n) # iterable
568 | x = [Detections([self.ims[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r]
569 | # for d in x:
570 | # for k in ['ims', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
571 | # setattr(d, k, getattr(d, k)[0]) # pop out of list
572 | return x
573 |
574 | def print(self):
575 | LOGGER.info(self.__str__())
576 |
577 | def __len__(self): # override len(results)
578 | return self.n
579 |
580 | def __str__(self): # override print(results)
581 | return self._run(pprint=True) # print results
582 |
583 | def __repr__(self):
584 | return f'YOLOv5 {self.__class__} instance\n' + self.__str__()
585 |
586 |
587 | class Proto(nn.Module):
588 | # YOLOv8 mask Proto module for segmentation models
589 | def __init__(self, c1, c_=256, c2=32): # ch_in, number of protos, number of masks
590 | super().__init__()
591 | self.cv1 = Conv(c1, c_, k=3)
592 | self.upsample = nn.ConvTranspose2d(c_, c_, 2, 2, 0, bias=True) # nn.Upsample(scale_factor=2, mode='nearest')
593 | self.cv2 = Conv(c_, c_, k=3)
594 | self.cv3 = Conv(c_, c2)
595 |
596 | def forward(self, x):
597 | return self.cv3(self.cv2(self.upsample(self.cv1(x))))
598 |
599 |
600 | class Ensemble(nn.ModuleList):
601 | # Ensemble of models
602 | def __init__(self):
603 | super().__init__()
604 |
605 | def forward(self, x, augment=False, profile=False, visualize=False):
606 | y = [module(x, augment, profile, visualize)[0] for module in self]
607 | # y = torch.stack(y).max(0)[0] # max ensemble
608 | # y = torch.stack(y).mean(0) # mean ensemble
609 | y = torch.cat(y, 1) # nms ensemble
610 | return y, None # inference, train output
611 |
612 |
613 | # heads # <---------------------------------------
614 | class Detect(nn.Module):
615 |     # YOLOv8 Detect head for detection models
616 | dynamic = False # force grid reconstruction
617 | export = False # export mode
618 | shape = None
619 | anchors = torch.empty(0) # init
620 | strides = torch.empty(0) # init
621 |
622 | def __init__(self, nc=80, ch=()): # detection layer
623 | super().__init__()
624 | self.nc = nc # number of classes
625 | self.nl = len(ch) # number of detection layers
626 | self.reg_max = 16 # DFL channels (ch[0] // 16 to scale 4/8/12/16/20 for n/s/m/l/x)
627 | self.no = nc + self.reg_max * 4 # number of outputs per anchor
628 | self.stride = torch.zeros(self.nl) # strides computed during build
629 |
630 | c2, c3 = max((16, ch[0] // 4, self.reg_max * 4)), max(ch[0], self.nc) # channels
631 | self.cv2 = nn.ModuleList(
632 | nn.Sequential(Conv(x, c2, 3), Conv(c2, c2, 3), nn.Conv2d(c2, 4 * self.reg_max, 1)) for x in ch)
633 | self.cv3 = nn.ModuleList(nn.Sequential(Conv(x, c3, 3), Conv(c3, c3, 3), nn.Conv2d(c3, self.nc, 1)) for x in ch)
634 | self.dfl = DFL(self.reg_max) if self.reg_max > 1 else nn.Identity()
635 |
636 | def forward(self, x):
637 | shape = x[0].shape # BCHW
638 | for i in range(self.nl):
639 | x[i] = torch.cat((self.cv2[i](x[i]), self.cv3[i](x[i])), 1)
640 |
641 |
642 | if self.training:
643 | return x
644 | elif self.dynamic or self.shape != shape:
645 | self.anchors, self.strides = (x.transpose(0, 1) for x in make_anchors(x, self.stride, 0.5))
646 | self.shape = shape
647 |
648 |         # modified to return like this: the raw concatenated output (the decode below stays commented out)
649 |         return torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 2).permute(0, 2, 1)
650 |
651 | # box, cls = torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 2).split((self.reg_max * 4, self.nc), 1)
652 | # dbox = dist2bbox(self.dfl(box), self.anchors.unsqueeze(0), xywh=True, dim=1) * self.strides
653 | # y = torch.cat((dbox, cls.sigmoid()), 1)
654 | # return y if self.export else (y, x)
655 |
656 | def bias_init(self):
657 | # Initialize Detect() biases, WARNING: requires stride availability
658 | m = self # self.model[-1] # Detect() module
659 | # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1
660 | # ncf = math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum()) # nominal class frequency
661 | for a, b, s in zip(m.cv2, m.cv3, m.stride): # from
662 | a[-1].bias.data[:] = 1.0 # box
663 | b[-1].bias.data[:m.nc] = math.log(5 / m.nc / (640 / s) ** 2) # cls (.01 objects, 80 classes, 640 img)
664 |
665 |
666 | class Segment(Detect):
667 |     # YOLOv8 Segment head for segmentation models
668 | def __init__(self, nc=80, nm=32, npr=256, ch=()):
669 | super().__init__(nc, ch)
670 | self.nm = nm # number of masks
671 | self.npr = npr # number of protos
672 | self.proto = Proto(ch[0], self.npr, self.nm) # protos
673 | self.detect = Detect.forward
674 |
675 | c4 = max(ch[0] // 4, self.nm)
676 | self.cv4 = nn.ModuleList(nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3), nn.Conv2d(c4, self.nm, 1)) for x in ch)
677 |
678 | def forward(self, x):
679 | p = self.proto(x[0]) # mask protos
680 | bs = p.shape[0] # batch size
681 |
682 | mc = torch.cat([self.cv4[i](x[i]).view(bs, self.nm, -1) for i in range(self.nl)], 2) # mask coefficients
683 | x = self.detect(self, x)
684 | if self.training:
685 | return x, mc, p
686 | return (torch.cat([x, mc], 1), p) if self.export else (torch.cat([x[0], mc], 1), (x[1], mc, p))
687 |
688 |
689 | class Classify(nn.Module):
690 |     # YOLOv8 classification head, i.e. x(b,c1,20,20) to x(b,c2)
691 | def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups
692 | super().__init__()
693 | c_ = 1280 # efficientnet_b0 size
694 | self.conv = Conv(c1, c_, k, s, autopad(k, p), g)
695 | self.pool = nn.AdaptiveAvgPool2d(1) # to x(b,c_,1,1)
696 | self.drop = nn.Dropout(p=0.0, inplace=True)
697 | self.linear = nn.Linear(c_, c2) # to x(b,c2)
698 |
699 | def forward(self, x):
700 | if isinstance(x, list):
701 | x = torch.cat(x, 1)
702 | return self.linear(self.drop(self.pool(self.conv(x)).flatten(1)))
703 |
--------------------------------------------------------------------------------
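With the modified `return` in `Detect.forward()`, the exported graph ends at a raw `(1, 8400, 144)` tensor for a 640×640 input (8400 = 80² + 40² + 20² grid cells, 144 = 4·reg_max + nc), so the DFL/box decode that the commented-out lines used to perform has to be reproduced by whoever consumes the output. Below is a minimal C++ sketch of that decode for a single output row; `decode_row`, the row layout and the `grid_x`/`grid_y`/`stride` arguments are illustrative assumptions, not code taken from this repo's yolo.cpp.

```
#include <algorithm>
#include <cmath>

struct Box { float x1, y1, x2, y2, score; int label; };

// decode one row of the (8400 x 144) head output into a corner-format box
Box decode_row(const float* pred, float grid_x, float grid_y, float stride)
{
    const int reg_max = 16, nc = 80;
    float dist[4];                                    // l, t, r, b distances in grid units
    for (int side = 0; side < 4; ++side) {
        const float* p = pred + side * reg_max;
        float m = *std::max_element(p, p + reg_max);  // numerically stable softmax
        float sum = 0.f, acc = 0.f;
        for (int i = 0; i < reg_max; ++i) {
            float e = std::exp(p[i] - m);
            sum += e;
            acc += e * i;                             // DFL: expected bin index
        }
        dist[side] = acc / sum;
    }
    const float* cls = pred + 4 * reg_max;
    int best = int(std::max_element(cls, cls + nc) - cls);
    float score = 1.f / (1.f + std::exp(-cls[best])); // sigmoid on the best class logit

    Box b;
    b.x1 = (grid_x + 0.5f - dist[0]) * stride;        // anchors are grid-cell centres (+0.5)
    b.y1 = (grid_y + 0.5f - dist[1]) * stride;
    b.x2 = (grid_x + 0.5f + dist[2]) * stride;
    b.y2 = (grid_y + 0.5f + dist[3]) * stride;
    b.score = score;
    b.label = best;
    return b;
}
```

The commented-out PyTorch branch emits centre/width/height boxes (`xywh=True`); this sketch uses corner coordinates instead, which is usually more convenient right before NMS.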
/myqlabel.cpp:
--------------------------------------------------------------------------------
1 | #include "myqlabel.h"
2 |
3 | myqlabel::myqlabel(const QString &Titter, QWidget *parent)
4 | :QLabel(parent)
5 | {
6 | this->setText(Titter);
7 | connect(this, SIGNAL(clicked()), this, SLOT(slotClicked()));
8 | }
9 |
10 | myqlabel::~myqlabel()
11 | {
12 |
13 | }
14 |
15 | void myqlabel::slotClicked()
16 | {
17 | this->setPixmap(QPixmap(""));
18 |
19 | }
20 | void myqlabel::mousePressEvent(QMouseEvent*)
21 | {
22 | emit clicked();
23 | }
24 |
25 |
--------------------------------------------------------------------------------
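A hypothetical usage sketch of the class above (`setupPreview` and the `update()` receiver are assumptions for illustration, not part of this project): `myqlabel` behaves like a `QLabel` that emits `clicked()` on a mouse press, and its own `slotClicked()` clears the pixmap.

```
#include "myqlabel.h"

void setupPreview(QWidget *parent)
{
    // clicking the label clears its pixmap via the built-in slotClicked();
    // any additional receiver can attach to the same signal
    myqlabel *label = new myqlabel(QStringLiteral("preview"), parent);
    QObject::connect(label, SIGNAL(clicked()), parent, SLOT(update()));
}
```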
/myqlabel.h:
--------------------------------------------------------------------------------
1 | #ifndef MYQLABEL_H
2 | #define MYQLABEL_H
3 |
4 | #include <QLabel>
5 | #include <QWidget>
6 | #include <QMouseEvent>
7 |
8 |
9 | class myqlabel : public QLabel
10 | {
11 | Q_OBJECT
12 | public:
13 | myqlabel(const QString &Titter,QWidget *parent=0);
14 | ~myqlabel();
15 | signals:
16 |     // click signal
17 | void clicked();
18 | public slots:
19 |     // slot responding to the click signal
20 | void slotClicked();
21 | protected:
22 |     // mouse press event
23 | void mousePressEvent(QMouseEvent*/* event*/);
24 | };
25 |
26 |
27 | #endif // MYQLABEL_H
28 |
--------------------------------------------------------------------------------
/myvideosurface.cpp:
--------------------------------------------------------------------------------
1 | #include "myvideosurface.h"
2 |
3 | MyVideoSurface::MyVideoSurface()
4 | {
5 |
6 | }
7 |
8 | // supported pixel formats
9 | QList<QVideoFrame::PixelFormat> MyVideoSurface::supportedPixelFormats(QAbstractVideoBuffer::HandleType handleType) const
10 | {
11 | if(handleType == QAbstractVideoBuffer::NoHandle){
12 |         return QList<QVideoFrame::PixelFormat>() << QVideoFrame::Format_RGB32
13 | << QVideoFrame::Format_ARGB32
14 | << QVideoFrame::Format_ARGB32_Premultiplied
15 | << QVideoFrame::Format_RGB565
16 | << QVideoFrame::Format_NV21
17 | << QVideoFrame::Format_RGB555;
18 | }
19 | else {
20 |         return QList<QVideoFrame::PixelFormat>();
21 | }
22 | }
23 |
24 | // map the stream's pixel format to an equivalent image format; if there is none, QImage::Format_Invalid is returned
25 | bool MyVideoSurface::isFormatSupported(const QVideoSurfaceFormat &videoformat) const
26 | {
27 |     // imageFormatFromPixelFormat() ----- returns the image format equivalent to a video frame's pixel format
28 |     // pixelFormat() ----- returns the pixel format of frames in the video stream
29 | return QVideoFrame::imageFormatFromPixelFormat(videoformat.pixelFormat()) != QImage::Format_Invalid;
30 | }
31 |
32 | // these virtual functions are called automatically; start() checks whether each frame can be converted to an equivalent image format
33 | bool MyVideoSurface::start(const QVideoSurfaceFormat &videoformat)
34 | {
35 | //    qDebug() << QVideoFrame::imageFormatFromPixelFormat(videoformat.pixelFormat()); // the format is RGB32
36 | // if(QVideoFrame::imageFormatFromPixelFormat(videoformat.pixelFormat()) != QImage::Format_Invalid && !videoformat.frameSize().isEmpty()){
37 | // QAbstractVideoSurface::start(videoformat);
38 | // return true;
39 | // }
40 | QAbstractVideoSurface::start(videoformat);
41 |     return true;
42 | }
43 |
44 | bool MyVideoSurface::present(const QVideoFrame &frame)
45 | {
46 | // qDebug() << frame.size();
47 | if (frame.isValid()){
48 |         QVideoFrame cloneFrame(frame); // every video frame passes through present() (called internally)
49 |         emit frameAvailable(cloneFrame); // forward the frame directly via the signal
50 | return true;
51 | }
52 | stop();
53 | return false;
54 | }
55 |
56 | void MyVideoSurface::stop()
57 | {
58 | QAbstractVideoSurface::stop();
59 | }
60 |
61 |
62 |
--------------------------------------------------------------------------------
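The surface above only forwards each `QVideoFrame` through `frameAvailable()`; the receiving slot (widget.cpp connects it to `rcvFrame()`) still has to `map()` the frame before reading its pixels. A minimal sketch of that conversion step, assuming a pixel format that has a QImage equivalent (`frameToImage` is an illustrative helper, not code from this repo):

```
#include <QAbstractVideoBuffer>
#include <QImage>
#include <QVideoFrame>

QImage frameToImage(QVideoFrame frame)
{
    if (!frame.map(QAbstractVideoBuffer::ReadOnly))
        return QImage();
    QImage img;
    QImage::Format fmt = QVideoFrame::imageFormatFromPixelFormat(frame.pixelFormat());
    if (fmt != QImage::Format_Invalid) {
        // copy() detaches the pixels so the frame can be unmapped right away
        img = QImage(frame.bits(), frame.width(), frame.height(),
                     frame.bytesPerLine(), fmt).copy();
    }
    frame.unmap();
    return img;
}
```

Formats without a QImage equivalent (e.g. NV21 on some Android devices) come back as an empty image here and would need an explicit colour-space conversion instead.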
/myvideosurface.h:
--------------------------------------------------------------------------------
1 | #ifndef MYVIDEOSURFACE_H
2 | #define MYVIDEOSURFACE_H
3 |
4 |
5 | #include <QAbstractVideoSurface>
6 | #include <QVideoSurfaceFormat>
7 | #include <QVideoFrame>
8 | #include <QAbstractVideoBuffer>
9 |
10 | class MyVideoSurface : public QAbstractVideoSurface
11 | {
12 | Q_OBJECT
13 | public:
14 | MyVideoSurface();
15 |
16 |     QList<QVideoFrame::PixelFormat> supportedPixelFormats(QAbstractVideoBuffer::HandleType type = QAbstractVideoBuffer::NoHandle) const Q_DECL_OVERRIDE;
17 |     bool isFormatSupported(const QVideoSurfaceFormat &) const Q_DECL_OVERRIDE; // checks whether the stream's pixel format maps to an equivalent image format (QImage::Format_Invalid means it does not)
18 |     bool start(const QVideoSurfaceFormat &) Q_DECL_OVERRIDE; // called as soon as the camera starts
19 |     bool present(const QVideoFrame &) Q_DECL_OVERRIDE; // every frame comes back here for processing
20 | void stop() Q_DECL_OVERRIDE;
21 |
22 | signals:
23 | void frameAvailable(QVideoFrame cloneFrame);
24 |
25 | };
26 |
27 | #endif // MYVIDEOSURFACE_H
28 |
--------------------------------------------------------------------------------
/pic.qrc:
--------------------------------------------------------------------------------
1 | <RCC>
2 |     <qresource prefix="/">
3 |         <file>assets/ai.png</file>
4 |         <file>assets/icon.png</file>
5 |         <file>assets/ManjaroMix.qss</file>
6 |         <file>model/yolov8s_opt.bin</file>
7 |         <file>model/yolov8s_opt.param</file>
8 |     </qresource>
9 | </RCC>
10 |
--------------------------------------------------------------------------------
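The two model files are embedded through the resource system above. ncnn's plain `load_param()`/`load_model()` overloads expect filesystem paths, so one common pattern, sketched below under that assumption, is to copy the qrc-embedded files to a writable location once at startup (`extractResource` is illustrative; the project's actual loading code lives in yolo.cpp and is not shown in this excerpt):

```
#include <QDir>
#include <QFile>
#include <QStandardPaths>
#include <QString>

static QString extractResource(const QString &res, const QString &name)
{
    QString dir = QStandardPaths::writableLocation(QStandardPaths::AppDataLocation);
    QDir().mkpath(dir);                      // make sure the target directory exists
    QString dst = dir + "/" + name;
    if (!QFile::exists(dst))
        QFile::copy(res, dst);               // e.g. ":/model/yolov8s_opt.param"
    return dst;
}
```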
/widget.cpp:
--------------------------------------------------------------------------------
1 |
2 |
3 | #include "widget.h"
4 | #include "ui_widget.h"
5 | #include "yolo.h"
6 | #include
7 | #include
8 | #include
9 | #include
10 |
11 | extern Yolo * yolov8;
12 | const float prob_threshold = 0.25f;
13 | const float nms_threshold = 0.45f;
14 |
15 | cv::Mat ChangeToMat(QImage image)
16 | {
17 |
18 | // image = image.convertToFormat(QImage::Format_RGB888);
19 | cv::Mat tmp(image.height(),image.width(),CV_8UC3,(uchar*)image.bits(),image.bytesPerLine());
20 |     cv::Mat result; // deep copy so the returned Mat owns its pixel data
21 | cv::cvtColor(tmp, result, cv::COLOR_BGR2RGB);
22 | return result;
23 | }
24 |
25 | QImage ChangeToQIamge(cv::Mat mat)
26 | {
27 | cv::cvtColor(mat, mat, cv::COLOR_BGR2RGB);
28 | QImage qim((const unsigned char*)mat.data, mat.cols, mat.rows, mat.step,
29 | QImage::Format_RGB888);
30 | return qim;
31 | }
32 |
33 |
34 |
35 | Widget::Widget(QWidget *parent)
36 | : QWidget(parent)
37 | , ui(new Ui::Widget)
38 | {
39 | ui->setupUi(this);
40 |
41 |     camera=new QCamera;// camera
42 | capture = new QCameraImageCapture(camera);
43 |     viewfinder=new QCameraViewfinder(this);// viewfinder
44 |
45 | // static QMetaObject::Connection connect(const QObject *sender, const char *signal, const QObject *receiver, const char *member,
46 | // Qt::ConnectionType = Qt::AutoConnection);
47 |     // the first argument (sender) is the emitting object; the second is a signal of that sender; the third is the receiving object; the fourth is a slot of the receiver. Typically the sender emits the signal and the receiver's slot is then executed.
48 | QObject::connect(capture, SIGNAL(imageCaptured(int,QImage)), this, SLOT(displayImage(int,QImage)));
49 | camera->setCaptureMode(QCamera::CaptureStillImage);
50 | // camera->setViewfinder(viewfinder);
51 | mySurface = new MyVideoSurface();
52 | camera->setViewfinder(mySurface);
53 |     // handle every video frame coming from myvideosurface
54 | connect(mySurface, SIGNAL(frameAvailable(QVideoFrame)), this, SLOT(rcvFrame(QVideoFrame)), Qt::DirectConnection);
55 |
56 |     camera->start(); // start the camera
57 |
58 |     // query the resolutions, frame rates and other settings supported by the camera
59 |     // QList<QCameraViewfinderSettings> ViewSets = camera->supportedViewfinderSettings();
60 | // int i = 0;
61 | // qDebug() << "viewfinderResolutions sizes.len = " << ViewSets.length();
62 | // foreach (QCameraViewfinderSettings ViewSet, ViewSets) {
63 |     //     qDebug() << i++ << " max rate = " << ViewSet.maximumFrameRate() << " min rate = " << ViewSet.minimumFrameRate() << " resolution " << ViewSet.resolution();
64 |     // }
74 |     // camera->setViewfinderSettings(camerasettings);
75 |
76 | }
77 |
78 | Widget::~Widget()
79 | {
80 | delete ui;
81 | }
82 |
83 |
84 | void Widget::displayImage(int ,QImage image)
85 | {
86 | image=image.convertToFormat(QImage::Format_RGB888);
87 |
88 | cv::Mat cv_image;
89 | cv_image = ChangeToMat(image);
90 |
91 | std::vector