├── Event.cpp ├── Event.h ├── Mutex.cpp ├── Mutex.h ├── QT-GSTREAM-CUDA.pro ├── QT-GSTREAM-CUDA.pro.user ├── QtOpenCV-master.zip ├── README.md ├── Screenshot from 2020-01-29 11-05-53.png ├── commandLine.cpp ├── commandLine.h ├── cvmatandqimage.cpp ├── cvmatandqimage.h ├── detectNet.cpp ├── detectNet.cu ├── detectNet.h ├── detectnet-camera.cpp ├── glDisplay.cpp ├── glDisplay.h ├── glTexture.cpp ├── glTexture.h ├── glUtility.h ├── gstCamera.cpp ├── gstCamera.h ├── gstDecoder.cpp ├── gstDecoder.h ├── gstEncoder.cpp ├── gstEncoder.h ├── gstUtility.cpp ├── gstUtility.h ├── libjetson-inference.so ├── libjetson-utils.so ├── main.cpp ├── mainwindow.cpp ├── mainwindow.h ├── mainwindow.ui ├── opencv.pri ├── process.zip ├── timespec.cpp └── timespec.h /Event.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a 5 | * copy of this software and associated documentation files (the "Software"), 6 | * to deal in the Software without restriction, including without limitation 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | * and/or sell copies of the Software, and to permit persons to whom the 9 | * Software is furnished to do so, subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in 12 | * all copies or substantial portions of the Software. 13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | * DEALINGS IN THE SOFTWARE. 21 | */ 22 | 23 | #include "Event.h" 24 | #include 25 | 26 | 27 | // constructor 28 | Event::Event( bool autoReset ) 29 | { 30 | mAutoReset = autoReset; 31 | mQuery = false; 32 | 33 | pthread_cond_init(&mID, NULL); 34 | } 35 | 36 | 37 | // destructor 38 | Event::~Event() 39 | { 40 | pthread_cond_destroy(&mID); 41 | } 42 | 43 | 44 | // Query 45 | bool Event::Query() 46 | { 47 | bool r = false; 48 | mQueryMutex.Lock(); 49 | r = mQuery; 50 | mQueryMutex.Unlock(); 51 | return r; 52 | } 53 | 54 | 55 | // Wake 56 | void Event::Wake() 57 | { 58 | mQueryMutex.Lock(); 59 | mQuery = true; 60 | pthread_cond_signal(&mID); 61 | mQueryMutex.Unlock(); 62 | } 63 | 64 | 65 | // Wait 66 | bool Event::Wait() 67 | { 68 | mQueryMutex.Lock(); 69 | 70 | while(!mQuery) 71 | pthread_cond_wait(&mID, mQueryMutex.GetID()); 72 | 73 | if( mAutoReset ) 74 | mQuery = false; 75 | 76 | mQueryMutex.Unlock(); 77 | return true; 78 | } 79 | 80 | 81 | // Wait 82 | bool Event::Wait( const timespec& timeout ) 83 | { 84 | mQueryMutex.Lock(); 85 | 86 | const timespec abs_time = timeAdd( timestamp(), timeout ); 87 | 88 | while(!mQuery) 89 | { 90 | const int ret = pthread_cond_timedwait(&mID, mQueryMutex.GetID(), &abs_time); 91 | 92 | if( ret == ETIMEDOUT ) 93 | { 94 | mQueryMutex.Unlock(); 95 | return false; 96 | } 97 | } 98 | 99 | if( mAutoReset ) 100 | mQuery = false; 101 | 102 | mQueryMutex.Unlock(); 103 | return true; 104 | } 105 | 106 | -------------------------------------------------------------------------------- /Event.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 
3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a 5 | * copy of this software and associated documentation files (the "Software"), 6 | * to deal in the Software without restriction, including without limitation 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | * and/or sell copies of the Software, and to permit persons to whom the 9 | * Software is furnished to do so, subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in 12 | * all copies or substantial portions of the Software. 13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | * DEALINGS IN THE SOFTWARE. 21 | */ 22 | 23 | #ifndef __MULTITHREAD_EVENT_H_ 24 | #define __MULTITHREAD_EVENT_H_ 25 | 26 | #include "Mutex.h" 27 | #include "timespec.h" 28 | 29 | /** 30 | * Event object for signalling other threads. 31 | * @ingroup threads 32 | */ 33 | class Event 34 | { 35 | public: 36 | /** 37 | * Event constructor. By default, it will automatically be reset when it's raised. 38 | * @param auto_reset Once this event has been raised, should it automatically be reset? 39 | */ 40 | Event( bool auto_reset=true ); 41 | 42 | /** 43 | * Destructor 44 | */ 45 | ~Event(); 46 | 47 | /** 48 | * Raise the event. Any threads waiting on this event will be woken up. 49 | */ 50 | void Wake(); 51 | 52 | /** 53 | * Reset the event status to un-raised. 
54 | */ 55 | inline void Reset() { mQueryMutex.Lock(); mQuery = false; mQueryMutex.Unlock(); } 56 | 57 | /** 58 | * Query the status of this event. 59 | * @return True if the event is raised, false if not. 60 | */ 61 | bool Query(); 62 | 63 | /** 64 | * Wait until this event is raised. It is likely this will block this thread (and will never timeout). 65 | * @see Wake 66 | */ 67 | bool Wait(); 68 | 69 | /** 70 | * Wait for a specified amount of time until this event is raised or timeout occurs. 71 | * @see Wake 72 | */ 73 | bool Wait( const timespec& timeout ); 74 | 75 | /** 76 | * Wait for a specified number of milliseconds until this event is raised or timeout occurs. 77 | * @see Wake 78 | */ 79 | inline bool Wait( uint64_t timeout ) { return (timeout == UINT64_MAX) ? Wait() : Wait(timeNew(timeout*1000*1000)); } 80 | 81 | /** 82 | * Wait for a specified number of nanoseconds until this event is raised or timeout occurs. 83 | * @see Wake 84 | */ 85 | inline bool WaitNs( uint64_t timeout ) { return (timeout == UINT64_MAX) ? Wait() : Wait(timeNew(timeout)); } 86 | 87 | /** 88 | * Wait for a specified number of microseconds until this event is raised or timeout occurs. 89 | * @see Wake 90 | */ 91 | inline bool WaitUs( uint64_t timeout ) { return (timeout == UINT64_MAX) ? Wait() : Wait(timeNew(timeout*1000)); } 92 | 93 | /** 94 | * Get the Event object 95 | */ 96 | inline pthread_cond_t* GetID() { return &mID; } 97 | 98 | protected: 99 | 100 | pthread_cond_t mID; 101 | 102 | Mutex mQueryMutex; 103 | bool mQuery; 104 | bool mAutoReset; 105 | }; 106 | 107 | 108 | #endif 109 | -------------------------------------------------------------------------------- /Mutex.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 
3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a 5 | * copy of this software and associated documentation files (the "Software"), 6 | * to deal in the Software without restriction, including without limitation 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | * and/or sell copies of the Software, and to permit persons to whom the 9 | * Software is furnished to do so, subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in 12 | * all copies or substantial portions of the Software. 13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | * DEALINGS IN THE SOFTWARE. 21 | */ 22 | 23 | #include "Mutex.h" 24 | 25 | 26 | // constructor 27 | Mutex::Mutex() 28 | { 29 | pthread_mutex_init(&mID, NULL); 30 | } 31 | 32 | 33 | // destructor 34 | Mutex::~Mutex() 35 | { 36 | pthread_mutex_destroy(&mID); 37 | } 38 | -------------------------------------------------------------------------------- /Mutex.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 
3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a 5 | * copy of this software and associated documentation files (the "Software"), 6 | * to deal in the Software without restriction, including without limitation 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | * and/or sell copies of the Software, and to permit persons to whom the 9 | * Software is furnished to do so, subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in 12 | * all copies or substantial portions of the Software. 13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | * DEALINGS IN THE SOFTWARE. 21 | */ 22 | 23 | #ifndef __MULTITHREAD_MUTEX_H_ 24 | #define __MULTITHREAD_MUTEX_H_ 25 | 26 | #include 27 | 28 | 29 | /** 30 | * A lightweight mutual exclusion lock. It is very fast to check if the mutex is available, 31 | * lock it, and release it. However, if the mutex is unavailable when you attempt to 32 | * lock it, execution of the thread will stop until it becomes available. 33 | * @ingroup threads 34 | */ 35 | class Mutex 36 | { 37 | public: 38 | /** 39 | * Constructor 40 | */ 41 | Mutex(); 42 | 43 | /** 44 | * Destructor 45 | */ 46 | ~Mutex(); 47 | 48 | /** 49 | * If the lock is free, aquire it. Otherwise, return without waiting for it to become available. 50 | * @result True if the lock was aquired, false if not. 
51 | */ 52 | inline bool AttemptLock() { return (pthread_mutex_trylock(&mID) == 0); } 53 | 54 | /** 55 | * Aquire the lock, whenever it becomes available. This could mean just a few instructions 56 | * if the lock is already free, or to block the thread if it isn't. 57 | */ 58 | inline void Lock() { pthread_mutex_lock(&mID); } 59 | 60 | /** 61 | * Release the lock 62 | */ 63 | inline void Unlock() { pthread_mutex_unlock(&mID); } 64 | 65 | /** 66 | * Wait for the lock, then release it immediately. Use this in situations where you are waiting for 67 | * an event to occur. 68 | */ 69 | inline void Sync() { Lock(); Unlock(); } 70 | 71 | /** 72 | * Get the mutex object 73 | */ 74 | pthread_mutex_t* GetID() { return &mID; } 75 | 76 | protected: 77 | pthread_mutex_t mID; 78 | }; 79 | 80 | #endif -------------------------------------------------------------------------------- /QT-GSTREAM-CUDA.pro: -------------------------------------------------------------------------------- 1 | #------------------------------------------------- 2 | # 3 | # Project created by QtCreator 2020-01-18T17:30:42 4 | # 5 | #------------------------------------------------- 6 | 7 | QT += core gui 8 | 9 | greaterThan(QT_MAJOR_VERSION, 4): QT += widgets 10 | 11 | TARGET = QT-GSTREAM-CUDA 12 | TEMPLATE = app 13 | 14 | # The following define makes your compiler emit warnings if you use 15 | # any feature of Qt which has been marked as deprecated (the exact warnings 16 | # depend on your compiler). Please consult the documentation of the 17 | # deprecated API in order to know how to port your code away from it. 18 | DEFINES += QT_DEPRECATED_WARNINGS 19 | 20 | # You can also make your code fail to compile if you use deprecated APIs. 21 | # In order to do so, uncomment the following line. 22 | # You can also select to disable deprecated APIs only up to a certain version of Qt. 
#DEFINES += QT_DISABLE_DEPRECATED_BEFORE=0x060000    # disables all the APIs deprecated before Qt 6.0.0

# NOTE(fix): the former '/////' separator lines were not valid qmake
# syntax -- qmake comments use '#'.
#####################################################
CONFIG += link_pkgconfig


#INCLUDEPATH+=/usr/include/opencv
#INCLUDEPATH+=/usr/include/opencv
#QMAKE_CXXFLAGS += -Wall


#LIBS += `pkg-config opencv --libs` -lopencv_videoio #-lopencv_dnn
#PKGCONFIG += opencv


#####################################################
PKGCONFIG += gstreamer-1.0 glib-2.0 gobject-2.0 gstreamer-app-1.0 gstreamer-pbutils-1.0


SOURCES += \
    main.cpp \
    mainwindow.cpp \
    gstCamera.cpp

HEADERS += \
    mainwindow.h \
    gstCamera.h \
    gstUtility.h \
    Mutex.h \
    Event.h \
    timespec.h \
    glUtility.h \
    glTexture.h

FORMS += \
    mainwindow.ui


#LIBS += -lopencv_core -lopencv_imgcodecs -lopencv_highgui -lopencv_shape -lopencv_videoio
LIBS += -lopencv_core -lopencv_imgproc -lopencv_highgui -lopencv_ml -lopencv_video -lopencv_features2d -lopencv_calib3d -lopencv_objdetect -lopencv_flann


######################## Jetson Inferencing ###########################
LIBS += -ljetson-inference -ljetson-utils
INCLUDEPATH += /usr/include/gstreamer-1.0/ /usr/include/glib-2.0
INCLUDEPATH += /home/brance/Mine/jetson-inference/x86_64/include/jetson-utils/
INCLUDEPATH += /home/brance/Mine/jetson-inference/c
LIBS += `pkg-config --cflags gstreamer-plugins-base-1.0` #`pkg-config --cflags --libs opencv`

############################ TensorRT #################################
LIBS += -lnvparsers -lnvinfer -lnvcaffe_parser
include (/home/brance/QT-GSTREAM-CUDA/opencv.pri)


LIBS += -lnvcaffe_parser
LIBS += -lnvinfer
LIBS += -lnvinfer_plugin
#LIBS += -lnvinfer_static
#LIBS += -lnvonnxparser
#LIBS += -lnvonnxparser_runtime
LIBS += -lnvparsers
#LIBS += -lnvparsers_statistics
################# cuda and blas #######################
#LIBS += -lopenblas
LIBS += -lcudnn
LIBS += -L"/usr/local/cuda/lib64"
LIBS += -lcublas
LIBS += -lcudart
LIBS += -lcurand
LIBS += -lcusolver


# NOTE(fix): this variable was previously misspelled 'UDA_SOURCES', so the
# .cu files below were never handed to the nvcc extra compiler
# (cuda.input = CUDA_SOURCES further down).
CUDA_SOURCES += process/kernel.cu \
    process/util/cuda/cudaNormalize.cu \
    process/util/cuda/cudaOverlay.cu \
    process/util/cuda/cudaResize.cu \
    process/util/cuda/cudaRGB.cu \
    process/util/cuda/cudaYUV-NV12.cu \
    process/util/cuda/cudaYUV-YUYV.cu \
    process/util/cuda/cudaYUV-YV12.cu

# Project dir and outputs
PROJECT_DIR = $$system(pwd)
OBJECTS_DIR = $$PROJECT_DIR/Obj
DESTDIR = ./bin

# Path to cuda SDK install

# Path to cuda toolkit install
CUDA_DIR = /usr/local/cuda
# GPU architecture
CUDA_ARCH = sm_52
# nvcc flags (ptxas option verbose is always useful)
NVCCFLAGS = --compiler-options -fno-strict-aliasing -use_fast_math --ptxas-options=-v
# include paths
INCLUDEPATH += $$CUDA_DIR/include
INCLUDEPATH += $$CUDA_SDK/common/inc/
INCLUDEPATH += $$CUDA_SDK/../shared/inc/
# lib dirs
QMAKE_LIBDIR += $$CUDA_DIR/lib64
#QMAKE_LIBDIR += $$CUDA_SDK/lib
#QMAKE_LIBDIR += $$CUDA_SDK/common/lib
# libs - note than i'm using a x_86_64 machine
LIBS += -lcudart #-lcutil_x86_64
# join the includes in a line
CUDA_INC = $$join(INCLUDEPATH,' -I','-I',' ')

cuda.input = CUDA_SOURCES
cuda.output = ${OBJECTS_DIR}${QMAKE_FILE_BASE}_cuda.o

cuda.commands = $$CUDA_DIR/bin/nvcc -m64 -g -G -arch=$$CUDA_ARCH -c $$NVCCFLAGS $$CUDA_INC $$LIBS ${QMAKE_FILE_NAME} -o ${QMAKE_FILE_OUT}

cuda.dependency_type = TYPE_C # there was a typo here. Thanks workmate!
cuda.depend_command = $$CUDA_DIR/bin/nvcc -g -G -M $$CUDA_INC $$NVCCFLAGS ${QMAKE_FILE_NAME}
# Tell Qt that we want add more stuff to the Makefile
QMAKE_EXTRA_UNIX_COMPILERS += cuda


#unix:{
# suppress the default RPATH if you wish
#QMAKE_LFLAGS_RPATH=
# add your own with quoting gyrations to make sure $ORIGIN gets to the command line unexpanded
# QMAKE_LFLAGS += "-Wl,-rpath,\'\$$ORIGIN\'"
#}
-------------------------------------------------------------------------------- /QtOpenCV-master.zip: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/collincebecky/QT-OPENCV-GSTREAMER-CUDA/4f7fdade20ffa410be805b93fc1661f36c6ef3e6/QtOpenCV-master.zip
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
# QT-OPENCV-GSTREAMER-CUDA

# Expected Output


![](https://github.com/collinsokumu/QT-OPENCV-GSTREAMER-CUDA/blob/master/Screenshot%20from%202020-01-29%2011-05-53.png)
-------------------------------------------------------------------------------- /Screenshot from 2020-01-29 11-05-53.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/collincebecky/QT-OPENCV-GSTREAMER-CUDA/4f7fdade20ffa410be805b93fc1661f36c6ef3e6/Screenshot from 2020-01-29 11-05-53.png
-------------------------------------------------------------------------------- /commandLine.cpp: --------------------------------------------------------------------------------
/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a 5 | * copy of this software and associated documentation files (the "Software"), 6 | * to deal in the Software without restriction, including without limitation 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | * and/or sell copies of the Software, and to permit persons to whom the 9 | * Software is furnished to do so, subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in 12 | * all copies or substantial portions of the Software. 13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | * DEALINGS IN THE SOFTWARE. 
21 | */ 22 | 23 | #include "commandLine.h" 24 | 25 | #include 26 | #include 27 | 28 | 29 | #define ARGC_START 0 30 | 31 | 32 | // strRemoveDelimiter 33 | static inline int strRemoveDelimiter( char delimiter, const char* string ) 34 | { 35 | int string_start = 0; 36 | 37 | while( string[string_start] == delimiter ) 38 | string_start++; 39 | 40 | if( string_start >= (int)strlen(string)-1 ) 41 | return 0; 42 | 43 | return string_start; 44 | } 45 | 46 | 47 | // constructor 48 | commandLine::commandLine( const int pArgc, char** pArgv ) 49 | { 50 | argc = pArgc; 51 | argv = pArgv; 52 | } 53 | 54 | 55 | // GetInt 56 | int commandLine::GetInt( const char* string_ref, int default_value ) const 57 | { 58 | if( argc < 1 ) 59 | return 0; 60 | 61 | bool bFound = false; 62 | int value = -1; 63 | 64 | for( int i=ARGC_START; i < argc; i++ ) 65 | { 66 | const int string_start = strRemoveDelimiter('-', argv[i]); 67 | 68 | if( string_start == 0 ) 69 | continue; 70 | 71 | const char* string_argv = &argv[i][string_start]; 72 | const int length = (int)strlen(string_ref); 73 | 74 | if (!strncasecmp(string_argv, string_ref, length)) 75 | { 76 | if (length+1 <= (int)strlen(string_argv)) 77 | { 78 | int auto_inc = (string_argv[length] == '=') ? 
1 : 0; 79 | value = atoi(&string_argv[length + auto_inc]); 80 | } 81 | else 82 | { 83 | value = 0; 84 | } 85 | 86 | bFound = true; 87 | continue; 88 | } 89 | } 90 | 91 | 92 | if (bFound) 93 | return value; 94 | 95 | return default_value; 96 | } 97 | 98 | 99 | // GetFloat 100 | float commandLine::GetFloat( const char* string_ref, float default_value ) const 101 | { 102 | if( argc < 1 ) 103 | return 0; 104 | 105 | bool bFound = false; 106 | float value = -1; 107 | 108 | for (int i=ARGC_START; i < argc; i++) 109 | { 110 | const int string_start = strRemoveDelimiter('-', argv[i]); 111 | 112 | if( string_start == 0 ) 113 | continue; 114 | 115 | const char* string_argv = &argv[i][string_start]; 116 | const int length = (int)strlen(string_ref); 117 | 118 | if (!strncasecmp(string_argv, string_ref, length)) 119 | { 120 | if (length+1 <= (int)strlen(string_argv)) 121 | { 122 | int auto_inc = (string_argv[length] == '=') ? 1 : 0; 123 | value = (float)atof(&string_argv[length + auto_inc]); 124 | } 125 | else 126 | { 127 | value = 0.f; 128 | } 129 | 130 | bFound = true; 131 | continue; 132 | } 133 | } 134 | 135 | if( bFound ) 136 | return value; 137 | 138 | return default_value; 139 | } 140 | 141 | 142 | // GetFlag 143 | bool commandLine::GetFlag( const char* string_ref ) const 144 | { 145 | if( argc < 1 ) 146 | return false; 147 | 148 | for (int i=ARGC_START; i < argc; i++) 149 | { 150 | const int string_start = strRemoveDelimiter('-', argv[i]); 151 | 152 | if( string_start == 0 ) 153 | continue; 154 | 155 | const char* string_argv = &argv[i][string_start]; 156 | const char* equal_pos = strchr(string_argv, '='); 157 | 158 | const int argv_length = (int)(equal_pos == 0 ? 
strlen(string_argv) : equal_pos - string_argv); 159 | const int length = (int)strlen(string_ref); 160 | 161 | if( length == argv_length && !strncasecmp(string_argv, string_ref, length) ) 162 | return true; 163 | } 164 | 165 | return false; 166 | } 167 | 168 | 169 | // GetString 170 | const char* commandLine::GetString( const char* string_ref, const char* default_value ) const 171 | { 172 | if( argc < 1 ) 173 | return 0; 174 | 175 | for (int i=ARGC_START; i < argc; i++) 176 | { 177 | const int string_start = strRemoveDelimiter('-', argv[i]); 178 | 179 | if( string_start == 0 ) 180 | continue; 181 | 182 | char* string_argv = (char*)&argv[i][string_start]; 183 | const int length = (int)strlen(string_ref); 184 | 185 | if (!strncasecmp(string_argv, string_ref, length)) 186 | return (string_argv + length + 1); 187 | //*string_retval = &string_argv[length+1]; 188 | } 189 | 190 | return default_value; 191 | } 192 | 193 | 194 | // GetPosition 195 | const char* commandLine::GetPosition( unsigned int position, const char* default_value ) const 196 | { 197 | if( argc < 1 ) 198 | return 0; 199 | 200 | unsigned int position_count = 0; 201 | 202 | for (int i=1/*ARGC_START*/; i < argc; i++) 203 | { 204 | const int string_start = strRemoveDelimiter('-', argv[i]); 205 | 206 | if( string_start != 0 ) 207 | continue; 208 | 209 | if( position == position_count ) 210 | return argv[i]; 211 | 212 | position_count++; 213 | } 214 | 215 | return default_value; 216 | } 217 | 218 | 219 | // GetPositionArgs 220 | unsigned int commandLine::GetPositionArgs() const 221 | { 222 | unsigned int position_count = 0; 223 | 224 | for (int i=1/*ARGC_START*/; i < argc; i++) 225 | { 226 | const int string_start = strRemoveDelimiter('-', argv[i]); 227 | 228 | if( string_start != 0 ) 229 | continue; 230 | 231 | position_count++; 232 | } 233 | 234 | return position_count; 235 | } 236 | -------------------------------------------------------------------------------- /commandLine.h: 
-------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a 5 | * copy of this software and associated documentation files (the "Software"), 6 | * to deal in the Software without restriction, including without limitation 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | * and/or sell copies of the Software, and to permit persons to whom the 9 | * Software is furnished to do so, subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in 12 | * all copies or substantial portions of the Software. 13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | * DEALINGS IN THE SOFTWARE. 21 | */ 22 | 23 | #ifndef __COMMAND_LINE_H_ 24 | #define __COMMAND_LINE_H_ 25 | 26 | 27 | #include 28 | 29 | 30 | /** 31 | * Command line parser class. 32 | * @ingroup util 33 | */ 34 | class commandLine 35 | { 36 | public: 37 | /** 38 | * Constructor, takes the command line from `main()` 39 | */ 40 | commandLine( const int argc, char** argv ); 41 | 42 | /** 43 | * Checks to see whether the specified flag was included on the 44 | * command line. 
For example, if argv contained `--foo`, then 45 | * `GetFlag("foo")` would return `true` 46 | * 47 | * @returns `true`, if the flag with argName was found 48 | * `false`, if the flag with argName was not found 49 | */ 50 | bool GetFlag( const char* argName ) const; 51 | 52 | /** 53 | * Get float argument. For example if argv contained `--foo=3.14159`, 54 | * then `GetInt("foo")` would return `3.14159f` 55 | * 56 | * @returns `defaultValue` if the argument couldn't be found. (`0.0` by default). 57 | * Otherwise, returns the value of the argument. 58 | */ 59 | float GetFloat( const char* argName, float defaultValue=0.0f ) const; 60 | 61 | /** 62 | * Get integer argument. For example if argv contained `--foo=100`, 63 | * then `GetInt("foo")` would return `100` 64 | * 65 | * @returns `defaultValue` if the argument couldn't be found (`0` by default). 66 | * Otherwise, returns the value of the argument. 67 | */ 68 | int GetInt( const char* argName, int defaultValue=0 ) const; 69 | 70 | /** 71 | * Get string argument. For example if argv contained `--foo=bar`, 72 | * then `GetString("foo")` would return `"bar"` 73 | * 74 | * @returns `defaultValue` if the argument couldn't be found (`NULL` by default). 75 | * Otherwise, returns a pointer to the argument value string 76 | * from the `argv` array. 77 | */ 78 | const char* GetString( const char* argName, const char* defaultValue=NULL ) const; 79 | 80 | /** 81 | * Get positional string argument. Positional arguments aren't named, but rather 82 | * referenced by their index in the list. For example if the command line contained 83 | * `my-program --foo=bar /path/to/my_file.txt`, then `GetString(0)` would return 84 | * `"/path/to/my_file.txt" 85 | * 86 | * @returns `defaultValue` if the argument couldn't be found (`NULL` by default). 87 | * Otherwise, returns a pointer to the argument value string 88 | * from the `argv` array. 
89 | */ 90 | const char* GetPosition( unsigned int position, const char* defaultValue=NULL ) const; 91 | 92 | /** 93 | * Get the number of positional arguments in the command line. 94 | * Positional arguments are those that don't have a name. 95 | */ 96 | unsigned int GetPositionArgs() const; 97 | 98 | /** 99 | * The argument count that the object was created with from main() 100 | */ 101 | int argc; 102 | 103 | /** 104 | * The argument strings that the object was created with from main() 105 | */ 106 | char** argv; 107 | }; 108 | 109 | 110 | #endif 111 | 112 | -------------------------------------------------------------------------------- /cvmatandqimage.cpp: -------------------------------------------------------------------------------- 1 | /**************************************************************************** 2 | ** Copyright (c) 2012-2015 Debao Zhang 3 | ** All right reserved. 4 | ** 5 | ** Permission is hereby granted, free of charge, to any person obtaining 6 | ** a copy of this software and associated documentation files (the 7 | ** "Software"), to deal in the Software without restriction, including 8 | ** without limitation the rights to use, copy, modify, merge, publish, 9 | ** distribute, sublicense, and/or sell copies of the Software, and to 10 | ** permit persons to whom the Software is furnished to do so, subject to 11 | ** the following conditions: 12 | ** 13 | ** The above copyright notice and this permission notice shall be 14 | ** included in all copies or substantial portions of the Software. 15 | ** 16 | ** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 | ** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 | ** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 19 | ** NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 20 | ** LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | ** OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 22 | ** WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 | ** 24 | ****************************************************************************/ 25 | 26 | #include "cvmatandqimage.h" 27 | #include 28 | #include 29 | #include 30 | #include 31 | #include "opencv2/core/core.hpp" 32 | #include "opencv2/imgproc/imgproc.hpp" 33 | 34 | namespace QtOcv { 35 | namespace { 36 | 37 | /*ARGB <==> BGRA 38 | */ 39 | cv::Mat argb2bgra(const cv::Mat &mat) 40 | { 41 | Q_ASSERT(mat.channels()==4); 42 | 43 | cv::Mat newMat(mat.rows, mat.cols, mat.type()); 44 | int from_to[] = {0,3, 1,2, 2,1, 3,0}; 45 | cv::mixChannels(&mat, 1, &newMat, 1, from_to, 4); 46 | return newMat; 47 | } 48 | 49 | cv::Mat adjustChannelsOrder(const cv::Mat &srcMat, MatColorOrder srcOrder, MatColorOrder targetOrder) 50 | { 51 | Q_ASSERT(srcMat.channels()==4); 52 | 53 | if (srcOrder == targetOrder) 54 | return srcMat.clone(); 55 | 56 | cv::Mat desMat; 57 | 58 | if ((srcOrder == MCO_ARGB && targetOrder == MCO_BGRA) 59 | ||(srcOrder == MCO_BGRA && targetOrder == MCO_ARGB)) { 60 | //ARGB <==> BGRA 61 | desMat = argb2bgra(srcMat); 62 | } else if (srcOrder == MCO_ARGB && targetOrder == MCO_RGBA) { 63 | //ARGB ==> RGBA 64 | desMat = cv::Mat(srcMat.rows, srcMat.cols, srcMat.type()); 65 | int from_to[] = {0,3, 1,0, 2,1, 3,2}; 66 | cv::mixChannels(&srcMat, 1, &desMat, 1, from_to, 4); 67 | } else if (srcOrder == MCO_RGBA && targetOrder == MCO_ARGB) { 68 | //RGBA ==> ARGB 69 | desMat = cv::Mat(srcMat.rows, srcMat.cols, srcMat.type()); 70 | int from_to[] = {0,1, 1,2, 2,3, 3,0}; 71 | cv::mixChannels(&srcMat, 1, &desMat, 1, from_to, 4); 72 | } else { 73 | //BGRA <==> RBGA 74 | cv::cvtColor(srcMat, desMat, CV_BGRA2RGBA); 75 | } 76 | return desMat; 77 | } 78 | 79 | QImage::Format 
findClosestFormat(QImage::Format formatHint) 80 | { 81 | QImage::Format format; 82 | switch (formatHint) { 83 | case QImage::Format_Indexed8: 84 | case QImage::Format_RGB32: 85 | case QImage::Format_ARGB32: 86 | case QImage::Format_ARGB32_Premultiplied: 87 | #if QT_VERSION >= 0x040400 88 | case QImage::Format_RGB888: 89 | #endif 90 | #if QT_VERSION >= 0x050200 91 | case QImage::Format_RGBX8888: 92 | case QImage::Format_RGBA8888: 93 | case QImage::Format_RGBA8888_Premultiplied: 94 | #endif 95 | #if QT_VERSION >= 0x050500 96 | case QImage::Format_Alpha8: 97 | case QImage::Format_Grayscale8: 98 | #endif 99 | format = formatHint; 100 | break; 101 | case QImage::Format_Mono: 102 | case QImage::Format_MonoLSB: 103 | format = QImage::Format_Indexed8; 104 | break; 105 | case QImage::Format_RGB16: 106 | format = QImage::Format_RGB32; 107 | break; 108 | #if QT_VERSION > 0x040400 109 | case QImage::Format_RGB444: 110 | case QImage::Format_RGB555: 111 | case QImage::Format_RGB666: 112 | format = QImage::Format_RGB888; 113 | break; 114 | case QImage::Format_ARGB4444_Premultiplied: 115 | case QImage::Format_ARGB6666_Premultiplied: 116 | case QImage::Format_ARGB8555_Premultiplied: 117 | case QImage::Format_ARGB8565_Premultiplied: 118 | format = QImage::Format_ARGB32_Premultiplied; 119 | break; 120 | #endif 121 | default: 122 | format = QImage::Format_ARGB32; 123 | break; 124 | } 125 | return format; 126 | } 127 | 128 | MatColorOrder getColorOrderOfRGB32Format() 129 | { 130 | #if Q_BYTE_ORDER == Q_LITTLE_ENDIAN 131 | return MCO_BGRA; 132 | #else 133 | return MCO_ARGB; 134 | #endif 135 | } 136 | } //namespace 137 | 138 | 139 | /* Convert QImage to cv::Mat 140 | */ 141 | cv::Mat image2Mat(const QImage &img, int requiredMatType, MatColorOrder requriedOrder) 142 | { 143 | int targetDepth = CV_MAT_DEPTH(requiredMatType); 144 | int targetChannels = CV_MAT_CN(requiredMatType); 145 | Q_ASSERT(targetChannels==CV_CN_MAX || targetChannels==1 || targetChannels==3 || targetChannels==4); 146 | 
Q_ASSERT(targetDepth==CV_8U || targetDepth==CV_16U || targetDepth==CV_32F); 147 | 148 | if (img.isNull()) 149 | return cv::Mat(); 150 | 151 | //Find the closest image format that can be used in image2Mat_shared() 152 | QImage::Format format = findClosestFormat(img.format()); 153 | QImage image = (format==img.format()) ? img : img.convertToFormat(format); 154 | 155 | MatColorOrder srcOrder; 156 | cv::Mat mat0 = image2Mat_shared(image, &srcOrder); 157 | 158 | //Adjust mat channells if needed. 159 | cv::Mat mat_adjustCn; 160 | const float maxAlpha = targetDepth==CV_8U ? 255 : (targetDepth==CV_16U ? 65535 : 1.0); 161 | if (targetChannels == CV_CN_MAX) 162 | targetChannels = mat0.channels(); 163 | switch(targetChannels) { 164 | case 1: 165 | if (mat0.channels() == 3) { 166 | cv::cvtColor(mat0, mat_adjustCn, CV_RGB2GRAY); 167 | } else if (mat0.channels() == 4) { 168 | if (srcOrder == MCO_BGRA) 169 | cv::cvtColor(mat0, mat_adjustCn, CV_BGRA2GRAY); 170 | else if (srcOrder == MCO_RGBA) 171 | cv::cvtColor(mat0, mat_adjustCn, CV_RGBA2GRAY); 172 | else//MCO_ARGB 173 | cv::cvtColor(argb2bgra(mat0), mat_adjustCn, CV_BGRA2GRAY); 174 | } 175 | break; 176 | case 3: 177 | if (mat0.channels() == 1) { 178 | cv::cvtColor(mat0, mat_adjustCn, requriedOrder == MCO_BGR ? CV_GRAY2BGR : CV_GRAY2RGB); 179 | } else if (mat0.channels() == 3) { 180 | if (requriedOrder != srcOrder) 181 | cv::cvtColor(mat0, mat_adjustCn, CV_RGB2BGR); 182 | } else if (mat0.channels() == 4) { 183 | if (srcOrder == MCO_ARGB) { 184 | mat_adjustCn = cv::Mat(mat0.rows, mat0.cols, CV_MAKE_TYPE(mat0.type(), 3)); 185 | int ARGB2RGB[] = {1,0, 2,1, 3,2}; 186 | int ARGB2BGR[] = {1,2, 2,1, 3,0}; 187 | cv::mixChannels(&mat0, 1, &mat_adjustCn, 1, requriedOrder == MCO_BGR ? ARGB2BGR : ARGB2RGB, 3); 188 | } else if (srcOrder == MCO_BGRA) { 189 | cv::cvtColor(mat0, mat_adjustCn, requriedOrder == MCO_BGR ? CV_BGRA2BGR : CV_BGRA2RGB); 190 | } else {//RGBA 191 | cv::cvtColor(mat0, mat_adjustCn, requriedOrder == MCO_BGR ? 
CV_RGBA2BGR : CV_RGBA2RGB); 192 | } 193 | } 194 | break; 195 | case 4: 196 | if (mat0.channels() == 1) { 197 | if (requriedOrder == MCO_ARGB) { 198 | cv::Mat alphaMat(mat0.rows, mat0.cols, CV_MAKE_TYPE(mat0.type(), 1), cv::Scalar(maxAlpha)); 199 | mat_adjustCn = cv::Mat(mat0.rows, mat0.cols, CV_MAKE_TYPE(mat0.type(), 4)); 200 | cv::Mat in[] = {alphaMat, mat0}; 201 | int from_to[] = {0,0, 1,1, 1,2, 1,3}; 202 | cv::mixChannels(in, 2, &mat_adjustCn, 1, from_to, 4); 203 | } else if (requriedOrder == MCO_RGBA) { 204 | cv::cvtColor(mat0, mat_adjustCn, CV_GRAY2RGBA); 205 | } else {//MCO_BGRA 206 | cv::cvtColor(mat0, mat_adjustCn, CV_GRAY2BGRA); 207 | } 208 | } else if (mat0.channels() == 3) { 209 | if (requriedOrder == MCO_ARGB) { 210 | cv::Mat alphaMat(mat0.rows, mat0.cols, CV_MAKE_TYPE(mat0.type(), 1), cv::Scalar(maxAlpha)); 211 | mat_adjustCn = cv::Mat(mat0.rows, mat0.cols, CV_MAKE_TYPE(mat0.type(), 4)); 212 | cv::Mat in[] = {alphaMat, mat0}; 213 | int from_to[] = {0,0, 1,1, 2,2, 3,3}; 214 | cv::mixChannels(in, 2, &mat_adjustCn, 1, from_to, 4); 215 | } else if (requriedOrder == MCO_RGBA) { 216 | cv::cvtColor(mat0, mat_adjustCn, CV_RGB2RGBA); 217 | } else {//MCO_BGRA 218 | cv::cvtColor(mat0, mat_adjustCn, CV_RGB2BGRA); 219 | } 220 | } else if (mat0.channels() == 4) { 221 | if (srcOrder != requriedOrder) 222 | mat_adjustCn = adjustChannelsOrder(mat0, srcOrder, requriedOrder); 223 | } 224 | break; 225 | default: 226 | break; 227 | } 228 | 229 | //Adjust depth if needed. 230 | if (targetDepth == CV_8U) 231 | return mat_adjustCn.empty() ? mat0.clone() : mat_adjustCn; 232 | 233 | if (mat_adjustCn.empty()) 234 | mat_adjustCn = mat0; 235 | cv::Mat mat_adjustDepth; 236 | mat_adjustCn.convertTo(mat_adjustDepth, CV_MAKE_TYPE(targetDepth, mat_adjustCn.channels()), targetDepth == CV_16U ? 
255.0 : 1/255.0); 237 | return mat_adjustDepth; 238 | } 239 | 240 | /* Convert cv::Mat to QImage 241 | */ 242 | QImage mat2Image(const cv::Mat &mat, MatColorOrder order, QImage::Format formatHint) 243 | { 244 | Q_ASSERT(mat.channels()==1 || mat.channels()==3 || mat.channels()==4); 245 | Q_ASSERT(mat.depth()==CV_8U || mat.depth()==CV_16U || mat.depth()==CV_32F); 246 | 247 | if (mat.empty()) 248 | return QImage(); 249 | 250 | //Adjust mat channels if needed, and find proper QImage format. 251 | QImage::Format format; 252 | cv::Mat mat_adjustCn; 253 | if (mat.channels() == 1) { 254 | format = formatHint; 255 | if (formatHint != QImage::Format_Indexed8 256 | #if QT_VERSION >= 0x050500 257 | && formatHint != QImage::Format_Alpha8 258 | && formatHint != QImage::Format_Grayscale8 259 | #endif 260 | ) { 261 | format = QImage::Format_Indexed8; 262 | } 263 | } else if (mat.channels() == 3) { 264 | #if QT_VERSION >= 0x040400 265 | format = QImage::Format_RGB888; 266 | if (order == MCO_BGR) 267 | cv::cvtColor(mat, mat_adjustCn, CV_BGR2RGB); 268 | #else 269 | format = QImage::Format_RGB32; 270 | cv::Mat mat_tmp; 271 | cv::cvtColor(mat, mat_tmp, order == MCO_BGR ? CV_BGR2BGRA : CV_RGB2BGRA); 272 | #if Q_BYTE_ORDER == Q_LITTLE_ENDIAN 273 | mat_adjustCn = mat_tmp; 274 | #else 275 | mat_adjustCn = argb2bgra(mat_tmp); 276 | #endif 277 | 278 | #endif 279 | } else if (mat.channels() == 4) { 280 | //Find best format if the formatHint can not be applied. 281 | format = findClosestFormat(formatHint); 282 | if (format != QImage::Format_RGB32 283 | && format != QImage::Format_ARGB32 284 | && format != QImage::Format_ARGB32_Premultiplied 285 | #if QT_VERSION >= 0x050200 286 | && format != QImage::Format_RGBX8888 287 | && format != QImage::Format_RGBA8888 288 | && format != QImage::Format_RGBA8888_Premultiplied 289 | #endif 290 | ) { 291 | #if QT_VERSION >= 0x050200 292 | format = order == MCO_RGBA ? 
QImage::Format_RGBA8888 : QImage::Format_ARGB32; 293 | #else 294 | format = QImage::Format_ARGB32; 295 | #endif 296 | } 297 | 298 | //Channel order requried by the target QImage 299 | MatColorOrder requiredOrder = getColorOrderOfRGB32Format(); 300 | #if QT_VERSION >= 0x050200 301 | if (formatHint == QImage::Format_RGBX8888 302 | || formatHint == QImage::Format_RGBA8888 303 | || formatHint == QImage::Format_RGBA8888_Premultiplied) { 304 | requiredOrder = MCO_RGBA; 305 | } 306 | #endif 307 | 308 | if (order != requiredOrder) 309 | mat_adjustCn = adjustChannelsOrder(mat, order, requiredOrder); 310 | } 311 | 312 | if (mat_adjustCn.empty()) 313 | mat_adjustCn = mat; 314 | 315 | //Adjust mat depth if needed. 316 | cv::Mat mat_adjustDepth = mat_adjustCn; 317 | if (mat.depth() != CV_8U) 318 | mat_adjustCn.convertTo(mat_adjustDepth, CV_8UC(mat_adjustCn.channels()), mat.depth() == CV_16U ? 1/255.0 : 255.0); 319 | 320 | //Should we convert the image to the format specified by formatHint? 321 | QImage image = mat2Image_shared(mat_adjustDepth, format); 322 | if (format == formatHint || formatHint == QImage::Format_Invalid) 323 | return image.copy(); 324 | else 325 | return image.convertToFormat(formatHint); 326 | } 327 | 328 | /* Convert QImage to cv::Mat without data copy 329 | */ 330 | cv::Mat image2Mat_shared(const QImage &img, MatColorOrder *order) 331 | { 332 | if (img.isNull()) 333 | return cv::Mat(); 334 | 335 | switch (img.format()) { 336 | case QImage::Format_Indexed8: 337 | break; 338 | #if QT_VERSION >= 0x040400 339 | case QImage::Format_RGB888: 340 | if (order) 341 | *order = MCO_RGB; 342 | break; 343 | #endif 344 | case QImage::Format_RGB32: 345 | case QImage::Format_ARGB32: 346 | case QImage::Format_ARGB32_Premultiplied: 347 | if (order) 348 | *order = getColorOrderOfRGB32Format(); 349 | break; 350 | #if QT_VERSION >= 0x050200 351 | case QImage::Format_RGBX8888: 352 | case QImage::Format_RGBA8888: 353 | case QImage::Format_RGBA8888_Premultiplied: 354 | if (order) 
355 | *order = MCO_RGBA; 356 | break; 357 | #endif 358 | #if QT_VERSION >= 0x050500 359 | case QImage::Format_Alpha8: 360 | case QImage::Format_Grayscale8: 361 | break; 362 | #endif 363 | default: 364 | return cv::Mat(); 365 | } 366 | return cv::Mat(img.height(), img.width(), CV_8UC(img.depth()/8), (uchar*)img.bits(), img.bytesPerLine()); 367 | } 368 | 369 | /* Convert cv::Mat to QImage without data copy 370 | */ 371 | QImage mat2Image_shared(const cv::Mat &mat, QImage::Format formatHint) 372 | { 373 | Q_ASSERT(mat.type() == CV_8UC1 || mat.type() == CV_8UC3 || mat.type() == CV_8UC4); 374 | 375 | if (mat.empty()) 376 | return QImage(); 377 | 378 | //Adjust formatHint if needed. 379 | if (mat.type() == CV_8UC1) { 380 | if (formatHint != QImage::Format_Indexed8 381 | #if QT_VERSION >= 0x050500 382 | && formatHint != QImage::Format_Alpha8 383 | && formatHint != QImage::Format_Grayscale8 384 | #endif 385 | ) { 386 | formatHint = QImage::Format_Indexed8; 387 | } 388 | #if QT_VERSION >= 0x040400 389 | } else if (mat.type() == CV_8UC3) { 390 | formatHint = QImage::Format_RGB888; 391 | #endif 392 | } else if (mat.type() == CV_8UC4) { 393 | if (formatHint != QImage::Format_RGB32 394 | && formatHint != QImage::Format_ARGB32 395 | && formatHint != QImage::Format_ARGB32_Premultiplied 396 | #if QT_VERSION >= 0x050200 397 | && formatHint != QImage::Format_RGBX8888 398 | && formatHint != QImage::Format_RGBA8888 399 | && formatHint != QImage::Format_RGBA8888_Premultiplied 400 | #endif 401 | ) { 402 | formatHint = QImage::Format_ARGB32; 403 | } 404 | } 405 | 406 | QImage img(mat.data, mat.cols, mat.rows, mat.step, formatHint); 407 | 408 | //Should we add directly support for user-customed-colorTable? 
409 | if (formatHint == QImage::Format_Indexed8) { 410 | QVector colorTable; 411 | for (int i=0; i<256; ++i) 412 | colorTable.append(qRgb(i,i,i)); 413 | img.setColorTable(colorTable); 414 | } 415 | return img; 416 | } 417 | 418 | } //namespace QtOcv 419 | -------------------------------------------------------------------------------- /cvmatandqimage.h: -------------------------------------------------------------------------------- 1 | /**************************************************************************** 2 | ** Copyright (c) 2012-2015 Debao Zhang 3 | ** All right reserved. 4 | ** 5 | ** Permission is hereby granted, free of charge, to any person obtaining 6 | ** a copy of this software and associated documentation files (the 7 | ** "Software"), to deal in the Software without restriction, including 8 | ** without limitation the rights to use, copy, modify, merge, publish, 9 | ** distribute, sublicense, and/or sell copies of the Software, and to 10 | ** permit persons to whom the Software is furnished to do so, subject to 11 | ** the following conditions: 12 | ** 13 | ** The above copyright notice and this permission notice shall be 14 | ** included in all copies or substantial portions of the Software. 15 | ** 16 | ** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 | ** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 | ** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 19 | ** NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 20 | ** LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | ** OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 22 | ** WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
23 | ** 24 | ****************************************************************************/ 25 | 26 | #ifndef CVMATANDQIMAGE_H 27 | #define CVMATANDQIMAGE_H 28 | 29 | #include 30 | #include 31 | 32 | namespace QtOcv { 33 | 34 | enum MatColorOrder { 35 | MCO_BGR, 36 | MCO_RGB, 37 | MCO_BGRA = MCO_BGR, 38 | MCO_RGBA = MCO_RGB, 39 | MCO_ARGB 40 | }; 41 | 42 | 43 | /* Convert QImage to/from cv::Mat 44 | * 45 | * - cv::Mat 46 | * - Supported channels 47 | * - 1 channel 48 | * - 3 channels (B G R), (R G B) 49 | * - 4 channels (B G R A), (R G B A), (A R G B) 50 | * - Supported depth 51 | * - CV_8U [0, 255] 52 | * - CV_16U [0, 65535] 53 | * - CV_32F [0, 1.0] 54 | * 55 | * - QImage 56 | * - All of the formats of QImage are supported. 57 | */ 58 | cv::Mat image2Mat(const QImage &img, int requiredMatType = CV_8UC(0), MatColorOrder requiredOrder=MCO_BGR); 59 | QImage mat2Image(const cv::Mat &mat, MatColorOrder order=MCO_BGR, QImage::Format formatHint = QImage::Format_Invalid); 60 | 61 | /* Convert QImage to/from cv::Mat without data copy 62 | * 63 | * - Supported QImage formats and cv::Mat types are: 64 | * - QImage::Format_Indexed8 <==> CV_8UC1 65 | * - QImage::Format_Alpha8 <==> CV_8UC1 66 | * - QImage::Format_Grayscale8 <==> CV_8UC1 67 | * - QImage::Format_RGB888 <==> CV_8UC3 (R G B) 68 | * - QImage::Format_RGB32 <==> CV_8UC4 (A R G B or B G R A) 69 | * - QImage::Format_ARGB32 <==> CV_8UC4 (A R G B or B G R A) 70 | * - QImage::Format_ARGB32_Premultiplied <==> CV_8UC4 (A R G B or B G R A) 71 | * - QImage::Format_RGBX8888 <==> CV_8UC4 (R G B A) 72 | * - QImage::Format_RGBA8888 <==> CV_8UC4 (R G B A) 73 | * - QImage::Format_RGBA8888_Premultiplied <==> CV_8UC4 (R G B A) 74 | * 75 | * - For QImage::Format_RGB32 ,QImage::Format_ARGB32 76 | * and QImage::Format_ARGB32_Premultiplied, the 77 | * color channel order of cv::Mat will be (B G R A) in little 78 | * endian system or (A R G B) in big endian system. 
79 | * 80 | * - User must make sure that the color channels order is the same as 81 | * the color channels order requried by QImage. 82 | */ 83 | cv::Mat image2Mat_shared(const QImage &img, MatColorOrder *order=0); 84 | QImage mat2Image_shared(const cv::Mat &mat, QImage::Format formatHint = QImage::Format_Invalid); 85 | 86 | } //namespace QtOcv 87 | 88 | #endif // CVMATANDQIMAGE_H 89 | -------------------------------------------------------------------------------- /detectNet.cu: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. 3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a 5 | * copy of this software and associated documentation files (the "Software"), 6 | * to deal in the Software without restriction, including without limitation 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | * and/or sell copies of the Software, and to permit persons to whom the 9 | * Software is furnished to do so, subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in 12 | * all copies or substantial portions of the Software. 13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | * DEALINGS IN THE SOFTWARE. 
21 | */ 22 | 23 | #include "detectNet.h" 24 | #include "cudaUtility.h" 25 | 26 | 27 | 28 | template 29 | __global__ void gpuDetectionOverlay( T* input, T* output, int width, int height, detectNet::Detection* detections, int numDetections, float4* colors ) 30 | { 31 | const int x = blockIdx.x * blockDim.x + threadIdx.x; 32 | const int y = blockIdx.y * blockDim.y + threadIdx.y; 33 | 34 | if( x >= width || y >= height ) 35 | return; 36 | 37 | const T px_in = input[ y * width + x ]; 38 | T px_out = px_in; 39 | 40 | const float fx = x; 41 | const float fy = y; 42 | 43 | for( int n=0; n < numDetections; n++ ) 44 | { 45 | const detectNet::Detection det = detections[n]; 46 | 47 | // check if this pixel is inside the bounding box 48 | if( fx >= det.Left && fx <= det.Right && fy >= det.Top && fy <= det.Bottom ) 49 | { 50 | const float4 color = colors[det.ClassID]; 51 | 52 | const float alpha = color.w / 255.0f; 53 | const float ialph = 1.0f - alpha; 54 | 55 | px_out.x = alpha * color.x + ialph * px_out.x; 56 | px_out.y = alpha * color.y + ialph * px_out.y; 57 | px_out.z = alpha * color.z + ialph * px_out.z; 58 | } 59 | } 60 | 61 | output[y * width + x] = px_out; 62 | } 63 | 64 | 65 | template 66 | __global__ void gpuDetectionOverlayBox( T* input, T* output, int imgWidth, int imgHeight, int x0, int y0, int boxWidth, int boxHeight, const float4 color ) 67 | { 68 | const int box_x = blockIdx.x * blockDim.x + threadIdx.x; 69 | const int box_y = blockIdx.y * blockDim.y + threadIdx.y; 70 | 71 | if( box_x >= boxWidth || box_y >= boxHeight ) 72 | return; 73 | 74 | const int x = box_x + x0; 75 | const int y = box_y + y0; 76 | 77 | if( x >= imgWidth || y >= imgHeight ) 78 | return; 79 | 80 | const T px_in = input[ y * imgWidth + x ]; 81 | 82 | const float alpha = color.w / 255.0f; 83 | const float ialph = 1.0f - alpha; 84 | 85 | output[y * imgWidth + x] = make_float4( alpha * color.x + ialph * px_in.x, 86 | alpha * color.y + ialph * px_in.y, 87 | alpha * color.z + ialph * px_in.z, 88 
| px_in.w ); 89 | } 90 | 91 | cudaError_t cudaDetectionOverlay( float4* input, float4* output, uint32_t width, uint32_t height, detectNet::Detection* detections, int numDetections, float4* colors ) 92 | { 93 | if( !input || !output || width == 0 || height == 0 || !detections || numDetections == 0 || !colors ) 94 | return cudaErrorInvalidValue; 95 | 96 | // if input and output are the same image, then we can use the faster method 97 | // which draws 1 box per kernel, but doesn't copy pixels that aren't inside boxes 98 | if( input == output ) 99 | { 100 | for( int n=0; n < numDetections; n++ ) 101 | { 102 | const int boxWidth = (int)detections[n].Width(); 103 | const int boxHeight = (int)detections[n].Height(); 104 | 105 | // launch kernel 106 | const dim3 blockDim(8, 8); 107 | const dim3 gridDim(iDivUp(boxWidth,blockDim.x), iDivUp(boxHeight,blockDim.y)); 108 | 109 | gpuDetectionOverlayBox<<>>(input, output, width, height, (int)detections[n].Left, (int)detections[n].Top, boxWidth, boxHeight, colors[detections[n].ClassID]); 110 | } 111 | } 112 | else 113 | { 114 | // launch kernel 115 | const dim3 blockDim(8, 8); 116 | const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y)); 117 | 118 | gpuDetectionOverlay<<>>(input, output, width, height, detections, numDetections, colors); 119 | } 120 | 121 | return cudaGetLastError(); 122 | } 123 | 124 | -------------------------------------------------------------------------------- /detectNet.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a 5 | * copy of this software and associated documentation files (the "Software"), 6 | * to deal in the Software without restriction, including without limitation 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | * and/or sell copies of the Software, and to permit persons to whom the 9 | * Software is furnished to do so, subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in 12 | * all copies or substantial portions of the Software. 13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | * DEALINGS IN THE SOFTWARE. 21 | */ 22 | 23 | #ifndef __DETECT_NET_H__ 24 | #define __DETECT_NET_H__ 25 | 26 | 27 | #include "tensorNet.h" 28 | 29 | 30 | /** 31 | * Name of default input blob for DetectNet caffe model. 32 | * @ingroup detectNet 33 | */ 34 | #define DETECTNET_DEFAULT_INPUT "data" 35 | 36 | /** 37 | * Name of default output blob of the coverage map for DetectNet caffe model. 38 | * @ingroup detectNet 39 | */ 40 | #define DETECTNET_DEFAULT_COVERAGE "coverage" 41 | 42 | /** 43 | * Name of default output blob of the grid of bounding boxes for DetectNet caffe model. 
44 | * @ingroup detectNet 45 | */ 46 | #define DETECTNET_DEFAULT_BBOX "bboxes" 47 | 48 | /** 49 | * Default value of the minimum detection threshold 50 | * @ingroup detectNet 51 | */ 52 | #define DETECTNET_DEFAULT_THRESHOLD 0.5f 53 | 54 | /** 55 | * Default alpha blending value used during overlay 56 | * @ingroup detectNet 57 | */ 58 | #define DETECTNET_DEFAULT_ALPHA 120 59 | 60 | /** 61 | * Command-line options able to be passed to imageNet::Create() 62 | * @ingroup imageNet 63 | */ 64 | #define DETECTNET_USAGE_STRING "detectNet arguments: \n" \ 65 | " --network NETWORK pre-trained model to load, one of the following:\n" \ 66 | " * ssd-mobilenet-v1\n" \ 67 | " * ssd-mobilenet-v2 (default)\n" \ 68 | " * ssd-inception-v2\n" \ 69 | " * pednet\n" \ 70 | " * multiped\n" \ 71 | " * facenet\n" \ 72 | " * coco-airplane\n" \ 73 | " * coco-bottle\n" \ 74 | " * coco-chair\n" \ 75 | " * coco-dog\n" \ 76 | " --model MODEL path to custom model to load (caffemodel, uff, or onnx)\n" \ 77 | " --prototxt PROTOTXT path to custom prototxt to load (for .caffemodel only)\n" \ 78 | " --class_labels LABELS path to text file containing the labels for each class\n" \ 79 | " --threshold THRESHOLD minimum threshold for detection (default is 0.5)\n" \ 80 | " --input_blob INPUT name of the input layer (default is '" DETECTNET_DEFAULT_INPUT "')\n" \ 81 | " --output_cvg COVERAGE name of the coverge output layer (default is '" DETECTNET_DEFAULT_COVERAGE "')\n" \ 82 | " --output_bbox BOXES name of the bounding output layer (default is '" DETECTNET_DEFAULT_BBOX "')\n" \ 83 | " --mean_pixel PIXEL mean pixel value to subtract from input (default is 0.0)\n" \ 84 | " --batch_size BATCH maximum batch size (default is 1)\n" \ 85 | " --alpha ALPHA overlay alpha blending value, range 0-255 (default: 120)\n" \ 86 | " --profile enable layer profiling in TensorRT\n" 87 | 88 | 89 | /** 90 | * Object recognition and localization networks with TensorRT support. 
91 | * @ingroup detectNet 92 | */ 93 | class detectNet : public tensorNet 94 | { 95 | public: 96 | /** 97 | * Object Detection result. 98 | */ 99 | struct Detection 100 | { 101 | // Object Info 102 | uint32_t Instance; /**< Index of this unique object instance */ 103 | uint32_t ClassID; /**< Class index of the detected object. */ 104 | float Confidence; /**< Confidence value of the detected object. */ 105 | 106 | // Bounding Box Coordinates 107 | float Left; /**< Left bounding box coordinate (in pixels) */ 108 | float Right; /**< Right bounding box coordinate (in pixels) */ 109 | float Top; /**< Top bounding box cooridnate (in pixels) */ 110 | float Bottom; /**< Bottom bounding box coordinate (in pixels) */ 111 | 112 | /**< Calculate the width of the object */ 113 | inline float Width() const { return Right - Left; } 114 | 115 | /**< Calculate the height of the object */ 116 | inline float Height() const { return Bottom - Top; } 117 | 118 | /**< Calculate the area of the object */ 119 | inline float Area() const { return Width() * Height(); } 120 | 121 | /**< Calculate the width of the bounding box */ 122 | static inline float Width( float x1, float x2 ) { return x2 - x1; } 123 | 124 | /**< Calculate the height of the bounding box */ 125 | static inline float Height( float y1, float y2 ) { return y2 - y1; } 126 | 127 | /**< Calculate the area of the bounding box */ 128 | static inline float Area( float x1, float y1, float x2, float y2 ) { return Width(x1,x2) * Height(y1,y2); } 129 | 130 | /**< Return the center of the object */ 131 | inline void Center( float* x, float* y ) const { if(x) *x = Left + Width() * 0.5f; if(y) *y = Top + Height() * 0.5f; } 132 | 133 | /**< Return true if the coordinate is inside the bounding box */ 134 | inline bool Contains( float x, float y ) const { return x >= Left && x <= Right && y >= Top && y <= Bottom; } 135 | 136 | /**< Return true if the bounding boxes intersect and exceeds area % threshold */ 137 | inline bool Intersects( 
const Detection& det, float areaThreshold=0.0f ) const { return (IntersectionArea(det) / fmaxf(Area(), det.Area()) > areaThreshold); } 138 | 139 | /**< Return true if the bounding boxes intersect and exceeds area % threshold */ 140 | inline bool Intersects( float x1, float y1, float x2, float y2, float areaThreshold=0.0f ) const { return (IntersectionArea(x1,y1,x2,y2) / fmaxf(Area(), Area(x1,y1,x2,y2)) > areaThreshold); } 141 | 142 | /**< Return the area of the bounding box intersection */ 143 | inline float IntersectionArea( const Detection& det ) const { return IntersectionArea(det.Left, det.Top, det.Right, det.Bottom); } 144 | 145 | /**< Return the area of the bounding box intersection */ 146 | inline float IntersectionArea( float x1, float y1, float x2, float y2 ) const { if(!Overlaps(x1,y1,x2,y2)) return 0.0f; return (fminf(Right, x2) - fmaxf(Left, x1)) * (fminf(Bottom, y2) - fmaxf(Top, y1)); } 147 | 148 | /**< Return true if the bounding boxes overlap */ 149 | inline bool Overlaps( const Detection& det ) const { return !(det.Left > Right || det.Right < Left || det.Top > Bottom || det.Bottom < Top); } 150 | 151 | /**< Return true if the bounding boxes overlap */ 152 | inline bool Overlaps( float x1, float y1, float x2, float y2 ) const { return !(x1 > Right || x2 < Left || y1 > Bottom || y2 < Top); } 153 | 154 | /**< Expand the bounding box if they overlap (return true if so) */ 155 | inline bool Expand( float x1, float y1, float x2, float y2 ) { if(!Overlaps(x1, y1, x2, y2)) return false; Left = fminf(x1, Left); Top = fminf(y1, Top); Right = fmaxf(x2, Right); Bottom = fmaxf(y2, Bottom); return true; } 156 | 157 | /**< Expand the bounding box if they overlap (return true if so) */ 158 | inline bool Expand( const Detection& det ) { if(!Overlaps(det)) return false; Left = fminf(det.Left, Left); Top = fminf(det.Top, Top); Right = fmaxf(det.Right, Right); Bottom = fmaxf(det.Bottom, Bottom); return true; } 159 | 160 | /**< Reset all member variables to zero */ 161 
| inline void Reset() { Instance = 0; ClassID = 0; Confidence = 0; Left = 0; Right = 0; Top = 0; Bottom = 0; } 162 | 163 | /**< Default constructor */ 164 | inline Detection() { Reset(); } 165 | }; 166 | 167 | /** 168 | * Overlay flags (can be OR'd together). 169 | */ 170 | enum OverlayFlags 171 | { 172 | OVERLAY_NONE = 0, /**< No overlay. */ 173 | OVERLAY_BOX = (1 << 0), /**< Overlay the object bounding boxes */ 174 | OVERLAY_LABEL = (1 << 1), /**< Overlay the class description labels */ 175 | OVERLAY_CONFIDENCE = (1 << 2), /**< Overlay the detection confidence values */ 176 | }; 177 | 178 | /** 179 | * Network choice enumeration. 180 | */ 181 | enum NetworkType 182 | { 183 | CUSTOM = 0, /**< Custom model from user */ 184 | COCO_AIRPLANE, /**< MS-COCO airplane class */ 185 | COCO_BOTTLE, /**< MS-COCO bottle class */ 186 | COCO_CHAIR, /**< MS-COCO chair class */ 187 | COCO_DOG, /**< MS-COCO dog class */ 188 | FACENET, /**< Human facial detector trained on FDDB */ 189 | PEDNET, /**< Pedestrian / person detector */ 190 | PEDNET_MULTI, /**< Multi-class pedestrian + baggage detector */ 191 | 192 | #if NV_TENSORRT_MAJOR > 4 193 | SSD_MOBILENET_V1, /**< SSD Mobilenet-v1 UFF model, trained on MS-COCO */ 194 | SSD_MOBILENET_V2, /**< SSD Mobilenet-v2 UFF model, trained on MS-COCO */ 195 | SSD_INCEPTION_V2 /**< SSD Inception-v2 UFF model, trained on MS-COCO */ 196 | #endif 197 | }; 198 | 199 | /** 200 | * Parse a string to one of the built-in pretrained models. 201 | * Valid names are "pednet", "multiped", "facenet", "face", "coco-airplane", "airplane", 202 | * "coco-bottle", "bottle", "coco-chair", "chair", "coco-dog", or "dog". 203 | * @returns one of the detectNet::NetworkType enums, or detectNet::CUSTOM on invalid string. 204 | */ 205 | static NetworkType NetworkTypeFromStr( const char* model_name ); 206 | 207 | /** 208 | * Parse a string sequence into OverlayFlags enum. 
209 | * Valid flags are "none", "box", "label", and "conf" and it is possible to combine flags 210 | * (bitwise OR) together with commas or pipe (|) symbol. For example, the string sequence 211 | * "box,label,conf" would return the flags `OVERLAY_BOX|OVERLAY_LABEL|OVERLAY_CONFIDENCE`. 212 | */ 213 | static uint32_t OverlayFlagsFromStr( const char* flags ); 214 | 215 | /** 216 | * Load a new network instance 217 | * @param networkType type of pre-supported network to load 218 | * @param threshold default minimum threshold for detection 219 | * @param maxBatchSize The maximum batch size that the network will support and be optimized for. 220 | */ 221 | static detectNet* Create( NetworkType networkType=PEDNET_MULTI, float threshold=DETECTNET_DEFAULT_THRESHOLD, 222 | uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, 223 | deviceType device=DEVICE_GPU, bool allowGPUFallback=true ); 224 | 225 | /** 226 | * Load a custom network instance 227 | * @param prototxt_path File path to the deployable network prototxt 228 | * @param model_path File path to the caffemodel 229 | * @param mean_binary File path to the mean value binary proto 230 | * @param class_labels File path to list of class name labels 231 | * @param threshold default minimum threshold for detection 232 | * @param input Name of the input layer blob. 233 | * @param coverage Name of the output coverage classifier layer blob, which contains the confidence values for each bbox. 234 | * @param bboxes Name of the output bounding box layer blob, which contains a grid of rectangles in the image. 235 | * @param maxBatchSize The maximum batch size that the network will support and be optimized for. 
236 | */ 237 | static detectNet* Create( const char* prototxt_path, const char* model_path, const char* mean_binary, 238 | const char* class_labels, float threshold=DETECTNET_DEFAULT_THRESHOLD, 239 | const char* input = DETECTNET_DEFAULT_INPUT, 240 | const char* coverage = DETECTNET_DEFAULT_COVERAGE, 241 | const char* bboxes = DETECTNET_DEFAULT_BBOX, 242 | uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, 243 | precisionType precision=TYPE_FASTEST, 244 | deviceType device=DEVICE_GPU, bool allowGPUFallback=true ); 245 | 246 | /** 247 | * Load a custom network instance 248 | * @param prototxt_path File path to the deployable network prototxt 249 | * @param model_path File path to the caffemodel 250 | * @param mean_pixel Input transform subtraction value (use 0.0 if the network already does this) 251 | * @param class_labels File path to list of class name labels 252 | * @param threshold default minimum threshold for detection 253 | * @param input Name of the input layer blob. 254 | * @param coverage Name of the output coverage classifier layer blob, which contains the confidence values for each bbox. 255 | * @param bboxes Name of the output bounding box layer blob, which contains a grid of rectangles in the image. 256 | * @param maxBatchSize The maximum batch size that the network will support and be optimized for. 
257 | */ 258 | static detectNet* Create( const char* prototxt_path, const char* model_path, float mean_pixel=0.0f, 259 | const char* class_labels=NULL, float threshold=DETECTNET_DEFAULT_THRESHOLD, 260 | const char* input = DETECTNET_DEFAULT_INPUT, 261 | const char* coverage = DETECTNET_DEFAULT_COVERAGE, 262 | const char* bboxes = DETECTNET_DEFAULT_BBOX, 263 | uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, 264 | precisionType precision=TYPE_FASTEST, 265 | deviceType device=DEVICE_GPU, bool allowGPUFallback=true ); 266 | 267 | /** 268 | * Load a custom network instance of a UFF model 269 | * @param model_path File path to the UFF model 270 | * @param class_labels File path to list of class name labels 271 | * @param threshold default minimum threshold for detection 272 | * @param input Name of the input layer blob. 273 | * @param inputDims Dimensions of the input layer blob. 274 | * @param output Name of the output layer blob containing the bounding boxes, ect. 275 | * @param numDetections Name of the output layer blob containing the detection count. 276 | * @param maxBatchSize The maximum batch size that the network will support and be optimized for. 277 | */ 278 | static detectNet* Create( const char* model_path, const char* class_labels, float threshold, 279 | const char* input, const Dims3& inputDims, 280 | const char* output, const char* numDetections, 281 | uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, 282 | precisionType precision=TYPE_FASTEST, 283 | deviceType device=DEVICE_GPU, bool allowGPUFallback=true ); 284 | 285 | /** 286 | * Load a new network instance by parsing the command line. 
287 | */ 288 | static detectNet* Create( int argc, char** argv ); 289 | 290 | /** 291 | * Usage string for command line arguments to Create() 292 | */ 293 | static inline const char* Usage() { return DETECTNET_USAGE_STRING; } 294 | 295 | /** 296 | * Destory 297 | */ 298 | virtual ~detectNet(); 299 | 300 | /** 301 | * Detect object locations from an RGBA image, returning an array containing the detection results. 302 | * @param[in] input float4 RGBA input image in CUDA device memory. 303 | * @param[in] width width of the input image in pixels. 304 | * @param[in] height height of the input image in pixels. 305 | * @param[out] detections pointer that will be set to array of detection results (residing in shared CPU/GPU memory) 306 | * @param[in] overlay bitwise OR combination of overlay flags (@see OverlayFlags and @see Overlay()), or OVERLAY_NONE. 307 | * @returns The number of detected objects, 0 if there were no detected objects, and -1 if an error was encountered. 308 | */ 309 | int Detect( float* input, uint32_t width, uint32_t height, Detection** detections, uint32_t overlay=OVERLAY_BOX ); 310 | 311 | /** 312 | * Detect object locations in an RGBA image, into an array of the results allocated by the user. 313 | * @param[in] input float4 RGBA input image in CUDA device memory. 314 | * @param[in] width width of the input image in pixels. 315 | * @param[in] height height of the input image in pixels. 316 | * @param[out] detections pointer to user-allocated array that will be filled with the detection results. 317 | * @see GetMaxDetections() for the number of detection results that should be allocated in this buffer. 318 | * @param[in] overlay bitwise OR combination of overlay flags (@see OverlayFlags and @see Overlay()), or OVERLAY_NONE. 319 | * @returns The number of detected objects, 0 if there were no detected objects, and -1 if an error was encountered. 
320 | */ 321 | int Detect( float* input, uint32_t width, uint32_t height, Detection* detections, uint32_t overlay=OVERLAY_BOX ); 322 | 323 | /** 324 | * Draw the detected bounding boxes overlayed on an RGBA image. 325 | * @note Overlay() will automatically be called by default by Detect(), if the overlay parameter is true 326 | * @param input float4 RGBA input image in CUDA device memory. 327 | * @param output float4 RGBA output image in CUDA device memory. 328 | * @param detections Array of detections allocated in CUDA device memory. 329 | */ 330 | bool Overlay( float* input, float* output, uint32_t width, uint32_t height, Detection* detections, uint32_t numDetections, uint32_t flags=OVERLAY_BOX ); 331 | 332 | /** 333 | * Retrieve the minimum threshold for detection. 334 | * TODO: change this to per-class in the future 335 | */ 336 | inline float GetThreshold() const { return mCoverageThreshold; } 337 | 338 | /** 339 | * Set the minimum threshold for detection. 340 | */ 341 | inline void SetThreshold( float threshold ) { mCoverageThreshold = threshold; } 342 | 343 | /** 344 | * Retrieve the maximum number of simultaneous detections the network supports. 345 | * Knowing this is useful for allocating the buffers to store the output detection results. 346 | */ 347 | inline uint32_t GetMaxDetections() const { return mMaxDetections; } 348 | 349 | /** 350 | * Retrieve the number of object classes supported in the detector 351 | */ 352 | inline uint32_t GetNumClasses() const { return mNumClasses; } 353 | 354 | /** 355 | * Retrieve the description of a particular class. 356 | */ 357 | inline const char* GetClassDesc( uint32_t index ) const { return mClassDesc[index].c_str(); } 358 | 359 | /** 360 | * Retrieve the class synset category of a particular class. 361 | */ 362 | inline const char* GetClassSynset( uint32_t index ) const { return mClassSynset[index].c_str(); } 363 | 364 | /** 365 | * Retrieve the path to the file containing the class descriptions. 
366 | */ 367 | inline const char* GetClassPath() const { return mClassPath.c_str(); } 368 | 369 | /** 370 | * Retrieve the RGBA visualization color a particular class. 371 | */ 372 | inline float* GetClassColor( uint32_t classIndex ) const { return mClassColors[0] + (classIndex*4); } 373 | 374 | /** 375 | * Set the visualization color of a particular class of object. 376 | */ 377 | void SetClassColor( uint32_t classIndex, float r, float g, float b, float a=255.0f ); 378 | 379 | /** 380 | * Set overlay alpha blending value for all classes (between 0-255). 381 | */ 382 | void SetOverlayAlpha( float alpha ); 383 | 384 | 385 | protected: 386 | 387 | // constructor 388 | detectNet( float meanPixel=0.0f ); 389 | 390 | bool allocDetections(); 391 | bool defaultColors(); 392 | void defaultClassDesc(); 393 | bool loadClassDesc( const char* filename ); 394 | 395 | bool init( const char* prototxt_path, const char* model_path, const char* mean_binary, const char* class_labels, 396 | float threshold, const char* input, const char* coverage, const char* bboxes, uint32_t maxBatchSize, 397 | precisionType precision, deviceType device, bool allowGPUFallback ); 398 | 399 | int clusterDetections( Detection* detections, uint32_t width, uint32_t height ); 400 | int clusterDetections( Detection* detections, int n, float threshold=0.75f ); 401 | 402 | void sortDetections( Detection* detections, int numDetections ); 403 | 404 | float mCoverageThreshold; 405 | float* mClassColors[2]; 406 | float mMeanPixel; 407 | 408 | std::vector mClassDesc; 409 | std::vector mClassSynset; 410 | 411 | std::string mClassPath; 412 | uint32_t mCustomClasses; 413 | uint32_t mNumClasses; 414 | 415 | Detection* mDetectionSets[2]; // list of detections, mNumDetectionSets * mMaxDetections 416 | uint32_t mDetectionSet; // index of next detection set to use 417 | uint32_t mMaxDetections; // number of raw detections in the grid 418 | 419 | static const uint32_t mNumDetectionSets = 16; // size of detection ringbuffer 
420 | }; 421 | 422 | 423 | #endif 424 | -------------------------------------------------------------------------------- /detectnet-camera.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a 5 | * copy of this software and associated documentation files (the "Software"), 6 | * to deal in the Software without restriction, including without limitation 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | * and/or sell copies of the Software, and to permit persons to whom the 9 | * Software is furnished to do so, subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in 12 | * all copies or substantial portions of the Software. 13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | * DEALINGS IN THE SOFTWARE. 
21 | */ 22 | 23 | #include "gstCamera.h" 24 | #include "glDisplay.h" 25 | 26 | #include "detectNet.h" 27 | #include "commandLine.h" 28 | //#include "opencv2/opencv.hpp" 29 | 30 | #include 31 | 32 | 33 | bool signal_recieved = false; 34 | 35 | void sig_handler(int signo) 36 | { 37 | if( signo == SIGINT ) 38 | { 39 | printf("received SIGINT\n"); 40 | signal_recieved = true; 41 | } 42 | } 43 | 44 | int usage() 45 | { 46 | printf("usage: detectnet-camera [-h] [--network NETWORK] [--threshold THRESHOLD]\n"); 47 | printf(" [--camera CAMERA] [--width WIDTH] [--height HEIGHT]\n\n"); 48 | printf("Locate objects in a live camera stream using an object detection DNN.\n\n"); 49 | printf("optional arguments:\n"); 50 | printf(" --help show this help message and exit\n"); 51 | printf(" --network NETWORK pre-trained model to load (see below for options)\n"); 52 | printf(" --overlay OVERLAY detection overlay flags (e.g. --overlay=box,labels,conf)\n"); 53 | printf(" valid combinations are: 'box', 'labels', 'conf', 'none'\n"); 54 | printf(" --alpha ALPHA overlay alpha blending value, range 0-255 (default: 120)\n"); 55 | printf(" --camera CAMERA index of the MIPI CSI camera to use (e.g. 
CSI camera 0),\n"); 56 | printf(" or for VL42 cameras the /dev/video device to use.\n"); 57 | printf(" by default, MIPI CSI camera 0 will be used.\n"); 58 | printf(" --width WIDTH desired width of camera stream (default is 1280 pixels)\n"); 59 | printf(" --height HEIGHT desired height of camera stream (default is 720 pixels)\n"); 60 | printf(" --threshold VALUE minimum threshold for detection (default is 0.5)\n\n"); 61 | 62 | printf("%s\n", detectNet::Usage()); 63 | 64 | return 0; 65 | } 66 | 67 | 68 | 69 | 70 | int main( int argc, char** argv ) 71 | { 72 | /* 73 | * parse command line 74 | */ 75 | commandLine cmdLine(argc, argv); 76 | 77 | if( cmdLine.GetFlag("help") ) 78 | return usage(); 79 | 80 | 81 | /* 82 | * attach signal handler 83 | */ 84 | if( signal(SIGINT, sig_handler) == SIG_ERR ) 85 | printf("\ncan't catch SIGINT\n"); 86 | 87 | 88 | /* 89 | * create the camera device 90 | */ 91 | gstCamera* camera = gstCamera::Create(cmdLine.GetInt("width", gstCamera::DefaultWidth), 92 | cmdLine.GetInt("height", gstCamera::DefaultHeight), 93 | cmdLine.GetString("camera")); 94 | 95 | if( !camera ) 96 | { 97 | printf("\ndetectnet-camera: failed to initialize camera device\n"); 98 | return 0; 99 | } 100 | 101 | printf("\ndetectnet-camera: successfully initialized camera device\n"); 102 | printf(" width: %u\n", camera->GetWidth()); 103 | printf(" height: %u\n", camera->GetHeight()); 104 | printf(" depth: %u (bpp)\n\n", camera->GetPixelDepth()); 105 | 106 | 107 | /* 108 | * create detection network 109 | */ 110 | detectNet* net = detectNet::Create(argc, argv); 111 | 112 | if( !net ) 113 | { 114 | printf("detectnet-camera: failed to load detectNet model\n"); 115 | return 0; 116 | } 117 | 118 | // parse overlay flags 119 | const uint32_t overlayFlags = detectNet::OverlayFlagsFromStr(cmdLine.GetString("overlay", "box,labels,conf")); 120 | 121 | 122 | /* 123 | * create openGL window 124 | */ 125 | glDisplay* display = glDisplay::Create(); 126 | 127 | if( !display ) 128 | 
printf("detectnet-camera: failed to create openGL display\n"); 129 | 130 | 131 | /* 132 | * start streaming 133 | */ 134 | if( !camera->Open() ) 135 | { 136 | printf("detectnet-camera: failed to open camera for streaming\n"); 137 | return 0; 138 | } 139 | 140 | printf("detectnet-camera: camera open for streaming\n"); 141 | 142 | 143 | /* 144 | * processing loop 145 | */ 146 | float confidence = 0.0f; 147 | 148 | //cv::Mat rgba[4]; 149 | 150 | while( !signal_recieved ) 151 | { 152 | // capture RGBA image 153 | float* imgRGBA = NULL; 154 | 155 | if( !camera->CaptureRGBA(&imgRGBA, 1000) ) 156 | printf("detectnet-camera: failed to capture RGBA image from camera\n"); 157 | 158 | // detect objects in the frame 159 | detectNet::Detection* detections = NULL; 160 | 161 | const int numDetections = net->Detect(imgRGBA, camera->GetWidth(), camera->GetHeight(), &detections, overlayFlags); 162 | 163 | 164 | 165 | if( numDetections > 0 ) 166 | { 167 | 168 | printf("%i objects detected\n", numDetections); 169 | 170 | for( int n=0; n < numDetections; n++ ) 171 | { 172 | printf("detected obj %i class #%u (%s) confidence=%f\n", n, detections[n].ClassID, net->GetClassDesc(detections[n].ClassID), detections[n].Confidence); 173 | printf("bounding box %i (%f, %f) (%f, %f) w=%f h=%f\n", n, detections[n].Left, detections[n].Top, detections[n].Right, detections[n].Bottom, detections[n].Width(), detections[n].Height()); 174 | } 175 | } 176 | 177 | printf("No objects detected .................... 
"); 178 | // update display 179 | 180 | 181 | if( display != NULL ) 182 | { 183 | // render the image 184 | display->RenderOnce(imgRGBA, camera->GetWidth(), camera->GetHeight()); 185 | 186 | // update the status bar 187 | char str[256]; 188 | sprintf(str, "TensorRT %i.%i.%i | %s | Network %.0f FPS", NV_TENSORRT_MAJOR, NV_TENSORRT_MINOR, NV_TENSORRT_PATCH, precisionTypeToStr(net->GetPrecision()), net->GetNetworkFPS()); 189 | display->SetTitle(str); 190 | 191 | // check if the user quit 192 | if( display->IsClosed() ) 193 | signal_recieved = true; 194 | } 195 | 196 | // print out timing info 197 | net->PrintProfilerTimes(); 198 | 199 | 200 | 201 | 202 | 203 | /* 204 | cv::imwrite("alpha1.png",imgRGBA); 205 | cv::imshow("alpha1.png",imgRGBA); 206 | char c=(char)waitKey(25); 207 | if(c==27)break; 208 | */ 209 | 210 | 211 | 212 | 213 | 214 | 215 | 216 | 217 | 218 | 219 | 220 | 221 | 222 | 223 | 224 | 225 | 226 | 227 | 228 | 229 | 230 | 231 | } 232 | 233 | 234 | /* 235 | * destroy resources 236 | */ 237 | printf("detectnet-camera: shutting down...\n"); 238 | 239 | SAFE_DELETE(camera); 240 | SAFE_DELETE(display); 241 | SAFE_DELETE(net); 242 | 243 | printf("detectnet-camera: shutdown complete.\n"); 244 | return 0; 245 | } 246 | 247 | -------------------------------------------------------------------------------- /glDisplay.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a 5 | * copy of this software and associated documentation files (the "Software"), 6 | * to deal in the Software without restriction, including without limitation 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | * and/or sell copies of the Software, and to permit persons to whom the 9 | * Software is furnished to do so, subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in 12 | * all copies or substantial portions of the Software. 13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | * DEALINGS IN THE SOFTWARE. 
21 | */ 22 | 23 | #include "glDisplay.h" 24 | #include "cudaNormalize.h" 25 | #include "timespec.h" 26 | 27 | 28 | // DEFAULT_TITLE 29 | const char* glDisplay::DEFAULT_TITLE = "NVIDIA Jetson"; 30 | 31 | 32 | // Constructor 33 | glDisplay::glDisplay() 34 | { 35 | mWindowX = 0; 36 | mScreenX = NULL; 37 | mVisualX = NULL; 38 | mContextGL = NULL; 39 | mDisplayX = NULL; 40 | 41 | mWidth = 0; 42 | mHeight = 0; 43 | mAvgTime = 1.0f; 44 | 45 | mBgColor[0] = 0.0f; 46 | mBgColor[1] = 0.0f; 47 | mBgColor[2] = 0.0f; 48 | mBgColor[3] = 1.0f; 49 | 50 | mNormalizedCUDA = NULL; 51 | mNormalizedWidth = 0; 52 | mNormalizedHeight = 0; 53 | 54 | mWindowClosed = false; 55 | 56 | clock_gettime(CLOCK_REALTIME, &mLastTime); 57 | } 58 | 59 | 60 | // Destructor 61 | glDisplay::~glDisplay() 62 | { 63 | const size_t numTextures = mTextures.size(); 64 | 65 | for( size_t n=0; n < numTextures; n++ ) 66 | { 67 | if( mTextures[n] != NULL ) 68 | { 69 | delete mTextures[n]; 70 | mTextures[n] = NULL; 71 | } 72 | } 73 | 74 | mTextures.clear(); 75 | 76 | if( mNormalizedCUDA != NULL ) 77 | { 78 | CUDA(cudaFree(mNormalizedCUDA)); 79 | mNormalizedCUDA = NULL; 80 | } 81 | 82 | glXDestroyContext(mDisplayX, mContextGL); 83 | } 84 | 85 | 86 | // Create 87 | glDisplay* glDisplay::Create( const char* title, float r, float g, float b, float a ) 88 | { 89 | glDisplay* vp = new glDisplay(); 90 | 91 | if( !vp ) 92 | return NULL; 93 | 94 | if( !vp->initWindow() ) 95 | { 96 | printf(LOG_GL "failed to create X11 Window.\n"); 97 | delete vp; 98 | return NULL; 99 | } 100 | 101 | if( !vp->initGL() ) 102 | { 103 | printf(LOG_GL "failed to initialize OpenGL.\n"); 104 | delete vp; 105 | return NULL; 106 | } 107 | 108 | GLenum err = glewInit(); 109 | 110 | if (GLEW_OK != err) 111 | { 112 | printf(LOG_GL "GLEW Error: %s\n", glewGetErrorString(err)); 113 | delete vp; 114 | return NULL; 115 | } 116 | 117 | if( title != NULL ) 118 | vp->SetTitle(title); 119 | 120 | vp->SetBackgroundColor(r, g, b, a); 121 | 122 | printf(LOG_GL 
"glDisplay -- display device initialized\n"); 123 | return vp; 124 | } 125 | 126 | 127 | // Create 128 | glDisplay* glDisplay::Create( float r, float g, float b, float a ) 129 | { 130 | return Create(DEFAULT_TITLE, r, g, b, a); 131 | } 132 | 133 | 134 | // initWindow 135 | bool glDisplay::initWindow() 136 | { 137 | if( !mDisplayX ) 138 | mDisplayX = XOpenDisplay(0); 139 | 140 | if( !mDisplayX ) 141 | { 142 | printf(LOG_GL "failed to open X11 server connection.\n"); 143 | return false; 144 | } 145 | 146 | 147 | if( !mDisplayX ) 148 | { 149 | printf(LOG_GL "InitWindow() - no X11 server connection.\n" ); 150 | return false; 151 | } 152 | 153 | // retrieve screen info 154 | const int screenIdx = DefaultScreen(mDisplayX); 155 | const int screenWidth = DisplayWidth(mDisplayX, screenIdx); 156 | const int screenHeight = DisplayHeight(mDisplayX, screenIdx); 157 | 158 | printf(LOG_GL "glDisplay -- X screen %i resolution: %ix%i\n", screenIdx, screenWidth, screenHeight); 159 | 160 | Screen* screen = XScreenOfDisplay(mDisplayX, screenIdx); 161 | 162 | if( !screen ) 163 | { 164 | printf("failed to retrieve default Screen instance\n"); 165 | return false; 166 | } 167 | 168 | Window winRoot = XRootWindowOfScreen(screen); 169 | 170 | // get framebuffer format 171 | static int fbAttribs[] = 172 | { 173 | GLX_X_RENDERABLE, True, 174 | GLX_DRAWABLE_TYPE, GLX_WINDOW_BIT, 175 | GLX_RENDER_TYPE, GLX_RGBA_BIT, 176 | GLX_X_VISUAL_TYPE, GLX_TRUE_COLOR, 177 | GLX_RED_SIZE, 8, 178 | GLX_GREEN_SIZE, 8, 179 | GLX_BLUE_SIZE, 8, 180 | GLX_ALPHA_SIZE, 8, 181 | GLX_DEPTH_SIZE, 24, 182 | GLX_STENCIL_SIZE, 8, 183 | GLX_DOUBLEBUFFER, True, 184 | GLX_SAMPLE_BUFFERS, 0, 185 | GLX_SAMPLES, 0, 186 | None 187 | }; 188 | 189 | int fbCount = 0; 190 | GLXFBConfig* fbConfig = glXChooseFBConfig(mDisplayX, screenIdx, fbAttribs, &fbCount); 191 | 192 | if( !fbConfig || fbCount == 0 ) 193 | return false; 194 | 195 | // get a 'visual' 196 | XVisualInfo* visual = glXGetVisualFromFBConfig(mDisplayX, fbConfig[0]); 197 
| 198 | if( !visual ) 199 | return false; 200 | 201 | // populate windows attributes 202 | XSetWindowAttributes winAttr; 203 | winAttr.colormap = XCreateColormap(mDisplayX, winRoot, visual->visual, AllocNone); 204 | winAttr.background_pixmap = None; 205 | winAttr.border_pixel = 0; 206 | winAttr.event_mask = StructureNotifyMask|KeyPressMask|KeyReleaseMask|PointerMotionMask|ButtonPressMask|ButtonReleaseMask; 207 | 208 | 209 | // create window 210 | Window win = XCreateWindow(mDisplayX, winRoot, 0, 0, screenWidth, screenHeight, 0, 211 | visual->depth, InputOutput, visual->visual, CWBorderPixel|CWColormap|CWEventMask, &winAttr); 212 | 213 | if( !win ) 214 | return false; 215 | 216 | 217 | // setup WM_DELETE message 218 | mWindowClosedMsg = XInternAtom(mDisplayX, "WM_DELETE_WINDOW", False); 219 | XSetWMProtocols(mDisplayX, win, &mWindowClosedMsg, 1); 220 | 221 | // set default window title 222 | XStoreName(mDisplayX, win, DEFAULT_TITLE); 223 | 224 | // show the window 225 | XMapWindow(mDisplayX, win); 226 | 227 | // cleanup 228 | mWindowX = win; 229 | mScreenX = screen; 230 | mVisualX = visual; 231 | mWidth = screenWidth; 232 | mHeight = screenHeight; 233 | 234 | XFree(fbConfig); 235 | return true; 236 | } 237 | 238 | 239 | void glDisplay::SetTitle( const char* str ) 240 | { 241 | XStoreName(mDisplayX, mWindowX, str); 242 | } 243 | 244 | // initGL 245 | bool glDisplay::initGL() 246 | { 247 | mContextGL = glXCreateContext(mDisplayX, mVisualX, 0, True); 248 | 249 | if( !mContextGL ) 250 | return false; 251 | 252 | GL(glXMakeCurrent(mDisplayX, mWindowX, mContextGL)); 253 | 254 | return true; 255 | } 256 | 257 | 258 | // MakeCurrent 259 | void glDisplay::BeginRender( bool userEvents ) 260 | { 261 | if( userEvents ) 262 | UserEvents(); 263 | 264 | GL(glXMakeCurrent(mDisplayX, mWindowX, mContextGL)); 265 | 266 | GL(glClearColor(mBgColor[0], mBgColor[1], mBgColor[2], mBgColor[3])); 267 | GL(glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT|GL_STENCIL_BUFFER_BIT)); 268 | 269 | 
GL(glViewport(0, 0, mWidth, mHeight)); 270 | GL(glMatrixMode(GL_PROJECTION)); 271 | GL(glLoadIdentity()); 272 | GL(glOrtho(0.0f, mWidth, mHeight, 0.0f, 0.0f, 1.0f)); 273 | } 274 | 275 | 276 | // EndRender 277 | void glDisplay::EndRender() 278 | 279 | { 280 | glXSwapBuffers(mDisplayX, mWindowX); 281 | 282 | // measure framerate 283 | timespec currTime; 284 | clock_gettime(CLOCK_REALTIME, &currTime); 285 | 286 | const timespec diffTime = timeDiff(mLastTime, currTime); 287 | const float ns = 1000000000 * diffTime.tv_sec + diffTime.tv_nsec; 288 | 289 | mAvgTime = mAvgTime * 0.8f + ns * 0.2f; 290 | mLastTime = currTime; 291 | } 292 | 293 | 294 | // allocTexture 295 | glTexture* glDisplay::allocTexture( uint32_t width, uint32_t height ) 296 | { 297 | if( width == 0 || height == 0 ) 298 | return NULL; 299 | 300 | const size_t numTextures = mTextures.size(); 301 | 302 | for( size_t n=0; n < numTextures; n++ ) 303 | { 304 | glTexture* tex = mTextures[n]; 305 | 306 | if( tex->GetWidth() == width && tex->GetHeight() == height ) 307 | return tex; 308 | } 309 | 310 | glTexture* tex = glTexture::Create(width, height, GL_RGBA32F_ARB); 311 | 312 | if( !tex ) 313 | { 314 | printf(LOG_GL "glDisplay.Render() failed to create OpenGL interop texture\n"); 315 | return NULL; 316 | } 317 | 318 | mTextures.push_back(tex); 319 | return tex; 320 | 321 | } 322 | 323 | 324 | // Render 325 | void glDisplay::Render( glTexture* texture, float x, float y ) 326 | { 327 | if( !texture ) 328 | return; 329 | 330 | texture->Render(x,y); 331 | } 332 | 333 | // Render 334 | void glDisplay::Render( float* img, uint32_t width, uint32_t height, float x, float y, bool normalize ) 335 | { 336 | if( !img || width == 0 || height == 0 ) 337 | return; 338 | 339 | // obtain the OpenGL texture to use 340 | glTexture* interopTex = allocTexture(width, height); 341 | 342 | if( !interopTex ) 343 | return; 344 | 345 | // normalize pixels from [0,255] -> [0,1] 346 | if( normalize ) 347 | { 348 | if( !mNormalizedCUDA || 
mNormalizedWidth < width || mNormalizedHeight < height ) 349 | { 350 | if( mNormalizedCUDA != NULL ) 351 | { 352 | CUDA(cudaFree(mNormalizedCUDA)); 353 | mNormalizedCUDA = NULL; 354 | } 355 | 356 | if( CUDA_FAILED(cudaMalloc(&mNormalizedCUDA, width * height * sizeof(float) * 4)) ) 357 | { 358 | printf(LOG_GL "glDisplay.Render() failed to allocate CUDA memory for normalization\n"); 359 | return; 360 | } 361 | 362 | mNormalizedWidth = width; 363 | mNormalizedHeight = height; 364 | } 365 | 366 | // rescale image pixel intensities for display 367 | CUDA(cudaNormalizeRGBA((float4*)img, make_float2(0.0f, 255.0f), 368 | (float4*)mNormalizedCUDA, make_float2(0.0f, 1.0f), 369 | width, height)); 370 | } 371 | 372 | // map from CUDA to openGL using GL interop 373 | void* tex_map = interopTex->MapCUDA(); 374 | 375 | if( tex_map != NULL ) 376 | { 377 | CUDA(cudaMemcpy(tex_map, normalize ? mNormalizedCUDA : img, interopTex->GetSize(), cudaMemcpyDeviceToDevice)); 378 | //CUDA(cudaDeviceSynchronize()); 379 | interopTex->Unmap(); 380 | } 381 | 382 | // draw the texture 383 | interopTex->Render(x,y); 384 | } 385 | 386 | 387 | // RenderOnce 388 | void glDisplay::RenderOnce( float* img, uint32_t width, uint32_t height, float x, float y, bool normalize ) 389 | { 390 | BeginRender(); 391 | Render(img, width, height, x, y, normalize); 392 | EndRender(); 393 | } 394 | 395 | 396 | 397 | #define MOUSE_MOVE 0 398 | #define MOUSE_BUTTON 1 399 | #define MOUSE_WHEEL 2 400 | #define MOUSE_DOUBLE 3 401 | #define KEY_STATE 4 402 | #define KEY_CHAR 5 403 | #define WINDOW_CLOSED 6 404 | 405 | 406 | // OnEvent 407 | void glDisplay::onEvent( uint msg, int a, int b ) 408 | { 409 | switch(msg) 410 | { 411 | case MOUSE_MOVE: 412 | { 413 | //mMousePos.Set(a,b); 414 | break; 415 | } 416 | case MOUSE_BUTTON: 417 | { 418 | /*if( mMouseButton[a] != (bool)b ) 419 | { 420 | mMouseButton[a] = b; 421 | 422 | if( b ) 423 | mMouseDownEvent = true; 424 | 425 | // ignore right-mouse up events 426 | if( !(a == 1 && 
!b) ) 427 | mMouseEvent = true; 428 | }*/ 429 | 430 | break; 431 | } 432 | case MOUSE_DOUBLE: 433 | { 434 | /*mMouseDblClick = b; 435 | 436 | if( b ) 437 | { 438 | mMouseEvent = true; 439 | mMouseDownEvent = true; 440 | }*/ 441 | 442 | break; 443 | } 444 | case MOUSE_WHEEL: 445 | { 446 | //mMouseWheel = a; 447 | break; 448 | } 449 | case KEY_STATE: 450 | { 451 | //mKeys[a] = b; 452 | break; 453 | } 454 | case KEY_CHAR: 455 | { 456 | //mKeyText = a; 457 | break; 458 | } 459 | case WINDOW_CLOSED: 460 | { 461 | printf(LOG_GL "glDisplay -- the window has been closed\n"); 462 | mWindowClosed = true; 463 | break; 464 | } 465 | } 466 | 467 | //if( msg == MOUSE_MOVE || msg == MOUSE_BUTTON || msg == MOUSE_DOUBLE || msg == MOUSE_WHEEL ) 468 | // mMouseEventLast = time(); 469 | } 470 | 471 | 472 | // UserEvents() 473 | void glDisplay::UserEvents() 474 | { 475 | // reset input states 476 | /*mMouseEvent = false; 477 | mMouseDownEvent = false; 478 | mMouseDblClick = false; 479 | mMouseWheel = 0; 480 | mKeyText = 0;*/ 481 | XEvent evt; 482 | 483 | while( XEventsQueued(mDisplayX, QueuedAlready) > 0 ) 484 | { 485 | XNextEvent(mDisplayX, &evt); 486 | 487 | switch( evt.type ) 488 | { 489 | case KeyPress: onEvent(KEY_STATE, evt.xkey.keycode, 1); break; 490 | case KeyRelease: onEvent(KEY_STATE, evt.xkey.keycode, 0); break; 491 | case ButtonPress: onEvent(MOUSE_BUTTON, evt.xbutton.button, 1); break; 492 | case ButtonRelease: onEvent(MOUSE_BUTTON, evt.xbutton.button, 0); break; 493 | case MotionNotify: 494 | { 495 | XWindowAttributes attr; 496 | XGetWindowAttributes(mDisplayX, evt.xmotion.root, &attr); 497 | onEvent(MOUSE_MOVE, evt.xmotion.x_root + attr.x, evt.xmotion.y_root + attr.y); 498 | break; 499 | } 500 | case ClientMessage: 501 | { 502 | if( evt.xclient.data.l[0] == mWindowClosedMsg ) 503 | onEvent(WINDOW_CLOSED, 0, 0); 504 | 505 | break; 506 | } 507 | } 508 | } 509 | } 510 | 511 | -------------------------------------------------------------------------------- /glDisplay.h: 
-------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a 5 | * copy of this software and associated documentation files (the "Software"), 6 | * to deal in the Software without restriction, including without limitation 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | * and/or sell copies of the Software, and to permit persons to whom the 9 | * Software is furnished to do so, subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in 12 | * all copies or substantial portions of the Software. 13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | * DEALINGS IN THE SOFTWARE. 21 | */ 22 | 23 | #ifndef __GL_VIEWPORT_H__ 24 | #define __GL_VIEWPORT_H__ 25 | 26 | 27 | #include "glUtility.h" 28 | #include "glTexture.h" 29 | 30 | #include 31 | #include 32 | 33 | 34 | /** 35 | * OpenGL display window / video viewer 36 | * @ingroup OpenGL 37 | */ 38 | class glDisplay 39 | { 40 | public: 41 | /** 42 | * Create a new maximized openGL display window. 
43 | * @param r default background RGBA color, red component (0.0-1.0f) 44 | * @param g default background RGBA color, green component (0.0-1.0f) 45 | * @param b default background RGBA color, blue component (0.0-1.0f) 46 | * @param a default background RGBA color, alpha component (0.0-1.0f) 47 | */ 48 | static glDisplay* Create( float r=0.05f, float g=0.05f, float b=0.05f, float a=1.0f ); 49 | 50 | /** 51 | * Create a new maximized openGL display window. 52 | * @param title window title bar label string 53 | * @param r default background RGBA color, red component (0.0-1.0f) 54 | * @param g default background RGBA color, green component (0.0-1.0f) 55 | * @param b default background RGBA color, blue component (0.0-1.0f) 56 | * @param a default background RGBA color, alpha component (0.0-1.0f) 57 | */ 58 | static glDisplay* Create( const char* title, float r=0.05f, float g=0.05f, float b=0.05f, float a=1.0f ); 59 | 60 | /** 61 | * Destroy window 62 | */ 63 | ~glDisplay(); 64 | 65 | /** 66 | * Clear window and begin rendering a frame. 67 | * If userEvents is true, UserEvents() will automatically be processed. 68 | */ 69 | void BeginRender( bool userEvents=true ); 70 | 71 | /** 72 | * Finish rendering and refresh / flip the backbuffer. 
73 | */ 74 | void EndRender(); 75 | 76 | /** 77 | * Render an OpenGL texture 78 | * @note for more texture rendering methods, @see glTexture 79 | */ 80 | void Render( glTexture* texture, float x=5.0f, float y=30.0f ); 81 | 82 | /** 83 | * Render a CUDA float4 image using OpenGL interop 84 | * If normalize is true, the image's pixel values will be rescaled from the range of [0-255] to [0-1] 85 | * If normalize is false, the image's pixel values are assumed to already be in the range of [0-1] 86 | * Note that if normalization is selected to be performed, it will be done in-place on the image 87 | */ 88 | void Render( float* image, uint32_t width, uint32_t height, float x=0.0f, float y=30.0f, bool normalize=true ); 89 | 90 | /** 91 | * Begin the frame, render one CUDA float4 image using OpenGL interop, and end the frame. 92 | * Note that this function is only useful if you are rendering a single texture per frame. 93 | * If normalize is true, the image's pixel values will be rescaled from the range of [0-255] to [0-1] 94 | * If normalize is false, the image's pixel values are assumed to already be in the range of [0-1] 95 | * Note that if normalization is selected to be performed, it will be done in-place on the image 96 | */ 97 | void RenderOnce( float* image, uint32_t width, uint32_t height, float x=5.0f, float y=30.0f, bool normalize=true ); 98 | 99 | /** 100 | * Process UI events. 101 | */ 102 | void UserEvents(); 103 | 104 | /** 105 | * UI event handler. 106 | */ 107 | void onEvent( uint msg, int a, int b ); 108 | 109 | /** 110 | * Returns true if the window is open. 111 | */ 112 | inline bool IsOpen() const { return !mWindowClosed; } 113 | 114 | /** 115 | * Returns true if the window has been closed. 116 | */ 117 | inline bool IsClosed() const { return mWindowClosed; } 118 | 119 | /** 120 | * Get the average frame time (in milliseconds). 
121 | */ 122 | inline float GetFPS() const { return 1000000000.0f / mAvgTime; } 123 | 124 | /** 125 | * Get the width of the window (in pixels) 126 | */ 127 | inline uint32_t GetWidth() const { return mWidth; } 128 | 129 | /** 130 | * Get the height of the window (in pixels) 131 | */ 132 | inline uint32_t GetHeight() const { return mHeight; } 133 | 134 | /** 135 | * Set the window title string. 136 | */ 137 | void SetTitle( const char* str ); 138 | 139 | /** 140 | * Set the background color. 141 | * @param r background RGBA color, red component (0.0-1.0f) 142 | * @param g background RGBA color, green component (0.0-1.0f) 143 | * @param b background RGBA color, blue component (0.0-1.0f) 144 | * @param a background RGBA color, alpha component (0.0-1.0f) 145 | */ 146 | inline void SetBackgroundColor( float r, float g, float b, float a ) { mBgColor[0] = r; mBgColor[1] = g; mBgColor[2] = b; mBgColor[3] = a; } 147 | 148 | /** 149 | * Default title bar name 150 | */ 151 | static const char* DEFAULT_TITLE; 152 | 153 | protected: 154 | glDisplay(); 155 | 156 | bool initWindow(); 157 | bool initGL(); 158 | 159 | glTexture* allocTexture( uint32_t width, uint32_t height ); 160 | 161 | static const int screenIdx = 0; 162 | 163 | Display* mDisplayX; 164 | Screen* mScreenX; 165 | XVisualInfo* mVisualX; 166 | Window mWindowX; 167 | GLXContext mContextGL; 168 | bool mWindowClosed; 169 | Atom mWindowClosedMsg; 170 | 171 | uint32_t mWidth; 172 | uint32_t mHeight; 173 | 174 | timespec mLastTime; 175 | float mAvgTime; 176 | float mBgColor[4]; 177 | 178 | float* mNormalizedCUDA; 179 | uint32_t mNormalizedWidth; 180 | uint32_t mNormalizedHeight; 181 | 182 | std::vector mTextures; 183 | }; 184 | 185 | #endif 186 | 187 | -------------------------------------------------------------------------------- /glTexture.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a 5 | * copy of this software and associated documentation files (the "Software"), 6 | * to deal in the Software without restriction, including without limitation 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | * and/or sell copies of the Software, and to permit persons to whom the 9 | * Software is furnished to do so, subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in 12 | * all copies or substantial portions of the Software. 13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | * DEALINGS IN THE SOFTWARE. 
21 | */ 22 | 23 | #include "glUtility.h" 24 | #include "glTexture.h" 25 | 26 | #include "cudaMappedMemory.h" 27 | 28 | 29 | //----------------------------------------------------------------------------------- 30 | inline uint32_t glTextureLayout( uint32_t format ) 31 | { 32 | switch(format) 33 | { 34 | case GL_LUMINANCE8: 35 | case GL_LUMINANCE16: 36 | case GL_LUMINANCE32UI_EXT: 37 | case GL_LUMINANCE8I_EXT: 38 | case GL_LUMINANCE16I_EXT: 39 | case GL_LUMINANCE32I_EXT: 40 | case GL_LUMINANCE16F_ARB: 41 | case GL_LUMINANCE32F_ARB: return GL_LUMINANCE; 42 | 43 | case GL_LUMINANCE8_ALPHA8: 44 | case GL_LUMINANCE16_ALPHA16: 45 | case GL_LUMINANCE_ALPHA32UI_EXT: 46 | case GL_LUMINANCE_ALPHA8I_EXT: 47 | case GL_LUMINANCE_ALPHA16I_EXT: 48 | case GL_LUMINANCE_ALPHA32I_EXT: 49 | case GL_LUMINANCE_ALPHA16F_ARB: 50 | case GL_LUMINANCE_ALPHA32F_ARB: return GL_LUMINANCE_ALPHA; 51 | 52 | case GL_RGB8: 53 | case GL_RGB16: 54 | case GL_RGB32UI: 55 | case GL_RGB8I: 56 | case GL_RGB16I: 57 | case GL_RGB32I: 58 | case GL_RGB16F_ARB: 59 | case GL_RGB32F_ARB: return GL_RGB; 60 | 61 | case GL_RGBA8: 62 | case GL_RGBA16: 63 | case GL_RGBA32UI: 64 | case GL_RGBA8I: 65 | case GL_RGBA16I: 66 | case GL_RGBA32I: 67 | //case GL_RGBA_FLOAT32: 68 | case GL_RGBA16F_ARB: 69 | case GL_RGBA32F_ARB: return GL_RGBA; 70 | } 71 | 72 | return 0; 73 | } 74 | 75 | 76 | inline uint32_t glTextureLayoutChannels( uint32_t format ) 77 | { 78 | const uint layout = glTextureLayout(format); 79 | 80 | switch(layout) 81 | { 82 | case GL_LUMINANCE: return 1; 83 | case GL_LUMINANCE_ALPHA: return 2; 84 | case GL_RGB: return 3; 85 | case GL_RGBA: return 4; 86 | } 87 | 88 | return 0; 89 | } 90 | 91 | 92 | inline uint32_t glTextureType( uint32_t format ) 93 | { 94 | switch(format) 95 | { 96 | case GL_LUMINANCE8: 97 | case GL_LUMINANCE8_ALPHA8: 98 | case GL_RGB8: 99 | case GL_RGBA8: return GL_UNSIGNED_BYTE; 100 | 101 | case GL_LUMINANCE16: 102 | case GL_LUMINANCE16_ALPHA16: 103 | case GL_RGB16: 104 | case GL_RGBA16: 
return GL_UNSIGNED_SHORT; 105 | 106 | case GL_LUMINANCE32UI_EXT: 107 | case GL_LUMINANCE_ALPHA32UI_EXT: 108 | case GL_RGB32UI: 109 | case GL_RGBA32UI: return GL_UNSIGNED_INT; 110 | 111 | case GL_LUMINANCE8I_EXT: 112 | case GL_LUMINANCE_ALPHA8I_EXT: 113 | case GL_RGB8I: 114 | case GL_RGBA8I: return GL_BYTE; 115 | 116 | case GL_LUMINANCE16I_EXT: 117 | case GL_LUMINANCE_ALPHA16I_EXT: 118 | case GL_RGB16I: 119 | case GL_RGBA16I: return GL_SHORT; 120 | 121 | case GL_LUMINANCE32I_EXT: 122 | case GL_LUMINANCE_ALPHA32I_EXT: 123 | case GL_RGB32I: 124 | case GL_RGBA32I: return GL_INT; 125 | 126 | 127 | case GL_LUMINANCE16F_ARB: 128 | case GL_LUMINANCE_ALPHA16F_ARB: 129 | case GL_RGB16F_ARB: 130 | case GL_RGBA16F_ARB: return GL_FLOAT; 131 | 132 | case GL_LUMINANCE32F_ARB: 133 | case GL_LUMINANCE_ALPHA32F_ARB: 134 | //case GL_RGBA_FLOAT32: 135 | case GL_RGB32F_ARB: 136 | case GL_RGBA32F_ARB: return GL_FLOAT; 137 | } 138 | 139 | return 0; 140 | } 141 | 142 | 143 | inline uint glTextureTypeSize( uint32_t format ) 144 | { 145 | const uint type = glTextureType(format); 146 | 147 | switch(type) 148 | { 149 | case GL_UNSIGNED_BYTE: 150 | case GL_BYTE: return 1; 151 | 152 | case GL_UNSIGNED_SHORT: 153 | case GL_SHORT: return 2; 154 | 155 | case GL_UNSIGNED_INT: 156 | case GL_INT: 157 | case GL_FLOAT: return 4; 158 | } 159 | 160 | return 0; 161 | } 162 | //----------------------------------------------------------------------------------- 163 | 164 | // constructor 165 | glTexture::glTexture() 166 | { 167 | mID = 0; 168 | mDMA = 0; 169 | mWidth = 0; 170 | mHeight = 0; 171 | mFormat = 0; 172 | mSize = 0; 173 | 174 | mInteropCUDA = NULL; 175 | mInteropHost = NULL; 176 | mInteropDevice = NULL; 177 | } 178 | 179 | 180 | // destructor 181 | glTexture::~glTexture() 182 | { 183 | GL(glDeleteTextures(1, &mID)); 184 | } 185 | 186 | 187 | // Create 188 | glTexture* glTexture::Create( uint32_t width, uint32_t height, uint32_t format, void* data ) 189 | { 190 | glTexture* tex = new glTexture(); 
191 | 192 | if( !tex->init(width, height, format, data) ) 193 | { 194 | printf("[OpenGL] failed to create %ux%u texture\n", width, height); 195 | return NULL; 196 | } 197 | 198 | return tex; 199 | } 200 | 201 | 202 | // Alloc 203 | bool glTexture::init( uint32_t width, uint32_t height, uint32_t format, void* data ) 204 | { 205 | const uint32_t size = width * height * glTextureLayoutChannels(format) * glTextureTypeSize(format); 206 | 207 | if( size == 0 ) 208 | return NULL; 209 | 210 | // generate texture objects 211 | uint32_t id = 0; 212 | 213 | GL(glEnable(GL_TEXTURE_2D)); 214 | GL(glGenTextures(1, &id)); 215 | GL(glBindTexture(GL_TEXTURE_2D, id)); 216 | 217 | // set default texture parameters 218 | GL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)); 219 | GL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)); 220 | GL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)); 221 | GL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)); 222 | 223 | printf("[OpenGL] creating %ux%u texture\n", width, height); 224 | 225 | // allocate texture 226 | GL_VERIFYN(glTexImage2D(GL_TEXTURE_2D, 0, format, width, height, 0, glTextureLayout(format), glTextureType(format), data)); 227 | GL(glBindTexture(GL_TEXTURE_2D, 0)); 228 | 229 | // allocate DMA PBO 230 | uint32_t dma = 0; 231 | 232 | GL(glGenBuffers(1, &dma)); 233 | GL(glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, dma)); 234 | GL(glBufferDataARB(GL_PIXEL_UNPACK_BUFFER_ARB, size, NULL, GL_DYNAMIC_DRAW_ARB)); 235 | GL(glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, 0)); 236 | 237 | 238 | mID = id; 239 | mDMA = dma; 240 | mWidth = width; 241 | mHeight = height; 242 | mFormat = format; 243 | mSize = size; 244 | return true; 245 | } 246 | 247 | 248 | // MapCUDA 249 | void* glTexture::MapCUDA() 250 | { 251 | if( !mInteropCUDA ) 252 | { 253 | if( CUDA_FAILED(cudaGraphicsGLRegisterBuffer(&mInteropCUDA, mDMA, cudaGraphicsRegisterFlagsWriteDiscard)) ) 254 | return NULL; 255 | 
256 | printf( "[cuda] registered %u byte openGL texture for interop access (%ux%u)\n", mSize, mWidth, mHeight); 257 | } 258 | 259 | if( CUDA_FAILED(cudaGraphicsMapResources(1, &mInteropCUDA)) ) 260 | return NULL; 261 | 262 | void* devPtr = NULL; 263 | size_t mappedSize = 0; 264 | 265 | if( CUDA_FAILED(cudaGraphicsResourceGetMappedPointer(&devPtr, &mappedSize, mInteropCUDA)) ) 266 | { 267 | CUDA(cudaGraphicsUnmapResources(1, &mInteropCUDA)); 268 | return NULL; 269 | } 270 | 271 | if( mSize != mappedSize ) 272 | printf("[OpenGL] glTexture::MapCUDA() -- size mismatch %zu bytes (expected=%u)\n", mappedSize, mSize); 273 | 274 | return devPtr; 275 | } 276 | 277 | 278 | // Unmap 279 | void glTexture::Unmap() 280 | { 281 | if( !mInteropCUDA ) 282 | return; 283 | 284 | CUDA(cudaGraphicsUnmapResources(1, &mInteropCUDA)); 285 | 286 | GL(glEnable(GL_TEXTURE_2D)); 287 | GL(glBindTexture(GL_TEXTURE_2D, mID)); 288 | GL(glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, mDMA)); 289 | GL(glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, mWidth, mHeight, glTextureLayout(mFormat), glTextureType(mFormat), NULL)); 290 | 291 | GL(glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0)); 292 | GL(glBindTexture(GL_TEXTURE_2D, 0)); 293 | GL(glDisable(GL_TEXTURE_2D)); 294 | } 295 | 296 | 297 | // Upload 298 | bool glTexture::UploadCPU( void* data ) 299 | { 300 | // activate texture & pbo 301 | GL(glEnable(GL_TEXTURE_2D)); 302 | GL(glActiveTextureARB(GL_TEXTURE0_ARB)); 303 | GL(glBindTexture(GL_TEXTURE_2D, mID)); 304 | GL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0)); 305 | GL(glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, mDMA)); 306 | 307 | //GL(glPixelStorei(GL_UNPACK_ALIGNMENT, 1)); 308 | //GL(glPixelStorei(GL_UNPACK_ROW_LENGTH, img->GetWidth())); 309 | //GL(glPixelStorei(GL_UNPACK_IMAGE_HEIGHT, img->GetHeight())); 310 | 311 | // hint to driver to double-buffer 312 | // glBufferDataARB(GL_PIXEL_UNPACK_BUFFER_ARB, mImage->GetSize(), NULL, GL_STREAM_DRAW_ARB); 313 | 314 | // map PBO 315 | GLubyte* ptr = 
(GLubyte*)glMapBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, GL_WRITE_ONLY_ARB); 316 | 317 | if( !ptr ) 318 | { 319 | GL_CHECK("glMapBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, GL_WRITE_ONLY_ARB)"); 320 | return NULL; 321 | } 322 | 323 | memcpy(ptr, data, mSize); 324 | 325 | GL(glUnmapBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB)); 326 | 327 | //GL(glEnable(GL_TEXTURE_2D)); 328 | //GL(glBindTexture(GL_TEXTURE_2D, mID)); 329 | //GL(glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, mDMA)); 330 | GL(glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, mWidth, mHeight, glTextureLayout(mFormat), glTextureType(mFormat), NULL)); 331 | 332 | GL(glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0)); 333 | GL(glBindTexture(GL_TEXTURE_2D, 0)); 334 | GL(glDisable(GL_TEXTURE_2D)); 335 | 336 | /*if( !mInteropHost || !mInteropDevice ) 337 | { 338 | if( !cudaAllocMapped(&mInteropHost, &mInteropDevice, mSize) ) 339 | return false; 340 | } 341 | 342 | memcpy(mInteropHost, data, mSize); 343 | 344 | void* devGL = MapCUDA(); 345 | 346 | if( !devGL ) 347 | return false; 348 | 349 | CUDA(cudaMemcpy(devGL, mInteropDevice, mSize, cudaMemcpyDeviceToDevice)); 350 | Unmap();*/ 351 | 352 | return true; 353 | } 354 | 355 | 356 | // Render 357 | void glTexture::Render( const float4& rect ) 358 | { 359 | GL(glEnable(GL_TEXTURE_2D)); 360 | GL(glBindTexture(GL_TEXTURE_2D, mID)); 361 | 362 | glBegin(GL_QUADS); 363 | 364 | glColor4f(1.0f,1.0f,1.0f,1.0f); 365 | 366 | glTexCoord2f(0.0f, 0.0f); 367 | glVertex2d(rect.x, rect.y); 368 | 369 | glTexCoord2f(1.0f, 0.0f); 370 | glVertex2d(rect.z, rect.y); 371 | 372 | glTexCoord2f(1.0f, 1.0f); 373 | glVertex2d(rect.z, rect.w); 374 | 375 | glTexCoord2f(0.0f, 1.0f); 376 | glVertex2d(rect.x, rect.w); 377 | 378 | glEnd(); 379 | 380 | GL(glBindTexture(GL_TEXTURE_2D, 0)); 381 | } 382 | 383 | 384 | 385 | void glTexture::Render( float x, float y ) 386 | { 387 | Render(x, y, mWidth, mHeight); 388 | } 389 | 390 | void glTexture::Render( float x, float y, float width, float height ) 391 | { 392 | Render(make_float4(x, y, x + 
width, y + height)); 393 | } 394 | 395 | 396 | -------------------------------------------------------------------------------- /glTexture.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a 5 | * copy of this software and associated documentation files (the "Software"), 6 | * to deal in the Software without restriction, including without limitation 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | * and/or sell copies of the Software, and to permit persons to whom the 9 | * Software is furnished to do so, subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in 12 | * all copies or substantial portions of the Software. 13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | * DEALINGS IN THE SOFTWARE. 21 | */ 22 | 23 | #ifndef __GL_TEXTURE_H__ 24 | #define __GL_TEXTURE_H__ 25 | 26 | 27 | #include "cudaUtility.h" 28 | #include "cuda_gl_interop.h" 29 | 30 | 31 | /** 32 | * OpenGL texture with CUDA interoperability. 
33 | * @ingroup OpenGL 34 | */ 35 | class glTexture 36 | { 37 | public: 38 | static glTexture* Create( uint32_t width, uint32_t height, uint32_t format, void* data=NULL ); 39 | ~glTexture(); 40 | 41 | void Render( float x, float y ); 42 | void Render( float x, float y, float width, float height ); 43 | void Render( const float4& rect ); 44 | 45 | inline uint32_t GetID() const { return mID; } 46 | inline uint32_t GetWidth() const { return mWidth; } 47 | inline uint32_t GetHeight() const { return mHeight; } 48 | inline uint32_t GetFormat() const { return mFormat; } 49 | inline uint32_t GetSize() const { return mSize; } 50 | 51 | void* MapCUDA(); 52 | void Unmap(); 53 | 54 | bool UploadCPU( void* data ); 55 | 56 | private: 57 | glTexture(); 58 | bool init(uint32_t width, uint32_t height, uint32_t format, void* data); 59 | 60 | uint32_t mID; 61 | uint32_t mDMA; 62 | uint32_t mWidth; 63 | uint32_t mHeight; 64 | uint32_t mFormat; 65 | uint32_t mSize; 66 | 67 | cudaGraphicsResource* mInteropCUDA; 68 | void* mInteropHost; 69 | void* mInteropDevice; 70 | }; 71 | 72 | 73 | #endif 74 | -------------------------------------------------------------------------------- /glUtility.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a 5 | * copy of this software and associated documentation files (the "Software"), 6 | * to deal in the Software without restriction, including without limitation 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | * and/or sell copies of the Software, and to permit persons to whom the 9 | * Software is furnished to do so, subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in 12 | * all copies or substantial portions of the Software. 
13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | * DEALINGS IN THE SOFTWARE. 21 | */ 22 | 23 | #ifndef __OPENGL_UTILITY_H 24 | #define __OPENGL_UTILITY_H 25 | 26 | 27 | #include 28 | #include 29 | 30 | #include 31 | 32 | 33 | /** 34 | * OpenGL logging prefix. 35 | * @ingroup OpenGL 36 | */ 37 | #define LOG_GL "[OpenGL] " 38 | 39 | /** 40 | * OpenGL error-checking macro 41 | * @ingroup OpenGL 42 | */ 43 | #define GL(x) { x; glCheckError( #x, __FILE__, __LINE__ ); } 44 | 45 | /** 46 | * Return false on OpenGL error. 47 | * @ingroup OpenGL 48 | */ 49 | #define GL_VERIFY(x) { x; if(glCheckError( #x, __FILE__, __LINE__ )) return false; } 50 | 51 | /** 52 | * OpenGL NULL on OpenGL error. 53 | * @ingroup OpenGL 54 | */ 55 | #define GL_VERIFYN(x) { x; if(glCheckError( #x, __FILE__, __LINE__ )) return NULL; } 56 | 57 | /** 58 | * Print a message on OpenGL error. 59 | * @ingroup OpenGL 60 | */ 61 | #define GL_CHECK(msg) { glCheckError(msg, __FILE__, __LINE__); } 62 | 63 | /** 64 | * OpenGL error-checking messsage function. 
65 | * @ingroup OpenGL 66 | */ 67 | inline bool glCheckError(const char* msg, const char* file, int line) 68 | { 69 | GLenum err = glGetError(); 70 | 71 | if( err == GL_NO_ERROR ) 72 | return false; 73 | 74 | const char* e = NULL; 75 | 76 | switch(err) 77 | { 78 | case GL_INVALID_ENUM: e = "invalid enum"; break; 79 | case GL_INVALID_VALUE: e = "invalid value"; break; 80 | case GL_INVALID_OPERATION: e = "invalid operation"; break; 81 | case GL_STACK_OVERFLOW: e = "stack overflow"; break; 82 | case GL_STACK_UNDERFLOW: e = "stack underflow"; break; 83 | case GL_OUT_OF_MEMORY: e = "out of memory"; break; 84 | #ifdef GL_TABLE_TOO_LARGE_EXT 85 | case GL_TABLE_TOO_LARGE_EXT: e = "table too large"; break; 86 | #endif 87 | #ifdef GL_TEXTURE_TOO_LARGE_EXT 88 | case GL_TEXTURE_TOO_LARGE_EXT: e = "texture too large"; break; 89 | #endif 90 | default: e = "unknown error"; 91 | } 92 | 93 | printf(LOG_GL "Error %i - '%s'\n", (uint)err, e); 94 | printf(LOG_GL " %s::%i\n", file, line ); 95 | printf(LOG_GL " %s\n", msg ); 96 | 97 | return true; 98 | } 99 | 100 | 101 | /** 102 | * OpenGL error check + logging 103 | * @ingroup OpenGL 104 | */ 105 | inline bool glCheckError(const char* msg) 106 | { 107 | GLenum err = glGetError(); 108 | 109 | if( err == GL_NO_ERROR ) 110 | return false; 111 | 112 | const char* e = NULL; 113 | 114 | switch(err) 115 | { 116 | case GL_INVALID_ENUM: e = "invalid enum"; break; 117 | case GL_INVALID_VALUE: e = "invalid value"; break; 118 | case GL_INVALID_OPERATION: e = "invalid operation"; break; 119 | case GL_STACK_OVERFLOW: e = "stack overflow"; break; 120 | case GL_STACK_UNDERFLOW: e = "stack underflow"; break; 121 | case GL_OUT_OF_MEMORY: e = "out of memory"; break; 122 | #ifdef GL_TABLE_TOO_LARGE_EXT 123 | case GL_TABLE_TOO_LARGE_EXT: e = "table too large"; break; 124 | #endif 125 | #ifdef GL_TEXTURE_TOO_LARGE_EXT 126 | case GL_TEXTURE_TOO_LARGE_EXT: e = "texture too large"; break; 127 | #endif 128 | default: e = "unknown error"; 129 | } 130 | 131 | 
printf(LOG_GL "%s (error %i - %s)\n", msg, (uint)err, e); 132 | return true; 133 | } 134 | 135 | 136 | 137 | #define GL_GPU_MEM_INFO_TOTAL_AVAILABLE_MEM_NVX 0x9048 138 | #define GL_GPU_MEM_INFO_CURRENT_AVAILABLE_MEM_NVX 0x9049 139 | 140 | 141 | /** 142 | * Print the amount of free GPU memory. 143 | * @ingroup OpenGL 144 | */ 145 | inline void glPrintFreeMem() 146 | { 147 | GLint total_mem_kb = 0; 148 | GLint cur_avail_mem_kb = 0; 149 | 150 | glGetIntegerv(GL_GPU_MEM_INFO_TOTAL_AVAILABLE_MEM_NVX, &total_mem_kb); 151 | glGetIntegerv(GL_GPU_MEM_INFO_CURRENT_AVAILABLE_MEM_NVX,&cur_avail_mem_kb); 152 | 153 | printf(LOG_GL "GPU memory free %i / %i kb\n", cur_avail_mem_kb, total_mem_kb); 154 | } 155 | 156 | 157 | 158 | #endif 159 | 160 | -------------------------------------------------------------------------------- /gstCamera.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a 5 | * copy of this software and associated documentation files (the "Software"), 6 | * to deal in the Software without restriction, including without limitation 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | * and/or sell copies of the Software, and to permit persons to whom the 9 | * Software is furnished to do so, subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in 12 | * all copies or substantial portions of the Software. 13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | * DEALINGS IN THE SOFTWARE. 21 | */ 22 | 23 | #include "gstCamera.h" 24 | #include "gstUtility.h" 25 | 26 | #include 27 | #include 28 | 29 | #include 30 | #include 31 | #include 32 | 33 | #include "cudaMappedMemory.h" 34 | #include "cudaYUV.h" 35 | #include "cudaRGB.h" 36 | 37 | #include "NvInfer.h" 38 | 39 | 40 | // gstCameraSrcToString 41 | const char* gstCameraSrcToString( gstCameraSrc src ) 42 | { 43 | if( src == GST_SOURCE_NVCAMERA ) return "GST_SOURCE_NVCAMERA"; 44 | else if( src == GST_SOURCE_NVARGUS ) return "GST_SOURCE_NVARGUS"; 45 | else if( src == GST_SOURCE_V4L2 ) return "GST_SOURCE_V4L2"; 46 | 47 | return "UNKNOWN"; 48 | } 49 | 50 | 51 | // constructor 52 | gstCamera::gstCamera() 53 | { 54 | mAppSink = NULL; 55 | mBus = NULL; 56 | mPipeline = NULL; 57 | mSensorCSI = -1; 58 | mStreaming = false; 59 | 60 | mWidth = 0; 61 | mHeight = 0; 62 | mDepth = 0; 63 | mSize = 0; 64 | mSource = GST_SOURCE_NVCAMERA; 65 | 66 | mLatestRGBA = 0; 67 | mLatestRingbuffer = 0; 68 | mLatestRetrieved = false; 69 | 70 | for( uint32_t n=0; n < NUM_RINGBUFFERS; n++ ) 71 | { 72 | mRingbufferCPU[n] = NULL; 73 | mRingbufferGPU[n] = NULL; 74 | mRGBA[n] = NULL; 75 | } 76 | 77 | mRGBAZeroCopy = false; 78 | } 79 | 80 | 81 | // destructor 82 | gstCamera::~gstCamera() 83 | { 84 | Close(); 85 | 86 | for( uint32_t n=0; n < NUM_RINGBUFFERS; n++ ) 87 | { 88 | // free capture buffer 89 | if( mRingbufferCPU[n] != NULL ) 90 | { 91 | CUDA(cudaFreeHost(mRingbufferCPU[n])); 92 | 93 | mRingbufferCPU[n] = NULL; 94 | mRingbufferGPU[n] = NULL; 95 | } 96 | 97 | // free convert buffer 98 | if( mRGBA[n] != NULL ) 99 | { 100 | if( mRGBAZeroCopy ) 101 | CUDA(cudaFreeHost(mRGBA[n])); 102 | else 103 | CUDA(cudaFree(mRGBA[n])); 104 | 105 | mRGBA[n] = 
NULL; 106 | } 107 | } 108 | } 109 | 110 | 111 | // onEOS 112 | void gstCamera::onEOS(_GstAppSink* sink, void* user_data) 113 | { 114 | printf(LOG_GSTREAMER "gstCamera onEOS\n"); 115 | } 116 | 117 | 118 | // onPreroll 119 | GstFlowReturn gstCamera::onPreroll(_GstAppSink* sink, void* user_data) 120 | { 121 | printf(LOG_GSTREAMER "gstCamera onPreroll\n"); 122 | return GST_FLOW_OK; 123 | } 124 | 125 | 126 | // onBuffer 127 | GstFlowReturn gstCamera::onBuffer(_GstAppSink* sink, void* user_data) 128 | { 129 | //printf(LOG_GSTREAMER "gstCamera onBuffer\n"); 130 | 131 | if( !user_data ) 132 | return GST_FLOW_OK; 133 | 134 | gstCamera* dec = (gstCamera*)user_data; 135 | 136 | //->Disable CSI check 137 | 138 | dec->checkBuffer(); 139 | 140 | dec->checkMsgBus(); 141 | return GST_FLOW_OK; 142 | } 143 | 144 | 145 | // Capture 146 | bool gstCamera::Capture( void** cpu, void** cuda, uint64_t timeout ) 147 | { 148 | // confirm the camera is streaming 149 | if( !mStreaming ) 150 | { 151 | if( !Open() ) 152 | return false; 153 | } 154 | 155 | // wait until a new frame is recieved 156 | if( !mWaitEvent.Wait(timeout) ) 157 | return false; 158 | 159 | // get the latest ringbuffer 160 | mRingMutex.Lock(); 161 | const uint32_t latest = mLatestRingbuffer; 162 | const bool retrieved = mLatestRetrieved; 163 | mLatestRetrieved = true; 164 | mRingMutex.Unlock(); 165 | 166 | // skip if it was already retrieved 167 | if( retrieved ) 168 | return false; 169 | 170 | // set output pointers 171 | if( cpu != NULL ) 172 | *cpu = mRingbufferCPU[latest]; 173 | 174 | if( cuda != NULL ) 175 | *cuda = mRingbufferGPU[latest]; 176 | 177 | return true; 178 | } 179 | 180 | 181 | // CaptureRGBA 182 | bool gstCamera::CaptureRGBA( float** output, unsigned long timeout, bool zeroCopy ) 183 | { 184 | void* cpu = NULL; 185 | void* gpu = NULL; 186 | 187 | if( !Capture(&cpu, &gpu, timeout) ) 188 | { 189 | printf(LOG_GSTREAMER "gstCamera failed to capture frame\n"); 190 | return false; 191 | } 192 | 193 | if( 
!ConvertRGBA(gpu, output, zeroCopy) ) 194 | { 195 | printf(LOG_GSTREAMER "gstCamera failed to convert frame to RGBA\n"); 196 | return false; 197 | } 198 | 199 | return true; 200 | } 201 | 202 | 203 | // ConvertRGBA 204 | bool gstCamera::ConvertRGBA( void* input, float** output, bool zeroCopy ) 205 | { 206 | if( !input || !output ) 207 | return false; 208 | 209 | // check if the buffers were previously allocated with a different zeroCopy option 210 | // if necessary, free them so they can be re-allocated with the correct option 211 | if( mRGBA[0] != NULL && zeroCopy != mRGBAZeroCopy ) 212 | { 213 | for( uint32_t n=0; n < NUM_RINGBUFFERS; n++ ) 214 | { 215 | if( mRGBA[n] != NULL ) 216 | { 217 | if( mRGBAZeroCopy ) 218 | CUDA(cudaFreeHost(mRGBA[n])); 219 | else 220 | CUDA(cudaFree(mRGBA[n])); 221 | 222 | mRGBA[n] = NULL; 223 | } 224 | } 225 | 226 | mRGBAZeroCopy = false; // reset for sanity 227 | } 228 | 229 | // check if the buffers need allocated 230 | if( !mRGBA[0] ) 231 | { 232 | const size_t size = mWidth * mHeight * sizeof(float4); 233 | 234 | for( uint32_t n=0; n < NUM_RINGBUFFERS; n++ ) 235 | { 236 | if( zeroCopy ) 237 | { 238 | void* cpuPtr = NULL; 239 | void* gpuPtr = NULL; 240 | 241 | if( !cudaAllocMapped(&cpuPtr, &gpuPtr, size) ) 242 | { 243 | printf(LOG_GSTREAMER "gstCamera -- failed to allocate zeroCopy memory for %ux%xu RGBA texture\n", mWidth, mHeight); 244 | return false; 245 | } 246 | 247 | if( cpuPtr != gpuPtr ) 248 | { 249 | printf(LOG_GSTREAMER "gstCamera -- zeroCopy memory has different pointers, please use a UVA-compatible GPU\n"); 250 | return false; 251 | } 252 | 253 | mRGBA[n] = gpuPtr; 254 | } 255 | else 256 | { 257 | if( CUDA_FAILED(cudaMalloc(&mRGBA[n], size)) ) 258 | { 259 | printf(LOG_GSTREAMER "gstCamera -- failed to allocate memory for %ux%u RGBA texture\n", mWidth, mHeight); 260 | return false; 261 | } 262 | } 263 | } 264 | 265 | printf(LOG_GSTREAMER "gstCamera -- allocated %u RGBA ringbuffers\n", NUM_RINGBUFFERS); 266 | mRGBAZeroCopy 
= zeroCopy; 267 | } 268 | 269 | if( csiCamera() ) 270 | { 271 | // MIPI CSI camera is NV12 272 | if( CUDA_FAILED(cudaNV12ToRGBA32((uint8_t*)input, (float4*)mRGBA[mLatestRGBA], mWidth, mHeight)) ) 273 | return false; 274 | } 275 | else 276 | { 277 | // V4L2 webcam is RGB 278 | 279 | printf("webcam is rgb ..................................."); 280 | if( CUDA_FAILED(cudaRGB8ToRGBA32((uchar3*)input, (float4*)mRGBA[mLatestRGBA], mWidth, mHeight)) ) 281 | return false; 282 | } 283 | 284 | *output = (float*)mRGBA[mLatestRGBA]; 285 | mLatestRGBA = (mLatestRGBA + 1) % NUM_RINGBUFFERS; 286 | return true; 287 | } 288 | 289 | 290 | #define release_return { gst_sample_unref(gstSample); return; } 291 | 292 | 293 | // checkBuffer 294 | void gstCamera::checkBuffer() 295 | { 296 | if( !mAppSink ) 297 | return; 298 | 299 | // block waiting for the buffer 300 | GstSample* gstSample = gst_app_sink_pull_sample(mAppSink); 301 | 302 | if( !gstSample ) 303 | { 304 | printf(LOG_GSTREAMER "gstCamera -- gst_app_sink_pull_sample() returned NULL...\n"); 305 | return; 306 | } 307 | 308 | GstBuffer* gstBuffer = gst_sample_get_buffer(gstSample); 309 | 310 | if( !gstBuffer ) 311 | { 312 | printf(LOG_GSTREAMER "gstCamera -- gst_sample_get_buffer() returned NULL...\n"); 313 | return; 314 | } 315 | 316 | // retrieve 317 | GstMapInfo map; 318 | 319 | if( !gst_buffer_map(gstBuffer, &map, GST_MAP_READ) ) 320 | { 321 | printf(LOG_GSTREAMER "gstCamera -- gst_buffer_map() failed...\n"); 322 | return; 323 | } 324 | 325 | //gst_util_dump_mem(map.data, map.size); 326 | 327 | void* gstData = map.data; //GST_BUFFER_DATA(gstBuffer); 328 | const uint32_t gstSize = map.size; //GST_BUFFER_SIZE(gstBuffer); 329 | 330 | if( !gstData ) 331 | { 332 | printf(LOG_GSTREAMER "gstCamera -- gst_buffer had NULL data pointer...\n"); 333 | release_return; 334 | } 335 | 336 | // retrieve caps 337 | GstCaps* gstCaps = gst_sample_get_caps(gstSample); 338 | 339 | if( !gstCaps ) 340 | { 341 | printf(LOG_GSTREAMER "gstCamera -- 
gst_buffer had NULL caps...\n"); 342 | release_return; 343 | } 344 | 345 | GstStructure* gstCapsStruct = gst_caps_get_structure(gstCaps, 0); 346 | 347 | if( !gstCapsStruct ) 348 | { 349 | printf(LOG_GSTREAMER "gstCamera -- gst_caps had NULL structure...\n"); 350 | release_return; 351 | } 352 | 353 | // get width & height of the buffer 354 | int width = 0; 355 | int height = 0; 356 | 357 | if( !gst_structure_get_int(gstCapsStruct, "width", &width) || 358 | !gst_structure_get_int(gstCapsStruct, "height", &height) ) 359 | { 360 | printf(LOG_GSTREAMER "gstCamera -- gst_caps missing width/height...\n"); 361 | release_return; 362 | } 363 | 364 | if( width < 1 || height < 1 ) 365 | release_return; 366 | 367 | mWidth = width; 368 | mHeight = height; 369 | mDepth = (gstSize * 8) / (width * height); 370 | mSize = gstSize; 371 | 372 | //printf(LOG_GSTREAMER "gstCamera recieved %ix%i frame (%u bytes, %u bpp)\n", width, height, gstSize, mDepth); 373 | 374 | // make sure ringbuffer is allocated 375 | if( !mRingbufferCPU[0] ) 376 | { 377 | for( uint32_t n=0; n < NUM_RINGBUFFERS; n++ ) 378 | { 379 | if( !cudaAllocMapped(&mRingbufferCPU[n], &mRingbufferGPU[n], gstSize) ) 380 | printf(LOG_GSTREAMER "gstCamera -- failed to allocate ringbuffer %u (size=%u)\n", n, gstSize); 381 | } 382 | 383 | printf(LOG_GSTREAMER "gstCamera -- allocated %u ringbuffers, %u bytes each\n", NUM_RINGBUFFERS, gstSize); 384 | } 385 | 386 | // copy to next ringbuffer 387 | const uint32_t nextRingbuffer = (mLatestRingbuffer + 1) % NUM_RINGBUFFERS; 388 | 389 | //printf(LOG_GSTREAMER "gstCamera -- using ringbuffer #%u for next frame\n", nextRingbuffer); 390 | memcpy(mRingbufferCPU[nextRingbuffer], gstData, gstSize); 391 | gst_buffer_unmap(gstBuffer, &map); 392 | //gst_buffer_unref(gstBuffer); 393 | gst_sample_unref(gstSample); 394 | 395 | 396 | // update and signal sleeping threads 397 | mRingMutex.Lock(); 398 | mLatestRingbuffer = nextRingbuffer; 399 | mLatestRetrieved = false; 400 | mRingMutex.Unlock(); 401 | 
mWaitEvent.Wake(); 402 | } 403 | 404 | 405 | // buildLaunchStr 406 | bool gstCamera::buildLaunchStr( gstCameraSrc src ) 407 | { 408 | // gst-launch-1.0 nvcamerasrc fpsRange="30.0 30.0" ! 'video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, format=(string)I420, framerate=(fraction)30/1' ! \ 409 | // nvvidconv flip-method=2 ! 'video/x-raw(memory:NVMM), format=(string)I420' ! fakesink silent=false -v 410 | // #define CAPS_STR "video/x-raw(memory:NVMM), width=(int)2592, height=(int)1944, format=(string)I420, framerate=(fraction)30/1" 411 | // #define CAPS_STR "video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, format=(string)I420, framerate=(fraction)30/1" 412 | std::ostringstream ss; 413 | /* 414 | 415 | if( csiCamera() && src != GST_SOURCE_V4L2 ) 416 | { 417 | mSource = src; // store camera source method 418 | 419 | #if NV_TENSORRT_MAJOR > 1 && NV_TENSORRT_MAJOR < 5 // if JetPack 3.1-3.3 (different flip-method) 420 | const int flipMethod = 0; // Xavier (w/TRT5) camera is mounted inverted 421 | #else 422 | const int flipMethod = 2; 423 | #endif 424 | 425 | if( src == GST_SOURCE_NVCAMERA ) 426 | ss << "nvcamerasrc fpsRange=\"30.0 30.0\" ! video/x-raw(memory:NVMM), width=(int)" << mWidth << ", height=(int)" << mHeight << ", format=(string)NV12 ! nvvidconv flip-method=" << flipMethod << " ! "; //'video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, format=(string)I420, framerate=(fraction)30/1' ! "; 427 | else if( src == GST_SOURCE_NVARGUS ) 428 | ss << "nvarguscamerasrc sensor-id=" << mSensorCSI << " ! video/x-raw(memory:NVMM), width=(int)" << mWidth << ", height=(int)" << mHeight << ", framerate=30/1, format=(string)NV12 ! nvvidconv flip-method=" << flipMethod << " ! "; 429 | 430 | ss << "video/x-raw ! appsink name=mysink"; 431 | } 432 | else 433 | { 434 | ss << "v4l2src device=" << mCameraStr << " ! 
"; 435 | ss << "video/x-raw, width=(int)" << mWidth << ", height=(int)" << mHeight << ", "; 436 | 437 | #if NV_TENSORRT_MAJOR >= 5 438 | ss << "format=YUY2 ! videoconvert ! video/x-raw, format=RGB ! videoconvert !"; 439 | #else 440 | ss << "format=RGB ! videoconvert ! video/x-raw, format=RGB ! videoconvert !"; 441 | #endif 442 | 443 | ss << "appsink name=mysink"; 444 | 445 | mSource = GST_SOURCE_V4L2; 446 | } 447 | */ 448 | //ss<<"rtspsrc location=rtsp://admin:K3Y@H1K.C4M3R4@192.168.40.232:554 ! rtph264depay ! h264parse ! omxh264dec ! appsink name=mysink"; 449 | ss<<"rtspsrc location=rtsp://192.168.40.232:554/Streaming/Channels/101/ user-id=admin user-pw=K3Y@H1K.C4M3R4 protocols=tcp latency=100 ! queue ! rtph264depay ! h264parse ! nvdec ! appsink name=mysink"; 450 | //ss<<"rtspsrc location=rtsp://192.168.1.110:554/Streaming/Channels/101/ user-id=admin user-pw=qqq12345 protocols=tcp latency=100 ! queue ! rtph264depay ! h264parse ! ! appsink name=mysink"; 451 | 452 | mLaunchStr = ss.str(); 453 | 454 | printf(LOG_GSTREAMER "gstCamera pipeline string:\n"); 455 | printf("%s\n", mLaunchStr.c_str()); 456 | return true; 457 | } 458 | 459 | 460 | // parseCameraStr 461 | bool gstCamera::parseCameraStr( const char* camera ) 462 | { 463 | if( !camera || strlen(camera) == 0 ) 464 | { 465 | mSensorCSI = 0; 466 | mCameraStr = "0"; 467 | return true; 468 | } 469 | 470 | mCameraStr = camera; 471 | 472 | // check if the string is a V4L2 device 473 | const char* prefixV4L2 = "/dev/video"; 474 | 475 | const size_t prefixLength = strlen(prefixV4L2); 476 | const size_t cameraLength = strlen(camera); 477 | 478 | if( cameraLength < prefixLength ) 479 | { 480 | const int result = sscanf(camera, "%i", &mSensorCSI); 481 | 482 | if( result == 1 && mSensorCSI >= 0 ) 483 | return true; 484 | } 485 | else if( strncmp(camera, prefixV4L2, prefixLength) == 0 ) 486 | { 487 | return true; 488 | } 489 | 490 | printf(LOG_GSTREAMER "gstCamera::Create('%s') -- invalid camera device requested\n", camera); 
491 | return false; 492 | } 493 | 494 | 495 | // Create 496 | gstCamera* gstCamera::Create( uint32_t width, uint32_t height, const char* camera ) 497 | { 498 | if( !gstreamerInit() ) 499 | { 500 | printf(LOG_GSTREAMER "failed to initialize gstreamer API\n"); 501 | return NULL; 502 | } 503 | 504 | gstCamera* cam = new gstCamera(); 505 | 506 | if( !cam ) 507 | return NULL; 508 | 509 | if( !cam->parseCameraStr(camera) ) 510 | return NULL; 511 | 512 | cam->mWidth = width; 513 | cam->mHeight = height; 514 | 515 | cam->mDepth = 12; 516 | 517 | //->Deactivate default cam->mDepth 518 | 519 | cam->mDepth = 12;//cam->csiCamera() ? 12 : 24; // NV12 or RGB 520 | cam->mSize = (width * height * cam->mDepth) / 8; 521 | 522 | if( !cam->init(GST_SOURCE_NVARGUS) ) 523 | { 524 | printf(LOG_GSTREAMER "failed to init gstCamera (GST_SOURCE_NVARGUS, camera %s)\n", cam->mCameraStr.c_str()); 525 | 526 | if( !cam->init(GST_SOURCE_NVCAMERA) ) 527 | { 528 | printf(LOG_GSTREAMER "failed to init gstCamera (GST_SOURCE_NVCAMERA, camera %s)\n", cam->mCameraStr.c_str()); 529 | 530 | if( cam->mSensorCSI >= 0 ) 531 | cam->mSensorCSI = -1; 532 | 533 | if( !cam->init(GST_SOURCE_V4L2) ) 534 | { 535 | printf(LOG_GSTREAMER "failed to init gstCamera (GST_SOURCE_V4L2, camera %s)\n", cam->mCameraStr.c_str()); 536 | return NULL; 537 | } 538 | } 539 | } 540 | 541 | printf(LOG_GSTREAMER "gstCamera successfully initialized with %s, camera %s\n", gstCameraSrcToString(cam->mSource), cam->mCameraStr.c_str()); 542 | return cam; 543 | } 544 | 545 | 546 | // Create 547 | gstCamera* gstCamera::Create( const char* camera ) 548 | { 549 | return Create( DefaultWidth, DefaultHeight, camera ); 550 | } 551 | 552 | 553 | // init 554 | bool gstCamera::init( gstCameraSrc src ) 555 | { 556 | GError* err = NULL; 557 | printf(LOG_GSTREAMER "gstCamera attempting to initialize with %s, camera %s\n", gstCameraSrcToString(src), mCameraStr.c_str()); 558 | 559 | // build pipeline string 560 | if( !buildLaunchStr(src) ) 561 | { 562 | 
printf(LOG_GSTREAMER "gstCamera failed to build pipeline string ..............................................\n"); 563 | return false; 564 | } 565 | 566 | // launch pipeline 567 | mPipeline = gst_parse_launch(mLaunchStr.c_str(), &err); 568 | 569 | if( err != NULL ) 570 | { 571 | printf(LOG_GSTREAMER "gstCamera failed to create pipeline .........................................................\n"); 572 | printf(LOG_GSTREAMER " (%s)\n", err->message); 573 | g_error_free(err); 574 | return false; 575 | } 576 | 577 | GstPipeline* pipeline = GST_PIPELINE(mPipeline); 578 | 579 | if( !pipeline ) 580 | { 581 | printf(LOG_GSTREAMER "gstCamera failed to cast GstElement into GstPipeline\n"); 582 | return false; 583 | } 584 | 585 | // retrieve pipeline bus 586 | /*GstBus**/ mBus = gst_pipeline_get_bus(pipeline); 587 | 588 | if( !mBus ) 589 | { 590 | printf(LOG_GSTREAMER "gstCamera failed to retrieve GstBus from pipeline\n"); 591 | return false; 592 | } 593 | 594 | // add watch for messages (disabled when we poll the bus ourselves, instead of gmainloop) 595 | //gst_bus_add_watch(mBus, (GstBusFunc)gst_message_print, NULL); 596 | 597 | // get the appsrc 598 | GstElement* appsinkElement = gst_bin_get_by_name(GST_BIN(pipeline), "mysink"); 599 | GstAppSink* appsink = GST_APP_SINK(appsinkElement); 600 | 601 | if( !appsinkElement || !appsink) 602 | { 603 | printf(LOG_GSTREAMER "gstCamera failed to retrieve AppSink element from pipeline\n"); 604 | return false; 605 | } 606 | 607 | mAppSink = appsink; 608 | 609 | // setup callbacks 610 | GstAppSinkCallbacks cb; 611 | memset(&cb, 0, sizeof(GstAppSinkCallbacks)); 612 | 613 | cb.eos = onEOS; 614 | cb.new_preroll = onPreroll; 615 | cb.new_sample = onBuffer; 616 | 617 | gst_app_sink_set_callbacks(mAppSink, &cb, (void*)this, NULL); 618 | 619 | return true; 620 | } 621 | 622 | 623 | // Open 624 | bool gstCamera::Open() 625 | { 626 | if( mStreaming ) 627 | return true; 628 | 629 | // transition pipline to STATE_PLAYING 630 | 
printf(LOG_GSTREAMER "opening gstCamera for streaming, transitioning pipeline to GST_STATE_PLAYING\n"); 631 | 632 | const GstStateChangeReturn result = gst_element_set_state(mPipeline, GST_STATE_PLAYING); 633 | 634 | if( result == GST_STATE_CHANGE_ASYNC ) 635 | { 636 | #if 0 637 | GstMessage* asyncMsg = gst_bus_timed_pop_filtered(mBus, 5 * GST_SECOND, 638 | (GstMessageType)(GST_MESSAGE_ASYNC_DONE|GST_MESSAGE_ERROR)); 639 | 640 | if( asyncMsg != NULL ) 641 | { 642 | gst_message_print(mBus, asyncMsg, this); 643 | gst_message_unref(asyncMsg); 644 | } 645 | else 646 | printf(LOG_GSTREAMER "gstCamera NULL message after transitioning pipeline to PLAYING...\n"); 647 | #endif 648 | } 649 | else if( result != GST_STATE_CHANGE_SUCCESS ) 650 | { 651 | printf(LOG_GSTREAMER "gstCamera failed to set pipeline state to PLAYING (error %u)\n", result); 652 | return false; 653 | } 654 | 655 | checkMsgBus(); 656 | usleep(100*1000); 657 | checkMsgBus(); 658 | 659 | mStreaming = true; 660 | return true; 661 | } 662 | 663 | 664 | // Close 665 | void gstCamera::Close() 666 | { 667 | if( !mStreaming ) 668 | return; 669 | 670 | // stop pipeline 671 | printf(LOG_GSTREAMER "closing gstCamera for streaming, transitioning pipeline to GST_STATE_NULL\n"); 672 | 673 | const GstStateChangeReturn result = gst_element_set_state(mPipeline, GST_STATE_NULL); 674 | 675 | if( result != GST_STATE_CHANGE_SUCCESS ) 676 | printf(LOG_GSTREAMER "gstCamera failed to set pipeline state to PLAYING (error %u)\n", result); 677 | 678 | usleep(250*1000); 679 | mStreaming = false; 680 | } 681 | 682 | 683 | // checkMsgBus 684 | void gstCamera::checkMsgBus() 685 | { 686 | while(true) 687 | { 688 | GstMessage* msg = gst_bus_pop(mBus); 689 | 690 | if( !msg ) 691 | break; 692 | 693 | gst_message_print(mBus, msg, this); 694 | gst_message_unref(msg); 695 | } 696 | } 697 | 698 | -------------------------------------------------------------------------------- /gstCamera.h: 
-------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a 5 | * copy of this software and associated documentation files (the "Software"), 6 | * to deal in the Software without restriction, including without limitation 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | * and/or sell copies of the Software, and to permit persons to whom the 9 | * Software is furnished to do so, subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in 12 | * all copies or substantial portions of the Software. 13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | * DEALINGS IN THE SOFTWARE. 
21 | */ 22 | 23 | #ifndef __GSTREAMER_CAMERA_H__ 24 | #define __GSTREAMER_CAMERA_H__ 25 | 26 | #include 27 | #include 28 | 29 | #include "Mutex.h" 30 | #include "Event.h" 31 | 32 | 33 | // Forward declarations 34 | struct _GstAppSink; 35 | 36 | 37 | /** 38 | * Enumeration of camera input source methods 39 | * @ingroup gstCamera 40 | */ 41 | enum gstCameraSrc 42 | { 43 | GST_SOURCE_NVCAMERA, /* use nvcamerasrc element */ 44 | GST_SOURCE_NVARGUS, /* use nvargussrc element */ 45 | GST_SOURCE_V4L2 /* use v4l2src element */ 46 | }; 47 | 48 | /** 49 | * Stringize function to convert gstCameraSrc enum to text 50 | * @ingroup gstCamera 51 | */ 52 | const char* gstCameraSrcToString( gstCameraSrc src ); 53 | 54 | 55 | /** 56 | * MIPI CSI and V4L2 camera capture using GStreamer and `nvarguscamerasrc` or `v4l2src` elements. 57 | * gstCamera supports both MIPI CSI cameras and V4L2-compliant devices like USB webcams. 58 | * 59 | * Examples of MIPI CSI cameras that work out of the box are the OV5693 module from the 60 | * Jetson TX1/TX2 devkits, and the IMX219 sensor from the Raspberry Pi Camera Module v2. 61 | * 62 | * For MIPI CSI cameras, the GStreamer element `nvarguscamerasrc` will be used for capture. 63 | * For V4L2 devices, the GStreamer element `v4l2src` will be used for camera capture. 64 | * 65 | * gstCamera uses CUDA underneath for any necessary colorspace conversion, and provides 66 | * the captured image frames in CUDA device memory, or zero-copy shared CPU/GPU memory. 67 | * 68 | * @ingroup gstCamera 69 | */ 70 | class gstCamera 71 | { 72 | public: 73 | /** 74 | * Create a MIPI CSI or V4L2 camera device. 75 | * 76 | * gstCamera will use the `nvarguscamerasrc` GStreamer element for MIPI CSI cameras, 77 | * and the `v4l2src` GStreamer element for capturing V4L2 cameras, like USB webcams. 78 | * 79 | * The camera will be created with a resolution indicated by gstCamera::DefaultWidth 80 | * and gstCamera::DefaultHeight (1280x720 by default). 
81 | * 82 | * @param camera Camera device to use. If using MIPI CSI, this string can be `NULL` 83 | * to default to CSI camera 0, otherwise the string should contain the 84 | * device index of the CSI camera (e.g. `"0"` for CSI camera 0 or `"1"` 85 | * for CSI camera 1, ect). If using V4L2, the string should contain 86 | * the `/dev/video` node to use (e.g. `"/dev/video0"` for V4L2 camera 0). 87 | * By default, `camera` parameter is NULL and MIPI CSI camera 0 is used. 88 | * 89 | * @returns A pointer to the created gstCamera device, or NULL if there was an error. 90 | */ 91 | static gstCamera* Create( const char* camera=NULL ); // use MIPI CSI camera by default 92 | 93 | /** 94 | * Create a MIPI CSI or V4L2 camera device. 95 | * 96 | * gstCamera will use the `nvarguscamerasrc` GStreamer element for MIPI CSI cameras, 97 | * and the `v4l2src` GStreamer element for capturing V4L2 cameras, like USB webcams. 98 | * 99 | * @param width desired width (in pixels) of the camera resolution. 100 | * This should be from a format that the camera supports. 101 | * 102 | * @param height desired height (in pixels) of the camera resolution. 103 | * This should be from a format that the camera supports. 104 | * 105 | * @param camera Camera device to use. If using MIPI CSI, this string can be `NULL` 106 | * to default to CSI camera 0, otherwise the string should contain the 107 | * device index of the CSI camera (e.g. `"0"` for CSI camera 0 or `"1"` 108 | * for CSI camera 1, ect). If using V4L2, the string should contain 109 | * the `/dev/video` node to use (e.g. `"/dev/video0"` for V4L2 camera 0). 110 | * By default, `camera` parameter is NULL and MIPI CSI camera 0 is used. 111 | * 112 | * @returns A pointer to the created gstCamera device, or NULL if there was an error. 113 | */ 114 | static gstCamera* Create( uint32_t width, uint32_t height, const char* camera=NULL ); 115 | 116 | /** 117 | * Release the camera interface and resources. 
118 | * Destroying the camera will also Close() the stream if it is still open. 119 | */ 120 | ~gstCamera(); 121 | 122 | /** 123 | * Begin streaming the camera. 124 | * After Open() is called, frames from the camera will begin to be captured. 125 | * 126 | * Open() is not stricly necessary to call, if you call one of the Capture() 127 | * functions they will first check to make sure that the stream is opened, 128 | * and if not they will open it automatically for you. 129 | * 130 | * @returns `true` on success, `false` if an error occurred opening the stream. 131 | */ 132 | bool Open(); 133 | 134 | /** 135 | * Stop streaming the camera. 136 | * @note Close() is automatically called by the camera's destructor when 137 | * it gets deleted, so you do not explicitly need to call Close() before 138 | * exiting the program if you delete your camera object. 139 | */ 140 | void Close(); 141 | 142 | /** 143 | * Check if the camera is streaming or not. 144 | * @returns `true` if the camera is streaming (open), or `false` if it's closed. 145 | */ 146 | inline bool IsStreaming() const { return mStreaming; } 147 | 148 | /** 149 | * Capture the next image frame from the camera. 150 | * 151 | * For MIPI CSI cameras, Capture() will provide an image in YUV (NV12) format. 152 | * For V4L2 devices, Capture() will provide an image in RGB (24-bit) format. 153 | * 154 | * The captured images reside in shared CPU/GPU memory, also known as CUDA 155 | * mapped memory or zero-copy memory. Hence it is unnessary to copy them to GPU. 156 | * This memory is managed internally by gstCamera, so don't attempt to free it. 157 | * 158 | * @param[out] cpu Pointer that gets returned to the image in CPU address space. 159 | * @param[out] cuda Pointer that gets returned to the image in GPU address space. 160 | * 161 | * @param[in] timeout The time in milliseconds for the calling thread to wait to 162 | * return if a new camera frame isn't recieved by that time. 
163 | * If timeout is 0, the calling thread will return immediately 164 | * if a new frame isn't already available. 165 | * If timeout is UINT64_MAX, the calling thread will wait 166 | * indefinetly for a new frame to arrive (this is the default behavior). 167 | * 168 | * @returns `true` if a frame was successfully captured, otherwise `false` if a timeout 169 | * or error occurred, or if timeout was 0 and a frame wasn't ready. 170 | */ 171 | bool Capture( void** cpu, void** cuda, uint64_t timeout=UINT64_MAX ); 172 | 173 | /** 174 | * Capture the next image frame from the camera and convert it to float4 RGBA format, 175 | * with pixel intensities ranging between 0.0 and 255.0. 176 | * 177 | * Internally, CaptureRGBA() first calls Capture() and then ConvertRGBA(). 178 | * The ConvertRGBA() function uses CUDA, so if you want to capture from a different 179 | * thread than your CUDA device, use the Capture() and ConvertRGBA() functions. 180 | * 181 | * @param[out] image Pointer that gets returned to the image in GPU address space, 182 | * or if the zeroCopy parameter is true, then the pointer is valid 183 | * in both CPU and GPU address spaces. Do not manually free the image memory, 184 | * it is managed internally. The image is in float4 RGBA format. 185 | * The size of the image is: `GetWidth() * GetHeight() * sizeof(float) * 4` 186 | * 187 | * @param[in] timeout The time in milliseconds for the calling thread to wait to 188 | * return if a new camera frame isn't recieved by that time. 189 | * If timeout is 0, the calling thread will return immediately 190 | * if a new frame isn't already available. 191 | * If timeout is UINT64_MAX, the calling thread will wait 192 | * indefinetly for a new frame to arrive (this is the default behavior). 193 | * 194 | * @param[in] zeroCopy If `true`, the image will reside in shared CPU/GPU memory. 195 | * If `false`, the image will only be accessible from the GPU. 
196 | * You would need to set zeroCopy to `true` if you wanted to 197 | * access the image pixels from the CPU. Since this isn't 198 | * generally the case, the default is `false` (GPU only). 199 | * 200 | * @returns `true` if a frame was successfully captured, otherwise `false` if a timeout 201 | * or error occurred, or if timeout was 0 and a frame wasn't ready. 202 | */ 203 | bool CaptureRGBA( float** image, uint64_t timeout=UINT64_MAX, bool zeroCopy=false ); 204 | 205 | /** 206 | * Convert an image to float4 RGBA that was previously aquired with Capture(). 207 | * This function uses CUDA to perform the colorspace conversion to float4 RGBA, 208 | * with pixel intensities ranging from 0.0 to 255.0. 209 | * 210 | * @param[in] input Pointer to the input image, typically the pointer from Capture(). 211 | * If this is a MIPI CSI camera, it's expected to be in YUV (NV12) format. 212 | * If this is a V4L2 device, it's expected to be in RGB (24-bit) format. 213 | * In both cases, these are the formats that Capture() provides the image in. 214 | * 215 | * @param[out] output Pointer that gets returned to the image in GPU address space, 216 | * or if the zeroCopy parameter is true, then the pointer is valid 217 | * in both CPU and GPU address spaces. Do not manually free the image memory, 218 | * it is managed internally. The image is in float4 RGBA format. 219 | * The size of the image is: `GetWidth() * GetHeight() * sizeof(float) * 4` 220 | * 221 | * @param[in] zeroCopy If `true`, the image will reside in shared CPU/GPU memory. 222 | * If `false`, the image will only be accessible from the GPU. 223 | * You would need to set zeroCopy to `true` if you wanted to 224 | * access the image pixels from the CPU. Since this isn't 225 | * generally the case, the default is `false` (GPU only). 226 | * 227 | * @returns `true` on success, `false` if an error occurred. 
228 | */ 229 | bool ConvertRGBA( void* input, float** output, bool zeroCopy=false ); 230 | 231 | /** 232 | * Return the width of the camera. 233 | */ 234 | inline uint32_t GetWidth() const { return mWidth; } 235 | 236 | /** 237 | * Return the height of the camera. 238 | */ 239 | inline uint32_t GetHeight() const { return mHeight; } 240 | 241 | /** 242 | * Return the pixel bit depth of the camera (measured in bits). 243 | * This will be 12 for MIPI CSI cameras (YUV NV12 format) 244 | * or 24 for VL42 cameras (RGB 24-bit). 245 | */ 246 | inline uint32_t GetPixelDepth() const { return mDepth; } 247 | 248 | /** 249 | * Return the size (in bytes) of a camera frame from Capture(). 250 | * 251 | * @note this is not the size of the converted float4 RGBA image 252 | * from Convert(), but rather the YUV (NV12) or RGB (24-bit) 253 | * image that gets aquired by the Capture() function. 254 | * To calculate the size of the converted float4 RGBA image, 255 | * take: `GetWidth() * GetHeight() * sizeof(float) * 4` 256 | */ 257 | inline uint32_t GetSize() const { return mSize; } 258 | 259 | /** 260 | * Default camera width, unless otherwise specified during Create() 261 | */ 262 | static const uint32_t DefaultWidth = 1280; 263 | 264 | /** 265 | * Default camera height, unless otherwise specified during Create() 266 | */ 267 | static const uint32_t DefaultHeight = 720; 268 | 269 | private: 270 | static void onEOS(_GstAppSink* sink, void* user_data); 271 | static GstFlowReturn onPreroll(_GstAppSink* sink, void* user_data); 272 | static GstFlowReturn onBuffer(_GstAppSink* sink, void* user_data); 273 | 274 | gstCamera(); 275 | 276 | bool init( gstCameraSrc src ); 277 | bool buildLaunchStr( gstCameraSrc src ); 278 | bool parseCameraStr( const char* camera ); 279 | 280 | void checkMsgBus(); 281 | void checkBuffer(); 282 | 283 | _GstBus* mBus; 284 | _GstAppSink* mAppSink; 285 | _GstElement* mPipeline; 286 | gstCameraSrc mSource; 287 | 288 | std::string mLaunchStr; 289 | std::string 
mCameraStr; 290 | 291 | uint32_t mWidth; 292 | uint32_t mHeight; 293 | uint32_t mDepth; 294 | uint32_t mSize; 295 | 296 | static const uint32_t NUM_RINGBUFFERS = 16; 297 | 298 | void* mRingbufferCPU[NUM_RINGBUFFERS]; 299 | void* mRingbufferGPU[NUM_RINGBUFFERS]; 300 | 301 | Event mWaitEvent; 302 | Mutex mWaitMutex; 303 | Mutex mRingMutex; 304 | 305 | uint32_t mLatestRGBA; 306 | uint32_t mLatestRingbuffer; 307 | bool mLatestRetrieved; 308 | 309 | void* mRGBA[NUM_RINGBUFFERS]; 310 | bool mRGBAZeroCopy; // were the RGBA buffers allocated with zeroCopy? 311 | bool mStreaming; // true if the device is currently open 312 | int mSensorCSI; // -1 for V4L2, >=0 for MIPI CSI 313 | 314 | inline bool csiCamera() const { return (mSensorCSI >= 0); } 315 | }; 316 | 317 | #endif 318 | -------------------------------------------------------------------------------- /gstDecoder.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a 5 | * copy of this software and associated documentation files (the "Software"), 6 | * to deal in the Software without restriction, including without limitation 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | * and/or sell copies of the Software, and to permit persons to whom the 9 | * Software is furnished to do so, subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in 12 | * all copies or substantial portions of the Software. 13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | * DEALINGS IN THE SOFTWARE. 21 | */ 22 | 23 | #include "gstDecoder.h" 24 | 25 | #include "filesystem.h" 26 | #include "timespec.h" 27 | 28 | #include 29 | #include 30 | 31 | #include 32 | #include 33 | #include 34 | 35 | 36 | 37 | // constructor 38 | gstDecoder::gstDecoder() 39 | { 40 | mAppSink = NULL; 41 | mBus = NULL; 42 | mPipeline = NULL; 43 | mPort = 0; 44 | mCodec = GST_CODEC_H264; 45 | } 46 | 47 | 48 | // destructor 49 | gstDecoder::~gstDecoder() 50 | { 51 | // stop pipeline 52 | printf(LOG_GSTREAMER "gstDecoder - shutting down pipeline\n"); 53 | printf(LOG_GSTREAMER "gstDecoder - transitioning pipeline to GST_STATE_NULL\n"); 54 | 55 | const GstStateChangeReturn result = gst_element_set_state(mPipeline, GST_STATE_NULL); 56 | 57 | if( result != GST_STATE_CHANGE_SUCCESS ) 58 | printf(LOG_GSTREAMER "gstDecoder - failed to stop pipeline (error %u)\n", result); 59 | 60 | sleepMs(250); 61 | 62 | printf(LOG_GSTREAMER "gstDecoder - pipeline shutdown complete\n"); 63 | } 64 | 65 | 66 | // Create 67 | gstDecoder* gstDecoder::Create( gstCodec codec, const char* filename ) 68 | { 69 | gstDecoder* dec = new gstDecoder(); 70 | 71 | if( !dec ) 72 | return NULL; 73 | 74 | if( !dec->init(codec, filename, NULL, 0) ) 75 | { 76 | printf(LOG_GSTREAMER "gstDecoder::Create() failed\n"); 77 | return NULL; 78 | } 79 | 80 | return dec; 81 | } 82 | 83 | 84 | // Create 85 | gstDecoder* gstDecoder::Create( gstCodec codec, uint16_t port ) 86 | { 87 | return Create(codec, NULL, port); 88 | } 89 | 90 | 91 | // Create 92 | gstDecoder* gstDecoder::Create( gstCodec codec, const char* multicastIP, uint16_t port ) 93 | { 94 | gstDecoder* dec = new gstDecoder(); 95 | 96 | if( !dec ) 97 | return NULL; 98 | 99 | if( !dec->init(codec, NULL, 
multicastIP, port) ) 100 | { 101 | printf(LOG_GSTREAMER "gstDecoder::Create() failed\n"); 102 | return NULL; 103 | } 104 | 105 | return dec; 106 | } 107 | 108 | 109 | // init 110 | bool gstDecoder::init( gstCodec codec, const char* filename, const char* multicastIP, uint16_t port ) 111 | { 112 | mCodec = codec; 113 | mInputPath = filename; 114 | mMulticastIP = multicastIP; 115 | mPort = port; 116 | GError* err = NULL; 117 | 118 | if( !filename && !multicastIP ) 119 | return false; 120 | 121 | // build pipeline string 122 | if( !buildLaunchStr() ) 123 | { 124 | printf(LOG_GSTREAMER "gstDecoder - failed to build pipeline string\n"); 125 | return false; 126 | } 127 | 128 | // create pipeline 129 | mPipeline = gst_parse_launch(mLaunchStr.c_str(), &err); 130 | 131 | if( err != NULL ) 132 | { 133 | printf(LOG_GSTREAMER "gstDecoder - failed to create pipeline\n"); 134 | printf(LOG_GSTREAMER " (%s)\n", err->message); 135 | g_error_free(err); 136 | return false; 137 | } 138 | 139 | GstPipeline* pipeline = GST_PIPELINE(mPipeline); 140 | 141 | if( !pipeline ) 142 | { 143 | printf(LOG_GSTREAMER "gstDecoder - failed to cast GstElement into GstPipeline\n"); 144 | return false; 145 | } 146 | 147 | // retrieve pipeline bus 148 | /*GstBus**/ mBus = gst_pipeline_get_bus(pipeline); 149 | 150 | if( !mBus ) 151 | { 152 | printf(LOG_GSTREAMER "gstDecoder - failed to retrieve GstBus from pipeline\n"); 153 | return false; 154 | } 155 | 156 | // add watch for messages (disabled when we poll the bus ourselves, instead of gmainloop) 157 | //gst_bus_add_watch(mBus, (GstBusFunc)gst_message_print, NULL); 158 | 159 | // get the appsrc 160 | GstElement* appsinkElement = gst_bin_get_by_name(GST_BIN(pipeline), "mysink"); 161 | GstAppSink* appsink = GST_APP_SINK(appsinkElement); 162 | 163 | if( !appsinkElement || !appsink) 164 | { 165 | printf(LOG_GSTREAMER "gstDecoder - failed to retrieve AppSink element from pipeline\n"); 166 | return false; 167 | } 168 | 169 | mAppSink = appsink; 170 | 171 | // 
setup callbacks 172 | GstAppSinkCallbacks cb; 173 | memset(&cb, 0, sizeof(GstAppSinkCallbacks)); 174 | 175 | cb.eos = onEOS; 176 | cb.new_preroll = onPreroll; 177 | #if GST_CHECK_VERSION(1,0,0) 178 | cb.new_sample = onBuffer; 179 | #else 180 | cb.new_buffer = onBuffer; 181 | #endif 182 | 183 | gst_app_sink_set_callbacks(mAppSink, &cb, (void*)this, NULL); 184 | 185 | 186 | // transition pipline to STATE_PLAYING 187 | printf(LOG_GSTREAMER "gstDecoder - transitioning pipeline to GST_STATE_PLAYING\n"); 188 | 189 | const GstStateChangeReturn result = gst_element_set_state(mPipeline, GST_STATE_PLAYING); 190 | 191 | if( result == GST_STATE_CHANGE_ASYNC ) 192 | { 193 | #if 0 194 | GstMessage* asyncMsg = gst_bus_timed_pop_filtered(mBus, 5 * GST_SECOND, 195 | (GstMessageType)(GST_MESSAGE_ASYNC_DONE|GST_MESSAGE_ERROR)); 196 | 197 | if( asyncMsg != NULL ) 198 | { 199 | gst_message_print(mBus, asyncMsg, this); 200 | gst_message_unref(asyncMsg); 201 | } 202 | else 203 | printf(LOG_GSTREAMER "gstDecoder - NULL message after transitioning pipeline to PLAYING...\n"); 204 | #endif 205 | } 206 | else if( result != GST_STATE_CHANGE_SUCCESS ) 207 | { 208 | printf(LOG_GSTREAMER "gstDecoder - failed to set pipeline state to PLAYING (error %u)\n", result); 209 | return false; 210 | } 211 | 212 | checkMsgBus(); 213 | sleepMs(100); 214 | checkMsgBus(); 215 | 216 | return true; 217 | } 218 | 219 | 220 | // buildLaunchStr 221 | bool gstDecoder::buildLaunchStr() 222 | { 223 | const size_t fileLen = mInputPath.size(); 224 | 225 | if( fileLen > 0 && mPort != 0 ) 226 | { 227 | printf(LOG_GSTREAMER "gstDecoder - can only use port %u or %s as input\n", mPort, mInputPath.c_str()); 228 | return false; 229 | } 230 | 231 | std::ostringstream ss; 232 | 233 | if( fileLen > 0 ) 234 | { 235 | ss << "filesrc location=" << mInputPath << " ! matroskademux ! queue ! "; 236 | 237 | if( mCodec == GST_CODEC_H264 ) 238 | ss << "h264parse ! "; 239 | else if( mCodec == GST_CODEC_H265 ) 240 | ss << "h265parse ! 
"; 241 | } 242 | else if( mPort != 0 ) 243 | { 244 | ss << "udpsrc port=" << mPort; 245 | 246 | if( mMulticastIP.length() > 0 ) 247 | ss << " multicast-group=" << mMulticastIP << " auto-multicast=true"; 248 | 249 | ss << " caps=\"" << "application/x-rtp,media=(string)video,clock-rate=(int)90000,encoding-name=(string)"; 250 | 251 | if( mCodec == GST_CODEC_H264 ) 252 | ss << "H264\" ! rtph264depay ! "; 253 | else if( mCodec == GST_CODEC_H265 ) 254 | ss << "H265\" ! rtph265depay ! "; 255 | } 256 | else 257 | return false; 258 | 259 | #if GST_CHECK_VERSION(1,0,0) 260 | if( mCodec == GST_CODEC_H264 ) 261 | ss << "omxh264dec ! "; 262 | else if( mCodec == GST_CODEC_H265 ) 263 | ss << "omxh265dec ! "; 264 | #else 265 | if( mCodec == GST_CODEC_H264 ) 266 | ss << "nv_omx_h264dec ! "; 267 | else if( mCodec == GST_CODEC_H265 ) 268 | ss << "nv_omx_h265dec ! "; 269 | #endif 270 | 271 | #define CAPS_STR "video/x-raw,format=(string)RGBA" 272 | //#define CAPS_STR "video/x-raw-yuv,format=(fourcc)NV12" 273 | 274 | ss << "nvvidconv ! \"" << CAPS_STR << "\" ! 
"; 275 | ss << "appsink name=mysink caps=\"" << CAPS_STR << "\""; 276 | 277 | mLaunchStr = ss.str(); 278 | 279 | printf(LOG_GSTREAMER "gstDecoder - pipeline string:\n"); 280 | printf("%s\n", mLaunchStr.c_str()); 281 | return true; 282 | } 283 | 284 | 285 | // onEOS 286 | void gstDecoder::onEOS( _GstAppSink* sink, void* user_data ) 287 | { 288 | printf(LOG_GSTREAMER "gstDecoder - onEOS()\n"); 289 | } 290 | 291 | 292 | // onPreroll 293 | GstFlowReturn gstDecoder::onPreroll( _GstAppSink* sink, void* user_data ) 294 | { 295 | printf(LOG_GSTREAMER "gstDecoder - onPreroll()\n"); 296 | return GST_FLOW_OK; 297 | } 298 | 299 | 300 | // onBuffer 301 | GstFlowReturn gstDecoder::onBuffer(_GstAppSink* sink, void* user_data) 302 | { 303 | printf(LOG_GSTREAMER "gstDecoder - onBuffer()\n"); 304 | 305 | if( !user_data ) 306 | return GST_FLOW_OK; 307 | 308 | gstDecoder* dec = (gstDecoder*)user_data; 309 | 310 | dec->checkBuffer(); 311 | dec->checkMsgBus(); 312 | return GST_FLOW_OK; 313 | } 314 | 315 | #if GST_CHECK_VERSION(1,0,0) 316 | #define release_return { gst_sample_unref(gstSample); return; } 317 | #else 318 | #define release_return { gst_buffer_unref(gstBuffer); return; } 319 | #endif 320 | 321 | // checkBuffer 322 | void gstDecoder::checkBuffer() 323 | { 324 | if( !mAppSink ) 325 | return; 326 | 327 | 328 | #if GST_CHECK_VERSION(1,0,0) 329 | // block waiting for the sample 330 | GstSample* gstSample = gst_app_sink_pull_sample(mAppSink); 331 | 332 | if( !gstSample ) 333 | { 334 | printf(LOG_GSTREAMER "gstDecoder - app_sink_pull_sample() returned NULL...\n"); 335 | return; 336 | } 337 | 338 | // retrieve sample caps 339 | GstCaps* gstCaps = gst_sample_get_caps(gstSample); 340 | 341 | if( !gstCaps ) 342 | { 343 | printf(LOG_GSTREAMER "gstDecoder - gst_sample had NULL caps...\n"); 344 | release_return; 345 | } 346 | 347 | // retrieve the buffer from the sample 348 | GstBuffer* gstBuffer = gst_sample_get_buffer(gstSample); 349 | 350 | if( !gstBuffer ) 351 | { 352 | 
printf(LOG_GSTREAMER "gstDecoder - app_sink_pull_sample() returned NULL...\n"); 353 | release_return; 354 | } 355 | 356 | // map the buffer memory for read access 357 | GstMapInfo map; 358 | 359 | if( !gst_buffer_map(gstBuffer, &map, GST_MAP_READ) ) 360 | { 361 | printf(LOG_GSTREAMER "gstDecoder - failed to map gstreamer buffer memory\n"); 362 | release_return; 363 | } 364 | 365 | const void* gstData = map.data; 366 | const guint gstSize = map.size; 367 | #else 368 | // block waiting for the buffer 369 | GstBuffer* gstBuffer = gst_app_sink_pull_buffer(mAppSink); 370 | 371 | if( !gstBuffer ) 372 | { 373 | printf(LOG_GSTREAMER "gstDecoder - app_sink_pull_buffer() returned NULL...\n"); 374 | return; 375 | } 376 | 377 | // retrieve data pointer 378 | void* gstData = GST_BUFFER_DATA(gstBuffer); 379 | const guint gstSize = GST_BUFFER_SIZE(gstBuffer); 380 | 381 | if( !gstData ) 382 | { 383 | printf(LOG_GSTREAMER "gstDecoder - gst_buffer had NULL data pointer...\n"); 384 | release_return; 385 | } 386 | 387 | // retrieve caps 388 | GstCaps* gstCaps = gst_buffer_get_caps(gstBuffer); 389 | 390 | if( !gstCaps ) 391 | { 392 | printf(LOG_GSTREAMER "gstDecoder - gst_buffer had NULL caps...\n"); 393 | release_return; 394 | } 395 | #endif 396 | // retrieve caps structure 397 | GstStructure* gstCapsStruct = gst_caps_get_structure(gstCaps, 0); 398 | 399 | if( !gstCapsStruct ) 400 | { 401 | printf(LOG_GSTREAMER "gstDecoder - gst_caps had NULL structure...\n"); 402 | release_return; 403 | } 404 | 405 | // retrieve the width and height of the buffer 406 | int width = 0; 407 | int height = 0; 408 | 409 | if( !gst_structure_get_int(gstCapsStruct, "width", &width) || 410 | !gst_structure_get_int(gstCapsStruct, "height", &height) ) 411 | { 412 | printf(LOG_GSTREAMER "gstDecoder - gst_caps missing width/height...\n"); 413 | release_return; 414 | } 415 | 416 | printf(LOG_GSTREAMER "gstDecoder - recieved %ix%i frame\n", width, height); 417 | 418 | if( width < 1 || height < 1 ) 419 | 
release_return; 420 | 421 | /*// alloc ringbuffer 422 | const DataType type(12, 1, false, false); // NV12 423 | 424 | if( !AllocRingbuffer2D(width, height, type) ) 425 | { 426 | printf(LOG_GASKET "gstreamer decoder -- failed to alloc %ix%i ringbuffer\n", width, height); 427 | release_return; 428 | }*/ 429 | 430 | #if GST_CHECK_VERSION(1,0,0) 431 | gst_buffer_unmap(gstBuffer, &map); 432 | #endif 433 | 434 | release_return; 435 | } 436 | 437 | 438 | // checkMsgBus 439 | void gstDecoder::checkMsgBus() 440 | { 441 | while(true) 442 | { 443 | GstMessage* msg = gst_bus_pop(mBus); 444 | 445 | if( !msg ) 446 | break; 447 | 448 | gst_message_print(mBus, msg, this); 449 | gst_message_unref(msg); 450 | } 451 | } 452 | 453 | -------------------------------------------------------------------------------- /gstDecoder.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a 5 | * copy of this software and associated documentation files (the "Software"), 6 | * to deal in the Software without restriction, including without limitation 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | * and/or sell copies of the Software, and to permit persons to whom the 9 | * Software is furnished to do so, subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in 12 | * all copies or substantial portions of the Software. 13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | * DEALINGS IN THE SOFTWARE. 21 | */ 22 | 23 | #ifndef __GSTREAMER_DECODER_H__ 24 | #define __GSTREAMER_DECODER_H__ 25 | 26 | #include "gstUtility.h" 27 | 28 | 29 | struct _GstAppSink; 30 | 31 | 32 | /** 33 | * Hardware-accelerated H.264/H.265 video decoder for Jetson using GStreamer. 34 | * @ingroup codec 35 | */ 36 | class gstDecoder 37 | { 38 | public: 39 | /** 40 | * Create an decoder instance that reads from a video file on disk. 41 | */ 42 | gstDecoder* Create( gstCodec codec, const char* filename ); 43 | 44 | /** 45 | * Create an decoder instance that streams over the network. 46 | */ 47 | gstDecoder* Create( gstCodec codec, uint16_t port ); 48 | 49 | /** 50 | * Create an decoder instance that streams over the network using multicast. 
51 | */ 52 | gstDecoder* Create( gstCodec codec, const char* multicastIP, uint16_t port ); 53 | 54 | /** 55 | * Destructor 56 | */ 57 | ~gstDecoder(); 58 | 59 | 60 | protected: 61 | gstDecoder(); 62 | 63 | void checkMsgBus(); 64 | void checkBuffer(); 65 | bool buildLaunchStr(); 66 | 67 | bool init( gstCodec codec, const char* filename, const char* multicastIP, uint16_t port ); 68 | 69 | static void onEOS(_GstAppSink* sink, void* user_data); 70 | static GstFlowReturn onPreroll(_GstAppSink* sink, void* user_data); 71 | static GstFlowReturn onBuffer(_GstAppSink* sink, void* user_data); 72 | 73 | _GstBus* mBus; 74 | _GstAppSink* mAppSink; 75 | _GstElement* mPipeline; 76 | gstCodec mCodec; 77 | 78 | std::string mLaunchStr; 79 | std::string mInputPath; 80 | std::string mMulticastIP; 81 | uint16_t mPort; 82 | }; 83 | 84 | #endif 85 | -------------------------------------------------------------------------------- /gstEncoder.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a 5 | * copy of this software and associated documentation files (the "Software"), 6 | * to deal in the Software without restriction, including without limitation 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | * and/or sell copies of the Software, and to permit persons to whom the 9 | * Software is furnished to do so, subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in 12 | * all copies or substantial portions of the Software. 13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | * DEALINGS IN THE SOFTWARE. 21 | */ 22 | 23 | #include "gstEncoder.h" 24 | 25 | #include "filesystem.h" 26 | #include "timespec.h" 27 | 28 | #include "cudaMappedMemory.h" 29 | #include "cudaRGB.h" 30 | #include "cudaYUV.h" 31 | 32 | #include 33 | #include 34 | 35 | #include 36 | #include 37 | #include 38 | #include 39 | 40 | 41 | // constructor 42 | gstEncoder::gstEncoder() 43 | { 44 | mAppSrc = NULL; 45 | mBus = NULL; 46 | mBufferCaps = NULL; 47 | mPipeline = NULL; 48 | mNeedData = false; 49 | mOutputPort = 0; 50 | mWidth = 0; 51 | mHeight = 0; 52 | mCodec = GST_CODEC_H264; 53 | 54 | mCpuRGBA = NULL; 55 | mGpuRGBA = NULL; 56 | mCpuI420 = NULL; 57 | mGpuI420 = NULL; 58 | } 59 | 60 | 61 | // destructor 62 | gstEncoder::~gstEncoder() 63 | { 64 | // send EOS 65 | mNeedData = false; 66 | 67 | printf(LOG_GSTREAMER "gstEncoder - shutting down pipeline, sending EOS\n"); 68 | GstFlowReturn eos_result = gst_app_src_end_of_stream(GST_APP_SRC(mAppSrc)); 69 | 70 | if( eos_result != 0 ) 71 | printf(LOG_GSTREAMER "gstEncoder - failed sending appsrc EOS (result %u)\n", eos_result); 72 | 73 | sleep(1); 74 | 75 | // stop pipeline 76 | printf(LOG_GSTREAMER "gstEncoder - transitioning pipeline to GST_STATE_NULL\n"); 77 | 78 | const GstStateChangeReturn result = gst_element_set_state(mPipeline, GST_STATE_NULL); 79 | 80 | if( result != GST_STATE_CHANGE_SUCCESS ) 81 | printf(LOG_GSTREAMER "gstEncoder - failed to set pipeline state to NULL (error %u)\n", result); 82 | 83 | sleep(1); 84 | 85 | printf(LOG_GSTREAMER "gstEncoder - pipeline shutdown complete\n"); 86 | } 87 | 88 | 89 | // Create 90 | gstEncoder* gstEncoder::Create( gstCodec codec, uint32_t width, uint32_t height, const char* filename ) 91 | { 92 | return Create(codec, 
width, height, filename, NULL, 0); 93 | } 94 | 95 | 96 | // Create 97 | gstEncoder* gstEncoder::Create( gstCodec codec, uint32_t width, uint32_t height, const char* ipAddress, uint16_t port ) 98 | { 99 | return Create(codec, width, height, NULL, ipAddress, port); 100 | } 101 | 102 | 103 | // Create 104 | gstEncoder* gstEncoder::Create( gstCodec codec, uint32_t width, uint32_t height, const char* filename, const char* ipAddress, uint16_t port ) 105 | { 106 | gstEncoder* enc = new gstEncoder(); 107 | 108 | if( !enc ) 109 | return NULL; 110 | 111 | if( !enc->init(codec, width, height, filename, ipAddress, port) ) 112 | { 113 | printf(LOG_GSTREAMER "gstEncoder::Create() failed\n"); 114 | return NULL; 115 | } 116 | 117 | return enc; 118 | } 119 | 120 | 121 | // init 122 | bool gstEncoder::init( gstCodec codec, uint32_t width, uint32_t height, const char* filename, const char* ipAddress, uint16_t port ) 123 | { 124 | mCodec = codec; 125 | mWidth = width; 126 | mHeight = height; 127 | 128 | if( mWidth == 0 || mHeight == 0 ) 129 | return false; 130 | 131 | if( filename != NULL ) 132 | mOutputPath = filename; 133 | 134 | if( ipAddress != NULL ) 135 | mOutputIP = ipAddress; 136 | 137 | mOutputPort = port; 138 | 139 | if( !filename && !ipAddress ) 140 | return false; 141 | 142 | // initialize GStreamer libraries 143 | if( !gstreamerInit() ) 144 | { 145 | printf(LOG_GSTREAMER "failed to initialize gstreamer API\n"); 146 | return NULL; 147 | } 148 | 149 | // build caps string 150 | if( !buildCapsStr() ) 151 | { 152 | printf(LOG_GSTREAMER "gstEncoder - failed to build caps string\n"); 153 | return false; 154 | } 155 | 156 | mBufferCaps = gst_caps_from_string(mCapsStr.c_str()); 157 | 158 | if( !mBufferCaps ) 159 | { 160 | printf(LOG_GSTREAMER "gstEncoder - failed to parse caps from string\n"); 161 | return false; 162 | } 163 | 164 | // build pipeline string 165 | if( !buildLaunchStr() ) 166 | { 167 | printf(LOG_GSTREAMER "gstEncoder - failed to build pipeline string\n"); 168 | 
return false; 169 | } 170 | 171 | // create the pipeline 172 | GError* err = NULL; 173 | mPipeline = gst_parse_launch(mLaunchStr.c_str(), &err); 174 | 175 | if( err != NULL ) 176 | { 177 | printf(LOG_GSTREAMER "gstEncoder - failed to create pipeline\n"); 178 | printf(LOG_GSTREAMER " (%s)\n", err->message); 179 | g_error_free(err); 180 | return false; 181 | } 182 | 183 | GstPipeline* pipeline = GST_PIPELINE(mPipeline); 184 | 185 | if( !pipeline ) 186 | { 187 | printf(LOG_GSTREAMER "gstEncoder - failed to cast GstElement into GstPipeline\n"); 188 | return false; 189 | } 190 | 191 | // retrieve pipeline bus 192 | mBus = gst_pipeline_get_bus(pipeline); 193 | 194 | if( !mBus ) 195 | { 196 | printf(LOG_GSTREAMER "gstEncoder - failed to retrieve GstBus from pipeline\n"); 197 | return false; 198 | } 199 | 200 | // add watch for messages (disabled when we poll the bus ourselves, instead of gmainloop) 201 | //gst_bus_add_watch(mBus, (GstBusFunc)gst_message_print, NULL); 202 | 203 | // get the appsrc element 204 | GstElement* appsrcElement = gst_bin_get_by_name(GST_BIN(pipeline), "mysource"); 205 | GstAppSrc* appsrc = GST_APP_SRC(appsrcElement); 206 | 207 | if( !appsrcElement || !appsrc ) 208 | { 209 | printf(LOG_GSTREAMER "gstEncoder - failed to retrieve AppSrc element from pipeline\n"); 210 | return false; 211 | } 212 | 213 | mAppSrc = appsrcElement; 214 | 215 | g_signal_connect(appsrcElement, "need-data", G_CALLBACK(onNeedData), this); 216 | g_signal_connect(appsrcElement, "enough-data", G_CALLBACK(onEnoughData), this); 217 | 218 | #if GST_CHECK_VERSION(1,0,0) 219 | gst_app_src_set_caps(appsrc, mBufferCaps); 220 | #endif 221 | 222 | // set stream properties 223 | gst_app_src_set_stream_type(appsrc, GST_APP_STREAM_TYPE_STREAM); 224 | 225 | g_object_set(G_OBJECT(mAppSrc), "is-live", TRUE, NULL); 226 | g_object_set(G_OBJECT(mAppSrc), "do-timestamp", TRUE, NULL); 227 | 228 | // transition pipline to STATE_PLAYING 229 | printf(LOG_GSTREAMER "gstEncoder - transitioning pipeline 
to GST_STATE_PLAYING\n"); 230 | 231 | const GstStateChangeReturn result = gst_element_set_state(mPipeline, GST_STATE_PLAYING); 232 | 233 | if( result == GST_STATE_CHANGE_ASYNC ) 234 | { 235 | #if 0 236 | GstMessage* asyncMsg = gst_bus_timed_pop_filtered(mBus, 5 * GST_SECOND, 237 | (GstMessageType)(GST_MESSAGE_ASYNC_DONE|GST_MESSAGE_ERROR)); 238 | 239 | if( asyncMsg != NULL ) 240 | { 241 | gst_message_print(mBus, asyncMsg, this); 242 | gst_message_unref(asyncMsg); 243 | } 244 | else 245 | printf(LOG_GSTREAMER "gstEncoder - NULL message after transitioning pipeline to PLAYING...\n"); 246 | #endif 247 | } 248 | else if( result != GST_STATE_CHANGE_SUCCESS ) 249 | { 250 | printf(LOG_GSTREAMER "gstEncoder - failed to set pipeline state to PLAYING (error %u)\n", result); 251 | return false; 252 | } 253 | 254 | return true; 255 | } 256 | 257 | 258 | // buildCapsStr 259 | bool gstEncoder::buildCapsStr() 260 | { 261 | std::ostringstream ss; 262 | 263 | #if GST_CHECK_VERSION(1,0,0) 264 | ss << "video/x-raw"; 265 | ss << ",width=" << mWidth; 266 | ss << ",height=" << mHeight; 267 | ss << ",format=(string)I420"; 268 | ss << ",framerate=30/1"; 269 | #else 270 | ss << "video/x-raw-yuv"; 271 | ss << ",width=" << mWidth; 272 | ss << ",height=" << mHeight; 273 | ss << ",format=(fourcc)I420"; 274 | ss << ",framerate=30/1"; 275 | #endif 276 | 277 | mCapsStr = ss.str(); 278 | 279 | printf(LOG_GSTREAMER "gstEncoder - buffer caps string:\n"); 280 | printf("%s\n", mCapsStr.c_str()); 281 | return true; 282 | } 283 | 284 | 285 | // buildLaunchStr 286 | bool gstEncoder::buildLaunchStr() 287 | { 288 | const size_t fileLen = mOutputPath.size(); 289 | const size_t ipLen = mOutputIP.size(); 290 | 291 | std::ostringstream ss; 292 | ss << "appsrc name=mysource ! "; 293 | 294 | #if GST_CHECK_VERSION(1,0,0) 295 | ss << mCapsStr << " ! "; 296 | 297 | if( mCodec == GST_CODEC_H264 ) 298 | ss << "omxh264enc ! video/x-h264 ! 
"; // TODO: investigate quality-level replacement 299 | else if( mCodec == GST_CODEC_H265 ) 300 | ss << "omxh265enc ! video/x-h265 ! "; 301 | #else 302 | if( mCodec == GST_CODEC_H264 ) 303 | ss << "nv_omx_h264enc quality-level=2 ! video/x-h264 ! "; 304 | else if( mCodec == GST_CODEC_H265 ) 305 | ss << "nv_omx_h265enc quality-level=2 ! video/x-h265 ! "; 306 | #endif 307 | 308 | if( fileLen > 0 && ipLen > 0 ) 309 | ss << "nvtee name=t ! "; 310 | 311 | if( fileLen > 0 ) 312 | { 313 | std::string ext = fileExtension(mOutputPath); 314 | 315 | if( strcasecmp(ext.c_str(), "mkv") == 0 ) 316 | { 317 | //ss << "matroskamux ! queue ! "; 318 | ss << "matroskamux ! "; 319 | } 320 | else if( strcasecmp(ext.c_str(), "mp4") == 0 ) 321 | { 322 | if( mCodec == GST_CODEC_H264 ) 323 | ss << "h264parse ! qtmux ! "; 324 | else if( mCodec == GST_CODEC_H265 ) 325 | ss << "h265parse ! qtmux ! "; 326 | } 327 | else if( strcasecmp(ext.c_str(), "h264") != 0 && strcasecmp(ext.c_str(), "h265") != 0 ) 328 | { 329 | printf(LOG_GSTREAMER "gstEncoder - invalid output extension %s\n", ext.c_str()); 330 | return false; 331 | } 332 | 333 | ss << "filesink location=" << mOutputPath; 334 | 335 | if( ipLen > 0 ) 336 | ss << " t. ! "; // begin the second tee 337 | } 338 | 339 | if( ipLen > 0 ) 340 | { 341 | ss << "rtph264pay config-interval=1 ! 
udpsink host="; 342 | ss << mOutputIP << " "; 343 | 344 | if( mOutputPort != 0 ) 345 | ss << "port=" << mOutputPort; 346 | 347 | ss << " auto-multicast=true"; 348 | } 349 | 350 | mLaunchStr = ss.str(); 351 | 352 | printf(LOG_GSTREAMER "gstEncoder - pipeline launch string:\n"); 353 | printf("%s\n", mLaunchStr.c_str()); 354 | return true; 355 | } 356 | 357 | 358 | // onNeedData 359 | void gstEncoder::onNeedData( GstElement* pipeline, guint size, gpointer user_data ) 360 | { 361 | printf(LOG_GSTREAMER "gstEncoder - AppSrc requesting data (%u bytes)\n", size); 362 | 363 | if( !user_data ) 364 | return; 365 | 366 | gstEncoder* enc = (gstEncoder*)user_data; 367 | enc->mNeedData = true; 368 | } 369 | 370 | 371 | // onEnoughData 372 | void gstEncoder::onEnoughData( GstElement* pipeline, gpointer user_data ) 373 | { 374 | printf(LOG_GSTREAMER "gstEncoder - AppSrc signalling enough data\n"); 375 | 376 | if( !user_data ) 377 | return; 378 | 379 | gstEncoder* enc = (gstEncoder*)user_data; 380 | enc->mNeedData = false; 381 | } 382 | 383 | 384 | // EncodeFrame 385 | bool gstEncoder::EncodeI420( void* buffer, size_t size ) 386 | { 387 | if( !buffer || size == 0 ) 388 | return false; 389 | 390 | if( !mNeedData ) 391 | { 392 | printf(LOG_GSTREAMER "gstEncoder - pipeline full, skipping frame (%zu bytes)\n", size); 393 | return true; 394 | } 395 | 396 | 397 | #if GST_CHECK_VERSION(1,0,0) 398 | // allocate gstreamer buffer memory 399 | GstBuffer* gstBuffer = gst_buffer_new_allocate(NULL, size, NULL); 400 | 401 | // map the buffer for write access 402 | GstMapInfo map; 403 | 404 | if( gst_buffer_map(gstBuffer, &map, GST_MAP_WRITE) ) 405 | { 406 | if( map.size != size ) 407 | { 408 | printf(LOG_GSTREAMER "gstEncoder - gst_buffer_map() size mismatch, got %zu bytes, expected %zu bytes\n", map.size, size); 409 | gst_buffer_unref(gstBuffer); 410 | return false; 411 | } 412 | 413 | memcpy(map.data, buffer, size); 414 | gst_buffer_unmap(gstBuffer, &map); 415 | } 416 | else 417 | { 418 | 
printf(LOG_GSTREAMER "gstEncoder - failed to map gstreamer buffer memory (%zu bytes)\n", size); 419 | gst_buffer_unref(gstBuffer); 420 | return false; 421 | } 422 | 423 | #else 424 | // convert memory to GstBuffer 425 | GstBuffer* gstBuffer = gst_buffer_new(); 426 | 427 | GST_BUFFER_MALLOCDATA(gstBuffer) = (guint8*)g_malloc(size); 428 | GST_BUFFER_DATA(gstBuffer) = GST_BUFFER_MALLOCDATA(gstBuffer); 429 | GST_BUFFER_SIZE(gstBuffer) = size; 430 | 431 | //static size_t num_frame = 0; 432 | //GST_BUFFER_TIMESTAMP(gstBuffer) = (GstClockTime)((num_frame / 30.0) * 1e9); // for 1.0, use GST_BUFFER_PTS or GST_BUFFER_DTS instead 433 | //num_frame++; 434 | 435 | if( mBufferCaps != NULL ) 436 | gst_buffer_set_caps(gstBuffer, mBufferCaps); 437 | 438 | memcpy(GST_BUFFER_DATA(gstBuffer), buffer, size); 439 | #endif 440 | 441 | // queue buffer to gstreamer 442 | GstFlowReturn ret; 443 | g_signal_emit_by_name(mAppSrc, "push-buffer", gstBuffer, &ret); 444 | gst_buffer_unref(gstBuffer); 445 | 446 | if( ret != 0 ) 447 | printf(LOG_GSTREAMER "gstEncoder - AppSrc pushed buffer abnormally (result %u)\n", ret); 448 | 449 | // check for any messages 450 | while(true) 451 | { 452 | GstMessage* msg = gst_bus_pop(mBus); 453 | 454 | if( !msg ) 455 | break; 456 | 457 | gst_message_print(mBus, msg, this); 458 | gst_message_unref(msg); 459 | } 460 | 461 | return true; 462 | } 463 | 464 | 465 | // EncodeRGBA 466 | bool gstEncoder::EncodeRGBA( uint8_t* buffer ) 467 | { 468 | if( !buffer ) 469 | return false; 470 | 471 | const size_t i420Size = (mWidth * mHeight * 12) / 8; 472 | 473 | if( !mCpuI420 || !mGpuI420 ) 474 | { 475 | if( !cudaAllocMapped(&mCpuI420, &mGpuI420, i420Size) ) 476 | { 477 | printf(LOG_GSTREAMER "gstEncoder - failed to allocate CUDA memory for YUV I420 conversion\n"); 478 | return false; 479 | } 480 | } 481 | 482 | if( CUDA_FAILED(cudaRGBAToI420((uchar4*)buffer, (uint8_t*)mGpuI420, mWidth, mHeight)) ) 483 | { 484 | printf(LOG_GSTREAMER "gstEncoder - failed convert RGBA image to 
I420\n"); 485 | return false; 486 | } 487 | 488 | CUDA(cudaDeviceSynchronize()); 489 | 490 | return EncodeI420(mCpuI420, i420Size); 491 | } 492 | 493 | 494 | // EncodeRGBA 495 | bool gstEncoder::EncodeRGBA( float* buffer, float maxPixelValue ) 496 | { 497 | if( !buffer ) 498 | return false; 499 | 500 | if( !mCpuRGBA || !mGpuRGBA ) 501 | { 502 | if( !cudaAllocMapped(&mCpuRGBA, &mGpuRGBA, mWidth * mHeight * 4 * sizeof(uint8_t)) ) 503 | { 504 | printf(LOG_GSTREAMER "gstEncoder - failed to allocate CUDA memory for RGBA8 conversion\n"); 505 | return false; 506 | } 507 | } 508 | 509 | if( CUDA_FAILED(cudaRGBA32ToRGBA8((float4*)buffer, (uchar4*)mGpuRGBA, mWidth, mHeight, make_float2(0.0f, maxPixelValue))) ) 510 | { 511 | printf(LOG_GSTREAMER "gstEncoder - failed convert RGBA32f image to RGBA8\n"); 512 | return false; 513 | } 514 | 515 | return EncodeRGBA((uint8_t*)mGpuRGBA); 516 | } 517 | 518 | 519 | 520 | -------------------------------------------------------------------------------- /gstEncoder.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a 5 | * copy of this software and associated documentation files (the "Software"), 6 | * to deal in the Software without restriction, including without limitation 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | * and/or sell copies of the Software, and to permit persons to whom the 9 | * Software is furnished to do so, subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in 12 | * all copies or substantial portions of the Software. 
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef __GSTREAMER_ENCODER_H__
#define __GSTREAMER_ENCODER_H__

#include "gstUtility.h"


/**
 * Hardware-accelerated H.264/H.265 video encoder for Jetson using GStreamer.
 * The encoder can write the encoded video to disk in .mkv or .h264/.h265 formats,
 * or handle streaming network transmission to remote host(s) via RTP/RTSP protocol.
 * @ingroup codec
 */
class gstEncoder
{
public:
	/**
	 * Create an encoder instance that outputs to a file on disk.
	 */
	static gstEncoder* Create( gstCodec codec, uint32_t width, uint32_t height, const char* filename );

	/**
	 * Create an encoder instance that streams over the network.
	 */
	static gstEncoder* Create( gstCodec codec, uint32_t width, uint32_t height, const char* ipAddress, uint16_t port );

	/**
	 * Create an encoder instance that outputs to a file on disk and streams over the network.
	 */
	static gstEncoder* Create( gstCodec codec, uint32_t width, uint32_t height, const char* filename, const char* ipAddress, uint16_t port );

	/**
	 * Destructor
	 */
	~gstEncoder();

	/**
	 * Encode the next fixed-point RGBA frame.
	 * Expects 8-bit per channel, 32-bit per pixel unsigned image, range 0-255.
	 * It is assumed the width of the buffer is equal to GetWidth(),
	 * and that the height of the buffer is equal to GetHeight().
	 * This function performs colorspace conversion using CUDA, so the
	 * buffer pointer is expected to be CUDA memory allocated on the GPU.
	 * @param buffer CUDA pointer to the RGBA image.
	 */
	bool EncodeRGBA( uint8_t* buffer );

	/**
	 * Encode the next floating-point RGBA frame.
	 * It is assumed the width of the buffer is equal to GetWidth(),
	 * and that the height of the buffer is equal to GetHeight().
	 * This function performs colorspace conversion using CUDA, so the
	 * buffer pointer is expected to be CUDA memory allocated on the GPU.
	 * @param buffer CUDA pointer to the RGBA image.
	 * @param maxPixelValue indicates the maximum pixel intensity (typically 255.0f or 1.0f)
	 */
	bool EncodeRGBA( float* buffer, float maxPixelValue=255.0f );

	/**
	 * Encode the next I420 frame provided by the user.
	 * Expects 12-bpp (bit per pixel) image in YUV I420 format.
	 * This image is passed to GStreamer, so CPU pointer should be used.
	 * @param buffer CPU pointer to the I420 image
	 */
	bool EncodeI420( void* buffer, size_t size );

	/**
	 * Retrieve the width that the encoder was created for, in pixels.
	 */
	inline uint32_t GetWidth() const		{ return mWidth; }

	/**
	 * Retrieve the height that the encoder was created for, in pixels.
	 */
	inline uint32_t GetHeight() const		{ return mHeight; }

protected:
	// protected constructor -- use the static Create() factory functions above
	gstEncoder();

	bool buildCapsStr();	// compose the raw-buffer caps string for the appsrc
	bool buildLaunchStr();	// compose the gst-launch pipeline description string

	// common initialization shared by the Create() overloads
	bool init( gstCodec codec, uint32_t width, uint32_t height, const char* filename, const char* ipAddress, uint16_t port );

	// appsrc flow-control callbacks (toggle mNeedData)
	static void onNeedData( _GstElement* pipeline, uint32_t size, void* user_data );
	static void onEnoughData( _GstElement* pipeline, void* user_data );

	_GstBus*     mBus;			// pipeline message bus
	_GstCaps*    mBufferCaps;	// caps of the raw I420 buffers pushed to the appsrc
	_GstElement* mAppSrc;		// source element the application pushes frames into
	_GstElement* mPipeline;		// top-level pipeline from gst_parse_launch()
	gstCodec     mCodec;		// H.264 or H.265
	bool         mNeedData;		// true while the appsrc is requesting more frames
	uint32_t     mWidth;		// frame width, in pixels
	uint32_t     mHeight;		// frame height, in pixels

	std::string mCapsStr;		// caps string built by buildCapsStr()
	std::string mLaunchStr;		// pipeline description built by buildLaunchStr()
	std::string mOutputPath;	// output filename (file mode), empty otherwise
	std::string mOutputIP;		// destination host/group (network mode), empty otherwise
	uint16_t    mOutputPort;	// destination UDP port (network mode)

	// format conversion buffers (CUDA zero-copy mapped memory pairs)
	void* mCpuRGBA;
	void* mGpuRGBA;
	void* mCpuI420;
	void* mGpuI420;
};


#endif
--------------------------------------------------------------------------------
/gstUtility.cpp:
/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | * DEALINGS IN THE SOFTWARE. 21 | */ 22 | 23 | #include "gstUtility.h" 24 | 25 | #include 26 | #include 27 | #include 28 | 29 | 30 | inline const char* gst_debug_level_str( GstDebugLevel level ) 31 | { 32 | switch (level) 33 | { 34 | case GST_LEVEL_NONE: return "GST_LEVEL_NONE "; 35 | case GST_LEVEL_ERROR: return "GST_LEVEL_ERROR "; 36 | case GST_LEVEL_WARNING: return "GST_LEVEL_WARNING"; 37 | case GST_LEVEL_INFO: return "GST_LEVEL_INFO "; 38 | case GST_LEVEL_DEBUG: return "GST_LEVEL_DEBUG "; 39 | case GST_LEVEL_LOG: return "GST_LEVEL_LOG "; 40 | case GST_LEVEL_FIXME: return "GST_LEVEL_FIXME "; 41 | #ifdef GST_LEVEL_TRACE 42 | case GST_LEVEL_TRACE: return "GST_LEVEL_TRACE "; 43 | #endif 44 | case GST_LEVEL_MEMDUMP: return "GST_LEVEL_MEMDUMP"; 45 | default: return " "; 46 | } 47 | } 48 | 49 | #define SEP " " 50 | 51 | void rilog_debug_function(GstDebugCategory* category, GstDebugLevel level, 52 | const gchar* file, const char* function, 53 | gint line, GObject* object, GstDebugMessage* message, 54 | gpointer data) 55 | { 56 | if( level > GST_LEVEL_WARNING /*GST_LEVEL_INFO*/ ) 57 | return; 58 | 59 | //gchar* name = NULL; 60 | //if( object != NULL ) 61 | // g_object_get(object, "name", &name, NULL); 62 | 63 | const char* typeName = " "; 64 | const char* className = " "; 65 | 66 | if( object != NULL ) 67 | { 68 | typeName = G_OBJECT_TYPE_NAME(object); 69 | className = G_OBJECT_CLASS_NAME(object); 70 | } 71 | 72 | printf(LOG_GSTREAMER "%s %s %s\n" SEP "%s:%i %s\n" SEP "%s\n", 73 | 
gst_debug_level_str(level), typeName, 74 | gst_debug_category_get_name(category), file, line, function, 75 | gst_debug_message_get(message)); 76 | 77 | } 78 | 79 | 80 | bool gstreamerInit() 81 | { 82 | int argc = 0; 83 | //char* argv[] = { "none" }; 84 | 85 | if( !gst_init_check(&argc, NULL, NULL) ) 86 | { 87 | printf(LOG_GSTREAMER "failed to initialize gstreamer library with gst_init()\n"); 88 | return false; 89 | } 90 | 91 | uint32_t ver[] = { 0, 0, 0, 0 }; 92 | gst_version( &ver[0], &ver[1], &ver[2], &ver[3] ); 93 | 94 | printf(LOG_GSTREAMER "initialized gstreamer, version %u.%u.%u.%u\n", ver[0], ver[1], ver[2], ver[3]); 95 | 96 | 97 | // debugging 98 | gst_debug_remove_log_function(gst_debug_log_default); 99 | 100 | if( true ) 101 | { 102 | gst_debug_add_log_function(rilog_debug_function, NULL, NULL); 103 | 104 | gst_debug_set_active(true); 105 | gst_debug_set_colored(false); 106 | } 107 | 108 | return true; 109 | } 110 | //--------------------------------------------------------------------------------------------- 111 | 112 | static void gst_print_one_tag(const GstTagList * list, const gchar * tag, gpointer user_data) 113 | { 114 | int i, num; 115 | 116 | num = gst_tag_list_get_tag_size (list, tag); 117 | for (i = 0; i < num; ++i) { 118 | const GValue *val; 119 | 120 | /* Note: when looking for specific tags, use the gst_tag_list_get_xyz() API, 121 | * we only use the GValue approach here because it is more generic */ 122 | val = gst_tag_list_get_value_index (list, tag, i); 123 | if (G_VALUE_HOLDS_STRING (val)) { 124 | printf("\t%20s : %s\n", tag, g_value_get_string (val)); 125 | } else if (G_VALUE_HOLDS_UINT (val)) { 126 | printf("\t%20s : %u\n", tag, g_value_get_uint (val)); 127 | } else if (G_VALUE_HOLDS_DOUBLE (val)) { 128 | printf("\t%20s : %g\n", tag, g_value_get_double (val)); 129 | } else if (G_VALUE_HOLDS_BOOLEAN (val)) { 130 | printf("\t%20s : %s\n", tag, 131 | (g_value_get_boolean (val)) ? 
"true" : "false"); 132 | } else if (GST_VALUE_HOLDS_BUFFER (val)) { 133 | //GstBuffer *buf = gst_value_get_buffer (val); 134 | //guint buffer_size = GST_BUFFER_SIZE(buf); 135 | 136 | printf("\t%20s : buffer of size %u\n", tag, /*buffer_size*/0); 137 | } /*else if (GST_VALUE_HOLDS_DATE_TIME (val)) { 138 | GstDateTime *dt = (GstDateTime*)g_value_get_boxed (val); 139 | gchar *dt_str = gst_date_time_to_iso8601_string (dt); 140 | 141 | printf("\t%20s : %s\n", tag, dt_str); 142 | g_free (dt_str); 143 | }*/ else { 144 | printf("\t%20s : tag of type '%s'\n", tag, G_VALUE_TYPE_NAME (val)); 145 | } 146 | } 147 | } 148 | 149 | static const char* gst_stream_status_string( GstStreamStatusType status ) 150 | { 151 | switch(status) 152 | { 153 | case GST_STREAM_STATUS_TYPE_CREATE: return "CREATE"; 154 | case GST_STREAM_STATUS_TYPE_ENTER: return "ENTER"; 155 | case GST_STREAM_STATUS_TYPE_LEAVE: return "LEAVE"; 156 | case GST_STREAM_STATUS_TYPE_DESTROY: return "DESTROY"; 157 | case GST_STREAM_STATUS_TYPE_START: return "START"; 158 | case GST_STREAM_STATUS_TYPE_PAUSE: return "PAUSE"; 159 | case GST_STREAM_STATUS_TYPE_STOP: return "STOP"; 160 | default: return "UNKNOWN"; 161 | } 162 | } 163 | 164 | // gst_message_print 165 | gboolean gst_message_print(GstBus* bus, GstMessage* message, gpointer user_data) 166 | { 167 | 168 | switch (GST_MESSAGE_TYPE (message)) 169 | { 170 | case GST_MESSAGE_ERROR: 171 | { 172 | GError *err = NULL; 173 | gchar *dbg_info = NULL; 174 | 175 | gst_message_parse_error (message, &err, &dbg_info); 176 | printf(LOG_GSTREAMER "gstreamer %s ERROR %s\n", GST_OBJECT_NAME (message->src), err->message); 177 | printf(LOG_GSTREAMER "gstreamer Debugging info: %s\n", (dbg_info) ? 
dbg_info : "none"); 178 | 179 | g_error_free(err); 180 | g_free(dbg_info); 181 | //g_main_loop_quit (app->loop); 182 | break; 183 | } 184 | case GST_MESSAGE_EOS: 185 | { 186 | printf(LOG_GSTREAMER "gstreamer %s recieved EOS signal...\n", GST_OBJECT_NAME(message->src)); 187 | //g_main_loop_quit (app->loop); // TODO trigger plugin Close() upon error 188 | break; 189 | } 190 | case GST_MESSAGE_STATE_CHANGED: 191 | { 192 | GstState old_state, new_state; 193 | 194 | gst_message_parse_state_changed(message, &old_state, &new_state, NULL); 195 | 196 | printf(LOG_GSTREAMER "gstreamer changed state from %s to %s ==> %s\n", 197 | gst_element_state_get_name(old_state), 198 | gst_element_state_get_name(new_state), 199 | GST_OBJECT_NAME(message->src)); 200 | break; 201 | } 202 | case GST_MESSAGE_STREAM_STATUS: 203 | { 204 | GstStreamStatusType streamStatus; 205 | gst_message_parse_stream_status(message, &streamStatus, NULL); 206 | 207 | printf(LOG_GSTREAMER "gstreamer stream status %s ==> %s\n", 208 | gst_stream_status_string(streamStatus), 209 | GST_OBJECT_NAME(message->src)); 210 | break; 211 | } 212 | case GST_MESSAGE_TAG: 213 | { 214 | GstTagList *tags = NULL; 215 | 216 | gst_message_parse_tag(message, &tags); 217 | 218 | #ifdef gst_tag_list_to_string 219 | gchar* txt = gst_tag_list_to_string(tags); 220 | #else 221 | gchar* txt = "missing gst_tag_list_to_string()"; 222 | #endif 223 | 224 | if( txt != NULL ) 225 | { 226 | printf(LOG_GSTREAMER "gstreamer %s %s\n", GST_OBJECT_NAME(message->src), txt); 227 | #ifdef gst_tag_list_to_string 228 | g_free(txt); 229 | #endif 230 | } 231 | 232 | //gst_tag_list_foreach(tags, gst_print_one_tag, NULL); 233 | if( tags != NULL ) 234 | gst_tag_list_free(tags); 235 | break; 236 | } 237 | default: 238 | { 239 | printf(LOG_GSTREAMER "gstreamer msg %s ==> %s\n", gst_message_type_get_name(GST_MESSAGE_TYPE(message)), GST_OBJECT_NAME(message->src)); 240 | break; 241 | } 242 | } 243 | 244 | return TRUE; 245 | } 246 | 247 | 
-------------------------------------------------------------------------------- /gstUtility.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a 5 | * copy of this software and associated documentation files (the "Software"), 6 | * to deal in the Software without restriction, including without limitation 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | * and/or sell copies of the Software, and to permit persons to whom the 9 | * Software is furnished to do so, subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in 12 | * all copies or substantial portions of the Software. 13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | * DEALINGS IN THE SOFTWARE. 21 | */ 22 | 23 | #ifndef __GSTREAMER_UTILITY_H__ 24 | #define __GSTREAMER_UTILITY_H__ 25 | 26 | #include 27 | #include 28 | 29 | 30 | /** 31 | * Video codec (H.264/H.265) enumeration. 
32 | * @ingroup codec 33 | */ 34 | enum gstCodec 35 | { 36 | GST_CODEC_H264 = 0, 37 | GST_CODEC_H265 38 | }; 39 | 40 | 41 | /** 42 | * LOG_GSTREAMER logging prefix 43 | * @ingroup codec 44 | */ 45 | #define LOG_GSTREAMER "[gstreamer] " 46 | 47 | 48 | /** 49 | * gstreamerInit 50 | * @internal 51 | * @ingroup codec 52 | */ 53 | bool gstreamerInit(); 54 | 55 | 56 | /** 57 | * gst_message_print 58 | * @internal 59 | * @ingroup codec 60 | */ 61 | gboolean gst_message_print(_GstBus* bus, _GstMessage* message, void* user_data); 62 | 63 | 64 | #endif 65 | 66 | -------------------------------------------------------------------------------- /libjetson-inference.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/collincebecky/QT-OPENCV-GSTREAMER-CUDA/4f7fdade20ffa410be805b93fc1661f36c6ef3e6/libjetson-inference.so -------------------------------------------------------------------------------- /libjetson-utils.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/collincebecky/QT-OPENCV-GSTREAMER-CUDA/4f7fdade20ffa410be805b93fc1661f36c6ef3e6/libjetson-utils.so -------------------------------------------------------------------------------- /main.cpp: -------------------------------------------------------------------------------- 1 | 2 | #include 3 | #include "mainwindow.h" 4 | bool MainWindow::signal_recieved = false; 5 | 6 | int main(int argc, char *argv[]) 7 | { 8 | QApplication a(argc, argv); 9 | MainWindow w; 10 | w.show(); 11 | 12 | return a.exec(); 13 | } 14 | -------------------------------------------------------------------------------- /mainwindow.cpp: -------------------------------------------------------------------------------- 1 | #include "mainwindow.h" 2 | #include "ui_mainwindow.h" 3 | #include 4 | 5 | 6 | 7 | MainWindow::MainWindow(QWidget *parent) : 8 | QMainWindow(parent), 9 | ui(new Ui::MainWindow) 10 | { 11 | 
ui->setupUi(this); 12 | QObject::connect(this, SIGNAL(returntomain(QImage)), 13 | this, SLOT(update_image(QImage))); 14 | } 15 | 16 | 17 | 18 | 19 | void MainWindow::sig_handler(int signo) 20 | { 21 | if( signo == SIGINT ) 22 | { 23 | printf("received SIGINT\n"); 24 | signal_recieved = true; 25 | } 26 | } 27 | 28 | int MainWindow::usage() 29 | { 30 | printf("usage: detectnet-camera [-h] [--network NETWORK] [--threshold THRESHOLD]\n"); 31 | printf(" [--camera CAMERA] [--width WIDTH] [--height HEIGHT]\n\n"); 32 | printf("Locate objects in a live camera stream using an object detection DNN.\n\n"); 33 | printf("optional arguments:\n"); 34 | printf(" --help show this help message and exit\n"); 35 | printf(" --network NETWORK pre-trained model to load (see below for options)\n"); 36 | printf(" --overlay OVERLAY detection overlay flags (e.g. --overlay=box,labels,conf)\n"); 37 | printf(" valid combinations are: 'box', 'labels', 'conf', 'none'\n"); 38 | printf(" --alpha ALPHA overlay alpha blending value, range 0-255 (default: 120)\n"); 39 | printf(" --camera CAMERA index of the MIPI CSI camera to use (e.g. 
CSI camera 0),\n"); 40 | printf(" or for VL42 cameras the /dev/video device to use.\n"); 41 | printf(" by default, MIPI CSI camera 0 will be used.\n"); 42 | printf(" --width WIDTH desired width of camera stream (default is 1280 pixels)\n"); 43 | printf(" --height HEIGHT desired height of camera stream (default is 720 pixels)\n"); 44 | printf(" --threshold VALUE minimum threshold for detection (default is 0.5)\n\n"); 45 | 46 | printf("%s\n", detectNet::Usage()); 47 | 48 | 49 | } 50 | void MainWindow::show_video(){ 51 | /* 52 | * parse command line 53 | */ 54 | 55 | 56 | //if( cmdLine.GetFlag("help") ) 57 | // return usage(); 58 | 59 | 60 | /* 61 | * attach signal handler 62 | */ 63 | if( signal(SIGINT, sig_handler) == SIG_ERR ) 64 | printf("\ncan't catch SIGINT\n"); 65 | 66 | 67 | /* 68 | * create the camera device 69 | */ 70 | 71 | 72 | /* 73 | 74 | gstCamera* camera = gstCamera::Create(cmdLine.GetInt("width", gstCamera::DefaultWidth), 75 | cmdLine.GetInt("height", gstCamera::DefaultHeight), 76 | cmdLine.GetString("camera")); 77 | */ 78 | 79 | 80 | gstCamera* camera = gstCamera::Create(width,height,NULL); 81 | 82 | 83 | 84 | 85 | if( !camera ) 86 | { 87 | printf("\ndetectnet-camera: failed to initialize camera device\n"); 88 | 89 | } 90 | 91 | printf("\ndetectnet-camera: successfully initialized camera device\n"); 92 | printf(" width: %u\n", camera->GetWidth()); 93 | printf(" height: %u\n", camera->GetHeight()); 94 | printf(" depth: %u (bpp)\n\n", camera->GetPixelDepth()); 95 | 96 | 97 | /* 98 | * create detection network 99 | */ 100 | detectNet* net = detectNet::Create(prototxt_path,model_path); 101 | 102 | if( !net ) 103 | { 104 | printf("detectnet-camera: failed to load detectNet model\n"); 105 | 106 | } 107 | 108 | // parse overlay flags 109 | const uint32_t overlayFlags = detectNet::OverlayFlagsFromStr("overlay"); 110 | 111 | 112 | /* 113 | * create openGL window 114 | */ 115 | // glDisplay* display = glDisplay::Create(); 116 | 117 | 118 | 119 | 120 | /* 121 | 
* start streaming 122 | */ 123 | if( !camera->Open() ) 124 | { 125 | printf("detectnet-camera: failed to open camera for streaming\n"); 126 | 127 | } 128 | 129 | printf("detectnet-camera: camera open for streaming\n"); 130 | 131 | 132 | /* 133 | * processing loop 134 | */ 135 | float confidence = 0.0f; 136 | 137 | //cv::Mat rgba[4]; 138 | 139 | while( !signal_recieved ) 140 | { 141 | // capture RGBA image 142 | float* imgRGBA = NULL; 143 | 144 | 145 | //->NOTE add 1 into the parameter so the data could be available in CPU/GPU shared memory 146 | 147 | if( !camera->CaptureRGBA(&imgRGBA, 1000,1) ) 148 | printf("detectnet-camera: failed to capture RGBA image from camera\n"); 149 | 150 | // detect objects in the frame 151 | detectNet::Detection* detections = NULL; 152 | 153 | const int numDetections = net->Detect(imgRGBA, camera->GetWidth(), camera->GetHeight(), &detections, overlayFlags); 154 | 155 | qDebug()<<"out on you way .........................................."< 0 ) 158 | { 159 | 160 | printf("%i objects detected\n", numDetections); 161 | 162 | for( int n=0; n < numDetections; n++ ) 163 | { 164 | printf("detected obj %i class #%u (%s) confidence=%f\n", n, detections[n].ClassID, net->GetClassDesc(detections[n].ClassID), detections[n].Confidence); 165 | printf("bounding box %i (%f, %f) (%f, %f) w=%f h=%f\n", n, detections[n].Left, detections[n].Top, detections[n].Right, detections[n].Bottom, detections[n].Width(), detections[n].Height()); 166 | } 167 | } 168 | 169 | 170 | 171 | cv::Mat dst_img=cv::Mat(camera->GetHeight(),camera->GetWidth(),CV_32FC4,imgRGBA); 172 | 173 | 174 | dst_img/=255; 175 | cv::cvtColor(dst_img,dst_img,CV_BGRA2RGB); 176 | 177 | 178 | QImage qt_image=putImage(dst_img); 179 | 180 | // emit returntomain(qt_image); 181 | 182 | ui->label->setPixmap(QPixmap::fromImage(qt_image).scaled(ui->label->size(), 183 | Qt::KeepAspectRatio, Qt::FastTransformation)); 184 | 185 | 186 | /// cv::imshow("Imshow",dst_img); 187 | //if(cv::waitKey(10)==27) 
signal_recieved=true; 188 | break; 189 | 190 | 191 | 192 | 193 | 194 | 195 | 196 | 197 | } 198 | 199 | 200 | /* 201 | * destroy resources 202 | */ 203 | //printf("detectnet-camera: shutting down...\n"); 204 | 205 | SAFE_DELETE(camera); 206 | 207 | SAFE_DELETE(net); 208 | 209 | printf("detectnet-camera: shutdown complete.\n"); 210 | 211 | 212 | 213 | 214 | } 215 | const QImage MainWindow::mat8ToImage(const cv::Mat &mat) 216 | { 217 | switch (mat.type()) { 218 | case CV_8UC1: 219 | { 220 | 221 | QVector ct; 222 | for (int i = 0; i < 256; ++i) 223 | ct.append(qRgb(i, i, i)); 224 | QImage result(mat.data, mat.cols, mat.rows, (int) mat.step, QImage::Format_Indexed8); 225 | result.setColorTable(ct); 226 | return result.copy(); 227 | } 228 | case CV_8UC3: 229 | { 230 | 231 | cv::Mat tmp; 232 | cvtColor(mat, tmp, cv::COLOR_BGR2BGRA); 233 | return mat8ToImage(tmp); 234 | } 235 | case CV_8UC4: 236 | { 237 | QImage result(mat.data, mat.cols, mat.rows, (int) mat.step, QImage::Format_RGB32); 238 | return result.rgbSwapped(); 239 | } 240 | default: 241 | qWarning("Unhandled Mat format %d", mat.type()); 242 | return QImage(); 243 | } 244 | } 245 | 246 | void MainWindow::update_image(const QImage &image){ 247 | 248 | qDebug()<<"comming here .................."<label->setPixmap(QPixmap::fromImage(image).scaled(ui->label->pixmap()->size(), 250 | Qt::KeepAspectRatio,Qt::FastTransformation)); 251 | 252 | 253 | 254 | //ui->label->resize(ui->label->pixmap()->size()); 255 | 256 | 257 | 258 | 259 | } 260 | MainWindow::~MainWindow(){ 261 | delete ui; 262 | } 263 | 264 | 265 | void MainWindow::on_pushButton_clicked() 266 | { 267 | show_video(); 268 | } 269 | /////////////////////////////////////////////////////////////////////////////// 270 | 271 | 272 | QImage MainWindow::putImage(const cv::Mat& mat) 273 | { 274 | 275 | 276 | 277 | 278 | if(mat.type()==CV_8UC1) 279 | { 280 | // Set the color table (used to translate color indexes to qRgb values) 281 | QVector colorTable; 282 | for (int i = 
0; i<256; i++) 283 | colorTable.push_back( qRgb(i,i,i) ); 284 | /* 285 | * 286 | if (deepCopy) 287 | { 288 | QImage img(mat.cols, mat.rows, QImage::Format_Indexed8); 289 | for (int i = 0; i < img.height(); i++) 290 | // scanLine returns a ptr to the start of the data for that row 291 | memcpy( img.scanLine(i), mat.ptr(i), img.bytesPerLine() ); //correct 292 | return img; 293 | } 294 | else 295 | { 296 | // Copy input Mat 297 | const uchar *qImageBuffer = (const uchar*)mat.data; 298 | 299 | // Create QImage with same dimensions as input Mat 300 | QImage img(qImageBuffer, mat.cols, mat.rows, mat.step, QImage::Format_Indexed8); 301 | img.setColorTable(colorTable); 302 | return img; 303 | } 304 | */ 305 | } 306 | else if (mat.type()==CV_16UC1) 307 | { 308 | cv::Mat ucharMatScaled; 309 | cv::Mat ushortMatScaled; 310 | cv::Mat floatMatScaled; 311 | double minImage, maxImage; 312 | cv::minMaxLoc(mat, &minImage, &maxImage); 313 | mat.convertTo(floatMatScaled, CV_32FC1); 314 | 315 | // to ensure [0-1.0] 316 | floatMatScaled = (floatMatScaled - minImage) / (maxImage - minImage); 317 | floatMatScaled.convertTo(ucharMatScaled, CV_8UC1, 255, 0); 318 | return putImage(ucharMatScaled); 319 | } 320 | else if (mat.type()==CV_32FC1) 321 | { 322 | cv::Mat ucharMatScaled; 323 | cv::Mat floatMatScaled; 324 | double minImage, maxImage; 325 | cv::minMaxLoc(mat, &minImage, &maxImage); 326 | // to ensure [0-1.0] 327 | floatMatScaled = (mat - minImage) / (maxImage - minImage); 328 | floatMatScaled.convertTo(ucharMatScaled, CV_8UC1, 255, 0); 329 | 330 | return putImage(ucharMatScaled); 331 | } 332 | else if (mat.type() == CV_32FC3) 333 | { 334 | cv::Mat ucharMatScaled; 335 | cv::Mat floatMatScaled; 336 | double minImage, maxImage; 337 | cv::minMaxLoc(mat, &minImage, &maxImage); 338 | 339 | normalize(mat, floatMatScaled, 0.0, 1.0, cv::NORM_MINMAX); 340 | cv::pow(floatMatScaled, 1. 
/ 5, floatMatScaled); // apply gamma curve: img = img ** (1./5) 341 | mat.convertTo(ucharMatScaled, CV_8UC3, 255, 0); 342 | //qDebug() << "type ucharMatScaled = " << MiscFunctions::matTypeToText( ucharMatScaled.type() ); 343 | 344 | return putImage(ucharMatScaled); 345 | } 346 | 347 | // 8-bits unsigned, NO. OF CHANNELS=3 348 | else 349 | { 350 | cv::Mat rgbMat; 351 | int qImageFormat = QImage::Format_RGB888; 352 | if(mat.type()==CV_8UC4) 353 | { 354 | qImageFormat = QImage::Format_ARGB32; 355 | rgbMat = mat; 356 | } 357 | else if (mat.type()==CV_8UC3) 358 | { 359 | rgbMat = mat; 360 | } 361 | else 362 | { 363 | cvtColor(mat, rgbMat,CV_BGR2RGB); 364 | } 365 | 366 | // Copy input Mat 367 | const uchar *qImageBuffer = (const uchar*)mat.data; 368 | 369 | // Create QImage with same dimensions as input Mat 370 | QImage img(qImageBuffer, mat.cols, mat.rows, mat.step,(QImage::Format) qImageFormat); 371 | return img.rgbSwapped(); // deep copy !! 372 | //} 373 | } 374 | } 375 | 376 | 377 | //////////////////////////////////////// 378 | -------------------------------------------------------------------------------- /mainwindow.h: -------------------------------------------------------------------------------- 1 | #ifndef MAINWINDOW_H 2 | #define MAINWINDOW_H 3 | 4 | #include 5 | #include "gstCamera.h" 6 | //#include "glDisplay.h" 7 | 8 | #include "detectNet.h" 9 | #include "commandLine.h" 10 | #include "cvmatandqimage.h" 11 | 12 | 13 | #include 14 | #include 15 | #include 16 | 17 | //#include "opencv2/opencv.hpp" 18 | 19 | #include 20 | using namespace cv; 21 | 22 | namespace Ui { 23 | class MainWindow; 24 | } 25 | 26 | class MainWindow : public QMainWindow 27 | { 28 | Q_OBJECT 29 | 30 | public: 31 | explicit MainWindow(QWidget *parent = 0); 32 | ~MainWindow(); 33 | //////////////////////////// 34 | static bool signal_recieved; 35 | void show_video(); 36 | int usage(); 37 | static void sig_handler(int signo); 38 | int32_t width=1280; 39 | int32_t height=720; 40 | 41 | 42 | 
char *prototxt_path="/home/brance/Mine/jetson-inference/data/networks/ped-100/deploy.prototxt"; 43 | char *model_path="/home/brance/Mine/jetson-inference/data/networks/ped-100/snapshot_iter_70800.caffemodel"; 44 | const QImage mat8ToImage(const cv::Mat &mat); 45 | QImage putImage(const Mat& mat); 46 | 47 | //////////////////////////// 48 | 49 | 50 | signals: 51 | void returntomain(const QImage & image); 52 | 53 | private slots: 54 | void on_pushButton_clicked(); 55 | void update_image(const QImage & image); 56 | 57 | private: 58 | Ui::MainWindow *ui; 59 | }; 60 | 61 | #endif // MAINWINDOW_H 62 | -------------------------------------------------------------------------------- /mainwindow.ui: -------------------------------------------------------------------------------- 1 | 2 | 3 | MainWindow 4 | 5 | 6 | 7 | 0 8 | 0 9 | 620 10 | 520 11 | 12 | 13 | 14 | MainWindow 15 | 16 | 17 | 18 | 19 | 20 | 21 | background-color: rgb(136, 138, 133); 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | PushButton 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 0 41 | 0 42 | 620 43 | 22 44 | 45 | 46 | 47 | 48 | 49 | TopToolBarArea 50 | 51 | 52 | false 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | -------------------------------------------------------------------------------- /opencv.pri: -------------------------------------------------------------------------------- 1 | ########################################################################### 2 | # Copyright (c) 2016 Debao Zhang 3 | # All right reserved. 
4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining 6 | # a copy of this software and associated documentation files (the 7 | # "Software"), to deal in the Software without restriction, including 8 | # without limitation the rights to use, copy, modify, merge, publish, 9 | # distribute, sublicense, and/or sell copies of the Software, and to 10 | # permit persons to whom the Software is furnished to do so, subject to 11 | # the following conditions: 12 | # 13 | # The above copyright notice and this permission notice shall be 14 | # included in all copies or substantial portions of the Software. 15 | # 16 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 | # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 | # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 19 | # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 20 | # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 22 | # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 | # 24 | ########################################################################### 25 | 26 | ## Note for Windows user: 27 | ## you need to create an opencv.prf file, 28 | ## then move the .prf file to %QTDIR%/mkspecs/features/ 29 | ## see README.md for more information. 
30 | 31 | unix{ 32 | CONFIG += link_pkgconfig 33 | PKGCONFIG += opencv 34 | } 35 | win32{ 36 | # load(opencv) instead of CONFIG+=opencv used here 37 | !load(opencv):message("You must create an opencv.prf, and move it to $$[QT_INSTALL_PREFIX]/mkspecs/features/") 38 | } 39 | 40 | # silence msvc warning 4819 41 | win32-msvc*:QMAKE_CXXFLAGS += -wd4819 42 | 43 | INCLUDEPATH += $$PWD 44 | DEPENDPATH += $$PWD 45 | 46 | HEADERS += \ 47 | $$PWD/cvmatandqimage.h 48 | 49 | SOURCES += \ 50 | $$PWD/cvmatandqimage.cpp 51 | -------------------------------------------------------------------------------- /process.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/collincebecky/QT-OPENCV-GSTREAMER-CUDA/4f7fdade20ffa410be805b93fc1661f36c6ef3e6/process.zip -------------------------------------------------------------------------------- /timespec.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a 5 | * copy of this software and associated documentation files (the "Software"), 6 | * to deal in the Software without restriction, including without limitation 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | * and/or sell copies of the Software, and to permit persons to whom the 9 | * Software is furnished to do so, subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in 12 | * all copies or substantial portions of the Software. 13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | * DEALINGS IN THE SOFTWARE. 21 | */ 22 | 23 | #include "timespec.h" 24 | 25 | 26 | -------------------------------------------------------------------------------- /timespec.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a 5 | * copy of this software and associated documentation files (the "Software"), 6 | * to deal in the Software without restriction, including without limitation 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | * and/or sell copies of the Software, and to permit persons to whom the 9 | * Software is furnished to do so, subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in 12 | * all copies or substantial portions of the Software. 13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | * DEALINGS IN THE SOFTWARE. 21 | */ 22 | 23 | #ifndef __TIMESPEC_UTIL_H__ 24 | #define __TIMESPEC_UTIL_H__ 25 | 26 | #include 27 | 28 | #include 29 | #include 30 | 31 | 32 | /** 33 | * Retrieve a timestamp of the current system time. 
/**
 * Retrieve a timestamp of the current system time.
 * @ingroup time
 */
inline void timestamp( timespec* timestampOut )
{
	if( !timestampOut )
		return;

	timestampOut->tv_sec  = 0;
	timestampOut->tv_nsec = 0;

	clock_gettime(CLOCK_REALTIME, timestampOut);
}


/**
 * Retrieve a timestamp of the current system time.
 * @ingroup time
 */
inline timespec timestamp()
{
	timespec t;
	timestamp(&t);
	return t;
}


/**
 * Return a blank timespec that's been zero'd.
 * @ingroup time
 */
inline timespec timeZero()
{
	timespec t;
	t.tv_sec  = 0;
	t.tv_nsec = 0;
	return t;
}


/**
 * Return an initialized `timespec` from seconds + nanoseconds.
 * @ingroup time
 */
inline timespec timeNew( time_t seconds, long int nanoseconds )
{
	timespec t;
	t.tv_sec  = seconds;
	t.tv_nsec = nanoseconds;
	return t;
}


/**
 * Return an initialized `timespec` from a total nanosecond count.
 * @ingroup time
 *
 * BUGFIX: the previous code computed `nanoseconds / 1e-9`, which
 * MULTIPLIES by 10^9 instead of dividing, yielding garbage fields.
 * Integer division by 1,000,000,000 splits the count exactly.
 */
inline timespec timeNew( long int nanoseconds )
{
	const time_t sec = nanoseconds / 1000000000L;
	return timeNew(sec, nanoseconds - sec * 1000000000L);
}


/**
 * Add two times together, carrying overflow of tv_nsec into tv_sec
 * so the result is normalized (tv_nsec in [0, 1e9)).
 * @ingroup time
 *
 * BUGFIX: the carry previously used `1e-9` instead of 10^9, so the
 * nanosecond field was never normalized correctly.
 */
inline timespec timeAdd( const timespec& a, const timespec& b )
{
	timespec t;
	t.tv_sec  = a.tv_sec  + b.tv_sec;
	t.tv_nsec = a.tv_nsec + b.tv_nsec;

	const time_t sec = t.tv_nsec / 1000000000L;	// whole seconds of carry

	t.tv_sec  += sec;
	t.tv_nsec -= sec * 1000000000L;
	return t;
}


/**
 * Find the difference between two timestamps (end - start),
 * storing the normalized result in *result.
 * @ingroup time
 */
inline void timeDiff( const timespec& start, const timespec& end, timespec* result )
{
	if( !result )	// guard against NULL output (previously crashed)
		return;

	if( (end.tv_nsec - start.tv_nsec) < 0 )
	{
		result->tv_sec  = end.tv_sec - start.tv_sec - 1;
		result->tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
	}
	else
	{
		result->tv_sec  = end.tv_sec - start.tv_sec;
		result->tv_nsec = end.tv_nsec - start.tv_nsec;
	}
}


/**
 * Find the difference between two timestamps.
 * @ingroup time
 */
/**
 * Find the difference between two timestamps.
 * @ingroup time
 */
inline void timeDiff( const timespec& start, const timespec& end, timespec* result );	// defined in the section above


/**
 * Find the difference between two timestamps (end - start), by value.
 * @ingroup time
 */
inline timespec timeDiff( const timespec& start, const timespec& end )
{
	timespec result;
	timeDiff(start, end, &result);
	return result;
}


/**
 * Compare two timestamps.
 *
 * @return  0, if timestamp A equals timestamp B
 *         >0, if timestamp A is greater than timestamp B
 *         <0, if timestamp A is less than timestamp B
 *
 * @ingroup time
 */
inline int timeCmp( const timespec& a, const timespec& b )
{
	if( a.tv_sec != b.tv_sec )
		return (a.tv_sec < b.tv_sec) ? -1 : 1;

	if( a.tv_nsec != b.tv_nsec )
		return (a.tv_nsec < b.tv_nsec) ? -1 : 1;

	return 0;
}


/**
 * Convert to 32-bit float (in milliseconds).
 * @ingroup time
 */
inline float timeFloat( const timespec& a )
{
	return a.tv_sec * 1000.0f + a.tv_nsec * 0.000001f;
}


/**
 * Convert to 64-bit double (in milliseconds).
 * @ingroup time
 *
 * BUGFIX: previously declared to return float, silently discarding
 * the double precision the function is documented to provide.
 */
inline double timeDouble( const timespec& a )
{
	return a.tv_sec * 1000.0 + a.tv_nsec * 0.000001;
}


/**
 * Produce a text representation of the timestamp in strOut.
 * strOut must hold at least ~48 bytes; returns strOut.
 * @ingroup time
 *
 * BUGFIX: %lu does not match uint64_t on 32-bit platforms (UB);
 * use %llu with an unsigned long long cast instead.
 */
inline char* timeStr( const timespec& timestamp, char* strOut )
{
	sprintf(strOut, "%llu s %llu ns", (unsigned long long)timestamp.tv_sec, (unsigned long long)timestamp.tv_nsec);
	return strOut;
}


/**
 * Print the time to stdout, optionally prefixed by text.
 * @ingroup time
 *
 * BUGFIX: passing text=NULL to printf's %s is undefined behavior;
 * substitute an empty string.  Also %llu for 32-bit portability.
 */
inline void timePrint( const timespec& timestamp, const char* text=NULL )
{
	printf("%s %llus %010lluns\n", text ? text : "", (unsigned long long)timestamp.tv_sec, (unsigned long long)timestamp.tv_nsec);
}


/**
 * Put the current thread to sleep for a specified time.
 * @ingroup time
 */
158 | * @ingroup time 159 | */ 160 | inline void sleepTime( const timespec& duration ) { nanosleep(&duration, NULL); } 161 | 162 | 163 | /** 164 | * Put the current thread to sleep for a specified time. 165 | * @ingroup time 166 | */ 167 | inline void sleepTime( time_t seconds, long int nanoseconds ) { sleepTime(timeNew(seconds,nanoseconds)); } 168 | 169 | 170 | /** 171 | * Put the current thread to sleep for a specified number of milliseconds. 172 | * @ingroup time 173 | */ 174 | inline void sleepMs( uint64_t milliseconds ) { sleepTime(timeNew(0, milliseconds * 1000 * 1000)); } 175 | 176 | 177 | /** 178 | * Put the current thread to sleep for a specified number of microseconds. 179 | * @ingroup time 180 | */ 181 | inline void sleepUs( uint64_t microseconds ) { sleepTime(timeNew(0, microseconds * 1000)); } 182 | 183 | 184 | /** 185 | * Put the current thread to sleep for a specified number of nanoseconds. 186 | * @ingroup time 187 | */ 188 | inline void sleepNs( uint64_t nanoseconds ) { sleepTime(timeNew(0, nanoseconds)); } 189 | 190 | 191 | #endif 192 | --------------------------------------------------------------------------------