├── activate_raspi_cam.sh ├── src ├── helpers.cpp ├── opencl_kernel_include.cpp ├── deps │ ├── cpu_features │ │ ├── FeatureDetector-master.zip │ │ ├── Main.cpp │ │ ├── cpu_x86_Linux.ipp │ │ ├── cpu_x86_Windows.ipp │ │ ├── cpu_x86.h │ │ ├── LICENSE │ │ └── cpu_x86.cpp │ ├── dependencies.h │ ├── aruco │ │ ├── aruco_cvversioning.h │ │ ├── fractallabelers │ │ │ ├── fractalmarker.h │ │ │ ├── fractallabeler.h │ │ │ ├── fractalmarker.cpp │ │ │ ├── fractalmarkerset.h │ │ │ ├── fractalposetracker.h │ │ │ └── fractallabeler.cpp │ │ ├── aruco.h │ │ ├── aruco_export.h │ │ ├── debug.cpp │ │ ├── markerlabeler.cpp │ │ ├── CMakeLists.txt │ │ ├── cvdrawingutils.h │ │ ├── fractaldetector.h │ │ ├── debug.h │ │ ├── markerlabeler.h │ │ ├── fractaldetector.cpp │ │ ├── cameraparameters.h │ │ ├── timers.h │ │ ├── marker.h │ │ ├── posetracker.h │ │ ├── markermap.h │ │ ├── cvdrawingutils.cpp │ │ └── dictionary.h │ ├── DeviceEnumerator.h │ └── DeviceEnumerator.cpp ├── aruco_include.cpp ├── lt_lsl_protocol.h ├── speller_canvas.h ├── cv_button.h ├── test_calibration_solve.m ├── helpers.h ├── calibration.h ├── pupil_tracking.cpp ├── eyetracking.h ├── speller_canvas.cpp ├── aruco_canvas.h ├── pupil_tracking.h └── lsl_client_example.cpp ├── assets ├── calibration_marker_5x5.dict ├── aruco_00100.png ├── aruco_00301.png ├── aruco_00450.png ├── aruco_00700.png ├── aruco_00100_e.png ├── aruco_00301_e.png ├── aruco_00450_e.png ├── aruco_00700_e.png ├── calibration_marker.png ├── aruco_mip_36h12_00002.png ├── aruco_mip_36h12_00004.png ├── aruco_mip_36h12_00006.png ├── aruco_mip_36h12_00008.png ├── lpw_01_sample_eye_image.png └── kernel_inner_gradients_all.cl ├── documentation ├── images │ ├── delock_camera.png │ ├── eye_cam_small.jpg │ ├── screenshot01.png │ ├── screenshot02.png │ ├── headmount_proto_01.png │ ├── camera_mount_connector.png │ ├── camera_mount_headband_arm_01.png │ ├── camera_mount_headband_arm_02.png │ ├── phillipp_libretracker_small.jpg │ └── low_cost_head_mounted_eyetracker_small.jpg └── publications │ ├── KrauseEssig2019b_ECEM2019_APA.bib │ ├── KrauseEssig2019a_etra2019_paper130.pdf │ ├── KrauseEssig2019b_poster_ecem2019_v2.pdf │ ├── KrauseEssig2019b_ECEM2019_APA.txt │ ├── KrauseEssig2019a_ETRA2019_APA.txt │ └── KrauseEssig2019a_ETRA2019.bib ├── set_directory_vars.bat ├── git_update_submodules.sh ├── show_temp.sh ├── .github └── workflows │ └── ubuntu.yml ├── .gitmodules ├── codelite └── codelite.workspace ├── vstudio ├── Libretracker.sln └── lsl_client_example.vcxproj └── CMakeLists.txt /activate_raspi_cam.sh: -------------------------------------------------------------------------------- 1 | sudo modprobe bcm2835-v4l2 2 | lsmod 3 | -------------------------------------------------------------------------------- /src/helpers.cpp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/afkrause/libretracker/HEAD/src/helpers.cpp -------------------------------------------------------------------------------- /assets/calibration_marker_5x5.dict: -------------------------------------------------------------------------------- 1 | name MYOWN 2 | nbits 25 3 | 1111110001101011000111011 4 | -------------------------------------------------------------------------------- /assets/aruco_00100.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/afkrause/libretracker/HEAD/assets/aruco_00100.png -------------------------------------------------------------------------------- 
/assets/aruco_00301.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/afkrause/libretracker/HEAD/assets/aruco_00301.png -------------------------------------------------------------------------------- /assets/aruco_00450.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/afkrause/libretracker/HEAD/assets/aruco_00450.png -------------------------------------------------------------------------------- /assets/aruco_00700.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/afkrause/libretracker/HEAD/assets/aruco_00700.png -------------------------------------------------------------------------------- /assets/aruco_00100_e.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/afkrause/libretracker/HEAD/assets/aruco_00100_e.png -------------------------------------------------------------------------------- /assets/aruco_00301_e.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/afkrause/libretracker/HEAD/assets/aruco_00301_e.png -------------------------------------------------------------------------------- /assets/aruco_00450_e.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/afkrause/libretracker/HEAD/assets/aruco_00450_e.png -------------------------------------------------------------------------------- /assets/aruco_00700_e.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/afkrause/libretracker/HEAD/assets/aruco_00700_e.png -------------------------------------------------------------------------------- /assets/calibration_marker.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/afkrause/libretracker/HEAD/assets/calibration_marker.png -------------------------------------------------------------------------------- /assets/aruco_mip_36h12_00002.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/afkrause/libretracker/HEAD/assets/aruco_mip_36h12_00002.png -------------------------------------------------------------------------------- /assets/aruco_mip_36h12_00004.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/afkrause/libretracker/HEAD/assets/aruco_mip_36h12_00004.png -------------------------------------------------------------------------------- /assets/aruco_mip_36h12_00006.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/afkrause/libretracker/HEAD/assets/aruco_mip_36h12_00006.png -------------------------------------------------------------------------------- /assets/aruco_mip_36h12_00008.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/afkrause/libretracker/HEAD/assets/aruco_mip_36h12_00008.png -------------------------------------------------------------------------------- /src/opencl_kernel_include.cpp: -------------------------------------------------------------------------------- 1 | #ifdef OPENCL_ENABLED 2 | #include 
"deps/timms_algorithm/src/opencl_kernel.cpp" 3 | #endif -------------------------------------------------------------------------------- /assets/lpw_01_sample_eye_image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/afkrause/libretracker/HEAD/assets/lpw_01_sample_eye_image.png -------------------------------------------------------------------------------- /documentation/images/delock_camera.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/afkrause/libretracker/HEAD/documentation/images/delock_camera.png -------------------------------------------------------------------------------- /documentation/images/eye_cam_small.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/afkrause/libretracker/HEAD/documentation/images/eye_cam_small.jpg -------------------------------------------------------------------------------- /documentation/images/screenshot01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/afkrause/libretracker/HEAD/documentation/images/screenshot01.png -------------------------------------------------------------------------------- /documentation/images/screenshot02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/afkrause/libretracker/HEAD/documentation/images/screenshot02.png -------------------------------------------------------------------------------- /set_directory_vars.bat: -------------------------------------------------------------------------------- 1 | SET PATH=%PATH%;c:\andre_dlls\ 2 | setx MY_LIB_DIR d:\andre_daten\lib 3 | set MY_LIB_DIR=d:\andre_daten\lib 4 | -------------------------------------------------------------------------------- /documentation/images/headmount_proto_01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/afkrause/libretracker/HEAD/documentation/images/headmount_proto_01.png -------------------------------------------------------------------------------- /documentation/images/camera_mount_connector.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/afkrause/libretracker/HEAD/documentation/images/camera_mount_connector.png -------------------------------------------------------------------------------- /src/deps/cpu_features/FeatureDetector-master.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/afkrause/libretracker/HEAD/src/deps/cpu_features/FeatureDetector-master.zip -------------------------------------------------------------------------------- /documentation/images/camera_mount_headband_arm_01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/afkrause/libretracker/HEAD/documentation/images/camera_mount_headband_arm_01.png -------------------------------------------------------------------------------- /documentation/images/camera_mount_headband_arm_02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/afkrause/libretracker/HEAD/documentation/images/camera_mount_headband_arm_02.png 
-------------------------------------------------------------------------------- /documentation/images/phillipp_libretracker_small.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/afkrause/libretracker/HEAD/documentation/images/phillipp_libretracker_small.jpg -------------------------------------------------------------------------------- /documentation/publications/KrauseEssig2019b_ECEM2019_APA.bib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/afkrause/libretracker/HEAD/documentation/publications/KrauseEssig2019b_ECEM2019_APA.bib -------------------------------------------------------------------------------- /documentation/images/low_cost_head_mounted_eyetracker_small.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/afkrause/libretracker/HEAD/documentation/images/low_cost_head_mounted_eyetracker_small.jpg -------------------------------------------------------------------------------- /documentation/publications/KrauseEssig2019a_etra2019_paper130.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/afkrause/libretracker/HEAD/documentation/publications/KrauseEssig2019a_etra2019_paper130.pdf -------------------------------------------------------------------------------- /documentation/publications/KrauseEssig2019b_poster_ecem2019_v2.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/afkrause/libretracker/HEAD/documentation/publications/KrauseEssig2019b_poster_ecem2019_v2.pdf -------------------------------------------------------------------------------- /git_update_submodules.sh: -------------------------------------------------------------------------------- 1 | git submodule update 2 | git submodule foreach git checkout master 3 | git submodule foreach git pull origin master 4 | #__git submodule update --recursive --remote 5 | -------------------------------------------------------------------------------- /src/deps/dependencies.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "s/timer_hd.h" 4 | #include "s/eigenlab.h" 5 | #include "s/filters.h" 6 | #include "s/eigen_pseudoinverse.h" 7 | #include "s/simple_gui_fltk.h" 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /documentation/publications/KrauseEssig2019b_ECEM2019_APA.txt: -------------------------------------------------------------------------------- 1 | Krause, A. F., & Essig, K. (2019). LibreTracker: A Free and Open-source Eyetracking Software for head-mounted Eyetrackers. In 20th European Conference on Eye Movements, (ECEM 2019) (p. 391) -------------------------------------------------------------------------------- /documentation/publications/KrauseEssig2019a_ETRA2019_APA.txt: -------------------------------------------------------------------------------- 1 | Krause, A. F., & Essig, K. (2019, June). Boosting speed- and accuracy of gradient based dark pupil tracking using vectorization and differential evolution. In Proceedings of the 11th ACM Symposium on Eye Tracking Research & Applications (p. 34). ACM. 
-------------------------------------------------------------------------------- /documentation/publications/KrauseEssig2019a_ETRA2019.bib: -------------------------------------------------------------------------------- 1 | @inproceedings{krause2019boosting, 2 | title={Boosting speed-and accuracy of gradient based dark pupil tracking using vectorization and differential evolution}, 3 | author={Krause, Andr{\'e} Frank and Essig, Kai}, 4 | booktitle={Proceedings of the 11th ACM Symposium on Eye Tracking Research \& Applications}, 5 | pages={34}, 6 | year={2019}, 7 | organization={ACM} 8 | } -------------------------------------------------------------------------------- /show_temp.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Script: my-pi-temp.sh 3 | # Purpose: Display the ARM CPU and GPU temperature of Raspberry Pi 2/3 4 | # Author: Vivek Gite <www.cyberciti.biz> under GPL v2.x+ 5 | # ------------------------------------------------------- 6 | while : 7 | do 8 | cpu=$(</sys/class/thermal/thermal_zone0/temp) 9 | echo "$(date) @ $(hostname)" 10 | echo "-------------------------------------------" 11 | echo "GPU => $(/opt/vc/bin/vcgencmd measure_temp)" 12 | echo "CPU => $((cpu/1000))'C" 13 | sleep 2s 14 | done 15 | 16 | -------------------------------------------------------------------------------- /src/deps/aruco/aruco_cvversioning.h: -------------------------------------------------------------------------------- 1 | #ifndef ARUCO_CV_VERSIONING 2 | #define ARUCO_CV_VERSIONING 3 | #include <opencv2/core/version.hpp> 4 | #if CV_MAJOR_VERSION >= 4 5 | #include 6 | 7 | #define CV_CAP_PROP_FRAME_COUNT cv::CAP_PROP_FRAME_COUNT 8 | #define CV_CAP_PROP_POS_FRAMES cv::CAP_PROP_POS_FRAMES 9 | #define CV_BGR2GRAY cv::COLOR_BGR2GRAY 10 | #define CV_GRAY2BGR cv::COLOR_GRAY2BGR 11 | #define CV_FONT_HERSHEY_COMPLEX cv::FONT_HERSHEY_COMPLEX 12 | #define CV_FILLED cv::FILLED 13 | #endif 14 | 15 | #endif 16 | 17 | -------------------------------------------------------------------------------- /src/deps/cpu_features/Main.cpp: -------------------------------------------------------------------------------- 1 | /* Main.cpp 2 | * 3 | * Author : Alexander J. Yee 4 | * Date Created : 04/17/2015 5 | * Last Modified : 04/17/2015 6 | * 7 | */ 8 | 9 | #include <iostream> 10 | using std::cout; 11 | using std::endl; 12 | 13 | #include "x86/cpu_x86.h" 14 | using namespace FeatureDetector; 15 | 16 | int main(){ 17 | 18 | cout << "CPU Vendor String: " << cpu_x86::get_vendor_string() << endl; 19 | cout << endl; 20 | 21 | cpu_x86::print_host(); 22 | 23 | #if _WIN32 24 | system("pause"); 25 | #endif 26 | } 27 | -------------------------------------------------------------------------------- /.github/workflows/ubuntu.yml: -------------------------------------------------------------------------------- 1 | name: ubuntu 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | 9 | jobs: 10 | build: 11 | 12 | runs-on: ubuntu-20.04 13 | 14 | steps: 15 | - uses: actions/checkout@v2 16 | with: 17 | submodules: 'recursive' 18 | - name: dependencies 19 | run: sudo apt-get install libeigen3-dev libfltk1.3-dev libopencv-dev 20 | - name: cmake 21 | run: cmake .
22 | - name: make 23 | run: make 24 | #- name: make check 25 | # run: make check 26 | #- name: make distcheck 27 | # run: make distcheck 28 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "src/deps/compute"] 2 | path = src/deps/compute 3 | url = https://github.com/boostorg/compute.git 4 | [submodule "src/deps/s"] 5 | path = src/deps/s 6 | url = https://github.com/afkrause/s.git 7 | [submodule "src/deps/OpenCL-Headers"] 8 | path = src/deps/OpenCL-Headers 9 | url = https://github.com/KhronosGroup/OpenCL-Headers.git 10 | [submodule "src/deps/tuebingen_pure"] 11 | path = src/deps/tuebingen_pure 12 | url = https://github.com/afkrause/tuebingen_pure.git 13 | branch = master 14 | [submodule "src/deps/timms_algorithm"] 15 | path = src/deps/timms_algorithm 16 | url = https://github.com/afkrause/timms_algorithm.git 17 | -------------------------------------------------------------------------------- /src/aruco_include.cpp: -------------------------------------------------------------------------------- 1 | #include "deps/aruco/cameraparameters.cpp" 2 | #include "deps/aruco/cvdrawingutils.cpp" 3 | #include "deps/aruco/dictionary.cpp" 4 | #include "deps/aruco/dictionary_based.cpp" 5 | #include "deps/aruco/ippe.cpp" 6 | #include "deps/aruco/marker.cpp" 7 | #include "deps/aruco/markerdetector.cpp" 8 | #include "deps/aruco/markerdetector_impl.cpp" 9 | #include "deps/aruco/markerlabeler.cpp" 10 | #include "deps/aruco/markermap.cpp" 11 | #include "deps/aruco/posetracker.cpp" 12 | 13 | #include "deps/aruco/debug.cpp" 14 | 15 | 16 | //#include "deps/aruco/checkrectcontour.cpp" 17 | //#include "deps/aruco/ar_omp.cpp" 18 | 19 | //#include "deps/aruco/markerlabelers/svmmarkers.cpp" -------------------------------------------------------------------------------- /codelite/codelite.workspace: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /src/deps/DeviceEnumerator.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #define NOMINMAX // avoid the definition of min / max macros by windows includes 4 | #include <windows.h> 5 | #include <dshow.h> 6 | 7 | #pragma comment(lib, "strmiids") 8 | 9 | #include <string> 10 | #include <map> 11 | 12 | struct Device 13 | { 14 | int id; // This can be used to open the device in OpenCV 15 | std::string devicePath; 16 | std::string deviceName; // This can be used to show the devices to the user 17 | }; 18 | 19 | class DeviceEnumerator 20 | { 21 | 22 | public: 23 | 24 | DeviceEnumerator() = default; 25 | std::map<int, Device> getDevicesMap(const GUID deviceClass); 26 | std::map<int, Device> getVideoDevicesMap(); 27 | std::map<int, Device> getAudioDevicesMap(); 28 | 29 | private: 30 | 31 | std::string ConvertBSTRToMBS(BSTR bstr); 32 | std::string ConvertWCSToMBS(const wchar_t* pstr, long wslen); 33 | 34 | }; 35 | -------------------------------------------------------------------------------- /src/lt_lsl_protocol.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | // the lab streaming layer (LSL) sends data as arrays of e.g. floats or doubles. 4 | // for example, an EEG headset might send 20 doubles ( = number of EEG channels ) per frame. 5 | 6 | // data structure for one eye.
because the timestamp must be a double, the other values are coded as doubles as well, even if floats would be enough. 7 | // currently, only monocular tracking is supported 8 | enum enum_eye_data 9 | { 10 | LT_TIMESTAMP, 11 | LT_PUPIL_X, // pupil centre 12 | LT_PUPIL_Y, 13 | LT_GAZE_X, // gaze coordinates in scene camera coordinate system 14 | LT_GAZE_Y, 15 | LT_SCREEN_X, // gaze coordinates transformed to screen space using AR Markers 16 | LT_SCREEN_Y, // set to NaN if AR-Marker Tracking is not working 17 | LT_SCREEN_X_FILTERED, // jitter-filtered gaze coordinates in screen space 18 | LT_SCREEN_Y_FILTERED, 19 | LT_N_EYE_DATA 20 | }; 21 | 22 | 23 | // TODO marker streaming 24 | 25 | // TODO video streaming -------------------------------------------------------------------------------- /src/deps/aruco/fractallabelers/fractalmarker.h: -------------------------------------------------------------------------------- 1 | #ifndef FRACTALMARKER_H 2 | #define FRACTALMARKER_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include "../markermap.h" 8 | 9 | namespace aruco 10 | { 11 | class FractalMarker : public aruco::Marker3DInfo 12 | { 13 | public: 14 | FractalMarker(); 15 | FractalMarker(int id, cv::Mat m, std::vector<cv::Point3f> corners, std::vector<int> id_submarkers); 16 | 17 | //Add new submarker 18 | void addSubFractalMarker(FractalMarker submarker); 19 | 20 | //Get inner corners 21 | std::vector<cv::Point3f> getInnerCorners(); 22 | 23 | //Marker MAT 24 | const cv::Mat mat() const 25 | { 26 | return _M; 27 | } 28 | 29 | //Marker mask (mask applied to submarkers) 30 | const cv::Mat mask() const 31 | { 32 | return _mask; 33 | } 34 | 35 | //Total number of bits 36 | int nBits() 37 | { 38 | return _M.total(); 39 | } 40 | 41 | //Submarkers ids 42 | std::vector<int> subMarkers() 43 | { 44 | return _submarkers; 45 | } 46 | 47 | private: 48 | cv::Mat _M; 49 | cv::Mat _mask; 50 | std::vector<int> _submarkers; //id subfractalmarkers 51 | }; 52 | } 53 | 54 | #endif // FRACTALMARKER_H 55 | -------------------------------------------------------------------------------- /assets/kernel_inner_gradients_all.cl: -------------------------------------------------------------------------------- 1 | __kernel void kernel_inner_gradients(__write_only image2d_t img_out, const __constant float * data, const int n, const int w, const int h) 2 | { 3 | //#pragma OPENCL EXTENSION cl_khr_fp16 : enable 4 | const sampler_t sampler = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_NEAREST; 5 | 6 | // Store each work-item's unique row and column 7 | const int x = get_global_id(0); 8 | const int y = get_global_id(1); 9 | 10 | // Each work-item iterates around its local area based on the size of the filter 11 | const int2 coords = { x, y }; // Coordinates for accessing the image 12 | 13 | // center positions 14 | const float2 c = { x, y }; 15 | float2 p; 16 | float2 g; 17 | float2 d; 18 | float dp = 0.0f; 19 | float sum = 0.0f; 20 | size_t idx = 0; 21 | 22 | //d = fast_normalize(d); // uses reciprocal square root internally .. but still slower than manual half_sqrt !! (but why??) 23 | //d = d * half_rsqrt(dot(d, d)); also not faster ..
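// Note on the loop below: it accumulates a gradient-alignment score for the output pixel c.
// data[] holds interleaved gradient vectors (g.x, g.y) for every image position p; each one
// contributes max(0, dot(normalize(p - c), g))^2, so c scores highest when the displacement
// vectors from c to the surrounding pixels are aligned with the gradients there, which is the
// case at the centre of a dark pupil. This appears to be the gradient-based dark-pupil objective
// referenced by the timms_algorithm dependency and the accompanying ETRA 2019 paper (explanatory
// comment; the exact reference is inferred, not stated in the kernel source).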
24 | 25 | for (int i = 0; i < h; i++) 26 | { 27 | p.y = i; 28 | for (int k = 0; k < w; k++) 29 | { 30 | g.x = data[idx++]; 31 | g.y = data[idx++]; 32 | p.x = k; 33 | d = p - c; 34 | d = d * rsqrt(dot(d, d)); 35 | dp = dot(d, g); 36 | dp = max(0.0f, dp); 37 | sum += dp*dp; 38 | } 39 | } 40 | 41 | //sum = 0.5+0.5*sin( fx * fy / fn); 42 | 43 | //barrier(CLK_GLOBAL_MEM_FENCE); 44 | 45 | //Same channel is copied in all three channels 46 | //write_imagef(img_out, coords, (float4)(sum, sum, sum, 1.0f)); 47 | write_imagef(img_out, coords, sum); 48 | } -------------------------------------------------------------------------------- /src/deps/aruco/aruco.h: -------------------------------------------------------------------------------- 1 | /** 2 | 3 | Copyright 2017 Rafael Muñoz Salinas. All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without modification, are 6 | permitted provided that the following conditions are met: 7 | 8 | 1. Redistributions of source code must retain the above copyright notice, this list of 9 | conditions and the following disclaimer. 10 | 11 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 12 | of conditions and the following disclaimer in the documentation and/or other materials 13 | provided with the distribution. 14 | 15 | THIS SOFTWARE IS PROVIDED BY Rafael Muñoz Salinas ''AS IS'' AND ANY EXPRESS OR IMPLIED 16 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 17 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Rafael Muñoz Salinas OR 18 | CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 19 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 20 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 21 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 22 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 23 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 | 25 | The views and conclusions contained in the software and documentation are those of the 26 | authors and should not be interpreted as representing official policies, either expressed 27 | or implied, of Rafael Muñoz Salinas. 
28 | */ 29 | 30 | #include "fractaldetector.h" 31 | #include "markerdetector.h" 32 | #include "posetracker.h" 33 | #include "cvdrawingutils.h" 34 | #include "dictionary.h" 35 | 36 | #define ARUCO_VERSION_MAJOR 3 37 | #define ARUCO_VERSION_MINOR 0 38 | 39 | 40 | -------------------------------------------------------------------------------- /src/speller_canvas.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | #include 6 | 7 | #include 8 | 9 | #include "cv_button.h" 10 | 11 | // draw keyboard buttons 12 | class Speller_canvas 13 | { 14 | protected: 15 | cv::Scalar flicker_col_0{ 0, 55, 0 }; // dark green 16 | cv::Scalar flicker_col_1{ 100, 255, 100 }; // bright green 17 | std::array<int, 2> flicker_code_01{ 0,1 }; 18 | std::array<int, 3> flicker_code_02{ 0,0,1 }; 19 | std::array<int, 4> flicker_code_03{ 0,0,1,1 }; 20 | std::array<int, 5> flicker_code_04{ 0,0,0,1,1 }; 21 | size_t flicker_counter = 0; 22 | public: 23 | 24 | // button labels 25 | 26 | //Eigen::Matrix<Button, 4, 7> buttons; 27 | //Eigen::Matrix<char, 4, 7> labels; 28 | 29 | Eigen::Matrix<Button, 3, 10> buttons; 30 | Eigen::Matrix<char, 3, 10> labels; 31 | 32 | std::string speller_str = ""; 33 | 34 | void setup() 35 | { 36 | /* 37 | // sorted by letter frequency in the German language 38 | labels << 39 | 'E', 'N', 'I', 'S', 'R', 'A', 'T', 40 | 'D', 'H', 'U', 'L', 'C', 'G', 'M', 41 | 'O', 'B', 'W', 'F', 'K', 'Z', 'P', 42 | 'V', 'J', 'Y', 'X', 'Q', '_', '<'; 43 | */ 44 | 45 | // standard German keyboard layout 46 | labels << 47 | 'Q', 'W', 'E', 'R', 'T', 'Z', 'U', 'I', 'O', 'P', 48 | 'A', 'S', 'D', 'F', 'G', 'H', 'J', 'K', 'L', '!', 49 | 'Y', 'X', 'C', 'V', 'B', 'N', 'M', '_', '_', '<'; 50 | 51 | } 52 | 53 | void draw_keyboard(cv::Mat& img, int x, int y, int w, int h, int marker_size, int mx, int my, bool& button_released); 54 | void draw_keyboard_ssvep(cv::Mat& img, int x, int y, int w, int h, int marker_size, int mx, int my, bool& button_released); 55 | 56 | void draw() 57 | { 58 | //draw_keyboard(cv::Mat& img, int x, int y, int w, int h, int mx, int my, Eigen::Matrix& buttons, const Eigen::Matrix& labels, string& speller_str) 59 | } 60 | }; 61 | -------------------------------------------------------------------------------- /src/deps/aruco/fractallabelers/fractallabeler.h: -------------------------------------------------------------------------------- 1 | #include "../markerlabeler.h" 2 | #include "fractalposetracker.h" 3 | 4 | namespace aruco 5 | { 6 | class FractalMarkerLabeler : public MarkerLabeler 7 | { 8 | public: 9 | static cv::Ptr create(std::string params) 10 | { 11 | FractalMarkerSet fractalMarkerSet = FractalMarkerSet::load(params); 12 | FractalMarkerLabeler* fml=new FractalMarkerLabeler(); 13 | fml->setConfiguration(fractalMarkerSet); 14 | return fml; 15 | } 16 | 17 | static cv::Ptr create(FractalMarkerSet::CONF_TYPES conf) 18 | { 19 | FractalMarkerSet fractalMarkerSet = FractalMarkerSet::loadPredefined(conf); 20 | FractalMarkerLabeler* fml=new FractalMarkerLabeler(); 21 | fml->setConfiguration(fractalMarkerSet); 22 | return fml; 23 | } 24 | 25 | void setConfiguration(const FractalMarkerSet& fractMarkerSet); 26 | 27 | static bool isFractalDictionaryFile(const std::string &path); 28 | 29 | virtual ~FractalMarkerLabeler() 30 | { 31 | } 32 | 33 | bool load(const std::string &path); 34 | 35 | // returns the configuration name 36 | std::string getName() const; 37 | 38 | // main virtual function to do detection 39 | bool detect(const cv::Mat& in, int& marker_id, int& nRotations,std::string &additionalInfo); 40 | 41 | int
getNSubdivisions()const{return (sqrt(_fractalMarkerSet.nBits())+2);} 42 | 43 | FractalMarkerSet _fractalMarkerSet; 44 | 45 | private: 46 | bool getInnerCode(const cv::Mat& thres_img, int total_nbits, std::vector& ids); 47 | cv::Mat rotate(const cv::Mat& in); 48 | uint64_t touulong(const cv::Mat& code); 49 | void toMat(uint64_t code, int nbits_sq, cv::Mat& out); 50 | }; 51 | } 52 | 53 | -------------------------------------------------------------------------------- /src/cv_button.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | // simple opencv based imgui Button 3 | 4 | inline bool is_inside(int x, int y, int w, int h, int mx, int my) 5 | { 6 | if (mx >= x && my >= y && mx <= x + w && my <= y + h) { return true; } 7 | else { return false; } 8 | return false; 9 | } 10 | 11 | 12 | class Button 13 | { 14 | private: 15 | float was_inside = 0.0f; 16 | float was_triggered = 0.0f; 17 | public: 18 | 19 | // returns true if the button was triggered 20 | bool draw(cv::Mat& img, const int x, const int y, const int w, const int h, const int mx, const int my, bool& mouse_button_released, const std::string label, cv::Scalar button_color = cv::Scalar(0, 100, 0), cv::Scalar button_color_hover = cv::Scalar(0, 225, 0)) 21 | { 22 | using namespace cv; 23 | using namespace std; 24 | 25 | bool triggered = false; 26 | 27 | if (is_inside(x, y, w, h, mx, my)) 28 | { 29 | //cout << "i"; 30 | was_inside = 1.0f; 31 | if (mouse_button_released) 32 | { 33 | mouse_button_released = false; // consume the event 34 | triggered = true; 35 | was_triggered = 1.0f; 36 | cout << "button triggered" << endl; 37 | } 38 | } 39 | 40 | // fade color depending on states 41 | auto c = button_color + button_color_hover * was_inside + Scalar(200.0f * was_triggered); 42 | 43 | // saturate cast: copy constructor automatically saturates colors to 0 .. 255 44 | c = Scalar_<uchar>(c); 45 | 46 | 47 | const float fade_speed = 0.96; 48 | was_inside = fade_speed * was_inside; 49 | was_triggered = fade_speed * was_triggered; 50 | 51 | rectangle(img, Rect(x, y, w, h), c, FILLED); 52 | const int fw = 15; // font width in pixel of FONT_HERSHEY_SIMPLEX 53 | const int fh = 15; // font height in pixel of FONT_HERSHEY_SIMPLEX 54 | int s = label.size(); 55 | auto p = Point(x + 0.5*w - 0.5*s*fw, y + 0.5*h - 0.5*fh); 56 | putText(img, label, p, FONT_HERSHEY_SIMPLEX, 1, Scalar(255, 255, 255), 4); 57 | 58 | 59 | return triggered; 60 | } 61 | }; -------------------------------------------------------------------------------- /src/deps/cpu_features/cpu_x86_Linux.ipp: -------------------------------------------------------------------------------- 1 | /* cpu_x86_Linux.ipp 2 | * 3 | * Author : Alexander J.
Yee 4 | * Date Created : 04/12/2014 5 | * Last Modified : 04/12/2014 6 | * 7 | */ 8 | 9 | //////////////////////////////////////////////////////////////////////////////// 10 | //////////////////////////////////////////////////////////////////////////////// 11 | //////////////////////////////////////////////////////////////////////////////// 12 | //////////////////////////////////////////////////////////////////////////////// 13 | // Dependencies 14 | #include <cpuid.h> 15 | #include "cpu_x86.h" 16 | namespace cpu_feature_detector{ 17 | //////////////////////////////////////////////////////////////////////////////// 18 | //////////////////////////////////////////////////////////////////////////////// 19 | //////////////////////////////////////////////////////////////////////////////// 20 | //////////////////////////////////////////////////////////////////////////////// 21 | void cpu_x86::cpuid(int32_t out[4], int32_t x){ 22 | __cpuid_count(x, 0, out[0], out[1], out[2], out[3]); 23 | } 24 | uint64_t xgetbv(unsigned int index){ 25 | uint32_t eax, edx; 26 | __asm__ __volatile__("xgetbv" : "=a"(eax), "=d"(edx) : "c"(index)); 27 | return ((uint64_t)edx << 32) | eax; 28 | } 29 | #define _XCR_XFEATURE_ENABLED_MASK 0 30 | //////////////////////////////////////////////////////////////////////////////// 31 | //////////////////////////////////////////////////////////////////////////////// 32 | // Detect 64-bit 33 | bool cpu_x86::detect_OS_x64(){ 34 | // We only support x64 on Linux. 35 | return true; 36 | } 37 | //////////////////////////////////////////////////////////////////////////////// 38 | //////////////////////////////////////////////////////////////////////////////// 39 | //////////////////////////////////////////////////////////////////////////////// 40 | //////////////////////////////////////////////////////////////////////////////// 41 | } 42 | -------------------------------------------------------------------------------- /src/deps/aruco/aruco_export.h: -------------------------------------------------------------------------------- 1 | /** 2 | Copyright 2017 Rafael Muñoz Salinas. All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without modification, are 5 | permitted provided that the following conditions are met: 6 | 7 | 1. Redistributions of source code must retain the above copyright notice, this list of 8 | conditions and the following disclaimer. 9 | 10 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 11 | of conditions and the following disclaimer in the documentation and/or other materials 12 | provided with the distribution. 13 | 14 | THIS SOFTWARE IS PROVIDED BY Rafael Muñoz Salinas ''AS IS'' AND ANY EXPRESS OR IMPLIED 15 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 16 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Rafael Muñoz Salinas OR 17 | CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 18 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 19 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 20 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 21 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 22 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23 | 24 | The views and conclusions contained in the software and documentation are those of the 25 | authors and should not be interpreted as representing official policies, either expressed 26 | or implied, of Rafael Muñoz Salinas. 27 | */ 28 | 29 | 30 | 31 | #ifndef __OPENARUCO_CORE_TYPES_H__ 32 | #define __OPENARUCO_CORE_TYPES_H__ 33 | 34 | #if !defined _CRT_SECURE_NO_DEPRECATE && _MSC_VER > 1300 35 | #define _CRT_SECURE_NO_DEPRECATE /* to avoid multiple Visual Studio 2005 warnings */ 36 | #endif 37 | 38 | 39 | #if (defined WIN32 || defined _WIN32 || defined WINCE) && defined ARUCO_DSO_EXPORTS 40 | #define ARUCO_EXPORT __declspec(dllexport) 41 | #else 42 | #define ARUCO_EXPORT 43 | #endif 44 | 45 | 46 | #endif 47 | -------------------------------------------------------------------------------- /src/deps/aruco/debug.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | Copyright 2017 Rafael Muñoz Salinas. All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without modification, are 5 | permitted provided that the following conditions are met: 6 | 7 | 1. Redistributions of source code must retain the above copyright notice, this list of 8 | conditions and the following disclaimer. 9 | 10 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 11 | of conditions and the following disclaimer in the documentation and/or other materials 12 | provided with the distribution. 13 | 14 | THIS SOFTWARE IS PROVIDED BY Rafael Muñoz Salinas ''AS IS'' AND ANY EXPRESS OR IMPLIED 15 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 16 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Rafael Muñoz Salinas OR 17 | CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 18 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 19 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 20 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 21 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 22 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 | 24 | The views and conclusions contained in the software and documentation are those of the 25 | authors and should not be interpreted as representing official policies, either expressed 26 | or implied, of Rafael Muñoz Salinas. 
27 | */ 28 | 29 | #include "debug.h" 30 | #include 31 | namespace aruco{ 32 | int Debug::level=2; 33 | std::map<std::string, std::string> Debug::strings; 34 | void Debug::addString(std::string &label, std::string &data){ 35 | strings.insert(make_pair(label,data)); 36 | } 37 | 38 | std::string Debug::getString(std::string &str){ 39 | auto it=strings.find(str); 40 | if (it==strings.end())return ""; 41 | else return it->second; 42 | } 43 | 44 | 45 | bool Debug::isInited=false; 46 | 47 | void Debug::setLevel ( int l ) { 48 | level=l; 49 | isInited=false; 50 | init(); 51 | } 52 | int Debug::getLevel() { 53 | init(); 54 | return level; 55 | } 56 | void Debug::init() { 57 | if ( !isInited ) { 58 | isInited=true; 59 | if ( level>=1 ) { 60 | } 61 | } 62 | 63 | } 64 | 65 | 66 | } 67 | 68 | -------------------------------------------------------------------------------- /src/deps/cpu_features/cpu_x86_Windows.ipp: -------------------------------------------------------------------------------- 1 | /* cpu_x86_Windows.ipp 2 | * 3 | * Author : Alexander J. Yee 4 | * Date Created : 04/12/2014 5 | * Last Modified : 04/12/2014 6 | * 7 | */ 8 | 9 | //////////////////////////////////////////////////////////////////////////////// 10 | //////////////////////////////////////////////////////////////////////////////// 11 | //////////////////////////////////////////////////////////////////////////////// 12 | //////////////////////////////////////////////////////////////////////////////// 13 | // Dependencies 14 | #include <Windows.h> 15 | #include <intrin.h> 16 | #include "cpu_x86.h" 17 | namespace cpu_feature_detector{ 18 | //////////////////////////////////////////////////////////////////////////////// 19 | //////////////////////////////////////////////////////////////////////////////// 20 | //////////////////////////////////////////////////////////////////////////////// 21 | //////////////////////////////////////////////////////////////////////////////// 22 | void cpu_x86::cpuid(int32_t out[4], int32_t x){ 23 | __cpuidex(out, x, 0); 24 | } 25 | __int64 xgetbv(unsigned int x){ 26 | return _xgetbv(x); 27 | } 28 | //////////////////////////////////////////////////////////////////////////////// 29 | //////////////////////////////////////////////////////////////////////////////// 30 | // Detect 64-bit - Note that this snippet of code for detecting 64-bit has been copied from MSDN.
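// IsWow64Process is resolved at run time via GetModuleHandle/GetProcAddress because not all
// Windows versions export it; if the lookup fails, the code below simply reports a 32-bit OS.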
31 | typedef BOOL (WINAPI *LPFN_ISWOW64PROCESS) (HANDLE, PBOOL); 32 | BOOL IsWow64() 33 | { 34 | BOOL bIsWow64 = FALSE; 35 | 36 | LPFN_ISWOW64PROCESS fnIsWow64Process = (LPFN_ISWOW64PROCESS) GetProcAddress( 37 | GetModuleHandle(TEXT("kernel32")), "IsWow64Process"); 38 | 39 | if (NULL != fnIsWow64Process) 40 | { 41 | if (!fnIsWow64Process(GetCurrentProcess(), &bIsWow64)) 42 | { 43 | printf("Error Detecting Operating System.\n"); 44 | printf("Defaulting to 32-bit OS.\n\n"); 45 | bIsWow64 = FALSE; 46 | } 47 | } 48 | return bIsWow64; 49 | } 50 | bool cpu_x86::detect_OS_x64(){ 51 | #ifdef _M_X64 52 | return true; 53 | #else 54 | return IsWow64() != 0; 55 | #endif 56 | } 57 | //////////////////////////////////////////////////////////////////////////////// 58 | //////////////////////////////////////////////////////////////////////////////// 59 | //////////////////////////////////////////////////////////////////////////////// 60 | //////////////////////////////////////////////////////////////////////////////// 61 | } 62 | -------------------------------------------------------------------------------- /src/deps/aruco/markerlabeler.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | Copyright 2017 Rafael Muñoz Salinas. All rights reserved. 3 | Redistribution and use in source and binary forms, with or without modification, are 4 | permitted provided that the following conditions are met: 5 | 1. Redistributions of source code must retain the above copyright notice, this list of 6 | conditions and the following disclaimer. 7 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 8 | of conditions and the following disclaimer in the documentation and/or other materials 9 | provided with the distribution. 10 | THIS SOFTWARE IS PROVIDED BY Rafael Muñoz Salinas ''AS IS'' AND ANY EXPRESS OR IMPLIED 11 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 12 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Rafael Muñoz Salinas OR 13 | CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 14 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 15 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 16 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 17 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 18 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 19 | The views and conclusions contained in the software and documentation are those of the 20 | authors and should not be interpreted as representing official policies, either expressed 21 | or implied, of Rafael Muñoz Salinas. 
22 | */ 23 | #include "markerlabeler.h" 24 | #include "dictionary_based.h" 25 | namespace aruco 26 | { 27 | cv::Ptr<MarkerLabeler> MarkerLabeler::create(Dictionary::DICT_TYPES dict_type, 28 | float error_correction_rate) 29 | { 30 | Dictionary dict = Dictionary::loadPredefined(dict_type); 31 | DictionaryBased* db = new DictionaryBased(); 32 | db->setParams(dict, error_correction_rate); 33 | return db; 34 | } 35 | cv::Ptr<MarkerLabeler> MarkerLabeler::create(std::string detector, std::string params) 36 | { 37 | auto _stof=[](std::string str){ 38 | float f;sscanf(str.c_str(),"%f",&f);return f; 39 | }; 40 | (void)params; 41 | Dictionary dict = Dictionary::load(detector); 42 | // try with one from file 43 | DictionaryBased* db = new DictionaryBased(); 44 | db->setParams(dict, _stof(params)); 45 | return db; 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /src/deps/aruco/fractallabelers/fractalmarker.cpp: -------------------------------------------------------------------------------- 1 | #include "fractalmarker.h" 2 | 3 | #include 4 | 5 | 6 | namespace aruco 7 | { 8 | FractalMarker::FractalMarker() 9 | { 10 | 11 | } 12 | 13 | FractalMarker::FractalMarker(int id, cv::Mat m, std::vector<cv::Point3f> corners, std::vector<int> id_submarkers) 14 | { 15 | this->id = id; 16 | this->_M = m; 17 | for(auto p:corners) 18 | push_back(p); 19 | 20 | _submarkers = id_submarkers; 21 | _mask = cv::Mat::ones(m.size(), CV_8UC1); 22 | } 23 | 24 | void FractalMarker::addSubFractalMarker(FractalMarker submarker) 25 | { 26 | float bitSize = (at(1).x - at(0).x) / int(mat().cols+2); 27 | float nsubBits = (submarker.at(1).x - submarker.at(0).x) / bitSize; 28 | 29 | int x_min = int(round(submarker[0].x / bitSize + mat().cols/2)); 30 | int x_max = x_min + nsubBits; 31 | int y_min = int(round(-submarker[0].y / bitSize + mat().cols/2)); 32 | int y_max = y_min + nsubBits; 33 | 34 | for(int y=y_min; y<y_max; y++) { 35 | for(int x=x_min; x<x_max; x++) { 36 | _mask.at<uchar>(y,x)=0; 37 | } 38 | } 39 | } 40 | 41 | std::vector<cv::Point3f> FractalMarker::getInnerCorners() 42 | { 43 | int nBitsSquared = int(sqrt(mat().total())); 44 | float bitSize = getMarkerSize() / (nBitsSquared+2); 45 | 46 | //Add border bits 47 | cv::Mat markerBorder; 48 | copyMakeBorder(mat(), markerBorder, 1,1,1,1,cv::BORDER_CONSTANT,0); 49 | 50 | std::vector<cv::Point3f> innerCorners; 51 | for(int y=0; y< markerBorder.rows-1; y++) 52 | { 53 | for(int x=0; x< markerBorder.cols-1; x++) 54 | { 55 | 56 | if( ((markerBorder.at<uchar>(y, x) == markerBorder.at<uchar>(y+1, x+1)) && 57 | (markerBorder.at<uchar>(y, x) != markerBorder.at<uchar>(y, x+1) || 58 | markerBorder.at<uchar>(y, x) != markerBorder.at<uchar>(y+1, x))) 59 | 60 | || 61 | 62 | ((markerBorder.at<uchar>(y, x+1) == markerBorder.at<uchar>(y+1, x)) && 63 | (markerBorder.at<uchar>(y, x+1) != markerBorder.at<uchar>(y, x) || 64 | markerBorder.at<uchar>(y, x+1) != markerBorder.at<uchar>(y+1, x+1))) 65 | ) 66 | innerCorners.push_back(cv::Point3f(x-nBitsSquared/2.f, -(y-nBitsSquared/2.f), 0) * bitSize); 67 | } 68 | } 69 | return innerCorners; 70 | } 71 | } 72 | 73 | -------------------------------------------------------------------------------- /src/deps/aruco/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | SET (LIBNAME ${EXTRALIBNAME}aruco) 2 | include_directories(.)
3 | 4 | 5 | SET(sources 6 | cameraparameters.cpp debug.cpp dictionary.cpp ippe.cpp markerdetector.cpp markerlabeler.cpp posetracker.cpp 7 | cvdrawingutils.cpp dictionary_based.cpp marker.cpp markerdetector_impl.cpp markermap.cpp fractaldetector.cpp 8 | ) 9 | SET(headers 10 | aruco_cvversioning.h cameraparameters.h dictionary_based.h ippe.h markerdetector_impl.h markermap.h timers.h 11 | aruco_export.h cvdrawingutils.h dictionary.h levmarq.h marker.h picoflann.h 12 | aruco.h debug.h markerdetector.h markerlabeler.h posetracker.h fractaldetector.h 13 | ) 14 | set(fractal_sources 15 | fractallabelers/fractalposetracker.cpp 16 | fractallabelers/fractalmarkerset.cpp 17 | fractallabelers/fractalmarker.cpp 18 | fractallabelers/fractallabeler.cpp 19 | ) 20 | set(fractal_headers 21 | fractallabelers/fractalposetracker.h 22 | fractallabelers/fractalmarkerset.h 23 | fractallabelers/fractalmarker.h 24 | fractallabelers/fractallabeler.h 25 | ) 26 | 27 | add_library(${LIBNAME} ${sources} ${headers} ${fractal_sources} ${fractal_headers}) 28 | 29 | set_target_properties(${LIBNAME} PROPERTIES # create *nix style library versions + symbolic links 30 | DEFINE_SYMBOL ARUCO_DSO_EXPORTS 31 | VERSION ${PROJECT_VERSION} 32 | SOVERSION ${PROJECT_SOVERSION} 33 | CLEAN_DIRECT_OUTPUT 1 # allow creating static and shared libs without conflicts 34 | OUTPUT_NAME "${LIBNAME}${PROJECT_DLLVERSION}" # avoid conflicts between library and binary target names 35 | ) 36 | 37 | target_link_libraries(${LIBNAME} PUBLIC opencv_core) 38 | IF(BUILD_SVM) 39 | add_definitions(USE_SVM_LABELER) 40 | target_link_libraries(${LIBNAME} PRIVATE opencv_imgproc opencv_calib3d opencv_features2d opencv_ml) 41 | else() 42 | target_link_libraries(${LIBNAME} PRIVATE opencv_imgproc opencv_calib3d opencv_features2d ) 43 | endif() 44 | 45 | INSTALL(TARGETS ${LIBNAME} 46 | RUNTIME DESTINATION bin COMPONENT main # Install the dll file in bin directory 47 | LIBRARY DESTINATION lib PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE COMPONENT main 48 | ARCHIVE DESTINATION lib COMPONENT main) # Install the dll.a file in lib directory 49 | 50 | IF(ARUCO_DEVINSTALL) 51 | install(FILES ${headers} DESTINATION include/aruco) 52 | install(FILES ${fractal_headers} DESTINATION include/aruco/fractallabelers) 53 | ENDIF() 54 | 55 | -------------------------------------------------------------------------------- /src/deps/aruco/cvdrawingutils.h: -------------------------------------------------------------------------------- 1 | /** 2 | Copyright 2017 Rafael Muñoz Salinas. All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without modification, are 5 | permitted provided that the following conditions are met: 6 | 7 | 1. Redistributions of source code must retain the above copyright notice, this list of 8 | conditions and the following disclaimer. 9 | 10 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 11 | of conditions and the following disclaimer in the documentation and/or other materials 12 | provided with the distribution. 13 | 14 | THIS SOFTWARE IS PROVIDED BY Rafael Muñoz Salinas ''AS IS'' AND ANY EXPRESS OR IMPLIED 15 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 16 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL Rafael Muñoz Salinas OR 17 | CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 18 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 19 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 20 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 21 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 22 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 | 24 | The views and conclusions contained in the software and documentation are those of the 25 | authors and should not be interpreted as representing official policies, either expressed 26 | or implied, of Rafael Muñoz Salinas. 27 | */ 28 | 29 | #ifndef _ArUco_DrawUtils_H_ 30 | #define _ArUco_DrawUtils_H_ 31 | 32 | #include "aruco.h" 33 | #include "aruco_export.h" 34 | 35 | namespace aruco 36 | { 37 | /**\brief A set of functions to draw in opencv images 38 | */ 39 | class ARUCO_EXPORT CvDrawingUtils 40 | { 41 | public: 42 | 43 | static void draw3dAxis(cv::Mat& Image, const CameraParameters& CP, const cv::Mat& Rvec, const cv::Mat& Tvec, 44 | float axis_size); 45 | static void draw3dAxis(cv::Mat& Image, Marker& m, const CameraParameters& CP,int lineSize=1); 46 | 47 | static void draw3dCube(cv::Mat& Image, Marker& m, const CameraParameters& CP,int lineSize=1, bool setYperpendicular = false); 48 | 49 | // static void draw3dAxis(cv::Mat &Image, MarkerMap &m, const CameraParameters &CP); 50 | // static void draw3dCube(cv::Mat &Image, MarkerMap &m, const CameraParameters &CP, bool setYperpendicular = 51 | // false); 52 | }; 53 | } 54 | 55 | #endif 56 | -------------------------------------------------------------------------------- /src/deps/cpu_features/cpu_x86.h: -------------------------------------------------------------------------------- 1 | /* cpu_x86.h 2 | * 3 | * Author : Alexander J. Yee 4 | * Date Created : 04/12/2014 5 | * Last Modified : 04/12/2014 6 | * 7 | */ 8 | 9 | #pragma once 10 | #ifndef _cpu_x86_H 11 | #define _cpu_x86_H 12 | //////////////////////////////////////////////////////////////////////////////// 13 | //////////////////////////////////////////////////////////////////////////////// 14 | //////////////////////////////////////////////////////////////////////////////// 15 | //////////////////////////////////////////////////////////////////////////////// 16 | // Dependencies 17 | #include <stdint.h> 18 | #include <string> 19 | namespace cpu_feature_detector{ 20 | //////////////////////////////////////////////////////////////////////////////// 21 | //////////////////////////////////////////////////////////////////////////////// 22 | //////////////////////////////////////////////////////////////////////////////// 23 | //////////////////////////////////////////////////////////////////////////////// 24 | struct cpu_x86{ 25 | // Vendor 26 | bool Vendor_AMD; 27 | bool Vendor_Intel; 28 | 29 | // OS Features 30 | bool OS_x64; 31 | bool OS_AVX; 32 | bool OS_AVX512; 33 | 34 | // Misc.
35 | bool HW_MMX; 36 | bool HW_x64; 37 | bool HW_ABM; 38 | bool HW_RDRAND; 39 | bool HW_BMI1; 40 | bool HW_BMI2; 41 | bool HW_ADX; 42 | bool HW_PREFETCHWT1; 43 | bool HW_MPX; 44 | 45 | // SIMD: 128-bit 46 | bool HW_SSE; 47 | bool HW_SSE2; 48 | bool HW_SSE3; 49 | bool HW_SSSE3; 50 | bool HW_SSE41; 51 | bool HW_SSE42; 52 | bool HW_SSE4a; 53 | bool HW_AES; 54 | bool HW_SHA; 55 | 56 | // SIMD: 256-bit 57 | bool HW_AVX; 58 | bool HW_XOP; 59 | bool HW_FMA3; 60 | bool HW_FMA4; 61 | bool HW_AVX2; 62 | 63 | // SIMD: 512-bit 64 | bool HW_AVX512_F; 65 | bool HW_AVX512_PF; 66 | bool HW_AVX512_ER; 67 | bool HW_AVX512_CD; 68 | bool HW_AVX512_VL; 69 | bool HW_AVX512_BW; 70 | bool HW_AVX512_DQ; 71 | bool HW_AVX512_IFMA; 72 | bool HW_AVX512_VBMI; 73 | 74 | public: 75 | cpu_x86(); 76 | void detect_host(); 77 | 78 | void print() const; 79 | static void print_host(); 80 | 81 | static void cpuid(int32_t out[4], int32_t x); 82 | static std::string get_vendor_string(); 83 | 84 | private: 85 | static void print(const char* label, bool yes); 86 | 87 | static bool detect_OS_x64(); 88 | static bool detect_OS_AVX(); 89 | static bool detect_OS_AVX512(); 90 | }; 91 | //////////////////////////////////////////////////////////////////////////////// 92 | //////////////////////////////////////////////////////////////////////////////// 93 | //////////////////////////////////////////////////////////////////////////////// 94 | //////////////////////////////////////////////////////////////////////////////// 95 | } 96 | #endif 97 | -------------------------------------------------------------------------------- /src/test_calibration_solve.m: -------------------------------------------------------------------------------- 1 | clear all; 2 | close all; 3 | 4 | % s = screen coordinates of calibration points 5 | % e = pupil center coordinates in eye cam coordinates 6 | 7 | % simulate a misaligned (slightly rotated) eye camera relative to the scene 8 | % camera 9 | a = pi*0/180; 10 | R = [cos(a), -sin(a); 11 | sin(a), cos(a)]; 12 | 13 | % 3 point calibration 14 | %s = [0, 100, 100; 15 | % 0, 0, 100]; 16 | %e = [10, 50, 50; 17 | % 10, 10, 50]; 18 | 19 | 20 | %{ 21 | % 4 point calibration 22 | s = [0, 100, 100 0; 23 | 0, 0, 100, 100]; 24 | e = [10, 50, 50, 10; 25 | 10, 10, 50, 50]; 26 | %} 27 | 28 | %%{ 29 | % 4 point real data 30 | s = [257 556 551 265; 31 | 154 151 345 334]; 32 | 33 | e = [545 485 489 551; 34 | 235 233 263 265]; 35 | %} 36 | 37 | % 5 point calibration 38 | %s = [0, 100, 100 0, 50; 39 | % 0, 0, 100, 100, 50]; 40 | %e = [10, 50, 50, 10, 30; 41 | % 10, 10, 50, 50, 30]; 42 | 43 | 44 | % 9 point calibration 45 | %s = [0, 100, 100 0, 50, 0, 50, 50, 100; 46 | % 0, 0, 100, 100, 50, 50, 0, 100, 50]; 47 | %e = [10, 50, 50, 10, 30, 10, 30, 30, 50; 48 | % 10, 10, 50, 50, 30, 30, 10, 50, 30]; 49 | 50 | %e = [5.1, 15.2, 15.1, 4.9; 51 | % 2.9, 3.1, 15.2, 14.8]; 52 | 53 | %simulate pupil tracking noise 54 | %e = e + 0.5*randn(size(e)); 55 | %e 56 | 57 | % rotate data to simulate misalignment 58 | %n_samples = size(e,2); 59 | %for i = 1:n_samples 60 | % e(:,i) = R*e(:,i); 61 | %end 62 | 63 | % s = W*e 64 | % what is W ??
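% W is the least-squares solution of s = W*e over all calibration samples: minimising
% ||W*e - s||^2 gives the closed form W = s*e'*inv(e*e') = s*pinv(e). For the wide matrix e
% the right pseudoinverse e'*inv(e*e') is the appropriate form; the variant inv(e'*e)*e'
% used below assumes full column rank, which is why it is numerically less reliable here.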
65 | % W = s*pinv(e); 66 | 67 | %tmp = pinv(e) 68 | tmp = inv(e'*e) * e'; % alternative to pinv but not precise enough 69 | W = s*tmp 70 | 71 | W * (R*[50 50]') 72 | 73 | % now add nonlinearities 74 | 75 | n_samples = size(e,2); 76 | tmp = poly_features(0,0); 77 | ee = zeros(size(tmp,1), n_samples); 78 | for i = 1 : n_samples 79 | ee(:,i) = poly_features(e(1,i),e(2,i)); 80 | end 81 | 82 | 83 | ee_inv = pinv(ee); 84 | %ee_inv = inv(ee'*ee) * ee'; % alternative to pinv 85 | ee_inv 86 | W = s*ee_inv 87 | 88 | error = zeros(1,4); 89 | for i = 1:4 90 | error(i) = norm(W*ee(:,i) - s(:,i)); 91 | end 92 | error 93 | 94 | figure(22); 95 | tmp = W*ee; 96 | plot(tmp(1,:),tmp(2,:), 'kx'); 97 | hold on; 98 | plot(s(1,:), s(2,:), 'ro'); 99 | 100 | %% 101 | 102 | figure(1); 103 | n_subdiv = 25; 104 | X = zeros(n_subdiv,n_subdiv); 105 | Y = zeros(n_subdiv,n_subdiv); 106 | %xlin = linspace(10,50, n_subdiv); 107 | %ylin = linspace(10,50, n_subdiv); 108 | 109 | xlin = linspace(min(e(1,:)), max(e(1,:)), n_subdiv); 110 | ylin = linspace(min(e(2,:)), max(e(2,:)), n_subdiv); 111 | 112 | 113 | for i = 1:n_subdiv 114 | for k = 1:n_subdiv 115 | tmp = R*[xlin(i) ylin(k)]'; 116 | tmp = W * poly_features(tmp(1), tmp(2)); 117 | %X(i,k) = tmp(1); 118 | %Y(i,k) = tmp(2); 119 | plot(tmp(1), tmp(2), 'kx'); 120 | hold on; 121 | end 122 | end 123 | 124 | 125 | function v = poly_features(x,y) 126 | v = [1, x, y, x*y]';%, x*x, y*y]';%, x*x*y*y]';%, x*x*x, y*y*y]'; 127 | end -------------------------------------------------------------------------------- /vstudio/Libretracker.sln: -------------------------------------------------------------------------------- 1 | 2 | Microsoft Visual Studio Solution File, Format Version 12.00 3 | # Visual Studio Version 16 4 | VisualStudioVersion = 16.0.28803.202 5 | MinimumVisualStudioVersion = 10.0.40219.1 6 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Libretracker", "libretracker.vcxproj", "{A25D0D46-5329-4B31-A4BA-04D2765DEA9F}" 7 | EndProject 8 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "lsl_client_example", "lsl_client_example.vcxproj", "{5D517B9F-A94B-453A-B967-938207231405}" 9 | EndProject 10 | Global 11 | GlobalSection(SolutionConfigurationPlatforms) = preSolution 12 | Debug|x64 = Debug|x64 13 | Debug|x86 = Debug|x86 14 | OpenCL_Release|x64 = OpenCL_Release|x64 15 | OpenCL_Release|x86 = OpenCL_Release|x86 16 | Release|x64 = Release|x64 17 | Release|x86 = Release|x86 18 | EndGlobalSection 19 | GlobalSection(ProjectConfigurationPlatforms) = postSolution 20 | {A25D0D46-5329-4B31-A4BA-04D2765DEA9F}.Debug|x64.ActiveCfg = Debug|x64 21 | {A25D0D46-5329-4B31-A4BA-04D2765DEA9F}.Debug|x64.Build.0 = Debug|x64 22 | {A25D0D46-5329-4B31-A4BA-04D2765DEA9F}.Debug|x86.ActiveCfg = Debug|Win32 23 | {A25D0D46-5329-4B31-A4BA-04D2765DEA9F}.Debug|x86.Build.0 = Debug|Win32 24 | {A25D0D46-5329-4B31-A4BA-04D2765DEA9F}.OpenCL_Release|x64.ActiveCfg = OpenCL_Release|x64 25 | {A25D0D46-5329-4B31-A4BA-04D2765DEA9F}.OpenCL_Release|x64.Build.0 = OpenCL_Release|x64 26 | {A25D0D46-5329-4B31-A4BA-04D2765DEA9F}.OpenCL_Release|x86.ActiveCfg = OpenCL_Release|Win32 27 | {A25D0D46-5329-4B31-A4BA-04D2765DEA9F}.OpenCL_Release|x86.Build.0 = OpenCL_Release|Win32 28 | {A25D0D46-5329-4B31-A4BA-04D2765DEA9F}.Release|x64.ActiveCfg = Release|x64 29 | {A25D0D46-5329-4B31-A4BA-04D2765DEA9F}.Release|x64.Build.0 = Release|x64 30 | {A25D0D46-5329-4B31-A4BA-04D2765DEA9F}.Release|x86.ActiveCfg = Release|Win32 31 | {A25D0D46-5329-4B31-A4BA-04D2765DEA9F}.Release|x86.Build.0 = Release|Win32 32 | 
{5D517B9F-A94B-453A-B967-938207231405}.Debug|x64.ActiveCfg = Debug|x64 33 | {5D517B9F-A94B-453A-B967-938207231405}.Debug|x64.Build.0 = Debug|x64 34 | {5D517B9F-A94B-453A-B967-938207231405}.Debug|x86.ActiveCfg = Debug|Win32 35 | {5D517B9F-A94B-453A-B967-938207231405}.Debug|x86.Build.0 = Debug|Win32 36 | {5D517B9F-A94B-453A-B967-938207231405}.OpenCL_Release|x64.ActiveCfg = Release|x64 37 | {5D517B9F-A94B-453A-B967-938207231405}.OpenCL_Release|x64.Build.0 = Release|x64 38 | {5D517B9F-A94B-453A-B967-938207231405}.OpenCL_Release|x86.ActiveCfg = Release|Win32 39 | {5D517B9F-A94B-453A-B967-938207231405}.OpenCL_Release|x86.Build.0 = Release|Win32 40 | {5D517B9F-A94B-453A-B967-938207231405}.Release|x64.ActiveCfg = Release|x64 41 | {5D517B9F-A94B-453A-B967-938207231405}.Release|x64.Build.0 = Release|x64 42 | {5D517B9F-A94B-453A-B967-938207231405}.Release|x86.ActiveCfg = Release|Win32 43 | {5D517B9F-A94B-453A-B967-938207231405}.Release|x86.Build.0 = Release|Win32 44 | EndGlobalSection 45 | GlobalSection(SolutionProperties) = preSolution 46 | HideSolutionNode = FALSE 47 | EndGlobalSection 48 | GlobalSection(ExtensibilityGlobals) = postSolution 49 | SolutionGuid = {12A49FD9-8F22-4884-B376-D659BC8A7C79} 50 | EndGlobalSection 51 | EndGlobal 52 | -------------------------------------------------------------------------------- /src/helpers.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | 4 | #include 5 | #include 6 | 7 | #include 8 | 9 | #include "deps/s/cv_camera_control.h" 10 | 11 | 12 | constexpr auto nan() { return std::numeric_limits::quiet_NaN(); } 13 | constexpr auto NaNf() { return std::numeric_limits::quiet_NaN(); } 14 | 15 | 16 | // given input value x, set the output value to the closest value to x found in allowed_values 17 | template float to_closest(float x, const std::array& allowed_values) 18 | { 19 | float tmp = FLT_MAX; 20 | float y = 0; 21 | 22 | for (auto v : allowed_values) 23 | { 24 | if (abs(x - v) < tmp) 25 | { 26 | tmp = abs(x - v); 27 | y = v; 28 | } 29 | } 30 | 31 | return y; 32 | } 33 | 34 | // clip value x to range min..max 35 | template inline T clip(T x, const T& min, const T& max) 36 | { 37 | if (x < min)x = min; 38 | if (x > max)x = max; 39 | return x; 40 | } 41 | 42 | // produce a random number between [a,b] , a and b inclusive 43 | inline float rnd(float a, float b) { return a + (b - a)*(rand() / float(RAND_MAX)); } 44 | 45 | 46 | template std::string add_leading_zeros(std::string s) 47 | { 48 | auto to_add = clip(n - s.length(), 0, n); for (size_t a = 0; a < to_add; a++) { s = "0" + s; }; return s; 49 | } 50 | 51 | // create a string with date and time like this: 20080501123104 ( 01.05. 2008, 12:31.04 uhr) 52 | std::string date_time_str(); 53 | 54 | 55 | 56 | // toggle a boolean 57 | inline void toggle(bool& b) 58 | { 59 | if (b) { b = false; } 60 | else { b = true; } 61 | } 62 | 63 | 64 | 65 | // Finds the intersection of two lines, or returns false. 66 | // The lines are defined by (o1, p1) and (o2, p2). 
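// It solves o1 + t1*d1 == o2 + t2*d2 (with d1 = p1 - o1, d2 = p2 - o2) for t1 via the
// 2D cross product cross = d1.x*d2.y - d1.y*d2.x; if |cross| is ~0 the lines are
// (nearly) parallel and false is returned, otherwise the intersection is r = o1 + t1*d1.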
67 | // from: https://answers.opencv.org/question/9511/how-to-find-the-intersection-point-of-two-lines/ 68 | inline bool line_intersection(cv::Point2f o1, cv::Point2f p1, cv::Point2f o2, cv::Point2f p2, cv::Point2f& r) 69 | { 70 | using namespace cv; 71 | Point2f x = o2 - o1; 72 | Point2f d1 = p1 - o1; 73 | Point2f d2 = p2 - o2; 74 | 75 | float cross = d1.x * d2.y - d1.y * d2.x; 76 | if (abs(cross) < /*EPS*/1e-8) 77 | return false; 78 | 79 | double t1 = (x.x * d2.y - x.y * d2.x) / cross; 80 | r = o1 + d1 * t1; 81 | return true; 82 | } 83 | 84 | // #################################### 85 | 86 | // draw a scaled copy of the one image (e.g. scene cam) to the a larger image (e.g. main canvas). 87 | // if x or y = -1 then the image is centered along the x or y axis 88 | // returns the calculated scaling and x,y offsets if -1 was specified 89 | std::tuple draw_preview(cv::Mat& img_preview, cv::Mat& img_target, float scaling = 1.0f, int x = -1, int y = -1); 90 | 91 | 92 | // generic opencv camera selection dialog 93 | // if provided an camera id != -1, it tries to open this id. 94 | // if that fails, the selection dialog is presented (win32: including a list of available cameras) 95 | std::shared_ptr select_camera(std::string message = "select video camera nr. (default=0):", int id = -1, cv::VideoCaptureAPIs backend = cv::CAP_ANY); 96 | 97 | 98 | // helper function. when pressing a button in the fltk gui, 99 | // calling this function grabs the focus of the specified opencv window 100 | void grab_focus(const char* wname); 101 | 102 | std::string to_fourcc_string(int code); 103 | -------------------------------------------------------------------------------- /src/calibration.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | #include "aruco_canvas.h" 8 | #include "speller_canvas.h" 9 | //#include "deps/dependencies.h" 10 | 11 | 12 | class Calibration_base 13 | { 14 | protected: 15 | 16 | // best suited for 3-point calibration 17 | // auto polynomial_features(float x, float y) { Eigen::Matrix v; v << 1.0f, x, y; return v; }; 18 | 19 | // best suited for 4-point calibration 20 | auto polynomial_features(double x, double y) { Eigen::Matrix v; v << 1.0f, x, y, x* y, x* x, y* y, x* x* x, y* y* y, x* y* y, x* x* y; return v; }; 21 | double mapping_error = 0; 22 | int n_polynomial_features = 4; 23 | public: 24 | void calibrate(int n_polynomial_features=4); 25 | Eigen::MatrixXd validation_points, calibration_points, calibration_targets, W_calib; 26 | 27 | cv::Point2f mapping_2d_to_2d(cv::Point2f p) 28 | { 29 | Eigen::Vector2d tmp = W_calib * polynomial_features(p.x, p.y).block(0, 0, n_polynomial_features, 1); 30 | return cv::Point2f(tmp(0), tmp(1)); 31 | } 32 | 33 | void setup(int n_calibration_points); 34 | 35 | }; 36 | 37 | class Calibration : public Calibration_base 38 | { 39 | protected: 40 | 41 | const int marker_size = 120; 42 | 43 | void update_calibration(cv::Mat& frame_scene_cam, cv::Point2f pupil_pos, int key_pressed); 44 | 45 | void draw_prep(cv::Mat& frame_scene_cam, cv::Mat& img_screen); 46 | void draw_calibration(cv::Mat& frame_scene_cam, cv::Mat& img_screen); 47 | void draw_validation(cv::Mat& frame_scene_cam, cv::Mat& img_screen); 48 | void draw_visualization(cv::Mat& frame_scene_cam, cv::Mat& img_screen); 49 | void draw_observe(cv::Mat& frame_scene_cam, cv::Mat& img_screen); 50 | int n_calib_points = 4; 51 | cv::Point2f p_calibrated, p_projected; 52 | 53 | std::vector targets; 54 | 55 | 
// special aruco dictionary and marker for calibration purposes, only 56 | aruco::MarkerDetector marker_detector_calib; 57 | std::vector markers_calib; 58 | cv::Point2f marker_calib_center; 59 | cv::Mat img_marker_calib, img_marker_calib_scaled; 60 | float marker_calib_anim_fx = 0; // just a helper variable for a neat marker animation effect 61 | 62 | int calibration_counter = 0; 63 | int validation_counter = 0; 64 | int tracking_lost_counter = 0; 65 | 66 | // the offset that was measured during validation 67 | cv::Point2f offset_validation{ 0.0f, 0.0f }; 68 | 69 | int n_polynomial_features = 4; 70 | public: 71 | Calibration(); 72 | 73 | Aruco_canvas ar_canvas; 74 | 75 | enum enum_calib_state 76 | { 77 | STATE_PREPARE, 78 | STATE_CALIBRATION, 79 | STATE_VISUALIZE_CALIBRATION, 80 | STATE_VISUALIZE_VALIDATION, 81 | STATE_VALIDATION 82 | } state = STATE_PREPARE; 83 | 84 | 85 | // TODO 86 | // the offset that will be used. this gives the user the option to set offset = offset_validation or leave it as calibrated 87 | cv::Point2f offset{ 0.0f, 0.0f }; 88 | 89 | void fix_offset() 90 | { 91 | offset = offset_validation; 92 | } 93 | 94 | 95 | void draw(cv::Mat& frame_scene_cam, cv::Mat& img_screen); 96 | 97 | void setup(int n_calibration_points=5); 98 | void setup_validation(int n_validation_points=5); 99 | 100 | void update(cv::Mat& frame_scene_cam, cv::Point2f pupil_pos, int key_pressed); 101 | 102 | void set_number_of_polynomial_features(int n) { n_polynomial_features = n; } 103 | }; -------------------------------------------------------------------------------- /src/pupil_tracking.cpp: -------------------------------------------------------------------------------- 1 | #include "helpers.h" 2 | #include "pupil_tracking.h" 3 | #include "deps/s/cv_camera_control.h" 4 | 5 | using namespace std; 6 | 7 | 8 | void Pupil_tracking::setup(enum_simd_variant simd_width, enum_pupil_tracking_variant pupil_tracking_variant) 9 | { 10 | 11 | // initialize with a random image 12 | using namespace cv; 13 | frame_eye_cam = Mat(480, 640, CV_8UC3); 14 | randu(frame_eye_cam, Scalar::all(0), Scalar::all(255)); 15 | 16 | switch (pupil_tracking_variant) 17 | { 18 | case PUPIL_TRACKING_TIMM: pupil_tracker = make_shared(); break; 19 | case PUPIL_TRACKING_PURE: pupil_tracker = make_shared(); break; 20 | case PUPIL_TRACKING_PUREST: pupil_tracker = make_shared(); break; 21 | default: pupil_tracker = make_shared(); break; 22 | } 23 | 24 | pupil_tracker->setup(simd_width); 25 | is_running = true; // set to false to exit while loop 26 | } 27 | 28 | 29 | void Pupil_tracking::run(enum_simd_variant simd_width, shared_ptr eye_camera)//int eye_cam_id, cv::VideoCaptureAPIs eye_cam_backend) 30 | { 31 | Camera_control eye_cam_controls; 32 | auto eye_cam = eye_camera;//select_camera("select the eye camera id:", eye_cam_id, eye_cam_backend); 33 | 34 | // test xtal video 35 | //auto eye_cam = make_shared("d:/temp/xtal_eye_videos/2019-09-06_142643_470_b.avi"); 36 | //auto eye_cam = make_shared("c:/Users/Frosch/Downloads/eye_200fps_200x200__h642.mp4"); 37 | 38 | 39 | 40 | auto sg = Simple_gui(20, 60, 400, 300); 41 | 42 | sg.num_columns(1); 43 | sg.add_separator_box("adjust camera:"); 44 | sg.add_button("eye-camera", [&]() { eye_cam_controls.setup(eye_cam, 20, 20, 400, 400, "Eye-Camera Controls"); }, "adjust the eye-camera settings."); 45 | 46 | sg.add_separator_box("switch Pupil-Tracking algorithm:"); 47 | sg.num_columns(1); 48 | sg.add_radio_button("Timm's algorithm", [&, s = simd_width]() { setup(s, PUPIL_TRACKING_TIMM); }, "Timms 
Algorithm is a gradient-based algorithm. License: GPL3."); 49 | sg.add_radio_button("PuRe (for research only!)", [&, s = simd_width]() {setup(s, PUPIL_TRACKING_PURE); }, "PuRe is a high-accuracy, high-performance algorithm from the University of Tübingen. License: research only! You are not allowed to use this algorithm and its code for commercial applications."); 50 | auto button = sg.add_radio_button("PuReST (for research only!)", [&, s = simd_width]() {setup(s, PUPIL_TRACKING_PUREST); }, "PuReST is a high-accuracy, high-performance algorithm from the University of Tübingen. License: research only! You are not allowed to use this algorithm and its code for commercial applications."); 51 | button->value(true); 52 | sg.add_button("adjust settings", [&]() { pupil_tracker->show_gui(); }); 53 | sg.finish(); 54 | sg.show(); 55 | 56 | setup(simd_width, PUPIL_TRACKING_PUREST); 57 | 58 | // generate a random image 59 | using namespace cv; 60 | auto img_rand = Mat(480, 640, CV_8UC3); 61 | randu(img_rand, Scalar::all(0), Scalar::all(255)); 62 | 63 | cv::Mat frame, frame_tmp; 64 | cv::Mat frame_gray; 65 | 66 | cout << "\nCAP_PROP_FOURCC: " << to_fourcc_string(eye_cam->get(CAP_PROP_FOURCC)); 67 | cout << "\nCAP_PROP_CONVERT_RGB: " << eye_cam->get(CAP_PROP_CONVERT_RGB); 68 | 69 | Timer timer(100); 70 | while (is_running) 71 | { 72 | // read a frame from the camera 73 | eye_cam->read(frame); 74 | 75 | 76 | if (!frame.empty()) 77 | { 78 | /* // code for XTAL VR Headset 79 | cv::rotate(frame, frame_tmp, ROTATE_180); 80 | frame = frame_tmp; 81 | cv::cvtColor(frame, frame_gray, cv::COLOR_BGR2GRAY); 82 | cv::imshow("eye_gray", frame_gray); 83 | // */ 84 | 85 | pupil_tracker->update(frame); 86 | pupil_tracker->draw(frame); 87 | 88 | cv::imshow("eye_cam", frame); 89 | } 90 | cv::waitKey(1); 91 | sg.update(); 92 | } 93 | } 94 | 95 | -------------------------------------------------------------------------------- /src/deps/DeviceEnumerator.cpp: -------------------------------------------------------------------------------- 1 | #include "DeviceEnumerator.h" 2 | 3 | std::map<int, Device> DeviceEnumerator::getVideoDevicesMap() { 4 | return getDevicesMap(CLSID_VideoInputDeviceCategory); 5 | } 6 | 7 | std::map<int, Device> DeviceEnumerator::getAudioDevicesMap() { 8 | return getDevicesMap(CLSID_AudioInputDeviceCategory); 9 | } 10 | 11 | // Returns a map of id and devices that can be used 12 | std::map<int, Device> DeviceEnumerator::getDevicesMap(const GUID deviceClass) 13 | { 14 | std::map<int, Device> deviceMap; 15 | 16 | HRESULT hr = CoInitialize(nullptr); 17 | if (FAILED(hr)) { 18 | return deviceMap; // Empty deviceMap as an error 19 | } 20 | 21 | // Create the System Device Enumerator 22 | ICreateDevEnum *pDevEnum; 23 | hr = CoCreateInstance(CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&pDevEnum)); 24 | 25 | // If succeeded, create an enumerator for the category 26 | IEnumMoniker *pEnum = NULL; 27 | if (SUCCEEDED(hr)) { 28 | hr = pDevEnum->CreateClassEnumerator(deviceClass, &pEnum, 0); 29 | if (hr == S_FALSE) { 30 | hr = VFW_E_NOT_FOUND; 31 | } 32 | pDevEnum->Release(); 33 | } 34 | 35 | // Now we check if the enumerator creation succeeded 36 | int deviceId = -1; 37 | if (SUCCEEDED(hr)) { 38 | // Fill the map with id and friendly device name 39 | IMoniker *pMoniker = NULL; 40 | while (pEnum->Next(1, &pMoniker, NULL) == S_OK) { 41 | 42 | IPropertyBag *pPropBag; 43 | HRESULT hr = pMoniker->BindToStorage(0, 0, IID_PPV_ARGS(&pPropBag)); 44 | if (FAILED(hr)) { 45 | pMoniker->Release(); 46 | continue; 47 | } 48 | 49 | // Create variant to hold data 50 |
VARIANT var; 51 | VariantInit(&var); 52 | 53 | std::string deviceName; 54 | std::string devicePath; 55 | 56 | // Read FriendlyName or Description 57 | hr = pPropBag->Read(L"Description", &var, 0); // Read description 58 | if (FAILED(hr)) { 59 | // If description fails, try with the friendly name 60 | hr = pPropBag->Read(L"FriendlyName", &var, 0); 61 | } 62 | // If still fails, continue with next device 63 | if (FAILED(hr)) { 64 | VariantClear(&var); 65 | continue; 66 | } 67 | // Convert to string 68 | else { 69 | deviceName = ConvertBSTRToMBS(var.bstrVal); 70 | } 71 | 72 | VariantClear(&var); // We clean the variable in order to read the next value 73 | 74 | // We try to read the DevicePath 75 | hr = pPropBag->Read(L"DevicePath", &var, 0); 76 | if (FAILED(hr)) { 77 | VariantClear(&var); 78 | continue; // If it fails we continue with next device 79 | } 80 | else { 81 | devicePath = ConvertBSTRToMBS(var.bstrVal); 82 | } 83 | 84 | // We populate the map 85 | deviceId++; 86 | Device currentDevice; 87 | currentDevice.id = deviceId; 88 | currentDevice.deviceName = deviceName; 89 | currentDevice.devicePath = devicePath; 90 | deviceMap[deviceId] = currentDevice; 91 | 92 | } 93 | pEnum->Release(); 94 | } 95 | CoUninitialize(); 96 | return deviceMap; 97 | } 98 | 99 | /* 100 | This two methods were taken from 101 | https://stackoverflow.com/questions/6284524/bstr-to-stdstring-stdwstring-and-vice-versa 102 | */ 103 | 104 | std::string DeviceEnumerator::ConvertBSTRToMBS(BSTR bstr) 105 | { 106 | int wslen = ::SysStringLen(bstr); 107 | return ConvertWCSToMBS((wchar_t*)bstr, wslen); 108 | } 109 | 110 | std::string DeviceEnumerator::ConvertWCSToMBS(const wchar_t* pstr, long wslen) 111 | { 112 | int len = ::WideCharToMultiByte(CP_ACP, 0, pstr, wslen, NULL, 0, NULL, NULL); 113 | 114 | std::string dblstr(len, '\0'); 115 | len = ::WideCharToMultiByte(CP_ACP, 0 /* no flags */, 116 | pstr, wslen /* not necessary NULL-terminated */, 117 | &dblstr[0], len, 118 | NULL, NULL /* no default char */); 119 | 120 | return dblstr; 121 | } 122 | -------------------------------------------------------------------------------- /src/deps/aruco/fractaldetector.h: -------------------------------------------------------------------------------- 1 | #ifndef _ARUCO_FractalDetector_H 2 | #define _ARUCO_FractalDetector_H 3 | 4 | #include "markerdetector.h" 5 | #include "fractallabelers/fractallabeler.h" 6 | #include "aruco_export.h" 7 | namespace aruco { 8 | class ARUCO_EXPORT FractalDetector 9 | { 10 | struct ARUCO_EXPORT Params 11 | { 12 | std::string configuration_type; 13 | }; 14 | 15 | public: 16 | FractalDetector(); 17 | 18 | /** 19 | * @brief setConfiguration 20 | * @param configuration fractal id 21 | */ 22 | void setConfiguration(int configuration); 23 | 24 | /** 25 | * @brief setConfiguration 26 | * @param configuration fractal file 27 | */ 28 | void setConfiguration(std::string configuration); 29 | 30 | /** 31 | * @brief setParams 32 | * @param cam_params camera parameters 33 | * @param markerSize in meters 34 | */ 35 | void setParams(const CameraParameters& cam_params, float markerSize) 36 | { 37 | _cam_params = cam_params; 38 | _msize = markerSize; 39 | 40 | Tracker.setParams(cam_params, getConfiguration()); 41 | } 42 | 43 | // return fractalmarkerset 44 | FractalMarkerSet getConfiguration() 45 | { 46 | return _fractalLabeler->_fractalMarkerSet; 47 | } 48 | 49 | // return true if any marker is detected, false otherwise 50 | bool detect(const cv::Mat& input) 51 | { 52 | Markers = _markerDetector->detect(input); 53 | 54 | 
if(Markers.size() > 0) return true; 55 | else return false; 56 | } 57 | 58 | // return true if the pose is estimated, false otherwise 59 | bool poseEstimation() 60 | { 61 | if (_cam_params.isValid()) 62 | { 63 | return Tracker.fractalInnerPose(_markerDetector, Markers); 64 | } 65 | else 66 | return false; 67 | } 68 | 69 | // return the rotation vector. Returns an empty matrix if last call to estimatePose returned false 70 | cv::Mat getRvec(){ 71 | return Tracker.getRvec(); 72 | } 73 | // return the translation vector. Returns an empty matrix if last call to estimatePose returned false 74 | cv::Mat getTvec(){ 75 | return Tracker.getTvec(); 76 | } 77 | 78 | void drawImage(cv::Mat &img,cv::Mat &img2); 79 | 80 | // draw borders of markers 81 | void drawMarkers(cv::Mat &img); 82 | 83 | // draw inner corners of markers 84 | void draw2d(cv::Mat &img); 85 | 86 | // draw pose estimation axes 87 | void draw3d(cv::Mat &img, bool cube=true, bool axis=true); 88 | 89 | // draw marker as cube 90 | void draw3dCube(cv::Mat& Image, FractalMarker m, const CameraParameters& CP, int lineSize); 91 | 92 | // return detected markers 93 | std::vector getMarkers() 94 | { 95 | return Markers; 96 | } 97 | 98 | private: 99 | // return image pyramid 100 | std::vector getImagePyramid() 101 | { 102 | return _markerDetector->getImagePyramid(); 103 | } 104 | 105 | std::vector Markers; //detected markers 106 | FractalPoseTracker Tracker; 107 | cv::Mat _rvec,_tvec;//CV32F 108 | Params _params; 109 | float _msize; //marker size 110 | CameraParameters _cam_params; //Camera parameters 111 | cv::Ptr _fractalLabeler; 112 | cv::Ptr _markerDetector; 113 | }; 114 | } 115 | #endif 116 | -------------------------------------------------------------------------------- /src/deps/aruco/debug.h: -------------------------------------------------------------------------------- 1 | /** 2 | Copyright 2017 Rafael Muñoz Salinas. All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without modification, are 5 | permitted provided that the following conditions are met: 6 | 7 | 1. Redistributions of source code must retain the above copyright notice, this list of 8 | conditions and the following disclaimer. 9 | 10 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 11 | of conditions and the following disclaimer in the documentation and/or other materials 12 | provided with the distribution. 13 | 14 | THIS SOFTWARE IS PROVIDED BY Rafael Muñoz Salinas ''AS IS'' AND ANY EXPRESS OR IMPLIED 15 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 16 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Rafael Muñoz Salinas OR 17 | CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 18 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 19 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 20 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 21 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 22 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 | 24 | The views and conclusions contained in the software and documentation are those of the 25 | authors and should not be interpreted as representing official policies, either expressed 26 | or implied, of Rafael Muñoz Salinas. 
27 | */ 28 | 29 | #ifndef __Debug_H 30 | #define __Debug_H 31 | #include 32 | #include 33 | #include 34 | #include "aruco_export.h" 35 | #include 36 | #include 37 | namespace aruco{ 38 | 39 | class ARUCO_EXPORT Debug{ 40 | private: 41 | static int level;//0(no debug), 1 medium, 2 high 42 | static bool isInited; 43 | 44 | static std::map strings; 45 | public: 46 | static void init(); 47 | static void setLevel(int l); 48 | static int getLevel(); 49 | 50 | 51 | 52 | static void addString(std::string &label,std::string &data); 53 | static std::string getString(std::string &str); 54 | 55 | 56 | static std::string getFileName(std::string filepath){ 57 | //go backwards until finding a separator or start 58 | size_t i; 59 | for( i=filepath.size()-1;i!=0;i--){ 60 | if ( filepath[i]=='\\' || filepath[i]=='/') break; 61 | } 62 | std::string fn;fn.reserve( filepath.size()-i); 63 | for(size_t s=i;s=level){x}} 77 | #define _debug_exec_( x) x 78 | #ifndef WIN32 79 | #define _debug_msg(x,level) {Debug::init();\ 80 | if (Debug::getLevel()>=level)\ 81 | std::cout<<"#" <=5)\ 85 | std::cout<<"#" <=level)\ 91 | std::cout<<__func__<<":"<< Debug::getFileName(__FILE__)<<":"<<__LINE__ <<": "<=5)\ 96 | std::cout<<__func__<<":"<< Debug::getFileName(__FILE__)<<":"<<__LINE__ <<": "< 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | #include "aruco_canvas.h" 9 | #include "speller_canvas.h" 10 | #include "pupil_tracking.h" 11 | #include "deps/dependencies.h" 12 | #include "deps/s/opencv_threaded_capture.h" 13 | #include "deps/s/cv_save_video.h" 14 | #include "calibration.h" 15 | 16 | 17 | 18 | class Eyetracking : public Pupil_tracking 19 | { 20 | protected: 21 | 22 | // these canvas window sizes should work out-of-the-box for windows 10 and a full-HD display 23 | unsigned int w = 1280; 24 | unsigned int h = 1000; 25 | unsigned int w_old = 0, h_old = 0; 26 | double gui_param_w = w, gui_param_h = h; 27 | double gui_param_marker_size = 150; 28 | 29 | cv::Mat img_screen, img_screen_background; 30 | cv::Mat frame_scene_cam, frame_scene_cam_scaled; 31 | 32 | 33 | 34 | Threaded_capture thread_eyecam; 35 | Threaded_capture thread_scenecam; 36 | 37 | 38 | //options_type opt; 39 | 40 | 41 | Speller_canvas speller; 42 | 43 | //shared_ptr eye_camera, scene_camera; 44 | std::shared_ptr scene_camera; 45 | 46 | 47 | 48 | // "mouse" values 49 | int mx = 0, my = 0; 50 | bool eye_button_up = false; 51 | bool eye_button_down = false; 52 | 53 | int calibration_counter = 0; 54 | int validation_counter = 0; 55 | int tracking_lost_counter = 150; 56 | 57 | 58 | Timer timer{100,"\npupil tracking:"}; 59 | 60 | int key_pressed = -1; 61 | 62 | cv::Point2f pupil_pos;// , pupil_pos_coarse; 63 | cv::Point2f p_calibrated; // gaze point in scene cam coordinates (after calibration) 64 | cv::Point2f p_projected; // calibrated gaze point after inverse projection from 65 | 66 | 67 | enum enum_state 68 | { 69 | STATE_INSTRUCTIONS, 70 | STATE_CALIBRATION_SCENE_CAM, 71 | STATE_CALIBRATION_EYE_CAM, 72 | STATE_CALIBRATION, 73 | STATE_OBSERVE, 74 | STATE_RUN_SPELLER 75 | } state = STATE_INSTRUCTIONS; 76 | 77 | 78 | // GUI 79 | Simple_gui sg; 80 | Simple_gui sg_stream_and_record; 81 | Camera_control eye_cam_controls; 82 | Camera_control scene_cam_controls; 83 | 84 | //////////////////////////// 85 | // for calibration 86 | Calibration calibration; 87 | 88 | // for jitter filter 89 | double filter_smoothing = 0.25; 90 | double filter_predictive = 0.075; 91 | Filter_double_exponential gaze_filter_x; 92 | Filter_double_exponential gaze_filter_y; 93 | 94 | 
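	// Double-exponential smoothing keeps a smoothed value s and a trend b, roughly:
	//   s_t = a*x_t + (1 - a)*(s_{t-1} + b_{t-1}),   b_t = g*(s_t - s_{t-1}) + (1 - g)*b_{t-1}
	// Filter_double_exponential (defined in the deps) presumably uses filter_smoothing as the
	// smoothing gain a and filter_predictive as the trend/prediction gain g; larger values
	// mean less lag but more jitter in the filtered gaze point.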
//////////////////////////// 95 | // for scene camera video recording (maybe later: eye video recording) 96 | Save_video scene_cam_video_saver; 97 | Save_video eye_cam_video_saver; 98 | bool stream_via_LSL = true; 99 | bool save_scene_cam_video = true; 100 | bool save_eye_cam_video = false; 101 | bool save_gaze_data = true; 102 | bool show_scene_cam_during_recording = false; 103 | bool show_eye_cam_during_recording = false; 104 | double video_writer_buffer_size = 25; 105 | //std::fstream fstream_gaze_data; 106 | //std::chrono::time_point time_start_recording; 107 | 108 | // special function for hybrid eyetracking+ssvep speller 109 | void run_multithreaded(); 110 | 111 | public: 112 | 113 | ~Eyetracking() 114 | { 115 | cv::destroyAllWindows(); 116 | } 117 | 118 | void run(enum_simd_variant simd_width, std::shared_ptr eye_cam, std::shared_ptr scene_cam); 119 | //void run(enum_simd_variant simd_width, int eye_cam_id = -1, int scenen_cam_id=-1, cv::VideoCaptureAPIs eye_cam_backend = cv::CAP_ANY, cv::VideoCaptureAPIs scene_cam_backend = cv::CAP_ANY); 120 | void setup(enum_simd_variant simd_width); 121 | void update(); 122 | 123 | void draw_instructions(); 124 | void draw_validation(); 125 | 126 | void draw_calibration_prep(); 127 | void draw_calibration_vis() 128 | { 129 | // TODO ! 130 | } 131 | 132 | 133 | void draw_speller(bool ssvep=false); 134 | void draw_observe(); 135 | void draw(); 136 | 137 | void set_mouse(int x, int y, bool eye_button_up_, bool eye_button_down_) { mx = x; my = y; eye_button_up = eye_button_up_; eye_button_down = eye_button_down_; } 138 | }; 139 | -------------------------------------------------------------------------------- /src/speller_canvas.cpp: -------------------------------------------------------------------------------- 1 | #include "speller_canvas.h" 2 | 3 | // clip value x to range min..max 4 | template inline T clip(T x, const T& min, const T& max) 5 | { 6 | if (x < min)x = min; 7 | if (x > max)x = max; 8 | return x; 9 | } 10 | 11 | void Speller_canvas::draw_keyboard(cv::Mat& img, int x, int y, int w, int h, int marker_size, int mx, int my, bool& button_released) 12 | { 13 | using namespace cv; 14 | const int s = 20; // spacing 15 | const int button_w = (w - s) / float(buttons.cols()) - s; // button width 16 | const int button_h = (h - s) / float(buttons.rows()) - s; // button height 17 | 18 | for (int i = 0; i < buttons.rows(); i++) 19 | { 20 | for (int k = 0; k < buttons.cols(); k++) 21 | { 22 | int button_x = x + s + k * (button_w + s); 23 | int button_y = y + s + i * (button_h + s); 24 | 25 | 26 | //if (buttons(i, k).draw(img_screen, mb + 10 + k * 110, mb + 10 + i * 110, 100, 100, mx_p, my_p, eye_button_up, std::string(1, labels(i, k)))) 27 | bool pressed = buttons(i, k).draw(img, button_x, button_y, button_w, button_h, mx, my, button_released, std::string(1, labels(i, k))); 28 | if (pressed) 29 | { 30 | //if (labels(i, k) == '_') { speller_str += " "; } 31 | if (labels(i, k) == '<') { if (speller_str.size() > 0) { speller_str.pop_back(); } } 32 | else { speller_str += labels(i, k); } 33 | 34 | } 35 | } 36 | } 37 | 38 | cv::putText(img, speller_str.c_str(), Point(marker_size + 30, marker_size - 30), FONT_HERSHEY_SIMPLEX, 2, Scalar(255, 0, 255), 4); 39 | 40 | // put text close to gaze point ! 
41 | cv::putText(img, speller_str.c_str(), Point(mx - 20 * speller_str.size(), my + 20), FONT_HERSHEY_SIMPLEX, 1, Scalar(255, 100, 100), 3); 42 | } 43 | 44 | 45 | void Speller_canvas::draw_keyboard_ssvep(cv::Mat& img, int x, int y, int w, int h, int marker_size, int mx, int my, bool& button_released) 46 | { 47 | 48 | using namespace cv; 49 | const int s = 20; // spacing 50 | const int button_w = (w - s) / float(buttons.cols()) - s; // button width 51 | const int button_h = (h - s) / float(buttons.rows()) - s; // button height 52 | 53 | 54 | auto flicker_color_01 = flicker_code_01[flicker_counter % flicker_code_01.size()] ? flicker_col_0 : flicker_col_1; 55 | auto flicker_color_02 = flicker_code_02[flicker_counter % flicker_code_02.size()] ? flicker_col_0 : flicker_col_1; 56 | auto flicker_color_03 = flicker_code_03[flicker_counter % flicker_code_03.size()] ? flicker_col_0 : flicker_col_1; 57 | auto flicker_color_04 = flicker_code_04[flicker_counter % flicker_code_04.size()] ? flicker_col_0 : flicker_col_1; 58 | 59 | flicker_counter++; 60 | 61 | 62 | // flicker block coordinates 63 | static int fx = -10, fy = -10; 64 | 65 | for (int i = 0; i < buttons.rows(); i++) 66 | { 67 | for (int k = 0; k < buttons.cols(); k++) 68 | { 69 | 70 | int button_x = x + s + k * (button_w + s); 71 | int button_y = y + s + i * (button_h + s); 72 | 73 | if (is_inside(button_x, button_y, button_w, button_h, mx, my)) 74 | { 75 | // start coordinates of 2x2 flicker block 76 | fx = clip(2 * floor(0.5f * k), 0, buttons.cols() - 2); 77 | fy = clip(2 * floor(0.5f * i), 0, buttons.rows() - 2); 78 | } 79 | 80 | Scalar button_col1(0,100,0); 81 | if (k == fx + 0 && i == fy + 0) { button_col1 = flicker_color_01; } 82 | if (k == fx + 1 && i == fy + 0) { button_col1 = flicker_color_02; } 83 | if (k == fx + 1 && i == fy + 1) { button_col1 = flicker_color_03; } 84 | if (k == fx + 0 && i == fy + 1) { button_col1 = flicker_color_04; } 85 | 86 | // hoover col 87 | Scalar button_col2 = Scalar_(button_col1 * 1.5f); 88 | 89 | bool pressed = buttons(i, k).draw(img, button_x, button_y, button_w, button_h, mx, my, button_released, std::string(1, labels(i, k)), button_col1, button_col2); 90 | if (pressed) 91 | { 92 | //if (labels(i, k) == '_') { speller_str += " "; } 93 | if (labels(i, k) == '<') { if (speller_str.size() > 0) { speller_str.pop_back(); } } 94 | else { speller_str += labels(i, k); } 95 | 96 | } 97 | } 98 | } 99 | cv::putText(img, speller_str.c_str(), Point(marker_size + 30, marker_size - 30), FONT_HERSHEY_SIMPLEX, 2, Scalar(255, 0, 255), 4); 100 | 101 | // put text close to gaze point ! 102 | cv::putText(img, speller_str.c_str(), Point(mx - 20 * speller_str.size(), my + 20), FONT_HERSHEY_SIMPLEX, 1, Scalar(255, 100, 100), 3); 103 | } 104 | 105 | -------------------------------------------------------------------------------- /src/deps/aruco/markerlabeler.h: -------------------------------------------------------------------------------- 1 | /** 2 | Copyright 2017 Rafael Muñoz Salinas. All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without modification, are 5 | permitted provided that the following conditions are met: 6 | 7 | 1. Redistributions of source code must retain the above copyright notice, this list of 8 | conditions and the following disclaimer. 9 | 10 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 11 | of conditions and the following disclaimer in the documentation and/or other materials 12 | provided with the distribution. 
13 | 14 | THIS SOFTWARE IS PROVIDED BY Rafael Muñoz Salinas ''AS IS'' AND ANY EXPRESS OR IMPLIED 15 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 16 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Rafael Muñoz Salinas OR 17 | CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 18 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 19 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 20 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 21 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 22 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 | 24 | The views and conclusions contained in the software and documentation are those of the 25 | authors and should not be interpreted as representing official policies, either expressed 26 | or implied, of Rafael Muñoz Salinas. 27 | */ 28 | 29 | #ifndef _aruco_MarkerLabeler_ 30 | #define _aruco_MarkerLabeler_ 31 | 32 | #include "aruco_export.h" 33 | #include "dictionary.h" 34 | #include 35 | 36 | namespace aruco 37 | { 38 | /**\brief Base class of labelers. A labelers receive a square of the image and determines if it has a valid marker, 39 | * its id and rotation 40 | * Additionally, it implements the factory model 41 | */ 42 | 43 | class Marker; 44 | class ARUCO_EXPORT MarkerLabeler 45 | { 46 | public: 47 | /** Factory function that returns a labeler for a given dictionary 48 | * @param dict_type type of dictionary 49 | * @param error_correction_rate some dictionaries are subsceptible of error correction. This params specify the 50 | * correction rate. 51 | * 0 means no correction at all. 1 means full correction (maximum correction bits = (tau-1) /2, tau= predefined 52 | * mimum intermarker distance). 53 | * 54 | * If you want correction capabilities and not sure how much, use 0.5 in this parameter 55 | */ 56 | static cv::Ptr create(Dictionary::DICT_TYPES dict_type, 57 | float error_correction_rate = 0); 58 | 59 | /** Factory function that returns the desired detector 60 | 61 | * 62 | * @brief create Factory function that returns the desired detector 63 | * @param detector 64 | * * Possible names implemented are: 65 | * ARUCO,CHILITAGS....: original aruco markers (0-1024) 66 | http://www.sciencedirect.com/science/article/pii/S0031320314000235 67 | * SVM: 68 | * @return 69 | */ 70 | static cv::Ptr create(std::string detector, std::string params = ""); 71 | 72 | /** function that identifies a marker. 73 | * @param in input image to analyze 74 | * @param marker_id id of the marker (if valid) 75 | * @param nRotations : output parameter nRotations must indicate how many times the marker must be rotated 76 | * clockwise 90 deg to be in its ideal position. (The way you would see it when you print it). This is employed 77 | * to know 78 | * always which is the corner that acts as reference system. 
79 | * @return true marker valid, false otherwise 80 | */ 81 | virtual bool detect(const cv::Mat& in, int& marker_id, int& nRotations,std::string &additionalInfo) = 0; 82 | /** 83 | * @brief getBestInputSize if desired, you can set the desired input size to the detect function 84 | * @return -1 if detect accept any type of input, or a size otherwise 85 | */ 86 | virtual int getBestInputSize() 87 | { 88 | return -1; 89 | } 90 | 91 | /** 92 | * @brief getNSubdivisions returns the number of subdivisions in each axis that the iamge will be subject to. 93 | * This is in dictionary based labelers, equals to the number of bits in each dimension plus the border bits. 94 | * @return 95 | */ 96 | virtual int getNSubdivisions()const{ 97 | return -1; 98 | } 99 | 100 | // returns an string that describes the labeler and can be used to create it 101 | virtual std::string getName() const = 0; 102 | virtual ~MarkerLabeler() 103 | { 104 | } 105 | }; 106 | }; 107 | #endif 108 | -------------------------------------------------------------------------------- /src/deps/aruco/fractallabelers/fractalmarkerset.h: -------------------------------------------------------------------------------- 1 | #include "fractalmarker.h" 2 | 3 | #include 4 | #include 5 | #include 6 | #include "aruco_export.h" 7 | 8 | namespace aruco { 9 | class ARUCO_EXPORT FractalMarkerSet{ 10 | public: 11 | enum CONF_TYPES: 12 | uint64_t{ 13 | FRACTAL_2L_6 = 0, 14 | FRACTAL_3L_6 = 1, 15 | FRACTAL_4L_6 = 2, 16 | FRACTAL_5L_6 = 3, 17 | CUSTOM=4 // for used defined dictionaries (using load). 18 | }; 19 | 20 | /** create set of markers 21 | * @brief create 22 | * @param regionsConfig {N(f1),K(f1)}{N(f2):K(f2)}...{N(fn):K(fn)} 23 | * @param pixSize 24 | */ 25 | void create(std::vector> regionsConfig, float pixSize); 26 | 27 | /** configure bits of inner marker 28 | * @brief configureMat 29 | * @param nVal N region 30 | * @param kVal K region 31 | * @param maxIter Number of iteration 32 | * @return Mat configurated marker 33 | */ 34 | cv::Mat configureMat(int nVal, int kVal, int maxIter=10000); 35 | // computes the distance of a marker to itself 36 | int dstMarker(const cv::Mat m); 37 | 38 | // computes distance between marker to marker 39 | int dstMarkerToMarker(const cv::Mat m1, const cv::Mat m2); 40 | 41 | // computes distance between marker to set of markers 42 | int dstMarkerToFractalDict(cv::Mat m); 43 | 44 | // saves to a binary stream 45 | static void _toStream(FractalMarkerSet &configuration, std::ostream &str); 46 | 47 | // load from a binary stream 48 | static void _fromStream(FractalMarkerSet &configuration, std::istream &str); 49 | 50 | static bool isPredefinedConfigurationString(std::string str); 51 | 52 | static std::string getTypeString(FractalMarkerSet::CONF_TYPES t); 53 | 54 | static CONF_TYPES getTypeFromString(std::string str); 55 | 56 | static FractalMarkerSet load(std::string info); 57 | 58 | static FractalMarkerSet loadPredefined(std::string info); 59 | 60 | static FractalMarkerSet loadPredefined(CONF_TYPES info); 61 | 62 | static FractalMarkerSet readFromFile(std::string path); 63 | 64 | // saves configuration to a text file 65 | void saveToFile(cv::FileStorage& fs); 66 | 67 | //Fractal configuration. 
id_marker 68 | std::map fractalMarkerCollection; 69 | //Nbits_idmarkers 70 | std::map> nbits_fractalMarkerIDs ; 71 | 72 | enum Fractal3DInfoType 73 | { 74 | NONE = -1, 75 | PIX = 0, 76 | METERS = 1, 77 | NORM = 2 78 | }; // indicates if the data in Fractal is expressed in meters or in pixels 79 | 80 | /**Indicates if the corners are expressed in meters 81 | */ 82 | bool isExpressedInMeters() const 83 | { 84 | return mInfoType == METERS; 85 | } 86 | /**Indicates if the corners are expressed in meters 87 | */ 88 | bool isExpressedInPixels() const 89 | { 90 | return mInfoType == PIX; 91 | } 92 | /**Indicates if the corners are normalized. -1..1 external marker 93 | */ 94 | bool isNormalize() const 95 | { 96 | return mInfoType == NORM; 97 | } 98 | 99 | //Normalize fractal marker. The corners will go on to take the values (-1,1)(1,1),(1,-1)(-1,-1) 100 | FractalMarkerSet normalize(); 101 | 102 | //Convert marker to meters 103 | FractalMarkerSet convertToMeters(float fractalSize_meters); 104 | 105 | static std::vector getConfigurations(); 106 | 107 | //Get fractal size (external marker) 108 | float getFractalSize() const 109 | { 110 | FractalMarker externalMarker = fractalMarkerCollection.at(_idExternal); 111 | return externalMarker.getMarkerSize(); 112 | } 113 | 114 | //Get number of bits (external marker) 115 | int nBits() const 116 | { 117 | FractalMarker externalMarker = fractalMarkerCollection.at(_idExternal); 118 | return externalMarker.nBits(); 119 | } 120 | 121 | // Check if m is a inner marker, and get its id. 122 | bool isFractalMarker(cv::Mat &m, int nbits, int&id); 123 | 124 | // Get all inners corners 125 | std::map> getInnerCorners(); 126 | 127 | cv::Mat getFractalMarkerImage(int pixSize, bool border=false); 128 | 129 | // variable indicates if the data is expressed in meters or in pixels or are normalized 130 | int mInfoType;/* -1:NONE, 0:PIX, 1:METERS, 2:NORMALIZE*/ 131 | 132 | private: 133 | // Number of levels 134 | int _nmarkers; 135 | //ID external marker 136 | int _idExternal=0; 137 | //Configuration dictionary 138 | std::string config; 139 | }; 140 | } 141 | -------------------------------------------------------------------------------- /src/aruco_canvas.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | 7 | #include "deps/dependencies.h" 8 | 9 | #include "deps/aruco/aruco.h" 10 | #include "deps/aruco/cvdrawingutils.h" 11 | 12 | 13 | // implements a drawing canvas / flat screen that is tracked using aruco markers 14 | class Aruco_canvas 15 | { 16 | protected: 17 | std::array img_markers_orig; 18 | int marker_size_old = 120; 19 | 20 | 21 | // the relevant markers in this frame that have the proper ID for the canvas 22 | std::array< aruco::Marker, 4> markers; 23 | 24 | // ok == 4 if all four markers are detected. 
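	// (n_visible_markers below is the counter that valid() checks against 4)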
25 | int n_visible_markers = 0; 26 | 27 | 28 | aruco::CameraParameters CamParam; 29 | aruco::MarkerDetector MDetector; 30 | std::array img_markers; 31 | 32 | // helpers for estimating the position of temporatily invisible / untrackable markers 33 | /* 34 | class Marker_estimation_features 35 | { 36 | public: 37 | cv::Point2f offset; // the offset between two Markers (average over the difference between the 4 corresponding Corner Points) 38 | cv::Vec2f direction_vector; 39 | float scaling; 40 | }; 41 | std::array< std::array, 4> marker_features; 42 | */ 43 | std::array< std::array, 4> mutual_marker_offsets; 44 | 45 | void calc_canvas_plane_from_markers(); 46 | 47 | // track the canvas using all four markers, no marker prediction. 48 | 49 | 50 | // track the canvas and estimate the position of invisible / currently not trackable markers using a simple prediction: 51 | // calculate the average over all marker offsets ( offsets = vectors from the non-trackable marker to the othe, still trackable markers) 52 | void predict_markers_using_mutual_offsets(); 53 | 54 | // use the marker edges that point to the neighbouring markers and calculate the scaling that is required to let this edge vector point to the neighbouring marker 55 | // this is better than the simple offset predictor, because it is mostly invariant to head rotations and distance changes, 56 | // but is prone to jitter if markers are too small. hence, here, larger markers should be used. 57 | std::array< std::array, 4> edge_scale; 58 | void predict_markers_using_edge_vectors(); 59 | 60 | public: 61 | 62 | enum enum_prediction_method 63 | { 64 | NO_MARKER_PREDICTION, 65 | MARKER_PREDICTION_MUTUAL_OFFSETS, 66 | MARKER_PREDICTION_EDGE_VECTORS 67 | } prediction_method = MARKER_PREDICTION_EDGE_VECTORS; 68 | 69 | 70 | // active area in screen coordinates 71 | typedef std::array plane_type; 72 | 73 | 74 | // store the Marker corners relevant for defining the screen plane 75 | plane_type screen_plane; 76 | 77 | // stores the points of the detected rectangle / image plane 78 | plane_type image_plane; 79 | 80 | 81 | // use this plane definition with corners (0,0) - (1,1) 82 | // if an external application renders the AR Markers or of you use 83 | // "physical" / printed markers 84 | const plane_type screen_plane_external = 85 | { 86 | cv::Point2f(0,0), 87 | cv::Point2f(1,0), 88 | cv::Point2f(1,1), 89 | cv::Point2f(0,1) 90 | }; 91 | 92 | int marker_size = 150; 93 | 94 | bool valid() { return n_visible_markers == 4; } 95 | 96 | 97 | void setup(bool use_enclosed_markers = false); 98 | 99 | // void set_detection_size(float minimum_procentual_marker_size); 100 | 101 | void draw(cv::Mat& img, const int x, const int y, const int w, const int h); 102 | 103 | void draw_detected_markers(cv::Mat& img); 104 | 105 | // blur markers in img such that there is no double-detection of e.g. 
markers in a scenecam preview 106 | void blur_detected_markers(cv::Mat& img); 107 | 108 | void update(cv::Mat& img_cam); 109 | 110 | // assumes all markers are visible, hence p1..p4 are well defined 111 | // check with valid() before calling transform 112 | cv::Point2f transform(cv::Point2f gaze_point) 113 | { 114 | return transform(gaze_point, screen_plane); 115 | } 116 | 117 | // assumes all markers are visible, hence p1..p4 are well defined 118 | // check with valid() before calling transform 119 | cv::Point2f transform(cv::Point2f gaze_point, const plane_type& target_plane) 120 | { 121 | using namespace cv; 122 | using namespace std; 123 | // try to calc screen coordinates from gaze point 124 | auto H = calc_perspective_matrix(image_plane, target_plane); 125 | return perspective_transform(H, gaze_point); 126 | } 127 | 128 | 129 | protected: 130 | 131 | // https://www.researchgate.net/profile/Christopher_R_Wren/publication/215439543_Perspective_Transform_Estimation/links/56df558708ae9b93f79a948e.pdf 132 | // see PerspectiveTransformEstimation.pdf 133 | // array of points on: 134 | // ip = Image plane 135 | // wp = world plane 136 | Eigen::Matrix calc_perspective_matrix(const std::array & ip, const std::array & wp); 137 | 138 | 139 | 140 | inline cv::Point2f perspective_transform(Eigen::Matrix H, const cv::Point2f& p) 141 | { 142 | using namespace cv; 143 | using namespace Eigen; 144 | 145 | Vector3f v(p.x, p.y, 1); 146 | 147 | Vector3f C(H(2, 0), H(2, 1), 1); 148 | H(2, 0) = 0; 149 | H(2, 1) = 0; 150 | H(2, 2) = 1; 151 | auto t = (H * v) / C.dot(v); 152 | return Point2f(t[0], t[1]); 153 | } 154 | 155 | }; -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required (VERSION 3.1) 2 | 3 | INCLUDE(CheckIncludeFiles) 4 | 5 | 6 | project (libretracker) 7 | 8 | 9 | ### enable / disable OpenCL and Labstreaming layer ### 10 | option (OPENCL_ENABLED "Use OpenCL" OFF) 11 | option (LIBLSL_ENABLED "Add Labstreaming Layer" OFF) 12 | ###################################################### 13 | 14 | 15 | ### add MACRO Definitions ###################### 16 | if (OPENCL_ENABLED) 17 | add_definitions(-DOPENCL_ENABLED) 18 | endif () 19 | if (LIBLSL_ENABLED) 20 | # set (LSL_DIR "/home/labadmin/cpp/labstreaminglayer/build/install/LSL/share/LSL/") % not working.. 21 | add_definitions(-DLSL_ENABLED) 22 | endif () 23 | ################################################## 24 | 25 | 26 | 27 | ### define the required libraries ################ 28 | 29 | 30 | # cmake doesnt know how to deal with Eigen3 under Windows, so search only under linux / unix for eigen 3... 31 | if (UNIX) 32 | find_package(OpenCV REQUIRED) 33 | find_package(Eigen3 REQUIRED) 34 | set(FLTK_SKIP_FLUID true) 35 | find_package(FLTK REQUIRED) 36 | 37 | if (LIBLSL_ENABLED) 38 | # link_directories("/home/labadmin/cpp/labstreaminglayer/build/install/LSL/lib/") 39 | # set (LSL_INCLUDE_DIR "/home/labadmin/cpp/labstreaminglayer/build/install/LSL/include") 40 | set(LSL_LIBRARY_PATH "enter path" CACHE STRING "path to the LSL library") 41 | link_directories(${LSL_LIBRARY_PATH}) 42 | set (LSL_LIBRARY "lsl64") 43 | endif () 44 | 45 | else () 46 | # under windows (and using cmake-gui), ask the user to enter the path to the headers and lib files. 
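# example (hypothetical paths), set either in cmake-gui or on the command line:
#   cmake -DOPENCV_INCLUDE_DIR=C:/libs/opencv/include -DEIGEN3_INCLUDE_DIR=C:/libs/eigen3 -DFLTK_INCLUDE_DIR=C:/libs/fltk -DLIBRARIES_PATH=C:/libs ..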
47 | set(OPENCV_INCLUDE_DIR "enter path" CACHE STRING "path to the OpenCV include directory") 48 | set(EIGEN3_INCLUDE_DIR "enter path" CACHE STRING "path to the eigen3 include directory") 49 | set(FLTK_INCLUDE_DIR "enter path" CACHE STRING "path to the FLTK include directory") 50 | 51 | # I assume that you have a path under which you have a collection of subdirectories containing your libs, like eigen, fltk and opencv. 52 | # specific library names including subpaths are defined in main.cpp using #pragma comment(lib...) 53 | set(LIBRARIES_PATH "enter path" CACHE STRING "path to your library collection. before compiling, adjust all the #pragma comment(lib...) in main.cpp.") 54 | link_directories(${LIBRARIES_PATH}) 55 | 56 | if (LIBLSL_ENABLED) 57 | set (LSL_INCLUDE_DIR "enter path" CACHE STRING "path to the Labstreaming Layer include directory") 58 | endif () 59 | endif() 60 | 61 | 62 | 63 | if (OPENCL_ENABLED) 64 | find_package(Boost REQUIRED) 65 | find_package(OpenCL REQUIRED) 66 | endif () 67 | ################################################## 68 | 69 | 70 | 71 | ### define the list of source files ############## 72 | set(SOURCES 73 | src/helpers.cpp 74 | src/main.cpp 75 | src/aruco_include.cpp 76 | src/eyetracking.cpp 77 | src/speller_canvas.cpp 78 | src/aruco_canvas.cpp 79 | src/calibration.cpp 80 | src/pupil_tracking.cpp 81 | src/pupil_tracker_timm.cpp 82 | src/deps/s/simple_gui_fltk.cpp 83 | src/deps/s/cv_camera_control.cpp 84 | src/deps/s/cv_save_video.cpp 85 | src/deps/tuebingen_pure/PuRe.cpp 86 | src/deps/tuebingen_pure/PupilDetectionMethod.cpp 87 | src/deps/tuebingen_pure/pupil-tracking/PupilTrackingMethod.cpp 88 | src/deps/tuebingen_pure/pupil-tracking/PuReST.cpp 89 | src/deps/timms_algorithm/src/timm.cpp) 90 | 91 | if (OPENCL_ENABLED) 92 | set(SOURCES ${SOURCES} src/deps/timms_algorithm/src/opencl_kernel.cpp) 93 | endif () 94 | 95 | if (WIN32) 96 | set(SOURCES ${SOURCES} src/deps/cpu_features/cpu_x86.cpp) 97 | set(SOURCES ${SOURCES} src/deps/DeviceEnumerator.cpp) 98 | endif() 99 | 100 | ################################################## 101 | 102 | 103 | 104 | 105 | 106 | ### define some include directories ############## 107 | include_directories (${EIGEN3_INCLUDE_DIR}) 108 | include_directories (${OPENCV_INCLUDE_DIR}) 109 | include_directories (${FLTK_INCLUDE_DIR}) 110 | include_directories ("/usr/local/include/opencv4/") # for some unknown reason, cmake and opencv4 do not get along well on the raspberry pi 4..
111 | include_directories ("src/deps/aruco/") 112 | 113 | 114 | if (OPENCL_ENABLED) 115 | include_directories (${Boost_INCLUDE_DIRS}) 116 | include_directories (${OpenCL_INCLUDE_DIRS}) 117 | #include_directories ("src/deps/OpenCL-Headers/") 118 | #include_directories ("src/deps/compute/include/") 119 | endif () 120 | 121 | if (LIBLSL_ENABLED) 122 | include_directories (${LSL_INCLUDE_DIR}) 123 | endif () 124 | 125 | ################################################## 126 | 127 | 128 | 129 | 130 | ### define the executable ###################### 131 | #message("defining executable and link options") 132 | #message("fltk libs: ${FLTK_LIBRARIES}") 133 | #message("opencv libs: ${OpenCV_LIBS}") 134 | 135 | CHECK_INCLUDE_FILES(arm_neon.h ARM_NEON_AVAILABLE) 136 | if (ARM_NEON_AVAILABLE) 137 | message("enabling usage of ARM NEON Instructions") 138 | add_compile_options("-mfpu=neon") 139 | endif() 140 | 141 | 142 | add_executable(libretracker ${SOURCES}) 143 | 144 | #add_compile_options("-std=c++14") 145 | target_compile_features(libretracker PUBLIC cxx_std_14) # this is the better way 146 | 147 | if (UNIX) 148 | target_link_libraries(libretracker GL pthread ${OpenCV_LIBS} ${FLTK_LIBRARIES}) 149 | endif() 150 | 151 | if (OPENCL_ENABLED) 152 | target_link_libraries(libretracker ${OpenCL_LIBRARY}) 153 | endif () 154 | 155 | if (LIBLSL_ENABLED) 156 | target_link_libraries(libretracker ${LSL_LIBRARY}) 157 | endif () 158 | ################################################## 159 | 160 | 161 | 162 | 163 | -------------------------------------------------------------------------------- /src/deps/aruco/fractallabelers/fractalposetracker.h: -------------------------------------------------------------------------------- 1 | #include 2 | #include "../picoflann.h" 3 | #include "../cameraparameters.h" 4 | #include "../markerdetector.h" 5 | #include "fractalmarkerset.h" 6 | #include "aruco_export.h" 7 | namespace aruco { 8 | struct PicoFlann_KeyPointAdapter{ 9 | inline float operator( )(const cv::KeyPoint &elem, int dim)const { return dim==0?elem.pt.x:elem.pt.y; } 10 | inline float operator( )(const cv::Point2f &elem, int dim)const { return dim==0?elem.x:elem.y; } 11 | }; 12 | 13 | class ARUCO_EXPORT FractalPoseTracker 14 | { 15 | public: 16 | FractalPoseTracker(); 17 | /** init fractlPoseTracker parameters 18 | * @brief setParams 19 | * @param cam_params camera paremeters 20 | * @param msconf FractalMarkerSet configuration 21 | */ 22 | void setParams(const CameraParameters& cam_params, const FractalMarkerSet& msconf); 23 | /** estimate the pose of the fractal marker. 24 | * @brief fractalInnerPose 25 | * @param markerDetector 26 | * @param detected markers 27 | * @param refinement, use or not pose refinement. True by default. 28 | * @return true if the pose is estimated and false otherwise. If not estimated, the parameters m.Rvec and m.Tvec 29 | * and not set. 30 | */ 31 | bool fractalInnerPose(const cv::Ptr markerDetector, const std::vector& markers, bool refinement=true); 32 | /** extraction of the region of the image where the marker is estimated to be based on the previous pose 33 | * @brief ROI 34 | * @param imagePyramid set images 35 | * @param img original image. The image is scaled according to the selected pyramid image. 36 | * @param innerPoints2d collection fractal inner points. The points are scaled according to the selected pyramid image. 37 | * @param offset. Position of the upper inner corner of the marker. The offset is scaled according to the selected pyramid image. 
38 | * @param ratio selected scaling factor 39 | */ 40 | void ROI(const std::vector imagePyramid, cv::Mat &img, std::vector &innerPoints2d, cv::Point2f &offset, float &ratio); 41 | /** classification of the corners of the marker 42 | * @brief assignClass 43 | * @param im image used in the classification 44 | * @param kpoints to classify 45 | * @param norm. Is it necessary to normalize keypoints? (-1 .. 1) 46 | * @param wsize. Window size 47 | */ 48 | void assignClass(const cv::Mat &im, std::vector& kpoints, bool norm=false, int wsize=5); 49 | 50 | /** estimate the pose of the fractal marker. Method case 2, paper. 51 | * @brief fractal_solve_ransac 52 | * @param ninners (number of total inners points used) 53 | * @param inner_kpnt matches (inner points - detected keypoints) 54 | * @param kpnts keypoints 55 | * @param maxIter maximum number of iterations 56 | * @param _minInliers beta in paper 57 | * @param _thresInliers alpha in paper 58 | * @return Mat best model homography. 59 | */ 60 | cv::Mat fractal_solve_ransac(int ninners, std::vector>> inner_kpnt, std::vector kpnts, uint32_t maxIter=500, float _minInliers=0.35f, float _thresInliers=0.7f); 61 | 62 | /** Refinement of the internal points of the marker and pose estimation with these. 63 | * @brief fractalRefinement 64 | * @param markerDetector 65 | * @param markerWarpPix. Optimal markerwarpPix used to select the image of the pyramid. Default value 10. 66 | * @return true if the pose is estimated and false otherwise. If not estimated, the parameters m.Rvec and m.Tvec 67 | * and not set. 68 | */ 69 | bool fractalRefinement(const cv::Ptr markerDetector, int markerWarpPix=10); 70 | 71 | // return the rotation vector. Returns an empty matrix if last call to estimatePose returned false 72 | const cv::Mat getRvec() const 73 | { 74 | return _rvec; 75 | } 76 | 77 | // return the translation vector. Returns an empty matrix if last call to estimatePose returned false 78 | const cv::Mat getTvec() const 79 | { 80 | return _tvec; 81 | } 82 | // return all inner points from fractal marker 83 | const std::vector getInner3d() 84 | { 85 | return _innerp3d; 86 | } 87 | //is the pose valid? 88 | bool isPoseValid()const{return !_rvec.empty() && !_tvec.empty();} 89 | 90 | private: 91 | FractalMarkerSet _fractalMarker; //FractalMarkerSet configuration 92 | aruco::CameraParameters _cam_params; //Camera parameters. 
93 | cv::Mat _rvec, _tvec; // current poses 94 | std::map> _id_innerp3d; //Id_marker-Inners_corners 95 | std::vector _innerp3d; //All inners corners 96 | std::vector _innerkpoints; //All inners keypoints 97 | picoflann::KdTreeIndex<2,PicoFlann_KeyPointAdapter> _kdtree; 98 | std::map _id_radius; //Idmarker_Radius(Optimus) 99 | std::map _id_area; //Idmarker_projectedArea(Optimus) 100 | float _preRadius = 0; //radius used previous iteration 101 | }; 102 | } 103 | -------------------------------------------------------------------------------- /src/deps/aruco/fractallabelers/fractallabeler.cpp: -------------------------------------------------------------------------------- 1 | #include "fractallabeler.h" 2 | 3 | #include 4 | #include "../aruco_cvversioning.h" 5 | namespace aruco 6 | { 7 | 8 | void FractalMarkerLabeler::setConfiguration(const FractalMarkerSet& fractMarkerSet) { 9 | _fractalMarkerSet = fractMarkerSet; 10 | } 11 | 12 | bool FractalMarkerLabeler::detect(const cv::Mat& in, int& marker_id, int& nRotations, std::string &additionalInfo) 13 | { 14 | assert(in.rows == in.cols); 15 | cv::Mat grey; 16 | if (in.type() == CV_8UC1) 17 | grey = in; 18 | else 19 | cv::cvtColor(in, grey, CV_BGR2GRAY); 20 | // threshold image 21 | cv::threshold(grey, grey, 125, 255, cv::THRESH_BINARY | cv::THRESH_OTSU); 22 | 23 | std::map > nbits_innerCodes; 24 | 25 | for(auto bitsids:_fractalMarkerSet.nbits_fractalMarkerIDs){ 26 | 27 | int nbits = bitsids.first; 28 | std::vector innerCodes; 29 | getInnerCode(grey, nbits, innerCodes); 30 | 31 | if (innerCodes.size()>0){ 32 | if (sum(innerCodes[0])[0]!=0){ 33 | nbits_innerCodes[nbits]=innerCodes; 34 | } 35 | } 36 | } 37 | 38 | if ( nbits_innerCodes.size()==0)return false; 39 | 40 | //check if any dictionary recognizes it 41 | for(auto bit_innerCodes:nbits_innerCodes){ 42 | 43 | uint32_t nb = bit_innerCodes.first; 44 | auto innerCodes = bit_innerCodes.second; 45 | 46 | for (int i = 0; i < 4; i++) 47 | { 48 | if (_fractalMarkerSet.isFractalMarker(innerCodes[i], nb, marker_id)) 49 | { 50 | // is in the set? 
51 | nRotations = i; // how many rotations are and its id 52 | return true; // bye bye 53 | } 54 | } 55 | } 56 | return false; 57 | } 58 | 59 | 60 | std::string FractalMarkerLabeler::getName() const 61 | { 62 | return "fractal";; 63 | } 64 | 65 | bool FractalMarkerLabeler::getInnerCode(const cv::Mat& thres_img, int total_nbits, std::vector& innerCodes) 66 | { 67 | int bits_noborder = static_cast(std::sqrt(total_nbits)); 68 | int bits_withborder = bits_noborder + 2; 69 | // Markers are divided in (bits_a+2)x(bits_a+2) regions, of which the inner bits_axbits_a belongs to marker 70 | // info 71 | // the external border shoould be entirely black 72 | cv::Mat nonZeros(bits_withborder,bits_withborder,CV_32SC1); 73 | cv::Mat nValues(bits_withborder,bits_withborder,CV_32SC1); 74 | nonZeros.setTo(cv::Scalar::all(0)); 75 | nValues.setTo(cv::Scalar::all(0)); 76 | for (int y = 0; y < thres_img.rows; y++) 77 | { 78 | const uchar *ptr=thres_img.ptr(y); 79 | int my= float(bits_withborder)*float(y)/ float(thres_img.rows); 80 | for (int x = 0; x < thres_img.cols; x++) 81 | { 82 | int mx= float(bits_withborder)*float(x)/ float(thres_img.cols); 83 | if( ptr[x]>125) 84 | nonZeros.at(my,mx)++; 85 | nValues.at(my,mx)++; 86 | } 87 | } 88 | cv::Mat binaryCode(bits_withborder,bits_withborder,CV_8UC1); 89 | //now, make the theshold 90 | for(int y=0;y(y,x)>nValues.at(y,x)/2) 93 | binaryCode.at(y,x)=1; 94 | else 95 | binaryCode.at(y,x)=0; 96 | } 97 | 98 | //check if border is completely black 99 | for (int y = 0; y < bits_withborder; y++) 100 | { 101 | int inc = bits_withborder - 1; 102 | if (y == 0 || y == bits_withborder - 1) 103 | inc = 1; // for first and last row, check the whole border 104 | for (int x = 0; x < bits_withborder; x += inc) 105 | if (binaryCode.at(y,x)!=0 ) return false; 106 | } 107 | 108 | //take the inner code 109 | 110 | cv::Mat _bits(bits_noborder,bits_noborder,CV_8UC1); 111 | for(int y=0;y(y,x)=binaryCode.at(y+1,x+1); 114 | 115 | // now, get the 64bits ids 116 | int nr = 0; 117 | do 118 | { 119 | innerCodes.push_back(_bits); 120 | _bits = rotate(_bits); 121 | nr++; 122 | } while (nr < 4); 123 | return true; 124 | } 125 | 126 | // convert matrix of (0,1)s in a 64 bit value 127 | uint64_t FractalMarkerLabeler::touulong(const cv::Mat& code) 128 | { 129 | std::bitset<64> bits; 130 | int bidx = 0; 131 | for (int y = code.rows - 1; y >= 0; y--) 132 | for (int x = code.cols - 1; x >= 0; x--) 133 | bits[bidx++] = code.at(y, x); 134 | 135 | return bits.to_ullong(); 136 | } 137 | 138 | cv::Mat FractalMarkerLabeler::rotate(const cv::Mat& in) 139 | { 140 | cv::Mat out; 141 | in.copyTo(out); 142 | for (int i = 0; i < in.rows; i++) 143 | { 144 | for (int j = 0; j < in.cols; j++) 145 | { 146 | out.at(i, j) = in.at(in.cols - j - 1, i); 147 | } 148 | } 149 | return out; 150 | } 151 | } 152 | 153 | -------------------------------------------------------------------------------- /src/deps/aruco/fractaldetector.cpp: -------------------------------------------------------------------------------- 1 | #include "fractaldetector.h" 2 | #include "opencv2/calib3d/calib3d.hpp" 3 | #include 4 | #include "cvdrawingutils.h" 5 | #include 6 | #include "aruco_cvversioning.h" 7 | 8 | namespace aruco 9 | { 10 | FractalDetector::FractalDetector() 11 | { 12 | _markerDetector = new MarkerDetector(); 13 | }; 14 | 15 | void FractalDetector::setConfiguration(int params) 16 | { 17 | _fractalLabeler = FractalMarkerLabeler::create((FractalMarkerSet::CONF_TYPES)params); 18 | 
_params.configuration_type=FractalMarkerSet::getTypeString((FractalMarkerSet::CONF_TYPES)params); 19 | _markerDetector->setMarkerLabeler(_fractalLabeler); 20 | } 21 | 22 | void FractalDetector::setConfiguration(std::string params) 23 | { 24 | _params.configuration_type=params; 25 | _fractalLabeler = FractalMarkerLabeler::create(params); 26 | _markerDetector->setMarkerLabeler(_fractalLabeler); 27 | } 28 | 29 | void FractalDetector::drawMarkers(cv::Mat &img) 30 | { 31 | float size= std::max(1.,float(img.cols)/ 1280.); 32 | for(auto m:Markers) 33 | m.draw(img, cv::Scalar(0, 0, 255), size, false); 34 | } 35 | 36 | void FractalDetector::draw2d(cv::Mat &img){ 37 | if(Markers.size() > 0) 38 | { 39 | std::map id_fmarker = _fractalLabeler->_fractalMarkerSet.fractalMarkerCollection; 40 | 41 | std::vector inners; 42 | std::map> id_innerCorners = _fractalLabeler->_fractalMarkerSet.getInnerCorners(); 43 | for(auto id_innerC:id_innerCorners) 44 | { 45 | std::vector inner3d; 46 | for(auto pt:id_innerC.second) 47 | inners.push_back(cv::Point2f(pt.x,pt.y)); 48 | } 49 | 50 | std::vector srcPnts; 51 | std::vector point2d; 52 | for(auto m:Markers) 53 | { 54 | for(auto p:id_fmarker[m.id].points) 55 | { 56 | cv::Point3f p3d = p/(_fractalLabeler->_fractalMarkerSet.getFractalSize()/2); 57 | srcPnts.push_back(cv::Point2f(p3d.x, p3d.y)); 58 | } 59 | for(auto p:m) 60 | point2d.push_back(p); 61 | 62 | } 63 | 64 | cv::Mat H; 65 | H = cv::findHomography(srcPnts, point2d); 66 | std::vector dstPnt; 67 | cv::perspectiveTransform(inners, dstPnt, H); 68 | 69 | float size= std::max(1.,float(img.cols)/ 1280.); 70 | for(auto p:dstPnt) 71 | cv::circle(img, p, size, cv::Scalar(0,0,255), CV_FILLED); 72 | } 73 | } 74 | 75 | void FractalDetector::draw3d(cv::Mat &img, bool cube, bool axis){ 76 | if(Tracker.isPoseValid()) 77 | { 78 | std::vector innerPoints3d = Tracker.getInner3d(); 79 | std::vector innerPoints; 80 | projectPoints(innerPoints3d, Tracker.getRvec(), Tracker.getTvec(), _cam_params.CameraMatrix, _cam_params.Distorsion, innerPoints); 81 | for(auto p:innerPoints) 82 | circle(img, p, 3 , cv::Scalar(0,0,255),CV_FILLED); 83 | 84 | //Draw cube 85 | if(cube) 86 | { 87 | std::map id_fmarker = _fractalLabeler->_fractalMarkerSet.fractalMarkerCollection; 88 | for(auto m:Markers) 89 | draw3dCube(img, id_fmarker[m.id], _cam_params, 2); 90 | } 91 | 92 | //Draw axes 93 | if(axis) 94 | CvDrawingUtils::draw3dAxis(img, _cam_params, getRvec(), getTvec(), 1); 95 | } 96 | } 97 | 98 | void FractalDetector::draw3dCube(cv::Mat& Image, FractalMarker m, const CameraParameters& CP, int lineSize) 99 | { 100 | cv::Mat objectPoints(8, 3, CV_32FC1); 101 | 102 | float halfSize = (m.getMarkerSize()/_fractalLabeler->_fractalMarkerSet.getFractalSize()); 103 | float msize= (m.getMarkerSize()/_fractalLabeler->_fractalMarkerSet.getFractalSize())*2; 104 | 105 | objectPoints.at(0, 0) = -halfSize; 106 | objectPoints.at(0, 1) = -halfSize; 107 | objectPoints.at(0, 2) = 0; 108 | objectPoints.at(1, 0) = halfSize; 109 | objectPoints.at(1, 1) = -halfSize; 110 | objectPoints.at(1, 2) = 0; 111 | objectPoints.at(2, 0) = halfSize; 112 | objectPoints.at(2, 1) = halfSize; 113 | objectPoints.at(2, 2) = 0; 114 | objectPoints.at(3, 0) = -halfSize; 115 | objectPoints.at(3, 1) = halfSize; 116 | objectPoints.at(3, 2) = 0; 117 | 118 | objectPoints.at(4, 0) = -halfSize; 119 | objectPoints.at(4, 1) = -halfSize; 120 | objectPoints.at(4, 2) = msize; 121 | objectPoints.at(5, 0) = halfSize; 122 | objectPoints.at(5, 1) = -halfSize; 123 | objectPoints.at(5, 2) = msize; 124 | 
objectPoints.at(6, 0) = halfSize; 125 | objectPoints.at(6, 1) = halfSize; 126 | objectPoints.at(6, 2) = msize; 127 | objectPoints.at(7, 0) = -halfSize; 128 | objectPoints.at(7, 1) = halfSize; 129 | objectPoints.at(7, 2) = msize; 130 | 131 | 132 | std::vector imagePoints; 133 | projectPoints(objectPoints, getRvec(), getTvec(), CP.CameraMatrix, CP.Distorsion, imagePoints); 134 | 135 | for (int i = 0; i < 4; i++) 136 | cv::line(Image, imagePoints[i], imagePoints[(i + 1) % 4], cv::Scalar(0, 0, 255, 255), lineSize); 137 | 138 | for (int i = 0; i < 4; i++) 139 | cv::line(Image, imagePoints[i + 4], imagePoints[4 + (i + 1) % 4], cv::Scalar(0, 0, 255, 255), lineSize); 140 | 141 | for (int i = 0; i < 4; i++) 142 | cv::line(Image, imagePoints[i], imagePoints[i + 4], cv::Scalar(0, 0, 255, 255), lineSize); 143 | } 144 | }; 145 | 146 | -------------------------------------------------------------------------------- /src/pupil_tracking.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "deps/dependencies.h" 4 | #ifdef _WIN32 5 | #include "deps/DeviceEnumerator.h" 6 | #endif 7 | 8 | #include "deps/timms_algorithm/src/timm_two_stage.h" 9 | #include "deps/s/cv_camera_control.h" 10 | #include 11 | 12 | 13 | enum enum_pupil_tracking_variant 14 | { 15 | PUPIL_TRACKING_TIMM, 16 | PUPIL_TRACKING_PURE, 17 | PUPIL_TRACKING_PUREST 18 | }; 19 | 20 | /* 21 | class Pupil 22 | { 23 | public: 24 | cv::Point2f center{ nan(), nan() }; 25 | cv::RotatedRect ellipse{ cv::Point2f{nan(), nan()}, cv::Size2f{nan(), nan()}, nan() }; 26 | }; 27 | */ 28 | 29 | #include "deps/tuebingen_pure/PuRe.h" 30 | #include "deps/tuebingen_pure/pupil-tracking/PuReST.h" 31 | 32 | // todo - use class Pupil 33 | class Pupil_tracker_base 34 | { 35 | public: 36 | Pupil pupil; 37 | virtual void setup(enum_simd_variant simd_width) = 0; 38 | virtual void update(cv::Mat& eye_cam_frame) = 0; 39 | virtual cv::Point2f pupil_center() = 0; 40 | 41 | virtual void show_gui() {} 42 | 43 | 44 | virtual void draw(cv::Mat& img) {} 45 | 46 | }; 47 | 48 | 49 | 50 | class Pupil_tracker_pure : public Pupil_tracker_base 51 | { 52 | protected: 53 | PuRe pupil_detector; 54 | public: 55 | cv::Mat frame_gray; 56 | 57 | public: 58 | virtual void setup(enum_simd_variant simd_width) 59 | { 60 | 61 | } 62 | 63 | virtual void update(cv::Mat& eye_cam_frame) 64 | { 65 | cv::cvtColor(eye_cam_frame, frame_gray, cv::COLOR_BGR2GRAY); 66 | pupil = pupil_detector.run(frame_gray); 67 | } 68 | 69 | virtual cv::Point2f pupil_center() 70 | { 71 | return pupil.center; 72 | } 73 | 74 | virtual void draw(cv::Mat& img) 75 | { 76 | cv::circle(img, pupil.center, 4, cv::Scalar(255, 0, 255), 2); 77 | 78 | if (pupil.size.width > 0 && pupil.size.height > 0) 79 | { 80 | cv::ellipse(img, pupil, cv::Scalar(0, 255, 0), 1); 81 | } 82 | } 83 | 84 | 85 | }; 86 | 87 | class Pupil_tracker_purest : public Pupil_tracker_base 88 | { 89 | protected: 90 | PuRe pupil_detect; 91 | std::unique_ptr pupil_tracking; 92 | int frame_counter = 0; 93 | public: 94 | cv::Mat frame_gray; 95 | 96 | public: 97 | virtual void setup(enum_simd_variant simd_width) 98 | { 99 | pupil_tracking = std::make_unique(); 100 | frame_counter = 0; 101 | } 102 | 103 | virtual void update(cv::Mat& eye_cam_frame) 104 | { 105 | // for now, roi is as large as whole eye cam frame 106 | // TODO: user selectabel roi 107 | cv::Rect roi(0, 0, eye_cam_frame.cols, eye_cam_frame.rows); 108 | cv::cvtColor(eye_cam_frame, frame_gray, cv::COLOR_BGR2GRAY); 109 | 
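// PuReST tracks the pupil from frame to frame and uses the PuRe instance
// (pupil_detect) for (re-)detection when tracking is lost; the result, including
// a confidence value, is written into the inherited `pupil` member that draw()
// visualises below.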
pupil_tracking->run(frame_counter, frame_gray, roi, pupil, pupil_detect); 110 | frame_counter++; 111 | } 112 | 113 | virtual cv::Point2f pupil_center() 114 | { 115 | return pupil.center; 116 | } 117 | 118 | virtual void draw(cv::Mat& img) 119 | { 120 | cv::circle(img, pupil.center, 4, cv::Scalar(255, 0, 255), 2); 121 | 122 | if (pupil.size.width > 0 && pupil.size.height > 0 && pupil.confidence != -1.0) 123 | { 124 | cv::ellipse(img, pupil, cv::Scalar(0, double(pupil.confidence) * 255, 0), 1); 125 | } 126 | } 127 | 128 | 129 | }; 130 | 131 | class Pupil_tracker_timm : public Pupil_tracker_base 132 | { 133 | 134 | public: 135 | Timm_two_stage timm; // todo.. move to protected 136 | //protected: 137 | cv::Mat frame_gray; 138 | 139 | 140 | enum enum_parameter_settings 141 | { 142 | SETTINGS_DEFAULT, 143 | SETTINGS_LPW, 144 | SETTINGS_FUTURE1, 145 | SETTINGS_FUTURE2, 146 | SETTINGS_FUTURE3, 147 | SETTINGS_FROM_FILE, 148 | }; 149 | 150 | using options_type = typename Timm_two_stage::options; 151 | using params_type = std::array; 152 | 153 | 154 | // allowed sizes for the kernels 155 | const std::array sobel_kernel_sizes{ -1, 1, 3, 5, 7 }; 156 | const std::array blur_kernel_sizes{ 0, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29 }; 157 | 158 | // helper functions for the fltk gui 159 | params_type set_params(options_type opt); 160 | options_type set_options(params_type params); 161 | 162 | 163 | cv::Point pupil_pos, pupil_pos_coarse; 164 | 165 | options_type opt; 166 | std::array params; 167 | std::array debug_toggles{ false,false, false,false }; 168 | std::array debug_toggles_old; 169 | bool do_init_windows = true; 170 | 171 | 172 | double n_threads = 1; 173 | 174 | Simple_gui sg; 175 | void setup_gui(); 176 | public: 177 | 178 | virtual void setup(enum_simd_variant simd_width); 179 | virtual void update(cv::Mat& eye_cam_frame); 180 | 181 | virtual void draw(cv::Mat& img); 182 | 183 | virtual void show_gui() { sg.show(); } 184 | 185 | virtual cv::Point2f pupil_center() 186 | { 187 | return pupil_pos; 188 | } 189 | 190 | // special to timms algorithm 191 | options_type decode_genom(Eigen::VectorXf params); 192 | options_type load_parameters(enum_parameter_settings s); 193 | 194 | }; 195 | 196 | class Pupil_tracking 197 | { 198 | protected: 199 | 200 | std::shared_ptr pupil_tracker; 201 | 202 | std::atomic is_running; // must be atomic because it is later used to exit the asynchronous capture thread 203 | 204 | public: 205 | 206 | // TODO: proper encapsulation 207 | std::shared_ptr eye_camera; 208 | cv::Mat frame_eye_cam; 209 | std::vector cameras; // list of active cameras. required for cv::VideoCapture:waitAny 210 | std::vector camera_ready{ 0, 0 }; 211 | 212 | 213 | void setup(enum_simd_variant simd_width, enum_pupil_tracking_variant pupil_tracking_variant); 214 | 215 | void update() 216 | { 217 | // TODO ! 
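// One possible shape for this method (illustrative sketch only; how frames are
// grabbed from eye_camera is an assumption, while update()/draw() exist on
// Pupil_tracker_base):
//
//   grab the next frame from eye_camera into frame_eye_cam, then
//   if (!frame_eye_cam.empty()) { pupil_tracker->update(frame_eye_cam); }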
218 | } 219 | 220 | 221 | // capture from the usb webcam 222 | //void run(enum_simd_variant simd_width, int eye_cam_id = -1, cv::VideoCaptureAPIs eye_cam_backend=cv::CAP_ANY); 223 | void run(enum_simd_variant simd_width, std::shared_ptr eye_camera); 224 | 225 | 226 | 227 | }; 228 | 229 | class Pupil_tracker_timm_tests : public Pupil_tracker_timm 230 | { 231 | 232 | protected: 233 | 234 | public: 235 | 236 | std::vector> timings_vector; 237 | 238 | // fitness function 239 | Eigen::VectorXf eval_fitness(Eigen::VectorXf params, const std::vector& idx, const int n, const std::vector& images_all, const std::vector& ground_truth, bool visualize, const int subsampling_width = 100); 240 | 241 | // evaluate the best parameter set over ALL images of the EXCUSE and ELSE dataset 242 | // run tests on different datasets 243 | void run_lpw_test_all(enum_simd_variant simd_width); 244 | void run_swirski_test(enum_simd_variant simd_width); 245 | void run_excuse_test(enum_simd_variant simd_width); 246 | void run_differential_evolution_optim(enum_simd_variant simd_width); 247 | 248 | }; -------------------------------------------------------------------------------- /src/deps/cpu_features/LICENSE: -------------------------------------------------------------------------------- 1 | CC0 1.0 Universal 2 | 3 | Statement of Purpose 4 | 5 | The laws of most jurisdictions throughout the world automatically confer 6 | exclusive Copyright and Related Rights (defined below) upon the creator and 7 | subsequent owner(s) (each and all, an "owner") of an original work of 8 | authorship and/or a database (each, a "Work"). 9 | 10 | Certain owners wish to permanently relinquish those rights to a Work for the 11 | purpose of contributing to a commons of creative, cultural and scientific 12 | works ("Commons") that the public can reliably and without fear of later 13 | claims of infringement build upon, modify, incorporate in other works, reuse 14 | and redistribute as freely as possible in any form whatsoever and for any 15 | purposes, including without limitation commercial purposes. These owners may 16 | contribute to the Commons to promote the ideal of a free culture and the 17 | further production of creative, cultural and scientific works, or to gain 18 | reputation or greater distribution for their Work in part through the use and 19 | efforts of others. 20 | 21 | For these and/or other purposes and motivations, and without any expectation 22 | of additional consideration or compensation, the person associating CC0 with a 23 | Work (the "Affirmer"), to the extent that he or she is an owner of Copyright 24 | and Related Rights in the Work, voluntarily elects to apply CC0 to the Work 25 | and publicly distribute the Work under its terms, with knowledge of his or her 26 | Copyright and Related Rights in the Work and the meaning and intended legal 27 | effect of CC0 on those rights. 28 | 29 | 1. Copyright and Related Rights. A Work made available under CC0 may be 30 | protected by copyright and related or neighboring rights ("Copyright and 31 | Related Rights"). Copyright and Related Rights include, but are not limited 32 | to, the following: 33 | 34 | i. the right to reproduce, adapt, distribute, perform, display, communicate, 35 | and translate a Work; 36 | 37 | ii. moral rights retained by the original author(s) and/or performer(s); 38 | 39 | iii. publicity and privacy rights pertaining to a person's image or likeness 40 | depicted in a Work; 41 | 42 | iv. 
rights protecting against unfair competition in regards to a Work, 43 | subject to the limitations in paragraph 4(a), below; 44 | 45 | v. rights protecting the extraction, dissemination, use and reuse of data in 46 | a Work; 47 | 48 | vi. database rights (such as those arising under Directive 96/9/EC of the 49 | European Parliament and of the Council of 11 March 1996 on the legal 50 | protection of databases, and under any national implementation thereof, 51 | including any amended or successor version of such directive); and 52 | 53 | vii. other similar, equivalent or corresponding rights throughout the world 54 | based on applicable law or treaty, and any national implementations thereof. 55 | 56 | 2. Waiver. To the greatest extent permitted by, but not in contravention of, 57 | applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and 58 | unconditionally waives, abandons, and surrenders all of Affirmer's Copyright 59 | and Related Rights and associated claims and causes of action, whether now 60 | known or unknown (including existing as well as future claims and causes of 61 | action), in the Work (i) in all territories worldwide, (ii) for the maximum 62 | duration provided by applicable law or treaty (including future time 63 | extensions), (iii) in any current or future medium and for any number of 64 | copies, and (iv) for any purpose whatsoever, including without limitation 65 | commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes 66 | the Waiver for the benefit of each member of the public at large and to the 67 | detriment of Affirmer's heirs and successors, fully intending that such Waiver 68 | shall not be subject to revocation, rescission, cancellation, termination, or 69 | any other legal or equitable action to disrupt the quiet enjoyment of the Work 70 | by the public as contemplated by Affirmer's express Statement of Purpose. 71 | 72 | 3. Public License Fallback. Should any part of the Waiver for any reason be 73 | judged legally invalid or ineffective under applicable law, then the Waiver 74 | shall be preserved to the maximum extent permitted taking into account 75 | Affirmer's express Statement of Purpose. In addition, to the extent the Waiver 76 | is so judged Affirmer hereby grants to each affected person a royalty-free, 77 | non transferable, non sublicensable, non exclusive, irrevocable and 78 | unconditional license to exercise Affirmer's Copyright and Related Rights in 79 | the Work (i) in all territories worldwide, (ii) for the maximum duration 80 | provided by applicable law or treaty (including future time extensions), (iii) 81 | in any current or future medium and for any number of copies, and (iv) for any 82 | purpose whatsoever, including without limitation commercial, advertising or 83 | promotional purposes (the "License"). The License shall be deemed effective as 84 | of the date CC0 was applied by Affirmer to the Work. Should any part of the 85 | License for any reason be judged legally invalid or ineffective under 86 | applicable law, such partial invalidity or ineffectiveness shall not 87 | invalidate the remainder of the License, and in such case Affirmer hereby 88 | affirms that he or she will not (i) exercise any of his or her remaining 89 | Copyright and Related Rights in the Work or (ii) assert any associated claims 90 | and causes of action with respect to the Work, in either case contrary to 91 | Affirmer's express Statement of Purpose. 92 | 93 | 4. Limitations and Disclaimers. 94 | 95 | a. 
No trademark or patent rights held by Affirmer are waived, abandoned, 96 | surrendered, licensed or otherwise affected by this document. 97 | 98 | b. Affirmer offers the Work as-is and makes no representations or warranties 99 | of any kind concerning the Work, express, implied, statutory or otherwise, 100 | including without limitation warranties of title, merchantability, fitness 101 | for a particular purpose, non infringement, or the absence of latent or 102 | other defects, accuracy, or the present or absence of errors, whether or not 103 | discoverable, all to the greatest extent permissible under applicable law. 104 | 105 | c. Affirmer disclaims responsibility for clearing rights of other persons 106 | that may apply to the Work or any use thereof, including without limitation 107 | any person's Copyright and Related Rights in the Work. Further, Affirmer 108 | disclaims responsibility for obtaining any necessary consents, permissions 109 | or other rights required for any use of the Work. 110 | 111 | d. Affirmer understands and acknowledges that Creative Commons is not a 112 | party to this document and has no duty or obligation with respect to this 113 | CC0 or use of the Work. 114 | 115 | For more information, please see 116 | 117 | -------------------------------------------------------------------------------- /src/deps/aruco/cameraparameters.h: -------------------------------------------------------------------------------- 1 | /** 2 | Copyright 2017 Rafael Muñoz Salinas. All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without modification, are 5 | permitted provided that the following conditions are met: 6 | 7 | 1. Redistributions of source code must retain the above copyright notice, this list of 8 | conditions and the following disclaimer. 9 | 10 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 11 | of conditions and the following disclaimer in the documentation and/or other materials 12 | provided with the distribution. 13 | 14 | THIS SOFTWARE IS PROVIDED BY Rafael Muñoz Salinas ''AS IS'' AND ANY EXPRESS OR IMPLIED 15 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 16 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Rafael Muñoz Salinas OR 17 | CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 18 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 19 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 20 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 21 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 22 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 | 24 | The views and conclusions contained in the software and documentation are those of the 25 | authors and should not be interpreted as representing official policies, either expressed 26 | or implied, of Rafael Muñoz Salinas. 
27 | */ 28 | #ifndef _Aruco_CameraParameters_H 29 | #define _Aruco_CameraParameters_H 30 | 31 | #include "aruco_export.h" 32 | #include 33 | #include 34 | #include 35 | 36 | namespace aruco 37 | { 38 | /**\brief Parameters of the camera 39 | */ 40 | 41 | class ARUCO_EXPORT CameraParameters 42 | { 43 | public: 44 | // 3x3 matrix (fx 0 cx, 0 fy cy, 0 0 1) 45 | cv::Mat CameraMatrix; 46 | // distortion matrix 47 | cv::Mat Distorsion; 48 | // size of the image 49 | cv::Size CamSize; 50 | 51 | /**Empty constructor 52 | */ 53 | CameraParameters(); 54 | /**Creates the object from the info passed 55 | * @param cameraMatrix 3x3 matrix (fx 0 cx, 0 fy cy, 0 0 1) 56 | * @param distorsionCoeff 4x1 matrix (k1,k2,p1,p2) 57 | * @param size image size 58 | */ 59 | CameraParameters(cv::Mat cameraMatrix, cv::Mat distorsionCoeff, cv::Size size); 60 | /**Sets the parameters 61 | * @param cameraMatrix 3x3 matrix (fx 0 cx, 0 fy cy, 0 0 1) 62 | * @param distorsionCoeff 4x1 matrix (k1,k2,p1,p2) 63 | * @param size image size 64 | */ 65 | void setParams(cv::Mat cameraMatrix, cv::Mat distorsionCoeff, cv::Size size); 66 | /**Copy constructor 67 | */ 68 | CameraParameters(const CameraParameters& CI); 69 | 70 | /**Indicates whether this object is valid 71 | */ 72 | bool isValid() const 73 | { 74 | return CameraMatrix.rows != 0 && CameraMatrix.cols != 0 && Distorsion.rows != 0 && Distorsion.cols != 0 75 | && CamSize.width != -1 && CamSize.height != -1; 76 | } 77 | /**Assign operator 78 | */ 79 | CameraParameters& operator=(const CameraParameters& CI); 80 | 81 | /**Saves this to a file 82 | */ 83 | void saveToFile(std::string path, bool inXML = true); 84 | 85 | /**Reads from a YAML file generated with the opencv2.2 calibration utility 86 | */ 87 | void readFromXMLFile(std::string filePath); 88 | 89 | /**Adjust the parameters to the size of the image indicated 90 | */ 91 | void resize(cv::Size size); 92 | 93 | /**Returns the location of the camera in the reference system of the marker. 94 | * 95 | * Rvec and Tvec are the transform from the marker to the camera as calculated in other parts of the library 96 | * NOT TESTED 97 | */ 98 | static cv::Point3f getCameraLocation(const cv::Mat &Rvec,const cv::Mat &Tvec); 99 | 100 | /**Given the intrinsic camera parameters returns the GL_PROJECTION matrix for opengl. 101 | * PLease NOTE that when using OpenGL, it is assumed no camera distorsion! So, if it is not true, you should have 102 | * undistor image 103 | * 104 | * @param orgImgSize size of the original image 105 | * @param size of the image/window where to render (can be different from the real camera image). Please not that 106 | *it must be related to CamMatrix 107 | * @param proj_matrix output projection matrix to give to opengl 108 | * @param gnear,gfar: visible rendering range 109 | * @param invert: indicates if the output projection matrix has to yield a horizontally inverted image because 110 | *image data has not been stored in the order of 111 | *glDrawPixels: bottom-to-top. 112 | */ 113 | void glGetProjectionMatrix(cv::Size orgImgSize, cv::Size size, double proj_matrix[16], double gnear, 114 | double gfar, bool invert = false); 115 | 116 | /** 117 | * setup camera for an Ogre project. 118 | * Use: 119 | * ... 120 | * Ogre::Matrix4 PM(proj_matrix[0], proj_matrix[1], ... , proj_matrix[15]); 121 | * yourCamera->setCustomProjectionMatrix(true, PM); 122 | * yourCamera->setCustomViewMatrix(true, Ogre::Matrix4::IDENTITY); 123 | * ... 
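 *
 * The plain OpenGL path via glGetProjectionMatrix() above follows the same pattern
 * (illustrative sketch; `camParams` is a CameraParameters instance, e.g. filled by
 * readFromXMLFile(), and the window size winW x winH plus the 0.05..10.0 clip range
 * are placeholder values):
 *   double proj[16];
 *   camParams.glGetProjectionMatrix(camParams.CamSize, cv::Size(winW, winH), proj, 0.05, 10.0);
 *   glMatrixMode(GL_PROJECTION);
 *   glLoadMatrixd(proj);
 *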
124 | * As in OpenGL, it assumes no camera distorsion 125 | */ 126 | void OgreGetProjectionMatrix(cv::Size orgImgSize, cv::Size size, double proj_matrix[16], double gnear, 127 | double gfar, bool invert = false); 128 | 129 | /**Returns the 4x4 homogeneous transform matrix from the R and T vectors computed 130 | */ 131 | static cv::Mat getRTMatrix(const cv::Mat& R_, const cv::Mat& T_, int forceType); 132 | 133 | 134 | /**Makes this invalid 135 | */ 136 | void clear(); 137 | 138 | ARUCO_EXPORT friend std::ostream &operator<<(std::ostream &str,const CameraParameters&cp); 139 | ARUCO_EXPORT friend std::istream &operator>>(std::istream &str,CameraParameters&cp); 140 | private: 141 | // GL routines 142 | 143 | static void argConvGLcpara2(double cparam[3][4], int width, int height, double gnear, double gfar, double m[16], 144 | bool invert); 145 | static int arParamDecompMat(double source[3][4], double cpara[3][4], double trans[3][4]); 146 | static double norm(double a, double b, double c); 147 | static double dot(double a1, double a2, double a3, double b1, double b2, double b3); 148 | }; 149 | } 150 | #endif 151 | -------------------------------------------------------------------------------- /src/deps/aruco/timers.h: -------------------------------------------------------------------------------- 1 | /** 2 | Copyright 2017 Rafael Muñoz Salinas. All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without modification, are 5 | permitted provided that the following conditions are met: 6 | 7 | 1. Redistributions of source code must retain the above copyright notice, this list of 8 | conditions and the following disclaimer. 9 | 10 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 11 | of conditions and the following disclaimer in the documentation and/or other materials 12 | provided with the distribution. 13 | 14 | THIS SOFTWARE IS PROVIDED BY Rafael Muñoz Salinas ''AS IS'' AND ANY EXPRESS OR IMPLIED 15 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 16 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Rafael Muñoz Salinas OR 17 | CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 18 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 19 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 20 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 21 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 22 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 | 24 | The views and conclusions contained in the software and documentation are those of the 25 | authors and should not be interpreted as representing official policies, either expressed 26 | or implied, of Rafael Muñoz Salinas. 
27 | */ 28 | 29 | #ifndef ARUCO_TIMERS_H 30 | #define ARUCO_TIMERS_H 31 | 32 | 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include "aruco_export.h" 38 | namespace aruco{ 39 | 40 | //timer 41 | struct ScopeTimer 42 | { 43 | std::chrono::high_resolution_clock::time_point begin,end; 44 | 45 | std::string name; 46 | bool use; 47 | enum SCALE {NSEC,MSEC,SEC}; 48 | SCALE sc; 49 | ScopeTimer(std::string name_,bool use_=true,SCALE _sc=MSEC) 50 | { 51 | #ifdef USE_TIMERS 52 | name=name_; 53 | use=use_; 54 | sc=_sc; 55 | begin= std::chrono::high_resolution_clock::now(); 56 | #else 57 | (void)name_; 58 | (void)use_; 59 | (void)_sc; 60 | 61 | #endif 62 | } 63 | ~ScopeTimer() 64 | { 65 | #ifdef USE_TIMERS 66 | if (use){ 67 | end= std::chrono::high_resolution_clock::now(); 68 | double fact=1; 69 | std::string str; 70 | switch(sc) 71 | { 72 | case NSEC:fact=1;str="ns";break; 73 | case MSEC:fact=1e6;str="ms";break; 74 | case SEC:fact=1e9;str="s";break; 75 | }; 76 | 77 | std::cout << "Time("<(end-begin).count())/fact< vtimes; 88 | std::vector names; 89 | std::string _name; 90 | 91 | ScopedTimerEvents(std::string name="",bool start=true,SCALE _sc=MSEC){ 92 | #ifdef USE_TIMERS 93 | if(start) add("start");sc=_sc;_name=name; 94 | #else 95 | (void)name; 96 | (void)start; 97 | (void)_sc; 98 | #endif 99 | } 100 | 101 | void add(std::string name){ 102 | #ifdef USE_TIMERS 103 | vtimes.push_back(std::chrono::high_resolution_clock::now()); 104 | names.push_back(name); 105 | #else 106 | (void)name; 107 | #endif 108 | } 109 | void addspaces(std::vector &str ){ 110 | //get max size 111 | size_t m=0; 112 | for(auto &s:str)m=std::max(size_t(s.size()),m); 113 | for(auto &s:str){ 114 | while(s.size()(vtimes[i]-vtimes[i-1]).count())/fact<(vtimes[i]-vtimes[0]).count())/fact<(e-_s).count()); 153 | n++; 154 | } 155 | 156 | void print(SCALE sc=MSEC){ 157 | #ifdef USE_TIMERS 158 | double fact=1; 159 | std::string str; 160 | switch(sc) 161 | { 162 | case NSEC:fact=1;str="ns";break; 163 | case MSEC:fact=1e6;str="ms";break; 164 | case SEC:fact=1e9;str="s";break; 165 | }; 166 | std::cout<<"Time("<<_name<<")= "<< ( sum/n)/fact< 9 | #include 10 | 11 | #include 12 | 13 | #include 14 | #include "lt_lsl_protocol.h" 15 | 16 | 17 | // optional but helpful for centering the headset such that all markers are visible by the scene camera 18 | int draw_markers(cv::Mat& img, int x, int y, int w, int h, const std::vector& marker_data, const std::vector>& eye_data); 19 | 20 | int main() 21 | { 22 | using namespace lsl; 23 | using namespace std; 24 | using namespace cv; 25 | 26 | //////////// init labstreaming layer and stream //////////////////////// 27 | cout << "trying to resolve libretracker stream(s)..\n"; 28 | 29 | vector results = resolve_stream("type", "LT_EYE"); 30 | if (results.size() == 0) { throw runtime_error("Libretracker not running / not streaming data."); } 31 | cout << "found LT_EYE stream!\n"; 32 | 33 | // assume that only one eyetracker is connected, hence simply take the first stream 34 | stream_inlet inlet_eye(results[0]); 35 | cout << "opened a eye data stream. 
Stream Information:\n\n " << inlet_eye.info().as_xml(); 36 | 37 | // optional marker stream 38 | results = resolve_stream("type", "LT_MARKER"); 39 | stream_inlet inlet_marker(results[0]); 40 | cout << "marker data stream opened.\n"; 41 | cout << "\n\n"; 42 | 43 | const int screen_w = 1280; 44 | const int screen_h = 900; 45 | 46 | 47 | //////////////// load the AR markers //////////////// 48 | const int marker_size = 150; 49 | std::array img_markers; 50 | // array marker_file_names{ "marker_1.jpg", "marker_5.jpg", "marker_10.jpg","marker_25.jpg" }; 51 | 52 | // according to the aruco main developer, ARUCO_MIP_36h12 is the preferred dictionary. 53 | // https://stackoverflow.com/questions/50076117/what-are-the-advantages-disadvantages-between-the-different-predefined-aruco-d 54 | array marker_file_names { "aruco_mip_36h12_00002.png", "aruco_mip_36h12_00004.png", "aruco_mip_36h12_00006.png", "aruco_mip_36h12_00008.png" }; 55 | 56 | 57 | for (size_t i = 0; i < marker_file_names.size(); i++) 58 | { 59 | auto tmp = imread("assets/" + marker_file_names[i]); 60 | resize( tmp, img_markers[i], Size(marker_size, marker_size)); 61 | } 62 | 63 | Mat img; // render everything to this image 64 | Mat img_screen_background = Mat(screen_h, screen_w, CV_8UC3, Scalar(255, 255, 255)); // white background 65 | 66 | cout << "entering main loop. continously reading data from stream inlet.\n"; 67 | vector< vector > data; 68 | vector marker_data; 69 | 70 | float x=0, y=0; 71 | while (true) 72 | { 73 | // this grabs all data from all channels that have accumulated up to this time point 74 | inlet_eye.pull_chunk(data); 75 | 76 | 77 | // render the markers 78 | img_screen_background.copyTo(img); 79 | const int w = screen_w; 80 | const int h = screen_h; 81 | const int b = 0; 82 | const int s = marker_size; 83 | img_markers[0].copyTo(img(Rect(b, b, s, s))); 84 | img_markers[1].copyTo(img(Rect(w - s - b, b, s, s))); 85 | img_markers[2].copyTo(img(Rect(w - s - b, h - s - b, s, s))); 86 | img_markers[3].copyTo(img(Rect(b, h - s - b, s, s))); 87 | 88 | // all streamed gaze coordinates are normalized to the range [0..1] 89 | // * this has the advantage that the tracker doesnt need to know the distance between the markers in the client 90 | // * the client doesnt need to know the image resolution of the webcams 91 | 92 | // calc offset and scaling 93 | int dw = w - 2 * s - b; 94 | int dh = h - 2 * s - b; 95 | int offset = s + b; 96 | 97 | // there *might* be a higher update rate for the gaze data than the screen refresh data. 
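// (the mapping below converts each normalized sample into screen pixels:
//  x = offset + dw * x_01 and y = offset + dh * y_01)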
98 | // draw a circle for each gaze point that was received until now 99 | //(currently: only 30 hz from eye cam, so there should be only one sample in data or data should be empty) 100 | for (auto& v : data) 101 | { 102 | double x_01 = v[LT_SCREEN_X_FILTERED]; 103 | double y_01 = v[LT_SCREEN_Y_FILTERED]; 104 | 105 | if (!std::isnan(x_01) && !std::isnan(y_01)) 106 | { 107 | x = float(offset + dw * x_01); // todo: proper down-conversion of double to float 108 | y = float(offset + dh * y_01); 109 | circle(img, Point2f(x, y), 8, Scalar(255, 0, 255), 4); 110 | cout << "\n(x,y)=" << x << " " << y; 111 | } 112 | } 113 | // also draw the last gaze point if no new data arrived 114 | circle(img, Point2f(x, y), 8, Scalar(255, 0, 255), 4); 115 | 116 | /////// optional: visualize tracked marker ///////////// 117 | inlet_marker.pull_sample(marker_data); 118 | int n_visible = draw_markers(img, 0.5 * w - 0.5 * 640, h - 480, 640, 480, marker_data, data); 119 | 120 | // process events 121 | imshow("screen", img); 122 | cv::waitKey(1000 / 60); // render with approx 60 fps 123 | } 124 | 125 | return EXIT_SUCCESS; 126 | } 127 | 128 | 129 | // optional code 130 | int draw_markers(cv::Mat& img, int x, int y, int w, int h, const std::vector& marker_data, const std::vector>& eye_data) 131 | { 132 | using namespace std; 133 | using namespace cv; 134 | if (marker_data.size() != 1 + 2 * 4) { return 0; } // cerr << "\nwrong marker data."; 135 | 136 | array markers; 137 | int n_visible_markers = 0; 138 | for (int i = 0; i < 4; i++) 139 | { 140 | double mx = x + w * marker_data[1 + 2 * i + 0]; 141 | double my = y + h * marker_data[1 + 2 * i + 1]; 142 | markers[i] = Point2f(mx, my); 143 | //if (!isnan(mx) && !isnan(my)) 144 | if(!std::isnan(mx) && !std::isnan(my)) 145 | { 146 | rectangle(img, Rect(mx - 5, my - 5, 10, 10), Scalar(255, 255, 0)); 147 | n_visible_markers++; 148 | } 149 | } 150 | 151 | string n_visible_str = to_string(n_visible_markers); 152 | cv::putText(img, "visible markers = " + n_visible_str, Point2i(x + 0.5 * w - 150, y + h - 240), FONT_HERSHEY_SIMPLEX, 1, Scalar(255, 170, 0), 2); 153 | 154 | 155 | // image space rectangle 156 | if (n_visible_markers == 4) 157 | { 158 | for (int i = 0; i < 4; i++) 159 | { 160 | line(img, markers[i], markers[(i + 1) % 4], Scalar(0, 155, 255)); 161 | } 162 | } 163 | 164 | // draw gaze point in scene cam coordinates 165 | if (eye_data.size() > 0) 166 | { 167 | auto& v = eye_data.back(); 168 | circle(img, Point2f(x + w * v[LT_GAZE_X], y + h * v[LT_GAZE_Y]), 5, Scalar(100, 100, 100), 1); 169 | } 170 | 171 | 172 | // border 173 | rectangle(img, Rect(x, y, w, h), Scalar(125, 125, 125)); 174 | 175 | return n_visible_markers; 176 | } 177 | 178 | // Sdl_opencv sdl; // vsynced alternative to opencv imshow 179 | 180 | /* // code for vertically synchronized rendering using libSDL 181 | // render part 182 | img_screen_background.copyTo(img_screen); 183 | // draw to screen (vsynced flip) 184 | sdl.imshow(img_screen,100,100); 185 | if (sdl.waitKey().sym == SDLK_ESCAPE) { break; } 186 | */ 187 | -------------------------------------------------------------------------------- /src/deps/aruco/marker.h: -------------------------------------------------------------------------------- 1 | /** 2 | Copyright 2017 Rafael Muñoz Salinas. All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without modification, are 5 | permitted provided that the following conditions are met: 6 | 7 | 1. 
Redistributions of source code must retain the above copyright notice, this list of 8 | conditions and the following disclaimer. 9 | 10 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 11 | of conditions and the following disclaimer in the documentation and/or other materials 12 | provided with the distribution. 13 | 14 | THIS SOFTWARE IS PROVIDED BY Rafael Muñoz Salinas ''AS IS'' AND ANY EXPRESS OR IMPLIED 15 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 16 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Rafael Muñoz Salinas OR 17 | CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 18 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 19 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 20 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 21 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 22 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 | 24 | The views and conclusions contained in the software and documentation are those of the 25 | authors and should not be interpreted as representing official policies, either expressed 26 | or implied, of Rafael Muñoz Salinas. 27 | */ 28 | 29 | #ifndef _Aruco_Marker_H 30 | #define _Aruco_Marker_H 31 | 32 | #include "aruco_export.h" 33 | 34 | #include 35 | 36 | #include 37 | #include 38 | #include 39 | 40 | namespace aruco 41 | { 42 | /**\brief This class represents a marker. It is a vector of the fours corners ot the marker 43 | * 44 | */ 45 | 46 | class CameraParameters; 47 | class ARUCO_EXPORT Marker : public std::vector 48 | { 49 | public: 50 | // id of the marker 51 | int id; 52 | // size of the markers sides in meters 53 | float ssize; 54 | // matrices of rotation and translation respect to the camera 55 | cv::Mat Rvec, Tvec; 56 | //additional info about the dictionary 57 | std::string dict_info; 58 | //points of the contour 59 | vector contourPoints; 60 | 61 | /** 62 | */ 63 | Marker(); 64 | /** 65 | */ 66 | Marker(int id); 67 | /** 68 | */ 69 | Marker(const Marker& M); 70 | /** 71 | */ 72 | Marker(const std::vector& corners, int _id = -1); 73 | /** 74 | */ 75 | ~Marker() 76 | { 77 | } 78 | /**Indicates if this object is valid 79 | */ 80 | bool isValid() const 81 | { 82 | return id != -1 && size() == 4; 83 | } 84 | 85 | bool isPoseValid()const{return !Rvec.empty() && !Tvec.empty();} 86 | /**Draws this marker in the input image 87 | */ 88 | void draw(cv::Mat& in, cv::Scalar color=cv::Scalar(0,0,255), int lineWidth = -1, bool writeId = true,bool writeInfo=false) const; 89 | 90 | /**Calculates the extrinsics (Rvec and Tvec) of the marker with respect to the camera 91 | * @param markerSize size of the marker side expressed in meters 92 | * @param CP parmeters of the camera 93 | * @param setYPerpendicular If set the Y axis will be perpendicular to the surface. 
Otherwise, it will be the Z 94 | * axis 95 | */ 96 | void calculateExtrinsics(float markerSize, const CameraParameters& CP, 97 | bool setYPerpendicular = true); 98 | /**Calculates the extrinsics (Rvec and Tvec) of the marker with respect to the camera 99 | * @param markerSize size of the marker side expressed in meters 100 | * @param CameraMatrix matrix with camera parameters (fx,fy,cx,cy) 101 | * @param Distorsion matrix with distorsion parameters (k1,k2,p1,p2) 102 | * @param setYPerpendicular If set the Y axis will be perpendicular to the surface. Otherwise, it will be the Z 103 | * axis 104 | */ 105 | void calculateExtrinsics(float markerSize, cv::Mat CameraMatrix, cv::Mat Distorsion = cv::Mat(), 106 | bool setYPerpendicular = true); 107 | 108 | /**Given the extrinsic camera parameters returns the GL_MODELVIEW matrix for opengl. 109 | * Setting this matrix, the reference coordinate system will be set in this marker 110 | */ 111 | void glGetModelViewMatrix(double modelview_matrix[16]); 112 | 113 | /** 114 | * Returns position vector and orientation quaternion for an Ogre scene node or entity. 115 | * Use: 116 | * ... 117 | * Ogre::Vector3 ogrePos (position[0], position[1], position[2]); 118 | * Ogre::Quaternion ogreOrient (orientation[0], orientation[1], orientation[2], orientation[3]); 119 | * mySceneNode->setPosition( ogrePos ); 120 | * mySceneNode->setOrientation( ogreOrient ); 121 | * ... 122 | */ 123 | void OgreGetPoseParameters(double position[3], double orientation[4]); 124 | 125 | /**Returns the centroid of the marker 126 | */ 127 | cv::Point2f getCenter() const; 128 | /**Returns the perimeter of the marker 129 | */ 130 | float getPerimeter() const; 131 | /**Returns the area 132 | */ 133 | float getArea() const; 134 | /**Returns radius of enclosing circle 135 | */ 136 | float getRadius()const; 137 | /**compares ids 138 | */ 139 | bool operator==(const Marker& m) const 140 | { 141 | return m.id == id; 142 | } 143 | 144 | void copyTo(Marker &m) const; 145 | /**compares ids 146 | */ 147 | Marker & operator=(const Marker& m) ; 148 | 149 | /** 150 | */ 151 | friend bool operator<(const Marker& M1, const Marker& M2) 152 | { 153 | return M1.id < M2.id; 154 | } 155 | /** 156 | */ 157 | friend std::ostream& operator<<(std::ostream& str, const Marker& M){ 158 | str << M.id << "="; 159 | for (int i = 0; i < 4; i++) 160 | str << "(" << M[i].x << "," << M[i].y << ") "; 161 | if( !M.Tvec.empty() && !M.Rvec.empty()){ 162 | str << "Txyz="; 163 | for (int i = 0; i < 3; i++) 164 | str << M.Tvec.ptr(0)[i] << " "; 165 | str << "Rxyz="; 166 | for (int i = 0; i < 3; i++) 167 | str << M.Rvec.ptr(0)[i] << " "; 168 | } 169 | return str; 170 | } 171 | 172 | 173 | // saves to a binary stream 174 | void toStream(std::ostream& str) const; 175 | // reads from a binary stream 176 | void fromStream(std::istream& str); 177 | 178 | // returns the 3d points of a marker wrt its center 179 | static vector get3DPoints(float msize); 180 | //returns the 3d points of this marker wrt its center 181 | inline vector get3DPoints()const{ 182 | return get3DPoints(ssize); 183 | } 184 | 185 | //returns the SE3 (4x4) transform matrix 186 | 187 | cv::Mat getTransformMatrix()const; 188 | private: 189 | void rotateXAxis(cv::Mat& rotation); 190 | }; 191 | } 192 | #endif 193 | -------------------------------------------------------------------------------- /src/deps/aruco/posetracker.h: -------------------------------------------------------------------------------- 1 | /** 2 | Copyright 2017 Rafael Muñoz Salinas. 
All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without modification, are 5 | permitted provided that the following conditions are met: 6 | 7 | 1. Redistributions of source code must retain the above copyright notice, this list of 8 | conditions and the following disclaimer. 9 | 10 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 11 | of conditions and the following disclaimer in the documentation and/or other materials 12 | provided with the distribution. 13 | 14 | THIS SOFTWARE IS PROVIDED BY Rafael Muñoz Salinas ''AS IS'' AND ANY EXPRESS OR IMPLIED 15 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 16 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Rafael Muñoz Salinas OR 17 | CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 18 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 19 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 20 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 21 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 22 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 | 24 | The views and conclusions contained in the software and documentation are those of the 25 | authors and should not be interpreted as representing official policies, either expressed 26 | or implied, of Rafael Muñoz Salinas. 27 | */ 28 | 29 | #ifndef ARUCO_POSETRACKER 30 | #define ARUCO_POSETRACKER 31 | 32 | #include "aruco_export.h" 33 | #include "cameraparameters.h" 34 | #include "marker.h" 35 | #include "markermap.h" 36 | 37 | #include 38 | #include 39 | 40 | namespace aruco 41 | { 42 | /**Tracks the position of a marker. Instead of trying to calculate the position from scratch everytime, it uses past 43 | * observations to 44 | * estimate the pose. It should solve the problem with ambiguities that arises in some circumstances 45 | * 46 | * To solve ambiguity we follow the following idea. We are using the IPPE method, which returns the two possible 47 | * solutions s0,s1. 48 | * Error solution has a reprojection error e(s_i) and it is assumed that e(s0)& p3d, const std::vector& p2d, 98 | const cv::Mat& cam_matrix, const cv::Mat& dist, cv::Mat& r_io, cv::Mat& t_io); 99 | }; 100 | /**Tracks the position of a markermap 101 | */ 102 | 103 | class ARUCO_EXPORT MarkerMapPoseTracker 104 | { 105 | public: 106 | MarkerMapPoseTracker(); 107 | // Sets the parameters required for operation 108 | // If the msconf has data expressed in meters, then the markerSize parameter is not required. If it is in 109 | // pixels, the markersize will be used to 110 | // transform to meters 111 | // Throws exception if wrong configuraiton 112 | void setParams(const CameraParameters& cam_params, const MarkerMap& msconf, 113 | float markerSize = -1); 114 | // indicates if the call to setParams has been successfull and this object is ready to call estimatePose 115 | bool isValid() const 116 | { 117 | return _isValid; 118 | } 119 | 120 | //resets current state 121 | void reset(){ 122 | _isValid=false; 123 | _rvec=cv::Mat(); 124 | _tvec=cv::Mat(); 125 | } 126 | 127 | // estimates camera pose wrt the markermap 128 | // returns true if pose has been obtained and false otherwise 129 | bool estimatePose(const std::vector& v_m); 130 | 131 | // returns the 4x4 transform matrix. 
Returns an empty matrix if last call to estimatePose returned false 132 | cv::Mat getRTMatrix() const; 133 | // return the rotation vector. Returns an empty matrix if last call to estimatePose returned false 134 | const cv::Mat getRvec() const 135 | { 136 | return _rvec; 137 | } 138 | // return the translation vector. Returns an empty matrix if last call to estimatePose returned false 139 | const cv::Mat getTvec() const 140 | { 141 | return _tvec; 142 | } 143 | //prevents from big jumps. If the difference between current and previous positions are greater than the value indicated 144 | //assumes no good tracking and the pose will be set as null 145 | void setMaxTrackingDifference(float maxTranslation,float maxAngle){ 146 | _maxTranslation=maxTranslation; 147 | _maxAngle=maxAngle; 148 | } 149 | 150 | private: 151 | cv::Mat _rvec, _tvec; // current poses 152 | aruco::CameraParameters _cam_params; 153 | MarkerMap _msconf; 154 | std::map _map_mm; 155 | bool _isValid; 156 | cv::Mat relocalization(const std::vector& v_m); 157 | float aruco_minerrratio_valid;/*tau_e in paper*/ 158 | std::map marker_m2g;//for each marker, the transform from the global ref system to the marker ref system 159 | float _maxTranslation=-1,_maxAngle=-1; 160 | }; 161 | }; 162 | 163 | #endif 164 | -------------------------------------------------------------------------------- /src/deps/aruco/markermap.h: -------------------------------------------------------------------------------- 1 | /** 2 | Copyright 2017 Rafael Muñoz Salinas. All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without modification, are 5 | permitted provided that the following conditions are met: 6 | 7 | 1. Redistributions of source code must retain the above copyright notice, this list of 8 | conditions and the following disclaimer. 9 | 10 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 11 | of conditions and the following disclaimer in the documentation and/or other materials 12 | provided with the distribution. 13 | 14 | THIS SOFTWARE IS PROVIDED BY Rafael Muñoz Salinas ''AS IS'' AND ANY EXPRESS OR IMPLIED 15 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 16 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Rafael Muñoz Salinas OR 17 | CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 18 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 19 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 20 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 21 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 22 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 | 24 | The views and conclusions contained in the software and documentation are those of the 25 | authors and should not be interpreted as representing official policies, either expressed 26 | or implied, of Rafael Muñoz Salinas. 
27 | */ 28 | 29 | #ifndef _Aruco_MarkerMap_h 30 | #define _Aruco_MarkerMap_h 31 | 32 | #include "aruco_export.h" 33 | #include "marker.h" 34 | 35 | #include 36 | 37 | #include 38 | #include 39 | 40 | namespace aruco 41 | { 42 | /** 43 | * 3d representation of a marker 44 | */ 45 | class ARUCO_EXPORT Marker3DInfo 46 | { 47 | public: 48 | std::vector points; 49 | int id; // maker id 50 | 51 | Marker3DInfo(); 52 | Marker3DInfo(int _id); 53 | inline bool operator==(const Marker3DInfo& MI) 54 | { 55 | return id == MI.id; 56 | } 57 | 58 | // returns the distance of the marker side 59 | inline float getMarkerSize() const 60 | { 61 | return static_cast(cv::norm(points[0] - points[1])); 62 | } 63 | inline cv::Point3f at(size_t idx)const{return points.at(idx);} 64 | inline cv::Point3f & operator[](size_t idx) {return points[idx];} 65 | inline const cv::Point3f & operator[](size_t idx)const {return points[idx];} 66 | inline void push_back(const cv::Point3f &p){ points.push_back(p);} 67 | inline size_t size()const{return points.size();} 68 | 69 | public: 70 | 71 | inline void toStream(std::ostream& str) 72 | { 73 | str << id << " " << size() << " "; 74 | for (size_t i = 0; i < size(); i++) 75 | str << at(i).x << " " << at(i).y << " " << at(i).z << " "; 76 | } 77 | inline void fromStream(std::istream& str) 78 | { 79 | int s; 80 | str >> id >> s; 81 | points.resize(s); 82 | for (size_t i = 0; i < size(); i++) 83 | str >> points[i].x >> points[i].y >> points[i].z; 84 | } 85 | 86 | }; 87 | 88 | /**\brief This class defines a set of markers whose locations are attached to a common reference system, i.e., they 89 | * do not move wrt each other. 90 | * A MarkerMap contains several markers so that they are more robustly detected. 91 | * 92 | * A MarkerMap is only a list of the id of the markers along with the position of their corners. 93 | * A MarkerMap may have information about the dictionary the markers belongs to @see getDictionary() 94 | * 95 | * The position of the corners can be specified either in pixels (in a non-specific size) or in meters. 96 | * The first is the typical case in which you generate the image of board and the print it. Since you do not know 97 | * in advance the real 98 | * size of the markers, their corners are specified in pixels, and then, the translation to meters can be made once 99 | * you know the real size. 100 | * 101 | * On the other hand, you may want to have the information of your boards in meters. The MarkerMap allows you to do 102 | * so. 103 | * 104 | * The point is in the mInfoType variable. It can be either PIX or METERS according to your needs. 
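 *
 * A typical conversion step (illustrative sketch; the file name "map.yml" and the
 * 0.04 m marker side are placeholder values):
 *   aruco::MarkerMap mmap("map.yml");
 *   if (mmap.isExpressedInPixels())
 *       mmap = mmap.convertToMeters(0.04f);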

/**\brief This class defines a set of markers whose locations are attached to a common reference system, i.e., they
 * do not move with respect to each other.
 * A MarkerMap contains several markers so that they are detected more robustly.
 *
 * A MarkerMap is simply a list of marker ids along with the positions of their corners.
 * A MarkerMap may have information about the dictionary the markers belong to @see getDictionary()
 *
 * The position of the corners can be specified either in pixels (in a non-specific size) or in meters.
 * The former is the typical case in which you generate the image of a board and then print it. Since you do not
 * know the real size of the markers in advance, their corners are specified in pixels; the translation to meters
 * can be done once you know the real size.
 *
 * On the other hand, you may want to have the information of your boards in meters. The MarkerMap allows you to do
 * so.
 *
 * Which of the two is used is indicated by the mInfoType variable: it is either PIX or METERS according to your needs.
 */

class ARUCO_EXPORT MarkerMap : public std::vector<Marker3DInfo>
{
public:
    /**
     */
    MarkerMap();

    /**Loads from file
     * @param filePath to the config file
     */
    MarkerMap(std::string filePath);

    /**Indicates if the corners are expressed in meters
     */
    bool isExpressedInMeters() const
    {
        return mInfoType == METERS;
    }
    /**Indicates if the corners are expressed in pixels
     */
    bool isExpressedInPixels() const
    {
        return mInfoType == PIX;
    }
    /**Converts the board into meters
     */
    MarkerMap convertToMeters(float markerSize) const;

    // simple way of knowing which elements detected in an image are from this markermap
    // returns the indices of the elements in the vector 'markers' that belong to this set
    // Example: The set has the elements with ids 10,21,31,41,92
    // The input vector has the markers with ids 10,88,9,12,41
    // function returns {0,4}, because element 0 (10) of the vector belongs to the set, and also element 4 (41)
    // belongs to the set
    std::vector<int> getIndices(const std::vector<Marker>& markers) const;

    /**Returns the info of the marker with the id specified. If not in the set, throws an exception
     */
    const Marker3DInfo& getMarker3DInfo(int id) const;

    /**Returns the index of the marker (in this object) with the id indicated, if it is in the vector
     */
    int getIndexOfMarkerId(int id) const;
    /**Fills the passed list with the ids of the markers in this map
     */
    void getIdList(std::vector<int>& ids, bool append = true) const;

    /**Returns an image of this map to be printed. This object must be in pixels @see isExpressedInPixels(). If
     * not, please provide the METER2PIX conversion parameter
     */
    cv::Mat getImage(float METER2PIX = 0) const;

    /**Saves the board info to a file
     */
    void saveToFile(std::string sfile);
    /**Reads board info from a file
     */
    void readFromFile(std::string sfile);

    // calculates the camera location w.r.t. the map using the information provided
    // returns the rotation and translation vectors (Rvec, Tvec) of the camera
    std::pair<cv::Mat, cv::Mat> calculateExtrinsics(const std::vector<Marker>& markers, float markerSize,
                                                    cv::Mat CameraMatrix, cv::Mat Distorsion);

    // returns a string indicating the dictionary
    inline std::string getDictionary() const
    {
        return dictionary;
    }

    enum Marker3DInfoType
    {
        NONE = -1,
        PIX = 0,
        METERS = 1
    };  // indicates if the marker corner data is expressed in meters or in pixels so that the conversion can be done internally

    // sets the dictionary the markers belong to
    void setDictionary(std::string d)
    {
        dictionary = d;
    }

    // indicates if the marker corner data is expressed in meters or in pixels so that the conversion can be done
    // internally
    int mInfoType;

private:
    // dictionary it belongs to (if any)
    std::string dictionary;

private:
    /**Saves the board info to a file
     */
    void saveToFile(cv::FileStorage& fs);
    /**Reads board info from a file
     */
    void readFromFile(cv::FileStorage& fs);

public:
    void toStream(std::ostream& str);
    void fromStream(std::istream& str);
};
}

#endif
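// Minimal usage sketch for MarkerMap: load a map, make sure it is expressed in meters and
// estimate the camera pose from the markers detected in one frame. The file name
// "markermap.yml", the 0.04 m marker size and the detected-marker vector are placeholders,
// and `cam` is assumed to be a calibrated aruco::CameraParameters object.
//
//   aruco::MarkerMap mmap("markermap.yml");
//   if (!mmap.isExpressedInMeters())
//       mmap = mmap.convertToMeters(0.04f);               // real side length of the printed markers
//
//   std::vector<aruco::Marker> detected = /* markers found in the current frame */;
//   std::vector<int> indices = mmap.getIndices(detected);  // which detections belong to this map
//
//   auto pose = mmap.calculateExtrinsics(detected, 0.04f, cam.CameraMatrix, cam.Distorsion);
//   cv::Mat rvec = pose.first;   // rotation of the camera w.r.t. the map
//   cv::Mat tvec = pose.second;  // translation of the camera w.r.t. the map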
--------------------------------------------------------------------------------
/src/deps/aruco/cvdrawingutils.cpp:
--------------------------------------------------------------------------------
/**
Copyright 2017 Rafael Muñoz Salinas. All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice, this list
of conditions and the following disclaimer in the documentation and/or other materials
provided with the distribution.

THIS SOFTWARE IS PROVIDED BY Rafael Muñoz Salinas ''AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Rafael Muñoz Salinas OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

The views and conclusions contained in the software and documentation are those of the
authors and should not be interpreted as representing official policies, either expressed
or implied, of Rafael Muñoz Salinas.
*/
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "cvdrawingutils.h"
#include "cameraparameters.h"
using namespace cv;
namespace aruco
{
void CvDrawingUtils::draw3dAxis(cv::Mat& Image, const CameraParameters& CP, const cv::Mat& Rvec,
                                const cv::Mat& Tvec, float axis_size)
{
    Mat objectPoints(4, 3, CV_32FC1);
    objectPoints.at<float>(0, 0) = 0;
    objectPoints.at<float>(0, 1) = 0;
    objectPoints.at<float>(0, 2) = 0;
    objectPoints.at<float>(1, 0) = axis_size;
    objectPoints.at<float>(1, 1) = 0;
    objectPoints.at<float>(1, 2) = 0;
    objectPoints.at<float>(2, 0) = 0;
    objectPoints.at<float>(2, 1) = axis_size;
    objectPoints.at<float>(2, 2) = 0;
    objectPoints.at<float>(3, 0) = 0;
    objectPoints.at<float>(3, 1) = 0;
    objectPoints.at<float>(3, 2) = axis_size;

    std::vector<cv::Point2f> imagePoints;
    cv::projectPoints(objectPoints, Rvec, Tvec, CP.CameraMatrix, CP.Distorsion, imagePoints);
    // draw lines of different colours
    cv::line(Image, imagePoints[0], imagePoints[1], Scalar(0, 0, 255, 255), 1);
    cv::line(Image, imagePoints[0], imagePoints[2], Scalar(0, 255, 0, 255), 1);
    cv::line(Image, imagePoints[0], imagePoints[3], Scalar(255, 0, 0, 255), 1);
    putText(Image, "x", imagePoints[1], FONT_HERSHEY_SIMPLEX, 0.6, Scalar(0, 0, 255, 255), 2);
    putText(Image, "y", imagePoints[2], FONT_HERSHEY_SIMPLEX, 0.6, Scalar(0, 255, 0, 255), 2);
    putText(Image, "z", imagePoints[3], FONT_HERSHEY_SIMPLEX, 0.6, Scalar(255, 0, 0, 255), 2);
}
/****
 *
 *
 *
 ****/
void CvDrawingUtils::draw3dAxis(cv::Mat& Image, Marker& m, const CameraParameters& CP, int lineSize)
{
    float size = m.ssize * 0.6f;
    Mat objectPoints(4, 3, CV_32FC1);
    objectPoints.at<float>(0, 0) = 0;
    objectPoints.at<float>(0, 1) = 0;
    objectPoints.at<float>(0, 2) = 0;
    objectPoints.at<float>(1, 0) = size;
    objectPoints.at<float>(1, 1) = 0;
    objectPoints.at<float>(1, 2) = 0;
    objectPoints.at<float>(2, 0) = 0;
    objectPoints.at<float>(2, 1) = size;
    objectPoints.at<float>(2, 2) = 0;
    objectPoints.at<float>(3, 0) = 0;
    objectPoints.at<float>(3, 1) = 0;
    objectPoints.at<float>(3, 2) = size;

    std::vector<cv::Point2f> imagePoints;
    cv::projectPoints(objectPoints, m.Rvec, m.Tvec, CP.CameraMatrix, CP.Distorsion, imagePoints);
    // draw lines of different colours
    cv::line(Image, imagePoints[0], imagePoints[1], Scalar(0, 0, 255, 255), lineSize);
    cv::line(Image, imagePoints[0], imagePoints[2], Scalar(0, 255, 0, 255), lineSize);
    cv::line(Image, imagePoints[0], imagePoints[3], Scalar(255, 0, 0, 255), lineSize);
    putText(Image, "x", imagePoints[1], FONT_HERSHEY_SIMPLEX, 0.6, Scalar(0, 0, 255, 255), 2);
    putText(Image, "y", imagePoints[2], FONT_HERSHEY_SIMPLEX, 0.6, Scalar(0, 255, 0, 255), 2);
    putText(Image, "z", imagePoints[3], FONT_HERSHEY_SIMPLEX, 0.6, Scalar(255, 0, 0, 255), 2);
}

/****
 *
 *
 *
 ****/
void CvDrawingUtils::draw3dCube(cv::Mat& Image, Marker& m, const CameraParameters& CP, int lineSize, bool setYperpendicular)
{
    Mat objectPoints(8, 3, CV_32FC1);
    float halfSize = m.ssize / 2.f;

    if (setYperpendicular)
    {
        objectPoints.at<float>(0, 0) = -halfSize;
        objectPoints.at<float>(0, 1) = 0;
        objectPoints.at<float>(0, 2) = -halfSize;
        objectPoints.at<float>(1, 0) = halfSize;
        objectPoints.at<float>(1, 1) = 0;
        objectPoints.at<float>(1, 2) = -halfSize;
        objectPoints.at<float>(2, 0) = halfSize;
        objectPoints.at<float>(2, 1) = 0;
        objectPoints.at<float>(2, 2) = halfSize;
        objectPoints.at<float>(3, 0) = -halfSize;
        objectPoints.at<float>(3, 1) = 0;
        objectPoints.at<float>(3, 2) = halfSize;

        objectPoints.at<float>(4, 0) = -halfSize;
        objectPoints.at<float>(4, 1) = m.ssize;
        objectPoints.at<float>(4, 2) = -halfSize;
        objectPoints.at<float>(5, 0) = halfSize;
        objectPoints.at<float>(5, 1) = m.ssize;
        objectPoints.at<float>(5, 2) = -halfSize;
        objectPoints.at<float>(6, 0) = halfSize;
        objectPoints.at<float>(6, 1) = m.ssize;
        objectPoints.at<float>(6, 2) = halfSize;
        objectPoints.at<float>(7, 0) = -halfSize;
        objectPoints.at<float>(7, 1) = m.ssize;
        objectPoints.at<float>(7, 2) = halfSize;
    }
    else
    {
        objectPoints.at<float>(0, 0) = -halfSize;
        objectPoints.at<float>(0, 1) = -halfSize;
        objectPoints.at<float>(0, 2) = 0;
        objectPoints.at<float>(1, 0) = halfSize;
        objectPoints.at<float>(1, 1) = -halfSize;
        objectPoints.at<float>(1, 2) = 0;
        objectPoints.at<float>(2, 0) = halfSize;
        objectPoints.at<float>(2, 1) = halfSize;
        objectPoints.at<float>(2, 2) = 0;
        objectPoints.at<float>(3, 0) = -halfSize;
        objectPoints.at<float>(3, 1) = halfSize;
        objectPoints.at<float>(3, 2) = 0;

        objectPoints.at<float>(4, 0) = -halfSize;
        objectPoints.at<float>(4, 1) = -halfSize;
        objectPoints.at<float>(4, 2) = m.ssize;
        objectPoints.at<float>(5, 0) = halfSize;
        objectPoints.at<float>(5, 1) = -halfSize;
        objectPoints.at<float>(5, 2) = m.ssize;
        objectPoints.at<float>(6, 0) = halfSize;
        objectPoints.at<float>(6, 1) = halfSize;
        objectPoints.at<float>(6, 2) = m.ssize;
        objectPoints.at<float>(7, 0) = -halfSize;
        objectPoints.at<float>(7, 1) = halfSize;
        objectPoints.at<float>(7, 2) = m.ssize;
    }

    std::vector<cv::Point2f> imagePoints;
    projectPoints(objectPoints, m.Rvec, m.Tvec, CP.CameraMatrix, CP.Distorsion, imagePoints);
    // draw lines of different colours
    for (int i = 0; i < 4; i++)
        cv::line(Image, imagePoints[i], imagePoints[(i + 1) % 4], Scalar(0, 0, 255, 255), lineSize);

    for (int i = 0; i < 4; i++)
        cv::line(Image, imagePoints[i + 4], imagePoints[4 + (i + 1) % 4], Scalar(0, 0, 255, 255), lineSize);

    for (int i = 0; i < 4; i++)
        cv::line(Image, imagePoints[i], imagePoints[i + 4], Scalar(0, 0, 255, 255), lineSize);
}
}
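// Usage sketch (assuming, as in upstream ArUco, that these helpers are static members of
// CvDrawingUtils): `frame` is the image the marker was detected in, `m` a detected
// aruco::Marker whose pose (m.Rvec / m.Tvec) has already been estimated, and `cam` a valid
// aruco::CameraParameters object.
//
//   aruco::CvDrawingUtils::draw3dAxis(frame, m, cam, 2);         // axes drawn with 2 px lines
//   aruco::CvDrawingUtils::draw3dCube(frame, m, cam, 2, false);  // cube extruded along the marker's z axis
//
// Both functions only draw into `frame`: draw3dAxis labels the x/y/z axes in red, green and
// blue (BGR Scalars), while draw3dCube draws all twelve cube edges in red.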
--------------------------------------------------------------------------------
/vstudio/lsl_client_example.vcxproj:
--------------------------------------------------------------------------------
<!-- Visual Studio project for the stand-alone LSL client example (MSBuild XML; the element
     markup was stripped in this plain-text export, so only the recoverable settings are
     summarised here):
     * VCProjectVersion 16.0, ProjectGuid {5D517B9F-A94B-453A-B967-938207231405},
       Keyword Win32Proj, RootNamespace lslclientexample, WindowsTargetPlatformVersion 10.0
     * Configurations Debug|Win32, Release|Win32, Debug|x64 and Release|x64, each built as an
       Application with the v142 toolset and the Unicode character set
     * Output / intermediate directories $(SolutionDir)$(Platform)\$(Configuration)_client\ and
       $(Platform)\$(Configuration)_client\
     * Compiler: WarningLevel Level3; MaxSpeed optimisation with NDEBUG;_CONSOLE defines for the
       release configurations, optimisation disabled with _DEBUG;_CONSOLE for the debug ones;
       additional include directories $(MY_LIB_DIR), $(MY_LIB_DIR)\SDL2\include,
       $(MY_LIB_DIR)\opencv43\build\include\ and $(MY_LIB_DIR)\labstreaminglayer\LSL\liblsl\include\
       (the 64-bit debug configuration uses opencv41 and adds $(MY_LIB_DIR)\eigen3\)
     * Linker: Console subsystem, additional library directory $(MY_LIB_DIR), output file
       $(OutDir)$(TargetName)$(TargetExt) -->
--------------------------------------------------------------------------------
/src/deps/aruco/dictionary.h:
--------------------------------------------------------------------------------
/**
Copyright 2017 Rafael Muñoz Salinas. All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice, this list
of conditions and the following disclaimer in the documentation and/or other materials
provided with the distribution.

THIS SOFTWARE IS PROVIDED BY Rafael Muñoz Salinas ''AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Rafael Muñoz Salinas OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

The views and conclusions contained in the software and documentation are those of the
authors and should not be interpreted as representing official policies, either expressed
or implied, of Rafael Muñoz Salinas.
*/

#ifndef ARUCO_DICTIONARY_
#define ARUCO_DICTIONARY_

#include "aruco_export.h"

#include <opencv2/core/core.hpp>

#include <cstdint>
#include <map>
#include <string>
#include <utility>
#include <vector>

namespace aruco
{
class MarkerMap;
/**Represents a set of valid marker ids with a maximum size of 8x8 = 64 bits.
 * In our approach, markers are seen as a code-id pair. The code is the internal binary code printed on the marker.
 * Maximum size is 8x8 bits.
 * The id is a smaller number you can use to identify it. You will normally use only the id.
 *
 * See enum DICT_TYPES for the set of available dictionaries
 */

class ARUCO_EXPORT Dictionary
{
public:
    // the set of predefined dictionaries that can be loaded
    enum DICT_TYPES :
        // (previous bit-mask values, kept for reference)
        // uint64_t{
        // ARUCO_MIP_36h12=0x8, //*** recommended
        // ARUCO=0x1, // original aruco dictionary. By default
        // ARUCO_MIP_25h7=0x2,
        // ARUCO_MIP_16h3=0x4,
        // ARTAG=0x10,
        // ARTOOLKITPLUS=0x20,
        // ARTOOLKITPLUSBCH=0x40,
        // TAG16h5=0x80,
        // TAG25h7=0x100,
        // TAG25h9=0x200,
        // TAG36h11=0x400,
        // TAG36h10=0x800, // april tags
        // CHILITAGS=0x1000, // chili tags dictionary. NOT RECOMMENDED. It has distance 0. Markers 806 and 682 should not be used!!!
        // CUSTOM=0x4000, // for user defined dictionaries (using loadFromFile).
        // ALL_DICTS=0xFFFF
        // };

        uint64_t
    {
        ALL_DICTS = 0,
        ARUCO_MIP_36h12 = 1,  //*** recommended
        ARUCO = 2,            // original aruco dictionary. By default
        ARUCO_MIP_25h7 = 3,
        ARUCO_MIP_16h3 = 4,
        ARTAG = 5,
        ARTOOLKITPLUS = 6,
        ARTOOLKITPLUSBCH = 7,
        TAG16h5 = 8,
        TAG25h7 = 9,
        TAG25h9 = 10,
        TAG36h11 = 11,
        TAG36h10 = 12,   // april tags
        CHILITAGS = 13,  // chili tags dictionary. NOT RECOMMENDED: it has distance 0; markers 806 and 682 should not be used!!!
        CUSTOM = 14,     // for user defined dictionaries (using loadFromFile)
    };
    // indicates if a code is in the dictionary
    bool is(uint64_t code) const
    {
        return _code_id.find(code) != _code_id.end();
    }

    DICT_TYPES getType() const
    {
        return _type;
    }

    // returns the number of ids
    uint64_t size() const
    {
        return _code_id.size();
    }
    // returns the total number of bits of the binary code
    uint32_t nbits() const
    {
        return _nbits;
    }
    // returns the dictionary distance
    uint32_t tau() const
    {
        return _tau;
    }
    // returns the name
    std::string getName() const
    {
        return _name;
    }
    // returns the code -> id map
    const std::map<uint64_t, int>& getMapCode() const
    {
        return _code_id;
    }

    // returns the id of a given code.
    int operator[](uint64_t code)
    {
        return _code_id[code];
    }
    // returns the id of a given code.
    int at(uint64_t code)
    {
        return _code_id[code];
    }

    // returns the image of the marker with the indicated id. If the id is not in the dictionary, returns an empty matrix
    //@param id of the marker image to return
    //@param bit_size the image will be AxA, A=(nbits()+2)*bit_size
    //@param enclosed_corners if true, extra rectangles are added touching the marker corners. It can be used to
    // allow subpixel refinement
    cv::Mat getMarkerImage_id(int id, int bit_size, bool addWaterMark = true, bool enclosed_corners = false,
                              bool printExternalWhiteBorder = false, bool centralCircle = false);

    // used for boards
    MarkerMap createMarkerMap(cv::Size gridSize, int MarkerSize, int MarkerDistance, const std::vector<int>& Ids,
                              bool chess_board = false);

    static Dictionary loadPredefined(DICT_TYPES type);
    static Dictionary loadPredefined(std::string type);

    /** loads a dictionary defined in a file
     * Please note that the parsing is very basic and you must be very strict.
     *
     * Here is an example of a 3x3 dictionary of 3 markers
     * 010 111 000
     * 001 101 001
     * 001 010 100
     *
     *
     * File: myown.dict
     *-------------------------------------------
     * name MYOWN
     * nbits 9
     * 010001001
     * 111101010
     * 000001100
     */
    static Dictionary loadFromFile(std::string path);
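    /* Usage sketch for loadFromFile with the myown.dict example above; the output file name
     * and the bit_size of 16 are arbitrary, and ids are assumed to be assigned in file order
     * starting at 0 (std::cout needs <iostream>, cv::imwrite needs <opencv2/imgcodecs.hpp>):
     *
     *   aruco::Dictionary dict = aruco::Dictionary::loadFromFile("myown.dict");
     *   std::cout << dict.getName() << ": " << dict.size() << " markers, "
     *             << dict.nbits() << " bits, tau = " << dict.tau() << std::endl;
     *   cv::Mat img = dict.getMarkerImage_id(0, 16);   // empty matrix if the id is unknown
     *   if (!img.empty())
     *       cv::imwrite("myown_00000.png", img);
     */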

    /**Loads a dictionary using the string passed. If the string names one of the predefined dictionaries,
     * returns it.
     * Otherwise, tries to load it from a file
     */
    static Dictionary load(std::string info);

    //    //io functions
    //    void saveToFile(std::string file);
    //    void readFromFile(std::string file);
    //    void saveToStream(std::ostream & str);
    //    void readFromStream(std::istream &str);

    // returns the dictionary distance
    static uint64_t computeDictionaryDistance(const Dictionary& d);

    // given a string, returns the type
    static DICT_TYPES getTypeFromString(std::string str);
    static std::string getTypeString(DICT_TYPES t);
    static bool isPredefinedDictinaryString(std::string str);
    static std::vector<std::string> getDicTypes();

private:
    //obfuscate start

    void insert(uint64_t code, int id)
    {
        _code_id.insert(std::make_pair(code, id));
    }
    static void fromVector(const std::vector<uint64_t>& codes, std::map<uint64_t, int>& code_id_map);

    std::map<uint64_t, int> _code_id;  // each marker has a code (its internal binary code),
                                       // which corresponds to an id

    uint32_t _nbits;  // total number of bits; there are sqrt(nbits) bits along each axis
    uint32_t _tau;    // minimum distance between elements

    DICT_TYPES _type;
    std::string _name;
    //obfuscate end
};
}

#endif
--------------------------------------------------------------------------------
/src/deps/cpu_features/cpu_x86.cpp:
--------------------------------------------------------------------------------
/* cpu_x86.cpp
 *
 * Author : Alexander J. Yee
 * Date Created : 04/12/2014
 * Last Modified : 04/12/2014
 *
 */

////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Dependencies
#include <cstring>
#include <iostream>
#include "cpu_x86.h"
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
#if defined(__x86_64__) || defined(_M_X64) || defined(__i386) || defined(_M_IX86)
#   if _WIN32
#       include "cpu_x86_Windows.ipp"
#   elif defined(__GNUC__) || defined(__clang__)
#       include "cpu_x86_Linux.ipp"
#   else
#       error "No cpuid intrinsic defined for compiler."
#   endif
#else
#   error "No cpuid intrinsic defined for processor architecture."
#endif

namespace cpu_feature_detector{
using std::cout;
using std::endl;
using std::memcpy;
using std::memset;
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
void cpu_x86::print(const char* label, bool yes){
    cout << label;
    cout << (yes ?
"Yes" : "No") << endl; 45 | } 46 | //////////////////////////////////////////////////////////////////////////////// 47 | //////////////////////////////////////////////////////////////////////////////// 48 | //////////////////////////////////////////////////////////////////////////////// 49 | //////////////////////////////////////////////////////////////////////////////// 50 | cpu_x86::cpu_x86(){ 51 | memset(this, 0, sizeof(*this)); 52 | } 53 | bool cpu_x86::detect_OS_AVX(){ 54 | // Copied from: http://stackoverflow.com/a/22521619/922184 55 | 56 | bool avxSupported = false; 57 | 58 | int cpuInfo[4]; 59 | cpuid(cpuInfo, 1); 60 | 61 | bool osUsesXSAVE_XRSTORE = (cpuInfo[2] & (1 << 27)) != 0; 62 | bool cpuAVXSuport = (cpuInfo[2] & (1 << 28)) != 0; 63 | 64 | if (osUsesXSAVE_XRSTORE && cpuAVXSuport) 65 | { 66 | uint64_t xcrFeatureMask = xgetbv(_XCR_XFEATURE_ENABLED_MASK); 67 | avxSupported = (xcrFeatureMask & 0x6) == 0x6; 68 | } 69 | 70 | return avxSupported; 71 | } 72 | bool cpu_x86::detect_OS_AVX512(){ 73 | if (!detect_OS_AVX()) 74 | return false; 75 | 76 | uint64_t xcrFeatureMask = xgetbv(_XCR_XFEATURE_ENABLED_MASK); 77 | return (xcrFeatureMask & 0xe6) == 0xe6; 78 | } 79 | std::string cpu_x86::get_vendor_string(){ 80 | int32_t CPUInfo[4]; 81 | char name[13]; 82 | 83 | cpuid(CPUInfo, 0); 84 | memcpy(name + 0, &CPUInfo[1], 4); 85 | memcpy(name + 4, &CPUInfo[3], 4); 86 | memcpy(name + 8, &CPUInfo[2], 4); 87 | name[12] = '\0'; 88 | 89 | return name; 90 | } 91 | //////////////////////////////////////////////////////////////////////////////// 92 | //////////////////////////////////////////////////////////////////////////////// 93 | //////////////////////////////////////////////////////////////////////////////// 94 | //////////////////////////////////////////////////////////////////////////////// 95 | void cpu_x86::detect_host(){ 96 | // OS Features 97 | OS_x64 = detect_OS_x64(); 98 | OS_AVX = detect_OS_AVX(); 99 | OS_AVX512 = detect_OS_AVX512(); 100 | 101 | // Vendor 102 | std::string vendor(get_vendor_string()); 103 | if (vendor == "GenuineIntel"){ 104 | Vendor_Intel = true; 105 | }else if (vendor == "AuthenticAMD"){ 106 | Vendor_AMD = true; 107 | } 108 | 109 | int info[4]; 110 | cpuid(info, 0); 111 | int nIds = info[0]; 112 | 113 | cpuid(info, 0x80000000); 114 | uint32_t nExIds = info[0]; 115 | 116 | // Detect Features 117 | if (nIds >= 0x00000001){ 118 | cpuid(info, 0x00000001); 119 | HW_MMX = (info[3] & ((int)1 << 23)) != 0; 120 | HW_SSE = (info[3] & ((int)1 << 25)) != 0; 121 | HW_SSE2 = (info[3] & ((int)1 << 26)) != 0; 122 | HW_SSE3 = (info[2] & ((int)1 << 0)) != 0; 123 | 124 | HW_SSSE3 = (info[2] & ((int)1 << 9)) != 0; 125 | HW_SSE41 = (info[2] & ((int)1 << 19)) != 0; 126 | HW_SSE42 = (info[2] & ((int)1 << 20)) != 0; 127 | HW_AES = (info[2] & ((int)1 << 25)) != 0; 128 | 129 | HW_AVX = (info[2] & ((int)1 << 28)) != 0; 130 | HW_FMA3 = (info[2] & ((int)1 << 12)) != 0; 131 | 132 | HW_RDRAND = (info[2] & ((int)1 << 30)) != 0; 133 | } 134 | if (nIds >= 0x00000007){ 135 | cpuid(info, 0x00000007); 136 | HW_AVX2 = (info[1] & ((int)1 << 5)) != 0; 137 | 138 | HW_BMI1 = (info[1] & ((int)1 << 3)) != 0; 139 | HW_BMI2 = (info[1] & ((int)1 << 8)) != 0; 140 | HW_ADX = (info[1] & ((int)1 << 19)) != 0; 141 | HW_MPX = (info[1] & ((int)1 << 14)) != 0; 142 | HW_SHA = (info[1] & ((int)1 << 29)) != 0; 143 | HW_PREFETCHWT1 = (info[2] & ((int)1 << 0)) != 0; 144 | 145 | HW_AVX512_F = (info[1] & ((int)1 << 16)) != 0; 146 | HW_AVX512_CD = (info[1] & ((int)1 << 28)) != 0; 147 | HW_AVX512_PF = (info[1] & ((int)1 << 26)) != 
0; 148 | HW_AVX512_ER = (info[1] & ((int)1 << 27)) != 0; 149 | HW_AVX512_VL = (info[1] & ((int)1 << 31)) != 0; 150 | HW_AVX512_BW = (info[1] & ((int)1 << 30)) != 0; 151 | HW_AVX512_DQ = (info[1] & ((int)1 << 17)) != 0; 152 | HW_AVX512_IFMA = (info[1] & ((int)1 << 21)) != 0; 153 | HW_AVX512_VBMI = (info[2] & ((int)1 << 1)) != 0; 154 | } 155 | if (nExIds >= 0x80000001){ 156 | cpuid(info, 0x80000001); 157 | HW_x64 = (info[3] & ((int)1 << 29)) != 0; 158 | HW_ABM = (info[2] & ((int)1 << 5)) != 0; 159 | HW_SSE4a = (info[2] & ((int)1 << 6)) != 0; 160 | HW_FMA4 = (info[2] & ((int)1 << 16)) != 0; 161 | HW_XOP = (info[2] & ((int)1 << 11)) != 0; 162 | } 163 | } 164 | void cpu_x86::print() const{ 165 | cout << "CPU Vendor:" << endl; 166 | print(" AMD = ", Vendor_AMD); 167 | print(" Intel = ", Vendor_Intel); 168 | cout << endl; 169 | 170 | cout << "OS Features:" << endl; 171 | #ifdef _WIN32 172 | print(" 64-bit = ", OS_x64); 173 | #endif 174 | print(" OS AVX = ", OS_AVX); 175 | print(" OS AVX512 = ", OS_AVX512); 176 | cout << endl; 177 | 178 | cout << "Hardware Features:" << endl; 179 | print(" MMX = ", HW_MMX); 180 | print(" x64 = ", HW_x64); 181 | print(" ABM = ", HW_ABM); 182 | print(" RDRAND = ", HW_RDRAND); 183 | print(" BMI1 = ", HW_BMI1); 184 | print(" BMI2 = ", HW_BMI2); 185 | print(" ADX = ", HW_ADX); 186 | print(" MPX = ", HW_MPX); 187 | print(" PREFETCHWT1 = ", HW_PREFETCHWT1); 188 | cout << endl; 189 | 190 | cout << "SIMD: 128-bit" << endl; 191 | print(" SSE = ", HW_SSE); 192 | print(" SSE2 = ", HW_SSE2); 193 | print(" SSE3 = ", HW_SSE3); 194 | print(" SSSE3 = ", HW_SSSE3); 195 | print(" SSE4a = ", HW_SSE4a); 196 | print(" SSE4.1 = ", HW_SSE41); 197 | print(" SSE4.2 = ", HW_SSE42); 198 | print(" AES-NI = ", HW_AES); 199 | print(" SHA = ", HW_SHA); 200 | cout << endl; 201 | 202 | cout << "SIMD: 256-bit" << endl; 203 | print(" AVX = ", HW_AVX); 204 | print(" XOP = ", HW_XOP); 205 | print(" FMA3 = ", HW_FMA3); 206 | print(" FMA4 = ", HW_FMA4); 207 | print(" AVX2 = ", HW_AVX2); 208 | cout << endl; 209 | 210 | cout << "SIMD: 512-bit" << endl; 211 | print(" AVX512-F = ", HW_AVX512_F); 212 | print(" AVX512-CD = ", HW_AVX512_CD); 213 | print(" AVX512-PF = ", HW_AVX512_PF); 214 | print(" AVX512-ER = ", HW_AVX512_ER); 215 | print(" AVX512-VL = ", HW_AVX512_VL); 216 | print(" AVX512-BW = ", HW_AVX512_BW); 217 | print(" AVX512-DQ = ", HW_AVX512_DQ); 218 | print(" AVX512-IFMA = ", HW_AVX512_IFMA); 219 | print(" AVX512-VBMI = ", HW_AVX512_VBMI); 220 | cout << endl; 221 | 222 | cout << "Summary:" << endl; 223 | print(" Safe to use AVX: ", HW_AVX && OS_AVX); 224 | print(" Safe to use AVX512: ", HW_AVX512_F && OS_AVX512); 225 | cout << endl; 226 | } 227 | void cpu_x86::print_host(){ 228 | cpu_x86 features; 229 | features.detect_host(); 230 | features.print(); 231 | } 232 | //////////////////////////////////////////////////////////////////////////////// 233 | //////////////////////////////////////////////////////////////////////////////// 234 | //////////////////////////////////////////////////////////////////////////////// 235 | //////////////////////////////////////////////////////////////////////////////// 236 | } 237 | --------------------------------------------------------------------------------
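A typical way to use the detector above is to gate vectorised code paths once at program start. The following is a minimal sketch under the assumption that the HW_* / OS_* feature flags are public members, as in the upstream FeatureDetector header; run_avx2_path() and run_scalar_path() are hypothetical placeholders for whatever implementations the caller provides.

#include <iostream>
#include "cpu_x86.h"

static void run_avx2_path()   { std::cout << "using the AVX2 code path\n"; }
static void run_scalar_path() { std::cout << "using the scalar fallback\n"; }

int main()
{
    cpu_feature_detector::cpu_x86 features;
    features.detect_host();   // runs cpuid/xgetbv and fills the HW_* / OS_* flags

    // Both the hardware flag and the OS support flag must be set, mirroring the
    // "Safe to use AVX" summary computed in cpu_x86::print().
    if (features.HW_AVX2 && features.OS_AVX)
        run_avx2_path();
    else
        run_scalar_path();

    features.print();         // optional: dump the full feature report to stdout
    return 0;
}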