├── .gitignore ├── Dockerfile ├── LICENSE ├── README.md ├── code ├── .gitignore ├── CMakeLists.txt ├── comparison.h ├── erosdirect.h ├── erosplus.h ├── image_processing.h ├── main.cpp └── projection.h ├── configCAR.ini ├── configDRAGON.ini ├── configGELATIN.ini ├── configMUSTARD.ini ├── configPOTTED.ini ├── configSUGAR.ini ├── configTOMATO.ini ├── docker-compose.yaml ├── objects ├── car │ ├── model.obj │ ├── model.obj.mtl │ └── model_tex.png ├── dragon │ ├── dragon.obj │ ├── dragon.obj.mtl │ └── dragon.png ├── gelatin2 │ ├── texture_map.png │ ├── textured.obj │ └── textured.obj.mtl ├── mustard │ ├── texture_map.png │ ├── textured.obj │ └── textured.obj.mtl ├── potted │ ├── texture_map.png │ ├── textured.obj │ └── textured.obj.mtl ├── sugar │ ├── texture_map.png │ ├── textured.obj │ └── textured.obj.mtl └── tomato │ ├── texture_map.png │ ├── textured.obj │ └── textured.obj.mtl ├── scripts ├── extractGTfromJson.py ├── frames_to_events.py ├── plotObjects.py └── plotResults.py ├── superimpose.patch └── superimposeroi.patch /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode 2 | build/ 3 | scripts/bimvee 4 | results 5 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | #FROM ubuntu:20.04 2 | FROM nvidia/opengl:1.2-glvnd-devel-ubuntu20.04 3 | ENV DEBIAN_FRONTEND noninteractive 4 | ENV NVIDIA_VISIBLE_DEVICES all 5 | ENV NVIDIA_DRIVER_CAPABILITIES graphics,utility,compute 6 | 7 | ARG CODE_DIR=/usr/local/src 8 | 9 | RUN apt update 10 | 11 | #basic environment 12 | RUN apt install -y \ 13 | ca-certificates \ 14 | build-essential \ 15 | git \ 16 | cmake \ 17 | cmake-curses-gui \ 18 | libace-dev \ 19 | libassimp-dev \ 20 | libglew-dev \ 21 | libglfw3-dev \ 22 | libglm-dev \ 23 | libeigen3-dev 24 | 25 | # Suggested dependencies for YARP 26 | RUN apt update && apt install -y \ 27 | qtbase5-dev 
qtdeclarative5-dev qtmultimedia5-dev \ 28 | qml-module-qtquick2 qml-module-qtquick-window2 \ 29 | qml-module-qtmultimedia qml-module-qtquick-dialogs \ 30 | qml-module-qtquick-controls qml-module-qt-labs-folderlistmodel \ 31 | qml-module-qt-labs-settings \ 32 | libqcustomplot-dev \ 33 | libgraphviz-dev \ 34 | libjpeg-dev \ 35 | libgstreamer1.0-dev libgstreamer-plugins-base1.0-dev \ 36 | gstreamer1.0-plugins-base \ 37 | gstreamer1.0-plugins-good \ 38 | gstreamer1.0-plugins-bad \ 39 | gstreamer1.0-libav 40 | 41 | RUN echo "deb [arch=amd64 trusted=yes] https://apt.prophesee.ai/dists/public/b4b3528d/ubuntu focal sdk" >> /etc/apt/sources.list &&\ 42 | apt update 43 | 44 | RUN apt install -y \ 45 | libcanberra-gtk-module \ 46 | mesa-utils \ 47 | ffmpeg \ 48 | libboost-program-options-dev \ 49 | libopencv-dev \ 50 | metavision-sdk 51 | 52 | #my favourites 53 | RUN apt update && apt install -y \ 54 | vim \ 55 | gdb 56 | 57 | # Github CLI 58 | RUN (type -p wget >/dev/null || (apt update && apt-get install wget -y)) \ 59 | && mkdir -p -m 755 /etc/apt/keyrings \ 60 | && wget -qO- https://cli.github.com/packages/githubcli-archive-keyring.gpg | tee /etc/apt/keyrings/githubcli-archive-keyring.gpg > /dev/null \ 61 | && chmod go+r /etc/apt/keyrings/githubcli-archive-keyring.gpg \ 62 | && echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | tee /etc/apt/sources.list.d/github-cli.list > /dev/null \ 63 | && apt update \ 64 | && apt install gh -y 65 | 66 | # YCM 67 | ARG YCM_VERSION=v0.15.2 68 | RUN cd $CODE_DIR && \ 69 | git clone --depth 1 --branch $YCM_VERSION https://github.com/robotology/ycm.git && \ 70 | cd ycm && \ 71 | mkdir build && cd build && \ 72 | cmake .. 
&& \ 73 | make -j `nproc` install 74 | 75 | 76 | # YARP 77 | ARG YARP_VERSION=v3.8.0 78 | RUN cd $CODE_DIR && \ 79 | git clone --depth 1 --branch $YARP_VERSION https://github.com/robotology/yarp.git &&\ 80 | cd yarp &&\ 81 | mkdir build && cd build &&\ 82 | cmake .. &&\ 83 | make -j `nproc` install 84 | 85 | EXPOSE 10000/tcp 10000/udp 86 | RUN yarp check 87 | 88 | 89 | # event-driven 90 | ARG ED_VERSION=main 91 | RUN cd $CODE_DIR &&\ 92 | git clone --depth 1 --branch $ED_VERSION https://github.com/robotology/event-driven.git &&\ 93 | cd event-driven &&\ 94 | mkdir build && cd build &&\ 95 | cmake .. &&\ 96 | make -j `nproc` install 97 | 98 | # object-tracking-six-dof 99 | 100 | ###################### 101 | # set github ssh keys # 102 | ####################### 103 | 104 | RUN apt install -y \ 105 | openssh-client git \ 106 | libmysqlclient-dev \ 107 | libsm6 libxext6 108 | 109 | # Authorize SSH Host 110 | RUN mkdir -p /root/.ssh && \ 111 | chmod 0700 /root/.ssh 112 | RUN ssh-keyscan github.com > /root/.ssh/known_hosts 113 | 114 | ARG GIT_BRANCH=main 115 | RUN cd $CODE_DIR &&\ 116 | git clone https://github.com/event-driven-robotics/EDOPT.git &&\ 117 | cd EDOPT &&\ 118 | git checkout $GIT_BRANCH 119 | 120 | # SUPERIMPOSEMESH 121 | ARG SIML_VERSION=devel 122 | RUN cd $CODE_DIR &&\ 123 | git clone --depth 1 --branch $SIML_VERSION https://github.com/robotology/superimpose-mesh-lib.git &&\ 124 | cd superimpose-mesh-lib &&\ 125 | git apply $CODE_DIR/EDOPT/superimposeroi.patch &&\ 126 | mkdir build && cd build &&\ 127 | cmake .. &&\ 128 | make -j `nproc` install 129 | 130 | # Build EDOPT 131 | RUN cd $CODE_DIR &&\ 132 | cd EDOPT/code &&\ 133 | mkdir build && cd build &&\ 134 | cmake .. 
&&\ 135 | make -j `nproc` -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2021, Event Driven Perception for Robotics 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | 2. Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | 3. Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
30 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # EDOPT: Event-camera 6-DoF Dynamic Object Pose Tracking 2 | 3 | Paper: https://ieeexplore.ieee.org/abstract/document/10611511 4 | 5 | Datasets: https://zenodo.org/records/10829647 6 | 7 | ``` 8 | @inproceedings{glover2024edopt, 9 | title={EDOPT: Event-camera 6-DoF Dynamic Object Pose Tracking}, 10 | author={Glover, Arren and Gava, Luna and Li, Zhichao and Bartolozzi, Chiara}, 11 | booktitle={2024 IEEE International Conference on Robotics and Automation (ICRA)}, 12 | pages={18200--18206}, 13 | year={2024}, 14 | organization={IEEE} 15 | } 16 | ``` 17 | 18 | Assumes a known object model 19 | 20 | Three simultaneous computations: 21 | 22 | * EROS 23 | * model projection 24 | * state estimation 25 | 26 | ### Build the docker using: 27 | 28 | ``` 29 | cd EDOPT 30 | docker build -t edopt:latest . 31 | 32 | ## if you want to build another remote branch for debugging ... 33 | docker build -t edopt:latest --build-arg GIT_BRANCH=your/specified/remote/branch . 34 | ``` 35 | ### Make and enter the container using: 36 | 37 | ``` 38 | docker run -it --privileged -v /dev/bus/usb:/dev/bus/usb -v /tmp/.X11-unix/:/tmp/.X11-unix -e DISPLAY=unix$DISPLAY --network host --gpus all --name edopt edopt:latest 39 | ``` 40 | or 41 | ``` 42 | docker compose up -d 43 | docker exec -it edopt /bin/bash 44 | ``` 45 | ### How to build EDOPT 46 | Terminal 1 (on docker container) 47 | ``` 48 | mkdir -p /usr/local/src/EDOPT/code/build 49 | cd /usr/local/src/EDOPT/code/build 50 | cmake .. 
51 | make 52 | ``` 53 | 54 | ### How to run EDOPT 55 | Terminal 1 (on docker container) 56 | ``` 57 | yarpserver 58 | ``` 59 | 60 | Terminal 2 (on docker container) 61 | ``` 62 | ## if you do not have yarp config 63 | yarp config {YOUR YARP IPADDRESS} {PORT} 64 | yarp namespace {NAMESPACE} 65 | yarp detect --write 66 | ## Run atis-bridge-sdk to receive event stream 67 | atis-bridge-sdk --s 50 68 | ``` 69 | 70 | Terminal 3 (on docker container) 71 | ``` 72 | cd /usr/local/src/EDOPT/code/build 73 | ./edopt 74 | ``` 75 | 76 | ### For development from docker container 77 | Solution for Git Authentication 78 | 79 | Terminal 1 (on docker container) 80 | ``` 81 | gh auth login 82 | ``` 83 | - Then, select as follows 84 | - ? Where do you use GitHub? > GitHub.com 85 | - ? What is your preferred protocol for Git operations on this host? > HTTPS 86 | - ? How would you like to authenticate GitHub CLI? > Login with a web browser 87 | - ! First copy your one-time code: XXXX-XXX 88 | - Open this link [https://github.com/login/device](https://github.com/login/device) 89 | - In the web browser, input one-time code to login -------------------------------------------------------------------------------- /code/.gitignore: -------------------------------------------------------------------------------- 1 | build 2 | .vscode 3 | code.code-workspace 4 | -------------------------------------------------------------------------------- /code/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # requires minimum cmake version 2 | cmake_minimum_required(VERSION 3.5) 3 | 4 | # produce the cmake var PROJECT_NAME 5 | project(edopt) 6 | 7 | if(NOT CMAKE_BUILD_TYPE) 8 | set_property(CACHE CMAKE_BUILD_TYPE PROPERTY VALUE "Release") 9 | endif() 10 | 11 | #include(GNUInstallDirs) 12 | 13 | # mandatory use of these packages 14 | find_package(YCM REQUIRED) 15 | find_package(MetavisionSDK COMPONENTS core driver QUIET) 16 | find_package(SuperimposeMesh 
REQUIRED) 17 | find_package(OpenCV REQUIRED) 18 | find_package(YARP COMPONENTS os REQUIRED) 19 | find_package(event-driven REQUIRED) 20 | 21 | #include(AddInstallRPATHSupport) 22 | #add_install_rpath_support(BIN_DIRS "${CMAKE_INSTALL_FULL_BINDIR}" 23 | # LIB_DIRS "${CMAKE_INSTALL_FULL_LIBDIR}" 24 | # INSTALL_NAME_DIR "${CMAKE_INSTALL_FULL_LIBDIR}" 25 | # USE_LINK_PATH) 26 | 27 | add_executable(${PROJECT_NAME} main.cpp image_processing.h projection.h erosdirect.h comparison.h) 28 | target_compile_features(${PROJECT_NAME} PRIVATE cxx_std_17) 29 | #target_compile_options(sixdofobject PRIVATE -Werror -Wall -Wextra) 30 | target_link_libraries(${PROJECT_NAME} PRIVATE YARP_init 31 | YARP_os 32 | event-driven 33 | ${OpenCV_LIBS} 34 | MetavisionSDK::core 35 | MetavisionSDK::driver 36 | SI::SuperimposeMesh) 37 | 38 | install(TARGETS ${PROJECT_NAME} DESTINATION ${CMAKE_INSTALL_BINDIR}) 39 | 40 | -------------------------------------------------------------------------------- /code/comparison.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include "projection.h" 6 | 7 | // from the interaction matrix and current depth calculate the desired state 8 | // change to move the image by approximately 1 pixel. 
9 | 10 | //[du dv] = 11 | //[ fx/d, 0, -(u-cx)/d, -(u-cx)(v-cy)/fy, fx*fx+(u-cx)(u-cx)/fx, -(v-cy)fx/fy 12 | // 0, fy/d, -(v-cy)/d, -(fy*fy)-(v-cy)(v-cy)/fy, (u-cx)(v-cy)/fx, (u-cx)fy/fx] * 13 | 14 | // [dx, dy, dz, dalpha, dbeta, dgamma] 15 | 16 | class warpManager { 17 | 18 | public: 19 | 20 | //internal definitions 21 | enum cam_param_name{w,h,cx,cy,fx,fy}; 22 | enum warp_name{xp, yp, zp, ap, bp, cp, xn, yn, zn, an, bn, cn, 23 | xp2, yp2, zp2, ap2, bp2, cp2, xn2, yn2, zn2, an2, bn2, cn2}; 24 | enum axis_name{x=0,y=1,z=2,a=3,b=4,c=5}; 25 | typedef struct warp_bundle 26 | { 27 | cv::Mat M; 28 | cv::Mat rmp; 29 | cv::Mat rmsp; 30 | cv::Mat img_warp; 31 | int axis{0}; 32 | double delta{0.0}; 33 | double score{-DBL_MAX}; 34 | bool active{false}; 35 | } warp_bundle; 36 | 37 | //fixed parameters to set 38 | std::array cam; 39 | cv::Size proc_size{cv::Size(100, 100)}; 40 | 41 | //parameters that must be udpated externally 42 | double scale{1.0}; 43 | cv::Mat proc_obs; 44 | 45 | //internal variables 46 | warp_bundle projection; 47 | std::array warps; 48 | std::deque warp_history; 49 | cv::Mat prmx; 50 | cv::Mat prmy; 51 | cv::Mat nrmx; 52 | cv::Mat nrmy; 53 | 54 | //output 55 | std::array state_current; 56 | 57 | public: 58 | 59 | void initialise(int size_to_process, bool dp2) 60 | { 61 | proc_size = cv::Size(size_to_process, size_to_process); 62 | 63 | projection.axis = -1; 64 | projection.delta = 0; 65 | proc_obs = cv::Mat::zeros(proc_size, CV_32F); 66 | projection.img_warp = cv::Mat::zeros(proc_size, CV_32F); 67 | for(auto &warp : warps) { 68 | warp.img_warp = cv::Mat::zeros(proc_size, CV_32F); 69 | } 70 | 71 | prmx = cv::Mat::zeros(proc_size, CV_32F); 72 | prmy = cv::Mat::zeros(proc_size, CV_32F); 73 | nrmx = cv::Mat::zeros(proc_size, CV_32F); 74 | nrmy = cv::Mat::zeros(proc_size, CV_32F); 75 | 76 | warps[xp].active = warps[xn].active = true; 77 | warps[yp].active = warps[yn].active = true; 78 | warps[zp].active = warps[zn].active = true; 79 | warps[ap].active = 
warps[an].active = true; 80 | warps[bp].active = warps[bn].active = true; 81 | warps[cp].active = warps[cn].active = true; 82 | 83 | if (dp2) { 84 | warps[xp2].active = warps[xn2].active = true; 85 | warps[yp2].active = warps[yn2].active = true; 86 | warps[zp2].active = warps[zn2].active = true; 87 | warps[ap2].active = warps[an2].active = true; 88 | warps[bp2].active = warps[bn2].active = true; 89 | warps[cp2].active = warps[cn2].active = true; 90 | } 91 | } 92 | 93 | void create_m_x(double dp, warp_name p, warp_name n) 94 | { 95 | //x shift by dp 96 | for(int x = 0; x < proc_size.width; x++) { 97 | for(int y = 0; y < proc_size.height; y++) { 98 | //positive 99 | prmx.at(y, x) = x-dp; 100 | prmy.at(y, x) = y; 101 | //negative 102 | nrmx.at(y, x) = x+dp; 103 | nrmy.at(y, x) = y; 104 | } 105 | } 106 | cv::convertMaps(prmx, prmy, warps[p].rmp, warps[p].rmsp, CV_16SC2); 107 | cv::convertMaps(nrmx, nrmy, warps[n].rmp, warps[n].rmsp, CV_16SC2); 108 | warps[p].axis = x; warps[n].axis = x; 109 | warps[p].delta = dp; warps[n].delta = -dp; 110 | } 111 | 112 | void create_m_y(double dp, warp_name p, warp_name n) 113 | { 114 | for(int x = 0; x < proc_size.width; x++) { 115 | for(int y = 0; y < proc_size.height; y++) { 116 | //positive 117 | prmx.at(y, x) = x; 118 | prmy.at(y, x) = y-dp; 119 | //negative 120 | nrmx.at(y, x) = x; 121 | nrmy.at(y, x) = y+dp; 122 | } 123 | } 124 | cv::convertMaps(prmx, prmy, warps[p].rmp, warps[p].rmsp, CV_16SC2); 125 | cv::convertMaps(nrmx, nrmy, warps[n].rmp, warps[n].rmsp, CV_16SC2); 126 | warps[p].axis = y; warps[n].axis = y; 127 | warps[p].delta = dp; warps[n].delta = -dp; 128 | } 129 | 130 | void create_m_z(double dp, warp_name p, warp_name n) 131 | { 132 | double cy = proc_size.height * 0.5; 133 | double cx = proc_size.width * 0.5; 134 | for(int x = 0; x < proc_size.width; x++) { 135 | for(int y = 0; y < proc_size.height; y++) { 136 | double dx = -(x-cx) * dp / proc_size.width; 137 | double dy = -(y-cy) * dp / proc_size.height; 138 | 
//positive 139 | prmx.at(y, x) = x - dx; 140 | prmy.at(y, x) = y - dy; 141 | //negative 142 | nrmx.at(y, x) = x + dx; 143 | nrmy.at(y, x) = y + dy; 144 | } 145 | } 146 | cv::convertMaps(prmx, prmy, warps[p].rmp, warps[p].rmsp, CV_16SC2); 147 | cv::convertMaps(nrmx, nrmy, warps[n].rmp, warps[n].rmsp, CV_16SC2); 148 | warps[p].axis = z; warps[n].axis = z; 149 | warps[p].delta = dp / proc_size.width; 150 | warps[n].delta = -dp / proc_size.width; 151 | } 152 | 153 | void create_m_a(double dp, warp_name p, warp_name n) 154 | { 155 | double cy = proc_size.height * 0.5; 156 | double cx = proc_size.width * 0.5; 157 | double theta = M_PI_2 * dp / (proc_size.height * 0.5); 158 | for(int x = 0; x < proc_size.width; x++) { 159 | for(int y = 0; y < proc_size.height; y++) { 160 | double dy = -dp * cos(0.5 * M_PI * (y - cy) / (proc_size.height * 0.5)); 161 | //positive 162 | prmx.at(y, x) = x; 163 | prmy.at(y, x) = y - dy; 164 | //negative 165 | nrmx.at(y, x) = x; 166 | nrmy.at(y, x) = y + dy; 167 | } 168 | } 169 | cv::convertMaps(prmx, prmy, warps[p].rmp, warps[p].rmsp, CV_16SC2); 170 | cv::convertMaps(nrmx, nrmy, warps[n].rmp, warps[n].rmsp, CV_16SC2); 171 | warps[p].axis = a; warps[n].axis = a; 172 | warps[p].delta = theta; warps[n].delta = -theta; 173 | } 174 | 175 | void create_m_b(double dp, warp_name p, warp_name n) 176 | { 177 | double cy = proc_size.height * 0.5; 178 | double cx = proc_size.width * 0.5; 179 | double theta = M_PI_2 * dp / (proc_size.width * 0.5); 180 | 181 | for(int x = 0; x < proc_size.width; x++) { 182 | for(int y = 0; y < proc_size.height; y++) { 183 | double dx = -dp * cos(0.5 * M_PI * (x - cx) / (proc_size.width * 0.5)); 184 | //positive 185 | prmx.at(y, x) = x - dx; 186 | prmy.at(y, x) = y; 187 | //negative 188 | nrmx.at(y, x) = x + dx; 189 | nrmy.at(y, x) = y; 190 | } 191 | } 192 | cv::convertMaps(prmx, prmy, warps[p].rmp, warps[p].rmsp, CV_16SC2); 193 | cv::convertMaps(nrmx, nrmy, warps[n].rmp, warps[n].rmsp, CV_16SC2); 194 | warps[p].axis = b; 
warps[n].axis = b; 195 | warps[p].delta = theta; warps[n].delta = -theta; 196 | } 197 | 198 | void create_m_c(double dp, warp_name p, warp_name n) 199 | { 200 | double cy = proc_size.height * 0.5; 201 | double cx = proc_size.width * 0.5; 202 | double theta = atan2(dp, std::max(proc_size.width, proc_size.height)*0.5); 203 | for(int x = 0; x < proc_size.width; x++) { 204 | for(int y = 0; y < proc_size.height; y++) { 205 | double dx = -(y - cy) * cam[fx] / cam[fy] * theta; 206 | double dy = (x - cx) * cam[fy] / cam[fx] * theta; 207 | //positive 208 | prmx.at(y, x) = x - dx; 209 | prmy.at(y, x) = y - dy; 210 | //negative 211 | nrmx.at(y, x) = x + dx; 212 | nrmy.at(y, x) = y + dy; 213 | } 214 | } 215 | cv::convertMaps(prmx, prmy, warps[p].rmp, warps[p].rmsp, CV_16SC2); 216 | cv::convertMaps(nrmx, nrmy, warps[n].rmp, warps[n].rmsp, CV_16SC2); 217 | warps[p].axis = c; warps[n].axis = c; 218 | warps[p].delta = theta; warps[n].delta = -theta; 219 | } 220 | 221 | void create_Ms(double dp) 222 | { 223 | create_m_x(dp, xp, xn); 224 | create_m_y(dp, yp, yn); 225 | create_m_z(dp, zp, zn); 226 | create_m_a(dp, ap, an); 227 | create_m_b(dp, bp, bn); 228 | create_m_c(dp, cp, cn); 229 | 230 | create_m_x(dp*2, xp2, xn2); 231 | create_m_y(dp*2, yp2, yn2); 232 | create_m_z(dp*2, zp2, zn2); 233 | create_m_a(dp*2, ap2, an2); 234 | create_m_b(dp*2, bp2, bn2); 235 | create_m_c(dp*2, cp2, cn2); 236 | } 237 | 238 | void set_current(const std::array &state) 239 | { 240 | state_current = state; 241 | } 242 | 243 | void make_predictive_warps() 244 | { 245 | for(auto &warp : warps) { 246 | if(warp.axis == a || warp.axis == b) continue; 247 | if(warp.active) 248 | cv::remap(projection.img_warp, warp.img_warp, warp.rmp, warp.rmsp, cv::INTER_LINEAR); 249 | } 250 | } 251 | 252 | void warp_by_history(cv::Mat &image) 253 | { 254 | for(auto warp : warp_history) 255 | cv::remap(image, image, warp->rmp, warp->rmsp, cv::INTER_LINEAR); 256 | warp_history.clear(); 257 | } 258 | 259 | double 
similarity_score(const cv::Mat &observation, const cv::Mat &expectation) { 260 | static cv::Mat muld; 261 | muld = expectation.mul(observation); 262 | return cv::sum(cv::sum(muld))[0]; 263 | } 264 | 265 | void score_predictive_warps() 266 | { 267 | projection.score = similarity_score(proc_obs, projection.img_warp); 268 | projection.score = projection.score < 0 ? 0 : projection.score; 269 | for(auto &w : warps) 270 | if(w.active) w.score = similarity_score(proc_obs, w.img_warp); 271 | } 272 | 273 | void update_state(const warp_bundle &best) 274 | { 275 | double d = fabs(state_current[z]); 276 | switch(best.axis) { 277 | case(x): 278 | state_current[best.axis] += best.delta * scale * d / cam[fx]; 279 | break; 280 | case(y): 281 | state_current[best.axis] += best.delta * scale * d / cam[fy]; 282 | break; 283 | case(z): 284 | state_current[best.axis] += best.delta * d; 285 | break; 286 | case(a): 287 | perform_rotation(state_current, 0, best.delta); 288 | break; 289 | case(b): 290 | perform_rotation(state_current, 1, best.delta); 291 | break; 292 | case(c): 293 | perform_rotation(state_current, 2, best.delta); 294 | break; 295 | } 296 | //cv::remap(projection.img_warp, projection.img_warp, best.rmp, best.rmsp, cv::INTER_LINEAR); 297 | //warp_history.push_back(&best); 298 | } 299 | 300 | bool update_all_possible() 301 | { 302 | bool updated = false; 303 | for(auto &warp : warps) { 304 | if(warp.score > projection.score) { 305 | update_state(warp); 306 | updated = true; 307 | } 308 | } 309 | return updated; 310 | } 311 | 312 | bool update_from_max() 313 | { 314 | warp_bundle *best = &projection; 315 | for(auto &warp : warps) 316 | if(warp.score > best->score) 317 | best = &warp; 318 | 319 | if(best != &projection) { 320 | update_state(*best); 321 | return true; 322 | } 323 | return false; 324 | } 325 | 326 | bool update_heuristically() 327 | { 328 | bool updated = false; 329 | //best of x axis and roation around y (yaw) 330 | warp_bundle *best; 331 | best = &projection; 
332 | if (warps[xp].score > best->score) best = &warps[xp]; 333 | if (warps[xn].score > best->score) best = &warps[xn]; 334 | if (warps[bp].score > best->score) best = &warps[bp]; 335 | if (warps[bn].score > best->score) best = &warps[bn]; 336 | if (warps[xp2].score > best->score) best = &warps[xp2]; 337 | if (warps[xn2].score > best->score) best = &warps[xn2]; 338 | if (warps[bp2].score > best->score) best = &warps[bp2]; 339 | if (warps[bn2].score > best->score) best = &warps[bn2]; 340 | if(best != &projection) update_state(*best); 341 | updated = updated || best != &projection; 342 | 343 | //best of y axis and rotation around x (pitch) 344 | best = &projection; 345 | if (warps[yp].score > best->score) best = &warps[yp]; 346 | if (warps[yn].score > best->score) best = &warps[yn]; 347 | if (warps[ap].score > best->score) best = &warps[ap]; 348 | if (warps[an].score > best->score) best = &warps[an]; 349 | if (warps[yp2].score > best->score) best = &warps[yp2]; 350 | if (warps[yn2].score > best->score) best = &warps[yn2]; 351 | if (warps[ap2].score > best->score) best = &warps[ap2]; 352 | if (warps[an2].score > best->score) best = &warps[an2]; 353 | if(best != &projection) update_state(*best); 354 | updated = updated || best != &projection; 355 | 356 | //best of roll 357 | best = &projection; 358 | if (warps[cp].score > best->score) best = &warps[cp]; 359 | if (warps[cn].score > best->score) best = &warps[cn]; 360 | if (warps[cp2].score > best->score) best = &warps[cp2]; 361 | if (warps[cn2].score > best->score) best = &warps[cn2]; 362 | if(best != &projection) update_state(*best); 363 | updated = updated || best != &projection; 364 | 365 | //best of z 366 | best = &projection; 367 | if (warps[zp].score > best->score) best = &warps[zp]; 368 | if (warps[zn].score > best->score) best = &warps[zn]; 369 | if (warps[zp2].score > best->score) best = &warps[zp2]; 370 | if (warps[zn2].score > best->score) best = &warps[zn2]; 371 | if(best != &projection) update_state(*best); 
372 | updated = updated || best != &projection; 373 | 374 | return updated; 375 | } 376 | 377 | void score_overlay(double score, cv::Mat image) 378 | { 379 | if(score > cam[h]) score = cam[h]; 380 | if(score < 0.0) score = 0.0; 381 | for(int i = 0; i < cam[w]*0.05; i++) 382 | for(int j = 0; j < (int)score; j++) 383 | image.at(cam[h]-j-1, i) = 1.0; 384 | 385 | } 386 | 387 | cv::Mat create_translation_visualisation() 388 | { 389 | static cv::Mat joined = cv::Mat::zeros(cam[h]*3, cam[w]*3, CV_32F); 390 | static cv::Mat joined_scaled = cv::Mat::zeros(cam[h], cam[w], CV_32F); 391 | cv::Mat tile; 392 | int col = 0; int row = 0; 393 | 394 | for(auto &warp : warps) { 395 | if(!warp.active) continue; 396 | switch(warp.axis) { 397 | case(x): 398 | row = 1; 399 | col = warp.delta > 0 ? 2 : 0; 400 | break; 401 | case(y): 402 | col = 1; 403 | row = warp.delta > 0 ? 0 : 2; 404 | break; 405 | case(z): 406 | col = warp.delta > 0 ? 2 : 0; 407 | row = warp.delta > 0 ? 2 : 0; 408 | break; 409 | default: 410 | continue; 411 | } 412 | tile = joined(cv::Rect(cam[w] * col, cam[h] * row, cam[w], cam[h])); 413 | cv::resize(warp.img_warp, tile, tile.size()); 414 | score_overlay(warp.score, tile); 415 | } 416 | col = 1; 417 | row = 1; 418 | tile = joined(cv::Rect(cam[w] * col, cam[h] * row, cam[w], cam[h])); 419 | cv::resize(projection.img_warp, tile, tile.size()); 420 | score_overlay(projection.score, tile); 421 | 422 | col = 0; 423 | row = 2; 424 | tile = joined(cv::Rect(cam[w] * col, cam[h] * row, cam[w], cam[h])); 425 | cv::resize(proc_obs, tile, tile.size()); 426 | 427 | cv::resize(joined, joined_scaled, joined_scaled.size()); 428 | 429 | return joined_scaled; 430 | 431 | } 432 | 433 | cv::Mat create_rotation_visualisation() 434 | { 435 | static cv::Mat joined = cv::Mat::zeros(cam[h]*3, cam[w]*3, CV_32F); 436 | static cv::Mat joined_scaled = cv::Mat::zeros(cam[h], cam[w], CV_32F); 437 | cv::Mat tile; 438 | int col = 0; int row = 0; 439 | 440 | for(auto &warp : warps) { 441 | 
if(!warp.active) continue; 442 | switch(warp.axis) { 443 | case(b): 444 | row = 1; 445 | col = warp.delta > 0 ? 2 : 0; 446 | break; 447 | case(a): 448 | col = 1; 449 | row = warp.delta > 0 ? 0 : 2; 450 | break; 451 | case(c): 452 | col = warp.delta > 0 ? 2 : 0; 453 | row = warp.delta > 0 ? 2 : 0; 454 | break; 455 | default: 456 | continue; 457 | } 458 | tile = joined(cv::Rect(cam[w] * col, cam[h] * row, cam[w], cam[h])); 459 | cv::resize(warp.img_warp, tile, tile.size()); 460 | score_overlay(warp.score, tile); 461 | } 462 | col = 1; 463 | row = 1; 464 | tile = joined(cv::Rect(cam[w] * col, cam[h] * row, cam[w], cam[h])); 465 | cv::resize(projection.img_warp, tile, tile.size()); 466 | score_overlay(projection.score, tile); 467 | 468 | col = 0; 469 | row = 2; 470 | tile = joined(cv::Rect(cam[w] * col, cam[h] * row, cam[w], cam[h])); 471 | cv::resize(proc_obs, tile, tile.size()); 472 | 473 | cv::resize(joined, joined_scaled, joined_scaled.size()); 474 | 475 | return joined_scaled; 476 | 477 | } 478 | 479 | }; 480 | -------------------------------------------------------------------------------- /code/erosdirect.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | using namespace Metavision; 6 | #include 7 | using namespace yarp::os; 8 | #include 9 | #include 10 | 11 | #include "erosplus.h" 12 | 13 | class EROSdirect 14 | { 15 | public: 16 | Metavision::Camera cam; 17 | ev::EROS eros; 18 | ev::vNoiseFilter filter; 19 | cv::Size res; 20 | 21 | bool start(double sensitivity, double filter_value) 22 | { 23 | if(sensitivity < 0.0 || sensitivity > 1.0) 24 | { 25 | yError() << "sensitivity 0 < s < 1"; 26 | return false; 27 | } 28 | 29 | try { 30 | cam = Camera::from_first_available(); 31 | I_LL_Biases* bias_control = cam.biases().get_facility(); 32 | int diff_on = (66 - 350) * sensitivity + 650 - 66; 33 | int diff_off = (66 + 200) * sensitivity + 100 - 66; 34 | bias_control->set("bias_diff_on", 
diff_on); 35 | bias_control->set("bias_diff_off", diff_off); 36 | } catch(const std::exception &e) { 37 | yError() << "no camera :("; 38 | return false; 39 | } 40 | 41 | const Metavision::Geometry &geo = cam.geometry(); 42 | yInfo() << "[" << geo.width() << "x" << geo.height() << "]"; 43 | 44 | res = cv::Size(geo.width(), geo.height()); 45 | eros.init(res.width, res.height, 7, 0.3); 46 | filter.use_temporal_filter(filter_value); 47 | filter.initialise(res.width, res.height); 48 | 49 | cam.cd().add_callback([this](const Metavision::EventCD *ev_begin, const Metavision::EventCD *ev_end) { 50 | this->erosUpdate(ev_begin, ev_end); 51 | }); 52 | 53 | if (!cam.start()) { 54 | yError() << "Could not start the camera"; 55 | return false; 56 | } 57 | 58 | return true; 59 | } 60 | 61 | void stop() 62 | { 63 | cam.stop(); 64 | } 65 | 66 | void erosUpdate(const Metavision::EventCD *ev_begin, const Metavision::EventCD *ev_end) 67 | { 68 | double t = yarp::os::Time::now(); 69 | for(auto &v = ev_begin; v != ev_end; ++v) 70 | { 71 | if(filter.check(v->x, v->y, v->p, t)) 72 | eros.update(v->x, v->y); 73 | } 74 | } 75 | 76 | }; 77 | 78 | class EROSfromYARP 79 | { 80 | public: 81 | 82 | ev::window input_port; 83 | ev::EROS eros; 84 | std::thread eros_worker; 85 | double tic{-1}; 86 | cv::Mat event_image; 87 | 88 | void erosUpdate() 89 | { 90 | while (!input_port.isStopping()) { 91 | ev::info my_info = input_port.readAll(true); 92 | tic = my_info.timestamp; 93 | for(auto &v : input_port) { 94 | eros.update(v.x, v.y); 95 | if(v.p) 96 | event_image.at(v.y, v.x) = cv::Vec3b(255, 255, 255); 97 | else 98 | event_image.at(v.y, v.x) = cv::Vec3b(255, 0, 0); 99 | 100 | } 101 | } 102 | } 103 | 104 | public: 105 | bool start(cv::Size resolution, std::string sourcename, std::string portname, int k = 5, double d = 0.3) 106 | { 107 | eros.init(resolution.width, resolution.height, k, d); 108 | event_image = cv::Mat(resolution, CV_8UC3, cv::Vec3b(0, 0, 0)); 109 | 110 | if (!input_port.open(portname)) 
111 | return false; 112 | yarp::os::Network::connect(sourcename, portname, "fast_tcp"); 113 | 114 | eros_worker = std::thread([this]{erosUpdate();}); 115 | return true; 116 | } 117 | 118 | void stop() 119 | { 120 | input_port.stop(); 121 | eros_worker.join(); 122 | } 123 | 124 | }; 125 | 126 | class ARESfromYARP 127 | { 128 | public: 129 | 130 | ev::window input_port; 131 | erosplus eros; 132 | std::thread eros_worker; 133 | double tic{-1}; 134 | cv::Mat event_image; 135 | 136 | void erosUpdate() 137 | { 138 | while (!input_port.isStopping()) { 139 | ev::info my_info = input_port.readAll(true); 140 | tic = my_info.timestamp; 141 | for(auto &v : input_port) 142 | eros.update(v.x, v.y); 143 | } 144 | } 145 | 146 | public: 147 | bool start(cv::Size resolution, std::string sourcename, std::string portname, int k = 5, double d = 0.3) 148 | { 149 | eros.init(resolution.width, resolution.height, 7, 0.05, 0.003); 150 | event_image = cv::Mat(resolution, CV_8UC3, cv::Vec3b(0, 0, 0)); 151 | 152 | if (!input_port.open(portname)) 153 | return false; 154 | yarp::os::Network::connect(sourcename, portname, "fast_tcp"); 155 | 156 | eros_worker = std::thread([this]{erosUpdate();}); 157 | return true; 158 | } 159 | 160 | void stop() 161 | { 162 | input_port.stop(); 163 | eros_worker.join(); 164 | } 165 | 166 | }; -------------------------------------------------------------------------------- /code/erosplus.h: -------------------------------------------------------------------------------- 1 | class erosplus 2 | { 3 | private: 4 | 5 | cv::Mat surface; 6 | cv::Mat true_surface; 7 | int k_size{7}; 8 | double absorb{0.05}; 9 | double inject{0.01}; 10 | double balance{100.0}; 11 | 12 | public: 13 | void init(int width, int height, int kernel_size = 7, double absorb = 0.05, double inject = 0.003) 14 | { 15 | k_size = kernel_size % 2 == 0 ? 
kernel_size + 1 : kernel_size; 16 | this->absorb = absorb; 17 | this->inject = inject; 18 | surface = cv::Mat(height + k_size-1, width + k_size-1, CV_64F, cv::Scalar(10)); 19 | true_surface = surface({k_size/2, k_size/2, width, height}); 20 | } 21 | 22 | void update(int u, int v) 23 | { 24 | const int half_kernel = k_size / 2; /* fix: these four were 'static' locals, initialised once on the very first call — they kept stale values if init() was later called with a different kernel size or absorb rate, and were silently shared across erosplus instances and threads. Recomputing per call is trivially cheap. */ 25 | cv::Rect region = {0, 0, k_size, k_size}; 26 | cv::Mat region_mat; 27 | const double nabsorb = 1.0 - absorb; 28 | 29 | region.x = u; region.y = v; region_mat = surface(region); /* ROI header only — no pixel copy */ 30 | double& c = surface.at(v+half_kernel, u+half_kernel); 31 | if(c > balance*2.0) { 32 | region_mat *= 0.5; 33 | } else { 34 | double ca = 0.0; 35 | for(int x = 0; x < k_size; x++) { 36 | for(int y = 0; y < k_size; y++) { 37 | double& cc = region_mat.at(y, x); 38 | ca += cc * absorb; 39 | cc *= nabsorb; 40 | } 41 | } 42 | c += ca + balance*inject; 43 | } 44 | } 45 | 46 | cv::Mat& getSurface() 47 | { 48 | static cv::Mat ret; /* NOTE(review): 'ret' is shared by all erosplus instances and returned by reference — not thread-safe; confirm single-instance, single-reader use */ 49 | true_surface.convertTo(ret, CV_8U); 50 | return ret; 51 | } 52 | 53 | }; 54 | -------------------------------------------------------------------------------- /code/image_processing.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | class imageProcessing 6 | { 7 | public: 8 | //given parameters 9 | int blur{10}; 10 | cv::Size proc_size{cv::Size(100, 100)}; 11 | int canny_thresh{40}; 12 | double canny_scale{3}; 13 | 14 | //internal variables 15 | //for the projection 16 | cv::Rect img_roi; //roi on the image (square) 17 | cv::Rect proc_roi; //roi within the proc_proj 18 | 19 | //for the observation (copies for threading purposes) 20 | cv::Rect o_img_roi; //roi within the image 21 | cv::Rect o_proc_roi; //roi within the proc_obs 22 | 23 | //output 24 | double scale{1.0}; //final scale between process and image roi 25 | cv::Mat proc_proj; //final projection used to compare to 26 | cv::Mat proc_obs; //final eros to compare to 27 | 28 | void 
make_template(const cv::Mat &input, cv::Mat &output) { 29 | static cv::Mat canny_img, f, pos_hat, neg_hat; 30 | static cv::Size pblur(blur, blur); 31 | static cv::Size nblur(2*blur-1, 2*blur-1); 32 | static double minval, maxval; 33 | 34 | //cv::GaussianBlur(input, input, cv::Size(3, 3), 0); 35 | //cv::normalize(input, input, 0, 255, cv::NORM_MINMAX); 36 | //cv::Sobel(input, 37 | 38 | input.convertTo(canny_img, CV_32F, 0.003921569); 39 | cv::Sobel(canny_img, f, CV_32F, 1, 1); 40 | f = (cv::max(f, 0.0) + cv::max(-f, 0.0)); 41 | cv::minMaxLoc(f, &minval, &maxval); 42 | cv::threshold(f, f, maxval*0.05, 0, cv::THRESH_TRUNC); 43 | 44 | 45 | // cv::imshow("temp", f+0.5); 46 | //f.copyTo(output); 47 | // return; 48 | // cv::Canny(input, canny_img, canny_thresh, canny_thresh*canny_scale, 3); 49 | // canny_img.convertTo(f, CV_32F); 50 | 51 | cv::GaussianBlur(f, pos_hat, pblur, 0); 52 | cv::GaussianBlur(f, neg_hat, nblur, 0); 53 | output = pos_hat - neg_hat; 54 | cv::minMaxLoc(output, &minval, &maxval); 55 | double scale_factor = 1.0 / (2 * std::max(fabs(minval), fabs(maxval))); 56 | output *= scale_factor; 57 | } 58 | 59 | void process_eros(const cv::Mat &input, cv::Mat &output) { 60 | static cv::Mat eros_blurred1, eros_blurred2, eros_f; 61 | //cv::GaussianBlur(input, eros_blurred1, cv::Size(5, 5), 0); 62 | //cv::GaussianBlur(input, eros_blurred2, cv::Size(9, 9), 0); 63 | //eros_blurred1 = eros_blurred1 - eros_blurred2; 64 | //cv::normalize(eros_blurred, eros_blurred, 0, 255, cv::NORM_MINMAX); 65 | //cv::Canny(eros_blurred, eros_blurred, 240, 250, 3); 66 | cv::medianBlur(input, eros_blurred1, 3); 67 | cv::GaussianBlur(eros_blurred1, eros_blurred2, cv::Size(3, 3), 0); 68 | eros_blurred1.convertTo(output, CV_32F, 0.003921569); 69 | //cv::normalize(eros_f, eros_fn, 0.0, 1.0, cv::NORM_MINMAX); 70 | } 71 | 72 | public: 73 | 74 | void initialise(int process_size, int blur_size, int canny_thresh, double canny_scale) 75 | { 76 | this->canny_thresh = canny_thresh; 77 | 
this->canny_scale = canny_scale; 78 | blur = blur_size % 2 ? blur_size : blur_size + 1; 79 | proc_size = cv::Size(process_size, process_size); 80 | proc_proj = cv::Mat(proc_size, CV_32F); 81 | proc_obs = cv::Mat(proc_size, CV_32F); 82 | o_img_roi = o_proc_roi = proc_roi = img_roi = cv::Rect(cv::Point(0, 0), proc_size); 83 | } 84 | 85 | void set_projection_rois(const cv::Mat &projected, int buffer = 20) { 86 | 87 | static cv::Rect full_roi = cv::Rect(cv::Point(0, 0), projected.size()); 88 | 89 | // convert to grey 90 | // static cv::Mat grey; 91 | // cv::cvtColor(projected, grey, cv::COLOR_BGR2GRAY); 92 | 93 | // find the bounding rectangle and add some buffer 94 | img_roi = cv::boundingRect(projected); 95 | 96 | if(img_roi.width == 0) { 97 | img_roi = full_roi; 98 | return; 99 | } 100 | 101 | img_roi.x -= buffer; 102 | img_roi.y -= buffer; 103 | img_roi.width += buffer * 2; 104 | img_roi.height += buffer * 2; 105 | 106 | // limit the roi to the image space. 107 | img_roi = img_roi & full_roi; 108 | 109 | // find the process rois and the scale factor 110 | if (img_roi.width >= img_roi.height) { 111 | proc_roi.width = proc_size.width; 112 | proc_roi.x = 0; 113 | scale = (double)img_roi.width / proc_roi.width; 114 | double ratio = (double)img_roi.height / img_roi.width; 115 | proc_roi.height = proc_size.height * ratio; 116 | proc_roi.y = (proc_size.height - proc_roi.height) * 0.5; 117 | } else { 118 | proc_roi.height = proc_size.height; 119 | proc_roi.y = 0; 120 | scale = (double)img_roi.height / proc_roi.height; 121 | double ratio = (double)img_roi.width / img_roi.height; 122 | proc_roi.width = proc_size.width * ratio; 123 | proc_roi.x = (proc_size.width - proc_roi.width) * 0.5; 124 | } 125 | 126 | } 127 | 128 | void set_obs_rois_from_projected() 129 | { 130 | //projection roi's are separate from eros roi's so that projection 131 | //rois can be updated in parallel, and copied over when thread safe 132 | o_img_roi = img_roi; 133 | o_proc_roi = proc_roi; 134 | } 135 | 
136 | 137 | void setProcProj(const cv::Mat &image) 138 | { 139 | //the projection(roi) is resized to the process size and then processed 140 | static cv::Mat roi_rgb = cv::Mat::zeros(proc_size, CV_8UC1); 141 | roi_rgb = 0; 142 | cv::resize(image(img_roi), roi_rgb(proc_roi), proc_roi.size(), 0, 0, cv::INTER_NEAREST); 143 | make_template(roi_rgb, proc_proj); 144 | } 145 | 146 | void setProcObs(const cv::Mat &image) 147 | { 148 | //eros(roi) is processed as full image size and then resized 149 | //otherwise it has too many artefacts 150 | static cv::Mat roi_32f;// = cv::Mat::zeros(proc_size, CV_32F); 151 | proc_obs = 0; 152 | process_eros(image(o_img_roi), roi_32f); 153 | cv::resize(roi_32f, proc_obs(o_proc_roi), o_proc_roi.size(), 0, 0, cv::INTER_CUBIC); 154 | } 155 | 156 | // cv::Mat make_visualisation(cv::Mat full_obs) { 157 | // cv::Mat rgb_img, temp, temp8; 158 | // std::vector channels; 159 | // channels.resize(3); 160 | // channels[0] = cv::Mat::zeros(full_obs.size(), CV_8U); 161 | // //channels[1] = cv::Mat::zeros(full_obs.size(), CV_8U); 162 | // channels[2] = cv::Mat::zeros(full_obs.size(), CV_8U); 163 | // // green = events 164 | // full_obs.copyTo(channels[1]); 165 | // cv::Rect roi = img_roi; 166 | 167 | // // blue = positive space 168 | // cv::threshold(projection.img_warp(proc_roi), temp, 0.0, 0.5, cv::THRESH_TOZERO); 169 | // cv::resize(temp, temp, roi.size()); 170 | // temp.convertTo(channels[0](roi), CV_8U, 1024); 171 | // // red = negative space 172 | // temp = projection.img_warp(proc_roi) * -1.0; 173 | // cv::threshold(temp, temp, 0.0, 0.5, cv::THRESH_TOZERO); 174 | // cv::resize(temp, temp, roi.size()); 175 | // temp.convertTo(channels[2](roi), CV_8U, 1024); 176 | 177 | // cv::merge(channels, rgb_img); 178 | 179 | // return rgb_img; 180 | // } 181 | 182 | }; -------------------------------------------------------------------------------- /code/main.cpp: -------------------------------------------------------------------------------- 1 | 2 | 3 | 
#include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include 10 | using namespace yarp::os; 11 | 12 | #include "erosdirect.h" 13 | #include "projection.h" 14 | #include "comparison.h" 15 | #include "image_processing.h" 16 | 17 | //__NV_PRIME_RENDER_OFFLOAD=1 __GLX_VENDOR_LIBRARY_NAME=nvidia 18 | 19 | 20 | class tracker : public yarp::os::RFModule 21 | { 22 | private: 23 | 24 | //parameters 25 | int proc_size{100}; 26 | int canny_thresh{40}; double canny_scale{3.0}; 27 | int eros_k{7}; double eros_d{0.7}; 28 | bool dp2{false}; 29 | bool parallel_method; 30 | bool run{true}; 31 | double period{0.1}; 32 | double render_scaler{1.0}; 33 | 34 | //threads and mutex, handlers 35 | std::thread worker; 36 | std::thread proj_worker; 37 | std::thread warp_worker; 38 | std::mutex m; 39 | std::condition_variable signal; 40 | 41 | //handlers 42 | //EROSdirect eros_handler; 43 | EROSfromYARP eros_handler; 44 | //ARESfromYARP eros_handler; 45 | imageProcessing img_handler; 46 | warpManager warp_handler; 47 | 48 | //internal variables 49 | SICAD* si_cad; 50 | cv::Size img_size; 51 | std::array intrinsics; 52 | std::array initial_state, camera_pose, state; 53 | cv::Mat proj_rgb, proj_32f; 54 | bool projection_available{false}; 55 | 56 | //stats 57 | double toc_eros{0}, toc_proj{0}, toc_projproc{0}, toc_warp{0}; 58 | int toc_count{0}; 59 | int proj_count{0}, warp_count{0}; 60 | int vis_type{0}; 61 | 62 | //output 63 | std::ofstream fs; 64 | std::string file_name; 65 | std::deque< std::array > data_to_save; 66 | cv::VideoWriter vid_writer; 67 | 68 | public: 69 | 70 | bool configure(yarp::os::ResourceFinder& rf) override 71 | { 72 | setName(rf.check("name", Value("/ekom")).asString().c_str()); 73 | double bias_sens = rf.check("s", Value(0.6)).asFloat64(); 74 | double cam_filter = rf.check("f", Value(0.1)).asFloat64(); 75 | 76 | proc_size = rf.check("proc_size", Value(100)).asInt32(); 77 | canny_thresh = rf.check("canny_thresh", Value(40)).asInt32(); 78 | canny_scale = 
rf.check("canny_scale", Value(3)).asFloat64(); 79 | eros_k = rf.check("eros_k", Value(7)).asInt32(); 80 | eros_d = rf.check("eros_d", Value(0.7)).asFloat64(); 81 | period = rf.check("period", Value(0.1)).asFloat64(); 82 | dp2 = rf.check("dp2") && rf.check("dp2", Value(true)).asBool(); //default false 83 | //run = rf.check("run") && !rf.find("run").asBool() ? false : true; //default true 84 | run = rf.check("run", Value(false)).asBool(); 85 | parallel_method = rf.check("parallel") && rf.check("parallel", Value(true)).asBool(); // defaulat false 86 | render_scaler = rf.check("render_scaler", Value(1.0)).asFloat64(); 87 | 88 | yarp::os::Bottle& intrinsic_parameters = rf.findGroup("CAMERA_CALIBRATION"); 89 | if (intrinsic_parameters.isNull()) { 90 | yError() << "Wrong .ini file or [CAMERA_CALIBRATION] not present. Deal breaker."; 91 | return false; 92 | } 93 | warp_handler.cam[warpManager::x] = intrinsic_parameters.find("w").asInt32()*render_scaler; 94 | warp_handler.cam[warpManager::y] = intrinsic_parameters.find("h").asInt32()*render_scaler; 95 | warp_handler.cam[warpManager::cx] = intrinsic_parameters.find("cx").asFloat32()*render_scaler; 96 | warp_handler.cam[warpManager::cy] = intrinsic_parameters.find("cy").asFloat32()*render_scaler; 97 | warp_handler.cam[warpManager::fx] = intrinsic_parameters.find("fx").asFloat32()*render_scaler; 98 | warp_handler.cam[warpManager::fy] = intrinsic_parameters.find("fy").asFloat32()*render_scaler; 99 | img_size = cv::Size(warp_handler.cam[warpManager::x], warp_handler.cam[warpManager::y]); 100 | 101 | if(!loadPose(rf, "object_pose", initial_state)) return false; 102 | if(!loadPose(rf, "camera_pose", camera_pose)) return false; 103 | state = initial_state; 104 | 105 | if(!Network::checkNetwork(1.0)) { 106 | yError() << "could not connect to YARP"; 107 | return false; 108 | } 109 | 110 | if (!eros_handler.start(cv::Size(intrinsic_parameters.find("w").asInt32(), intrinsic_parameters.find("h").asInt32()), "/atis3/AE:o", 
getName("/AE:i"), eros_k, eros_d)) { 111 | yError() << "could not open the YARP eros handler"; 112 | return false; 113 | } 114 | 115 | si_cad = createProjectorClass(rf); 116 | if(!si_cad) 117 | return false; 118 | 119 | int blur = proc_size / 20; 120 | double dp = 1;//+rescale_size / 100; 121 | warp_handler.initialise(proc_size, dp2); 122 | warp_handler.create_Ms(dp); 123 | warp_handler.set_current(state); 124 | img_handler.initialise(proc_size, blur, canny_thresh, canny_scale); 125 | 126 | proj_rgb = cv::Mat::zeros(img_size, CV_8UC1); 127 | proj_32f = cv::Mat::zeros(img_size, CV_32F); 128 | 129 | cv::namedWindow("EROS", cv::WINDOW_NORMAL); 130 | cv::resizeWindow("EROS", img_size); 131 | cv::moveWindow("EROS", 1920, 100); 132 | 133 | // cv::namedWindow("Translations", cv::WINDOW_AUTOSIZE); 134 | // cv::resizeWindow("Translations", img_size); 135 | // cv::moveWindow("Translations", 0, 540); 136 | 137 | // cv::namedWindow("Rotations", cv::WINDOW_AUTOSIZE); 138 | // cv::resizeWindow("Rotations", img_size); 139 | // cv::moveWindow("Rotations", 700, 540); 140 | 141 | //quaternion_test(); 142 | //quaternion_test_camera(); 143 | 144 | if(parallel_method) { 145 | proj_worker = std::thread([this]{projection_loop();}); 146 | warp_worker = std::thread([this]{warp_loop();}); 147 | } else { 148 | worker = std::thread([this]{sequential_loop();}); 149 | } 150 | 151 | if (rf.check("file")) { 152 | std::string filename = rf.find("file").asString(); 153 | fs.open(filename); 154 | if (!fs.is_open()) { 155 | yError() << "Could not open output file" << filename; 156 | return false; 157 | } 158 | vid_writer.open(filename + ".mp4", cv::VideoWriter::fourcc('H','2','6','4'), (int)(1.0/period), img_size, true); 159 | if (!vid_writer.isOpened()) { 160 | yError() << "Could not open output file" << filename << ".mp4"; 161 | return false; 162 | } 163 | } 164 | 165 | 166 | yInfo() << "====== Configuration ======"; 167 | yInfo() << "Camera Size:" << img_size.width << "x" << img_size.height; 168 | 
yInfo() << "Process re-sized:" << proc_size << "x" << proc_size << "(--proc_size )"; 169 | yInfo() << "EROS:" << eros_k << "," << eros_d << "(--eros_k --eros_d )"; 170 | yInfo() << "Canny:" << canny_thresh << "," << canny_thresh<<"x"< --canny_scale )"; 171 | if(dp2){yInfo()<<"ON: multi-size dp warps (--dp2)";}else{yInfo()<<"OFF: multi-size dp warps (--dp2)";} 172 | if(parallel_method){yInfo()<<"ON: threaded projections (--parallel)";}else{yInfo()<<"OFF: threaded projections (--parallel)";} 173 | if(!run) yInfo() << "WARNING: press G to start tracking (--run)"; 174 | yInfo() << "==========================="; 175 | 176 | return true; 177 | } 178 | 179 | 180 | 181 | double getPeriod() override 182 | { 183 | return period; 184 | } 185 | 186 | bool updateModule() override 187 | { 188 | static cv::Mat vis, proj_vis, eros_vis; 189 | //vis = warp_handler.make_visualisation(eros_u); 190 | //proj_rgb.copyTo(proj_vis); 191 | //img_handler.process_eros(eros_handler.eros.getSurface(), eros_vis); 192 | cv::medianBlur(eros_handler.eros.getSurface(), eros_vis, 3); 193 | cv::GaussianBlur(eros_vis, eros_vis, cv::Size(3, 3), 0); 194 | cv::cvtColor(eros_handler.eros.getSurface(), eros_vis, cv::COLOR_GRAY2BGR); 195 | cv::resize(eros_vis, eros_vis, img_size); 196 | cv::cvtColor(proj_rgb, proj_vis, cv::COLOR_GRAY2BGR); 197 | if(vis_type == 0) 198 | vis = proj_vis + eros_vis; 199 | else 200 | vis = proj_vis + eros_handler.event_image; 201 | eros_handler.event_image.setTo(0); 202 | //cv::rectangle(vis, img_handler.img_roi, cv::Scalar(255, 255, 255)); 203 | // static cv::Mat warps_t = cv::Mat::zeros(100, 100, CV_8U); 204 | // warps_t = warp_handler.create_translation_visualisation(); 205 | // static cv::Mat warps_r = cv::Mat::zeros(100, 100, CV_8U); 206 | // warps_r = warp_handler.create_rotation_visualisation(); 207 | //cv::flip(vis, vis, 1); 208 | 209 | std::string rate_string = "- Hz"; 210 | std::stringstream ss; 211 | /* fix: removed 'ss.str();' — with no argument str() is the getter and the call had no effect */ 212 | if(toc_count) 213 | ss << (int)toc_count /
(period*10) << "Hz"; else ss << rate_string; /* fix: the "- Hz" fallback declared above was never displayed when no rate was available */ 214 | 215 | cv::putText(vis, ss.str(), cv::Point(640-160, 480-10), cv::FONT_HERSHEY_PLAIN, 2.0, cv::Scalar(200, 200, 200)); 216 | 217 | cv::imshow("EROS", vis); 218 | //cv::imshow("Translations", warps_t+0.5); 219 | //cv::imshow("Rotations", warps_r+0.5); 220 | static bool stop_running = false; 221 | if(stop_running) { 222 | run = stop_running = false; 223 | } 224 | int c = cv::waitKey(1); 225 | if (c == 32) { 226 | state = initial_state; 227 | warp_handler.set_current(initial_state); 228 | stop_running = true; 229 | } 230 | if (c == 'g') 231 | run = true; 232 | if(c == 27) { 233 | stopModule(); 234 | return false; 235 | } 236 | 237 | if(c == 'v') 238 | vis_type = (vis_type + 1) % 2; /* fix: was 'vis_type = (++vis_type % 2);' — reading and writing vis_type twice in one expression (UB before C++17) */ 239 | 240 | // yInfo() << cv::sum(cv::sum(warp_handler.warps[predictions::z].img_warp))[0] 241 | // << cv::sum(cv::sum(warp_handler.projection.img_warp))[0]; 242 | 243 | // std::array s = warp_handler.scores_p; 244 | // yInfo() << warp_handler.score_projection << s[0] << s[1] << s[2] << s[3] << s[4] << s[5]; 245 | // s = warp_handler.scores_n; 246 | // yInfo() << warp_handler.score_projection << s[0] << s[1] << s[2] << s[3] << s[4] << s[5]; 247 | //yInfo(); 248 | 249 | // yInfo() << state[0] << state[1] << state[2] << state[3] << state[4] 250 | // << state[5] << state[6]; 251 | if(vid_writer.isOpened() && eros_handler.tic > 0) 252 | vid_writer << vis; 253 | static int updated_divisor=0; 254 | if (updated_divisor++ % 10 == 0) { 255 | if (toc_count) { 256 | yInfo() << (int)(toc_eros / toc_count) << "\t" 257 | << (int)(toc_proj / toc_count) << "\t" 258 | << (int)(toc_projproc / toc_count) << "\t" 259 | << (int)(toc_warp / toc_count) << "\t" 260 | << (int)toc_count / (period*10) << "Hz"; 261 | toc_count = toc_warp = toc_projproc = toc_proj = toc_eros = 0; 262 | } 263 | if (warp_count || proj_count) { 264 | yInfo() << (int)(warp_count / (period*10)) << "Hz" << (int)(proj_count / (10*period)) << "Hz"; 265 | warp_count = proj_count = 0; 266 | } 267 | } 268 | return true; 
269 | } 270 | 271 | /* Producer thread: renders the object at the current state estimate, extracts the ROI and pre-processes it into a template, then flags it ready for warp_loop via projection_available. */ void projection_loop() 272 | { 273 | while (!isStopping()) 274 | { 275 | std::unique_lock lk(m); 276 | /* block until warp_loop has consumed the previous projection */ signal.wait(lk, [this]{return projection_available == false;}); 277 | // project the current state 278 | complexProjection(si_cad, camera_pose, warp_handler.state_current, proj_rgb); 279 | 280 | // get the ROI of the current state 281 | img_handler.set_projection_rois(proj_rgb, 20); 282 | // process the projection 283 | img_handler.setProcProj(proj_rgb); 284 | projection_available = true; /* NOTE(review): written without holding m here and below in warp_loop while projection_loop reads it in the wait predicate under m — looks like a data race on a plain bool; consider std::atomic or locking. TODO confirm */ 285 | proj_count++; 286 | } 287 | } 288 | 289 | /* Consumer thread: takes fresh projections, applies the accumulated warp history, generates predictive warps, scores them against the current EROS surface and updates the state estimate. */ void warp_loop() 290 | { 291 | bool updated = false; 292 | while (!isStopping()) 293 | { 294 | if(projection_available) 295 | { 296 | // set the current image 297 | img_handler.proc_proj.copyTo(warp_handler.projection.img_warp); 298 | 299 | // warp the current projection based on the warp list 300 | // and clear the list 301 | warp_handler.warp_by_history(warp_handler.projection.img_warp); 302 | 303 | // copy over the region of interest (needs to be thread safe) 304 | img_handler.set_obs_rois_from_projected(); 305 | warp_handler.scale = img_handler.scale; 306 | 307 | updated = true; 308 | projection_available = false; 309 | signal.notify_one(); 310 | } 311 | 312 | // perform warps 313 | /* NOTE(review): when no new projection arrives this loop keeps iterating without blocking (busy-spin scoring against the latest EROS) — confirm this is intended rather than waiting on the condition variable */ if(updated) 314 | warp_handler.make_predictive_warps(); 315 | 316 | // get the current EROS 317 | img_handler.setProcObs(eros_handler.eros.getSurface()); 318 | warp_handler.proc_obs = img_handler.proc_obs; 319 | 320 | // perform the comparison 321 | warp_handler.score_predictive_warps(); 322 | 323 | // update the state and update the warped image projection 324 | if (run) { 325 | // warp_handler.update_from_max(); 326 | // warp_handler.update_all_possible(); 327 | updated = warp_handler.update_heuristically(); 328 | //updated = warp_handler.update_from_max(); 329 | // state = warp_handler.state_current; 330 | //step = true; 331 | } 332 | warp_count++; 333 | } 334 | /* release projection_loop so it can observe isStopping() and exit */ projection_available = false; 335 | signal.notify_one(); 336 | } 
337 | 338 | void sequential_loop() 339 | { 340 | double dataset_time = -1; 341 | bool updated = true; 342 | 343 | projectStateYawPitch(state); 344 | warp_handler.make_predictive_warps(); 345 | 346 | 347 | while (!isStopping()) { 348 | updated = true; 349 | 350 | double dtic = Time::now(); 351 | double dtoc_proj = Time::now(); 352 | double dtoc_projproc = Time::now(); 353 | 354 | if(updated) { 355 | 356 | projectStateYawPitch(state); 357 | dtoc_proj = Time::now(); 358 | 359 | warp_handler.make_predictive_warps(); 360 | dtoc_projproc = Time::now(); 361 | 362 | 363 | } 364 | 365 | // if(updated) { 366 | 367 | // //perform the projection 368 | // si_cad->superimpose(q2aa(state), q2aa(camera_pose), proj_rgb); 369 | 370 | // //extract RoIs 371 | // img_handler.set_projection_rois(proj_rgb); 372 | 373 | // //and copy them also for the observation 374 | // img_handler.set_obs_rois_from_projected(); 375 | // warp_handler.scale = img_handler.scale; 376 | // //warp_handler.extract_rois(proj_rgb); 377 | 378 | // //make the projection template 379 | // img_handler.setProcProj(proj_rgb); 380 | // img_handler.proc_proj.copyTo(warp_handler.projection.img_warp); 381 | // dtoc_proj = Time::now(); 382 | 383 | // //make predictions 384 | // warp_handler.make_predictive_warps(); 385 | // replaceyawpitch(img_handler.img_roi); 386 | // dtoc_projproc = Time::now(); 387 | // } 388 | 389 | //get the EROS 390 | dataset_time = eros_handler.tic; 391 | static cv::Mat scaled_eros = cv::Mat::zeros(img_size, CV_8U); 392 | cv::resize(eros_handler.eros.getSurface(), scaled_eros, scaled_eros.size()); 393 | img_handler.setProcObs(scaled_eros); 394 | //warp_handler.proc_obs = img_handler.proc_obs; 395 | img_handler.proc_obs.copyTo(warp_handler.proc_obs); 396 | double dtoc_eros = Time::now(); 397 | 398 | warp_handler.score_predictive_warps(); 399 | if(run) { 400 | //updated = warp_handler.update_from_max(); 401 | //updated = warp_handler.update_all_possible(); 402 | updated = 
warp_handler.update_heuristically(); 403 | state = warp_handler.state_current; 404 | } 405 | 406 | double dtoc_warp = Time::now(); 407 | //yInfo() << "update:"<< dtoc_eros - dtoc_warp; 408 | 409 | this->toc_eros += ((dtoc_eros - dtoc_projproc) * 1e6); 410 | this->toc_proj += ((dtoc_proj - dtic) * 1e6); 411 | this->toc_projproc += ((dtoc_projproc - dtoc_proj) * 1e6); 412 | this->toc_warp += ((dtoc_warp - dtoc_eros) * 1e6); 413 | this->toc_count++; 414 | if (fs.is_open() && dataset_time > 0) { 415 | data_to_save.push_back({dataset_time, state[0], state[1], state[2], state[3], state[4], state[5], state[6]}); 416 | } 417 | } 418 | } 419 | 420 | void projectStateYawPitch(std::array object) 421 | { 422 | 423 | std::array state_temp; 424 | std::vector > objects; 425 | static std::vector images; 426 | if (images.empty()) { 427 | for (auto i = 0; i < 5; i++) 428 | images.push_back(cv::Mat::zeros(img_size, CV_8U)); 429 | } 430 | 431 | objects.push_back(q2aa(object)); 432 | 433 | double pitch = 1 * M_PI_2 * 1.0 / (img_handler.img_roi.height*0.5); 434 | pitch = 2.0 * M_PI / 180.0; 435 | state_temp = object; 436 | perform_rotation(state_temp, 0, pitch); 437 | objects.push_back(q2aa(state_temp)); 438 | 439 | state_temp = object; 440 | perform_rotation(state_temp, 0, -pitch); 441 | objects.push_back(q2aa(state_temp)); 442 | 443 | double yaw = 1 * M_PI_2 * 1.0 / (img_handler.img_roi.width*0.5); 444 | yaw = 2.0 * M_PI / 180.0; 445 | state_temp = object; 446 | perform_rotation(state_temp, 1, yaw); 447 | objects.push_back(q2aa(state_temp)); 448 | 449 | state_temp = object; 450 | perform_rotation(state_temp, 1, -yaw); 451 | objects.push_back(q2aa(state_temp)); 452 | 453 | si_cad->superimpose(q2aa(camera_pose), objects, img_handler.img_roi, images); 454 | 455 | img_handler.set_projection_rois(images[0]); 456 | img_handler.set_obs_rois_from_projected(); 457 | warp_handler.scale = img_handler.scale; 458 | 459 | img_handler.setProcProj(images[0]); 460 | 
img_handler.proc_proj.copyTo(warp_handler.projection.img_warp); 461 | proj_rgb = images[0]; 462 | 463 | img_handler.setProcProj(images[1]); 464 | img_handler.proc_proj.copyTo(warp_handler.warps[warpManager::ap].img_warp); 465 | warp_handler.warps[warpManager::ap].delta = pitch; 466 | 467 | img_handler.setProcProj(images[2]); 468 | img_handler.proc_proj.copyTo(warp_handler.warps[warpManager::an].img_warp); 469 | warp_handler.warps[warpManager::an].delta = -pitch; 470 | 471 | img_handler.setProcProj(images[3]); 472 | img_handler.proc_proj.copyTo(warp_handler.warps[warpManager::bp].img_warp); 473 | warp_handler.warps[warpManager::bp].delta = yaw; 474 | 475 | img_handler.setProcProj(images[4]); 476 | img_handler.proc_proj.copyTo(warp_handler.warps[warpManager::bn].img_warp); 477 | warp_handler.warps[warpManager::bn].delta = -yaw; 478 | 479 | } 480 | 481 | void replaceyawpitch(cv::Rect roi) 482 | { 483 | static cv::Mat rgb = cv::Mat::zeros(img_size, CV_8UC1); 484 | rgb = 0; 485 | cv::Mat rgb_roi = rgb(roi); 486 | //get the state change for delta pitch (around x axis) 487 | 488 | double theta = M_PI_2 * 3.0 / (roi.height*0.5); 489 | auto state_temp = state; 490 | perform_rotation(state_temp, 0, theta); 491 | si_cad->superimpose(q2aa(state_temp), q2aa(camera_pose), rgb_roi, roi); 492 | //complexProjection(si_cad, camera_pose, state_temp, rgb); 493 | img_handler.setProcProj(rgb); 494 | img_handler.proc_proj.copyTo(warp_handler.warps[warpManager::ap].img_warp); 495 | warp_handler.warps[warpManager::ap].delta = theta; 496 | 497 | state_temp = state; 498 | perform_rotation(state_temp, 0, -theta); 499 | si_cad->superimpose(q2aa(state_temp), q2aa(camera_pose), rgb_roi, roi); 500 | img_handler.setProcProj(rgb); 501 | img_handler.proc_proj.copyTo(warp_handler.warps[warpManager::an].img_warp); 502 | warp_handler.warps[warpManager::an].delta = -theta; 503 | 504 | theta = M_PI_2 * 3.0 / (roi.width*0.5); 505 | state_temp = state; 506 | perform_rotation(state_temp, 1, theta); 507 | 
si_cad->superimpose(q2aa(state_temp), q2aa(camera_pose), rgb_roi, roi); 508 | img_handler.setProcProj(rgb); 509 | img_handler.proc_proj.copyTo(warp_handler.warps[warpManager::bp].img_warp); 510 | warp_handler.warps[warpManager::bp].delta = theta; 511 | 512 | state_temp = state; 513 | perform_rotation(state_temp, 1, -theta); 514 | si_cad->superimpose(q2aa(state_temp), q2aa(camera_pose), rgb_roi, roi); 515 | img_handler.setProcProj(rgb); 516 | img_handler.proc_proj.copyTo(warp_handler.warps[warpManager::bn].img_warp); 517 | warp_handler.warps[warpManager::bn].delta = -theta; 518 | 519 | } 520 | 521 | bool quaternion_test(bool return_value = true) 522 | { 523 | double delta = 0.01; 524 | auto temp_state = initial_state; 525 | 526 | for(double th = 0.0; th > -M_PI_2; th-=delta) 527 | { 528 | if (!simpleProjection(si_cad, temp_state, proj_rgb)) { 529 | yError() << "Could not perform projection"; 530 | return false; 531 | } 532 | cv::imshow("qtest", proj_rgb); 533 | cv::waitKey(1); 534 | perform_rotation(temp_state, 2, delta); 535 | } 536 | 537 | for(double th = 0.0; th > -M_PI_2; th-=delta) 538 | { 539 | if (!simpleProjection(si_cad, temp_state, proj_rgb)) { 540 | yError() << "Could not perform projection"; 541 | return false; 542 | } 543 | cv::imshow("qtest", proj_rgb); 544 | cv::waitKey(1); 545 | perform_rotation(temp_state, 1, delta); 546 | } 547 | 548 | for(double th = 0.0; th > -M_PI_2; th-=delta) 549 | { 550 | if (!simpleProjection(si_cad, temp_state, proj_rgb)) { 551 | yError() << "Could not perform projection"; 552 | return false; 553 | } 554 | cv::imshow("qtest", proj_rgb); 555 | cv::waitKey(1); 556 | perform_rotation(temp_state, 0, delta); 557 | } 558 | 559 | return return_value; 560 | } 561 | 562 | bool quaternion_test_camera(bool return_value = true) 563 | { 564 | 565 | auto camera = camera_pose; 566 | double delta = 0.01; 567 | 568 | //yInfo() << "First Rotation"; 569 | for(double th = 0.0; th < 2*M_PI; th+=delta) 570 | { 571 | if 
(!complexProjection(si_cad, camera, state, proj_rgb)) { 572 | yError() << "Could not perform projection"; 573 | return false; 574 | } 575 | cv::imshow("qtest", proj_rgb); 576 | if(cv::waitKey(1) == '\e') return false; 577 | perform_rotation(camera, 2, delta); 578 | // yInfo() << camera[3] << camera[4] << camera[5] << camera[6]; 579 | // yInfo() << 2 << th; 580 | } 581 | 582 | //yInfo() << "Second Rotation"; 583 | for(double th = 0.0; th < 2*M_PI; th+=delta) 584 | { 585 | if (!complexProjection(si_cad, camera, state, proj_rgb)) { 586 | yError() << "Could not perform projection"; 587 | return false; 588 | } 589 | cv::imshow("qtest", proj_rgb); 590 | if(cv::waitKey(1) == '\e') return false; 591 | perform_rotation(camera, 1, delta); 592 | // yInfo() << camera[3] << camera[4] << camera[5] << camera[6]; 593 | // yInfo() << 1 << th; 594 | } 595 | 596 | //yInfo() << "Third Rotation"; 597 | for(double th = 0.0; th < 2*M_PI; th+=delta) 598 | { 599 | if (!complexProjection(si_cad, camera, state, proj_rgb)) { 600 | yError() << "Could not perform projection"; 601 | return false; 602 | } 603 | cv::imshow("qtest", proj_rgb); 604 | if(cv::waitKey(1) == '\e') return false; 605 | perform_rotation(camera, 0, delta); 606 | // yInfo() << camera[3] << camera[4] << camera[5] << camera[6]; 607 | // yInfo() << 0 << th; 608 | } 609 | //yInfo()<< "Done"; 610 | 611 | return return_value; 612 | } 613 | 614 | bool interruptModule() override 615 | { 616 | yInfo() << "interrupt caught"; 617 | return true; 618 | } 619 | 620 | bool close() override 621 | { 622 | yInfo() << "waiting for eros handler ... "; 623 | eros_handler.stop(); 624 | yInfo() << "waiting for worker threads ... 
"; 625 | if(parallel_method) { 626 | warp_worker.join(); 627 | proj_worker.join(); 628 | } else { 629 | worker.join(); 630 | } 631 | if(vid_writer.isOpened()) 632 | vid_writer.release(); 633 | if(fs.is_open()) 634 | { 635 | yInfo() << "Writing data ..."; 636 | for(auto i : data_to_save) 637 | fs << i[0] << ", " << i[1] << ", " << i[2] << ", " << i[3] << ", " << i[4] << ", " << i[5] << ", " << i[6] << ", " << i[7] << std::endl; 638 | fs.close(); 639 | yInfo() << "Finished Writing data ..."; 640 | } 641 | 642 | yInfo() << "close function finished"; 643 | return true; 644 | } 645 | 646 | }; 647 | 648 | int main(int argc, char* argv[]) 649 | { 650 | tracker my_tracker; 651 | ResourceFinder rf; 652 | rf.setDefaultConfigFile("/usr/local/src/EDOPT/configCAR.ini"); 653 | rf.configure(argc, argv); 654 | 655 | return my_tracker.runModule(rf); 656 | } 657 | -------------------------------------------------------------------------------- /code/projection.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | std::array q2aa(const std::array &state) 8 | { 9 | std::array aa{0}; 10 | 11 | aa[0] = state[0]; //x 12 | aa[1] = state[1]; //y 13 | aa[2] = state[2]; //z 14 | 15 | //if acos return -nan it means the quaternion wasn't normalised ! 
16 | aa[6] = 2 * acos(state[6]); //state[6] is w, pose[6] = angle 17 | double scaler = sqrt(1 - state[6]*state[6]); 18 | if(scaler < 0.001) { //angle is close to 0 so it is insignificant (but don't divide by 0) 19 | aa[6] = 0.0; /* fix: zero the angle in the degenerate branch for consistency with quaternion_to_axisangle below; previously aa[6] kept 2*acos(w), which is NaN when an unnormalised w is slightly outside [-1,1] */ aa[3] = 1.0; //ax 20 | } else { 21 | scaler = 1.0 / scaler; 22 | aa[3] = state[3] * scaler; 23 | aa[4] = state[4] * scaler; 24 | aa[5] = state[5] * scaler; 25 | } 26 | 27 | return aa; 28 | } 29 | 30 | Superimpose::ModelPose quaternion_to_axisangle(const std::array &state) 31 | { 32 | Superimpose::ModelPose pose; 33 | pose.resize(7); 34 | 35 | pose[0] = state[0]; //x 36 | pose[1] = state[1]; //y 37 | pose[2] = state[2]; //z 38 | 39 | //if acos return -nan it means the quaternion wasn't normalised ! 40 | pose[6] = 2 * acos(state[6]); //state[6] is w, pose[6] = angle 41 | double scaler = sqrt(1 - state[6]*state[6]); 42 | if(scaler < 0.001) { //angle is close to 0 so it is insignificant (but don't divide by 0) 43 | pose[6] = 0.0; //angle 44 | pose[3] = 1.0; //ax 45 | pose[4] = 0.0; //ay 46 | pose[5] = 0.0; //az 47 | } else { 48 | scaler = 1.0 / scaler; 49 | pose[3] = state[3] * scaler; 50 | pose[4] = state[4] * scaler; 51 | pose[5] = state[5] * scaler; 52 | } 53 | 54 | return pose; 55 | } 56 | 57 | 58 | SICAD* createProjectorClass(yarp::os::ResourceFinder &config) 59 | { 60 | 61 | 62 | if(!config.check("object_path")) { 63 | yError() << "Did not find object path"; 64 | return nullptr; 65 | } 66 | 67 | yarp::os::Bottle &intrinsic_parameters = config.findGroup("CAMERA_CALIBRATION"); 68 | if(intrinsic_parameters.isNull()) { 69 | yError() << "Could not load camera parameters"; 70 | return nullptr; 71 | } 72 | std::string object_path = config.find("object_path").asString(); 73 | SICAD::ModelPathContainer obj; 74 | obj.emplace("model", object_path); 75 | 76 | yInfo() << "Creating SICAD class with object: " << object_path 77 | << "and parameters" << intrinsic_parameters.toString(); 78 | 79 | double render_scaler = config.check("render_scaler", 
Value(1.0)).asFloat64(); 80 | 81 | return new SICAD(obj, 82 | intrinsic_parameters.find("w").asInt32()*render_scaler, 83 | intrinsic_parameters.find("h").asInt32()*render_scaler, 84 | intrinsic_parameters.find("fx").asFloat32()*render_scaler, 85 | intrinsic_parameters.find("fy").asFloat32()*render_scaler, 86 | intrinsic_parameters.find("cx").asFloat32()*render_scaler, 87 | intrinsic_parameters.find("cy").asFloat32()*render_scaler); 88 | 89 | } 90 | 91 | bool complexProjection(SICAD *si_cad, const std::array &camera, const std::array &object, cv::Mat &image) { 92 | 93 | return si_cad->superimpose(q2aa(object), q2aa(camera), image); 94 | // Superimpose::ModelPoseContainer objpose_map; 95 | 96 | // Superimpose::ModelPose op = quaternion_to_axisangle(object); 97 | // Superimpose::ModelPose cp = quaternion_to_axisangle(camera); 98 | // objpose_map.emplace("model", op); 99 | 100 | // return si_cad->superimpose(objpose_map, &(cp[0]), &(cp[3]), image); 101 | 102 | } 103 | 104 | bool cameraBasedProjection(SICAD *si_cad, const std::array &camera, cv::Mat &image) 105 | { 106 | static std::array objectatorigin = {0, 0, 0, 1, 0, 0, 0}; 107 | return complexProjection(si_cad, camera, objectatorigin, image); 108 | } 109 | 110 | 111 | bool simpleProjection(SICAD *si_cad, const std::array &object, cv::Mat &image) 112 | { 113 | static std::array cameraatorigin = {0, 0, 0, 1, 0, 0, 0}; 114 | return complexProjection(si_cad, cameraatorigin, object, image); 115 | } 116 | 117 | bool loadPose(yarp::os::ResourceFinder &config, const std::string pose_name, std::array &pose) 118 | { 119 | yarp::os::Bottle *loaded_pose = config.find(pose_name).asList(); 120 | if(!loaded_pose) { 121 | yError() << "Could not find pose name"; 122 | return false; 123 | } 124 | 125 | yInfo() << pose_name << loaded_pose->toString(); 126 | 127 | if(loaded_pose->size() != pose.size()) { 128 | yError() << "Pose incorrect size: " << loaded_pose->size(); 129 | return false; 130 | } 131 | 132 | for(size_t i = 0; i < 
pose.size(); i++) 133 | pose[i] = loaded_pose->get(i).asFloat32(); 134 | 135 | return true; 136 | 137 | } 138 | 139 | 140 | Superimpose::ModelPose euler_to_axisangle(const std::vector &state) 141 | { 142 | static constexpr double halfdeg2rad = 0.5 * 2.0 * M_PI / 180.0; 143 | Superimpose::ModelPose pose; 144 | pose.resize(7); 145 | 146 | double c1 = cos(state[3]*halfdeg2rad); 147 | double c2 = cos(state[4]*halfdeg2rad); 148 | double c3 = cos(state[5]*halfdeg2rad); 149 | double s1 = sin(state[3]*halfdeg2rad); 150 | double s2 = sin(state[4]*halfdeg2rad); 151 | double s3 = sin(state[5]*halfdeg2rad); 152 | 153 | pose[0] = state[0]; 154 | pose[1] = state[1]; 155 | pose[2] = state[2]; 156 | 157 | 158 | pose[3] = s1*s2*c3 + c1*c2*s3; 159 | pose[4] = s1*c2*c3 + c1*s2*s3; 160 | pose[5] = c1*s2*c3 - s1*c2*s3; 161 | 162 | double norm = pose[3]*pose[3] + pose[4]*pose[4] + pose[5]*pose[5]; 163 | if(norm < 0.001) { 164 | pose[3] = 1.0; 165 | pose[4] = pose[5] = pose[6] = 0.0; 166 | } else { 167 | norm = sqrt(norm); 168 | pose[3] /= norm; 169 | pose[4] /= norm; 170 | pose[5] /= norm; 171 | pose[6] = 2.0 * acos(c1*c2*c3 - s1*s2*s3); 172 | } 173 | return pose; 174 | } 175 | 176 | void normalise_quaternion(std::array &q) 177 | { 178 | 179 | double normval = 1.0 / sqrt(q[0]*q[0] + q[1]*q[1] + 180 | q[2]*q[2] + q[3]*q[3]); 181 | q[0] *= normval; 182 | q[1] *= normval; 183 | q[2] *= normval; 184 | q[3] *= normval; 185 | } 186 | 187 | std::array quaternion_rotation(const std::array &q1, const std::array &q2) 188 | { 189 | std::array q3; 190 | q3[3] = q1[3]*q2[3] - q1[0]*q2[0] - q1[1]*q2[1] - q1[2]*q2[2]; 191 | q3[0] = q1[3]*q2[0] + q1[0]*q2[3] - q1[1]*q2[2] + q1[2]*q2[1]; 192 | q3[1] = q1[3]*q2[1] + q1[0]*q2[2] + q1[1]*q2[3] - q1[2]*q2[0]; 193 | q3[2] = q1[3]*q2[2] - q1[0]*q2[1] + q1[1]*q2[0] + q1[2]*q2[3]; 194 | normalise_quaternion(q3); 195 | return q3; 196 | } 197 | 198 | std::array create_quaternion(int axis, double radians) 199 | { 200 | std::array q{0}; 201 | axis %= 3; 202 | 
radians *= 0.5; 203 | q[axis] = sin(radians); 204 | q[3] = cos(radians); 205 | return q; 206 | } 207 | 208 | 209 | void perform_rotation(std::array &state, int axis, double radians) 210 | { 211 | axis %= 3; 212 | std::array rq = create_quaternion(axis, radians); 213 | std::array q; 214 | q[0] = state[3]; 215 | q[1] = state[4]; 216 | q[2] = state[5]; 217 | q[3] = state[6]; 218 | std::array fq = quaternion_rotation(q, rq); 219 | state[3] = fq[0]; 220 | state[4] = fq[1]; 221 | state[5] = fq[2]; 222 | state[6] = fq[3]; 223 | } 224 | 225 | -------------------------------------------------------------------------------- /configCAR.ini: -------------------------------------------------------------------------------- 1 | #config file for using the car with the live camera 2 | 3 | object_path = "/usr/local/src/EDOPT/objects/car/model.obj" 4 | object_pose = ( 0, 80, 300, 0, 0, 0, 1 ) 5 | camera_pose = ( 0, 0, 0, 1, 0, 0, 0 ); 6 | 7 | eros_k 9 8 | eros_d 0.5 9 | proc_size 100 10 | render_scaler 1.0 11 | 12 | [CAMERA_CALIBRATION] 13 | 14 | w 640 15 | h 480 16 | fx 590.308 17 | fy 590.819 18 | cx 317.58 19 | cy 220.033 20 | k1 0 21 | k2 0 22 | p1 0 23 | p2 0 24 | 25 | 26 | #k1 -0.189124 27 | #k2 0.0946017 28 | #p1 -0.000181359 29 | #p2 0.000536105 30 | -------------------------------------------------------------------------------- /configDRAGON.ini: -------------------------------------------------------------------------------- 1 | #config file for using the dragon 2 | 3 | object_path = "/usr/local/src/EDOPT/objects/dragon/dragon.obj" 4 | ; object_pose = (0.093592996597290039, 0.02, 0.46664299011230469, 0.2125999927520752, 0.6743999719619751, -0.6743999719619751, 0.2125999927520752) 5 | ; object_pose = (0, 0.0066990000009536743, 0.46664299011230469, 0.2125999927520752, 0.6743999719619751, -0.6743999719619751, 0.2125999927520752) 6 | ; object_pose = (0, 0.02, 0.52431301116943359, 0.2125999927520752, 0.6743999719619751, -0.6743999719619751, 0.2125999927520752) 7 | ; object_pose 
= (0.00058299999684095383, 0.02013700008392334, 0.46664299011230469, 0.19490000605583191, 0.67970001697540283, -0.66860002279281616, 0.23029999434947968) 8 | ; object_pose = (0, 0.022920000553131104, 0.46914600372314453 , 0.18369999527931213, 0.58270001411437988, -0.75499999523162842, 0.23800000548362732) 9 | ; object_pose = (-0.000043999999761581421, 0.02, 0.46665298461914062, 0.21410000324249268, 0.67390000820159912, -0.67390000820159912, 0.21410000324249268) 10 | object_pose = (-0.26180500030517578, -0.077519998550415039, 0.75466697692871094, 0.20090000331401825, -0.54400002956390381, 0.63569998741149902, 0.50940001010894775) 11 | camera_pose = (0, 0, 0, 1, 0, 0, 0 ); 12 | 13 | eros_k 9 14 | eros_d 0.75 15 | proc_size 100 16 | 17 | [CAMERA_CALIBRATION] 18 | 19 | w 640 20 | h 480 21 | fx 502.29928588867188 22 | fy 502.29928588867188 23 | cx 320 24 | cy 240 25 | k1 0 26 | k2 0 27 | p1 0 28 | p2 0 29 | 30 | #k1 -0.189124 31 | #k2 0.0946017 32 | #p1 -0.000181359 33 | #p2 0.000536105 -------------------------------------------------------------------------------- /configGELATIN.ini: -------------------------------------------------------------------------------- 1 | #config file for using the dragon 2 | 3 | object_path = "/usr/local/src/EDOPT/objects/gelatin2/textured.obj" 4 | object_pose = (-0.088291997909545898, 0.02, 0.36664299011230469, 0.2125999927520752, 0.6743999719619751, -0.6743999719619751, 0.2125999927520752) 5 | ; object_pose = (0, -0.024577000141143799, 0.36664299011230469, 0.2125999927520752, 0.6743999719619751, -0.6743999719619751, 0.2125999927520752) 6 | ; object_pose = (0, 0.02, 0.33603900909423828, 0.2125999927520752, 0.6743999719619751, -0.6743999719619751, 0.2125999927520752) 7 | ; object_pose = ( -0.0000050000002374872565, 0.020006000995635986, 0.36664299011230469, 0.2257000058889389, 0.67009997367858887, -0.67839998006820679, 0.19949999451637268) 8 | ; object_pose = (0, 0.02, 0.36663898468017578, 0.2151000052690506, 0.68220001459121704, 
-0.66640001535415649, 0.21009999513626099) 9 | ; object_pose = (0, 0.02, 0.36664299011230469, 0.21170000731945038, 0.67470002174377441, -0.67470002174377441, 0.21170000731945038) 10 | camera_pose = (0, 0, 0, 1, 0, 0, 0 ); 11 | 12 | eros_k 9 13 | eros_d 0.75 14 | proc_size 120 15 | canny_thresh 100 16 | 17 | ; x, y, z, roll, pitch 18 | ; eros_k 9 19 | ; eros_d 0.75 20 | ; proc_size 120 21 | ; canny_thresh 100 22 | 23 | ; yaw 24 | ; eros_k 9 25 | ; eros_d 0.75 26 | ; proc_size 120 27 | ; canny_thresh 200 28 | 29 | [CAMERA_CALIBRATION] 30 | 31 | w 640 32 | h 480 33 | fx 502.29928588867188 34 | fy 502.29928588867188 35 | cx 320 36 | cy 240 37 | k1 0 38 | k2 0 39 | p1 0 40 | p2 0 41 | 42 | #k1 -0.189124 43 | #k2 0.0946017 44 | #p1 -0.000181359 45 | #p2 0.000536105 -------------------------------------------------------------------------------- /configMUSTARD.ini: -------------------------------------------------------------------------------- 1 | #config file for using the mustard bottle 2 | 3 | object_path = "/usr/local/src/EDOPT/objects/mustard/textured.obj" 4 | object_pose = (-0.019744000434875488, 0.02, 0.46664299011230469, 0.2125999927520752, 0.6743999719619751, -0.6743999719619751, 0.2125999927520752) 5 | ; object_pose = (0, 0.017533999681472778, 0.46664299011230469, 0.2125999927520752, 0.6743999719619751, -0.6743999719619751, 0.2125999927520752) 6 | ; object_pose = (0, 0.02, 0.39127399444580078, 0.2125999927520752, 0.6743999719619751, -0.6743999719619751, 0.2125999927520752) 7 | ; object_pose = (0.00074900001287460327, 0.020034000873565674, 0.46664299011230469, 0.18260000646114349, 0.68309998512268066, -0.66430002450942993, 0.24220000207424164) 8 | ; object_pose = (0, 0.020065999031066895, 0.46762798309326172, 0.19990000128746033, 0.63400000333786011, -0.71249997615814209, 0.22470000386238098) 9 | ; object_pose = (-0.0000030000001424923539, 0.02, 0.46664299011230469, 0.22640000283718109, 0.66990000009536743, -0.66990000009536743, 0.22640000283718109) 10 | 
camera_pose = (0, 0, 0, 1, 0, 0, 0 ); 11 | 12 | ; y, z 13 | ; eros_k 9 14 | ; eros_d 0.5 15 | ; proc_size 150 16 | ; canny_thresh 500 17 | 18 | ; x 19 | ; eros_k 9 20 | ; eros_d 0.75 21 | ; proc_size 120 22 | ; canny_thresh 500 23 | 24 | ; roll, pitch, yaw 25 | ; eros_k 9 26 | ; eros_d 0.65 27 | ; proc_size 150 28 | ; canny_thresh 500 29 | 30 | eros_k 9 31 | eros_d 0.65 32 | proc_size 150 33 | canny_thresh 500 34 | 35 | [CAMERA_CALIBRATION] 36 | 37 | w 640 38 | h 480 39 | fx 502.29928588867188 40 | fy 502.29928588867188 41 | cx 320 42 | cy 240 43 | k1 0 44 | k2 0 45 | p1 0 46 | p2 0 47 | 48 | #k1 -0.189124 49 | #k2 0.0946017 50 | #p1 -0.000181359 51 | #p2 0.000536105 52 | -------------------------------------------------------------------------------- /configPOTTED.ini: -------------------------------------------------------------------------------- 1 | #config file for using the potted 2 | 3 | object_path = "/usr/local/src/EDOPT/objects/potted/textured.obj" 4 | object_pose = (-0.074475002288818359, 0.02, 0.36664299011230469, 0.2125999927520752, 0.6743999719619751, -0.6743999719619751, 0.2125999927520752) 5 | ; object_pose = (0, 0.0041310000419616699, 0.36664299011230469, 0.2125999927520752, 0.6743999719619751, -0.6743999719619751, 0.2125999927520752) 6 | ; object_pose = (0, 0.02, 0.43065898895263672, 0.2125999927520752, 0.6743999719619751, -0.6743999719619751, 0.2125999927520752) 7 | ; object_pose = (0, 0.019989999532699585, 0.36664299011230469, 0.25249999761581421, 0.66049998998641968, -0.68589997291564941, 0.17200000584125519) 8 | ; object_pose = (0, 0.019991999864578247, 0.36664199829101562, 0.23309999704360962, 0.73919999599456787, -0.60259997844696045, 0.18999999761581421) 9 | ; object_pose = ( -0.0000019999999494757503, 0.02, 0.36664699554443359, 0.22840000689029694, 0.66920000314712524, -0.66920000314712524, 0.22840000689029694) 10 | camera_pose = (0, 0, 0, 1, 0, 0, 0 ); 11 | ; x, z, roll, pitch, yaw 12 | ; eros_k 9 13 | ; eros_d 0.75 14 | ; proc_size 100 
15 | ; canny_thresh 100 16 | ; canny_scale 3 17 | 18 | ; y 19 | ; eros_k 9 20 | ; eros_d 0.75 21 | ; proc_size 120 22 | ; canny_thresh 100 23 | ; canny_scale 3 24 | 25 | eros_k 9 26 | eros_d 0.75 27 | proc_size 100 28 | canny_thresh 100 29 | canny_scale 3 30 | 31 | [CAMERA_CALIBRATION] 32 | 33 | w 640 34 | h 480 35 | fx 502.29928588867188 36 | fy 502.29928588867188 37 | cx 320 38 | cy 240 39 | k1 0 40 | k2 0 41 | p1 0 42 | p2 0 43 | 44 | #k1 -0.189124 45 | #k2 0.0946017 46 | #p1 -0.000181359 47 | #p2 0.000536105 48 | -------------------------------------------------------------------------------- /configSUGAR.ini: -------------------------------------------------------------------------------- 1 | #config file for using the sugar 2 | 3 | object_path = "/usr/local/src/EDOPT/objects/sugar/textured.obj" 4 | object_pose = ( 0.07706299781799316, 0.02, 0.7666429901123047, 0.2125999927520752, 0.6743999719619751, -0.6743999719619751, 0.2125999927520752) 5 | ; object_pose = (0, 0.12137399673461914, 0.7666429901123047, 0.2125999927520752, 0.6743999719619751, -0.6743999719619751, 0.2125999927520752) 6 | ; object_pose = (0, 0.02, 0.5476639938354492, 0.2125999927520752, 0.6743999719619751, -0.6743999719619751, 0.2125999927520752) 7 | ; object_pose = (-0.04158400058746338, 0.011347999572753906, 0.7666429901123047, 0.04320000112056732, 0.7057999968528748, -0.6029000282287598, 0.3693999946117401) 8 | ; object_pose = (0, 0.02156399965286255, 0.7776319885253906, 0.22540000081062317, 0.714900016784668, -0.6312999725341797, 0.19900000095367432) 9 | ; object_pose = (0.007178000211715698, 0.02, 0.7696849822998047, 0.3504999876022339, 0.6140999794006348, -0.6140999794006348, 0.3504999876022339) 10 | camera_pose = (0, 0, 0, 1, 0, 0, 0 ); 11 | 12 | eros_k 9 13 | eros_d 0.75 14 | proc_size 100 15 | canny_thresh 120 16 | canny_scale 3 17 | 18 | [CAMERA_CALIBRATION] 19 | 20 | w 640 21 | h 480 22 | fx 502.29928588867188 23 | fy 502.29928588867188 24 | cx 320 25 | cy 240 26 | k1 0 27 | k2 0 28 
| p1 0 29 | p2 0 30 | 31 | #k1 -0.189124 32 | #k2 0.0946017 33 | #p1 -0.000181359 34 | #p2 0.000536105 -------------------------------------------------------------------------------- /configTOMATO.ini: -------------------------------------------------------------------------------- 1 | #config file for using the potted 2 | 3 | object_path = "/usr/local/src/EDOPT/objects/tomato/textured.obj" 4 | object_pose = (0.067933998107910156, 0.02, 0.36664299011230469, 0.2125999927520752, 0.6743999719619751, -0.6743999719619751, 0.2125999927520752) 5 | ; object_pose = (0, 0.039082000255584717, 0.36664299011230469, 0.2125999927520752, 0.6743999719619751, -0.6743999719619751, 0.2125999927520752) 6 | ; object_pose = (0, 0.02, 0.46529499053955078, 0.2125999927520752, 0.6743999719619751, -0.6743999719619751, 0.2125999927520752) 7 | ; object_pose = (0.00000699999975040555, 0.019993000030517578, 0.36664299011230469, 0.22619999945163727, 0.66990000009536743, -0.67849999666213989, 0.1988999992609024) 8 | ; object_pose = (0, 0.020074000358581543, 0.36670600891113281, 0.24140000343322754, 0.76569998264312744, -0.56859999895095825, 0.17929999530315399) 9 | ; object_pose = (-0.000065000001341104507, 0.02, 0.36661399841308594, -0.13809999823570251, -0.69349998235702515, 0.69349998235702515, -0.13809999823570251) 10 | camera_pose = (0, 0, 0, 1, 0, 0, 0 ); 11 | ; x, y, z, roll, pitch, yaw 12 | ; eros_k 9 13 | ; eros_d 0.75 14 | ; proc_size 100 15 | ; canny_thresh 120 16 | 17 | eros_k 9 18 | eros_d 0.75 19 | proc_size 100 20 | canny_thresh 120 21 | 22 | [CAMERA_CALIBRATION] 23 | 24 | w 640 25 | h 480 26 | fx 502.29928588867188 27 | fy 502.29928588867188 28 | cx 320 29 | cy 240 30 | k1 0 31 | k2 0 32 | p1 0 33 | p2 0 34 | 35 | #k1 -0.189124 36 | #k2 0.0946017 37 | #p1 -0.000181359 38 | #p2 0.000536105 39 | -------------------------------------------------------------------------------- /docker-compose.yaml: -------------------------------------------------------------------------------- 1 | 
services: 2 | edopt: 3 | image: edopt:latest 4 | container_name: edopt 5 | privileged: true 6 | volumes: 7 | - /dev/bus/usb:/dev/bus/usb 8 | - /tmp/.X11-unix/:/tmp/.X11-unix 9 | environment: 10 | - DISPLAY=unix$DISPLAY 11 | network_mode: host 12 | deploy: 13 | resources: 14 | reservations: 15 | devices: 16 | - driver: nvidia 17 | count: all 18 | capabilities: [gpu] 19 | command: sleep infinity -------------------------------------------------------------------------------- /objects/car/model.obj.mtl: -------------------------------------------------------------------------------- 1 | # 2 | # Wavefront material file 3 | # Converted by Meshlab Group 4 | # 5 | 6 | newmtl material_0 7 | Ka 0.200000 0.200000 0.200000 8 | Kd 1.000000 1.000000 1.000000 9 | Ks 1.000000 1.000000 1.000000 10 | Tr 1.000000 11 | illum 2 12 | Ns 0.000000 13 | map_Kd model_tex.png 14 | 15 | -------------------------------------------------------------------------------- /objects/car/model_tex.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/event-driven-robotics/EDOPT/803c142b1438a9e061ca903af5529b9a554d7987/objects/car/model_tex.png -------------------------------------------------------------------------------- /objects/dragon/dragon.obj.mtl: -------------------------------------------------------------------------------- 1 | # 2 | # Wavefront material file 3 | # Converted by Meshlab Group 4 | # 5 | 6 | newmtl material_0 7 | Ka 0.200000 0.200000 0.200000 8 | Kd 0.000000 0.000000 0.000000 9 | Ks 1.000000 1.000000 1.000000 10 | Tr 1.000000 11 | illum 2 12 | Ns 0.000000 13 | map_Kd dragon.png 14 | 15 | -------------------------------------------------------------------------------- /objects/dragon/dragon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/event-driven-robotics/EDOPT/803c142b1438a9e061ca903af5529b9a554d7987/objects/dragon/dragon.png 
-------------------------------------------------------------------------------- /objects/gelatin2/texture_map.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/event-driven-robotics/EDOPT/803c142b1438a9e061ca903af5529b9a554d7987/objects/gelatin2/texture_map.png -------------------------------------------------------------------------------- /objects/gelatin2/textured.obj.mtl: -------------------------------------------------------------------------------- 1 | # 2 | # Wavefront material file 3 | # Converted by Meshlab Group 4 | # 5 | 6 | newmtl material_0 7 | Ka 0.200000 0.200000 0.200000 8 | Kd 1.000000 1.000000 1.000000 9 | Ks 1.000000 1.000000 1.000000 10 | Tr 1.000000 11 | illum 2 12 | Ns 0.000000 13 | map_Kd texture_map.png 14 | 15 | -------------------------------------------------------------------------------- /objects/mustard/texture_map.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/event-driven-robotics/EDOPT/803c142b1438a9e061ca903af5529b9a554d7987/objects/mustard/texture_map.png -------------------------------------------------------------------------------- /objects/mustard/textured.obj.mtl: -------------------------------------------------------------------------------- 1 | # 2 | # Wavefront material file 3 | # Converted by Meshlab Group 4 | # 5 | 6 | newmtl material_0 7 | Ka 0.200000 0.200000 0.200000 8 | Kd 1.000000 1.000000 1.000000 9 | Ks 1.000000 1.000000 1.000000 10 | Tr 1.000000 11 | illum 2 12 | Ns 0.000000 13 | map_Kd texture_map.png 14 | 15 | -------------------------------------------------------------------------------- /objects/potted/texture_map.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/event-driven-robotics/EDOPT/803c142b1438a9e061ca903af5529b9a554d7987/objects/potted/texture_map.png 
-------------------------------------------------------------------------------- /objects/potted/textured.obj.mtl: -------------------------------------------------------------------------------- 1 | # 2 | # Wavefront material file 3 | # Converted by Meshlab Group 4 | # 5 | 6 | newmtl material_0 7 | Ka 0.200000 0.200000 0.200000 8 | Kd 1.000000 1.000000 1.000000 9 | Ks 1.000000 1.000000 1.000000 10 | Tr 1.000000 11 | illum 2 12 | Ns 0.000000 13 | map_Kd texture_map.png 14 | 15 | -------------------------------------------------------------------------------- /objects/sugar/texture_map.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/event-driven-robotics/EDOPT/803c142b1438a9e061ca903af5529b9a554d7987/objects/sugar/texture_map.png -------------------------------------------------------------------------------- /objects/sugar/textured.obj.mtl: -------------------------------------------------------------------------------- 1 | # 2 | # Wavefront material file 3 | # Converted by Meshlab Group 4 | # 5 | 6 | newmtl material_0 7 | Ka 0.200000 0.200000 0.200000 8 | Kd 1.000000 1.000000 1.000000 9 | Ks 1.000000 1.000000 1.000000 10 | Tr 1.000000 11 | illum 2 12 | Ns 0.000000 13 | map_Kd texture_map.png 14 | 15 | -------------------------------------------------------------------------------- /objects/tomato/texture_map.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/event-driven-robotics/EDOPT/803c142b1438a9e061ca903af5529b9a554d7987/objects/tomato/texture_map.png -------------------------------------------------------------------------------- /objects/tomato/textured.obj.mtl: -------------------------------------------------------------------------------- 1 | # 2 | # Wavefront material file 3 | # Converted by Meshlab Group 4 | # 5 | 6 | newmtl material_0 7 | Ka 0.200000 0.200000 0.200000 8 | Kd 1.000000 1.000000 1.000000 9 | Ks 
1.000000 1.000000 1.000000 10 | Tr 1.000000 11 | illum 2 12 | Ns 0.000000 13 | map_Kd texture_map.png 14 | 15 | -------------------------------------------------------------------------------- /scripts/extractGTfromJson.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | import os, json 4 | from tqdm import tqdm 5 | 6 | dir = '/data/dragon/dragon_translation_z_1_m_s/photorealistic1/' 7 | 8 | json_list = sorted([json_files for json_files in os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), dir)) if json_files.endswith('.json')]) 9 | 10 | open(os.path.join(dir, 'ground_truth.csv'), 'w').close() 11 | 12 | for file in tqdm(json_list): 13 | with open(os.path.join(dir, file)) as jsonFile: 14 | metadata = json.load(jsonFile) 15 | jsonFile.close() 16 | position = metadata['objects'][0]['location'] 17 | orientation = metadata['objects'][0]['quaternion_xyzw'] 18 | time = metadata['timestamp'] 19 | 20 | with open(os.path.join(dir, 'ground_truth.csv'), 'a') as outFile: 21 | position = str(position)[1:-1] 22 | orientation = str(orientation)[1:-1] 23 | outFile.write(str(time) + ', ' + position + ', ' + orientation +'\n') 24 | outFile.close() 25 | -------------------------------------------------------------------------------- /scripts/frames_to_events.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | import os 4 | import json 5 | from tqdm import tqdm 6 | from bimvee.exportIitYarp import encodeEvents24Bit 7 | import argparse 8 | 9 | 10 | def frame2events(input_path, output_path): 11 | numBins = 10 12 | step = 0.5 / (numBins) 13 | dirList = os.listdir(input_path) 14 | prev_ts = None 15 | events = [] 16 | ev_img_list = sorted([x for x in dirList if x.__contains__('ec') and os.path.splitext(x)[-1] == '.png']) 17 | 18 | for file in tqdm(ev_img_list): 19 | with 
open(os.path.join(input_path, file.split('.')[0] + '.json'), 'r') as jsonFile: 20 | metadata = json.load(jsonFile) 21 | timestamp = metadata['timestamp'] 22 | if prev_ts is None: 23 | prev_ts = timestamp 24 | continue 25 | image = plt.imread(os.path.join(input_path, file)) 26 | vCount = np.round(image / step - numBins).astype(int) 27 | vIndices = vCount.nonzero() 28 | for y, x in zip(vIndices[0], vIndices[1]): 29 | num_events = vCount[y, x] 30 | for v in range(abs(num_events)): 31 | polarity = 1 if num_events > 0 else 0 32 | ts_noise = (np.random.rand() - 0.5) / 250 33 | ts = prev_ts + v * ((timestamp - prev_ts) / abs(num_events)) + ts_noise 34 | events.append([x, y, ts, polarity]) 35 | prev_ts = timestamp 36 | events = np.array(events) 37 | events = events[events[:, 2].argsort()] 38 | prev_ts = events[0, 2] 39 | 40 | # dataDict = {'x': events[:, 0], 'y': events[:, 1], 'ts': events[:, 2], 'pol': events[:, 3].astype(bool)} 41 | encodedData = np.array(encodeEvents24Bit(events[:, 2] - events[0, 2], events[:, 0], events[:, 1], events[:, 3].astype(bool))).astype(np.uint32) 42 | if not os.path.exists(output_path): 43 | os.makedirs(output_path) 44 | with open(os.path.join(output_path, 'binaryevents.log'), 'wb') as f: 45 | encodedData.tofile(f) 46 | 47 | 48 | if __name__ == '__main__': 49 | parser = argparse.ArgumentParser(description='Extract binary events from sequence of frames') 50 | parser.add_argument('--input', '-i', dest='input_path', type=str, required=True, 51 | help='Path to input file') 52 | parser.add_argument('--output', '-o', dest='output_path', type=str, required=True, 53 | help='Path to output file') 54 | args = parser.parse_args() 55 | 56 | input_path = args.input_path 57 | output_path = args.output_path 58 | 59 | # input_path = '/data/Sphere_old/1/photorealistic1' 60 | # output_path = '/data/Sphere_old/1/' 61 | 62 | frame2events(input_path, output_path) 63 | -------------------------------------------------------------------------------- 
def computeEuclideanDistance(x1, y1, z1, x2, y2, z2):
    """Per-sample 3-D Euclidean distance between two trajectories.

    Samples whose *second* point contains -inf or NaN are skipped (the
    second trajectory may hold invalid / lost-tracking poses).
    Returns a numpy array of distances for the valid samples.
    """
    distances = []
    for p1, p2 in zip(zip(x1, y1, z1), zip(x2, y2, z2)):
        # only the second trajectory is screened for invalid values
        if any(np.isneginf(c) or np.isnan(c) for c in p2):
            continue
        distances.append(math.sqrt(sum((b - a) ** 2 for a, b in zip(p1, p2))))
    return np.array(distances)
def computeQuaternionError(qx1, qy1, qz1, qw1, qx2, qy2, qz2, qw2):
    """Per-sample angular error (radians) between two quaternion series.

    Both quaternions are normalised before comparison; samples whose angle
    evaluates to NaN are skipped. Returns a numpy array of angles.
    """
    def unit(x, y, z, w):
        n = math.sqrt(x * x + y * y + z * z + w * w)
        return x / n, y / n, z / n, w / n

    errors = []
    for a, b in zip(zip(qx1, qy1, qz1, qw1), zip(qx2, qy2, qz2, qw2)):
        ax, ay, az, aw = unit(*a)
        bx, by, bz, bw = unit(*b)
        dot = aw * bw + ax * bx + ay * by + az * bz
        # angle between rotations; 2*dot^2 - 1 folds the double cover (q == -q)
        angle = np.arccos(np.clip(2.0 * dot * dot - 1.0, -1.0, 1.0))
        if not np.isnan(angle):
            errors.append(angle)
    return np.array(errors)

def resampling_by_interpolate(time_samples, x_values, y_values):
    """Resample y(x) at `time_samples` using nearest-neighbour interpolation
    (extrapolating beyond the ends)."""
    nearest = interp1d(x_values, y_values, kind='nearest', fill_value="extrapolate")
    return nearest(time_samples)

def quaternion_to_euler_angle(w, x, y, z):
    """
    Convert quaternion series into euler angles (roll, pitch, yaw).
    roll is rotation around x, pitch around y, yaw around z
    (counterclockwise). Returns three numpy arrays, in degrees.
    """
    rad2deg = 57.2958  # conversion factor kept identical to the original

    rolls, pitches, yaws = [], [], []
    for qx, qy, qz, qw in zip(x, y, z, w):
        sinr = 2.0 * (qw * qx + qy * qz)
        cosr = 1.0 - 2.0 * (qx * qx + qy * qy)

        # clamp to [-1, 1] so asin stays defined for slightly off-unit input
        sinp = 2.0 * (qw * qy - qz * qx)
        sinp = max(-1.0, min(1.0, sinp))

        siny = 2.0 * (qw * qz + qx * qy)
        cosy = 1.0 - 2.0 * (qy * qy + qz * qz)

        rolls.append(math.atan2(sinr, cosr) * rad2deg)
        pitches.append(math.asin(sinp) * rad2deg)
        yaws.append(math.atan2(siny, cosy) * rad2deg)

    return np.array(rolls), np.array(pitches), np.array(yaws)  # in degrees
def cleanEuler(angle, angle_type):
    """Unwrap an Euler-angle time series so consecutive samples are continuous.

    angle: sequence of angles in degrees.
    angle_type: axis id; 2 (yaw) additionally maps negative angles into [0, 360).
    Returns a numpy array of the same length.

    Fix: removed the dead `diff_arrays = angle[1:-1]-angle[0:-2]` (unused,
    off-by-one slice, and raises TypeError on plain-list input) and the
    unused `diff_list` accumulator.
    """
    th = 180  # a jump larger than this is treated as a wrap-around
    unwrapped = []
    prev_x = 0
    for idx, x in enumerate(angle):
        if idx == 0:
            if angle_type == 2 and x < 0:
                x += 360
        else:
            if abs(x - prev_x) > th:
                x += 360  # undo a -360 wrap relative to the previous sample
            elif angle_type == 2 and x < 0:
                x += 360
        prev_x = x
        unwrapped.append(x)

    return np.array(unwrapped)

def cleanEuler2(angle_list):
    """Unwrap an Euler-angle series by snapping each sample to whichever of
    {a, a+360, a-360} is closest to the previous sample.

    Mutates `angle_list` in place and returns it as a numpy array.

    Fix: the original computed `np.abs(real_val - a0)` where `real_val` is a
    Python list — list-minus-scalar raises TypeError for plain-float input
    (it only worked by accident when the elements were numpy scalars).
    """
    for idx in range(len(angle_list) - 1):
        a0 = angle_list[idx]
        a1 = angle_list[idx + 1]
        candidates = [a1, a1 + 360, a1 - 360]
        abs_diff = [abs(c - a0) for c in candidates]
        angle_list[idx + 1] = candidates[int(np.argmin(abs_diff))]

    return np.array(angle_list)
'gt_old_dataset/pitch.csv'), delimiter=",", names=["t", "x", "y", "z", "qx", "qy", "qz", "qw"])
gt_yaw_old = np.genfromtxt(os.path.join(filePath_dataset, 'gt_old_dataset/yaw.csv'), delimiter=",", names=["t", "x", "y", "z", "qx", "qy", "qz", "qw"])

# Old-dataset GT: re-zero the clock (the *10 stretches time to the playback
# rate used for this dataset — presumably to match the event stream; confirm
# against the recording setup) and convert positions from cm to m.
gt_trans_x_old['t'] = (gt_trans_x_old['t']-gt_trans_x_old['t'][0])*10
gt_trans_x_old['x'] = gt_trans_x_old['x']*0.01
gt_trans_x_old['y'] = gt_trans_x_old['y']*0.01
gt_trans_x_old['z'] = gt_trans_x_old['z']*0.01

gt_trans_y_old['t'] = (gt_trans_y_old['t']-gt_trans_y_old['t'][0])*10
gt_trans_y_old['x'] = gt_trans_y_old['x']*0.01
gt_trans_y_old['y'] = gt_trans_y_old['y']*0.01
gt_trans_y_old['z'] = gt_trans_y_old['z']*0.01

gt_trans_z_old['t'] = (gt_trans_z_old['t']-gt_trans_z_old['t'][0])*10
gt_trans_z_old['x'] = gt_trans_z_old['x']*0.01
gt_trans_z_old['y'] = gt_trans_z_old['y']*0.01
gt_trans_z_old['z'] = gt_trans_z_old['z']*0.01

gt_roll_old['t'] = (gt_roll_old['t']-gt_roll_old['t'][0])*10
gt_roll_old['x'] = gt_roll_old['x']*0.01
gt_roll_old['y'] = gt_roll_old['y']*0.01
gt_roll_old['z'] = gt_roll_old['z']*0.01

gt_pitch_old['t'] = (gt_pitch_old['t']-gt_pitch_old['t'][0])*10
gt_pitch_old['x'] = gt_pitch_old['x']*0.01
gt_pitch_old['y'] = gt_pitch_old['y']*0.01
gt_pitch_old['z'] = gt_pitch_old['z']*0.01

gt_yaw_old['t'] = (gt_yaw_old['t']-gt_yaw_old['t'][0])*10
gt_yaw_old['x'] = gt_yaw_old['x']*0.01
gt_yaw_old['y'] = gt_yaw_old['y']*0.01
gt_yaw_old['z'] = gt_yaw_old['z']*0.01

# New-dataset ground truth: same CSV layout, plus a combined 6-DoF sequence.
gt_trans_x_new = np.genfromtxt(os.path.join(filePath_dataset, 'gt_new_dataset/x.csv'), delimiter=",", names=["t", "x", "y", "z", "qx", "qy", "qz", "qw"])
gt_trans_y_new = np.genfromtxt(os.path.join(filePath_dataset, 'gt_new_dataset/y.csv'), delimiter=",", names=["t", "x", "y", "z", "qx", "qy", "qz", "qw"])
gt_trans_z_new = np.genfromtxt(os.path.join(filePath_dataset, 'gt_new_dataset/z.csv'), delimiter=",", names=["t", "x", "y", "z", "qx", "qy", "qz", "qw"])
gt_roll_new = np.genfromtxt(os.path.join(filePath_dataset, 'gt_new_dataset/roll.csv'), delimiter=",", names=["t", "x", "y", "z", "qx", "qy", "qz", "qw"])
gt_pitch_new = np.genfromtxt(os.path.join(filePath_dataset, 'gt_new_dataset/pitch.csv'), delimiter=",", names=["t", "x", "y", "z", "qx", "qy", "qz", "qw"])
gt_yaw_new = np.genfromtxt(os.path.join(filePath_dataset, 'gt_new_dataset/yaw.csv'), delimiter=",", names=["t", "x", "y", "z", "qx", "qy", "qz", "qw"])
gt_6dof = np.genfromtxt(os.path.join(filePath_dataset, 'gt_new_dataset/6dof.csv'), delimiter=",", names=["t", "x", "y", "z", "qx", "qy", "qz", "qw"])

# New-dataset GT: re-zero time (no rate change) and convert cm to m.
gt_trans_x_new['t'] = (gt_trans_x_new['t']-gt_trans_x_new['t'][0])
gt_trans_x_new['x'] = gt_trans_x_new['x']*0.01
gt_trans_x_new['y'] = gt_trans_x_new['y']*0.01
gt_trans_x_new['z'] = gt_trans_x_new['z']*0.01

gt_trans_y_new['t'] = (gt_trans_y_new['t']-gt_trans_y_new['t'][0])
gt_trans_y_new['x'] = gt_trans_y_new['x']*0.01
gt_trans_y_new['y'] = gt_trans_y_new['y']*0.01
gt_trans_y_new['z'] = gt_trans_y_new['z']*0.01

gt_trans_z_new['t'] = (gt_trans_z_new['t']-gt_trans_z_new['t'][0])
gt_trans_z_new['x'] = gt_trans_z_new['x']*0.01
gt_trans_z_new['y'] = gt_trans_z_new['y']*0.01
gt_trans_z_new['z'] = gt_trans_z_new['z']*0.01

gt_roll_new['t'] = (gt_roll_new['t']-gt_roll_new['t'][0])
gt_roll_new['x'] = gt_roll_new['x']*0.01
gt_roll_new['y'] = gt_roll_new['y']*0.01
gt_roll_new['z'] = gt_roll_new['z']*0.01

gt_pitch_new['t'] = (gt_pitch_new['t']-gt_pitch_new['t'][0])
gt_pitch_new['x'] = gt_pitch_new['x']*0.01
gt_pitch_new['y'] = gt_pitch_new['y']*0.01
gt_pitch_new['z'] = gt_pitch_new['z']*0.01

gt_yaw_new['t'] = (gt_yaw_new['t']-gt_yaw_new['t'][0])
gt_yaw_new['x'] = gt_yaw_new['x']*0.01
gt_yaw_new['y'] = gt_yaw_new['y']*0.01
gt_yaw_new['z'] = gt_yaw_new['z']*0.01

gt_6dof['t'] = (gt_6dof['t']-gt_6dof['t'][0])
gt_6dof['x'] = gt_6dof['x']*0.01
gt_6dof['y'] = gt_6dof['y']*0.01
gt_6dof['z'] = gt_6dof['z']*0.01

# EDOPT tracker output: same column layout as the GT files.
edopt_trans_x = np.genfromtxt(os.path.join(filePath_dataset, 'edopt/x.csv'), delimiter=",", names=["t", "x", "y", "z", "qx", "qy", "qz", "qw"])
edopt_trans_y = np.genfromtxt(os.path.join(filePath_dataset, 'edopt/y.csv'), delimiter=",", names=["t", "x", "y", "z", "qx", "qy", "qz", "qw"])
edopt_trans_z = np.genfromtxt(os.path.join(filePath_dataset, 'edopt/z.csv'), delimiter=",", names=["t", "x", "y", "z", "qx", "qy", "qz", "qw"])
edopt_roll = np.genfromtxt(os.path.join(filePath_dataset, 'edopt/roll.csv'), delimiter=",", names=["t", "x", "y", "z", "qx", "qy", "qz", "qw"])
edopt_pitch = np.genfromtxt(os.path.join(filePath_dataset, 'edopt/pitch.csv'), delimiter=",", names=["t", "x", "y", "z", "qx", "qy", "qz", "qw"])
edopt_yaw = np.genfromtxt(os.path.join(filePath_dataset, 'edopt/yaw.csv'), delimiter=",", names=["t", "x", "y", "z", "qx", "qy", "qz", "qw"])
edopt_6dof = np.genfromtxt(os.path.join(filePath_dataset, 'edopt/6dof.csv'), delimiter=",", names=["t", "x", "y", "z", "qx", "qy", "qz", "qw"])

# RGB-D-E baseline output: NO time column (fixed-rate samples, reconstructed
# below) and the quaternion is ordered qw first.
rgbde_trans_x = np.genfromtxt(os.path.join(filePath_dataset, 'rgbd-e/dragon_translation_x_1_m_s_2_tracked_pose_quaternion.csv'), delimiter=",", names=["x", "y", "z", "qw", "qx", "qy", "qz"])
rgbde_trans_y = np.genfromtxt(os.path.join(filePath_dataset, 'rgbd-e/dragon_translation_y_1_m_s_tracked_pose_quaternion.csv'), delimiter=",", names=["x", "y", "z", "qw", "qx", "qy", "qz"])
rgbde_trans_z = np.genfromtxt(os.path.join(filePath_dataset, 'rgbd-e/dragon_translation_z_1_m_s_tracked_pose_quaternion.csv'), delimiter=",", names=["x", "y", "z", "qw", "qx", "qy", "qz"])
rgbde_roll = np.genfromtxt(os.path.join(filePath_dataset, 'rgbd-e/dragon_roll_4rad_s_tracked_pose_quaternion.csv'), delimiter=",", names=["x", "y", "z", "qw", "qx", "qy", "qz"])
rgbde_pitch = np.genfromtxt(os.path.join(filePath_dataset, 'rgbd-e/dragon_pitch_4rad_s_tracked_pose_quaternion.csv'), delimiter=",", names=["x", "y", "z", "qw", "qx", "qy", "qz"])
rgbde_yaw = np.genfromtxt(os.path.join(filePath_dataset, 'rgbd-e/dragon_yaw_2_tracked_pose_quaternion.csv'), delimiter=",", names=["x", "y", "z", "qw", "qx", "qy", "qz"])
rgbde_6dof = np.genfromtxt(os.path.join(filePath_dataset, 'rgbd-e/6DoF_1_tracked_pose_quaternion_corrected.csv'), delimiter=",", names=["x", "y", "z", "qw", "qx", "qy", "qz"])
rgbde_6dof_5 = np.genfromtxt(os.path.join(filePath_dataset, 'rgbd-e/6DoF_2_tracked_pose_quaternion.csv'), delimiter=",", names=["x", "y", "z", "qw", "qx", "qy", "qz"])

# Synthetic timestamps for the RGB-D-E runs: 60 Hz (stretched *10 to match the
# old-dataset GT clock) except the 6-DoF runs, which stay at 60 Hz and 5 Hz.
rgbde_time_trans_x = (np.arange(0, 1/60*len(rgbde_trans_x['x']), 1/60))*10
rgbde_time_trans_y = (np.arange(0, 1/60*len(rgbde_trans_y['x']), 1/60))*10
rgbde_time_trans_z = (np.arange(0, 1/60*len(rgbde_trans_z['x']), 1/60))*10
rgbde_time_roll = (np.arange(0, 1/60*len(rgbde_roll['qx']), 1/60))*10
rgbde_time_pitch = (np.arange(0, 1/60*len(rgbde_pitch['qx']), 1/60))*10
rgbde_time_yaw = (np.arange(0, 1/60*len(rgbde_yaw['qx']), 1/60))*10
rgbde_time_6dof = (np.arange(0, 1/60*len(rgbde_6dof['x']), 1/60))
rgbde_time_6dof_5 = (np.arange(0, 1/5*len(rgbde_6dof_5['x']), 1/5))


# Resample every tracked component onto the ground-truth timestamps so that
# per-sample errors can be computed (EDOPT vs new GT, RGB-D-E vs old GT).
edopt_trans_x_resampled_x = resampling_by_interpolate(gt_trans_x_new['t'], edopt_trans_x['t'], edopt_trans_x['x'])
edopt_trans_x_resampled_y = resampling_by_interpolate(gt_trans_x_new['t'], edopt_trans_x['t'], edopt_trans_x['y'])
edopt_trans_x_resampled_z = resampling_by_interpolate(gt_trans_x_new['t'], edopt_trans_x['t'], edopt_trans_x['z'])
edopt_trans_x_resampled_qx = resampling_by_interpolate(gt_trans_x_new['t'], edopt_trans_x['t'], edopt_trans_x['qx'])
edopt_trans_x_resampled_qy = resampling_by_interpolate(gt_trans_x_new['t'], edopt_trans_x['t'], edopt_trans_x['qy'])
edopt_trans_x_resampled_qz = resampling_by_interpolate(gt_trans_x_new['t'], edopt_trans_x['t'], edopt_trans_x['qz'])
edopt_trans_x_resampled_qw = resampling_by_interpolate(gt_trans_x_new['t'], edopt_trans_x['t'], edopt_trans_x['qw'])

rgbde_trans_x_resampled_x = resampling_by_interpolate(gt_trans_x_old['t'], rgbde_time_trans_x, rgbde_trans_x['x'])
rgbde_trans_x_resampled_y = resampling_by_interpolate(gt_trans_x_old['t'], rgbde_time_trans_x, rgbde_trans_x['y'])
rgbde_trans_x_resampled_z = resampling_by_interpolate(gt_trans_x_old['t'], rgbde_time_trans_x, rgbde_trans_x['z'])
rgbde_trans_x_resampled_qx = resampling_by_interpolate(gt_trans_x_old['t'], rgbde_time_trans_x, rgbde_trans_x['qx'])
rgbde_trans_x_resampled_qy = resampling_by_interpolate(gt_trans_x_old['t'], rgbde_time_trans_x, rgbde_trans_x['qy'])
rgbde_trans_x_resampled_qz = resampling_by_interpolate(gt_trans_x_old['t'], rgbde_time_trans_x, rgbde_trans_x['qz'])
rgbde_trans_x_resampled_qw = resampling_by_interpolate(gt_trans_x_old['t'], rgbde_time_trans_x, rgbde_trans_x['qw'])

edopt_trans_y_resampled_x = resampling_by_interpolate(gt_trans_y_new['t'], edopt_trans_y['t'], edopt_trans_y['x'])
edopt_trans_y_resampled_y = resampling_by_interpolate(gt_trans_y_new['t'], edopt_trans_y['t'], edopt_trans_y['y'])
edopt_trans_y_resampled_z = resampling_by_interpolate(gt_trans_y_new['t'], edopt_trans_y['t'], edopt_trans_y['z'])
edopt_trans_y_resampled_qx = resampling_by_interpolate(gt_trans_y_new['t'], edopt_trans_y['t'], edopt_trans_y['qx'])
edopt_trans_y_resampled_qy = resampling_by_interpolate(gt_trans_y_new['t'], edopt_trans_y['t'], edopt_trans_y['qy'])
edopt_trans_y_resampled_qz = resampling_by_interpolate(gt_trans_y_new['t'], edopt_trans_y['t'], edopt_trans_y['qz'])
edopt_trans_y_resampled_qw = resampling_by_interpolate(gt_trans_y_new['t'], edopt_trans_y['t'], edopt_trans_y['qw'])

rgbde_trans_y_resampled_x = resampling_by_interpolate(gt_trans_y_old['t'], rgbde_time_trans_y, rgbde_trans_y['x'])
rgbde_trans_y_resampled_y = resampling_by_interpolate(gt_trans_y_old['t'], rgbde_time_trans_y, rgbde_trans_y['y'])
rgbde_trans_y_resampled_z = resampling_by_interpolate(gt_trans_y_old['t'], rgbde_time_trans_y, rgbde_trans_y['z'])
rgbde_trans_y_resampled_qx = resampling_by_interpolate(gt_trans_y_old['t'], rgbde_time_trans_y, rgbde_trans_y['qx'])
rgbde_trans_y_resampled_qy = resampling_by_interpolate(gt_trans_y_old['t'], rgbde_time_trans_y, rgbde_trans_y['qy'])
rgbde_trans_y_resampled_qz = resampling_by_interpolate(gt_trans_y_old['t'], rgbde_time_trans_y, rgbde_trans_y['qz'])
rgbde_trans_y_resampled_qw = resampling_by_interpolate(gt_trans_y_old['t'], rgbde_time_trans_y, rgbde_trans_y['qw'])

edopt_trans_z_resampled_x = resampling_by_interpolate(gt_trans_z_new['t'], edopt_trans_z['t'], edopt_trans_z['x'])
edopt_trans_z_resampled_y = resampling_by_interpolate(gt_trans_z_new['t'], edopt_trans_z['t'], edopt_trans_z['y'])
edopt_trans_z_resampled_z = resampling_by_interpolate(gt_trans_z_new['t'], edopt_trans_z['t'], edopt_trans_z['z'])
edopt_trans_z_resampled_qx = resampling_by_interpolate(gt_trans_z_new['t'], edopt_trans_z['t'], edopt_trans_z['qx'])
edopt_trans_z_resampled_qy = resampling_by_interpolate(gt_trans_z_new['t'], edopt_trans_z['t'], edopt_trans_z['qy'])
edopt_trans_z_resampled_qz = resampling_by_interpolate(gt_trans_z_new['t'], edopt_trans_z['t'], edopt_trans_z['qz'])
edopt_trans_z_resampled_qw = resampling_by_interpolate(gt_trans_z_new['t'], edopt_trans_z['t'], edopt_trans_z['qw'])

rgbde_trans_z_resampled_x = resampling_by_interpolate(gt_trans_z_old['t'], rgbde_time_trans_z, rgbde_trans_z['x'])
rgbde_trans_z_resampled_y = resampling_by_interpolate(gt_trans_z_old['t'], rgbde_time_trans_z, rgbde_trans_z['y'])
rgbde_trans_z_resampled_z = resampling_by_interpolate(gt_trans_z_old['t'], rgbde_time_trans_z, rgbde_trans_z['z'])
rgbde_trans_z_resampled_qx = resampling_by_interpolate(gt_trans_z_old['t'], rgbde_time_trans_z, rgbde_trans_z['qx'])
rgbde_trans_z_resampled_qy = resampling_by_interpolate(gt_trans_z_old['t'], rgbde_time_trans_z, rgbde_trans_z['qy'])
rgbde_trans_z_resampled_qz = resampling_by_interpolate(gt_trans_z_old['t'], rgbde_time_trans_z, rgbde_trans_z['qz'])
rgbde_trans_z_resampled_qw = resampling_by_interpolate(gt_trans_z_old['t'], rgbde_time_trans_z, rgbde_trans_z['qw'])

edopt_roll_resampled_x = resampling_by_interpolate(gt_roll_new['t'], edopt_roll['t'], edopt_roll['x'])
edopt_roll_resampled_y = resampling_by_interpolate(gt_roll_new['t'], edopt_roll['t'], edopt_roll['y'])
edopt_roll_resampled_z = resampling_by_interpolate(gt_roll_new['t'], edopt_roll['t'], edopt_roll['z'])
edopt_roll_resampled_qx = resampling_by_interpolate(gt_roll_new['t'], edopt_roll['t'], edopt_roll['qx'])
edopt_roll_resampled_qy = resampling_by_interpolate(gt_roll_new['t'], edopt_roll['t'], edopt_roll['qy'])
edopt_roll_resampled_qz = resampling_by_interpolate(gt_roll_new['t'], edopt_roll['t'], edopt_roll['qz'])
edopt_roll_resampled_qw = resampling_by_interpolate(gt_roll_new['t'], edopt_roll['t'], edopt_roll['qw'])

rgbde_roll_resampled_x = resampling_by_interpolate(gt_roll_old['t'], rgbde_time_roll, rgbde_roll['x'])
rgbde_roll_resampled_y = resampling_by_interpolate(gt_roll_old['t'], rgbde_time_roll, rgbde_roll['y'])
rgbde_roll_resampled_z = resampling_by_interpolate(gt_roll_old['t'], rgbde_time_roll, rgbde_roll['z'])
rgbde_roll_resampled_qx = resampling_by_interpolate(gt_roll_old['t'], rgbde_time_roll, rgbde_roll['qx'])
rgbde_roll_resampled_qy = resampling_by_interpolate(gt_roll_old['t'], rgbde_time_roll, rgbde_roll['qy'])
rgbde_roll_resampled_qz = resampling_by_interpolate(gt_roll_old['t'], rgbde_time_roll, rgbde_roll['qz'])
rgbde_roll_resampled_qw = resampling_by_interpolate(gt_roll_old['t'], rgbde_time_roll, rgbde_roll['qw'])

edopt_pitch_resampled_x = resampling_by_interpolate(gt_pitch_new['t'], edopt_pitch['t'], edopt_pitch['x'])
edopt_pitch_resampled_y = resampling_by_interpolate(gt_pitch_new['t'], edopt_pitch['t'], edopt_pitch['y'])
edopt_pitch_resampled_z = resampling_by_interpolate(gt_pitch_new['t'], edopt_pitch['t'], edopt_pitch['z'])
edopt_pitch_resampled_qx = resampling_by_interpolate(gt_pitch_new['t'], edopt_pitch['t'], edopt_pitch['qx'])
edopt_pitch_resampled_qy = resampling_by_interpolate(gt_pitch_new['t'], edopt_pitch['t'], edopt_pitch['qy'])
edopt_pitch_resampled_qz = resampling_by_interpolate(gt_pitch_new['t'], edopt_pitch['t'], edopt_pitch['qz'])
edopt_pitch_resampled_qw = resampling_by_interpolate(gt_pitch_new['t'], edopt_pitch['t'], edopt_pitch['qw'])

rgbde_pitch_resampled_x = resampling_by_interpolate(gt_pitch_old['t'], rgbde_time_pitch, rgbde_pitch['x'])
rgbde_pitch_resampled_y = resampling_by_interpolate(gt_pitch_old['t'], rgbde_time_pitch, rgbde_pitch['y'])
rgbde_pitch_resampled_z = resampling_by_interpolate(gt_pitch_old['t'], rgbde_time_pitch, rgbde_pitch['z'])
rgbde_pitch_resampled_qx = resampling_by_interpolate(gt_pitch_old['t'], rgbde_time_pitch, rgbde_pitch['qx'])
rgbde_pitch_resampled_qy = resampling_by_interpolate(gt_pitch_old['t'], rgbde_time_pitch, rgbde_pitch['qy'])
rgbde_pitch_resampled_qz = resampling_by_interpolate(gt_pitch_old['t'], rgbde_time_pitch, rgbde_pitch['qz'])
rgbde_pitch_resampled_qw = resampling_by_interpolate(gt_pitch_old['t'], rgbde_time_pitch, rgbde_pitch['qw'])

edopt_yaw_resampled_x = resampling_by_interpolate(gt_yaw_new['t'], edopt_yaw['t'], edopt_yaw['x'])
edopt_yaw_resampled_y = resampling_by_interpolate(gt_yaw_new['t'], edopt_yaw['t'], edopt_yaw['y'])
edopt_yaw_resampled_z = resampling_by_interpolate(gt_yaw_new['t'], edopt_yaw['t'], edopt_yaw['z'])
edopt_yaw_resampled_qx = resampling_by_interpolate(gt_yaw_new['t'], edopt_yaw['t'], edopt_yaw['qx'])
edopt_yaw_resampled_qy = resampling_by_interpolate(gt_yaw_new['t'], edopt_yaw['t'], edopt_yaw['qy'])
edopt_yaw_resampled_qz = resampling_by_interpolate(gt_yaw_new['t'], edopt_yaw['t'], edopt_yaw['qz'])
edopt_yaw_resampled_qw = resampling_by_interpolate(gt_yaw_new['t'], edopt_yaw['t'], edopt_yaw['qw'])

rgbde_yaw_resampled_x = resampling_by_interpolate(gt_yaw_old['t'], rgbde_time_yaw, rgbde_yaw['x'])
rgbde_yaw_resampled_y = resampling_by_interpolate(gt_yaw_old['t'], rgbde_time_yaw, rgbde_yaw['y'])
rgbde_yaw_resampled_z = resampling_by_interpolate(gt_yaw_old['t'], rgbde_time_yaw, rgbde_yaw['z'])
rgbde_yaw_resampled_qx = resampling_by_interpolate(gt_yaw_old['t'], rgbde_time_yaw, rgbde_yaw['qx'])
rgbde_yaw_resampled_qy = resampling_by_interpolate(gt_yaw_old['t'], rgbde_time_yaw, rgbde_yaw['qy'])
rgbde_yaw_resampled_qz = resampling_by_interpolate(gt_yaw_old['t'], rgbde_time_yaw, rgbde_yaw['qz'])
rgbde_yaw_resampled_qw = resampling_by_interpolate(gt_yaw_old['t'], rgbde_time_yaw, rgbde_yaw['qw'])

# Both 6-DoF runs are compared against the (new-dataset) 6-DoF ground truth.
edopt_6dof_resampled_x = resampling_by_interpolate(gt_6dof['t'], edopt_6dof['t'], edopt_6dof['x'])
edopt_6dof_resampled_y = resampling_by_interpolate(gt_6dof['t'], edopt_6dof['t'], edopt_6dof['y'])
edopt_6dof_resampled_z = resampling_by_interpolate(gt_6dof['t'], edopt_6dof['t'], edopt_6dof['z'])
edopt_6dof_resampled_qx = resampling_by_interpolate(gt_6dof['t'], edopt_6dof['t'], edopt_6dof['qx'])
edopt_6dof_resampled_qy = resampling_by_interpolate(gt_6dof['t'], edopt_6dof['t'], edopt_6dof['qy'])
edopt_6dof_resampled_qz = resampling_by_interpolate(gt_6dof['t'], edopt_6dof['t'], edopt_6dof['qz'])
edopt_6dof_resampled_qw = resampling_by_interpolate(gt_6dof['t'], edopt_6dof['t'], edopt_6dof['qw'])

rgbde_6dof_resampled_x = resampling_by_interpolate(gt_6dof['t'], rgbde_time_6dof, rgbde_6dof['x'])
rgbde_6dof_resampled_y = resampling_by_interpolate(gt_6dof['t'], rgbde_time_6dof, rgbde_6dof['y'])
rgbde_6dof_resampled_z = resampling_by_interpolate(gt_6dof['t'], rgbde_time_6dof, rgbde_6dof['z'])
rgbde_6dof_resampled_qx = resampling_by_interpolate(gt_6dof['t'], rgbde_time_6dof, rgbde_6dof['qx'])
rgbde_6dof_resampled_qy = resampling_by_interpolate(gt_6dof['t'], rgbde_time_6dof, rgbde_6dof['qy'])
rgbde_6dof_resampled_qz = resampling_by_interpolate(gt_6dof['t'], rgbde_time_6dof, rgbde_6dof['qz'])
rgbde_6dof_resampled_qw = resampling_by_interpolate(gt_6dof['t'], rgbde_time_6dof, rgbde_6dof['qw'])

# Quaternion -> Euler (degrees) and unwrapping for the translation-X sequence.
edopt_trans_x_alpha,edopt_trans_x_beta,edopt_trans_x_gamma = quaternion_to_euler_angle(edopt_trans_x['qw'], edopt_trans_x['qx'], edopt_trans_x['qy'], edopt_trans_x['qz'])
rgbde_trans_x_alpha,rgbde_trans_x_beta,rgbde_trans_x_gamma = quaternion_to_euler_angle(rgbde_trans_x['qw'], rgbde_trans_x['qx'], rgbde_trans_x['qy'], rgbde_trans_x['qz'])
gt_trans_x_alpha_new,gt_trans_x_beta_new,gt_trans_x_gamma_new = quaternion_to_euler_angle(gt_trans_x_new['qw'], gt_trans_x_new['qx'], gt_trans_x_new['qy'], gt_trans_x_new['qz'])
gt_trans_x_alpha_old,gt_trans_x_beta_old,gt_trans_x_gamma_old = quaternion_to_euler_angle(gt_trans_x_old['qw'], gt_trans_x_old['qx'], gt_trans_x_old['qy'], gt_trans_x_old['qz'])

edopt_trans_x_alpha_cleaned = cleanEuler(edopt_trans_x_alpha,0)
edopt_trans_x_beta_cleaned = cleanEuler(edopt_trans_x_beta,1)
edopt_trans_x_gamma_cleaned = cleanEuler(edopt_trans_x_gamma,2)

rgbde_trans_x_alpha_cleaned = cleanEuler(rgbde_trans_x_alpha,0)
rgbde_trans_x_beta_cleaned = cleanEuler(rgbde_trans_x_beta,1)
rgbde_trans_x_gamma_cleaned = cleanEuler(rgbde_trans_x_gamma,2) 390 | 391 | gt_trans_x_alpha_cleaned_new = cleanEuler(gt_trans_x_alpha_new,0) 392 | gt_trans_x_beta_cleaned_new = cleanEuler(gt_trans_x_beta_new,1) 393 | gt_trans_x_gamma_cleaned_new = cleanEuler(gt_trans_x_gamma_new,1) 394 | 395 | gt_trans_x_alpha_cleaned_old = cleanEuler(gt_trans_x_alpha_old,0) 396 | gt_trans_x_beta_cleaned_old = cleanEuler(gt_trans_x_beta_old,1) 397 | gt_trans_x_gamma_cleaned_old = cleanEuler(gt_trans_x_gamma_old,1) 398 | 399 | edopt_trans_y_alpha,edopt_trans_y_beta,edopt_trans_y_gamma = quaternion_to_euler_angle(edopt_trans_y['qw'], edopt_trans_y['qx'], edopt_trans_y['qy'], edopt_trans_y['qz']) 400 | rgbde_trans_y_alpha,rgbde_trans_y_beta,rgbde_trans_y_gamma = quaternion_to_euler_angle(rgbde_trans_y['qw'], rgbde_trans_y['qx'], rgbde_trans_y['qy'], rgbde_trans_y['qz']) 401 | gt_trans_y_alpha_new,gt_trans_y_beta_new,gt_trans_y_gamma_new = quaternion_to_euler_angle(gt_trans_y_new['qw'], gt_trans_y_new['qx'], gt_trans_y_new['qy'], gt_trans_y_new['qz']) 402 | gt_trans_y_alpha_old,gt_trans_y_beta_old,gt_trans_y_gamma_old = quaternion_to_euler_angle(gt_trans_y_old['qw'], gt_trans_y_old['qx'], gt_trans_y_old['qy'], gt_trans_y_old['qz']) 403 | 404 | edopt_trans_y_alpha_cleaned = cleanEuler(edopt_trans_y_alpha,0) 405 | edopt_trans_y_beta_cleaned = cleanEuler(edopt_trans_y_beta,1) 406 | edopt_trans_y_gamma_cleaned = cleanEuler(edopt_trans_y_gamma,2) 407 | 408 | rgbde_trans_y_alpha_cleaned = cleanEuler(rgbde_trans_y_alpha,0) 409 | rgbde_trans_y_beta_cleaned = cleanEuler(rgbde_trans_y_beta,1) 410 | rgbde_trans_y_gamma_cleaned = cleanEuler(rgbde_trans_y_gamma,2) 411 | 412 | gt_trans_y_alpha_cleaned_new = cleanEuler(gt_trans_y_alpha_new,0) 413 | gt_trans_y_beta_cleaned_new = cleanEuler(gt_trans_y_beta_new,1) 414 | gt_trans_y_gamma_cleaned_new = cleanEuler(gt_trans_y_gamma_new,2) 415 | 416 | gt_trans_y_alpha_cleaned_old = cleanEuler(gt_trans_y_alpha_old,0) 417 | gt_trans_y_beta_cleaned_old = 
cleanEuler(gt_trans_y_beta_old,1) 418 | gt_trans_y_gamma_cleaned_old = cleanEuler(gt_trans_y_gamma_old,2) 419 | 420 | edopt_trans_z_alpha,edopt_trans_z_beta,edopt_trans_z_gamma = quaternion_to_euler_angle(edopt_trans_z['qw'], edopt_trans_z['qx'], edopt_trans_z['qy'], edopt_trans_z['qz']) 421 | rgbde_trans_z_alpha,rgbde_trans_z_beta,rgbde_trans_z_gamma = quaternion_to_euler_angle(rgbde_trans_z['qw'], rgbde_trans_z['qx'], rgbde_trans_z['qy'], rgbde_trans_z['qz']) 422 | gt_trans_z_alpha_new,gt_trans_z_beta_new,gt_trans_z_gamma_new = quaternion_to_euler_angle(gt_trans_z_new['qw'], gt_trans_z_new['qx'], gt_trans_z_new['qy'], gt_trans_z_new['qz']) 423 | gt_trans_z_alpha_old,gt_trans_z_beta_old,gt_trans_z_gamma_old = quaternion_to_euler_angle(gt_trans_z_old['qw'], gt_trans_z_old['qx'], gt_trans_z_old['qy'], gt_trans_z_old['qz']) 424 | 425 | edopt_trans_z_alpha_cleaned = cleanEuler(edopt_trans_z_alpha,0) 426 | edopt_trans_z_beta_cleaned = cleanEuler(edopt_trans_z_beta,1) 427 | edopt_trans_z_gamma_cleaned = cleanEuler(edopt_trans_z_gamma,2) 428 | 429 | rgbde_trans_z_alpha_cleaned = cleanEuler(rgbde_trans_z_alpha,0) 430 | rgbde_trans_z_beta_cleaned = cleanEuler(rgbde_trans_z_beta,1) 431 | rgbde_trans_z_gamma_cleaned = cleanEuler(rgbde_trans_z_gamma,2) 432 | 433 | gt_trans_z_alpha_cleaned_new = cleanEuler(gt_trans_z_alpha_new,0) 434 | gt_trans_z_beta_cleaned_new = cleanEuler(gt_trans_z_beta_new,1) 435 | gt_trans_z_gamma_cleaned_new = cleanEuler(gt_trans_z_gamma_new,2) 436 | 437 | gt_trans_z_alpha_cleaned_old = cleanEuler(gt_trans_z_alpha_old,0) 438 | gt_trans_z_beta_cleaned_old = cleanEuler(gt_trans_z_beta_old,1) 439 | gt_trans_z_gamma_cleaned_old = cleanEuler(gt_trans_z_gamma_old,2) 440 | 441 | edopt_roll_alpha,edopt_roll_beta,edopt_roll_gamma = quaternion_to_euler_angle(edopt_roll['qw'], edopt_roll['qx'], edopt_roll['qy'], edopt_roll['qz']) 442 | rgbde_roll_alpha,rgbde_roll_beta,rgbde_roll_gamma = quaternion_to_euler_angle(rgbde_roll['qw'], rgbde_roll['qx'], 
rgbde_roll['qy'], rgbde_roll['qz']) 443 | gt_roll_alpha_new,gt_roll_beta_new,gt_roll_gamma_new = quaternion_to_euler_angle(gt_roll_new['qw'], gt_roll_new['qx'], gt_roll_new['qy'], gt_roll_new['qz']) 444 | gt_roll_alpha_old,gt_roll_beta_old,gt_roll_gamma_old = quaternion_to_euler_angle(gt_roll_old['qw'], gt_roll_old['qx'], gt_roll_old['qy'], gt_roll_old['qz']) 445 | 446 | edopt_roll_alpha_cleaned = cleanEuler(edopt_roll_alpha,0) 447 | edopt_roll_beta_cleaned = cleanEuler(edopt_roll_beta,1) 448 | edopt_roll_gamma_cleaned = cleanEuler(edopt_roll_gamma,2) 449 | 450 | rgbde_roll_alpha_cleaned = cleanEuler(rgbde_roll_alpha,0) 451 | rgbde_roll_beta_cleaned = cleanEuler(rgbde_roll_beta,1) 452 | rgbde_roll_gamma_cleaned = cleanEuler(rgbde_roll_gamma,2) 453 | 454 | gt_roll_alpha_cleaned_new = cleanEuler(gt_roll_alpha_new,0) 455 | gt_roll_beta_cleaned_new = cleanEuler(gt_roll_beta_new,1) 456 | gt_roll_gamma_cleaned_new = cleanEuler(gt_roll_gamma_new,2) 457 | 458 | gt_roll_alpha_cleaned_old = cleanEuler(gt_roll_alpha_old,0) 459 | gt_roll_beta_cleaned_old = cleanEuler(gt_roll_beta_old,1) 460 | gt_roll_gamma_cleaned_old = cleanEuler(gt_roll_gamma_old,2) 461 | 462 | edopt_pitch_alpha,edopt_pitch_beta,edopt_pitch_gamma = quaternion_to_euler_angle(edopt_pitch['qw'], edopt_pitch['qx'], edopt_pitch['qy'], edopt_pitch['qz']) 463 | rgbde_pitch_alpha,rgbde_pitch_beta,rgbde_pitch_gamma = quaternion_to_euler_angle(rgbde_pitch['qw'], rgbde_pitch['qx'], rgbde_pitch['qy'], rgbde_pitch['qz']) 464 | gt_pitch_alpha_new,gt_pitch_beta_new,gt_pitch_gamma_new = quaternion_to_euler_angle(gt_pitch_new['qw'], gt_pitch_new['qx'], gt_pitch_new['qy'], gt_pitch_new['qz']) 465 | gt_pitch_alpha_old,gt_pitch_beta_old,gt_pitch_gamma_old = quaternion_to_euler_angle(gt_pitch_old['qw'], gt_pitch_old['qx'], gt_pitch_old['qy'], gt_pitch_old['qz']) 466 | 467 | edopt_pitch_alpha_cleaned = cleanEuler(edopt_pitch_alpha,0) 468 | edopt_pitch_beta_cleaned = cleanEuler(edopt_pitch_beta,1) 469 | edopt_pitch_gamma_cleaned = 
cleanEuler(edopt_pitch_gamma,2) 470 | 471 | rgbde_pitch_alpha_cleaned = cleanEuler(rgbde_pitch_alpha,0) 472 | rgbde_pitch_beta_cleaned = cleanEuler(rgbde_pitch_beta,1) 473 | rgbde_pitch_gamma_cleaned = cleanEuler(rgbde_pitch_gamma,2) 474 | 475 | gt_pitch_alpha_cleaned_new = cleanEuler(gt_pitch_alpha_new,0) 476 | gt_pitch_beta_cleaned_new = cleanEuler(gt_pitch_beta_new,1) 477 | gt_pitch_gamma_cleaned_new = cleanEuler(gt_pitch_gamma_new,2) 478 | 479 | gt_pitch_alpha_cleaned_old = cleanEuler(gt_pitch_alpha_old,0) 480 | gt_pitch_beta_cleaned_old = cleanEuler(gt_pitch_beta_old,1) 481 | gt_pitch_gamma_cleaned_old = cleanEuler(gt_pitch_gamma_old,2) 482 | 483 | edopt_yaw_alpha,edopt_yaw_beta,edopt_yaw_gamma = quaternion_to_euler_angle(edopt_yaw['qw'], edopt_yaw['qx'], edopt_yaw['qy'], edopt_yaw['qz']) 484 | rgbde_yaw_alpha,rgbde_yaw_beta,rgbde_yaw_gamma = quaternion_to_euler_angle(rgbde_yaw['qw'], rgbde_yaw['qx'], rgbde_yaw['qy'], rgbde_yaw['qz']) 485 | gt_yaw_alpha_new,gt_yaw_beta_new,gt_yaw_gamma_new = quaternion_to_euler_angle(gt_yaw_new['qw'], gt_yaw_new['qx'], gt_yaw_new['qy'], gt_yaw_new['qz']) 486 | gt_yaw_alpha_old,gt_yaw_beta_old,gt_yaw_gamma_old = quaternion_to_euler_angle(gt_yaw_old['qw'], gt_yaw_old['qx'], gt_yaw_old['qy'], gt_yaw_old['qz']) 487 | 488 | edopt_yaw_alpha_cleaned = cleanEuler(edopt_yaw_alpha,0) 489 | edopt_yaw_beta_cleaned = cleanEuler(edopt_yaw_beta,1) 490 | edopt_yaw_gamma_cleaned = cleanEuler(edopt_yaw_gamma,2) 491 | 492 | rgbde_yaw_alpha_cleaned = cleanEuler(rgbde_yaw_alpha,0) 493 | rgbde_yaw_beta_cleaned = cleanEuler(rgbde_yaw_beta,1) 494 | rgbde_yaw_gamma_cleaned = cleanEuler(rgbde_yaw_gamma,2) 495 | 496 | gt_yaw_alpha_cleaned_new = cleanEuler(gt_yaw_alpha_new,0) 497 | gt_yaw_beta_cleaned_new = cleanEuler(gt_yaw_beta_new,1) 498 | gt_yaw_gamma_cleaned_new = cleanEuler(gt_yaw_gamma_new,2) 499 | 500 | edopt_6dof_alpha,edopt_6dof_beta,edopt_6dof_gamma = quaternion_to_euler_angle(edopt_6dof['qw'], edopt_6dof['qx'], edopt_6dof['qy'], 
edopt_6dof['qz'])
rgbde_6dof_alpha,rgbde_6dof_beta,rgbde_6dof_gamma = quaternion_to_euler_angle(rgbde_6dof['qw'], rgbde_6dof['qx'], rgbde_6dof['qy'], rgbde_6dof['qz'])
rgbde_6dof_alpha_5,rgbde_6dof_beta_5,rgbde_6dof_gamma_5 = quaternion_to_euler_angle(rgbde_6dof_5['qw'], rgbde_6dof_5['qx'], rgbde_6dof_5['qy'], rgbde_6dof_5['qz'])
gt_6dof_alpha,gt_6dof_beta,gt_6dof_gamma = quaternion_to_euler_angle(gt_6dof['qw'], gt_6dof['qx'], gt_6dof['qy'], gt_6dof['qz'])

# 6-DoF Euler curves are unwrapped with cleanEuler2 (nearest-turn selection)
# rather than the threshold-based cleanEuler used for single-axis sequences.
edopt_6dof_alpha_cleaned = cleanEuler2(edopt_6dof_alpha)
edopt_6dof_beta_cleaned = cleanEuler2(edopt_6dof_beta)
edopt_6dof_gamma_cleaned = cleanEuler2(edopt_6dof_gamma)

rgbde_6dof_alpha_cleaned = cleanEuler2(rgbde_6dof_alpha)
rgbde_6dof_beta_cleaned = cleanEuler2(rgbde_6dof_beta)
rgbde_6dof_gamma_cleaned = cleanEuler2(rgbde_6dof_gamma)

rgbde_6dof_alpha_cleaned_5 = cleanEuler2(rgbde_6dof_alpha_5)
rgbde_6dof_beta_cleaned_5 = cleanEuler2(rgbde_6dof_beta_5)
rgbde_6dof_gamma_cleaned_5 = cleanEuler2(rgbde_6dof_gamma_5)

gt_6dof_alpha_cleaned = cleanEuler2(gt_6dof_alpha)
gt_6dof_beta_cleaned = cleanEuler2(gt_6dof_beta)
gt_6dof_gamma_cleaned = cleanEuler2(gt_6dof_gamma)

# Plot transparencies: GT slightly faded so overlaid estimates stay visible.
overlapping0 = 0.8
overlapping1 = 1
overlapping2 = 1

# Figure 1: 6-DoF position (x, y, z) — GT vs RGB-D-E (60 Hz / 5 Hz) vs EDOPT.
plt.figure(figsize=(18,6))
plt.plot(gt_6dof['t'], gt_6dof['x'], color='k', lw=4, alpha=overlapping0, ls='-', label='ground truth')
plt.plot(gt_6dof['t'], gt_6dof['y'], color='k', lw=4, alpha=overlapping0, ls='-')
plt.plot(gt_6dof['t'], gt_6dof['z'], color='k', lw=4, alpha=overlapping0, ls='-')

plt.plot(rgbde_time_6dof, rgbde_6dof['x'], color=color_z, lw=2, alpha=overlapping2, label='RGB-D-E 60 Hz')
plt.plot(rgbde_time_6dof, rgbde_6dof['y'], color=color_z, lw=2, alpha=overlapping2)
plt.plot(rgbde_time_6dof, rgbde_6dof['z'], color=color_z, lw=2, alpha=overlapping2)

plt.plot(rgbde_time_6dof_5, rgbde_6dof_5['x'], color=color_blue, lw=2, alpha=overlapping2, label='RGB-D-E 5 Hz')
plt.plot(rgbde_time_6dof_5, rgbde_6dof_5['y'], color=color_blue, lw=2, alpha=overlapping2)
plt.plot(rgbde_time_6dof_5, rgbde_6dof_5['z'], color=color_blue, lw=2, alpha=overlapping2)

plt.plot(edopt_6dof['t'],edopt_6dof['x'], color=color_y, lw=3, alpha=overlapping1, ls=':', label='EDOPT')
plt.plot(edopt_6dof['t'],edopt_6dof['y'], color=color_y, lw=3, alpha=overlapping1, ls=':')
plt.plot(edopt_6dof['t'],edopt_6dof['z'], color=color_y, lw=3, alpha=overlapping1, ls=':')

# plt.xlabel("Time [s]")
plt.ylabel("Position [m]")

plt.xticks([])

plt.legend(bbox_to_anchor=(0.1, 1.02, 0, 0.2), loc='lower left', ncol=4)

# Axis-letter annotations placed just left of the plotted curves.
plt.text(-0.3, 0.75, 'z', fontsize = 23)
plt.text(-0.3, -0.05, 'y', fontsize = 23)
plt.text(-0.3, -0.25, 'x', fontsize = 23)

plt.show()

# Figure 2: 6-DoF rotation (alpha, beta, gamma) for the same runs.
plt.figure(figsize=(18,6))

plt.plot(gt_6dof['t'], gt_6dof_alpha_cleaned, color='k', lw=4, alpha=overlapping0, ls='-', label='ground truth')
plt.plot(gt_6dof['t'], gt_6dof_beta_cleaned, color='k', lw=4, alpha=overlapping0, ls='-')
plt.plot(gt_6dof['t'], gt_6dof_gamma_cleaned, color='k', lw=4, alpha=overlapping0, ls='-')

plt.plot(rgbde_time_6dof, rgbde_6dof_alpha_cleaned, color=color_z, lw=2, alpha=overlapping2, label='RGB-D-E 60 Hz')
plt.plot(rgbde_time_6dof, rgbde_6dof_beta_cleaned, color=color_z, lw=2, alpha=overlapping2)
plt.plot(rgbde_time_6dof, rgbde_6dof_gamma_cleaned, color=color_z, lw=2, alpha=overlapping2)

plt.plot(rgbde_time_6dof_5, rgbde_6dof_alpha_cleaned_5, color=color_blue, lw=2, alpha=overlapping2, label='RGB-D-E 5 Hz')
plt.plot(rgbde_time_6dof_5, rgbde_6dof_beta_cleaned_5, color=color_blue, lw=2, alpha=overlapping2)
plt.plot(rgbde_time_6dof_5, rgbde_6dof_gamma_cleaned_5, color=color_blue, lw=2, alpha=overlapping2)

plt.plot(edopt_6dof['t'],edopt_6dof_alpha_cleaned, color=color_y, lw=2, alpha=overlapping1, ls=':', label='EDOPT')
plt.plot(edopt_6dof['t'],edopt_6dof_beta_cleaned, color=color_y, lw=2, alpha=overlapping1, ls=':')
plt.plot(edopt_6dof['t'],edopt_6dof_gamma_cleaned, color=color_y, lw=2, alpha=overlapping1, ls=':')

plt.text(-0.3, 140, '$\\gamma$', fontsize = 23)
plt.text(-0.3, -40, '$\\alpha$', fontsize = 23)
plt.text(-0.3, -80, '$\\beta$', fontsize = 23)

plt.xlabel("Time [s]")
plt.ylabel("Rotation [deg]")

plt.show()

# Combined figure: position (left) and rotation (right) side by side.
fig_multi, axs = plt.subplots(1,2)
fig_multi.set_size_inches(18, 6)
axs[0].plot(gt_6dof['t'], gt_6dof['x'], color='k', lw=4, alpha=overlapping0, ls='-', label='ground truth')
axs[0].plot(gt_6dof['t'], gt_6dof['y'], color='k', lw=4, alpha=overlapping0, ls='-')
axs[0].plot(gt_6dof['t'], gt_6dof['z'], color='k', lw=4, alpha=overlapping0, ls='-')

axs[0].plot(rgbde_time_6dof, rgbde_6dof['x'], color=color_z, lw=2, alpha=overlapping2, label='RGB-D-E 60 Hz')
axs[0].plot(rgbde_time_6dof, rgbde_6dof['y'], color=color_z, lw=2, alpha=overlapping2)
axs[0].plot(rgbde_time_6dof, rgbde_6dof['z'], color=color_z, lw=2, alpha=overlapping2)

axs[0].plot(rgbde_time_6dof_5, rgbde_6dof_5['x'], color=color_blue, lw=2, alpha=overlapping2, label='RGB-D-E 5 Hz')
axs[0].plot(rgbde_time_6dof_5, rgbde_6dof_5['y'], color=color_blue, lw=2, alpha=overlapping2)
axs[0].plot(rgbde_time_6dof_5, rgbde_6dof_5['z'], color=color_blue, lw=2, alpha=overlapping2)

axs[0].plot(edopt_6dof['t'],edopt_6dof['x'], color=color_y, lw=3, alpha=overlapping1, ls=':', label='EDOPT')
axs[0].plot(edopt_6dof['t'],edopt_6dof['y'], color=color_y, lw=3, alpha=overlapping1, ls=':')
axs[0].plot(edopt_6dof['t'],edopt_6dof['z'], color=color_y, lw=3, alpha=overlapping1, ls=':')

axs[1].plot(gt_6dof['t'], gt_6dof_alpha_cleaned, color='k', lw=4, alpha=overlapping0, ls='-', label='ground truth')
axs[1].plot(gt_6dof['t'], gt_6dof_beta_cleaned, color='k', lw=4, alpha=overlapping0, ls='-')
axs[1].plot(gt_6dof['t'], gt_6dof_gamma_cleaned, color='k', lw=4, alpha=overlapping0, ls='-')

axs[1].plot(rgbde_time_6dof, rgbde_6dof_alpha_cleaned, color=color_z, lw=2, alpha=overlapping2, label='RGB-D-E 60 Hz')
axs[1].plot(rgbde_time_6dof, rgbde_6dof_beta_cleaned, color=color_z, lw=2, alpha=overlapping2)
axs[1].plot(rgbde_time_6dof, rgbde_6dof_gamma_cleaned, color=color_z, lw=2, alpha=overlapping2)

axs[1].plot(rgbde_time_6dof_5, rgbde_6dof_alpha_cleaned_5, color=color_blue, lw=2, alpha=overlapping2, label='RGB-D-E 5 Hz')
axs[1].plot(rgbde_time_6dof_5, rgbde_6dof_beta_cleaned_5, color=color_blue, lw=2, alpha=overlapping2)
axs[1].plot(rgbde_time_6dof_5, rgbde_6dof_gamma_cleaned_5, color=color_blue, lw=2, alpha=overlapping2)

axs[1].plot(edopt_6dof['t'],edopt_6dof_alpha_cleaned, color=color_y, lw=2, alpha=overlapping1, ls=':', label='EDOPT')
axs[1].plot(edopt_6dof['t'],edopt_6dof_beta_cleaned, color=color_y, lw=2, alpha=overlapping1, ls=':')
axs[1].plot(edopt_6dof['t'],edopt_6dof_gamma_cleaned, color=color_y, lw=2, alpha=overlapping1, ls=':')

axs[0].set(xlabel='Time [s]', ylabel='Position [m]')
axs[1].set(xlabel='Time [s]', ylabel='Rotation [deg]')

axs[0].legend(bbox_to_anchor=(0.16, 1.02, 0, 0.2), loc='lower left', ncol=4)

plt.show()

# Disabled per-run summary grid (kept for reference; note it references the
# pre-split gt_trans_* names, so it would need updating before re-enabling).
# fig_summary, axs = plt.subplots(4,3)
# fig_summary.set_size_inches(18.5, 9.5)
# axs[0,0].plot(edopt_trans_x['t'], edopt_trans_x['x'], color=color_x, lw=4, label='x')
# axs[0,0].plot(edopt_trans_x['t'], edopt_trans_x['y'], color=color_y, lw=4, label='y')
# axs[0,0].plot(edopt_trans_x['t'], edopt_trans_x['z'], color=color_z, lw=4, label='z')
# axs[0,0].plot(gt_trans_x['t'], gt_trans_x['x'], color='k', lw=2, ls='--')
# axs[0,0].plot(gt_trans_x['t'], gt_trans_x['y'], color='k', lw=2, ls='--')
# axs[0,0].plot(gt_trans_x['t'], gt_trans_x['z'], color='k', lw=2, ls='--')
# axs[0,0].plot(rgbde_time_trans_x, rgbde_trans_x['x'], color=color_x, lw=12, alpha=overlapping2)
# axs[0,0].plot(rgbde_time_trans_x, rgbde_trans_x['y'], color=color_y, lw=12, alpha=overlapping2)
# axs[0,0].plot(rgbde_time_trans_x, rgbde_trans_x['z'], color=color_z, lw=12, alpha=overlapping2)
# axs[0,0].spines['top'].set_visible(False)
# axs[0,0].spines['right'].set_visible(False)
# # axs[0,0].spines['bottom'].set_visible(False)
# # axs[0,0].spines['left'].set_visible(False)
# axs[0,0].text(.5,.9,'D1',horizontalalignment='center',transform=axs[0,0].transAxes)
# axs[0,1].plot(edopt_trans_y['t'], edopt_trans_y['x'], color=color_x, lw=4, label='x')
# axs[0,1].plot(edopt_trans_y['t'], edopt_trans_y['y'], color=color_y, lw=4, label='y')
# axs[0,1].plot(edopt_trans_y['t'], edopt_trans_y['z'], color=color_z, lw=4, label='z')
# axs[0,1].plot(gt_trans_y['t'], gt_trans_y['x'], color='k', lw=2, ls='--')
# axs[0,1].plot(gt_trans_y['t'], gt_trans_y['y'], color='k', lw=2, ls='--')
# axs[0,1].plot(gt_trans_y['t'], gt_trans_y['z'], color='k', lw=2, ls='--')
# axs[0,1].plot(rgbde_time_trans_y, rgbde_trans_y['x'], color=color_x, lw=12, alpha=overlapping2)
# axs[0,1].plot(rgbde_time_trans_y, rgbde_trans_y['y'], color=color_y, lw=12, alpha=overlapping2)
# axs[0,1].plot(rgbde_time_trans_y, rgbde_trans_y['z'], color=color_z, lw=12, alpha=overlapping2)
# axs[0,1].spines['top'].set_visible(False)
# axs[0,1].spines['right'].set_visible(False)
# # axs[0,1].spines['bottom'].set_visible(False)
# axs[0,1].spines['left'].set_visible(False)
# axs[0,1].text(.5,.9,'D2',horizontalalignment='center',transform=axs[0,1].transAxes)
# axs[0,2].plot(edopt_trans_z['t'], edopt_trans_z['x'], color=color_x, lw=4, label='x')
# axs[0,2].plot(edopt_trans_z['t'], edopt_trans_z['y'], color=color_y, lw=4, label='y')
# axs[0,2].plot(edopt_trans_z['t'], edopt_trans_z['z'], color=color_z, lw=4, label='z')
# axs[0,2].plot(gt_trans_z['t'], gt_trans_z['x'], color='k', lw=2, ls='--')
# axs[0,2].plot(gt_trans_z['t'], gt_trans_z['y'], color='k', lw=2, ls='--')
# axs[0,2].plot(gt_trans_z['t'], gt_trans_z['z'], color='k', lw=2, ls='--')
# axs[0,2].plot(rgbde_time_trans_z, rgbde_trans_z['x'], color=color_x, lw=12, alpha=overlapping2)
# axs[0,2].plot(rgbde_time_trans_z, rgbde_trans_z['y'], color=color_y, lw=12, alpha=overlapping2)
# axs[0,2].plot(rgbde_time_trans_z, rgbde_trans_z['z'], color=color_z, lw=12, alpha=overlapping2)
# axs[0,2].spines['top'].set_visible(False)
# axs[0,2].spines['right'].set_visible(False)
# # axs[0,2].spines['bottom'].set_visible(False)
# axs[0,2].spines['left'].set_visible(False)
# axs[0,2].text(.5,.9,'D3',horizontalalignment='center',transform=axs[0,2].transAxes)
# axs[2,2].plot(edopt_roll['t'], edopt_roll['x'], color=color_x, lw=4, label='x')
# axs[2,2].plot(edopt_roll['t'], edopt_roll['y'], color=color_y, lw=4, label='y')
# axs[2,2].plot(edopt_roll['t'], edopt_roll['z'], color=color_z, lw=4, label='z')
# axs[2,2].plot(gt_roll['t'], gt_roll['x'], color='k', lw=2, ls='--')
# axs[2,2].plot(gt_roll['t'], gt_roll['y'], color='k', lw=2, ls='--')
# axs[2,2].plot(gt_roll['t'], gt_roll['z'], color='k', lw=2, ls='--')
# axs[2,2].plot(rgbde_time_roll, rgbde_roll['x'], color=color_x, lw=12, alpha=overlapping2)
# axs[2,2].plot(rgbde_time_roll, rgbde_roll['y'], color=color_y, lw=12, alpha=overlapping2)
# axs[2,2].plot(rgbde_time_roll, rgbde_roll['z'], color=color_z, lw=12, alpha=overlapping2)
# axs[2,2].spines['top'].set_visible(False)
# axs[2,2].spines['right'].set_visible(False)
# # axs[2,2].spines['bottom'].set_visible(False)
679 | # axs[2,2].spines['left'].set_visible(False) 680 | # axs[2,2].text(.5,.9,'D6',horizontalalignment='center',transform=axs[2,2].transAxes) 681 | # axs[2,0].plot(edopt_pitch['t'], edopt_pitch['x'], color=color_x, lw=4, label='x') 682 | # axs[2,0].plot(edopt_pitch['t'], edopt_pitch['y'], color=color_y, lw=4, label='y') 683 | # axs[2,0].plot(edopt_pitch['t'], edopt_pitch['z'], color=color_z, lw=4, label='z') 684 | # axs[2,0].plot(gt_pitch['t'], gt_pitch['x'], color='k', lw=2, ls='--') 685 | # axs[2,0].plot(gt_pitch['t'], gt_pitch['y'], color='k', lw=2, ls='--') 686 | # axs[2,0].plot(gt_pitch['t'], gt_pitch['z'], color='k', lw=2, ls='--') 687 | # axs[2,0].plot(rgbde_time_pitch, rgbde_pitch['x'], color=color_x, lw=12, alpha=overlapping2) 688 | # axs[2,0].plot(rgbde_time_pitch, rgbde_pitch['y'], color=color_y, lw=12, alpha=overlapping2) 689 | # axs[2,0].plot(rgbde_time_pitch, rgbde_pitch['z'], color=color_z, lw=12, alpha=overlapping2) 690 | # axs[2,0].spines['top'].set_visible(False) 691 | # axs[2,0].spines['right'].set_visible(False) 692 | # # axs[2,0].spines['bottom'].set_visible(False) 693 | # axs[2,0].text(.5,.9,'D4',horizontalalignment='center',transform=axs[2,0].transAxes) 694 | # axs[2,1].plot(edopt_yaw['t'], edopt_yaw['x'], color=color_x, lw=4, label='x') 695 | # axs[2,1].plot(edopt_yaw['t'], edopt_yaw['y'], color=color_y, lw=4, label='y') 696 | # axs[2,1].plot(edopt_yaw['t'], edopt_yaw['z'], color=color_z, lw=4, label='z') 697 | # axs[2,1].plot(gt_yaw['t'], gt_yaw['x'], color='k', lw=2, ls='--') 698 | # axs[2,1].plot(gt_yaw['t'], gt_yaw['y'], color='k', lw=2, ls='--') 699 | # axs[2,1].plot(gt_yaw['t'], gt_yaw['z'], color='k', lw=2, ls='--') 700 | # axs[2,1].plot(rgbde_time_yaw, rgbde_yaw['x'], color=color_x, lw=12, alpha=overlapping2) 701 | # axs[2,1].plot(rgbde_time_yaw, rgbde_yaw['y'], color=color_y, lw=12, alpha=overlapping2) 702 | # axs[2,1].plot(rgbde_time_yaw, rgbde_yaw['z'], color=color_z, lw=12, alpha=overlapping2) 703 | # 
axs[2,1].spines['top'].set_visible(False) 704 | # axs[2,1].spines['right'].set_visible(False) 705 | # # axs[2,1].spines['bottom'].set_visible(False) 706 | # axs[2,1].spines['left'].set_visible(False) 707 | # axs[2,1].text(.5,.9,'D5',horizontalalignment='center',transform=axs[2,1].transAxes) 708 | # axs[1,0].plot(edopt_trans_x['t'], edopt_trans_x_alpha_cleaned, color=color_x, lw=4, label='qx') 709 | # axs[1,0].plot(edopt_trans_x['t'], edopt_trans_x_beta_cleaned, color=color_y, lw=4, label='qy') 710 | # axs[1,0].plot(edopt_trans_x['t'], edopt_trans_x_gamma_cleaned, color=color_z, lw=4, label='qz') 711 | # axs[1,0].plot(gt_trans_x['t'], gt_trans_x_alpha_cleaned, color='k', lw=1, ls='--') 712 | # axs[1,0].plot(gt_trans_x['t'], gt_trans_x_beta_cleaned, color='k', lw=1, ls='--') 713 | # axs[1,0].plot(gt_trans_x['t'], gt_trans_x_gamma_cleaned, color='k', lw=1, ls='--') 714 | # axs[1,0].plot(rgbde_time_trans_x, rgbde_trans_x_alpha_cleaned, color=color_x, lw=12, alpha=overlapping2) 715 | # axs[1,0].plot(rgbde_time_trans_x, rgbde_trans_x_beta_cleaned, color=color_y, lw=12, alpha=overlapping2) 716 | # axs[1,0].plot(rgbde_time_trans_x, rgbde_trans_x_gamma_cleaned, color=color_z, lw=12, alpha=overlapping2) 717 | # axs[1,0].spines['top'].set_visible(False) 718 | # axs[1,0].spines['right'].set_visible(False) 719 | # # axs[1,0].spines['bottom'].set_visible(False) 720 | # axs[1,1].plot(edopt_trans_y['t'], edopt_trans_y_alpha_cleaned, color=color_x, lw=4, label='qx') 721 | # axs[1,1].plot(edopt_trans_y['t'], edopt_trans_y_beta_cleaned, color=color_y, lw=4, label='qy') 722 | # axs[1,1].plot(edopt_trans_y['t'], edopt_trans_y_gamma_cleaned, color=color_z, lw=4, label='qz') 723 | # axs[1,1].plot(gt_trans_y['t'], gt_trans_y_alpha_cleaned, color='k', lw=2, ls='--') 724 | # axs[1,1].plot(gt_trans_y['t'], gt_trans_y_beta_cleaned, color='k', lw=2, ls='--') 725 | # axs[1,1].plot(gt_trans_y['t'], gt_trans_y_gamma_cleaned, color='k', lw=2, ls='--') 726 | # axs[1,1].plot(rgbde_time_trans_y, 
rgbde_trans_y_alpha_cleaned, color=color_x, lw=12, alpha=overlapping2) 727 | # axs[1,1].plot(rgbde_time_trans_y, rgbde_trans_y_beta_cleaned, color=color_y, lw=12, alpha=overlapping2) 728 | # axs[1,1].plot(rgbde_time_trans_y, rgbde_trans_y_gamma_cleaned, color=color_z, lw=12, alpha=overlapping2) 729 | # axs[1,1].spines['top'].set_visible(False) 730 | # axs[1,1].spines['right'].set_visible(False) 731 | # # axs[1,1].spines['bottom'].set_visible(False) 732 | # axs[1,1].spines['left'].set_visible(False) 733 | # axs[1,2].plot(edopt_trans_z['t'], edopt_trans_z_alpha_cleaned, color=color_x, lw=4, label='qx') 734 | # axs[1,2].plot(edopt_trans_z['t'], edopt_trans_z_beta_cleaned, color=color_y, lw=4, label='qy') 735 | # axs[1,2].plot(edopt_trans_z['t'], edopt_trans_z_gamma_cleaned, color=color_z, lw=4, label='qz') 736 | # axs[1,2].plot(gt_trans_z['t'], gt_trans_z_alpha_cleaned, color='k', lw=2, ls='--') 737 | # axs[1,2].plot(gt_trans_z['t'], gt_trans_z_beta_cleaned, color='k', lw=2, ls='--') 738 | # axs[1,2].plot(gt_trans_z['t'], gt_trans_z_gamma_cleaned, color='k', lw=2, ls='--') 739 | # axs[1,2].plot(rgbde_time_trans_z, rgbde_trans_z_alpha_cleaned, color=color_x, lw=12, alpha=overlapping2) 740 | # axs[1,2].plot(rgbde_time_trans_z, rgbde_trans_z_beta_cleaned, color=color_y, lw=12, alpha=overlapping2) 741 | # axs[1,2].plot(rgbde_time_trans_z, rgbde_trans_z_gamma_cleaned, color=color_z, lw=12, alpha=overlapping2) 742 | # axs[1,2].spines['top'].set_visible(False) 743 | # axs[1,2].spines['right'].set_visible(False) 744 | # # axs[1,2].spines['bottom'].set_visible(False) 745 | # axs[1,2].spines['left'].set_visible(False) 746 | # axs[3,2].plot(edopt_roll['t'], edopt_roll_alpha_cleaned, color=color_x, lw=4, label='qx') 747 | # axs[3,2].plot(edopt_roll['t'], edopt_roll_beta_cleaned, color=color_y, lw=4, label='qy') 748 | # axs[3,2].plot(edopt_roll['t'], edopt_roll_gamma_cleaned, color=color_z, lw=4, label='qz') 749 | # axs[3,2].plot(gt_roll['t'], gt_roll_alpha_cleaned, color='k', 
lw=2, ls='--') 750 | # axs[3,2].plot(gt_roll['t'], gt_roll_beta_cleaned, color='k', lw=2, ls='--') 751 | # axs[3,2].plot(gt_roll['t'], gt_roll_gamma_cleaned, color='k', lw=2, ls='--') 752 | # axs[3,2].plot(rgbde_time_roll, rgbde_roll_alpha_cleaned, color=color_x, lw=12, alpha=overlapping2) 753 | # axs[3,2].plot(rgbde_time_roll, rgbde_roll_beta_cleaned, color=color_y, lw=12, alpha=overlapping2) 754 | # axs[3,2].plot(rgbde_time_roll, rgbde_roll_gamma_cleaned, color=color_z, lw=12, alpha=overlapping2) 755 | # axs[3,2].spines['top'].set_visible(False) 756 | # axs[3,2].spines['right'].set_visible(False) 757 | # # axs[3,2].spines['bottom'].set_visible(False) 758 | # axs[3,2].spines['left'].set_visible(False) 759 | # axs[3,0].plot(edopt_pitch['t'], edopt_pitch_alpha_cleaned, color=color_x, lw=4) 760 | # axs[3,0].plot(edopt_pitch['t'], edopt_pitch_beta_cleaned, color=color_y, lw=4) 761 | # axs[3,0].plot(edopt_pitch['t'], edopt_pitch_gamma_cleaned, color=color_z, lw=4) 762 | # axs[3,0].plot(gt_pitch['t'], gt_pitch_alpha_cleaned, color='k', lw=2, ls='--') 763 | # axs[3,0].plot(gt_pitch['t'], gt_pitch_beta_cleaned, color='k', lw=2, ls='--') 764 | # axs[3,0].plot(gt_pitch['t'], gt_pitch_gamma_cleaned, color='k', lw=2, ls='--') 765 | # axs[3,0].plot(rgbde_time_pitch, rgbde_pitch_alpha_cleaned, color=color_x, lw=12, alpha=overlapping2) 766 | # axs[3,0].plot(rgbde_time_pitch, rgbde_pitch_beta_cleaned, color=color_y, lw=12, alpha=overlapping2) 767 | # axs[3,0].plot(rgbde_time_pitch, rgbde_pitch_gamma_cleaned, color=color_z, lw=12, alpha=overlapping2) 768 | # axs[3,0].spines['top'].set_visible(False) 769 | # axs[3,0].spines['right'].set_visible(False) 770 | # # axs[3,0].spines['bottom'].set_visible(False) 771 | # axs[3,1].plot(edopt_yaw['t'], edopt_yaw_alpha_cleaned, color=color_x, label="x / "+r'$\alpha$'+" (pitch)", lw=4) 772 | # axs[3,1].plot(edopt_yaw['t'], edopt_yaw_beta_cleaned, color=color_y, label="y / "+r'$\beta$'+" (yaw)", lw=4) 773 | # axs[3,1].plot(edopt_yaw['t'], 
edopt_yaw_gamma_cleaned, color=color_z, label="z / "+r'$\gamma$'+" (roll)", lw=4) 774 | # axs[3,1].plot(gt_yaw['t'], gt_yaw_alpha_cleaned, color='k', lw=2, ls='--') 775 | # axs[3,1].plot(gt_yaw['t'], gt_yaw_beta_cleaned, color='k', lw=2, ls='--') 776 | # axs[3,1].plot(gt_yaw['t'], gt_yaw_gamma_cleaned, color='k', lw=2, ls='--') 777 | # axs[3,1].plot(rgbde_time_yaw, rgbde_yaw_alpha_cleaned, color=color_x, lw=12, alpha=overlapping2) 778 | # axs[3,1].plot(rgbde_time_yaw, rgbde_yaw_beta_cleaned, color=color_y, lw=12, alpha=overlapping2) 779 | # axs[3,1].plot(rgbde_time_yaw, rgbde_yaw_gamma_cleaned, color=color_z, lw=12, alpha=overlapping2) 780 | # axs[3,1].spines['top'].set_visible(False) 781 | # axs[3,1].spines['right'].set_visible(False) 782 | # # axs[3,1].spines['bottom'].set_visible(False) 783 | # axs[3,1].spines['left'].set_visible(False) 784 | # for i in range(0, 4): 785 | # for j in range(0, 3): 786 | # axs[i,j].set_xlim([-2, 50]) 787 | # axs[0,0].set_ylim(-0.3, 1.2) 788 | # axs[0,1].set_ylim(-0.3, 1.2) 789 | # axs[0,2].set_ylim(-0.3, 1.2) 790 | # axs[2,0].set_ylim(-0.3, 1.2) 791 | # axs[2,1].set_ylim(-0.3, 1.2) 792 | # axs[2,2].set_ylim(-0.3, 1.2) 793 | # axs[1,0].set_ylim(-210, 300) 794 | # axs[1,1].set_ylim(-210, 300) 795 | # axs[1,2].set_ylim(-210, 300) 796 | # axs[3,0].set_ylim(-210, 300) 797 | # axs[3,1].set_ylim(-210, 300) 798 | # axs[3,2].set_ylim(-210, 300) 799 | # axs[0,0].set_xticks([]) 800 | # axs[1,0].set_xticks([]) 801 | # axs[2,0].set_xticks([]) 802 | # axs[0,1].set_xticks([]) 803 | # axs[1,1].set_xticks([]) 804 | # axs[2,1].set_xticks([]) 805 | # axs[0,2].set_xticks([]) 806 | # axs[1,2].set_xticks([]) 807 | # axs[2,2].set_xticks([]) 808 | # axs[0,0].set_xticklabels([]) 809 | # axs[1,0].set_xticklabels([]) 810 | # axs[2,0].set_xticklabels([]) 811 | # axs[0,1].set_xticklabels([]) 812 | # axs[1,1].set_xticklabels([]) 813 | # axs[2,1].set_xticklabels([]) 814 | # axs[0,2].set_xticklabels([]) 815 | # axs[1,2].set_xticklabels([]) 816 | # 
axs[2,2].set_xticklabels([]) 817 | # axs[0,1].set_yticklabels([]) 818 | # axs[0,2].set_yticklabels([]) 819 | # axs[1,1].set_yticklabels([]) 820 | # axs[1,2].set_yticklabels([]) 821 | # axs[2,1].set_yticklabels([]) 822 | # axs[2,2].set_yticklabels([]) 823 | # axs[3,1].set_yticklabels([]) 824 | # axs[3,2].set_yticklabels([]) 825 | # axs[0,0].set(ylabel='Pos. [m]') 826 | # axs[1,0].set(ylabel='Rot. [deg]') 827 | # axs[2,0].set(ylabel='Pos. [m]') 828 | # axs[3,0].set(xlabel='Time [s]', ylabel='Rot.[deg]') 829 | # axs[3,1].set(xlabel='Time [s]') 830 | # axs[3,2].set(xlabel='Time [s]') 831 | # axs[3,1].legend(loc='upper center', bbox_to_anchor=(0.5, -0.32), 832 | # fancybox=True, shadow=True, ncol=3) 833 | # fig_summary.align_ylabels(axs[:, 0]) 834 | # line1 = plt.Line2D((.385,.385),(.1,.9), color="k", linewidth=2) 835 | # line2 = plt.Line2D((.65,.65),(.1,.9), color="k", linewidth=2) 836 | # line3 = plt.Line2D((.125,.9),(.5,.5), color="k", linewidth=2) 837 | # fig_summary.add_artist(line1) 838 | # fig_summary.add_artist(line2) 839 | # fig_summary.add_artist(line3) 840 | # fig_summary.subplots_adjust(wspace=0.05, hspace=0.06) 841 | # plt.show() 842 | 843 | # fig1, (ax1, ax2) = plt.subplots(2) 844 | # ax1.plot(edopt_trans_x['t'], edopt_trans_x['x'], color=color_x, label='x') 845 | # ax1.plot(edopt_trans_x['t'], edopt_trans_x['y'], color=color_y, label='y') 846 | # ax1.plot(edopt_trans_x['t'], edopt_trans_x['z'], color=color_z, label='z') 847 | # ax1.plot(gt_trans_x['t'], gt_trans_x['x'], color=color_x, ls='--') 848 | # ax1.plot(gt_trans_x['t'], gt_trans_x['y'], color=color_y, ls='--') 849 | # ax1.plot(gt_trans_x['t'], gt_trans_x['z'], color=color_z, ls='--') 850 | # ax1.plot(rgbde_time_trans_x, rgbde_trans_x['x'], color=color_x, ls='-.') 851 | # ax1.plot(rgbde_time_trans_x, rgbde_trans_x['y'], color=color_y, ls='-.') 852 | # ax1.plot(rgbde_time_trans_x, rgbde_trans_x['z'], color=color_z, ls='-.') 853 | # ax2.plot(edopt_trans_x['t'], edopt_trans_x_alpha, color=color_x, 
label='roll') 854 | # ax2.plot(edopt_trans_x['t'], edopt_trans_x_beta, color=color_y, label='pitch') 855 | # ax2.plot(edopt_trans_x['t'], edopt_trans_x_gamma, color=color_z, label='yaw') 856 | # ax2.plot(rgbde_time_trans_x, rgbde_trans_x_alpha, color=color_x, ls='-.') 857 | # ax2.plot(rgbde_time_trans_x, rgbde_trans_x_beta, color=color_y, ls='-.') 858 | # ax2.plot(rgbde_time_trans_x, rgbde_trans_x_gamma, color=color_z, ls='-.') 859 | # ax2.plot(gt_trans_x['t'], gt_trans_x_alpha, color=color_x, ls='--') 860 | # ax2.plot(gt_trans_x['t'], gt_trans_x_beta, color=color_y, ls='--') 861 | # ax2.plot(gt_trans_x['t'], gt_trans_x_gamma, color=color_z, ls='--') 862 | # ax1.legend(loc='upper right') 863 | # ax2.legend(loc='upper right') 864 | # ax1.set_xticks([]) 865 | # ax1.set(ylabel='Position[m]') 866 | # ax2.set(xlabel='Time [s]', ylabel='Quaternions') 867 | # plt.show() 868 | 869 | # fig2, (ax1, ax2) = plt.subplots(2) 870 | # ax1.plot(edopt_trans_y['t'], edopt_trans_y['x'], color=color_x, label='x') 871 | # ax1.plot(edopt_trans_y['t'], edopt_trans_y['y'], color=color_y, label='y') 872 | # ax1.plot(edopt_trans_y['t'], edopt_trans_y['z'], color=color_z, label='z') 873 | # ax1.plot(gt_trans_y['t'], gt_trans_y['x'], color=color_x, ls='--') 874 | # ax1.plot(gt_trans_y['t'], gt_trans_y['y'], color=color_y, ls='--') 875 | # ax1.plot(gt_trans_y['t'], gt_trans_y['z'], color=color_z, ls='--') 876 | # ax1.plot(rgbde_time_trans_y, rgbde_trans_y['x'], color=color_x, ls='-.') 877 | # ax1.plot(rgbde_time_trans_y, rgbde_trans_y['y'], color=color_y, ls='-.') 878 | # ax1.plot(rgbde_time_trans_y, rgbde_trans_y['z'], color=color_z, ls='-.') 879 | # ax2.plot(edopt_trans_y['t'], edopt_trans_y_alpha, color=color_x, label='roll') 880 | # ax2.plot(edopt_trans_y['t'], edopt_trans_y_beta, color=color_y, label='pitch') 881 | # ax2.plot(edopt_trans_y['t'], edopt_trans_y_gamma, color=color_z, label='yaw') 882 | # ax2.plot(rgbde_time_trans_y, rgbde_trans_y_alpha, color=color_x, ls='-.') 883 | # 
ax2.plot(rgbde_time_trans_y, rgbde_trans_y_beta, color=color_y, ls='-.') 884 | # ax2.plot(rgbde_time_trans_y, rgbde_trans_y_gamma, color=color_z, ls='-.') 885 | # ax2.plot(gt_trans_y['t'], gt_trans_y_alpha, color=color_x, ls='--') 886 | # ax2.plot(gt_trans_y['t'], gt_trans_y_beta, color=color_y, ls='--') 887 | # ax2.plot(gt_trans_y['t'], gt_trans_y_gamma, color=color_z, ls='--') 888 | # ax1.legend(loc='upper right') 889 | # ax2.legend(loc='upper right') 890 | # ax1.set_xticks([]) 891 | # ax1.set(ylabel='Position[m]') 892 | # ax2.set(xlabel='Time [s]', ylabel='Quaternions') 893 | # plt.show() 894 | 895 | # fig3, (ax1, ax2) = plt.subplots(2) 896 | # ax1.plot(edopt_trans_z['t'], edopt_trans_z['x'], 'r', label='x') 897 | # ax1.plot(edopt_trans_z['t'], edopt_trans_z['y'], 'g', label='y') 898 | # ax1.plot(edopt_trans_z['t'], edopt_trans_z['z'], 'b', label='z') 899 | # ax1.plot(gt_trans_z['t'], gt_trans_z['x'], 'r--') 900 | # ax1.plot(gt_trans_z['t'], gt_trans_z['y'], 'g--') 901 | # ax1.plot(gt_trans_z['t'], gt_trans_z['z'], 'b--') 902 | # ax1.plot(rgbde_time_trans_z, rgbde_trans_z['x'], 'r-.') 903 | # ax1.plot(rgbde_time_trans_z, rgbde_trans_z['y'], 'g-.') 904 | # ax1.plot(rgbde_time_trans_z, rgbde_trans_z['z'], 'b-.') 905 | # ax2.plot(edopt_trans_z['t'], edopt_trans_z['qx'], 'k', label='qx') 906 | # ax2.plot(edopt_trans_z['t'], edopt_trans_z['qy'], 'y', label='qy') 907 | # ax2.plot(edopt_trans_z['t'], edopt_trans_z['qz'], 'm', label='qz') 908 | # ax2.plot(edopt_trans_z['t'], edopt_trans_z['qw'], 'c', label='qw') 909 | # ax2.plot(gt_trans_z['t'], gt_trans_z['qx'], 'k--') 910 | # ax2.plot(gt_trans_z['t'], gt_trans_z['qy'], 'y--') 911 | # ax2.plot(gt_trans_z['t'], gt_trans_z['qz'], 'm--') 912 | # ax2.plot(gt_trans_z['t'], gt_trans_z['qw'], 'c--') 913 | # ax2.plot(rgbde_time_trans_z, rgbde_trans_z['qx'], 'k-.') 914 | # ax2.plot(rgbde_time_trans_z, rgbde_trans_z['qy'], 'y-.') 915 | # ax2.plot(rgbde_time_trans_z, rgbde_trans_z['qz'], 'm-.') 916 | # 
ax2.plot(rgbde_time_trans_z, rgbde_trans_z['qw'], 'c-.') 917 | # ax1.legend(loc='upper right') 918 | # ax2.legend(loc='upper right') 919 | # ax1.set_xticks([]) 920 | # ax1.set(ylabel='Position[m]') 921 | # ax2.set(xlabel='Time [s]', ylabel='Quaternions') 922 | # plt.show() 923 | 924 | # fig4, (ax1, ax2) = plt.subplots(2) 925 | # ax1.plot(edopt_roll['t'], edopt_roll['x'], 'r', label='x') 926 | # ax1.plot(edopt_roll['t'], edopt_roll['y'], 'g', label='y') 927 | # ax1.plot(edopt_roll['t'], edopt_roll['z'], 'b', label='z') 928 | # ax1.plot(gt_roll['t'], gt_roll['x'], 'r--') 929 | # ax1.plot(gt_roll['t'], gt_roll['y'], 'g--') 930 | # ax1.plot(gt_roll['t'], gt_roll['z'], 'b--') 931 | # ax1.plot(rgbde_time_roll, rgbde_roll['x'], 'r-.') 932 | # ax1.plot(rgbde_time_roll, rgbde_roll['y'], 'g-.') 933 | # ax1.plot(rgbde_time_roll, rgbde_roll['z'], 'b-.') 934 | # ax2.plot(edopt_roll['t'], -edopt_roll['qx'], 'k', label='qx') 935 | # ax2.plot(edopt_roll['t'], -edopt_roll['qy'], 'y', label='qy') 936 | # ax2.plot(edopt_roll['t'], -edopt_roll['qz'], 'm', label='qz') 937 | # ax2.plot(edopt_roll['t'], -edopt_roll['qw'], 'c', label='qw') 938 | # ax2.plot(gt_roll['t'], gt_roll['qx'], 'k--') 939 | # ax2.plot(gt_roll['t'], gt_roll['qy'], 'y--') 940 | # ax2.plot(gt_roll['t'], gt_roll['qz'], 'm--') 941 | # ax2.plot(gt_roll['t'], gt_roll['qw'], 'c--') 942 | # ax2.plot(rgbde_time_roll, -rgbde_roll['qx'], 'k-.') 943 | # ax2.plot(rgbde_time_roll, rgbde_roll['qy'], 'y-.') 944 | # ax2.plot(rgbde_time_roll, rgbde_roll['qz'], 'm-.') 945 | # ax2.plot(rgbde_time_roll, rgbde_roll['qw'], 'c-.') 946 | # ax1.legend(loc='upper right') 947 | # ax2.legend(loc='upper right') 948 | # ax1.set_xticks([]) 949 | # ax1.set(ylabel='Position[m]') 950 | # ax2.set(xlabel='Time [s]', ylabel='Quaternions') 951 | # plt.show() 952 | 953 | 954 | # fig4bis, (ax1, ax2) = plt.subplots(2) 955 | # # ax1.plot(edopt_roll['t'], edopt_roll['x'], 'r', label='x') 956 | # # ax1.plot(edopt_roll['t'], edopt_roll['y'], 'g', label='y') 957 
| # # ax1.plot(edopt_roll['t'], edopt_roll['z'], 'b', label='z') 958 | # # ax1.plot(gt_roll['t'], gt_roll['x'], 'r--') 959 | # # ax1.plot(gt_roll['t'], gt_roll['y'], 'g--') 960 | # # ax1.plot(gt_roll['t'], gt_roll['z'], 'b--') 961 | # ax1.plot(rgbde_time_roll, rgbde_roll['x'], 'r-.') 962 | # ax1.plot(rgbde_time_roll, rgbde_roll['y'], 'g-.') 963 | # ax1.plot(rgbde_time_roll, rgbde_roll['z'], 'b-.') 964 | # # ax2.plot(edopt_roll['t'], -edopt_roll['qx'], 'k', label='qx') 965 | # # ax2.plot(edopt_roll['t'], -edopt_roll['qy'], 'y', label='qy') 966 | # # ax2.plot(edopt_roll['t'], -edopt_roll['qz'], 'm', label='qz') 967 | # # ax2.plot(edopt_roll['t'], -edopt_roll['qw'], 'c', label='qw') 968 | # # ax2.plot(gt_roll['t'], gt_roll['qx'], 'k--') 969 | # # ax2.plot(gt_roll['t'], gt_roll['qy'], 'y--') 970 | # # ax2.plot(gt_roll['t'], gt_roll['qz'], 'm--') 971 | # # ax2.plot(gt_roll['t'], gt_roll['qw'], 'c--') 972 | # ax2.plot(rgbde_time_roll, -rgbde_roll['qx'], 'k-.') 973 | # ax2.plot(rgbde_time_roll, rgbde_roll['qy'], 'y-.') 974 | # ax2.plot(rgbde_time_roll, rgbde_roll['qz'], 'm-.') 975 | # ax2.plot(rgbde_time_roll, rgbde_roll['qw'], 'c-.') 976 | # ax1.legend(loc='upper right') 977 | # ax2.legend(loc='upper right') 978 | # ax1.set_xticks([]) 979 | # ax1.set(ylabel='Position[m]') 980 | # ax2.set(xlabel='Time [s]', ylabel='Quaternions') 981 | # plt.show() 982 | 983 | # fig5, (ax1, ax2) = plt.subplots(2) 984 | # ax1.plot(edopt_pitch['t'], edopt_pitch['x'], 'r', label='x') 985 | # ax1.plot(edopt_pitch['t'], edopt_pitch['y'], 'g', label='y') 986 | # ax1.plot(edopt_pitch['t'], edopt_pitch['z'], 'b', label='z') 987 | # ax1.plot(gt_pitch['t'], gt_pitch['x'], 'r--') 988 | # ax1.plot(gt_pitch['t'], gt_pitch['y'], 'g--') 989 | # ax1.plot(gt_pitch['t'], gt_pitch['z'], 'b--') 990 | # ax1.plot(rgbde_time_pitch, rgbde_pitch['x'], 'r-.') 991 | # ax1.plot(rgbde_time_pitch, rgbde_pitch['y'], 'g-.') 992 | # ax1.plot(rgbde_time_pitch, rgbde_pitch['z'], 'b-.') 993 | # ax2.plot(edopt_pitch['t'], 
edopt_pitch['qx'], 'k', label='qx') 994 | # ax2.plot(edopt_pitch['t'], edopt_pitch['qy'], 'y', label='qy') 995 | # ax2.plot(edopt_pitch['t'], edopt_pitch['qz'], 'm', label='qz') 996 | # ax2.plot(edopt_pitch['t'], edopt_pitch['qw'], 'c', label='qw') 997 | # ax2.plot(gt_pitch['t'], gt_pitch['qx'], 'k--') 998 | # ax2.plot(gt_pitch['t'], gt_pitch['qy'], 'y--') 999 | # ax2.plot(gt_pitch['t'], gt_pitch['qz'], 'm--') 1000 | # ax2.plot(gt_pitch['t'], gt_pitch['qw'], 'c--') 1001 | # ax2.plot(rgbde_time_pitch, rgbde_pitch['qx'], 'k-.') 1002 | # ax2.plot(rgbde_time_pitch, rgbde_pitch['qy'], 'y-.') 1003 | # ax2.plot(rgbde_time_pitch, rgbde_pitch['qz'], 'm-.') 1004 | # ax2.plot(rgbde_time_pitch, rgbde_pitch['qw'], 'c-.') 1005 | # ax1.legend(loc='upper right') 1006 | # ax2.legend(loc='upper right') 1007 | # ax1.set_xticks([]) 1008 | # ax1.set(ylabel='Position[m]') 1009 | # ax2.set(xlabel='Time [s]', ylabel='Quaternions') 1010 | # plt.show() 1011 | 1012 | # fig6, (ax1, ax2) = plt.subplots(2) 1013 | # ax1.plot(edopt_yaw['t'], edopt_yaw['x'], 'r', label='x') 1014 | # ax1.plot(edopt_yaw['t'], edopt_yaw['y'], 'g', label='y') 1015 | # ax1.plot(edopt_yaw['t'], edopt_yaw['z'], 'b', label='z') 1016 | # ax1.plot(gt_yaw['t'], gt_yaw['x'], 'r--') 1017 | # ax1.plot(gt_yaw['t'], gt_yaw['y'], 'g--') 1018 | # ax1.plot(gt_yaw['t'], gt_yaw['z'], 'b--') 1019 | # # ax1.plot(rgbde_time_yaw, rgbde_yaw['x'], 'r-.') 1020 | # # ax1.plot(rgbde_time_yaw, rgbde_yaw['y'], 'g-.') 1021 | # # ax1.plot(rgbde_time_yaw, rgbde_yaw['z'], 'b-.') 1022 | # ax2.plot(edopt_yaw['t'], -edopt_yaw['qx'], 'k', label='qx') 1023 | # ax2.plot(edopt_yaw['t'], -edopt_yaw['qy'], 'y', label='qy') 1024 | # ax2.plot(edopt_yaw['t'], -edopt_yaw['qz'], 'm', label='qz') 1025 | # ax2.plot(edopt_yaw['t'], -edopt_yaw['qw'], 'c', label='qw') 1026 | # ax2.plot(gt_yaw['t'], gt_yaw['qx'], 'k--') 1027 | # ax2.plot(gt_yaw['t'], gt_yaw['qy'], 'y--') 1028 | # ax2.plot(gt_yaw['t'], gt_yaw['qz'], 'm--') 1029 | # ax2.plot(gt_yaw['t'], gt_yaw['qw'], 
'c--') 1030 | # # ax2.plot(rgbde_time_yaw, rgbde_yaw['qx'], 'k-.') 1031 | # # ax2.plot(rgbde_time_yaw, rgbde_yaw['qy'], 'y-.') 1032 | # # ax2.plot(rgbde_time_yaw, rgbde_yaw['qz'], 'm-.') 1033 | # # ax2.plot(rgbde_time_yaw, rgbde_yaw['qw'], 'c-.') 1034 | # ax1.legend(loc='upper right') 1035 | # ax2.legend(loc='upper right') 1036 | # ax1.set_xticks([]) 1037 | # ax1.set(ylabel='Position[m]') 1038 | # ax2.set(xlabel='Time [s]', ylabel='Quaternions') 1039 | # plt.show() 1040 | 1041 | # gt_max_x_trans_x = np.max(gt_trans_x['x']) 1042 | # gt_min_x_trans_x = np.min(gt_trans_x['x']) 1043 | # distance_x_trans_x = gt_max_x_trans_x - gt_min_x_trans_x 1044 | 1045 | # gt_max_trans_y = np.max(gt_trans_y['y']) 1046 | # gt_min_trans_y = np.min(gt_trans_y['y']) 1047 | # distance_trans_y = gt_max_trans_y - gt_min_trans_y 1048 | 1049 | # gt_max_trans_z = np.max(gt_trans_z['z']) 1050 | # gt_min_trans_z = np.min(gt_trans_z['z']) 1051 | # distance_trans_z = gt_max_trans_z - gt_min_trans_z 1052 | 1053 | # gt_max_roll = np.max(gt_roll_gamma_cleaned) 1054 | # gt_min_roll = np.min(gt_roll_gamma_cleaned) 1055 | # total_angle_roll = gt_max_roll - gt_min_roll 1056 | 1057 | # gt_max_pitch = np.max(gt_pitch_alpha_cleaned) 1058 | # gt_min_pitch = np.min(gt_pitch_alpha_cleaned) 1059 | # total_angle_pitch = gt_max_pitch - gt_min_pitch 1060 | 1061 | # gt_max_yaw = np.max(gt_yaw_beta_cleaned) 1062 | # gt_min_yaw = np.min(gt_yaw_beta_cleaned) 1063 | # total_angle_yaw = gt_max_yaw - gt_min_yaw 1064 | 1065 | edopt_error_trans_x = computeEuclideanDistance(gt_trans_x_new['x'], gt_trans_x_new['y'], gt_trans_x_new['z'], edopt_trans_x_resampled_x, edopt_trans_x_resampled_y, edopt_trans_x_resampled_z) 1066 | edopt_error_trans_y = computeEuclideanDistance(gt_trans_y_new['x'], gt_trans_y_new['y'], gt_trans_y_new['z'], edopt_trans_y_resampled_x, edopt_trans_y_resampled_y, edopt_trans_y_resampled_z) 1067 | edopt_error_trans_z = computeEuclideanDistance(gt_trans_z_new['x'], gt_trans_z_new['y'], gt_trans_z_new['z'], 
edopt_trans_z_resampled_x, edopt_trans_z_resampled_y, edopt_trans_z_resampled_z) 1068 | edopt_error_roll = computeEuclideanDistance(gt_roll_new['x'], gt_roll_new['y'], gt_roll_new['z'], edopt_roll_resampled_x, edopt_roll_resampled_y, edopt_roll_resampled_z) 1069 | edopt_error_pitch = computeEuclideanDistance(gt_pitch_new['x'], gt_pitch_new['y'], gt_pitch_new['z'], edopt_pitch_resampled_x, edopt_pitch_resampled_y, edopt_pitch_resampled_z) 1070 | edopt_error_yaw = computeEuclideanDistance(gt_yaw_new['x'], gt_yaw_new['y'], gt_yaw_new['z'], edopt_yaw_resampled_x, edopt_yaw_resampled_y, edopt_yaw_resampled_z) 1071 | edopt_error_6dof = computeEuclideanDistance(gt_6dof['x'], gt_6dof['y'], gt_6dof['z'], edopt_6dof_resampled_x, edopt_6dof_resampled_y, edopt_6dof_resampled_z) 1072 | 1073 | edopt_q_angle_trans_x = computeQuaternionError(edopt_trans_x_resampled_qx, edopt_trans_x_resampled_qy, edopt_trans_x_resampled_qz, edopt_trans_x_resampled_qw, gt_trans_x_new['qx'], gt_trans_x_new['qy'], gt_trans_x_new['qz'], gt_trans_x_new['qw']) 1074 | edopt_q_angle_trans_y = computeQuaternionError(edopt_trans_y_resampled_qx, edopt_trans_y_resampled_qy, edopt_trans_y_resampled_qz, edopt_trans_y_resampled_qw, gt_trans_y_new['qx'], gt_trans_y_new['qy'], gt_trans_y_new['qz'], gt_trans_y_new['qw']) 1075 | edopt_q_angle_trans_z = computeQuaternionError(edopt_trans_z_resampled_qx, edopt_trans_z_resampled_qy, edopt_trans_z_resampled_qz, edopt_trans_z_resampled_qw, gt_trans_z_new['qx'], gt_trans_z_new['qy'], gt_trans_z_new['qz'], gt_trans_z_new['qw']) 1076 | edopt_q_angle_roll = computeQuaternionError(edopt_roll_resampled_qx, edopt_roll_resampled_qy, edopt_roll_resampled_qz, edopt_roll_resampled_qw, gt_roll_new['qx'], gt_roll_new['qy'], gt_roll_new['qz'], gt_roll_new['qw']) 1077 | edopt_q_angle_pitch = computeQuaternionError(edopt_pitch_resampled_qx, edopt_pitch_resampled_qy, edopt_pitch_resampled_qz, edopt_pitch_resampled_qw, gt_pitch_new['qx'], gt_pitch_new['qy'], gt_pitch_new['qz'], 
gt_pitch_new['qw']) 1078 | edopt_q_angle_yaw = computeQuaternionError(edopt_yaw_resampled_qx, edopt_yaw_resampled_qy, edopt_yaw_resampled_qz, edopt_yaw_resampled_qw, gt_yaw_new['qx'], gt_yaw_new['qy'], gt_yaw_new['qz'], gt_yaw_new['qw']) 1079 | edopt_q_angle_6dof = computeQuaternionError(edopt_6dof_resampled_qx, edopt_6dof_resampled_qy, edopt_6dof_resampled_qz, edopt_6dof_resampled_qw, gt_6dof['qx'], gt_6dof['qy'], gt_6dof['qz'], gt_6dof['qw']) 1080 | 1081 | rgbde_error_trans_x = computeEuclideanDistance(gt_trans_x_old['x'], gt_trans_x_old['y'], gt_trans_x_old['z'], rgbde_trans_x_resampled_x, rgbde_trans_x_resampled_y, rgbde_trans_x_resampled_z) 1082 | rgbde_error_trans_y = computeEuclideanDistance(gt_trans_y_old['x'], gt_trans_y_old['y'], gt_trans_y_old['z'], rgbde_trans_y_resampled_x, rgbde_trans_y_resampled_y, rgbde_trans_y_resampled_z) 1083 | rgbde_error_trans_z = computeEuclideanDistance(gt_trans_z_old['x'], gt_trans_z_old['y'], gt_trans_z_old['z'], rgbde_trans_z_resampled_x, rgbde_trans_z_resampled_y, rgbde_trans_z_resampled_z) 1084 | rgbde_error_roll = computeEuclideanDistance(gt_roll_old['x'], gt_roll_old['y'], gt_roll_old['z'], rgbde_roll_resampled_x, rgbde_roll_resampled_y, rgbde_roll_resampled_z) 1085 | rgbde_error_pitch = computeEuclideanDistance(gt_pitch_old['x'], gt_pitch_old['y'], gt_pitch_old['z'], rgbde_pitch_resampled_x, rgbde_pitch_resampled_y, rgbde_pitch_resampled_z) 1086 | rgbde_error_yaw = computeEuclideanDistance(gt_yaw_old['x'], gt_yaw_old['y'], gt_yaw_old['z'], rgbde_yaw_resampled_x, rgbde_yaw_resampled_y, rgbde_yaw_resampled_z) 1087 | rgbde_error_6dof = computeEuclideanDistance(gt_6dof['x'], gt_6dof['y'], gt_6dof['z'], rgbde_6dof_resampled_x, rgbde_6dof_resampled_y, rgbde_6dof_resampled_z) 1088 | 1089 | rgbde_q_angle_trans_x = computeQuaternionError(rgbde_trans_x_resampled_qx, rgbde_trans_x_resampled_qy, rgbde_trans_x_resampled_qz, rgbde_trans_x_resampled_qw, gt_trans_x_old['qx'], gt_trans_x_old['qy'], gt_trans_x_old['qz'], 
gt_trans_x_old['qw']) 1090 | rgbde_q_angle_trans_y = computeQuaternionError(rgbde_trans_y_resampled_qx, rgbde_trans_y_resampled_qy, rgbde_trans_y_resampled_qz, rgbde_trans_y_resampled_qw, gt_trans_y_old['qx'], gt_trans_y_old['qy'], gt_trans_y_old['qz'], gt_trans_y_old['qw']) 1091 | rgbde_q_angle_trans_z = computeQuaternionError(rgbde_trans_z_resampled_qx, rgbde_trans_z_resampled_qy, rgbde_trans_z_resampled_qz, rgbde_trans_z_resampled_qw, gt_trans_z_old['qx'], gt_trans_z_old['qy'], gt_trans_z_old['qz'], gt_trans_z_old['qw']) 1092 | rgbde_q_angle_roll = computeQuaternionError(rgbde_roll_resampled_qx, rgbde_roll_resampled_qy, rgbde_roll_resampled_qz, rgbde_roll_resampled_qw, gt_roll_old['qx'], gt_roll_old['qy'], gt_roll_old['qz'], gt_roll_old['qw']) 1093 | rgbde_q_angle_pitch = computeQuaternionError(rgbde_pitch_resampled_qx, rgbde_pitch_resampled_qy, rgbde_pitch_resampled_qz, rgbde_pitch_resampled_qw, gt_pitch_old['qx'], gt_pitch_old['qy'], gt_pitch_old['qz'], gt_pitch_old['qw']) 1094 | rgbde_q_angle_yaw = computeQuaternionError(rgbde_yaw_resampled_qx, rgbde_yaw_resampled_qy, rgbde_yaw_resampled_qz, rgbde_yaw_resampled_qw, gt_yaw_old['qx'], gt_yaw_old['qy'], gt_yaw_old['qz'], gt_yaw_old['qw']) 1095 | rgbde_q_angle_6dof = computeQuaternionError(rgbde_6dof_resampled_qx, rgbde_6dof_resampled_qy, rgbde_6dof_resampled_qz, rgbde_6dof_resampled_qw, gt_6dof['qx'], gt_6dof['qy'], gt_6dof['qz'], gt_6dof['qw']) 1096 | 1097 | edopt_tr_datasets_position_errors = np.concatenate((edopt_error_trans_x, edopt_error_trans_y, edopt_error_trans_z)) 1098 | edopt_rot_datasets_position_errors = np.concatenate((edopt_error_roll, edopt_error_pitch, edopt_error_yaw)) 1099 | edopt_tr_datasets_angle_errors = np.concatenate((edopt_q_angle_trans_x, edopt_q_angle_trans_y, edopt_q_angle_trans_z)) 1100 | edopt_rot_datasets_angle_errors = np.concatenate((edopt_q_angle_roll, edopt_q_angle_pitch, edopt_q_angle_yaw)) 1101 | 1102 | rgbde_tr_datasets_position_errors = np.concatenate((rgbde_error_trans_x, 
rgbde_error_trans_y, rgbde_error_trans_z)) 1103 | rgbde_rot_datasets_position_errors = np.concatenate((rgbde_error_roll, rgbde_error_pitch)) 1104 | rgbde_tr_datasets_angle_errors = np.concatenate((rgbde_q_angle_trans_x, rgbde_q_angle_trans_y, rgbde_q_angle_trans_z)) 1105 | rgbde_rot_datasets_angle_errors = np.concatenate((rgbde_q_angle_roll, rgbde_q_angle_pitch)) 1106 | 1107 | # print(np.mean(edopt_error_trans_x*100), "cm") 1108 | # print(np.mean(rgbde_error_trans_x*100), "cm") 1109 | 1110 | # print(np.mean(edopt_q_angle_trans_x), "rad") 1111 | # print(np.mean(rgbde_q_angle_trans_x), "rad") 1112 | 1113 | # print(np.mean(edopt_error_trans_z*100), "cm") 1114 | # print(np.mean(rgbde_error_trans_z*100), "cm") 1115 | 1116 | # X = ['Tr X', 'Tr Y', 'Tr Z', 'Roll', 'Pitch', 'Yaw'] 1117 | 1118 | # X_axis = np.arange(len(X)) 1119 | # edopt_average_position_error = [np.mean(edopt_error_trans_x), np.mean(edopt_error_trans_y),np.mean(edopt_error_trans_z), np.mean(edopt_error_roll), np.mean(edopt_error_pitch), np.mean(edopt_error_yaw)] 1120 | # rgbde_average_position_error = [np.mean(rgbde_error_trans_x), np.mean(rgbde_error_trans_y),np.mean(rgbde_error_trans_z), np.mean(rgbde_error_roll), np.mean(rgbde_error_pitch), np.mean(rgbde_error_yaw)] 1121 | 1122 | # edopt_std_position_error = [np.std(edopt_error_trans_x), np.std(edopt_error_trans_y),np.std(edopt_error_trans_z), np.std(edopt_error_roll), np.std(edopt_error_pitch), np.std(edopt_error_yaw)] 1123 | # rgbde_std_position_error = [np.std(rgbde_error_trans_x), np.std(rgbde_error_trans_y),np.std(rgbde_error_trans_z), np.std(rgbde_error_roll), np.std(rgbde_error_pitch), np.std(rgbde_error_yaw)] 1124 | 1125 | # plt.bar(X_axis - 0.2, edopt_average_position_error, 0.4, yerr=edopt_std_position_error,label = 'edopt', color=color_edopt) 1126 | # plt.bar(X_axis + 0.2, rgbde_average_position_error, 0.4, yerr=rgbde_std_position_error, label = 'RGB-D-E', color=color_rgbde) 1127 | 1128 | # plt.xticks(X_axis, X) 1129 | # 
plt.xlabel("Motions") 1130 | # plt.ylabel("Mean Position Error [m]") 1131 | # # plt.title("Number of Students in each group") 1132 | # plt.legend() 1133 | # plt.show() 1134 | 1135 | # X = ['Tr X', 'Tr Y', 'Tr Z', 'Roll', 'Pitch', 'Yaw'] 1136 | 1137 | # X_axis = np.arange(len(X)) 1138 | # edopt_average_angle_error = [np.mean(edopt_q_angle_trans_x), np.mean(edopt_q_angle_trans_y),np.mean(edopt_q_angle_trans_z), np.mean(edopt_q_angle_roll), np.mean(edopt_q_angle_pitch), np.mean(edopt_q_angle_yaw)] 1139 | # rgbde_average_angle_error = [np.mean(rgbde_q_angle_trans_x), np.mean(rgbde_q_angle_trans_y),np.mean(rgbde_q_angle_trans_z), np.mean(rgbde_q_angle_roll), np.mean(rgbde_q_angle_pitch), np.mean(rgbde_q_angle_yaw)] 1140 | 1141 | # edopt_std_angle_error = [np.std(edopt_q_angle_trans_x), np.std(edopt_q_angle_trans_y),np.std(edopt_q_angle_trans_z), np.std(edopt_q_angle_roll), np.std(edopt_q_angle_pitch), np.std(edopt_q_angle_yaw)] 1142 | # rgbde_std_angle_error = [np.std(rgbde_q_angle_trans_x), np.std(rgbde_q_angle_trans_y),np.std(rgbde_q_angle_trans_z), np.std(rgbde_q_angle_roll), np.std(rgbde_q_angle_pitch), np.std(rgbde_q_angle_yaw)] 1143 | 1144 | # plt.bar(X_axis - 0.2, edopt_average_angle_error, 0.4, yerr=edopt_std_angle_error, label = 'edopt', color=color_edopt) 1145 | # plt.bar(X_axis + 0.2, rgbde_average_angle_error, 0.4, yerr=rgbde_std_angle_error, label = 'RGB-D-E', color=color_rgbde) 1146 | 1147 | # plt.xticks(X_axis, X) 1148 | # plt.xlabel("Motions") 1149 | # plt.ylabel("Mean Rotation Error [rad]") 1150 | # # plt.title("Number of Students in each group") 1151 | # plt.legend() 1152 | # plt.show() 1153 | 1154 | # print(edopt_average_position_error, edopt_std_position_error) 1155 | # print(rgbde_average_position_error, rgbde_std_position_error) 1156 | # print(edopt_average_angle_error, edopt_std_angle_error) 1157 | # print(rgbde_average_angle_error, rgbde_std_angle_error) 1158 | 1159 | # X = ['Translations' ,'Rotations'] 1160 | 1161 | # X_axis = np.arange(len(X)) 
1162 | # edopt_average_pos_errors = [np.mean(edopt_tr_datasets_position_errors), np.mean(edopt_rot_datasets_position_errors)] 1163 | # edopt_std_pos_errors = [np.std(edopt_tr_datasets_position_errors), np.std(edopt_rot_datasets_position_errors)] 1164 | 1165 | # rgbde_average_pos_errors = [np.mean(rgbde_tr_datasets_position_errors), np.mean(rgbde_rot_datasets_position_errors)] 1166 | # rgbde_std_pos_errors = [np.std(rgbde_tr_datasets_position_errors), np.std(rgbde_rot_datasets_position_errors)] 1167 | 1168 | # edopt_average_angle_errors = [np.mean(edopt_tr_datasets_angle_errors), np.mean(edopt_rot_datasets_angle_errors)] 1169 | # edopt_std_angle_errors = [np.std(edopt_tr_datasets_angle_errors), np.std(edopt_rot_datasets_angle_errors)] 1170 | 1171 | # rgbde_average_angle_errors = [np.mean(rgbde_tr_datasets_angle_errors), np.mean(rgbde_rot_datasets_angle_errors)] 1172 | # rgbde_std_angle_errors = [np.std(rgbde_tr_datasets_angle_errors), np.std(rgbde_rot_datasets_angle_errors)] 1173 | 1174 | # fig12 = plt.plot() 1175 | # plt.bar(X_axis - 0.2, edopt_average_pos_errors, 0.4, yerr=edopt_std_pos_errors, label = 'edopt', color=color_edopt) 1176 | # plt.bar(X_axis + 0.2, rgbde_average_pos_errors, 0.4, yerr=rgbde_std_pos_errors, label = 'RGB-D-E', color=color_rgbde) 1177 | 1178 | # plt.xticks(X_axis, X) 1179 | # # plt.xlabel("Algorithms") 1180 | # plt.ylabel('Position Error [m]') 1181 | # # plt.title("Number of Students in each group") 1182 | # plt.legend() 1183 | # plt.show() 1184 | 1185 | # fig13 = plt.plot() 1186 | # plt.bar(X_axis - 0.2, edopt_average_angle_errors, 0.4, yerr=edopt_std_angle_errors, label = 'edopt', color=color_edopt) 1187 | # plt.bar(X_axis + 0.2, rgbde_average_angle_errors, 0.4, yerr=rgbde_std_angle_errors, label = 'RGB-D-E', color=color_rgbde) 1188 | 1189 | # plt.xticks(X_axis, X) 1190 | # # plt.xlabel("Algorithms") 1191 | # plt.ylabel('Angle Error [rad]') 1192 | # # plt.title("Number of Students in each group") 1193 | # plt.legend() 1194 | # plt.show() 
1195 | 1196 | rad_to_deg = 180/math.pi 1197 | 1198 | labels = [' ', '\n6-DoF',' ', '\n$\\gamma$', ' ', '\n$\\beta$', ' ', '\n$\\alpha$',' ', '\n$z$',' ', '\n$y$',' ', '\n$x$'] 1199 | ticks=[0,1,2,3,4,5,6,7,8,9,10,11,12,13] 1200 | medianprops = dict(color='white') 1201 | 1202 | quart_vec_pos=[edopt_error_6dof, rgbde_error_6dof, edopt_error_roll, rgbde_error_roll, edopt_error_yaw, rgbde_error_yaw, edopt_error_pitch, rgbde_error_pitch, edopt_error_trans_z, rgbde_error_trans_z, edopt_error_trans_y, rgbde_error_trans_y, edopt_error_trans_x, rgbde_error_trans_x] 1203 | quart_vec_ang=[edopt_q_angle_6dof*rad_to_deg, rgbde_q_angle_6dof*rad_to_deg, edopt_q_angle_roll*rad_to_deg, rgbde_q_angle_roll*rad_to_deg, edopt_q_angle_yaw*rad_to_deg, rgbde_q_angle_yaw*rad_to_deg, edopt_q_angle_pitch*rad_to_deg, rgbde_q_angle_pitch*rad_to_deg, edopt_q_angle_trans_z*rad_to_deg, rgbde_q_angle_trans_z*rad_to_deg, edopt_q_angle_trans_y*rad_to_deg, rgbde_q_angle_trans_y*rad_to_deg, edopt_q_angle_trans_x*rad_to_deg, rgbde_q_angle_trans_x*rad_to_deg] 1204 | 1205 | # new_quart_array = np.array(quart_vec_pos).transpose 1206 | 1207 | edopt_concatenation_pos_errors = np.concatenate((edopt_error_trans_x, edopt_error_trans_y, edopt_error_trans_z, edopt_error_roll,edopt_error_pitch, edopt_error_yaw)) 1208 | rgbde_concatenation_pos_errors = np.concatenate((rgbde_error_trans_x, rgbde_error_trans_y, rgbde_error_trans_z, rgbde_error_roll,rgbde_error_pitch, rgbde_error_yaw)) 1209 | 1210 | edopt_mean_pos_error = np.mean(edopt_concatenation_pos_errors) 1211 | rgbde_mean_pos_error = np.mean(rgbde_concatenation_pos_errors) 1212 | 1213 | edopt_concatenation_rot_errors = np.concatenate((edopt_q_angle_trans_x*rad_to_deg, edopt_q_angle_trans_y*rad_to_deg, edopt_q_angle_trans_z*rad_to_deg, edopt_q_angle_roll*rad_to_deg,edopt_q_angle_pitch*rad_to_deg, edopt_q_angle_yaw*rad_to_deg)) 1214 | rgbde_concatenation_rot_errors = np.concatenate((rgbde_q_angle_trans_x*rad_to_deg, rgbde_q_angle_trans_y*rad_to_deg, 
rgbde_q_angle_trans_z*rad_to_deg, rgbde_q_angle_roll*rad_to_deg,rgbde_q_angle_pitch*rad_to_deg, rgbde_q_angle_yaw*rad_to_deg)) 1215 | 1216 | edopt_mean_rot_error = np.mean(edopt_concatenation_rot_errors) 1217 | rgbde_mean_rot_error = np.mean(rgbde_concatenation_rot_errors) 1218 | 1219 | print("edopt pos error ="+str(edopt_mean_pos_error), ", rot error=", str(edopt_mean_rot_error)) 1220 | print("rgbde pos error ="+str(rgbde_mean_pos_error), ", rot error=", str(rgbde_mean_rot_error)) 1221 | 1222 | fig15, ax1 = plt.subplots(1,2) 1223 | fig15.set_size_inches(8, 6) 1224 | ax1[0].set_xlabel('Position error [m]', color='k') 1225 | ax1[1].set_xlabel('Rotation error [deg]', color='k') 1226 | res1 = ax1[0].boxplot(quart_vec_pos, labels=labels, vert=False, showfliers=False, 1227 | patch_artist=True, medianprops=medianprops) 1228 | res2 = ax1[1].boxplot(quart_vec_ang, vert=False, showfliers=False, 1229 | patch_artist=True, medianprops=medianprops) 1230 | for element in ['boxes', 'whiskers', 'fliers', 'means', 'medians', 'caps']: 1231 | plt.setp(res1[element], color='k') 1232 | plt.setp(res2[element], color='k') 1233 | ax1[1].set_yticklabels([]) 1234 | ax1[1].set_yticks([]) 1235 | ax1[0].set_xlim(-0.001, 0.028) 1236 | ax1[1].set_xlim(-0.5, 6.5) 1237 | ax1[1].xaxis.set_major_locator(plt.MaxNLocator(4)) 1238 | colors=[color_edopt, color_rgbde, color_edopt, color_rgbde, color_edopt, color_rgbde, color_edopt, color_rgbde, color_edopt, color_rgbde, color_edopt, color_rgbde, color_edopt, color_rgbde] 1239 | # color='white' 1240 | # colors = [color, color, color, color, color, color,color, color,color, color,color, color] 1241 | # patterns=[0,1,0,1,0,1,0,1,0,1,0,1] 1242 | for patch, color in zip(res1['boxes'], colors): 1243 | patch.set_facecolor(color) 1244 | # if pattern == 1: 1245 | # patch.set(hatch = '/') 1246 | for patch, color in zip(res2['boxes'], colors): 1247 | patch.set_facecolor(color) 1248 | # if pattern == 1: 1249 | # patch.set(hatch = '/') 1250 | 
ax1[1].legend([res1["boxes"][0], res1["boxes"][1]], ['EDOPT', 'RGB-D-E'], bbox_to_anchor=(-0.7, 1.02, 0, 0.2), loc='lower left', ncol=2) 1251 | fig15.subplots_adjust(wspace=0) 1252 | plt.show() 1253 | -------------------------------------------------------------------------------- /superimpose.patch: -------------------------------------------------------------------------------- 1 | diff --git a/src/SuperimposeMesh/include/SuperimposeMesh/SICAD.h b/src/SuperimposeMesh/include/SuperimposeMesh/SICAD.h 2 | index 0168da8..017264d 100644 3 | --- a/src/SuperimposeMesh/include/SuperimposeMesh/SICAD.h 4 | +++ b/src/SuperimposeMesh/include/SuperimposeMesh/SICAD.h 5 | @@ -191,6 +191,9 @@ public: 6 | * 7 | * @return true upon success, false otherswise. 8 | **/ 9 | + virtual bool superimpose(std::array obj_pose, std::array cam_pose, cv::Mat &img); 10 | + virtual bool superimpose(std::array obj_pose, std::array cam_pose, cv::Mat &img, cv::Rect roi); 11 | + virtual bool superimpose(const std::array cam_pose, const std::vector > &obj_poses, const cv::Rect &roi, std::vector &images); 12 | virtual bool superimpose(const std::vector& objpos_multimap, const double* cam_x, const double* cam_o, cv::Mat& img); 13 | 14 | virtual bool superimpose(const ModelPoseContainer& objpos_map, const double* cam_x, const double* cam_o, cv::Mat& img, 15 | diff --git a/src/SuperimposeMesh/shader/shader_model_texture.frag b/src/SuperimposeMesh/shader/shader_model_texture.frag 16 | index f9c169b..3f38ec5 100644 17 | --- a/src/SuperimposeMesh/shader/shader_model_texture.frag 18 | +++ b/src/SuperimposeMesh/shader/shader_model_texture.frag 19 | @@ -15,5 +15,11 @@ uniform sampler2D texture_diffuse1; 20 | 21 | void main() 22 | { 23 | - color = texture(texture_diffuse1, TexCoords); 24 | + //original 25 | + //color = texture(texture_diffuse1, TexCoords); 26 | + 27 | + // Transform to grayscale keeping 3 ch 28 | + vec4 c = texture(texture_diffuse1, TexCoords); 29 | + float gray = dot(c.rgb, vec3(0.299, 0.587, 
0.114)); 30 | + color = vec4(vec3(gray), 1.0); 31 | } 32 | diff --git a/src/SuperimposeMesh/src/SICAD.cpp b/src/SuperimposeMesh/src/SICAD.cpp 33 | index 6588e0d..7b890db 100644 34 | --- a/src/SuperimposeMesh/src/SICAD.cpp 35 | +++ b/src/SuperimposeMesh/src/SICAD.cpp 36 | @@ -14,7 +14,11 @@ 37 | #include 38 | #include 39 | #include 40 | +#include 41 | 42 | +#include 43 | +#include 44 | +#include 45 | #include 46 | #include 47 | 48 | @@ -461,9 +465,240 @@ void SICAD::setOglWindowShouldClose(bool should_close) 49 | pollOrPostEvent(); 50 | } 51 | 52 | +bool SICAD::superimpose(std::array obj_pose, std::array cam_pose, cv::Mat& img) { 53 | + //auto tic1 = std::chrono::high_resolution_clock::now(); 54 | + glfwMakeContextCurrent(window_); 55 | + 56 | + glBindFramebuffer(GL_FRAMEBUFFER, fbo_); 57 | 58 | -bool SICAD::superimpose 59 | -( 60 | + /* View transformation matrix. */ 61 | + glm::mat4 view = getViewTransformationMatrix(&cam_pose[0], &cam_pose[3]); 62 | + 63 | + /* Install/Use the program specified by the shader. */ 64 | + shader_cad_->install(); 65 | + glUniformMatrix4fv(glGetUniformLocation(shader_cad_->get_program(), "view"), 1, GL_FALSE, glm::value_ptr(view)); 66 | + shader_cad_->uninstall(); 67 | + 68 | + shader_mesh_texture_->install(); 69 | + glUniformMatrix4fv(glGetUniformLocation(shader_mesh_texture_->get_program(), "view"), 1, GL_FALSE, glm::value_ptr(view)); 70 | + shader_mesh_texture_->uninstall(); 71 | + 72 | + shader_frame_->install(); 73 | + glUniformMatrix4fv(glGetUniformLocation(shader_frame_->get_program(), "view"), 1, GL_FALSE, glm::value_ptr(view)); 74 | + shader_frame_->uninstall(); 75 | + 76 | + /* Clear the colorbuffer. 
*/ 77 | + glClearColor(0.0f, 0.0f, 0.0f, 1.0f); 78 | + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); 79 | + //auto tic2 = std::chrono::high_resolution_clock::now(); 80 | + 81 | + { 82 | + /* Render in the upper-left-most tile of the render grid */ 83 | + glViewport(0, framebuffer_height_ - tile_img_height_, 84 | + tile_img_width_, tile_img_height_); 85 | + glScissor(0, framebuffer_height_ - tile_img_height_, 86 | + tile_img_width_, tile_img_height_); 87 | + 88 | + glm::mat4 model = glm::rotate(glm::mat4(1.0f), static_cast(obj_pose[6]), glm::vec3(static_cast(obj_pose[3]), static_cast(obj_pose[4]), static_cast(obj_pose[5]))); 89 | + model[3][0] = obj_pose[0]; 90 | + model[3][1] = obj_pose[1]; 91 | + model[3][2] = obj_pose[2]; 92 | + 93 | + auto iter_model = model_obj_.find("model"); 94 | + shader_mesh_texture_->install(); 95 | + glUniformMatrix4fv(glGetUniformLocation(shader_mesh_texture_->get_program(), "model"), 1, GL_FALSE, glm::value_ptr(model)); 96 | + (iter_model->second)->Draw(*shader_mesh_texture_); 97 | + shader_mesh_texture_->uninstall(); 98 | + 99 | + /* Read before swap. glReadPixels read the current framebuffer, i.e. the back one. */ 100 | + /* See: http://stackoverflow.com/questions/16809833/opencv-image-loading-for-opengl-texture#16812529 101 | + and http://stackoverflow.com/questions/9097756/converting-data-from-glreadpixels-to-opencvmat#9098883 */ 102 | + static cv::Mat ogl_pixel(framebuffer_height_ / tiles_rows_, framebuffer_width_ / tiles_cols_, CV_8UC1); 103 | + glReadBuffer(GL_COLOR_ATTACHMENT0); 104 | + glPixelStorei(GL_PACK_ALIGNMENT, (ogl_pixel.step & 3) ? 
1 : 4); 105 | + glPixelStorei(GL_PACK_ROW_LENGTH, ogl_pixel.step / ogl_pixel.elemSize()); 106 | + glReadPixels(0, framebuffer_height_ - tile_img_height_, tile_img_width_, tile_img_height_, GL_RED, GL_UNSIGNED_BYTE, ogl_pixel.data); 107 | + 108 | + cv::flip(ogl_pixel, img, 0); 109 | + // cv::cvtColor(ogl_pixel, img, cv::COLOR_GRAY2BGR); 110 | + } 111 | + 112 | + //auto tic3 = std::chrono::high_resolution_clock::now(); 113 | + 114 | + /* Swap the buffers. */ 115 | + glfwSwapBuffers(window_); 116 | + 117 | + pollOrPostEvent(); 118 | + 119 | + glBindFramebuffer(GL_FRAMEBUFFER, 0); 120 | + 121 | + glfwMakeContextCurrent(nullptr); 122 | + 123 | + //auto tic4 = std::chrono::high_resolution_clock::now(); 124 | + 125 | + // std::chrono::duration p1 = tic2 - tic1; 126 | + // std::chrono::duration p2 = tic3 - tic2; 127 | + // std::chrono::duration p3 = tic4 - tic3; 128 | + 129 | + // std::cout << p1.count() << " " << p2.count() << " " << p3.count() << " " << (p1.count() + p3.count()) / (p1.count()+p2.count()+p3.count()) << "\%" << std::endl; 130 | + 131 | + return true; 132 | +} 133 | + 134 | +bool SICAD::superimpose(const std::array cam_pose, const std::vector >& obj_poses, const cv::Rect& roi, std::vector& images) { 135 | + glfwMakeContextCurrent(window_); 136 | + 137 | + glBindFramebuffer(GL_FRAMEBUFFER, fbo_); 138 | + 139 | + /* View transformation matrix. */ 140 | + glm::mat4 view = getViewTransformationMatrix(&cam_pose[0], &cam_pose[3]); 141 | + 142 | + /* Install/Use the program specified by the shader. 
*/ 143 | + shader_cad_->install(); 144 | + glUniformMatrix4fv(glGetUniformLocation(shader_cad_->get_program(), "view"), 1, GL_FALSE, glm::value_ptr(view)); 145 | + shader_cad_->uninstall(); 146 | + 147 | + shader_mesh_texture_->install(); 148 | + glUniformMatrix4fv(glGetUniformLocation(shader_mesh_texture_->get_program(), "view"), 1, GL_FALSE, glm::value_ptr(view)); 149 | + shader_mesh_texture_->uninstall(); 150 | + 151 | + shader_frame_->install(); 152 | + glUniformMatrix4fv(glGetUniformLocation(shader_frame_->get_program(), "view"), 1, GL_FALSE, glm::value_ptr(view)); 153 | + shader_frame_->uninstall(); 154 | + 155 | + /* Render starting by the upper-left-most tile of the render grid, proceding by columns and rows. */ 156 | + glViewport(0, 0, tile_img_width_, tile_img_height_); 157 | + glScissor(0, 0, tile_img_width_, tile_img_height_); 158 | + 159 | + auto iter_model = model_obj_.find("model"); 160 | + 161 | + 162 | + glReadBuffer(GL_COLOR_ATTACHMENT0); 163 | + glPixelStorei(GL_PACK_ALIGNMENT, (images[0].step & 3) ? 1 : 4); 164 | + glPixelStorei(GL_PACK_ROW_LENGTH, images[0].step / images[0].elemSize()); 165 | + 166 | + shader_mesh_texture_->install(); 167 | + for (unsigned int i = 0; i < obj_poses.size(); ++i) { 168 | + auto& obj_pose = obj_poses[i]; 169 | + cv::Mat img; 170 | + // if(i == 0) img = images[i]; 171 | + // else 172 | + img = images[i](roi); 173 | + 174 | + glm::mat4 model = glm::rotate(glm::mat4(1.0f), static_cast(obj_pose[6]), glm::vec3(static_cast(obj_pose[3]), static_cast(obj_pose[4]), static_cast(obj_pose[5]))); 175 | + model[3][0] = obj_pose[0]; 176 | + model[3][1] = obj_pose[1]; 177 | + model[3][2] = obj_pose[2]; 178 | + 179 | + /* Clear the colorbuffer. 
*/ 180 | + glClearColor(0.0f, 0.0f, 0.0f, 1.0f); 181 | + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); 182 | + glUniformMatrix4fv(glGetUniformLocation(shader_mesh_texture_->get_program(), "model"), 1, GL_FALSE, glm::value_ptr(model)); 183 | + (iter_model->second)->Draw(*shader_mesh_texture_); 184 | + // if(i == 0) 185 | + // glReadPixels(0, 0, tile_img_width_, tile_img_height_, GL_RED, GL_UNSIGNED_BYTE, img.data); 186 | + // else 187 | + glReadPixels(roi.x, tile_img_height_ - roi.y - roi.height, roi.width, roi.height, GL_RED, GL_UNSIGNED_BYTE, img.data); 188 | + cv::flip(img, img, 0); 189 | + } 190 | + shader_mesh_texture_->uninstall(); 191 | + 192 | + 193 | + // for (unsigned int i = 0; i < obj_poses.size(); ++i) { 194 | + // auto img = images[i]; 195 | + 196 | + // /* Read before swap. glReadPixels read the current framebuffer, i.e. the back one. */ 197 | + // /* See: http://stackoverflow.com/questions/16809833/opencv-image-loading-for-opengl-texture#16812529 198 | + // and http://stackoverflow.com/questions/9097756/converting-data-from-glreadpixels-to-opencvmat#9098883 */ 199 | + // glReadBuffer(GL_COLOR_ATTACHMENT0); 200 | + // glPixelStorei(GL_PACK_ALIGNMENT, (img.step & 3) ? 1 : 4); 201 | + // glPixelStorei(GL_PACK_ROW_LENGTH, img.step / img.elemSize()); 202 | + // //glReadPixels(tile_img_width_ * i + roi.x, tile_img_height_ - roi.y - roi.height, roi.width, roi.height, GL_RED, GL_UNSIGNED_BYTE, img.data); 203 | + 204 | + // glReadPixels(tile_img_width_ * i, framebuffer_height_ - tile_img_height_, tile_img_width_, tile_img_height_, GL_RED, GL_UNSIGNED_BYTE, img.data); 205 | + 206 | + // cv::flip(img, img, 0); 207 | + // } 208 | + 209 | + /* Swap the buffers. 
*/ 210 | + glfwSwapBuffers(window_); 211 | + pollOrPostEvent(); 212 | + 213 | + glBindFramebuffer(GL_FRAMEBUFFER, 0); 214 | + 215 | + glfwMakeContextCurrent(nullptr); 216 | + 217 | + return true; 218 | +} 219 | + 220 | +bool SICAD::superimpose(std::array obj_pose, std::array cam_pose, cv::Mat& img, cv::Rect roi) { 221 | + glfwMakeContextCurrent(window_); 222 | + 223 | + glBindFramebuffer(GL_FRAMEBUFFER, fbo_); 224 | + 225 | + /* View transformation matrix. */ 226 | + glm::mat4 view = getViewTransformationMatrix(&cam_pose[0], &cam_pose[3]); 227 | + 228 | + /* Install/Use the program specified by the shader. */ 229 | + shader_cad_->install(); 230 | + glUniformMatrix4fv(glGetUniformLocation(shader_cad_->get_program(), "view"), 1, GL_FALSE, glm::value_ptr(view)); 231 | + shader_cad_->uninstall(); 232 | + 233 | + shader_mesh_texture_->install(); 234 | + glUniformMatrix4fv(glGetUniformLocation(shader_mesh_texture_->get_program(), "view"), 1, GL_FALSE, glm::value_ptr(view)); 235 | + shader_mesh_texture_->uninstall(); 236 | + 237 | + shader_frame_->install(); 238 | + glUniformMatrix4fv(glGetUniformLocation(shader_frame_->get_program(), "view"), 1, GL_FALSE, glm::value_ptr(view)); 239 | + shader_frame_->uninstall(); 240 | + 241 | + /* Clear the colorbuffer. 
*/ 242 | + glClearColor(0.0f, 0.0f, 0.0f, 1.0f); 243 | + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); 244 | + 245 | + { 246 | + /* Render in the upper-left-most tile of the render grid */ 247 | + glViewport(0, framebuffer_height_ - tile_img_height_, 248 | + tile_img_width_, tile_img_height_); 249 | + glScissor(0, framebuffer_height_ - tile_img_height_, 250 | + tile_img_width_, tile_img_height_); 251 | + 252 | + glm::mat4 model = glm::rotate(glm::mat4(1.0f), static_cast(obj_pose[6]), glm::vec3(static_cast(obj_pose[3]), static_cast(obj_pose[4]), static_cast(obj_pose[5]))); 253 | + model[3][0] = obj_pose[0]; 254 | + model[3][1] = obj_pose[1]; 255 | + model[3][2] = obj_pose[2]; 256 | + 257 | + auto iter_model = model_obj_.find("model"); 258 | + shader_mesh_texture_->install(); 259 | + glUniformMatrix4fv(glGetUniformLocation(shader_mesh_texture_->get_program(), "model"), 1, GL_FALSE, glm::value_ptr(model)); 260 | + (iter_model->second)->Draw(*shader_mesh_texture_); 261 | + shader_mesh_texture_->uninstall(); 262 | + 263 | + /* Read before swap. glReadPixels read the current framebuffer, i.e. the back one. */ 264 | + /* See: http://stackoverflow.com/questions/16809833/opencv-image-loading-for-opengl-texture#16812529 265 | + and http://stackoverflow.com/questions/9097756/converting-data-from-glreadpixels-to-opencvmat#9098883 */ 266 | + glReadBuffer(GL_COLOR_ATTACHMENT0); 267 | + glPixelStorei(GL_PACK_ALIGNMENT, (img.step & 3) ? 1 : 4); 268 | + glPixelStorei(GL_PACK_ROW_LENGTH, img.step / img.elemSize()); 269 | + // glReadPixels(roi.x, framebuffer_height_ - tile_img_height_ - roi.y, roi.width, roi.height, GL_BGR, GL_UNSIGNED_BYTE, img.data); 270 | + glReadPixels(roi.x, tile_img_height_ - roi.y - roi.height, roi.width, roi.height, GL_RED, GL_UNSIGNED_BYTE, img.data); 271 | + 272 | + cv::flip(img, img, 0); 273 | + } 274 | + 275 | + /* Swap the buffers. 
*/ 276 | + glfwSwapBuffers(window_); 277 | + 278 | + pollOrPostEvent(); 279 | + 280 | + glBindFramebuffer(GL_FRAMEBUFFER, 0); 281 | + 282 | + glfwMakeContextCurrent(nullptr); 283 | + 284 | + return true; 285 | +} 286 | + 287 | +bool SICAD::superimpose( 288 | const ModelPoseContainer& objpos_map, 289 | const double* cam_x, 290 | const double* cam_o, 291 | -------------------------------------------------------------------------------- /superimposeroi.patch: -------------------------------------------------------------------------------- 1 | diff --git a/src/SuperimposeMesh/include/SuperimposeMesh/SICAD.h b/src/SuperimposeMesh/include/SuperimposeMesh/SICAD.h 2 | index 0168da8..017264d 100644 3 | --- a/src/SuperimposeMesh/include/SuperimposeMesh/SICAD.h 4 | +++ b/src/SuperimposeMesh/include/SuperimposeMesh/SICAD.h 5 | @@ -191,6 +191,9 @@ public: 6 | * 7 | * @return true upon success, false otherswise. 8 | **/ 9 | + virtual bool superimpose(std::array obj_pose, std::array cam_pose, cv::Mat &img); 10 | + virtual bool superimpose(std::array obj_pose, std::array cam_pose, cv::Mat &img, cv::Rect roi); 11 | + virtual bool superimpose(const std::array cam_pose, const std::vector > &obj_poses, const cv::Rect &roi, std::vector &images); 12 | virtual bool superimpose(const std::vector& objpos_multimap, const double* cam_x, const double* cam_o, cv::Mat& img); 13 | 14 | virtual bool superimpose(const ModelPoseContainer& objpos_map, const double* cam_x, const double* cam_o, cv::Mat& img, 15 | diff --git a/src/SuperimposeMesh/shader/shader_model_texture.frag b/src/SuperimposeMesh/shader/shader_model_texture.frag 16 | index f9c169b..3f38ec5 100644 17 | --- a/src/SuperimposeMesh/shader/shader_model_texture.frag 18 | +++ b/src/SuperimposeMesh/shader/shader_model_texture.frag 19 | @@ -15,5 +15,11 @@ uniform sampler2D texture_diffuse1; 20 | 21 | void main() 22 | { 23 | - color = texture(texture_diffuse1, TexCoords); 24 | + //original 25 | + //color = texture(texture_diffuse1, 
TexCoords); 26 | + 27 | + // Transform to grayscale keeping 3 ch 28 | + vec4 c = texture(texture_diffuse1, TexCoords); 29 | + float gray = dot(c.rgb, vec3(0.299, 0.587, 0.114)); 30 | + color = vec4(vec3(gray), 1.0); 31 | } 32 | diff --git a/src/SuperimposeMesh/src/SICAD.cpp b/src/SuperimposeMesh/src/SICAD.cpp 33 | index 6588e0d..7b890db 100644 34 | --- a/src/SuperimposeMesh/src/SICAD.cpp 35 | +++ b/src/SuperimposeMesh/src/SICAD.cpp 36 | @@ -14,7 +14,11 @@ 37 | #include 38 | #include 39 | #include 40 | +#include 41 | 42 | +#include 43 | +#include 44 | +#include 45 | #include 46 | #include 47 | 48 | @@ -461,9 +465,240 @@ void SICAD::setOglWindowShouldClose(bool should_close) 49 | pollOrPostEvent(); 50 | } 51 | 52 | +bool SICAD::superimpose(std::array obj_pose, std::array cam_pose, cv::Mat& img) { 53 | + //auto tic1 = std::chrono::high_resolution_clock::now(); 54 | + glfwMakeContextCurrent(window_); 55 | + 56 | + glBindFramebuffer(GL_FRAMEBUFFER, fbo_); 57 | 58 | -bool SICAD::superimpose 59 | -( 60 | + /* View transformation matrix. */ 61 | + glm::mat4 view = getViewTransformationMatrix(&cam_pose[0], &cam_pose[3]); 62 | + 63 | + /* Install/Use the program specified by the shader. */ 64 | + shader_cad_->install(); 65 | + glUniformMatrix4fv(glGetUniformLocation(shader_cad_->get_program(), "view"), 1, GL_FALSE, glm::value_ptr(view)); 66 | + shader_cad_->uninstall(); 67 | + 68 | + shader_mesh_texture_->install(); 69 | + glUniformMatrix4fv(glGetUniformLocation(shader_mesh_texture_->get_program(), "view"), 1, GL_FALSE, glm::value_ptr(view)); 70 | + shader_mesh_texture_->uninstall(); 71 | + 72 | + shader_frame_->install(); 73 | + glUniformMatrix4fv(glGetUniformLocation(shader_frame_->get_program(), "view"), 1, GL_FALSE, glm::value_ptr(view)); 74 | + shader_frame_->uninstall(); 75 | + 76 | + /* Clear the colorbuffer. 
*/ 77 | + glClearColor(0.0f, 0.0f, 0.0f, 1.0f); 78 | + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); 79 | + //auto tic2 = std::chrono::high_resolution_clock::now(); 80 | + 81 | + { 82 | + /* Render in the upper-left-most tile of the render grid */ 83 | + glViewport(0, framebuffer_height_ - tile_img_height_, 84 | + tile_img_width_, tile_img_height_); 85 | + glScissor(0, framebuffer_height_ - tile_img_height_, 86 | + tile_img_width_, tile_img_height_); 87 | + 88 | + glm::mat4 model = glm::rotate(glm::mat4(1.0f), static_cast(obj_pose[6]), glm::vec3(static_cast(obj_pose[3]), static_cast(obj_pose[4]), static_cast(obj_pose[5]))); 89 | + model[3][0] = obj_pose[0]; 90 | + model[3][1] = obj_pose[1]; 91 | + model[3][2] = obj_pose[2]; 92 | + 93 | + auto iter_model = model_obj_.find("model"); 94 | + shader_mesh_texture_->install(); 95 | + glUniformMatrix4fv(glGetUniformLocation(shader_mesh_texture_->get_program(), "model"), 1, GL_FALSE, glm::value_ptr(model)); 96 | + (iter_model->second)->Draw(*shader_mesh_texture_); 97 | + shader_mesh_texture_->uninstall(); 98 | + 99 | + /* Read before swap. glReadPixels read the current framebuffer, i.e. the back one. */ 100 | + /* See: http://stackoverflow.com/questions/16809833/opencv-image-loading-for-opengl-texture#16812529 101 | + and http://stackoverflow.com/questions/9097756/converting-data-from-glreadpixels-to-opencvmat#9098883 */ 102 | + static cv::Mat ogl_pixel(framebuffer_height_ / tiles_rows_, framebuffer_width_ / tiles_cols_, CV_8UC1); 103 | + glReadBuffer(GL_COLOR_ATTACHMENT0); 104 | + glPixelStorei(GL_PACK_ALIGNMENT, (ogl_pixel.step & 3) ? 
1 : 4); 105 | + glPixelStorei(GL_PACK_ROW_LENGTH, ogl_pixel.step / ogl_pixel.elemSize()); 106 | + glReadPixels(0, framebuffer_height_ - tile_img_height_, tile_img_width_, tile_img_height_, GL_RED, GL_UNSIGNED_BYTE, ogl_pixel.data); 107 | + 108 | + cv::flip(ogl_pixel, img, 0); 109 | + // cv::cvtColor(ogl_pixel, img, cv::COLOR_GRAY2BGR); 110 | + } 111 | + 112 | + //auto tic3 = std::chrono::high_resolution_clock::now(); 113 | + 114 | + /* Swap the buffers. */ 115 | + glfwSwapBuffers(window_); 116 | + 117 | + pollOrPostEvent(); 118 | + 119 | + glBindFramebuffer(GL_FRAMEBUFFER, 0); 120 | + 121 | + glfwMakeContextCurrent(nullptr); 122 | + 123 | + //auto tic4 = std::chrono::high_resolution_clock::now(); 124 | + 125 | + // std::chrono::duration p1 = tic2 - tic1; 126 | + // std::chrono::duration p2 = tic3 - tic2; 127 | + // std::chrono::duration p3 = tic4 - tic3; 128 | + 129 | + // std::cout << p1.count() << " " << p2.count() << " " << p3.count() << " " << (p1.count() + p3.count()) / (p1.count()+p2.count()+p3.count()) << "\%" << std::endl; 130 | + 131 | + return true; 132 | +} 133 | + 134 | +bool SICAD::superimpose(const std::array cam_pose, const std::vector >& obj_poses, const cv::Rect& roi, std::vector& images) { 135 | + glfwMakeContextCurrent(window_); 136 | + 137 | + glBindFramebuffer(GL_FRAMEBUFFER, fbo_); 138 | + 139 | + /* View transformation matrix. */ 140 | + glm::mat4 view = getViewTransformationMatrix(&cam_pose[0], &cam_pose[3]); 141 | + 142 | + /* Install/Use the program specified by the shader. 
*/ 143 | + shader_cad_->install(); 144 | + glUniformMatrix4fv(glGetUniformLocation(shader_cad_->get_program(), "view"), 1, GL_FALSE, glm::value_ptr(view)); 145 | + shader_cad_->uninstall(); 146 | + 147 | + shader_mesh_texture_->install(); 148 | + glUniformMatrix4fv(glGetUniformLocation(shader_mesh_texture_->get_program(), "view"), 1, GL_FALSE, glm::value_ptr(view)); 149 | + shader_mesh_texture_->uninstall(); 150 | + 151 | + shader_frame_->install(); 152 | + glUniformMatrix4fv(glGetUniformLocation(shader_frame_->get_program(), "view"), 1, GL_FALSE, glm::value_ptr(view)); 153 | + shader_frame_->uninstall(); 154 | + 155 | + /* Render starting by the upper-left-most tile of the render grid, proceding by columns and rows. */ 156 | + glViewport(0, 0, tile_img_width_, tile_img_height_); 157 | + glScissor(0, 0, tile_img_width_, tile_img_height_); 158 | + 159 | + auto iter_model = model_obj_.find("model"); 160 | + 161 | + 162 | + glReadBuffer(GL_COLOR_ATTACHMENT0); 163 | + glPixelStorei(GL_PACK_ALIGNMENT, (images[0].step & 3) ? 1 : 4); 164 | + glPixelStorei(GL_PACK_ROW_LENGTH, images[0].step / images[0].elemSize()); 165 | + 166 | + shader_mesh_texture_->install(); 167 | + for (unsigned int i = 0; i < obj_poses.size(); ++i) { 168 | + auto& obj_pose = obj_poses[i]; 169 | + cv::Mat img; 170 | + // if(i == 0) img = images[i]; 171 | + // else 172 | + img = images[i](roi); 173 | + 174 | + glm::mat4 model = glm::rotate(glm::mat4(1.0f), static_cast(obj_pose[6]), glm::vec3(static_cast(obj_pose[3]), static_cast(obj_pose[4]), static_cast(obj_pose[5]))); 175 | + model[3][0] = obj_pose[0]; 176 | + model[3][1] = obj_pose[1]; 177 | + model[3][2] = obj_pose[2]; 178 | + 179 | + /* Clear the colorbuffer. 
*/ 180 | + glClearColor(0.0f, 0.0f, 0.0f, 1.0f); 181 | + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); 182 | + glUniformMatrix4fv(glGetUniformLocation(shader_mesh_texture_->get_program(), "model"), 1, GL_FALSE, glm::value_ptr(model)); 183 | + (iter_model->second)->Draw(*shader_mesh_texture_); 184 | + // if(i == 0) 185 | + // glReadPixels(0, 0, tile_img_width_, tile_img_height_, GL_RED, GL_UNSIGNED_BYTE, img.data); 186 | + // else 187 | + glReadPixels(roi.x, tile_img_height_ - roi.y - roi.height, roi.width, roi.height, GL_RED, GL_UNSIGNED_BYTE, img.data); 188 | + cv::flip(img, img, 0); 189 | + } 190 | + shader_mesh_texture_->uninstall(); 191 | + 192 | + 193 | + // for (unsigned int i = 0; i < obj_poses.size(); ++i) { 194 | + // auto img = images[i]; 195 | + 196 | + // /* Read before swap. glReadPixels read the current framebuffer, i.e. the back one. */ 197 | + // /* See: http://stackoverflow.com/questions/16809833/opencv-image-loading-for-opengl-texture#16812529 198 | + // and http://stackoverflow.com/questions/9097756/converting-data-from-glreadpixels-to-opencvmat#9098883 */ 199 | + // glReadBuffer(GL_COLOR_ATTACHMENT0); 200 | + // glPixelStorei(GL_PACK_ALIGNMENT, (img.step & 3) ? 1 : 4); 201 | + // glPixelStorei(GL_PACK_ROW_LENGTH, img.step / img.elemSize()); 202 | + // //glReadPixels(tile_img_width_ * i + roi.x, tile_img_height_ - roi.y - roi.height, roi.width, roi.height, GL_RED, GL_UNSIGNED_BYTE, img.data); 203 | + 204 | + // glReadPixels(tile_img_width_ * i, framebuffer_height_ - tile_img_height_, tile_img_width_, tile_img_height_, GL_RED, GL_UNSIGNED_BYTE, img.data); 205 | + 206 | + // cv::flip(img, img, 0); 207 | + // } 208 | + 209 | + /* Swap the buffers. 
*/ 210 | + glfwSwapBuffers(window_); 211 | + pollOrPostEvent(); 212 | + 213 | + glBindFramebuffer(GL_FRAMEBUFFER, 0); 214 | + 215 | + glfwMakeContextCurrent(nullptr); 216 | + 217 | + return true; 218 | +} 219 | + 220 | +bool SICAD::superimpose(std::array<double, 7> obj_pose, std::array<double, 7> cam_pose, cv::Mat& img, cv::Rect roi) { 221 | + glfwMakeContextCurrent(window_); 222 | + 223 | + glBindFramebuffer(GL_FRAMEBUFFER, fbo_); 224 | + 225 | + /* View transformation matrix. */ 226 | + glm::mat4 view = getViewTransformationMatrix(&cam_pose[0], &cam_pose[3]); 227 | + 228 | + /* Install/Use the program specified by the shader. */ 229 | + shader_cad_->install(); 230 | + glUniformMatrix4fv(glGetUniformLocation(shader_cad_->get_program(), "view"), 1, GL_FALSE, glm::value_ptr(view)); 231 | + shader_cad_->uninstall(); 232 | + 233 | + shader_mesh_texture_->install(); 234 | + glUniformMatrix4fv(glGetUniformLocation(shader_mesh_texture_->get_program(), "view"), 1, GL_FALSE, glm::value_ptr(view)); 235 | + shader_mesh_texture_->uninstall(); 236 | + 237 | + shader_frame_->install(); 238 | + glUniformMatrix4fv(glGetUniformLocation(shader_frame_->get_program(), "view"), 1, GL_FALSE, glm::value_ptr(view)); 239 | + shader_frame_->uninstall(); 240 | + 241 | + /* Clear the colorbuffer. 
*/ 242 | + glClearColor(0.0f, 0.0f, 0.0f, 1.0f); 243 | + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); 244 | + 245 | + { 246 | + /* Render in the upper-left-most tile of the render grid */ 247 | + glViewport(0, framebuffer_height_ - tile_img_height_, 248 | + tile_img_width_, tile_img_height_); 249 | + glScissor(0, framebuffer_height_ - tile_img_height_, 250 | + tile_img_width_, tile_img_height_); 251 | + 252 | + glm::mat4 model = glm::rotate(glm::mat4(1.0f), static_cast<float>(obj_pose[6]), glm::vec3(static_cast<float>(obj_pose[3]), static_cast<float>(obj_pose[4]), static_cast<float>(obj_pose[5]))); 253 | + model[3][0] = obj_pose[0]; 254 | + model[3][1] = obj_pose[1]; 255 | + model[3][2] = obj_pose[2]; 256 | + 257 | + auto iter_model = model_obj_.find("model"); 258 | + shader_mesh_texture_->install(); 259 | + glUniformMatrix4fv(glGetUniformLocation(shader_mesh_texture_->get_program(), "model"), 1, GL_FALSE, glm::value_ptr(model)); 260 | + (iter_model->second)->Draw(*shader_mesh_texture_); 261 | + shader_mesh_texture_->uninstall(); 262 | + 263 | + /* Read before swap. glReadPixels read the current framebuffer, i.e. the back one. */ 264 | + /* See: http://stackoverflow.com/questions/16809833/opencv-image-loading-for-opengl-texture#16812529 265 | + and http://stackoverflow.com/questions/9097756/converting-data-from-glreadpixels-to-opencvmat#9098883 */ 266 | + glReadBuffer(GL_COLOR_ATTACHMENT0); 267 | + glPixelStorei(GL_PACK_ALIGNMENT, (img.step & 3) ? 1 : 4); 268 | + glPixelStorei(GL_PACK_ROW_LENGTH, img.step / img.elemSize()); 269 | + // glReadPixels(roi.x, framebuffer_height_ - tile_img_height_ - roi.y, roi.width, roi.height, GL_BGR, GL_UNSIGNED_BYTE, img.data); 270 | + glReadPixels(roi.x, tile_img_height_ - roi.y - roi.height, roi.width, roi.height, GL_RED, GL_UNSIGNED_BYTE, img.data); 271 | + 272 | + cv::flip(img, img, 0); 273 | + } 274 | + 275 | + /* Swap the buffers. 
*/ 276 | + glfwSwapBuffers(window_); 277 | + 278 | + pollOrPostEvent(); 279 | + 280 | + glBindFramebuffer(GL_FRAMEBUFFER, 0); 281 | + 282 | + glfwMakeContextCurrent(nullptr); 283 | + 284 | + return true; 285 | +} 286 | + 287 | +bool SICAD::superimpose( 288 | const ModelPoseContainer& objpos_map, 289 | const double* cam_x, 290 | const double* cam_o, 291 | --------------------------------------------------------------------------------