├── .clang-format
├── CMakeLists.txt
├── README.md
├── include
│   └── visensor_simulator
│       ├── builtin_planner.h
│       ├── logger.h
│       ├── ros_backend_node.h
│       └── simple_waypoint_planner.h
├── launch
│   ├── depth_image_to_ply_files.launch
│   ├── depth_image_to_point_cloud.launch
│   ├── extern_backend.launch
│   ├── ros_backend.launch
│   ├── spawn_mav_with_noise.launch
│   └── uav_vi_blender.launch
├── package.xml
├── resources
│   ├── firefly_with_vi_sensor_no_cameras.gazebo
│   ├── gimbal_snippets.xacro
│   ├── mav_with_odom_and_vi_sensor_and_gimbal_no_cameras.gazebo
│   ├── mav_with_odom_and_vi_sensor_no_cameras.gazebo
│   ├── neo11_vi_sensor_no_cameras.gazebo
│   └── project_test.tar
├── scripts
│   ├── blender
│   │   ├── blender_debug.py
│   │   ├── blender_enable_addon.py
│   │   ├── blender_render_no_gui.py
│   │   └── visensor_sim_blender_addon.py
│   ├── exr_compression_remover.py
│   ├── visensor_sim_bagcreator.py
│   └── waypoint_generators
│       ├── waypoints_gsd_zigzag_generator.py
│       └── waypoints_zigzag_generator.py
├── setup.py
└── src
    ├── extern_planner_backend.cc
    ├── gazebo_poltergeist_gimbal_plugin.cpp
    ├── ros_backend_node.cc
    ├── simple_planner_backend.cc
    └── simple_waypoint_planner.cc
--------------------------------------------------------------------------------
/.clang-format:
--------------------------------------------------------------------------------
 1 | ---
 2 | Language: Cpp
 3 | BasedOnStyle: Google
 4 | DerivePointerAlignment: false
 5 | PointerAlignment: Left
 6 | ColumnLimit: 80
 7 | AllowShortFunctionsOnASingleLine: Empty
 8 | AllowShortIfStatementsOnASingleLine: false
 9 | AllowShortLoopsOnASingleLine: false
10 | AlignAfterOpenBracket: AlwaysBreak
11 | IncludeCategories:
12 |   - Regex: '^<.*'
13 |     Priority: 1
14 |   - Regex: '.*'
15 |     Priority: 2
16 | ---
17 | Language: Proto
18 | BasedOnStyle: Google
19 | ...
20 | 
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
 1 | cmake_minimum_required(VERSION 2.8.3)
 2 | project(visensor_simulator)
 3 | 
 4 | add_definitions(-std=c++11)
 5 | 
 6 | find_package(catkin_simple REQUIRED)
 7 | find_package(Boost REQUIRED COMPONENTS system filesystem)
 8 | 
 9 | # Depend on system install of Gazebo
10 | find_package(gazebo REQUIRED)
11 | 
12 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${GAZEBO_CXX_FLAGS}")
13 | 
14 | link_directories(${GAZEBO_LIBRARY_DIRS})
15 | include_directories(${Boost_INCLUDE_DIR} ${catkin_INCLUDE_DIRS} ${GAZEBO_INCLUDE_DIRS})
16 | 
17 | catkin_simple()
18 | 
19 | FILE(GLOB_RECURSE LibFiles "include/*.h")
20 | add_custom_target(headers SOURCES ${LibFiles})
21 | 
22 | 
23 | cs_add_library(visensor_sim_backend src/ros_backend_node.cc )
24 | 
25 | cs_add_executable(simple_planner_backend src/simple_planner_backend.cc src/simple_waypoint_planner.cc)
26 | target_link_libraries(simple_planner_backend ${catkin_LIBRARIES} visensor_sim_backend)
27 | 
28 | cs_add_executable(extern_planner_backend src/extern_planner_backend.cc )
29 | target_link_libraries(extern_planner_backend ${catkin_LIBRARIES} visensor_sim_backend)
30 | 
31 | cs_add_library(poltergeist_gimbal_plugin src/gazebo_poltergeist_gimbal_plugin.cpp)
32 | target_link_libraries(poltergeist_gimbal_plugin ${catkin_LIBRARIES} ${GAZEBO_LIBRARIES})
33 | 
34 | catkin_install_python(PROGRAMS scripts/visensor_sim_bagcreator.py
35 |                       DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION})
36 | 
37 | cs_install()
38 | 
39 | cs_export()
40 | 
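For orientation, the CMakeLists.txt above builds a backend library plus two executables (simple_planner_backend and extern_planner_backend) and the poltergeist_gimbal_plugin Gazebo plugin. A minimal sketch of compiling just this package with catkin tools (the workspace path is an assumption, matching the README below):

```sh
$ cd ~/catkin_ws
$ catkin build visensor_simulator
$ source devel/setup.bash
```

The resulting nodes are normally started through the files in launch/ rather than run directly.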
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
 1 | VI-Sensor Simulator
 2 | ========================
 3 | A simulator for the VI-Sensor. No guarantees.
 4 | 
 5 | This work is described in the letter "Aerial Single-View Depth Completion with Image-Guided Uncertainty Estimation" by Lucas Teixeira, Martin R.
 6 | Oswald, Marc Pollefeys and Margarita Chli, published in the IEEE
 7 | Robotics and Automation Letters (RA-L) [IEEE link](https://doi.org/10.1109/LRA.2020.2967296).
 8 | 
 9 | #### Video:
10 | Mesh
11 | 
12 | #### Citations:
13 | If you use this code for research, please cite the following publication:
14 | 
15 | ```
16 | @article{Teixeira:etal:RAL2020,
17 |   title = {{Aerial Single-View Depth Completion with Image-Guided Uncertainty Estimation}},
18 |   author = {Lucas Teixeira and Martin R. Oswald and Marc Pollefeys and Margarita Chli},
19 |   journal = {{IEEE} Robotics and Automation Letters ({RA-L})},
20 |   doi = {10.1109/LRA.2020.2967296},
21 |   year = {2020}
22 | }
23 | ```
24 | 
25 | 
26 | License
27 | ------
28 | This repository contains copyrighted code from other libraries, mostly marked as such in the source code. The BSD license applies to our original code.
29 | 
30 | Installation
31 | ------
32 | * Requirements:
33 |   * ROS [Melodic](http://wiki.ros.org/melodic/Installation/Ubuntu)
34 |   * Blender 2.80 or newer (https://askubuntu.com/questions/110821/how-to-install-blender-from-the-official-website)
35 | 
36 | * Initialize the catkin workspace (using [catkin tools](https://catkin-tools.readthedocs.io/en/latest/installing.html)):
37 | ```sh
38 | $ mkdir -p ~/catkin_ws/src
39 | $ cd ~/catkin_ws
40 | $ source /opt/ros/melodic/setup.bash
41 | $ catkin init  # initialize your catkin workspace
42 | $ catkin config --cmake-args -DCMAKE_BUILD_TYPE=Release
43 | $ catkin config --merge-devel
44 | ```
45 | * Get the simulator and its dependencies:
46 | ```sh
47 | $ cd ~/catkin_ws/src
48 | $ sudo apt-get install liblapacke-dev python-wstool python-catkin-tools protobuf-compiler libgoogle-glog-dev libopenexr-dev libatlas-base-dev libeigen3-dev libsuitesparse-dev
49 | $ sudo apt-get install ros-melodic-joy ros-melodic-octomap-ros
50 | $ git clone git@github.com:catkin/catkin_simple
51 | $ git clone git@github.com:ethz-asl/rotors_simulator
52 | $ git clone git@github.com:ethz-asl/mav_comm
53 | $ git clone git@github.com:ethz-asl/eigen_catkin
54 | $ git clone git@github.com:ethz-asl/glog_catkin
55 | $ git clone git@github.com:ethz-asl/mav_control_rw
56 | $ pip install OpenEXR
57 | 
58 | $ git clone git@github.com:VIS4ROB-lab/visensor_simulator.git -b blender_2.8
59 | ```
60 | * Build the workspace:
61 | ```sh
62 | $ catkin build
63 | ```
64 | 
65 | * Open Blender, navigate to Edit > Preferences > Add-ons, and install the script "visensor_simulator/scripts/blender/visensor_sim_blender_addon.py" as an add-on by clicking "Install..." (https://docs.blender.org/manual/en/latest/editors/preferences/addons.html)
66 | 
67 | 
68 | Step-by-step
69 | ------
70 | 1. Create a project: a folder with any name, in which you configure the cameras and the waypoints. The resources folder contains an example, "project_test.tar".
71 | 
72 | 2. Start Gazebo:
73 | ```sh
74 | $ roslaunch visensor_simulator uav_vi_blender.launch
75 | ```
76 | 3. Run the backend:
77 | ```sh
78 | $ roslaunch visensor_simulator ros_backend.launch project_folder:="/home/lucas/data/test/project_testA"
79 | ```
80 | 4. Open your scene in Blender, select the camera, go to File > Import > VISensor Simulator Project (*.json), and choose the file visim_project.json inside your project folder.
81 | 
82 | 5. Render. Quick render is faster, but it is less realistic.
83 | 
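84 | For a machine without a display, the helper script "scripts/blender/blender_render_no_gui.py" renders every camera that carries a VISim configuration from the command line. A minimal sketch (the .blend path is a placeholder; the scene must already contain the imported VISim project):
85 | ```sh
86 | $ blender -b your_scene.blend -P ~/catkin_ws/src/visensor_simulator/scripts/blender/blender_render_no_gui.py
87 | ```
88 | 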
89 | 6. Run the bagcreator (the namespace argument is optional):
90 | ```sh
91 | $ rosrun visensor_simulator visensor_sim_bagcreator.py --output_bag your_output.bag --project_folder "/home/lucas/data/test/project_testA" --namespace "firefly"
92 | ```
93 | 
94 | Roadmap
95 | ------
96 | * write a camera-path exporter compatible with our waypoint planner; ideally this becomes part of our dataset format
97 | * change from firefly to neo11
98 | * develop software to build a simplified version of the world to allow collisions in the simulation; BVH and Octomap are options
99 | * improve the bagcreator error message when the bag file cannot be created
100 | * expose the simple_planner waypoint tolerances as ROS parameters
101 | * write our own spawn launch file with noise and the vi_sensor pose as parameters
102 | * add the IMU name to the JSON file
103 | * auto-select a camera from the JSON
104 | * add an option to disable the simple planner
105 | * detect an incomplete render sequence and resume from the latest frame (to support recovery after a shutdown)
106 | * make everything relative to the project file instead of the project folder
107 | * add a test for topic names in the bagcreator
108 | 
109 | 
110 | 
--------------------------------------------------------------------------------
/include/visensor_simulator/builtin_planner.h:
--------------------------------------------------------------------------------
 1 | #ifndef VISENSOR_SIMULATOR_BUILTIN_PLANNER_H
 2 | #define VISENSOR_SIMULATOR_BUILTIN_PLANNER_H
 3 | 
 4 | #include <string>
 5 | 
 6 | class BuiltInPlanner {
 7 |  public:
 8 |   enum PlannerStatus { INVALID, STARTING, RUNNING, COMPLETED };
 9 | 
10 |   virtual PlannerStatus getStatus() = 0;
11 |   virtual bool loadConfigurationFromFile(const std::string& project_folder_path) = 0;
12 | };
13 | 
14 | #endif  // VISENSOR_SIMULATOR_BUILTIN_PLANNER_H
15 | 
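BuiltInPlanner is the interface that the ROS backend polls while it records; SimpleWaypointPlanner (declared further below) is the implementation shipped with this repository. As an illustration of the contract only (the following class is hypothetical and not part of the repository), a minimal planner that flies nowhere and immediately reports completion could look like this:

```cpp
#include <string>

#include <visensor_simulator/builtin_planner.h>

// Hypothetical example: a do-nothing planner that immediately reports
// the mission as completed. It only exercises the interface.
class NullPlanner : public BuiltInPlanner {
 public:
  PlannerStatus getStatus() override { return COMPLETED; }

  bool loadConfigurationFromFile(
      const std::string& project_folder_path) override {
    // A real planner would parse waypoints.txt inside the project folder here.
    return true;
  }
};
```

Such a planner would be handed to the RosBackendNode constructor (see ros_backend_node.h below), which accepts a BuiltInPlanner pointer.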
--------------------------------------------------------------------------------
/include/visensor_simulator/logger.h:
--------------------------------------------------------------------------------
  1 | 
  2 | #ifndef VISENSOR_SIMULATOR_LOGGER_H
  3 | #define VISENSOR_SIMULATOR_LOGGER_H
  4 | 
  5 | #include <fstream>
  6 | #include <iomanip>
  7 | #include <iostream>
  8 | #include <vector>
  9 | #include <Eigen/Dense>
 10 | #include <ros/time.h>
 11 | 
 12 | struct ImuSensorReadings {
 13 |   EIGEN_MAKE_ALIGNED_OPERATOR_NEW
 14 | 
 15 |   ImuSensorReadings() : gyroscopes(), accelerometers(), timestamp(0) {}
 16 | 
 17 |   ImuSensorReadings(
 18 |       ros::Time timestamp_, Eigen::Vector3d gyroscopes_,
 19 |       Eigen::Vector3d accelerometers_)
 20 |       : gyroscopes(gyroscopes_),
 21 |         accelerometers(accelerometers_),
 22 |         timestamp(timestamp_) {}
 23 |   Eigen::Vector3d gyroscopes;      ///< Gyroscope measurement.
 24 |   Eigen::Vector3d accelerometers;  ///< Accelerometer measurement.
 25 |   ros::Time timestamp;
 26 | };
 27 | 
 28 | struct PoseReadings {
 29 |   EIGEN_MAKE_ALIGNED_OPERATOR_NEW
 30 |   Eigen::Vector3d position;
 31 |   Eigen::Quaterniond orientation;
 32 |   ros::Time timestamp;
 33 | };
 34 | 
 35 | std::ostream& operator<<(std::ostream& os, const PoseReadings& T) {
 36 |   Eigen::Vector3d p_WS_W = T.position;
 37 |   Eigen::Quaterniond q_WS = T.orientation;
 38 |   os << T.timestamp.toNSec() << "," << std::scientific << std::setprecision(18)
 39 |      << p_WS_W[0] << "," << p_WS_W[1] << "," << p_WS_W[2] << "," << q_WS.x()
 40 |      << "," << q_WS.y() << "," << q_WS.z() << "," << q_WS.w();
 41 |   return os;
 42 | }
 43 | 
 44 | std::ostream& operator<<(std::ostream& os, const ImuSensorReadings& T) {
 45 |   Eigen::Vector3d acc = T.accelerometers;
 46 |   Eigen::Vector3d gyro = T.gyroscopes;
 47 |   os << T.timestamp.toNSec() << "," << std::scientific << std::setprecision(18)
 48 |      << acc[0] << "," << acc[1] << "," << acc[2] << "," << gyro[0] << ","
 49 |      << gyro[1] << "," << gyro[2];
 50 |   return os;
 51 | }
 52 | 
 53 | class Logger {
 54 |  public:
 55 |   Logger() : is_valid_(false) {}
 56 |   ~Logger() {
 57 |     stop();
 58 |   }
 59 | 
 60 |   bool startLogger(std::string output_folder) {
 61 |     if (!output_folder.empty() &&
 62 |         output_folder.at(output_folder.length() - 1) != '/')
 63 |       output_folder += "/";
 64 | 
 65 |     std::string imu_filename = output_folder + "imu_data.csv";
 66 |     std::string pose_filename = output_folder + "pose_data.csv";
 67 |     // ROS_INFO_STREAM(" " << imu_filename);
 68 |     imu_file.open(imu_filename);
 69 |     pose_file.open(pose_filename);
 70 | 
 71 |     if (imu_file.is_open() && pose_file.is_open()) {
 72 |       is_valid_ = true;
 73 |     } else {
 74 |       // Opening failed: close both streams and bail out before any writes.
 75 |       is_valid_ = false;
 76 |       imu_file.close();
 77 |       pose_file.close();
 78 |       return false;
 79 |     }
 80 | 
 81 |     imu_file << "timestamp, acc_x, acc_y, acc_z, gyro_x, gyro_y, gyro_z" << std::endl << std::flush;
 82 |     pose_file << "timestamp, p_x, p_y, p_z, q_x, q_y, q_z, q_w" << std::endl << std::flush;
 83 | 
 84 |     imu_data.clear();
 85 |     imu_data.reserve(100000);
 86 |     pose_data.clear();
 87 |     pose_data.reserve(100000);
 88 | 
 89 |     return is_valid_;
 90 |   }
 91 | 
 92 |   bool logPose(const PoseReadings& measurement) {
 93 |     if (!is_valid_)
 94 |       return false;
 95 | 
 96 |     pose_data.push_back(measurement);
 97 |     return true;
 98 |   }
 99 | 
100 |   bool logIMU(const ImuSensorReadings& measurement) {
101 |     if (!is_valid_)
102 |       return false;
103 | 
104 |     imu_data.push_back(measurement);
105 |     return true;
106 |   }
107 | 
108 |   void stop() {
109 |     if (!is_valid_)
110 |       return;
111 | 
112 |     serializeImu();
113 |     serializePoses();
114 | 
115 |     is_valid_ = false;
116 |     imu_file.close();
117 |     pose_file.close();
118 |   }
119 | 
120 |  private:
121 |   bool is_valid_;
122 |   std::vector<ImuSensorReadings, Eigen::aligned_allocator<ImuSensorReadings>>
123 |       imu_data;
124 |   std::vector<PoseReadings, Eigen::aligned_allocator<PoseReadings>> pose_data;
125 | 
126 |   std::ofstream imu_file;
127 |   std::ofstream pose_file;
128 | 
129 |   void serializeImu() {
130 |     for (const auto& reading : imu_data)
131 |       imu_file << reading << std::endl;
132 |   }
133 | 
134 |   void serializePoses() {
135 |     for (const auto& reading : pose_data)
136 |       pose_file << reading << std::endl;
137 |   }
138 | };
139 | 
140 | #endif
141 | 
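As a usage sketch (not code from this repository; the real call sites are in src/ros_backend_node.cc, which this dump does not include), the Logger buffers readings in memory and only writes the measurement rows of the two CSV files when stop() is called:

```cpp
#include <visensor_simulator/logger.h>

int main() {
  Logger logger;
  if (!logger.startLogger("/tmp/visim_output/")) {  // placeholder folder
    return 1;
  }
  // Constructor order is (timestamp, gyroscopes, accelerometers).
  ImuSensorReadings imu(
      ros::Time(0.005),
      Eigen::Vector3d(0.0, 0.0, 0.01),   // gyroscopes [rad/s]
      Eigen::Vector3d(0.0, 0.0, 9.81));  // accelerometers [m/s^2]
  logger.logIMU(imu);  // buffered in RAM until stop()
  logger.stop();  // serializes all buffered readings and closes the files
  return 0;
}
```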
--------------------------------------------------------------------------------
/include/visensor_simulator/ros_backend_node.h:
--------------------------------------------------------------------------------
 1 | #ifndef VISENSOR_SIMULATOR_ROS_BACKEND_H
 2 | #define VISENSOR_SIMULATOR_ROS_BACKEND_H
 3 | 
 4 | #include <iostream>
 5 | #include <string>
 6 | 
 7 | #include <nav_msgs/Odometry.h>
 8 | #include <ros/ros.h>
 9 | #include <sensor_msgs/Imu.h>
10 | 
11 | using namespace ros;
12 | 
13 | #define STRING_ENUM(value)               \
14 |   case RosBackendNode::value:            \
15 |     os << "Current state: " << #value;   \
16 |     break;
17 | 
18 | class Logger;
19 | class BuiltInPlanner;
20 | 
21 | class RosBackendNode {
22 |  public:
23 |   RosBackendNode(BuiltInPlanner* planner = nullptr);
24 |   ~RosBackendNode();
25 | 
26 |   enum SimulationState { INVALID, WAINTING_PLANNER, RECORDING, FINISHED };
27 | 
28 |   void run(const std::string& project_folder);
29 | 
30 |  private:
31 |   // ROS publishers and subscribers
32 |   ros::NodeHandle nh_;
33 |   ros::Subscriber imu_sub_;
34 |   ros::Subscriber reference_odometry_sub_;
35 | 
36 |   Logger* logger_;
37 |   BuiltInPlanner* planner_;
38 |   bool use_builtin_planner_;
39 | 
40 |   // Callback functions
41 |   void referenceOdometryCallback(const nav_msgs::Odometry& odometry_msg);
42 |   void updateBuiltInPlanner();
43 |   void imuCallback(const sensor_msgs::ImuConstPtr& msg);
44 |   void setState(SimulationState state);
45 | 
46 |   SimulationState simulation_state_;
47 |   std::string output_folder_path_;
48 | };
49 | 
50 | #endif  // VISENSOR_SIMULATOR_ROS_BACKEND_H
51 | 
--------------------------------------------------------------------------------
/include/visensor_simulator/simple_waypoint_planner.h:
--------------------------------------------------------------------------------
 1 | 
 2 | #ifndef VISENSOR_SIMULATOR_SIMPLE_WAYPOINT_PLANNER_H
 3 | #define VISENSOR_SIMULATOR_SIMPLE_WAYPOINT_PLANNER_H
 4 | 
 5 | #include <list>
 6 | #include <Eigen/Dense>
 7 | #include <geometry_msgs/Pose.h>
 8 | #include <nav_msgs/Odometry.h>
 9 | #include <ros/ros.h>
10 | #include <visensor_simulator/builtin_planner.h>
11 | 
12 | struct Waypoint {
13 |   double x;            // m
14 |   double y;            // m
15 |   double z;            // m
16 |   double yaw;          // rad
17 |   float gimbal_pitch;  // deg
18 |   Waypoint(
19 |       double x_p, double y_p, double z_p, double yaw_p, float gimbal_pitch_p)
20 |       : x(x_p), y(y_p), z(z_p), yaw(yaw_p), gimbal_pitch(gimbal_pitch_p) {}
21 | };
22 | 
23 | class SimpleWaypointPlanner : public BuiltInPlanner {
24 |  public:
25 |   // Default error tolerances: 0.2 rad (= 11.5 deg) in yaw, meters in position; these values need to be proportional to the noise in the odometry used as input.
26 | SimpleWaypointPlanner(); 27 | 28 | SimpleWaypointPlanner(double yaw_max_error, double position_max_error); 29 | ~SimpleWaypointPlanner() {} 30 | 31 | bool loadConfigurationFromFile(const std::string& project_folder_path) override; 32 | 33 | PlannerStatus getStatus() override; 34 | 35 | private: 36 | PlannerStatus step(const nav_msgs::Odometry& curr_odometry); 37 | bool getNextWaypoint( 38 | Eigen::Vector3d& desired_position, double& desired_yaw, 39 | float& desired_gimbal_pitch); 40 | bool reachedNextWaypoint(const geometry_msgs::Pose& curr_pose); // do not 41 | // consider 42 | // the gimbal 43 | // position 44 | void sendPoseCommand( 45 | const Eigen::Vector3d& desired_position, const double& desired_yaw, 46 | const float& desired_gimbal_pitch); 47 | void robotOdometryCallback(const nav_msgs::Odometry& curr_odometry); 48 | void goNextPose(); 49 | 50 | bool is_valid_; 51 | double yaw_max_error_; 52 | double position_max_error_squared_; 53 | std::list waypoints_; 54 | PlannerStatus status_; 55 | 56 | ros::NodeHandle nh_; 57 | 58 | ros::Publisher pose_command_pub_; 59 | ros::Publisher gimbal_command_pub_; 60 | ros::Subscriber robot_odometry_sub_; 61 | }; 62 | 63 | #endif 64 | -------------------------------------------------------------------------------- /launch/depth_image_to_ply_files.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | -------------------------------------------------------------------------------- /launch/depth_image_to_point_cloud.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /launch/extern_backend.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /launch/ros_backend.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /launch/spawn_mav_with_noise.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 39 | 40 | 41 | 42 | 50 | 51 | 52 | -------------------------------------------------------------------------------- /launch/uav_vi_blender.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | -------------------------------------------------------------------------------- /package.xml: -------------------------------------------------------------------------------- 1 | 2 | visensor_simulator 3 | 0.1.0 4 | Simulator to create Visual-Inertial data, such as Skybotics/ETH-ASL VI-Sensor 5 | 6 | Lucas Teixeira 7 | 8 | None 9 | 10 | 11 | catkin 12 | catkin_simple 13 | mav_msgs 
14 | geometry_msgs 15 | roscpp 16 | sensor_msgs 17 | gazebo_ros 18 | 19 | 20 | mav_msgs 21 | geometry_msgs 22 | roscpp 23 | sensor_msgs 24 | gazebo_ros 25 | 26 | -------------------------------------------------------------------------------- /resources/firefly_with_vi_sensor_no_cameras.gazebo: -------------------------------------------------------------------------------- 1 | 2 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 48 | 49 | 50 | 51 | 52 | 53 | 59 | 60 | 61 | 62 | -------------------------------------------------------------------------------- /resources/gimbal_snippets.xacro: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | ${namespace} 99 | roll_joint 100 | 1 101 | 90 102 | pitch_joint 103 | 1 104 | -90 105 | yaw_joint 106 | 1 107 | 0 108 | 0.3 109 | ${parent_link} 110 | camera_mount_link 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | 129 | 130 | 131 | 132 | 133 | 134 | 135 | 136 | 137 | 138 | /${namespace} 139 | ${namespace}/pitch_joint 140 | 1 141 | 0 142 | 0.5 143 | ${parent_link} 144 | ${namespace}/camera_mount_link 145 | 146 | 147 | 148 | 149 | -------------------------------------------------------------------------------- /resources/mav_with_odom_and_vi_sensor_and_gimbal_no_cameras.gazebo: -------------------------------------------------------------------------------- 1 | 2 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | -------------------------------------------------------------------------------- /resources/mav_with_odom_and_vi_sensor_no_cameras.gazebo: -------------------------------------------------------------------------------- 1 | 2 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 57 | 58 | 59 | 60 | 61 | 62 | 68 | 69 | 70 | 71 | -------------------------------------------------------------------------------- /resources/neo11_vi_sensor_no_cameras.gazebo: -------------------------------------------------------------------------------- 1 | 2 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /resources/project_test.tar: -------------------------------------------------------------------------------- 1 | waypoints.txt000664 001750 001750 00000000113 13264135200 014125 0ustar00lucaslucas000000 000000 0,0,1,0 2 | 0.0,-5.0,2.0,0.0 3 | 5.0,-5.0,2.0,0.0 4 | 5.0,5.0,2.0,0.0 5 | 0.0,5.0,3.0,0.0 6 | 7 | visim_project.json000664 001750 001750 00000002166 13317215765 015127 0ustar00lucaslucas000000 000000 { 8 | "cameras": [ 9 | { 10 | "T_SC": 11 | [ 0, -1, 0, 0.015, 12 | -1, 0, 0, 0.055, 13 | 0, 0, -1, 0.0065, 14 | 0, 0, 0, 1], 15 | "cam_name": "cam0", 16 | "frequency_reduction_factor": 10, 17 | "height": 480, 18 | 
"width": 752, 19 | "focal_length": 455 20 | }, 21 | { 22 | "T_SC": 23 | [ 0, -1, 0, 0.015, 24 | -1, 0, 0, -0.055, 25 | 0, 0, -1, 0.0065, 26 | 0, 0, 0, 1], 27 | "cam_name": "cam1", 28 | "frequency_reduction_factor": 10, 29 | "height": 480, 30 | "width": 752, 31 | "focal_length": 455 32 | }, 33 | { 34 | "T_SC": 35 | [ 0, 0, 1, -0.10, 36 | -1, 0, 0, 0, 37 | 0,-1, 0, 0, 38 | 0, 0, 0, 1], 39 | "cam_name": "cam2", 40 | "frequency_reduction_factor": 200, 41 | "height": 480, 42 | "width": 752, 43 | "focal_length": 350 44 | } 45 | ], 46 | "name": "project_test" 47 | } 48 | -------------------------------------------------------------------------------- /scripts/blender/blender_debug.py: -------------------------------------------------------------------------------- 1 | filename = "/home/lucas/catkin_ws/src/visensor_simulator/scripts/blender_visensor_sim_tool_lite.py" 2 | exec(compile(open(filename).read(), filename, 'exec')) 3 | -------------------------------------------------------------------------------- /scripts/blender/blender_enable_addon.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | #you can use to enable the plugin in a remote server 3 | bpy.ops.wm.addon_install(filepath='/home/shane/Downloads/testaddon.py') 4 | bpy.ops.wm.addon_enable(module='blender_visensor_sim_tool_lite') 5 | bpy.ops.wm.save_userpref() 6 | -------------------------------------------------------------------------------- /scripts/blender/blender_render_no_gui.py: -------------------------------------------------------------------------------- 1 | # The following is an updated version of http://www.blender.org/forum/viewtopic.php?t=19102 2 | # Author: baze from blender.org/forum 3 | # 4 | # Render all cameras or cameras containing text given with command line argument "cameras". 5 | # Example: 6 | # Let's say test.blend file contains cameras "east.01", "east.02", "west.01", "west.02" 7 | # By executing command "blender -b your_file.blend -P render_all_cameras.py" all 4 cameras will be rendered. 8 | # Command "blender -b your_file.blend -P render_all_cameras.py cameras=east" will render "east.01" and "east.02" cameras. 9 | # Command "blender -b your_file.blend -P render_all_cameras.py cameras=01" will render "east.01" and "west.01.02" cameras. 
10 | 11 | #/home/lucas/bin/blender-2.79b/blender -b /home/lucas/data/vi-dataset/vi-test/plane-test_l.blend -P /home/lucas/catkin_ws/src/visensor_simulator/scripts/blender_render_no_gui.py 12 | #/home/lucas/bin/blender-2.79b-linux-glibc219-x86_64/blender -b /home/lucas/visensor-simulator/plane-test_l.blend -P /home/lucas/visensor-simulator/blender_render_no_gui.py 13 | 14 | 15 | import argparse 16 | import bpy, bgl, blf,sys 17 | from bpy import data, ops, props, types, context 18 | print("\nThis Python script will render your scene with all available cameras") 19 | print("or with camera(s) matching command line argument 'cameras'") 20 | print("") 21 | print("Usage:") 22 | print("Render all cameras:") 23 | print("blender -b your_file.blend -P render_all_cameras.py\n") 24 | print("Render only matching cameras:") 25 | print("blender -b your_file.blend -P render_all_cameras.py cameras=east\n") 26 | 27 | cameraNames='' 28 | 29 | ##setup the argument list 30 | # parser = argparse.ArgumentParser(description='Create a ROS bag using the images and imu data.') 31 | # parser.add_argument('--project_folder', metavar='project', nargs='?', help='VISim Project folder') 32 | # 33 | ## Loop all command line arguments and try to find "cameras=east" or similar 34 | #for arg in sys.argv: 35 | # words=arg.split('=') 36 | # if ( words[0] == 'cameras'): 37 | # cameraNames = words[1] 38 | # 39 | #print('rendering cameras containing [' + cameraNames + ']') 40 | 41 | print('\nPrint Scenes...') 42 | sceneKey = bpy.data.scenes.keys()[0] 43 | print('Using Scene[' + sceneKey + ']') 44 | scene = bpy.data.scenes[sceneKey] 45 | 46 | # Loop all objects and try to find Cameras 47 | print('Looping Cameras') 48 | c=0 49 | for obj in bpy.data.objects: 50 | # Find cameras that match cameraNames 51 | if ( obj.type =='CAMERA') and obj.data.visim_cam_config.has_config == True and ( cameraNames == '' or obj.name.find(cameraNames) != -1) : 52 | print("Rendering scene["+sceneKey+"] with Camera["+obj.name+"]") 53 | 54 | scene.visim_render_camera = obj 55 | bpy.context.scene.output_image_format = 'OPEN_EXR' 56 | bpy.ops.visim.prerender('INVOKE_DEFAULT') 57 | 58 | # Render Scene and store the scene 59 | bpy.ops.render.render('INVOKE_DEFAULT', animation=True ) 60 | c = c + 1 61 | print('Done!') 62 | -------------------------------------------------------------------------------- /scripts/blender/visensor_sim_blender_addon.py: -------------------------------------------------------------------------------- 1 | import bpy 2 | import json 3 | import csv 4 | import mathutils 5 | import math 6 | import os 7 | import time 8 | import bpy_extras 9 | import sys 10 | import shutil 11 | 12 | 13 | bl_info = { 14 | "name": "VISensor Simulator project format", 15 | "description": "Import and Render a VISensor Simulator project", 16 | "author": "Lucas Teixeira", 17 | "version": (0, 4), 18 | "blender": (2, 80, 0), 19 | "location": "File > Import-Export", 20 | "warning": "", # used for warning icon and text in addons panel 21 | "wiki_url": "lteixeira@mavt.ethz.ch" 22 | "Scripts/My_Script", 23 | "category": "Import-Export"} 24 | 25 | 26 | ## Dataformat for the JSON project 27 | 28 | class VISimCamera(): 29 | cam_name = "cam_default" 30 | focal_length = 455 31 | frequency_reduction_factor = 99 # if 10 then it is 10 times SLOWER than the imu that runs at 200Hz 32 | height = 480 33 | width = 752 34 | transform_ImuCamera = [0, -1, 0, 0, #frontal camera in ros standard 35 | -1, 0, 0, 0, 36 | 0, 0, -1, 0, 37 | 0, 0, 0, 1] 38 | #todo add param with the delay 
between the imu and image 39 | 40 | 41 | 42 | def toJSON(self): 43 | result = {} 44 | result["cam_name"] = self.cam_name 45 | result["focal_length"] = self.focal_length 46 | result["frequency_reduction_factor"] = self.frequency_reduction_factor 47 | result["height"] = self.height 48 | result["width"] = self.width 49 | result["T_SC"] = self.transform_ImuCamera 50 | return result 51 | 52 | def fromJSON(self, json_dict): 53 | try: 54 | self.cam_name = json_dict["cam_name"] 55 | self.focal_length = json_dict["focal_length"] 56 | self.frequency_reduction_factor = json_dict["frequency_reduction_factor"] 57 | self.height = json_dict["height"] 58 | self.width = json_dict["width"] 59 | self.transform_ImuCamera = json_dict["T_SC"] 60 | except KeyError as e: 61 | print ('KeyError - cam_id {} reason {}'.format(self.cam_name , str(e))) 62 | return False 63 | return True 64 | 65 | class VISimProject(): 66 | 67 | cameras=[] 68 | name="" 69 | 70 | def camerasToJSON(self): 71 | result = [] 72 | for cam in self.cameras: 73 | result.append(cam.toJSON()) 74 | return result 75 | 76 | def camerasFromJSON(self, cameraJSONs): 77 | result = [] 78 | for iCamJSON in cameraJSONs: 79 | currCam = VISimCamera() 80 | if(currCam.fromJSON(iCamJSON)): 81 | result.append(currCam) 82 | else: 83 | return False 84 | 85 | self.cameras = result 86 | 87 | return True 88 | 89 | def toJSON(self): 90 | result = {} 91 | result["name"] = self.name 92 | result["cameras"] = self.camerasToJSON() 93 | return result 94 | 95 | def fromJSON(self, json_dict): 96 | try: 97 | self.name = json_dict["name"] 98 | except KeyError as e: 99 | print ("KeyError - in project reason {}".format(str(e))) 100 | 101 | if(self.camerasFromJSON(json_dict["cameras"]) == False): 102 | return False 103 | return True 104 | 105 | def test_VISimProject(): 106 | proj = VISimProject() 107 | proj.name = "project_test" 108 | cam0 = VISimCamera() 109 | cam0.cam_name = "cam0" 110 | 111 | 112 | cam1 = VISimCamera() 113 | cam1.cam_name = "cam1" 114 | 115 | 116 | proj.cameras.append(cam0) 117 | proj.cameras.append(cam1) 118 | 119 | proj_json = proj.toJSON() 120 | 121 | print("write") 122 | print(json.dumps(proj_json, 123 | sort_keys=False, indent=4)) 124 | proj_read = VISimProject() 125 | proj_read.fromJSON(proj_json) 126 | 127 | print("read") 128 | print(json.dumps(proj_read.toJSON(), 129 | sort_keys=False, indent=4)) 130 | 131 | ################### core 132 | 133 | 134 | class RosPose: 135 | q = [] 136 | p = [] 137 | q_BlenderRos = mathutils.Quaternion([0,1,0,0]) # converting from ros to blender is a 180 rotation around x 138 | timestamp = '' 139 | 140 | def transformed(self, rhs ): 141 | result = RosPose() 142 | result.q = self.q @ rhs.q 143 | 144 | rotated_p_rhs = rhs.p.copy() 145 | rotated_p_rhs.rotate(self.q) 146 | result.p = self.p + rotated_p_rhs 147 | return result 148 | 149 | def __init__(self, position=mathutils.Vector((0,0,0)), orientation = mathutils.Quaternion([1,0,0,0]),timestamp=''): 150 | self.q = orientation 151 | self.p = position 152 | self.timestamp = timestamp 153 | 154 | def __str__(self): 155 | return str(self.p) + " " + str(self.q) 156 | 157 | 158 | 159 | class BodyTrajectory: 160 | poses = [] 161 | 162 | def __init__(self,csv_parsed_list): 163 | for p in csv_parsed_list: 164 | curr_pose = RosPose(mathutils.Vector((float(p[1]),float(p[2]),float(p[3]))),mathutils.Quaternion([float(p[7]),float(p[4]),float(p[5]),float(p[6])]),p[0]) 165 | self.poses.append(curr_pose) 166 | 167 | 168 | 169 | 170 | class VISimProjectLoader(): 171 | 172 | @staticmethod 173 | 
def prepare_render(operator, context): 174 | if context.scene.visim_render_camera is None: 175 | context.scene.render.filepath = bpy.context.user_preferences.filepaths.temporary_directory# point the another place 176 | return {'FINISHED'} 177 | 178 | project_object = context.scene.visim_render_camera.parent 179 | camera_data = context.scene.visim_render_camera.data 180 | 181 | #change scene camera 182 | context.scene.camera = context.scene.visim_render_camera 183 | 184 | project_object.hide_viewport = True 185 | for child in project_object.children: 186 | if child.type == 'CAMERA': 187 | child.hide_viewport = True 188 | else: 189 | operator.report({'ERROR'}, 'Unexpected project child type :'+ str(child.data)) 190 | return {'CANCELLED'} 191 | 192 | context.scene.camera.hide_viewport = False 193 | 194 | images_output_folder = os.path.join(project_object.visim_project_setting.project_folder,'output/2_Blender/'+camera_data.visim_cam_config.cam_name+'_rgbd') 195 | #if os.access(images_output_folder, os.R_OK | os.W_OK) : 196 | # shutil.rmtree(images_output_folder) 197 | 198 | os.makedirs(images_output_folder,exist_ok=True) 199 | 200 | scene = context.scene 201 | scene.render.filepath = os.path.join(images_output_folder,'bl_############.png')# 10 zeros padding 202 | scene.render.resolution_x = camera_data.visim_cam_config.width 203 | scene.render.resolution_y = camera_data.visim_cam_config.height 204 | bpy.context.scene.frame_step = camera_data.visim_cam_config.frequency_reduction_factor 205 | 206 | #RM if context.scene.output_image_format == 'PNG': 207 | if scene.render.image_settings.file_format == 'PNG': 208 | scene.render.resolution_percentage = 100 209 | #RM scene.render.image_settings.file_format = 'PNG' 210 | scene.render.image_settings.color_mode = 'RGB' 211 | scene.render.image_settings.color_depth = '8' 212 | scene.render.image_settings.compression = 0 213 | else: 214 | scene.render.filepath = os.path.join(images_output_folder,'bl_############.exr') #RM added 215 | scene.render.resolution_percentage = 100 216 | scene.render.image_settings.file_format = 'OPEN_EXR' 217 | scene.render.image_settings.exr_codec = 'NONE' 218 | scene.render.image_settings.color_depth = '32' 219 | scene.render.image_settings.color_mode = 'RGB' 220 | scene.render.image_settings.use_zbuffer = True 221 | 222 | return {'FINISHED'} 223 | 224 | 225 | 226 | 227 | @staticmethod 228 | def load_trajectory(curr_cam_obj, body_trajectory): 229 | ros2blender_quat = mathutils.Quaternion([0,1,0,0]) 230 | ctrans = curr_cam_obj.data.visim_cam_config.imu_camera_translation 231 | cquat = curr_cam_obj.data.visim_cam_config.imu_camera_quaternion 232 | T_BC = RosPose(ctrans,cquat @ ros2blender_quat) 233 | keyframe_counter = 1 234 | nposes = len(body_trajectory.poses) 235 | last_percent = -1 236 | reduction_factor = curr_cam_obj.data.visim_cam_config.frequency_reduction_factor 237 | 238 | for T_WB in body_trajectory.poses: 239 | if (keyframe_counter % reduction_factor == 1): 240 | T_WC = T_WB.transformed(T_BC) 241 | bpy.context.scene.frame_set(keyframe_counter) 242 | curr_cam_obj.location = T_WC.p 243 | curr_cam_obj.keyframe_insert('location') 244 | curr_cam_obj.rotation_mode = "QUATERNION" 245 | # the rotation_quaternion is the rotation from camera to the parent 246 | curr_cam_obj.rotation_quaternion = T_WC.q 247 | curr_cam_obj.keyframe_insert('rotation_quaternion') 248 | keyframe_counter = keyframe_counter+1; 249 | percent = math.floor(100*keyframe_counter/nposes) 250 | if percent != last_percent: 251 | print("Trajectory loading 
progress: {}".format(percent), end='\r', flush=True) 252 | last_percent = percent 253 | print("") 254 | return None 255 | 256 | 257 | @staticmethod 258 | def create_camera( visim_camera, parent,cam_list): 259 | #create camera instance 260 | 261 | camera_obj_name = parent.name[6:] + "_" + visim_camera.cam_name 262 | if camera_obj_name in bpy.data.cameras: 263 | bpy.data.cameras.remove(bpy.data.cameras[camera_obj_name]) 264 | 265 | camera_data = bpy.data.cameras.new( camera_obj_name ) 266 | camera_object = bpy.data.objects.new( camera_obj_name, camera_data ) 267 | bpy.context.collection.objects.link( camera_object ) 268 | camera_object.parent = parent 269 | camera_object.hide_render = True 270 | camera_object.hide_viewport = False #RM True 271 | 272 | #configure 273 | camera_data.angle = 2*math.atan2( visim_camera.width/2.0,visim_camera.focal_length ) 274 | camera_data.visim_cam_config.has_config = True 275 | camera_data.visim_cam_config.height = visim_camera.height 276 | camera_data.visim_cam_config.width = visim_camera.width 277 | camera_data.visim_cam_config.frequency_reduction_factor = visim_camera.frequency_reduction_factor 278 | camera_data.visim_cam_config.cam_name = visim_camera.cam_name 279 | L = visim_camera.transform_ImuCamera 280 | tf = mathutils.Matrix([L[0:4],L[4:8],L[8:12],L[12:16]]) 281 | pose = tf.decompose() 282 | camera_data.visim_cam_config.imu_camera_translation = pose[0] 283 | camera_data.visim_cam_config.imu_camera_quaternion = pose[1] 284 | cam_list[visim_camera.cam_name] = camera_object 285 | 286 | 287 | @staticmethod 288 | def load_trajectories(operator, project_object): 289 | 290 | poses_filename = os.path.join(project_object.visim_project_setting.project_folder,'output/1_InertialPose/pose_data.csv') 291 | print (poses_filename) 292 | 293 | body_poses_list = [] 294 | 295 | try: 296 | 297 | with open(poses_filename, 'r') as poses_file_h: 298 | next(poses_file_h) #jump first line 299 | reader = csv.reader(poses_file_h) 300 | body_poses_list = list(reader) 301 | except EnvironmentError: 302 | print('error') 303 | operator.report({'ERROR'}, 'pose file could not be opened: '+poses_filename) 304 | return {'CANCELLED'} 305 | 306 | if not body_poses_list: 307 | operator.report({'ERROR'}, 'empty pose file ' + poses_filename) 308 | return {'CANCELLED'} 309 | 310 | body_trajectory = BodyTrajectory(body_poses_list) 311 | 312 | bpy.context.scene.frame_start = 1 313 | bpy.context.scene.frame_end = len(body_trajectory.poses) 314 | bpy.context.scene.frame_step = 1 315 | 316 | ncamera= len(project_object.children) 317 | curr_cam = 0 318 | for child in project_object.children: 319 | curr_cam +=1 320 | print("current camera:{}/{}".format(curr_cam,ncamera)) 321 | if child.type == 'CAMERA': 322 | VISimProjectLoader.load_trajectory(child,body_trajectory) 323 | else: 324 | operator.report({'ERROR'}, 'Unexpected project child type :'+ str(child.data)) 325 | return {'CANCELLED'} 326 | 327 | return {'FINISHED'} 328 | 329 | @staticmethod 330 | def load_project(operator, project_object , filepath = None): 331 | 332 | if project_object != None: 333 | filepath = os.path.join(project_object.visim_project_setting.project_folder, 'visim_project.json') 334 | 335 | #open the json file 336 | with open(filepath) as json_data: 337 | try: 338 | project_data = json.load(json_data) 339 | except json.decoder.JSONDecodeError as e: 340 | operator.report({'ERROR'}, 'Problem with the JSON file parsing. 
'+ str(e)) 341 | return {'CANCELLED'} 342 | 343 | visim_json_project = VISimProject() 344 | if (visim_json_project.fromJSON(project_data) == False): 345 | operator.report({'ERROR'}, 'Problem with the JSON file parsing. Look in the terminal') 346 | #print(json.dumps(project_data,sort_keys=False, indent=4)) 347 | return {'CANCELLED'} 348 | 349 | #set object mode- I dont know why but it is part of the tutorial 350 | if(bpy.context.view_layer.objects.active == None): 351 | #select any object 352 | bpy.context.view_layer.objects.active = bpy.context.scene.objects.values().pop() 353 | 354 | #bpy.ops.object.mode_set(mode='OBJECT') 355 | 356 | if project_object == None: 357 | 358 | project_name = 'visim_' + visim_json_project.name 359 | 360 | if project_name in bpy.context.scene.objects: 361 | operator.report({'ERROR'}, 'Project with the same name already exist- Please delete the hierarchy ') 362 | return {'CANCELLED'} 363 | 364 | #create a empty to hold the cameras 365 | project_object = bpy.data.objects.new( project_name , None ) 366 | project_object.empty_display_size = 2 367 | project_object.empty_display_type = 'PLAIN_AXES' 368 | project_object.hide_render = True 369 | project_object.hide_viewport = False #RM True 370 | #'IPO_BACK' 371 | 372 | #add to the current scene 373 | bpy.context.collection.objects.link( project_object ) 374 | 375 | root_output_folder = os.path.dirname(os.path.abspath(filepath)) 376 | project_object.visim_project_setting.has_config = True 377 | project_object.visim_project_setting.project_folder = root_output_folder 378 | bpy.context.view_layer.objects.active = project_object 379 | 380 | else: 381 | #delete all children 382 | for child in project_object.children: 383 | if child.type == 'CAMERA': 384 | bpy.data.cameras.remove(child.data) 385 | else: 386 | operator.report({'ERROR'}, 'Unexpected project child type :'+ str(child.data)) 387 | return {'CANCELLED'} 388 | #bpy.context.scene.objects.unlink(child) 389 | #bpy.data.objects.remove(child) 390 | 391 | 392 | 393 | cam_list = {} 394 | 395 | for curr_cam in visim_json_project.cameras: 396 | VISimProjectLoader.create_camera(curr_cam,project_object,cam_list) 397 | 398 | return {'FINISHED'} 399 | 400 | class VISimCameraSetting(bpy.types.PropertyGroup): 401 | has_config = bpy.props.BoolProperty( 402 | name="has visim configuration", 403 | description="Indicates if this camera,\n" 404 | " has a VISim configuration", 405 | default=False 406 | ) 407 | 408 | height = bpy.props.IntProperty( 409 | name="Image height", 410 | default=0 411 | ) 412 | 413 | width = bpy.props.IntProperty( 414 | name="Image width", 415 | default=0 416 | ) 417 | 418 | frequency_reduction_factor = bpy.props.IntProperty( 419 | name="Frequency Reduction Factor", 420 | default=88 421 | ) 422 | 423 | cam_name = bpy.props.StringProperty( 424 | name="cam_name", 425 | default="??" 
426 | ) 427 | 428 | imu_camera_translation = bpy.props.FloatVectorProperty( 429 | name="imu_camera_translation", 430 | default=[0,0,0], 431 | subtype= 'TRANSLATION', 432 | precision=3 433 | ) 434 | 435 | imu_camera_quaternion = bpy.props.FloatVectorProperty( 436 | name="imu_camera_quaternion", 437 | default=[1,0,0,0], 438 | subtype= 'QUATERNION', 439 | precision=6, 440 | size=4 441 | ) 442 | 443 | 444 | 445 | class VISimProjectObjectSetting(bpy.types.PropertyGroup): 446 | has_config = bpy.props.BoolProperty( 447 | name="has visim configuration", 448 | description="Indicates if this camera,\n" 449 | " has a VISim configuration", 450 | default=False 451 | ) 452 | 453 | project_folder = bpy.props.StringProperty( 454 | name="Project Folder", 455 | default="??", 456 | subtype = 'DIR_PATH' 457 | ) 458 | 459 | class VISimTrajectoryReloadOperator(bpy.types.Operator): 460 | bl_idname = "visim.reload_trajectory" 461 | bl_label = "Reload VISim only trajectory" 462 | 463 | def execute(self, context): 464 | return VISimProjectLoader.load_trajectories(self,context.object) 465 | 466 | 467 | @classmethod 468 | def poll(cls, context): 469 | return context.object.visim_project_setting.has_config == True 470 | 471 | class VISimProjectReloadOperator(bpy.types.Operator): 472 | bl_idname = "visim.reload_project" 473 | bl_label = "Reload VISim Project" 474 | 475 | def execute(self, context): 476 | return VISimProjectLoader.load_project(self,context.object) 477 | 478 | 479 | @classmethod 480 | def poll(cls, context): 481 | return context.object.visim_project_setting.has_config == True 482 | 483 | class VISimProjectPanel(bpy.types.Panel): 484 | bl_idname = "OBJ_PT_visim_project_object_panel" 485 | bl_label = "VISim Project" 486 | bl_space_type = 'PROPERTIES' 487 | bl_region_type = 'WINDOW' 488 | bl_context = "object" 489 | 490 | def draw(self, context): 491 | #self.layout.prop(context.scene, "visim_camera", expand=True) 492 | self.layout.operator(VISimTrajectoryReloadOperator.bl_idname) 493 | self.layout.operator(VISimProjectReloadOperator.bl_idname) 494 | 495 | 496 | 497 | class VISimRaytraceRenderOperator(bpy.types.Operator): 498 | bl_idname = "visim.blender_render" 499 | bl_label = "Blender Render" 500 | 501 | def execute(self, context): 502 | 503 | bpy.ops.render.render('INVOKE_DEFAULT',animation=True) 504 | 505 | return {'FINISHED'} 506 | 507 | @classmethod 508 | def poll(cls, context): 509 | return context.scene.visim_render_camera is not None 510 | 511 | class VISimOGLRenderOperator(bpy.types.Operator): 512 | bl_idname = "visim.opengl_render" 513 | bl_label = "Quick Render" 514 | 515 | def execute(self, context): 516 | 517 | # Call user prefs window 518 | bpy.ops.screen.userpref_show('INVOKE_DEFAULT') 519 | # Change area type 520 | area = bpy.context.window_manager.windows[-1].screen.areas[0] 521 | area.type = 'VIEW_3D' 522 | area.spaces[0].region_3d.view_perspective = 'CAMERA' 523 | area.spaces[0].viewport_shade = 'MATERIAL' 524 | 525 | 526 | output_folder = os.path.dirname(os.path.abspath(bpy.context.scene.render.filepath)) 527 | if os.path.exists(output_folder) and os.path.isdir(output_folder): 528 | if os.listdir(output_folder): 529 | bpy.context.window_manager.popup_menu(scene_visim_info_replace_files_draw, title="Alert", icon='RADIO') 530 | else: 531 | self.report({'ERROR'},"Given Output Directory don't exists") 532 | return {'FINISHED'} 533 | 534 | 535 | bpy.ops.render.opengl('INVOKE_DEFAULT',animation=True) 536 | return {'FINISHED'} 537 | 538 | @classmethod 539 | def poll(cls, context): 540 | return 
((context.scene.visim_render_camera is not None) and context.scene.output_image_format == 'PNG') 541 | 542 | 543 | class VISimPrepareRenderOperator(bpy.types.Operator): 544 | bl_idname = "visim.prerender" 545 | bl_label = "Reload camera" 546 | 547 | def execute(self, context): 548 | 549 | return VISimProjectLoader.prepare_render(self,context) 550 | 551 | @classmethod 552 | def poll(cls, context): 553 | return context.scene.visim_render_camera is not None 554 | 555 | 556 | class VISimRenderPanel(bpy.types.Panel): 557 | bl_idname = "RENDER_PT_visim_render_panel" 558 | bl_label = "VISim Render" 559 | bl_space_type = 'PROPERTIES' 560 | bl_region_type = 'WINDOW' 561 | bl_context = "render" 562 | 563 | def draw(self, context): 564 | layout = self.layout 565 | layout.prop(context.scene, "visim_render_camera", expand=True) 566 | layout.operator(VISimPrepareRenderOperator.bl_idname) 567 | 568 | layout.label("Image format:") 569 | layout.prop(context.scene, "output_image_format", expand=True) 570 | 571 | row = layout.row(align=True) 572 | row.alignment = 'EXPAND' 573 | row.operator(VISimOGLRenderOperator.bl_idname, icon='RENDER_ANIMATION') 574 | row.operator(VISimRaytraceRenderOperator.bl_idname, icon='RENDER_ANIMATION') 575 | 576 | class ImportVISimProj(bpy.types.Operator, bpy_extras.io_utils.ImportHelper): 577 | """This appears in the tooltip of the operator and in the generated docs""" 578 | bl_idname = "import_vi_sim_proj.read_data" 579 | bl_label = "Import VISensor Simulator Project" 580 | 581 | # ImportHelper mixin class uses this 582 | filename_ext = ".json" 583 | 584 | filter_glob = bpy.props.StringProperty( 585 | default="*.json", 586 | options={'HIDDEN'}, 587 | maxlen=255, # Max internal buffer length, longer would be clamped. 588 | ) 589 | 590 | def execute(self, context): 591 | val = VISimProjectLoader.load_project(self,None,self.filepath) 592 | if( val == {'FINISHED'}): 593 | return VISimProjectLoader.load_trajectories(self,context.object) 594 | return val 595 | 596 | def scene_visim_camera_poll(self, object): 597 | return object.type == 'CAMERA' and object.data.visim_cam_config.has_config == True 598 | 599 | def scene_visim_camera_update(self, context): 600 | VISimProjectLoader.prepare_render(self,context) 601 | return None 602 | 603 | def scene_visim_info_replace_files_draw(self, context): 604 | self.layout.label("The output folder is not empty, mixing render results is dangerous! 
") 605 | 606 | def scene_output_image_format_update(self, context): 607 | VISimProjectLoader.prepare_render(self,context) 608 | return None 609 | 610 | 611 | def menu_func_import(self, context): 612 | self.layout.operator(ImportVISimProj.bl_idname, text="VISensor Simulator Project (.json)") 613 | 614 | 615 | 616 | classes = ( 617 | ImportVISimProj, 618 | VISimProjectPanel, 619 | VISimTrajectoryReloadOperator, 620 | VISimProjectReloadOperator, 621 | VISimRenderPanel, 622 | VISimPrepareRenderOperator, 623 | VISimOGLRenderOperator, 624 | VISimRaytraceRenderOperator, 625 | VISimCameraSetting, 626 | VISimProjectObjectSetting, 627 | ) 628 | 629 | 630 | def register(): 631 | for cls in classes: 632 | bpy.utils.register_class(cls) 633 | 634 | bpy.types.TOPBAR_MT_file_import.append(menu_func_import) 635 | 636 | bpy.types.Object.visim_project_setting = bpy.props.PointerProperty(type=VISimProjectObjectSetting) 637 | bpy.types.Camera.visim_cam_config = bpy.props.PointerProperty(type=VISimCameraSetting) 638 | bpy.types.Scene.visim_render_camera = bpy.props.PointerProperty(type=bpy.types.Object,update=scene_visim_camera_update,poll=scene_visim_camera_poll,name="VISim Camera") 639 | bpy.types.Scene.output_image_format = bpy.props.EnumProperty( 640 | name='Image Output Format', 641 | description='File Format for images.', 642 | items={ 643 | ('PNG', 'png', 'Save as png'), 644 | ('OPEN_EXR', 'exr (with depth)', 'Save as exr with z')}, 645 | default='PNG',update=scene_output_image_format_update) 646 | 647 | 648 | def unregister(): 649 | bpy.types.TOPBAR_MT_file_import.remove(menu_func_import) 650 | 651 | 652 | for cls in classes: 653 | bpy.utils.unregister_class(cls) 654 | 655 | del bpy.types.Object.visim_project_setting 656 | del bpy.types.Camera.visim_cam_config 657 | del bpy.types.Scene.visim_render_camera 658 | del bpy.types.Scene.output_image_format 659 | 660 | 661 | if __name__ == "__main__": 662 | register() 663 | 664 | -------------------------------------------------------------------------------- /scripts/exr_compression_remover.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | #License (BSD) 4 | #Copyright (c) 2018, Lucas Teixeira, Vision for Robotics Lab, ETH Zurich, Switzerland 5 | 6 | 7 | 8 | from __future__ import print_function 9 | 10 | import os 11 | import argparse 12 | import sys 13 | import OpenEXR 14 | import Imath 15 | import glob 16 | 17 | 18 | def convertExr(input_file,output_file): 19 | in_image_exr = OpenEXR.InputFile(input_file) 20 | header = in_image_exr.header() 21 | header['compression'] = Imath.Compression(Imath.Compression.NO_COMPRESSION) 22 | out_image_exr = OpenEXR.OutputFile(output_file,header) 23 | channels = header['channels'].keys() 24 | newchannels = dict(zip(channels, in_image_exr.channels(channels))) 25 | out_image_exr.writePixels(newchannels) 26 | 27 | 28 | 29 | if __name__ == "__main__": 30 | 31 | #setup the argument list 32 | parser = argparse.ArgumentParser(description='Tool to convert old render that used compression on the exr files. 
It saves about 30% on space but it is 50 times slower to open.') 33 | parser.add_argument('--input_folder', nargs='?', help='input folder') 34 | parser.add_argument('--output_folder', nargs='?', help='output folder') 35 | 36 | #print help if no argument is specified 37 | if len(sys.argv)<2: 38 | parser.print_help() 39 | sys.exit(0) 40 | 41 | #parse the args 42 | parsed = parser.parse_args() 43 | 44 | if not os.path.exists(parsed.input_folder) : 45 | print('The input folder does not exist :'+ parsed.input_folder) 46 | sys.exit() 47 | 48 | if os.path.exists(parsed.output_folder):#prevent a mess and to right on the input folder 49 | print('The output folder already exist. this script need to create a new output folder') 50 | sys.exit() 51 | 52 | 53 | 54 | original_current_folder = os.getcwd() 55 | 56 | os.chdir(parsed.input_folder) 57 | files_to_convert = glob.glob('*.exr') 58 | os.chdir(original_current_folder) 59 | 60 | if(len(files_to_convert) <= 0): 61 | print('No .exr files found in the input folder:' + parsed.input_folder) 62 | sys.exit() 63 | 64 | try: 65 | os.mkdir(parsed.output_folder) 66 | 67 | for f in files_to_convert: 68 | print('input file is:'+ os.path.join(parsed.input_folder,f)) 69 | print('output file is:'+ os.path.join(parsed.output_folder,f)+'\n') 70 | convertExr(os.path.join(parsed.input_folder,f),os.path.join(parsed.output_folder,f)) 71 | 72 | except ValueError: 73 | print("Oops!") 74 | 75 | -------------------------------------------------------------------------------- /scripts/visensor_sim_bagcreator.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | #License (BSD) 4 | #Copyright (c) 2018, Lucas Teixeira, Vision for Robotics Lab, ETH Zurich, Switzerland 5 | #Copyright (c) 2014, Paul Furgale, Jérôme Maye and Jörn Rehder, Autonomous Systems Lab, ETH Zurich, Switzerland 6 | #Copyright (c) 2014, Thomas Schneider, Skybotix AG, Switzerland 7 | #most code in based on kalibr_bagcreater.py -ethz-asl/kalibr 8 | 9 | 10 | from __future__ import print_function 11 | import rosbag 12 | import rospy 13 | from sensor_msgs.msg import Image, CameraInfo 14 | from sensor_msgs.msg import Imu 15 | from geometry_msgs.msg import PoseStamped,TransformStamped 16 | import os 17 | import argparse 18 | import cv2 19 | import numpy as np 20 | import csv 21 | import json 22 | import math 23 | import sys 24 | import OpenEXR 25 | import Imath 26 | from PIL import Image as Im 27 | 28 | 29 | 30 | 31 | 32 | ## Dataformat for the JSON project 33 | 34 | class VISimCamera(): 35 | cam_name = "cam_default" 36 | frame_id = "cam_default" 37 | focal_length = 455 38 | frequency_reduction_factor = 99 # if 10 then it is 10 times SLOWER than the imu that runs at 200Hz 39 | height = 480 40 | width = 752 41 | transform_ImuCamera = [0, -1, 0, 0, #frontal camera in ros standard 42 | -1, 0, 0, 0, 43 | 0, 0, -1, 0, 44 | 0, 0, 0, 1] 45 | #todo add param with the delay between the imu and image 46 | 47 | 48 | 49 | def toJSON(self): 50 | result = {} 51 | result["cam_name"] = self.cam_name 52 | result["frame_id"] = self.frame_id 53 | result["focal_length"] = self.focal_length 54 | result["frequency_reduction_factor"] = self.frequency_reduction_factor 55 | result["height"] = self.height 56 | result["width"] = self.width 57 | result["T_SC"] = self.transform_ImuCamera 58 | return result 59 | 60 | def fromJSON(self, json_dict): 61 | #obligatory parameters 62 | try: 63 | self.cam_name = json_dict["cam_name"] 64 | self.focal_length = 
json_dict["focal_length"]
 65 |             self.frequency_reduction_factor = json_dict["frequency_reduction_factor"]
 66 |             self.height = json_dict["height"]
 67 |             self.width = json_dict["width"]
 68 |             self.transform_ImuCamera = json_dict["T_SC"]
 69 |         except KeyError as e:
 70 |             print ('KeyError - cam_id {} reason {}'.format(self.cam_name , str(e)))
 71 |             return False
 72 | 
 73 |         #optional frame_id
 74 |         try:
 75 |             self.frame_id = json_dict["frame_id"]
 76 |         except KeyError:
 77 |             self.frame_id = self.cam_name
 78 | 
 79 |         return True
 80 | 
 81 | class VISimProject():
 82 | 
 83 |     cameras=[]
 84 |     name=""
 85 |     imu_frame_id=""
 86 |     imu_name=""
 87 | 
 88 |     def camerasToJSON(self):
 89 |         result = []
 90 |         for cam in self.cameras:
 91 |             result.append(cam.toJSON())
 92 |         return result
 93 | 
 94 |     def camerasFromJSON(self, cameraJSONs):
 95 |         result = []
 96 |         for iCamJSON in cameraJSONs:
 97 |             currCam = VISimCamera()
 98 |             if(currCam.fromJSON(iCamJSON)):
 99 |                 result.append(currCam)
100 |             else:
101 |                 return False
102 | 
103 |         self.cameras = result
104 | 
105 |         return True
106 | 
107 |     def toJSON(self):
108 |         result = {}
109 |         result["name"] = self.name
110 |         result["imu_frame_id"] = self.imu_frame_id
111 |         result["imu_name"] = self.imu_name
112 | 
113 |         result["cameras"] = self.camerasToJSON()
114 |         return result
115 | 
116 |     def fromJSON(self, json_dict):
117 |         try:
118 |             self.name = json_dict["name"]
119 |             self.imu_frame_id = json_dict["imu_frame_id"]
120 |             self.imu_name = json_dict["imu_name"]
121 |         except KeyError as e:
122 |             print ("KeyError - in project reason {}".format(str(e)))
123 | 
124 |         if(self.camerasFromJSON(json_dict["cameras"]) == False):
125 |             return False
126 |         return True
127 | 
128 | 
129 | 
130 | def getImageFilesFromDir(dir):
131 |     '''Generates a list of image files from the directory'''
132 |     image_files = list()
133 |     if os.path.exists(dir):
134 |         for path, names, files in os.walk(dir):
135 |             for f in files:
136 |                 if os.path.splitext(f)[1] in ['.exr', '.png', '.jpg','.jpeg']:
137 |                     image_files.append( f )
138 | 
139 | 
140 |     image_files = sorted( image_files)
141 |     return image_files
142 | 
143 | def getCamFoldersFromDir(dir):
144 |     '''Generates a list of all camera folders, i.e. folders whose names end with "_rgbd"'''
145 |     cam_folders = list()
146 |     if os.path.exists(dir):
147 |         for path, folders, files in os.walk(dir):
148 |             for folder in folders:
149 |                 if folder[-5:] == "_rgbd":
150 |                     cam_folders.append((folder[:-5],folder)) # (camera name, folder name)
151 |     return cam_folders
152 | 
153 | def loadBGRImageWithOpenCV(filepath):
154 |     image_np = cv2.imread(filepath, cv2.IMREAD_COLOR) #BGR
155 |     return image_np
156 | 
157 | def convertSRGBToRGB(img_str,size):
158 |     img = np.fromstring(img_str, dtype=np.float32)
159 |     img = np.where(img<=0.0031308,
160 |                 (img*12.92)*255.0,
161 |                 (1.055*(img**(1.0/2.4))-0.055) * 255.0)
162 |     img.shape = (size[1], size[0])
163 | 
164 |     return img.astype(np.uint8) #Im.fromarray(img,'F').convert("L")
165 | 
166 | 
167 | def loadImageWithOpenEXR(filepath):
168 |     image_exr = OpenEXR.InputFile(filepath)
169 |     dw = image_exr.header()['dataWindow']
170 |     size = (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1)
171 |     precision = Imath.PixelType(Imath.PixelType.FLOAT)
172 |     Z = image_exr.channel('Z', precision)
173 |     image_depth = np.fromstring(Z, dtype=np.float32)
174 |     image_depth[image_depth > 99999999] = 0 # conversion: invalid depth in the exr is inf and on the ros depth image it is 0
175 |     image_depth.shape = (size[1], size[0])
176 | 
177 |     r = convertSRGBToRGB(image_exr.channel('R', precision),size)
178 |     g = convertSRGBToRGB(image_exr.channel('G', precision),size)
179 |     b = convertSRGBToRGB(image_exr.channel('B', precision),size)
180 | 
181 |     image_bgr = cv2.merge([b, g, r]) # np.asarray(Im.merge("BGR", [b,g,r]))
182 | 
183 |     return image_bgr, image_depth
184 | 
185 | def loadImagesToRosMsg(timestamp,camera_definition,camdir,filename,force_gray):
186 |     filepath = os.path.join(camdir, filename)
187 |     extension = os.path.splitext(filepath)[1]
188 |     image_msgs = list()
189 |     image_bgr_np = None
190 |     if(extension in ['.png','.jpeg','.jpg']):
191 |         image_bgr_np = loadBGRImageWithOpenCV(filepath)
192 | 
193 |     elif (extension == '.exr'):
194 |         image_bgr_np, image_depth_np = loadImageWithOpenEXR(filepath)
195 |         image_msgs.append(['image_depth',createCameraDepthMsg(timestamp,image_depth_np,camera_definition.frame_id)])
196 | 
197 |     if(force_gray):
198 |         image_gray_np = cv2.cvtColor(image_bgr_np, cv2.COLOR_BGR2GRAY)
199 |         image_msgs.append(['image_raw',createCameraGrayMsg(timestamp,image_gray_np,camera_definition.frame_id)])
200 |     else:
201 |         image_msgs.append(['image_raw',createCameraRGBMsg(timestamp,image_bgr_np,camera_definition.frame_id)])
202 | 
203 |     return image_msgs
204 | 
205 | def createCameraGrayMsg(timestamp, image_gray,frame_id):
206 |     rosimage = Image()
207 |     rosimage.header.stamp = timestamp
208 |     rosimage.header.frame_id = frame_id
209 |     rosimage.height = image_gray.shape[0]
210 |     rosimage.width = image_gray.shape[1]
211 |     rosimage.step = rosimage.width # (step = width * byteperpixel * numChannels)
212 |     rosimage.encoding = "mono8"
213 |     rosimage.data = image_gray.tostring()
214 | 
215 |     return rosimage
216 | 
217 | def createCameraRGBMsg(timestamp, image_rgb,frame_id):
218 |     rosimage = Image()
219 |     rosimage.header.stamp = timestamp
220 |     rosimage.header.frame_id = frame_id
221 |     rosimage.height = image_rgb.shape[0]
222 |     rosimage.width = image_rgb.shape[1]
223 |     rosimage.step = rosimage.width * 1 * 3 #(step = width * byteperpixel * numChannels)
224 |     rosimage.encoding = "bgr8"
225 |     rosimage.data = image_rgb.tostring()
226 | 
227 |     return rosimage
228 | 
229 | def createCameraDepthMsg(timestamp, image_depth,frame_id):
230 |     rosimage = Image()
231 |     rosimage.header.stamp = timestamp
232 |     rosimage.header.frame_id =
frame_id 233 | rosimage.height = image_depth.shape[0] 234 | rosimage.width = image_depth.shape[1] 235 | rosimage.step = rosimage.width * 4 * 1 # (step = width * byteperpixel * numChannels) 236 | rosimage.encoding = "32FC1" 237 | rosimage.data = image_depth.tostring() 238 | 239 | return rosimage 240 | 241 | def createCameraInfoMsg(timestamp, camera_definition): 242 | 243 | cam_info_msg = CameraInfo() 244 | cam_info_msg.header.stamp = timestamp 245 | cam_info_msg.header.frame_id = camera_definition.frame_id 246 | 247 | f = camera_definition.focal_length 248 | center_x = math.floor(camera_definition.width/2.0) + 0.5 249 | center_y = math.floor(camera_definition.height/2.0) + 0.5 250 | 251 | cam_info_msg.width = camera_definition.width 252 | cam_info_msg.height = camera_definition.height 253 | k = [f, 0, center_x, 254 | 0, f, center_y, 255 | 0, 0, 1.0 ] 256 | cam_info_msg.K = k 257 | cam_info_msg.P = [k[0], k[1], k[2], 0, 258 | k[3], k[4], k[5], 0, 259 | k[6], k[7], k[8], 0] 260 | 261 | cam_info_msg.distortion_model = "plumb_bob" 262 | cam_info_msg.D = [0.0] * 5 263 | 264 | return cam_info_msg 265 | 266 | def createImuMessge(timestamp_int, alpha, omega): 267 | timestamp_nsecs = str(timestamp_int) 268 | timestamp = rospy.Time( int(timestamp_nsecs[0:-9]), int(timestamp_nsecs[-9:]) ) 269 | 270 | rosimu = Imu() 271 | rosimu.header.stamp = timestamp 272 | rosimu.angular_velocity.x = float(omega[0]) 273 | rosimu.angular_velocity.y = float(omega[1]) 274 | rosimu.angular_velocity.z = float(omega[2]) 275 | rosimu.linear_acceleration.x = float(alpha[0]) 276 | rosimu.linear_acceleration.y = float(alpha[1]) 277 | rosimu.linear_acceleration.z = float(alpha[2]) 278 | 279 | return rosimu, timestamp 280 | 281 | def createGtTransformStampedMessage(parent_frame, child_frame, timestamp_int, position, orientation): 282 | timestamp_nsecs = str(timestamp_int) 283 | timestamp = rospy.Time( int(timestamp_nsecs[0:-9]), int(timestamp_nsecs[-9:]) ) 284 | 285 | rospose = TransformStamped() 286 | rospose.header.stamp = timestamp 287 | rospose.header.frame_id = parent_frame 288 | rospose.child_frame_id = child_frame 289 | 290 | rospose.transform.translation.x = float(position[0]) 291 | rospose.transform.translation.y = float(position[1]) 292 | rospose.transform.translation.z = float(position[2]) 293 | rospose.transform.rotation.x = float(orientation[0]) 294 | rospose.transform.rotation.y = float(orientation[1]) 295 | rospose.transform.rotation.z = float(orientation[2]) 296 | rospose.transform.rotation.w = float(orientation[3]) 297 | 298 | return rospose, timestamp 299 | 300 | def createGtPoseMessage(timestamp_int, position, orientation): 301 | timestamp_nsecs = str(timestamp_int) 302 | timestamp = rospy.Time( int(timestamp_nsecs[0:-9]), int(timestamp_nsecs[-9:]) ) 303 | 304 | rospose = PoseStamped() 305 | rospose.header.stamp = timestamp 306 | rospose.pose.position.x = float(position[0]) 307 | rospose.pose.position.y = float(position[1]) 308 | rospose.pose.position.z = float(position[2]) 309 | rospose.pose.orientation.x = float(orientation[0]) 310 | rospose.pose.orientation.y = float(orientation[1]) 311 | rospose.pose.orientation.z = float(orientation[2]) 312 | rospose.pose.orientation.w = float(orientation[3]) 313 | 314 | return rospose, timestamp 315 | 316 | 317 | if __name__ == "__main__": 318 | 319 | #setup the argument list 320 | parser = argparse.ArgumentParser(description='Create a ROS bag using the images and imu data.') 321 | parser.add_argument('--project_folder', metavar='project', nargs='?', help='VISim Project 
folder') 322 | parser.add_argument('--output_bag', metavar='output_bag', help='output ROS bag file (.bag)') 323 | parser.add_argument('--namespace', metavar='namespace', default="", help='topic namespace(e.g. firefly)') 324 | parser.add_argument('--gray_image', help='the published image will be in grayscale(mono8)', action='store_true') 325 | 326 | #print help if no argument is specified 327 | if len(sys.argv)<2: 328 | parser.print_help() 329 | sys.exit(0) 330 | 331 | #parse the args 332 | parsed = parser.parse_args() 333 | proj_filepath = os.path.join(parsed.project_folder, 'visim_project.json') 334 | 335 | if os.path.isfile(parsed.output_bag) : 336 | print('Error: the output file already exists!') 337 | sys.exit() 338 | 339 | #create the bag 340 | 341 | try: 342 | try: 343 | with open(proj_filepath, "r") as json_data: 344 | try: 345 | project_data = json.load(json_data) 346 | except ValueError as e: 347 | print('Problem with the JSON parsing of project file. '+ str(e)) 348 | sys.exit() 349 | except IOError as e: 350 | print('Error open the project JSON file. Check the project_folder '+ str(e)) 351 | sys.exit() 352 | 353 | 354 | 355 | try: 356 | bag = rosbag.Bag(parsed.output_bag, 'w') 357 | except IOError as e: 358 | print('Error to create the bag file, check with your output path is correct and if you have write permission on the folder. '+ str(e)) 359 | sys.exit() 360 | 361 | visim_json_project = VISimProject() 362 | if (visim_json_project.fromJSON(project_data) == False): 363 | print( 'Problem with the JSON file parsing. Look in the terminal') 364 | sys.exit() 365 | 366 | imu_filepath = os.path.join(parsed.project_folder, 'output/1_InertialPose/imu_data.csv') 367 | gtpose_filepath = os.path.join(parsed.project_folder, 'output/1_InertialPose/pose_data.csv') 368 | 369 | namespace = "/{0}".format(parsed.namespace) if parsed.namespace else "" 370 | 371 | #add imu msg to the bag 372 | imu_topic = namespace + "/"+ visim_json_project.imu_name 373 | 374 | with open(imu_filepath, 'rb') as csvfile: 375 | reader = csv.reader(csvfile, delimiter=',') 376 | headers = next(reader, None) 377 | print('loading imu data') 378 | for row in reader: 379 | imumsg, timestamp = createImuMessge(row[0], row[1:4], row[4:7]) 380 | bag.write(imu_topic, imumsg, timestamp) 381 | 382 | 383 | #add gtpose msg to the bag 384 | gtpose_topic = namespace + "/ground_truth_imu_pose" 385 | pose_timestamps = list() 386 | 387 | with open(gtpose_filepath, 'rb') as csvfile: 388 | reader = csv.reader(csvfile, delimiter=',') 389 | headers = next(reader, None) 390 | print('loading gt pose data') 391 | for row in reader: 392 | posemsg, timestamp_pose = createGtTransformStampedMessage("blender",visim_json_project.imu_frame_id,row[0], row[1:4], row[4:8]) 393 | bag.write(gtpose_topic, posemsg, timestamp_pose) 394 | pose_timestamps.append(timestamp_pose) 395 | 396 | #add image msg to the bag 397 | for cam_data in visim_json_project.cameras: 398 | cam_dirpath = os.path.join(parsed.project_folder, 'output/2_Blender/' + cam_data.cam_name + '_rgbd') 399 | cam_info_topic = namespace + "/{0}/camera_info".format(cam_data.cam_name) 400 | cam_image_files = getImageFilesFromDir(cam_dirpath) 401 | 402 | print("loading camera: {0}".format(cam_data.cam_name)) 403 | progress_total = len(cam_image_files) 404 | progress_last_percent = -1 405 | image_counter = 0 406 | for image_filename in cam_image_files: 407 | try: 408 | idx = int(image_filename[3:-4])-1 409 | timestamp = pose_timestamps[idx] 410 | timestamp.nsecs +=1 #add one nano second to garanty 
411 |                     image_msgs = loadImagesToRosMsg(timestamp, cam_data, cam_dirpath, image_filename, parsed.gray_image)
412 |                     cam_info_msg = createCameraInfoMsg(timestamp, cam_data)
413 |                     bag.write(cam_info_topic, cam_info_msg, timestamp)
414 |                     for curr_img_msg in image_msgs:
415 |                         curr_topic = namespace + "/{0}/{1}".format(cam_data.cam_name, curr_img_msg[0])
416 |                         bag.write(curr_topic, curr_img_msg[1], timestamp)
417 |
418 |                 except IndexError as e:
419 |                     print('image {0} ignored. A few missing images at the end of the sequence are expected.'.format(idx))
420 |
421 |                 image_counter = image_counter + 1
422 |                 percent = math.floor(100 * image_counter / progress_total)
423 |                 if percent != progress_last_percent:
424 |                     print("progress: {0}/100".format(int(percent)), end='\r')
425 |                     sys.stdout.flush()
426 |                     progress_last_percent = percent
427 |             print("")
428 |
429 |     finally:
430 |         if 'bag' in locals():
431 |             bag.close()
432 |
433 |
--------------------------------------------------------------------------------
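Once Blender has rendered the frames, the script above is typically driven like this; this is only a sketch, the project path is a placeholder and `--gray_image`/`--namespace` are optional:

```sh
# hypothetical paths; assumes the catkin install step has run (catkin_install_python)
rosrun visensor_simulator visensor_sim_bagcreator.py \
    --project_folder /path/to/project_testA \
    --output_bag /path/to/project_testA/visensor_sim.bag \
    --namespace firefly
```
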
/scripts/waypoint_generators/waypoints_gsd_zigzag_generator.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import argparse
3 | import sys
4 | import math
5 |
6 | # TODO find a better name for vertical and horizontal and the correlation with x,y,z
7 |
8 | def calculate_height(fovh, img_res, gsd):
9 |     height = gsd * img_res / (2 * math.tan(math.radians(fovh) / 2))  # fovh is given in degrees
10 |     return height
11 |
12 | def calculate_spacing(gsd, resolution, overlap):
13 |     space = gsd * resolution * (100.0 - overlap) * 0.01  # footprint minus the overlapping fraction
14 |     return space
15 |
16 | def main():
17 |
18 |     # setup the argument list
19 |     parser = argparse.ArgumentParser(description='Generate a zig-zag trajectory (e.g. -o testfile.txt -resh 752 -resv 480 -glv 50 -glh 20)')
20 |     parser.add_argument('-o', '--output', metavar='filename', help='output waypoint file')
21 |     parser.add_argument('-glh', '--ground_length_horizontal', metavar='value', type=float, default=100, help='ground length horizontal (in meters)')
22 |     parser.add_argument('-glv', '--ground_length_vertical', metavar='value', type=float, default=200, help='ground length vertical (in meters)')
23 |     parser.add_argument('-gsd', '--ground_sample_distance', metavar='value', type=float, default=0.01, help='ground sample distance (in meters)')
24 |     parser.add_argument('-fovh', '--field_of_view_horizontal', metavar='value', type=float, default=65, help='field of view - horizontal (in degrees)')
25 |     parser.add_argument('-resh', '--image_resolution_horizontal', metavar='value', type=float, default=2560, help='image resolution - horizontal (in pixels)')
26 |     parser.add_argument('-resv', '--image_resolution_vertical', metavar='value', type=float, default=1920, help='image resolution - vertical (in pixels)')
27 |     parser.add_argument('-ovh', '--overlap_horizontal', metavar='value', type=float, default=80, help='overlap between images - horizontal (in percentage [0-100])')
28 |     parser.add_argument('-ovv', '--overlap_vertical', metavar='value', type=float, default=80, help='overlap between images - vertical (in percentage [0-100])')
29 |     parser.add_argument('-ext', '--only_extremes', dest='only_extremes', action='store_true', default=False, help='create only the turning points')
30 |
31 |
32 |     # todo Add validation to the input
33 |     # print help if no argument is specified
34 |     if len(sys.argv) < 2:
35 |         parser.print_help()
36 |         sys.exit(0)
37 |
38 |     # parse the args
39 |     parsed = parser.parse_args()
40 |
41 |     height = calculate_height(parsed.field_of_view_horizontal, parsed.image_resolution_horizontal, parsed.ground_sample_distance)  # pair the horizontal FOV with the horizontal resolution
42 |     sample_distance_horizontal = calculate_spacing(parsed.ground_sample_distance, parsed.image_resolution_horizontal, parsed.overlap_horizontal)
43 |     sample_distance_vertical = calculate_spacing(parsed.ground_sample_distance, parsed.image_resolution_vertical, parsed.overlap_vertical)
44 |
45 |     vertical_positions = np.linspace(0, parsed.ground_length_vertical, int(math.ceil(parsed.ground_length_vertical / sample_distance_vertical)) + 1, endpoint=True)
46 |     if parsed.only_extremes:
47 |         horizontal_positions = [0, parsed.ground_length_horizontal]
48 |     else:
49 |         horizontal_positions = np.linspace(0, parsed.ground_length_horizontal, int(math.ceil(parsed.ground_length_horizontal / sample_distance_horizontal)) + 1, endpoint=True)
50 |
51 |     going_right = True
52 |
53 |     with open(parsed.output, 'w') as output_file:
54 |         for u in vertical_positions:
55 |             for v in horizontal_positions:
56 |                 v_directional = v if going_right else parsed.ground_length_horizontal - v
57 |                 output_file.write('{0:.2f} {1:.2f} {2:.2f} {3:.2f}\n'.format(u, v_directional, height, 0))
58 |             going_right = not going_right
59 |
60 |     print("done!")
61 |
62 | if __name__ == "__main__":
63 |     main()
--------------------------------------------------------------------------------
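The flying height falls out of the pinhole relation in `calculate_height`: with the defaults (gsd = 0.01 m, fovh = 65°, resh = 2560 px) it is 0.01 · 2560 / (2 · tan(32.5°)) ≈ 20.1 m, and at 80 % vertical overlap the lines are spaced 0.01 · 1920 · 0.2 ≈ 3.8 m apart. A hypothetical invocation (the output file name is an example):

```sh
# writes one "x y z yaw" line per shot; note that the simple waypoint planner
# (src/simple_waypoint_planner.cc below) expects five comma-separated values,
# so this output may need reformatting before use as a waypoints.txt
python waypoints_gsd_zigzag_generator.py -o gsd_zigzag.txt -gsd 0.01 -fovh 65 -resh 2560 -resv 1920
```
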
/scripts/waypoint_generators/waypoints_zigzag_generator.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import sys
3 | import math
4 |
5 |
6 |
7 | def main():
8 |
9 |     # setup the argument list
10 |     parser = argparse.ArgumentParser(description='Generate a zig-zag trajectory (e.g. -o testfile.txt)')
11 |     parser.add_argument('-o', '--output', metavar='filename', help='output waypoint file')
12 |     parser.add_argument('-bbn', '--bound_box_north', metavar='value', type=float, default=80, help='max length of the trajectory in the north direction (in meters)')
13 |     parser.add_argument('-bbe', '--bound_box_east', metavar='value', type=float, default=80, help='max length of the trajectory in the east direction (in meters)')
14 |     parser.add_argument('-bbu', '--bound_box_up', metavar='value', type=float, default=0, help='max length of the trajectory in the up direction (in meters)')
15 |
16 |     parser.add_argument('-sn', '--step_north', metavar='value', type=float, default=10, help='step size in the north direction (in meters)')
17 |     parser.add_argument('-se', '--step_east', metavar='value', type=float, default=80, help='step size in the east direction (in meters)')
18 |     parser.add_argument('-su', '--step_up', metavar='value', type=float, default=0, help='step size in the up direction (in meters)')
19 |
20 |     parser.add_argument('-on', '--offset_north', metavar='value', type=float, default=-40, help='offset in the north direction (in meters)')
21 |     parser.add_argument('-oe', '--offset_east', metavar='value', type=float, default=40, help='offset in the east direction (in meters)')
22 |     parser.add_argument('-ou', '--offset_up', metavar='value', type=float, default=20, help='offset in the up direction (in meters). Simulators usually cannot work with up = 0')
23 |     parser.add_argument('-he', '--heading', metavar='value', type=float, default=0, help='desired constant heading-yaw (degrees)')
24 |     parser.add_argument('-gp', '--gimbal_pitch', metavar='value', type=float, default=90, help='desired constant gimbal pitch (degrees)')
25 |
26 |
27 |     # todo Add validation to the input
28 |     # print help if no argument is specified
29 |     if len(sys.argv) < 2:
30 |         parser.print_help()
31 |         sys.exit(0)
32 |
33 |     # parse the args
34 |     parsed = parser.parse_args()
35 |
36 |     if parsed.bound_box_east == 0 or (parsed.bound_box_north == 0 and parsed.bound_box_up == 0):
37 |         print("error: bound_box_east must be non-zero, and at least one of bound_box_north/bound_box_up must be non-zero")
38 |         sys.exit(0)
39 |
40 |     if parsed.step_east <= 0:
41 |         parsed.step_east = parsed.bound_box_east
42 |
43 |     num_rows_east = int(math.floor(parsed.bound_box_east / parsed.step_east) + 1)
44 |
45 |     num_rows_up = 1 if parsed.step_up <= 0 else (parsed.bound_box_up / parsed.step_up) + 1
46 |     num_rows_north = 1 if parsed.step_north <= 0 else (parsed.bound_box_north / parsed.step_north) + 1
47 |
48 |     num_rows = int(math.floor(max(num_rows_up, num_rows_north)))
49 |
50 |     print("the output has {} rows and {} cols".format(num_rows, num_rows_east))
51 |
52 |     going_east = True
53 |
54 |     with open(parsed.output, 'w') as output_file:
55 |         for row in range(num_rows):
56 |             n = min(row * parsed.step_north, parsed.bound_box_north) + parsed.offset_north
57 |             u = min(row * parsed.step_up, parsed.bound_box_up) + parsed.offset_up
58 |             for col in range(num_rows_east):
59 |                 e = min(col * parsed.step_east, parsed.bound_box_east)
60 |                 e_directional = e if going_east else parsed.bound_box_east - e
61 |                 output_file.write('{0:.2f},{1:.2f},{2:.2f},{3:.2f},{4:.2f}\n'.format(n, -e_directional + parsed.offset_east, u, parsed.heading, parsed.gimbal_pitch))
62 |             going_east = not going_east
63 |
64 |     print("done!")
65 |
66 | if __name__ == "__main__":
67 |     main()
--------------------------------------------------------------------------------
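This generator already writes the `waypoints.txt` format consumed by the simple waypoint planner: five comma-separated values per line (north, east, up in meters; heading and gimbal pitch in degrees). A hypothetical sweep:

```sh
# 80 m x 80 m lawn-mower pattern at 20 m altitude, camera pitched straight down;
# with these arguments the first emitted line is "-40.00,40.00,20.00,0.00,90.00"
python waypoints_zigzag_generator.py -o waypoints.txt -bbn 80 -bbe 80 -sn 10 -ou 20 -gp 90
```
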
DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
2 |
3 | from distutils.core import setup
4 | from catkin_pkg.python_setup import generate_distutils_setup
5 |
6 | # fetch values from package.xml
7 | setup_args = generate_distutils_setup(
8 |     packages=['visensor_simulator'],
9 |     package_dir={'': 'scripts'},
10 |     scripts=['scripts/visensor_sim_bagcreator.py']
11 | )
12 |
13 | setup(**setup_args)
14 |
--------------------------------------------------------------------------------
/src/extern_planner_backend.cc:
--------------------------------------------------------------------------------
1 | #include <ros/ros.h>
2 | #include <visensor_simulator/ros_backend_node.h>
3 |
4 | using namespace ros;
5 |
6 | int main(int argc, char** argv) {
7 |   if (argc < 2) {
8 |     printf("The project folder argument is missing.\n");
9 |     return 1;
10 |   }
11 |
12 |   ros::init(argc, argv, "extern_planner_backend_node");
13 |
14 |   std::string project_folder(argv[1]);
15 |
16 |   ROS_INFO("extern_planner_backend_node started");
17 |
18 |   RosBackendNode node;
19 |
20 |   node.run(project_folder);
21 |
22 |   ROS_INFO("shutting down extern_planner_backend_node");
23 |
24 |   return 0;
25 | }
26 |
--------------------------------------------------------------------------------
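As the argc check shows, the node's only positional argument is the project folder. A minimal manual start (hypothetical path) is sketched below; launch/extern_backend.launch is presumably the usual entry point and handles the topic wiring:

```sh
rosrun visensor_simulator extern_planner_backend /path/to/project_testA
```
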
/src/gazebo_poltergeist_gimbal_plugin.cpp:
--------------------------------------------------------------------------------
1 | #include <gazebo/common/common.hh>
2 | #include <gazebo/gazebo.hh>
3 | #include <gazebo/physics/physics.hh>
4 | #include <ros/ros.h>
5 | #include <sensor_msgs/Joy.h>
6 | #include <functional>
7 | #include <memory>
8 |
9 | namespace gazebo {
10 |
11 | //// Default values
12 | static const std::string kDefaultParentFrameId = "gimbal_support";
13 | static const std::string kDefaultChildFrameId = "camera_mount";
14 | static const std::string kDefaultGimbalLinkName = "firefly/gimbal_support_link";
15 | static const std::string kDefaultCameraLinkName = "firefly/gimbal_yaw";
16 | static const uint kNumActuators = 3;
17 | static const double kDefaultVelocity = 0.3;
18 | static const double kDEG_2_RAD = M_PI / 180.0;
19 |
20 | class GazeboPoltergeistGimbalPlugin : public ModelPlugin {
21 |  public:
22 |   GazeboPoltergeistGimbalPlugin() : ModelPlugin() {}
23 |   void Load(physics::ModelPtr _parent, sdf::ElementPtr _sdf) {
24 |     // Initialize ros, if it has not already been initialized.
25 |     if (!ros::isInitialized()) {
26 |       int argc = 0;
27 |       char** argv = NULL;
28 |       ros::init(
29 |           argc, argv, "gazebo_gimbal_plugin",
30 |           ros::init_options::NoSigintHandler);
31 |     }
32 |
33 |     // Create our ROS node. This acts in a similar manner to
34 |     // the Gazebo node.
35 |     node_handle_.reset(new ros::NodeHandle("gazebo_gimbal_plugin"));
36 |
37 |     if (_sdf->HasElement("robot_namespace"))
38 |       namespace_ = _sdf->Get<std::string>("robot_namespace");
39 |     else
40 |       namespace_ = "/gimbal";
41 |
42 |     // Store the pointer to the model
43 |     model_ = _parent;
44 |
45 |     std::string ref_base_link_str = _sdf->Get<std::string>("ref_base_link");
46 |     gimbal_support_link_ = model_->GetChildLink(ref_base_link_str);
47 |
48 |     std::string cam_link_str = _sdf->Get<std::string>("camera_link");
49 |     camera_mount_link_ = model_->GetChildLink(cam_link_str);
50 |     // TODO(lpt) add the transform publisher
51 |
52 |     // rpy order: roll, pitch, yaw
53 |
54 |     // TODO(lpt) add more error handling for the parameters
55 |     if (_sdf->HasElement("roll_joint_name")) {
56 |       joints_.push_back(
57 |           model_->GetJoint(_sdf->Get<std::string>("roll_joint_name")));
58 |       base_points_.push_back(_sdf->Get<double>("roll_zero"));
59 |       directions_.push_back(_sdf->Get<double>("roll_direction"));
60 |     } else {
61 |       joints_.push_back(nullptr);
62 |       base_points_.push_back(-1e15);
63 |       directions_.push_back(-1e15);
64 |     }
65 |
66 |     if (_sdf->HasElement("pitch_joint_name")) {
67 |       joints_.push_back(
68 |           model_->GetJoint(_sdf->Get<std::string>("pitch_joint_name")));
69 |       base_points_.push_back(_sdf->Get<double>("pitch_zero"));
70 |       directions_.push_back(_sdf->Get<double>("pitch_direction"));
71 |     } else {
72 |       joints_.push_back(nullptr);
73 |       base_points_.push_back(-1e15);
74 |       directions_.push_back(-1e15);
75 |     }
76 |
77 |     if (_sdf->HasElement("yaw_joint_name")) {
78 |       joints_.push_back(
79 |           model_->GetJoint(_sdf->Get<std::string>("yaw_joint_name")));
80 |       base_points_.push_back(_sdf->Get<double>("yaw_zero"));
81 |       directions_.push_back(_sdf->Get<double>("yaw_direction"));
82 |     } else {
83 |       joints_.push_back(nullptr);
84 |       base_points_.push_back(-1e15);
85 |       directions_.push_back(-1e15);
86 |     }
87 |     goal_points_.assign(kNumActuators, 0);
88 |
89 |     max_rotational_velocity_ = kDefaultVelocity;
90 |     if (_sdf->HasElement("gimbal_angular_velocity"))
91 |       max_rotational_velocity_ = _sdf->Get<double>("gimbal_angular_velocity");
92 |
93 |     input_sub_ = node_handle_->subscribe(
94 |         namespace_ + "/command/gimbal_actuators", 10,
95 |         &GazeboPoltergeistGimbalPlugin::onInputCallback, this);
96 |
97 |     for (int i = 0; i < kNumActuators; i++) {
98 |       if (joints_[i] != nullptr) {
99 |         joints_[i]->SetPosition(0, base_points_[i] * kDEG_2_RAD);
100 |       }
101 |     }
102 |
103 |     previousUpdate_ = 0.0;
104 |
105 |     // Listen to the update event. This event is broadcast every
106 |     // simulation iteration.
107 |     this->update_connection_ = event::Events::ConnectWorldUpdateBegin(
108 |         std::bind(
109 |             &GazeboPoltergeistGimbalPlugin::OnUpdate, this,
110 |             std::placeholders::_1));
111 |
112 |     ROS_INFO("gazebo_poltergeist_gimbal plugin loaded (model v17)");
113 |   }
114 |
115 |   void onInputCallback(const sensor_msgs::Joy::ConstPtr& msg) {
116 |     if (msg->axes.size() != 3) {
117 |       ROS_ERROR_STREAM(
118 |           "A gimbal request needs exactly 3 axes (roll, pitch, yaw), "
119 |           "even if some of them are not actuated");
      return;
120 |     }
121 |     ROS_INFO_STREAM(
122 |         "Gimbal Request Roll:" << msg->axes[0] << " Pitch:" << msg->axes[1]
123 |                                << " Yaw:" << msg->axes[2] << " ");
124 |     bool input_valid = true;
125 |     if (joints_[0] != nullptr && (msg->axes[0] > 180 || msg->axes[0] < -179)) {
126 |       ROS_WARN_STREAM(
127 |           "Gimbal Request ignored - invalid ROLL [-179,180]: " << msg->axes[0]);
128 |       input_valid = false;
129 |     }
130 |
131 |     if (joints_[1] != nullptr && (msg->axes[1] > 180 || msg->axes[1] < -179)) {
132 |       ROS_WARN_STREAM(
133 |           "Gimbal Request ignored - invalid PITCH [-179,180]: "
134 |           << msg->axes[1]);
135 |       input_valid = false;
136 |     }
137 |
138 |     if (joints_[2] != nullptr && (msg->axes[2] > 180 || msg->axes[2] < -179)) {
139 |       ROS_WARN_STREAM(
140 |           "Gimbal Request ignored - invalid YAW [-179,180]: " << msg->axes[2]);
141 |       input_valid = false;
142 |     }
143 |
144 |     if (input_valid) {
145 |       for (int i = 0; i < kNumActuators; i++) {
146 |         if (joints_[i] == nullptr)
147 |           continue;
148 |         goal_points_[i] = msg->axes[i];
149 |       }
150 |     }
151 |   }
152 |
153 |   // Called by the world update start event
154 |   void OnUpdate(const common::UpdateInfo& _info) {
155 |     for (int i = 0; i < kNumActuators; i++) {
156 |       if (joints_[i] == nullptr)
157 |         continue;
158 |
159 |       double curr_position = GetJointPositionOffSeted(i);
160 |
161 |       double angle_diff = curr_position - goal_points_[i];
162 |
163 |       common::Time delta_time = _info.simTime - previousUpdate_;
164 |
165 |       if (delta_time > 0) {
166 |         double update_vel = std::min(
167 |             std::abs(0.9 * kDEG_2_RAD * angle_diff / delta_time.Double()),
168 |             max_rotational_velocity_);
169 |         if (angle_diff > 0) {
170 |           setSpeed(i, -update_vel);
171 |         } else {
172 |           setSpeed(i, update_vel);
173 |         }
174 |       }
175 |
176 |       // Attention: never call SetPosition() here, and never set fmax to 0:
177 |       // joints_[i]->SetPosition(0, 0.0);
178 |       // joints_[i]->SetParam("fmax", 0, 0.0);
179 |
180 |     }
181 |     previousUpdate_ = _info.simTime;  // updated once per tick, after all joints are handled
182 |   }
183 |
184 |   void setSpeed(size_t joint_index, double vel) {
185 | #if GAZEBO_MAJOR_VERSION > 4
186 |     if (joints_[joint_index] != nullptr) {
187 |       joints_[joint_index]->SetParam("fmax", 0, 100.0);
188 |       joints_[joint_index]->SetParam(
189 |           "vel", 0, vel);  // joint motor - only supported by ODE
190 |     }
191 | #else
192 |     if (joints_[joint_index] != nullptr) {
193 |       joints_[joint_index]->SetAttribute("fmax", 0, 100.0);
194 |       joints_[joint_index]->SetAttribute("vel", 0, vel);
195 |     }
196 | #endif
197 |   }
198 |
199 |   double GetJointPosition(size_t joint_index) {
200 |     if (joints_[joint_index] != nullptr)
201 |       return ConvertAngle180(
202 |           std::fmod(
203 |               joints_[joint_index]->Position(0) * (180 / M_PI) + 72000000.0, 360.0));
204 |
205 |     return -1e16;
206 |   }
207 |
208 |   double GetJointPositionOffSeted(size_t joint_index) {
209 |     return ConvertAngle180(
210 |         std::fmod(
211 |             GetJointPosition(joint_index) - base_points_[joint_index] + 360.0,
212 |             360.0));
213 |   }
214 |
215 |   double ConvertAngle180(double angle)  // [-180,180]
216 |   {
217 |     return std::fmod(angle + 180.0, 360.0) - 180.0;  // 0->0, 10->10, 350->-10
218 |   }
219 |
220 |   double ConvertAngle360(double angle)  // [0,360]
221 |   {
222 |     return std::fmod(angle + 360.0, 360.0);  // 0->0, 10->10, -10->350
223 |   }
224 |
225 |   void test_math() {
226 |     size_t joint_index = 1;
227 |     ROS_INFO_STREAM(
228 |         " 1: " << joints_[joint_index]->Position(0) * (180 / M_PI) << " 2: "
229 |                << std::fmod(
230 |                       joints_[joint_index]->Position(0) * (180 / M_PI) + 72000000.0,
231 |                       360.0)
232 |                << " 3: " << GetJointPosition(1) << " 4: "
233 |                << GetJointPosition(joint_index) - base_points_[joint_index] +
234 |                       360.0
235 |                << " 5: " << std::fmod(
236 |                                 GetJointPosition(joint_index) -
237 |                                     base_points_[joint_index] + 360.0,
238 |                                 360.0)
239 |                << " 6: " << GetJointPositionOffSeted(1));
240 |   }
241 |
242 |   // Pointer to the model
243 |  private:
244 |   /// STATE
245 |
246 |   double max_rotational_velocity_;
247 |
248 |   std::string namespace_;
249 |
250 |   common::Time previousUpdate_;
251 |
252 |   /// GAZEBO
253 |   ///
254 |
255 |   physics::ModelPtr model_;
256 |
257 |   std::unique_ptr<ros::NodeHandle> node_handle_;
258 |
259 |   physics::LinkPtr gimbal_support_link_;
260 |   physics::LinkPtr camera_mount_link_;
261 |
262 |   std::vector<double> base_points_;
263 |   std::vector<double> goal_points_;
264 |   std::vector<double> directions_;
265 |   std::vector<physics::JointPtr> joints_;
266 |
267 |   // Pointer to the update event connection
268 |   event::ConnectionPtr update_connection_;
269 |
270 |   /// ROS
271 |
272 |   /// \brief A node used for ROS transport
273 |   std::unique_ptr<ros::NodeHandle> ros_node_;
274 |
275 |   /// \brief A ROS subscriber
276 |   ros::Subscriber input_sub_;
277 |
278 |   ros::Publisher status_pub_;                   // TODO
279 |   ros::Publisher transform_gimbal_camera_pub_;  // TODO
280 |   ros::Publisher gimbal_camera_tf_pub_;         // TODO
281 | };
282 |
283 | // Register this plugin with the simulator
284 | GZ_REGISTER_MODEL_PLUGIN(GazeboPoltergeistGimbalPlugin)
285 | }
286 |
--------------------------------------------------------------------------------
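Once loaded, the plugin listens for sensor_msgs/Joy messages on `<robot_namespace>/command/gimbal_actuators`, interpreting the three axes as roll, pitch and yaw goals in degrees within [-179, 180]. Assuming the default `/gimbal` namespace (the actual one depends on the robot_namespace SDF element), a one-shot 45° pitch command would be:

```sh
# axes = [roll, pitch, yaw] in degrees; replace /gimbal with your robot_namespace
rostopic pub -1 /gimbal/command/gimbal_actuators sensor_msgs/Joy '{axes: [0.0, 45.0, 0.0]}'
```
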
/src/ros_backend_node.cc:
--------------------------------------------------------------------------------
1 |
2 | #include <visensor_simulator/ros_backend_node.h>
3 | #include <visensor_simulator/builtin_planner.h>
4 | #include <visensor_simulator/logger.h>
5 |
6 | #include <boost/filesystem.hpp>
7 | #include <Eigen/Core>
8 | #include <Eigen/Geometry>
9 | #include <ros/ros.h>
10 |
11 | #include <geometry_msgs/Point.h>
12 | #include <nav_msgs/Odometry.h>
13 | #include <sensor_msgs/Imu.h>
14 |
15 | using namespace ros;
16 |
17 | #define STRING_ENUM(value)         \
18 |   case RosBackendNode::value:      \
19 |     os << "Current state: " << #value; \
20 |     break;
21 |
22 | std::ostream& operator<<(
23 |     std::ostream& os, const RosBackendNode::SimulationState& state) {
24 |   switch (state) {
25 |     STRING_ENUM(INVALID)
26 |     STRING_ENUM(WAINTING_PLANNER)
27 |     STRING_ENUM(RECORDING)
28 |     STRING_ENUM(FINISHED)
29 |   }
      return os;
30 | }
31 |
32 | RosBackendNode::RosBackendNode(BuiltInPlanner* planner) : planner_(planner) {
33 |   simulation_state_ = INVALID;
34 |
35 |   logger_ = new Logger();
36 |   reference_odometry_sub_ = nh_.subscribe(
37 |       "imu_frame_odometry_topic", 1000,
38 |       &RosBackendNode::referenceOdometryCallback, this);
39 |   imu_sub_ =
40 |       nh_.subscribe("imu_topic", 1000, &RosBackendNode::imuCallback, this);
41 |
42 |   if (planner_ != nullptr)
43 |     use_builtin_planner_ = true;
44 |   else
45 |     use_builtin_planner_ = false;
46 | }
47 |
48 | void RosBackendNode::imuCallback(const sensor_msgs::ImuConstPtr& msg) {
49 |   if (simulation_state_ == RECORDING) {
50 |     ImuSensorReadings imu_measurement;
51 |     imu_measurement.accelerometers = Eigen::Vector3d(
52 |         msg->linear_acceleration.x, msg->linear_acceleration.y,
53 |         msg->linear_acceleration.z);
54 |
55 |     imu_measurement.gyroscopes = Eigen::Vector3d(
56 |         msg->angular_velocity.x, msg->angular_velocity.y,
57 |         msg->angular_velocity.z);
58 |
59 |     imu_measurement.timestamp = msg->header.stamp;
60 |
61 |     logger_->logIMU(imu_measurement);
62 |   }
63 | }
64 |
65 | void RosBackendNode::updateBuiltInPlanner() {
66 |   BuiltInPlanner::PlannerStatus planner_status = planner_->getStatus();
67 |
68 |   if (simulation_state_ == WAINTING_PLANNER &&
69 |       planner_status == BuiltInPlanner::RUNNING) {
70 |     // start to record
71 |     logger_->startLogger(output_folder_path_.c_str());
72 |     setState(RECORDING);
73 |   }
74 |
75 |   if (simulation_state_ == RECORDING &&
76 |       planner_status == BuiltInPlanner::COMPLETED) {
77 |     setState(FINISHED);
78 |   }
79 | }
80 |
81 | void RosBackendNode::setState(RosBackendNode::SimulationState state) {
82 |   ROS_INFO_STREAM(
83 |       "Change state from: " << simulation_state_ << " to: " << state);
84 |   simulation_state_ = state;
85 | }
86 |
87 | void RosBackendNode::referenceOdometryCallback(
88 |     const nav_msgs::Odometry& odometry_msg) {
89 |   const geometry_msgs::Point& curr_position = odometry_msg.pose.pose.position;
90 |
91 |   if (use_builtin_planner_) {
92 |     updateBuiltInPlanner();
93 |   }
94 |
95 |   if (simulation_state_ == RECORDING) {
96 |     PoseReadings pose;
97 |     pose.position =
98 |         Eigen::Vector3d(curr_position.x, curr_position.y, curr_position.z);
99 |     pose.orientation = Eigen::Quaterniond(
100 |         odometry_msg.pose.pose.orientation.w,
101 |         odometry_msg.pose.pose.orientation.x,
102 |         odometry_msg.pose.pose.orientation.y,
103 |         odometry_msg.pose.pose.orientation.z);
104 |
105 |     pose.timestamp = odometry_msg.header.stamp;
106 |     logger_->logPose(pose);
107 |   }
108 |
109 |   return;
110 | }
111 |
112 | void RosBackendNode::run(const std::string& project_folder) {
113 |   boost::filesystem::path project_folder_path(project_folder);
114 |
115 |   if (!boost::filesystem::exists(project_folder_path)) {
116 |     ROS_ERROR_STREAM(
117 |         "the project folder does not exist: " << project_folder_path.c_str());
118 |     return;
119 |   }
120 |
121 |   boost::filesystem::path output_folder_path =
122 |       project_folder_path / "output/1_InertialPose";
123 |
124 |   if (!boost::filesystem::exists(output_folder_path)) {
125 |     if (!boost::filesystem::create_directories(output_folder_path)) {
126 |       ROS_ERROR_STREAM(
127 |           "the output folder could not be created: "
128 |           << output_folder_path.c_str());
129 |       return;
130 |     }
131 |   } else {
132 |     ROS_ERROR_STREAM(
133 |         "the output folder already exists - please delete it: "
134 |         << output_folder_path.c_str());
135 |     return;
136 |   }
137 |
138 |   setState(WAINTING_PLANNER);
139 |
140 |   output_folder_path_ = std::string(output_folder_path.c_str());
141 |
142 |   if (use_builtin_planner_) {
143 |     // open the waypoints file
144 |     if (!planner_->loadConfigurationFromFile(project_folder_path.c_str())) {
145 |       ROS_ERROR("Failed to load the configuration.");
146 |       return;
147 |     }
148 |   } else {
149 |     logger_->startLogger(output_folder_path_.c_str());
150 |     setState(RECORDING);
151 |     // setup services
152 |   }
153 |
154 |   ros::Rate loop_rate = ros::Rate(100);
155 |
156 |   while (ros::ok() && simulation_state_ != FINISHED) {
157 |     ros::spinOnce();
158 |     loop_rate.sleep();
159 |   }
160 |
161 |   // save the log file
162 |   logger_->stop();
163 |   if (simulation_state_ == FINISHED)
164 |     ROS_INFO("Simulation data saved!");
165 |   else if (use_builtin_planner_)
166 |     ROS_WARN("Simulation data saved, but it is INCOMPLETE!");
167 | }
168 |
169 | RosBackendNode::~RosBackendNode() {
170 |   delete logger_;
171 | }
172 |
--------------------------------------------------------------------------------
/src/simple_planner_backend.cc:
--------------------------------------------------------------------------------
1 | #include <ros/ros.h>
2 | #include <visensor_simulator/ros_backend_node.h>
3 | #include <visensor_simulator/simple_waypoint_planner.h>
4 |
5 | using namespace ros;
6 |
7 | int main(int argc, char** argv) {
8 |   if (argc < 2) {
9 |     printf("The project folder argument is missing.\n");
10 |     return 1;
11 |   }
12 |
13 |   ros::init(argc, argv, "ros_backend_node");
14 |
15 |   std::string project_folder(argv[1]);
16 |
17 |   SimpleWaypointPlanner* planner = new SimpleWaypointPlanner();
18 |
19 |   ROS_INFO("ros_backend_node started");
20 |
21 |   RosBackendNode node(planner);
22 |
23 |   node.run(project_folder);
24 |
25 |   delete planner;
26 |
27 |   ROS_INFO("shutting down ros_backend_node");
28 |
29 |   return 0;
30 | }
31 |
--------------------------------------------------------------------------------
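The backend subscribes to the relative names imu_topic, imu_frame_odometry_topic and (through the planner) robot_odometry_topic, so ros_backend.launch normally supplies the remappings. A manual rosrun equivalent is sketched below; the remap targets are only examples for a RotorS firefly:

```sh
rosrun visensor_simulator simple_planner_backend /path/to/project_testA \
    imu_topic:=/firefly/imu \
    imu_frame_odometry_topic:=/firefly/ground_truth/odometry \
    robot_odometry_topic:=/firefly/ground_truth/odometry
```
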
"); 89 | } 90 | 91 | }else 92 | { 93 | ROS_ERROR_STREAM("" << waypoint_file.c_str() <<" file could not be opened"); 94 | } 95 | 96 | 97 | }else 98 | { 99 | ROS_ERROR_STREAM( 100 | "the project folder does not have a waypoints.txt file :" 101 | << waypoint_file.c_str()); 102 | } 103 | 104 | 105 | return false; 106 | } 107 | 108 | 109 | BuiltInPlanner::PlannerStatus SimpleWaypointPlanner::getStatus() { 110 | return status_; 111 | } 112 | 113 | void SimpleWaypointPlanner::robotOdometryCallback( 114 | const nav_msgs::Odometry& curr_odometry) { 115 | if (status_ == STARTING) { 116 | if (reachedNextWaypoint(curr_odometry.pose.pose)) { 117 | status_ = 118 | RUNNING; // do not send waypoints yet, the logger need to be started 119 | } else { 120 | goNextPose(); 121 | } 122 | } else if (status_ == RUNNING) { 123 | { 124 | if (reachedNextWaypoint(curr_odometry.pose.pose)) { 125 | if (waypoints_.size() > 0) { 126 | waypoints_.pop_front(); 127 | } 128 | } 129 | 130 | if (waypoints_.size() == 0) { 131 | status_ = COMPLETED; 132 | } else { 133 | goNextPose(); 134 | } 135 | } 136 | } 137 | } 138 | 139 | bool SimpleWaypointPlanner::getNextWaypoint( 140 | Eigen::Vector3d& desired_position, double& desired_yaw, 141 | float& desired_gimbal_pitch) { 142 | if (waypoints_.size() > 0) { 143 | Eigen::Vector3d position( 144 | waypoints_.front().x, waypoints_.front().y, waypoints_.front().z); 145 | desired_position = position; 146 | desired_yaw = waypoints_.front().yaw; 147 | desired_gimbal_pitch = waypoints_.front().gimbal_pitch; 148 | return true; 149 | } 150 | return false; 151 | } 152 | 153 | void SimpleWaypointPlanner::goNextPose() { 154 | static ros::Time last_time(0); 155 | if (ros::Time::now() - last_time > ros::Duration(ros::Rate(kPlanning_Rate))) { 156 | last_time = ros::Time::now(); 157 | } else { 158 | return; 159 | } 160 | 161 | Eigen::Vector3d desired_position; 162 | double desired_yaw; 163 | float desired_gimbal_pitch; 164 | getNextWaypoint(desired_position, desired_yaw, desired_gimbal_pitch); 165 | sendPoseCommand(desired_position, desired_yaw, desired_gimbal_pitch); 166 | } 167 | 168 | bool SimpleWaypointPlanner::reachedNextWaypoint( 169 | const geometry_msgs::Pose& 170 | curr_pose) // do not consider the gimbal position 171 | { 172 | if (waypoints_.size() == 0) 173 | return true; // nowhere to go 174 | 175 | double position_error_squared = 176 | squared_dist(curr_pose.position, waypoints_.front()); 177 | double yaw = quat2yaw(curr_pose.orientation); 178 | const static double c_2pi = 2 * M_PI; 179 | double yaw_error = 180 | std::fmod((std::abs(waypoints_.front().yaw - yaw) + c_2pi), c_2pi); 181 | // ROS_ERROR_STREAM("DEGUB error :" << position_error_squared << " "<< 182 | // position_max_error_squared_ << " "<< yaw_error << " "<< yaw_max_error_); 183 | if (position_error_squared < position_max_error_squared_ && 184 | ((yaw_error < yaw_max_error_) || 185 | (yaw_error > (c_2pi - yaw_max_error_)))) { 186 | return true; 187 | } 188 | return false; 189 | } 190 | 191 | void SimpleWaypointPlanner::sendPoseCommand( 192 | const Eigen::Vector3d& desired_position, const double& desired_yaw, 193 | const float& desired_gimbal_pitch) { 194 | trajectory_msgs::MultiDOFJointTrajectory trajectory_msg; 195 | trajectory_msg.header.stamp = ros::Time::now(); 196 | mav_msgs::msgMultiDofJointTrajectoryFromPositionYaw( 197 | desired_position, desired_yaw, &trajectory_msg); 198 | ROS_INFO( 199 | "Publishing waypoint: %0.1lf :: %0.1lf :: %0.1lf :: %0.1lf :: %0.1f", 200 | desired_position.x(), desired_position.y(), 
200 |       desired_position.x(), desired_position.y(), desired_position.z(),
201 |       desired_yaw / kDEG_2_RAD, desired_gimbal_pitch);
202 |   pose_command_pub_.publish(trajectory_msg);
203 |   sensor_msgs::Joy gimbal_msg;
204 |   gimbal_msg.axes = {0.0f, desired_gimbal_pitch, 0.0f};
205 |   gimbal_command_pub_.publish(gimbal_msg);
206 | }
207 |
--------------------------------------------------------------------------------
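For reference, loadConfigurationFromFile above accepts a plain-text waypoints.txt with five comma-separated values per line: x, y, z in meters, then yaw and gimbal pitch in degrees. The values below are only an illustration and mirror the hard-coded sample trajectory in the constructor:

```sh
# hypothetical project path; one waypoint per line
cat > /path/to/project_testA/waypoints.txt <<'EOF'
1.00,-5.00,2.00,0.00,0.00
1.00,5.00,2.00,0.00,0.00
2.00,5.00,2.00,0.00,0.00
2.00,-5.00,2.00,0.00,0.00
EOF
```
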