├── .gitignore ├── CMakeLists.txt ├── README.md ├── include └── kr_ilqr_optimizer │ ├── finitediff.hpp │ ├── quadMPC.hpp │ ├── quaternion_utils.hpp │ ├── spline_trajectory_sampler.hpp │ └── test_utils.hpp ├── package.xml ├── scripts ├── eval_scripts │ ├── csv_plot_1_total_success_rate_vs_idx_and_cluster.py │ ├── csv_plot_2_front_end_success_rate_vs_idx.py │ ├── csv_plot_3_shortest_time.py │ ├── csv_plot_4_maze_diffculty_vs_jerk.py │ ├── csv_plot_5_plot_control.py │ ├── csv_plot_6_2d_eci.py │ ├── csv_plot_7_real_data_two_clusters.py │ ├── csv_plot_8_obstacle_data_relative_performance.py │ ├── evaluate_1_traj_load_csv.py │ ├── evaluate_2_traj_count_success.py │ ├── evaluate_3_three_file_combined_tb1.py │ ├── evaluate_4_other_metrics_tb2.py │ ├── evaluate_5_effort.py │ ├── gpt_prompts.md │ └── test_legend.py └── gpt_result │ └── Untitled 1.odp └── src ├── finitediff.cpp ├── quaternion_utils.cpp ├── run_sampler.cpp ├── spline_trajectory_sampler.cpp └── test_utils.cpp /.gitignore: -------------------------------------------------------------------------------- 1 | res/ 2 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.10) 2 | project(kr_ilqr_optimizer) 3 | 4 | 5 | ## Compile as C++11, supported in ROS Kinetic and newer 6 | set(CMAKE_CXX_STANDARD 17) 7 | set(CMAKE_POSITION_INDEPENDENT_CODE ON) 8 | 9 | 10 | ## Find catkin macros and libraries 11 | ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) 12 | ## is used, also find other catkin packages 13 | find_package(catkin REQUIRED COMPONENTS 14 | kr_planning_msgs 15 | roscpp 16 | eigen_conversions 17 | visualization_msgs 18 | altro 19 | ) 20 | 21 | 22 | 23 | ## System dependencies are found with CMake's conventions 24 | # find_package(Boost REQUIRED COMPONENTS system) 25 | find_package(Eigen3 3.4 REQUIRED) # for ilqr stuff 26 | 27 | 
catkin_package( 28 | INCLUDE_DIRS include ${catkin_INCLUDE_DIRS} 29 | LIBRARIES kr_ilqr_optimizer altro_test_utils 30 | CATKIN_DEPENDS kr_planning_msgs roscpp visualization_msgs altro 31 | DEPENDS # we should put fmt and eigen here 32 | ) 33 | 34 | ########### 35 | ## Build ## 36 | ########### 37 | 38 | ## Specify additional locations of header files 39 | ## Your package locations should be listed before other locations 40 | include_directories( 41 | # include 42 | include 43 | ${catkin_INCLUDE_DIRS} 44 | ) 45 | 46 | 47 | add_library(altro_test_utils src/test_utils.cpp src/finitediff.cpp src/quaternion_utils.cpp) 48 | target_link_libraries(altro_test_utils 49 | PUBLIC 50 | ${catkin_LIBRARIES} 51 | Eigen3::Eigen 52 | ) 53 | 54 | target_compile_definitions(altro_test_utils 55 | PUBLIC 56 | ALTRO_TEST_DIR="/home/yifei/Documents/optimal_ctrl/altro/test" 57 | ) 58 | ## Add cmake target dependencies of the library 59 | ## as an example, code may need to be generated before libraries 60 | ## either from message generation or dynamic reconfigure 61 | # add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 62 | 63 | ## Declare a C++ executable 64 | ## With catkin_make all packages are built within a single CMake context 65 | ## The recommended prefix ensures that target names across packages don't collide 66 | 67 | ## Rename C++ executable without prefix 68 | ## The above recommended prefix causes long target names, the following renames the 69 | ## target back to the shorter version for ease of user use 70 | ## e.g. 
"rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" 71 | # set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") 72 | 73 | ## Add cmake target dependencies of the executable 74 | ## same as for the library above 75 | # add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 76 | add_library(kr_ilqr_optimizer src/spline_trajectory_sampler.cpp) 77 | target_link_libraries(kr_ilqr_optimizer PUBLIC altro_test_utils) 78 | ## Specify libraries to link a library or executable target against 79 | add_executable(sampler_pub src/run_sampler.cpp) 80 | target_link_libraries(sampler_pub 81 | PUBLIC kr_ilqr_optimizer) # WHY DOES IT NEED FMT!!! 82 | # 83 | 84 | 85 | # target_link_libraries(kr_ilqr_optimizer PRIVATE ) 86 | 87 | ############# 88 | ## Install ## 89 | ############# 90 | 91 | # all install targets should use catkin DESTINATION variables 92 | # See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html 93 | 94 | ## Mark executable scripts (Python etc.) 
for installation 95 | ## in contrast to setup.py, you can choose the destination 96 | # catkin_install_python(PROGRAMS 97 | # scripts/my_python_script 98 | # DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 99 | # ) 100 | 101 | ## Mark executables for installation 102 | ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html 103 | # install(TARGETS ${PROJECT_NAME}_node 104 | # RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 105 | # ) 106 | 107 | ## Mark libraries for installation 108 | ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html 109 | # install(TARGETS ${PROJECT_NAME} 110 | # ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 111 | # LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 112 | # RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} 113 | # ) 114 | 115 | ## Mark cpp header files for installation 116 | # install(DIRECTORY include/${PROJECT_NAME}/ 117 | # DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} 118 | # FILES_MATCHING PATTERN "*.h" 119 | # PATTERN ".svn" EXCLUDE 120 | # ) 121 | 122 | ## Mark other files for installation (e.g. launch and bag files, etc.) 
123 | # install(FILES 124 | # # myfile1 125 | # # myfile2 126 | # DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} 127 | # ) 128 | 129 | ############# 130 | ## Testing ## 131 | ############# 132 | 133 | ## Add gtest based cpp test target and link libraries 134 | # catkin_add_gtest(${PROJECT_NAME}-test test/test_kr_ilqr_optimizer.cpp) 135 | # if(TARGET ${PROJECT_NAME}-test) 136 | # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) 137 | # endif() 138 | 139 | ## Add folders to be run by python nosetests 140 | # catkin_add_nosetests(test) 141 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # KR iLQR Optimizer 2 | ## Maintainer: 3 | Yifei Shao (yishao at seas.upenn.edu) 4 | ## Description 5 | This package hooks up the Altro solver with the KR_autonomous flight stack to do constrained trajectory optimization. 6 | It can work in 2 ways 7 | 1. Run the solver in a separate node 8 | 2. Functions in this package will are called in the action_planner package to run the solver easily 9 | ## Dependencies 10 | 1. [Altro](https://github.com/bjack205/altro): When building altro, remember to not include the -march=native flag in the CMakeLists.txt file. After building, install it as a staic library so find_packages() can find it. **Forked Altro is not yet publically avaible, and I don't know if the original version works.** 11 | 2. Eigen > 3.4.0 12 | ## ToDos 13 | 1. Don't use finite difference to compute the jacobian of dynamics. 14 | 2. Move Altro Fork as a dependency inside this repo 15 | 3. Build planning_detail option to run this. 16 | 4. Build python binding for generating motion primitives 17 | 5. MPC style initialization, make it much faster 18 | 6. 
Add polytope constraints
48 | */ 49 | void finite_jacobian( 50 | const Eigen::Ref& x, 51 | const std::function& f, 52 | Eigen::MatrixXd& jac, 53 | const AccuracyOrder accuracy = SECOND, 54 | const double eps = 1.0e-8); 55 | 56 | // void finite_jacobian_quad(const quadModel* model, 57 | // const Eigen::Ref& x, 58 | // const Eigen::Ref& u, 59 | // const std::function& f, 60 | // Eigen::MatrixXd& jac, 61 | // const AccuracyOrder accuracy = SECOND, 62 | // const double eps = 1.0e-8); 63 | 64 | 65 | /** 66 | * @brief Compute the hessian of a function using finite differences. 67 | * 68 | * @param[in] x Point at which to compute the hessian. 69 | * @param[in] f Compute the hessian of this function. 70 | * @param[out] hess Computed hessian. 71 | * @param[in] accuracy Accuracy of the finite differences. 72 | * @param[in] eps Value of the finite difference step. 73 | */ 74 | void finite_hessian( 75 | const Eigen::Ref& x, 76 | const std::function& f, 77 | Eigen::MatrixXd& hess, 78 | const AccuracyOrder accuracy = SECOND, 79 | const double eps = 1.0e-5); 80 | 81 | /** 82 | * @brief Compare if two gradients are close enough. 83 | * 84 | * @param[in] x The first gradient to compare. 85 | * @param[in] y The second gradient to compare against. 86 | * @param[in] test_eps Tolerance of equality. 87 | * @param[in] msg Debug message header. 88 | * 89 | * @return A boolean for if x and y are close to the same value. 90 | */ 91 | bool compare_gradient( 92 | const Eigen::Ref& x, 93 | const Eigen::Ref& y, 94 | const double test_eps = 1e-4, 95 | const std::string& msg = "compare_gradient "); 96 | 97 | /** 98 | * @brief Compare if two jacobians are close enough. 99 | * 100 | * @param[in] x The first jacobian to compare. 101 | * @param[in] y The second jacobian to compare against. 102 | * @param[in] test_eps Tolerance of equality. 103 | * @param[in] msg Debug message header. 104 | * 105 | * @return A boolean for if x and y are close to the same value. 
106 | */ 107 | bool compare_jacobian( 108 | const Eigen::Ref& x, 109 | const Eigen::Ref& y, 110 | const double test_eps = 1e-4, 111 | const std::string& msg = "compare_jacobian "); 112 | 113 | /** 114 | * @brief Compare if two hessians are close enough. 115 | * 116 | * @param[in] x The first hessian to compare. 117 | * @param[in] y The second hessian to compare against. 118 | * @param[in] test_eps Tolerance of equality. 119 | * @param[in] msg Debug message header. 120 | * 121 | * @return A boolean for if x and y are close to the same value. 122 | */ 123 | bool compare_hessian( 124 | const Eigen::Ref& x, 125 | const Eigen::Ref& y, 126 | const double test_eps = 1e-4, 127 | const std::string& msg = "compare_hessian "); 128 | 129 | /// @brief Flatten the matrix rowwise 130 | Eigen::VectorXd flatten(const Eigen::Ref& X); 131 | 132 | /// @brief Unflatten rowwise 133 | Eigen::MatrixXd unflatten(const Eigen::Ref& x, int dim); 134 | 135 | std::vector get_external_coeffs(const AccuracyOrder accuracy); 136 | std::vector get_interior_coeffs(const AccuracyOrder accuracy); 137 | double get_denominator(const AccuracyOrder accuracy); 138 | 139 | } // namespace fd -------------------------------------------------------------------------------- /include/kr_ilqr_optimizer/quadMPC.hpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include "Eigen/Dense" 10 | #include "altro/altro.hpp" 11 | #include "kr_ilqr_optimizer/test_utils.hpp" 12 | namespace fs = std::filesystem; 13 | // using json = nlohmann::json; 14 | 15 | using Eigen::MatrixXd; 16 | // using Eigen::Vector; 17 | 18 | using namespace altro; 19 | using Vector = Eigen::Matrix; 20 | using Matrix = Eigen::Matrix; 21 | using Eigen::VectorXi; 22 | 23 | // const std::string ALTRO_TEST_DIR 24 | // ="/home/yifei/Documents/optimal_ctrl/try_altro"; 25 | 26 | class quadMPC { 27 | public: 28 | quadMPC(double dt, bool 
use_quaternion) 29 | : dt_(dt), solver(10), use_quaternion(use_quaternion) {} 30 | void SetUp(double mass, 31 | double grav_const, 32 | Eigen::Matrix3d inertia, 33 | double min_thrust, 34 | double max_thrust, 35 | double kf, 36 | double km, 37 | double L) { 38 | model_ptr = std::make_shared( 39 | mass, 40 | grav_const, 41 | inertia, 42 | min_thrust / 4 / kf, // converted to w^2 43 | max_thrust / 4 / kf, 44 | kf, 45 | km, 46 | L); 47 | 48 | // Objective 49 | // std::cout << "Size of Problem N_ctrl = " << N_ctrl_ << std::endl; 50 | } 51 | std::shared_ptr model_ptr; 52 | double altro_ini_penalty = 1.0; 53 | double altro_penalty_scale = 10; 54 | 55 | protected: 56 | const double dt_; 57 | 58 | const int n = quadModel::NumStates; 59 | const int m = quadModel::NumInputs; 60 | float h; 61 | 62 | Vector Qd; 63 | Vector Rd; 64 | Vector Qdf; 65 | bool use_quaternion; 66 | 67 | ExplicitDynamicsFunction dyn; 68 | ExplicitDynamicsJacobian jac; 69 | 70 | ALTROSolver solver; 71 | 72 | ErrorCodes err; 73 | // Reference Trajectory (the "Scotty Dog") 74 | std::vector> x_ref; 75 | std::vector u_ref; 76 | Vector u0; 77 | Eigen::Vector4d u_ref_single; 78 | 79 | public: 80 | uint solve_problem( 81 | const Eigen::VectorXd& start_state, 82 | std::vector pos, 83 | std::vector vel, 84 | std::vector acc, 85 | std::vector yaw_ref, 86 | std::vector thrust, 87 | std::vector moment, 88 | double dt, 89 | std::vector q_ref, 90 | std::vector w_ref, 91 | const std::vector>& h_polys, 92 | const Eigen::VectorXd& allo_ts, 93 | std::vector& X_sim, 94 | std::vector& U_sim, 95 | std::vector& t_sim) { 96 | // calculate N_ctrl 97 | int N_ctrl = pos.size() - 1; 98 | solver = ALTROSolver(N_ctrl); 99 | Qd = Vector::Constant(n, 0); 100 | Rd = Vector::Constant(m, 0.1); 101 | Qdf = Vector::Constant(n, 0); 102 | const int three = 3; 103 | Qd.segment<3>(0) = Vector::Constant(three, 0.1); 104 | Qdf.segment<3>(0) = Vector::Constant(three, 0.1); 105 | Qdf.segment<3>(10) = Vector::Constant(three, 0.2); 106 | 
Qdf.segment<3>(7) = Vector::Constant(three, 0.2); 107 | 108 | // Dynamics 109 | ContinuousDynamicsFunction dyn0 = 110 | [this](double* x_dot, const double* x, const double* u) { 111 | model_ptr->Dynamics(x_dot, x, u); 112 | }; 113 | ContinuousDynamicsJacobian jac0 = 114 | [this](double* jac, const double* x, const double* u) { 115 | model_ptr->Jacobian(jac, x, u); 116 | }; 117 | 118 | ContinuousDynamicsJacobian jac_dt = 119 | [this](double* jac, const double* x, const double* u) { 120 | model_ptr->Jacobian_fd(jac, x, u); 121 | }; 122 | dyn = MidpointDynamics(n, m, dyn0); 123 | jac = MidpointJacobian(n, m, dyn0, jac0); 124 | 125 | // Dimension and Time step 126 | err = solver.SetDimension(n, m); 127 | const int en = 12; 128 | const int em = 4; 129 | err = solver.SetErrorDimension(en, em); 130 | 131 | // Dynamics 132 | err = solver.SetExplicitDynamics(dyn, jac); 133 | 134 | // Read Reference Trajectory 135 | int N_state = N_ctrl + 1; 136 | // float t_total = t_total_; 137 | 138 | u_ref_single = Vector::Constant(m, model_ptr->get_hover_input()); 139 | std::cout << "u_ref_single = " << u_ref_single << std::endl; 140 | 141 | // Set time step equal to the reference trajectory 142 | printf("h = %f\n", dt); 143 | err = solver.SetTimeStep(dt); 144 | 145 | Eigen::Matrix xf; 146 | xf << pos.back(), 1.0, 0.0, 0.0, 0.0, 0, 0, 0, 0, 0, 0; 147 | // Suprise ! Specifying COST FUNCTION TWICE is useful!! 
148 | for (int k = 0; k < N_ctrl; ++k) { 149 | if (use_quaternion) { 150 | err = solver.SetQuaternionCost( 151 | n, m, Qd.data(), Rd.data(), 1.0, xf.data(), u_ref_single.data(), k); 152 | } else { 153 | err = solver.SetLQRCost( 154 | n, m, Qd.data(), Rd.data(), xf.data(), u_ref_single.data(), k); 155 | } 156 | } 157 | // Set FINAL Cost 158 | if (use_quaternion) { 159 | err = solver.SetQuaternionCost( 160 | n, 161 | m, 162 | Qdf.data(), 163 | Rd.data(), 164 | 1.0, 165 | xf.data(), 166 | u_ref_single.data(), 167 | N_state - 1); // Set it for N_state, but index -1 168 | } else { 169 | err = solver.SetLQRCost(n, 170 | m, 171 | Qdf.data(), 172 | Rd.data(), 173 | xf.data(), 174 | u_ref_single.data(), 175 | N_state - 1); 176 | } 177 | // err = solver.SetQuaternionCost(n, m, Qdf.data(), Rd.data(), 1.0, 178 | // xf.data(), u_ref_single.data(),N); 179 | err = solver.SetInitialState(xf.data(), n); 180 | 181 | // Initialize Solver 182 | // err = solver.Initialize(); 183 | // std::cout << "Solver Initialized!\n" << std::endl; 184 | 185 | // Solve 186 | AltroOptions opts; 187 | opts.verbose = Verbosity::Silent; 188 | opts.iterations_max = 100; 189 | opts.use_backtracking_linesearch = true; 190 | opts.quat_start_index = 3; // THIS IS VERY IMPORTANT! 
191 | opts.use_quaternion = use_quaternion; 192 | opts.penalty_initial = altro_ini_penalty; 193 | opts.penalty_scaling = altro_penalty_scale; 194 | solver.SetOptions(opts); 195 | // Constraints 196 | const a_float max_thrust = model_ptr->max_w_sq; 197 | const a_float min_thrust = model_ptr->min_w_sq; 198 | std::cout << "min max propeller limits = " << min_thrust << " " 199 | << max_thrust << std::endl; 200 | auto actuator_con = [max_thrust, min_thrust]( 201 | a_float* c, const a_float* x, const a_float* u) { 202 | (void)x; 203 | // < 0 format 204 | c[0] = u[0] - max_thrust; 205 | c[1] = u[1] - max_thrust; 206 | c[2] = u[2] - max_thrust; 207 | c[3] = u[3] - max_thrust; 208 | c[4] = min_thrust - u[0]; 209 | c[5] = min_thrust - u[1]; 210 | c[6] = min_thrust - u[2]; 211 | c[7] = min_thrust - u[3]; 212 | }; 213 | auto actuator_jac = [](a_float* jac, const a_float* x, const a_float* u) { 214 | (void)x; 215 | (void)u; 216 | Eigen::Map> J(jac); 217 | J.setZero(); 218 | J(0, 13) = 1.0; 219 | J(1, 14) = 1.0; 220 | J(2, 15) = 1.0; 221 | J(3, 16) = 1.0; 222 | J(4, 13) = -1.0; 223 | J(5, 14) = -1.0; 224 | J(6, 15) = -1.0; 225 | J(7, 16) = -1.0; 226 | }; 227 | auto state_con = [xf](a_float* c, const a_float* x, const a_float* u) { 228 | (void)u; 229 | // Eigen::Map X(x, 13); 230 | // Eigen::Map C(c, 6); 231 | // constrain velocity and angular velocity to 0 232 | c[0] = x[7]; // v = 0 233 | c[1] = x[8]; 234 | c[2] = x[9]; 235 | c[3] = x[10]; // w= 0 236 | c[4] = x[11]; 237 | c[5] = x[12]; 238 | // c[6] = x[0] - xf(0); // xf = xf 239 | // c[7] = x[1] - xf(1); 240 | // c[8] = x[2] - xf(2); 241 | // c[9] = x[3] - 1.0; // qf = 0 #We do not want to constrain the quad to 242 | // face a direction c[10] = x[4]; c[11] = x[5]; c[12] = x[6]; 243 | }; 244 | auto state_jac = [](a_float* jac, const a_float* x, const a_float* u) { 245 | (void)u; 246 | Eigen::Map> J(jac); 247 | J.setZero(); 248 | J(0, 7) = 1.0; 249 | J(1, 8) = 1.0; 250 | J(2, 9) = 1.0; 251 | J(3, 10) = 1.0; 252 | J(4, 11) = 
1.0; 253 | J(5, 12) = 1.0; 254 | // J(6, 0) = 1.0; // to get to goal properly 255 | // J(7, 1) = 1.0; 256 | // J(8, 2) = 1.0; 257 | }; 258 | err = solver.SetConstraint(state_con, 259 | state_jac, 260 | 6, 261 | ConstraintType::EQUALITY, 262 | "State", 263 | N_state - 1); 264 | 265 | auto state_con_z = [](a_float* c, const a_float* x, const a_float* u) { 266 | (void)u; 267 | // Eigen::Map X(x, 13); 268 | // Eigen::Map C(c, 6); 269 | // constrain velocity and angular velocity to 0 270 | c[0] = -x[2]; 271 | }; 272 | auto state_jac_z = [](a_float* jac, const a_float* x, const a_float* u) { 273 | (void)u; 274 | Eigen::Map> J(jac); 275 | J.setZero(); 276 | J(0, 2) = -1.0; 277 | }; 278 | 279 | err = solver.SetConstraint(state_con_z, 280 | state_jac_z, 281 | 1, 282 | ConstraintType::INEQUALITY, 283 | "StateZ", 284 | AllIndices); 285 | err = solver.SetConstraint(actuator_con, 286 | actuator_jac, 287 | 8, 288 | ConstraintType::INEQUALITY, 289 | "Actuator Min Max", 290 | AllIndices); 291 | 292 | // int cur_poly_idx = 0; 293 | std::cout << "allots= " << allo_ts << std::endl; 294 | std::cout << "dt" << dt << std::endl; 295 | uint index_so_far = 0; 296 | double time_so_far = 0.0; 297 | for (int k = 0; k < h_polys.size(); k++) { 298 | auto h_poly = h_polys[k]; 299 | const int num_con = h_poly.second.rows(); 300 | auto poly_con = [h_poly](a_float* c, const a_float* x, const a_float* u) { 301 | (void)u; 302 | const int n_con = h_poly.second.rows(); 303 | Eigen::Map X(x, 13); 304 | Eigen::Map C(c, n_con); 305 | C = h_poly.first * X.segment<3>(0) - h_poly.second; 306 | }; 307 | auto poly_jac = [h_poly]( 308 | a_float* jac, const a_float* x, const a_float* u) { 309 | (void)u; 310 | const int n_con = h_poly.second.rows(); 311 | Eigen::Map J(jac, n_con, 17); 312 | J.setZero(); 313 | J.block(0, 0, n_con, 3) = h_poly.first; 314 | }; 315 | double time_end = time_so_far + allo_ts[k]; 316 | uint idx_end = int(time_end / dt); 317 | if (idx_end > N_state) { 318 | // allo_ts timing and 
traj_timing are different, so this is possible 319 | ROS_WARN_STREAM("idx_end = " << idx_end << " > N_state"); 320 | idx_end = N_state; 321 | } 322 | if (k == h_polys.size() - 1) { 323 | idx_end = N_state; 324 | } 325 | if (idx_end == index_so_far) { 326 | ROS_ERROR("polytope contains no points"); 327 | } else { 328 | err = solver.SetConstraint(poly_con, 329 | poly_jac, 330 | num_con, 331 | ConstraintType::INEQUALITY, 332 | "polytope", 333 | index_so_far, 334 | idx_end); 335 | } 336 | std::cout << "k= " << k << " startidx = " << index_so_far 337 | << " endidx = " << idx_end << " time_end = " << time_end 338 | << std::endl; 339 | index_so_far = idx_end; 340 | time_so_far = time_end; 341 | } 342 | // auto poly_con = [h_polys](a_float* c, const a_float* x, const a_float* u) 343 | // { 344 | // (void)x; 345 | // // < 0 format 346 | // c = 347 | // }; 348 | solver.Initialize(); 349 | // ErrorCodes err; 350 | // solver.Initialize(); 351 | err = solver.SetTimeStep(dt); 352 | PrintErrorCode(err); 353 | 354 | const int n_const = 13; 355 | 356 | Eigen::Matrix x0; 357 | // x0 << 2.0, 1.0, -1.0, -0.752, 0.443, 0.443, -0.206, 0.5,-0.5,1.0, 358 | // 0.8,0.8,0.8; 359 | Eigen::AngleAxisd yawAngle(start_state[6], Eigen::Vector3d::UnitZ()); 360 | Eigen::Quaterniond q0(yawAngle); 361 | x0 << start_state.segment<3>(0), q0.w(), q0.vec(), 362 | start_state.segment<3>(3), 0, 0, 0; 363 | // x0 << 2.0, 1.0, -1.0, 1.0, 0, 0, 0, 0.5,-0.5,0, 0.,0.,0.; 364 | 365 | // Eigen::Matrix xf; 366 | 367 | std::cout << "x0 = " << x0 << std::endl; 368 | std::cout << "xf = " << xf << std::endl; 369 | // xf << 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0,0,0, 0,0,0; 370 | 371 | // std::cout <<"size of pos" << pos.size() << std::endl; 372 | // std::cout << "x0 = " << x0 << std::endl; 373 | // std::cout << "xf = " << xf << std::endl; 374 | // Set Tracking Cost Function 375 | for (int k = 0; k < N_ctrl; ++k) { 376 | Eigen::Matrix x_ref_k; 377 | // std::cout<< "ref k = " << k << std::endl; 378 | // std::cout<< "pos[k] 
= " << pos[k] << std::endl; 379 | // std::cout<< "q_ref[k] = " << q_ref[k][1] << std::endl; 380 | 381 | x_ref_k << pos[k], q_ref[k].w(), q_ref[k].vec(), vel[k], w_ref[k]; 382 | // std::cout << k << std::endl; 383 | // 384 | 385 | if (use_quaternion) { 386 | err = solver.SetQuaternionCost(n, 387 | m, 388 | Qd.data(), 389 | Rd.data(), 390 | 1.0, 391 | x_ref_k.data(), 392 | u_ref_single.data(), 393 | k); 394 | } else { 395 | err = solver.SetLQRCost( 396 | n, m, Qd.data(), Rd.data(), x_ref_k.data(), u_ref_single.data(), k); 397 | } 398 | // PrintErrorCode(err); 399 | // fmt::print("Print error\n"); 400 | err = solver.SetState(x_ref_k.data(), n, k); 401 | // Set Initial Trajectory 402 | Eigen::Vector4d fm(4); // Initialize a 4D Eigen vector 403 | // std::cout << "k = " << k << std::endl; 404 | // std::cout << "thrust[k] = " << thrust[k] << std::endl; 405 | // std::cout << "moment[k] = " << moment[k] << std::endl; 406 | 407 | fm << thrust[k], moment[k]; 408 | Eigen::Vector4d reference_input = model_ptr->forceMatrix_inv() * fm; 409 | // std::cout << "reference_input = " << reference_input << std::endl; 410 | // we can change to reference input if needed but it is not getting 411 | // anywhere close to the goal, when there are obstacles it's even worse 412 | // since some constraints are violated 413 | err = solver.SetInput(u_ref_single.data(), m, k); 414 | } 415 | // Set Final COST!! 
416 | if (use_quaternion) { 417 | err = solver.SetQuaternionCost(n, 418 | m, 419 | Qdf.data(), 420 | Rd.data(), 421 | 1.0, 422 | xf.data(), 423 | u_ref_single.data(), 424 | N_state - 1); 425 | } else { 426 | err = solver.SetLQRCost(n, 427 | m, 428 | Qdf.data(), 429 | Rd.data(), 430 | xf.data(), 431 | u_ref_single.data(), 432 | N_state - 1); 433 | } 434 | // 435 | err = solver.SetState(xf.data(), n, N_state - 1); 436 | 437 | // Initial State 438 | err = solver.SetInitialState(x0.data(), n); 439 | 440 | // fmt::print("Set Input Finished!\n"); 441 | 442 | solver.OpenLoopRollout(); 443 | double cost_initial = solver.CalcCost(); 444 | // fmt::print("Initial cost = {}\n", cost_initial); 445 | SolveStatus status = SolveStatus::Success; 446 | try { 447 | status = solver.Solve(); 448 | } catch (std::exception& e) { 449 | std::cout << "Exception caught: " << e.what() << std::endl; 450 | status = SolveStatus::Unsolved; 451 | } 452 | 453 | std::cout << "Solve status is: " << static_cast(status) << std::endl; 454 | cost_initial = solver.CalcCost(); 455 | // fmt::print("Final cost = {}\n", cost_initial); 456 | 457 | // std::vector X_sim; 458 | // std::vector U_sim; 459 | // std::vector t_sim; 460 | if (static_cast(status) != 0) { 461 | ROS_ERROR_STREAM( 462 | "Solve failed with status: " << static_cast(status)); 463 | return static_cast(status); 464 | } 465 | float dt_solver = solver.GetTimeStep(0); 466 | float t_now = 0; 467 | for (int k = 0; k < N_state; k++) { // should be of size N + 1 468 | Eigen::VectorXd x(n); 469 | solver.GetState(x.data(), k); 470 | X_sim.emplace_back(x); 471 | t_sim.emplace_back(t_now); 472 | t_now += dt_solver; 473 | if (k != N_state - 1) { 474 | Eigen::VectorXd u(m); 475 | solver.GetInput(u.data(), k); // this is in w_sq now!!! 
not force 476 | U_sim.emplace_back(u); 477 | } else { 478 | std::cout << "last state = " << x << std::endl; 479 | } 480 | } 481 | // ros::shutdown(); 482 | return static_cast(status); 483 | } 484 | }; 485 | -------------------------------------------------------------------------------- /include/kr_ilqr_optimizer/quaternion_utils.hpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by Zixin Zhang on 03/17/23. 3 | // Copyright (c) 2022 Robotic Exploration Lab. All rights reserved. 4 | // 5 | 6 | #pragma once 7 | 8 | #include "Eigen/Dense" 9 | 10 | namespace altro { 11 | 12 | Eigen::Matrix3d skew(Eigen::Vector3d vec); 13 | Eigen::Vector4d cayley_map(Eigen::Vector3d phi); 14 | Eigen::Vector3d inv_cayley_map(Eigen::Vector4d q); 15 | Eigen::Vector4d quat_conj(Eigen::Vector4d q); 16 | Eigen::Matrix4d L(Eigen::Vector4d q); 17 | Eigen::Matrix4d R(Eigen::Vector4d q); 18 | Eigen::MatrixXd G(Eigen::Vector4d q); 19 | Eigen::Vector4d quat_mult(Eigen::Vector4d q1, Eigen::Vector4d q2); 20 | Eigen::Matrix4d T(); 21 | Eigen::MatrixXd H(); 22 | Eigen::Matrix3d Q(Eigen::Vector4d q); 23 | Eigen::MatrixXd dQdq(Eigen::Vector4d q); 24 | 25 | template 26 | int sgn(T val) { 27 | return (T(0) < val) - (val < T(0)); 28 | } 29 | 30 | void removeRow(Eigen::MatrixXd& matrix, unsigned int rowToRemove); 31 | void removeColumn(Eigen::MatrixXd& matrix, unsigned int colToRemove); 32 | Eigen::Matrix removeElement(const Eigen::Matrix& vec, 33 | unsigned int idxToRemove); 34 | 35 | } // namespace altro 36 | -------------------------------------------------------------------------------- /include/kr_ilqr_optimizer/spline_trajectory_sampler.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | #include "ros/ros.h" 8 | // #include 9 | #include 10 | #include 11 | 12 | // 13 | #include 14 | #include 15 | #include 16 | #include 17 | 18 | #include 
19 | #include 20 | #include 21 | #include 22 | 23 | class SplineTrajSampler { 24 | protected: 25 | const double dt_ = 0.1; 26 | 27 | bool compute_altro_ = true; 28 | bool write_summary_to_file_ = false; 29 | bool subscribe_to_traj_ = true; 30 | bool publish_optimized_traj_ = true; 31 | bool publish_viz_ = true; 32 | 33 | uint N_iter_ = 0; 34 | std::ofstream poly_array_file; 35 | 36 | ros::Subscriber sub_; 37 | ros::Publisher sampled_traj_pub_; 38 | ros::Publisher opt_traj_pub_; 39 | ros::Publisher opt_viz_pub_; 40 | 41 | double g_; 42 | double mass_; 43 | Eigen::Matrix3d inertia_; 44 | double min_thrust_; // in Newtons 45 | double max_thrust_; // in Newtons 46 | 47 | ros::NodeHandle nhp_; 48 | 49 | public: 50 | std::unique_ptr mpc_solver; 51 | 52 | using Ptr = std::unique_ptr; 53 | 54 | std::vector opt_traj_; // empty if no solution yet 55 | 56 | SplineTrajSampler(ros::NodeHandle& nh, 57 | bool subscribe_to_traj_, 58 | bool publish_optimized_traj_, 59 | bool publish_viz_, 60 | double dt, 61 | double inertia_param = 1.0) 62 | : subscribe_to_traj_(subscribe_to_traj_), 63 | publish_optimized_traj_(publish_optimized_traj_), 64 | publish_viz_(publish_viz_), 65 | dt_(dt) { 66 | nhp_ = nh; 67 | bool use_quat = false; 68 | mpc_solver = std::make_unique(dt, use_quat); 69 | 70 | nhp_.param("VehicleMass", mass_, 1.5); 71 | nhp_.param("GravAcc", g_, 9.81); 72 | double Ixx, Iyy, Izz; 73 | // Ixx = Iyy = Izz = inertia_param; 74 | nhp_.param("Ixx", Ixx, 1.0); 75 | nhp_.param("Ixx", Iyy, 1.0); 76 | nhp_.param("Ixx", Izz, 1.0); 77 | inertia_ << Ixx, 0, 0, 0, Iyy, 0, 0, 0, Izz; 78 | nhp_.param("MinThrust", min_thrust_, 0.0); 79 | nhp_.param("MaxThrust", max_thrust_, 8.0 * 4); // still in force N here 80 | double arm_length, kf, km; 81 | nhp_.param("arm_length", arm_length, 0.4); 82 | nhp_.param("kf", kf, 1.0); 83 | nhp_.param("km", km, 0.0245); 84 | 85 | // mass_ = 1.5; 86 | // kf = 1.0; 87 | // km = 0.0245; 88 | // arm_length = 0.4; 89 | // inertia_ = Eigen::Matrix3d::Identity(); 
90 | // min_thrust_ = 0.0; 91 | // max_thrust_ = 8.0*4; 92 | 93 | if (publish_optimized_traj_) { 94 | opt_traj_pub_ = nhp_.advertise( 95 | "optimized_traj_samples", 1); 96 | } 97 | if (publish_viz_) { 98 | opt_viz_pub_ = nhp_.advertise( 99 | "optimized_traj_samples_viz", 1); 100 | } 101 | if (subscribe_to_traj_) { 102 | sub_ = nhp_.subscribe( 103 | "local_plan_server/trajectory_planner/search_trajectory", 104 | 1, 105 | &SplineTrajSampler::callbackWrapper, 106 | this); 107 | } 108 | sampled_traj_pub_ = nhp_.advertise( 109 | "spline_traj_samples", 1); 110 | // opt_traj_pub_ = 111 | // n.advertise("optimized_traj_samples", 112 | // 1); opt_viz_pub_ = 113 | // n.advertise("optimized_traj_samples_viz", 1); 114 | // sub_ = 115 | // n.subscribe("/local_plan_server/trajectory_planner/search_trajectory", 1, 116 | // &SplineTrajSampler::callbackWrapper, this); 117 | if (write_summary_to_file_) { 118 | poly_array_file.open("/home/yifei/planning_summary.csv", 119 | std::ios_base::app); 120 | poly_array_file << "Planning Iteration, Starting x, Jerk Norm Sum, Traj " 121 | "Time, Total_Ref_Fdt, Total_Ref_Mdt \n"; 122 | } 123 | mpc_solver->SetUp( 124 | mass_, g_, inertia_, min_thrust_, max_thrust_, kf, km, arm_length); 125 | } 126 | void callbackWrapper(const kr_planning_msgs::SplineTrajectory::ConstPtr& msg); 127 | 128 | kr_planning_msgs::TrajectoryDiscretized sample_and_refine_trajectory( 129 | const Eigen::VectorXd& start_state, 130 | const kr_planning_msgs::SplineTrajectory& traj, 131 | const std::vector& hPolys, 132 | const Eigen::VectorXd& allo_ts); 133 | 134 | void setConfig(double altro_ini_penalty, double altro_penalty_scale) { 135 | altro_ini_penalty = altro_ini_penalty; 136 | altro_penalty_scale = altro_penalty_scale; 137 | } 138 | 139 | private: 140 | kr_planning_msgs::TrajectoryDiscretized publish_altro( 141 | const Eigen::VectorXd& start_state, 142 | std::vector pos, 143 | std::vector vel, 144 | std::vector acc, 145 | std::vector yaw_ref, 146 | std::vector thrust, 
147 | std::vector moment, 148 | double t, 149 | std::vector q_ref, 150 | std::vector w_ref, 151 | const std::vector>& h_polys, 152 | const Eigen::VectorXd& allo_ts, 153 | std_msgs::Header header); 154 | Eigen::Vector3d compute_ref_inputs(const Eigen::Vector3d& pos, 155 | const Eigen::Vector3d& vel, 156 | const Eigen::Vector3d& acc, 157 | const Eigen::Vector3d& jerk, 158 | const Eigen::Vector3d& snap, 159 | const Eigen::Vector3d& yaw_dyaw_ddyaw, 160 | double& thrust, 161 | geometry_msgs::Point& moment, 162 | Eigen::Quaterniond& q_return, 163 | Eigen::Vector3d& omega); 164 | 165 | void unpack(geometry_msgs::Point& p, 166 | const Eigen::VectorXd& v, 167 | int start_idx = 0) { 168 | p.x = v(start_idx); 169 | p.y = v(start_idx + 1); 170 | p.z = v(start_idx + 2); 171 | } 172 | 173 | std::vector linspace(double start, double end, int numPoints) { 174 | std::vector result; 175 | if (numPoints <= 1) { 176 | std::cerr << "linspace: numPoints must be > 1\n"; 177 | } 178 | 179 | double step = (end - start) / (numPoints - 1); 180 | for (int i = 0; i < numPoints; ++i) { 181 | result.push_back(start + i * step); 182 | } 183 | 184 | return result; 185 | } 186 | Eigen::Matrix3d skew(const Eigen::Vector3d& x) { 187 | Eigen::Matrix3d s; 188 | s << 0, -x[2], x[1], x[2], 0, -x[0], -x[1], x[0], 0; 189 | return s; 190 | } 191 | 192 | /** 193 | * This tutorial demonstrates simple sending of messages over the ROS system. 
194 | */ 195 | std::vector differentiate(const std::vector& p, 196 | float segment_time) { 197 | if (p.size() < 2) return std::vector(); 198 | std::vector v; 199 | for (int i = 1; i < p.size(); i++) { 200 | v.push_back(p[i] * static_cast(i) / segment_time); 201 | } 202 | return v; 203 | } 204 | Eigen::VectorXd evaluate(const kr_planning_msgs::SplineTrajectory& msg, 205 | double t, 206 | uint deriv_num) { 207 | Eigen::VectorXd result(msg.dimensions); 208 | 209 | for (int dim = 0; dim < msg.dimensions; dim++) { 210 | auto spline = msg.data[dim]; 211 | double time_elapsed = 0; 212 | for (auto poly : spline.segs) { 213 | auto poly_coeffs = poly.coeffs; 214 | for (int d = 0; d < deriv_num; d++) { 215 | poly_coeffs = differentiate(poly_coeffs, poly.dt); 216 | } 217 | if (poly_coeffs.size() == 0) { 218 | result(dim) = 0; 219 | break; 220 | } 221 | result(dim) = poly_coeffs[0]; 222 | 223 | if (t < time_elapsed + poly.dt || poly == spline.segs.back()) { 224 | for (int j = 1; j < poly_coeffs.size(); j++) { 225 | result(dim) += 226 | poly_coeffs[j] * std::pow((t - time_elapsed) / poly.dt, j); 227 | } 228 | break; 229 | } 230 | time_elapsed += poly.dt; 231 | } 232 | } 233 | return result; 234 | } 235 | 236 | std::vector sample( 237 | const kr_planning_msgs::SplineTrajectory& msg, 238 | double total_time, 239 | int N_state, 240 | int deriv_num) { 241 | std::vector ps(N_state); // 101 242 | double dt = total_time / (N_state - 1); // dt = 1 / 100 = 0.01 243 | for (int i = 0; i < N_state; i++) // 0, 1, 2, ..., 100 * 0.01 = 1 244 | ps.at(i) = evaluate(msg, i * dt, deriv_num); 245 | return ps; 246 | } 247 | }; 248 | -------------------------------------------------------------------------------- /include/kr_ilqr_optimizer/test_utils.hpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by Brian Jackson on 9/27/22. 3 | // Copyright (c) 2022 Robotic Exploration Lab. All rights reserved. 
4 | // 5 | 6 | #pragma once 7 | 8 | #include 9 | 10 | #include "Eigen/Dense" 11 | #include "altro/typedefs.hpp" 12 | #include "kr_ilqr_optimizer/quaternion_utils.hpp" 13 | 14 | void discrete_double_integrator_dynamics( 15 | double* xnext, const double* x, const double* u, float h, int dim); 16 | 17 | void discrete_double_integrator_jacobian( 18 | double* jac, const double* x, const double* u, float h, int dim); 19 | 20 | void cartpole_dynamics_midpoint(double* xnext, 21 | const double* x, 22 | const double* u, 23 | float h); 24 | void cartpole_jacobian_midpoint(double* xnext, 25 | const double* x, 26 | const double* u, 27 | float h); 28 | 29 | void pendulum_dynamics(double* xnext, const double* x, const double* u); 30 | void pendulum_jacobian(double* jac, const double* x, const double* u); 31 | 32 | using ContinuousDynamicsFunction = 33 | std::function; 34 | using ContinuousDynamicsJacobian = 35 | std::function; 36 | 37 | altro::ExplicitDynamicsFunction MidpointDynamics(int n, 38 | int m, 39 | ContinuousDynamicsFunction f); 40 | altro::ExplicitDynamicsJacobian MidpointJacobian( 41 | int n, int m, ContinuousDynamicsFunction f, ContinuousDynamicsJacobian jac); 42 | 43 | altro::ExplicitDynamicsFunction ForwardEulerDynamics( 44 | int n, int m, const ContinuousDynamicsFunction f); 45 | altro::ExplicitDynamicsJacobian ForwardEulerJacobian( 46 | int n, 47 | int m, 48 | const ContinuousDynamicsFunction f, 49 | const ContinuousDynamicsJacobian df); 50 | 51 | class BicycleModel { 52 | public: 53 | enum class ReferenceFrame { CenterOfGravity, Rear, Front }; 54 | 55 | explicit BicycleModel(ReferenceFrame frame = ReferenceFrame::CenterOfGravity) 56 | : reference_frame_(frame) {} 57 | 58 | void Dynamics(double* x_dot, const double* x, const double* u) const; 59 | void Jacobian(double* jac, const double* x, const double* u) const; 60 | 61 | void SetLengths(double length, double length_to_rear_wheel_from_cg) { 62 | length_ = length; 63 | distance_to_rear_wheels_ = 
length_to_rear_wheel_from_cg; 64 | } 65 | 66 | static constexpr int NumStates = 4; 67 | static constexpr int NumInputs = 2; 68 | 69 | private: 70 | ReferenceFrame reference_frame_; 71 | double length_ = 2.7; 72 | double distance_to_rear_wheels_ = 1.5; 73 | }; 74 | 75 | class quadModel { 76 | public: 77 | enum class ReferenceFrame { CenterOfGravity, Rear, Front }; 78 | 79 | explicit quadModel(double mass, 80 | double grav_const, 81 | Eigen::Matrix3d inertia, 82 | double min_w_sq, 83 | double max_w_sq, 84 | double kf, 85 | double km, 86 | double L, 87 | ReferenceFrame frame = ReferenceFrame::CenterOfGravity) 88 | : reference_frame_(frame), 89 | mass_(mass), 90 | grav_(grav_const), 91 | moment_of_inertia_(inertia), 92 | min_w_sq(min_w_sq), 93 | max_w_sq(max_w_sq), 94 | kf_(kf), 95 | km_(km), 96 | motor_dist_(L) {} 97 | 98 | void Dynamics(double* x_dot, const double* x, const double* u) const; 99 | void Jacobian(double* jac, const double* x, const double* u) const; 100 | void Jacobian_fd(double* jac, const double* x, const double* u) const; 101 | Eigen::Vector3d moments(const Eigen::VectorXd& u) const; 102 | Eigen::Vector3d forces(const Eigen::VectorXd& u) const; 103 | // Eigen::VectorXd f_quad(const Eigen::VectorXd& x_vec, const Eigen::VectorXd& 104 | // u_vec) const; void finite_jacobian(const Eigen::Ref& 105 | // x, 106 | // const std::function& f, 107 | // Eigen::MatrixXd& jac, 108 | // const double eps); 109 | void finite_jacobian_quad_xu(double* jac, 110 | const double* x, 111 | const double* u) const; 112 | 113 | static constexpr int NumStates = 13; 114 | static constexpr int NumInputs = 4; 115 | double get_hover_input() const { return mass_ * grav_ / 4.0 / kf_; } 116 | float max_w_sq; 117 | float min_w_sq; 118 | double kf_; 119 | double mass_; 120 | double grav_; 121 | Eigen::Matrix3d moment_of_inertia_; 122 | Eigen::Matrix4d forceMatrix_inv() const { return forceMatrix().inverse(); } 123 | Eigen::Matrix4d forceMatrix() const { 124 | double L = motor_dist_; 125 
| 126 | Eigen::Matrix4d SMatrix; 127 | SMatrix << kf_, kf_, kf_, kf_, 0, L * kf_, 0, -L * kf_, -L * kf_, 0, 128 | L * kf_, 0, km_, -km_, km_, -km_; 129 | 130 | return SMatrix; 131 | } 132 | 133 | private: 134 | ReferenceFrame reference_frame_; 135 | double motor_dist_; 136 | double km_; 137 | }; 138 | 139 | class SimpleQuaternionModel { 140 | public: 141 | void Dynamics(double* x_dot, const double* x, const double* u) const; 142 | void Jacobian(double* jac, const double* x, const double* u) const; 143 | 144 | static constexpr int NumStates = 4; // Quaternion: [qs, qa, qb, qc] 145 | static constexpr int NumInputs = 146 | 3; // Angular Velocity: [omega_x, omega_y, omega_z] 147 | 148 | static constexpr int NumErrorStates = 3; 149 | static constexpr int NumErrorInputs = 3; 150 | }; 151 | 152 | class QuadrupedQuaternionModel { 153 | public: 154 | void Dynamics(double* x_dot, 155 | const double* x, 156 | const double* u, 157 | Eigen::Matrix foot_pos_body, 158 | Eigen::Matrix3d inertia_body) const; 159 | void Jacobian(double* jac, 160 | const double* x, 161 | const double* u, 162 | Eigen::Matrix foot_pos_body, 163 | Eigen::Matrix3d inertia_body) const; 164 | 165 | static constexpr int NumStates = 13; // r, q, v, w 166 | static constexpr int NumInputs = 12; // f1, f2, f3, f4 167 | 168 | static constexpr int NumErrorStates = 12; 169 | static constexpr int NumErrorInputs = 12; 170 | }; 171 | 172 | // void ReadScottyTrajectory(int *Nref, float *tref, 173 | // std::vector *xref, 174 | // std::vector *uref); 175 | 176 | Eigen::Vector4d Slerp(Eigen::Vector4d quat1, Eigen::Vector4d quat2, double t); -------------------------------------------------------------------------------- /package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | kr_ilqr_optimizer 4 | 0.0.0 5 | The kr_ilqr_optimizer package 6 | 7 | 8 | 9 | 10 | yifei 11 | 12 | 13 | 14 | 15 | 16 | TODO 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 
32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | catkin 52 | kr_planning_msgs 53 | roscpp 54 | kr_planning_msgs 55 | roscpp 56 | kr_planning_msgs 57 | roscpp 58 | eigen_conversions 59 | visualization_msgs 60 | altro 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | -------------------------------------------------------------------------------- /scripts/eval_scripts/csv_plot_1_total_success_rate_vs_idx_and_cluster.py: -------------------------------------------------------------------------------- 1 | from matplotlib.lines import Line2D 2 | import pandas as pd 3 | import matplotlib.pyplot as plt 4 | import matplotlib 5 | import numpy as np 6 | 7 | from mpl_toolkits.mplot3d import Axes3D 8 | from sklearn.mixture import GaussianMixture 9 | from seaborn.palettes import color_palette 10 | from matplotlib import animation 11 | import seaborn as sns 12 | animate_bool = False 13 | 14 | fm = matplotlib.font_manager 15 | fm._get_fontconfig_fonts.cache_clear() 16 | font = {'family' : 'sans-serif', 17 | 'weight' : 'normal', 18 | 'size' : 13} 19 | matplotlib.rc('font', **font) 20 | 21 | z_lim_high = 0.6 22 | z_lim_low = 0.2 23 | x_lim_high = 0.25 24 | 25 | # z_lim_high = 0.9 26 | # z_lim_low = 0.0 27 | # x_lim_high = 0.3 28 | z_name = "Structure Index" 29 | 30 | 31 | pd.set_option('display.max_columns', 50) 32 | 33 | matplotlib.rcParams['lines.linewidth'] = 4 34 | path_prefix = '/home/yifei/Documents/kr_ilqr_optimizer_icra/' 35 | save_path = path_prefix +'scripts/res/video_figures/' 36 | # Read the CSV file into a DataFrame 37 | file_path_real = path_prefix + 'scripts/res/9.13.23/ECI_icra_real/ECI_single_line_Real_09-13_11-30-25.csv' 38 | file_path_maze = path_prefix + 'scripts/res/9.13.23/ECI_icra_maze/ECI_single_line_09-13_15-28-22.csv' 39 | file_path_maze2 = path_prefix + 'scripts/res/9.13.23/ECI_icra_maze/ECI_single_line_maze_80_09-13_22-36-55.csv' 40 | file_path_strucutre = path_prefix + 
'scripts/res/9.13.23/ECI_icra_strcture/ECI_single_line_Obstacles_09-13_19-29-42.csv' 41 | 42 | 43 | df_real = pd.read_csv(file_path_real) 44 | df_maze = pd.read_csv(file_path_maze) 45 | df_maze2 = pd.read_csv(file_path_maze2) 46 | #combine two maze files 47 | df_maze = pd.concat([df_maze, df_maze2]) 48 | #save the new maze file as csv 49 | df_maze.to_csv(path_prefix + 'scripts/res/9.13.23/ECI_icra_maze/ECI_single_line_maze_combined.csv') 50 | 51 | df_structure = pd.read_csv(file_path_strucutre) 52 | 53 | 54 | # Group the data by 'map_seed' and filter out the rows that jps failed 55 | filtered_df_jps_success = df_real[(df_real['planner_frontend'].str.contains('JPS', case=False)) & (df_real['success_detail'] < 2)] 56 | map_indices_jps_fail = filtered_df_jps_success['map_seed'].unique() 57 | print('JPS failed map', map_indices_jps_fail) 58 | total_real_maps = len(df_real['map_seed'].unique()) 59 | df_real = df_real[~df_real['map_seed'].isin(map_indices_jps_fail)] 60 | print("number of unique feasible real maps: ", len(df_real['map_seed'].unique()), "/", total_real_maps) 61 | 62 | 63 | 64 | filtered_df_jps_success_maze = df_maze[(df_maze['planner_frontend'].str.contains('JPS', case=False)) & (df_maze['success_detail'] < 2)] 65 | map_indices_jps_fail = filtered_df_jps_success_maze['map_seed'].unique() 66 | print('JPS failed map', map_indices_jps_fail) 67 | total_maze_maps = len(df_maze['map_seed'].unique()) 68 | df_maze = df_maze[~df_maze['map_seed'].isin(map_indices_jps_fail)] 69 | print("number of unique feasible maze maps: ", len(df_maze['map_seed'].unique()), "/", total_maze_maps) 70 | 71 | filtered_df_jps_success_structure = df_structure[(df_structure['planner_frontend'].str.contains('JPS', case=False)) & (df_structure['success_detail'] < 2)] 72 | map_indices_jps_fail = filtered_df_jps_success_structure['map_seed'].unique() 73 | print('JPS failed map', map_indices_jps_fail) 74 | total_structure_maps = len(df_structure['map_seed'].unique()) 75 | df_structure = 
df_structure[~df_structure['map_seed'].isin(map_indices_jps_fail)] 76 | print("number of unique feasible structure maps: ", len(df_structure['map_seed'].unique()), "/", total_structure_maps) 77 | index_stats = df_structure[['density_index', 'clutter_index', 'structure_index']].describe().loc[['min', 'max', '50%']] 78 | # density_median = index_stats.loc['50%', 'density_index'] 79 | # clutter_median = index_stats.loc['50%', 'clutter_index'] 80 | # structure_median = index_stats.loc['50%', 'structure_index'] 81 | density_median, clutter_median, structure_median = 0.092449, 0.0442482219179718, 0.28364437802936177 82 | 83 | # 84 | #print column names 85 | print(df_structure.head()) 86 | # exit() 87 | # exit(0) 88 | # Group by 'map_seed' and calculate the mean of the 'success' column 89 | grouped_df_real = df_real.groupby(['map_seed', 'density_index', 'clutter_index', 'structure_index'])['success'].mean().reset_index() 90 | grouped_df_maze = df_maze.groupby(['map_seed', 'density_index', 'clutter_index', 'structure_index'])['success'].mean().reset_index() 91 | grouped_df_structure = df_structure.groupby(['map_seed', 'density_index', 'clutter_index', 'structure_index'])['success'].mean().reset_index() 92 | 93 | print(grouped_df_structure.head()) 94 | # Convert the mean success rate to percentage 95 | grouped_df_real['success_rate_percentage'] = grouped_df_real['success'] * 100 96 | grouped_df_maze['success_rate_percentage'] = grouped_df_maze['success'] * 100 97 | grouped_df_structure['success_rate_percentage'] = grouped_df_structure['success'] * 100 98 | 99 | 100 | # grouped_df_structure_maps = 101 | # kmeans = KMeans(n_clusters=5) 102 | 103 | ### Instead of clustering, let's try just split the data ranges for each axis from top 50 percent and lower 50 percent 104 | ### for a total of 8 sections 105 | 106 | #####################clustering! 
107 | n_clusters = 6 108 | gmm = GaussianMixture(n_components=n_clusters, random_state=10) 109 | 110 | 111 | gmm.fit(grouped_df_real[['density_index', 'clutter_index', 'structure_index']]) 112 | labels = gmm.predict(grouped_df_real[['density_index', 'clutter_index', 'structure_index']]) 113 | grouped_df_real['labels'] = labels 114 | 115 | labels_maze = gmm.predict(grouped_df_maze[['density_index', 'clutter_index', 'structure_index']]) 116 | grouped_df_maze['labels'] = labels_maze 117 | 118 | # Predict cluster labels for grouped_df_structure 119 | labels_structure = gmm.predict(grouped_df_structure[['density_index', 'clutter_index', 'structure_index']]) 120 | grouped_df_structure['labels'] = labels_structure 121 | 122 | 123 | # Create the 3D scatter plot with 124 | fig = plt.figure(figsize=(6, 6)) 125 | ax4 = fig.add_subplot(111, projection='3d') 126 | 127 | # x plane 128 | y_x_plane = np.linspace(0, 0.1, 100) 129 | z_x_plane = np.linspace(0, 0.6, 100) 130 | Y_x_plane, Z_x_plane = np.meshgrid(y_x_plane, z_x_plane) 131 | X_x_plane = np.ones_like(Y_x_plane) * density_median 132 | 133 | # y plane 134 | x_y_plane = np.linspace(0, 0.25, 100) 135 | z_y_plane = np.linspace(0, 0.6, 100) 136 | X_y_plane, Z_y_plane = np.meshgrid(x_y_plane, z_y_plane) 137 | Y_y_plane = np.ones_like(X_y_plane) * clutter_median 138 | 139 | # z plane 140 | x_z_plane = np.linspace(0, 0.25, 100) 141 | y_z_plane = np.linspace(0, 0.1, 100) 142 | X_z_plane, Y_z_plane = np.meshgrid(x_z_plane, y_z_plane) 143 | Z_z_plane = np.ones_like(X_z_plane) * structure_median 144 | 145 | 146 | 147 | def init(): 148 | sc = ax4.scatter(grouped_df_real['density_index'],grouped_df_real['clutter_index'],grouped_df_real['structure_index'], c='r', s=20) 149 | sc = ax4.scatter(grouped_df_maze['density_index'],grouped_df_maze['clutter_index'],grouped_df_maze['structure_index'], c='b', s=20) 150 | sc = 
ax4.scatter(grouped_df_structure['density_index'],grouped_df_structure['clutter_index'],grouped_df_structure['structure_index'], c='g', s=20) 151 | legend = ax4.legend(['real','maze','obstacle'], loc="lower center", title="Classes", borderaxespad=-10, ncol=5) 152 | ax4.set_xlabel('Density Index',labelpad=10) 153 | # ax.set_xlim(0, 0.2) 154 | ax4.set_ylabel('Clutter Index',labelpad=10) 155 | ax4.set_zlabel(z_name,labelpad=10) 156 | # draw median planes 157 | ax4.plot_surface(X_x_plane, Y_x_plane, Z_x_plane, color='black', alpha=0.2) 158 | ax4.plot_surface(X_y_plane, Y_y_plane, Z_y_plane, color='black', alpha=0.2) 159 | ax4.plot_surface(X_z_plane, Y_z_plane, Z_z_plane, color='black', alpha=0.2) 160 | ax4.set_xlim(0, x_lim_high) 161 | ax4.set_zlim(z_lim_low, z_lim_high) 162 | ax4.set_ylim(0, 0.1) 163 | return fig, 164 | 165 | def animate(i): 166 | ax4.view_init(elev=10., azim=i) 167 | return fig, 168 | 169 | # Animate 170 | if(animate_bool): 171 | anim = animation.FuncAnimation(fig, animate, init_func=init, 172 | frames=360, interval=20, blit=True) 173 | # Save 174 | anim.save(save_path+'data_distributon_animation.mp4', fps=30, extra_args=['-vcodec', 'libx264']) 175 | else: 176 | init() 177 | #show points from different files in different colors 178 | xlims = ax4.get_xlim() 179 | ylims = ax4.get_ylim() 180 | zlims = ax4.get_zlim() 181 | 182 | 183 | ####################1.2#################### 184 | 185 | plt.savefig(save_path+'fig1.1.png') 186 | fig = plt.figure(figsize=(6, 6)) 187 | ax1 = fig.add_subplot(111, projection='3d') 188 | sc = ax1.scatter(grouped_df_real['density_index'],grouped_df_real['clutter_index'],grouped_df_real['structure_index'], c= grouped_df_real['success_rate_percentage'], cmap='RdYlGn', s=20) 189 | # Add color bar and labels 190 | # labelpad=10 191 | ax1.set_xlabel('Density Index',labelpad=10) 192 | # ax.set_xlim(0, 0.2) 193 | ax1.set_ylabel('Clutter Index',labelpad=10) 194 | ax1.set_zlabel(z_name,labelpad=10) 195 | ax1.set_xlim(xlims) 196 | 
ax1.set_ylim(ylims) 197 | ax1.set_zlim(zlims) 198 | plt.savefig(save_path+'fig1.2.png') 199 | 200 | 201 | fig = plt.figure(figsize=(6, 6)) 202 | 203 | ax2 = fig.add_subplot(111, projection='3d') 204 | sc2 = ax2.scatter(grouped_df_maze['density_index'],grouped_df_maze['clutter_index'],grouped_df_maze['structure_index'], c= grouped_df_maze['success_rate_percentage'], cmap='RdYlGn', s=20) 205 | ax2.set_xlabel('Density Index',labelpad=10) 206 | # ax.set_xlim(0, 0.2) 207 | ax2.set_ylabel('Clutter Index',labelpad=10) 208 | ax2.set_zlabel(z_name,labelpad=10) 209 | ax2.set_xlim(xlims) 210 | ax2.set_ylim(ylims) 211 | ax2.set_zlim(zlims) 212 | plt.savefig(save_path+'fig1.3.png') 213 | 214 | 215 | fig = plt.figure(figsize=(6, 6)) 216 | fig.clear() 217 | ax3 = fig.add_subplot(111, projection='3d') 218 | sc3 = ax3.scatter(grouped_df_structure['density_index'],grouped_df_structure['clutter_index'],grouped_df_structure['structure_index'], c= grouped_df_structure['success_rate_percentage'], cmap='RdYlGn', s=20) 219 | ax3.set_xlabel('Density Index',labelpad=10) 220 | # ax.set_xlim(0, 0.2) 221 | ax3.set_ylabel('Clutter Index',labelpad=10) 222 | ax3.set_zlabel(z_name,labelpad=10) 223 | # ax3.set_xlim(xlims) 224 | # ax3.set_ylim(ylims) 225 | # ax3.set_zlim(zlims) 226 | # show some location options 227 | cbar = plt.colorbar(sc, ax=ax3, location = 'bottom') 228 | cbar.set_label('Success Rate (%)') 229 | 230 | plt.savefig(save_path+'fig1.4.png') 231 | # exit() 232 | 233 | ########################extra cluster analysis########################################### 234 | 235 | plotsize = (18,12) 236 | 237 | fig = plt.figure(figsize=plotsize) 238 | 239 | ax1 = fig.add_subplot(121, projection='3d') 240 | #list a few common cmaps 241 | #https://matplotlib.org/stable/tutorials/colors/colormaps.html 242 | sc = ax1.scatter(grouped_df_real['density_index'],grouped_df_real['clutter_index'],grouped_df_real['structure_index'], c=grouped_df_real['labels'], cmap= 'tab10', s=10) 243 | #show color label 
legend 244 | legend = ax1.legend(*sc.legend_elements(), loc="lower center", title="Classes", borderaxespad=-10, ncol=5) 245 | 246 | 247 | ax1 = fig.add_subplot(122, projection='3d') 248 | # Plot points from grouped_df_real, grouped_df_maze, and grouped_df_structure in different colors 249 | sc_real = ax1.scatter(grouped_df_real['density_index'], grouped_df_real['clutter_index'], grouped_df_real['structure_index'], c=grouped_df_real['labels'], cmap= 'tab10', s=20) 250 | sc_maze = ax1.scatter(grouped_df_maze['density_index'], grouped_df_maze['clutter_index'], grouped_df_maze['structure_index'], c=grouped_df_maze['labels'], cmap= 'tab10', s=5, marker='x') 251 | sc_structure = ax1.scatter(grouped_df_structure['density_index'], grouped_df_structure['clutter_index'], grouped_df_structure['structure_index'], c=grouped_df_structure['labels'], cmap= 'tab10', s=5, marker='+') 252 | 253 | 254 | # cbar = plt.colorbar(sc_real, ax=ax1) 255 | # cbar.set_label('Cluster Labels') 256 | 257 | # Legend for data sources 258 | legend1 = ax1.legend(['real', 'maze', 'structure'], title="Data Source", loc='upper left') 259 | 260 | # Add a second legend for cluster labels using Line2D 261 | unique_labels = np.unique(np.concatenate([grouped_df_real['labels'], grouped_df_maze['labels'], grouped_df_structure['labels']])) 262 | legend2_elements = [Line2D([0], [0], marker='o', color='w', markerfacecolor=plt.cm.tab10(i / float(len(unique_labels) - 1)), markersize=10) for i in unique_labels] 263 | ax1.legend(handles=legend2_elements, title="Cluster Labels", loc="upper right") 264 | 265 | # Add the first legend back 266 | ax1.add_artist(legend1) 267 | ax1.set_xlim(0, 0.25) 268 | 269 | plt.show() 270 | plt.close('all') 271 | 272 | 273 | 274 | fig = plt.figure(figsize=plotsize) 275 | fig.clear() 276 | ax = fig.add_subplot(111, projection='3d') 277 | 278 | # Calculate success rate of every method for each cluster in df_real 279 | n_clusters = 6 280 | gmm = GaussianMixture(n_components=n_clusters, 
random_state=10) 281 | gmm.fit(df_real[['density_index', 'clutter_index', 'structure_index']]) 282 | labels = gmm.predict(df_real[['density_index', 'clutter_index', 'structure_index']]) 283 | df_real['labels'] = labels 284 | 285 | sc = ax.scatter(df_real['density_index'],df_real['clutter_index'],df_real['structure_index'], c=df_real['labels'], cmap= 'tab10', s=10) 286 | #show color label legend 287 | legend = ax.legend(*sc.legend_elements(), title="Classes") 288 | ax.add_artist(legend) 289 | plt.savefig(save_path+'clusters.png') 290 | 291 | 292 | #add a column that combined frontend and backend 293 | df_real['planner'] = df_real['planner_frontend'] + '_' + df_real['planner_backend'] 294 | 295 | 296 | fig = plt.figure(figsize=plotsize) 297 | fig.clear() 298 | df_grouped = df_real.groupby(['planner', 'labels'])['success'].mean().reset_index() 299 | df_grouped['success_rate'] = df_grouped['success'] * 100 300 | #draw historgram of success rate for each cluster 301 | g = sns.catplot(data=df_grouped, x='labels', y='success_rate', hue='planner', kind='bar') 302 | # g.set_xticklabels(rotation=45, horizontalalignment='right') 303 | plt.title('Success Rate by Planner and Labels') 304 | plt.xlabel('Planner') 305 | plt.ylabel('Success Rate (%)') 306 | # save the figure 307 | plt.savefig(save_path+'success_by_group.png') 308 | 309 | # remove unsuccessful data 310 | df_real = df_real[df_real['success']] 311 | 312 | # Group the filtered data by 'labels', 'planner_frontend', and 'planner_backend' 313 | # Then calculate the metrics: success rate based on the 'success' column, avg trajectory time, and avg traj jerk 314 | 315 | 316 | 317 | #draw boxplot of traj_time each group 318 | fig = plt.figure(figsize=plotsize) 319 | fig.clear() 320 | g = sns.boxplot(data=df_real, x="labels", y="traj_time(s)", hue="planner") 321 | # g.set_xticklabels(rotation=45, horizontalalignment='right') 322 | plt.title('Trajectory Time by Planner and Labels') 323 | plt.savefig(save_path+'traj_time.png') 
324 | 325 | fig = plt.figure(figsize=plotsize) 326 | fig.clear() 327 | #draw boxplot of traj_jerk each group 328 | g = sns.boxplot(data=df_real, x="labels", y="traj_jerk", hue="planner") 329 | # g.set_xticklabels(rotation=45, horizontalalignment='right') 330 | plt.title('Trajectory Jerk by Planner and Labels') 331 | plt.savefig(save_path+'traj_jerk.png') 332 | 333 | 334 | 335 | exit() 336 | 337 | 338 | grouped_by_cluster = df_real.groupby(['labels', 'planner_frontend', 'planner_backend']).agg( 339 | total_count=('success', 'count'), 340 | success_count=('success', 'sum'), # summing the True values in the 'success' column 341 | avg_traj_time=('traj_time(s)', 'mean'), 342 | avg_traj_jerk=('traj_jerk', 'mean') 343 | ) 344 | 345 | 346 | #output the stats to a file 347 | grouped_by_cluster.to_csv(save_path+'grouped_by_cluster.csv') 348 | 349 | 350 | 351 | 352 | 353 | # Add a legend to distinguish the data sources 354 | # legend = ax.legend(['real', 'maze', 'structure'], loc="lower center", title="Data Source", borderaxespad=-10, ncol=3) 355 | 356 | 357 | from scipy.spatial.distance import euclidean 358 | 359 | # Calculate the centroids of each cluster 360 | centroids = grouped_df_real.groupby('labels').agg({ 361 | 'density_index': 'mean', 362 | 'clutter_index': 'mean', 363 | 'structure_index': 'mean' 364 | }).reset_index() 365 | 366 | # Initialize variables to store closest map_seeds for each cluster 367 | closest_map_seeds = {} 368 | 369 | # Loop through each cluster label to find the closest point 370 | for label in centroids['labels']: 371 | centroid = centroids.loc[centroids['labels'] == label, ['density_index', 'clutter_index', 'structure_index']].values[0] 372 | cluster_data = grouped_df_real[grouped_df_real['labels'] == label] 373 | 374 | # Calculate Euclidean distances and find the minimum 375 | min_distance = float('inf') 376 | closest_map_seed = None 377 | for idx, row in cluster_data.iterrows(): 378 | point = row[['density_index', 'clutter_index', 
'structure_index']].values 379 | distance = euclidean(centroid, point) 380 | if distance < min_distance: 381 | min_distance = distance 382 | closest_map_seed = row['map_seed'] 383 | 384 | closest_map_seeds[label] = closest_map_seed 385 | 386 | print("Closest map_seeds to centroids:", closest_map_seeds) 387 | 388 | plt.show() 389 | plt.savefig(save_path+'fig1.png') 390 | -------------------------------------------------------------------------------- /scripts/eval_scripts/csv_plot_2_front_end_success_rate_vs_idx.py: -------------------------------------------------------------------------------- 1 | 2 | import pandas as pd 3 | import matplotlib.pyplot as plt 4 | from mpl_toolkits.mplot3d import Axes3D 5 | import numpy as np 6 | from matplotlib import animation 7 | from sklearn.mixture import GaussianMixture as GMM 8 | 9 | compute_optimal_comp = False 10 | draw_individual = False 11 | # Load the CSV file into a DataFrame 12 | # save_path = '/home/yifei/ws/src/kr_autonomous_flight/kr_ilqr_optimizer/scripts/res/video_figures/' 13 | path_prefix = '/home/yifei/Documents/kr_ilqr_optimizer_icra/' 14 | save_path = path_prefix +'scripts/res/video_figures/' 15 | # Read the CSV file into a DataFrame 16 | file_path_real = path_prefix + 'scripts/res/9.13.23/ECI_icra_real/ECI_single_line_Real_09-13_11-30-25.csv' 17 | file_path_maze = path_prefix + 'scripts/res/9.13.23/ECI_icra_maze/ECI_single_line_09-13_15-28-22.csv' 18 | file_path_maze2 = path_prefix + 'scripts/res/9.13.23/ECI_icra_maze/ECI_single_line_maze_80_09-13_22-36-55.csv' 19 | file_path_strucutre = path_prefix + 'scripts/res/9.13.23/ECI_icra_strcture/ECI_single_line_Obstacles_09-13_19-29-42.csv' 20 | 21 | file_path1 = '/home/yifei/ws/src/kr_autonomous_flight/kr_ilqr_optimizer/scripts/res/9.13.23/ECI_icra_real/ECI_single_line_Real_09-13_11-30-25.csv' 22 | file_path2 = '/home/yifei/ws/src/kr_autonomous_flight/kr_ilqr_optimizer/scripts/res/9.13.23/ECI_icra_maze/ECI_single_line_maze.csv' 23 | file_path3 = 
'/home/yifei/ws/src/kr_autonomous_flight/kr_ilqr_optimizer/scripts/res/9.13.23/ECI_icra_strcture/ECI_single_line_Obstacles_09-13_19-29-42.csv' 24 | df1 = pd.read_csv(file_path1) 25 | df2 = pd.read_csv(file_path2) 26 | df3 = pd.read_csv(file_path3) 27 | 28 | # Add a new column file type to each dataframe 29 | df1['env_type'] = 'Real' 30 | df2['env_type'] = 'Maze' 31 | df3['env_type'] = 'Obstacle' 32 | 33 | # create dictionary for colors for each env_type 34 | color_dict = {'Real': 'r', 'Maze': 'b', 'Obstacle': 'g'} 35 | failed_color_dict = {'Real': '#ffa1a1', 'Maze': '#a1a1ff', 'Obstacle': '#baffba'} 36 | sort_front_list = ['JPS (geometric)', 'RRT', 'MPL', 'SST', 'dispersion planner'] 37 | sort_back_list = ['gcopter', 'iLQR(Altro)'] 38 | front_text = ['JPS', 'RRT', 'MPL', 'SST', 'Dispersion'] 39 | back_text = ['GCOPTER', 'ALTRO'] 40 | # add 10000 to the map_seed of df2, add 100000 to map_seed of df3 41 | df2['map_seed'] = df2['map_seed'] + 10000 42 | df3['map_seed'] = df3['map_seed'] + 100000 43 | 44 | 45 | #combine the three dataframes 46 | df = pd.concat([df1, df2, df3]) 47 | 48 | # Group the data by 'map_seed' and filter out the rows that jps failed 49 | filtered_df_jps_success = df[(df['planner_frontend'].str.contains('JPS', case=False)) & (df['success_detail'] < 2)] 50 | map_indices_jps_fail = filtered_df_jps_success['map_seed'].unique() 51 | print("len(map_indices_jps_fail): ", len(map_indices_jps_fail)) 52 | print('JPS success: ', map_indices_jps_fail) 53 | df = df[~df['map_seed'].isin(map_indices_jps_fail)] 54 | print("number of unique feasible maps: ", len(df['map_seed'].unique())) 55 | 56 | #sort the dataframe by planner_frontend and planner_backend 57 | df['planner_frontend'] = pd.Categorical(df['planner_frontend'], sort_front_list) 58 | df['planner_backend'] = pd.Categorical(df['planner_backend'], sort_back_list) 59 | df.sort_values(['planner_frontend', 'planner_backend'], inplace=True) 60 | 61 | 62 | # Group the data by 'planner_frontend' and 
'planner_backend' 63 | grouped_data = df.groupby(['planner_backend', 'planner_frontend']) 64 | 65 | 66 | 67 | if (compute_optimal_comp): 68 | fig_n_components = plt.figure(figsize=(16, 9), dpi=100) 69 | # figure out how many clusters to use for each group 70 | for i, ((backend, frontend), group) in enumerate(grouped_data): 71 | ax = fig_n_components.add_subplot(2, 5, i+1) 72 | n_components = np.arange(10,21) 73 | 74 | group['success'] = group['success'].astype(float) 75 | fit_data = group[['density_index', 'clutter_index', 'structure_index', 'success']] 76 | # do gmm clustering for group based on density_index, clutter_index, structure_index, success 77 | models = [GMM(n, covariance_type='full', random_state=0).fit(fit_data) 78 | for n in n_components] 79 | ax.plot(n_components, [m.bic(fit_data) for m in models], label='BIC') 80 | ax.plot(n_components, [m.aic(fit_data) for m in models], label='AIC') 81 | ax.legend(loc='best') 82 | ax.set_xlabel('n_components') 83 | ax.set_title(f'{front_text[sort_front_list.index(frontend)]} + {back_text[sort_back_list.index(backend)]}') 84 | 85 | # This is not a great way of cluster since it split the success and failure into different clusters, make it hard to calculate success rate of each cluster 86 | if (draw_individual): 87 | for i, ((backend, frontend), group) in enumerate(grouped_data): 88 | fig_n_components = plt.figure(figsize=(16, 9), dpi=100) 89 | fig_n_components.clear() 90 | 91 | ax = fig_n_components.add_subplot(1,2,1, projection='3d') 92 | 93 | n_components = 14 94 | 95 | group['success'] = group['success'].astype(float) 96 | fit_data = group[['density_index', 'clutter_index', 'structure_index', 'success']] 97 | # do gmm clustering for group based on density_index, clutter_index, structure_index, success 98 | model = GMM(n_components=n_components, covariance_type='full', random_state=0).fit(fit_data) 99 | labels = model.predict(fit_data) 100 | # get centers of each cluster 101 | centers = model.means_ 102 | 
group['labels'] = labels 103 | group['success'] = group['success'].astype(bool) 104 | # get success rate for each label 105 | success_rate = [] 106 | for label in range(n_components): 107 | success_rate.append(len(group[(group['labels'] == label) & (group['success'] == True)]) / len(group[group['labels'] == label])) 108 | print("center: ", centers[label], " success rate: ", success_rate[label], " number of points: ", len(group[group['labels'] == label])) 109 | 110 | 111 | 112 | # only draw the failture clusters 113 | group_fail = group[group['success'] == False] 114 | ax.scatter(group_fail['density_index'], group_fail['clutter_index'], group_fail['structure_index'],c = group_fail['labels'], s=5)#color_dict[env_type]) 115 | 116 | ax.legend(loc='best') 117 | ax.set_title(f'{front_text[sort_front_list.index(frontend)]} + {back_text[sort_back_list.index(backend)]}') 118 | ax.set_xlabel('Density Index') 119 | ax.set_ylabel('Clutter Index') 120 | ax.set_zlabel('Structure Index') 121 | 122 | ax = fig_n_components.add_subplot(1,2,2, projection='3d') 123 | ax.scatter(fit_data['density_index'], fit_data['clutter_index'], fit_data['structure_index'], c=group['success'], s=5, cmap='RdYlGn') 124 | ax.legend(loc='best') 125 | ax.set_xlabel('Density Index') 126 | ax.set_ylabel('Clutter Index') 127 | ax.set_zlabel('Structure Index') 128 | 129 | #connect the rotation of the two subplots 130 | 131 | plt.show() 132 | 133 | 134 | # exit() 135 | 136 | # Initialize a new figure to hold the 3D scatter plots with natural log scale and success rates 137 | # craate figure 1920 x 1080 138 | fig = plt.figure(figsize=(19.2, 10.8), dpi=100) 139 | def init(): 140 | fig = plt.gcf() 141 | # Iterate through each group and plot the 3D scatter plot with log scale and success rates 142 | for i, ((backend, frontend), group) in enumerate(grouped_data): 143 | # treat success column as float 144 | group['success'] = group['success'].astype(float) 145 | # do gmm clustering for group based on density_index, 
clutter_index, structure_index, success 146 | gmm = GMM(n_components=5, covariance_type='full', random_state=0).fit(group[['density_index', 'clutter_index', 'structure_index', 'success']]) 147 | labels = gmm.predict(group[['density_index', 'clutter_index', 'structure_index', 'success']]) 148 | # plot the 3D scatter plo 149 | 150 | ax = fig.add_subplot(2, 5, i+1, projection='3d') 151 | # find sort_front_list index 152 | ax.set_title(f'{front_text[sort_front_list.index(frontend)]} + {back_text[sort_back_list.index(backend)]}') 153 | ax.set_xlabel('Density Index') 154 | ax.set_ylabel('Clutter Index') 155 | ax.set_zlabel('Structure Index') 156 | 157 | # Calculate the success rate for this group 158 | total_trials = len(group) 159 | successful_trials = len(group[group['success'] == True]) 160 | success_rate = (successful_trials / total_trials) * 100 if total_trials > 0 else 0 161 | front_end_success = len(group[group['success_detail'] >= 2]) 162 | front_end_success_rate = (front_end_success / total_trials) * 100 if total_trials > 0 else 0 163 | 164 | # Add success rate as text in the plot 165 | ax.text2D(0.05, 0.95, f'Success:{success_rate:.1f}%, Frontend:{front_end_success_rate:.1f}%', transform=ax.transAxes, color='r') 166 | 167 | # Apply natural log transformation to the indices, adding a small constant to avoid log(0) 168 | small_constant = 0 169 | 170 | # Plot points where success is True 171 | success_group = group[group['success'] == True] 172 | 173 | # now group by env_type and plot success 174 | success_group_env_type = success_group.groupby(['env_type']) 175 | 176 | # plot success for each env_type 177 | for j, (env_type, group_env_type) in enumerate(success_group_env_type): 178 | # print("env_type: ", env_type) 179 | #print number of rows 180 | # print("number of rows: ", len(group_env_type)) 181 | 182 | ax.scatter(group_env_type['density_index'], 183 | group_env_type['clutter_index'], #having their own color for each env_type is not very clear 184 | 
group_env_type['structure_index'], label=env_type, s=5, c = 'g')#color_dict[env_type]) 185 | 186 | # Plot points where success is False 187 | failure_group = group[group['success'] == False] 188 | 189 | # now group by env_type and plot failure 190 | failure_group_env_type = failure_group.groupby(['env_type']) 191 | 192 | for j, (env_type, group_env_type) in enumerate(failure_group_env_type): 193 | # print("env_type: ", env_type) 194 | #print number of rows 195 | # print("number of rows: ", len(group_env_type)) 196 | 197 | ax.scatter(group_env_type['density_index'], 198 | group_env_type['clutter_index'], 199 | group_env_type['structure_index'], label=env_type, s=5, c = 'r')#failed_color_dict[env_type], marker='x') 200 | # plt.show() 201 | return fig, 202 | 203 | def animate(i): 204 | #iterate through subplots and change the view 205 | for ax in fig.axes: 206 | ax.view_init(elev=20., azim=-100+i) 207 | return fig, 208 | # ax.legend() 209 | #set legend to top leftleft corner 210 | # ax.legend(loc='upper left', bbox_to_anchor=(0, 1.2)) 211 | # ax.set_xlim([-6, 0]) 212 | if (animate): 213 | anim = animation.FuncAnimation(fig, animate, init_func=init, 214 | frames=60, interval=10, blit=True) 215 | # Save 216 | anim.save(save_path+'individual_animation.mp4', fps=20, extra_args=['-vcodec', 'libx264'],bitrate=-1, dpi=100) 217 | else: 218 | init() 219 | plt.savefig(save_path+'individual.png') 220 | # exit() 221 | #show points from different files in different colors 222 | # xlims = ax4.get_xlim() 223 | # ylims = ax4.get_ylim() 224 | # zlims = ax4.get_zlim() 225 | 226 | # plt.savefig(save_path+'fig2.png') -------------------------------------------------------------------------------- /scripts/eval_scripts/csv_plot_3_shortest_time.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import matplotlib.pyplot as plt 3 | import seaborn as sns 4 | from mpl_toolkits.mplot3d import Axes3D 5 | import numpy as np 6 | 7 | 8 | 
# Load the entire CSV file into a DataFrame 9 | directory = '/home/yifei/ws/src/kr_autonomous_flight/kr_ilqr_optimizer/scripts/res/ECI_single_line_09-09_02-03-44_big.csv' 10 | file_path = directory 11 | df = pd.read_csv(file_path) 12 | 13 | # Filter the DataFrame to keep only the rows where 'success' is True 14 | df_success = df[df['success'] == True] 15 | 16 | # Count the number of unique values in the 'success_detail' column 17 | unique_success_detail_values = df_success['success_detail'].nunique() 18 | print(f"Unique values in success_detail: {unique_success_detail_values}") 19 | 20 | # Create a new DataFrame to hold the rows with the shortest 'traj_time(s)' for each combination of 21 | # 'density_index', 'clutter_index', and 'structure_index' and 'planner_frontend' 22 | grouped = df_success.groupby(['density_index', 'clutter_index', 'structure_index']) 23 | df_min_time = grouped.apply(lambda x: x[x['traj_time(s)'] == x['traj_time(s)'].min()]) 24 | 25 | # Create the 3D scatter plot 26 | fig = plt.figure(figsize=(12, 8)) 27 | ax = fig.add_subplot(111, projection='3d') 28 | 29 | # Color map for different 'planner_frontend' 30 | colors = sns.color_palette('Set2', n_colors=df_min_time['planner_frontend'].nunique()) 31 | color_map = dict(zip(df_min_time['planner_frontend'].unique(), colors)) 32 | epsilon = 1e-6 33 | df_min_time['density_index'] = np.log(df_min_time['density_index'] + epsilon) 34 | df_min_time['clutter_index'] = np.log(df_min_time['clutter_index'] + epsilon) 35 | df_min_time['structure_index'] = np.log(df_min_time['structure_index'] + epsilon) 36 | for planner, color in color_map.items(): 37 | subset = df_min_time[df_min_time['planner_frontend'] == planner] 38 | print(subset.count()) 39 | ax.scatter(subset['density_index'], subset['clutter_index'], subset['structure_index'], 40 | c=[color]*len(subset), label=planner, s=10, alpha=0.6) 41 | 42 | # Labels and legend 43 | ax.set_xlabel('Density Index (Log Scale)') 44 | ax.set_ylabel('Clutter Index (Log 
Scale)') 45 | ax.set_zlabel('Structure Index (Log Scale)') 46 | ax.legend(title='Planner Frontend', bbox_to_anchor=(1.05, 1), loc='upper left') 47 | ax.set_title('Shortest Trajectory Time by Planner Frontend') 48 | 49 | plt.tight_layout() 50 | plt.show() -------------------------------------------------------------------------------- /scripts/eval_scripts/csv_plot_4_maze_diffculty_vs_jerk.py: -------------------------------------------------------------------------------- 1 | # Import the necessary plotting libraries again 2 | import matplotlib.pyplot as plt 3 | import seaborn as sns 4 | 5 | # Import the necessary libraries again 6 | import pandas as pd 7 | 8 | directory = '/home/yifei/ws/src/kr_autonomous_flight/kr_ilqr_optimizer/scripts/res/ECI_Maze_single_line_09-09_14-50-20.csv' 9 | 10 | # Define the file path again 11 | file_path = directory 12 | 13 | # Re-load the original CSV file 14 | df_original = pd.read_csv(file_path) 15 | 16 | # Extract the part of the string that appears after 'maze' and before the first '_' in the 'map_filename' column 17 | df_original['maze_description'] = df_original['map_filename'].str.extract('maze(\d+)_')[0] 18 | 19 | # Display the first few rows to verify the new column 20 | df_original.head() 21 | 22 | # Define the columns to describe again 23 | columns_to_describe = ['compute_time_frontend(ms)', 'compute_time_backend(ms)', 'traj_time(s)', 'traj_length(m)', 'traj_jerk'] 24 | df_original = df_original.sort_values(by=['maze_description']) 25 | # Group by 'maze_description', 'planner_frontend', and 'planner_backend' and compute descriptive statistics 26 | grouped_descriptive_stats = df_original.groupby(['maze_description', 'planner_frontend', 'planner_backend'])[columns_to_describe].describe() 27 | 28 | # Show the first few rows to verify 29 | grouped_descriptive_stats.head() 30 | 31 | # Initialize a larger matplotlib figure for the single boxplot with counts 32 | plt.figure(figsize=(20, 10)) 33 | 34 | # Create the boxplot 35 | # 
df_melted = pd.melt(df_original, id_vars=['planner_frontend', 'planner_backend'], value_vars=['planner_frontend', 'planner_backend']) 36 | 37 | ax = sns.boxplot(data=df_original, x='maze_description', y='traj_jerk', hue=df_original[['planner_frontend', 'planner_backend']].apply(tuple, axis=1), showfliers=False) 38 | 39 | # Add the counts above the boxplots 40 | for i, maze_desc in enumerate(df_original['maze_description'].unique()): 41 | # Filter data for the current maze_description 42 | filtered_data = df_original[df_original['maze_description'] == maze_desc] 43 | 44 | # Calculate counts for each planner_frontend 45 | counts = filtered_data['planner_frontend'].value_counts().sort_index() 46 | 47 | # Add counts as text above the boxplots 48 | for j, (planner_frontend, count) in enumerate(counts.items()): 49 | ax.text(i + j * 0.2 - 0.3, ax.get_ylim()[1] * 0.8, f'N={count}', ha='center') 50 | 51 | # Set title and legend 52 | plt.title('Boxplot of Trajectory Jerk Grouped by Maze Description and Planner Frontend (Without Outliers)') 53 | plt.legend(title='Planner Frontend', bbox_to_anchor=(1.05, 1), loc='upper left') 54 | plt.xticks(rotation=45) 55 | 56 | # Adjust layout to prevent overlap 57 | plt.tight_layout() 58 | plt.show() -------------------------------------------------------------------------------- /scripts/eval_scripts/csv_plot_5_plot_control.py: -------------------------------------------------------------------------------- 1 | #import libraries to load pickle files to numpy 2 | import pickle 3 | import numpy as np 4 | import matplotlib.pyplot as plt 5 | import matplotlib as mpl 6 | path0 = '/home/yifei/ws/src/kr_autonomous_flight/kr_ilqr_optimizer/scripts/res/9.14.23/Effort009-14_15-44-48.pkl' 7 | path1 = '/home/yifei/ws/src/kr_autonomous_flight/kr_ilqr_optimizer/scripts/res/9.14.23/Effort109-14_15-44-48.pkl' 8 | #load pickle file 9 | data0 = pickle.load(open(path0, 'rb')) 10 | data1 = pickle.load(open(path1, 'rb')) 11 | 12 | print(data0) 13 | 
print(data1) 14 | 15 | #plot the data 16 | plt.figure(figsize=(12, 8)) 17 | plt.plot(data0,label='Effort0') 18 | plt.plot(data1,label='Effort1') 19 | plt.xlabel('iteration') 20 | plt.ylabel('effort') 21 | plt.legend() 22 | plt.show() -------------------------------------------------------------------------------- /scripts/eval_scripts/csv_plot_6_2d_eci.py: -------------------------------------------------------------------------------- 1 | 2 | import pandas as pd 3 | import matplotlib.pyplot as plt 4 | import seaborn as sns 5 | import numpy as np 6 | 7 | # Load the CSV file into a DataFrame 8 | file_path = '/home/yifei/ws/src/kr_autonomous_flight/kr_ilqr_optimizer/scripts/res/9.13.23/ECI_single_line_09-13_11-30-25_half_real.csv' 9 | df = pd.read_csv(file_path) 10 | 11 | eps = 1e-6 12 | # take log scale of 'density_index', 'clutter_index', 'structure_index' 13 | df['density_index'] = np.log(df['density_index'])#.apply(lambda x: np.log(x + eps)) 14 | df['clutter_index'] = np.log(df['clutter_index'])#.apply(lambda x: np.log(x + eps)) 15 | df['structure_index'] = np.log(df['structure_index'])#.apply(lambda x: np.log(x + eps)) 16 | 17 | 18 | # Group the data by 'map_seed' and calculate the number of successful runs 19 | grouped_map_seed_df = df.groupby(['map_seed', 'density_index', 'clutter_index', 'structure_index']).agg({ 20 | 'success': 'sum' 21 | }).reset_index() 22 | 23 | # Create 3 scatter plots for each pair of indices with the number of successful runs as the color 24 | fig, axs = plt.subplots(1, 3, figsize=(18, 6)) 25 | 26 | cmap = plt.cm.RdYlGn 27 | # Density Index vs Clutter Index 28 | sns.scatterplot(data=grouped_map_seed_df, 29 | x='density_index', 30 | y='clutter_index', 31 | hue='success', 32 | palette=cmap, 33 | ax=axs[0]) 34 | axs[0].set_title('Density vs Clutter') 35 | axs[0].set_xlabel('Density Index') 36 | axs[0].set_ylabel('Clutter Index') 37 | 38 | # Density Index vs Structure Index 39 | sns.scatterplot(data=grouped_map_seed_df, 40 | 
x='density_index', 41 | y='structure_index', 42 | hue='success', 43 | palette=cmap, 44 | ax=axs[1]) 45 | axs[1].set_title('Density vs Structure') 46 | axs[1].set_xlabel('Density Index') 47 | axs[1].set_ylabel('Structure Index') 48 | 49 | # Clutter Index vs Structure Index 50 | sns.scatterplot(data=grouped_map_seed_df, 51 | x='clutter_index', 52 | y='structure_index', 53 | hue='success', 54 | palette=cmap, 55 | ax=axs[2]) 56 | axs[2].set_title('Clutter vs Structure') 57 | axs[2].set_xlabel('Clutter Index') 58 | axs[2].set_ylabel('Structure Index') 59 | 60 | # Add a unified legend 61 | # fig.legend(title='Number of Successful Runs', loc='upper right') 62 | plt.tight_layout() 63 | plt.savefig('fig6.png') -------------------------------------------------------------------------------- /scripts/eval_scripts/csv_plot_7_real_data_two_clusters.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | from sklearn.mixture import GaussianMixture 4 | import matplotlib.pyplot as plt 5 | import matplotlib 6 | from mpl_toolkits.mplot3d import Axes3D 7 | import seaborn as sns 8 | fm = matplotlib.font_manager 9 | fm._get_fontconfig_fonts.cache_clear() 10 | font = {'family' : 'sans-serif', 11 | 'weight' : 'normal', 12 | 'size' : 13} 13 | matplotlib.rc('font', **font) 14 | # Step 1: Read the CSV file into a Pandas DataFrame 15 | file_path_real = '/home/yifei/ws/src/kr_autonomous_flight/kr_ilqr_optimizer/scripts/res/9.13.23/ECI_icra_real/ECI_single_line_Real_09-13_11-30-25.csv' 16 | 17 | df_real = pd.read_csv(file_path_real) 18 | 19 | # Step 2: Filter out data based on specific conditions 20 | filtered_df_jps_success = df_real[(df_real['planner_frontend'].str.contains('JPS', case=False)) & (df_real['success_detail'] < 2)] 21 | map_indices_jps_fail = filtered_df_jps_success['map_seed'].unique() 22 | total_real_maps = len(df_real['map_seed'].unique()) 23 | df_real_filtered = 
df_real[~df_real['map_seed'].isin(map_indices_jps_fail)] 24 | 25 | # Step 3: Extract environment indices and perform clustering with Gaussian Mixture Model (GMM) 26 | env_indices = df_real_filtered[['density_index', 'clutter_index', 'structure_index']].values 27 | gmm = GaussianMixture(n_components=3, random_state=0) 28 | gmm_labels = gmm.fit_predict(env_indices) 29 | df_real_filtered['env_cluster_label'] = gmm_labels 30 | 31 | #print label names 32 | print(df_real_filtered['env_cluster_label'].unique()) 33 | 34 | # Step 4: Combine two of the clusters into one and update the labels 35 | combined_labels = ['Vertical' if label == 2 else 'Other' for label in df_real_filtered['env_cluster_label']] 36 | df_real_filtered['combined_cluster_label'] = combined_labels 37 | 38 | #count the number of each label 39 | print(df_real_filtered['combined_cluster_label'].value_counts()) 40 | 41 | # # Step 5: Create a 3D scatter plot with new combined cluster labels 42 | # fig = plt.figure(figsize=(12, 8)) 43 | # ax = fig.add_subplot(111, projection='3d') 44 | # for label in ['Vertical', 'Other']: 45 | # cluster_data = env_indices[[l == label for l in combined_labels]] 46 | # ax.scatter(cluster_data[:, 0], cluster_data[:, 1], cluster_data[:, 2], label=label, s=20, c= ['black' if label == 'Vertical' else 'grey']) 47 | 48 | # # Add labels and title 49 | # ax.set_xlabel('Density Index') 50 | # ax.set_ylabel('Clutter Index') 51 | # ax.set_zlabel('Structure Index') 52 | # ax.set_title('3D Scatter Plot of Environment Indices with Combined Clusters and Labels') 53 | # ax.legend(title='Environment Clusters') 54 | 55 | 56 | df_real_filtered['combined_cluster_label'] = combined_labels 57 | 58 | # Group the filtered data by 'combined_cluster_label', 'planner_frontend', and 'planner_backend' 59 | # Then calculate the metrics: success rate based on the 'success' column, avg trajectory time, and avg traj jerk 60 | grouped_by_combined_cluster = df_real_filtered.groupby(['combined_cluster_label', 
'planner_frontend', 'planner_backend']).agg( 61 | total_count=('success', 'count'), 62 | success_count=('success', 'sum'), # summing the True values in the 'success' column 63 | avg_traj_time=('traj_time(s)', 'mean'), 64 | avg_traj_jerk=('traj_jerk', 'mean') 65 | ) 66 | 67 | grouped_by_cluster_detail = df_real_filtered.groupby(['combined_cluster_label', 'planner_frontend', 'planner_backend']) 68 | #draw boxplot of traj_time and traj_jerk each group 69 | # fig, axs = plt.subplots(5, 2, figsize=(18, 30)) 70 | # for i, ((cluster, frontend, backend), group) in enumerate(grouped_by_cluster_detail): 71 | # ax = axs[i//2, i%2] 72 | # ax.set_title(f'Cluster: {cluster}, Frontend: {frontend}, Backend: {backend}') 73 | # ax.set_xlabel('Success') 74 | # ax.set_ylabel('Trajectory Time (s)') 75 | # sns.boxplot(x='success', y='traj_time(s)', data=group, ax=ax) 76 | # exit() 77 | 78 | # Convert success_count to integer (it's currently boolean) 79 | grouped_by_combined_cluster['success_count'] = grouped_by_combined_cluster['success_count'].astype(int) 80 | 81 | # Calculate the success rate based on the 'success' column 82 | grouped_by_combined_cluster['success_rate'] = (grouped_by_combined_cluster['success_count'] / grouped_by_combined_cluster['total_count']) * 100 83 | 84 | # Reset index for better readability 85 | grouped_by_combined_cluster.reset_index(inplace=True) 86 | 87 | grouped_by_combined_cluster.to_csv('/home/yifei/ws/src/kr_autonomous_flight/kr_ilqr_optimizer/scripts/res/9.13.23/ECI_icra_real/cluster_result.csv') 88 | 89 | sample_map_filenames = df_real_filtered.groupby('combined_cluster_label').apply(lambda x: x.sample(n=6, random_state=10086)['map_filename'].tolist()) 90 | # sample_map_filenames2 = df_real_filtered.groupby('combined_cluster_label').apply(lambda x: x.sample(n=6, random_state=69)['map_filename'].tolist()) 91 | #print the samples 92 | 93 | 94 | 95 | # Extract the corresponding environment indices for these samples 96 | # make sure combined map_filename, 
density_index, clutter_index, structure_index are unique 97 | unique_maps = df_real_filtered[['map_filename', 'density_index', 'clutter_index', 'structure_index']].drop_duplicates() 98 | 99 | print(unique_maps.value_counts()) 100 | sample_indices_moderate = unique_maps[unique_maps['map_filename'].isin(sample_map_filenames.Vertical)][['density_index', 'clutter_index', 'structure_index']].values 101 | sample_indices_other = unique_maps[unique_maps['map_filename'].isin(sample_map_filenames.Other)][['density_index', 'clutter_index', 'structure_index']].values 102 | 103 | print("vertical #################") 104 | for i,ele in enumerate(sample_map_filenames.Vertical): 105 | print('pcl_viewer -bc 255,255,255 ' + ele) 106 | print(sample_indices_moderate[i,:]) 107 | print("other ########################") 108 | for i,ele in enumerate(sample_map_filenames.Other): 109 | print('pcl_viewer -bc 255,255,255 ' + ele) 110 | print(sample_indices_other[i,:]) 111 | 112 | # # plt.show() 113 | 114 | # Create a new 3D scatter plot with updated markers and colors for samples 115 | fig = plt.figure(figsize=(12, 8)) 116 | ax = fig.add_subplot(111, projection='3d') 117 | ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0)) 118 | ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 1.0)) 119 | ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 1.0)) 120 | 121 | 122 | # Scatter points for the clusters 123 | for label, marker in zip(['Vertical', 'Other'], ['o', 'o']): 124 | cluster_data = env_indices[[l == label for l in combined_labels]] 125 | ax.scatter(cluster_data[:, 0], cluster_data[:, 1], cluster_data[:, 2], label=label, marker=marker, s=5,c= ['black' if label == 'Vertical' else 'grey'], alpha = 0.2) 126 | 127 | # Mark the samples from each cluster with different colored crosses and larger size 128 | list_of_idx_vertical = [0,1, 3, 5] 129 | # list_of_idx_other = [0,1,2,3,4,5] 130 | # list_of_idx_other = [b,g,r,c,m,y] 131 | list_of_idx_other = [1,2,4,5] 132 | 133 | 134 | 
ax.scatter(sample_indices_moderate[list_of_idx_vertical, 0], sample_indices_moderate[list_of_idx_vertical, 1], sample_indices_moderate[list_of_idx_vertical, 2], c='red', marker='x', s=200, alpha = 1,label='Samples from Vertical',linewidths=3) 135 | ax.scatter(sample_indices_other[list_of_idx_other, 0], sample_indices_other[list_of_idx_other, 1], sample_indices_other[list_of_idx_other, 2], c='limegreen', marker='x', s=200,alpha = 1, label='Samples from Other', linewidths=3) 136 | 137 | # Add labels and title 138 | #set label with large font x 139 | font_aa = 20 140 | ax.set_xlabel('Density Index',fontsize=font_aa, labelpad=10) 141 | ax.set_ylabel('Clutter Index',fontsize=font_aa, labelpad=10) 142 | ax.set_zlabel('Structure Index',fontsize=font_aa, labelpad=10) 143 | # ax.set_title('3D Scatter Plot of Environment Indices with Updated Samples Marked') 144 | 145 | # Add legend to indicate cluster names and samples 146 | # ax.legend(title='Environment Clusters and Samples') 147 | 148 | plt.savefig('fig7.png') 149 | 150 | 151 | -------------------------------------------------------------------------------- /scripts/eval_scripts/csv_plot_8_obstacle_data_relative_performance.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import matplotlib.pyplot as plt 3 | import seaborn as sns 4 | import numpy as np 5 | import random 6 | 7 | # Load the data 8 | path_prefix = '/home/yifei/Documents/kr_ilqr_optimizer_icra/' 9 | save_path = path_prefix +'scripts/res/video_figures/' 10 | # Read the CSV file into a DataFrame 11 | file_path_real = path_prefix + 'scripts/res/9.13.23/ECI_icra_real/ECI_single_line_Real_09-13_11-30-25.csv' 12 | file_path_maze = path_prefix + 'scripts/res/9.13.23/ECI_icra_maze/ECI_single_line_09-13_15-28-22.csv' 13 | file_path_maze2 = path_prefix + 'scripts/res/9.13.23/ECI_icra_maze/ECI_single_line_maze_80_09-13_22-36-55.csv' 14 | file_path_strucutre = path_prefix + 
'scripts/res/9.13.23/ECI_icra_strcture/ECI_single_line_Obstacles_09-13_19-29-42.csv' 15 | # file_path_real 16 | df_real = pd.read_csv(file_path_maze2) 17 | 18 | # Filtering and initial analysis 19 | filtered_df_jps_success = df_real[(df_real['planner_frontend'].str.contains('JPS', case=False)) & (df_real['success_detail'] < 2)] 20 | map_indices_jps_fail = filtered_df_jps_success['map_seed'].unique() 21 | df_real = df_real[~df_real['map_seed'].isin(map_indices_jps_fail)] 22 | 23 | # Remove rows where 'map_seed' is in 'map_indices_jps_fail' 24 | # Total number of unique real maps 25 | total_real_maps = len(df_real['map_seed'].unique()) 26 | print(total_real_maps) 27 | 28 | grouped = df_real.groupby(['planner_frontend', 'planner_backend']) 29 | summary = grouped.size().reset_index(name='total_counts') 30 | success_counts = df_real[df_real['success_detail'] >= 3].groupby(['planner_frontend', 'planner_backend']).size().reset_index(name='success_counts') 31 | result = pd.merge(summary, success_counts, on=['planner_frontend', 'planner_backend'], how='left') 32 | result['success_percentage'] = (result['success_counts'] / result['total_counts']) * 100 33 | 34 | print(result) 35 | 36 | index_stats = df_real[['density_index', 'clutter_index', 'structure_index']].describe().loc[['min', 'max', '50%']] 37 | 38 | # iteratively figure out how to split data into 2^3 regions using three axis 39 | # start with median 40 | # split_D_v = 0.5 41 | # split_C_v = 0.5 42 | # split_S_v = 0.51 43 | # Split values: 0.5, 0.5, 0.51 44 | # region 45 | # hD, lC, lS 1700 46 | # lD, hC, hS 1700 47 | # hD, hC, lS 1160 48 | # lD, lC, lS 1020 49 | # lD, lC, hS 840 50 | # hD, hC, hS 690 51 | # lD, hC, lS 110 52 | # hD, lC, hS 110 53 | # print(split_values) 54 | 55 | for iter in range (1): 56 | # print(f"Split values: {split_D_v}, {split_C_v}, {split_S_v}") 57 | # split_D = index_stats['density_index'].quantile(split_D_v) 58 | # split_C = index_stats['clutter_index'].quantile(split_C_v) 59 | # split_S = 
index_stats['structure_index'].quantile(split_S_v) 60 | split_D, split_C, split_S = 0.092449, 0.0442482219179718, 0.28364437802936177 61 | print(f"Split values: {split_D}, {split_C}, {split_S}") 62 | # density_median = index_stats.loc['50%', 'density_index'] 63 | # clutter_median = index_stats.loc['50%', 'clutter_index'] 64 | # structure_median = index_stats.loc['50%', 'structure_index'] 65 | 66 | def categorize(row): 67 | density_cat = 'hD' if row['density_index'] > split_D else 'lD' 68 | clutter_cat = 'hC' if row['clutter_index'] > split_C else 'lC' 69 | structure_cat = 'hS' if row['structure_index'] > split_S else 'lS' 70 | return f"{density_cat}, {clutter_cat}, {structure_cat}" 71 | 72 | # Apply corrected categorization 73 | df_real['region'] = df_real.apply(categorize, axis=1) 74 | 75 | # Count maps in each region 76 | region_counts = df_real['region'].value_counts() 77 | # insert a row for each region if it does not exist 78 | for region in ['hD, hC, hS', 'hD, hC, lS', 'hD, lC, hS', 'hD, lC, lS', 'lD, hC, hS', 'lD, hC, lS', 'lD, lC, hS', 'lD, lC, lS']: 79 | if region not in region_counts: 80 | region_counts[region] = 0 81 | # if iter % 100 == 0: 82 | print(region_counts) 83 | break 84 | # subtract the minimum count from max count 85 | # diff_og = np.max(list(region_counts.values())) - np.min(list(region_counts.values())) 86 | # if 87 | # convert to int eval 88 | diff_D = np.array([region_counts['hD, hC, hS'] - region_counts['lD, hC, hS'], 89 | region_counts['hD, lC, hS'] - region_counts['lD, lC, hS'], 90 | region_counts['hD, hC, lS'] - region_counts['lD, hC, lS'], 91 | region_counts['hD, lC, lS'] - region_counts['lD, lC, lS']]) 92 | diff_C = np.array([region_counts['hD, hC, hS'] - region_counts['hD, lC, hS'], 93 | region_counts['lD, hC, hS'] - region_counts['lD, lC, hS'], 94 | region_counts['hD, hC, lS'] - region_counts['hD, lC, lS'], 95 | region_counts['lD, hC, lS'] - region_counts['lD, lC, lS']]) 96 | diff_S = np.array([region_counts['hD, hC, hS'] - 
region_counts['hD, hC, lS'], 97 | region_counts['lD, hC, hS'] - region_counts['lD, hC, lS'], 98 | region_counts['hD, lC, hS'] - region_counts['hD, lC, lS'], 99 | region_counts['lD, lC, hS'] - region_counts['lD, lC, lS']]) 100 | # print(diff_D) 101 | # print(diff_C) 102 | # print(diff_S) 103 | D_sum = np.max(np.abs(diff_D)) 104 | C_sum = np.max(np.abs(diff_C)) 105 | S_sum = np.max(np.abs(diff_S)) 106 | D_sum_arg_max = np.argmax(np.abs(diff_D)) 107 | C_sum_arg_max = np.argmax(np.abs(diff_C)) 108 | S_sum_arg_max = np.argmax(np.abs(diff_S)) 109 | D_sum_sign = np.sign(diff_D[D_sum_arg_max]) 110 | C_sum_sign = np.sign(diff_C[C_sum_arg_max]) 111 | S_sum_sign = np.sign(diff_S[S_sum_arg_max]) 112 | sign_together = np.array([D_sum_sign, C_sum_sign, S_sum_sign], dtype=int) 113 | # max_diff = np.max([D_sum, C_sum, S_sum]) 114 | #random 0, 1, 2 115 | 116 | argmax_diff = random.randint(0, 2) 117 | if argmax_diff == 0: 118 | split_D_v += 0.01 * D_sum_sign 119 | # print(f"Split D: {split_D_v}") 120 | elif argmax_diff == 1: 121 | split_C_v += 0.01 * C_sum_sign 122 | # print(f"Split C: {split_C_v}") 123 | else: 124 | split_S_v += 0.01 * S_sum_sign 125 | print(f"Split S: {split_S_v}") 126 | 127 | 128 | 129 | 130 | 131 | 132 | # if region_counts['hD, hC, hS'] < region_counts['hD, lC, hS'] \ 133 | # and region_counts['lD, hC, hS'] < region_counts['lD, lC, hS'] \ 134 | # and region_counts['hD, hC, lS'] < region_counts['hD, lC, lS'] \ 135 | # and region_counts['lD, hC, lS'] < region_counts['lD, lC, lS']: 136 | # # print(f"Split values: {split_D}, {split_C}, {split_S}") 137 | # split_C_v -= 0.05 138 | # elif region_counts['hD, hC, hS'] > region_counts['hD, lC, hS'] \ 139 | # and region_counts['lD, hC, hS'] > region_counts['lD, lC, hS'] \ 140 | # and region_counts['hD, hC, lS'] > region_counts['hD, lC, lS'] \ 141 | # and region_counts['lD, hC, lS'] > region_counts['lD, lC, lS']: 142 | # # print(f"Split values: {split_D}, {split_C}, {split_S}") 143 | # split_C_v += 0.05 144 | # else: 145 | 
# print("C balanced") 146 | 147 | # if region_counts['hD, hC, hS'] < region_counts['hD, hC, lS'] \ 148 | # and region_counts['lD, hC, hS'] < region_counts['lD, hC, lS'] \ 149 | # and region_counts['hD, lC, hS'] < region_counts['hD, lC, lS'] \ 150 | # and region_counts['lD, lC, hS'] < region_counts['lD, lC, lS']: 151 | # # print(f"Split values: {split_D}, {split_C}, {split_S}") 152 | # split_S_v -= 0.05 153 | # elif region_counts['hD, hC, hS'] > region_counts['hD, hC, lS'] \ 154 | # and region_counts['lD, hC, hS'] > region_counts['lD, hC, lS'] \ 155 | # and region_counts['hD, lC, hS'] > region_counts['hD, lC, lS'] \ 156 | # and region_counts['lD, lC, hS'] > region_counts['lD, lC, lS']: 157 | # # print(f"Split values: {split_D}, {split_C}, {split_S}") 158 | # split_S_v += 0.05 159 | # else: 160 | # print("S balanced") 161 | 162 | 163 | 164 | 165 | 166 | 167 | 168 | 169 | 170 | # if the region is not balanced, adjust the split values 171 | # if df_real 172 | 173 | # Group by region, planner_frontend, and planner_backend, then calculate success rates within each region 174 | grouped_region = df_real.groupby(['region', 'planner_frontend', 'planner_backend']) 175 | success_region = df_real[df_real['success_detail'] >= 3].groupby(['region', 'planner_frontend', 'planner_backend']).size().reset_index(name='success_counts') 176 | total_region = grouped_region.size().reset_index(name='total_counts') 177 | success_rate_region = pd.merge(total_region, success_region, on=['region', 'planner_frontend', 'planner_backend'], how='left') 178 | success_rate_region['success_percentage'] = (success_rate_region['success_counts'] / success_rate_region['total_counts']) * 100 179 | 180 | # Pivot the table for better visibility 181 | pivot_table_region = success_rate_region.pivot_table(index=['planner_frontend', 'planner_backend'], columns='region', values='success_percentage', fill_value=0) 182 | # pivot_table_region.columns = [ 183 | # "High Density, High Clutter, High Structure", 184 | # 
"High Density, High Clutter, Low Structure", 185 | # "High Density, Low Clutter, High Structure", 186 | # "High Density, Low Clutter, Low Structure", 187 | # "Low Density, High Clutter, High Structure", 188 | # "Low Density, High Clutter, Low Structure", 189 | # "Low Density, Low Clutter, High Structure", 190 | # "Low Density, Low Clutter, Low Structure" 191 | # ] 192 | print(pivot_table_region) 193 | # Prepare data by grouping by region first and then by planner combinations for the plot 194 | plot_data_region_first = success_rate_region.copy() 195 | plot_data_region_first['Planner'] = plot_data_region_first['planner_frontend'] + " + " + plot_data_region_first['planner_backend'] 196 | plot_data_region_first = plot_data_region_first.pivot(index='region', columns='Planner', values='success_percentage') 197 | 198 | # Adjusting the plot to start the y-axis from the smallest value to the largest value 199 | # plt.figure(figsize=(16, 10)) 200 | # bar_plot = plot_data_region_first.plot(kind='bar', colormap='tab20') 201 | # plt.title('Success Rate of Planners Across Different Regions Grouped by Map Characteristics') 202 | # plt.ylabel('Success Rate (%)') 203 | # plt.xlabel('Map Characteristics') 204 | # plt.xticks(rotation=45) 205 | 206 | plot_data = success_rate_region.copy() 207 | plot_data['Planner'] = plot_data['planner_frontend'] + " + " + plot_data['planner_backend'] 208 | plot_data = plot_data.pivot(index='Planner', columns='region', values='success_percentage') 209 | 210 | min_success_rate = plot_data.min().min() 211 | max_success_rate = plot_data.max().max() 212 | 213 | # unique_maps_per_region = df_real.groupby('region')['map_seed'].nunique() 214 | 215 | unique_maps_per_region = df_real.groupby('region')['map_seed'].nunique().to_dict() 216 | 217 | print(unique_maps_per_region) 218 | x_labels_with_unique_maps = [f"{region} (n={unique_maps_per_region[region]})" for region in plot_data_region_first.index] 219 | 220 | # Regenerate the plot with updated labels 221 | 
# plt.figure(figsize=(16, 10)) 222 | bar_plot = plot_data_region_first.plot(kind='bar', colormap='tab20') 223 | plt.title('Success Rate of Planners Across Different Regions Grouped by Map Characteristics') 224 | plt.ylabel('Success Rate (%)') 225 | plt.xlabel('Map Characteristics') 226 | plt.xticks(ticks=range(len(x_labels_with_unique_maps)), labels=x_labels_with_unique_maps, rotation=45) 227 | plt.ylim(min_success_rate, max_success_rate) # Ensure y-axis limits are set correctly 228 | plt.legend(title='Planner Combinations', bbox_to_anchor=(1.05, 1), loc='upper left') 229 | plt.tight_layout() 230 | 231 | plt.show() 232 | exit() 233 | # Group by frontend and backend planners, then calculate total counts and success rates 234 | grouped = df_real.groupby(['planner_frontend', 'planner_backend']) 235 | summary = grouped.size().reset_index(name='total_counts') 236 | success_counts = df_real[df_real['success_detail'] >= 2].groupby(['planner_frontend', 'planner_backend']).size().reset_index(name='success_counts') 237 | result = pd.merge(summary, success_counts, on=['planner_frontend', 'planner_backend'], how='left') 238 | result['success_percentage'] = (result['success_counts'] / result['total_counts']) * 100 239 | 240 | map_indices_jps_fail, total_real_maps, result 241 | 242 | 243 | # Group by frontend and backend planners, then calculate total counts and success rates 244 | grouped = df_real.groupby(['planner_frontend', 'planner_backend']) 245 | summary = grouped.size().reset_index(name='total_counts') 246 | success_counts = df_real[df_real['success_detail'] >= 2].groupby(['planner_frontend', 'planner_backend']).size().reset_index(name='success_counts') 247 | result = pd.merge(summary, success_counts, on=['planner_frontend', 'planner_backend'], how='left') 248 | result['success_percentage'] = (result['success_counts'] / result['total_counts']) * 100 249 | 250 | # Split data based on density, clutter, and structure indices 251 | density_median = 
df_real['density_index'].median() 252 | clutter_median = df_real['clutter_index'].median() 253 | structure_median = df_real['structure_index'].median() 254 | 255 | def categorize(row): 256 | density_cat = 'high' if row['density_index'] > density_median else 'low' 257 | clutter_cat = 'high' if row['clutter_index'] > clutter_median else 'low' 258 | structure_cat = 'high' if row['structure_index'] > structure_median else 'low' 259 | return f"{density_cat}-{clutter_cat}-{structure_cat}" 260 | 261 | df_real['region'] = df_real.apply(categorize, axis=1) 262 | 263 | # Calculate success rates by region and planner 264 | success_rate_region = df_real.groupby(['region', 'planner_frontend', 'planner_backend']).agg( 265 | total_counts=('map_seed', 'size'), 266 | success_counts=('success_detail', lambda x: (x >= 2).sum()) 267 | ).reset_index() 268 | success_rate_region['success_percentage'] = (success_rate_region['success_counts'] / success_rate_region['total_counts']) * 100 269 | 270 | # Count unique maps per region 271 | unique_maps_per_region = df_real.groupby('region')['map_seed'].nunique() 272 | 273 | # Prepare data for plotting 274 | plot_data_region_first = success_rate_region.pivot(index='region', columns='Planner', values='success_percentage') 275 | x_labels_with_unique_maps = [f"{region} (n={unique_maps_per_region[region]})" for region in plot_data_region_first.index] 276 | plt.xticks(ticks=range(len(x_labels_with_unique_maps)), labels=x_labels_with_unique_maps, rotation=45) 277 | 278 | # Plotting the grouped bar plot 279 | plt.figure(figsize=(16, 10)) 280 | plot_data_region_first.plot(kind='bar', colormap='tab20') 281 | plt.title('Success Rate of Planners Across Different Regions Grouped by Map Characteristics') 282 | plt.ylabel('Success Rate (%)') 283 | plt.xlabel('Map Characteristics') 284 | plt.ylim(min(plot_data_region_first.min().min()), max(plot_data_region_first.max().max())) 285 | plt.legend(title='Planner Combinations', bbox_to_anchor=(1.05, 1), loc='upper 
left') 286 | plt.tight_layout() 287 | plt.show() -------------------------------------------------------------------------------- /scripts/eval_scripts/evaluate_1_traj_load_csv.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | # import yaml 3 | import csv 4 | 5 | import numpy as np 6 | import os 7 | import pandas as pd 8 | 9 | #increase number of pandas prints width 10 | pd.set_option('display.max_columns', 50) 11 | 12 | 13 | directory = '/home/yifei/ws/src/kr_autonomous_flight/kr_ilqr_optimizer/scripts/res' 14 | #what do we need 15 | #we have 10 methods and 3 environemnt index 16 | 17 | #hanlde files from a the folder if there is no result associated with that file 18 | # step 1: iterate through the folder 19 | for filename in os.listdir(directory): 20 | if "csv" in filename[-3:]: 21 | print(filename) 22 | full_path = os.path.join(directory, filename) 23 | # step 2: check if there is a result file associated with that file 24 | # if os.path.isfile(full_path + '.txt'): 25 | # print("file exists") 26 | # continue 27 | # step 3: if there is no result file, then load the file and run the evaluation 28 | df = pd.read_csv(full_path) 29 | #divide environemnt index in column 'density_index' into bins and based on percentile, divide into three dfs 30 | print(df.describe()) 31 | name_of_col = ['density_index','clutter_index','structure_index'] 32 | index_limits = np.zeros((3,4)) 33 | df_collect = [] 34 | df_name_collect = [] 35 | for i in range(3): 36 | index_name = name_of_col[i] 37 | q33 = df[index_name].quantile(1/3) 38 | q66 = df[index_name].quantile(2/3) 39 | df1 = df[df[index_name] <= q33] 40 | df2 = df[(df[index_name] > q33) & (df[index_name] <= q66)] 41 | df3 = df[df[index_name] > q66] 42 | # print(df1.describe()) 43 | # print(df2.describe()) 44 | # print(df3.describe()) 45 | print(f"{index_name} 0-33%: Min = {df1[index_name].min()}, Max = {df1[index_name].max()}") 46 | print(f"{index_name} 33-66%: Min = 
{df2[index_name].min()}, Max = {df2[index_name].max()}") 47 | print(f"{index_name} 66-100%: Min = {df3[index_name].min()}, Max = {df3[index_name].max()}") 48 | index_limits[i,0] = df1[index_name].min() 49 | index_limits[i,1] = q33 50 | index_limits[i,2] = q66 51 | index_limits[i,3] = df3[index_name].max() 52 | #now cut the df into 27 dfs based on the index_limits 53 | for i in range(3): 54 | for j in range(3): 55 | for k in range(3): 56 | df_collect.append(df[(df[name_of_col[0]] >= index_limits[0,i]) & (df[name_of_col[0]] <= index_limits[0,i+1]) & 57 | (df[name_of_col[1]] >= index_limits[1,j]) & (df[name_of_col[1]] <= index_limits[1,j+1]) & 58 | (df[name_of_col[2]] >= index_limits[2,k]) & (df[name_of_col[2]] <= index_limits[2,k+1])]) 59 | df_name_collect.append(name_of_col[0]+' '+str(i) + ' '+name_of_col[1]+ ' '+str(j) + ' '+ name_of_col[2] + ' '+str(k)) 60 | #now we have 27 dfs, we can do the evaluation 61 | sig_figs = 2 62 | 63 | # Format the number with the specified number of significant figures 64 | for i in range(27): 65 | print("DF: ", i) 66 | 67 | for col_name in name_of_col: 68 | formatted_min = "{:.{p}g}".format(df_collect[i][col_name].min(), p=sig_figs) 69 | formatted_max = "{:.{p}g}".format(df_collect[i][col_name].max(), p=sig_figs) 70 | 71 | print(df_name_collect[i]) 72 | print("number of data pts", df_collect[i][col_name].count(),"success = ", df_collect[i]["success"].sum()) 73 | # print(df_collect[i].describe()) 74 | 75 | 76 | 77 | 78 | # csv_reader = csv.reader(file) 79 | # for row in csv_reader: 80 | # print(row) 81 | # break 82 | # step 4: save the result file 83 | with open(full_path + '.txt', mode='w') as file: 84 | file.write("hello world") 85 | 86 | 87 | -------------------------------------------------------------------------------- /scripts/eval_scripts/evaluate_2_traj_count_success.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | pd.set_option('display.max_columns', 50) 3 | 4 | 
## Save prompt here!

# Step 1: Load the data from the provided CSV file.
# Per (frontend, backend) planner combination this script computes success,
# collision, and server-failure rates plus average compute times, after
# dropping every map where the JPS front end failed so that all planners are
# compared on the same set of maps.

path_prefix = '/home/yifei/Documents/kr_ilqr_optimizer_icra/'
save_path = path_prefix + 'scripts/res/video_figures/'
# Candidate result files; select one via `file_path` below.
file_path_real = path_prefix + 'scripts/res/9.13.23/ECI_icra_real/ECI_single_line_Real_09-13_11-30-25.csv'
file_path_maze = path_prefix + 'scripts/res/9.13.23/ECI_icra_maze/ECI_single_line_09-13_15-28-22.csv'
file_path_maze2 = path_prefix + 'scripts/res/9.13.23/ECI_icra_maze/ECI_single_line_maze_80_09-13_22-36-55.csv'
# NOTE: 'strcture' is the on-disk directory name; the path string must keep it.
file_path_structure = path_prefix + 'scripts/res/9.13.23/ECI_icra_strcture/ECI_single_line_Obstacles_09-13_19-29-42.csv'

file_path = file_path_real
df = pd.read_csv(file_path)

# A JPS row with success_detail < 2 means the front end failed on that map;
# such maps are removed for every planner combination.
filtered_df_jps_success = df[(df['planner_frontend'].str.contains('JPS', case=False)) & (df['success_detail'] < 2)]
map_indices_jps_fail = filtered_df_jps_success['map_seed'].unique()
print('JPS failed map', map_indices_jps_fail)
total_real_maps = len(df['map_seed'].unique())
df = df[~df['map_seed'].isin(map_indices_jps_fail)]
print(df.count())

# Re-group the data by 'planner_frontend' and 'planner_backend'
grouped_df = df.groupby(['planner_frontend', 'planner_backend'])

# Collect one summary dict per planner combination and build the DataFrame
# once at the end. This replaces the previous per-iteration
# DataFrame._append, which is a non-public pandas API and quadratic in cost.
result_columns = ['count', 'planner_frontend', 'planner_backend',
                  'success_rate', 'collision_rate', 'server failure rate',
                  'avg_frontend_time_success', 'avg_poly_time_success', 'avg_backend_time_success',
                  'avg_frontend_time_not_success', 'avg_poly_time_not_success', 'avg_backend_time_not_success',
                  'dist to goal(m)']
result_rows = []

# Iterate through each group to calculate the various metrics.
for (planner_frontend, planner_backend), group in grouped_df:
    total_count = len(group)
    success_count = group['success'].sum()
    collision_count = group['collision_status'].sum()
    count_minus_one = (group['success_detail'] == -1).sum()  # server failures

    success_rate = (success_count / total_count) * 100
    collision_rate = (collision_count / total_count) * 100
    server_failure_rate = (count_minus_one / total_count) * 100

    success_group = group[group['success'] == True]
    failure_group = group[group['success'] == False]

    # Average times for successful trials. (The poly time was previously
    # computed twice on consecutive lines; once is sufficient.)
    avg_frontend_time_success = success_group['compute_time_frontend(ms)'].mean()
    avg_backend_time_success = success_group['compute_time_backend(ms)'].mean()
    avg_poly_time_success = success_group['compute_time_poly(ms)'].mean()
    dist_to_goal = success_group['dist_to_goal(m)'].mean()

    # Average times for unsuccessful trials.
    avg_frontend_time_not_success = failure_group['compute_time_frontend(ms)'].mean()
    avg_backend_time_not_success = failure_group['compute_time_backend(ms)'].mean()
    avg_poly_time_not_success = failure_group['compute_time_poly(ms)'].mean()

    result_rows.append({
        'planner_frontend': planner_frontend,
        'planner_backend': planner_backend,
        'success_rate': success_rate,
        'collision_rate': collision_rate,
        'avg_frontend_time_success': avg_frontend_time_success,
        'avg_poly_time_success': avg_poly_time_success,
        'avg_backend_time_success': avg_backend_time_success,
        'avg_frontend_time_not_success': avg_frontend_time_not_success,
        'avg_poly_time_not_success': avg_poly_time_not_success,
        'avg_backend_time_not_success': avg_backend_time_not_success,
        'server failure rate': server_failure_rate,
        'count': total_count,
        'dist to goal(m)': dist_to_goal
    })

result_df = pd.DataFrame(result_rows, columns=result_columns)

# Sort the result DataFrame
result_df_sorted_with_all_data = result_df.sort_values(by=['planner_frontend', 'planner_backend'])
78 | print(result_df_sorted_with_all_data) 79 | 80 | #save the result_df_sorted_with_all_data to csv 81 | # result_df_sorted_with_all_data.to_csv('/home/yifei/ws/src/kr_autonomous_flight/kr_ilqr_optimizer/scripts/res/ECI_single_line_09-09_02-03-44_big.csv.csv', index=False) 82 | -------------------------------------------------------------------------------- /scripts/eval_scripts/evaluate_3_three_file_combined_tb1.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | 3 | # File paths 4 | file_paths = [ 5 | '/path/to/ECI_single_line_Obstacles_09-13_19-29-42.csv', 6 | '/path/to/ECI_single_line_Real_09-13_11-30-25.csv', 7 | '/path/to/ECI_single_line_maze.csv' 8 | ] 9 | 10 | # Initialize dictionaries for filtered data and statistics 11 | filtered_data = {} 12 | grouped_stats = {} 13 | 14 | # Loop through each file to perform data filtering and grouping 15 | for file_path in file_paths: 16 | # Read the CSV file into a DataFrame 17 | df_real = pd.read_csv(file_path) 18 | 19 | # Filter rows based on specific conditions 20 | filtered_df_jps_success = df_real[(df_real['planner_frontend'].str.contains('JPS', case=False)) & (df_real['success_detail'] < 2)] 21 | map_indices_jps_fail = filtered_df_jps_success['map_seed'].unique() 22 | df_real = df_real[~df_real['map_seed'].isin(map_indices_jps_fail)] 23 | 24 | # Store the filtered DataFrame 25 | filtered_data[file_path] = df_real 26 | 27 | # Group by 'planner_frontend' and calculate statistics 28 | total_counts = df_real['planner_frontend'].value_counts() 29 | success_counts = df_real[df_real['success_detail'] >= 2]['planner_frontend'].value_counts() 30 | success_percentage = (success_counts / total_counts) * 100 31 | 32 | stats_df = pd.DataFrame({ 33 | 'Total Counts': total_counts, 34 | 'Success Counts': success_counts, 35 | 'Success Percentage': success_percentage 36 | }).reset_index().rename(columns={'index': 'Planner Frontend'}).fillna(0) 37 | 38 | 
grouped_stats[file_path] = stats_df 39 | 40 | # Combine the 'Success Percentage' columns from all files into a single DataFrame 41 | success_percentage_df = pd.DataFrame() 42 | for file_path, stats_df in grouped_stats.items(): 43 | file_name = file_path.split('/')[-1] 44 | temp_df = stats_df[['Planner Frontend', 'Success Percentage']].set_index('Planner Frontend') 45 | temp_df.columns = [file_name] 46 | if success_percentage_df.empty: 47 | success_percentage_df = temp_df 48 | else: 49 | success_percentage_df = success_percentage_df.merge(temp_df, left_index=True, right_index=True, how='outer') 50 | 51 | # Reorder rows and columns based on specified orders 52 | row_order = ['JPS (geometric)', 'RRT', 'MPL', 'SST', 'dispersion planner'] 53 | column_order = ['ECI_single_line_Real_09-13_11-30-25.csv', 'ECI_single_line_maze.csv', 'ECI_single_line_Obstacles_09-13_19-29-42.csv'] 54 | 55 | success_percentage_df = success_percentage_df.reindex(row_order).fillna(0)[column_order] 56 | 57 | print(success_percentage_df) 58 | 59 | 60 | import pandas as pd 61 | 62 | # File paths 63 | file_paths = [ 64 | '/path/to/ECI_single_line_Obstacles_09-13_19-29-42.csv', 65 | '/path/to/ECI_single_line_Real_09-13_11-30-25.csv', 66 | '/path/to/ECI_single_line_maze.csv' 67 | ] 68 | 69 | # Initialize dictionaries for filtered data and statistics 70 | filtered_data = {} 71 | grouped_stats = {} 72 | grouped_stats_frontend_backend = {} 73 | merged_stats = {} 74 | final_merged_stats_df = pd.DataFrame() 75 | 76 | # Loop through each file to perform data filtering and grouping 77 | for file_path in file_paths: 78 | # Read the CSV file into a DataFrame 79 | df_real = pd.read_csv(file_path) 80 | 81 | # Filter rows based on specific conditions 82 | filtered_df_jps_success = df_real[(df_real['planner_frontend'].str.contains('JPS', case=False)) & (df_real['success_detail'] < 2)] 83 | map_indices_jps_fail = filtered_df_jps_success['map_seed'].unique() 84 | df_real = 
df_real[~df_real['map_seed'].isin(map_indices_jps_fail)] 85 | 86 | # Store the filtered DataFrame 87 | filtered_data[file_path] = df_real 88 | 89 | # Group by 'planner_frontend' and calculate statistics 90 | total_counts = df_real['planner_frontend'].value_counts() 91 | success_counts = df_real[df_real['success_detail'] >= 2]['planner_frontend'].value_counts() 92 | success_percentage = (success_counts / total_counts) * 100 93 | 94 | # Additional grouping considering both frontend and backend 95 | total_counts_fb = df_real.groupby(['planner_frontend', 'planner_backend']).size().reset_index(name='Total Counts') 96 | success_counts_fb = df_real[df_real['success_detail'] == 3].groupby(['planner_frontend', 'planner_backend']).size().reset_index(name='Success Counts') 97 | stats_df_fb = pd.merge(total_counts_fb, success_counts_fb, on=['planner_frontend', 'planner_backend'], how='left').fillna(0) 98 | stats_df_fb['Success Percentage'] = (stats_df_fb['Success Counts'] / stats_df_fb['Total Counts']) * 100 99 | 100 | # Merge frontend success rates into the new statistics DataFrame 101 | stats_df = pd.DataFrame({ 102 | 'Total Counts': total_counts, 103 | 'Success Counts': success_counts, 104 | 'Success Percentage': success_percentage 105 | }).reset_index().rename(columns={'index': 'Planner Frontend'}).fillna(0) 106 | 107 | frontend_success_rates = stats_df[['Planner Frontend', 'Success Percentage']].rename(columns={'Success Percentage': 'Frontend Success Rate'}) 108 | merged_stats_df = pd.merge(stats_df_fb, frontend_success_rates, left_on='planner_frontend', right_on='Planner Frontend', how='left') 109 | merged_stats_df = merged_stats_df.drop(columns=['Total Counts', 'Success Counts', 'Planner Frontend']) 110 | merged_stats_df['File'] = file_path.split('/')[-1] 111 | 112 | final_merged_stats_df = final_merged_stats_df.append(merged_stats_df, ignore_index=True) 113 | 114 | # Reorder rows and columns based on specified orders 115 | final_columns_order = ['File', 
'planner_frontend', 'planner_backend', 'Success Percentage', 'Frontend Success Rate'] 116 | final_merged_stats_df = final_merged_stats_df[final_columns_order] 117 | file_order = [ 118 | '/path/to/ECI_single_line_Real_09-13_11-30-25.csv', 119 | '/path/to/ECI_single_line_maze.csv', 120 | '/path/to/ECI_single_line_Obstacles_09-13_19-29-42.csv' 121 | ] 122 | 123 | # Pivot and reorder the final table 124 | pivot_columns = [] 125 | for file_name in file_order: 126 | file_name_short = file_name.split('/')[-1] 127 | for backend in final_merged_stats_df['planner_backend'].unique(): 128 | pivot_columns.append(f"{file_name_short}_{backend}") 129 | 130 | final_pivoted_stats_df = pd.DataFrame(index=final_merged_stats_df['planner_frontend'].unique(), columns=pivot_columns) 131 | for _, row in final_merged_stats_df.iterrows(): 132 | file_name = row['File'] 133 | frontend = row['planner_frontend'] 134 | backend = row['planner_backend'] 135 | success_percentage = row['Success Percentage'] 136 | frontend_success_rate = row['Frontend Success Rate'] 137 | 138 | column_name = f"{file_name}_{backend}" 139 | final_pivoted_stats_df.at[frontend, column_name] = f"{success_percentage:.1f}% ({frontend_success_rate:.1f}%)" 140 | 141 | final_pivoted_stats_df = final_pivoted_stats_df.fillna('-') 142 | specified_row_order = ['JPS (geometric)', 'RRT', 'MPL', 'SST', 'dispersion planner'] 143 | final_pivoted_stats_df_reordered = final_pivoted_stats_df.reindex(specified_row_order) 144 | 145 | print(final_pivoted_stats_df_reordered) 146 | 147 | 148 | # To output the table in LaTeX format with special styling, we need to identify the maximum percentages for each map 149 | def find_max_indices(df, columns): 150 | max_indices = {} 151 | for col in columns: 152 | try: 153 | max_value = df[col].str.extract(r'(\d+\.\d+)').astype(float).max() 154 | max_indices[col] = df[df[col].str.contains(f"{max_value:.1f}", na=False)].index.tolist() 155 | except: 156 | max_indices[col] = [] 157 | return max_indices 158 | 
159 | # Find the maximum percentage indices for each map 160 | max_indices_dict = find_max_indices(final_pivoted_stats_df_reordered, final_pivoted_stats_df_reordered.columns) 161 | 162 | # Generate LaTeX code 163 | latex_lines = final_pivoted_stats_df_reordered.to_latex(index=True, escape=False, na_rep='-').split('\n') 164 | 165 | # Apply special LaTeX formatting: bold for max percentages, thicker lines between different types of maps 166 | formatted_latex_lines = [] 167 | for i, line in enumerate(latex_lines): 168 | for col, max_indices in max_indices_dict.items(): 169 | for idx in max_indices: 170 | if final_pivoted_stats_df_reordered.index[idx] in line: 171 | start_pos = line.find(final_pivoted_stats_df_reordered.at[idx, col]) 172 | if start_pos != -1: 173 | end_pos = start_pos + len(final_pivoted_stats_df_reordered.at[idx, col]) 174 | line = line[:start_pos] + "\\textbf{" + line[start_pos:end_pos] + "}" + line[end_pos:] 175 | formatted_latex_lines.append(line) 176 | 177 | if "Real" in line or "Maze" in line: 178 | formatted_latex_lines.append("\\hline \\hline") 179 | 180 | formatted_latex_str = '\n'.join(formatted_latex_lines) 181 | formatted_latex_str 182 | -------------------------------------------------------------------------------- /scripts/eval_scripts/evaluate_4_other_metrics_tb2.py: -------------------------------------------------------------------------------- 1 | # Merge all the filtered DataFrames across files into a single DataFrame 2 | merged_df_across_files = pd.concat(filtered_data.values(), ignore_index=True) 3 | 4 | # Group by 'planner_frontend' and 'planner_backend', and calculate the mean for 'compute_time_frontend(ms)' and 'compute_time_backend(ms)' 5 | # for cases where 'success' is True or False 6 | stats_success_merged = merged_df_across_files[merged_df_across_files['success'] == True].groupby(['planner_frontend', 'planner_backend'])[['compute_time_frontend(ms)', 'compute_time_backend(ms)']].mean().reset_index() 7 | stats_failure_merged 
= merged_df_across_files[merged_df_across_files['success'] == False].groupby(['planner_frontend', 'planner_backend'])[['compute_time_frontend(ms)', 'compute_time_backend(ms)']].mean().reset_index() 8 | 9 | # Add a column to indicate success or failure 10 | stats_success_merged['Success'] = True 11 | stats_failure_merged['Success'] = False 12 | 13 | # Concatenate the success and failure DataFrames 14 | stats_combined_merged = pd.concat([stats_success_merged, stats_failure_merged], ignore_index=True) 15 | 16 | # Pivot the table to show 'Success' as columns and sum up frontend and backend times 17 | pivot_success_df = stats_combined_merged.pivot(index=['planner_frontend', 'planner_backend'], columns='Success', values=['compute_time_frontend(ms)', 'compute_time_backend(ms)']).reset_index() 18 | 19 | # Fill NaN values with 0 for better computation 20 | pivot_success_df = pivot_success_df.fillna(0) 21 | 22 | # Calculate the total compute time as the sum of frontend and backend times 23 | pivot_success_df['Total Time (True)'] = pivot_success_df[('compute_time_frontend(ms)', True)] + pivot_success_df[('compute_time_backend(ms)', True)] 24 | pivot_success_df['Total Time (False)'] = pivot_success_df[('compute_time_frontend(ms)', False)] + pivot_success_df[('compute_time_backend(ms)', False)] 25 | 26 | # Reformat the columns to show frontend time in parentheses 27 | pivot_success_df['Total Time (True)'] = pivot_success_df['Total Time (True)'].apply(lambda x: f"{x:.2f}") + " (" + pivot_success_df[('compute_time_frontend(ms)', True)].apply(lambda x: f"{x:.2f}") + ")" 28 | pivot_success_df['Total Time (False)'] = pivot_success_df['Total Time (False)'].apply(lambda x: f"{x:.2f}") + " (" + pivot_success_df[('compute_time_frontend(ms)', False)].apply(lambda x: f"{x:.2f}") + ")" 29 | 30 | # Drop the original compute time columns for more compact presentation 31 | pivot_success_df = pivot_success_df.drop(columns=['compute_time_frontend(ms)', 'compute_time_backend(ms)']) 32 | 33 | # 
Rename columns for better readability 34 | pivot_success_df.columns = ['Planner Frontend', 'Planner Backend', 'Total Time (Success=True)', 'Total Time (Success=False)'] 35 | 36 | 37 | frontend_order = ['JPS (geometric)', 'RRT', 'MPL', 'SST', 'dispersion planner'] 38 | 39 | # Reorder the DataFrame based on the specified frontend order 40 | pivot_success_df['Planner Frontend'] = pd.Categorical(pivot_success_df['Planner Frontend'], categories=frontend_order, ordered=True) 41 | pivot_success_df = pivot_success_df.sort_values(['Planner Frontend', 'Planner Backend']).reset_index(drop=True) 42 | 43 | 44 | # Group by 'planner_frontend' and 'planner_backend', and calculate the mean for 'compute_time_frontend(ms)' and 'compute_time_backend(ms)' 45 | # for cases where 'success_detail' is 0 or 2 46 | stats_success_detail_0 = merged_df_across_files[merged_df_across_files['success_detail'] == 0].groupby(['planner_frontend', 'planner_backend'])['compute_time_frontend(ms)'].mean().reset_index() 47 | stats_success_detail_2 = merged_df_across_files[merged_df_across_files['success_detail'] == 2].groupby(['planner_frontend', 'planner_backend'])['compute_time_backend(ms)'].mean().reset_index() 48 | 49 | # Rename columns for better readability 50 | stats_success_detail_0.rename(columns={'compute_time_frontend(ms)': 'Avg Frontend Time (Success Detail=0)'}, inplace=True) 51 | stats_success_detail_2.rename(columns={'compute_time_backend(ms)': 'Avg Backend Time (Success Detail=2)'}, inplace=True) 52 | 53 | # Merge these statistics with the previous pivot_success_df_reordered DataFrame 54 | pivot_success_df_detailed = pd.merge(pivot_success_df_reordered, stats_success_detail_0, how='left', left_on=['Planner Frontend', 'Planner Backend'], right_on=['planner_frontend', 'planner_backend']) 55 | pivot_success_df_detailed = pd.merge(pivot_success_df_detailed, stats_success_detail_2, how='left', left_on=['Planner Frontend', 'Planner Backend'], right_on=['planner_frontend', 'planner_backend']) 56 | 
57 | # Drop the duplicate columns after merge 58 | pivot_success_df_detailed.drop(columns=['planner_frontend_x', 'planner_backend_x', 'planner_frontend_y', 'planner_backend_y'], inplace=True) 59 | 60 | # Reformat the new columns to show as average time 61 | pivot_success_df_detailed['Avg Frontend Time (Success Detail=0)'] = pivot_success_df_detailed['Avg Frontend Time (Success Detail=0)'].apply(lambda x: f"{x:.2f}" if not pd.isna(x) else '-') 62 | pivot_success_df_detailed['Avg Backend Time (Success Detail=2)'] = pivot_success_df_detailed['Avg Backend Time (Success Detail=2)'].apply(lambda x: f"{x:.2f}" if not pd.isna(x) else '-') 63 | 64 | pivot_success_df_detailed 65 | 66 | 67 | 68 | # Load the new file containing tracking error and effort data 69 | tracking_error_effort_df = pd.read_csv('/mnt/data/ECI_single_line_tracking_error_effort.csv') 70 | 71 | # Filter only the successful rows 72 | tracking_error_effort_df_success = tracking_error_effort_df[tracking_error_effort_df['success']] 73 | 74 | # Group by 'planner_frontend' and 'planner_backend', and calculate the mean for 'tracking_error' and 'effort' 75 | tracking_error_effort_cols_to_average = ['tracking_error', 'effort'] 76 | tracking_error_effort_stats = tracking_error_effort_df_success.groupby(['planner_frontend', 'planner_backend'])[tracking_error_effort_cols_to_average].mean().reset_index() 77 | 78 | # Rename columns for better readability 79 | tracking_error_effort_stats.rename(columns={ 80 | 'tracking_error': 'Avg Tracking Error', 81 | 'effort': 'Avg Effort' 82 | }, inplace=True) 83 | 84 | # Merge these new average values with the previous success_stats_updated DataFrame 85 | success_stats_final = pd.merge(success_stats_updated, tracking_error_effort_stats, how='left', left_on=['planner_frontend', 'planner_backend'], right_on=['planner_frontend', 'planner_backend']) 86 | 87 | success_stats_final 88 | 89 | # Group by 'planner_frontend' and 'planner_backend', and calculate the mean for 'tracking_error(m) 
avg' and 'traj_effort(rpm)' 90 | tracking_error_effort_cols_to_average = ['tracking_error(m) avg', 'traj_effort(rpm)'] 91 | tracking_error_effort_stats = tracking_error_effort_df_success.groupby(['planner_frontend', 'planner_backend'])[tracking_error_effort_cols_to_average].mean().reset_index() 92 | 93 | # Rename columns for better readability 94 | tracking_error_effort_stats.rename(columns={ 95 | 'tracking_error(m) avg': 'Avg Tracking Error (m)', 96 | 'traj_effort(rpm)': 'Avg Effort (rpm)' 97 | }, inplace=True) 98 | 99 | # Merge these new average values with the previous success_stats_updated DataFrame 100 | success_stats_final = pd.merge(success_stats_updated, tracking_error_effort_stats, how='left', left_on=['planner_frontend', 'planner_backend'], right_on=['planner_frontend', 'planner_backend']) 101 | 102 | success_stats_final -------------------------------------------------------------------------------- /scripts/eval_scripts/evaluate_5_effort.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pickle 3 | import numpy as np 4 | import pandas as pd 5 | import matplotlib.pyplot as plt 6 | import seaborn as sns 7 | 8 | # load csv as pandas dataframe 9 | # df_summary = pd.read_csv('/home/yifei/ws/src/kr_autonomous_flight/kr_ilqr_optimizer/scripts/res/9.14.23/ECI_single_line_09-14_16-33-40.csv') 10 | 11 | # #print columns names 12 | # print(df_summary.columns) 13 | 14 | # #take column traj_time(s) and convert to numpy array 15 | # traj_time = df_summary['map_index','traj_time(s)'].to_numpy() 16 | # print(traj_time) 17 | # print(traj_time.shape) 18 | 19 | # List all the .pkl files (replace with the actual path where the files are stored) 20 | directory = '.' 
21 | file_names = [f for f in os.listdir(directory) if f.endswith('.pkl')] 22 | 23 | #create 2d numpy array, 1d for method, the other for trial number 24 | result = np.zeros((100, 2)) 25 | result_idx = np.zeros((100, 2),dtype=int) 26 | 27 | #in 3rd column, input trajecotry time from df_summary 28 | 29 | 30 | # Sort the file names if needed 31 | file_names.sort() 32 | method_names = ['iLQR(Altro)', 'gcopter'] 33 | 34 | data_list = [] 35 | # Loop through each file and load its content 36 | for i,file_name in enumerate(file_names): 37 | # Split the filename to extract the trial number and method 38 | trial_number, method = file_name.split('_')[1], file_name.split('_')[2].split('.')[0] 39 | assert(method in method_names) 40 | 41 | #find which index this method is 42 | method_idx = method_names.index(method) 43 | 44 | with open(os.path.join(directory, file_name), 'rb') as file: 45 | # Unpickle the data from file 46 | data = pickle.load(file) 47 | 48 | result[int(trial_number), method_idx] = sum(data) 49 | result_idx[int(trial_number), method_idx] = i 50 | data_list.append(data) 51 | plt.figure(figsize=(12, 12)) 52 | ax = plt.gca() 53 | for i in range(100): 54 | ax.clear() 55 | print(result[i,:]) 56 | print(result_idx[i,:]) 57 | if result[i,0] != 0 and result[i,1] != 0: 58 | plt.scatter(range(len(data_list[result_idx[i,0]])),data_list[result_idx[i,0]], label='Altro') 59 | plt.scatter(range(len(data_list[result_idx[i,1]])),data_list[result_idx[i,1]], label='gcopter') 60 | # print (data[result_idx[i,0]]) 61 | #set legend 62 | plt.legend() 63 | 64 | plt.show() 65 | # input("Press Enter to continue...") 66 | 67 | 68 | #convert to pandas dataframe 69 | df = pd.DataFrame(result, columns=method_names) 70 | 71 | #get rid of rows that have any zeros 72 | df = df[(df.T != 0).all()] 73 | 74 | print(df) 75 | 76 | 77 | #boxplot of the data 78 | # df.boxplot() 79 | print(df.describe()) 80 | -------------------------------------------------------------------------------- 
/scripts/eval_scripts/gpt_prompts.md: -------------------------------------------------------------------------------- 1 | This is the data used to evaluate some planners. 2 | We have different types of frontend planner and backend planner, and we are interested in evaluating their relative performance. Each row is a single method running on a single map. I would like to get the success rate of the data grouped by frontend planner and backend planner. 3 | 4 | 5 | For the real environment, we want to first group by map index, and get rid of those maps where the frontend planner JPS failed. 6 | 7 | For maze environments, we know there is a solution from start to goal, therefore we can see if the JPS planner is good 8 | 9 | 10 | 11 | 12 | -------------------------------------------------------------------------------- /scripts/eval_scripts/test_legend.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | from mpl_toolkits.mplot3d import Axes3D 3 | import pandas as pd 4 | 5 | # Sample DataFrame creation for demonstration purposes 6 | # Replace this with your actual 'df_real' 7 | df_real = pd.DataFrame({ 8 | 'density_index': [1, 2, 3], 9 | 'clutter_index': [1, 2, 3], 10 | 'structure_index': [1, 2, 3], 11 | 'labels': [0, 1, 2] 12 | }) 13 | 14 | fig = plt.figure() 15 | ax = fig.add_subplot(111, projection='3d') 16 | 17 | sc = ax.scatter(df_real['density_index'], df_real['clutter_index'], df_real['structure_index'], c=df_real['labels'], cmap='tab10', s=10) 18 | 19 | # Create a legend 20 | legend1 = ax.legend(*sc.legend_elements(), title="Classes") 21 | ax.add_artist(legend1) 22 | 23 | plt.show() -------------------------------------------------------------------------------- /scripts/gpt_result/Untitled 1.odp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KumarRobotics/kr_ilqr_optimizer/43db5310c09fd826844bbba7ca82754a8685bdc0/scripts/gpt_result/Untitled 
1.odp -------------------------------------------------------------------------------- /src/finitediff.cpp: -------------------------------------------------------------------------------- 1 | // Functions to compute gradients using finite difference. 2 | // Based on the functions in https://github.com/PatWie/CppNumericalSolvers 3 | // and rewritten to use Eigen 4 | #include "kr_ilqr_optimizer/finitediff.hpp" 5 | 6 | #include 7 | #include 8 | 9 | #include 10 | namespace fd { 11 | 12 | // The external coefficients, c1, in c1 * f(x + c2). 13 | // See: https://en.wikipedia.org/wiki/Finite_difference_coefficient 14 | std::vector get_external_coeffs(const AccuracyOrder accuracy) 15 | { 16 | switch (accuracy) { 17 | case SECOND: 18 | return { { 1, -1 } }; 19 | case FOURTH: 20 | return { { 1, -8, 8, -1 } }; 21 | case SIXTH: 22 | return { { -1, 9, -45, 45, -9, 1 } }; 23 | case EIGHTH: 24 | return { { 3, -32, 168, -672, 672, -168, 32, -3 } }; 25 | default: 26 | throw std::invalid_argument("invalid accuracy order"); 27 | } 28 | } 29 | 30 | // The internal coefficients, c2, in c1 * f(x + c2). 31 | // See: https://en.wikipedia.org/wiki/Finite_difference_coefficient 32 | std::vector get_interior_coeffs(const AccuracyOrder accuracy) 33 | { 34 | switch (accuracy) { 35 | case SECOND: 36 | return { { 1, -1 } }; 37 | case FOURTH: 38 | return { { -2, -1, 1, 2 } }; 39 | case SIXTH: 40 | return { { -3, -2, -1, 1, 2, 3 } }; 41 | case EIGHTH: 42 | return { { -4, -3, -2, -1, 1, 2, 3, 4 } }; 43 | default: 44 | throw std::invalid_argument("invalid accuracy order"); 45 | } 46 | } 47 | 48 | // The denominators of the finite difference. 
49 | double get_denominator(const AccuracyOrder accuracy) 50 | { 51 | switch (accuracy) { 52 | case SECOND: 53 | return 2; 54 | case FOURTH: 55 | return 12; 56 | case SIXTH: 57 | return 60; 58 | case EIGHTH: 59 | return 840; 60 | default: 61 | throw std::invalid_argument("invalid accuracy order"); 62 | } 63 | } 64 | 65 | // Compute the gradient of a function at a point using finite differences. 66 | void finite_gradient( 67 | const Eigen::Ref& x, 68 | const std::function& f, 69 | Eigen::VectorXd& grad, 70 | const AccuracyOrder accuracy, 71 | const double eps) 72 | { 73 | const std::vector external_coeffs = get_external_coeffs(accuracy); 74 | const std::vector internal_coeffs = get_interior_coeffs(accuracy); 75 | 76 | assert(external_coeffs.size() == internal_coeffs.size()); 77 | const size_t inner_steps = internal_coeffs.size(); 78 | 79 | const double denom = get_denominator(accuracy) * eps; 80 | 81 | grad.setZero(x.rows()); 82 | 83 | Eigen::VectorXd x_mutable = x; 84 | for (size_t i = 0; i < x.rows(); i++) { 85 | for (size_t ci = 0; ci < inner_steps; ci++) { 86 | x_mutable[i] += internal_coeffs[ci] * eps; 87 | grad[i] += external_coeffs[ci] * f(x_mutable); 88 | x_mutable[i] = x[i]; 89 | } 90 | grad[i] /= denom; 91 | } 92 | } 93 | 94 | 95 | 96 | // void finite_jacobian_quad( quadModel* model, 97 | // const Eigen::Ref& x, 98 | // const Eigen::Ref& u, 99 | // const std::function& f, 100 | // Eigen::MatrixXd& jac, 101 | // const AccuracyOrder accuracy, 102 | // const double eps) 103 | // { 104 | // // auto myOneArgFunction = [this, u_vec](const Eigen::VectorXd x_in) { f_quad( x_in, u_vec); }; 105 | 106 | // const std::vector external_coeffs = get_external_coeffs(accuracy); 107 | // const std::vector internal_coeffs = get_interior_coeffs(accuracy); 108 | 109 | // assert(external_coeffs.size() == internal_coeffs.size()); 110 | // const size_t inner_steps = internal_coeffs.size(); 111 | 112 | // const double denom = get_denominator(accuracy) * eps; 113 | 114 | // 
jac.setZero(f(x).rows(), x.rows()); 115 | 116 | // Eigen::VectorXd x_mutable = x; 117 | // for (size_t i = 0; i < x.rows(); i++) { 118 | // for (size_t ci = 0; ci < inner_steps; ci++) { 119 | // x_mutable[i] += internal_coeffs[ci] * eps; 120 | // jac.col(i) += external_coeffs[ci] * model->f_quad(x_mutable, u); 121 | // x_mutable[i] = x[i]; 122 | // } 123 | // jac.col(i) /= denom; 124 | // } 125 | // } 126 | 127 | 128 | void finite_jacobian( 129 | const Eigen::Ref& x, 130 | const std::function& f, 131 | Eigen::MatrixXd& jac, 132 | const AccuracyOrder accuracy, 133 | const double eps) 134 | { 135 | const std::vector external_coeffs = get_external_coeffs(accuracy); 136 | const std::vector internal_coeffs = get_interior_coeffs(accuracy); 137 | 138 | assert(external_coeffs.size() == internal_coeffs.size()); 139 | const size_t inner_steps = internal_coeffs.size(); 140 | 141 | const double denom = get_denominator(accuracy) * eps; 142 | 143 | jac.setZero(f(x).rows(), x.rows()); 144 | 145 | Eigen::VectorXd x_mutable = x; 146 | for (size_t i = 0; i < x.rows(); i++) { 147 | for (size_t ci = 0; ci < inner_steps; ci++) { 148 | x_mutable[i] += internal_coeffs[ci] * eps; 149 | jac.col(i) += external_coeffs[ci] * f(x_mutable); 150 | x_mutable[i] = x[i]; 151 | } 152 | jac.col(i) /= denom; 153 | } 154 | } 155 | 156 | void finite_hessian( 157 | const Eigen::Ref& x, 158 | const std::function& f, 159 | Eigen::MatrixXd& hess, 160 | const AccuracyOrder accuracy, 161 | const double eps) 162 | { 163 | const std::vector external_coeffs = get_external_coeffs(accuracy); 164 | const std::vector internal_coeffs = get_interior_coeffs(accuracy); 165 | 166 | assert(external_coeffs.size() == internal_coeffs.size()); 167 | const size_t inner_steps = internal_coeffs.size(); 168 | 169 | double denom = get_denominator(accuracy) * eps; 170 | denom *= denom; 171 | 172 | hess.setZero(x.rows(), x.rows()); 173 | 174 | Eigen::VectorXd x_mutable = x; 175 | for (size_t i = 0; i < x.rows(); i++) { 176 | for 
(size_t j = i; j < x.rows(); j++) { 177 | for (size_t ci = 0; ci < inner_steps; ci++) { 178 | for (size_t cj = 0; cj < inner_steps; cj++) { 179 | x_mutable[i] += internal_coeffs[ci] * eps; 180 | x_mutable[j] += internal_coeffs[cj] * eps; 181 | hess(i, j) += external_coeffs[ci] * external_coeffs[cj] 182 | * f(x_mutable); 183 | x_mutable[j] = x[j]; 184 | x_mutable[i] = x[i]; 185 | } 186 | } 187 | hess(i, j) /= denom; 188 | hess(j, i) = hess(i, j); // The hessian is symmetric 189 | } 190 | } 191 | } 192 | 193 | // Compare if two gradients are close enough. 194 | bool compare_gradient( 195 | const Eigen::Ref& x, 196 | const Eigen::Ref& y, 197 | const double test_eps, 198 | const std::string& msg) 199 | { 200 | assert(x.rows() == y.rows()); 201 | 202 | bool same = true; 203 | for (long i = 0; i < x.rows(); i++) { 204 | double scale = 205 | std::max(std::max(std::abs(x[i]), std::abs(y[i])), double(1.0)); 206 | double abs_diff = std::abs(x[i] - y[i]); 207 | 208 | if (abs_diff > test_eps * scale) { 209 | spdlog::debug( 210 | "{} eps={:.3e} r={} x={:.3e} y={:.3e} |x-y|={:.3e} " 211 | "|x-y|/|x|={:.3e} |x-y|/|y|={:3e}", 212 | msg, test_eps, i, x(i), y(i), abs_diff, abs_diff / abs(x(i)), 213 | abs_diff / std::abs(y(i))); 214 | same = false; 215 | } 216 | } 217 | return same; 218 | } 219 | 220 | // Compare if two jacobians are close enough. 
221 | bool compare_jacobian( 222 | const Eigen::Ref& x, 223 | const Eigen::Ref& y, 224 | const double test_eps, 225 | const std::string& msg) 226 | { 227 | assert(x.rows() == y.rows()); 228 | assert(x.cols() == y.cols()); 229 | 230 | bool same = true; 231 | for (long i = 0; i < x.rows(); i++) { 232 | for (long j = 0; j < x.cols(); j++) { 233 | double scale = std::max( 234 | std::max(std::abs(x(i, j)), std::abs(y(i, j))), double(1.0)); 235 | 236 | double abs_diff = std::abs(x(i, j) - y(i, j)); 237 | 238 | if (abs_diff > test_eps * scale) { 239 | spdlog::debug( 240 | "{} eps={:.3e} r={} c={} x={:.3e} y={:.3e} " 241 | "|x-y|={:.3e} |x-y|/|x|={:.3e} |x-y|/|y|={:3e}", 242 | msg, test_eps, i, j, x(i, j), y(i, j), abs_diff, 243 | abs_diff / std::abs(x(i, j)), abs_diff / std::abs(y(i, j))); 244 | same = false; 245 | } 246 | } 247 | } 248 | return same; 249 | } 250 | 251 | // Compare if two hessians are close enough. 252 | bool compare_hessian( 253 | const Eigen::Ref& x, 254 | const Eigen::Ref& y, 255 | const double test_eps, 256 | const std::string& msg) 257 | { 258 | return compare_jacobian(x, y, test_eps, msg); 259 | } 260 | 261 | // Flatten the matrix rowwise 262 | Eigen::VectorXd flatten(const Eigen::Ref& X) 263 | { 264 | Eigen::VectorXd x(X.size()); 265 | for (int i = 0; i < X.rows(); i++) { 266 | for (int j = 0; j < X.cols(); j++) { 267 | x(i * X.cols() + j) = X(i, j); 268 | } 269 | } 270 | return x; 271 | } 272 | 273 | // Unflatten rowwise 274 | Eigen::MatrixXd unflatten(const Eigen::Ref& x, int dim) 275 | { 276 | assert(x.size() % dim == 0); 277 | Eigen::MatrixXd X(x.size() / dim, dim); 278 | for (int i = 0; i < x.size(); i++) { 279 | X(i / dim, i % dim) = x(i); 280 | } 281 | return X; 282 | } 283 | 284 | } // namespace fd -------------------------------------------------------------------------------- /src/quaternion_utils.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by Zixin Zhang on 03/17/23. 
3 | // Copyright (c) 2022 Robotic Exploration Lab. All rights reserved. 4 | // 5 | 6 | #include "kr_ilqr_optimizer/quaternion_utils.hpp" 7 | #include 8 | 9 | namespace altro { 10 | Eigen::Matrix3d skew(Eigen::Vector3d vec) { 11 | Eigen::Matrix3d skew; 12 | skew << 0, -vec[2], vec[1], vec[2], 0, -vec[0], -vec[1], vec[0], 0; 13 | return skew; 14 | } 15 | 16 | Eigen::Vector4d cayley_map(Eigen::Vector3d phi) { 17 | Eigen::Vector4d phi_quat; 18 | phi_quat << 1, phi[0], phi[1], phi[2]; 19 | return 1 / (sqrt(1 + phi.norm() * phi.norm())) * phi_quat; 20 | } 21 | 22 | Eigen::Vector3d inv_cayley_map(Eigen::Vector4d q) { return q.tail(3) / q[0]; } 23 | 24 | Eigen::Vector4d quat_conj(Eigen::Vector4d q) { 25 | Eigen::Matrix4d T; 26 | T << 1, 0, 0, 0, 0, -1, 0, 0, 0, 0, -1, 0, 0, 0, 0, -1; 27 | return T * q; 28 | } 29 | 30 | Eigen::Matrix4d T(){ 31 | Eigen::Matrix4d T = -Eigen::Matrix4d::Identity(); 32 | T(0, 0) = 1; 33 | return T; 34 | } 35 | 36 | Eigen::MatrixXd H(){ 37 | Eigen::Matrix H_con; 38 | H_con << 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1; 39 | return H_con; 40 | } 41 | 42 | Eigen::MatrixXd dQdq(Eigen::Vector4d q){ 43 | double qw = q[0]; 44 | double qx = q[1]; 45 | double qy = q[2]; 46 | double qz = q[3]; 47 | Eigen::Matrix dR_dq; 48 | //row major 49 | dR_dq(0, 0) = 0; 50 | dR_dq(0, 1) = 0; 51 | dR_dq(0, 2) = -4 * qy; 52 | dR_dq(0, 3) = -4 * qz; 53 | 54 | dR_dq(1, 0) = -2 * qz; 55 | dR_dq(1, 1) = 2 * qy; 56 | dR_dq(1, 2) = 2 * qx; 57 | dR_dq(1, 3) = -2 * qw; 58 | 59 | dR_dq(2, 0) = 2 * qy; 60 | dR_dq(2, 1) = 2 * qz; 61 | dR_dq(2, 2) = 2 * qw; 62 | dR_dq(2, 3) = 2 * qx; 63 | 64 | dR_dq(3, 0) = 2 * qz; 65 | dR_dq(3, 1) = 2 * qy; 66 | dR_dq(3, 2) = 2 * qx; 67 | dR_dq(3, 3) = 2 * qw; 68 | 69 | dR_dq(4, 0) = 0; 70 | dR_dq(4, 1) = -4 * qx; 71 | dR_dq(4, 2) = 0; 72 | dR_dq(4, 3) = -4 * qz; 73 | 74 | dR_dq(5, 0) = -2 * qx; 75 | dR_dq(5, 1) = -2 * qw; 76 | dR_dq(5, 2) = 2 * qz; 77 | dR_dq(5, 3) = 2 * qy; 78 | 79 | dR_dq(6, 0) = -2 * qy; 80 | dR_dq(6, 1) = 2 * qz; 81 | dR_dq(6, 2) = 
-2 * qw; 82 | dR_dq(6, 3) = 2 * qx; 83 | 84 | dR_dq(7, 0) = 2 * qx; 85 | dR_dq(7, 1) = 2 * qw; 86 | dR_dq(7, 2) = 2 * qz; 87 | dR_dq(7, 3) = 2 * qy; 88 | 89 | dR_dq(8, 0) = 0; 90 | dR_dq(8, 1) = -4 * qx; 91 | dR_dq(8, 2) = -4 * qy; 92 | dR_dq(8, 3) = 0; 93 | return dR_dq; 94 | } 95 | 96 | Eigen::Matrix4d L(Eigen::Vector4d q) { 97 | Eigen::Matrix4d L; 98 | L(0, 0) = q[0]; 99 | L.block<1, 3>(0, 1) = -q.tail(3).transpose(); 100 | L.block<3, 1>(1, 0) = q.tail(3); 101 | L.block<3, 3>(1, 1) = q[0] * Eigen::Matrix3d::Identity() + skew(q.tail(3)); 102 | // std::cout << "L: success" << std::endl; 103 | return L; 104 | } 105 | 106 | Eigen::Matrix4d R(Eigen::Vector4d q) { 107 | Eigen::Matrix4d R; 108 | R(0, 0) = q[0]; 109 | R.block<1, 3>(0, 1) = -q.tail(3).transpose(); 110 | R.block<3, 1>(1, 0) = q.tail(3); 111 | R.block<3, 3>(1, 1) = q[0] * Eigen::Matrix3d::Identity() - skew(q.tail(3)); 112 | return R; 113 | } 114 | 115 | // T = Diagonal([1; -ones(3)]) 116 | // H = [zeros(1,3); I] 117 | // function qtoQ(q) 118 | // return H'*T*L(q)*T*L(q)*H 119 | // end 120 | Eigen::Matrix3d Q(Eigen::Vector4d q) { 121 | return H().transpose()* T() * L(q) *T() * L(q) * H(); 122 | } 123 | 124 | Eigen::MatrixXd G(Eigen::Vector4d q) { 125 | return L(q) * H(); 126 | } 127 | 128 | Eigen::Vector4d quat_mult(Eigen::Vector4d q1, Eigen::Vector4d q2) { return L(q1) * q2; } 129 | 130 | void removeRow(Eigen::MatrixXd& matrix, unsigned int rowToRemove) { 131 | unsigned int numRows = matrix.rows() - 1; 132 | unsigned int numCols = matrix.cols(); 133 | 134 | if (rowToRemove < numRows) 135 | matrix.block(rowToRemove, 0, numRows - rowToRemove, numCols) = 136 | matrix.block(rowToRemove + 1, 0, numRows - rowToRemove, numCols); 137 | 138 | matrix.conservativeResize(numRows, numCols); 139 | } 140 | 141 | void removeColumn(Eigen::MatrixXd& matrix, unsigned int colToRemove) { 142 | unsigned int numRows = matrix.rows(); 143 | unsigned int numCols = matrix.cols() - 1; 144 | 145 | if (colToRemove < numCols) 146 | 
matrix.block(0, colToRemove, numRows, numCols - colToRemove) = 147 | matrix.block(0, colToRemove + 1, numRows, numCols - colToRemove); 148 | 149 | matrix.conservativeResize(numRows, numCols); 150 | } 151 | 152 | Eigen::Matrix removeElement(const Eigen::Matrix& vec, 153 | unsigned int idxToRemove) { 154 | unsigned int length = vec.rows() - 1; 155 | Eigen::Matrix vec_; 156 | vec_ = vec; 157 | 158 | if (idxToRemove < length) { 159 | vec_.block(idxToRemove, 0, length - idxToRemove, 1) = 160 | vec_.block(idxToRemove + 1, 0, length - idxToRemove, 1); 161 | } 162 | vec_.conservativeResize(length, 1); 163 | 164 | return vec_; 165 | } 166 | } // namespace altro 167 | -------------------------------------------------------------------------------- /src/run_sampler.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "kr_ilqr_optimizer/spline_trajectory_sampler.hpp" 4 | #include "ros/ros.h" 5 | int main(int argc, char** argv) { 6 | /** 7 | * The ros::init() function needs to see argc and argv so that it can perform 8 | * any ROS arguments and name remapping that were provided at the command 9 | * line. For programmatic remappings you can use a different version of init() 10 | * which takes remappings directly, but for most command-line programs, 11 | * passing argc and argv is the easiest way to do it. The third argument to 12 | * init() is the name of the node. 13 | * 14 | * You must call one of the versions of ros::init() before using any other 15 | * part of the ROS system. 16 | */ 17 | ros::init(argc, argv, "spline_traj_sampler"); 18 | 19 | /** 20 | * NodeHandle is the main access point to communications with the ROS system. 21 | * The first NodeHandle constructed will fully initialize this node, and the 22 | * last NodeHandle destructed will close down the node. 
23 | */ 24 | ros::NodeHandle nh; 25 | // std::shared_ptr visual_; 26 | SplineTrajSampler sampler(nh, true, true, true, 40); 27 | // sampler.mpc_solver->SetUp(); 28 | std::cout << "sampler setup Complete" << std::endl; 29 | ros::spin(); 30 | return 0; 31 | } -------------------------------------------------------------------------------- /src/spline_trajectory_sampler.cpp: -------------------------------------------------------------------------------- 1 | #include "kr_ilqr_optimizer/spline_trajectory_sampler.hpp" 2 | 3 | #include 4 | 5 | #include "altro/altro.hpp" 6 | #include "kr_ilqr_optimizer/finitediff.hpp" 7 | #include "kr_ilqr_optimizer/test_utils.hpp" 8 | 9 | // #include "spline_trajectory_sampler.hpp" 10 | using Vector = Eigen::Matrix; 11 | 12 | // helpers 13 | // Function Converted from Julia Code 14 | std::pair pt_normal_to_halfspace( 15 | const Eigen::MatrixXd& hPoly) { 16 | int size_n = hPoly.cols(); 17 | Eigen::MatrixXd A = Eigen::MatrixXd::Zero(size_n, 3); 18 | Eigen::VectorXd b = Eigen::VectorXd::Zero(size_n); 19 | 20 | for (int ii = 0; ii < size_n; ++ii) { 21 | Eigen::Vector3d p = hPoly.block<3, 1>(0, ii); 22 | Eigen::Vector3d n = hPoly.block<3, 1>(3, ii); 23 | double c = n.dot(p); 24 | 25 | A.row(ii) = n; 26 | b(ii) = c; 27 | } 28 | 29 | return std::make_pair(A, b); 30 | } 31 | 32 | Eigen::Vector3d SplineTrajSampler::compute_ref_inputs( 33 | const Eigen::Vector3d& pos, 34 | const Eigen::Vector3d& vel, 35 | const Eigen::Vector3d& acc, 36 | const Eigen::Vector3d& jerk, 37 | const Eigen::Vector3d& snap, 38 | const Eigen::Vector3d& yaw_dyaw_ddyaw, 39 | double& thrust, 40 | geometry_msgs::Point& moment, 41 | Eigen::Quaterniond& q_return, 42 | Eigen::Vector3d& omega) { 43 | // std::cout << "Computing ref inputs" << std::endl; 44 | // std::cout << mpc_solver->model_ptr->mass_ << std::endl; 45 | // Eigen::Matrix3d inertia_; 46 | // double mass_; 47 | // double g_; // TODO: Bad place to define this 48 | // Desired force vector. 
49 | Eigen::Vector3d t = acc + Eigen::Vector3d(0, 0, g_); 50 | Eigen::Vector3d b3 = t.normalized(); 51 | Eigen::Vector3d F_des = mass_ * t; 52 | 53 | // Desired thrust is force projected onto b3 axis. 54 | double u1 = F_des.dot(b3); 55 | 56 | // Desired orientation to obtain force vector. 57 | Eigen::Vector3d b3_des = F_des.normalized(); 58 | double yaw_des = yaw_dyaw_ddyaw[0]; 59 | Eigen::Vector3d c1_des(std::cos(yaw_des), std::sin(yaw_des), 0); 60 | Eigen::Vector3d b2_des = (b3_des.cross(c1_des)).normalized(); 61 | Eigen::Vector3d b1_des = b2_des.cross(b3_des); 62 | Eigen::Matrix3d R_des; 63 | R_des << b1_des, b2_des, b3_des; 64 | 65 | q_return = R_des; 66 | 67 | Eigen::Matrix3d R = R_des; // assume we have perfect tracking on rotation 68 | 69 | double dot_u1 = mass_ * b3.dot(jerk); 70 | Eigen::Vector3d hw = 71 | mass_ / u1 * 72 | (jerk); // removing - dot_u1 * b3 since thomas' does not have it 73 | double p = -hw.dot(b2_des); 74 | double q = hw.dot(b1_des); 75 | 76 | Eigen::Vector3d e3(0, 0, 1); 77 | 78 | double r = ((1 - std::pow(e3.dot(b1_des), 2)) * yaw_dyaw_ddyaw[1] - 79 | e3.dot(b2_des) * q) / 80 | e3.dot(b3_des); 81 | Eigen::Vector3d Omega(p, q, r); 82 | 83 | Eigen::Matrix mat; 84 | mat << -b2_des, b1_des; 85 | Eigen::Vector2d pq_dot = 86 | (mass_ / u1 * mat * snap - 2 * dot_u1 / u1 * Eigen::Vector2d(p, q) + 87 | r * Eigen::Vector2d(q, -p)); 88 | 89 | Eigen::Matrix3d b_dot = R * skew(Omega); 90 | Eigen::Vector3d b1_dot = b_dot.col(0); 91 | Eigen::Vector3d b2_dot = b_dot.col(1); 92 | Eigen::Vector3d b3_dot = b_dot.col(2); 93 | 94 | double r_dot = 95 | -(e3.dot(b3_dot) * r + e3.dot(b2_dot) * q + e3.dot(b2_des) * pq_dot[1] + 96 | 2 * e3.dot(b1_des) * e3.dot(b1_dot) * yaw_dyaw_ddyaw[1] + 97 | (std::pow(e3.dot(b1_des), 2) - 1) * yaw_dyaw_ddyaw[2]) / 98 | e3.dot(b3_des); 99 | 100 | Eigen::Vector3d Alpha(pq_dot[0], pq_dot[1], r_dot); 101 | Eigen::Vector3d u2 = inertia_ * Alpha + Omega.cross(inertia_ * Omega); 102 | 103 | // OLD from mellinger 104 | // 
Eigen::Vector3d w_des(0, 0, yaw_dyaw_ddyaw[1]); 105 | // double r = w_des.dot(b3_des); 106 | // Eigen::Vector3d Omega(p, q, r); 107 | 108 | omega = Omega; 109 | 110 | // Eigen::Vector3d wwu1b3 = Omega.cross(Omega.cross(u1 * b3)); 111 | // double ddot_u1 = b3.dot(mass_ * snap) - b3.dot(wwu1b3); 112 | // Eigen::Vector3d ha = 113 | // 1.0 / u1 * 114 | // (mass_ * snap - ddot_u1 * b3 - 2 * Omega.cross(dot_u1 * b3) - wwu1b3); 115 | // double p_dot = -ha.dot(b2_des); 116 | // double q_dot = ha.dot(b1_des); 117 | // double r_dot = yaw_dyaw_ddyaw[2] * b3_des.dot(Eigen::Vector3d(0, 0, 1.0)); 118 | // Eigen::Vector3d Alpha(p_dot, q_dot, r_dot); 119 | 120 | Eigen::Vector3d TM; 121 | unpack(moment, u2); 122 | thrust = u1; 123 | return u2; 124 | } 125 | 126 | kr_planning_msgs::TrajectoryDiscretized SplineTrajSampler::publish_altro( 127 | const Eigen::VectorXd& start_state, 128 | std::vector pos, 129 | std::vector vel, 130 | std::vector acc, 131 | std::vector yaw_ref, 132 | std::vector thrust, 133 | std::vector moment, 134 | double dt, 135 | std::vector q_ref, 136 | std::vector w_ref, 137 | const std::vector>& h_polys, 138 | const Eigen::VectorXd& allo_ts, 139 | std_msgs::Header header) { 140 | std::vector X_sim; // for return 141 | std::vector U_sim; // for return 142 | std::vector t_sim; // for return 143 | ROS_WARN("[iLQR Optimizer]: Solving"); 144 | int N_controls = pos.size() - 1; 145 | uint solver_status = mpc_solver->solve_problem(start_state, 146 | pos, 147 | vel, 148 | acc, 149 | yaw_ref, 150 | thrust, 151 | moment, 152 | dt, 153 | q_ref, 154 | w_ref, 155 | h_polys, 156 | allo_ts, 157 | X_sim, 158 | U_sim, 159 | t_sim); 160 | // DEAL WITH VIZ 161 | visualization_msgs::Marker viz_msg; 162 | viz_msg.type = 4; 163 | viz_msg.header = header; 164 | viz_msg.header.stamp = ros::Time::now(); 165 | viz_msg.scale.x = 0.2; 166 | viz_msg.color.a = 1.0; 167 | // DEAL WITH Actual MSG 168 | kr_planning_msgs::TrajectoryDiscretized traj; 169 | traj.header = header; // TODO: ASK LAURA 
170 | traj.header.stamp = ros::Time::now(); 171 | traj.N_ctrl = N_controls; 172 | geometry_msgs::Point pos_t; 173 | geometry_msgs::Point vel_t; 174 | geometry_msgs::Point acc_t; 175 | geometry_msgs::Point moment_t; 176 | Eigen::Vector3d moment_t_v = 177 | Eigen::Vector3d::Zero(); // TODO:(Yifei) return proper moment 178 | unpack(moment_t, moment_t_v); 179 | opt_traj_.clear(); 180 | 181 | if (solver_status == 0) { 182 | // no state here, need to calc from force, stay at 0 for now 183 | for (int i = 0; i < N_controls + 1; i++) { 184 | // deal with last index outside the loop 185 | // unpack points . THIS CODE IS SIMILAR TO NEXT SECTION FOR UNPACKING, 186 | // write a function! 187 | Eigen::Quaterniond q_unpack(X_sim[i][3], 188 | X_sim[i][4], 189 | X_sim[i][5], 190 | X_sim[i][6]); // not eigen quat 191 | Eigen::Vector3d a_body; 192 | Eigen::Vector3d v_body; 193 | 194 | a_body << 0.0, 0.0, 195 | mpc_solver->model_ptr->kf_ * U_sim[i].sum() / 196 | mpc_solver->model_ptr->mass_; 197 | 198 | auto a_world = (q_unpack * a_body) - Eigen::Vector3d(0, 0, 9.81); 199 | // std::cout << "a_world = " << a_world << std::endl; 200 | // std::cout << "a_world = " << a_world << std::endl; 201 | 202 | v_body << X_sim[i][7], X_sim[i][8], X_sim[i][9]; 203 | auto v_world = q_unpack * v_body; 204 | 205 | auto RPY = q_unpack.toRotationMatrix().eulerAngles(0, 1, 2); 206 | unpack(pos_t, X_sim[i]); // x y z q1 q2 q3 q4 v1 v2 v3 w1 w2 w3 207 | unpack(vel_t, v_world); 208 | unpack(acc_t, a_world); 209 | Eigen::VectorXd pva_temp(9); 210 | pva_temp << X_sim[i].segment<3>(0), v_world, a_world; 211 | opt_traj_.push_back(pva_temp); 212 | // DEAL WITH VIZ 213 | viz_msg.points.push_back(pos_t); 214 | // DEAL WITH Actual MSG 215 | traj.pos.push_back(pos_t); 216 | traj.vel.push_back(vel_t); 217 | traj.acc.push_back(acc_t); 218 | traj.yaw.push_back(RPY[2]); 219 | if (i != N_controls) { 220 | traj.thrust.push_back(U_sim[i].sum()); 221 | traj.moment.push_back(moment_t); // 0 222 | } 223 | traj.dt = dt; 224 | 
// initial attitude and initial omega not used 225 | } 226 | } 227 | // DEAL WITH VIZ 228 | if (publish_viz_) opt_viz_pub_.publish(viz_msg); 229 | // DEAL WITH Actual MSG 230 | if (publish_optimized_traj_) opt_traj_pub_.publish(traj); 231 | 232 | if (solver_status == 0) 233 | return traj; 234 | else 235 | return kr_planning_msgs::TrajectoryDiscretized(); 236 | } 237 | 238 | // if compute altro is on, then we will compute altro, otherwise we will just 239 | // publish the discretized traj from passed in 240 | kr_planning_msgs::TrajectoryDiscretized 241 | SplineTrajSampler::sample_and_refine_trajectory( 242 | const Eigen::VectorXd& start_state, 243 | const kr_planning_msgs::SplineTrajectory& traj, 244 | const std::vector& hPolys, 245 | const Eigen::VectorXd& allo_ts) { 246 | ROS_WARN("[iLQR]: sample_and_refine_trajectory Started"); 247 | std::vector> Abpolys( 248 | hPolys.size()); 249 | std::transform( 250 | hPolys.begin(), hPolys.end(), Abpolys.begin(), pt_normal_to_halfspace); 251 | ROS_WARN("[iLQR]: msg Converted"); 252 | 253 | double total_time = traj.data[0].t_total; 254 | int N_controls = total_time / dt_ / 5; 255 | N_controls *= 5; // make sure a multiple of 5 for traj to be correctly fitted 256 | // to poly outside 257 | int N_state = N_controls + 1; 258 | 259 | std::vector pos = sample(traj, total_time, N_state, 0); 260 | std::vector vel = sample(traj, total_time, N_state, 1); 261 | std::vector acc = sample(traj, total_time, N_state, 2); 262 | std::vector jerk = sample(traj, total_time, N_state, 3); 263 | std::vector snap = sample(traj, total_time, N_state, 4); 264 | ROS_INFO_STREAM("[iLQR]: Sampled " << pos.size() << " points" 265 | << "total time = " << total_time); 266 | double dt = dt_; 267 | ROS_WARN("[iLQR]: Sample Done"); 268 | 269 | kr_planning_msgs::TrajectoryDiscretized traj_discretized; 270 | traj_discretized.header = traj.header; 271 | double jerk_abs_sum = 0.0; 272 | double total_ref_F = 0.0; 273 | double total_ref_M = 0.0; 274 | 
Eigen::Quaterniond q_return; 275 | Eigen::Vector3d w_return; 276 | geometry_msgs::Quaternion ini_q_msg; 277 | geometry_msgs::Point ini_w_msg; 278 | std::vector yaw_ref(N_state); 279 | std::vector thrust_ref(N_state); 280 | std::vector moment_ref(N_state); 281 | std::vector q_ref(N_state); 282 | std::vector w_ref(N_state); 283 | geometry_msgs::Point pos_t; 284 | geometry_msgs::Point vel_t; 285 | geometry_msgs::Point acc_t; 286 | geometry_msgs::Point jerk_t; 287 | geometry_msgs::Point snap_t; 288 | double thrust; // for return 289 | geometry_msgs::Point moment; // for return 290 | 291 | for (int i = 0; i < N_state; i++) { 292 | Eigen::Vector3d yaw_three_der = 293 | Eigen::Vector3d(0, 0, 0); // TODO: Assign from traj 294 | yaw_ref[i] = yaw_three_der[0]; 295 | Eigen::Vector3d M = compute_ref_inputs(pos[i], 296 | vel[i], 297 | acc[i], 298 | jerk[i], 299 | snap[i], 300 | yaw_three_der, 301 | thrust, 302 | moment, 303 | q_return, 304 | w_return); 305 | moment_ref[i] = M; 306 | thrust_ref[i] = thrust; 307 | 308 | if (i == 0) { 309 | tf::quaternionEigenToMsg(q_return, ini_q_msg); 310 | tf::pointEigenToMsg(w_return, ini_w_msg); 311 | } 312 | // std::cout<<"rot q = "< t = linspace(0.0, total_time, N_controls_ + 1); 348 | traj_discretized.dt = dt; 349 | traj_discretized.N_ctrl = N_controls; 350 | traj_discretized.inital_attitude = ini_q_msg; 351 | traj_discretized.initial_omega = ini_w_msg; 352 | 353 | ROS_WARN("[iLQR]: Sampling Finished"); 354 | 355 | if (compute_altro_) { 356 | kr_planning_msgs::TrajectoryDiscretized refined_traj = 357 | publish_altro(start_state, 358 | pos, 359 | vel, 360 | acc, 361 | yaw_ref, 362 | thrust_ref, 363 | moment_ref, 364 | dt, 365 | q_ref, 366 | w_ref, 367 | Abpolys, 368 | allo_ts, 369 | traj.header); 370 | 371 | ROS_WARN("[iLQR]: Altro Finished"); 372 | return refined_traj; 373 | } else { 374 | return traj_discretized; 375 | } 376 | } 377 | 378 | void SplineTrajSampler::callbackWrapper( 379 | const kr_planning_msgs::SplineTrajectory::ConstPtr& 
msg) { 380 | kr_planning_msgs::SplineTrajectory traj = *msg; 381 | 382 | ROS_ERROR("[iLQR]: Callback not working, need to import as class"); 383 | // kr_planning_msgs::TrajectoryDiscretized traj_discretized = 384 | // sample_and_refine_trajectory(traj); 385 | 386 | // ros::Duration(0.5).sleep(); // this is for yuwei's planner to finish so we 387 | // have polytope info ROS_ERROR("SLEEPING 0.5 sec to wait for polytope info"); 388 | // this->sampled_traj_pub_.publish(traj_discretized); 389 | // do optimization here if needed 390 | N_iter_++; 391 | } 392 | -------------------------------------------------------------------------------- /src/test_utils.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by Brian Jackson on 9/27/22. 3 | // Copyright (c) 2022 Robotic Exploration Lab. All rights reserved. 4 | // 5 | 6 | #include "kr_ilqr_optimizer/test_utils.hpp" 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | #include "kr_ilqr_optimizer/finitediff.hpp" 15 | 16 | namespace fs = std::filesystem; 17 | // using json = nlohmann::json; 18 | 19 | void discrete_double_integrator_dynamics( 20 | double* xnext, const double* x, const double* u, float h, int dim) { 21 | double b = h * h / 2; 22 | for (int i = 0; i < dim; ++i) { 23 | xnext[i] = x[i] + x[i + dim] * h + u[i] * b; 24 | xnext[i + dim] = x[i + dim] + u[i] * h; 25 | } 26 | } 27 | 28 | void discrete_double_integrator_jacobian( 29 | double* jac, const double* x, const double* u, float h, int dim) { 30 | (void)x; 31 | (void)u; 32 | Eigen::Map J(jac, 2 * dim, 3 * dim); 33 | J.setZero(); 34 | double b = h * h / 2; 35 | for (int i = 0; i < dim; ++i) { 36 | J(i, i) = 1.0; 37 | J(i + dim, i + dim) = 1.0; 38 | J(i, i + dim) = h; 39 | J(i, 2 * dim + i) = b; 40 | J(i + dim, 2 * dim + i) = h; 41 | } 42 | } 43 | 44 | const double kPendulumMass = 1.0; 45 | const double kPendulumLength = 0.5; 46 | const double kPendulumFrictionCoeff = 0.1; 47 | // 
const double kPendulumInertia = 0.25; 48 | const double kPendulumGravity = 9.81; 49 | 50 | void pendulum_dynamics(double* xnext, const double* x, const double* u) { 51 | double l = kPendulumLength; 52 | double g = kPendulumGravity; 53 | double b = kPendulumFrictionCoeff; 54 | double m = kPendulumMass * l * l; 55 | 56 | double theta = x[0]; 57 | double omega = x[1]; 58 | 59 | double omega_dot = u[0] / m - g * std::sin(theta) / l - b * omega / m; 60 | xnext[0] = omega; 61 | xnext[1] = omega_dot; 62 | } 63 | 64 | void pendulum_jacobian(double* jac, const double* x, const double* u) { 65 | (void)u; 66 | double l = kPendulumLength; 67 | double g = kPendulumGravity; 68 | double b = kPendulumFrictionCoeff; 69 | double m = kPendulumMass * l * l; 70 | 71 | double domega_dtheta = 0.0; 72 | double domega_domega = 1.0; 73 | double domega_du = 0.0; 74 | double dalpha_dtheta = -g * std::cos(x[0]) / l; 75 | double dalpha_domega = -b / m; 76 | double dalpha_du = 1 / m; 77 | jac[0] = domega_dtheta; 78 | jac[1] = dalpha_dtheta; 79 | jac[2] = domega_domega; 80 | jac[3] = dalpha_domega; 81 | jac[4] = domega_du; 82 | jac[5] = dalpha_du; 83 | } 84 | 85 | altro::ExplicitDynamicsFunction MidpointDynamics(int n, 86 | int m, 87 | ContinuousDynamicsFunction f) { 88 | (void)m; 89 | auto fd = [n, f](double* xn, const double* x, const double* u, float h) { 90 | static Eigen::VectorXd xm(n); 91 | Eigen::Map xn_vec(xn, n); 92 | Eigen::Map x_vec(x, n); 93 | Eigen::Map u_vec(u, n); 94 | f(xm.data(), x, u); 95 | xm *= h / 2; 96 | xm.noalias() += x_vec; 97 | f(xn, xm.data(), u); 98 | xn_vec = x_vec + h * xn_vec; 99 | }; 100 | return fd; 101 | } 102 | 103 | altro::ExplicitDynamicsJacobian MidpointJacobian( 104 | int n, int m, ContinuousDynamicsFunction f, ContinuousDynamicsJacobian df) { 105 | auto fd = [n, m, f, df]( 106 | double* jac, const double* x, const double* u, float h) { 107 | static Eigen::MatrixXd A(n, n); 108 | static Eigen::MatrixXd B(n, m); 109 | static Eigen::MatrixXd Am(n, n); 110 | 
static Eigen::MatrixXd Bm(n, m); 111 | static Eigen::VectorXd xm(n); 112 | static Eigen::MatrixXd In = Eigen::MatrixXd::Identity(n, n); 113 | 114 | Eigen::Map J(jac, n, n + m); 115 | Eigen::Map x_vec(x, n); 116 | Eigen::Map u_vec(u, n); 117 | 118 | // Evaluate the midpoint 119 | f(xm.data(), x, u); 120 | xm = x_vec + h / 2 * xm; 121 | 122 | // Evaluate the Jacobian 123 | df(J.data(), x, u); 124 | A = J.leftCols(n); 125 | B = J.rightCols(m); 126 | 127 | // Evaluate the Jacobian at the midpoint 128 | df(J.data(), xm.data(), u); 129 | Am = J.leftCols(n); 130 | Bm = J.rightCols(m); 131 | 132 | // Apply the chain rule 133 | J.leftCols(n) = In + h * Am * (In + h / 2 * A); 134 | J.rightCols(m) = h * (Am * h / 2 * B + Bm); 135 | }; 136 | return fd; 137 | } 138 | 139 | altro::ExplicitDynamicsFunction ForwardEulerDynamics( 140 | int n, int m, const ContinuousDynamicsFunction f) { 141 | (void)m; 142 | auto fd = [n, f](double* xn, const double* x, const double* u, float h) { 143 | Eigen::Map xn_vec(xn, n); 144 | Eigen::Map x_vec(x, n); 145 | Eigen::Map u_vec(u, n); 146 | f(xn, x, u); // xn is actually x_dot here 147 | xn_vec = x_vec + h * xn_vec; 148 | }; 149 | return fd; 150 | } 151 | 152 | altro::ExplicitDynamicsJacobian ForwardEulerJacobian( 153 | int n, 154 | int m, 155 | const ContinuousDynamicsFunction f, 156 | const ContinuousDynamicsJacobian df) { 157 | auto fd = [n, m, f, df]( 158 | double* jac, const double* x, const double* u, float h) { 159 | Eigen::Map J(jac, n, n + m); 160 | Eigen::Map x_vec(x, n); 161 | Eigen::Map u_vec(u, n); 162 | 163 | static Eigen::MatrixXd In = Eigen::MatrixXd::Identity(n, n); 164 | 165 | df(J.data(), x, u); 166 | J.leftCols(n) = In + h * J.leftCols(n); 167 | J.rightCols(m) = h * J.rightCols(m); 168 | }; 169 | return fd; 170 | } 171 | 172 | void BicycleModel::Dynamics(double* x_dot, 173 | const double* x, 174 | const double* u) const { 175 | double v = u[0]; // longitudinal velocity (m/s) 176 | double delta_dot = u[1]; // steering angle 
rage (rad/s) 177 | double theta = x[2]; // heading angle (rad) relative to x-axis 178 | double delta = x[3]; // steering angle (rad) 179 | 180 | double beta = 0; 181 | double omega = 0; 182 | double stheta = 0; 183 | double ctheta = 0; 184 | switch (reference_frame_) { 185 | case ReferenceFrame::CenterOfGravity: 186 | beta = std::atan2(distance_to_rear_wheels_ * delta, length_); 187 | omega = v * std::cos(beta) * std::tan(delta) / length_; 188 | stheta = std::sin(theta + beta); 189 | ctheta = std::cos(theta + beta); 190 | break; 191 | case ReferenceFrame::Rear: 192 | omega = v * tan(delta) / length_; 193 | stheta = std::sin(theta); 194 | ctheta = std::cos(theta); 195 | break; 196 | case ReferenceFrame::Front: 197 | omega = v * std::sin(delta) / length_; 198 | stheta = std::sin(theta + delta); 199 | ctheta = std::cos(theta + delta); 200 | break; 201 | }; 202 | double px_dot = v * ctheta; 203 | double py_dot = v * stheta; 204 | x_dot[0] = px_dot; 205 | x_dot[1] = py_dot; 206 | x_dot[2] = omega; 207 | x_dot[3] = delta_dot; 208 | } 209 | 210 | void BicycleModel::Jacobian(double* jac, 211 | const double* x, 212 | const double* u) const { 213 | double v = u[0]; // longitudinal velocity (m/s) 214 | double theta = x[2]; // heading angle (rad) relative to x-axis 215 | double delta = x[3]; // steering angle (rad) 216 | 217 | Eigen::Map> J(jac); 218 | double beta = 0; 219 | double dbeta_ddelta = 0; 220 | double by = 0; 221 | double bx = 0; 222 | double domega_ddelta = 0; 223 | double domega_dv = 0; 224 | 225 | double stheta = 0; 226 | double ctheta = 0; 227 | double ds_dtheta = 0; 228 | double dc_dtheta = 0; 229 | double ds_ddelta = 0; 230 | double dc_ddelta = 0; 231 | switch (reference_frame_) { 232 | case ReferenceFrame::CenterOfGravity: 233 | by = distance_to_rear_wheels_ * delta; 234 | bx = length_; 235 | beta = std::atan2(by, bx); 236 | dbeta_ddelta = bx / (bx * bx + by * by) * distance_to_rear_wheels_; 237 | domega_ddelta = v / length_ * 238 | (-std::sin(beta) * 
std::tan(delta) * dbeta_ddelta + 239 | std::cos(beta) / (std::cos(delta) * std::cos(delta))); 240 | domega_dv = std::cos(beta) * std::tan(delta) / length_; 241 | 242 | stheta = std::sin(theta + beta); 243 | ctheta = std::cos(theta + beta); 244 | ds_dtheta = +std::cos(theta + beta); 245 | dc_dtheta = -std::sin(theta + beta); 246 | ds_ddelta = +std::cos(theta + beta) * dbeta_ddelta; 247 | dc_ddelta = -std::sin(theta + beta) * dbeta_ddelta; 248 | break; 249 | case ReferenceFrame::Rear: 250 | domega_ddelta = v / length_ / (std::cos(delta) * std::cos(delta)); 251 | domega_dv = std::tan(delta) / length_; 252 | 253 | stheta = std::sin(theta); 254 | ctheta = std::cos(theta); 255 | ds_dtheta = +std::cos(theta); 256 | dc_dtheta = -std::sin(theta); 257 | break; 258 | case ReferenceFrame::Front: 259 | domega_ddelta = v / length_ * std::cos(delta); 260 | domega_dv = std::sin(delta) / length_; 261 | 262 | stheta = std::sin(theta + delta); 263 | ctheta = std::cos(theta + delta); 264 | ds_dtheta = +std::cos(theta + delta); 265 | dc_dtheta = -std::sin(theta + delta); 266 | ds_ddelta = ds_dtheta; 267 | dc_ddelta = dc_dtheta; 268 | break; 269 | }; 270 | J.setZero(); 271 | J(0, 2) = v * dc_dtheta; // dxdot_dtheta 272 | J(0, 3) = v * dc_ddelta; // dxdot_ddelta 273 | J(0, 4) = ctheta; // dxdot_dv 274 | J(1, 2) = v * ds_dtheta; // dydot_dtheta 275 | J(1, 3) = v * ds_ddelta; // dydot_ddelta 276 | J(1, 4) = stheta; // dydot_dv 277 | J(2, 3) = domega_ddelta; 278 | J(2, 4) = domega_dv; 279 | J(3, 5) = 1.0; 280 | } 281 | 282 | void SimpleQuaternionModel::Dynamics(double* x_dot, 283 | const double* x, 284 | const double* u) const { 285 | Eigen::Map x_dot_vec(x_dot, 4); 286 | Eigen::Map x_vec(x, 4); 287 | Eigen::Map u_vec(u, 3); 288 | 289 | x_dot_vec = 0.5 * altro::G(x_vec) * u_vec; 290 | } 291 | 292 | void SimpleQuaternionModel::Jacobian(double* jac, 293 | const double* x, 294 | const double* u) const { 295 | Eigen::Map x_vec(x, 4); 296 | Eigen::Map u_vec(u, 3); 297 | 298 | double qs = 
x_vec[0]; 299 | double qa = x_vec[1]; 300 | double qb = x_vec[2]; 301 | double qc = x_vec[3]; 302 | 303 | double wx = u_vec[0]; 304 | double wy = u_vec[1]; 305 | double wz = u_vec[2]; 306 | 307 | Eigen::Map> J(jac); 308 | J.setZero(); 309 | J << 0, -wx / 2, -wy / 2, -wz / 2, -qa / 2, -qb / 2, -qc / 2, wx / 2, 0, 310 | wz / 2, -wy / 2, qs / 2, -qc / 2, qb / 2, wy / 2, -wz / 2, 0, wx / 2, 311 | qc / 2, qs / 2, -qa / 2, wz / 2, wy / 2, -wx / 2, 0, -qb / 2, qa / 2, 312 | qs / 2; 313 | } 314 | 315 | Eigen::Vector3d quadModel::moments(const Eigen::VectorXd& u) const { 316 | double L = motor_dist_; 317 | 318 | double w1 = u[0]; 319 | double w2 = u[1]; 320 | double w3 = u[2]; 321 | double w4 = u[3]; 322 | 323 | double F1 = std::max(0.0, kf_ * w1); 324 | double F2 = std::max(0.0, kf_ * w2); 325 | double F3 = std::max(0.0, kf_ * w3); 326 | double F4 = std::max(0.0, kf_ * w4); 327 | 328 | double M1 = km_ * w1; 329 | double M2 = km_ * w2; 330 | double M3 = km_ * w3; 331 | double M4 = km_ * w4; 332 | 333 | Eigen::Vector3d tau; 334 | tau << L * (F2 - F4), L * (F3 - F1), (M1 - M2 + M3 - M4); 335 | 336 | return tau; 337 | } 338 | 339 | Eigen::Vector3d quadModel::forces(const Eigen::VectorXd& u) const { 340 | double w1 = u[0]; 341 | double w2 = u[1]; 342 | double w3 = u[2]; 343 | double w4 = u[3]; 344 | 345 | double F1 = std::max(0.0, kf_ * w1); 346 | double F2 = std::max(0.0, kf_ * w2); 347 | double F3 = std::max(0.0, kf_ * w3); 348 | double F4 = std::max(0.0, kf_ * w4); 349 | 350 | Eigen::Vector3d F; 351 | F << 0.0, 0.0, F1 + F2 + F3 + F4; // total rotor force in body frame 352 | return F; 353 | } 354 | 355 | void quadModel::Dynamics(double* x_dot, 356 | const double* x, 357 | const double* u) const { 358 | // std::cout << "section 0 success " << std::endl; 359 | 360 | Eigen::Map x_dot_vec(x_dot, 13); 361 | x_dot_vec.segment<13>(0) = Eigen::VectorXd::Zero(13); 362 | Eigen::Map x_vec(x, 13); 363 | Eigen::Map u_vec(u, 4); 364 | 365 | const double robot_mass = mass_; 366 | 
Eigen::Vector3d g_vec; 367 | g_vec << 0, 0, -grav_; 368 | 369 | // std::cout << "section 1 success " << std::endl; 370 | 371 | Eigen::Vector3d moment_body = moments(u_vec); 372 | Eigen::Vector3d force_body = forces(u_vec); 373 | 374 | // std::cout << "section 2 success " << std::endl; 375 | 376 | // state: pos(world) quat(world->body) vel(body) omega(body) 377 | // 0 1 2 3 4 5 6 7 8 9 10 11 12 378 | Eigen::Vector4d q = x_vec.segment<4>(3).normalized(); 379 | Eigen::Vector3d vb = x_vec.segment<3>(7); 380 | Eigen::Vector3d w = x_vec.segment<3>(10); 381 | // std::cout << "section 3 success " << std::endl; 382 | 383 | Eigen::Matrix3d Q = altro::Q(q); 384 | // change rate of position 385 | x_dot_vec.segment<3>(0) = Q * vb; 386 | 387 | // change rate of quaternion 388 | // std::cout << "section 4 success " << std::endl; 389 | 390 | x_dot_vec.segment<4>(3) = 0.5 * altro::G(q) * w; 391 | // std::cout << "section 5 success " << std::endl; 392 | 393 | // change rate of linear velocity 394 | 395 | x_dot_vec.segment<3>(7) = 396 | Q.transpose() * g_vec + force_body / robot_mass - w.cross(vb); 397 | // std::cout << "section 6 success " << std::endl; 398 | 399 | // change rate of angular velocity 400 | x_dot_vec.segment<3>(10) = moment_of_inertia_.inverse() * 401 | (moment_body - w.cross(moment_of_inertia_ * w)); 402 | // std::cout << "section 7 success " << std::endl; 403 | } 404 | 405 | void quadModel::finite_jacobian_quad_xu(double* jac, 406 | const double* x, 407 | const double* u) const { 408 | // Eigen::Map x_dot_vec(x_dot, 13); 409 | Eigen::Map x_vec(x, 13); 410 | Eigen::Map u_vec(u, 4); 411 | Eigen::Map> J(jac); // jac = [dfc_dx, dfc_du] 412 | 413 | Eigen::MatrixXd dfc_dx(13, 13); 414 | Eigen::MatrixXd dfc_du(13, 4); 415 | 416 | const double eps = 1e-10; 417 | fd::AccuracyOrder accuracy = fd::EIGHTH; 418 | const std::vector external_coeffs = fd::get_external_coeffs(accuracy); 419 | const std::vector internal_coeffs = fd::get_interior_coeffs(accuracy); 420 | 
assert(external_coeffs.size() == internal_coeffs.size()); 421 | const size_t inner_steps = internal_coeffs.size(); 422 | const double denom = get_denominator(accuracy) * eps; 423 | J.setZero(); 424 | dfc_dx.setZero(); 425 | dfc_du.setZero(); 426 | // std::cout<< "number of rows" << x_vec.rows()<(3).normalize(); 436 | Eigen::VectorXd fx(13); 437 | 438 | Dynamics(fx.data(), x_mutable.data(), u); 439 | // fx = Eigen::VectorXd::Zero(13); 440 | // std::cout << "fx" << fx << std::endl; 441 | 442 | dfc_dx.col(i) += external_coeffs[ci] * fx; 443 | for (size_t j = 0; j < x_vec.rows(); j++) { 444 | x_mutable[j] = x_vec[j]; 445 | // reset x_mutable, it is unclear if quat will make things weird, so 446 | // just reset everything 447 | } 448 | } 449 | dfc_dx.col(i) /= denom; 450 | } 451 | 452 | Eigen::VectorXd u_mutable = u_vec; 453 | for (size_t i = 0; i < u_vec.rows(); i++) { 454 | for (size_t ci = 0; ci < inner_steps; ci++) { 455 | u_mutable[i] += internal_coeffs[ci] * eps; 456 | Eigen::VectorXd fx(13); 457 | 458 | Dynamics(fx.data(), x, u_mutable.data()); 459 | dfc_du.col(i) += external_coeffs[ci] * fx; 460 | u_mutable[i] = u_vec[i]; 461 | } 462 | dfc_du.col(i) /= denom; 463 | } 464 | 465 | // make all values < 1e-10 equal to zero 466 | for (size_t i = 0; i < dfc_dx.rows(); i++) { 467 | for (size_t j = 0; j < dfc_dx.cols(); j++) { 468 | if (std::abs(dfc_dx(i, j)) < 1e-8) { 469 | dfc_dx(i, j) = 0; 470 | } 471 | } 472 | } 473 | J.block<13, 13>(0, 0) = dfc_dx; 474 | J.block<13, 4>(0, 13) = dfc_du; 475 | } 476 | 477 | void quadModel::Jacobian_fd(double* jac, 478 | const double* x, 479 | const double* u) const { 480 | finite_jacobian_quad_xu(jac, x, u); 481 | } 482 | Eigen::Matrix quat_a_jacobian(const Eigen::Vector4d& q, 483 | const Eigen::Vector3d& g) { 484 | Eigen::Matrix mat; 485 | 486 | mat(0, 0) = -2 * g(2) * q(2); 487 | mat(0, 1) = 2 * g(2) * q(3); 488 | mat(0, 2) = -2 * g(2) * q(0); 489 | mat(0, 3) = 2 * g(2) * q(1); 490 | 491 | mat(1, 0) = 2 * g(2) * q(1); 492 | mat(1, 
1) = 2 * g(2) * q(0); 493 | mat(1, 2) = 2 * g(2) * q(3); 494 | mat(1, 3) = 2 * g(2) * q(2); 495 | 496 | mat(2, 0) = 2 * g(2) * q(0); 497 | mat(2, 1) = -2 * g(2) * q(1); 498 | mat(2, 2) = -2 * g(2) * q(2); 499 | mat(2, 3) = 2 * g(2) * q(3); 500 | return mat; 501 | } 502 | 503 | Eigen::Matrix quat_v_jacobian(const Eigen::Vector4d& q, 504 | const Eigen::Vector3d& v) { 505 | Eigen::Matrix mat; // MATLAB symbolic toolbox 506 | 507 | mat(0, 0) = 2 * q(0) * v(0) + 2 * q(2) * v(2) - 2 * q(3) * v(1); 508 | mat(0, 1) = 2 * q(1) * v(0) + 2 * q(2) * v(1) + 2 * q(3) * v(2); 509 | mat(0, 2) = 2 * q(0) * v(2) + 2 * q(1) * v(1) - 2 * q(2) * v(0); 510 | mat(0, 3) = 2 * q(1) * v(2) - 2 * q(0) * v(1) - 2 * q(3) * v(0); 511 | 512 | mat(1, 0) = 2 * q(0) * v(1) - 2 * q(1) * v(2) + 2 * q(3) * v(0); 513 | mat(1, 1) = 2 * q(2) * v(0) - 2 * q(1) * v(1) - 2 * q(0) * v(2); 514 | mat(1, 2) = 2 * q(1) * v(0) + 2 * q(2) * v(1) + 2 * q(3) * v(2); 515 | mat(1, 3) = 2 * q(0) * v(0) + 2 * q(2) * v(2) - 2 * q(3) * v(1); 516 | 517 | mat(2, 0) = 2 * q(0) * v(2) + 2 * q(1) * v(1) - 2 * q(2) * v(0); 518 | mat(2, 1) = 2 * q(0) * v(1) - 2 * q(1) * v(2) + 2 * q(3) * v(0); 519 | mat(2, 2) = 2 * q(3) * v(1) - 2 * q(2) * v(2) - 2 * q(0) * v(0); 520 | mat(2, 3) = 2 * q(1) * v(0) + 2 * q(2) * v(1) + 2 * q(3) * v(2); 521 | return mat; 522 | } 523 | 524 | void quadModel::Jacobian(double* jac, const double* x, const double* u) const { 525 | Eigen::Map> J(jac); // jac = [dfc_dx, dfc_du] 526 | J.setZero(); 527 | 528 | Eigen::Map x_vec(x, 13); 529 | Eigen::Map u_vec(u, 4); 530 | 531 | Eigen::Vector4d q = x_vec.segment<4>(3); 532 | Eigen::Vector3d vb = x_vec.segment<3>(7); 533 | Eigen::Vector3d w = x_vec.segment<3>(10); 534 | Eigen::Matrix3d Q = altro::Q(q); 535 | Eigen::Vector4d inv_q = altro::quat_conj(q); 536 | 537 | const double robot_mass = mass_; 538 | Eigen::Vector3d g_vec; 539 | g_vec << 0, 0, -grav_; 540 | 541 | // Calculate dfc_dx 542 | Eigen::MatrixXd dfc_dx(13, 13); 543 | 544 | dfc_dx.setZero(); 545 | // 
dvw/dq ???? 546 | // Eigen::MatrixXd dQdq = altro::dQdq(q); 547 | // //1 x 3 3x4 548 | // dfc_dx.block<1, 4>(0, 3) = (vb.transpose() * dQdq.block<3, 4>(0, 0)); 549 | // dfc_dx.block<1, 4>(1, 3) = (vb.transpose() * dQdq.block<3, 4>(3, 0)); 550 | // dfc_dx.block<1, 4>(2, 3) = (vb.transpose() * dQdq.block<3, 4>(6, 0)); 551 | double q_w = q[0]; 552 | Eigen::Vector3d q_vec = q.segment<3>(1); 553 | // Eigen::Matrix JJ; 554 | // JJ.setZero(); 555 | 556 | // JJ.block<3,1>(0,3) = 2.0*(q_w*vb+q_vec.cross(vb)); 557 | // JJ.block<3,3>(0,4) = 2.0*(q_vec.transpose()*vb*Eigen::MatrixXd::Identity(3, 558 | // 3)+q_vec*vb.transpose()- vb*q_vec.transpose()-q_w*altro::skew(vb)); 559 | dfc_dx.block<3, 4>(0, 3) = quat_v_jacobian(x_vec.segment<4>(3), vb); 560 | 561 | // dvw/dvb 562 | dfc_dx.block<3, 3>(0, 7) = Q; 563 | // dqdot/dq 564 | dfc_dx.block<1, 3>(3, 4) = -0.5 * w.transpose(); 565 | dfc_dx.block<3, 1>(4, 3) = 0.5 * w; 566 | dfc_dx.block<3, 3>(4, 4) = -0.5 * altro::skew(w); 567 | // dqdot/domega // is this just 0.5*G(q)? 
568 | dfc_dx(3, 10) = -0.5 * x_vec(4); // -0.5qa 569 | dfc_dx(3, 11) = -0.5 * x_vec(5); // -0.5qb 570 | dfc_dx(3, 12) = -0.5 * x_vec(6); // -0.5qc 571 | dfc_dx(4, 10) = 0.5 * x_vec(3); // 0.5qs 572 | dfc_dx(4, 11) = -0.5 * x_vec(6); // -0.5qc 573 | dfc_dx(4, 12) = 0.5 * x_vec(5); // 0.5qb 574 | dfc_dx(5, 10) = 0.5 * x_vec(6); // 0.5qc 575 | dfc_dx(5, 11) = 0.5 * x_vec(3); // 0.5qs 576 | dfc_dx(5, 12) = -0.5 * x_vec(4); // -0.5qa 577 | dfc_dx(6, 10) = -0.5 * x_vec(5); // -0.5qb 578 | dfc_dx(6, 11) = 0.5 * x_vec(4); // 0.5qa 579 | dfc_dx(6, 12) = 0.5 * x_vec(3); // 0.5qs 580 | 581 | // dvbdot/dq // ADDED - sign here, not sure if correct 582 | // Eigen::MatrixXd dQinv_dq = altro::dQdq(inv_q); 583 | // dfc_dx.block<1, 4>(7, 3) = -(g_vec.transpose() * dQinv_dq.block<3, 4>(0, 584 | // 0)); dfc_dx.block<1, 4>(8, 3) = -(g_vec.transpose() * dQinv_dq.block<3, 585 | // 4>(3, 0)); dfc_dx.block<1, 4>(9, 3) = -(g_vec.transpose() * 586 | // dQinv_dq.block<3, 4>(6, 0)); 587 | 588 | // Eigen::Matrix JJ2; 589 | // JJ2.setZero(); 590 | // JJ2.block<3, 1>(0, 3) = 2.0 * (q_w * g_vec + q_vec.cross(g_vec)); 591 | // JJ2.block<3, 3>(0, 4) = 592 | // 2.0 * (-q_vec.transpose() * g_vec * Eigen::MatrixXd::Identity(3, 3) + 593 | // -q_vec * g_vec.transpose() - g_vec * -q_vec.transpose() - q_w * 594 | // altro::skew(g_vec)); 595 | dfc_dx.block<3, 4>(7, 3) = quat_a_jacobian(x_vec.segment<4>(3), g_vec); 596 | 597 | // dvbdot/dvb 598 | dfc_dx.block<3, 3>(7, 7) = -altro::skew(w); 599 | // dvbdot/domega 600 | dfc_dx.block<3, 3>(7, 10) = altro::skew(vb); 601 | 602 | // domegadot/domega 603 | dfc_dx.block<3, 3>(10, 10) = 604 | -moment_of_inertia_.inverse() * (altro::skew(w) * moment_of_inertia_ - 605 | altro::skew(moment_of_inertia_ * w)); 606 | 607 | ///////////////////////////////////////////////////////////////////////////////////// 608 | // Calculate dfc_du 609 | Eigen::MatrixXd dfc_du(13, 4); 610 | dfc_du.setZero(); 611 | 612 | // dvw/du 613 | Eigen::Vector4d u_vec_sign; 614 | for (int i = 0; i < 
4; ++i) { 615 | u_vec_sign(i) = u_vec(i) > 0 ? 1 : 0; 616 | } 617 | dfc_du.block<1, 4>(9, 0) = (1 / robot_mass * kf_) * u_vec_sign; 618 | 619 | // dw_dot/du 620 | Eigen::Matrix Fmat = forceMatrix().block<3, 4>(1, 0); 621 | Fmat.block<2, 4>(0, 0) = 622 | Fmat.block<2, 4>(0, 0).array().rowwise() * u_vec_sign.transpose().array(); 623 | 624 | dfc_du.block<3, 4>(10, 0) = moment_of_inertia_.inverse() * Fmat; 625 | // 626 | 627 | // Get Jacobian 628 | J.block<13, 13>(0, 0) = dfc_dx; 629 | J.block<13, 4>(0, 13) = dfc_du; 630 | } 631 | 632 | void QuadrupedQuaternionModel::Dynamics( 633 | double* x_dot, 634 | const double* x, 635 | const double* u, 636 | Eigen::Matrix foot_pos_body, 637 | Eigen::Matrix3d inertia_body) const { 638 | Eigen::Map x_dot_vec(x_dot, 13); 639 | Eigen::Map x_vec(x, 13); 640 | Eigen::Map u_vec(u, 12); 641 | 642 | double robot_mass = 13; 643 | Eigen::Vector3d g_vec; 644 | g_vec << 0, 0, -9.81; 645 | 646 | Eigen::Vector3d moment_body; 647 | moment_body = 648 | altro::skew(foot_pos_body.block<3, 1>(0, 0)) * u_vec.segment<3>(0) + 649 | altro::skew(foot_pos_body.block<3, 1>(0, 1)) * u_vec.segment<3>(3) + 650 | altro::skew(foot_pos_body.block<3, 1>(0, 2)) * u_vec.segment<3>(6) + 651 | altro::skew(foot_pos_body.block<3, 1>(0, 3)) * u_vec.segment<3>(9); 652 | 653 | // change rate of position 654 | x_dot_vec.segment<3>(0) = x_vec.segment<3>(7); 655 | // change rate of quaternion 656 | x_dot_vec.segment<4>(3) = 657 | 0.5 * altro::G(x_vec.segment<4>(3)) * x_vec.segment<3>(10); 658 | // change rate of linear velocity 659 | x_dot_vec.segment<3>(7) = (u_vec.segment<3>(0) + u_vec.segment<3>(3) + 660 | u_vec.segment<3>(6) + u_vec.segment<3>(9)) / 661 | robot_mass + 662 | g_vec; 663 | // change rate of angular velocity 664 | x_dot_vec.segment<3>(10) = 665 | inertia_body.inverse() * 666 | (moment_body - 667 | x_vec.segment<3>(10).cross(inertia_body * x_vec.segment<3>(10))); 668 | } 669 | 670 | void QuadrupedQuaternionModel::Jacobian( 671 | double* jac, 672 | const 
double* x, 673 | const double* u, 674 | Eigen::Matrix foot_pos_body, 675 | Eigen::Matrix3d inertia_body) const { 676 | (void)u; 677 | Eigen::Map> J(jac); // jac = [dfc_dx, dfc_du] 678 | J.setZero(); 679 | 680 | Eigen::Map x_vec(x, 13); 681 | 682 | double robot_mass = 13; 683 | 684 | // Calculate dfc_dx 685 | Eigen::MatrixXd dfc_dx(13, 13); 686 | dfc_dx.setZero(); 687 | // dv/dv 688 | dfc_dx.block<3, 3>(0, 7) = Eigen::Matrix3d::Identity(); 689 | // dqdot/dq 690 | dfc_dx.block<1, 3>(3, 4) = -0.5 * x_vec.segment<3>(10).transpose(); 691 | dfc_dx.block<3, 1>(4, 3) = 0.5 * x_vec.segment<3>(10); 692 | dfc_dx.block<3, 3>(4, 4) = -0.5 * altro::skew(x_vec.segment<3>(10)); 693 | // dqdot/domega 694 | dfc_dx(3, 10) = -0.5 * x_vec(4); // -0.5qa 695 | dfc_dx(3, 11) = -0.5 * x_vec(5); // -0.5qb 696 | dfc_dx(3, 12) = -0.5 * x_vec(6); // -0.5qc 697 | dfc_dx(4, 10) = 0.5 * x_vec(3); // 0.5qs 698 | dfc_dx(4, 11) = -0.5 * x_vec(6); // -0.5qc 699 | dfc_dx(4, 12) = 0.5 * x_vec(5); // 0.5qb 700 | dfc_dx(5, 10) = 0.5 * x_vec(6); // 0.5qc 701 | dfc_dx(5, 11) = 0.5 * x_vec(3); // 0.5qs 702 | dfc_dx(5, 12) = -0.5 * x_vec(4); // -0.5qa 703 | dfc_dx(6, 10) = -0.5 * x_vec(5); // -0.5qb 704 | dfc_dx(6, 11) = 0.5 * x_vec(4); // 0.5qa 705 | dfc_dx(6, 12) = 0.5 * x_vec(3); // 0.5qs 706 | // domegadot/domega 707 | dfc_dx.block<3, 3>(10, 10) = 708 | -inertia_body.inverse() * 709 | (altro::skew(x_vec.segment<3>(10)) * inertia_body - 710 | altro::skew(inertia_body * x_vec.segment<3>(10))); 711 | 712 | // Calculate dfc_du 713 | Eigen::MatrixXd dfc_du(13, 12); 714 | dfc_du.setZero(); 715 | 716 | for (int i = 0; i < 4; ++i) { 717 | dfc_du.block<3, 3>(7, 3 * i) = 718 | (1 / robot_mass) * Eigen::Matrix3d::Identity(); 719 | dfc_du.block<3, 3>(10, 3 * i) = 720 | inertia_body.inverse() * altro::skew(foot_pos_body.block<3, 1>(0, i)); 721 | } 722 | 723 | // Get Jacobian 724 | J.block<13, 13>(0, 0) = dfc_dx; 725 | J.block<13, 12>(0, 13) = dfc_du; 726 | } 727 | 728 | Eigen::Vector4d Slerp(Eigen::Vector4d q1, 
Eigen::Vector4d q2, double t) { 729 | if (q1 == q2) { 730 | return q1; 731 | } else { 732 | double dot = q1.dot(q2); 733 | double theta = acos(dot); 734 | double sinTheta = sin(theta); 735 | double a = sin((1 - t) * theta) / sinTheta; 736 | double b = sin(t * theta) / sinTheta; 737 | 738 | return a * q1 + b * q2; 739 | } 740 | } --------------------------------------------------------------------------------