├── .gitignore
├── CMakeLists.txt
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── LICENSE.md
├── README.md
├── download_local_files.sh
├── figs
│   ├── example_nav2d.gif
│   └── example_regression.gif
├── leocpp.h
├── leocpp
│   ├── CMakeLists.txt
│   ├── config.h.in
│   ├── contact
│   │   ├── CMakeLists.txt
│   │   └── TactileRelativeTfRegressionFactor.h
│   ├── dynamics
│   │   ├── CMakeLists.txt
│   │   └── QSVelPushMotionRealObjEEFactor.h
│   ├── geometry
│   │   ├── CMakeLists.txt
│   │   └── IntersectionPlanarSDFObjEEFactor.h
│   └── thirdparty
│       ├── CMakeLists.txt
│       └── gpmp2
│           ├── LICENSE
│           ├── PlanarSDF.h
│           └── SDFexception.h
└── leopy
    ├── config
    │   ├── baselines
    │   │   ├── lstm_net_test.yaml
    │   │   └── lstm_net_train.yaml
    │   ├── dataio
    │   │   ├── nav2d.yaml
    │   │   └── push2d.yaml
    │   └── examples
    │       ├── leo_nav2d.yaml
    │       ├── leo_push2d.yaml
    │       └── regression.yaml
    ├── requirements
    │   └── requirements.txt
    ├── scripts
    │   ├── analysis
    │   │   ├── regression_analysis.py
    │   │   └── visualize_traj_samples.py
    │   ├── baselines
    │   │   ├── blackbox_nav2d.py
    │   │   ├── lstm_utils.py
    │   │   ├── test_lstm_net.py
    │   │   └── train_lstm_net.py
    │   └── examples
    │       ├── leo_nav2d.py
    │       ├── leo_push2d.py
    │       └── regression.py
    ├── setup.py
    └── src
        └── leopy
            ├── algo
            │   ├── leo_obs_models.py
            │   └── leo_update.py
            ├── dataio
            │   ├── data_process.py
            │   ├── generate_nav2dsim_dataset.py
            │   ├── generate_nav2dsim_time_varying_dataset.py
            │   └── generate_push2dreal_dataset.py
            ├── eval
            │   └── quant_metrics.py
            ├── optim
            │   ├── cost.py
            │   ├── gtsamopt.py
            │   ├── nav2d_factors.py
            │   ├── push2d_factors.py
            │   └── sampler.py
            ├── thirdparty
            │   └── dcem
            │       ├── LICENSE
            │       └── dcem.py
            └── utils
                ├── __init__.py
                ├── dir_utils.py
                ├── logger.py
                ├── tf_utils.py
                └── vis_utils.py
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | .vscode/
3 |
4 | __pycache__
5 | *.py[cod]
6 | *.egg-info*
7 | .ipynb_checkpoints
8 |
9 | build/
10 | local/
11 | outputs/
12 | runs/
13 | multirun/
14 | old_runs/
15 | logs/
16 | notebooks/
17 |
18 | *.json
19 | *.swp
20 | *.bag
21 | *.log
22 |
23 | run.sh
24 |
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.0)
2 | enable_testing()
3 | project(leocpp CXX C)
4 |
5 | # specify C++ standard
6 | set(CMAKE_CXX_STANDARD 14)
7 | set(CMAKE_CXX_STANDARD_REQUIRED True)
8 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w -fopenmp")
9 |
10 | # options
11 | set (CMAKE_INSTALL_PREFIX "/usr/local" CACHE PATH "Install prefix for library")
12 | option(LEOCPP_BUILD_PYTHON_TOOLBOX "build python toolbox" ON)
13 |
14 | # GTSAM
15 | # find_package(GTSAM REQUIRED PATHS "${CMAKE_CURRENT_SOURCE_DIR}/gtsam/install" NO_DEFAULT_PATH)
16 | find_package(GTSAM REQUIRED)
17 | include_directories(${GTSAM_INCLUDE_DIR})
18 | set(GTSAM_LIBRARIES gtsam)
19 |
20 | # GTSAMCMakeTools
21 | # find_package(GTSAMCMakeTools REQUIRED PATHS "${CMAKE_CURRENT_SOURCE_DIR}/gtsam/" NO_DEFAULT_PATH)
22 | find_package(GTSAMCMakeTools)
23 | include(GtsamMakeConfigFile)
24 | include(GtsamBuildTypes)
25 | include(GtsamTesting)
26 |
27 | # for unittest scripts
28 | set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH}" "${GTSAM_DIR}/../GTSAMCMakeTools")
29 |
30 | # Boost
31 | find_package(Boost 1.50 REQUIRED)
32 | find_package(Boost COMPONENTS filesystem REQUIRED)
33 | find_package(Boost COMPONENTS system REQUIRED)
34 | find_package(Boost COMPONENTS thread REQUIRED)
35 | find_package(Boost COMPONENTS serialization REQUIRED)
36 | include_directories(${Boost_INCLUDE_DIR})
37 |
38 | # include current source folder and generated config header files
39 | add_definitions(-DBASE_PATH="${CMAKE_CURRENT_SOURCE_DIR}")
40 | include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR})
41 |
42 | # Process subdirs
43 | add_subdirectory(leocpp)
44 |
45 | # Python wrapper
46 | if(LEOCPP_BUILD_PYTHON_TOOLBOX)
47 | include_directories(${GTSAM_DIR}/cython)
48 | include_directories(/usr/local/cython)
49 | include(GtsamCythonWrap)
50 | include_directories(${GTSAM_EIGENCY_INSTALL_PATH})
51 |
52 | wrap_and_install_library_cython("leocpp.h"
53 | "from gtsam.gtsam cimport *" # extra import of gtsam/gtsam.pxd Cython header
54 | "${CMAKE_INSTALL_PREFIX}/cython" # install path
55 | leocpp # library to link with
56 | "gtsam" # dependencies which need to be built before wrapping
57 | )
58 | add_definitions(-DBOOST_OPTIONAL_ALLOW_BINDING_TO_RVALUES -DBOOST_OPTIONAL_CONFIG_ALLOW_BINDING_TO_RVALUES)
59 | endif()
60 |
61 | # Install config and export files
62 | GtsamMakeConfigFile(leocpp)
63 | export(TARGETS ${LEOCPP_EXPORTED_TARGETS} FILE leocpp-exports.cmake)
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | In the interest of fostering an open and welcoming environment, we as
6 | contributors and maintainers pledge to make participation in our project and
7 | our community a harassment-free experience for everyone, regardless of age, body
8 | size, disability, ethnicity, sex characteristics, gender identity and expression,
9 | level of experience, education, socio-economic status, nationality, personal
10 | appearance, race, religion, or sexual identity and orientation.
11 |
12 | ## Our Standards
13 |
14 | Examples of behavior that contributes to creating a positive environment
15 | include:
16 |
17 | * Using welcoming and inclusive language
18 | * Being respectful of differing viewpoints and experiences
19 | * Gracefully accepting constructive criticism
20 | * Focusing on what is best for the community
21 | * Showing empathy towards other community members
22 |
23 | Examples of unacceptable behavior by participants include:
24 |
25 | * The use of sexualized language or imagery and unwelcome sexual attention or
26 | advances
27 | * Trolling, insulting/derogatory comments, and personal or political attacks
28 | * Public or private harassment
29 | * Publishing others' private information, such as a physical or electronic
30 | address, without explicit permission
31 | * Other conduct which could reasonably be considered inappropriate in a
32 | professional setting
33 |
34 | ## Our Responsibilities
35 |
36 | Project maintainers are responsible for clarifying the standards of acceptable
37 | behavior and are expected to take appropriate and fair corrective action in
38 | response to any instances of unacceptable behavior.
39 |
40 | Project maintainers have the right and responsibility to remove, edit, or
41 | reject comments, commits, code, wiki edits, issues, and other contributions
42 | that are not aligned to this Code of Conduct, or to ban temporarily or
43 | permanently any contributor for other behaviors that they deem inappropriate,
44 | threatening, offensive, or harmful.
45 |
46 | ## Scope
47 |
48 | This Code of Conduct applies within all project spaces, and it also applies when
49 | an individual is representing the project or its community in public spaces.
50 | Examples of representing a project or community include using an official
51 | project e-mail address, posting via an official social media account, or acting
52 | as an appointed representative at an online or offline event. Representation of
53 | a project may be further defined and clarified by project maintainers.
54 |
55 | This Code of Conduct also applies outside the project spaces when there is a
56 | reasonable belief that an individual's behavior may have a negative impact on
57 | the project or its community.
58 |
59 | ## Enforcement
60 |
61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
62 | reported by contacting the project team. All
63 | complaints will be reviewed and investigated and will result in a response that
64 | is deemed necessary and appropriate to the circumstances. The project team is
65 | obligated to maintain confidentiality with regard to the reporter of an incident.
66 | Further details of specific enforcement policies may be posted separately.
67 |
68 | Project maintainers who do not follow or enforce the Code of Conduct in good
69 | faith may face temporary or permanent repercussions as determined by other
70 | members of the project's leadership.
71 |
72 | ## Attribution
73 |
74 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
75 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
76 |
77 | [homepage]: https://www.contributor-covenant.org
78 |
79 | For answers to common questions about this code of conduct, see
80 | https://www.contributor-covenant.org/faq
81 |
82 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | ## Pull Requests
4 | We actively welcome your pull requests.
5 |
6 | 1. Fork the repo and create your branch from `master`.
7 | 2. If you've added code that should be tested, add tests.
8 |
9 | ## Issues
10 | We use GitHub issues to track public bugs. Please ensure your description is clear and has sufficient instructions to be able to reproduce the issue.
11 |
12 |
13 | ## License
14 | By contributing to this project, you agree that your contributions will be licensed under the LICENSE file in the root directory of this source tree.
15 |
16 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2021 Carnegie Mellon University
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | LEO: Learning Energy-based Models in Factor Graph Optimization
2 | ===================================================
3 |
4 | This repository contains the source code of the paper [LEO: Learning Energy-based Models in Factor Graph Optimization](https://arxiv.org/abs/2108.02274).
5 |
6 | # Content
7 |
8 | The code contains three sets of experiments:
9 | ```
10 | 1. 1D energy-based regression: Learn E(x,y) where x is 1D input and y is 1D output
11 | 2. 2D synthetic navigation: Learn E(x,z) where x is a sequence of 2D robot poses and z is a sequence of GPS and odometry measurements.
12 | 3. 2D real-world pushing: Learn E(x,z) where x is a sequence of 2D object poses and z is a sequence of tactile measurements.
13 | ```
14 | # Installation
15 |
16 | Create a virtual python environment using [Anaconda](https://www.anaconda.com/products/individual):
17 | ```
18 | conda create -n leo python=3.7
19 | conda activate leo
20 | ```
21 |
22 | Install the `leopy` python package locally. In the `leopy/` dir execute:
23 | ```
24 | pip install -e .
25 | ```
26 |
27 | Any additional install requirements are detailed within each example.
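
The python dependencies used across the examples are also pinned in [requirements.txt](leopy/requirements/requirements.txt); if a script complains about a missing package, installing them directly (from the `leopy/` dir) should cover it:
```
pip install -r requirements/requirements.txt
```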
28 | # Usage
29 | ## Example 1: 1D energy-based regression
30 |
31 | We borrow this example from [The Differentiable Cross-Entropy Method](https://github.com/facebookresearch/dcem/) repository. We extend it by adding in the energy-based learning method (LEO), an unrolled Gauss-Newton method, and additional preprocessing and visualization utilities.
32 |
33 | To run the regression example, execute in the `leopy/` dir:
34 | ```
35 | python scripts/examples/regression.py
36 | ```
37 |
38 | By default, this runs LEO with Gauss-Newton as the inner loop optimizer. To run the example with other algorithms and settings, please take a look at [regression.yaml](leopy/config/examples/regression.yaml). For instance, to run unrolled Gauss-Newton, set `model=${unroll_gn}`. To run LEO with cross-entropy method as the inner loop optimizer, set `model=${leo_cem}`.
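
Since the examples are configured through [Hydra](https://hydra.cc) (`hydra-core` is a dependency), these settings can likely also be overridden on the command line; treat the exact syntax below as a sketch (the quotes keep the shell from expanding `${...}`):
```
python scripts/examples/regression.py 'model=${unroll_gn}'
python scripts/examples/regression.py 'model=${leo_cem}'
```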
39 |
40 | Upon running the script, you should see the energy function E(x,y) being visualized as it is updated each iteration:
41 |
42 | 
43 |
44 | (green is the groundtruth function, orange are the LEO samples, lighter colors correspond to lower energy)
45 |
46 |
47 |
48 | ## Example 2: 2D synthetic navigation
49 |
50 | To run the navigation examples, we first need to install [gtsam](https://github.com/borglab/gtsam). Start by cloning the gtsam repository:
51 | ```
52 | git clone https://github.com/borglab/gtsam.git
53 | git checkout tags/4.0.0
54 | ```
55 |
56 | Build and install the gtsam library:
57 | ```
58 | mkdir build && cd build && cmake -DGTSAM_INSTALL_CYTHON_TOOLBOX=ON -DGTSAM_PYTHON_VERSION=3.7 ..
59 | make -j
60 | make install
61 | ```
62 | If doing a local install, additionally pass in the install path `-DCMAKE_INSTALL_PREFIX=../install`.
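
The cython toolbox is installed to `<install-prefix>/cython`, and the scripts in this repository look for it under `/usr/local/cython`. If python fails to import `gtsam`, one option (a sketch, assuming the default `/usr/local` prefix) is to expose that directory on your `PYTHONPATH`:
```
export PYTHONPATH=$PYTHONPATH:/usr/local/cython
```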
63 |
64 | Download the 2D navigation datasets locally by running:
65 | ```
66 | ./download_local_files.sh
67 | ```
68 |
69 | To run the 2D navigation example, execute in the `leopy/` dir:
70 | ```
71 | python scripts/examples/leo_nav2d.py
72 | ```
73 | By default, this runs LEO with the [iSAM2](https://www.cs.cmu.edu/~kaess/pub/Kaess12ijrr.pdf) optimizer in the inner loop on the varying covariance dataset. To run the example with other datasets and settings, please take a look at [leo_nav2d.yaml](leopy/config/examples/leo_nav2d.yaml).
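
For instance, assuming the same Hydra-style command-line overrides as in the regression example (a sketch; the keys below are taken from that config file), the fixed covariance dataset could be selected with:
```
python scripts/examples/leo_nav2d.py dataio.dataset_name=nav2dfix/dataset_0000 dataio.model_type=fixed_cov
```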
74 |
75 | Upon running the script, you should see the optimized robot trajectories being saved to `local/plots` every learning iteration:
76 |
77 | 
78 |
79 |
80 |
81 | # Citing
82 | If you find this repository helpful in your publications, please consider citing the following:
83 |
84 | ```
85 | @inproceedings{sodhi2021leo,
86 | title={{LEO: Learning energy-based models in factor graph optimization}},
87 | author={Sodhi, Paloma and Dexheimer, Eric and Mukadam, Mustafa and Anderson, Stuart and Kaess, Michael},
88 | booktitle={Conference on Robot Learning (CoRL)},
89 | year={2021}
90 | }
91 | ```
92 |
93 | # License
94 | This repository is licensed under the [MIT License](LICENSE.md).
--------------------------------------------------------------------------------
/download_local_files.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | LOCAL_DIR="local/"
4 | mkdir -p $LOCAL_DIR
5 |
6 | wget https://www.dropbox.com/s/r1nabquau6zcxmc/datasets.zip -P $LOCAL_DIR
7 |
8 | unzip $LOCAL_DIR/datasets.zip -d $LOCAL_DIR
9 |
10 | rm $LOCAL_DIR/datasets.zip
--------------------------------------------------------------------------------
/figs/example_nav2d.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rpl-cmu/leo/4ed27b169172795930a9103598144eb3ca70a405/figs/example_nav2d.gif
--------------------------------------------------------------------------------
/figs/example_regression.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rpl-cmu/leo/4ed27b169172795930a9103598144eb3ca70a405/figs/example_regression.gif
--------------------------------------------------------------------------------
/leocpp.h:
--------------------------------------------------------------------------------
1 | // leocpp variables
2 |
3 | class gtsam::Point2;
4 | class gtsam::Pose2;
5 | class gtsam::Vector3;
6 |
7 | class gtsam::Point3;
8 | class gtsam::Pose3;
9 | class gtsam::Vector6;
10 |
11 | class gtsam::Values;
12 | virtual class gtsam::noiseModel::Base;
13 | virtual class gtsam::NonlinearFactor;
14 | virtual class gtsam::NonlinearFactorGraph;
15 | virtual class gtsam::NoiseModelFactor : gtsam::NonlinearFactor;
16 |
17 | namespace leocpp {
18 |
19 | #include <leocpp/thirdparty/gpmp2/PlanarSDF.h>
20 | class PlanarSDF {
21 | PlanarSDF(const gtsam::Point2& origin, double cell_size, const Matrix& data);
22 | // access
23 | double getSignedDistance(const gtsam::Point2& point) const;
24 | void print(string s) const;
25 | };
26 |
27 | #include <leocpp/dynamics/QSVelPushMotionRealObjEEFactor.h>
28 | virtual class QSVelPushMotionRealObjEEFactor : gtsam::NoiseModelFactor {
29 | QSVelPushMotionRealObjEEFactor(size_t objKey0, size_t objKey1, size_t eeKey0, size_t eeKey1,
30 | const double& cSq, const gtsam::noiseModel::Base* noiseModel);
31 | Vector evaluateError(const gtsam::Pose2& objPose0, const gtsam::Pose2& objPose1,
32 | const gtsam::Pose2& eePose0, const gtsam::Pose2& eePose1) const;
33 | };
34 |
35 | #include <leocpp/geometry/IntersectionPlanarSDFObjEEFactor.h>
36 | virtual class IntersectionPlanarSDFObjEEFactor : gtsam::NoiseModelFactor {
37 | IntersectionPlanarSDFObjEEFactor(size_t objKey, size_t eeKey, const PlanarSDF &sdf, const double& eeRadius, const gtsam::noiseModel::Base* noiseModel);
38 | Vector evaluateError(const gtsam::Pose2& objPose, const gtsam::Pose2& eePose) const;
39 | };
40 |
41 | #include <leocpp/contact/TactileRelativeTfRegressionFactor.h>
42 | virtual class TactileRelativeTfPredictionFactor : gtsam::NoiseModelFactor {
43 | TactileRelativeTfPredictionFactor(size_t objKey1, size_t objKey2, size_t eeKey1, size_t eeKey2,
44 | const Vector& torchModelOutput,
45 | const gtsam::noiseModel::Base* noiseModel);
46 | Vector evaluateError(const gtsam::Pose2& objPose1, const gtsam::Pose2& objPose2,
47 | const gtsam::Pose2& eePose1, const gtsam::Pose2& eePose2) const;
48 | void setFlags(const bool yawOnlyError,const bool constantModel);
49 | void setOracle(const bool oracleFactor, const gtsam::Pose2 oraclePose);
50 | void setLabel(int classLabel, int numClasses);
51 | // gtsam::Pose2 getMeasTransform();
52 | gtsam::Pose2 getExpectTransform(const gtsam::Pose2& objPose1, const gtsam::Pose2& objPose2,
53 | const gtsam::Pose2& eePose1, const gtsam::Pose2& eePose2);
54 | };
55 |
56 | } // namespace leocpp
57 |
--------------------------------------------------------------------------------
/leocpp/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | set(leocpp_subdirs
2 | contact
3 | dynamics
4 | geometry
5 | thirdparty
6 | )
7 | set(leocpp_srcs)
8 |
9 | # files to be excluded
10 | set(excluded_sources "")
11 |
12 | # Library sources
13 | foreach(subdir ${leocpp_subdirs})
14 | file(GLOB subdir_srcs "${subdir}/*.cpp" "${subdir}/*.h")
15 | list(REMOVE_ITEM subdir_srcs "${excluded_sources}")
16 |
17 | file(GLOB subdir_test_files "${subdir}/tests/*")
18 |
19 | list(APPEND leocpp_srcs ${subdir_srcs})
20 | message(STATUS "Building Module: ${subdir}")
21 |
22 | # local and tests
23 | add_subdirectory(${subdir})
24 | endforeach(subdir)
25 |
26 | # shared
27 | message(STATUS "Build shared library")
28 | add_library(${PROJECT_NAME} SHARED ${leocpp_srcs})
29 | target_link_libraries(leocpp ${Boost_LIBRARIES} ${GTSAM_LIBRARIES})
30 | set_target_properties(leocpp PROPERTIES
31 | OUTPUT_NAME ${PROJECT_NAME}
32 | LINKER_LANGUAGE CXX
33 | CLEAN_DIRECT_OUTPUT 1)
34 | install(TARGETS ${PROJECT_NAME} EXPORT leocpp-exports LIBRARY DESTINATION lib ARCHIVE DESTINATION lib RUNTIME DESTINATION bin)
35 |
36 | list(APPEND LEOCPP_EXPORTED_TARGETS leocpp)
37 | set(LEOCPP_EXPORTED_TARGETS "${LEOCPP_EXPORTED_TARGETS}" PARENT_SCOPE)
--------------------------------------------------------------------------------
/leocpp/config.h.in:
--------------------------------------------------------------------------------
1 | #ifndef CONFIG_H_IN_
2 | #define CONFIG_H_IN_
3 |
4 | #define LEOCPP_EXPORT
5 | #define LEOCPP_EXTERN_EXPORT extern
6 |
7 | #endif
8 |
--------------------------------------------------------------------------------
/leocpp/contact/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | # Install headers
2 | file(GLOB contact_headers "*.h")
3 | install(FILES ${contact_headers} DESTINATION include/leo/contact)
4 |
5 | # Build tests
6 | gtsamAddTestsGlob(contact "tests/*.cpp" "" ${PROJECT_NAME})
--------------------------------------------------------------------------------
/leocpp/contact/TactileRelativeTfRegressionFactor.h:
--------------------------------------------------------------------------------
1 | // Copyright (c) Facebook, Inc. and its affiliates.
2 |
3 | /**
4 | * @file TactileRelativeTfRegressionFactor.h
5 | * @brief relative transform prediction network from tactile image feature pair inputs (vars: obj, ee poses)
6 | * @author Paloma Sodhi
7 | */
8 |
9 | #ifndef TACTILE_RELATIVE_TF_REGRESSION_FACTOR
10 | #define TACTILE_RELATIVE_TF_REGRESSION_FACTOR
11 |
12 | #include <gtsam/base/Matrix.h>
13 | #include <gtsam/base/Vector.h>
14 | #include <gtsam/base/numericalDerivative.h>
15 | #include <gtsam/geometry/Pose2.h>
16 | #include <gtsam/nonlinear/NonlinearFactor.h>
17 |
18 | #include <boost/bind.hpp>
19 | #include <boost/optional.hpp>
20 | #include <cmath>
21 |
22 | namespace leo {
23 |
24 | class TactileRelativeTfPredictionFactor : public gtsam::NoiseModelFactor4<gtsam::Pose2, gtsam::Pose2, gtsam::Pose2, gtsam::Pose2> {
25 | private:
26 | bool useAnalyticJacobians_;
27 |
28 | gtsam::Vector torchModelOutput_;
29 |
30 | bool yawOnlyError_;
31 | bool constantModel_;
32 |
33 | bool oracleFactor_;
34 | gtsam::Pose2 oraclePose_;
35 |
36 | int classLabel_;
37 | int numClasses_;
38 |
39 | public:
40 | TactileRelativeTfPredictionFactor(gtsam::Key objKey1, gtsam::Key objKey2, gtsam::Key eeKey1, gtsam::Key eeKey2,
41 | const gtsam::Vector& torchModelOutput, const gtsam::SharedNoiseModel& noiseModel = nullptr)
42 | : NoiseModelFactor4(noiseModel, objKey1, objKey2, eeKey1, eeKey2), torchModelOutput_(torchModelOutput), useAnalyticJacobians_(false) {
43 |
44 | torchModelOutput_ = torchModelOutput;
45 |
46 | yawOnlyError_ = false;
47 | constantModel_ = false;
48 | oracleFactor_ = false;
49 | }
50 |
51 | void setFlags(const bool yawOnlyError, const bool constantModel) {
52 | yawOnlyError_ = yawOnlyError;
53 | constantModel_ = constantModel;
54 | }
55 |
56 | void setOracle(const bool oracleFactor, const gtsam::Pose2 oraclePose) {
57 | oracleFactor_ = oracleFactor;
58 | oraclePose_ = oraclePose;
59 | }
60 |
61 | void setLabel(int classLabel, int numClasses) {
62 | classLabel_ = classLabel;
63 | numClasses_ = numClasses;
64 | }
65 |
66 | gtsam::Vector3 transformError(const gtsam::Pose2& objPose1, const gtsam::Pose2& objPose2, const gtsam::Pose2& eePose1, const gtsam::Pose2& eePose2, const gtsam::Pose2& poseRelMeas) const {
67 | // T_arg1^{-1} * T_arg2: relative pose between arg1, arg2 in arg1 coordinate frame
68 | gtsam::Pose2 eePose1__obj = objPose1.between(eePose1);
69 | gtsam::Pose2 eePose2__obj = objPose2.between(eePose2);
70 | gtsam::Pose2 poseRelExpect = eePose1__obj.between(eePose2__obj);
71 | gtsam::Pose2 errPose = poseRelExpect.between(poseRelMeas);
72 |
73 | if (yawOnlyError_) {
74 | gtsam::Pose2 poseRelExpectYaw = gtsam::Pose2(0, 0, poseRelExpect.theta());
75 | gtsam::Pose2 poseRelMeasYaw = gtsam::Pose2(0, 0, poseRelMeas.theta());
76 | errPose = poseRelExpectYaw.between(poseRelMeasYaw);
77 | }
78 |
79 | gtsam::Vector3 errVec = gtsam::Pose2::Logmap(errPose);
80 |
81 | return errVec;
82 | }
83 |
84 | gtsam::Vector evaluateError(const gtsam::Pose2& objPose1, const gtsam::Pose2& objPose2, const gtsam::Pose2& eePose1, const gtsam::Pose2& eePose2,
85 |     const boost::optional<gtsam::Matrix&> H1 = boost::none,
86 |     const boost::optional<gtsam::Matrix&> H2 = boost::none,
87 |     const boost::optional<gtsam::Matrix&> H3 = boost::none,
88 |     const boost::optional<gtsam::Matrix&> H4 = boost::none) const {
89 |
90 | // torchModelOutput_: predicted relative pose from torch network model (measurement)
91 | double yaw = atan(torchModelOutput_[3] / torchModelOutput_[2]);
92 |
93 | if (yawOnlyError_) {
94 | yaw = asin(torchModelOutput_[3]);
95 | }
96 | if (constantModel_) {
97 | yaw = 0.0;
98 | }
99 |
100 | gtsam::Pose2 poseRelMeas = gtsam::Pose2(torchModelOutput_[0], torchModelOutput_[1], yaw);
101 |
102 | if (oracleFactor_) {
103 | poseRelMeas = oraclePose_;
104 | }
105 |
106 | // error between measured and expected relative pose
107 | gtsam::Vector3 errVec = TactileRelativeTfPredictionFactor::transformError(objPose1, objPose2, eePose1, eePose2, poseRelMeas);
108 |
109 | gtsam::Matrix J1, J2, J3, J4;
110 |
111 | if (useAnalyticJacobians_) {
112 | // todo: add analytic derivative
113 | } else {
114 |     J1 = gtsam::numericalDerivative11<gtsam::Vector3, gtsam::Pose2>(boost::bind(&TactileRelativeTfPredictionFactor::transformError, this, _1, objPose2, eePose1, eePose2, poseRelMeas), objPose1);
115 |     J2 = gtsam::numericalDerivative11<gtsam::Vector3, gtsam::Pose2>(boost::bind(&TactileRelativeTfPredictionFactor::transformError, this, objPose1, _1, eePose1, eePose2, poseRelMeas), objPose2);
116 |     J3 = gtsam::numericalDerivative11<gtsam::Vector3, gtsam::Pose2>(boost::bind(&TactileRelativeTfPredictionFactor::transformError, this, objPose1, objPose2, _1, eePose2, poseRelMeas), eePose1);
117 |     J4 = gtsam::numericalDerivative11<gtsam::Vector3, gtsam::Pose2>(boost::bind(&TactileRelativeTfPredictionFactor::transformError, this, objPose1, objPose2, eePose1, _1, poseRelMeas), eePose2);
118 | }
119 |
120 | if (H1) *H1 = J1;
121 | if (H2) *H2 = J2;
122 | if (H3) *H3 = J3;
123 | if (H4) *H4 = J4;
124 |
125 | return errVec;
126 | }
127 |
128 | gtsam::Pose2 getExpectTransform(const gtsam::Pose2& objPose1, const gtsam::Pose2& objPose2, const gtsam::Pose2& eePose1, const gtsam::Pose2& eePose2) {
129 | // T_arg1^{-1} * T_arg2: relative pose between arg1, arg2 in arg1 coordinate frame
130 | gtsam::Pose2 eePose1__obj = objPose1.between(eePose1);
131 | gtsam::Pose2 eePose2__obj = objPose2.between(eePose2);
132 | gtsam::Pose2 poseRelExpect = eePose1__obj.between(eePose2__obj);
133 |
134 | if (yawOnlyError_) {
135 | poseRelExpect = gtsam::Pose2(0.0, 0.0, poseRelExpect.theta());
136 | }
137 |
138 | return poseRelExpect;
139 | }
140 | };
141 |
142 | } // namespace leo
143 | #endif
--------------------------------------------------------------------------------
/leocpp/dynamics/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | # Install headers
2 | file(GLOB dynamics_headers "*.h")
3 | install(FILES ${dynamics_headers} DESTINATION include/leo/dynamics)
4 |
5 | # Build tests
6 | gtsamAddTestsGlob(dynamics "tests/*.cpp" "" ${PROJECT_NAME})
--------------------------------------------------------------------------------
/leocpp/dynamics/QSVelPushMotionRealObjEEFactor.h:
--------------------------------------------------------------------------------
1 | // Copyright (c) Facebook, Inc. and its affiliates.
2 |
3 | /**
4 | * @file QSVelPushMotionRealObjEEFactor.h
5 | * @brief velocity only quasi-static pushing model factor (vars: obj, ee poses)
6 | * @author Paloma Sodhi
7 | */
8 |
9 | #ifndef QS_VEL_PUSH_MOTION_REAL_OBJ_EE_FACTOR_H_
10 | #define QS_VEL_PUSH_MOTION_REAL_OBJ_EE_FACTOR_H_
11 |
12 | #include <gtsam/base/numericalDerivative.h>
13 | #include <gtsam/geometry/Pose2.h>
14 | #include <gtsam/nonlinear/NonlinearFactor.h>
15 |
16 | #include <boost/bind.hpp>
17 | #include <boost/optional.hpp>
18 |
19 | namespace leo {
20 |
21 | class QSVelPushMotionRealObjEEFactor : public gtsam::NoiseModelFactor4<gtsam::Pose2, gtsam::Pose2, gtsam::Pose2, gtsam::Pose2> {
22 | private:
23 | bool useAnalyticJacobians_;
24 | double cSq_;
25 |
26 | public:
27 | QSVelPushMotionRealObjEEFactor(gtsam::Key objKey0, gtsam::Key objKey1, gtsam::Key eeKey0, gtsam::Key eeKey1,
28 | const double& cSq, const gtsam::SharedNoiseModel& model = nullptr)
29 | : NoiseModelFactor4(model, objKey0, objKey1, eeKey0, eeKey1), cSq_(cSq), useAnalyticJacobians_(false) {}
30 |
31 | gtsam::Vector3 pushVelocityError(const gtsam::Pose2& objPose0, const gtsam::Pose2& objPose1, const gtsam::Pose2& eePose0, const gtsam::Pose2& eePose1) const {
32 | gtsam::Pose2 objOri1 = gtsam::Pose2(0, 0, objPose1.theta());
33 | gtsam::Pose2 poseBetween__world = objPose0.between(objPose1);
34 |
35 | // compute object velocities using prev, curr obj poses
36 | gtsam::Vector2 velXYObj__world = gtsam::Vector2(objPose1.x(), objPose1.y()) - gtsam::Vector2(objPose0.x(), objPose0.y());
37 | gtsam::Vector2 velXYObj__obj = objOri1.transformTo(gtsam::Point2(velXYObj__world[0], velXYObj__world[1])); // rotateTo()
38 |
39 | // compute contact point velocities using prev, curr endeff poses
40 | gtsam::Vector2 contactPoint0 = gtsam::Vector2(eePose0.x(), eePose0.y());
41 | gtsam::Vector2 contactPoint1 = gtsam::Vector2(eePose1.x(), eePose1.y());
42 | gtsam::Vector2 velXYContact__world = contactPoint1 - contactPoint0;
43 | gtsam::Vector2 velXYContact__obj = objOri1.transformTo(gtsam::Point2(velXYContact__world[0], velXYContact__world[1])); // rotateTo()
44 |
45 | // current contact point in object frame
46 | gtsam::Vector contactPoint__obj = objPose1.transformTo(gtsam::Point2(contactPoint1[0], contactPoint1[1]));
47 |
48 | double vX = velXYObj__obj[0];
49 | double vY = velXYObj__obj[1];
50 | double omega = poseBetween__world.theta();
51 |
52 | double vPX = velXYContact__obj[0];
53 | double vPY = velXYContact__obj[1];
54 |
55 | double px = contactPoint__obj[0];
56 | double py = contactPoint__obj[1];
57 |
58 | // D*V = Vp (Ref: Zhou '17)
59 | gtsam::Matrix33 D;
60 | gtsam::Vector3 V, Vp;
61 |
62 | D << 1, 0, -py, 0, 1, px, -py, px, -cSq_;
63 | V << vX, vY, omega;
64 | Vp << vPX, vPY, 0;
65 |
66 | gtsam::Vector3 errVec;
67 | errVec = D * V - Vp;
68 |
69 | return errVec;
70 | }
71 |
72 | // Matrix pushVelocityErrorJacobian(const Pose2& objPose0, const Pose2& objPose1, const Pose2& eePose0, const Pose2& eePose1, int idx) const {
73 | // }
74 |
75 | gtsam::Vector evaluateError(const gtsam::Pose2& objPose0, const gtsam::Pose2& objPose1, const gtsam::Pose2& eePose0, const gtsam::Pose2& eePose1,
76 |     const boost::optional<gtsam::Matrix&> H1 = boost::none,
77 |     const boost::optional<gtsam::Matrix&> H2 = boost::none,
78 |     const boost::optional<gtsam::Matrix&> H3 = boost::none,
79 |     const boost::optional<gtsam::Matrix&> H4 = boost::none) const {
80 | gtsam::Vector3 errVec = QSVelPushMotionRealObjEEFactor::pushVelocityError(objPose0, objPose1, eePose0, eePose1);
81 |
82 | gtsam::Matrix J1, J2, J3, J4;
83 |
84 | if (useAnalyticJacobians_) {
85 | // todo: add analytic derivative
86 | // J1 = QSVelPushMotionRealObjEEFactor::pushVelocityErrorJacobian(objPose0, objPose1, eePose0, eePose1, 1);
87 | } else {
88 |     J1 = gtsam::numericalDerivative11<gtsam::Vector3, gtsam::Pose2>(boost::bind(&QSVelPushMotionRealObjEEFactor::pushVelocityError, this, _1, objPose1, eePose0, eePose1), objPose0);
89 |     J2 = gtsam::numericalDerivative11<gtsam::Vector3, gtsam::Pose2>(boost::bind(&QSVelPushMotionRealObjEEFactor::pushVelocityError, this, objPose0, _1, eePose0, eePose1), objPose1);
90 |     J3 = gtsam::numericalDerivative11<gtsam::Vector3, gtsam::Pose2>(boost::bind(&QSVelPushMotionRealObjEEFactor::pushVelocityError, this, objPose0, objPose1, _1, eePose1), eePose0);
91 |     J4 = gtsam::numericalDerivative11<gtsam::Vector3, gtsam::Pose2>(boost::bind(&QSVelPushMotionRealObjEEFactor::pushVelocityError, this, objPose0, objPose1, eePose0, _1), eePose1);
92 | }
93 |
94 | if (H1) *H1 = J1;
95 | if (H2) *H2 = J2;
96 | if (H3) *H3 = J3;
97 | if (H4) *H4 = J4;
98 |
99 | return errVec;
100 | }
101 | };
102 |
103 | } // namespace leo
104 |
105 | #endif
--------------------------------------------------------------------------------
/leocpp/geometry/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | # Install headers
2 | file(GLOB geometry_headers "*.h")
3 | install(FILES ${geometry_headers} DESTINATION include/leo/geometry)
4 |
5 | # Build tests
6 | gtsamAddTestsGlob(geometry "tests/*.cpp" "" ${PROJECT_NAME})
--------------------------------------------------------------------------------
/leocpp/geometry/IntersectionPlanarSDFObjEEFactor.h:
--------------------------------------------------------------------------------
1 | // Copyright (c) Facebook, Inc. and its affiliates.
2 |
3 | /**
4 | * @file IntersectionPlanarSDFObjEEFactor.h
5 | * @brief Intersection factor using 2D signed distance field (vars: obj, ee poses)
6 | * @author Paloma Sodhi
7 | */
8 |
9 | #ifndef INTERSECTION_PLANAR_SDF_OBJ_EE_FACTOR_H_
10 | #define INTERSECTION_PLANAR_SDF_OBJ_EE_FACTOR_H_
11 |
12 | #include <gtsam/base/numericalDerivative.h>
13 | #include <gtsam/geometry/Pose2.h>
14 | #include <gtsam/nonlinear/NonlinearFactor.h>
15 | #include <leocpp/thirdparty/gpmp2/PlanarSDF.h>
16 | #include <leocpp/thirdparty/gpmp2/SDFexception.h>
17 | #include <boost/bind.hpp>
18 |
19 | namespace leo {
20 |
21 | // template
22 | class IntersectionPlanarSDFObjEEFactor : public gtsam::NoiseModelFactor2<gtsam::Pose2, gtsam::Pose2> {
23 | private:
24 | PlanarSDF sdf_;
25 | double eeRadius_;
26 |
27 | bool useAnalyticJacobians_;
28 |
29 | public:
30 | IntersectionPlanarSDFObjEEFactor(gtsam::Key objKey, gtsam::Key eeKey, const PlanarSDF& sdf, const double& eeRadius, const gtsam::SharedNoiseModel& model = nullptr)
31 | : NoiseModelFactor2(model, objKey, eeKey), sdf_(sdf), eeRadius_(eeRadius), useAnalyticJacobians_(false) {}
32 |
33 | gtsam::Vector1 IntersectionErrorOneSidedHinge(const gtsam::Pose2& objPose, const gtsam::Pose2& eePose) const {
34 | double dist, err;
35 | gtsam::Vector1 errVec;
36 | gtsam::Point2 eeCenter__obj = objPose.transformTo(gtsam::Point2(eePose.x(), eePose.y()));
37 |
38 | try {
39 | dist = sdf_.getSignedDistance(eeCenter__obj);
40 | } catch (SDFQueryOutOfRange&) {
41 | // std::cout << "[IntersectionPlanarSDFFactor] WARNING: SDF query pos (" << eeCenter__obj.x()
42 | // << ", " << eeCenter__obj.y() << ") out of range. Setting error to 0. " << std::endl;
43 | errVec << 0.0;
44 | return errVec;
45 | }
46 |
47 | if (dist > eeRadius_) {
48 | err = 0;
49 | } else {
50 | err = -dist + eeRadius_;
51 | }
52 | errVec << err;
53 |
54 | return errVec;
55 | }
56 |
57 | gtsam::Vector1 IntersectionErrorTwoSidedHinge(const gtsam::Pose2& objPose, const gtsam::Pose2& eePose) const {
58 | double dist, err;
59 | gtsam::Vector1 errVec;
60 | gtsam::Point2 eeCenter__obj = objPose.transformTo(gtsam::Point2(eePose.x(), eePose.y()));
61 |
62 | try {
63 | dist = sdf_.getSignedDistance(eeCenter__obj);
64 | } catch (SDFQueryOutOfRange&) {
65 | // std::cout << "[IntersectionPlanarSDFFactor] WARNING: SDF query pos (" << eeCenter__obj.x()
66 | // << ", " << eeCenter__obj.y() << ") out of range. Setting error to 0. " << std::endl;
67 | errVec << 0.0;
68 | return errVec;
69 | }
70 |
71 | if (dist > eeRadius_) {
72 | err = dist - eeRadius_;
73 | } else {
74 | err = -dist + eeRadius_;
75 | }
76 | errVec << err;
77 |
78 | return errVec;
79 | }
80 |
81 | gtsam::Vector1 IntersectionErrorOneSidedHuber(const gtsam::Pose2& objPose, const gtsam::Pose2& eePose) const {
82 | double dist, err;
83 | gtsam::Vector1 errVec;
84 | gtsam::Point2 eeCenter__obj = objPose.transformTo(gtsam::Point2(eePose.x(), eePose.y()));
85 |
86 | try {
87 | dist = sdf_.getSignedDistance(eeCenter__obj);
88 | } catch (SDFQueryOutOfRange&) {
89 | // std::cout << "[IntersectionPlanarSDFFactor] WARNING: SDF query pos (" << eeCenter__obj.x()
90 | // << ", " << eeCenter__obj.y() << ") out of range. Setting error to 0. " << std::endl;
91 | errVec << 0.0;
92 | return errVec;
93 | }
94 |
95 | // ref: Ratliff '09
96 | if (dist >= eeRadius_) {
97 | err = 0;
98 | } else if ((dist < eeRadius_) && (dist >= 0)) { // quadratic
99 | err = 0.5 * (1 / eeRadius_) * (dist - eeRadius_) * (dist - eeRadius_);
100 | } else if (dist < 0) { // linear
101 | err = -dist + 0.5 * eeRadius_;
102 | }
103 | errVec << err;
104 |
105 | return errVec;
106 | }
107 |
108 | gtsam::Vector1 IntersectionErrorTwoSidedHuber(const gtsam::Pose2& objPose, const gtsam::Pose2& eePose) const {
109 | double dist, err;
110 | gtsam::Vector1 errVec;
111 | gtsam::Point2 eeCenter__obj = objPose.transformTo(gtsam::Point2(eePose.x(), eePose.y()));
112 |
113 | try {
114 | dist = sdf_.getSignedDistance(eeCenter__obj);
115 | } catch (SDFQueryOutOfRange&) {
116 | // std::cout << "[IntersectionPlanarSDFFactor] WARNING: SDF query pos (" << eeCenter__obj.x()
117 | // << ", " << eeCenter__obj.y() << ") out of range. Setting error to 0. " << std::endl;
118 | errVec << 0.0;
119 | return errVec;
120 | }
121 |
122 | if (dist >= 2 * eeRadius_) { // linear
123 | err = dist - 0.5 * eeRadius_;
124 | } else if ((dist < 2 * eeRadius_) && (dist >= 0)) { // quadratic
125 | err = 0.5 * (1 / eeRadius_) * (dist - eeRadius_) * (dist - eeRadius_);
126 | } else if (dist < 0) { // linear
127 | err = -dist + 0.5 * eeRadius_;
128 | }
129 | errVec << err;
130 |
131 | return errVec;
132 | }
133 |
134 | gtsam::Vector evaluateError(const gtsam::Pose2& objPose, const gtsam::Pose2& eePose,
135 |     const boost::optional<gtsam::Matrix&> H1 = boost::none,
136 |     const boost::optional<gtsam::Matrix&> H2 = boost::none) const {
137 | gtsam::Vector errVec = IntersectionErrorTwoSidedHuber(objPose, eePose);
138 |
139 | gtsam::Matrix J1, J2;
140 | if (useAnalyticJacobians_) {
141 | // todo: add analytic derivative
142 | } else {
143 |     J1 = gtsam::numericalDerivative11<gtsam::Vector1, gtsam::Pose2>(boost::bind(&IntersectionPlanarSDFObjEEFactor::IntersectionErrorTwoSidedHuber, this, _1, eePose), objPose);
144 |     J2 = gtsam::numericalDerivative11<gtsam::Vector1, gtsam::Pose2>(boost::bind(&IntersectionPlanarSDFObjEEFactor::IntersectionErrorTwoSidedHuber, this, objPose, _1), eePose);
145 | }
146 |
147 | if (H1) *H1 = J1;
148 | if (H2) *H2 = J2;
149 |
150 | return errVec;
151 | }
152 | };
153 |
154 | } // namespace leo
155 |
156 | #endif
--------------------------------------------------------------------------------
/leocpp/thirdparty/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | # Install headers
2 | file(GLOB thirdparty_headers "gpmp2/*.h")
3 | install(FILES ${thirdparty_headers} DESTINATION include/leo/thirdparty/gpmp2)
4 |
5 | # Build tests
6 | gtsamAddTestsGlob(thirdparty "tests/*.cpp" "" ${PROJECT_NAME})
--------------------------------------------------------------------------------
/leocpp/thirdparty/gpmp2/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2016, Georgia Tech Research Corporation
2 | Atlanta, Georgia 30332-0415
3 | All Rights Reserved
4 |
5 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
6 |
7 | 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
8 |
9 | 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
10 |
11 | 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
12 |
13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------
/leocpp/thirdparty/gpmp2/PlanarSDF.h:
--------------------------------------------------------------------------------
1 | /**
2 | * @file PlanarSDF.h
3 | * @brief util functions for planar signed distance field
4 | * @author Jing Dong
5 | * @date May 7, 2016
6 | **/
7 |
8 | #ifndef PLANAR_SDF_H_
9 | #define PLANAR_SDF_H_
10 |
11 | #include <gtsam/base/Matrix.h>
12 | #include <gtsam/geometry/Point2.h>
13 | #include <iostream>
14 |
15 | #include <leocpp/thirdparty/gpmp2/SDFexception.h>
16 |
17 | namespace leo {
18 |
19 | /**
20 | * Signed distance field using Matrix as the data type;
21 | * the Matrix represents the X (col) & Y (row) dimensions
22 | */
23 | class PlanarSDF {
24 | public:
25 | // index and float_index are <row, col>
26 | typedef boost::tuple<size_t, size_t> index;
27 | typedef boost::tuple<double, double> float_index;
28 | typedef boost::shared_ptr<PlanarSDF> shared_ptr;
29 |
30 | private:
31 | gtsam::Point2 origin_;
32 | // geometry setting of signed distance field
33 | size_t field_rows_, field_cols_;
34 | double cell_size_;
35 | gtsam::Matrix data_;
36 |
37 | public:
38 | /// constructor
39 | PlanarSDF() : field_rows_(0), field_cols_(0), cell_size_(0.0) {}
40 |
41 | /// constructor with data
42 | PlanarSDF(const gtsam::Point2& origin, double cell_size, const gtsam::Matrix& data) : origin_(origin), field_rows_(data.rows()), field_cols_(data.cols()), cell_size_(cell_size), data_(data) {}
43 |
44 | ~PlanarSDF() {}
45 |
46 | /// given a point, query the signed distance field and (optionally) its gradient
47 | /// return signed distance
48 | inline double getSignedDistance(const gtsam::Point2& point) const {
49 | const float_index pidx = convertPoint2toCell(point);
50 | return signed_distance(pidx);
51 | }
52 |
53 | inline double getSignedDistance(const gtsam::Point2& point, gtsam::Vector2& g) const {
54 | const float_index pidx = convertPoint2toCell(point);
55 | const gtsam::Vector2 g_idx = gradient(pidx);
56 | // convert gradient of index to gradient of metric unit
57 | g = gtsam::Vector2(g_idx(1), g_idx(0)) / cell_size_;
58 | return signed_distance(pidx);
59 | }
60 |
61 | /// convert between point and cell coordinates
62 | inline float_index convertPoint2toCell(const gtsam::Point2& point) const {
63 | // check point range
64 | if (point.x() < origin_.x() || point.x() > (origin_.x() + (field_cols_ - 1.0) * cell_size_) ||
65 | point.y() < origin_.y() || point.y() > (origin_.y() + (field_rows_ - 1.0) * cell_size_)) {
66 | throw SDFQueryOutOfRange();
67 | }
68 |
69 | const double col = (point.x() - origin_.x()) / cell_size_;
70 | const double row = (point.y() - origin_.y()) / cell_size_;
71 | return boost::make_tuple(row, col);
72 | }
73 |
74 | inline gtsam::Point2 convertCelltoPoint2(const float_index& cell) const {
75 | return origin_ + gtsam::Point2(
76 | cell.get<1>() * cell_size_,
77 | cell.get<0>() * cell_size_);
78 | }
79 |
80 | /// bilinear interpolation
81 | inline double signed_distance(const float_index& idx) const {
82 | const double lr = floor(idx.get<0>()), lc = floor(idx.get<1>());
83 | const double hr = lr + 1.0, hc = lc + 1.0;
84 |     const size_t lri = static_cast<size_t>(lr), lci = static_cast<size_t>(lc),
85 |                  hri = static_cast<size_t>(hr), hci = static_cast<size_t>(hc);
86 | return (hr - idx.get<0>()) * (hc - idx.get<1>()) * signed_distance(lri, lci) +
87 | (idx.get<0>() - lr) * (hc - idx.get<1>()) * signed_distance(hri, lci) +
88 | (hr - idx.get<0>()) * (idx.get<1>() - lc) * signed_distance(lri, hci) +
89 | (idx.get<0>() - lr) * (idx.get<1>() - lc) * signed_distance(hri, hci);
90 | }
91 |
92 | /// gradient operator for bilinear interpolation
93 | /// gradient with respect to float_index
94 | /// not numerically differentiable at index points
95 | inline gtsam::Vector2 gradient(const float_index& idx) const {
96 | const double lr = floor(idx.get<0>()), lc = floor(idx.get<1>());
97 | const double hr = lr + 1.0, hc = lc + 1.0;
98 |     const size_t lri = static_cast<size_t>(lr), lci = static_cast<size_t>(lc),
99 |                  hri = static_cast<size_t>(hr), hci = static_cast<size_t>(hc);
100 | return gtsam::Vector2(
101 | (hc - idx.get<1>()) * (signed_distance(hri, lci) - signed_distance(lri, lci)) +
102 | (idx.get<1>() - lc) * (signed_distance(hri, hci) - signed_distance(lri, hci)),
103 |
104 | (hr - idx.get<0>()) * (signed_distance(lri, hci) - signed_distance(lri, lci)) +
105 | (idx.get<0>() - lr) * (signed_distance(hri, hci) - signed_distance(hri, lci)));
106 | }
107 |
108 | /// access
109 | inline double signed_distance(size_t r, size_t c) const {
110 | return data_(r, c);
111 | }
112 |
113 | const gtsam::Point2& origin() const { return origin_; }
114 | size_t x_count() const { return field_cols_; }
115 | size_t y_count() const { return field_rows_; }
116 | double cell_size() const { return cell_size_; }
117 | const gtsam::Matrix& raw_data() const { return data_; }
118 |
119 | /// print
120 | void print(const std::string& str = "") const {
121 | std::cout << str;
122 | std::cout << "field origin: ";
123 | origin_.print();
124 | std::cout << "field resolution: " << cell_size_ << std::endl;
125 | std::cout << "field size: " << field_cols_ << " x "
126 | << field_rows_ << std::endl;
127 | }
128 | };
129 |
130 | } // namespace leo
131 | #endif
--------------------------------------------------------------------------------
/leocpp/thirdparty/gpmp2/SDFexception.h:
--------------------------------------------------------------------------------
1 | /**
2 | * @file SDFexception.h
3 | * @brief custom exceptions for signed distance field
4 | * @author Jing Dong
5 | * @date May 26, 2016
6 | **/
7 |
8 | #ifndef SDF_EXCEPTION_H_
9 | #define SDF_EXCEPTION_H_
10 |
11 | #include <stdexcept>
12 |
13 | /// query out of range exception
14 | class SDFQueryOutOfRange : public std::runtime_error {
15 | public:
16 | /// constructor
17 | SDFQueryOutOfRange() : std::runtime_error("Querying SDF out of range") {}
18 | };
19 |
20 | #endif
--------------------------------------------------------------------------------
/leopy/config/baselines/lstm_net_test.yaml:
--------------------------------------------------------------------------------
1 | # scripts: test_lstm_net.py
2 |
3 | checkpoint_dir: 'sim/nav2dfix/dataset_0000/'
4 | test_dataset_dir: 'sim/nav2dfix/dataset_0000/test/'
5 |
6 | # checkpoint_dir: 'real/20200928_ellipse-pushing-test-keypoint/'
7 | # test_dataset_dir: 'real/20200928_ellipse-pushing-test-keypoint/test/'
8 |
9 | verbose: False
10 | show_plot: True
--------------------------------------------------------------------------------
/leopy/config/baselines/lstm_net_train.yaml:
--------------------------------------------------------------------------------
1 | # scripts: train_lstm_net.py
2 |
3 | dataset_name: sim/nav2dfix/dataset_0000
4 | # dataset_name: sim/nav2dtime/dataset_0001
5 |
6 | # dataset_name: real/20200928_rectangle-pushing-corners-test-keypoint
7 | # dataset_name: real/20200928_ellipse-pushing-test-keypoint
8 |
9 | prefix: ""
10 | random_seed: 0
11 |
12 | dataloader:
13 | batch_size: 32
14 | seq_len: 300
15 | shuffle: True
16 | num_workers: 8
17 |
18 | network:
19 | model: lstm
20 | hidden_size_lstm: 64
21 | num_layers_lstm: 2
22 | hidden_size_mlp: 32
23 | output_size: 3
24 |
25 | train:
26 | epochs: 30
27 | learning_rate: 1e-3
28 |
29 | reg_type: none
30 | reg_weight: 1e-4
31 |
32 | validation: True
--------------------------------------------------------------------------------
/leopy/config/dataio/nav2d.yaml:
--------------------------------------------------------------------------------
1 | env:
2 | area:
3 | xmin: -50
4 | xmax: 50
5 | ymin: 0
6 | ymax: 50
7 |
8 | options:
9 | random_seed: 0
10 |
11 | dataio:
12 | dstdir_dataset: /local/datasets/sim
13 | dataset_name: nav2dtime # nav2dfix, nav2dtime
14 |
15 | n_datasets: 1
16 | n_seqs: 50
17 |
18 | n_data_train: 30
19 |
20 | start_ds_idx: 0
21 | start_seq_idx: 0
22 |
23 | load_poses_file: True
24 | save_poses_file: False
25 |
26 | measurements:
27 | noise_models:
28 | random: False
29 |
30 | # nav2dfix/dataset_0000 (fixed_cov)
31 | odom: [1e-1, 1e-1, 1e-1]
32 | gps: [1., 1., 1e-1]
33 |
34 | # nav2dfix/dataset_0001 (fixed_cov)
35 | # odom: [1., 2., 1e-1]
36 | # gps: [2., 1., 1e-1]
37 |
38 | # nav2dtime/dataset_0000 (varying_cov)
39 | odom0: [1e-1, 1e-1, 1e-1]
40 | gps0: [1, 1, 1e-1]
41 | odom1: [1, 1, 1]
42 | gps1: [1e-1, 1e-1, 1e-1]
43 |
44 | # nav2dtime/dataset_0001 (varying_cov)
45 | # odom0: [1, 1, 1]
46 | # gps0: [1e-1, 1e-1, 1e-1]
47 | # odom1: [1e-1, 1e-1, 1e-1]
48 | # gps1: [1, 1, 1e-1]
49 |
50 | num_steps_max: 500
51 |
52 | plot:
53 | colors: [
54 | [0.4660, 0.6740, 0.1880], # gt
55 | [0, 0.4470, 0.7410], # odom
56 | # [0.4940, 0.1840, 0.5560], # gps
57 | [0.8500, 0.3250, 0.0980], # combined
58 | ]
59 |
--------------------------------------------------------------------------------
/leopy/config/dataio/push2d.yaml:
--------------------------------------------------------------------------------
1 | dataio:
2 | dstdir_dataset: /local/datasets/real
3 | srcdir_pushest: /local/datasets/pushest
4 |
5 | # dataset_name: push2d
6 |
7 | # disc datasets [*-straight-line-*, *-curves-*, *-trial1-*]
8 | # dataset_name: 20200624_pushing-6in-disc-curves-test-keypoint
9 | # dataset_name: 20200624_pushing-6in-disc-trial1-all-keypoint
10 |
11 | # rect datasets [*-edges-*, *-corners-*]
12 | # dataset_name: 20200928_rectangle-pushing-edges-test-keypoint
13 | # dataset_name: 20200928_rectangle-pushing-corners-test-keypoint
14 |
15 | # ellipse datasets [*-straight-*, *-*]
16 | dataset_name: 20200928_ellipse-pushing-test-keypoint
17 | # dataset_name: 20200928_ellipse-pushing-straight-test-keypoint
18 |
19 | num_steps: 300
20 | num_seqs: 30
21 | num_episodes_seq: 3
22 |
23 | n_data_train: 20
24 | n_data_test: 10
25 |
26 | measurements:
27 | tactile:
28 | wmin: 10
29 | wmax: 20
30 |
31 | noise_models: # sigma values
32 | random: False
33 |
34 | ee_pose_prior: [1e-5, 1e-5, 1e-5]
35 | qs_push_motion: [1e-3, 1e-3, 1e-3]
36 | sdf_intersection: [1e-2]
37 | tactile_rel_meas: [1, 1, 1e-5]
38 | binary_interseq_obj: [1e-9, 1e-9, 1e-9]
--------------------------------------------------------------------------------
/leopy/config/examples/leo_nav2d.yaml:
--------------------------------------------------------------------------------
1 | dataio:
2 |
3 | dataset_type: nav2d
4 | srcdir_dataset: /local/datasets/sim
5 |
6 | dataset_name: nav2dtime/dataset_0000 # nav2dfix, nav2dtime
7 | model_type: varying_cov # fixed_cov (for nav2dfix), varying_cov (for nav2dtime)
8 |
9 | options:
10 | seed: 0
11 |
12 | theta_init:
13 |
14 | noisy: False
15 | sigma_noise: 5
16 |
17 | sigma_inv_odom_vals: [1., 1., 1.]
18 | sigma_inv_gps_vals: [1., 1., 1.]
19 |
20 | # sigma_sq format
21 | sigma_inv_odom0_vals: [0.25, 0.25, 0.25]
22 | sigma_inv_gps0_vals: [1e-2, 1e-2, 0.25]
23 | sigma_inv_odom1_vals: [1., 1., 1.]
24 | sigma_inv_gps1_vals: [1e-2, 1e-2, 1e-2]
25 |
26 | optim:
27 | nsteps: 300 # 500
28 | verbose: False
29 |
30 | save_fig: True
31 | show_fig: False
32 | vis_step: False
33 |
34 | leo:
35 | max_iters: 100
36 | n_data_train: 5 # 30
37 | n_data_test: 1 # 20
38 | test_idx_offset: 30
39 |
40 | lr: 1e-1
41 | lmd: 0.
42 | l2_wt: 1e-3
43 |
44 | pool_processes: 4
45 | lr_scheduler: True
46 | realizability_coeff: 0. # 0: ground truth, 1: fully realizable
47 |
48 | sampler: True
49 | n_samples: 10
50 | temp: 1. # high temp -> low cov
51 |
52 | tb_flag: False
53 | save_video: False
54 |
55 | use_traj_convergence: True
56 | eps_diff_traj_err_trans: 5e-2
57 | eps_diff_traj_err_rot: 5e-2
58 | eps_traj_err_trans: 1e-1
59 | eps_traj_err_rot: 1e-1
60 |
61 | logger:
62 | enable: False
63 | save_csv: False
64 |
65 | plot:
66 | colors:
67 | gt: dimgray
68 | opt: tab:purple
69 |
70 | labels:
71 | gt: groundtruth
72 | odom: odom
73 | gps: gps
74 | opt: optimizer
75 | exp: expert
76 |
77 | dstdir: /local/plots/sim
78 |
79 | baselines:
80 |
81 | method: Nelder-Mead # Nelder-Mead, BFGS, CMAES
82 | max_fval_calls: 100
--------------------------------------------------------------------------------
/leopy/config/examples/leo_push2d.yaml:
--------------------------------------------------------------------------------
1 | dataio:
2 | srcdir_dataset: /local/datasets/real
3 |
4 | # disc datasets [*-straight-line-*, *-curves-*, *-trial1-*]
5 | # dataset_name: 20200624_pushing-6in-disc-curves-test-keypoint
6 | # dataset_name: 20200624_pushing-6in-disc-trial1-all-keypoint
7 |
8 | # rect datasets [*-edges-*, *-corners-*]
9 | # dataset_name: 20200928_rectangle-pushing-edges-test-keypoint
10 | dataset_name: 20200928_rectangle-pushing-corners-test-keypoint
11 |
12 | # ellipse datasets [*-straight-*, *-*]
13 | # dataset_name: 20200928_ellipse-pushing-test-keypoint
14 | # dataset_name: 20200928_ellipse-pushing-straight-test-keypoint
15 |
16 | obj_shape: rect # disc, rect, ellip
17 | dataset_type: push2d # push2d
18 | model_type: fixed_cov # fixed_cov, varying_cov, fixed_cov_varying_mean
19 |
20 | theta_init:
21 |
22 | sigma_inv_tactile_rel_vals: [1, 1, 1e5]
23 | sigma_inv_qs_push_motion_vals: [1e0, 1e0, 1e0]
24 |
25 | sigma_inv_ee_pose_prior_vals: [1e4, 1e4, 1e4]
26 | sigma_inv_sdf_intersection_vals: [1e2]
27 | sigma_inv_binary_interseq_obj_vals: [1e6, 1e6, 1e6]
28 |
29 | tactile_model:
30 |
31 | # linear [disc+rect+ellip, auto-keypoints]
32 | name: 10-25-2020-16-40-01_tf_regr_model_ser_epoch030
33 |
34 | norm_img_feat: True
35 | mean_img_feat: [0.2002, 0.6032]
36 | std_img_feat: [0.0367, 0.1235]
37 |
38 | # 0: disc, 1: rect-edges, 2: rect-corners, 3: ellip
39 | class_label: 2
40 | num_classes: 4
41 |
42 | yaw_only_error: True
43 | oracle: False
44 | constant: False
45 |
46 | optim:
47 | nsteps: 300
48 | verbose: False
49 |
50 | save_logger: False
51 |
52 | save_fig: True
53 | show_fig: False
54 | vis_step: False
55 |
56 | leo:
57 | max_iters: 30
58 | n_data_train: 1
59 | n_data_test: 1
60 | test_idx_offset: 20
61 |
62 | lr: 100 # 1000, 0.0001
63 | lmd: 0.
64 |
65 | pool_processes: 4
66 | lr_scheduler: True
67 | realizability_coeff: 0. # 0: ground truth, 1: fully realizable
68 |
69 | sampler: False
70 | n_samples: 1
71 |
72 | tb_flag: True
73 | save_video: False
74 |
75 | use_traj_convergence: False
76 | eps_diff_traj_err_trans: 5e-2
77 | eps_diff_traj_err_rot: 5e-2
78 | eps_traj_err_trans: 1e-1
79 | eps_traj_err_rot: 1e-1
80 |
81 | logger:
82 | cost_flag: True
83 | save_file: False
84 |
85 | plot:
86 | colors:
87 | gt: dimgray
88 | opt: tab:purple
89 |
90 | # odom: [0, 0.4470, 0.7410] # blue
91 | # gps: [0.4940, 0.1840, 0.5560] # blue
92 | # opt: [0.8500, 0.3250, 0.0980] # orange
93 | # exp: [0.4660, 0.6740, 0.1880] # green
94 |
95 | labels:
96 | gt: groundtruth
97 | odom: odom
98 | gps: gps
99 | opt: optimizer
100 | exp: expert
101 |
102 | dstdir: /local/plots/real
103 |
104 | baselines:
105 |
106 | opt_type: scipy # scipy, cmaes
107 |
108 | scipy:
109 | method: Nelder-Mead # Nelder-Mead, Powell, BFGS, CG
110 | max_fval_calls: 100
111 |
--------------------------------------------------------------------------------
/leopy/config/examples/regression.yaml:
--------------------------------------------------------------------------------
1 | model: ${leo_gn}
2 | enet: ${enet_rff}
3 | n_update: 5000
4 | n_inner_update: 1
5 | seed: 0
6 | n_hidden: 128
7 | n_samples: 100
8 | clip_norm: True
9 | lr: 1e-4
10 |
11 | n_disp_step: 200
12 | show_plot: True
13 | save_plot: False
14 | save_model: False
15 |
16 | enet_basic:
17 | class: regression.EnergyNetBasic
18 | tag: enet_basic
19 | params:
20 | n_hidden: 128
21 |
22 | enet_rff:
23 | class: regression.EnergyNetRFF
24 | tag: enet_rff
25 | params:
26 | n_hidden: 128
27 | sigma: 1.
28 | encoded_size: 128
29 |
30 | unroll_gd:
31 | class: regression.UnrollGD
32 | tag: unroll_gd
33 | params:
34 | n_inner_iter: 10
35 | inner_lr: 1e-3
36 | init_scheme: zero # zero, gt
37 |
38 | unroll_gn:
39 | class: regression.UnrollGN
40 | tag: unroll_gn
41 | params:
42 | n_inner_iter: 10
43 | inner_lr: 1 # standard GN is 1.0
44 | init_scheme: zero # zero, gt
45 |
46 | leo_gn:
47 | class: regression.LEOGN
48 | tag: leo_gn
49 | params:
50 | n_sample: 100
51 | temp: 1e9
52 | min_cov: 1e-3
53 | max_cov: 10.0
54 | n_inner_iter: 10
55 | init_scheme: zero # zero, gt
56 |
57 | dcem:
58 | class: regression.UnrollCEM
59 | tag: dcem
60 | params:
61 | n_sample: 100
62 | n_elite: 10
63 | n_iter: 10
64 | init_sigma: 1.
65 | temp: 1.
66 | normalize: True
67 |
68 | leo_cem:
69 | class: regression.LEOCEM
70 | tag: leo_cem
71 | params:
72 | n_sample: 100
73 | temp: 1.
74 | min_cov: 1e-3
75 | max_cov: 10.0
76 | cem_n_sample: 100
77 | cem_n_elite: 10
78 | cem_n_iter: 10
79 | cem_init_sigma: 7.
80 | cem_temp: 1.
81 | cem_normalize: True
--------------------------------------------------------------------------------
/leopy/requirements/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy>=1,<2
2 | scipy
3 | hydra-core==1.0.6
4 | matplotlib>=3.1.3
5 | torch
6 | higher
7 | random-fourier-features-pytorch
8 | lml @ git+https://github.com/locuslab/lml.git
9 | setproctitle
10 | ipython
11 |
12 | pandas
13 | tensorboard
14 | attrdict
15 | pytorch3d
16 | pytorch_lightning
--------------------------------------------------------------------------------
/leopy/scripts/analysis/regression_analysis.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | #%%
4 | import os
5 | import numpy as np
6 |
7 | import torch
8 | from torch import nn
9 | from torch.utils.data import DataLoader
10 | import torch.nn.functional as F
11 |
12 | import glob
13 | import math
14 |
15 | import matplotlib.pyplot as plt
16 |
17 | import sys
18 | sys.path.append('../examples')
19 | from regression import plot_energy_landscape, EnergyNetRFF
20 |
21 | # %% helper functions
22 |
23 | def compute_mse_loss(Enet, x_vec, y_vec):
24 |
25 | E_mat = torch.zeros((x_vec.shape[0], y_vec.shape[0]))
26 | for x_idx, x_val in enumerate(x_vec):
27 | for y_idx, y_val in enumerate(y_vec):
28 | E_mat[x_idx, y_idx] = torch.square(Enet(x_val.view(1,-1), y_val.view(1,-1)))
29 |
30 | min_val, min_ind = torch.min(E_mat, dim=1)
31 | y_pred = y_vec[min_ind]
32 |
33 | y_gt = x_vec * torch.sin(x_vec)
34 |
35 | loss = (y_pred - y_gt) ** 2
36 |
37 | return loss
38 |
39 | def plot_Enet(Enet, x_vec):
40 | y_gt = x_vec * torch.sin(x_vec)
41 | plot_energy_landscape(x_vec, y_gt, Enet)
42 | plt.show()
43 |
44 |
45 | #%% load different model files
46 |
47 | plt.ion()
48 | device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
49 | torch.set_printoptions(precision=2, sci_mode=False)
50 |
51 | BASE_PATH = os.path.expanduser('~/code/fair_ws/dcem/')
52 | model_type = 'leo_gn'
53 |
54 | srcdir_model = f'{BASE_PATH}/local/regression/models/{model_type}/'
55 | modelfiles = sorted(glob.glob(f"{srcdir_model}/*.pt"))
56 |
57 | print(f"Modelfiles: {modelfiles}")
58 |
59 | #%% mse loss across x for each model
60 |
61 | n_res_x, n_res_y = 20, 100
62 | x_vec = torch.linspace(0., 2.*math.pi, n_res_x)
63 | y_vec = torch.linspace(-6., 6., n_res_y)
64 |
65 | loss_mat = None
66 | for midx, modelfile in enumerate(modelfiles):
67 |
68 | Enet = EnergyNetRFF(1, 1, 128, 1, 128)
69 | Enet.load_state_dict(torch.load(modelfile))
70 | Enet.eval()
71 |
72 | # debug
73 | # plot_Enet(Enet, x_vec)
74 |
75 | loss_vec = compute_mse_loss(Enet, x_vec, y_vec)
76 | loss_mat = torch.cat((loss_mat, loss_vec.view(1, -1)),
77 | dim=0) if (loss_mat is not None) else loss_vec.view(1, -1)
78 |
79 | #%% mean variance plots
80 |
81 | mean = torch.mean(loss_mat, dim=0).detach().cpu().numpy()
82 | std = torch.std(loss_mat, dim=0).detach().cpu().numpy()
83 |
84 | scale = 1.
85 | plt.plot(x_vec, mean, color='tab:orange', linewidth=2)
86 | plt.fill_between(x_vec, mean - scale * std, mean + scale * std, color='tab:orange', alpha=0.2)
87 |
88 | plt.ylim([-20, 80])
89 |
--------------------------------------------------------------------------------
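
For reference, compute_mse_loss above predicts y by brute force: it evaluates the energy on a 1-D grid of y values for every x and takes the grid argmin. A self-contained sketch of that prediction rule, with a toy quadratic energy standing in for EnergyNetRFF (all names here are illustrative):

import math
import torch

def energy(x, y):
    # toy energy whose minimum over y sits at y = x * sin(x)
    return (y - x * torch.sin(x)) ** 2

x_vec = torch.linspace(0., 2. * math.pi, 20)
y_vec = torch.linspace(-6., 6., 100)
E_mat = torch.stack([energy(x, y_vec) for x in x_vec])  # 20 x 100 energy grid
y_pred = y_vec[E_mat.argmin(dim=1)]                     # argmin over the y grid
mse = torch.mean((y_pred - x_vec * torch.sin(x_vec)) ** 2)
print(float(mse))  # near zero, limited by the grid resolution
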
/leopy/scripts/analysis/visualize_traj_samples.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import sys
4 | sys.path.append("/usr/local/cython/")
5 |
6 | import numpy as np
7 | import os
8 |
9 | import pandas as pd
10 | import glob
11 | import hydra
12 |
13 | import matplotlib.pyplot as plt
14 | from matplotlib import cm
15 |
16 | from leopy.utils import tf_utils
17 |
18 | import logging
19 | log = logging.getLogger(__name__)
20 |
21 | plt.rcParams.update({'font.size': 18})
22 |
23 | dataset_type = "nav2d" # "nav2d", "push2d"
24 |
25 | BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../.."))
26 | CONFIG_PATH = os.path.join(BASE_PATH, f"leopy/config/examples/leo_{dataset_type}.yaml")
27 |
28 | def pandas_string_to_numpy(arr_str):
29 | arr_npy = np.fromstring(arr_str.replace('\n', '').replace('[', '').replace(']', '').replace(' ', ' '), sep=', ')
30 | return arr_npy
31 |
32 | def compute_x_samples(mean, dx_samples):
33 | # mean: n_steps x 3
34 | # dx_samples: n_samples x n_steps x 3
35 |
36 | S, T, D = dx_samples.shape
37 | x_samples = np.zeros(dx_samples.shape)
38 |
39 | for sidx in range(0, S):
40 | for tidx in range(0, T):
41 | x_mean = tf_utils.vec3_to_pose2(mean[tidx, :])
42 | x_sample_pose = x_mean.retract(dx_samples[sidx, tidx, :])
43 | x_samples[sidx, tidx, :] = tf_utils.pose2_to_vec3(x_sample_pose)
44 |
45 | return x_samples
46 |
47 | def traj_samples(logfiles):
48 |
49 | data_idx = 0
50 | nsteps = 499
51 | leo_itrs = 40
52 | n_samples = 40
53 | cov_scale = 10.
54 |
55 | plt.ion()
56 | fig = plt.figure(constrained_layout=True, figsize=(8, 4))
57 | ax1 = plt.gca()
58 |
59 | colors = np.vstack((
60 | # cm.Reds(np.linspace(0.2, 0.2, num=n_samples)),
61 | cm.Blues(np.linspace(0.4, 0.4, num=int(0.5*n_samples))),
62 | cm.Blues(np.linspace(0.4, 0.4, num=int(0.5*n_samples)))
63 | ))
64 |
65 | tstart = 25
66 | tend = 425
67 |
68 | for file_idx in range(0, len(logfiles)):
69 |
70 | print(f'Loading csv: {logfiles[file_idx]}')
71 | df = pd.read_csv(f'{logfiles[file_idx]}')
72 |
73 | row_filter_1 = (df.data_idx == data_idx) & (df.tstep == nsteps)
74 |
75 | for itr in range(0, leo_itrs):
76 | plt.cla()
77 | plt.xlim([-50, 50])
78 | plt.ylim([0, 60])
79 | plt.axis('off')
80 |
81 | row_filter_2 = row_filter_1 & (df.leo_itr == itr)
82 |
83 | mean = (df.loc[row_filter_2, ['opt/mean']]).iloc[0, 0]
84 | covariance = (df.loc[row_filter_2, ['opt/covariance']]).iloc[0, 0]
85 | poses_gt = (df.loc[row_filter_2, ['opt/gt/poses2d']]).iloc[0, 0]
86 |
87 | mean = pandas_string_to_numpy(mean)
88 | covariance = pandas_string_to_numpy(covariance)
89 | poses_gt = pandas_string_to_numpy(poses_gt)
90 |
91 | covariance = covariance.reshape(mean.shape[0], mean.shape[0])
92 | covariance = cov_scale * covariance
93 | dx_samples = np.random.multivariate_normal(np.zeros(mean.shape[0]), covariance, n_samples)
94 |
95 | mean = mean.reshape(-1, 3)
96 | dx_samples = dx_samples.reshape(dx_samples.shape[0], -1, 3)
97 | x_samples = compute_x_samples(mean, dx_samples)
98 | poses_gt = poses_gt.reshape(-1, 3)
99 |
100 | # plotting
101 | for sidx in range(0, n_samples):
102 | plt.plot(x_samples[sidx, tstart:tend, 0], x_samples[sidx, tstart:tend, 1], linewidth=4, linestyle='-', color=colors[sidx])
103 |
104 | plt.plot(poses_gt[tstart:tend, 0], poses_gt[tstart:tend, 1], linewidth=4, linestyle='--', color='tab:grey')
105 | plt.plot(mean[tstart:tend, 0], mean[tstart:tend, 1], linewidth=3, linestyle='-', color='tab:blue')
106 |
107 | plt.show()
108 | plt.pause(1e-2)
109 | plt.savefig(f"{BASE_PATH}/local/plots/traj_samples/{dataset_type}/leo_itr_{itr:04d}")
110 |
111 | # import pdb; pdb.set_trace()  # debug: pause between log files
112 |
113 | @hydra.main(config_name=CONFIG_PATH)
114 | def main(cfg):
115 |
116 | srcdir = f"{BASE_PATH}/local/datalogs/{cfg.dataio.dataset_name}"
117 | logfiles = sorted(glob.glob(f"{srcdir}/**/*.csv"), reverse=True)
118 |
119 | traj_samples(logfiles)
120 |
121 | if __name__ == '__main__':
122 | main()
123 |
--------------------------------------------------------------------------------
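
compute_x_samples above draws zero-mean perturbations in the tangent space and maps them through a retraction on SE(2). A numpy sketch of the same idea, with a simplified retract in place of gtsam's Pose2 retract (vec3_to_pose2 / pose2_to_vec3 replaced by explicit [x, y, theta] handling):

import numpy as np

def retract_se2(pose, delta):
    # simplified retract: apply the translational delta in the pose's local
    # frame and add the heading delta (gtsam's Pose2 retract uses the full
    # exponential map, but the idea is the same)
    x, y, th = pose
    dx, dy, dth = delta
    c, s = np.cos(th), np.sin(th)
    return np.array([x + c * dx - s * dy, y + s * dx + c * dy, th + dth])

mean = np.array([1.0, 2.0, 0.1])
cov = 0.01 * np.eye(3)
dx_samples = np.random.multivariate_normal(np.zeros(3), cov, size=5)
x_samples = np.stack([retract_se2(mean, d) for d in dx_samples])
print(x_samples.shape)  # (5, 3): n_samples x [x, y, theta]
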
/leopy/scripts/baselines/blackbox_nav2d.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 |
4 | import os
5 | import json
6 | import hydra
7 | from functools import partial
8 | import numpy as np
9 | from scipy.optimize import minimize
10 | import cma
11 | from datetime import datetime
12 | import torch
13 | from torch.autograd import Variable
14 | import torch.nn as nn
15 | import torch.optim as optim
16 | import torch.multiprocessing as mp
17 | from torch.utils.tensorboard import SummaryWriter
18 |
19 | from leopy.optim import cost, gtsamopt, sampler
20 | from leopy.algo.leo_obs_models import *
21 |
22 | from leopy.eval import quant_metrics
23 | from leopy.utils import tf_utils, dir_utils, vis_utils
24 |
25 | BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../.."))
26 | CONFIG_PATH = os.path.join(BASE_PATH, "leopy/config/examples/leo_nav2d.yaml")
27 |
28 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
29 | FVAL_CALLS = 0
30 | log_dict = {'itr': [], 'traj_err_trans_train': [], 'traj_err_rot_train': [], 'traj_err_trans_test': [], 'traj_err_rot_test': []}
31 |
32 | def load_dataset(params, idx, dataset_mode="train"):
33 |
34 | idx_offset = 0 if (dataset_mode == "train") else params.leo.n_data_train
35 |
36 | filename = "{0}/{1}/{2}/{3}/{4:04d}.json".format(
37 | params.BASE_PATH, params.dataio.srcdir_dataset, params.dataio.dataset_name, dataset_mode, idx+idx_offset)
38 |
39 | dataset = dir_utils.read_file_json(filename, verbose=False)
40 |
41 | return dataset
42 |
43 | def optimizer_soln(theta, params, data_idx, dataset_mode="train"):
44 |
45 | # load data
46 | data = load_dataset(params, data_idx, dataset_mode)
47 | params.dataio.data_idx = "{0:04d}".format(data_idx)
48 |
49 | cost_obj = cost.Cost(data, theta, params=params, device=device)
50 | mean, cov = gtsamopt.run_optimizer(cost_obj, params=params)
51 |
52 | # x_opt: n_samples x n_x x dim_x (sampler=True), x_opt: n_x x dim_x (sampler=False)
53 | x_opt = sampler.sampler_gaussian(mean, cov, n_samples=params.leo.n_samples)
54 |
55 | return x_opt, data
56 |
57 | def groundtruth_poses(data, params):
58 | if (params.dataio.dataset_type == "nav2d"):
59 | x_gt = np.array(data['poses_gt'][0:params.optim.nsteps])
60 |
61 | return x_gt
62 |
63 | def loss_nav2d(x_opt, x_gt):
64 | err_trans, err_rot = quant_metrics.traj_error(xyh_est=x_opt, xyh_gt=x_gt)
65 | loss = err_trans + err_rot
66 | return loss
67 |
68 | def cost_fn_nav2d(theta_vals, params):
69 |
70 | global FVAL_CALLS
71 | FVAL_CALLS = FVAL_CALLS + 1
72 | print("[cost_fn_nav2d] FVAL_CALLS: {0}".format(FVAL_CALLS))
73 |
74 | theta = theta_vals_to_obj(theta_vals, params)
75 | n_data = params.leo.n_data_train
76 |
77 | # parallelized optimizer run
78 | pool = mp.Pool(processes=params.leo.pool_processes)
79 | optimizer_soln_fn = partial(optimizer_soln, theta, params)
80 | data_idxs = np.arange(0, n_data)
81 | result_opt = pool.map(optimizer_soln_fn, data_idxs)
82 | pool.close()
83 | pool.join()
84 |
85 | loss = 0.0
86 | for data_idx in range(0, n_data):
87 | # x_opt, data = optimizer_soln(theta, params, data_idx) # serial run
88 | x_opt, data = result_opt[data_idx][0], result_opt[data_idx][1]
89 | x_gt = groundtruth_poses(data, params)
90 |
91 | loss = loss + loss_nav2d(x_opt, x_gt)
92 |
93 | loss = loss / n_data
94 |
95 | return loss
96 |
97 | def traj_error_final(theta_vals, params, dataset_mode="train"):
98 | theta = theta_vals_to_obj(theta_vals, params)
99 |
100 | n_data = params.leo.n_data_train if (dataset_mode == "train") else params.leo.n_data_test
101 | traj_err_trans, traj_err_rot = 0., 0.
102 |
103 | for data_idx in range(0, n_data):
104 | x_opt, data = optimizer_soln(theta, params, data_idx, dataset_mode)
105 | x_gt = groundtruth_poses(data, params)
106 |
107 | err_trans, err_rot = quant_metrics.traj_error(xyh_est=x_opt, xyh_gt=x_gt)
108 | traj_err_trans = traj_err_trans + err_trans
109 | traj_err_rot = traj_err_rot + err_rot
110 |
111 | traj_err_trans = 1/n_data * traj_err_trans
112 | traj_err_rot = 1/n_data * traj_err_rot
113 |
114 | return traj_err_trans, traj_err_rot
115 |
116 | def theta_vals_to_obj(theta_vals, params):
117 | if (params.dataio.dataset_type == "nav2d"):
118 | if (params.dataio.model_type == "fixed_cov"):
119 | theta = ThetaNav2dFixedCov(
120 | sigma_inv_odom_vals=theta_vals[0:3], sigma_inv_gps_vals=theta_vals[3:6])
121 | elif (params.dataio.model_type == "varying_cov"):
122 | theta = ThetaNav2dVaryingCov(sigma_inv_odom0_vals=theta_vals[0:3], sigma_inv_gps0_vals=theta_vals[3:6],
123 | sigma_inv_odom1_vals=theta_vals[6:9], sigma_inv_gps1_vals=theta_vals[9:12])
124 | return theta
125 |
126 | def init_theta_vals(params):
127 | if (params.dataio.dataset_type == "nav2d"):
128 | if (params.dataio.model_type == "fixed_cov"):
129 | theta_vals = np.array(params.theta_init.sigma_inv_odom_vals + params.theta_init.sigma_inv_gps_vals)
130 | elif (params.dataio.model_type == "varying_cov"):
131 | theta_vals = np.array(params.theta_init.sigma_inv_odom0_vals + params.theta_init.sigma_inv_gps0_vals +
132 | params.theta_init.sigma_inv_odom1_vals + params.theta_init.sigma_inv_gps1_vals)
133 |
134 | return theta_vals
135 |
136 | def callback_scipyopt(xk, params):
137 | global FVAL_CALLS
138 |
139 | theta_vals_curr = xk
140 | print("theta_vals_curr: {0}".format(theta_vals_curr))
141 |
142 | # train data rmse errors
143 | traj_err_trans_train, traj_err_rot_train = traj_error_final(theta_vals_curr, params, dataset_mode="train")
144 | print("[baselines::train] fval calls {0}/{1}, traj_err_trans_train: {2}, traj_err_rot_train: {3}".format(
145 | FVAL_CALLS, params.baselines.max_fval_calls - 1, traj_err_trans_train, traj_err_rot_train))
146 |
147 | # test data rmse errors
148 | traj_err_trans_test, traj_err_rot_test = traj_error_final(theta_vals_curr, params, dataset_mode="test")
149 | print("[baselines::test] fval calls {0}/{1}, traj_err_trans_test: {2}, traj_err_rot_test: {3}".format(
150 | FVAL_CALLS, params.baselines.max_fval_calls - 1, traj_err_trans_test, traj_err_rot_test))
151 |
152 | # log values
153 | log_dict['itr'].append(FVAL_CALLS)
154 | log_dict['traj_err_trans_train'].append(traj_err_trans_train)
155 | log_dict['traj_err_rot_train'].append(traj_err_rot_train)
156 | log_dict['traj_err_trans_test'].append(traj_err_trans_test)
157 | log_dict['traj_err_rot_test'].append(traj_err_rot_test)
158 | filename = "{0}/{1}/{2}/{3}_{4}_errors.csv".format(
159 | params.BASE_PATH, params.plot.dstdir, params.dataio.dataset_name, params.dataio.prefix, params.baselines.method)
160 | dir_utils.write_dict_of_lists_to_csv(filename, log_dict)
161 |
162 | # termination criteria
163 | if (FVAL_CALLS > params.baselines.max_fval_calls):
164 | print("*** Terminating *** \n FVAL_CALLS {0} > params.baselines.max_fval_calls {1}".format(FVAL_CALLS, params.baselines.max_fval_calls))
165 | assert False
166 | if (traj_err_trans_train < params.leo.eps_traj_err_trans) & (traj_err_rot_train < params.leo.eps_traj_err_rot):
167 | print("*** Terminating *** \n traj_err_trans {0} < eps_traj_err_trans {1} & traj_err_rot {2} < eps_traj_err_rot {3} ".format(
168 | traj_err_trans_train, params.leo.eps_traj_err_trans, traj_err_rot_train, params.leo.eps_traj_err_rot))
169 | assert False
170 |
171 | def run(params):
172 |
173 | params.optim.save_fig = False
174 | mp.set_start_method('spawn')
175 | params.dataio.prefix = datetime.now().strftime("%m-%d-%Y-%H-%M-%S")
176 |
177 | # initialize cost function
178 | if (params.dataio.dataset_type == "nav2d"):
179 | cost_fn = lambda theta : cost_fn_nav2d(theta, params)
180 |
181 | # initialize theta params to be optimized
182 | theta_vals_init = init_theta_vals(params)
183 |
184 | # call black-box optimizer
185 | print("Running optimizer {0}".format(params.baselines.method))
186 | def callback_fn(x): return callback_scipyopt(x, params)
187 |
188 | if (params.baselines.method == "CMAES"):
189 | theta_vals_final, es = cma.fmin2(cost_fn, theta_vals_init, 2., {'maxfevals': params.baselines.max_fval_calls, 'verb_disp': 1, 'bounds': [0.1, 1e6]})
190 | callback_fn(theta_vals_final)
191 | else:
192 | result_optimizer = minimize(
193 | cost_fn, x0=theta_vals_init, method=params.baselines.method, callback=callback_fn)
194 | theta_vals_final = result_optimizer.x
195 |
196 | # print final errors
197 | err_trans, err_rot = traj_error_final(theta_vals_final, params)
198 | print("theta_final: {0}, err_trans: {1}, err_rot: {2}".format(theta_vals_final, err_trans, err_rot))
199 |
200 |
201 |
202 | @hydra.main(config_name=CONFIG_PATH)
203 | def main(cfg):
204 |
205 | print(cfg.pretty())
206 |
207 | cfg.BASE_PATH = BASE_PATH
208 | run(cfg)
209 |
210 | if __name__ == '__main__':
211 | main()
212 |
213 |
214 |
215 |
216 |
--------------------------------------------------------------------------------
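
cost_fn_nav2d above fans the per-datum optimizer runs out over a process pool by currying the shared arguments with functools.partial. A minimal runnable sketch of that pattern (work is a stand-in for optimizer_soln):

import numpy as np
from functools import partial
import multiprocessing as mp

def work(theta, params, data_idx):
    # stand-in for optimizer_soln(theta, params, data_idx)
    return data_idx * float(np.sum(theta))

if __name__ == "__main__":
    theta, params = np.ones(3), None
    with mp.Pool(processes=2) as pool:
        results = pool.map(partial(work, theta, params), range(4))
    print(results)  # [0.0, 3.0, 6.0, 9.0]
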
/leopy/scripts/baselines/lstm_utils.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import math
4 | import json
5 |
6 | import numpy as np
7 |
8 | import torch
9 | import torch.nn as nn
10 | import torch.optim as optim
11 | from torch.utils.data import Dataset
12 |
13 | import pytorch_lightning as pl
14 |
15 | def wrap_to_pi(arr):
16 | arr_wrap = (arr + math.pi) % (2 * math.pi) - math.pi
17 | return arr_wrap
18 |
19 | def vecToRot(v):
20 | return torch.tensor(([ [np.cos(v[2]), -np.sin(v[2]), 0], \
21 | [np.sin(v[2]), np.cos(v[2]), 0], \
22 | [0,0,1]] ))
23 |
24 | def vecToTransform(v):
25 | return torch.tensor(([ [np.cos(v[2]), -np.sin(v[2]), v[0]], \
26 | [np.sin(v[2]), np.cos(v[2]), v[1]], \
27 | [0,0,1]] ))
28 |
29 | def transformToVec(T):
30 | return torch.tensor(([ T[0,2], T[1,2], torch.atan2(T[1,0], T[0,0])]))
31 |
32 | def read_file_json(filename, verbose=False):
33 | data = None
34 | with open(filename) as f:
35 | data = json.load(f)
36 |
37 | if verbose:
38 | print("Loaded file: {0}". format(filename))
39 |
40 | return data
41 |
42 | class SequenceDataset(Dataset):
43 | def __init__(self, seq_len, device='cpu'):
44 | self.seq_len = seq_len
45 |
46 | def __len__(self):
47 | # Account for seq length
48 | return self.x.shape[0]-self.seq_len+1
49 |
50 | def __getitem__(self, idx):
51 | end_idx = idx+self.seq_len
52 | return self.x[idx:end_idx,:], self.y[idx:end_idx,:]
53 |
54 | def get_input_dim(self):
55 | return self.x.shape[1]
56 |
57 | def get_output_dim(self):
58 | return self.y.shape[1]
59 |
60 | def get_input_sequence(self):
61 | return self.x
62 |
63 | def get_output_sequence(self):
64 | return self.y[:self.x.shape[0], :]
65 |
66 | class Nav2dFixDataset(SequenceDataset):
67 | def __init__(self, filename, seq_len, device='cpu'):
68 | super().__init__(seq_len)
69 |
70 | data = read_file_json(filename)
71 |
72 | # TODO: check for off-by-one errors
73 | poses = torch.tensor(data["poses_gt"][1::], device=device)
74 | poses[:,2] = wrap_to_pi(poses[:,2])
75 |
76 | factors = data["factor_meas"]
77 | gps_meas = torch.tensor(factors[0:len(factors)-1:2], device=device)
78 | odom_meas = torch.tensor(factors[1::2], device=device)
79 |
80 | # T0_inv = torch.inverse(vecToTransform(poses[0]))
81 | # for i in range(len(poses)):
82 | # p = poses[i,:]
83 | # T = vecToTransform(p)
84 | # T_new = torch.matmul(T0_inv,T)
85 | # poses[i,:] = transformToVec(T_new)
86 | # for i in range(len(gps_meas)):
87 | # m = gps_meas[i,:]
88 | # Tm = torch.matmul(T0_inv, vecToTransform(m))
89 | # gps_meas[i,:] = transformToVec(Tm)
90 |
91 | self.x = torch.cat((gps_meas, odom_meas), dim=1)
92 | self.y = poses
93 |
94 | print(self.x.shape, self.y.shape)
95 |
96 | class Push2dDataset(SequenceDataset):
97 | def __init__(self, filename, seq_len, device='cpu'):
98 | super().__init__(seq_len)
99 |
100 | data = read_file_json(filename)
101 |
102 | # TODO: check for off-by-one errors
103 | poses = torch.tensor(data["obj_poses_gt"], device=device)
104 | poses[:,2] = wrap_to_pi(poses[:,2])
105 |
106 | ee_meas = torch.tensor(data["meas_ee_prior"], device=device)
107 | tactile_fts = torch.tensor(data["meas_tactile_img_feats"], device=device)
108 |
109 | T0_inv = torch.inverse(vecToTransform(poses[0]))
110 | for i in range(len(poses)):
111 | p = poses[i,:]
112 | T = vecToTransform(p)
113 | T_new = torch.matmul(T0_inv,T)
114 | poses[i,:] = transformToVec(T_new)
115 | for i in range(len(ee_meas)):
116 | m = ee_meas[i,:]
117 | Tm = torch.matmul(T0_inv, vecToTransform(m))
118 | ee_meas[i,:] = transformToVec(Tm)
119 |
120 | self.x = torch.cat((ee_meas, tactile_fts), dim=1)
121 | self.y = poses
122 |
123 | print(self.x.shape, self.y.shape)
124 |
125 |
126 | class Pose2dLSTM(nn.Module):
127 | def __init__(self, input_size, hidden_size_lstm, \
128 | num_layers_lstm, hidden_size_mlp):
129 | super(Pose2dLSTM, self).__init__()
130 |
131 | # Parameters
132 | self.hidden_size_lstm = hidden_size_lstm
133 | self.num_layers_lstm = num_layers_lstm
134 | self.hidden_size_mlp = hidden_size_mlp
135 |
136 | # Model layers
137 | self.lstm = torch.nn.LSTM(input_size, hidden_size_lstm, num_layers_lstm, batch_first=True)
138 | self.fc1 = nn.Linear(hidden_size_lstm, hidden_size_mlp)
139 | self.fc2 = nn.Linear(hidden_size_mlp, hidden_size_mlp)
140 | self.fc3 = nn.Linear(hidden_size_mlp, 4)
141 |
142 | self.relu = nn.ReLU()
143 | self.tanh = nn.Tanh()
144 |
145 | def forward(self, x):
146 | batch_size = x.shape[0]
147 |
148 | # LSTM
149 | h0 = torch.zeros(self.num_layers_lstm, batch_size, self.hidden_size_lstm, device=x.device)
150 | c0 = torch.zeros(self.num_layers_lstm, batch_size, self.hidden_size_lstm, device=x.device)
151 | lstm_out, (hn, cn) = self.lstm(x, (h0, c0))
152 |
153 | # MLP output
154 | tmp1 = self.relu(self.fc1(lstm_out))
155 | tmp2 = self.relu(self.fc2(tmp1))
156 | tmp3 = self.fc3(tmp2)
157 | out = torch.zeros((tmp3.shape[0], tmp3.shape[1], 3), device=x.device)
158 | out[:,:,:2] = tmp3[:,:,:2]
159 | # tmp4 = self.tanh(tmp3[:,:,2:])
160 | tmp4 = tmp3[:,:,2:]
161 | out[:,:,2] = torch.atan2(tmp4[:,:,0], tmp4[:,:,1])
162 |
163 | return out
164 |
165 |
166 | class LSTMPoseSeqNet(pl.LightningModule):
167 | def custom_loss(self, output, target, w=100.0):
168 | t1 = output[:,:,:2]
169 | t2 = target[:,:,:2]
170 | r1 = output[:,:,2]
171 | r2 = target[:,:,2]
172 | loss_t = torch.sum((t1 - t2)**2)
173 | # loss_r = torch.sum(torch.minimum((r1 - r2)**2, (r1 - r2 - 2*np.pi)**2))
174 | loss_r = torch.sum(1.0 - torch.cos(r1-r2))
175 | loss = loss_t/t1.numel() + w*loss_r/r1.numel()
176 | return loss
177 |
178 | def __init__(self, params, input_size, tb_writer=None):
179 | super().__init__()
180 |
181 | # init config
182 | self.params = params
183 | self.learning_rate = self.params.train.learning_rate
184 | self.tb_writer = tb_writer
185 |
186 | # init model
187 | self.model = Pose2dLSTM(input_size=input_size, hidden_size_lstm=params.network.hidden_size_lstm,
188 | num_layers_lstm=params.network.num_layers_lstm, hidden_size_mlp=params.network.hidden_size_mlp)
189 |
190 | # init loss
191 | self.loss = lambda x, y: self.custom_loss(x, y)
192 | # self.loss = lambda x, y: nn.MSELoss()(x, y)
193 |
194 | # For loading model?
195 | self.save_hyperparameters('params')
196 | self.save_hyperparameters('input_size')
197 |
198 | def forward(self, x, *args):
199 | return self.model(x, *args)
200 |
201 | def configure_optimizers(self):
202 | weight_decay = self.params.train.reg_weight if (self.params.train.reg_type == "l2") else 0.0
203 | optimizer = optim.Adam(self.parameters(), lr=self.learning_rate, weight_decay=weight_decay)
204 | return optimizer
205 |
206 | def training_step(self, batch, batch_idx):
207 | meas, poses_gt = batch
208 |
209 | pred = self.forward(meas)
210 | actual = poses_gt
211 |
212 | loss = self.loss(pred, actual)
213 |
214 | return {'loss': loss}
215 |
216 | def training_epoch_end(self, train_step_outputs):
217 | avg_train_loss = torch.Tensor([x['loss'] for x in train_step_outputs]).mean()
218 | self.tb_writer.add_scalar("train/loss/epoch", avg_train_loss, self.trainer.current_epoch)
219 |
220 | def validation_step(self, batch, batch_idx):
221 | results = self.training_step(batch, batch_idx)
222 | return results
223 |
224 | def validation_epoch_end(self, val_step_outputs):
225 | avg_val_loss = torch.Tensor([x['loss'] for x in val_step_outputs]).mean()
226 | self.tb_writer.add_scalar("val/loss/epoch", avg_val_loss, self.trainer.current_epoch)
227 |
228 | self.log('val_loss', avg_val_loss)
--------------------------------------------------------------------------------
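
The 1 - cos(r1 - r2) term in custom_loss above is the wrap-around-safe rotation penalty: headings that differ by a full turn incur (essentially) zero loss, unlike a raw squared difference. A quick check:

import math
import torch

r1 = torch.tensor([0.1])
r2 = torch.tensor([0.1 + 2. * math.pi])      # same heading, one extra wrap

print(torch.sum((r1 - r2) ** 2))             # ~39.5: raw squared error blows up
print(torch.sum(1.0 - torch.cos(r1 - r2)))   # ~0: cosine loss is wrap-invariant
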
/leopy/scripts/baselines/test_lstm_net.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import numpy as np
4 | import math
5 | import os
6 |
7 | import hydra
8 | import glob
9 |
10 | import torch
11 | import pytorch_lightning as pl
12 | from pytorch_lightning.callbacks import ModelCheckpoint
13 |
14 | from lstm_utils import Nav2dFixDataset, Push2dDataset, LSTMPoseSeqNet
15 |
16 | import matplotlib.pyplot as plt
17 | # from leopy.eval import quant_metrics
18 | # from leopy.utils import vis_utils
19 |
20 | BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../.."))
21 | CONFIG_PATH = os.path.join(BASE_PATH, "leopy/config/baselines/lstm_net_test.yaml")
22 |
23 | # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
24 | device = "cpu"
25 |
26 | def wrap_to_pi(arr):
27 | arr_wrap = (arr + math.pi) % (2 * math.pi) - math.pi
28 | return arr_wrap
29 |
30 |
31 | def init_plots(n_figs=1, figsize=(12, 8), interactive=True):
32 | if interactive:
33 | plt.ion()
34 |
35 | plt.close('all')
36 | figs = []
37 | for fid in range(0, n_figs):
38 | figs.append(plt.figure(constrained_layout=True, figsize=figsize))
39 |
40 | return figs
41 |
42 | def traj_error(xyh_est, xyh_gt, err_type="rmse"):
43 |
44 | if (err_type == "rmse"):
45 | diff = xyh_gt - xyh_est
46 | diff[:, 2] = wrap_to_pi(diff[:, 2])
47 | diff_sq = diff**2
48 |
49 | rmse_trans = np.sqrt(np.mean(diff_sq[:, 0:2].flatten()))
50 | rmse_rot = np.sqrt(np.mean(diff_sq[:, 2].flatten()))
51 | error = (rmse_trans, rmse_rot)
52 |
53 | elif (err_type == "ate"):
54 | raise NotImplementedError("'ate' trajectory error is not implemented")
55 |
56 | return error
57 |
58 | def test_model(cfg):
59 |
60 | figs = init_plots(n_figs=1, figsize=(8,6))
61 |
62 | # Load model checkpoint
63 | checkpoint_dir = f"{BASE_PATH}/local/checkpoints/{cfg.checkpoint_dir}"
64 | checkpoint_file = sorted(glob.glob(f"{checkpoint_dir}/*.ckpt"), reverse=True)[0] # latest ckpt
65 |
66 | model = LSTMPoseSeqNet.load_from_checkpoint(checkpoint_file)
67 | model.eval()
68 |
69 | # Test data
70 | dataset_dir = f"{BASE_PATH}/local/datasets/{cfg.test_dataset_dir}"
71 | dataset_files = sorted(glob.glob(f"{dataset_dir}/*.json"), reverse=False)
72 | dataset_type = cfg.test_dataset_dir.partition('/')[0] # sim, real
73 |
74 | print(f"Running model checkpoint {checkpoint_file} on dataset {dataset_dir}")
75 |
76 | err_trans_test, err_rot_test = np.zeros((len(dataset_files), 1)), np.zeros((len(dataset_files), 1))
77 | for ds_idx, dataset_file in enumerate(dataset_files):
78 |
79 | if dataset_type == "sim":
80 | test_dataset = Nav2dFixDataset(dataset_file, 1, device)
81 | elif dataset_type == "real":
82 | test_dataset = Push2dDataset(dataset_file, 1, device)
83 |
84 | with torch.no_grad():
85 | y_pred = model.forward(test_dataset.get_input_sequence().unsqueeze(0))
86 | y_gt = test_dataset.get_output_sequence().unsqueeze(0)
87 | test_loss = model.loss(y_pred, y_gt)
88 |
89 | y_pred_np = (y_pred.squeeze(0)).detach().cpu().numpy()
90 | y_gt_np = (y_gt.squeeze(0)).detach().cpu().numpy()
91 | # np.set_printoptions(threshold=5000)
92 | # print(y_pred_np)
93 | # print(y_gt_np)
94 |
95 | err_trans, err_rot = traj_error(xyh_est=y_pred_np, xyh_gt=y_gt_np)
96 | err_trans_test[ds_idx, :] = err_trans
97 | err_rot_test[ds_idx, :] = err_rot
98 |
99 | if cfg.verbose:
100 | print(f"File: {dataset_file}\n loss: {test_loss}, err_trans: {err_trans}, err_rot: {err_rot}")
101 | # torch.set_printoptions(threshold=5000)
102 | # print(y_test_pred)
103 | # print(y_gt)
104 |
105 | if cfg.show_plot:
106 | plt.cla()
107 |
108 | plt.plot(y_gt_np[:, 0], y_gt_np[:, 1], linewidth=3, linestyle='--', color='tab:grey')
109 | plt.plot(y_pred_np[:, 0], y_pred_np[:, 1], linewidth=3, linestyle='-', color='#d95f02', alpha=0.8)
110 |
111 | plt.xlabel('x (m)')
112 | plt.ylabel('y (m)')
113 | plt.axis('off')
114 |
115 | plt.show()
116 | plt.pause(1e-1)
117 |
118 | print(f"*** Final error statistics for dataset {cfg.test_dataset_dir} ***")
119 | print(f"err_trans_mean: {np.mean(err_trans_test)}, err_trans_stdev: {np.std(err_trans_test)}")
120 | print(f"err_rot_mean: {np.mean(err_rot_test)}, err_rot_stdev: {np.std(err_rot_test)}")
121 |
122 | @hydra.main(config_name=CONFIG_PATH)
123 | def main(cfg):
124 |
125 | test_model(cfg)
126 |
127 | if __name__ == '__main__':
128 | main()
129 |
--------------------------------------------------------------------------------
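
traj_error above wraps the heading residual before squaring, so a full-turn offset does not inflate the rotational RMSE. A small worked example with hypothetical poses:

import numpy as np

xyh_gt = np.array([[0., 0., 0.], [1., 0., 0.]])
xyh_est = np.array([[0.1, 0., 0.], [1., 0.1, 2. * np.pi]])  # heading off by a full wrap

diff = xyh_gt - xyh_est
diff[:, 2] = (diff[:, 2] + np.pi) % (2 * np.pi) - np.pi  # wrap_to_pi
rmse_trans = np.sqrt(np.mean((diff[:, 0:2] ** 2).flatten()))
rmse_rot = np.sqrt(np.mean((diff[:, 2] ** 2).flatten()))
print(rmse_trans, rmse_rot)  # ~0.0707, ~0.0: the 2*pi heading offset vanishes
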
/leopy/scripts/baselines/train_lstm_net.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import numpy as np
4 | import os
5 |
6 | import hydra
7 | from datetime import datetime
8 |
9 | import torch
10 | from torch.utils.data import DataLoader, ConcatDataset
11 | from torch.utils.tensorboard import SummaryWriter
12 |
13 | import pytorch_lightning as pl
14 | from pytorch_lightning.callbacks import Callback, ModelCheckpoint
15 |
16 | from lstm_utils import Nav2dFixDataset, Push2dDataset, Pose2dLSTM, LSTMPoseSeqNet
17 |
18 | BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../.."))
19 | CONFIG_PATH = os.path.join(BASE_PATH, "leopy/config/baselines/lstm_net_train.yaml")
20 |
21 | # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
22 | device = "cpu"
23 |
24 | def data_loader(dataset_name, dataset_type, datatype, seq_len, params):
25 |
26 | dataset_dir_global = f"{BASE_PATH}/local/datasets/{dataset_name}/{datatype}"
27 | dataset_files = os.listdir(dataset_dir_global)
28 |
29 | datasets = []
30 | for filename in dataset_files:
31 | if dataset_type == "sim":
32 | datasets.append(Nav2dFixDataset(f"{dataset_dir_global}/{filename}", seq_len=seq_len, device=device))
33 | elif dataset_type == "real":
34 | datasets.append(Push2dDataset(f"{dataset_dir_global}/{filename}", seq_len=seq_len, device=device))
35 |
36 | dataset = ConcatDataset(datasets)
37 | dataloader = DataLoader(dataset, batch_size=params.batch_size,
38 | shuffle=params.shuffle, num_workers=params.num_workers)
39 |
40 | return dataloader, dataset, datasets[0].get_input_dim()
41 |
42 | class CustomDataModule(pl.LightningDataModule):
43 | def __init__(self, dataset_name, dataset_type, params):
44 | super().__init__()
45 | self.dataset_name = dataset_name
46 | self.dataset_type = dataset_type
47 | self.params = params
48 |
49 | # def setup(self, stage: Optional[str] = None):
50 | # pass
51 |
52 | def get_input_size(self):
53 | # TODO: Hacky way to get input size
54 | _, _, input_size = data_loader(dataset_name=self.dataset_name, dataset_type=self.dataset_type, datatype="train", seq_len=1, params=self.params)
55 | return input_size
56 |
57 | def train_dataloader(self):
58 | seq_len = self.trainer.current_epoch + 2
59 | # seq_len = self.params.seq_len
60 | train_dataloader, train_dataset, input_size = data_loader(dataset_name=self.dataset_name, dataset_type=self.dataset_type, datatype="train", seq_len=seq_len, params=self.params)
61 | return train_dataloader
62 |
63 | def val_dataloader(self):
64 | # seq_len = self.trainer.current_epoch + 2
65 | val_dataloader, _, _ = data_loader(dataset_name=self.dataset_name, dataset_type=self.dataset_type, datatype="test", seq_len=self.params.seq_len, params=self.params)
66 | return val_dataloader
67 |
68 | # def test_dataloader(self):
69 | # pass
70 | # return DataLoader(self.mnist_test, batch_size=self.batch_size)
71 |
72 | # def teardown(self, stage: Optional[str] = None):
73 | # Used to clean-up when the run is finished
74 | # ...
75 |
76 | def train_model(cfg):
77 |
78 | # save config
79 | prefix = cfg.prefix + datetime.now().strftime("%m-%d-%Y-%H-%M-%S")
80 | print(cfg.pretty())
81 | with open("{0}/local/logs/{1}.txt".format(BASE_PATH, prefix), "w") as f:
82 | print(cfg.pretty(), file=f)
83 |
84 | # init tensorboard writer
85 | tb_writer = SummaryWriter("{0}/local/logs/{1}".format(BASE_PATH, prefix))
86 |
87 | # init trainer
88 | partitioned_string = cfg.dataset_name.partition('/')
89 | dataset_type = partitioned_string[0]
90 | checkpoint_callback = ModelCheckpoint(dirpath=f"{BASE_PATH}/local/checkpoints/{cfg.dataset_name}/",
91 | filename="{0}_{1}_{2}".format(prefix, cfg.network.model, "{epoch:02d}"))
92 |
93 | gpus = None if (device == "cpu") else 1
94 | trainer = pl.Trainer(gpus=gpus, max_epochs=cfg.train.epochs, callbacks=[checkpoint_callback], reload_dataloaders_every_epoch=True)
95 |
96 | # init dataloaders
97 | # train_dataloader, train_dataset, input_size, = data_loader(dataset_name=cfg.dataset_name, dataset_type=dataset_type, datatype="train", params=cfg.dataloader)
98 | # val_dataloader, _, _ = data_loader(dataset_name=cfg.dataset_name, dataset_type=dataset_type, datatype="test", params=cfg.dataloader)
99 | dataloader = CustomDataModule(dataset_name=cfg.dataset_name, dataset_type=dataset_type, params=cfg.dataloader)
100 |
101 | # init model
102 | net = LSTMPoseSeqNet(cfg, dataloader.get_input_size(), tb_writer=tb_writer)
103 |
104 | # run training loop
105 | # trainer.fit(net, train_dataloader, val_dataloader)
106 | trainer.fit(net, datamodule=dataloader)
107 |
108 | @hydra.main(config_name=CONFIG_PATH)
109 | def main(cfg):
110 |
111 | train_model(cfg)
112 |
113 | if __name__ == '__main__':
114 | main()
115 |
--------------------------------------------------------------------------------
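
CustomDataModule above implements a sequence-length curriculum: because the trainer is built with reload_dataloaders_every_epoch=True, train_dataloader() is called again each epoch and returns windows of length epoch + 2. A simplified stand-in for SequenceDataset's sliding windows to illustrate:

import torch
from torch.utils.data import Dataset

class Windows(Dataset):
    # simplified stand-in for SequenceDataset: sliding windows of length seq_len
    def __init__(self, x, seq_len):
        self.x, self.seq_len = x, seq_len
    def __len__(self):
        return self.x.shape[0] - self.seq_len + 1
    def __getitem__(self, idx):
        return self.x[idx:idx + self.seq_len]

x = torch.arange(10).view(-1, 1).float()
for epoch in range(3):
    seq_len = epoch + 2  # mirrors train_dataloader's curriculum
    ds = Windows(x, seq_len)
    print(epoch, seq_len, len(ds), ds[0].flatten().tolist())
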
/leopy/scripts/examples/leo_nav2d.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 |
4 | import os
5 | import hydra
6 |
7 | from leopy.algo import leo_update
8 |
9 | BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../.."))
10 | CONFIG_PATH = os.path.join(BASE_PATH, "leopy/config/examples/leo_nav2d.yaml")
11 |
12 | @hydra.main(config_name=CONFIG_PATH, strict=False)
13 | def main(cfg):
14 |
15 | print(cfg.pretty())
16 |
17 | cfg.BASE_PATH = BASE_PATH
18 | leo_update.run(cfg)
19 |
20 | if __name__ == '__main__':
21 | main()
--------------------------------------------------------------------------------
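
leo_nav2d.py above and leo_push2d.py (next) are thin Hydra wrappers around leo_update.run, so any key in the example YAMLs can be overridden on the command line, e.g. python leo_nav2d.py leo.max_iters=50 leo.lr=1e-3 (illustrative values; leo_update.py reads params.leo.max_iters and params.leo.lr).
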
/leopy/scripts/examples/leo_push2d.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import os
4 | import hydra
5 |
6 | from leopy.algo import leo_update
7 |
8 | BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../.."))
9 | CONFIG_PATH = os.path.join(BASE_PATH, "leopy/config/examples/leo_push2d.yaml")
10 |
11 | @hydra.main(config_name=CONFIG_PATH, strict=False)
12 | def main(cfg):
13 |
14 | cfg.BASE_PATH = BASE_PATH
15 | leo_update.run(cfg)
16 |
17 | if __name__ == '__main__':
18 | main()
--------------------------------------------------------------------------------
/leopy/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 |
3 | install_requires = [line.rstrip() for line in open("requirements/requirements.txt", "r")]
4 |
5 | setup(
6 | name="leopy",
7 | version="0.0.1",
8 | description="Learning energy based models in factor graph optimization",
9 | url="",
10 | author="Paloma Sodhi",
11 | author_email="psodhi@cs.cmu.edu",
12 | license="LICENSE",
13 | packages=find_packages("src"),
14 | package_dir={"": "src"},
15 | install_requires=install_requires,
16 | python_requires=">=3.6",
17 | )
--------------------------------------------------------------------------------
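
For reference, line 3 of setup.py above builds install_requires by reading the requirements file verbatim. A standalone sketch of what it produces (run from the leopy directory):

# what setup.py's install_requires line produces, sketched standalone
with open("requirements/requirements.txt", "r") as f:
    install_requires = [line.rstrip() for line in f]
print(install_requires[:3])  # ['numpy>=1,<2', 'scipy', 'hydra-core==1.0.6']
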
/leopy/src/leopy/algo/leo_obs_models.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | import torch
4 | import torch.nn as nn
5 | import collections
6 |
7 | import pytorch_lightning as pl
8 |
9 | from leopy.utils import tf_utils
10 |
11 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
12 |
13 | # theta_exp used when realizability_coeff > 0
14 | def add_theta_exp_to_params(params):
15 | params.theta_exp = {}
16 | if (params.dataio.dataset_type == 'nav2d'):
17 | if (params.dataio.dataset_name == 'nav2dfix/dataset_0000'):
18 | params.theta_exp.sigma_inv_odom_vals = [1e1, 1e1, 1e1]
19 | params.theta_exp.sigma_inv_gps_vals = [1., 1., 1e1]
20 | elif (params.dataio.dataset_name == 'nav2dfix/dataset_0001'):
21 | params.theta_exp.sigma_inv_odom_vals = [1., 2., 1e1]
22 | params.theta_exp.sigma_inv_gps_vals = [2., 1., 1e1]
23 | elif (params.dataio.dataset_name == 'nav2dtime/dataset_0000'):
24 | params.theta_exp.sigma_inv_odom0_vals = [1e1, 1e1, 1e1]
25 | params.theta_exp.sigma_inv_gps0_vals = [1., 1., 1e1]
26 | params.theta_exp.sigma_inv_odom1_vals = [1., 1., 1.]
27 | params.theta_exp.sigma_inv_gps1_vals = [1e1, 1e1, 1e1]
28 | elif (params.dataio.dataset_name == 'nav2dtime/dataset_0001'):
29 | params.theta_exp.sigma_inv_odom0_vals = [1., 1., 1.]
30 | params.theta_exp.sigma_inv_gps0_vals = [1e1, 1e1, 1e1]
31 | params.theta_exp.sigma_inv_odom1_vals = [1e1, 1e1, 1e1]
32 | params.theta_exp.sigma_inv_gps1_vals = [1., 1., 1e1]
33 |
34 | if (params.dataio.dataset_type == 'push2d'):
35 | params.theta_exp.sigma_inv_tactile_rel_vals = [1, 1, 1e5]
36 | params.theta_exp.sigma_inv_qs_push_motion_vals = [1e3, 1e3, 1e3]
37 |
38 | params.theta_exp.sigma_inv_ee_pose_prior_vals = [1e4, 1e4, 1e4]
39 | params.theta_exp.sigma_inv_sdf_intersection_vals = [1e2]
40 | params.theta_exp.sigma_inv_binary_interseq_obj_vals = [1e6, 1e6, 1e6]
41 |
42 | return params
43 |
44 | def init_theta(params):
45 |
46 | if (params.dataio.dataset_type == "nav2d"):
47 | if (params.dataio.model_type == "fixed_cov"):
48 | sigma_noise = params.theta_init.sigma_noise if (params.theta_init.noisy == True) else 0.
49 | sigma_inv_odom_vals = np.array(params.theta_init.sigma_inv_odom_vals) + sigma_noise * np.random.randn(3)
50 | sigma_inv_gps_vals = np.array(params.theta_init.sigma_inv_gps_vals) + 0. * np.random.randn(3)
51 | theta = ThetaNav2dFixedCov(sigma_inv_odom_vals=sigma_inv_odom_vals,
52 | sigma_inv_gps_vals=sigma_inv_gps_vals)
53 | theta_exp = ThetaNav2dFixedCov(sigma_inv_odom_vals=params.theta_exp.sigma_inv_odom_vals,
54 | sigma_inv_gps_vals=params.theta_exp.sigma_inv_gps_vals)
55 |
56 | elif (params.dataio.model_type == "varying_cov"):
57 | sigma_noise = params.theta_init.sigma_noise if (params.theta_init.noisy == True) else 0.
58 | sigma_inv_odom0_vals = np.array(params.theta_init.sigma_inv_odom0_vals) + sigma_noise * np.random.randn(3)
59 | sigma_inv_gps0_vals = np.array(params.theta_init.sigma_inv_gps0_vals) + sigma_noise * np.random.randn(3)
60 | sigma_inv_odom1_vals = np.array(params.theta_init.sigma_inv_odom1_vals) + sigma_noise * np.random.randn(3)
61 | sigma_inv_gps1_vals = np.array(params.theta_init.sigma_inv_gps1_vals) + sigma_noise * np.random.randn(3)
62 |
63 | theta = ThetaNav2dVaryingCov(sigma_inv_odom0_vals=sigma_inv_odom0_vals,
64 | sigma_inv_gps0_vals=sigma_inv_gps0_vals,
65 | sigma_inv_odom1_vals=sigma_inv_odom1_vals,
66 | sigma_inv_gps1_vals=sigma_inv_gps1_vals)
67 | theta_exp = ThetaNav2dVaryingCov(sigma_inv_odom0_vals=params.theta_exp.sigma_inv_odom0_vals,
68 | sigma_inv_gps0_vals=params.theta_exp.sigma_inv_gps0_vals,
69 | sigma_inv_odom1_vals=params.theta_exp.sigma_inv_odom1_vals,
70 | sigma_inv_gps1_vals=params.theta_exp.sigma_inv_gps1_vals)
71 |
72 | elif (params.dataio.dataset_type == "push2d"):
73 | if (params.dataio.model_type == "fixed_cov"):
74 | theta = ThetaPush2dFixedCov(sigma_inv_tactile_rel_vals=params.theta_init.sigma_inv_tactile_rel_vals,
75 | sigma_inv_qs_push_motion_vals=params.theta_init.sigma_inv_qs_push_motion_vals,
76 | sigma_inv_ee_pose_prior_vals=params.theta_init.sigma_inv_ee_pose_prior_vals,
77 | sigma_inv_sdf_intersection_vals=params.theta_init.sigma_inv_sdf_intersection_vals,
78 | sigma_inv_binary_interseq_obj_vals=params.theta_init.sigma_inv_binary_interseq_obj_vals)
79 | theta_exp = ThetaPush2dFixedCov(sigma_inv_tactile_rel_vals=params.theta_exp.sigma_inv_tactile_rel_vals,
80 | sigma_inv_qs_push_motion_vals=params.theta_exp.sigma_inv_qs_push_motion_vals,
81 | sigma_inv_ee_pose_prior_vals=params.theta_exp.sigma_inv_ee_pose_prior_vals,
82 | sigma_inv_sdf_intersection_vals=params.theta_exp.sigma_inv_sdf_intersection_vals,
83 | sigma_inv_binary_interseq_obj_vals=params.theta_exp.sigma_inv_binary_interseq_obj_vals)
84 | elif (params.dataio.model_type == "fixed_cov_varying_mean"):
85 | tactile_model_filename = "{0}/local/models/{1}.pt".format(params.BASE_PATH, params.tactile_model.name)
86 | theta = ThetaPush2dFixedCovVaryingMean(sigma_inv_tactile_rel_vals=params.theta_init.sigma_inv_tactile_rel_vals,
87 | sigma_inv_qs_push_motion_vals=params.theta_init.sigma_inv_qs_push_motion_vals,
88 | sigma_inv_ee_pose_prior_vals=params.theta_init.sigma_inv_ee_pose_prior_vals,
89 | sigma_inv_sdf_intersection_vals=params.theta_init.sigma_inv_sdf_intersection_vals,
90 | sigma_inv_binary_interseq_obj_vals=params.theta_init.sigma_inv_binary_interseq_obj_vals,
91 | tactile_model_filename = tactile_model_filename)
92 | theta_exp = ThetaPush2dFixedCovVaryingMean(sigma_inv_tactile_rel_vals=params.theta_exp.sigma_inv_tactile_rel_vals,
93 | sigma_inv_qs_push_motion_vals=params.theta_exp.sigma_inv_qs_push_motion_vals,
94 | sigma_inv_ee_pose_prior_vals=params.theta_exp.sigma_inv_ee_pose_prior_vals,
95 | sigma_inv_sdf_intersection_vals=params.theta_exp.sigma_inv_sdf_intersection_vals,
96 | sigma_inv_binary_interseq_obj_vals=params.theta_exp.sigma_inv_binary_interseq_obj_vals,
97 | tactile_model_filename = tactile_model_filename)
98 | else:
99 | print(
100 | "[leo_obs_models::init_theta] Observation model parameter class not found.")
101 |
102 | [print("[leo_obs_models::init_theta] theta: {0} {1}".format(
103 | name, param)) for name, param in theta.named_parameters()]
104 | [print("[leo_obs_models::init_theta] theta_exp: {0} {1}".format(
105 | name, param)) for name, param in theta_exp.named_parameters()]
106 |
107 | return theta, theta_exp
108 |
109 |
110 | def min_clip(w):
111 | w_min = 1e-4 * torch.ones(w.shape, requires_grad=True, device=device)
112 | w = torch.max(w_min, w)
113 | return w
114 |
115 |
116 | class ThetaNav2dFixedCov(nn.Module):
117 | def __init__(self, sigma_inv_odom_vals=None, sigma_inv_gps_vals=None):
118 | super().__init__()
119 |
120 | self.sigma_inv_odom = torch.nn.Parameter(torch.tensor(
121 | sigma_inv_odom_vals, dtype=torch.float32, device=device)) if sigma_inv_odom_vals is not None else torch.nn.Parameter(torch.tensor([10., 10., 10.], device=device))
122 |
123 | self.sigma_inv_gps = torch.nn.Parameter(torch.tensor(
124 | sigma_inv_gps_vals, dtype=torch.float32, device=device)) if sigma_inv_gps_vals is not None else torch.nn.Parameter(torch.tensor([10., 10., 10.], device=device))
125 |
126 | self.freeze_params()
127 |
128 | def freeze_params(self):
129 | pass
130 |
131 | def get_sigma_inv(self, factor_name, z=None):
132 | sigma_inv_val = getattr(self, "sigma_inv_{0}".format(factor_name))
133 |
134 | return sigma_inv_val
135 |
136 | def min_clip(self):
137 | self.sigma_inv_odom.data = min_clip(self.sigma_inv_odom.data)
138 | self.sigma_inv_gps.data = min_clip(self.sigma_inv_gps.data)
139 |
140 | def norm(self):
141 | l2_norm = torch.norm(self.sigma_inv_odom) ** 2 + torch.norm(self.sigma_inv_gps) ** 2
142 |
143 | return l2_norm
144 | class ThetaNav2dVaryingCov(nn.Module):
145 | def __init__(self, sigma_inv_odom0_vals=None, sigma_inv_gps0_vals=None, sigma_inv_odom1_vals=None, sigma_inv_gps1_vals=None):
146 | super().__init__()
147 |
148 | self.sigma_inv_odom0 = torch.nn.Parameter(torch.tensor(
149 | sigma_inv_odom0_vals, dtype=torch.float32, device=device)) if sigma_inv_odom0_vals is not None else torch.nn.Parameter(torch.tensor([10., 10., 10.], device=device))
150 | self.sigma_inv_gps0 = torch.nn.Parameter(torch.tensor(
151 | sigma_inv_gps0_vals, dtype=torch.float32, device=device)) if sigma_inv_gps0_vals is not None else torch.nn.Parameter(torch.tensor([10., 10., 10.], device=device))
152 |
153 | self.sigma_inv_odom1 = torch.nn.Parameter(torch.tensor(
154 | sigma_inv_odom1_vals, dtype=torch.float32, device=device)) if sigma_inv_odom1_vals is not None else torch.nn.Parameter(torch.tensor([10., 10., 10.], device=device))
155 | self.sigma_inv_gps1 = torch.nn.Parameter(torch.tensor(
156 | sigma_inv_gps1_vals, dtype=torch.float32, device=device)) if sigma_inv_gps1_vals is not None else torch.nn.Parameter(torch.tensor([10., 10., 10.], device=device))
157 |
158 | self.freeze_params()
159 |
160 | def freeze_params(self):
161 | pass
162 |
163 | def get_sigma_inv(self, factor_name, z=None):
164 | sigma_inv_val0 = getattr(self, "sigma_inv_{0}0".format(factor_name))
165 | sigma_inv_val1 = getattr(self, "sigma_inv_{0}1".format(factor_name))
166 |
167 | cl = z[-1]
168 | sigma_inv_val = (1 - cl) * sigma_inv_val0 + cl * sigma_inv_val1
169 |
170 | return sigma_inv_val
171 |
172 | def min_clip(self):
173 | self.sigma_inv_odom0.data = min_clip(self.sigma_inv_odom0.data)
174 | self.sigma_inv_gps0.data = min_clip(self.sigma_inv_gps0.data)
175 |
176 | self.sigma_inv_odom1.data = min_clip(self.sigma_inv_odom1.data)
177 | self.sigma_inv_gps1.data = min_clip(self.sigma_inv_gps1.data)
178 |
179 | def norm(self):
180 |
181 | l2_norm = torch.norm(self.sigma_inv_odom0) ** 2 + torch.norm(self.sigma_inv_odom1) ** 2 + \
182 | torch.norm(self.sigma_inv_gps0) ** 2 + \
183 | torch.norm(self.sigma_inv_gps1) ** 2
184 |
185 | return l2_norm
186 |
187 | class ThetaPush2dFixedCov(nn.Module):
188 | def __init__(self, sigma_inv_tactile_rel_vals=None, sigma_inv_qs_push_motion_vals=None,
189 | sigma_inv_ee_pose_prior_vals=None, sigma_inv_sdf_intersection_vals=None, sigma_inv_binary_interseq_obj_vals=None):
190 | super().__init__()
191 |
192 | self.sigma_inv_tactile_rel = torch.nn.Parameter(torch.tensor(
193 | sigma_inv_tactile_rel_vals, device=device)) if sigma_inv_tactile_rel_vals is not None else torch.nn.Parameter(torch.tensor([10., 10., 10.], device=device))
194 | self.sigma_inv_qs_push_motion = torch.nn.Parameter(torch.tensor(
195 | sigma_inv_qs_push_motion_vals, device=device)) if sigma_inv_qs_push_motion_vals is not None else torch.nn.Parameter(torch.tensor([10., 10., 10.], device=device))
196 |
197 | self.sigma_inv_ee_pose_prior = torch.nn.Parameter(torch.tensor(
198 | sigma_inv_ee_pose_prior_vals, device=device)) if sigma_inv_ee_pose_prior_vals is not None else torch.nn.Parameter(torch.tensor([10., 10., 10.], device=device))
199 | self.sigma_inv_sdf_intersection = torch.nn.Parameter(torch.tensor(
200 | sigma_inv_sdf_intersection_vals, device=device)) if sigma_inv_sdf_intersection_vals is not None else torch.nn.Parameter(torch.tensor([10., 10., 10.], device=device))
201 | self.sigma_inv_binary_interseq_obj = torch.nn.Parameter(torch.tensor(
202 | sigma_inv_binary_interseq_obj_vals, device=device)) if sigma_inv_binary_interseq_obj_vals is not None else torch.nn.Parameter(torch.tensor([10., 10., 10.], device=device))
203 |
204 | self.freeze_params()
205 |
206 | def freeze_params(self):
207 | self.sigma_inv_ee_pose_prior.requires_grad = False
208 | self.sigma_inv_sdf_intersection.requires_grad = False
209 | self.sigma_inv_binary_interseq_obj.requires_grad = False
210 |
211 | def get_sigma_inv(self, factor_name, z=None):
212 | sigma_inv_val = getattr(self, "sigma_inv_{0}".format(factor_name))
213 |
214 | return sigma_inv_val
215 |
216 | def min_clip(self):
217 | self.sigma_inv_tactile_rel.data = min_clip(
218 | self.sigma_inv_tactile_rel.data)
219 | self.sigma_inv_qs_push_motion.data = min_clip(
220 | self.sigma_inv_qs_push_motion.data)
221 | class TactileModelNetwork(pl.LightningModule):
222 | def __init__(
223 | self, input_size, output_size=4):
224 | super().__init__()
225 |
226 | self.fc1 = torch.nn.Linear(input_size, output_size)
227 |
228 | def forward(self, x1, x2, k):
229 | x = torch.cat([x1, x2], dim=1)
230 |
231 | k1_ = k.unsqueeze(1) # b x 1 x cl
232 | x1_ = x.unsqueeze(-1) # b x dim x 1
233 | x = torch.mul(x1_, k1_) # b x dim x cl
234 |
235 | x = x.view(x.shape[0], -1)
236 | x = self.fc1(x)
237 |
238 | return x
239 |
240 | class ThetaPush2dFixedCovVaryingMean(nn.Module):
241 | def __init__(self, sigma_inv_tactile_rel_vals=None, sigma_inv_qs_push_motion_vals=None,
242 | sigma_inv_ee_pose_prior_vals=None, sigma_inv_sdf_intersection_vals=None, sigma_inv_binary_interseq_obj_vals=None, tactile_model_filename=None):
243 | super().__init__()
244 | self.sigma_inv_tactile_rel = torch.nn.Parameter(torch.tensor(
245 | sigma_inv_tactile_rel_vals, device=device)) if sigma_inv_tactile_rel_vals is not None else torch.nn.Parameter(torch.tensor([10., 10., 10.], device=device))
246 | self.sigma_inv_qs_push_motion = torch.nn.Parameter(torch.tensor(
247 | sigma_inv_qs_push_motion_vals, device=device)) if sigma_inv_qs_push_motion_vals is not None else torch.nn.Parameter(torch.tensor([10., 10., 10.], device=device))
248 |
249 | self.sigma_inv_ee_pose_prior = torch.nn.Parameter(torch.tensor(
250 | sigma_inv_ee_pose_prior_vals, device=device)) if sigma_inv_ee_pose_prior_vals is not None else torch.nn.Parameter(torch.tensor([10., 10., 10.], device=device))
251 | self.sigma_inv_sdf_intersection = torch.nn.Parameter(torch.tensor(
252 | sigma_inv_sdf_intersection_vals, device=device)) if sigma_inv_sdf_intersection_vals is not None else torch.nn.Parameter(torch.tensor([10., 10., 10.], device=device))
253 | self.sigma_inv_binary_interseq_obj = torch.nn.Parameter(torch.tensor(
254 | sigma_inv_binary_interseq_obj_vals, device=device)) if sigma_inv_binary_interseq_obj_vals is not None else torch.nn.Parameter(torch.tensor([10., 10., 10.], device=device))
255 |
256 | self.tactile_model = TactileModelNetwork(input_size=2*2*4, output_size=4)
257 | self.tactile_model = self.tactile_model.to(device)
258 | self.init_tactile_model_weights_file(tactile_model_filename)
259 |
260 | self.freeze_params()
261 |
262 | def init_tactile_model_weights_file(self, filename):
263 |
264 | model_saved = torch.jit.load(filename)
265 | state_dict_saved = model_saved.state_dict()
266 |
267 | # fix: saved dict key mismatch
268 | state_dict_new = collections.OrderedDict()
269 | state_dict_new['fc1.weight'] = state_dict_saved['model.fc1.weight']
270 | state_dict_new['fc1.bias'] = state_dict_saved['model.fc1.bias']
271 |
272 | self.tactile_model.load_state_dict(state_dict_new)
273 |
274 | def freeze_params(self):
275 | self.sigma_inv_tactile_rel.requires_grad = False
276 | self.sigma_inv_qs_push_motion.requires_grad = False
277 |
278 | self.sigma_inv_ee_pose_prior.requires_grad = False
279 | self.sigma_inv_sdf_intersection.requires_grad = False
280 | self.sigma_inv_binary_interseq_obj.requires_grad = False
281 |
282 | def get_sigma_inv(self, factor_name, z=None):
283 | sigma_inv_val = getattr(self, "sigma_inv_{0}".format(factor_name))
284 |
285 | return sigma_inv_val
286 |
287 | def tactile_model_output(self, img_feats, params=None):
288 | img_feat_i = torch.tensor(img_feats[0], requires_grad=True, device=device).view(1, -1)
289 | img_feat_j = torch.tensor(img_feats[1], requires_grad=True, device=device).view(1, -1)
290 |
291 | if (params.norm_img_feat == True):
292 | mean_img_feat = torch.tensor(params.mean_img_feat, requires_grad=True, device=device)
293 | std_img_feat = torch.tensor(params.std_img_feat, requires_grad=True, device=device)
294 |
295 | img_feat_i = tf_utils.normalize_vector(img_feat_i, mean_img_feat, std_img_feat)
296 | img_feat_j = tf_utils.normalize_vector(img_feat_j, mean_img_feat, std_img_feat)
297 |
298 | class_label = torch.tensor(params.class_label, requires_grad=False, device=device)
299 | class_label_vec = torch.nn.functional.one_hot(class_label, params.num_classes)
300 | class_label_vec = class_label_vec.view(1, -1)
301 |
302 | tf_pred = self.tactile_model.forward(img_feat_i, img_feat_j, class_label_vec)
303 | tf_pred = tf_pred.view(-1) # [tx, ty, cyaw, syaw]
304 |
305 | return tf_pred
306 |
307 | def min_clip(self):
308 | self.sigma_inv_tactile_rel.data = min_clip(
309 | self.sigma_inv_tactile_rel.data)
310 | self.sigma_inv_qs_push_motion.data = min_clip(
311 | self.sigma_inv_qs_push_motion.data)
--------------------------------------------------------------------------------
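
min_clip above floors each learned inverse-sigma at 1e-4 so the noise precisions stay positive through gradient updates. A toy check of the elementwise clamp (a raw tensor standing in for the nn.Parameter data):

import torch

def min_clip(w, w_min_val=1e-4):
    # elementwise floor, mirroring leo_obs_models.min_clip
    return torch.max(w_min_val * torch.ones_like(w), w)

w = torch.tensor([0.5, -2.0, 1e-6])
print(min_clip(w))  # tensor([5.0000e-01, 1.0000e-04, 1.0000e-04])
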
/leopy/src/leopy/algo/leo_update.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | import os
4 | from datetime import datetime
5 | import copy
6 | from functools import partial
7 | import pandas as pd
8 |
9 | import torch
10 | from torch.autograd import Variable
11 | import torch.nn as nn
12 | import torch.optim as optim
13 | import torch.multiprocessing as mp
14 | from torch.utils.tensorboard import SummaryWriter
15 |
16 | from leopy.optim import cost, gtsamopt, sampler
17 | from leopy.algo.leo_obs_models import *
18 |
19 | from leopy.utils import dir_utils, vis_utils
20 | from leopy.eval import quant_metrics
21 |
22 | import logging
23 | log = logging.getLogger(__name__)
24 |
25 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
26 |
27 | def load_dataset(params, idx, dataset_mode="train"):
28 |
29 | idx_offset = 0 if (dataset_mode == "train") else params.leo.test_idx_offset
30 |
31 | filename = "{0}/{1}/{2}/{3}/{4:04d}.json".format(
32 | params.BASE_PATH, params.dataio.srcdir_dataset, params.dataio.dataset_name, dataset_mode, idx+idx_offset)
33 |
34 | dataset = dir_utils.read_file_json(filename, verbose=False)
35 |
36 | return dataset
37 |
38 | def eval_learnt_params(theta, theta_exp, params, dataframe):
39 |
40 | params.optim.save_fig = False
41 |
42 | n_data = params.leo.n_data_test
43 | traj_err_trans_test, traj_err_rot_test = np.zeros((n_data, 1)), np.zeros((n_data, 1))
44 |
45 | for data_idx in range(0, n_data):
46 |
47 | # optimizer trajectory
48 | x_opt, data, _, _ = optimizer_soln(theta, params, data_idx, dataset_mode="test")
49 | x_opt = torch.tensor(x_opt, requires_grad=True, dtype=torch.float32, device=device)
50 |
51 | # expert trajectory
52 | x_exp = get_exp_traj(data, theta_exp, params)
53 |
54 | # traj errors
55 | traj_err_trans_test[data_idx, :], traj_err_rot_test[data_idx, :] = quant_metrics.traj_error(
56 | xyh_est=x_opt.detach().cpu().numpy(), xyh_gt=x_exp.detach().cpu().numpy())
57 |
58 | dataframe.loc[(data_idx+params.leo.test_idx_offset, params.optim.nsteps-1), 'test/err/tracking/trans'] = traj_err_trans_test[data_idx, :]
59 | dataframe.loc[(data_idx+params.leo.test_idx_offset, params.optim.nsteps-1), 'test/err/tracking/rot'] = traj_err_rot_test[data_idx, :]
60 |
61 | return traj_err_trans_test, traj_err_rot_test, dataframe
62 |
63 | def add_tracking_errors_to_dataframe(df, x_opt, x_exp, params=None):
64 |
65 | nsteps = int(0.5 * x_opt.shape[0]) if (params.dataio.dataset_type == 'push2d') else x_opt.shape[0]
66 |
67 | x_opt_np = x_opt.detach().cpu().numpy()
68 | x_exp_np = x_exp.detach().cpu().numpy()
69 |
70 | for tstep in range(1, nsteps):
71 | err_trans, err_rot = quant_metrics.traj_error(xyh_est=x_opt_np[0:tstep, :], xyh_gt=x_exp_np[0:tstep, :])
72 | df.loc[tstep, 'train/err/tracking/trans'] = err_trans
73 | df.loc[tstep, 'train/err/tracking/rot'] = err_rot
74 |
75 | return df
76 |
77 | def check_traj_convergence(traj_err_trans, traj_err_rot, traj_err_trans_prev, traj_err_rot_prev, params):
78 |
79 | # if ((traj_err_trans < params.leo.eps_traj_err_trans) & (traj_err_rot < params.leo.eps_traj_err_rot)):
80 | # return True
81 |
82 | diff_traj_err_trans = np.absolute(traj_err_trans - traj_err_trans_prev)
83 | diff_traj_err_rot = np.absolute(traj_err_rot - traj_err_rot_prev)
84 |
85 | # print("[leo_update::train] iteration {0}/{1}, diff_traj_err_trans: {2}, diff_traj_err_rot: {3}".format(
86 | # params.leo.itr, params.leo.max_iters-1, diff_traj_err_trans, diff_traj_err_rot))
87 |
88 | if ((diff_traj_err_trans < params.leo.eps_diff_traj_err_trans) & (diff_traj_err_rot < params.leo.eps_diff_traj_err_rot)):
89 | return True
90 |
91 | return False
92 |
93 | def optimizer_soln(theta, params, data_idx, dataset_mode="train"):
94 |
95 | # load data
96 | data = load_dataset(params, data_idx, dataset_mode)
97 | params.dataio.data_idx = "{0:04d}".format(data_idx) # todo: switch to integer
98 |
99 | cost_obj = cost.Cost(data, theta, params=params, device=device)
100 | mean, cov, dataframe = gtsamopt.run_optimizer(cost_obj, params=params)
101 |
102 | # x_opt: n_x x dim_x
103 | x_opt = sampler.sampler_gaussian(mean, cov=None)
104 |
105 | # x_samples: n_samples x n_x x dim_x
106 | x_samples = sampler.sampler_gaussian(mean, cov, n_samples=params.leo.n_samples, temp=params.leo.temp)
107 |
108 | return x_opt, data, x_samples, dataframe
109 |
110 | def cost_optimizer(x_samples, cost_obj, params=None):
111 |
112 | # get cost values
113 | if params.leo.sampler:
114 | cost_opt = torch.tensor([0.], requires_grad=True, device=device)
115 | for sidx in range(0, x_samples.shape[0]):
116 | cost_opt = cost_opt + cost_obj.costfn(x_samples[sidx], log=params.logger.cost_flag)
117 | cost_opt = cost_opt / x_samples.shape[0]
118 | else:
119 | cost_opt = cost_obj.costfn(x_samples, log=params.logger.cost_flag)
120 |
121 | return cost_opt
122 |
123 | def cost_expert(x_exp, cost_obj, params=None):
124 |
125 | cost_exp = cost_obj.costfn(x_exp)
126 |
127 | return cost_exp
128 |
129 | def get_exp_traj_realizable(data, theta_exp, params):
130 |
131 | # expert values x_exp
132 | cost_obj_exp = cost.Cost(data, theta_exp, params=params, device=device)
133 | mean, _, _ = gtsamopt.run_optimizer(cost_obj_exp, params=params)
134 | x_samples = sampler.sampler_gaussian(mean, cov=None)
135 | x_exp = torch.tensor(x_samples, requires_grad=True, dtype=torch.float32, device=device)
136 |
137 | return x_exp
138 |
139 | def get_exp_traj_groundtruth(data, params):
140 |
141 | # expert values x_exp
142 | if (params.dataio.dataset_type == "push2d"):
143 | obj_poses_gt = np.array(data['obj_poses_gt'][0:params.optim.nsteps])
144 | ee_poses_gt = np.array(data['ee_poses_gt'][0:params.optim.nsteps])
145 | x_gt = np.vstack((obj_poses_gt, ee_poses_gt))
146 | elif (params.dataio.dataset_type == "nav2d"):
147 | x_gt = np.array(data['poses_gt'][0:params.optim.nsteps])
148 | else:
149 | print(f"[leo_update::get_exp_traj_groundtruth] x_exp not found for {params.dataio.dataset_type}")
150 | return
151 |
152 | x_exp = torch.tensor(x_gt, requires_grad=True, dtype=torch.float32, device=device)
153 |
154 | return x_exp
155 |
156 | def get_exp_traj(data, theta_exp, params):
157 |
158 | x_exp_realizable = get_exp_traj_realizable(data, theta_exp, params)
159 | x_exp_groundtruth = get_exp_traj_groundtruth(data, params)
160 |
161 | # debug: backward through graph a second time error
162 | # x_diff = x_exp_realizable - x_exp_groundtruth
163 | # x_diff[:, 2] = tf_utils.wrap_to_pi(x_diff[:, 2])
164 | # x_exp = x_exp_groundtruth + params.leo.realizability_coeff * x_diff
165 | # x_exp[:, 2] = tf_utils.wrap_to_pi(x_exp[:, 2])
166 |
167 | x_exp = x_exp_groundtruth
168 |
169 | return x_exp
170 |
171 | def optimizer_update(optimizer, output):
172 | # clear, backprop and apply new gradients
173 | optimizer.zero_grad()
174 | output.backward()
175 | optimizer.step()
176 |
177 | def run(params):
178 |
179 | # figs = vis_utils.init_plots(n_figs=1, interactive=params.optim.show_fig)
180 | print("[leo_update::run] Using device: {0}".format(device))
181 | params.dataio.prefix = datetime.now().strftime("%m-%d-%Y-%H-%M-%S")
182 |
183 | if params.random_seed is not None:
184 | np.random.seed(params.random_seed)
185 | torch.manual_seed(params.random_seed)
186 |
187 | # save config params
188 | dir_cfg = "{0}/{1}/{2}/{3}".format(params.BASE_PATH, params.plot.dstdir, params.dataio.dataset_name, params.dataio.prefix)
189 | dir_utils.make_dir(dir_cfg)
190 | print(params.pretty())
191 | with open("{0}/{1}_config.txt".format(dir_cfg, params.dataio.prefix), "w") as f:
192 | print(params.pretty(), file=f)
193 |
194 | # init tensorboard visualizer
195 | if params.leo.tb_flag:
196 | tb_dir = "{0}".format(params.dataio.prefix)
197 | os.makedirs("{0}/runs/{1}".format(params.BASE_PATH, tb_dir), exist_ok=True)
198 | tb_writer = SummaryWriter("{0}/runs/{1}".format(params.BASE_PATH, tb_dir))
199 |
200 | # for printing cost grad, vals every leo iteration
201 | if (params.dataio.dataset_type == "push2d"):
202 | print_list = ["tactile", "qs"]
203 | elif (params.dataio.dataset_type == "nav2d"):
204 | print_list = ["odom", "gps"]
205 |
206 | # init theta params
207 | params = add_theta_exp_to_params(params)
208 | theta, theta_exp = init_theta(params)
209 | print(" ************** [leo_update::theta_init] ************** ")
210 | for name, param in theta.named_parameters():
211 | print('name: ', name)
212 | print(type(param))
213 | print('param.shape: ', param.shape)
214 | print('param.requires_grad: ', param.requires_grad)
215 | print(param)
216 | print('=====')
217 |
218 | # init leo update params
219 | max_iters = params.leo.max_iters
220 | n_data = params.leo.n_data_train
221 |
222 | # leo loss optimizer
223 | params_optimize = filter(lambda p: p.requires_grad, theta.parameters())
224 | optimizer = optim.Adam(params_optimize, lr=params.leo.lr, weight_decay=params.leo.lmd)
225 | if params.leo.lr_scheduler:
226 | # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=20, factor=0.5, verbose=True)
227 | scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.95, verbose=True)
228 |
229 | # init multiprocess params
230 | mp.set_start_method('spawn')
231 |
232 | # init clipper, gradient hook
233 | for name, param in theta.named_parameters():
234 | if (param.requires_grad == True):
235 | # if any(word in name for word in print_list):
236 | param.register_hook(lambda grad: print("[leo_update::run] GRAD : {}".format(grad)))
237 |
238 | # collect expert trajectories
239 | x_exp_all = []
240 | for data_idx in range(0, n_data):
241 | data = load_dataset(params, data_idx)
242 |
243 | params.leo.itr = 0
244 | params.dataio.data_idx = "{0:04d}".format(data_idx)
245 | params.optim.save_fig = False
246 |
247 | # expert trajectory
248 | x_exp = get_exp_traj(data, theta_exp, params)
249 |
250 | x_exp_all.append(x_exp)
251 |
252 | # main leo loop, update theta
253 | df_leo_list = []
254 | itr = 0
255 | mean_traj_err_trans_prev, mean_traj_err_rot_prev = 0.0, 0.0  # persists across leo iterations (used by the convergence check)
256 | while (itr < max_iters):
257 |
258 | cost_opt = torch.tensor([0.], requires_grad=True, device=device)
259 | cost_exp = torch.tensor([0.], requires_grad=True, device=device)
260 | loss = torch.tensor([0.], requires_grad=True, device=device)
261 |
262 |
263 | # set config params
264 | params.leo.itr = itr
265 | params.optim.save_fig = True
266 |
267 | # logger
268 | if (params.optim.save_logger):
269 | logger_dir = "{0}/local/logger/{1}/leo_itr_{2:04d}".format(
270 | params.BASE_PATH, params.dataio.dataset_name, params.leo.itr)
271 | dir_utils.make_dir(logger_dir)
272 |
273 | # parallelized optimizer run
274 | pool = mp.Pool(processes=params.leo.pool_processes)
275 | optimizer_soln_fn = partial(optimizer_soln, copy.deepcopy(theta), params)
276 | data_idxs = np.arange(0, n_data)
277 | result_opt = pool.map(optimizer_soln_fn, data_idxs)
278 | pool.close()
279 | pool.join()
280 |
281 | traj_err_trans_train, traj_err_rot_train = np.zeros((n_data, 1)), np.zeros((n_data, 1))
282 | df_data_list = []
283 | for data_idx in range(0, n_data):
284 |
285 | # expert, optim trajs for current data idx
286 | x_exp = x_exp_all[data_idx]
287 | x_opt = result_opt[data_idx][0]
288 | data = result_opt[data_idx][1]
289 | x_samples = result_opt[data_idx][2]
290 |
291 | x_opt = torch.tensor(x_opt, requires_grad=True,
292 | dtype=torch.float32, device=device)
293 | x_samples = torch.tensor(x_samples, requires_grad=True,
294 | dtype=torch.float32, device=device)
295 |
296 | # optim, expert costs for current data idx
297 | cost_obj = cost.Cost(data, theta, params=params, device=device)
298 | cost_opt_curr = cost_optimizer(x_samples, cost_obj, params=params)
299 | cost_exp_curr = cost_expert(x_exp, cost_obj, params=None)
300 |
301 | # create a common data frame
302 | if params.logger.enable:
303 | df_opt = result_opt[data_idx][3]
304 | df_cost = cost_obj.get_dataframe()
305 | df = pd.concat([df_cost, df_opt], axis=1)
306 |
307 | df = add_tracking_errors_to_dataframe(df, x_opt, x_exp, params)
308 | df = df.assign(leo_loss=(cost_exp - cost_opt).item())
309 | df = pd.concat({data_idx: df}, names=['data_idx'])
310 | df_data_list.append(df)
311 |
312 | # sum up costs over data idxs
313 | cost_opt = cost_opt + cost_opt_curr
314 | cost_exp = cost_exp + cost_exp_curr
315 |
316 | # traj errors
317 | traj_err_trans_train[data_idx, :], traj_err_rot_train[data_idx, :] = quant_metrics.traj_error(
318 | xyh_est=x_opt[0:params.optim.nsteps, :].detach().cpu().numpy(), xyh_gt=x_exp[0:params.optim.nsteps, :].detach().cpu().numpy())
319 |
320 | # leo loss
321 | l2_reg = params.leo.l2_wt * theta.norm()
322 | loss = 1/n_data * (cost_exp - cost_opt) + l2_reg
323 |
324 | # test trajectory errors
325 | # traj_err_trans_test, traj_err_rot_test, df = eval_learnt_params(theta, theta_exp, params=params, dataframe=df)
326 |
327 | # concat dataframes across data idxs
328 | if params.logger.enable:
329 | df = pd.concat(df_data_list)
330 | df = pd.concat({itr: df}, names=['leo_itr'])
331 | df_leo_list.append(df)
332 |
333 |
334 |
335 | mean_traj_err_trans, mean_traj_err_rot = np.mean(traj_err_trans_train), np.mean(traj_err_rot_train)
336 | if params.leo.tb_flag:
337 | tb_writer.add_scalar("loss", loss.item(), itr)
338 | tb_writer.add_scalar("err/tracking/trans", mean_traj_err_trans, itr)
339 | tb_writer.add_scalar("err/tracking/rot", mean_traj_err_rot, itr)
340 |
341 | for name, param in theta.named_parameters():
342 | # if any(word in name for word in print_list):
343 | if (param.requires_grad == True):
344 | print("[leo_update::train] iteration {0}/{1} VALUE {2}: {3}".format(itr, max_iters-1, name, param.data))
345 | print("[leo_update::train] iteration {0}/{1}, loss: {2}, cost_opt: {3}, cost_exp: {4}".format(
346 | itr, max_iters-1, loss.item(), cost_opt.item(), cost_exp.item()))
347 | print("[leo_update::train] iteration {0}/{1}, traj_err_trans_train: {2}, traj_err_rot_train: {3}".format(
348 | itr, max_iters-1, mean_traj_err_trans, mean_traj_err_rot))
349 | # print("[leo_update::test] fevals: {0}, traj_err_trans_test: ({1}, {2}), traj_err_rot_test: ({3}, {4})".format(
350 | # itr, np.mean(traj_err_trans_test), np.std(traj_err_trans_test), np.mean(traj_err_rot_test), np.std(traj_err_rot_test)))
351 |
352 | if (params.leo.use_traj_convergence):
353 | converge_flag = check_traj_convergence(mean_traj_err_trans, mean_traj_err_rot, mean_traj_err_trans_prev, mean_traj_err_rot_prev, params)
354 | mean_traj_err_trans_prev, mean_traj_err_rot_prev = mean_traj_err_trans, mean_traj_err_rot
355 | if converge_flag: break
356 |
357 | optimizer_update(optimizer, loss)
358 | theta.min_clip()
359 |
360 | if params.leo.lr_scheduler:
361 | scheduler.step()
362 |
363 | # write dataframe to file
364 | if (params.logger.enable) & (params.logger.save_csv):
365 | dataframe = pd.concat(df_leo_list)
366 | logdir = f"{params.BASE_PATH}/local/datalogs/{params.dataio.dataset_name}/{params.dataio.prefix}"
367 | os.makedirs(logdir, exist_ok=True)
368 | dataframe.to_csv(f"{logdir}/datalog_{params.dataio.prefix}.csv")
369 | log.info(f"Saved logged data to {logdir}")
370 |
371 | itr = itr + 1
372 |
373 | # plotting
374 | if (params.leo.save_video):
375 | for idx in range(0, n_data):
376 | dataset_name_idx = "{0}_{1:04d}".format(params.dataio.dataset_name, idx)
377 | imgdir = "{0}/{1}/{2}".format(params.BASE_PATH, params.plot.dstdir, dataset_name_idx)
378 | vis_utils.write_video_ffmpeg(imgdir, "{0}/{1}".format(imgdir, "optimized_isam2"))
379 |
--------------------------------------------------------------------------------
/leopy/src/leopy/dataio/data_process.py:
--------------------------------------------------------------------------------
1 | import sys
2 | sys.path.append("/usr/local/cython/")
3 |
4 | import leopy.utils.tf_utils as tf_utils
5 | import gtsam
6 | import math
7 | import numpy as np
8 |
9 | def transform_episodes_common_frame(episode_list, push_data):
10 |
11 | push_data_tf = push_data  # alias: episodes are transformed in place on the input dict
12 |
13 | push_data_idxs = []
14 | episode_idxs = []
15 | for episode in episode_list:
16 | push_data_idxs_curr = [idx for idx, val in enumerate(
17 | push_data['contact_episode']) if (val == episode)]
18 | push_data_idxs.append(push_data_idxs_curr)
19 | episode_idxs.append([episode] * len(push_data_idxs_curr))
20 |
21 | push_data_idxs = [item for sublist in push_data_idxs for item in sublist]
22 | episode_idxs = [item for sublist in episode_idxs for item in sublist]
23 | num_steps = len(push_data_idxs)
24 |
25 | pose_idx, prev_pose_idx = -1, -1
26 |
27 | obj_prevseq_last_pose = None
28 | prevseq_pose_set = False
29 | for tstep in range(num_steps):
30 |
31 | prev_pose_idx = pose_idx
32 | pose_idx = pose_idx + 1
33 | push_idx = push_data_idxs[pose_idx]
34 |
35 | if ((tstep != 0) & (episode_idxs[prev_pose_idx] != episode_idxs[pose_idx])):
36 |
37 | prev_push_idx = push_data_idxs[prev_pose_idx]
38 | obj_prevseq_last_pose = tf_utils.vec3_to_pose2(push_data['obj_poses_2d'][prev_push_idx])
39 | obj_newseq_first_pose = tf_utils.vec3_to_pose2(push_data['obj_poses_2d'][push_idx])
40 |
41 | prevseq_pose_set = True
42 |
43 | if prevseq_pose_set:
44 | obj_pose = tf_utils.vec3_to_pose2(push_data['obj_poses_2d'][push_idx])
45 | ee_pose = tf_utils.vec3_to_pose2(push_data['ee_poses_2d'][push_idx])
46 |
47 | # transform obj pose
48 | obj_pose_tf = obj_prevseq_last_pose.compose(obj_newseq_first_pose.between(obj_pose))
49 |
50 | # transform endeff pose
51 | ee_pose_tf = obj_pose_tf.compose(obj_pose.between(ee_pose))
52 |
53 | push_data_tf['obj_poses_2d'][push_idx] = tf_utils.pose2_to_vec3(obj_pose_tf)
54 | push_data_tf['ee_poses_2d'][push_idx] = tf_utils.pose2_to_vec3(ee_pose_tf)
55 |
56 | return push_data_tf
57 |
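58 | # Illustrative usage sketch (all data below is made up; it assumes the push_data
59 | # dict layout used above: parallel lists keyed by 'contact_episode',
60 | # 'obj_poses_2d' and 'ee_poses_2d', with poses as [x, y, theta]). Episode 2 is
61 | # re-anchored so that it continues from episode 1's last object pose:
62 | #
63 | # push_data = {
64 | #     'contact_episode': [1, 1, 2, 2],
65 | #     'obj_poses_2d': [[0.0, 0.0, 0.0], [0.1, 0.0, 0.0], [5.0, 5.0, 0.0], [5.1, 5.0, 0.0]],
66 | #     'ee_poses_2d': [[-0.1, 0.0, 0.0], [0.0, 0.0, 0.0], [4.9, 5.0, 0.0], [5.0, 5.0, 0.0]],
67 | # }
68 | # push_data_tf = transform_episodes_common_frame([1, 2], push_data)
69 | # # obj poses of episode 2 become [0.1, 0.0, 0.0], [0.2, 0.0, 0.0]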
--------------------------------------------------------------------------------
/leopy/src/leopy/dataio/generate_nav2dsim_dataset.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import sys
4 | sys.path.append("/usr/local/cython/")
5 |
6 | import numpy as np
7 | import math
8 |
9 | import os
10 | import hydra
11 | import json
12 | import csv
13 | from attrdict import AttrDict
14 | from datetime import datetime
15 |
16 | import gtsam
17 | from leopy.utils import tf_utils, dir_utils
18 | from leopy.eval import quant_metrics
19 | from scipy import interpolate
20 |
21 | import matplotlib.pyplot as plt
22 |
23 | BASE_PATH = os.path.abspath(os.path.join(
24 | os.path.dirname(__file__), "../../../.."))
25 | CONFIG_PATH = os.path.join(BASE_PATH, "leopy/config/dataio/nav2d.yaml")
26 |
27 | def wrap_logger_angles_to_pi(logger, field_names):
28 |
29 | for field in field_names:
30 | field_arr = np.asarray(logger[field])
31 | field_arr[:, -1] = quant_metrics.wrap_to_pi(field_arr[:, -1]) # x, y, theta format
32 | logger[field] = field_arr.tolist()
33 |
34 | return logger
35 |
36 | def get_waypoints_gui(params, poses=None):
37 |
38 | class MouseEvents:
39 | def __init__(self, fig, line):
40 | self.path_start = False # if true, capture data
41 | self.fig = fig
42 | self.line = line
43 | self.xs = list(line.get_xdata())
44 | self.ys = list(line.get_ydata())
45 | self.orientation = []
46 |
47 | def connect(self):
48 | self.a = self.fig.canvas.mpl_connect(
49 | 'button_press_event', self.on_press)
50 | self.b = self.fig.canvas.mpl_connect(
51 | 'motion_notify_event', self.on_motion)
52 |
53 | def on_press(self, event):
54 | print('Pressed', event.button, event.xdata, event.ydata)
55 | self.path_start = not self.path_start
56 |
57 | def on_motion(self, event):
58 | if self.path_start is True:
59 | if len(self.orientation) == 0:
60 | self.orientation.append(0)
61 | else:
62 | self.orientation.append(
63 | np.pi/2 + np.arctan2((self.ys[-1] - event.ydata), (self.xs[-1] - event.xdata)))
64 | self.xs.append(event.xdata)
65 | self.ys.append(event.ydata)
66 | self.line.set_data(self.xs, self.ys)
67 | self.line.figure.canvas.draw()
68 |
69 | plt.ioff()
70 | plt.close('all')
71 | fig = plt.figure(figsize=(12, 8))
72 | plt.title(
73 | "Generate waypoints for nav2d/{0:04d}/{1:04d}.json dataset: \n Click and move pointer to draw trajectory. Close window once finished.".format(params.dataio.ds_idx, params.dataio.seq_idx))
74 | if poses is not None:
75 | plt.plot(poses[:, 0], poses[:, 1], 'o--', c='m')
76 |
77 | plt.xlim(params.env.area.xmin, params.env.area.xmax)
78 | plt.ylim(params.env.area.ymin, params.env.area.ymax)
79 |
80 | line, = plt.plot([], [])
81 | mouse = MouseEvents(fig, line)
82 | mouse.connect()
83 |
84 | plt.show()
85 |
86 | return np.hstack((np.array(mouse.xs)[:, None], np.array(mouse.ys)[:, None], np.array(mouse.orientation)[:, None]))[1:]
87 |
88 | def plot_data(params, logger, plot_ori=False):
89 | plt.ion()
90 | plt.close('all')
91 | fig = plt.figure(figsize=(12, 8))
92 |
93 | poses_gt = np.asarray(logger.poses_gt)
94 | meas_odom = np.asarray(logger.meas_odom)
95 | meas_gps = np.asarray(logger.meas_gps)
96 |
97 | num_steps = params.num_steps
98 | poses_odom = np.zeros((num_steps, 3))
99 | poses_gps = np.zeros((num_steps, 3))
100 | poses_odom[0, :] = poses_gt[0, :]
101 |
102 | # compute poses
103 | for tstep in range(0, num_steps):
104 |
105 | if (tstep > 0):
106 | poses_odom[tstep] = tf_utils.pose2_to_vec3(tf_utils.vec3_to_pose2(
107 | poses_odom[tstep-1, :]).compose(tf_utils.vec3_to_pose2(meas_odom[tstep-1, :])))
108 |
109 | poses_gps[tstep, :] = meas_gps[tstep, :]
110 |
111 | # plot poses
112 | for tstep in range(num_steps-1, num_steps):
113 |
114 | plt.cla()
115 | plt.xlim(params.env.area.xmin, params.env.area.xmax)
116 | plt.ylim(params.env.area.ymin, params.env.area.ymax)
117 |
118 | plt.scatter([0], [0], marker='*', c='k', s=20,
119 | alpha=1.0, zorder=3, edgecolor='k')
120 | plt.scatter(poses_gt[tstep, 0], poses_gt[tstep, 1], marker=(3, 0, poses_gt[tstep, 2]/np.pi*180),
121 | color='dimgray', s=300, alpha=0.25, zorder=3, edgecolor='dimgray')
122 |
123 | plt.plot(poses_gt[0:tstep, 0], poses_gt[0:tstep, 1], color=params.plot.colors[0], linewidth=2, label="groundtruth")
124 | plt.plot(poses_odom[0:tstep, 0], poses_odom[0:tstep, 1], color=params.plot.colors[1], linewidth=2, label="odom")
125 | plt.plot(poses_gps[0:tstep, 0], poses_gps[0:tstep, 1], color=params.plot.colors[2], linewidth=2, label="gps")
126 |
127 | # if plot_ori:
128 | # ori = poses_gt[:, 2]
129 | # sz_arw = 0.03
130 | # (dx, dy) = (sz_arw * np.cos(ori), sz_arw * np.sin(ori))
131 | # for i in range(0, num_steps):
132 | # plt.arrow(poses_gt[i, 0], poses_gt[i, 1], dx[i], dy[i], linewidth=4,
133 | # head_width=0.01, color='black', head_length=0.1, fc='black', ec='black')
134 |
135 | plt.title("Logged dataset nav2d/{0:04d}/{1:04d}.json".format(params.dataio.ds_idx, params.dataio.seq_idx))
136 | plt.legend(loc='upper right')
137 |
138 | plt.show()
139 | plt.pause(1e-1)
140 |
141 | def create_measurements(cfg, poses, covariances):
142 | num_steps = poses.shape[0]
143 |
144 | odom_noise = gtsam.noiseModel_Diagonal.Sigmas(covariances.odom)
145 | gps_noise = gtsam.noiseModel_Diagonal.Sigmas(covariances.gps)
146 |
147 | sampler_odom_noise = gtsam.Sampler(odom_noise, 0)
148 | sampler_gps_noise = gtsam.Sampler(gps_noise, 0)
149 |
150 | measurements = AttrDict()
151 | measurements.odom = np.zeros((num_steps-1, 3))
152 | measurements.gps = np.zeros((num_steps, 3))
153 |
154 | # add measurements
155 | for tstep in range(0, num_steps):
156 |
157 | # binary odom
158 | if (tstep > 0):
159 | prev_pose = tf_utils.vec3_to_pose2(poses[tstep-1])
160 | curr_pose = tf_utils.vec3_to_pose2(poses[tstep])
161 | delta_pose = prev_pose.between(curr_pose)
162 | delta_pose_noisy = tf_utils.add_gaussian_noise(delta_pose, sampler_odom_noise.sample())
163 |
164 | measurements.odom[tstep-1, :] = tf_utils.pose2_to_vec3(delta_pose_noisy)
165 |
166 | # unary gps
167 | curr_pose = tf_utils.vec3_to_pose2(poses[tstep])
168 | curr_pose_noisy = tf_utils.add_gaussian_noise(curr_pose, sampler_gps_noise.sample())
169 |
170 | measurements.gps[tstep, :] = tf_utils.pose2_to_vec3(curr_pose_noisy)
171 |
172 | return measurements
173 |
174 | def log_data(params, poses, measurements, save_file=False):
175 |
176 | # get data for logger
177 | sigma_mat_odom = np.diag(list(params.measurements.noise_models.odom))
178 | sigma_mat_odom = (np.reshape(
179 | sigma_mat_odom, (sigma_mat_odom.shape[0]*sigma_mat_odom.shape[1]))).tolist()
180 | sigma_mat_gps = np.diag(list(params.measurements.noise_models.gps))
181 | sigma_mat_gps = (np.reshape(
182 | sigma_mat_gps, (sigma_mat_gps.shape[0]*sigma_mat_gps.shape[1]))).tolist()
183 |
184 | factor_names, factor_keysyms, factor_keyids, factor_covs, factor_meas = ([] for i in range(5))
185 | num_steps = params.num_steps
186 |
187 | meas_odom, meas_gps = [], []
188 | for tstep in range(0, num_steps):
189 |
190 | # odom
191 | if (tstep > 0):
192 | factor_names.append('odom')
193 | factor_keysyms.append(['x', 'x'])
194 | factor_keyids.append([tstep-1, tstep])
195 | factor_covs.append(sigma_mat_odom)
196 | factor_meas.append(measurements.odom[tstep-1].tolist())
197 |
198 | # gps
199 | factor_names.append('gps')
200 | factor_keysyms.append(['x'])
201 | factor_keyids.append([tstep])
202 | factor_covs.append(sigma_mat_gps)
203 | factor_meas.append(measurements.gps[tstep].tolist())
204 |
205 | # store measurements separately (no odom measurement exists at tstep 0)
206 | if (tstep > 0): meas_odom.append(measurements.odom[tstep-1].tolist())
207 | meas_gps.append(measurements.gps[tstep].tolist())
208 |
209 | # save to logger object
210 | logger = AttrDict()
211 |
212 | logger.poses_gt = poses[0:num_steps, :].tolist()
213 |
214 | logger.factor_names = factor_names
215 | logger.factor_keysyms = factor_keysyms
216 | logger.factor_keyids = factor_keyids
217 | logger.factor_covs = factor_covs
218 | logger.factor_meas = factor_meas
219 |
220 | logger.meas_odom = meas_odom
221 | logger.meas_gps = meas_gps
222 |
223 | logger = wrap_logger_angles_to_pi(logger, field_names=['poses_gt', 'meas_odom', 'meas_gps'])
224 |
225 | logger.logname = "{0}_{1}".format(
226 | params.dataio.dataset_name, datetime.now().strftime("%m-%d-%Y-%H-%M-%S"))
227 |
228 | if save_file:
229 | seq_idx = params.dataio.seq_idx
230 | dataset_mode = "train" if (seq_idx < params.dataio.n_data_train) else "test"
231 | filename = "{0}/{1}/{2:04d}.json".format(params.dataio.dstdir_logger, dataset_mode, seq_idx)
232 | dir_utils.write_file_json(filename=filename, data=logger)
233 |
234 | return logger
235 |
236 | def load_poses_file(params):
237 | filename = "{0}/{1}/{2}/poses/{3:04d}.json".format(
238 | BASE_PATH, params.dataio.dstdir_dataset, params.dataio.dataset_name, params.dataio.seq_idx)
239 |
240 | dataset = dir_utils.read_file_json(filename, verbose=False)
241 | poses = np.asarray(dataset['poses'])
242 |
243 | return poses
244 |
245 | def save_poses_file(params, poses):
246 | filename = "{0}/{1}/{2}/poses/{3:04d}.json".format(
247 | BASE_PATH, params.dataio.dstdir_dataset, params.dataio.dataset_name, params.dataio.seq_idx)
248 |
249 | logger = AttrDict()
250 | logger.poses = poses.tolist()
251 |
252 | dir_utils.write_file_json(filename, data=logger)
253 |
254 | def random_cov_sigmas(min_val=0., max_val=1., dim=3):
255 | sigmas = np.random.rand(dim) * (max_val - min_val) + min_val
256 | return sigmas
257 |
258 | def get_covariances(params):
259 |
260 | covariances = AttrDict()
261 |
262 | if (params.measurements.noise_models == "random"):
263 | covariances.odom = random_cov_sigmas(min_val=1e-2, max_val=1e-1, dim=3)
264 | covariances.gps = random_cov_sigmas(min_val=1e-1, max_val=1, dim=3)
265 |
266 | return covariances
267 |
268 | covariances.odom = np.array(params.measurements.noise_models.odom)
269 | covariances.gps = np.array(params.measurements.noise_models.gps)
270 |
271 | return covariances
272 |
273 | def interpolate_poses(poses, dim=2):
274 |
275 | n_poses = poses.shape[0]
276 |
277 | y = quant_metrics.wrap_to_pi(poses[:, dim])
278 | x = np.arange(0, n_poses)
279 |
280 | idx = np.nonzero(y)
281 | interp = interpolate.interp1d(x[idx], y[idx], fill_value="extrapolate")
282 |
283 | x = np.arange(0, n_poses)
284 | y_interp = interp(x)
285 |
286 | poses[:, dim] = y_interp
287 |
288 | return poses
289 |
290 | @hydra.main(config_path=CONFIG_PATH)
291 | def main(cfg):
292 |
293 | if cfg.options.random_seed is not None:
294 | np.random.seed(cfg.options.random_seed)
295 |
296 | # create logger dstdir
297 | cfg.dataio.dstdir_logger = "{0}/{1}/{2}/dataset_{3:04d}".format(
298 | BASE_PATH, cfg.dataio.dstdir_dataset, cfg.dataio.dataset_name, cfg.dataio.start_ds_idx)
299 | dir_utils.make_dir(cfg.dataio.dstdir_logger+"/train", clear=True)
300 | dir_utils.make_dir(cfg.dataio.dstdir_logger+"/test", clear=True)
301 |
302 | for ds_idx in range(cfg.dataio.start_ds_idx, cfg.dataio.n_datasets):
303 | cfg.dataio.ds_idx = ds_idx
304 |
305 | for seq_idx in range(cfg.dataio.start_seq_idx, cfg.dataio.n_seqs):
306 | cfg.dataio.seq_idx = seq_idx
307 | covariances = get_covariances(cfg)
308 |
309 | # load poses
310 | if (cfg.dataio.load_poses_file):
311 | poses = load_poses_file(cfg)
312 | poses = interpolate_poses(poses, dim=2) # angles
313 | else:
314 | poses = get_waypoints_gui(cfg, poses=None)
315 | if (cfg.dataio.save_poses_file):
316 | save_poses_file(cfg, poses)
317 |
318 | # create measurements
319 | cfg.num_steps = int(np.minimum(poses.shape[0], cfg.measurements.num_steps_max))
320 | measurements = create_measurements(cfg, poses, covariances)
321 |
322 | logger = log_data(cfg, poses, measurements, save_file=True)
323 | plot_data(cfg, logger, plot_ori=False)
324 |
325 |
326 | if __name__ == '__main__':
327 | main()
328 |
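329 | # Note on the measurement model above: add_gaussian_noise (from
330 | # leopy.utils.tf_utils, defined elsewhere) perturbs a gtsam.Pose2 with the
331 | # sampled noise vector. One plausible minimal implementation, shown only as an
332 | # assumption to make the model concrete:
333 | #
334 | # def add_gaussian_noise(pose, noise_vec):
335 | #     # compose with a small pose built from the sampled [dx, dy, dtheta]
336 | #     return pose.compose(gtsam.Pose2(noise_vec[0], noise_vec[1], noise_vec[2]))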
--------------------------------------------------------------------------------
/leopy/src/leopy/dataio/generate_nav2dsim_time_varying_dataset.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import sys
4 | sys.path.append("/usr/local/cython/")
5 |
6 | import numpy as np
7 | import math
8 |
9 | import os
10 | import hydra
11 | import json
12 | import csv
13 | from attrdict import AttrDict
14 | from datetime import datetime
15 |
16 | import gtsam
17 | from leopy.utils import tf_utils, dir_utils
18 | from leopy.eval import quant_metrics
19 | from scipy import interpolate
20 |
21 | import matplotlib.pyplot as plt
22 |
23 | BASE_PATH = os.path.abspath(os.path.join(
24 | os.path.dirname(__file__), "../../../.."))
25 | CONFIG_PATH = os.path.join(BASE_PATH, "leopy/config/dataio/nav2d.yaml")
26 |
27 | def wrap_logger_angles_to_pi(logger, field_names):
28 |
29 | for field in field_names:
30 | field_arr = np.asarray(logger[field])
31 | field_arr[:, -1] = quant_metrics.wrap_to_pi(field_arr[:, -1]) # x, y, theta format
32 | logger[field] = field_arr.tolist()
33 |
34 | return logger
35 |
36 | def get_waypoints_gui(params, poses=None):
37 |
38 | class MouseEvents:
39 | def __init__(self, fig, line):
40 | self.path_start = False # if true, capture data
41 | self.fig = fig
42 | self.line = line
43 | self.xs = list(line.get_xdata())
44 | self.ys = list(line.get_ydata())
45 | self.orientation = []
46 |
47 | def connect(self):
48 | self.a = self.fig.canvas.mpl_connect(
49 | 'button_press_event', self.on_press)
50 | self.b = self.fig.canvas.mpl_connect(
51 | 'motion_notify_event', self.on_motion)
52 |
53 | def on_press(self, event):
54 | print('Pressed', event.button, event.xdata, event.ydata)
55 | self.path_start = not self.path_start
56 |
57 | def on_motion(self, event):
58 | if self.path_start is True:
59 | if len(self.orientation) == 0:
60 | self.orientation.append(0)
61 | else:
62 | self.orientation.append(
63 | np.pi/2 + np.arctan2((self.ys[-1] - event.ydata), (self.xs[-1] - event.xdata)))
64 | self.xs.append(event.xdata)
65 | self.ys.append(event.ydata)
66 | self.line.set_data(self.xs, self.ys)
67 | self.line.figure.canvas.draw()
68 |
69 | plt.ioff()
70 | plt.close('all')
71 | fig = plt.figure(figsize=(12, 8))
72 | plt.title(
73 | "Generate waypoints for nav2d/{0:04d}/{1:04d}.json dataset: \n Click and move pointer to draw trajectory. Close window once finished.".format(params.dataio.ds_idx, params.dataio.seq_idx))
74 | if poses is not None:
75 | plt.plot(poses[:, 0], poses[:, 1], 'o--', c='m')
76 |
77 | plt.xlim(params.env.area.xmin, params.env.area.xmax)
78 | plt.ylim(params.env.area.ymin, params.env.area.ymax)
79 |
80 | line, = plt.plot([], [])
81 | mouse = MouseEvents(fig, line)
82 | mouse.connect()
83 |
84 | plt.show()
85 |
86 | return np.hstack((np.array(mouse.xs)[:, None], np.array(mouse.ys)[:, None], np.array(mouse.orientation)[:, None]))[1:]
87 |
88 | def plot_data(params, logger, plot_ori=False):
89 | plt.ion()
90 | plt.close('all')
91 | fig = plt.figure(figsize=(12, 8))
92 |
93 | poses_gt = np.asarray(logger.poses_gt)
94 | meas_odom = np.asarray(logger.meas_odom)
95 | meas_gps = np.asarray(logger.meas_gps)
96 |
97 | num_steps = params.num_steps
98 | poses_odom = np.zeros((num_steps, 3))
99 | poses_gps = np.zeros((num_steps, 3))
100 | poses_odom[0, :] = poses_gt[0, :]
101 |
102 | # compute poses
103 | for tstep in range(0, num_steps):
104 |
105 | if (tstep > 0):
106 | poses_odom[tstep] = tf_utils.pose2_to_vec3(tf_utils.vec3_to_pose2(
107 | poses_odom[tstep-1, :]).compose(tf_utils.vec3_to_pose2(meas_odom[tstep-1, :])))
108 |
109 | poses_gps[tstep, :] = meas_gps[tstep, :]
110 |
111 | # plot poses
112 | for tstep in range(num_steps-1, num_steps):
113 |
114 | plt.cla()
115 | plt.xlim(params.env.area.xmin, params.env.area.xmax)
116 | plt.ylim(params.env.area.ymin, params.env.area.ymax)
117 |
118 | plt.scatter([0], [0], marker='*', c='k', s=20,
119 | alpha=1.0, zorder=3, edgecolor='k')
120 | plt.scatter(poses_gt[tstep, 0], poses_gt[tstep, 1], marker=(3, 0, poses_gt[tstep, 2]/np.pi*180),
121 | color='dimgray', s=300, alpha=0.25, zorder=3, edgecolor='dimgray')
122 |
123 | plt.plot(poses_gt[0:tstep, 0], poses_gt[0:tstep, 1], color=params.plot.colors[0], linewidth=2, label="groundtruth")
124 | plt.plot(poses_odom[0:tstep, 0], poses_odom[0:tstep, 1], color=params.plot.colors[1], linewidth=2, label="odom")
125 | plt.plot(poses_gps[0:tstep, 0], poses_gps[0:tstep, 1], color=params.plot.colors[2], linewidth=2, label="gps")
126 |
127 | # if plot_ori:
128 | # ori = poses_gt[:, 2]
129 | # sz_arw = 0.03
130 | # (dx, dy) = (sz_arw * np.cos(ori), sz_arw * np.sin(ori))
131 | # for i in range(0, num_steps):
132 | # plt.arrow(poses_gt[i, 0], poses_gt[i, 1], dx[i], dy[i], linewidth=4,
133 | # head_width=0.01, color='black', head_length=0.1, fc='black', ec='black')
134 |
135 | plt.title("Logged dataset nav2d/{0:04d}/{1:04d}.json".format(params.dataio.ds_idx, params.dataio.seq_idx))
136 | plt.legend(loc='upper right')
137 |
138 | plt.show()
139 | plt.pause(1)
140 |
141 | # def covariance_type(tfrac):
142 |
143 | # cov_type = None
144 |
145 | # if tfrac <= 0.25:
146 | # cov_type = 0
147 | # elif (tfrac > 0.25) & (tfrac <= 0.5):
148 | # cov_type = 1
149 | # elif (tfrac > 0.5) & (tfrac <= 0.75):
150 | # cov_type = 0
151 | # elif (tfrac > 0.75):
152 | # cov_type = 1
153 |
154 | # return cov_type
155 |
156 | def covariance_type(tfrac):
157 |
158 | cov_type = None
159 |
160 | if tfrac <= 0.25:
161 | cov_type = 0
162 | elif (tfrac > 0.25) & (tfrac <= 0.75):
163 | cov_type = 1
164 | else:  # tfrac > 0.75
165 | cov_type = 0
166 |
167 | return cov_type
168 |
169 | def create_measurements(params, poses, covariances):
170 |
171 | # noise models
172 | odom_noise0 = gtsam.noiseModel_Diagonal.Sigmas(covariances.odom0)
173 | odom_noise1 = gtsam.noiseModel_Diagonal.Sigmas(covariances.odom1)
174 | gps_noise0 = gtsam.noiseModel_Diagonal.Sigmas(covariances.gps0)
175 | gps_noise1 = gtsam.noiseModel_Diagonal.Sigmas(covariances.gps1)
176 |
177 | # samplers
178 | sampler_odom_noise0 = gtsam.Sampler(odom_noise0, 0)
179 | sampler_odom_noise1 = gtsam.Sampler(odom_noise1, 0)
180 | sampler_gps_noise0 = gtsam.Sampler(gps_noise0, 0)
181 | sampler_gps_noise1 = gtsam.Sampler(gps_noise1, 0)
182 |
183 | # init measurements
184 | measurements = AttrDict()
185 | num_steps = params.num_steps
186 | measurements.odom = np.zeros((num_steps-1, 3))
187 | measurements.gps = np.zeros((num_steps, 3))
188 | measurements.cov_type = np.zeros((num_steps, 1))
189 |
190 | # add measurements
191 | for tstep in range(0, num_steps):
192 |
193 | cov_type = covariance_type(tstep / float(num_steps))
194 | sampler_odom_noise = sampler_odom_noise0 if (cov_type == 0) else sampler_odom_noise1
195 | sampler_gps_noise = sampler_gps_noise0 if (cov_type == 0) else sampler_gps_noise1
196 |
197 | measurements.cov_type[tstep] = cov_type
198 |
199 | # binary odom
200 | if (tstep > 0):
201 | prev_pose = tf_utils.vec3_to_pose2(poses[tstep-1])
202 | curr_pose = tf_utils.vec3_to_pose2(poses[tstep])
203 | delta_pose = prev_pose.between(curr_pose)
204 | delta_pose_noisy = tf_utils.add_gaussian_noise(delta_pose, sampler_odom_noise.sample())
205 |
206 | measurements.odom[tstep-1, :] = tf_utils.pose2_to_vec3(delta_pose_noisy)
207 |
208 | # unary gps
209 | curr_pose = tf_utils.vec3_to_pose2(poses[tstep])
210 | curr_pose_noisy = tf_utils.add_gaussian_noise(curr_pose, sampler_gps_noise.sample())
211 |
212 | measurements.gps[tstep, :] = tf_utils.pose2_to_vec3(curr_pose_noisy)
213 |
214 | return measurements
215 |
216 | def log_data(params, poses, measurements, save_file=False):
217 |
218 | # get data for logger
219 | sigma_mat_odom0 = np.diag(list(params.measurements.noise_models.odom0))
220 | sigma_mat_odom0 = (np.reshape(sigma_mat_odom0, (sigma_mat_odom0.shape[0]*sigma_mat_odom0.shape[1]))).tolist()
221 | sigma_mat_odom1 = np.diag(list(params.measurements.noise_models.odom1))
222 | sigma_mat_odom1 = (np.reshape(sigma_mat_odom1, (sigma_mat_odom1.shape[0]*sigma_mat_odom1.shape[1]))).tolist()
223 |
224 | sigma_mat_gps0 = np.diag(list(params.measurements.noise_models.gps0))
225 | sigma_mat_gps0 = (np.reshape(sigma_mat_gps0, (sigma_mat_gps0.shape[0]*sigma_mat_gps0.shape[1]))).tolist()
226 | sigma_mat_gps1 = np.diag(list(params.measurements.noise_models.gps1))
227 | sigma_mat_gps1 = (np.reshape(sigma_mat_gps1, (sigma_mat_gps1.shape[0]*sigma_mat_gps1.shape[1]))).tolist()
228 |
229 | factor_names, factor_keysyms, factor_keyids, factor_covs, factor_meas = ([] for i in range(5))
230 | num_steps = params.num_steps
231 |
232 | meas_odom, meas_gps = [], []
233 | for tstep in range(0, num_steps):
234 |
235 | # odom
236 | if (tstep > 0):
237 | factor_names.append('odom')
238 | factor_keysyms.append(['x', 'x'])
239 | factor_keyids.append([tstep-1, tstep])
240 | factor_meas.append(measurements.odom[tstep-1].tolist() + measurements.cov_type[tstep-1].tolist())
241 |
242 | sigma_mat_odom = sigma_mat_odom0 if (measurements.cov_type[tstep-1] == 0) else sigma_mat_odom1
243 | factor_covs.append(sigma_mat_odom)
244 |
245 | # gps
246 | factor_names.append('gps')
247 | factor_keysyms.append(['x'])
248 | factor_keyids.append([tstep])
249 | factor_meas.append(measurements.gps[tstep].tolist() + measurements.cov_type[tstep].tolist())
250 |
251 | sigma_mat_gps = sigma_mat_gps0 if (measurements.cov_type[tstep] == 0) else sigma_mat_gps1
252 | factor_covs.append(sigma_mat_gps)
253 |
254 | # store measurements separately (no odom measurement exists at tstep 0)
255 | if (tstep > 0): meas_odom.append(measurements.odom[tstep-1].tolist())
256 | meas_gps.append(measurements.gps[tstep].tolist())
257 |
258 | # save to logger object
259 | logger = AttrDict()
260 |
261 | logger.poses_gt = poses[0:num_steps, :].tolist()
262 |
263 | logger.factor_names = factor_names
264 | logger.factor_keysyms = factor_keysyms
265 | logger.factor_keyids = factor_keyids
266 | logger.factor_covs = factor_covs
267 | logger.factor_meas = factor_meas
268 |
269 | logger.meas_odom = meas_odom
270 | logger.meas_gps = meas_gps
271 |
272 | logger = wrap_logger_angles_to_pi(logger, field_names=['poses_gt', 'meas_odom', 'meas_gps'])
273 |
274 | logger.logname = "{0}_{1}".format(
275 | params.dataio.dataset_name, datetime.now().strftime("%m-%d-%Y-%H-%M-%S"))
276 |
277 | if save_file:
278 | seq_idx = params.dataio.seq_idx
279 | dataset_mode = "train" if (seq_idx < params.dataio.n_data_train) else "test"
280 | filename = "{0}/{1}/{2:04d}.json".format(params.dataio.dstdir_logger, dataset_mode, seq_idx)
281 | dir_utils.write_file_json(filename=filename, data=logger)
282 |
283 | return logger
284 |
285 | def load_poses_file(params):
286 | filename = "{0}/{1}/{2}/poses/{3:04d}.json".format(
287 | BASE_PATH, params.dataio.dstdir_dataset, params.dataio.dataset_name, params.dataio.seq_idx)
288 |
289 | dataset = dir_utils.read_file_json(filename, verbose=False)
290 | poses = np.asarray(dataset['poses'])
291 |
292 | return poses
293 |
294 | def save_poses_file(params, poses):
295 | filename = "{0}/{1}/{2}/poses/{3:04d}.json".format(
296 | BASE_PATH, params.dataio.dstdir_dataset, params.dataio.dataset_name, params.dataio.seq_idx)
297 |
298 | logger = AttrDict()
299 | logger.poses = poses.tolist()
300 |
301 | dir_utils.write_file_json(filename, data=logger)
302 |
303 | def random_cov_sigmas(min_val=0., max_val=1., dim=3):
304 | sigmas = np.random.rand(dim) * (max_val - min_val) + min_val
305 | return sigmas
306 |
307 | def get_covariances(params):
308 |
309 | covariances = AttrDict()
310 |
311 | if (params.measurements.noise_models == "random"):
312 | covariances.odom0 = random_cov_sigmas(min_val=1e-2, max_val=1e-1, dim=3)
313 | covariances.gps0 = random_cov_sigmas(min_val=1e-1, max_val=1, dim=3)
314 |
315 | covariances.odom1 = random_cov_sigmas(min_val=1e-1, max_val=1e-1, dim=3)
316 | covariances.gps1 = random_cov_sigmas(min_val=1e-1, max_val=1, dim=3)
317 |
318 | return covariances
319 |
320 | covariances.odom0 = np.array(params.measurements.noise_models.odom0)
321 | covariances.gps0 = np.array(params.measurements.noise_models.gps0)
322 |
323 | covariances.odom1 = np.array(params.measurements.noise_models.odom1)
324 | covariances.gps1 = np.array(params.measurements.noise_models.gps1)
325 |
326 | return covariances
327 |
328 | def interpolate_poses(poses, dim=2):
329 |
330 | n_poses = poses.shape[0]
331 |
332 | y = quant_metrics.wrap_to_pi(poses[:, dim])
333 | x = np.arange(0, n_poses)
334 |
335 | idx = np.nonzero(y)
336 | interp = interpolate.interp1d(x[idx], y[idx], fill_value="extrapolate")
337 |
338 | x = np.arange(0, n_poses)
339 | y_interp = interp(x)
340 |
341 | poses[:, dim] = y_interp
342 |
343 | return poses
344 |
345 | @hydra.main(config_path=CONFIG_PATH)
346 | def main(cfg):
347 |
348 | if cfg.options.random_seed is not None:
349 | np.random.seed(cfg.options.random_seed)
350 |
351 | # create logger dstdir
352 | cfg.dataio.dstdir_logger = "{0}/{1}/{2}/dataset_{3:04d}".format(
353 | BASE_PATH, cfg.dataio.dstdir_dataset, cfg.dataio.dataset_name, cfg.dataio.start_ds_idx)
354 | dir_utils.make_dir(cfg.dataio.dstdir_logger+"/train", clear=True)
355 | dir_utils.make_dir(cfg.dataio.dstdir_logger+"/test", clear=True)
356 |
357 | for ds_idx in range(cfg.dataio.start_ds_idx, cfg.dataio.n_datasets):
358 | cfg.dataio.ds_idx = ds_idx
359 |
360 | for seq_idx in range(cfg.dataio.start_seq_idx, cfg.dataio.n_seqs):
361 | cfg.dataio.seq_idx = seq_idx
362 | covariances = get_covariances(cfg)
363 |
364 | # load poses
365 | if (cfg.dataio.load_poses_file):
366 | poses = load_poses_file(cfg)
367 | poses = interpolate_poses(poses, dim=2) # angles
368 | else:
369 | poses = get_waypoints_gui(cfg, poses=None)
370 | if (cfg.dataio.save_poses_file):
371 | save_poses_file(cfg, poses)
372 |
373 | # create measurements
374 | cfg.num_steps = int(np.minimum(poses.shape[0], cfg.measurements.num_steps_max))
375 | measurements = create_measurements(cfg, poses, covariances)
376 |
377 | cfg.dataio.dstdir_logger = "{0}/{1}/{2}/dataset_{3:04d}".format(
378 | BASE_PATH, cfg.dataio.dstdir_dataset, cfg.dataio.dataset_name, ds_idx)
379 | dir_utils.make_dir(cfg.dataio.dstdir_logger, clear=False)
380 |
381 | logger = log_data(cfg, poses, measurements, save_file=True)
382 |
383 | plot_data(cfg, logger, plot_ori=False)
384 |
385 |
386 | if __name__ == '__main__':
387 | main()
388 |
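389 | # The schedule above makes the measurement noise time-varying: covariance_type
390 | # switches the active noise model by normalized time tfrac = tstep / num_steps.
391 | # Expected mapping, derived from the branches above:
392 | #
393 | # >>> [covariance_type(t) for t in (0.1, 0.25, 0.4, 0.6, 0.8, 1.0)]
394 | # [0, 0, 1, 1, 0, 0]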
--------------------------------------------------------------------------------
/leopy/src/leopy/dataio/generate_push2dreal_dataset.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import sys
4 | sys.path.append("/usr/local/cython/")
5 |
6 | import numpy as np
7 | import math
8 |
9 | import os
10 | import hydra
11 | from attrdict import AttrDict
12 | from datetime import datetime
13 |
14 | from itertools import combinations, permutations, combinations_with_replacement
15 | import random
16 | from tqdm import tqdm
17 |
18 | import gtsam
19 | from leopy.utils import tf_utils, dir_utils
20 | from leopy.dataio import data_process
21 | from leopy.eval import quant_metrics
22 |
23 | import matplotlib.pyplot as plt
24 |
25 | BASE_PATH = os.path.abspath(os.path.join(
26 | os.path.dirname(__file__), "../../../.."))
27 | CONFIG_PATH = os.path.join(BASE_PATH, "leopy/config/dataio/push2d.yaml")
28 |
29 | def create_cov_mat(sigmas, flatten=True):
30 |
31 | sigmas_sq = [sigma**2 for sigma in sigmas]
32 | cov_mat = np.diag(list(sigmas_sq))
33 | if flatten:
34 | cov_mat = cov_mat.flatten().tolist()
35 |
36 | return cov_mat
37 |
38 | def wrap_logger_angles_to_pi(logger, field_names):
39 |
40 | for field in field_names:
41 | field_arr = np.asarray(logger[field])
42 | field_arr[:, -1] = quant_metrics.wrap_to_pi(field_arr[:, -1]) # x, y, theta format
43 | logger[field] = field_arr.tolist()
44 |
45 | return logger
46 |
47 | def log_data(params, dataset, ds_idxs, eps_idxs, seq_idx, save_file=False):
48 |
49 | # init noise models
50 | sigma_mat_ee = list(params.measurements.noise_models.ee_pose_prior)
51 | sigma_mat_qs = list(params.measurements.noise_models.qs_push_motion)
52 | sigma_mat_sdf = list(params.measurements.noise_models.sdf_intersection)
53 | sigma_mat_tactile = list(params.measurements.noise_models.tactile_rel_meas)
54 | sigma_mat_interseq = list(params.measurements.noise_models.binary_interseq_obj)
55 |
56 | ee_pose_prior_noise = gtsam.noiseModel_Diagonal.Sigmas(
57 | np.array(params.measurements.noise_models.ee_pose_prior))
58 | sampler_ee_pose_prior_noise = gtsam.Sampler(ee_pose_prior_noise, 0)
59 |
60 | # init other params
61 | factor_names, factor_keysyms, factor_keyids, factor_covs, factor_meas = ([] for i in range(5))
62 | num_steps = params.dataio.num_steps
63 | wmin, wmax = params.measurements.tactile.wmin, params.measurements.tactile.wmax
64 | wnum = 2
65 |
66 | meas_ee_prior = []
67 | meas_tactile_img_feats = []
68 | for tstep in range(0, num_steps):
69 |
70 | # ee_pose_prior
71 | ee_pose_noisy = tf_utils.add_gaussian_noise(tf_utils.vec3_to_pose2(
72 | dataset['ee_poses_2d'][ds_idxs[tstep]]), sampler_ee_pose_prior_noise.sample())
73 | factor_names.append('ee_pose_prior')
74 | factor_keysyms.append(['e'])
75 | factor_keyids.append([tstep])
76 | factor_covs.append(sigma_mat_ee)
77 | factor_meas.append(tf_utils.pose2_to_vec3(ee_pose_noisy))
78 |
79 | # qs_push_motion
80 | if (tstep > 0) & (eps_idxs[tstep-1] == eps_idxs[tstep]):
81 | factor_names.append('qs_push_motion')
82 | factor_keysyms.append(['o', 'o', 'e', 'e'])
83 | factor_keyids.append([tstep-1, tstep, tstep-1, tstep])
84 | factor_covs.append(sigma_mat_qs)
85 | factor_meas.append([0., 0., 0.])
86 |
87 | # sdf_intersection
88 | factor_names.append('sdf_intersection')
89 | factor_keysyms.append(['o', 'e'])
90 | factor_keyids.append([tstep, tstep])
91 | factor_covs.append(sigma_mat_sdf)
92 | factor_meas.append([0.])
93 |
94 | # tactile_rel
95 | if (tstep > wmin):
96 |
97 | # wmax_step = np.minimum(tstep, wmax)
98 | # wrange = np.linspace(wmin, wmax_curr_step, num=wnum).astype(np.int)
99 | # for w in wrange:
100 |
101 | wmax_step = np.minimum(tstep, wmax)
102 | for w in range(wmin, wmax_step):
103 |
104 | # skip inter episode factors
105 | if (eps_idxs[tstep-w] != eps_idxs[tstep]):
106 | continue
107 |
108 | # print([tstep-w, tstep, tstep-w, tstep])
109 |
110 | factor_names.append('tactile_rel')
111 | factor_keysyms.append(['o', 'o', 'e', 'e'])
112 | factor_keyids.append([tstep-w, tstep, tstep-w, tstep])
113 | factor_covs.append(sigma_mat_tactile)
114 | factor_meas.append((dataset['img_feats'][ds_idxs[tstep-w]], dataset['img_feats'][ds_idxs[tstep]]))
115 |
116 | # interseq binary
117 | if (tstep > 0) & (eps_idxs[tstep-1] != eps_idxs[tstep]):
118 | factor_names.append('binary_interseq_obj')
119 | factor_keysyms.append(['o', 'o'])
120 | factor_keyids.append([tstep-1, tstep])
121 | factor_covs.append(sigma_mat_interseq)
122 | factor_meas.append([0., 0., 0.])
123 |
124 | # store external measurement separately
125 | meas_ee_prior.append(tf_utils.pose2_to_vec3(ee_pose_noisy))
126 | meas_tactile_img_feats.append(dataset['img_feats'][ds_idxs[tstep]])
127 |
128 | # save to logger object
129 | logger = AttrDict()
130 | logger.ee_poses_gt = [dataset['ee_poses_2d'][idx] for idx in ds_idxs]
131 | logger.obj_poses_gt = [dataset['obj_poses_2d'][idx] for idx in ds_idxs]
132 | logger.contact_episode = [dataset['contact_episode'][idx] for idx in ds_idxs]
133 | logger.contact_flag = [dataset['contact_flag'][idx] for idx in ds_idxs]
134 |
135 | logger.factor_names = factor_names
136 | logger.factor_keysyms = factor_keysyms
137 | logger.factor_keyids = factor_keyids
138 | logger.factor_covs = factor_covs
139 | logger.factor_meas = factor_meas
140 |
141 | logger.meas_ee_prior = meas_ee_prior
142 | logger.meas_tactile_img_feats = meas_tactile_img_feats
143 | logger = wrap_logger_angles_to_pi(logger, field_names=['ee_poses_gt', 'obj_poses_gt', 'meas_ee_prior'])
144 |
145 | logger.logname = "{0}_{1}".format(
146 | params.dataio.dataset_name, datetime.now().strftime("%m-%d-%Y-%H-%M-%S"))
147 |
148 | if save_file:
149 | dataset_mode = "train" if (seq_idx < params.dataio.n_data_train) else "test"
150 | filename = "{0}/{1}/{2:04d}.json".format(params.dataio.dstdir_logger, dataset_mode, seq_idx)
151 | dir_utils.write_file_json(filename=filename, data=logger)
152 |
153 | return logger
154 |
155 | def sequence_generator(contact_episodes, num_episodes_seq):
156 |
157 | episode_idxs = list(set(contact_episodes))
158 | episode_seq_list = list(permutations(episode_idxs, num_episodes_seq))
159 |
160 | random.shuffle(episode_seq_list)
161 | print("[sequence_generator] Generated {0} episode sequences".format(len(episode_seq_list)))
162 |
163 | return episode_seq_list
164 |
165 |
166 | def sequence_logger(params, dataset, episode_seq, seq_idx, save_file=True):
167 | dataset_idxs = []
168 | episode_idxs = []
169 | episode_seq = list(episode_seq)
170 | logger = None
171 |
172 | # collect dataset idxs for episodes in episode_seq
173 | for episode in episode_seq:
174 | dataset_idxs_curr = [idx for idx, val in enumerate(
175 | dataset['contact_episode']) if (val == episode)]
176 | dataset_idxs.append(dataset_idxs_curr)
177 | episode_idxs.append([episode] * len(dataset_idxs_curr))
178 |
179 | # flatten into single list
180 | dataset_idxs = [item for sublist in dataset_idxs for item in sublist]
181 | episode_idxs = [item for sublist in episode_idxs for item in sublist]
182 |
183 | if (len(episode_idxs) < params.dataio.num_steps):
184 | return
185 |
186 | # cut down to num_steps entries
187 | dataset_idxs = dataset_idxs[0:params.dataio.num_steps]
188 | episode_idxs = episode_idxs[0:params.dataio.num_steps]
189 |
190 | print("Logging dataset {0}/{1} of length {2} and episode idxs {3}".format(
191 | seq_idx, params.dataio.num_seqs, len(episode_idxs), episode_seq))
192 |
193 | dataset_tf = data_process.transform_episodes_common_frame(
194 | episode_seq, dataset)
195 |
196 | logger = log_data(params, dataset_tf, dataset_idxs,
197 | episode_idxs, seq_idx, save_file=save_file)
198 |
199 | return logger
200 |
201 |
202 | @hydra.main(config_path=CONFIG_PATH)
203 | def main(cfg):
204 |
205 | dataset_file = f"{BASE_PATH}/{cfg.dataio.srcdir_pushest}/{cfg.dataio.dataset_name}.json"
206 | dataset = dir_utils.read_file_json(dataset_file)
207 |
208 | # create logger dstdir
209 | cfg.dataio.dstdir_logger = "{0}/{1}/{2}".format(
210 | BASE_PATH, cfg.dataio.dstdir_dataset, cfg.dataio.dataset_name)
211 | dir_utils.make_dir(cfg.dataio.dstdir_logger+"/train", clear=True)
212 | dir_utils.make_dir(cfg.dataio.dstdir_logger+"/test", clear=True)
213 |
214 | # generate random episode combination sequences
215 | episode_seq_list = sequence_generator(
216 | dataset['contact_episode'], cfg.dataio.num_episodes_seq)
217 |
218 | # store episode combinations used for generating the dataset
219 | eps_seq_file = f"{BASE_PATH}/{cfg.dataio.dstdir_dataset}/episode_seq_{cfg.dataio.dataset_name}.txt"
220 | eps_seq_fid = open(eps_seq_file, 'w')
221 |
222 | seq_idx = 0
223 | num_seqs = np.minimum(len(episode_seq_list), cfg.dataio.num_seqs)
224 | for episode_seq in tqdm(episode_seq_list):
225 |
226 | logger = sequence_logger(cfg, dataset, episode_seq, seq_idx, save_file=True)
227 |
228 | if logger is not None:
229 | seq_idx = seq_idx + 1
230 |
231 | eps_seq_fid.write(f"{episode_seq}\n")
232 | # plot_logger_data(cfg, logger)
233 |
234 | if (seq_idx > num_seqs):
235 | eps_seq_fid.close()
236 | break
237 |
238 | if __name__ == '__main__':
239 | main()
240 |
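241 | # Sequence generation sketch: sequence_generator draws ordered k-episode
242 | # combinations, so it yields P(n, k) = n! / (n - k)! candidate sequences for n
243 | # distinct episodes. A small worked example (toy episode labels):
244 | #
245 | # >>> seqs = sequence_generator([1, 1, 2, 2, 3], num_episodes_seq=2)
246 | # [sequence_generator] Generated 6 episode sequences
247 | # >>> sorted(seqs) == [(1, 2), (1, 3), (2, 1), (2, 3), (3, 1), (3, 2)]
248 | # True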
--------------------------------------------------------------------------------
/leopy/src/leopy/eval/quant_metrics.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import math
4 | import numpy as np
5 |
6 | def wrap_to_pi(arr):
7 | arr_wrap = (arr + math.pi) % (2 * math.pi) - math.pi
8 | return arr_wrap
9 |
10 | def traj_error(xyh_est, xyh_gt, err_type="rmse"):
11 |
12 | if (err_type == "rmse"):
13 | diff = xyh_gt - xyh_est
14 | diff[:, 2] = wrap_to_pi(diff[:, 2])
15 | diff_sq = diff**2
16 |
17 | rmse_trans = np.sqrt(np.mean(diff_sq[:, 0:2].flatten()))
18 | rmse_rot = np.sqrt(np.mean(diff_sq[:, 2].flatten()))
19 | error = (rmse_trans, rmse_rot)
20 |
21 | elif (err_type == "ate"):
22 | raise NotImplementedError("[quant_metrics::traj_error] err_type 'ate' not implemented")
23 |
24 | return error
25 |
26 | def get_traj_error_step(params, tstep, logger):
27 | if (params.dataio.dataset_type == "nav2d"):
28 | poses_obj_gt = logger.data[tstep]["gt/poses2d"]
29 | poses_obj_graph = logger.data[tstep]["graph/poses2d"]
30 | elif (params.dataio.dataset_type == "push2d"):
31 | poses_obj_gt = logger.data[tstep]["gt/obj_poses2d"]
32 | poses_obj_graph = logger.data[tstep]["graph/obj_poses2d"]
33 | else:
34 | raise ValueError(f"[quant_metrics::get_traj_error_step] logger poses not found for dataset_type {params.dataio.dataset_type}")
35 |
36 | error = traj_error(poses_obj_gt, poses_obj_graph, err_type="rmse")
37 |
38 | return error
39 |
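40 | # Worked example (toy numbers): wrap_to_pi maps angles into [-pi, pi), so a
41 | # heading error of 2*pi - 0.01 is scored as -0.01 rather than ~6.27, and the
42 | # translation rmse is taken jointly over x and y:
43 | #
44 | # >>> est = np.array([[0.0, 0.0, 0.01]])
45 | # >>> gt = np.array([[0.3, 0.4, 2 * np.pi]])
46 | # >>> traj_error(est, gt)  # (sqrt((0.3**2 + 0.4**2) / 2), 0.01) approx.
47 | # (0.35355..., 0.00999...)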
--------------------------------------------------------------------------------
/leopy/src/leopy/optim/cost.py:
--------------------------------------------------------------------------------
1 | import sys
2 | sys.path.append("/usr/local/cython/")
3 |
4 | import math
5 | import numpy as np
6 | from scipy.optimize import minimize
7 |
8 | from attrdict import AttrDict
9 |
10 | import torch
11 | from torch.autograd import Variable
12 | import collections
13 | import pandas as pd
14 |
15 | from leopy.utils import tf_utils, dir_utils
16 | from leopy.algo.leo_obs_models import TactileModelNetwork
17 |
18 | import logging
19 | log = logging.getLogger(__name__)
20 |
21 | ## factor costs in pytorch ##
22 |
23 | def whiten_factor_error(factor_error, factor_inf):
24 | # factor_error: n x 1, factor_inf: n x n
25 |
26 | factor_cost = torch.matmul(torch.matmul(
27 | factor_error.permute(1, 0), factor_inf), factor_error)
28 | factor_cost = factor_cost.view(-1)
29 |
30 | return factor_cost
31 |
32 | def unary_factor_error(x, key_syms, key_ids, factor_inf, factor_meas, device=None, params=None):
33 |
34 | key_id = key_ids[0]
35 | key_id = key_id + int(0.5 * x.shape[0]) if (key_syms[0] == 'e') else key_id
36 |
37 | est_pose = (x[key_id, :]).view(1, -1)
38 | meas_pose = factor_meas.view(1, -1)
39 |
40 | err = tf_utils.tf2d_between(est_pose, meas_pose, device=device, requires_grad=True).view(-1, 1) # 3 x 1
41 |
42 | return err
43 |
44 | def binary_odom_factor_error(x, key_syms, key_ids, factor_inf, factor_meas, device=None, params=None):
45 |
46 | p1 = (x[key_ids[0], :]).view(1, -1) # n x 3
47 | p2 = (x[key_ids[1], :]).view(1, -1) # n x 3
48 |
49 | est_val = (tf_utils.tf2d_between(p1, p2, device=device, requires_grad=True)).view(-1)
50 |
51 | # err = (torch.sub(est_val, factor_meas)).view(-1, 1) # 3 x 1
52 | est_val = est_val.view(1, -1)
53 | factor_meas = factor_meas.view(1, -1)
54 | err = (tf_utils.tf2d_between(factor_meas, est_val, device=device, requires_grad=True)).view(-1, 1)
55 |
56 | return err
57 |
58 | def sdf_intersection_factor_error(x, key_syms, key_ids, factor_inf, device=None, params=None):
59 |
60 | err = torch.tensor([[0.]], device=device)
61 |
62 | return err
63 |
64 | def qs_motion_factor_params(obj_shape):
65 |
66 | params = AttrDict()
67 |
68 | if (obj_shape == 'disc'):
69 | params.c_sq = math.pow(0.088 / 3, 2)
70 | elif (obj_shape == 'rect'):
71 | params.c_sq = math.pow(math.sqrt(0.2363**2 + 0.1579**2), 2)
72 | elif (obj_shape == 'ellip'):
73 | params.c_sq = (0.5 * (0.1638 + 0.2428)) ** 2
74 | else:
75 | print(f"[cost::qs_motion_factor_params] unknown obj_shape: {obj_shape}")
76 |
77 | return params
78 |
79 | def qs_motion_factor_error(x, key_syms, key_ids, factor_inf, device=None, params=None):
80 | # keys: o_{t-1}, o_{t}, e_{t-1}, e_{t}
81 |
82 | offset_e = int(0.5 * x.shape[0])
83 |
84 | obj_pose0 = (x[key_ids[0], :]).view(1, -1) # 1 x 3
85 | obj_pose1 = (x[key_ids[1], :]).view(1, -1)
86 | ee_pose0 = (x[offset_e + key_ids[2], :]).view(1, -1) # 1 x 3
87 | ee_pose1 = (x[offset_e + key_ids[3], :]).view(1, -1)
88 |
89 | obj_ori1 = obj_pose1.clone(); obj_ori1[0, 0] = 0.; obj_ori1[0, 1] = 0.
90 | obj_pose_rel__world = tf_utils.tf2d_between(obj_pose0, obj_pose1, device=device, requires_grad=True)
91 |
92 | vel_obj__world = obj_pose1.clone() - obj_pose0.clone(); vel_obj__world[0, 2] = 0.
93 | vel_obj__obj = tf_utils.tf2d_between(obj_ori1, vel_obj__world, device=device, requires_grad=True)
94 |
95 | vel_contact__world = ee_pose1.clone() - ee_pose0.clone(); vel_contact__world[0, 2] = 0.
96 | vel_contact__obj = tf_utils.tf2d_between(obj_ori1, vel_contact__world, device=device, requires_grad=True)
97 |
98 | contact_point1 = ee_pose1.clone(); contact_point1[0, 2] = 0.
99 | contact_point__obj = tf_utils.tf2d_between(obj_pose1, contact_point1, device=device, requires_grad=True)
100 |
101 | # quasi-static motion model: D * V = Vp
102 | vx = vel_obj__obj[0, 0]
103 | vy = vel_obj__obj[0, 1]
104 | omega = obj_pose_rel__world[0, 2]
105 |
106 | vpx = vel_contact__obj[0, 0]
107 | vpy = vel_contact__obj[0, 1]
108 |
109 | px = contact_point__obj[0, 0]
110 | py = contact_point__obj[0, 1]
111 |
112 | D = torch.tensor([[1, 0, -py], [0, 1, px], [-py, px, -params.c_sq]], device=device)
113 | V = torch.tensor([vx, vy, omega], device=device)
114 | Vp = torch.tensor([vpx, vpy, 0.], device=device)
115 |
116 | err = torch.sub(torch.matmul(D, V), Vp).view(-1, 1) # 3 x 1
117 |
118 | return err
119 |
120 | def tactile_rel_meas_factor_error(x, key_syms, key_ids, factor_inf, factor_meas, device=None, params=None):
121 | # keys: o_{t-k}, o_{t}, e_{t-k}, e_{t}
122 |
123 | offset_e = int(0.5 * x.shape[0])
124 |
125 | obj_pose1 = (x[key_ids[0], :]).view(1, -1) # 1 x 3
126 | obj_pose2 = (x[key_ids[1], :]).view(1, -1)
127 | ee_pose1 = (x[offset_e + key_ids[2], :]).view(1, -1) # 1 x 3
128 | ee_pose2 = (x[offset_e + key_ids[3], :]).view(1, -1)
129 |
130 | ee_pose1__obj = tf_utils.tf2d_between(obj_pose1, ee_pose1, device=device, requires_grad=True) # 1 x 3
131 | ee_pose2__obj = tf_utils.tf2d_between(obj_pose2, ee_pose2, device=device, requires_grad=True) # 1 x 3
132 | pose_rel_expect = tf_utils.tf2d_between(ee_pose1__obj, ee_pose2__obj, device=device, requires_grad=True) # 1 x 3
133 |
134 | yaw_only_error = True
135 | if yaw_only_error:
136 | yaw_only_mask = torch.zeros(3, 4, device=device)
137 | yaw_only_mask[-1,-1] = 1.
138 | pose_rel_meas = torch.asin(torch.matmul(yaw_only_mask, factor_meas)).view(1, -1) # 1 x 3
139 | pose_rel_expect = torch.tensor([0., 0., pose_rel_expect[0, 2]], requires_grad=True, device=device).view(1, -1) # 1 x 3
140 |
141 | err = tf_utils.tf2d_between(pose_rel_expect, pose_rel_meas, device=device, requires_grad=True).view(-1, 1) # 3 x 1
142 |
143 | return err
144 |
145 | def tactile_model_output_fixed(tactile_model, img_feats, device=None, params=None):
146 | img_feat_i = torch.tensor(img_feats[0], requires_grad=True, device=device).view(1, -1)
147 | img_feat_j = torch.tensor(img_feats[1], requires_grad=True, device=device).view(1, -1)
148 |
149 | if (params.norm_img_feat == True):
150 | mean_img_feat = torch.tensor(params.mean_img_feat, requires_grad=True, device=device)
151 | std_img_feat = torch.tensor(params.std_img_feat, requires_grad=True, device=device)
152 |
153 | img_feat_i = tf_utils.normalize_vector(img_feat_i, mean_img_feat, std_img_feat)
154 | img_feat_j = tf_utils.normalize_vector(img_feat_j, mean_img_feat, std_img_feat)
155 |
156 | class_label = torch.tensor(params.class_label, device=device)
157 | class_label_vec = torch.nn.functional.one_hot(class_label, params.num_classes)
158 | class_label_vec = class_label_vec.view(1, -1)
159 |
160 | tf_pred = tactile_model.forward(img_feat_i, img_feat_j, class_label_vec)
161 | tf_pred = tf_pred.view(-1) # [tx, ty, cyaw, syaw]
162 |
163 | return tf_pred
164 |
165 | def tactile_oracle_output(data, key_syms, key_ids, device=None, params=None):
166 | # keys: o_{t-k}, o_{t}, e_{t-k}, e_{t}
167 |
168 | obj_pose_gt_i = torch.tensor(data['obj_poses_gt'][key_ids[0]], device=device).view(1, -1) # n x 3
169 | obj_pose_gt_j = torch.tensor(data['obj_poses_gt'][key_ids[1]], device=device).view(1, -1)
170 | ee_pose_gt_i = torch.tensor(data['ee_poses_gt'][key_ids[2]], device=device).view(1, -1)
171 | ee_pose_gt_j = torch.tensor(data['ee_poses_gt'][key_ids[3]], device=device).view(1, -1)
172 |
173 | ee_pose_gt_i__obj = tf_utils.tf2d_between(obj_pose_gt_i, ee_pose_gt_i, device=device) # n x 3
174 | ee_pose_gt_j__obj = tf_utils.tf2d_between(obj_pose_gt_j, ee_pose_gt_j, device=device)
175 | pose_rel_gt = tf_utils.tf2d_between(ee_pose_gt_i__obj, ee_pose_gt_j__obj, device=device) # n x 3
176 |
177 | yaw_only_error = True
178 | if yaw_only_error:
179 | tf_pred = torch.tensor([0., 0., torch.cos(pose_rel_gt[0, 2]), torch.sin(pose_rel_gt[0, 2])], device=device).view(-1)
180 |
181 | return tf_pred
182 |
183 | def init_tactile_model(filename=None, device=None):
184 |
185 | tactile_model = TactileModelNetwork(input_size=2*2*4, output_size=4)
186 | tactile_model = tactile_model.to(device)
187 |
188 | if filename is not None:
189 | model_saved = torch.jit.load(filename)
190 | state_dict_saved = model_saved.state_dict()
191 |
192 | # fix: saved dict key mismatch
193 | state_dict_new = collections.OrderedDict()
194 | state_dict_new['fc1.weight'] = state_dict_saved['model.fc1.weight']
195 | state_dict_new['fc1.bias'] = state_dict_saved['model.fc1.bias']
196 |
197 | tactile_model.load_state_dict(state_dict_new)
198 |
199 | return tactile_model
200 |
201 | def update_data_dict(data_dict, factor_name, factor_cost, factor_inf):
202 | factor_key = f"err/factor/{factor_name}"
203 |
204 | if factor_key not in data_dict:
205 | data_dict[factor_key] = [factor_cost.item()]
206 | else:
207 | (data_dict[factor_key]).append(factor_cost.item())
208 |
209 | return data_dict
210 |
211 | def dict_to_row(data_dict, step):
212 |
213 | for key, val in data_dict.items():
214 | data_dict[key] = sum(val) / len(val)
215 |
216 | # data_dict['tstep'] = step
217 | data_row = pd.DataFrame(data_dict, index=[step])
218 | data_row.index.name = 'tstep'
219 |
220 | return data_row
221 | class Cost():
222 | def __init__(self, data, theta, params=None, device=None):
223 | self.data = data
224 | self.theta = theta
225 | self.params = params
226 | self.device = device
227 |
228 | self.dataframe = None
229 |
230 | def get_dataframe(self):
231 | return self.dataframe
232 |
233 | def costfn(self, x, log=False):
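234 | """Factor-graph cost of trajectory x: mean over all factors of 0.5 * whitened squared error."""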
234 |
235 | dataset_type = self.params.dataio.dataset_type
236 |
237 | if (dataset_type == "push2d"):
238 | nsteps = np.minimum(self.params.optim.nsteps, len(self.data['obj_poses_gt']))
239 | tactile_model_filename = "{0}/local/models/{1}.pt".format(self.params.BASE_PATH, self.params.tactile_model.name)
240 | tactile_model = init_tactile_model(filename=tactile_model_filename, device=self.device)
241 | else:
242 | nsteps = np.minimum(self.params.optim.nsteps, len(self.data['poses_gt']))
243 |
244 | if log: self.dataframe = pd.DataFrame()
245 |
246 | fval = torch.tensor([0.], requires_grad=True, device=self.device)
247 | num_factors = torch.tensor([0.], requires_grad=True, device=self.device)
248 |
249 | for tstep in range(1, nsteps):
250 |
251 | # filter out curr step factors
252 | factor_idxs = [idxs for idxs, keys in enumerate(
253 | self.data['factor_keyids']) if (max(keys) == tstep)]
254 |
255 | factor_keysyms = [self.data['factor_keysyms'][idx] for idx in factor_idxs]
256 | factor_keyids = [self.data['factor_keyids'][idx] for idx in factor_idxs]
257 | factor_meas = [self.data['factor_meas'][idx] for idx in factor_idxs]
258 | factor_names = [self.data['factor_names'][idx] for idx in factor_idxs]
259 |
260 | # compute factor costs
261 | data_dict = {}
262 | for idx in range(0, len(factor_idxs)):
263 |
264 | key_syms, key_ids = factor_keysyms[idx], factor_keyids[idx]
265 | factor_name = factor_names[idx]
266 |
267 | if (factor_name == 'gps'):
268 | factor_meas_val = torch.tensor(factor_meas[idx], device=self.device)
269 | factor_inf = torch.diag(self.theta.get_sigma_inv(factor_name, z=factor_meas_val))
270 | factor_error = unary_factor_error(x, key_syms, key_ids, factor_inf, factor_meas_val[0:3], device=self.device) if (factor_inf.requires_grad) else None
271 | elif (factor_name == 'odom'):
272 | factor_meas_val = torch.tensor(factor_meas[idx], device=self.device)
273 | factor_inf = torch.diag(self.theta.get_sigma_inv(factor_name, z=factor_meas_val))
274 | factor_error = binary_odom_factor_error(x, key_syms, key_ids, factor_inf, factor_meas_val[0:3], device=self.device) if (factor_inf.requires_grad) else None
275 | elif (factor_name == 'ee_pose_prior'):
276 | factor_meas_val = torch.tensor(factor_meas[idx], device=self.device)
277 | factor_inf = torch.diag(self.theta.get_sigma_inv(factor_name, z=factor_meas_val))
278 | factor_error = unary_factor_error(x, key_syms, key_ids, factor_inf, factor_meas_val, device=self.device) if (factor_inf.requires_grad) else None
279 | elif (factor_name == 'qs_push_motion'):
280 | params_qs = qs_motion_factor_params(self.params.dataio.obj_shape)
281 | factor_inf = torch.diag(self.theta.get_sigma_inv(factor_name, z=None))
282 | factor_error = qs_motion_factor_error(x, key_syms, key_ids, factor_inf, device=self.device, params=params_qs) if (factor_inf.requires_grad) else None
283 | elif (factor_name == 'sdf_motion'):
284 | factor_inf = torch.diag(self.theta.get_sigma_inv(factor_name, z=None))
285 | factor_error = sdf_intersection_factor_error(x, key_syms, key_ids, factor_inf) if (factor_inf.requires_grad) else None
286 | elif (factor_name == 'tactile_rel'):
287 | if (self.params.tactile_model.oracle == True):
288 | tf_pred = tactile_oracle_output(self.data, key_syms, key_ids, device=self.device)
289 | else:
290 | if (self.params.dataio.model_type == "fixed_cov_varying_mean"):
291 | tf_pred = self.theta.tactile_model_output(img_feats=factor_meas[idx], params=self.params.tactile_model) # learnable model weights
292 | else:
293 | tf_pred = tactile_model_output_fixed(tactile_model, img_feats=factor_meas[idx], device=self.device, params=self.params.tactile_model) # fixed model weights
294 |
295 | factor_inf = torch.diag(self.theta.get_sigma_inv(factor_name, z=None))
296 | factor_error = tactile_rel_meas_factor_error(x, key_syms, key_ids, factor_inf, tf_pred, device=self.device) # if (factor_inf.requires_grad) else factor_error
297 |
298 | factor_cost = whiten_factor_error(factor_error, factor_inf) if (factor_error is not None) else torch.tensor([0.], requires_grad=True, device=self.device)
299 | fval = fval + 0.5 * factor_cost
300 | num_factors = num_factors + 1
301 |
302 | factor_cost_unwhtn = torch.matmul(factor_error.permute(1, 0), factor_error) if (factor_error is not None) else torch.tensor([0.], requires_grad=True, device=self.device)
303 | if log: data_dict = update_data_dict(data_dict, factor_name=factor_name, factor_cost=factor_cost_unwhtn, factor_inf=factor_inf)
304 |
305 | if log: data_row = dict_to_row(data_dict=data_dict, step=tstep)
306 | if log: self.dataframe = pd.concat([self.dataframe, data_row])  # DataFrame.append was removed in pandas 2.0
307 |
308 | fval = fval / num_factors
309 |
310 | return fval
311 |
312 | def grad_theta_costfn(self, x):
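313 | """Gradient of costfn(x) w.r.t. the learnable observation-model parameters theta, via autograd."""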
313 | for name, param in self.theta.named_parameters():
314 | # gradients accumulate in param.grad (param.data.grad is always None); clear stale grads before backward
315 | param.grad = None
316 |
317 | fval = self.costfn(x)
318 | fval.backward()
319 |
320 | grad_theta = AttrDict()
321 | for name, param in self.theta.named_parameters():
322 | grad_theta[name] = param.grad
323 |
324 | return grad_theta
325 |
--------------------------------------------------------------------------------
/leopy/src/leopy/optim/gtsamopt.py:
--------------------------------------------------------------------------------
1 | import sys
2 | sys.path.append("/usr/local/cython/")
3 |
4 | import numpy as np
5 | import time
6 | import pandas as pd
7 |
8 | from leopy.optim.nav2d_factors import *
9 | try:
10 | # run_optimizer's push2d branch needs PlanarSDF and the push2d factor helpers,
11 | # which require the compiled leocpp extension; keep nav2d usable without it
12 | from leopy.optim.push2d_factors import *
13 | except ImportError:
14 | pass
10 |
11 | from leopy.utils import tf_utils, dir_utils, vis_utils
12 | from leopy.utils.logger import Logger
13 |
14 | import logging
15 | log = logging.getLogger(__name__)
16 | class GraphOpt():
17 | def __init__(self):
18 |
19 | self.graph = gtsam.NonlinearFactorGraph()
20 | self.init_vals = gtsam.Values()
21 |
22 | self.optimizer = self.init_isam2()
23 | self.est_vals = gtsam.Values()
24 |
25 | self.logger = Logger()
26 | self.dataframe = pd.DataFrame()
27 | # self.graph_full = gtsam.NonlinearFactorGraph()
28 |
29 | def init_isam2(self):
30 | params_isam2 = gtsam.ISAM2Params()
31 | params_isam2.setRelinearizeThreshold(0.01)
32 | params_isam2.setRelinearizeSkip(10)
33 |
34 | return gtsam.ISAM2(params_isam2)
35 |
36 | def init_vars_step(graphopt, tstep, dataset_type):
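37 | # warm-start the new step's variables with the previous step's estimates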
37 |
38 | if (dataset_type == "push2d"):
39 | key_tm1 = gtsam.symbol(ord('o'), tstep-1)
40 | key_t = gtsam.symbol(ord('o'), tstep)
41 | graphopt.init_vals.insert(key_t, graphopt.est_vals.atPose2(key_tm1))
42 |
43 | key_tm1 = gtsam.symbol(ord('e'), tstep-1)
44 | key_t = gtsam.symbol(ord('e'), tstep)
45 | graphopt.init_vals.insert(key_t, graphopt.est_vals.atPose2(key_tm1))
46 | else:
47 | key_tm1 = gtsam.symbol(ord('x'), tstep-1)
48 | key_t = gtsam.symbol(ord('x'), tstep)
49 | graphopt.init_vals.insert(key_t, graphopt.est_vals.atPose2(key_tm1))
50 |
51 | return graphopt
52 |
53 |
54 | def reset_graph(graphopt):
55 | # graphopt.graph_full.push_back(graphopt.graph)
56 |
57 | graphopt.graph.resize(0)
58 | graphopt.init_vals.clear()
59 |
60 | return graphopt
61 |
62 |
63 | def optimizer_update(graphopt):
64 | graphopt.optimizer.update(graphopt.graph, graphopt.init_vals)
65 | graphopt.est_vals = graphopt.optimizer.calculateEstimate()
66 |
67 | return graphopt
68 |
69 | def print_step(tstep, data, isam2_estimate, dataset_type):
70 |
71 | if (dataset_type == "push2d"):
72 | key = gtsam.symbol(ord('o'), tstep)
73 | print('Estimated, Groundtruth pose: \n {0} \n {1} '.format(
74 | tf_utils.pose2_to_vec3(isam2_estimate.atPose2(key)), data['obj_poses_gt'][tstep]))
75 | else:
76 | key = gtsam.symbol(ord('x'), tstep)
77 | print('Estimated, Groundtruth pose: \n {0} \n {1} '.format(
78 | tf_utils.pose2_to_vec3(isam2_estimate.atPose2(key)), data['poses_gt'][tstep]))
79 |
80 | def log_step(tstep, logger, data, optimizer, dataset_type):
81 |
82 | if (dataset_type == 'nav2d'):
83 | logger = log_step_nav2d(tstep, logger, data, optimizer)
84 | elif (dataset_type == 'push2d'):
85 | logger = log_step_push2d(tstep, logger, data, optimizer)
86 |
87 | return logger
88 |
89 | def log_step_df(tstep, dataframe, data, optimizer, dataset_type):
90 |
91 | if (dataset_type == 'nav2d'):
92 | dataframe = log_step_df_nav2d(tstep, dataframe, data, optimizer)
93 | elif (dataset_type == 'push2d'):
94 | dataframe = log_step_df_push2d(tstep, dataframe, data, optimizer)
95 |
96 | return dataframe
97 |
98 | def add_first_pose_priors(graphopt, data, dataset_type):
99 |
100 | prior_cov = np.array([1e-6, 1e-6, 1e-6])
101 |
102 | if(dataset_type == 'nav2d'):
103 | keys = [gtsam.symbol(ord('x'), 0)]
104 | graphopt.init_vals.insert(
105 | keys[0], tf_utils.vec3_to_pose2(data['poses_gt'][0]))
106 | graphopt.graph = add_unary_factor(
107 | graphopt.graph, keys, prior_cov, data['poses_gt'][0])
108 | elif (dataset_type == 'push2d'):
109 | keys = [gtsam.symbol(ord('o'), 0)]
110 | graphopt.init_vals.insert(
111 | keys[0], tf_utils.vec3_to_pose2(data['obj_poses_gt'][0]))
112 | graphopt.graph = add_unary_factor(
113 | graphopt.graph, keys, prior_cov, data['obj_poses_gt'][0])
114 | keys = [gtsam.symbol(ord('e'), 0)]
115 | graphopt.init_vals.insert(
116 | keys[0], tf_utils.vec3_to_pose2(data['ee_poses_gt'][0]))
117 | graphopt.graph = add_unary_factor(
118 | graphopt.graph, keys, prior_cov, data['ee_poses_gt'][0])
119 |
120 | graphopt = optimizer_update(graphopt)
121 | graphopt = reset_graph(graphopt)
122 |
123 | return graphopt
124 |
125 |
126 | def get_optimizer_soln(tstep, graphopt, dataset_type, sampler=False):
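127 | # stack the current Pose2 estimates into an (n x 3) array; when sampler is set,
128 | # also recover the joint marginal covariance over all pose keys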
127 |
128 | poses_graph = graphopt.optimizer.calculateEstimate()
129 | pose_vec_graph = np.zeros((poses_graph.size(), 3))
130 |
131 | num_poses = tstep + 1
132 | if (dataset_type == 'nav2d'):
133 | keys = [[gtsam.symbol(ord('x'), i)] for i in range(0, num_poses)]
134 | elif (dataset_type == 'push2d'):
135 | keys = [[gtsam.symbol(ord('o'), i), gtsam.symbol(ord('e'), i)] for i in range(0, num_poses)]
136 |
137 | key_vec = gtsam.gtsam.KeyVector()
138 | for key_idx in range(0, len(keys[0])):
139 | for pose_idx in range(0, num_poses):
140 | key = keys[pose_idx][key_idx]
141 | pose2d = poses_graph.atPose2(key)
142 | pose_vec_graph[key_idx * num_poses + pose_idx, :] = [pose2d.x(), pose2d.y(), pose2d.theta()]
143 |
144 | key_vec.push_back(key)
145 |
146 | mean = pose_vec_graph
147 | cov = None
148 |
149 | if sampler:
150 | marginals = gtsam.Marginals(graphopt.optimizer.getFactorsUnsafe(), poses_graph)
151 | cov = marginals.jointMarginalCovariance(key_vec).fullMatrix()
152 |
153 | return (mean, cov)
154 |
155 | def run_optimizer(cost, params=None):
156 | ''' construct graph from data and optimize '''
157 |
158 | dataset_type = params.dataio.dataset_type
159 |
160 | graphopt = GraphOpt()
161 | graphopt = add_first_pose_priors(graphopt, cost.data, dataset_type)
162 |
163 | if (dataset_type == "push2d"):
164 | nsteps = np.minimum(params.optim.nsteps, len(cost.data['obj_poses_gt']))
165 |
166 | planar_sdf = PlanarSDF("{0}/{1}/sdf/{2}.json".format(
167 | params.BASE_PATH, params.dataio.srcdir_dataset, params.dataio.obj_shape))
168 | tactile_model = torch.jit.load("{0}/local/models/{1}.pt".format(
169 | params.BASE_PATH, params.tactile_model.name))
170 | else:
171 | nsteps = np.minimum(params.optim.nsteps, len(cost.data['poses_gt']))
172 |
173 | for tstep in range(1, nsteps):
174 |
175 | # filter out curr step factors
176 | factor_idxs = [idxs for idxs, keys in enumerate(
177 | cost.data['factor_keyids']) if (max(keys) == tstep)]
178 |
179 | factor_keysyms = [cost.data['factor_keysyms'][idx] for idx in factor_idxs]
180 | factor_keyids = [cost.data['factor_keyids'][idx] for idx in factor_idxs]
181 | factor_meas = [cost.data['factor_meas'][idx] for idx in factor_idxs]
182 | factor_names = [cost.data['factor_names'][idx] for idx in factor_idxs]
183 |
184 | # compute factor costs
185 | for idx in range(0, len(factor_idxs)):
186 |
187 | key_syms, key_ids = factor_keysyms[idx], factor_keyids[idx]
188 |
189 | keys = [gtsam.symbol(ord(key_syms[i]), key_id)
190 | for i, key_id in enumerate(key_ids)]
191 | factor_name = factor_names[idx]
192 |
193 | sigma_inv_val = cost.theta.get_sigma_inv(factor_name, factor_meas[idx])
194 | sigma_inv_val = sigma_inv_val.detach().cpu().numpy()
195 | # factor_cov = np.reciprocal(sigma_inv_val) # factor_cov: sigma format
196 | factor_cov = np.reciprocal(np.sqrt(sigma_inv_val)) # factor_cov: sigma_sq format
197 |
198 | if (factor_name == 'gps'):
199 | graphopt.graph = add_unary_factor(
200 | graphopt.graph, keys, factor_cov, factor_meas[idx][0:3])
201 | elif (factor_name == 'odom'):
202 | graphopt.graph = add_binary_odom_factor(
203 | graphopt.graph, keys, factor_cov, factor_meas[idx][0:3])
204 | elif (factor_name == 'ee_pose_prior'):
205 | graphopt.graph = add_unary_factor(
206 | graphopt.graph, keys, factor_cov, factor_meas[idx])
207 | elif (factor_name == 'qs_push_motion'):
208 | graphopt.graph = add_qs_motion_factor(
209 | graphopt.graph, keys, factor_cov, params=params)
210 | elif (factor_name == 'sdf_intersection'):
211 | graphopt.graph = add_sdf_intersection_factor(
212 | graphopt.graph, keys, factor_cov, planar_sdf.sdf)
213 | elif (factor_name == 'tactile_rel'):
214 | if (params.tactile_model.oracle == True):
215 | tf_pred_net = tactile_oracle_output(cost.data, key_ids)
216 | else:
217 | if (params.dataio.model_type == "fixed_cov_varying_mean"):
218 | tf_pred_net = tactile_model_output(
219 | cost, img_feats=factor_meas[idx], params=params.tactile_model) # learnable model weights
220 | tf_pred_net[3] = np.clip(tf_pred_net[3], -1., 1.)
221 | else:
222 | tf_pred_net = tactile_model_output_fixed(
223 | tactile_model, img_feats=factor_meas[idx], params=params.tactile_model) # fixed model weights
224 | graphopt.graph = add_tactile_rel_meas_factor(
225 | graphopt.graph, keys, factor_cov, tf_pred_net, params=params.tactile_model)
226 | elif (factor_name == 'binary_interseq_obj'):
227 | graphopt.graph = add_binary_odom_factor(
228 | graphopt.graph, keys, factor_cov, factor_meas[idx])
229 |
230 | # init variables
231 | graphopt = init_vars_step(graphopt, tstep, dataset_type)
232 |
233 | # optimize
234 | graphopt = optimizer_update(graphopt)
235 |
236 | # print step
237 | # print_step(tstep, cost.data, graphopt.est_vals, dataset_type)
238 |
239 | # log step
240 | # todo: phase out log_step and only use log_step_df. Currently log_step needed for plotting.
241 | graphopt.logger = log_step(
242 | tstep, graphopt.logger, cost.data, graphopt.optimizer, dataset_type)
243 |
244 | if params.logger.enable:
245 | graphopt.dataframe = log_step_df(
246 | tstep, graphopt.dataframe, cost.data, graphopt.optimizer, dataset_type)
247 |
248 | # vis step
249 | if (params.optim.vis_step):
250 | vis_utils.vis_step(tstep, graphopt.logger, params=params)
251 |
252 | # reset graph
253 | graphopt = reset_graph(graphopt)
254 |
255 | mean, cov = get_optimizer_soln(tstep, graphopt, dataset_type, params.leo.sampler)
256 |
257 | # log final solution
258 | if params.logger.enable:
259 | cov_log = cov.tolist() if cov is not None else cov
260 | data_row = pd.DataFrame({'opt/mean': [mean.tolist()], 'opt/covariance': [cov_log]}, index=[tstep], dtype=object)
261 | data_row.index.name = 'tstep'
262 | graphopt.dataframe = pd.concat([graphopt.dataframe, data_row], axis=1)
263 |
264 | if (params.optim.save_fig) | (params.optim.show_fig):
265 | vis_utils.vis_step(tstep, graphopt.logger, params=params)
266 |
267 | # if params.optim.verbose:
268 | # graph_full = graphopt.optimizer.getFactorsUnsafe()
269 | # graph_cost = graph_full.error(graphopt.optimizer.calculateEstimate())
270 | # print("gtsam cost: {0}".format(graph_cost))
271 | # # print("gtsam per factor cost for {0} factors over {1} time steps".format(
272 | # # graph_full.size(), nsteps))
273 | # # for i in range(graph_full.size()):
274 | # # print(graph_full.at(i).error(graphopt.optimizer.calculateEstimate()))
275 |
276 | return (mean, cov, graphopt.dataframe)
277 |
--------------------------------------------------------------------------------
/leopy/src/leopy/optim/nav2d_factors.py:
--------------------------------------------------------------------------------
1 | import sys
2 | sys.path.append("/usr/local/cython/")
3 |
4 | import math
5 | import numpy as np
6 | import pandas as pd
7 |
8 | import torch
9 | from torch.autograd import Variable
10 |
11 | import gtsam
12 |
13 | from leopy.utils import tf_utils
14 |
15 | def get_noise_model(factor_cov):
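16 | # <= 3 entries are interpreted as per-dimension sigmas, 9 entries as a flattened 3x3 covariance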
16 |
17 | factor_noise_model = None
18 |
19 | if (factor_cov.shape[0] <= 3): # cov sigmas
20 | factor_noise_model = gtsam.noiseModel_Diagonal.Sigmas(factor_cov)
21 | elif (factor_cov.shape[0] == 9): # cov matrix
22 | factor_cov = np.reshape(factor_cov, (3, 3))
23 | factor_noise_model = gtsam.noiseModel_Gaussian.Covariance(factor_cov)
24 |
25 | return factor_noise_model
26 |
27 |
28 | def add_unary_factor(graph, keys, factor_cov, factor_meas):
29 |
30 | factor_noise_model = get_noise_model(factor_cov)
31 | factor_meas_pose = tf_utils.vec3_to_pose2(factor_meas)
32 | factor = gtsam.PriorFactorPose2(
33 | keys[0], factor_meas_pose, factor_noise_model)
34 |
35 | graph.push_back(factor)
36 |
37 | return graph
38 |
39 |
40 | def add_binary_odom_factor(graph, keys, factor_cov, factor_meas):
41 |
42 | factor_noise_model = get_noise_model(factor_cov)
43 | factor_meas_pose = tf_utils.vec3_to_pose2(factor_meas)
44 | factor = gtsam.BetweenFactorPose2(
45 | keys[0], keys[1], factor_meas_pose, factor_noise_model)
46 |
47 | graph.push_back(factor)
48 |
49 | return graph
50 |
51 | def log_step_nav2d(tstep, logger, data, optimizer):
52 |
53 | num_poses = tstep + 1
54 |
55 | pose_vec_graph = np.zeros((num_poses, 3))
56 | pose_vec_gt = np.asarray(data['poses_gt'][0:num_poses])
57 | poses_graph = optimizer.calculateEstimate()
58 |
59 | for i in range(0, num_poses):
60 | key = gtsam.symbol(ord('x'), i)
61 | pose2d = poses_graph.atPose2(key)
62 | pose_vec_graph[i, :] = [pose2d.x(), pose2d.y(), pose2d.theta()]
63 |
64 | logger.log_step("graph/poses2d", pose_vec_graph, tstep)
65 | logger.log_step("gt/poses2d", pose_vec_gt, tstep)
66 |
67 | return logger
68 |
69 | def log_step_df_nav2d(tstep, dataframe, data, optimizer):
70 |
71 | num_poses = tstep + 1
72 |
73 | pose_vec_graph = np.zeros((num_poses, 3))
74 | pose_vec_gt = np.asarray(data['poses_gt'][0:num_poses])
75 | poses_graph = optimizer.calculateEstimate()
76 |
77 | for i in range(0, num_poses):
78 | key = gtsam.symbol(ord('x'), i)
79 | pose2d = poses_graph.atPose2(key)
80 | pose_vec_graph[i, :] = [pose2d.x(), pose2d.y(), pose2d.theta()]
81 |
82 | # add to dataframe
83 | data_dict = {'opt/graph/poses2d': [pose_vec_graph.tolist()], 'opt/gt/poses2d': [pose_vec_gt.tolist()]}
84 | data_row = pd.DataFrame(data_dict, index=[tstep], dtype=object)
85 | data_row.index.name = 'tstep'
86 |
87 | dataframe = pd.concat([dataframe, data_row])  # DataFrame.append was removed in pandas 2.0
88 |
89 | return dataframe
90 |
--------------------------------------------------------------------------------
/leopy/src/leopy/optim/push2d_factors.py:
--------------------------------------------------------------------------------
1 | import sys
2 | sys.path.append("/usr/local/cython/")
3 |
4 | import math
5 | import numpy as np
6 | import json
7 | import pandas as pd
8 |
9 | import torch
10 | from torch.autograd import Variable
11 |
12 | import gtsam
13 | import leocpp
14 |
15 | from leopy.utils import tf_utils
16 |
17 | class PlanarSDF():
18 | def __init__(self, sdf_file=None):
19 | self.sdf = None
20 |
21 | if sdf_file is not None:
22 | self.load_sdf_from_file_json(sdf_file)
23 |
24 | def load_sdf_from_file_json(self, sdf_file):
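25 | # read the grid origin, resolution and signed-distance data from JSON and build a leocpp.PlanarSDF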
25 |
26 | with open(sdf_file) as f:
27 | sdf_data = json.load(f)
28 |
29 | cell_size = sdf_data['grid_res']
30 | sdf_cols = sdf_data['grid_size_x']
31 | sdf_rows = sdf_data['grid_size_y']
32 | sdf_data_vec = sdf_data['grid_data']
33 | sdf_origin_x = sdf_data['grid_origin_x']
34 | sdf_origin_y = sdf_data['grid_origin_y']
35 |
36 | origin = gtsam.Point2(sdf_origin_x, sdf_origin_y)
37 |
38 | sdf_data_mat = np.asarray(sdf_data_vec, dtype=np.float64).reshape(sdf_rows, sdf_cols)
42 |
43 | self.sdf = leocpp.PlanarSDF(origin, cell_size, sdf_data_mat)
44 |
45 | def get_noise_model(factor_cov):
46 |
47 | factor_noise_model = None
48 |
49 | if (factor_cov.shape[0] <= 3): # cov sigmas
50 | factor_noise_model = gtsam.noiseModel_Diagonal.Sigmas(factor_cov)
51 | elif (factor_cov.shape[0] == 9): # cov matrix
52 | factor_cov = np.reshape(factor_cov, (3, 3))
53 | factor_noise_model = gtsam.noiseModel_Gaussian.Covariance(factor_cov)
54 |
55 | return factor_noise_model
56 |
57 | def add_binary_odom_factor(graph, keys, factor_cov, factor_meas):
58 |
59 | factor_noise_model = get_noise_model(factor_cov)
60 | factor_meas_pose = tf_utils.vec3_to_pose2(factor_meas)
61 | factor = gtsam.BetweenFactorPose2(
62 | keys[0], keys[1], factor_meas_pose, factor_noise_model)
63 |
64 | graph.push_back(factor)
65 |
66 | return graph
67 |
68 | def add_qs_motion_factor(graph, keys, factor_cov, params=None):
69 | # keys: o_{t-1}, o_{t}, e_{t-1}, e_{t}
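70 | # c_sq: squared characteristic length used by the quasi-static limit-surface
71 | # model (roughly the max-torque to max-force ratio of each object shape)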
70 |
71 | factor_noise_model = get_noise_model(factor_cov)
72 |
73 | if (params.dataio.obj_shape == 'disc'):
74 | c_sq = math.pow(0.088 / 3, 2)
75 | elif (params.dataio.obj_shape == 'rect'):
76 | c_sq = math.pow(math.sqrt(0.2363**2 + 0.1579**2), 2)
77 | elif (params.dataio.obj_shape == 'ellip'):
78 | c_sq = (0.5 * (0.1638 + 0.2428)) ** 2
79 | else:
80 | raise ValueError("object shape sdf not found: {0}".format(params.dataio.obj_shape))  # c_sq would be undefined below
81 |
82 | factor = leocpp.QSVelPushMotionRealObjEEFactor(
83 | keys[0], keys[1], keys[2], keys[3], c_sq, factor_noise_model)
84 |
85 | graph.push_back(factor)
86 |
87 | return graph
88 |
89 |
90 | def add_sdf_intersection_factor(graph, keys, factor_cov, object_sdf):
91 | # keys: o_{t}, e_{t}
92 |
93 | factor_noise_model = get_noise_model(factor_cov)
94 | factor = leocpp.IntersectionPlanarSDFObjEEFactor(
95 | keys[0], keys[1], object_sdf, 0.0, factor_noise_model)
96 |
97 | graph.push_back(factor)
98 |
99 | return graph
100 |
101 |
102 | def add_tactile_rel_meas_factor(graph, keys, factor_cov, tf_pred, params=None):
103 | # keys: o_{t-k}, o_{t}, e_{t-k}, e_{t}
104 |
105 | factor_noise_model = get_noise_model(factor_cov)
106 | factor = leocpp.TactileRelativeTfPredictionFactor(
107 | keys[0], keys[1], keys[2], keys[3], tf_pred, factor_noise_model)
108 |
109 | factor.setFlags(yawOnlyError=params.yaw_only_error,
110 | constantModel=params.constant_model)
111 | factor.setLabel(classLabel=params.class_label,
112 | numClasses=params.num_classes)
113 |
114 | graph.push_back(factor)
115 |
116 | return graph
117 |
118 | ## fixed tactile model weights
119 | def tactile_model_output_fixed(tactile_model, img_feats, params=None):
120 | img_feat_i = torch.tensor(img_feats[0]).view(1, -1)
121 | img_feat_j = torch.tensor(img_feats[1]).view(1, -1)
122 |
123 | if (params.norm_img_feat == True):
124 | img_feat_i = tf_utils.normalize_vector(
125 | img_feat_i, torch.tensor(params.mean_img_feat), torch.tensor(params.std_img_feat))
126 | img_feat_j = tf_utils.normalize_vector(
127 | img_feat_j, torch.tensor(params.mean_img_feat), torch.tensor(params.std_img_feat))
128 |
129 | class_label_vec = torch.nn.functional.one_hot(
130 | torch.tensor(params.class_label), params.num_classes)
131 | class_label_vec = class_label_vec.view(1, -1)
132 |
133 | tf_pred = tactile_model.forward(img_feat_i, img_feat_j, class_label_vec)
134 | tf_pred = (tf_pred.view(-1)).detach().cpu().numpy()
135 |
136 | return tf_pred
137 |
138 | ## learnable tactile model weights
139 | def tactile_model_output(cost, img_feats, params=None):
140 |
141 | tf_pred = cost.theta.tactile_model_output(img_feats, params)
142 | tf_pred = (tf_pred.view(-1)).detach().cpu().numpy()
143 |
144 | return tf_pred
145 |
146 | def tactile_oracle_output(data, key_ids):
147 | # keys: o_{t-k}, o_{t}, e_{t-k}, e_{t}
148 |
149 | obj_pose1 = torch.tensor(data['obj_poses_gt'][key_ids[0]]).view(1, -1)
150 | obj_pose2 = torch.tensor(data['obj_poses_gt'][key_ids[1]]).view(1, -1)
151 | ee_pose1 = torch.tensor(data['ee_poses_gt'][key_ids[2]]).view(1, -1)
152 | ee_pose2 = torch.tensor(data['ee_poses_gt'][key_ids[3]]).view(1, -1)
153 |
154 | ee_pose1__obj = tf_utils.tf2d_between(obj_pose1, ee_pose1) # n x 3
155 | ee_pose2__obj = tf_utils.tf2d_between(obj_pose2, ee_pose2)
156 |
157 | pose_rel_gt = tf_utils.tf2d_between(ee_pose1__obj, ee_pose2__obj) # n x 3
158 |
159 | yaw_only_error = True
160 | if yaw_only_error:
161 | yaw = pose_rel_gt[0, 2].item()  # convert the torch scalar once instead of mixing .data and raw tensors
162 | tf_pred = np.array([0., 0., np.cos(yaw), np.sin(yaw)])
162 |
163 | return tf_pred
164 |
165 | def debug_tactile_factor(data, key_ids, tf_pred_net):
166 | # keys: o_{t-k}, o_{t}, e_{t-k}, e_{t}
167 |
168 | tf_pred_net = torch.tensor(tf_pred_net)
169 |
170 | obj_pose1 = torch.tensor(data['obj_poses_gt'][key_ids[0]]).view(1, -1)
171 | obj_pose2 = torch.tensor(data['obj_poses_gt'][key_ids[1]]).view(1, -1)
172 | ee_pose1 = torch.tensor(data['ee_poses_gt'][key_ids[2]]).view(1, -1)
173 | ee_pose2 = torch.tensor(data['ee_poses_gt'][key_ids[3]]).view(1, -1)
174 | ee_pose1__obj = tf_utils.tf2d_between(obj_pose1, ee_pose1) # n x 3
175 | ee_pose2__obj = tf_utils.tf2d_between(obj_pose2, ee_pose2)
176 | pose_rel_gt = tf_utils.tf2d_between(ee_pose1__obj, ee_pose2__obj) # n x 3
177 |
178 | pose_rel_gt = torch.tensor([0., 0., pose_rel_gt[0, 2]]).view(1, -1)
179 | pose_rel_meas = torch.tensor([0., 0., torch.asin(tf_pred_net[3])]).view(1, -1) # n x 3
180 | diff_vec = tf_utils.tf2d_between(pose_rel_gt, pose_rel_meas) # n x 3
181 |
182 | print("pose_rel_gt: {0}, pose_rel_meas: {1}, pose_diff: {2}".format(
183 | pose_rel_gt, pose_rel_meas, diff_vec))
184 |
185 | def log_step_push2d(tstep, logger, data, optimizer):
186 |
187 | num_poses = tstep + 1
188 |
189 | # log estimated poses
190 | poses_graph = optimizer.calculateEstimate()
191 | obj_pose_vec_graph = np.zeros((num_poses, 3))
192 | ee_pose_vec_graph = np.zeros((num_poses, 3))
193 |
194 | for i in range(0, num_poses):
195 | obj_key = gtsam.symbol(ord('o'), i)
196 | ee_key = gtsam.symbol(ord('e'), i)
197 |
198 | obj_pose2d = poses_graph.atPose2(obj_key)
199 | ee_pose2d = poses_graph.atPose2(ee_key)
200 |
201 | obj_pose_vec_graph[i, :] = [obj_pose2d.x(), obj_pose2d.y(), obj_pose2d.theta()]
202 | ee_pose_vec_graph[i, :] = [ee_pose2d.x(), ee_pose2d.y(), ee_pose2d.theta()]
203 |
204 | logger.log_step("graph/obj_poses2d", obj_pose_vec_graph, tstep)
205 | logger.log_step("graph/ee_poses2d", ee_pose_vec_graph, tstep)
206 |
207 | # log gt poses
208 | obj_pose_vec_gt = data['obj_poses_gt'][0:num_poses]
209 | ee_pose_vec_gt = data['ee_poses_gt'][0:num_poses]
210 | logger.log_step('gt/obj_poses2d', obj_pose_vec_gt, tstep)
211 | logger.log_step('gt/ee_poses2d', ee_pose_vec_gt, tstep)
212 |
213 | return logger
214 |
215 | def log_step_df_push2d(tstep, dataframe, data, optimizer):
216 |
217 | num_poses = tstep + 1
218 |
219 | # estimated poses
220 | poses_graph = optimizer.calculateEstimate()
221 | obj_pose_vec_graph = np.zeros((num_poses, 3))
222 | ee_pose_vec_graph = np.zeros((num_poses, 3))
223 |
224 | for i in range(0, num_poses):
225 | obj_key = gtsam.symbol(ord('o'), i)
226 | ee_key = gtsam.symbol(ord('e'), i)
227 |
228 | obj_pose2d = poses_graph.atPose2(obj_key)
229 | ee_pose2d = poses_graph.atPose2(ee_key)
230 |
231 | obj_pose_vec_graph[i, :] = [obj_pose2d.x(), obj_pose2d.y(), obj_pose2d.theta()]
232 | ee_pose_vec_graph[i, :] = [ee_pose2d.x(), ee_pose2d.y(), ee_pose2d.theta()]
233 |
234 | # gt poses
235 | obj_pose_vec_gt = data['obj_poses_gt'][0:num_poses]
236 | ee_pose_vec_gt = data['ee_poses_gt'][0:num_poses]
237 |
238 | # add to dataframe
239 | data_dict = {'opt/graph/obj_poses2d': [obj_pose_vec_graph.tolist()],
240 | 'opt/graph/ee_poses2d': [ee_pose_vec_graph.tolist()],
241 | 'opt/gt/obj_poses2d': [obj_pose_vec_gt],
242 | 'opt/gt/ee_poses2d': [ee_pose_vec_gt]}
243 |
244 | data_row = pd.DataFrame(data_dict, index=[tstep], dtype=object)
245 | data_row.index.name = 'tstep'
246 | dataframe = pd.concat([dataframe, data_row])  # DataFrame.append was removed in pandas 2.0
247 |
248 | return dataframe
249 |
--------------------------------------------------------------------------------
/leopy/src/leopy/optim/sampler.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from leopy.utils import tf_utils
3 |
4 | def compute_x_samples(mean, dx_samples):
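5 | # apply each tangent-space perturbation to the mean via Pose2 retract so the
6 | # samples stay on the pose manifold; returns [x, y, yaw] rows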
5 |
6 | num_samples, num_x, dim_x = dx_samples.shape
7 | x_samples = np.zeros(dx_samples.shape)
8 |
9 | for sidx in range(0, num_samples):
10 | for xidx in range(0, num_x):
11 | x_mean = tf_utils.vec3_to_pose2(mean[xidx, :])
12 | x_sample_pose = x_mean.retract(dx_samples[sidx, xidx, :])
13 | x_samples[sidx, xidx, :] = tf_utils.pose2_to_vec3(x_sample_pose)
14 |
15 | return x_samples
16 |
17 | def sampler_gaussian(mean, cov, n_samples=1, temp=1.):
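18 | # draw zero-mean perturbations with covariance cov / temp and map them onto the
19 | # pose manifold around mean; returns an (n_samples, num_x, dim_x) array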
18 |
19 | if cov is None:
20 | return mean
21 |
22 | num_x, dim_x = mean.shape
23 | mean_vec = np.reshape(mean, -1)
24 |
25 | cov = cov / temp
26 |
27 | dx_samples = np.random.multivariate_normal(np.zeros(mean_vec.shape), cov, n_samples)
28 | dx_samples = dx_samples.reshape(dx_samples.shape[0], num_x, dim_x)
29 |
30 | # x_samples: num_samples, num_x, dim_x
31 | x_samples = compute_x_samples(mean, dx_samples)
32 |
33 | return x_samples
--------------------------------------------------------------------------------
/leopy/src/leopy/thirdparty/dcem/dcem.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
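2 | # Differentiable cross-entropy method (DCEM); see Amos & Yarats,
3 | # "The Differentiable Cross-Entropy Method" (ICML 2020).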
2 |
3 | import numpy as np
4 |
5 | import torch
6 | from torch import nn
7 | from torch.distributions import Normal
8 |
9 | from lml import LML
10 |
11 | def dcem(
12 | f,
13 | nx,
14 | n_batch=1,
15 | n_sample=20,
16 | n_elite=10,
17 | n_iter=10,
18 | temp=1.,
19 | lb=None,
20 | ub=None,
21 | init_mu=None,
22 | init_sigma=None,
23 | device=None,
24 | iter_cb=None,
25 | proj_iterate_cb=None,
26 | lml_verbose=0,
27 | lml_eps=1e-3,
28 | normalize=True,
29 | iter_eps=1e-4,
30 | ):
31 | if init_mu is None:
32 | init_mu = 0.
33 |
34 | size = (n_batch, nx)
35 |
36 | if isinstance(init_mu, torch.Tensor):
37 | mu = init_mu.clone()
38 | elif isinstance(init_mu, float):
39 | mu = init_mu * torch.ones(size, requires_grad=True, device=device)
40 | else:
41 | assert False
42 |
43 | # TODO: Check if init_mu is in the domain
44 |
45 | if init_sigma is None:
46 | init_sigma = 1.
47 |
48 | if isinstance(init_sigma, torch.Tensor):
49 | sigma = init_sigma.clone()
50 | elif isinstance(init_sigma, float):
51 | sigma = init_sigma * torch.ones(
52 | size, requires_grad=True, device=device)
53 | else:
54 | assert False
55 |
56 | assert mu.size() == size
57 | assert sigma.size() == size
58 |
59 | if lb is not None:
60 | assert isinstance(lb, float)
61 |
62 | if ub is not None:
63 | assert isinstance(ub, float)
64 | assert ub > lb
65 |
66 | for i in range(n_iter):
67 | X = Normal(mu, sigma).rsample((n_sample,)).transpose(0, 1).to(device)
68 | X = X.contiguous()
69 | if lb is not None or ub is not None:
70 | X = torch.clamp(X, lb, ub)
71 |
72 | if proj_iterate_cb is not None:
73 | X = proj_iterate_cb(X)
74 |
75 | fX = f(X)
76 | X, fX = X.view(n_batch, n_sample, -1), fX.view(n_batch, n_sample)
77 |
78 | if temp is not None and temp < np.inf:  # np.infty was removed in NumPy 2.0
79 | if normalize:
80 | fX_mu = fX.mean(dim=1).unsqueeze(1)
81 | fX_sigma = fX.std(dim=1).unsqueeze(1)
82 | _fX = (fX - fX_mu) / (fX_sigma + 1e-6)
83 | else:
84 | _fX = fX
85 |
86 | if n_elite == 1:
87 | # I = LML(N=n_elite, verbose=lml_verbose, eps=lml_eps)(-_fX*temp)
88 | I = torch.softmax(-_fX * temp, dim=1)
89 | else:
90 | I = LML(N=n_elite, verbose=lml_verbose,
91 | eps=lml_eps)(-_fX * temp)
92 | I = I.unsqueeze(2)
93 | else:
94 | I_vals = fX.argsort(dim=1)[:, :n_elite]
95 | # TODO: A scatter would be more efficient here.
96 | I = torch.zeros(n_batch, n_sample, device=device)
97 | for j in range(n_batch):
98 | for v in I_vals[j]:
99 | I[j, v] = 1.
100 | I = I.unsqueeze(2)
101 |
102 | assert I.shape[:2] == X.shape[:2]
103 | X_I = I * X
104 | old_mu = mu.clone()
105 | mu = torch.sum(X_I, dim=1) / n_elite
106 | if (mu - old_mu).norm() < iter_eps:
107 | break
108 | sigma = ((I * (X - mu.unsqueeze(1))**2).sum(dim=1) / n_elite).sqrt()
109 |
110 | if iter_cb is not None:
111 | iter_cb(i, X, fX, I, X_I, mu, sigma)
112 |
113 | if lb is not None or ub is not None:
114 | mu = torch.clamp(mu, lb, ub)
115 |
116 | return mu
--------------------------------------------------------------------------------
/leopy/src/leopy/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rpl-cmu/leo/4ed27b169172795930a9103598144eb3ca70a405/leopy/src/leopy/utils/__init__.py
--------------------------------------------------------------------------------
/leopy/src/leopy/utils/dir_utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import shutil
3 | import pickle as pkl
3 |
4 | import csv
5 | import json
6 |
7 | def make_dir(dir, verbose=False, clear=False):
8 | if verbose:
9 | print("Creating directory {0}".format(dir))
10 |
11 | # os.popen launched the shell command without waiting for it to finish, so
12 | # callers could race against directory creation; create/clear synchronously
13 | if clear and os.path.exists(dir):
14 | shutil.rmtree(dir)
15 | os.makedirs(dir, exist_ok=True)
17 |
18 | def load_pkl_obj(filename):
19 | with open(filename, "rb") as f:
20 | pkl_obj = pkl.load(f)
22 |
23 | return pkl_obj
24 |
25 | def save_pkl_obj(filename, pkl_obj):
26 | with open(filename, 'wb') as f:
27 | pkl.dump(pkl_obj, f)
29 |
30 | def write_file_json(filename, data):
31 | with open(filename, 'w') as f:
32 | json.dump(data, f, indent=4)
33 | print("Saved data to file: {}".format(filename))
34 |
35 | def read_file_json(filename, verbose=False):
36 | data = None
37 | with open(filename) as f:
38 | data = json.load(f)
39 |
40 | if verbose:
41 | print("Loaded file: {0}". format(filename))
42 |
43 | return data
44 |
45 | def write_file_csv(filename, data, fieldnames):
46 | with open(filename, 'w', newline='') as csvfile:
47 | writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
48 | writer.writerow(data)
49 | print("Saved data to file: {}".format(filename))
50 |
51 | def write_dict_of_lists_to_csv(filename, data):
52 | with open(filename, 'w', newline='') as csvfile:
53 | writer = csv.writer(csvfile)
54 | writer.writerow(data.keys())
55 | writer.writerows(zip(*data.values()))
56 |
57 | def save_plt_fig(plt, filename, mkdir=True):
58 | if mkdir:
59 | dstdir = "/".join(filename.split("/")[:-1])
60 | os.makedirs(dstdir, exist_ok=True)
61 |
62 | plt.savefig(filename)
63 |
--------------------------------------------------------------------------------
/leopy/src/leopy/utils/logger.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from collections import defaultdict
3 | import pandas as pd
4 |
5 | import logging
6 | log = logging.getLogger(__name__)
7 |
8 | class Logger():
9 |
10 | def __init__(self, params=None):
11 | self.params = {} if params is None else params  # so log_param works when no params dict is given
12 | self.data = defaultdict(dict)
13 | self.runtime = None
14 |
15 | # accessor methods named data/params/runtime were shadowed by the instance
16 | # attributes set in __init__ (and hence uncallable); renamed to get_*
17 | def get_data(self):
18 | return self.data
19 |
20 | def get_params(self):
21 | return self.params
22 |
23 | def get_runtime(self):
24 | return self.runtime
23 |
24 | def log_step(self, name, value, step):
25 | self.data[step][name] = value
26 |
27 | def log_param(self, name, value):
28 | self.params[name] = value
29 |
30 | def log_runtime(self, runtime):
31 | self.runtime = runtime
--------------------------------------------------------------------------------
/leopy/src/leopy/utils/tf_utils.py:
--------------------------------------------------------------------------------
1 | import sys
2 | sys.path.append("/usr/local/cython/")
3 |
4 | import numpy as np
5 | import math
6 | import gtsam
7 |
8 | import torch
9 | import pytorch3d.transforms as p3d_t
10 |
11 | def wrap_to_pi(arr):
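12 | # wrap angle(s) into [-pi, pi)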
12 | arr_wrap = (arr + math.pi) % (2 * math.pi) - math.pi
13 | return arr_wrap
14 |
15 | def add_gaussian_noise(pose, noisevec, add_noise=True):
16 | if (add_noise):
17 | pose_noisy = pose.retract(noisevec)
18 | else:
19 | pose_noisy = pose
20 |
21 | return pose_noisy
22 |
23 | def vec3_to_pose2(vec):
24 | return gtsam.Pose2(vec[0], vec[1], vec[2])
25 |
26 | def pose2_to_vec3(pose2):
27 | return [pose2.x(), pose2.y(), pose2.theta()]
28 |
29 | def tf2d_between(pose1, pose2, device=None, requires_grad=None):
30 | """
31 | Relative transform of pose2 in pose1 frame, i.e. T12 = T1^{-1}*T2
32 | :param pose1: n x 3 tensor [x,y,yaw]
33 | :param pose2: n x 3 tensor [x,y,yaw]
34 | :return: pose12 n x 3 tensor [x,y,yaw]
35 | """
36 |
37 | num_data = pose1.shape[0]
38 |
39 | rot1 = torch.cat([torch.zeros(num_data, 1, device=device, requires_grad=requires_grad), torch.zeros(
40 | num_data, 1, device=device, requires_grad=requires_grad), pose1[:, 2][:, None]], 1)
41 | rot2 = torch.cat([torch.zeros(num_data, 1, device=device, requires_grad=requires_grad), torch.zeros(
42 | num_data, 1, device=device, requires_grad=requires_grad), pose2[:, 2][:, None]], 1)
43 | t1 = torch.cat([pose1[:, 0][:, None], pose1[:, 1]
44 | [:, None], torch.zeros(num_data, 1, device=device, requires_grad=requires_grad)], 1)
45 | t2 = torch.cat([pose2[:, 0][:, None], pose2[:, 1]
46 | [:, None], torch.zeros(num_data, 1, device=device, requires_grad=requires_grad)], 1)
47 |
48 | R1 = p3d_t.euler_angles_to_matrix(rot1, "XYZ")
49 | R2 = p3d_t.euler_angles_to_matrix(rot2, "XYZ")
50 | R1t = torch.inverse(R1)
51 |
52 | R12 = torch.matmul(R1t, R2)
53 | rot12 = p3d_t.matrix_to_euler_angles(R12, "XYZ")
54 | t12 = torch.matmul(R1t, (t2-t1)[:, :, None])
55 | t12 = t12[:, :, 0]
56 |
57 | tx = t12[:, 0][:, None]
58 | ty = t12[:, 1][:, None]
59 | yaw = rot12[:, 2][:, None]
60 | pose12 = torch.cat([tx, ty, yaw], 1)
61 |
62 | return pose12
63 |
64 | def tf2d_compose(pose1, pose12):
65 | """
66 | Composing pose1 with pose12, i.e. T2 = T1*T12
67 | :param pose1: n x 3 tensor [x,y,yaw]
68 | :param pose12: n x 3 tensor [x,y,yaw]
69 | :return: pose2 n x 3 tensor [x,y,yaw]
70 | """
71 |
72 | num_data = pose1.shape[0]
73 |
74 | rot1 = torch.cat([torch.zeros(num_data, 1), torch.zeros(
75 | num_data, 1), pose1[:, 2][:, None]], 1)
76 | rot12 = torch.cat([torch.zeros(num_data, 1), torch.zeros(
77 | num_data, 1), pose12[:, 2][:, None]], 1)
78 | t1 = torch.cat([pose1[:, 0][:, None], pose1[:, 1]
79 | [:, None], torch.zeros(num_data, 1)], 1)
80 | t12 = torch.cat([pose12[:, 0][:, None], pose12[:, 1]
81 | [:, None], torch.zeros(num_data, 1)], 1)
82 |
83 | R1 = p3d_t.euler_angles_to_matrix(rot1, "XYZ")
84 | R12 = p3d_t.euler_angles_to_matrix(rot12, "XYZ")
85 |
86 | R2 = torch.matmul(R1, R12)
87 | rot2 = p3d_t.matrix_to_euler_angles(R2, "XYZ")
88 | t2 = torch.matmul(R1, t12[:, :, None]) + t1[:, :, None]
89 | t2 = t2[:, :, 0]
90 |
91 | tx = t2[:, 0][:, None]
92 | ty = t2[:, 1][:, None]
93 | yaw = rot2[:, 2][:, None]
94 | pose2 = torch.cat([tx, ty, yaw], 1)
95 |
96 | return pose2
97 |
98 | def normalize_vector(vector, mean, stddev):
99 |
100 | if torch.is_tensor(vector):
101 | vector_norm = torch.div(torch.sub(vector, mean), stddev)
102 | else:
103 | vector_norm = np.divide(np.subtract(vector, mean), stddev)
104 |
105 | return vector_norm
--------------------------------------------------------------------------------
/leopy/src/leopy/utils/vis_utils.py:
--------------------------------------------------------------------------------
1 | from subprocess import call
2 |
3 | import numpy as np
4 | import math
5 |
6 | from leopy.utils import dir_utils
7 |
8 | import matplotlib.pyplot as plt
9 | from matplotlib.gridspec import GridSpec
10 | from matplotlib.collections import PatchCollection
11 | import matplotlib.patches as mpatches
12 | import matplotlib.lines as mlines
13 |
14 | plt.rcParams.update({'font.size': 28})
15 | # plt.rcParams["figure.figsize"] = (12,8)
16 |
17 | def init_plots(n_figs=1, figsize=(12, 8), interactive=True):
18 | if interactive:
19 | plt.ion()
20 |
21 | plt.close('all')
22 | figs = []
23 | for fid in range(0, n_figs):
24 | figs.append(plt.figure(constrained_layout=True, figsize=figsize))
25 |
26 | return figs
27 |
28 | def write_video_ffmpeg(imgsrcdir, viddst, framerate=30, format="mp4"):
29 |
30 | cmd = "ffmpeg -y -r {0} -pattern_type glob -i '{1}/*.png' -c:v libx264 -pix_fmt yuv420p {2}.{3}".format(
31 | framerate, imgsrcdir, viddst, format)
32 | # cmd = "ffmpeg -y -r {0} -pattern_type glob -i '{1}/*.png' {2}.mp4".format(
33 | # framerate, imgsrcdir, viddst)
34 | call(cmd, shell=True)
35 |
36 | def random_color():
37 | color = list(np.random.choice(range(256), size=3))
38 | color = [x / 256 for x in color]
39 | return color
40 |
41 | def plot_vec1d(logger, keys=None, colors=None):
42 |
43 | plt.cla()
44 | if keys is None:
45 | keys = [keys for keys in logger.data[0]]
46 | if colors is None:
47 | colors = [random_color() for c in range(0, len(keys))]
48 |
49 | assert(len(keys) == len(colors))
50 |
51 | iters = len(logger.data)
52 | for idx, key in enumerate(keys):
53 | y = [logger.data[itr][key] for itr in range(0, iters)]
54 | x = np.arange(0, iters)
55 | plt.plot(x, y, color=colors[idx], label=key, linewidth=2)
56 |
57 | plt.legend(loc='upper right')
58 |
59 | plt.show()
60 | plt.pause(1e-12)
61 |
62 |
63 | def vis_step(tstep, logger, params=None):
64 |
65 | if (params.dataio.dataset_type == "nav2d"):
66 | vis_step_nav2d(tstep, logger, params)
67 | elif (params.dataio.dataset_type == "push2d"):
68 | vis_step_push2d(tstep, logger, params)
69 | else:
70 | print("[vis_utils::vis_step] vis_step not available for {0} dataset".format(
71 | params.dataio.dataset_type))
72 |
73 | def vis_step_nav2d(tstep, logger, params=None):
74 |
75 | plt.cla()
76 | plt.gca().axis('equal')
77 | plt.xlim(-70, 70)
78 | plt.ylim(0, 60)
79 | plt.axis('off')
80 |
81 | poses_graph = logger.data[tstep]["graph/poses2d"]
82 | poses_gt = logger.data[tstep]["gt/poses2d"]
83 |
84 | plt.scatter(poses_gt[tstep, 0], poses_gt[tstep, 1], marker=(3, 0, poses_gt[tstep, 2]/np.pi*180),
85 | color='dimgray', s=800, alpha=0.5, zorder=3)
86 |
87 | plt.plot(poses_gt[0:tstep, 0], poses_gt[0:tstep, 1], linewidth=4, linestyle='--',
88 | color=params.plot.colors.gt, label=params.plot.labels.gt)
89 | plt.plot(poses_graph[0:tstep, 0], poses_graph[0:tstep, 1], linewidth=3,
90 | color=params.plot.colors.opt, label=params.plot.labels.opt, alpha=0.8)
91 |
92 | # plt.legend(loc='upper right')
93 |
94 | if params.optim.show_fig:
95 | plt.show()
96 | plt.pause(1e-12)
97 |
98 | if params.optim.save_fig:
99 | filename = "{0}/{1}/{2}/{3}/{4}/{5:04d}.png".format(
100 | params.BASE_PATH, params.plot.dstdir, params.dataio.dataset_name, params.dataio.prefix, params.dataio.data_idx, params.leo.itr)
101 | # print("[vis_utils::vis_step_nav2d] Save figure to {0}".format(filename))
102 | dir_utils.save_plt_fig(plt, filename, mkdir=True)
103 |
104 | def draw_endeff(poses_ee, color="dimgray", label=None, ax=None):
105 |
106 | ax = plt.gca() if ax is None else ax
107 |
108 | # plot contact point and normal
109 | ax.plot(poses_ee[-1][0], poses_ee[-1][1], 'k*')
110 | ori = poses_ee[-1][2]
111 | sz_arw = 0.03
112 | (dx, dy) = (sz_arw * -math.sin(ori), sz_arw * math.cos(ori))
113 | ax.arrow(poses_ee[-1][0], poses_ee[-1][1], dx, dy, linewidth=2,
114 | head_width=0.001, color=color, head_length=0.01, fc='dimgray', ec='dimgray')
115 |
116 | ee_radius = 0.0075
117 | circle = mpatches.Circle(
118 | (poses_ee[-1][0], poses_ee[-1][1]), color='dimgray', radius=ee_radius)
119 | ax.add_patch(circle)
120 |
121 |
122 | def draw_object(poses_obj, shape="disc", color="dimgray", label=None, ax=None):
123 |
124 | ax = plt.gca() if ax is None else ax
125 |
126 | linestyle_gt = '--' if (color == "dimgray") else '-'
127 | ax.plot(poses_obj[:, 0], poses_obj[:, 1], color=color,
128 | linestyle=linestyle_gt, label=label, linewidth=2, alpha=0.9)
129 |
130 | if (shape == "disc"):
131 | disc_radius = 0.088
132 | circ_obj = mpatches.Circle((poses_obj[-1][0], poses_obj[-1][1]), disc_radius,
133 | facecolor='None', edgecolor=color, linestyle=linestyle_gt, linewidth=2, alpha=0.9)
134 | ax.add_patch(circ_obj)
135 |
136 | # cross-bars marking the disc orientation
137 | (x0, y0, yaw) = (poses_obj[-1][0], poses_obj[-1][1], poses_obj[-1][2])
138 | r = disc_radius
139 | ax.plot([x0 + r * math.cos(yaw), x0 - r * math.cos(yaw)],
140 | [y0 + r * math.sin(yaw), y0 - r * math.sin(yaw)],
141 | linestyle=linestyle_gt, color=color, alpha=0.4)
142 | ax.plot([x0 - r * math.sin(yaw), x0 + r * math.sin(yaw)],
143 | [y0 + r * math.cos(yaw), y0 - r * math.cos(yaw)],
144 | linestyle=linestyle_gt, color=color, alpha=0.4)
145 |
146 | elif (shape == "rect"):
147 | rect_len_x = 0.2363
148 | rect_len_y = 0.1579
149 |
150 | yaw = poses_obj[-1][2]
151 | R = np.array([[np.cos(yaw), -np.sin(yaw)],
152 | [np.sin(yaw), np.cos(yaw)]])
153 | offset = np.matmul(R, np.array([[0.5*rect_len_x], [0.5*rect_len_y]]))
154 | xb = poses_obj[-1][0] - offset[0]
155 | yb = poses_obj[-1][1] - offset[1]
156 | rect = mpatches.Rectangle((xb, yb), rect_len_x, rect_len_y, angle=(
157 | np.rad2deg(yaw)), facecolor='None', edgecolor=color, linestyle=linestyle_gt, linewidth=2)
158 | ax.add_patch(rect)
159 |
160 | elif (shape == "ellip"):
161 | ellip_len_x = 0.1638
162 | ellip_len_y = 0.2428
163 |
164 | xb = poses_obj[-1][0]
165 | yb = poses_obj[-1][1]
166 | yaw = poses_obj[-1][2]
167 | ellip = mpatches.Ellipse((xb, yb), ellip_len_x, ellip_len_y, angle=(
168 | np.rad2deg(yaw)), facecolor='None', edgecolor=color, linestyle=linestyle_gt, linewidth=2)
169 | ax.add_patch(ellip)
177 |
178 | def vis_step_push2d(tstep, logger, params=None):
179 |
180 | plt.cla()
181 | plt.xlim((-1, 1))
182 | plt.ylim((-1, 1))
183 | plt.gca().axis('equal')
184 | plt.axis('off')
185 |
186 | poses_obj_gt = np.array(logger.data[tstep]["gt/obj_poses2d"])
187 | poses_ee_gt = np.array(logger.data[tstep]["gt/ee_poses2d"])
188 | draw_endeff(poses_ee_gt, color="dimgray")
189 | draw_object(poses_obj_gt, shape=params.dataio.obj_shape,
190 | color="dimgray", label="groundtruth")
191 |
192 | color = params.plot.colors.exp if (params.optim.fig_name == "expert") else params.plot.colors.opt
193 | label = params.plot.labels.exp if (params.optim.fig_name == "expert") else params.plot.labels.opt
194 | poses_obj_graph = np.array(logger.data[tstep]["graph/obj_poses2d"])
195 | poses_ee_graph = np.array(logger.data[tstep]["graph/ee_poses2d"])
196 | draw_endeff(poses_ee_graph, color=color)
197 | draw_object(poses_obj_graph, shape=params.dataio.obj_shape,
198 | color=color, label=label)
199 | # plt.legend(loc='upper left')
200 |
201 | if params.optim.show_fig:
202 | plt.draw()
203 | plt.pause(1e-12)
204 |
205 | if params.optim.save_fig:
206 | filename = "{0}/{1}/{2}/{3}/{4}/{5:04d}.png".format(
207 | params.BASE_PATH, params.plot.dstdir, params.dataio.dataset_name, params.dataio.prefix, params.dataio.data_idx, params.leo.itr)
208 | # print(f"[vis_utils::vis_step_push2d] Save figure to {filename}")
209 | dir_utils.save_plt_fig(plt, filename, mkdir=True)
--------------------------------------------------------------------------------