├── .gitignore
├── .gitmodules
├── LICENSE
├── README.md
├── gifs
│   ├── aliengo_trot.gif
│   ├── go2_bound.gif
│   └── hyqreal_pace.gif
├── installation
│   ├── docker
│   │   ├── integrated_gpu
│   │   │   └── dockerfile
│   │   └── nvidia
│   │       └── dockerfile
│   └── mamba
│       ├── integrated_gpu
│       │   ├── mamba_environment.yml
│       │   ├── mamba_environment_ros1.yml
│       │   └── mamba_environment_ros2.yml
│       └── nvidia_cuda
│           ├── mamba_environment.yml
│           ├── mamba_environment_ros1.yml
│           └── mamba_environment_ros2.yml
├── pyproject.toml
├── quadruped_pympc
│   ├── __init__.py
│   ├── config.py
│   ├── controllers
│   │   ├── __init__.py
│   │   ├── gradient
│   │   │   ├── collaborative
│   │   │   │   ├── __init__.py
│   │   │   │   ├── centroidal_model_collaborative.py
│   │   │   │   └── centroidal_nmpc_collaborative.py
│   │   │   ├── input_rates
│   │   │   │   ├── __init__.py
│   │   │   │   ├── centroidal_model_input_rates.py
│   │   │   │   └── centroidal_nmpc_input_rates.py
│   │   │   ├── kinodynamic
│   │   │   │   ├── kinodynamic_model.py
│   │   │   │   └── kinodynamic_nmpc.py
│   │   │   ├── lyapunov
│   │   │   │   ├── centroidal_model_lyapunov.py
│   │   │   │   └── centroidal_nmpc_lyapunov.py
│   │   │   └── nominal
│   │   │       ├── __init__.py
│   │   │       ├── centroidal_model_nominal.py
│   │   │       ├── centroidal_nmpc_gait_adaptive.py
│   │   │       └── centroidal_nmpc_nominal.py
│   │   └── sampling
│   │       ├── __init__.py
│   │       ├── centroidal_model_jax.py
│   │       ├── centroidal_nmpc_jax.py
│   │       └── centroidal_nmpc_jax_gait_adaptive.py
│   ├── helpers
│   │   ├── __init__.py
│   │   ├── early_stance_detector.py
│   │   ├── foothold_reference_generator.py
│   │   ├── inverse_kinematics
│   │   │   ├── __init__.py
│   │   │   ├── inverse_kinematics_numeric_adam.py
│   │   │   ├── inverse_kinematics_numeric_mujoco.py
│   │   │   └── inverse_kinematics_qp.py
│   │   ├── periodic_gait_generator.py
│   │   ├── periodic_gait_generator_jax.py
│   │   ├── quadruped_utils.py
│   │   ├── swing_generators
│   │   │   ├── __init__.py
│   │   │   ├── explicit_swing_trajectory_generator.py
│   │   │   └── scipy_swing_trajectory_generator.py
│   │   ├── swing_trajectory_controller.py
│   │   ├── terrain_estimator.py
│   │   ├── velocity_modulator.py
│   │   └── visual_foothold_adaptation.py
│   ├── interfaces
│   │   ├── __init__.py
│   │   ├── srbd_batched_controller_interface.py
│   │   ├── srbd_controller_interface.py
│   │   └── wb_interface.py
│   └── quadruped_pympc_wrapper.py
├── ros2
│   ├── console.py
│   ├── msgs_ws
│   │   └── src
│   │       └── dls2_msgs
│   │           ├── CMakeLists.txt
│   │           ├── LICENSE
│   │           ├── msg
│   │           │   ├── BaseStateMsg.msg
│   │           │   ├── BlindStateMsg.msg
│   │           │   ├── ControlSignalMsg.msg
│   │           │   └── TrajectoryGeneratorMsg.msg
│   │           └── package.xml
│   └── run_controller.py
└── simulation
    ├── __init__.py
    ├── batched_simulations.py
    ├── generate_dataset.py
    └── simulation.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Created by https://www.toptal.com/developers/gitignore/api/python,c++,c,visualstudiocode,git,linux
2 | # Edit at https://www.toptal.com/developers/gitignore?templates=python,c++,c,visualstudiocode,git,linux
3 |
4 | ### Custom ###
5 | ./jnrh2023/
6 | c_generated_code
7 | *.json
8 | core.*
9 | ### C ###
10 | # Prerequisites
11 | *.d
12 |
13 | # Object files
14 | *.o
15 | *.ko
16 | *.elf
17 |
18 | # Linker output
19 | *.ilk
20 | *.map
21 | *.exp
22 |
23 | # Precompiled Headers
24 | *.gch
25 | *.pch
26 |
27 | # Libraries
28 | *.lib
29 | *.a
30 | *.la
31 | *.lo
32 |
33 | # Shared objects (inc. Windows DLLs)
34 | *.dll
35 | *.so
36 | *.so.*
37 | *.dylib
38 |
39 | # Executables
40 | *.exe
41 | *.out
42 | *.app
43 | *.i*86
44 | *.x86_64
45 | *.hex
46 |
47 | # Debug files
48 | *.dSYM/
49 | *.su
50 | *.idb
51 | *.pdb
52 |
53 | # Kernel Module Compile Results
54 | *.mod*
55 | *.cmd
56 | .tmp_versions/
57 | modules.order
58 | Module.symvers
59 | Mkfile.old
60 | dkms.conf
61 |
62 | ### C++ ###
63 | # Prerequisites
64 |
65 | # Compiled Object files
66 | *.slo
67 |
68 | # Precompiled Headers
69 |
70 | # Compiled Dynamic libraries
71 |
72 | # Fortran module files
73 | *.mod
74 | *.smod
75 |
76 | # Compiled Static libraries
77 | *.lai
78 |
79 | # Executables
80 |
81 | ### Git ###
82 | # Created by git for backups. To disable backups in Git:
83 | # $ git config --global mergetool.keepBackup false
84 | *.orig
85 |
86 | # Created by git when using merge tools for conflicts
87 | *.BACKUP.*
88 | *.BASE.*
89 | *.LOCAL.*
90 | *.REMOTE.*
91 | *_BACKUP_*.txt
92 | *_BASE_*.txt
93 | *_LOCAL_*.txt
94 | *_REMOTE_*.txt
95 |
96 | ### Linux ###
97 | *~
98 |
99 | # temporary files which can be created if a process still has a handle open of a deleted file
100 | .fuse_hidden*
101 |
102 | # KDE directory preferences
103 | .directory
104 |
105 | # Linux trash folder which might appear on any partition or disk
106 | .Trash-*
107 |
108 | # .nfs files are created when an open file is removed but is still being accessed
109 | .nfs*
110 |
111 | ### Python ###
112 | # Byte-compiled / optimized / DLL files
113 | __pycache__/
114 | *.py[cod]
115 | *$py.class
116 |
117 | # C extensions
118 |
119 | # Distribution / packaging
120 | .Python
121 | build/
122 | develop-eggs/
123 | dist/
124 | downloads/
125 | eggs/
126 | .eggs/
127 | lib/
128 | lib64/
129 | parts/
130 | sdist/
131 | var/
132 | wheels/
133 | share/python-wheels/
134 | *.egg-info/
135 | .installed.cfg
136 | *.egg
137 | MANIFEST
138 |
139 | # PyInstaller
140 | # Usually these files are written by a python script from a template
141 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
142 | *.manifest
143 | *.spec
144 |
145 | # Installer logs
146 | pip-log.txt
147 | pip-delete-this-directory.txt
148 |
149 | # Unit test / coverage reports
150 | htmlcov/
151 | .tox/
152 | .nox/
153 | .coverage
154 | .coverage.*
155 | .cache
156 | nosetests.xml
157 | coverage.xml
158 | *.cover
159 | *.py,cover
160 | .hypothesis/
161 | .pytest_cache/
162 | cover/
163 |
164 | # Translations
165 | *.mo
166 | *.pot
167 |
168 | *.pyc
169 |
170 |
171 | # Django stuff:
172 | *.log
173 | local_settings.py
174 | db.sqlite3
175 | db.sqlite3-journal
176 |
177 | # Flask stuff:
178 | instance/
179 | .webassets-cache
180 |
181 | # Scrapy stuff:
182 | .scrapy
183 |
184 | # Sphinx documentation
185 | docs/_build/
186 |
187 | # PyBuilder
188 | .pybuilder/
189 | target/
190 |
191 | # Jupyter Notebook
192 | .ipynb_checkpoints
193 |
194 | # IPython
195 | profile_default/
196 | ipython_config.py
197 |
198 | # pyenv
199 | # For a library or package, you might want to ignore these files since the code is
200 | # intended to run in multiple environments; otherwise, check them in:
201 | # .python-version
202 |
203 | # pipenv
204 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
205 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
206 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
207 | # install all needed dependencies.
208 | #Pipfile.lock
209 |
210 | # poetry
211 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
212 | # This is especially recommended for binary packages to ensure reproducibility, and is more
213 | # commonly ignored for libraries.
214 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
215 | #poetry.lock
216 |
217 | # pdm
218 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
219 | #pdm.lock
220 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
221 | # in version control.
222 | # https://pdm.fming.dev/#use-with-ide
223 | .pdm.toml
224 |
225 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
226 | __pypackages__/
227 |
228 | # Celery stuff
229 | celerybeat-schedule
230 | celerybeat.pid
231 |
232 | # SageMath parsed files
233 | *.sage.py
234 |
235 | # Environments
236 | .env
237 | .venv
238 | env/
239 | venv/
240 | ENV/
241 | env.bak/
242 | venv.bak/
243 |
244 | # Spyder project settings
245 | .spyderproject
246 | .spyproject
247 |
248 | # Rope project settings
249 | .ropeproject
250 |
251 | # mkdocs documentation
252 | /site
253 |
254 | # mypy
255 | .mypy_cache/
256 | .dmypy.json
257 | dmypy.json
258 |
259 | # Pyre type checker
260 | .pyre/
261 |
262 | # pytype static type analyzer
263 | .pytype/
264 |
265 | # Cython debug symbols
266 | cython_debug/
267 |
268 | # PyCharm
269 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
270 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
271 | # and can be added to the global gitignore or merged into this file. For a more nuclear
272 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
273 | #.idea/
274 |
275 | ### Python Patch ###
276 | # Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
277 | poetry.toml
278 |
279 | # ruff
280 | .ruff_cache/
281 |
282 | # LSP config files
283 | pyrightconfig.json
284 |
285 | ### VisualStudioCode ###
286 | .vscode
287 | .vscode/*
288 | !.vscode/settings.json
289 | !.vscode/tasks.json
290 | !.vscode/launch.json
291 | !.vscode/extensions.json
292 | !.vscode/*.code-snippets
293 |
294 | # Local History for Visual Studio Code
295 | .history/
296 |
297 | # Built Visual Studio Code Extensions
298 | *.vsix
299 |
300 | ### VisualStudioCode Patch ###
301 | # Ignore all local history of files
302 | .history
303 | .ionide
304 |
305 |
306 |
307 |
308 | .idea/
309 |
310 | # Custom ignore parts.
311 | *MUJOCO_LOG.TXT
312 | quadruped_pympc/controllers/gradient/nominal/c_generated_code_gait_adaptive/
313 |
314 | # Datasets are heavy and should be saved to huggingface instead of handled here.
315 | /datasets/
316 | *.pkl
317 | *.h5
318 |
319 |
320 | # ROS2
321 | ros2/msgs_ws/build/
322 | ros2/msgs_ws/install/
323 | ros2/msgs_ws/log/
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "quadruped_pympc/acados"]
2 | path = quadruped_pympc/acados
3 | url = https://github.com/acados/acados.git
4 |
5 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2025, DLS Lab at Istituto Italiano di Tecnologia, Italy
4 |
5 | Redistribution and use in source and binary forms, with or without
6 | modification, are permitted provided that the following conditions are met:
7 |
8 | 1. Redistributions of source code must retain the above copyright notice, this
9 | list of conditions and the following disclaimer.
10 |
11 | 2. Redistributions in binary form must reproduce the above copyright notice,
12 | this list of conditions and the following disclaimer in the documentation
13 | and/or other materials provided with the distribution.
14 |
15 | 3. Neither the name of the copyright holder nor the names of its
16 | contributors may be used to endorse or promote products derived from
17 | this software without specific prior written permission.
18 |
19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
6 |
7 |
8 | ## Overview
9 | This repo contains a model predictive controller based on the **single rigid body model** and written in **Python**. It comes in two flavours: gradient-based via [acados](https://github.com/acados/acados) or sampling-based via [jax](https://github.com/google/jax). The controller is tested on real robots and is compatible with [Mujoco](https://mujoco.org/). See [the end of this README](https://github.com/iit-DLSLab/Quadruped-PyMPC?tab=readme-ov-file#citing-this-work) if you want to cite this work.
10 |
11 |
12 | Features of the gradient-based mpc:
13 | - less than 5ms computation on an intel i7-13700H cpu
14 | - optional integrators for model mismatch
15 | - optional smoothing for the ground reaction forces
16 | - optional foothold optimization
17 | - optional [real-time iteration](http://cse.lab.imtlucca.it/~bemporad/publications/papers/ijc_rtiltv.pdf) or [advanced-step real-time iteration](https://arxiv.org/pdf/2403.07101.pdf)
18 | - optional zero moment point/center of mass constraints
19 | - optional lyapunov-based criteria
20 |
21 |
22 | Features of the sampling-based mpc:
23 | - 10000 parallel rollouts in less than 2ms on an nvidia 4050 mobile gpu!
24 | - optional step frequency adaptation for enhancing robustness
25 | - implements different strategies: [random sampling](https://arxiv.org/pdf/2212.00541.pdf), [mppi](https://sites.gatech.edu/acds/mppi/), or [cemppi](https://arxiv.org/pdf/2203.16633.pdf)
26 | - different control parametrizations: zero-order, linear splines or cubic splines (see [mujoco-mpc](https://arxiv.org/pdf/2212.00541.pdf))
27 |
28 |
29 |
30 | ## Dependencies
31 | Gradient-based MPC: it uses [CasADi](https://web.casadi.org/) to define the model and [acados](https://github.com/acados/acados) to solve the optimal control problem. Sampling-based MPC: it uses [jax](https://github.com/google/jax) for both the model and the solver. The simulation environment is based on [Mujoco](https://mujoco.org/).
32 |
33 | ## Installation
34 |
35 | 1. install [miniforge](https://github.com/conda-forge/miniforge/releases) (x86_64 or arm64 depending on your platform)
36 |
37 | 2. create an environment using the file in the folder [installation](https://github.com/iit-DLSLab/Quadruped-PyMPC/tree/main/installation) choosing between **nvidia and integrated gpu**, either **with or without ros** (to run the simulation, you don't need ros!):
38 |
39 | `conda env create -f mamba_environment.yml`
40 |
41 |
42 | 3. clone the other submodules:
43 |
44 | `git submodule update --init --recursive`
45 |
46 | 4. activate the conda environment
47 |
48 | `conda activate quadruped_pympc_env`
49 |
50 | 5. go inside the folder acados and compile it by running:
51 |
52 | ```
53 | mkdir build
54 | cd build
55 | cmake -DACADOS_WITH_SYSTEM_BLASFEO:BOOL=ON ..
56 | make install -j4
57 | pip install -e ./../interfaces/acados_template
58 | ```
59 |
60 | 6. inside the file .bashrc, given your **path_to_acados**, add:
61 |
62 | ```
63 | export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:"/path_to_acados/lib"
64 | export ACADOS_SOURCE_DIR="/path_to_acados"
65 | ```
66 |
67 | Note that if you are using a Mac, you should modify the file .zshrc instead, adding
68 |
69 | ```
70 | export DYLD_LIBRARY_PATH=$DYLD_LIBRARY_PATH:"/path_to_acados/lib"
71 | export ACADOS_SOURCE_DIR="/path_to_acados"
72 | ```
73 |
74 | The first time you run the simulation with acados, you will be asked in the terminal to install tera_render; accept to proceed.
75 |
76 |
77 | 7. go to the Quadruped-PyMPC root folder and install it:
78 |
79 | ```
80 | pip install -e .
81 | ```
82 |
83 | ## How to run - Simulation
84 |
85 | 1. activate the conda environment
86 |
87 | ```
88 | conda activate quadruped_pympc_env
89 | ```
90 |
91 | 2. go to the main Quadruped-PyMPC [folder](https://github.com/iit-DLSLab/Quadruped-PyMPC) and run
92 |
93 | ```
94 | python3 simulation/simulation.py
95 | ```
96 |
97 | In the file [config.py](https://github.com/iit-DLSLab/Quadruped-PyMPC/blob/main/quadruped_pympc/config.py), you can set up the robot, the mpc type (gradient, sampling...), its properties (real-time iteration, sampling type, foothold optimization...), and other simulation parameters (reference, gait type...).
98 |
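For instance, the entries you will most often change look like this (an illustrative excerpt, not the full file; the option lists in the comments mirror those documented in config.py):

```
# quadruped_pympc/config.py -- illustrative excerpt
robot = 'aliengo'         # 'go1', 'go2', 'b2', 'aliengo', 'hyqreal', 'mini_cheetah'

mpc_params = {
    'type': 'nominal',    # 'nominal', 'input_rates', 'sampling', 'collaborative', 'lyapunov', 'kinodynamic'
    'horizon': 12,        # number of mpc timesteps in the prediction horizon
    'dt': 0.02,           # mpc discretization time
    'use_RTI': False,     # real-time iteration (gradient-based mpc only)
}

simulation_params = {
    'gait': 'trot',       # 'trot', 'pace', 'crawl', 'bound', 'full_stance'
    'mode': 'human',      # 'human' (keyboard), 'forward', 'random'
    'mpc_frequency': 100, # how often the mpc is called [Hz]
}
```
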
99 | 3. you can interact with the simulation with your mouse to add disturbances, or with the keyboard by pressing
100 | ```
101 | arrow up, arrow down -> add positive or negative forward velocity
102 | arrow left, arrow right -> add positive or negative yaw velocity
103 | ctrl -> set all velocities to zero
104 | ```
105 |
106 | ## How to run - ROS2
107 | During the installation procedure, use the file **mamba_environment_ros2.yml**. Then:
108 |
109 | 1. activate the conda environment
110 |
111 | ```
112 | conda activate quadruped_pympc_ros2_env
113 | ```
114 |
115 | 2. go to the folder [ros2/msgs_ws](https://github.com/iit-DLSLab/Quadruped-PyMPC/tree/main/ros2/msgs_ws) and compile the messages
116 |
117 | ```
118 | colcon build
119 | source install/setup.bash
120 | ```
121 |
122 | 3. you can now run the script
123 |
124 | ```
125 | python3 ros2/run_controller.py
126 | ```
127 |
128 | For a real-robot deployment, remember to set these flags to false inside the script
129 |
130 | ```
131 | USE_MUJOCO_RENDER = False
132 | USE_MUJOCO_SIMULATION = False
133 | ```
134 |
135 | and to use a nice [state estimator](https://github.com/iit-DLSLab/muse).
136 |
137 | ## Citing this work
138 |
139 | If you find the work useful, please consider citing one of our works:
140 |
141 | [Sampling](https://arxiv.org/abs/2403.11383):
142 | ```
143 | @INPROCEEDINGS{turrisi2024sampling,
144 | author={Turrisi, Giulio and Modugno, Valerio and Amatucci, Lorenzo and Kanoulas, Dimitrios and Semini, Claudio},
145 | booktitle={2024 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
146 | title={On the Benefits of GPU Sample-Based Stochastic Predictive Controllers for Legged Locomotion},
147 | year={2024},
148 | pages={13757-13764},
149 | doi={10.1109/IROS58592.2024.10801698}}
150 | ```
151 | [Lyapunov](https://arxiv.org/abs/2409.01144):
152 | ```
153 | @ARTICLE{elobaid2025adaptivestablempc,
154 | author={Elobaid, Mohamed and Turrisi, Giulio and Rapetti, Lorenzo and Romualdi, Giulio and Dafarra, Stefano and Kawakami, Tomohiro and Chaki, Tomohiro and Yoshiike, Takahide and Semini, Claudio and Pucci, Daniele},
155 | journal={IEEE Robotics and Automation Letters},
156 | title={Adaptive Non-Linear Centroidal MPC With Stability Guarantees for Robust Locomotion of Legged Robots},
157 | year={2025},
158 | volume={10},
159 | number={3},
160 | pages={2806-2813},
161 | doi={10.1109/LRA.2025.3536296}}
162 | ```
163 |
164 | ## Maintainer
165 |
166 | This repository is maintained by [Giulio Turrisi](https://github.com/giulioturrisi) and [Daniel Ordonez](https://github.com/Danfoa).
167 |
--------------------------------------------------------------------------------
/gifs/aliengo_trot.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iit-DLSLab/Quadruped-PyMPC/3b289c089ce103733071455760ccbbaef9a9672c/gifs/aliengo_trot.gif
--------------------------------------------------------------------------------
/gifs/go2_bound.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iit-DLSLab/Quadruped-PyMPC/3b289c089ce103733071455760ccbbaef9a9672c/gifs/go2_bound.gif
--------------------------------------------------------------------------------
/gifs/hyqreal_pace.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iit-DLSLab/Quadruped-PyMPC/3b289c089ce103733071455760ccbbaef9a9672c/gifs/hyqreal_pace.gif
--------------------------------------------------------------------------------
/installation/docker/integrated_gpu/dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:jammy
2 |
3 |
4 | # Set environment variables
5 | ENV DEBIAN_FRONTEND=noninteractive \
6 | CONDA_DIR=/opt/conda \
7 | PATH=/opt/conda/bin:$PATH
8 |
9 |
10 | # Install tkinter, pip, and gedit
11 | RUN apt-get update && apt-get install -y python3-tk
12 | RUN apt-get update && apt-get install -y python3-pip
13 | RUN apt-get update && apt-get install -y gedit
14 |
15 |
16 | # Install git
17 | RUN apt-get update && apt-get install -y git
18 |
19 |
20 | # Install Miniforge
21 | RUN apt-get update && \
22 | apt-get install -y wget bzip2 ca-certificates libglib2.0-0 libxext6 libsm6 libxrender1 git && \
23 | wget https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-Linux-x86_64.sh -O /tmp/miniforge.sh && \
24 | bash /tmp/miniforge.sh -b -p $CONDA_DIR && \
25 | rm /tmp/miniforge.sh && \
26 | conda init bash && \
27 | conda clean -afy
28 |
29 |
30 |
31 | RUN git clone https://github.com/iit-DLSLab/Quadruped-PyMPC.git
32 | RUN cd Quadruped-PyMPC && git submodule update --init --recursive
33 | # Create a conda environment with the specified name
34 | RUN cd Quadruped-PyMPC/installation/mamba/integrated_gpu/ && conda env create -f mamba_environment_ros2.yml
35 |
36 | SHELL ["conda", "run", "-n", "quadruped_pympc_ros2_env", "/bin/bash", "-c"]
37 |
38 | # Install acados
39 | RUN cd Quadruped-PyMPC/quadruped_pympc/acados && mkdir -p build
40 | RUN cd Quadruped-PyMPC/quadruped_pympc/acados/build && cmake -DACADOS_WITH_SYSTEM_BLASFEO:BOOL=ON -DCMAKE_POLICY_VERSION_MINIMUM=3.5 ..
41 | RUN cd Quadruped-PyMPC/quadruped_pympc/acados/build && make install -j4
42 | RUN pip install -e ./Quadruped-PyMPC/quadruped_pympc/acados/interfaces/acados_template
43 | RUN echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:"/Quadruped-PyMPC/quadruped_pympc/acados/lib"' >> /root/.bashrc
44 | RUN echo 'export ACADOS_SOURCE_DIR="/Quadruped-PyMPC/quadruped_pympc/acados"' >> /root/.bashrc
45 | # Install Quadruped-PyMPC
46 | RUN cd Quadruped-PyMPC && pip install -e .
47 |
48 |
49 | # Set the shell to bash and configure the shell prompt and aliases
50 | RUN echo 'export PS1="\[\e]0;\u@\h: \w\a\]${debian_chroot:+($debian_chroot)}\[\033[01;37m\]\u\[\033[00m\]@\[\033[01;32m\]\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\] "' >> /root/.bashrc
51 | RUN echo 'conda activate quadruped_pympc_ros2_env' >> /root/.bashrc
52 |
53 | # Set the working directory and source the bashrc file
54 | WORKDIR /home
55 | RUN source /root/.bashrc
--------------------------------------------------------------------------------
/installation/docker/nvidia/dockerfile:
--------------------------------------------------------------------------------
1 | FROM nvidia/cuda:12.4.0-devel-ubuntu22.04
2 |
3 |
4 | # Set environment variables
5 | ENV DEBIAN_FRONTEND=noninteractive \
6 | CONDA_DIR=/opt/conda \
7 | PATH=/opt/conda/bin:$PATH
8 |
9 |
10 | # Install tkinter, pip, and gedit
11 | RUN apt-get update && apt-get install -y python3-tk
12 | RUN apt-get update && apt-get install -y python3-pip
13 | RUN apt-get update && apt-get install -y gedit
14 |
15 |
16 | # Install git
17 | RUN apt-get update && apt-get install -y git
18 |
19 |
20 | # Install Miniforge
21 | RUN apt-get update && \
22 | apt-get install -y wget bzip2 ca-certificates libglib2.0-0 libxext6 libsm6 libxrender1 git && \
23 | wget https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-Linux-x86_64.sh -O /tmp/miniforge.sh && \
24 | bash /tmp/miniforge.sh -b -p $CONDA_DIR && \
25 | rm /tmp/miniforge.sh && \
26 | conda init bash && \
27 | conda clean -afy
28 |
29 |
30 |
31 | RUN git clone https://github.com/iit-DLSLab/Quadruped-PyMPC.git
32 | RUN cd Quadruped-PyMPC && git submodule update --init --recursive
33 | # Create a conda environment with the specified name
34 | RUN cd Quadruped-PyMPC/installation/mamba/nvidia_cuda/ && conda env create -f mamba_environment_ros2.yml
35 |
36 | SHELL ["conda", "run", "-n", "quadruped_pympc_ros2_env", "/bin/bash", "-c"]
37 |
38 | # Install acados
39 | RUN cd Quadruped-PyMPC/quadruped_pympc/acados && mkdir -p build
40 | RUN cd Quadruped-PyMPC/quadruped_pympc/acados/build && cmake -DACADOS_WITH_SYSTEM_BLASFEO:BOOL=ON -DCMAKE_POLICY_VERSION_MINIMUM=3.5 ..
41 | RUN cd Quadruped-PyMPC/quadruped_pympc/acados/build && make install -j4
42 | RUN pip install -e ./Quadruped-PyMPC/quadruped_pympc/acados/interfaces/acados_template
43 | RUN echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:"/Quadruped-PyMPC/quadruped_pympc/acados/lib"' >> /root/.bashrc
44 | RUN echo 'export ACADOS_SOURCE_DIR="/Quadruped-PyMPC/quadruped_pympc/acados"' >> /root/.bashrc
45 | # Install Quadruped-PyMPC
46 | RUN cd Quadruped-PyMPC && pip install -e .
47 |
48 |
49 | # Set the shell to bash and configure the shell prompt and aliases
50 | RUN echo 'export PS1="\[\e]0;\u@\h: \w\a\]${debian_chroot:+($debian_chroot)}\[\033[01;37m\]\u\[\033[00m\]@\[\033[01;32m\]\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\] "' >> /root/.bashrc
51 | RUN echo 'conda activate quadruped_pympc_ros2_env' >> /root/.bashrc
52 |
53 | # Set the working directory and source the bashrc file
54 | WORKDIR /home
55 | RUN source /root/.bashrc
--------------------------------------------------------------------------------
/installation/mamba/integrated_gpu/mamba_environment.yml:
--------------------------------------------------------------------------------
1 | name: quadruped_pympc_env
2 | channels:
3 | - conda-forge
4 | - robostack-staging
5 | dependencies:
6 | - pip
7 | - python==3.11
8 | - casadi
9 | - numpy
10 | - matplotlib
11 | - jaxlib
12 | - jax
13 | - meshcat-python
14 | - scipy==1.15.1
15 | - pinocchio
16 | - adam-robotics[all]
17 | - compilers
18 | - cmake
19 | - pkg-config
20 | - make
21 | - tqdm
22 | - gymnasium
23 | - pip:
24 | - mujoco
25 | - readchar
26 | - liecasadi
27 |
28 |
--------------------------------------------------------------------------------
/installation/mamba/integrated_gpu/mamba_environment_ros1.yml:
--------------------------------------------------------------------------------
1 | name: quadruped_pympc_ros1_env
2 | channels:
3 | - conda-forge
4 | - robostack-staging
5 | dependencies:
6 | - pip
7 | - python
8 | - casadi
9 | - numpy
10 | - matplotlib
11 | - jaxlib
12 | - jax
13 | - meshcat-python
14 | - scipy==1.15.1
15 | - pinocchio
16 | - adam-robotics[all]
17 | - ros-noetic-desktop
18 | - compilers
19 | - cmake
20 | - pkg-config
21 | - make
22 | - ninja
23 | - colcon-common-extensions
24 | - catkin_tools
25 | - rosdep
26 | - tqdm
27 | - gymnasium
28 | - pip:
29 | - mujoco
30 | - readchar
31 | - liecasadi
32 |
--------------------------------------------------------------------------------
/installation/mamba/integrated_gpu/mamba_environment_ros2.yml:
--------------------------------------------------------------------------------
1 | name: quadruped_pympc_ros2_env
2 | channels:
3 | - conda-forge
4 | - robostack-staging
5 | dependencies:
6 | - pip
7 | - python
8 | - casadi
9 | - numpy
10 | - matplotlib
11 | - jaxlib
12 | - jax
13 | - meshcat-python
14 | - scipy==1.15.1
15 | - pinocchio
16 | - adam-robotics[all]
17 | - ros-humble-desktop
18 | - ros-humble-plotjuggler-ros
19 | - compilers
20 | - cmake
21 | - pkg-config
22 | - make
23 | - ninja
24 | - colcon-common-extensions
25 | - catkin_tools
26 | - rosdep
27 | - tqdm
28 | - gymnasium
29 | - pip:
30 | - mujoco
31 | - readchar
32 | - liecasadi
33 |
34 |
--------------------------------------------------------------------------------
/installation/mamba/nvidia_cuda/mamba_environment.yml:
--------------------------------------------------------------------------------
1 | name: quadruped_pympc_env
2 | channels:
3 | - conda-forge
4 | - nvidia
5 | - robostack-staging
6 | dependencies:
7 | - pip
8 | - python==3.11
9 | - casadi
10 | - numpy
11 | - matplotlib
12 | - jaxlib=*=*cuda*
13 | - jax
14 | - cuda-nvcc
15 | - meshcat-python
16 | - scipy==1.15.1
17 | - pinocchio
18 | - adam-robotics[all]
19 | - compilers
20 | - cmake
21 | - pkg-config
22 | - make
23 | - tqdm
24 | - gymnasium
25 | - pip:
26 | - mujoco
27 | - readchar
28 | - liecasadi
29 |
30 |
--------------------------------------------------------------------------------
/installation/mamba/nvidia_cuda/mamba_environment_ros1.yml:
--------------------------------------------------------------------------------
1 | name: quadruped_pympc_ros1_env
2 | channels:
3 | - conda-forge
4 | - nvidia
5 | - robostack-staging
6 | dependencies:
7 | - pip
8 | - python
9 | - casadi
10 | - numpy
11 | - matplotlib
12 | - jaxlib=*=*cuda*
13 | - jax
14 | - cuda-nvcc
15 | - meshcat-python
16 | - scipy==1.15.1
17 | - pinocchio
18 | - adam-robotics[all]
19 | - ros-noetic-desktop
20 | - compilers
21 | - cmake
22 | - pkg-config
23 | - make
24 | - ninja
25 | - colcon-common-extensions
26 | - catkin_tools
27 | - rosdep
28 | - tqdm
29 | - gymnasium
30 | - pip:
31 | - mujoco
32 | - readchar
33 | - liecasadi
34 |
35 |
--------------------------------------------------------------------------------
/installation/mamba/nvidia_cuda/mamba_environment_ros2.yml:
--------------------------------------------------------------------------------
1 | name: quadruped_pympc_ros2_env
2 | channels:
3 | - conda-forge
4 | - nvidia
5 | - robostack-staging
6 | dependencies:
7 | - pip
8 | - python
9 | - casadi
10 | - numpy
11 | - matplotlib
12 | - jaxlib=*=*cuda*
13 | - jax
14 | - cuda-nvcc
15 | - meshcat-python
16 | - scipy==1.15.1
17 | - pinocchio
18 | - adam-robotics[all]
19 | - ros-humble-desktop
20 | - ros-humble-plotjuggler-ros
21 | - compilers
22 | - cmake
23 | - pkg-config
24 | - make
25 | - ninja
26 | - colcon-common-extensions
27 | - catkin_tools
28 | - rosdep
29 | - tqdm
30 | - gymnasium
31 | - pip:
32 | - mujoco
33 | - readchar
34 | - liecasadi
35 |
36 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | #_______________________________________________________
2 | [build-system]
3 | requires = ["hatchling"]
4 | build-backend = "hatchling.build"
5 | #_______________________________________________________
6 | #[build-system]
7 | #requires = ["setuptools", "wheel"]
8 | #build-backend = "setuptools.build_meta"
9 | #
10 | #[tool.setuptools.packages.find]
11 | #include = ["quadruped_pympc"] # package names should match these glob patterns (["*"] by default)
12 | #_______________________________________________________
13 |
14 | [project]
15 | name = "quadruped_pympc"
16 | version = "0.1.2"
17 | description = "A model predictive controller for quadruped robots based on the single rigid body model and written in python. Gradient-based (acados) or Sampling-based (jax)."
18 | authors = [
19 | { name="Giulio Turrisi", email="giulio.turrisi@iit.it" },
20 | ]
21 | maintainers = [
22 | { name="Giulio Turrisi", email="giulio.turrisi@iit.it" },
23 | ]
24 |
25 | readme = "README.md"
26 | #license = "TODO"
27 | keywords = ["robotics", "locomotion", "quadruped", "deep", "reinforcement", "learning", "environment"]
28 | classifiers=[
29 | "Development Status :: 3 - Alpha",
30 | "Intended Audience :: Developers",
31 | "Topic :: Software Development :: Build Tools",
32 | "License :: OSI Approved :: Apache Software License",
33 | "Programming Language :: Python :: 3.10",
34 | ]
35 |
36 | dependencies = [
37 | "liecasadi",
38 | "readchar",
39 | "numpy",
40 | "matplotlib",
41 | "scipy",
42 | #"pin",
43 | "cmake",
44 | "mujoco>=3.3.1",
45 | "gym_quadruped @ git+https://github.com/iit-DLSLab/gym-quadruped.git",
46 | ]
47 |
48 | [project.optional-dependencies]
49 | # pip install -e .[sampling] / pip install quadruped_pympc[sampling]
50 | sampling = [ # Example of how we can set up optional dependencies depending on the controller people want to use.
51 | "jax",
52 | "jaxlib"
53 | ]
54 |
55 | # ____________________________________________________________________________________________________________________
56 | [tool.hatch.metadata]
57 | allow-direct-references = true # Allow direct references to packages in the same repository
58 |
59 |
60 | #____________________________________________________________________________________________________________________
61 | [tool.ruff] # Linter tool
62 | extend-exclude = ["quadruped_pympc/config.py"]
63 | line-length = 120
64 | target-version = "py310"
65 | select = [
66 | # pyflakes
67 | "F",
68 | # pycodestyle
69 | "E",
70 | "W",
71 | # isort
72 | "I",
73 | # pydocstyle
74 | "D"
75 | ]
76 | ignore = [
77 | "D401", # good for methods but not for class docstrings
78 | "D405", # British-style section names are also "proper"!
79 | "E701", # short single-line statements are fine
80 | "D100",
81 | ]
82 |
83 | exclude = [
84 | ".bzr",
85 | ".direnv",
86 | ".eggs",
87 | ".git",
88 | ".idea",
89 | ".git-rewrite",
90 | ".hg",
91 | ".mypy_cache",
92 | ".nox",
93 | ".pants.d",
94 | ".pytype",
95 | ".ruff_cache",
96 | ".svn",
97 | ".tox",
98 | ".venv",
99 | "__pypackages__",
100 | "_build",
101 | "buck-out",
102 | "build",
103 | "dist",
104 | "node_modules",
105 | "venv",
106 | ]
107 |
108 | [tool.ruff.pydocstyle]
109 | convention = "google"
110 |
111 | [tool.ruff.lint]
112 | select = ["E4", "E7", "E9", "F", "I", "N", "B", "C4", "SIM"]
113 | ignore-init-module-imports = true
114 |
115 | [tool.ruff.format]
116 | quote-style = "preserve" # Keep your preferred quotes
117 | skip-magic-trailing-comma = true # Prevents Ruff from enforcing trailing commas
--------------------------------------------------------------------------------
/quadruped_pympc/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iit-DLSLab/Quadruped-PyMPC/3b289c089ce103733071455760ccbbaef9a9672c/quadruped_pympc/__init__.py
--------------------------------------------------------------------------------
/quadruped_pympc/config.py:
--------------------------------------------------------------------------------
1 | """This file includes all of the configuration parameters for the MPC controllers
2 | and of the internal simulations that can be launched from the folder /simulation.
3 | """
4 | import numpy as np
5 | from quadruped_pympc.helpers.quadruped_utils import GaitType
6 |
7 | # These are used both for a real experiment and a simulation -----------
8 | # These are the only attributes needed per quadruped, the rest can be computed automatically ----------------------
9 | robot = 'aliengo' # 'go1', 'go2', 'b2', 'aliengo', 'hyqreal', 'mini_cheetah' # TODO: Load from robot_descriptions.py
10 |
11 | from gym_quadruped.robot_cfgs import RobotConfig, get_robot_config
12 | robot_cfg: RobotConfig = get_robot_config(robot_name=robot)
13 | robot_leg_joints = robot_cfg.leg_joints
14 | robot_feet_geom_names = robot_cfg.feet_geom_names
15 | qpos0_js = robot_cfg.qpos0_js
16 | hip_height = robot_cfg.hip_height
17 |
18 | # ----------------------------------------------------------------------------------------------------------------
19 | if (robot == 'go1'):
20 | mass = 12.019
21 | inertia = np.array([[1.58460467e-01, 1.21660000e-04, -1.55444692e-02],
22 | [1.21660000e-04, 4.68645637e-01, -3.12000000e-05],
23 | [-1.55444692e-02, -3.12000000e-05, 5.24474661e-01]])
24 |
25 | elif (robot == 'go2'):
26 | mass = 15.019
27 | inertia = np.array([[1.58460467e-01, 1.21660000e-04, -1.55444692e-02],
28 | [1.21660000e-04, 4.68645637e-01, -3.12000000e-05],
29 | [-1.55444692e-02, -3.12000000e-05, 5.24474661e-01]])
30 |
31 | elif (robot == 'aliengo'):
32 | mass = 24.637
33 | inertia = np.array([[0.2310941359705289, -0.0014987128245817424, -0.021400468992761768],
34 | [-0.0014987128245817424, 1.4485084687476608, 0.0004641447134275615],
35 | [-0.021400468992761768, 0.0004641447134275615, 1.503217877350808]])
36 |
37 | elif (robot == 'b2'):
38 | mass = 83.49
39 | inertia = np.array([[0.2310941359705289, -0.0014987128245817424, -0.021400468992761768],
40 | [-0.0014987128245817424, 1.4485084687476608, 0.0004641447134275615],
41 | [-0.021400468992761768, 0.0004641447134275615, 1.503217877350808]])
42 |
43 |
44 | elif (robot == 'hyqreal'):
45 | mass = 108.40
46 | inertia = np.array([[4.55031444e+00, 2.75249434e-03, -5.11957307e-01],
47 | [2.75249434e-03, 2.02411774e+01, -7.38560592e-04],
48 | [-5.11957307e-01, -7.38560592e-04, 2.14269772e+01]])
49 |
50 | elif (robot == 'mini_cheetah'):
51 | mass = 12.5
52 | inertia = np.array([[1.58460467e-01, 1.21660000e-04, -1.55444692e-02],
53 | [1.21660000e-04, 4.68645637e-01, -3.12000000e-05],
54 | [-1.55444692e-02, -3.12000000e-05, 5.24474661e-01]])
55 |
56 |
57 | gravity_constant = 9.81 # Exposed in case of different gravity conditions
58 | # ----------------------------------------------------------------------------------------------------------------
59 |
60 | mpc_params = {
61 | # 'nominal' optimizes directly the GRF
62 | # 'input_rates' optimizes the delta GRF
63 | # 'sampling' is a gpu-based mpc that samples the GRF
64 | # 'collaborative' optimizes directly the GRF and has a passive arm model inside
65 | # 'lyapunov' optimizes directly the GRF and has a Lyapunov-based stability constraint
66 | # 'kinodynamic' srbd with joints - experimental
67 | 'type': 'nominal',
68 |
69 | # print the mpc info
70 | 'verbose': False,
71 |
72 | # horizon is the number of timesteps in the future that the mpc will optimize
73 | # dt is the discretization time used in the mpc
74 | 'horizon': 12,
75 | 'dt': 0.02,
76 |
77 | # GRF limits for each single leg
78 | "grf_max": mass * gravity_constant,
79 | "grf_min": 0,
80 | 'mu': 0.5,
81 |
82 | # this is used to have a smaller dt near the start of the horizon
83 | 'use_nonuniform_discretization': False,
84 | 'horizon_fine_grained': 2,
85 | 'dt_fine_grained': 0.01,
86 |
87 | # if this is true, we optimize the step frequency as well
88 | # for the sampling controller, this is done in the rollout
89 | # for the gradient-based controller, this is done with a batched version of the ocp
90 | 'optimize_step_freq': False,
91 | 'step_freq_available': [1.4, 2.0, 2.4],
92 |
93 | # ----- START properties only for the gradient-based mpc -----
94 |
95 | # this is used if you want to manually warm start the mpc
96 | 'use_warm_start': False,
97 |
98 | # this enables integrators for height, linear velocities, roll and pitch
99 | 'use_integrators': False,
100 | 'alpha_integrator': 0.1,
101 | 'integrator_cap': [0.5, 0.2, 0.2, 0.0, 0.0, 1.0],
102 |
103 | # if this is off, the mpc will not optimize the footholds and will
104 | # use only the ones provided in the reference
105 | 'use_foothold_optimization': True,
106 |
107 | # this is set to false automatically if use_foothold_optimization is false,
108 | # because in that case we cannot choose the footholds and foothold
109 | # constraints do not make any sense
110 | 'use_foothold_constraints': False,
111 |
112 | # works with all the mpc types except 'sampling'. In sim it does not do much for now,
113 | # but on the real robot it minimizes the delay between the mpc control and the state
114 | 'use_RTI': False,
115 | # If RTI is used, we can set the advance RTI-step! (Standard is the simpler RTI)
116 | # See https://arxiv.org/pdf/2403.07101.pdf
117 | 'as_rti_type': "Standard", # "AS-RTI-A", "AS-RTI-B", "AS-RTI-C", "AS-RTI-D", "Standard"
118 | 'as_rti_iter': 1, # > 0, the higher the better, but slower computation!
119 |
120 | # This will force to use DDP instead of SQP, based on https://arxiv.org/abs/2403.10115.
121 | # Note that RTI is not compatible with DDP, and no state constraints are considered for now
122 | 'use_DDP': False,
123 |
124 | # this is used only when 'use_RTI' is false, in a single mpc feedback loop.
125 | # More is better, but slower computation!
126 | 'num_qp_iterations': 1,
127 |
128 | # this is used to speed up or robustify acados' solver (hpipm).
129 | 'solver_mode': 'balance', # balance, robust, speed, crazy_speed
130 |
131 |
132 | # this is used only for the case 'input_rates', using as GRF not the actual state
133 | # of the robot but the predicted one. Can be activated to compensate
134 | # for the delay in the control loop on the real robot
135 | 'use_input_prediction': False,
136 |
137 | # ONLY ONE CAN BE TRUE AT A TIME (only gradient)
138 | 'use_static_stability': False,
139 | 'use_zmp_stability': False,
140 | 'trot_stability_margin': 0.04,
141 | 'pace_stability_margin': 0.1,
142 | 'crawl_stability_margin': 0.04, # in general, 0.02 is a good value
143 |
144 | # this is used to compensate for the external wrenches
145 | # you should explicitly provide this value in compute_control
146 | 'external_wrenches_compensation': True,
147 | 'external_wrenches_compensation_num_step': 15,
148 |
149 | # this is used only in the case of collaborative mpc, to
150 | # compensate for the external wrench in the prediction (only collaborative)
151 | 'passive_arm_compensation': True,
152 |
153 |
154 | # Gain for Lyapunov-based MPC
155 | 'K_z1': np.array([1, 1, 10]),
156 | 'K_z2': np.array([1, 4, 10]),
157 | 'residual_dynamics_upper_bound': 30,
158 | 'use_residual_dynamics_decay': False,
159 |
160 | # ----- END properties for the gradient-based mpc -----
161 |
162 |
163 | # ----- START properties only for the sampling-based mpc -----
164 |
165 | # this is used only in the case 'sampling'.
166 | 'sampling_method': 'random_sampling', # 'random_sampling', 'mppi', 'cem_mppi'
167 | 'control_parametrization': 'cubic_spline', # 'cubic_spline', 'linear_spline', 'zero_order'
168 | 'num_splines': 2, # number of splines to use for the control parametrization
169 | 'num_parallel_computations': 10000, # More is better, but slower computation!
170 | 'num_sampling_iterations': 1, # More is better, but slower computation!
171 | 'device': 'gpu', # 'gpu', 'cpu'
172 | # covariances for the sampling methods
173 | 'sigma_cem_mppi': 3,
174 | 'sigma_mppi': 3,
175 | 'sigma_random_sampling': [0.2, 3, 10],
176 | 'shift_solution': False,
177 |
178 | # ----- END properties for the sampling-based mpc -----
179 | }
180 | # -----------------------------------------------------------------------
181 |
182 | simulation_params = {
183 | 'swing_generator': 'scipy', # 'scipy', 'explicit'
184 | 'swing_position_gain_fb': 500,
185 | 'swing_velocity_gain_fb': 10,
186 | 'impedence_joint_position_gain': 10.0,
187 | 'impedence_joint_velocity_gain': 2.0,
188 |
189 | 'step_height': 0.3 * hip_height,
190 |
191 | # Visual foothold adaptation
192 | "visual_foothold_adaptation": 'blind', #'blind', 'height', 'vfa'
193 |
194 | # this is the integration time used in the simulator
195 | 'dt': 0.002,
196 |
197 | 'gait': 'trot', # 'trot', 'pace', 'crawl', 'bound', 'full_stance'
198 | 'gait_params': {'trot': {'step_freq': 1.4, 'duty_factor': 0.65, 'type': GaitType.TROT.value},
199 | 'crawl': {'step_freq': 0.5, 'duty_factor': 0.8, 'type': GaitType.BACKDIAGONALCRAWL.value},
200 | 'pace': {'step_freq': 1.4, 'duty_factor': 0.7, 'type': GaitType.PACE.value},
201 | 'bound': {'step_freq': 1.8, 'duty_factor': 0.65, 'type': GaitType.BOUNDING.value},
202 | 'full_stance': {'step_freq': 2, 'duty_factor': 0.65, 'type': GaitType.FULL_STANCE.value},
203 | },
204 |
205 | # This is used to activate or deactivate the reflexes upon contact detection
206 | 'reflex_trigger_mode': 'tracking', # 'tracking', 'geom_contact', False
207 | 'reflex_height_enhancement': False,
208 | 'velocity_modulator': True,
209 |
210 | # velocity mode: 'human' lets you drive the robot with the keyboard, 'forward' gives
211 | # only a random forward linear velocity, 'random' gives random linear and yaw velocities
212 | 'mode': 'human', # 'human', 'forward', 'random'
213 | 'ref_z': hip_height,
214 |
215 |
216 | # the MPC will be called every 1/(mpc_frequency*dt) timesteps
217 | # this helps to evaluate more realistically the performance of the controller
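# (e.g. with the simulation 'dt' of 0.002 above and mpc_frequency = 100, the mpc is called once every 1/(100*0.002) = 5 simulation steps, i.e. at 100 Hz)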
218 | 'mpc_frequency': 100,
219 |
220 | 'use_inertia_recomputation': True,
221 |
222 | 'scene': 'flat', # flat, random_boxes, random_pyramids, perlin
223 |
224 | }
225 | # -----------------------------------------------------------------------
226 |
--------------------------------------------------------------------------------
/quadruped_pympc/controllers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iit-DLSLab/Quadruped-PyMPC/3b289c089ce103733071455760ccbbaef9a9672c/quadruped_pympc/controllers/__init__.py
--------------------------------------------------------------------------------
/quadruped_pympc/controllers/gradient/collaborative/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iit-DLSLab/Quadruped-PyMPC/3b289c089ce103733071455760ccbbaef9a9672c/quadruped_pympc/controllers/gradient/collaborative/__init__.py
--------------------------------------------------------------------------------
/quadruped_pympc/controllers/gradient/input_rates/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iit-DLSLab/Quadruped-PyMPC/3b289c089ce103733071455760ccbbaef9a9672c/quadruped_pympc/controllers/gradient/input_rates/__init__.py
--------------------------------------------------------------------------------
/quadruped_pympc/controllers/gradient/nominal/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iit-DLSLab/Quadruped-PyMPC/3b289c089ce103733071455760ccbbaef9a9672c/quadruped_pympc/controllers/gradient/nominal/__init__.py
--------------------------------------------------------------------------------
/quadruped_pympc/controllers/gradient/nominal/centroidal_model_nominal.py:
--------------------------------------------------------------------------------
1 | # Description: This file contains the class Centroidal_Model that defines the
2 | # prediction model used by the MPC
3 |
4 | # Authors: Giulio Turrisi -
5 |
6 | import os
7 |
8 | import casadi as cs
9 | import numpy as np
10 | from acados_template import AcadosModel
11 |
12 | import quadruped_pympc.config as config
13 |
14 |
15 | # Class that defines the prediction model of the NMPC
16 | class Centroidal_Model_Nominal:
17 | def __init__(self) -> None:
18 | """
19 | This method initializes the foothold generator Centroidal_Model, which creates
20 | the prediction model of the NMPC.
21 | """
22 |
23 | # Define state and its casadi variables
24 | com_position_x = cs.SX.sym("com_position_x")
25 | com_position_y = cs.SX.sym("com_position_y")
26 | com_position_z = cs.SX.sym("com_position_z")
27 |
28 | com_velocity_x = cs.SX.sym("com_velocity_x")
29 | com_velocity_y = cs.SX.sym("com_velocity_y")
30 | com_velocity_z = cs.SX.sym("com_velocity_z")
31 |
32 | roll = cs.SX.sym("roll", 1, 1)
33 | pitch = cs.SX.sym("pitch", 1, 1)
34 | yaw = cs.SX.sym("yaw", 1, 1)
35 | omega_x = cs.SX.sym("omega_x", 1, 1)
36 | omega_y = cs.SX.sym("omega_y", 1, 1)
37 | omega_z = cs.SX.sym("omega_z", 1, 1)
38 |
39 | foot_position_fl = cs.SX.sym("foot_position_fl", 3, 1)
40 | foot_position_fr = cs.SX.sym("foot_position_fr", 3, 1)
41 | foot_position_rl = cs.SX.sym("foot_position_rl", 3, 1)
42 | foot_position_rr = cs.SX.sym("foot_position_rr", 3, 1)
43 |
44 | com_position_z_integral = cs.SX.sym("com_position_z_integral")
45 | com_velocity_x_integral = cs.SX.sym("com_velocity_x_integral")
46 | com_velocity_y_integral = cs.SX.sym("com_velocity_y_integral")
47 | com_velocity_z_integral = cs.SX.sym("com_velocity_z_integral")
48 | roll_integral = cs.SX.sym("roll_integral")
49 | pitch_integral = cs.SX.sym("pitch_integral")
50 | omega_x_integral = cs.SX.sym("omega_x_integral")
51 | omega_y_integral = cs.SX.sym("omega_y_integral")
52 | omega_z_integral = cs.SX.sym("omega_z_integral")
53 |
54 | self.states = cs.vertcat(
55 | com_position_x,
56 | com_position_y,
57 | com_position_z,
58 | com_velocity_x,
59 | com_velocity_y,
60 | com_velocity_z,
61 | roll,
62 | pitch,
63 | yaw,
64 | omega_x,
65 | omega_y,
66 | omega_z,
67 | foot_position_fl,
68 | foot_position_fr,
69 | foot_position_rl,
70 | foot_position_rr,
71 | com_position_z_integral,
72 | com_velocity_x_integral,
73 | com_velocity_y_integral,
74 | com_velocity_z_integral,
75 | roll_integral,
76 | pitch_integral,
77 | )
78 |
79 | # Define state dot
80 | self.states_dot = cs.vertcat(
81 | cs.SX.sym("linear_com_vel", 3, 1),
82 | cs.SX.sym("linear_com_acc", 3, 1),
83 | cs.SX.sym("euler_rates_base", 3, 1),
84 | cs.SX.sym("angular_acc_base", 3, 1),
85 | cs.SX.sym("linear_vel_foot_FL", 3, 1),
86 | cs.SX.sym("linear_vel_foot_FR", 3, 1),
87 | cs.SX.sym("linear_vel_foot_RL", 3, 1),
88 | cs.SX.sym("linear_vel_foot_RR", 3, 1),
89 | cs.SX.sym("linear_com_vel_z_integral", 1, 1),
90 | cs.SX.sym("linear_com_acc_integral", 3, 1),
91 | cs.SX.sym("euler_rates_roll_integral", 1, 1),
92 | cs.SX.sym("euler_rates_pitch_integral", 1, 1),
93 | )
94 |
95 | # Define input and its casadi variables
96 | foot_velocity_fl = cs.SX.sym("foot_velocity_fl", 3, 1)
97 | foot_velocity_fr = cs.SX.sym("foot_velocity_fr", 3, 1)
98 | foot_velocity_rl = cs.SX.sym("foot_velocity_rl", 3, 1)
99 | foot_velocity_rr = cs.SX.sym("foot_velocity_rr", 3, 1)
100 |
101 | foot_force_fl = cs.SX.sym("foot_force_fl", 3, 1)
102 | foot_force_fr = cs.SX.sym("foot_force_fr", 3, 1)
103 | foot_force_rl = cs.SX.sym("foot_force_rl", 3, 1)
104 | foot_force_rr = cs.SX.sym("foot_force_rr", 3, 1)
105 |
106 | self.inputs = cs.vertcat(
107 | foot_velocity_fl,
108 | foot_velocity_fr,
109 | foot_velocity_rl,
110 | foot_velocity_rr,
111 | foot_force_fl,
112 | foot_force_fr,
113 | foot_force_rl,
114 | foot_force_rr,
115 | )
116 |
117 | # Useful for debugging what goes where in y_ref in the compute_control function,
118 | # because there are a lot of variables
119 | self.y_ref = cs.vertcat(self.states, self.inputs)
120 |
121 | # Define acados parameters that can be changed at runtime
122 | self.stanceFL = cs.SX.sym("stanceFL", 1, 1)
123 | self.stanceFR = cs.SX.sym("stanceFR", 1, 1)
124 | self.stanceRL = cs.SX.sym("stanceRL", 1, 1)
125 | self.stanceRR = cs.SX.sym("stanceRR", 1, 1)
126 | self.stance_param = cs.vertcat(self.stanceFL, self.stanceFR, self.stanceRL, self.stanceRR)
127 |
128 | self.mu_friction = cs.SX.sym("mu_friction", 1, 1)
129 | self.stance_proximity = cs.SX.sym("stanceProximity", 4, 1)
130 | self.base_position = cs.SX.sym("base_position", 3, 1)
131 | self.base_yaw = cs.SX.sym("base_yaw", 1, 1)
132 |
133 | self.external_wrench = cs.SX.sym("external_wrench", 6, 1)
134 |
135 | self.inertia = cs.SX.sym("inertia", 9, 1)
136 | self.mass = cs.SX.sym("mass", 1, 1)
137 |
138 | self.gravity_constant = config.gravity_constant
139 |
140 | # Not so useful, i can instantiate a casadi function for the fd
141 | param = cs.vertcat(
142 | self.stance_param,
143 | self.mu_friction,
144 | self.stance_proximity,
145 | self.base_position,
146 | self.base_yaw,
147 | self.external_wrench,
148 | self.inertia,
149 | self.mass,
150 | )
151 | fd = self.forward_dynamics(self.states, self.inputs, param)
152 | self.fun_forward_dynamics = cs.Function("fun_forward_dynamics", [self.states, self.inputs, param], [fd])
153 |
154 | def forward_dynamics(self, states: np.ndarray, inputs: np.ndarray, param: np.ndarray) -> cs.SX:
155 | """
156 | This method computes the symbolic forward dynamics of the robot. It is used inside
157 | Acados to compute the prediction model. It fills the same variables as the ones in
158 | self.states_dot.
159 |
160 | Args:
161 | states: A numpy array of shape (30,) representing the current state of the robot.
162 | inputs: A numpy array of shape (24,) representing the inputs to the robot.
163 | param: A numpy array of shape (29,) representing the parameters (contact status, friction, base pose, external wrench, inertia, mass) of the robot.
164 |
165 | Returns:
166 | A CasADi SX object of shape (30,) representing the state derivative of the robot.
167 | """
168 |
169 | # Saving for clarity a bunch of variables
170 | foot_velocity_fl = inputs[0:3]
171 | foot_velocity_fr = inputs[3:6]
172 | foot_velocity_rl = inputs[6:9]
173 | foot_velocity_rr = inputs[9:12]
174 | foot_force_fl = inputs[12:15]
175 | foot_force_fr = inputs[15:18]
176 | foot_force_rl = inputs[18:21]
177 | foot_force_rr = inputs[21:24]
178 |
179 | com_position = states[0:3]
180 | foot_position_fl = states[12:15]
181 | foot_position_fr = states[15:18]
182 | foot_position_rl = states[18:21]
183 | foot_position_rr = states[21:24]
184 |
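# param layout (same order as the cs.vertcat in __init__/export_robot_model):
# [0:4] stance flags, [4] friction mu, [5:9] stance proximity,
# [9:12] base position, [12] base yaw, [13:19] external wrench,
# [19:28] inertia (3x3, flattened), [28] mass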
185 | stanceFL = param[0]
186 | stanceFR = param[1]
187 | stanceRL = param[2]
188 | stanceRR = param[3]
189 | stance_proximity_FL = param[5]
190 | stance_proximity_FR = param[6]
191 | stance_proximity_RL = param[7]
192 | stance_proximity_RR = param[8]
193 |
194 | external_wrench_linear = param[13:16]
195 | external_wrench_angular = param[16:19]
196 |
197 | inertia = param[19:28]
198 | inertia = inertia.reshape((3, 3))
199 | mass = param[28]
200 |
201 | # FINAL linear_com_vel STATE (1)
202 | linear_com_vel = states[3:6]
203 |
204 | # FINAL linear_com_acc STATE (2)
205 | temp = foot_force_fl @ stanceFL
206 | temp += foot_force_fr @ stanceFR
207 | temp += foot_force_rl @ stanceRL
208 | temp += foot_force_rr @ stanceRR
209 | temp += external_wrench_linear
210 | gravity = np.array([0, 0, -self.gravity_constant])
211 | linear_com_acc = (1 / mass) @ temp + gravity
212 |
213 | # Start to write the component of euler_rates_base and angular_acc_base STATES
214 | w = states[9:12]
215 | roll = states[6]
216 | pitch = states[7]
217 | yaw = states[8]
218 |
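# conj_euler_rates maps ZYX euler-angle rates to the body angular velocity w,
# so its inverse (used below) recovers the euler rates from w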
219 | conj_euler_rates = cs.SX.eye(3)
220 | conj_euler_rates[1, 1] = cs.cos(roll)
221 | conj_euler_rates[2, 2] = cs.cos(pitch) * cs.cos(roll)
222 | conj_euler_rates[2, 1] = -cs.sin(roll)
223 | conj_euler_rates[0, 2] = -cs.sin(pitch)
224 | conj_euler_rates[1, 2] = cs.cos(pitch) * cs.sin(roll)
225 |
226 | temp2 = cs.skew(foot_position_fl - com_position) @ foot_force_fl @ stanceFL
227 | temp2 += cs.skew(foot_position_fr - com_position) @ foot_force_fr @ stanceFR
228 | temp2 += cs.skew(foot_position_rl - com_position) @ foot_force_rl @ stanceRL
229 | temp2 += cs.skew(foot_position_rr - com_position) @ foot_force_rr @ stanceRR
230 |
231 | # FINAL euler_rates_base STATE (3)
232 | euler_rates_base = cs.inv(conj_euler_rates) @ w
233 |
234 | # FINAL angular_acc_base STATE (4)
235 | # Z Y X rotations!
236 | Rx = cs.SX.eye(3)
237 | Rx[0, 0] = 1
238 | Rx[0, 1] = 0
239 | Rx[0, 2] = 0
240 | Rx[1, 0] = 0
241 | Rx[1, 1] = cs.cos(roll)
242 | Rx[1, 2] = cs.sin(roll)
243 | Rx[2, 0] = 0
244 | Rx[2, 1] = -cs.sin(roll)
245 | Rx[2, 2] = cs.cos(roll)
246 |
247 | Ry = cs.SX.eye(3)
248 | Ry[0, 0] = cs.cos(pitch)
249 | Ry[0, 1] = 0
250 | Ry[0, 2] = -cs.sin(pitch)
251 | Ry[1, 0] = 0
252 | Ry[1, 1] = 1
253 | Ry[1, 2] = 0
254 | Ry[2, 0] = cs.sin(pitch)
255 | Ry[2, 1] = 0
256 | Ry[2, 2] = cs.cos(pitch)
257 |
258 | Rz = cs.SX.eye(3)
259 | Rz[0, 0] = cs.cos(yaw)
260 | Rz[0, 1] = cs.sin(yaw)
261 | Rz[0, 2] = 0
262 | Rz[1, 0] = -cs.sin(yaw)
263 | Rz[1, 1] = cs.cos(yaw)
264 | Rz[1, 2] = 0
265 | Rz[2, 0] = 0
266 | Rz[2, 1] = 0
267 | Rz[2, 2] = 1
268 |
269 | b_R_w = Rx @ Ry @ Rz
270 |
271 | temp2 = temp2 + external_wrench_angular
272 | angular_acc_base = cs.inv(inertia) @ (b_R_w @ temp2 - cs.skew(w) @ inertia @ w)
273 |
274 | # angular_acc_base = -cs.inv(self.inertia)@cs.skew(w)@self.inertia@w + cs.inv(self.inertia)@b_R_w@temp2
275 | # angular_acc_base = -cs.inv(self.inertia)@cs.skew(w)@self.inertia@w + cs.inv(self.inertia)@b_R_w@temp2 + external_wrench_angular
276 |
277 | # FINAL linear_foot_vel STATES (5,6,7,8)
278 | if not config.mpc_params["use_foothold_optimization"]:
279 | foot_velocity_fl = foot_velocity_fl @ 0.0
280 | foot_velocity_fr = foot_velocity_fr @ 0.0
281 | foot_velocity_rl = foot_velocity_rl @ 0.0
282 | foot_velocity_rr = foot_velocity_rr @ 0.0
283 | linear_foot_vel_FL = foot_velocity_fl @ (1 - stanceFL) @ (1 - stance_proximity_FL)
284 | linear_foot_vel_FR = foot_velocity_fr @ (1 - stanceFR) @ (1 - stance_proximity_FR)
285 | linear_foot_vel_RL = foot_velocity_rl @ (1 - stanceRL) @ (1 - stance_proximity_RL)
286 | linear_foot_vel_RR = foot_velocity_rr @ (1 - stanceRR) @ (1 - stance_proximity_RR)
287 |
288 | # Integral states
289 | integral_states = states[24:]
290 | integral_states[0] += states[2]
291 | integral_states[1] += states[3]
292 | integral_states[2] += states[4]
293 | integral_states[3] += states[5]
294 | integral_states[4] += roll
295 | integral_states[5] += pitch
296 |
297 | # The order of the return should be equal to the order of the states_dot
298 | return cs.vertcat(
299 | linear_com_vel,
300 | linear_com_acc,
301 | euler_rates_base,
302 | angular_acc_base,
303 | linear_foot_vel_FL,
304 | linear_foot_vel_FR,
305 | linear_foot_vel_RL,
306 | linear_foot_vel_RR,
307 | integral_states,
308 | )
309 |
310 | def export_robot_model(self) -> AcadosModel:
311 | """
312 | This method sets some general properties of the NMPC, such as the params,
313 | prediction model, etc. It will be called in centroidal_nmpc.py
314 | """
315 |
316 | # dynamics
317 | self.param = cs.vertcat(
318 | self.stance_param,
319 | self.mu_friction,
320 | self.stance_proximity,
321 | self.base_position,
322 | self.base_yaw,
323 | self.external_wrench,
324 | self.inertia,
325 | self.mass,
326 | )
327 | f_expl = self.forward_dynamics(self.states, self.inputs, self.param)
328 | f_impl = self.states_dot - f_expl
329 |
330 | acados_model = AcadosModel()
331 | acados_model.f_impl_expr = f_impl
332 | acados_model.f_expl_expr = f_expl
333 | acados_model.x = self.states
334 | acados_model.xdot = self.states_dot
335 | acados_model.u = self.inputs
336 | acados_model.p = self.param
337 | acados_model.name = "centroidal_model"
338 |
339 | return acados_model
340 |
--------------------------------------------------------------------------------
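
Note on the rotation conventions used in forward_dynamics above: Rx, Ry and Rz are the transposed elementary rotations, so b_R_w = Rx @ Ry @ Rz is the transpose of the world-from-body rotation for extrinsic x-y-z Euler angles, and conj_euler_rates is the standard matrix mapping Euler-angle rates to the body angular velocity w. A minimal NumPy/SciPy check of that convention (illustrative, not part of the repository):

import numpy as np
from scipy.spatial.transform import Rotation

roll, pitch, yaw = 0.1, -0.2, 0.3

# world-from-body rotation for extrinsic x-y-z Euler angles: R = Rz(yaw) @ Ry(pitch) @ Rx(roll)
w_R_b = Rotation.from_euler("xyz", [roll, pitch, yaw]).as_matrix()

# body-from-world rotation assembled like in forward_dynamics (transposed elementary rotations)
def rot_x(a): return np.array([[1, 0, 0], [0, np.cos(a), np.sin(a)], [0, -np.sin(a), np.cos(a)]])
def rot_y(a): return np.array([[np.cos(a), 0, -np.sin(a)], [0, 1, 0], [np.sin(a), 0, np.cos(a)]])
def rot_z(a): return np.array([[np.cos(a), np.sin(a), 0], [-np.sin(a), np.cos(a), 0], [0, 0, 1]])

b_R_w = rot_x(roll) @ rot_y(pitch) @ rot_z(yaw)
assert np.allclose(b_R_w, w_R_b.T)  # the model uses b_R_w to express the world-frame foot torques in the body frame
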
/quadruped_pympc/controllers/sampling/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iit-DLSLab/Quadruped-PyMPC/3b289c089ce103733071455760ccbbaef9a9672c/quadruped_pympc/controllers/sampling/__init__.py
--------------------------------------------------------------------------------
/quadruped_pympc/controllers/sampling/centroidal_model_jax.py:
--------------------------------------------------------------------------------
1 | # Description: This file contains the class Centroidal_Model that defines the
2 | # prediction model used by the MPC
3 |
4 | # Authors: Giulio Turrisi -
5 |
6 | import time
7 |
8 | import jax
9 | import jax.numpy as jnp
10 | from jax import jit, random
11 |
12 | from quadruped_pympc import config
13 |
14 | dtype_general = 'float32'
15 | jax.config.update("jax_default_matmul_precision", "float32")
16 |
17 |
18 | # Class that defines the prediction model of the NMPC
19 | class Centroidal_Model_JAX:
20 | def __init__(self, dt, device) -> None:
21 | """
22 | This method initializes the Centroidal_Model_JAX class, which defines
23 | the prediction model used by the sampling-based MPC.
24 | """
25 |
26 | self.dt = dt
27 |
28 | if device == "gpu":
29 | try:
30 | self.device = jax.devices("gpu")[0]
31 | except RuntimeError:  # jax raises RuntimeError when the requested backend is unavailable
32 | self.device = jax.devices("cpu")[0]
33 | print("GPU not available, using CPU")
34 | else:
35 | self.device = jax.devices("cpu")[0]
36 |
37 | # Mass and Inertia robot dependant
38 | self.mass = config.mass
39 | self.inertia = jnp.array(config.inertia)
40 |
41 | # Nonuniform discretization
42 | if config.mpc_params['use_nonuniform_discretization']:
43 | time_steps_fine_grained = jnp.tile(
44 | config.mpc_params['dt_fine_grained'], config.mpc_params['horizon_fine_grained']
45 | )
46 | self.dts = jnp.concatenate(
47 | (
48 | time_steps_fine_grained,
49 | jnp.tile(self.dt, config.mpc_params['horizon'] - config.mpc_params['horizon_fine_grained']),
50 | )
51 | )
52 | else:
53 | self.dts = jnp.tile(self.dt, config.mpc_params["horizon"])
54 |
55 | # We precompute the inverse of the inertia
56 | self.inertia_inv = self.calculate_inverse(self.inertia)
57 |
58 | # State and input dimensions
59 | self.state_dim = 12
60 | self.input_dim = 24
61 |
62 | # fd depends on self.mass and self.inertia, which jit treats as baked-in constants;
63 | # compiling the vectorized integrator here ensures those parameters stay fixed across calls
64 | vectorized_integrate_jax = jax.vmap(self.integrate_jax, in_axes=(None, 0, None), out_axes=0)
65 | self.compiled_integrate_jax = jit(vectorized_integrate_jax, device=self.device)
66 |
67 | def calculate_inverse(self, A):
68 | a11 = A[0, 0]
69 | a12 = A[0, 1]
70 | a13 = A[0, 2]
71 | a21 = A[1, 0]
72 | a22 = A[1, 1]
73 | a23 = A[1, 2]
74 | a31 = A[2, 0]
75 | a32 = A[2, 1]
76 | a33 = A[2, 2]
77 |
78 | # Calculate the determinant DET of A
79 | DET = a11 * (a33 * a22 - a32 * a23) - a21 * (a33 * a12 - a32 * a13) + a31 * (a23 * a12 - a22 * a13)
80 |
81 | # Calculate the inverse of A
82 | return (
83 | jnp.array(
84 | [
85 | [(a33 * a22 - a32 * a23), -(a33 * a12 - a32 * a13), (a23 * a12 - a22 * a13)],
86 | [-(a33 * a21 - a31 * a23), (a33 * a11 - a31 * a13), -(a23 * a11 - a21 * a13)],
87 | [(a32 * a21 - a31 * a22), -(a32 * a11 - a31 * a12), (a22 * a11 - a21 * a12)],
88 | ]
89 | )
90 | / DET
91 | )
92 |
93 | def fd(self, states: jnp.ndarray, inputs: jnp.ndarray, contact_status: jnp.ndarray):
94 | """
95 | This method computes the state derivative of the system.
96 | """
97 |
98 | def skew(v):
99 | return jnp.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
100 |
101 | # Extracting variables for clarity
102 | foot_position_fl, foot_position_fr, foot_position_rl, foot_position_rr = jnp.split(states[12:], 4)
103 | foot_force_fl, foot_force_fr, foot_force_rl, foot_force_rr = jnp.split(inputs[12:], 4)
104 | com_position = states[:3]
105 | stanceFL, stanceFR, stanceRL, stanceRR = contact_status[:4]
106 |
107 | # Compute linear_com_vel
108 | linear_com_vel = states[3:6]
109 |
110 | # Compute linear_com_acc
111 | temp = (
112 | jnp.dot(foot_force_fl, stanceFL)
113 | + jnp.dot(foot_force_fr, stanceFR)
114 | + jnp.dot(foot_force_rl, stanceRL)
115 | + jnp.dot(foot_force_rr, stanceRR)
116 | )
117 | gravity = jnp.array([jnp.float32(0), jnp.float32(0), jnp.float32(-9.81)])
118 | linear_com_acc = jnp.dot(jnp.float32(1) / self.mass, temp) + gravity
119 |
120 | # Compute euler_rates_base and angular_acc_base
121 | w = states[9:12]
122 | roll, pitch, yaw = states[6:9]
123 |
124 | conj_euler_rates = jnp.array(
125 | [
126 | [jnp.float32(1), jnp.float32(0), -jnp.sin(pitch)],
127 | [jnp.float32(0), jnp.cos(roll), jnp.cos(pitch) * jnp.sin(roll)],
128 | [jnp.float32(0), -jnp.sin(roll), jnp.cos(pitch) * jnp.cos(roll)],
129 | ]
130 | )
131 |
132 | temp2 = jnp.dot(skew(foot_position_fl - com_position), foot_force_fl) * stanceFL
133 | temp2 += jnp.dot(skew(foot_position_fr - com_position), foot_force_fr) * stanceFR
134 | temp2 += jnp.dot(skew(foot_position_rl - com_position), foot_force_rl) * stanceRL
135 | temp2 += jnp.dot(skew(foot_position_rr - com_position), foot_force_rr) * stanceRR
136 |
137 | euler_rates_base = jnp.dot(self.calculate_inverse(conj_euler_rates), w)
138 |
139 | # FINAL angular_acc_base STATE (4)
140 | # Z Y X rotations!
141 | b_R_w = jnp.array(
142 | [
143 | [jnp.cos(pitch) * jnp.cos(yaw), jnp.cos(pitch) * jnp.sin(yaw), -jnp.sin(pitch)],
144 | [
145 | jnp.sin(roll) * jnp.sin(pitch) * jnp.cos(yaw) - jnp.cos(roll) * jnp.sin(yaw),
146 | jnp.sin(roll) * jnp.sin(pitch) * jnp.sin(yaw) + jnp.cos(roll) * jnp.cos(yaw),
147 | jnp.sin(roll) * jnp.cos(pitch),
148 | ],
149 | [
150 | jnp.cos(roll) * jnp.sin(pitch) * jnp.cos(yaw) + jnp.sin(roll) * jnp.sin(yaw),
151 | jnp.cos(roll) * jnp.sin(pitch) * jnp.sin(yaw) - jnp.sin(roll) * jnp.cos(yaw),
152 | jnp.cos(roll) * jnp.cos(pitch),
153 | ],
154 | ]
155 | )
156 |
157 | angular_acc_base = -jnp.dot(self.inertia_inv, jnp.dot(skew(w), jnp.dot(self.inertia, w))) + jnp.dot(
158 | self.inertia_inv, jnp.dot(b_R_w, temp2)
159 | )
160 |
161 | # Returning the results
162 | return jnp.concatenate([linear_com_vel, linear_com_acc, euler_rates_base, angular_acc_base])
163 |
164 | def integrate_jax(self, state, inputs, contact_status, n=0):
165 | """
166 | This method computes the forward evolution of the system; n indexes self.dts (defaults to 0 so call sites that omit it keep working).
167 | """
168 | fd = self.fd(state, inputs, contact_status)
169 |
170 | # Simple euler!
171 | dt = self.dts[n]
172 | new_state = state[0:12] + fd * dt
173 |
174 | return jnp.concatenate([new_state, state[12:]])
175 |
176 |
177 | if __name__ == "__main__":
178 | model_jax = Centroidal_Model_JAX(dt=jnp.float32(0.04), device="gpu")
179 |
180 | # all stance, friction, stance proximity
181 | param = jnp.array([1.0, 1.0, 1.0, 1.0, 0.6, 0.0, 0.0, 0.0, 0.0], dtype=dtype_general)
182 |
183 | state = jnp.array(
184 | [
185 | 0.0, 0.0, 0.0,  # com position
186 | 0.0, 0.0, 0.0,  # com linear velocity
187 | 0.0, 0.0, 0.0,  # euler angles
188 | 0.0, 0.0, 0.0,  # base angular velocity
189 | 0.2, 0.15, 0.0,  # foot position fl (arbitrary test values)
190 | 0.2, -0.15, 0.0,  # foot position fr
191 | -0.2, 0.15, 0.0,  # foot position rl
192 | -0.2, -0.15, 0.0,  # foot position rr
193 | ],
194 | dtype=dtype_general,
195 | )
196 |
197 | # NOTE: the 12 dynamic states are followed by the 12 foot positions, which
198 | # fd() reads from states[12:] and integrate_jax() carries through unchanged.
199 |
200 |
201 | input = jnp.array(
202 | [
203 | 0.0,
204 | 0.0,
205 | 0.0, # foot position fl
206 | 0.0,
207 | 0.0,
208 | 0.0, # foot position fr
209 | 0.0,
210 | 0.0,
211 | 0.0, # foot position rl
212 | 0.0,
213 | 0.0,
214 | 0.0, # foot position rr
215 | 0.0,
216 | 0.0,
217 | 0.0, # foot force fl
218 | 0.0,
219 | 0.0,
220 | 0.0, # foot force fr
221 | 0.0,
222 | 0.0,
223 | 0.0, # foot force rl
224 | 0.0,
225 | 0.0,
226 | 0.0,
227 | ],
228 | dtype=dtype_general,
229 | ) # foot force rr
230 |
231 | # test fd
232 | acc = model_jax.fd(state, input, param)
233 |
234 | # test integrated
235 | print("testing SINGLE integration PYTHON")
236 | time_start = time.time()
237 | state_integrated = model_jax.integrate_jax(state, input, param)
238 | print("computation time: ", time.time() - time_start)
239 |
240 | # test compiled integrated
241 | print("\ntesting SINGLE integration COMPILED-FIRST TIME")
242 | compiled_integrated_jax_single = jit(model_jax.integrate_jax, device=model_jax.device)
243 | time_start = time.time()
244 | state_integrated = compiled_integrated_jax_single(state, input, param).block_until_ready()
245 | print("computation time: ", time.time() - time_start)
246 |
247 | print("\ntesting SINGLE integration COMPILED-SECOND TIME")
248 | time_start = time.time()
249 | state_integrated = compiled_integrated_jax_single(state, input, param).block_until_ready()
250 | print("computation time: ", time.time() - time_start)
251 |
252 | print("\ntesting MULTIPLE integration COMPILED-FIRST TIME")
253 | threads = 10000
254 |
255 | key = random.PRNGKey(42)
256 | input_vec = random.randint(key, (model_jax.input_dim * threads,), minval=-2, maxval=2).reshape(
257 | threads, model_jax.input_dim
258 | )
259 |
260 | time_start = time.time()
261 | model_jax.compiled_integrate_jax(state, input_vec, param).block_until_ready()
262 | print("computation time: ", time.time() - time_start)
263 |
264 | print("\ntesting MULTIPLE integration SECOND TIME")
265 | time_start = time.time()
266 | key = random.PRNGKey(50)
267 | input_vec = random.randint(key, (model_jax.input_dim * threads,), minval=-2, maxval=2).reshape(
268 | threads, model_jax.input_dim
269 | )
270 | time_start = time.time()
271 | model_jax.compiled_integrate_jax(state, input_vec, param).block_until_ready()
272 | print("computation time: ", time.time() - time_start)
273 |
--------------------------------------------------------------------------------
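
The class above evaluates one Euler step of the centroidal dynamics over thousands of sampled inputs by combining jax.vmap (batch only over the inputs, broadcast the state and contact status) with jax.jit (compile once for a fixed device). A self-contained toy sketch of that pattern, using a made-up single-integrator instead of the centroidal model:

import jax
import jax.numpy as jnp

def euler_step(state, inputs, dt=0.04):
    # toy dynamics x_dot = u, integrated with one explicit Euler step
    return state + inputs * dt

# batch over axis 0 of the inputs only; the same state is reused for every sample
batched_step = jax.vmap(euler_step, in_axes=(None, 0), out_axes=0)
compiled_step = jax.jit(batched_step)

state = jnp.zeros(3)
sampled_inputs = jax.random.normal(jax.random.PRNGKey(0), (10000, 3))
next_states = compiled_step(state, sampled_inputs)  # shape (10000, 3)
print(next_states.shape)
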
/quadruped_pympc/helpers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iit-DLSLab/Quadruped-PyMPC/3b289c089ce103733071455760ccbbaef9a9672c/quadruped_pympc/helpers/__init__.py
--------------------------------------------------------------------------------
/quadruped_pympc/helpers/early_stance_detector.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from gym_quadruped.utils.quadruped_utils import LegsAttr
3 |
4 | from quadruped_pympc import config as cfg
5 |
6 | class EarlyStanceDetector:
7 | def __init__(self, feet_geom_id : LegsAttr = None,
8 | legs_order: tuple[str, str, str, str] = ('FL', 'FR', 'RL', 'RR')
9 | ):
10 | self.legs_order = legs_order
11 | self.feet_geom_id = feet_geom_id
12 | self.early_stance = LegsAttr(FL=False, FR=False, RR=False, RL=False )
13 | self.hitmoments = LegsAttr(FL=-1.0, FR=-1.0, RR=-1.0, RL=-1.0) # swing time of the last contact moment
14 | self.hitpoints = LegsAttr(FL=None, FR=None, RR=None, RL=None)
15 |
16 |
17 | self.trigger_mode = cfg.simulation_params['reflex_trigger_mode']
18 | if not self.trigger_mode:
19 | self.activated = False
20 | else:
21 | self.activated = True
22 |
23 | if(cfg.mpc_params['type'] == 'sampling'):
24 | self.activated = False # TO FIX: reflexes are disabled with the sampling controller for now
25 |
26 |
27 | self.early_stance_time_threshold = 0.07
28 | self.relative_tracking_error_threshold = 0.5
29 | self.absolute_min_distance_error_threshold = 0.1
30 |
31 | self.gait_cycles_after_step_height_enanchement = -1
32 | self.use_height_enhancement = cfg.simulation_params['reflex_height_enhancement']
33 | self.max_gait_cycles_height_enhancement = 6
34 |
35 |
36 | def update_detection(self, feet_pos: LegsAttr, des_feet_pos: LegsAttr,
37 | lift_off: LegsAttr, touch_down: LegsAttr,
38 | swing_time: list, swing_period: float,
39 | current_contact, previous_contact, mujoco_contact,
40 | stc):
41 | """
42 | Update the early stance detector.
43 |
44 | Parameters:
45 | feet_pos : Current feet positions.
46 | des_feet_pos : Desired feet positions.
47 | lift_off / touch_down : Lift-off and touch-down positions of all legs.
48 | swing_time : Time elapsed in the current swing phase of each leg.
49 | swing_period : Duration of the swing phase.
50 | current_contact / previous_contact : Current and previous contact state of each leg.
51 | mujoco_contact, stc : MuJoCo contact data and the swing trajectory controller.
52 | """
53 | if not self.activated:
54 | for leg_id,leg_name in enumerate(self.legs_order):
55 | self.early_stance[leg_name] = False
56 | self.hitmoments[leg_name] = -1.0
57 | self.hitpoints[leg_name] = None
58 | else:
59 | if self.trigger_mode == 'tracking':
60 | for leg_id,leg_name in enumerate(self.legs_order):
61 | disp = touch_down[leg_name] - lift_off[leg_name]
62 | #if swing_time[leg_id] < EARLY_STANCE_TIME_THRESHOLD or swing_time[leg_id] > swing_period - EARLY_STANCE_TIME_THRESHOLD:
63 | if current_contact[leg_id] == 1:
64 | self.early_stance[leg_name] = False # reset early stance in "planned" stance
65 | continue
66 | elif self.early_stance[leg_name] == False and swing_time[leg_id] > swing_period - self.early_stance_time_threshold:
67 | self.early_stance[leg_name] = False # avoid triggering early stance near the end of the swing phase (last early_stance_time_threshold seconds)
68 | continue
69 | else:
70 | local_disp = (des_feet_pos[leg_name] - feet_pos[leg_name]).squeeze()
71 | #print(f"Leg {leg_name} local_disp: {np.linalg.norm(local_disp)} disp: {np.linalg.norm(disp)}")
72 | #print("norm(local_disp)/norm(disp): ", np.linalg.norm(local_disp)/np.linalg.norm(disp))
73 | if self.early_stance[leg_name] == False:
74 | #if np.arccos(np.dot(disp, local_disp) / (np.linalg.norm(disp) * np.linalg.norm(local_disp))) < np.pi/3:
75 | if (np.linalg.norm(local_disp)/np.linalg.norm(disp)) > self.relative_tracking_error_threshold and np.linalg.norm(local_disp) > self.absolute_min_distance_error_threshold:
76 | self.hitpoints[leg_name] = feet_pos[leg_name].copy()
77 | self.hitmoments[leg_name] = swing_time[leg_id]
78 | self.early_stance[leg_name] = True # large swing tracking error, treat the leg as in early stance
79 | self.gait_cycles_after_step_height_enanchement = 0
80 | break
81 | else:
82 | self.early_stance[leg_name] = False
83 | self.hitmoments[leg_name] = -1.0
84 | self.hitpoints[leg_name] = None
85 |
86 | if self.early_stance[leg_name] == False:
87 | self.hitmoments[leg_name] = -1.0
88 | self.hitpoints[leg_name] = None
89 |
90 | elif self.trigger_mode == 'geom_contact':
91 | self.contact = mujoco_contact
92 | for leg_id,leg_name in enumerate(self.legs_order):
93 | contact_points = self.contact_points(leg_name)
94 | disp = touch_down[leg_name] - lift_off[leg_name]
95 | for contact_point in contact_points:
96 | # if np.linalg.norm(contact_point - touch_down[leg_name]) < EARLY_STANCE_THRESHOLD or np.linalg.norm(contact_point - lift_off[leg_name]) < EARLY_STANCE_THRESHOLD:
97 | if swing_time[leg_id] < self.early_stance_time_threshold or swing_time[leg_id] > swing_period - self.early_stance_time_threshold:
98 | self.early_stance[leg_name] = False # ignore contacts at the very beginning or end of the swing phase
99 | break
100 | else:
101 | local_disp = (contact_point - feet_pos[leg_name]).squeeze()
102 | if self.early_stance[leg_name] == False:
103 | if np.arccos(np.dot(disp, local_disp) / (np.linalg.norm(disp) * np.linalg.norm(local_disp))) < np.pi/3:
104 | self.hitpoints[leg_name] = contact_point.copy()
105 | self.hitmoments[leg_name] = swing_time[leg_id]
106 | self.early_stance[leg_name] = True # acos( disp dot local_disp / |disp| |local_disp|) < 60°
107 | break
108 | else:
109 | self.early_stance[leg_name] = False
110 | self.hitmoments[leg_name] = -1.0
111 | self.hitpoints[leg_name] = None
112 | if self.early_stance[leg_name] != True:
113 | self.hitmoments[leg_name] = -1.0
114 | self.hitpoints[leg_name] = None
115 |
116 |
117 | if(self.use_height_enhancement):
118 | for leg_id,leg_name in enumerate(self.legs_order):
119 | if current_contact[leg_id] == 1 and previous_contact[leg_id] == 0 and self.gait_cycles_after_step_height_enanchement >= 0:
120 | self.gait_cycles_after_step_height_enanchement += 1
121 |
122 | break
123 |
124 | if(self.gait_cycles_after_step_height_enanchement >= 0 and self.gait_cycles_after_step_height_enanchement < self.max_gait_cycles_height_enhancement):
125 | stc.swing_generator.step_height_enhancement = True
126 | else:
127 | stc.swing_generator.step_height_enhancement = False
128 | self.gait_cycles_after_step_height_enanchement = -1
129 |
130 |
131 | def contact_points(self, leg_name):
132 | """
133 | Get the contact points of a leg. Returns an empty list if the leg is not in contact.
134 |
135 | Parameters:
136 | leg_name : str Name of the leg.
137 |
138 | Returns:
139 | contact_points : list Contact points of the leg.
140 | """
141 | contact_points = []
142 | contact_id = np.where(np.any(self.contact.geom == self.feet_geom_id[leg_name],axis=1))
143 | # check contact_id is not empty
144 | if contact_id[0].size > 0:
145 | for i in range(contact_id[0].size):
146 | contact_points.append(self.contact.pos[contact_id[0][i]])
147 | return contact_points
--------------------------------------------------------------------------------
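
The 'tracking' trigger above declares a leg in early stance when its swing-tracking error is large both relative to the planned lift-off-to-touch-down displacement and in absolute terms. A small NumPy sketch of that test for a single leg, with made-up positions and the thresholds copied from the class:

import numpy as np

relative_tracking_error_threshold = 0.5
absolute_min_distance_error_threshold = 0.1

lift_off = np.array([0.30, 0.15, 0.02])
touch_down = np.array([0.45, 0.15, 0.02])    # planned swing displacement of about 0.15 m
des_foot_pos = np.array([0.40, 0.15, 0.08])  # where the swing trajectory wants the foot
foot_pos = np.array([0.31, 0.15, 0.02])      # where the foot actually is (e.g. stuck on an obstacle)

disp = touch_down - lift_off
local_disp = des_foot_pos - foot_pos

early_stance = (
    np.linalg.norm(local_disp) / np.linalg.norm(disp) > relative_tracking_error_threshold
    and np.linalg.norm(local_disp) > absolute_min_distance_error_threshold
)
print(early_stance)  # True for these numbers: the foot lags far behind its reference
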
/quadruped_pympc/helpers/foothold_reference_generator.py:
--------------------------------------------------------------------------------
1 | import collections
2 | import copy
3 |
4 | import mujoco
5 | import numpy as np
6 | from gym_quadruped.utils.quadruped_utils import LegsAttr
7 | from scipy.spatial.transform import Rotation
8 |
9 | from quadruped_pympc.helpers.quadruped_utils import GaitType
10 | from quadruped_pympc import config as cfg
11 |
12 | # Class for the generation of the reference footholds
13 | # TODO: @Giulio Should we convert this to a single function instead of a class? Stance time, can be passed as argument
14 | class FootholdReferenceGenerator:
15 | def __init__(
16 | self, stance_time: float, lift_off_positions: LegsAttr, vel_moving_average_length=20, hip_height: float = None
17 | ) -> None:
18 | """This method initializes the foothold generator class, which computes
19 | the reference foothold for the nonlinear MPC.
20 |
21 | Args:
22 | ----
23 | stance_time: The user-defined time of the stance phase.
24 | """
25 | self.base_vel_hist = collections.deque(maxlen=vel_moving_average_length)
26 | self.stance_time = stance_time
27 | self.hip_height = hip_height
28 | self.lift_off_positions = copy.deepcopy(lift_off_positions)
29 | self.touch_down_positions = copy.deepcopy(lift_off_positions)
30 |
31 | # The offset of the COM position wrt the estimated one. HACK compensation
32 | self.com_pos_offset_b = np.zeros((3,))
33 | self.com_pos_offset_w = np.zeros((3,))
34 |
35 | """R_W2H = np.array([np.cos(yaw), np.sin(yaw),
36 | -np.sin(yaw), np.cos(yaw)])
37 | R_W2H = R_W2H.reshape((2, 2))
38 | self.lift_off_positions_h = R_W2H @ (self.lift_off_positions - base_position[0:2])"""
39 | self.lift_off_positions_h = copy.deepcopy(lift_off_positions) # TODO wrong
40 | self.touch_down_positions_h = copy.deepcopy(lift_off_positions) # TODO wrong
41 |
42 | # The footholds are wrt the hip position, so if we want to change
43 | # the default foothold, we need to use a variable to add an offset
44 | self.hip_offset = 0.1
45 |
46 | # Placeholder for the last reference footholds before modification from height/vfa
47 | self.last_reference_footholds = LegsAttr(
48 | FL=np.array([0, 0, 0]), FR=np.array([0, 0, 0]), RL=np.array([0, 0, 0]), RR=np.array([0, 0, 0])
49 | )
50 |
51 | self.gravity_constant = cfg.gravity_constant
52 |
53 | def compute_footholds_reference(
54 | self,
55 | base_position: np.ndarray,
56 | base_ori_euler_xyz: np.ndarray,
57 | base_xy_lin_vel: np.ndarray,
58 | ref_base_xy_lin_vel: np.ndarray,
59 | hips_position: LegsAttr,
60 | com_height_nominal: np.float32,
61 | ) -> LegsAttr:
62 | """Compute the reference footholds for a quadruped robot, using simple geometric heuristics.
63 |
64 | TODO: This function should be adapted to:
65 | 1. Use the terrain slope estimation
66 | 2. Use the desired base angular (yaw_dot) velocity to compensate for the error in the velocity.
67 | Similar to the linear velocity compensation.
68 |
69 | TODO: This function should be vectorized so operations are not done for each leg separately.
70 |
71 | Args:
72 | ----
73 | base_position: (3,) The position of the center of mass of the robot.
74 | base_ori_euler_xyz: (3,) The orientation of the base in euler angles.
75 | base_xy_lin_vel: (2,) The [x,y] linear velocity of the base in world frame.
76 | ref_base_xy_lin_vel: (2,) The desired [x,y] linear velocity of the base in world frame.
77 | hips_position: (LegsAttr) The position of the hips of the robot in world frame.
78 |
79 | Returns:
80 | -------
81 | ref_feet: (LegsAttr) The reference footholds for the robot in world frame.
82 | """
83 | assert base_xy_lin_vel.shape == (2,) and ref_base_xy_lin_vel.shape == (2,), (
84 | f"Expected shape (2,):=[x_dot, y_dot], got {base_xy_lin_vel.shape} and {ref_base_xy_lin_vel.shape}."
85 | )
86 |
87 | # Get the rotation matrix to transform from world to horizontal frame (hip-centric)
88 | yaw = base_ori_euler_xyz[2]
89 | R_W2H = np.array([np.cos(yaw), np.sin(yaw), -np.sin(yaw), np.cos(yaw)])
90 | R_W2H = R_W2H.reshape((2, 2))
91 |
92 | # Compute desired and error velocity compensation values for all legs
93 | base_lin_vel_H = R_W2H @ base_xy_lin_vel
94 | ref_base_lin_vel_H = R_W2H @ ref_base_xy_lin_vel
95 |
96 | # Moving average of the base velocity
97 | self.base_vel_hist.append(base_lin_vel_H)
98 | base_vel_mvg = np.mean(list(self.base_vel_hist), axis=0)
99 | # Compensation due to average velocity
100 | # delta_ref_H = (self.stance_time / 2.) * base_vel_mvg
101 |
102 | # Compensation due to desired velocity
103 | delta_ref_H = (self.stance_time / 2.0) * ref_base_lin_vel_H
104 | delta_ref_H = np.clip(delta_ref_H, -self.hip_height * 1.5, self.hip_height * 1.5)
105 | vel_offset = np.concatenate((delta_ref_H, np.zeros(1)))
106 |
107 | # Compensation for the error in velocity tracking
108 | error_compensation = np.sqrt(com_height_nominal / self.gravity_constant) * (base_vel_mvg - ref_base_lin_vel_H)
109 | error_compensation = np.where(error_compensation > 0.05, 0.05, error_compensation)
110 | error_compensation = np.where(error_compensation < -0.05, -0.05, error_compensation)
111 | error_compensation = np.concatenate((error_compensation, np.zeros(1)))
112 |
113 | # Reference footholds in the horizontal frame
114 | ref_feet = LegsAttr(*[np.zeros(3) for _ in range(4)])
115 |
116 | # Reference feet positions are computed from the hips x,y position in the hip-centric/Horizontal frame
117 | ref_feet.FL[0:2] = R_W2H @ (hips_position.FL[0:2] - base_position[0:2])
118 | ref_feet.FR[0:2] = R_W2H @ (hips_position.FR[0:2] - base_position[0:2])
119 | ref_feet.RL[0:2] = R_W2H @ (hips_position.RL[0:2] - base_position[0:2])
120 | ref_feet.RR[0:2] = R_W2H @ (hips_position.RR[0:2] - base_position[0:2])
121 | # Offsets are introduced to account for x,y offsets from nominal hip and feet positions.
122 | # Offsets to the Y axis result in wider/narrower stance (+y values lead to wider stance in left/right)
123 | # Offsets to the X axis result in spread/crossed legs (+x values lead to spread legs in front/back)
124 | # TODO: This should not be hardcoded; it should be a property of the robot configuration and passed as an argument
125 | # to this function, not loaded from the config file.
126 | ref_feet.FL[1] += self.hip_offset
127 | ref_feet.FR[1] -= self.hip_offset
128 | ref_feet.RL[1] += self.hip_offset
129 | ref_feet.RR[1] -= self.hip_offset
130 |
131 | # Add the velocity compensation and desired velocity to the feet positions
132 | ref_feet += vel_offset + error_compensation # Add offset to all feet
133 |
134 | # Reference footholds in world frame
135 | ref_feet.FL[0:2] = R_W2H.T @ ref_feet.FL[:2] + base_position[0:2]
136 | ref_feet.FR[0:2] = R_W2H.T @ ref_feet.FR[:2] + base_position[0:2]
137 | ref_feet.RL[0:2] = R_W2H.T @ ref_feet.RL[:2] + base_position[0:2]
138 | ref_feet.RR[0:2] = R_W2H.T @ ref_feet.RR[:2] + base_position[0:2]
139 |
140 | # Add offset to the feet positions for manual com offset
141 | R_B2W = Rotation.from_euler("xyz", base_ori_euler_xyz).as_matrix()
142 | self.com_pos_offset_w = R_B2W @ self.com_pos_offset_b
143 | ref_feet.FL[0:2] += self.com_pos_offset_w[0:2]
144 | ref_feet.FR[0:2] += self.com_pos_offset_w[0:2]
145 | ref_feet.RL[0:2] += self.com_pos_offset_w[0:2]
146 | ref_feet.RR[0:2] += self.com_pos_offset_w[0:2]
147 |
148 | # TODO: we should maybe rotate them according to the terrain estimator,
149 | # or we can just do an exteroceptive height adjustment...
150 | for leg_id in ['FL', 'FR', 'RL', 'RR']:
151 | ref_feet[leg_id][2] = self.lift_off_positions[leg_id][2] # - 0.02
152 |
153 | # Update the last reference footholds to the current ones in order
154 | # to keep track of the modifications done by vision (if any)
155 | self.last_reference_footholds = copy.deepcopy(ref_feet)
156 |
157 | return ref_feet
158 |
159 | def update_lift_off_positions(
160 | self, previous_contact, current_contact, feet_pos, legs_order, gait_type, base_position, base_ori_euler_xyz
161 | ):
162 | yaw = base_ori_euler_xyz[2]
163 | R_W2H = np.array([np.cos(yaw), np.sin(yaw), 0, -np.sin(yaw), np.cos(yaw), 0, 0, 0, 1])
164 | R_W2H = R_W2H.reshape((3, 3))
165 |
166 | for leg_id, leg_name in enumerate(legs_order):
167 | if gait_type == GaitType.FULL_STANCE.value:
168 | self.lift_off_positions[leg_name] = feet_pos[leg_name]
169 | continue
170 |
171 | # Set lift-offs in world frame and horizontal frame
172 | if previous_contact[leg_id] == 1 and current_contact[leg_id] == 0:
173 | self.lift_off_positions[leg_name] = feet_pos[leg_name]
174 | self.lift_off_positions_h[leg_name] = R_W2H @ (self.lift_off_positions[leg_name] - base_position)
175 |
176 | elif previous_contact[leg_id] == 0 and current_contact[leg_id] == 0:
177 | # Update lift-offs in world frame
178 | self.lift_off_positions[leg_name] = R_W2H.T @ self.lift_off_positions_h[leg_name] + base_position
179 |
180 | def update_touch_down_positions(
181 | self, previous_contact, current_contact, feet_pos, legs_order, gait_type, base_position, base_ori_euler_xyz
182 | ):
183 | yaw = base_ori_euler_xyz[2]
184 | R_W2H = np.array([np.cos(yaw), np.sin(yaw), 0, -np.sin(yaw), np.cos(yaw), 0, 0, 0, 1])
185 | R_W2H = R_W2H.reshape((3, 3))
186 |
187 | for leg_id, leg_name in enumerate(legs_order):
188 | if gait_type == GaitType.FULL_STANCE.value:
189 | self.touch_down_positions[leg_name] = feet_pos[leg_name]
190 | continue
191 |
192 | # Set touch-downs
193 | if previous_contact[leg_id] == 0 and current_contact[leg_id] == 1:
194 | self.touch_down_positions[leg_name] = feet_pos[leg_name]
195 | self.touch_down_positions_h[leg_name] = R_W2H @ (self.touch_down_positions[leg_name] - base_position)
196 |
197 | elif previous_contact[leg_id] == 1 and current_contact[leg_id] == 1:
198 | # Update touch-downs in world frame
199 | self.touch_down_positions[leg_name] = R_W2H.T @ self.touch_down_positions_h[leg_name] + base_position
200 |
201 |
202 | if __name__ == "__main__":
203 | m = mujoco.MjModel.from_xml_path("./../simulation/unitree_go1/scene.xml")
204 | d = mujoco.MjData(m)
205 | mujoco.mj_step(m, d)
206 |
207 | FL_hip_id = mujoco.mj_name2id(m, mujoco.mjtObj.mjOBJ_BODY, "FL_hip")
208 | FR_hip_id = mujoco.mj_name2id(m, mujoco.mjtObj.mjOBJ_BODY, "FR_hip")
209 | RL_hip_id = mujoco.mj_name2id(m, mujoco.mjtObj.mjOBJ_BODY, "RL_hip")
210 | RR_hip_id = mujoco.mj_name2id(m, mujoco.mjtObj.mjOBJ_BODY, "RR_hip")
211 |
212 | hip_pos = LegsAttr(
213 | FL=d.body(FL_hip_id).xpos.copy(), FR=d.body(FR_hip_id).xpos.copy(),
214 | RL=d.body(RL_hip_id).xpos.copy(), RR=d.body(RR_hip_id).xpos.copy(),
215 | )
216 | print("hip_pos", hip_pos)
217 |
218 | stance_time = 0.5
219 | linear_com_velocity = np.array([0.1, 0.0])
220 | desired_linear_com_velocity = np.array([0.1, 0.0])
221 | com_height = d.qpos[2]
222 | lift_off_positions = LegsAttr(FL=np.zeros(3), FR=np.zeros(3), RL=np.zeros(3), RR=np.zeros(3))
223 | foothold_generator = FootholdReferenceGenerator(stance_time, lift_off_positions, hip_height=0.25) # hip_height: arbitrary test value
224 | footholds_reference = foothold_generator.compute_footholds_reference(
225 | base_position=d.qpos[0:3], base_ori_euler_xyz=np.zeros(3), base_xy_lin_vel=linear_com_velocity,
226 | ref_base_xy_lin_vel=desired_linear_com_velocity, hips_position=hip_pos, com_height_nominal=com_height,
227 | )
228 | print("footholds_reference: ", footholds_reference)
229 |
--------------------------------------------------------------------------------
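
The heuristic in compute_footholds_reference boils down to: take the hip position in the yaw-aligned horizontal frame, shift it by half a stance period along the desired velocity, and add a capture-point-like correction sqrt(h/g) * (v - v_ref), both clipped; the result is then rotated back to the world frame. A one-leg NumPy sketch of the same arithmetic (illustrative values; the velocity moving average and the lateral hip_offset are omitted):

import numpy as np

stance_time, hip_height, com_height, g = 0.5, 0.35, 0.30, 9.81
yaw = 0.3
base_xy = np.array([1.0, 2.0])
hip_xy = np.array([1.25, 2.15])                              # FL hip position, world frame
v_xy, v_ref_xy = np.array([0.4, 0.0]), np.array([0.5, 0.0])  # measured / desired base velocity, world frame

R_W2H = np.array([[np.cos(yaw), np.sin(yaw)], [-np.sin(yaw), np.cos(yaw)]])

hip_h = R_W2H @ (hip_xy - base_xy)           # hip in the horizontal (yaw-aligned) frame
v_h, v_ref_h = R_W2H @ v_xy, R_W2H @ v_ref_xy

delta_ref = np.clip((stance_time / 2.0) * v_ref_h, -1.5 * hip_height, 1.5 * hip_height)
error_comp = np.clip(np.sqrt(com_height / g) * (v_h - v_ref_h), -0.05, 0.05)

foothold_h = hip_h + delta_ref + error_comp
foothold_w = R_W2H.T @ foothold_h + base_xy  # back to the world frame
print(foothold_w)
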
/quadruped_pympc/helpers/inverse_kinematics/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iit-DLSLab/Quadruped-PyMPC/3b289c089ce103733071455760ccbbaef9a9672c/quadruped_pympc/helpers/inverse_kinematics/__init__.py
--------------------------------------------------------------------------------
/quadruped_pympc/helpers/inverse_kinematics/inverse_kinematics_numeric_adam.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | np.set_printoptions(precision=3, suppress=True)
4 | from numpy.linalg import norm, solve
5 | import time
6 |
7 | import casadi as cs
8 |
9 | # import example_robot_data as robex
10 | import copy
11 |
12 | # Mujoco magic
13 | import mujoco
14 | import mujoco.viewer
15 | from adam import Representations
16 |
17 | # Adam and Liecasadi magic
18 | from adam.casadi import KinDynComputations
19 | from liecasadi import SO3
20 |
21 | import gym_quadruped
22 | import os
23 |
24 | dir_path = os.path.dirname(os.path.realpath(__file__))
25 | gym_quadruped_path = os.path.dirname(gym_quadruped.__file__)
26 |
27 |
28 | from quadruped_pympc import config as cfg
29 |
30 |
31 | # Class for solving a generic inverse kinematics problem
32 | class InverseKinematicsNumeric:
33 | def __init__(self) -> None:
34 | """
35 | This method initializes the inverse kinematics solver class.
36 |
37 | Args:
38 |
39 | """
40 |
41 | if cfg.robot == 'go2':
42 | urdf_filename = gym_quadruped_path + '/robot_model/go2/go2.urdf'
43 | xml_filename = gym_quadruped_path + '/robot_model/go2/go2.xml'
44 | if cfg.robot == 'go1':
45 | urdf_filename = gym_quadruped_path + '/robot_model/go1/go1.urdf'
46 | xml_filename = gym_quadruped_path + '/robot_model/go1/go1.xml'
47 | elif cfg.robot == 'aliengo':
48 | urdf_filename = gym_quadruped_path + '/robot_model/aliengo/aliengo.urdf'
49 | xml_filename = gym_quadruped_path + '/robot_model/aliengo/aliengo.xml'
50 | elif cfg.robot == 'b2':
51 | urdf_filename = gym_quadruped_path + '/robot_model/b2/b2.urdf'
52 | xml_filename = gym_quadruped_path + '/robot_model/b2/b2.xml'
53 | elif cfg.robot == 'hyqreal':
54 | urdf_filename = gym_quadruped_path + '/robot_model/hyqreal/hyqreal.urdf'
55 | xml_filename = gym_quadruped_path + '/robot_model/hyqreal/hyqreal.xml'
56 | elif cfg.robot == 'mini_cheetah':
57 | urdf_filename = gym_quadruped_path + '/robot_model/mini_cheetah/mini_cheetah.urdf'
58 | xml_filename = gym_quadruped_path + '/robot_model/mini_cheetah/mini_cheetah.xml'
59 |
60 | joint_list = [
61 | 'FL_hip_joint',
62 | 'FL_thigh_joint',
63 | 'FL_calf_joint',
64 | 'FR_hip_joint',
65 | 'FR_thigh_joint',
66 | 'FR_calf_joint',
67 | 'RL_hip_joint',
68 | 'RL_thigh_joint',
69 | 'RL_calf_joint',
70 | 'RR_hip_joint',
71 | 'RR_thigh_joint',
72 | 'RR_calf_joint',
73 | ]
74 |
75 | self.kindyn = KinDynComputations(urdfstring=urdf_filename, joints_name_list=joint_list)
76 | self.kindyn.set_frame_velocity_representation(representation=Representations.MIXED_REPRESENTATION)
77 |
78 | self.forward_kinematics_FL_fun = self.kindyn.forward_kinematics_fun("FL_foot")
79 | self.forward_kinematics_FR_fun = self.kindyn.forward_kinematics_fun("FR_foot")
80 | self.forward_kinematics_RL_fun = self.kindyn.forward_kinematics_fun("RL_foot")
81 | self.forward_kinematics_RR_fun = self.kindyn.forward_kinematics_fun("RR_foot")
82 |
83 | self.jacobian_FL_fun = self.kindyn.jacobian_fun("FL_foot")
84 | self.jacobian_FR_fun = self.kindyn.jacobian_fun("FR_foot")
85 | self.jacobian_RL_fun = self.kindyn.jacobian_fun("RL_foot")
86 | self.jacobian_RR_fun = self.kindyn.jacobian_fun("RR_foot")
87 |
88 | q = cs.SX.sym('q', 12 + 7)
89 | FL_foot_target_position = cs.SX.sym('FL_foot_target_position', 3)
90 | FR_foot_target_position = cs.SX.sym('FR_foot_target_position', 3)
91 | RL_foot_target_position = cs.SX.sym('RL_foot_target_position', 3)
92 | RR_foot_target_position = cs.SX.sym('RR_foot_target_position', 3)
93 | ik = self.compute_solution(
94 | q, FL_foot_target_position, FR_foot_target_position, RL_foot_target_position, RR_foot_target_position
95 | )
96 | self.fun_compute_solution = cs.Function(
97 | 'fun_ik',
98 | [q, FL_foot_target_position, FR_foot_target_position, RL_foot_target_position, RR_foot_target_position],
99 | [ik],
100 | )
101 |
102 | def compute_solution(
103 | self,
104 | q: np.ndarray,
105 | FL_foot_target_position: np.ndarray,
106 | FR_foot_target_position: np.ndarray,
107 | RL_foot_target_position: np.ndarray,
108 | RR_foot_target_position: np.ndarray,
109 | ) -> np.ndarray:
110 | """
111 | This method computes the inverse kinematics from initial joint angles and desired foot target positions.
112 |
113 | Args:
114 | q (np.ndarray): The initial joint angles.
115 | FL_foot_target_position (np.ndarray): The desired position of the front-left foot.
116 | FR_foot_target_position (np.ndarray): The desired position of the front-right foot.
117 | RL_foot_target_position (np.ndarray): The desired position of the rear-left foot.
118 | RR_foot_target_position (np.ndarray): The desired position of the rear-right foot.
119 |
120 | Returns:
121 | np.ndarray: The joint angles that achieve the desired foot positions.
122 | """
123 |
124 | eps = 1e-2
125 | IT_MAX = 5
126 | DT = 1e-2
127 | damp = 1e-2
128 | damp_matrix = damp * np.eye(12)
129 |
130 | i = 0
131 |
132 | err_FL = cs.SX.zeros(3, 1)
133 | err_FR = cs.SX.zeros(3, 1)
134 | err_RL = cs.SX.zeros(3, 1)
135 | err_RR = cs.SX.zeros(3, 1)
136 | err = cs.SX.zeros(3)
137 |
138 | q_joint = q[7:]
139 | quaternion = q[3:7]
140 | quaternion = np.array([quaternion[1], quaternion[2], quaternion[3], quaternion[0]])
141 | R = SO3.from_quat(quaternion).as_matrix()
142 | # H = cs.DM.eye(4)
143 | H = cs.SX.eye(4)
144 |
145 | H[0:3, 0:3] = R
146 | H[0:3, 3] = q[0:3]
147 |
148 | while i <= IT_MAX:
149 | FL_foot_actual_pos = self.forward_kinematics_FL_fun(H, q_joint)[0:3, 3]
150 | FR_foot_actual_pos = self.forward_kinematics_FR_fun(H, q_joint)[0:3, 3]
151 | RL_foot_actual_pos = self.forward_kinematics_RL_fun(H, q_joint)[0:3, 3]
152 | RR_foot_actual_pos = self.forward_kinematics_RR_fun(H, q_joint)[0:3, 3]
153 |
154 | err_FL = FL_foot_target_position - FL_foot_actual_pos
155 | err_FR = FR_foot_target_position - FR_foot_actual_pos
156 | err_RL = RL_foot_target_position - RL_foot_actual_pos
157 | err_RR = RR_foot_target_position - RR_foot_actual_pos
158 |
159 | err = err_FL + err_FR + err_RL + err_RR
160 | norm_err = cs.norm_2(err)
161 | # if(norm_err < eps):
162 | # success = True
163 | # break
164 |
165 | J_FL = self.jacobian_FL_fun(H, q_joint)[0:3, 6:]
166 | J_FR = self.jacobian_FR_fun(H, q_joint)[0:3, 6:]
167 | J_RL = self.jacobian_RL_fun(H, q_joint)[0:3, 6:]
168 | J_RR = self.jacobian_RR_fun(H, q_joint)[0:3, 6:]
169 |
170 | total_jac = cs.vertcat(J_FL, J_FR, J_RL, J_RR)
171 | total_err = 100.0 * cs.vertcat(err_FL, err_FR, err_RL, err_RR)
172 | damped_pinv = cs.inv(total_jac.T @ total_jac + damp_matrix) @ total_jac.T
173 | v = damped_pinv @ total_err
174 | q_joint = q_joint + DT * v
175 |
176 | i += 1
177 |
178 | return q_joint
179 |
180 |
181 | if __name__ == "__main__":
182 | if cfg.robot == 'go2':
183 | xml_filename = gym_quadruped_path + '/robot_model/go2/go2.xml'
184 | if cfg.robot == 'go1':
185 | xml_filename = gym_quadruped_path + '/robot_model/go1/go1.xml'
186 | elif cfg.robot == 'aliengo':
187 | xml_filename = gym_quadruped_path + '/robot_model/aliengo/aliengo.xml'
188 | elif cfg.robot == 'hyqreal':
189 | xml_filename = gym_quadruped_path + '/robot_model/hyqreal/hyqreal.xml'
190 | elif cfg.robot == 'mini_cheetah':
191 | xml_filename = gym_quadruped_path + '/robot_model/mini_cheetah/mini_cheetah.xml'
192 |
193 | ik = InverseKinematicsNumeric()
194 |
195 | # Check consistency in mujoco
196 | m = mujoco.MjModel.from_xml_path(xml_filename)
197 | d = mujoco.MjData(m)
198 |
199 | random_q_joint = np.random.rand(12)
200 | d.qpos[7:] = random_q_joint
201 |
202 | # random quaternion
203 | rand_quat = np.random.rand(4)
204 | rand_quat = rand_quat / np.linalg.norm(rand_quat)
205 | d.qpos[3:7] = rand_quat
206 |
207 | mujoco.mj_step(m, d)
208 |
209 | FL_id = mujoco.mj_name2id(m, mujoco.mjtObj.mjOBJ_GEOM, "FL")
210 | FR_id = mujoco.mj_name2id(m, mujoco.mjtObj.mjOBJ_GEOM, "FR")
211 | RL_id = mujoco.mj_name2id(m, mujoco.mjtObj.mjOBJ_GEOM, "RL")
212 | RR_id = mujoco.mj_name2id(m, mujoco.mjtObj.mjOBJ_GEOM, "RR")
213 | FL_foot_target_position = d.geom_xpos[FL_id]
214 | FR_foot_target_position = d.geom_xpos[FR_id]
215 | RL_foot_target_position = d.geom_xpos[RL_id]
216 | RR_foot_target_position = d.geom_xpos[RR_id]
217 |
218 | print("FL foot target position: ", FL_foot_target_position)
219 | print("FR foot target position: ", FR_foot_target_position)
220 | print("RL foot target position: ", RL_foot_target_position)
221 | print("RR foot target position: ", RR_foot_target_position)
222 |
223 | initial_q = copy.deepcopy(d.qpos)
224 | initial_q[7:] = np.random.rand(12)
225 | quaternion = d.qpos[3:7]
226 | quaternion = np.array([quaternion[1], quaternion[2], quaternion[3], quaternion[0]])
227 | R = SO3.from_quat(quaternion).as_matrix()
228 | H = cs.DM.eye(4)
229 | H[0:3, 0:3] = R
230 | H[0:3, 3] = d.qpos[0:3]
231 | print("FL foot start position", ik.forward_kinematics_FL_fun(H, initial_q[7:])[0:3, 3])
232 | print("FR foot start position", ik.forward_kinematics_FR_fun(H, initial_q[7:])[0:3, 3])
233 | print("RL foot start position", ik.forward_kinematics_RL_fun(H, initial_q[7:])[0:3, 3])
234 | print("RR foot start position", ik.forward_kinematics_RR_fun(H, initial_q[7:])[0:3, 3])
235 |
236 | initial_time = time.time()
237 | solution = ik.fun_compute_solution(
238 | initial_q, FL_foot_target_position, FR_foot_target_position, RL_foot_target_position, RR_foot_target_position
239 | )
240 | print("time: ", time.time() - initial_time)
241 |
242 | print("\n")
243 | print("MUJOCO SOLUTION")
244 | foot_position_FL = d.geom_xpos[FL_id]
245 | foot_position_FR = d.geom_xpos[FR_id]
246 | foot_position_RL = d.geom_xpos[RL_id]
247 | foot_position_RR = d.geom_xpos[RR_id]
248 | print("joints: ", d.qpos[7:])
249 | print("FL foot position: ", foot_position_FL)
250 | print("FR foot position: ", foot_position_FR)
251 | print("RL foot position: ", foot_position_RL)
252 | print("RR foot position: ", foot_position_RR)
253 |
254 | print("\n")
255 | print("ADAM SOLUTION")
256 | print("joints: ", solution)
257 | quaternion = d.qpos[3:7]
258 | quaternion = np.array([quaternion[1], quaternion[2], quaternion[3], quaternion[0]])
259 | R = SO3.from_quat(quaternion).as_matrix()
260 | H = cs.SX.eye(4)
261 | H[0:3, 0:3] = R
262 | H[0:3, 3] = d.qpos[0:3]
263 | print("FL foot position", ik.forward_kinematics_FL_fun(H, solution)[0:3, 3])
264 | print("FR foot position", ik.forward_kinematics_FR_fun(H, solution)[0:3, 3])
265 | print("RL foot position", ik.forward_kinematics_RL_fun(H, solution)[0:3, 3])
266 | print("RR foot position", ik.forward_kinematics_RR_fun(H, solution)[0:3, 3])
267 |
268 | with mujoco.viewer.launch_passive(m, d) as viewer:
269 | while True:
270 | viewer.sync()
271 |
--------------------------------------------------------------------------------
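
Both numeric IK helpers in this folder iterate damped least-squares steps: stack the 3x12 position Jacobians and position errors of the four feet, then take dq = (J^T J + damp*I)^(-1) J^T e and integrate it for a few iterations. A generic NumPy sketch of a single step with a random Jacobian (purely illustrative, reusing the damping, gain and step values from the file above):

import numpy as np

rng = np.random.default_rng(0)
J = rng.standard_normal((12, 12))    # stacked foot Jacobians (4 feet x 3 rows, 12 joints)
e = rng.standard_normal(12) * 0.01   # stacked foot position errors [m]

damp, DT, gain = 1e-2, 1e-2, 100.0
damped_pinv = np.linalg.inv(J.T @ J + damp * np.eye(12)) @ J.T
dq = damped_pinv @ (gain * e)        # joint velocity that reduces the foot errors
q_joint_update = DT * dq             # one integration step, as in compute_solution
print(q_joint_update.shape)
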
/quadruped_pympc/helpers/inverse_kinematics/inverse_kinematics_numeric_mujoco.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | np.set_printoptions(precision=3, suppress=True)
4 | from numpy.linalg import norm, solve
5 | import time
6 | import casadi as cs
7 |
8 | # import example_robot_data as robex
9 | import copy
10 | import os
11 | import time
12 |
13 | import gym_quadruped
14 |
15 | # Mujoco magic
16 | import mujoco
17 | import mujoco.viewer
18 |
19 | # Adam and Liecasadi magic
20 |
21 | import gym_quadruped
22 | import os
23 |
24 | dir_path = os.path.dirname(os.path.realpath(__file__))
25 | gym_quadruped_path = os.path.dirname(gym_quadruped.__file__)
26 |
27 |
28 | from quadruped_pympc import config as cfg
29 |
30 | from gym_quadruped.quadruped_env import QuadrupedEnv
31 |
32 | from quadruped_pympc import config as cfg
33 |
34 | IT_MAX = 5
35 | DT = 1e-2
36 | damp = 1e-3
37 | damp_matrix = damp * np.eye(12)
38 |
39 |
40 | # Class for solving a generic inverse kinematics problem
41 | class InverseKinematicsNumeric:
42 | def __init__(self) -> None:
43 | """
44 | This method initializes the inverse kinematics solver class.
45 |
46 | Args:
47 |
48 | """
49 |
50 | robot_name = cfg.robot
51 |
52 | # Create the quadruped robot environment ---------------------
53 | self.env = QuadrupedEnv(
54 | robot=robot_name,
55 | )
56 |
57 | def compute_solution(
58 | self,
59 | q: np.ndarray,
60 | FL_foot_target_position: np.ndarray,
61 | FR_foot_target_position: np.ndarray,
62 | RL_foot_target_position: np.ndarray,
63 | RR_foot_target_position: np.ndarray,
64 | ) -> np.ndarray:
65 | """
66 | This method computes the inverse kinematics from initial joint angles and desired foot target positions.
67 |
68 | Args:
69 | q (np.ndarray): The initial joint angles.
70 | FL_foot_target_position (np.ndarray): The desired position of the front-left foot.
71 | FR_foot_target_position (np.ndarray): The desired position of the front-right foot.
72 | RL_foot_target_position (np.ndarray): The desired position of the rear-left foot.
73 | RR_foot_target_position (np.ndarray): The desired position of the rear-right foot.
74 |
75 | Returns:
76 | np.ndarray: The joint angles that achieve the desired foot positions.
77 | """
78 |
79 | # Set the initial states
80 | self.env.mjData.qpos = q
81 | mujoco.mj_fwdPosition(self.env.mjModel, self.env.mjData)
82 |
83 | for j in range(IT_MAX):
84 | feet_pos = self.env.feet_pos(frame='world')
85 |
86 | FL_foot_actual_pos = feet_pos.FL
87 | FR_foot_actual_pos = feet_pos.FR
88 | RL_foot_actual_pos = feet_pos.RL
89 | RR_foot_actual_pos = feet_pos.RR
90 |
91 | err_FL = FL_foot_target_position - FL_foot_actual_pos
92 | err_FR = FR_foot_target_position - FR_foot_actual_pos
93 | err_RL = RL_foot_target_position - RL_foot_actual_pos
94 | err_RR = RR_foot_target_position - RR_foot_actual_pos
95 |
96 |
97 | # Compute feet jacobian
98 | feet_jac = self.env.feet_jacobians(frame='world', return_rot_jac=False)
99 |
100 | J_FL = feet_jac.FL[:, 6:]
101 | J_FR = feet_jac.FR[:, 6:]
102 | J_RL = feet_jac.RL[:, 6:]
103 | J_RR = feet_jac.RR[:, 6:]
104 |
105 | total_jac = np.vstack((J_FL, J_FR, J_RL, J_RR))
106 | total_err = 100*np.hstack((err_FL, err_FR, err_RL, err_RR))
107 |
108 | # Solve the IK problem
109 | #dq = total_jac.T @ np.linalg.solve(total_jac @ total_jac.T + damp_matrix, total_err)
110 | damped_pinv = np.linalg.inv(total_jac.T @ total_jac + damp_matrix) @ total_jac.T
111 | dq = damped_pinv @ total_err
112 |
113 | # Integrate joint velocities to obtain joint positions.
114 | q_joint = self.env.mjData.qpos.copy()[7:]
115 | q_joint += dq * DT
116 | self.env.mjData.qpos[7:] = q_joint
117 |
118 | mujoco.mj_fwdPosition(self.env.mjModel, self.env.mjData)
119 | #mujoco.mj_kinematics(self.env.mjModel, self.env.mjData)
120 | #mujoco.mj_step(self.env.mjModel, self.env.mjData)
121 |
122 | return q_joint
123 |
124 |
125 | if __name__ == "__main__":
126 | if cfg.robot == 'go2':
127 | xml_filename = gym_quadruped_path + '/robot_model/go2/go2.xml'
128 | if cfg.robot == 'go1':
129 | xml_filename = gym_quadruped_path + '/robot_model/go1/go1.xml'
130 | elif cfg.robot == 'aliengo':
131 | xml_filename = gym_quadruped_path + '/robot_model/aliengo/aliengo.xml'
132 | elif cfg.robot == 'hyqreal':
133 | xml_filename = gym_quadruped_path + '/robot_model/hyqreal/hyqreal.xml'
134 | elif cfg.robot == 'mini_cheetah':
135 | xml_filename = gym_quadruped_path + '/robot_model/mini_cheetah/mini_cheetah.xml'
136 |
137 | ik = InverseKinematicsNumeric()
138 |
139 | # Check consistency in mujoco
140 | m = mujoco.MjModel.from_xml_path(xml_filename)
141 | d = mujoco.MjData(m)
142 |
143 | random_q_joint = np.random.rand(12)
144 | d.qpos[7:] = random_q_joint
145 |
146 | # random quaternion
147 | rand_quat = np.random.rand(4)
148 | rand_quat = rand_quat / np.linalg.norm(rand_quat)
149 | d.qpos[3:7] = rand_quat
150 |
151 | mujoco.mj_fwdPosition(m, d)
152 |
153 | FL_id = mujoco.mj_name2id(m, mujoco.mjtObj.mjOBJ_GEOM, "FL")
154 | FR_id = mujoco.mj_name2id(m, mujoco.mjtObj.mjOBJ_GEOM, "FR")
155 | RL_id = mujoco.mj_name2id(m, mujoco.mjtObj.mjOBJ_GEOM, "RL")
156 | RR_id = mujoco.mj_name2id(m, mujoco.mjtObj.mjOBJ_GEOM, "RR")
157 | FL_foot_target_position = d.geom_xpos[FL_id]
158 | FR_foot_target_position = d.geom_xpos[FR_id]
159 | RL_foot_target_position = d.geom_xpos[RL_id]
160 | RR_foot_target_position = d.geom_xpos[RR_id]
161 |
162 | print("FL foot target position: ", FL_foot_target_position)
163 | print("FR foot target position: ", FR_foot_target_position)
164 | print("RL foot target position: ", RL_foot_target_position)
165 | print("RR foot target position: ", RR_foot_target_position)
166 |
167 | initial_q = copy.deepcopy(d.qpos)
168 | initial_q[7:] = np.random.rand(12)
169 |
170 | ik.env.mjData.qpos = initial_q
171 | mujoco.mj_fwdPosition(ik.env.mjModel, ik.env.mjData)
172 | feet = ik.env.feet_pos(frame="world")
173 |
174 | #print("joints start position: ", initial_q)
175 | print("FL foot start position", feet.FL)
176 | print("FR foot start position", feet.FR)
177 | print("RL foot start position", feet.RL)
178 | print("RR foot start position", feet.RR)
179 |
180 | initial_time = time.time()
181 | solution = ik.compute_solution(
182 | initial_q, FL_foot_target_position, FR_foot_target_position, RL_foot_target_position, RR_foot_target_position
183 | )
184 | print("time: ", time.time() - initial_time)
185 |
186 |
187 | print("\n")
188 | print("MUJOCO IK SOLUTION")
189 | ik.env.mjData.qpos[7:] = solution
190 | mujoco.mj_fwdPosition(ik.env.mjModel, ik.env.mjData)
191 | feet = ik.env.feet_pos(frame="world")
192 |
193 | #print("joints solution: ", ik.env.mjData.qpos)
194 | print("FL foot solution position", feet.FL)
195 | print("FR foot solution position", feet.FR)
196 | print("RL foot solution position", feet.RL)
197 | print("RR foot solution position", feet.RR)
198 |
199 | with mujoco.viewer.launch_passive(m, d) as viewer:
200 | while True:
201 | viewer.sync()
202 |
--------------------------------------------------------------------------------
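
The commented-out update in the file above, dq = J^T (J J^T + damp*I)^(-1) e, and the one actually used, dq = (J^T J + damp*I)^(-1) J^T e, are algebraically equivalent for the same damping; a quick NumPy check with random data (not from the repository):

import numpy as np

rng = np.random.default_rng(1)
J = rng.standard_normal((12, 12))
e = rng.standard_normal(12)
damp = 1e-3

dq_a = np.linalg.inv(J.T @ J + damp * np.eye(12)) @ J.T @ e
dq_b = J.T @ np.linalg.solve(J @ J.T + damp * np.eye(12), e)
assert np.allclose(dq_a, dq_b)
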
/quadruped_pympc/helpers/inverse_kinematics/inverse_kinematics_qp.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | np.set_printoptions(precision=3, suppress=True)
4 | from numpy.linalg import norm
5 | import time
6 |
7 | import casadi as cs
8 | # import example_robot_data as robex
9 |
10 | import os
11 |
12 | dir_path = os.path.dirname(os.path.realpath(__file__))
13 |
14 | import sys
15 |
16 | sys.path.append(dir_path)
17 | sys.path.append(dir_path + "/../../")
18 |
19 | # from jnrh2023.utils.meshcat_viewer_wrapper import MeshcatVisualizer
20 |
21 | # Mujoco magic
22 |
23 | import mujoco
24 | import mujoco.viewer
25 |
26 | # Pinocchio magic
27 | import pinocchio as pin
28 | from pinocchio import casadi as cpin
29 |
30 |
31 |
32 | # Class for solving a generic inverse kinematics problem
33 | class InverseKinematicsQP:
34 | def __init__(self, robot: pin.robot_wrapper.RobotWrapper, use_viewer: bool = None) -> None:
35 | """
36 | This method initializes the inverse kinematics solver class.
37 |
38 | Args:
39 | robot: The robot model.
40 | use_viewer: Whether to use the Meshcat viewer.
41 | """
42 | self.robot = robot
43 | self.model = self.robot.model
44 | self.data = self.robot.data
45 |
46 | # generate the casadi graph
47 | cmodel = cpin.Model(self.model)
48 | cdata = cmodel.createData()
49 | cq = cs.SX.sym("q", self.model.nq, 1)
50 |
51 | # precompute the forward kinematics graph
52 | cpin.framesForwardKinematics(cmodel, cdata, cq)
53 |
54 | # initialize the viewer if requested
55 | self.use_viewer = use_viewer
56 | """if(self.use_viewer):
57 | # Open the viewer
58 | self.viz = MeshcatVisualizer(self.robot)
59 | self.viz.display(self.robot.q0)
60 | self.viewer = self.viz.viewer
61 | time.sleep(10)"""
62 |
63 | # takes the ID of the feet, and generate a casadi function for a generic forward kinematics
64 | self.FL_foot_id = self.model.getFrameId("FL_foot_fixed")
65 | self.FL_foot_position = cs.Function("FL_foot_pos", [cq], [cdata.oMf[self.FL_foot_id].translation])
66 |
67 | self.FR_foot_id = self.model.getFrameId("FR_foot_fixed")
68 | self.FR_foot_position = cs.Function("FR_foot_pos", [cq], [cdata.oMf[self.FR_foot_id].translation])
69 |
70 | self.RL_foot_id = self.model.getFrameId("RL_foot_fixed")
71 | self.RL_foot_position = cs.Function("RL_foot_pos", [cq], [cdata.oMf[self.RL_foot_id].translation])
72 |
73 | self.RR_foot_id = self.model.getFrameId("RR_foot_fixed")
74 | self.RR_foot_position = cs.Function("RR_foot_pos", [cq], [cdata.oMf[self.RR_foot_id].translation])
75 |
76 | # create the NLP for computing the inverse kinematics
77 | self.create_nlp_ik()
78 |
79 | def create_nlp_ik(self) -> None:
80 | """
81 | This method creates the NLP for the inverse kinematics problem and sets up the necessary variables.
82 | """
83 | # create NLP
84 | self.opti = cs.Opti()
85 |
86 | # casadi param to be updated at each request
87 | self.base_pose = self.opti.parameter(7) # 7 is the number of DoF of the base, position + quaternion
88 | self.FL_foot_target_position = self.opti.parameter(3) # 3 is the number of DoF of the foot, position
89 | self.FR_foot_target_position = self.opti.parameter(3) # 3 is the number of DoF of the foot, position
90 | self.RL_foot_target_position = self.opti.parameter(3) # 3 is the number of DoF of the foot, position
91 | self.RR_foot_target_position = self.opti.parameter(3) # 3 is the number of DoF of the foot, position
92 |
93 | # define the configuration variables (base pose + joints))
94 | self.var_q = self.opti.variable(self.model.nq)
95 |
96 | # define the cost function (it's parametric!!)
97 | totalcost = (
98 | cs.sumsqr(self.FL_foot_position(self.var_q) - self.FL_foot_target_position)
99 | + cs.sumsqr(self.FR_foot_position(self.var_q) - self.FR_foot_target_position)
100 | + cs.sumsqr(self.RL_foot_position(self.var_q) - self.RL_foot_target_position)
101 | + cs.sumsqr(self.RR_foot_position(self.var_q) - self.RR_foot_target_position)
102 | )
103 | self.opti.minimize(totalcost)
104 |
105 | # define the solver
106 | p_opts = dict(print_time=False, verbose=False)
107 | s_opts = dict(print_level=0)
108 | self.opti.solver("ipopt", p_opts, s_opts)
109 |
110 | # define the parametric constraints for the base, it's fixed, only the leg can move!
111 | self.opti.subject_to(self.var_q[0:7] == self.base_pose)
112 |
113 | # if use_viewer is yes, you can see the different solution iteration by iteration
114 | # in the browser
115 | if self.use_viewer:
116 | self.opti.callback(lambda i: self.callback(self.opti.debug.value(self.var_q)))
117 |
118 | def compute_solution(
119 | self,
120 | q: np.ndarray,
121 | FL_foot_target_position: np.ndarray,
122 | FR_foot_target_position: np.ndarray,
123 | RL_foot_target_position: np.ndarray,
124 | RR_foot_target_position: np.ndarray,
125 | ) -> np.ndarray:
126 | """
127 | This method computes the inverse kinematics from initial joint angles and desired foot target positions.
128 |
129 | Args:
130 | q (np.ndarray): The initial joint angles.
131 | FL_foot_target_position (np.ndarray): The desired position of the front-left foot.
132 | FR_foot_target_position (np.ndarray): The desired position of the front-right foot.
133 | RL_foot_target_position (np.ndarray): The desired position of the rear-left foot.
134 | RR_foot_target_position (np.ndarray): The desired position of the rear-right foot.
135 |
136 | Returns:
137 | np.ndarray: The joint angles that achieve the desired foot positions.
138 | """
139 |
140 | # print("initial state", q)
141 | # set the value for the constraints
142 | self.opti.set_value(self.base_pose, q[0:7])
143 |
144 | # set initial guess
145 | self.opti.set_initial(self.var_q, q)
146 |
147 | # set the value for the target
148 | self.opti.set_value(self.FL_foot_target_position, FL_foot_target_position)
149 | self.opti.set_value(self.FR_foot_target_position, FR_foot_target_position)
150 | self.opti.set_value(self.RL_foot_target_position, RL_foot_target_position)
151 | self.opti.set_value(self.RR_foot_target_position, RR_foot_target_position)
152 |
153 | # Caution: in case the solver does not converge, we are picking the candidate values
154 | # at the last iteration in opti.debug, and they are NO guarantee of what they mean.
155 | try:
156 | sol = self.opti.solve_limited()
157 | sol_q = self.opti.value(self.var_q)
158 | # print("final q: \n", sol_q)
159 | return sol_q
160 | except Exception:
161 | print("ERROR in convergence, returning the last iterate from opti.debug.")
162 | return self.opti.debug.value(self.var_q)
163 |
164 | def callback(self, q: np.ndarray) -> None:
165 | """
166 | This method is called by the solver at each iteration (if use_viewer is TRUE)
167 | and displays the current joint angles and foot positions.
168 | """
169 | pin.framesForwardKinematics(self.model, self.data, q)
170 | # transform_frame_to_world = self.data.oMf[self.FL_foot_id]
171 | # self.viewer["target"].set_transform(self.transform_target_to_world.np)
172 | # self.viewer["current"].set_transform(transform_frame_to_world.np)
173 | self.viz.display(q)
174 | print("q: \n", q)
175 | time.sleep(0.5)
176 |
177 |
178 | if __name__ == "__main__":
179 | import example_robot_data as robex; robot = robex.load("go1")  # example_robot_data is an optional demo-only dependency
180 | ik = InverseKinematicsQP(robot, use_viewer=False)
181 |
182 | FL_foot_target_position = np.array([0.1, 0, -0.02])
183 | FR_foot_target_position = np.array([-0.08, 0, 0])
184 | RL_foot_target_position = np.array([-0.12, 0, 0.06])
185 | RR_foot_target_position = np.array([0, 0.2, 0])
186 |
187 | initial_time = time.time()
188 | solution = ik.compute_solution(
189 | robot.q0, FL_foot_target_position, FR_foot_target_position, RL_foot_target_position, RR_foot_target_position
190 | )
191 | print("time: ", time.time() - initial_time)
192 |
193 | # Check consistency in mujoco
194 | m = mujoco.MjModel.from_xml_path("./../simulation/robot_model/unitree_go1/scene.xml")
195 | d = mujoco.MjData(m)
196 | d.qpos[2] = robot.q0[2]
197 | d.qpos[7:] = robot.q0[7:]
198 |
199 | FL_id = mujoco.mj_name2id(m, mujoco.mjtObj.mjOBJ_GEOM, 'FL')
200 | FR_id = mujoco.mj_name2id(m, mujoco.mjtObj.mjOBJ_GEOM, 'FR')
201 | RL_id = mujoco.mj_name2id(m, mujoco.mjtObj.mjOBJ_GEOM, 'RL')
202 | RR_id = mujoco.mj_name2id(m, mujoco.mjtObj.mjOBJ_GEOM, 'RR')
203 |
204 | joint_FL = solution[7:10]
205 | joint_FR = solution[10:13]
206 | joint_RL = solution[13:16]
207 | joint_RR = solution[16:19]
208 |
209 | d.qpos[7:] = np.concatenate((joint_FR, joint_FL, joint_RR, joint_RL))
210 | mujoco.mj_step(m, d)
211 | print("\n")
212 | print("MUJOCO SOLUTION")
213 | foot_position_FL = d.geom_xpos[FL_id]
214 | foot_position_FR = d.geom_xpos[FR_id]
215 | foot_position_RL = d.geom_xpos[RL_id]
216 | foot_position_RR = d.geom_xpos[RR_id]
217 | print("joints: ", np.concatenate((joint_FL, joint_FR, joint_RL, joint_RR)))
218 | print("FL foot position: ", foot_position_FL)
219 | print("FR foot position: ", foot_position_FR)
220 | print("RL foot position: ", foot_position_RL)
221 | print("RR foot position: ", foot_position_RR)
222 |
223 | print("\n")
224 | print("PINOCCHIO SOLUTION")
225 | print("joints: ", solution[7:])
226 | print("FL foot position", ik.FL_foot_position(solution))
227 | print("FR foot position", ik.FR_foot_position(solution))
228 | print("RL foot position", ik.RL_foot_position(solution))
229 | print("RR foot position", ik.RR_foot_position(solution))
230 |
231 | with mujoco.viewer.launch_passive(m, d) as viewer:
232 | while True:
233 | viewer.sync()
234 |
--------------------------------------------------------------------------------
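
InverseKinematicsQP builds the Opti problem once with opti.parameter(...) placeholders and, at every request, only calls opti.set_value(...) and solves again, so the symbolic graph is never rebuilt at runtime. A minimal self-contained sketch of that parametric pattern (toy quadratic cost, not the IK problem):

import casadi as cs

opti = cs.Opti()
x = opti.variable(3)         # decision variables
target = opti.parameter(3)   # updated before every solve, no rebuild needed

opti.minimize(cs.sumsqr(x - target))
opti.subject_to(x[0] == 0)   # mimics the "base is fixed" style of constraint
opti.solver("ipopt", dict(print_time=False, verbose=False), dict(print_level=0))

for t in ([1.0, 2.0, 3.0], [0.5, -1.0, 0.0]):
    opti.set_value(target, t)
    sol = opti.solve()
    print(sol.value(x))      # x[0] stays 0, the rest tracks the target
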
/quadruped_pympc/helpers/periodic_gait_generator.py:
--------------------------------------------------------------------------------
1 | import copy
2 |
3 | import numpy as np
4 | from gym_quadruped.utils.quadruped_utils import LegsAttr
5 | from quadruped_pympc.helpers.quadruped_utils import GaitType
6 |
7 |
8 | class PeriodicGaitGenerator:
9 | def __init__(self, duty_factor, step_freq, gait_type: GaitType, horizon):
10 | self.duty_factor = duty_factor
11 | self.step_freq = step_freq
12 | self.horizon = horizon
13 | self.gait_type = gait_type
14 | self.previous_gait_type = copy.deepcopy(gait_type)
15 |
16 | self.start_and_stop_activated = False # If True, the robot will start and stop walking to save energy
17 |
18 | # Private variables
19 | self._phase_signal, self._init = None, None
20 | self.reset()
21 |
22 | def reset(self):
23 |         # Choice of the gait; this represents the phase offset (delay) of each leg
24 | if self.gait_type == GaitType.TROT.value:
25 | self.phase_offset = [0.5, 1.0, 1.0, 0.5]
26 | elif self.gait_type == GaitType.PACE.value:
27 | self.phase_offset = [0.8, 0.3, 0.8, 0.3]
28 | elif self.gait_type == GaitType.BOUNDING.value:
29 | self.phase_offset = [0.5, 0.5, 0.0, 0.0]
30 | elif self.gait_type == GaitType.CIRCULARCRAWL.value:
31 | self.phase_offset = [0.0, 0.25, 0.75, 0.5]
32 | elif self.gait_type == GaitType.BFDIAGONALCRAWL.value:
33 | self.phase_offset = [0.0, 0.25, 0.5, 0.75]
34 | elif self.gait_type == GaitType.BACKDIAGONALCRAWL.value:
35 | self.phase_offset = [0.0, 0.5, 0.75, 0.25]
36 | elif self.gait_type == GaitType.FRONTDIAGONALCRAWL.value:
37 | self.phase_offset = [0.5, 1.0, 0.75, 1.25]
38 | else:
39 | self.phase_offset = [0.0, 0.5, 0.5, 0.0]
40 |
41 | # Set random gait_signal respecting the gait phase offset between legs
42 | # self._phase_signal = (np.asarray(self.phase_offset) + np.random.rand()) % 1.0
43 | self._phase_signal = np.asarray(self.phase_offset)
44 | self._init = [False] * len(self.phase_offset)
45 | self.n_contact = len(self.phase_offset)
46 | self.time_before_switch_freq = 0
47 |
48 | def run(self, dt, new_step_freq):
49 | contact = np.zeros(self.n_contact)
50 | for leg in range(self.n_contact):
51 | # Increase time by dt
52 | # self.t[leg] += dt*self.step_freq
53 | self._phase_signal[leg] += dt * new_step_freq
54 |
55 | # Phase signal is between 0 and 1
56 | self._phase_signal[leg] = self._phase_signal[leg] % 1.0
57 |
58 |             # If we are still in init, we check whether the leg's phase offset
59 |             # has already been surpassed. If not, the contact must remain 1;
60 |             # otherwise the leg lifts off
61 | if self._init[leg]:
62 | if self._phase_signal[leg] <= self.phase_offset[leg]:
63 | contact[leg] = 1
64 | else:
65 | self._init[leg] = False
66 | contact[leg] = 1
67 | self._phase_signal[leg] = 0
68 | else:
69 | # During the gait, we check if the time is below the duty factor
70 | # if so, the contact is 1, otherwise it is 0
71 | if self._phase_signal[leg] < self.duty_factor:
72 | contact[leg] = 1
73 | else:
74 | contact[leg] = 0
75 |
76 | return contact
77 |
78 | def set_phase_signal(self, phase_signal: np.ndarray, init: np.ndarray | None = None):
79 | assert len(phase_signal) == len(self._phase_signal)
80 |
81 | self._phase_signal = phase_signal
82 |
83 | if init is not None:
84 | assert len(init) == len(self._init)
85 | self._init = init
86 | else:
87 | self._init = [False for _ in range(len(self._phase_signal))]
88 |
89 | @property
90 | def phase_signal(self):
91 | return np.array(self._phase_signal)
92 |
93 | def compute_contact_sequence(self, contact_sequence_dts, contact_sequence_lenghts):
94 | # TODO: This function can be vectorized and computed with numpy vectorized operations
95 | if self.gait_type == GaitType.FULL_STANCE.value:
96 | contact_sequence = np.ones((4, self.horizon * 2))
97 | self.reset()
98 | return contact_sequence
99 |
100 | else:
101 | t_init = np.array(self._phase_signal)
102 | init_init = np.array(self._init)
103 |
104 | contact_sequence = np.zeros((self.n_contact, self.horizon))
105 |
106 | # the first value is simply the current predicted contact by the timer
107 | contact_sequence[:, 0] = self.run(0.0, self.step_freq)
108 |
109 |             # contact_sequence_dts contains a list of dt (useful for nonuniform sampling)
110 | # contact_sequence_lenghts contains the number of steps for each dt
111 | j = 0
112 | for i in range(1, self.horizon):
113 | if i >= contact_sequence_lenghts[j]:
114 | j += 1
115 | dt = contact_sequence_dts[j]
116 | contact_sequence[:, i] = self.run(dt, self.step_freq)
117 | self.set_phase_signal(t_init, init_init)
118 | return contact_sequence
119 |
120 | def set_full_stance(self):
121 | self.gait_type = GaitType.FULL_STANCE.value
122 | self.reset()
123 |
124 | def restore_previous_gait(self):
125 | self.gait_type = copy.deepcopy(self.previous_gait_type)
126 | self.reset()
127 |
128 | def update_start_and_stop(
129 | self,
130 | feet_pos,
131 | hip_pos,
132 | hip_offset,
133 | base_pos,
134 | base_ori_euler_xyz,
135 | base_lin_vel,
136 | base_ang_vel,
137 | ref_base_lin_vel,
138 | ref_base_ang_vel,
139 | current_contact,
140 | ):
141 | # Get the rotation matrix to transform from world to horizontal frame (hip-centric)
142 | yaw = base_ori_euler_xyz[2]
143 | R_W2H = np.array([np.cos(yaw), np.sin(yaw), -np.sin(yaw), np.cos(yaw)])
144 | R_W2H = R_W2H.reshape((2, 2))
145 |
146 | # Check if the feet are close to the hip enough to stop the motion
147 | feet_pos_h = LegsAttr(*[np.zeros(3) for _ in range(4)])
148 | feet_pos_h.FL[:2] = R_W2H @ (feet_pos.FL[:2] - base_pos[0:2])
149 | feet_pos_h.FR[:2] = R_W2H @ (feet_pos.FR[:2] - base_pos[0:2])
150 | feet_pos_h.RL[:2] = R_W2H @ (feet_pos.RL[:2] - base_pos[0:2])
151 | feet_pos_h.RR[:2] = R_W2H @ (feet_pos.RR[:2] - base_pos[0:2])
152 |
153 | feet_pos_h.FL[1] -= hip_offset
154 | feet_pos_h.FR[1] += hip_offset
155 | feet_pos_h.RL[1] -= hip_offset
156 | feet_pos_h.RR[1] += hip_offset
157 |
158 | hip_pos_h = LegsAttr(*[np.zeros(3) for _ in range(4)])
159 | hip_pos_h.FL[:2] = R_W2H @ (hip_pos.FL[:2] - base_pos[0:2])
160 | hip_pos_h.FR[:2] = R_W2H @ (hip_pos.FR[:2] - base_pos[0:2])
161 | hip_pos_h.RL[:2] = R_W2H @ (hip_pos.RL[:2] - base_pos[0:2])
162 | hip_pos_h.RR[:2] = R_W2H @ (hip_pos.RR[:2] - base_pos[0:2])
163 |
164 | feet_to_hip_distance_h_FL = np.sqrt(
165 | np.square(feet_pos_h.FL[0] - hip_pos_h.FL[0]) + np.square(feet_pos_h.FL[1] - hip_pos_h.FL[1])
166 | )
167 | feet_to_hip_distance_h_FR = np.sqrt(
168 | np.square(feet_pos_h.FR[0] - hip_pos_h.FR[0]) + np.square(feet_pos_h.FR[1] - hip_pos_h.FR[1])
169 | )
170 | feet_to_hip_distance_h_RL = np.sqrt(
171 | np.square(feet_pos_h.RL[0] - hip_pos_h.RL[0]) + np.square(feet_pos_h.RL[1] - hip_pos_h.RL[1])
172 | )
173 | feet_to_hip_distance_h_RR = np.sqrt(
174 | np.square(feet_pos_h.RR[0] - hip_pos_h.RR[0]) + np.square(feet_pos_h.RR[1] - hip_pos_h.RR[1])
175 | )
176 | feet_to_hip_distance_h = np.mean(
177 | [feet_to_hip_distance_h_FL, feet_to_hip_distance_h_FR, feet_to_hip_distance_h_RL, feet_to_hip_distance_h_RR]
178 | )
179 |
180 | # Some safety checks to safely stop motion
181 | if (
182 | np.linalg.norm(ref_base_lin_vel) == 0.0
183 | and np.linalg.norm(ref_base_ang_vel) == 0.0
184 | and np.linalg.norm(base_lin_vel) < 0.1
185 | and np.linalg.norm(base_ang_vel) < 0.1
186 | and np.abs(base_ori_euler_xyz[0]) < 0.05
187 | and np.abs(base_ori_euler_xyz[1]) < 0.05
188 | and np.sum(current_contact) == 4
189 | and feet_to_hip_distance_h_FL < 0.06
190 | and feet_to_hip_distance_h_FR < 0.06
191 | and feet_to_hip_distance_h_RL < 0.06
192 | and feet_to_hip_distance_h_RR < 0.06
193 | ):
194 | self.set_full_stance()
195 | elif self.gait_type == GaitType.FULL_STANCE.value:
196 | self.restore_previous_gait()
197 |
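
A minimal usage sketch of the class above (not from the source file; it assumes `quadruped_pympc` and its `gym_quadruped` dependency are importable, and uses an arbitrary horizon and dt):

    import numpy as np
    from quadruped_pympc.helpers.quadruped_utils import GaitType
    from quadruped_pympc.helpers.periodic_gait_generator import PeriodicGaitGenerator

    horizon, mpc_dt = 12, 0.02
    pgg = PeriodicGaitGenerator(duty_factor=0.65, step_freq=1.4, gait_type=GaitType.TROT.value, horizon=horizon)

    # Uniform discretization: a single dt covering the whole horizon
    contact_sequence = pgg.compute_contact_sequence(
        contact_sequence_dts=[mpc_dt], contact_sequence_lenghts=[horizon]
    )
    print(contact_sequence.shape)  # (4, 12): one row per leg, 1 = stance, 0 = swing

    # Advance the internal phase by one simulation step (e.g. 2 ms) at the same step frequency
    pgg.run(0.002, 1.4)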
--------------------------------------------------------------------------------
/quadruped_pympc/helpers/periodic_gait_generator_jax.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import jax
4 | import jax.numpy as jnp
5 |
6 |
7 |
8 | dir_path = os.path.dirname(os.path.realpath(__file__))
9 |
10 | import sys
11 |
12 | sys.path.append(dir_path + '/../')
13 |
14 |
15 |
16 | # Parameters for both MPC and simulation
17 | from quadruped_pympc import config
18 |
19 |
20 | class PeriodicGaitGeneratorJax:
21 | def __init__(self, duty_factor, step_freq, horizon, mpc_dt):
22 | self.duty_factor = duty_factor
23 | self.step_freq = step_freq
24 | self.horizon = horizon
25 | self.mpc_dt = mpc_dt
26 |
27 | # Only Trot for now
28 | self.delta = jnp.array([0.5, 1.0, 1.0, 0.5])
29 |
30 | self.t = jnp.zeros(len(self.delta))
31 | # self.t = jnp.array([0.0, 0.5, 0.5, 0.0])
32 | self.n_contact = len(self.delta)
33 |
34 | """def run(self, t, step_freq):
35 | contact = jnp.zeros(self.n_contact)
36 | #for leg in range(self.n_contact):
37 |
38 | #restart condition
39 | #t = t.at[0].set(jnp.where(t[0] >= 1.0, 0, t[0]))
40 | #t = t.at[1].set(jnp.where(t[1] >= 1.0, 0, t[1]))
41 | #t = t.at[2].set(jnp.where(t[2] >= 1.0, 0, t[2]))
42 | #t = t.at[3].set(jnp.where(t[3] >= 1.0, 0, t[3]))
43 |
44 | t = t.at[0].set(jnp.where(t[0] >= 1.0, t[0] - 1, t[0]))
45 | t = t.at[1].set(jnp.where(t[1] >= 1.0, t[1] - 1, t[1]))
46 | t = t.at[2].set(jnp.where(t[2] >= 1.0, t[2] - 1, t[2]))
47 | t = t.at[3].set(jnp.where(t[3] >= 1.0, t[3] - 1, t[3]))
48 |
49 | #increase time by dt
50 | t = t.at[0].set(t[0] + self.mpc_dt*step_freq)
51 | t = t.at[1].set(t[1] + self.mpc_dt*step_freq)
52 | t = t.at[2].set(t[2] + self.mpc_dt*step_freq)
53 | t = t.at[3].set(t[3] + self.mpc_dt*step_freq)
54 |
55 | #contact = contact.at[0].set(jnp.where(t[0] < self.duty_factor, 1.0, 0.0))
56 | #contact = contact.at[1].set(jnp.where(t[1] < self.duty_factor, 1.0, 0.0))
57 | #contact = contact.at[2].set(jnp.where(t[2] < self.duty_factor, 1.0, 0.0))
58 | #contact = contact.at[3].set(jnp.where(t[3] < self.duty_factor, 1.0, 0.0))
59 |
60 |
61 | contact = contact.at[0].set(jnp.where(t[0] > (1-self.duty_factor), 1.0, 0.0))
62 | contact = contact.at[1].set(jnp.where(t[1] > (1-self.duty_factor), 1.0, 0.0))
63 | contact = contact.at[2].set(jnp.where(t[2] > (1-self.duty_factor), 1.0, 0.0))
64 | contact = contact.at[3].set(jnp.where(t[3] > (1-self.duty_factor), 1.0, 0.0))
65 |
66 | return contact, t"""
67 |
68 | def run(self, t, step_freq):
69 | contact = jnp.zeros(self.n_contact)
70 | # for leg in range(self.n_contact):
71 |
72 | # restart condition
73 | t = t.at[0].set(jnp.where(t[0] >= 1.0, 0, t[0]))
74 | t = t.at[1].set(jnp.where(t[1] >= 1.0, 0, t[1]))
75 | t = t.at[2].set(jnp.where(t[2] >= 1.0, 0, t[2]))
76 | t = t.at[3].set(jnp.where(t[3] >= 1.0, 0, t[3]))
77 |
78 | # increase time by dt
79 | t = t.at[0].set(t[0] + self.mpc_dt * step_freq)
80 | t = t.at[1].set(t[1] + self.mpc_dt * step_freq)
81 | t = t.at[2].set(t[2] + self.mpc_dt * step_freq)
82 | t = t.at[3].set(t[3] + self.mpc_dt * step_freq)
83 |
84 | contact = contact.at[0].set(jnp.where(t[0] < self.duty_factor, 1.0, 0.0))
85 | contact = contact.at[1].set(jnp.where(t[1] < self.duty_factor, 1.0, 0.0))
86 | contact = contact.at[2].set(jnp.where(t[2] < self.duty_factor, 1.0, 0.0))
87 | contact = contact.at[3].set(jnp.where(t[3] < self.duty_factor, 1.0, 0.0))
88 |
89 | return contact, t
90 |
91 | def set(self, t):
92 | self.t = t
93 |
94 | def with_newt(self, t):
95 | self.t = t
96 | return self
97 |
98 | def get_t(self):
99 | return self.t
100 |
101 | """def compute_contact_sequence(self, simulation_dt, t, step_freq):
102 | t_init = jnp.array(t)
103 |
104 | contact_sequence = jnp.zeros((self.n_contact, self.horizon))
105 | new_t = t_init
106 |
107 |
108 |
109 | #for i in range(self.horizon):
110 | # #new_step_freq = jnp.where(i > 2, step_freq, self.step_freq)
111 | # #new_contact_sequence, new_t = self.run(new_t, new_step_freq)
112 | # new_contact_sequence, new_t = self.run(new_t, step_freq)
113 | # contact_sequence = contact_sequence.at[:, i].set(new_contact_sequence)
114 |
115 | def body_fn(n, carry):
116 | new_t, contact_sequence = carry
117 | new_contact_sequence, new_t = self.run(new_t, step_freq)
118 | contact_sequence = contact_sequence.at[:, n].set(new_contact_sequence)
119 | return (new_t, contact_sequence)#, None
120 |
121 |
122 | init_carry = (new_t, contact_sequence)
123 | new_t, contact_sequence = jax.lax.fori_loop(1, self.horizon, body_fn, init_carry)
124 | #new_t, contact_sequence = jax.lax.fori_loop(0, self.horizon, body_fn, init_carry)
125 |
126 |
127 | contact = jnp.zeros(self.n_contact)
128 | contact = contact.at[0].set(jnp.where(t_init[0] > (1-self.duty_factor), 1.0, 0.0))
129 | contact = contact.at[1].set(jnp.where(t_init[1] > (1-self.duty_factor), 1.0, 0.0))
130 | contact = contact.at[2].set(jnp.where(t_init[2] > (1-self.duty_factor), 1.0, 0.0))
131 | contact = contact.at[3].set(jnp.where(t_init[3] > (1-self.duty_factor), 1.0, 0.0))
132 | contact_sequence = contact_sequence.at[:, 0].set(contact)
133 |
134 | return contact_sequence, new_t"""
135 |
136 | def compute_contact_sequence(self, simulation_dt, t, step_freq):
137 | t_init = jnp.array(t)
138 |
139 | contact_sequence = jnp.zeros((self.n_contact, self.horizon))
140 | new_t = t_init
141 |
142 | def body_fn(n, carry):
143 | new_t, contact_sequence = carry
144 | new_contact_sequence, new_t = self.run(new_t, step_freq)
145 | contact_sequence = contact_sequence.at[:, n].set(new_contact_sequence)
146 | return (new_t, contact_sequence) # , None
147 |
148 | init_carry = (new_t, contact_sequence)
149 | new_t, contact_sequence = jax.lax.fori_loop(0, self.horizon, body_fn, init_carry)
150 |
151 | return contact_sequence, new_t
152 |
153 |
154 | class Gait:
155 | TROT = 0
156 | PACE = 1
157 | BOUNDING = 2
158 | CIRCULARCRAWL = 3
159 | BFDIAGONALCRAWL = 4
160 | BACKDIAGONALCRAWL = 5
161 | FRONTDIAGONALCRAWL = 6
162 |
163 |
164 | if __name__ == "__main__":
165 |     # Compare the numpy and the JAX periodic gait generators
166 |     from quadruped_pympc.helpers.periodic_gait_generator import PeriodicGaitGenerator
167 |
168 |     mpc_dt = config.mpc_params['dt']
169 |     horizon = config.mpc_params['horizon']
170 |     gait = config.simulation_params['gait']
171 |     simulation_dt = config.simulation_params['dt']
172 |     if gait == "trot":
173 |         step_frequency = 2.65
174 |         duty_factor = 0.65
175 |         p_gait = Gait.TROT
176 |     elif gait == "crawl":
177 |         step_frequency = 0.4
178 |         duty_factor = 0.9
179 |         p_gait = Gait.BACKDIAGONALCRAWL
180 |     else:
181 |         step_frequency = 2
182 |         duty_factor = 0.7
183 |         p_gait = Gait.PACE
184 |
185 |     pgg = PeriodicGaitGenerator(duty_factor=duty_factor, step_freq=step_frequency, gait_type=p_gait, horizon=horizon)
186 |     pgg_jax = PeriodicGaitGeneratorJax(
187 |         duty_factor=duty_factor, step_freq=step_frequency, horizon=horizon, mpc_dt=mpc_dt
188 |     )
189 |     jitted_pgg = jax.jit(pgg_jax.compute_contact_sequence)
190 |     for j in range(100):
191 |         contact_sequence = pgg.compute_contact_sequence(
192 |             contact_sequence_dts=[mpc_dt], contact_sequence_lenghts=[horizon]
193 |         )
194 |         contact_sequence_jax = pgg_jax.compute_contact_sequence(
195 |             simulation_dt=simulation_dt, t=pgg.phase_signal, step_freq=step_frequency
196 |         )
197 |         contact_sequence_jax_jitted = jitted_pgg(simulation_dt, pgg.phase_signal, step_frequency)
198 |         print("contact_sequence:\n", contact_sequence)
199 |         print("contact_sequence_jax:\n", contact_sequence_jax[0])
200 |         print("contact_sequence_jax_jitted:\n", contact_sequence_jax_jitted[0])
201 |         pgg.run(simulation_dt, step_frequency)
202 |         print("############")
203 |
204 |     breakpoint()
205 |
--------------------------------------------------------------------------------
/quadruped_pympc/helpers/quadruped_utils.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import copy
4 | from collections import namedtuple
5 | from enum import Enum
6 |
7 | import numpy as np
8 | from mujoco.viewer import Handle
9 |
10 |
11 |
12 | class GaitType(Enum):
13 | """Enumeration class to represent the different gaits that a quadruped robot can perform."""
14 |
15 | TROT = 0
16 | PACE = 1
17 | BOUNDING = 2
18 | CIRCULARCRAWL = 3
19 | BFDIAGONALCRAWL = 4
20 | BACKDIAGONALCRAWL = 5
21 | FRONTDIAGONALCRAWL = 6
22 | FULL_STANCE = 7
23 |
24 |
25 | def plot_swing_mujoco(
26 | viewer: Handle,
27 | swing_traj_controller,
28 | swing_period,
29 | swing_time: namedtuple,
30 | lift_off_positions: namedtuple,
31 | nmpc_footholds: namedtuple,
32 | ref_feet_pos: namedtuple,
33 | early_stance_detector,
34 | geom_ids: namedtuple = None,
35 | ):
36 | """Function to plot the desired foot swing trajectory in Mujoco.
37 |
38 | Args:
39 | ----
40 | viewer (Handle): The Mujoco viewer.
41 | swing_traj_controller (SwingTrajectoryController): The swing trajectory controller.
42 | swing_period: The swing period.
43 | swing_time (LegsAttr): The swing time for each leg.
44 | lift_off_positions (LegsAttr): The lift-off positions for each leg.
45 | nmpc_footholds (LegsAttr): The footholds for each leg.
46 | ref_feet_pos (LegsAttr): The reference feet positions for each leg.
47 | geom_ids (LegsAttr, optional): The geometry ids for each leg. Defaults to None.
48 |
49 | Returns:
50 | -------
51 | LegsAttr: The geometry ids for each leg trajectory
52 | """
53 | from gym_quadruped.utils.mujoco.visual import render_line, render_sphere
54 | from gym_quadruped.utils.quadruped_utils import LegsAttr
55 |
56 | # Plot desired foot swing trajectory in Mujoco.
57 | NUM_TRAJ_POINTS = 6
58 |
59 | if geom_ids is None:
60 | geom_ids = LegsAttr(FL=[], FR=[], RL=[], RR=[])
61 | # Instantiate a new geometry
62 | for leg_id, leg_name in enumerate(["FL", "FR", "RL", "RR"]):
63 | viewer.user_scn.ngeom += NUM_TRAJ_POINTS
64 | geom_ids[leg_name] = list(range(viewer.user_scn.ngeom - NUM_TRAJ_POINTS - 1, viewer.user_scn.ngeom - 1))
65 |
66 | # viewer.user_scn.ngeom = 1
67 | # We first draw the trajectory of the feet
68 | des_foot_traj = LegsAttr(FL=[], FR=[], RL=[], RR=[])
69 | for leg_id, leg_name in enumerate(["FL", "FR", "RL", "RR"]):
70 | # TODO: This function should be vectorized rather than queried sequentially
71 | if swing_time[leg_name] == 0.0:
72 | continue
73 | for point_idx, foot_swing_time in enumerate(np.linspace(swing_time[leg_name], swing_period, NUM_TRAJ_POINTS)):
74 | ref_foot_pos, _, _ = swing_traj_controller.swing_generator.compute_trajectory_references(
75 | foot_swing_time, lift_off_positions[leg_name], nmpc_footholds[leg_name],
76 | early_stance_detector.hitmoments[leg_name], early_stance_detector.hitpoints[leg_name]
77 | )
78 | des_foot_traj[leg_name].append(ref_foot_pos.squeeze())
79 |
80 | for point_idx in range(NUM_TRAJ_POINTS - 1):
81 | render_line(
82 | viewer=viewer,
83 | initial_point=des_foot_traj[leg_name][point_idx],
84 | target_point=des_foot_traj[leg_name][point_idx + 1],
85 | width=0.005,
86 | color=np.array([1, 0, 0, 1]),
87 | geom_id=geom_ids[leg_name][point_idx],
88 | )
89 |
90 |         # Add a sphere at the ref_feet_pos
91 | render_sphere(
92 | viewer=viewer,
93 | position=ref_feet_pos[leg_name],
94 | diameter=0.04,
95 | color=np.array([0, 1, 0, 0.5]),
96 | geom_id=geom_ids[leg_name][-1],
97 | )
98 | return geom_ids
99 |
100 |
101 | def check_zmp_constraint_satisfaction(state, contact_status, forces):
102 | # TODO: This import should go
103 | from quadruped_pympc import config
104 |
105 | base_w = copy.deepcopy(state["position"])
106 | base_vel_w = copy.deepcopy(state["linear_velocity"])
107 |
108 | FL = copy.deepcopy(state["foot_FL"])
109 | FR = copy.deepcopy(state["foot_FR"])
110 | RL = copy.deepcopy(state["foot_RL"])
111 | RR = copy.deepcopy(state["foot_RR"])
112 |
113 | yaw = copy.deepcopy(state["orientation"][2])
114 | h_R_w = np.zeros((2, 2))
115 | h_R_w[0, 0] = np.cos(yaw)
116 | h_R_w[0, 1] = np.sin(yaw)
117 | h_R_w[1, 0] = -np.sin(yaw)
118 | h_R_w[1, 1] = np.cos(yaw)
119 |
120 | FL[0:2] = h_R_w @ (FL[0:2] - base_w[0:2])
121 | FR[0:2] = h_R_w @ (FR[0:2] - base_w[0:2])
122 | RL[0:2] = h_R_w @ (RL[0:2] - base_w[0:2])
123 | RR[0:2] = h_R_w @ (RR[0:2] - base_w[0:2])
124 |
125 | foot_force_fl = forces[0:3] # @self.centroidal_model.param[0]
126 | foot_force_fr = forces[3:6] # @self.centroidal_model.param[1]
127 | foot_force_rl = forces[6:9] # @self.centroidal_model.param[2]
128 | foot_force_rr = forces[9:12] # @self.centroidal_model.param[3]
129 | temp = foot_force_fl + foot_force_fr + foot_force_rl + foot_force_rr
130 | gravity_constant = 9.81
131 | gravity = np.array([0, 0, -gravity_constant])
132 | linear_com_acc = (1 / config.mass) * temp + gravity
133 |
134 | if config.mpc_params['use_zmp_stability']:
135 | robotHeight = base_w[2]
136 | zmp = base_w[0:2] - linear_com_acc[0:2] * (robotHeight / gravity_constant)
137 | zmp = h_R_w @ (zmp - base_w[0:2])
138 | x = zmp[0]
139 | y = zmp[1]
140 | else:
141 | x = 0.0
142 | y = 0.0
143 |
144 | y_FL = FL[1]
145 | y_FR = FR[1]
146 | y_RL = RL[1]
147 | y_RR = RR[1]
148 |
149 | x_FL = FL[0]
150 | x_FR = FR[0]
151 | x_RL = RL[0]
152 | x_RR = RR[0]
153 |
154 | # LF - RF : x < (x2 - x1) (y - y1) / (y2 - y1) + x1
155 | # RF - RH: y > (y2 - y1) (x - x1) / (x2 - x1) + y1
156 | # RH - LH : x > (x2 - x1) (y - y1) / (y2 - y1) + x1
157 | # LH - LF: y < (y2 - y1) (x - x1) / (x2 - x1) + y1
158 |
159 |     # FL and FR cannot stay at the same x! # constraint should be less than zero
160 | constraint_FL_FR = x - (x_FR - x_FL) * (y - y_FL) / (y_FR - y_FL + 0.001) - x_FL
161 |
162 | # FR and RR cannot stay at the same y! #constraint should be bigger than zero
163 | constraint_FR_RR = y - (y_RR - y_FR) * (x - x_FR) / (x_RR - x_FR + 0.001) - y_FR
164 |
165 | # RL and RR cannot stay at the same x! #constraint should be bigger than zero
166 | constraint_RR_RL = x - (x_RL - x_RR) * (y - y_RR) / (y_RL - y_RR + 0.001) - x_RR
167 |
168 | # FL and RL cannot stay at the same y! #constraint should be less than zero
169 | constraint_RL_FL = y - (y_FL - y_RL) * (x - x_RL) / (x_FL - x_RL + 0.001) - y_RL
170 |
171 | # the diagonal stuff can be at any point...
172 | constraint_FL_RR = y - (y_RR - y_FL) * (x - x_FL) / (x_RR - x_FL + 0.001) - y_FL # bigger than zero
173 | constraint_FR_RL = y - (y_RL - y_FR) * (x - x_FR) / (x_RL - x_FR + 0.001) - y_FR # bigger than zero
174 |
175 | FL_contact = contact_status[0]
176 | FR_contact = contact_status[1]
177 | RL_contact = contact_status[2]
178 | RR_contact = contact_status[3]
179 |
180 | violation = 0
181 |
182 | if FL_contact == 1:
183 | if FR_contact == 1:
184 | # ub_support_FL_FR = -0.0
185 | # lb_support_FL_FR = -1000
186 | if constraint_FL_FR > 0:
187 | violation = violation + 1
188 |
189 | else:
190 | # ub_support_FL_RR = 1000
191 | # lb_support_FL_RR = 0.0
192 | if constraint_FL_RR < 0:
193 | violation = violation + 1
194 |
195 | if FR_contact == 1:
196 | if RR_contact == 1:
197 | # ub_support_FR_RR = 1000
198 | # lb_support_FR_RR = 0.0
199 | if constraint_FR_RR < 0:
200 | violation = violation + 1
201 |
202 | else:
203 | ub_support_FR_RL = 1000
204 | lb_support_FR_RL = 0.0
205 | # if(constraint_FR_RL < 0):
206 | # violation = violation + 1
207 | # TOCHECK
208 |
209 | if RR_contact == 1:
210 | if RL_contact == 1:
211 | # ub_support_RR_RL = 1000
212 | # lb_support_RR_RL = 0.0
213 | if constraint_RR_RL < 0:
214 | violation = violation + 1
215 |
216 | else:
217 | # ub_support_FL_RR = -0.0
218 | # lb_support_FL_RR = -1000
219 | if constraint_FL_RR > 0:
220 | violation = violation + 1 * 0
221 |
222 | if RL_contact == 1:
223 | if FL_contact == 1:
224 | # ub_support_RL_FL = -0.0
225 | # lb_support_RL_FL = -1000
226 | if constraint_RL_FL > 0:
227 | violation = violation + 1
228 | else:
229 | # ub_support_FR_RL = -0.0
230 | # lb_support_FR_RL = -1000
231 | if constraint_FR_RL > 0:
232 | violation = violation + 1
233 |
234 | if FL_contact == 1 and FR_contact == 1 and RL_contact == 1 and RR_contact == 1:
235 | violation = 0
236 |
237 | if violation >= 1:
238 | return True
239 | else:
240 | return False
241 |
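
A minimal sketch of calling the ZMP/support-polygon check above (not from the source file; it assumes the repo's `config` module is importable, since the function reads `config.mass` and `config.mpc_params` internally). The `forces` vector stacks the GRFs as FL, FR, RL, RR:

    import numpy as np
    from quadruped_pympc.helpers.quadruped_utils import check_zmp_constraint_satisfaction

    state = {
        "position": np.array([0.0, 0.0, 0.30]),
        "linear_velocity": np.zeros(3),
        "orientation": np.zeros(3),              # roll, pitch, yaw
        "foot_FL": np.array([0.25, 0.15, 0.0]),
        "foot_FR": np.array([0.25, -0.15, 0.0]),
        "foot_RL": np.array([-0.25, 0.15, 0.0]),
        "foot_RR": np.array([-0.25, -0.15, 0.0]),
    }
    contact_status = np.array([1, 1, 0, 1])      # RL in swing, the other three legs in stance
    forces = np.tile([0.0, 0.0, 80.0], 4)        # purely vertical GRFs for this example
    print(check_zmp_constraint_satisfaction(state, contact_status, forces))  # True if the constraint is violated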
--------------------------------------------------------------------------------
/quadruped_pympc/helpers/swing_generators/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iit-DLSLab/Quadruped-PyMPC/3b289c089ce103733071455760ccbbaef9a9672c/quadruped_pympc/helpers/swing_generators/__init__.py
--------------------------------------------------------------------------------
/quadruped_pympc/helpers/swing_generators/explicit_swing_trajectory_generator.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import numpy as np
3 |
4 |
5 | class SwingTrajectoryGenerator:
6 | def __init__(self, step_height: float, swing_period: float) -> None:
7 | self.step_height = step_height
8 | self.swing_period = swing_period
9 | self.half_swing_period = swing_period / 2
10 | self.bezier_time_factor = 1 / (swing_period / 2)
11 |
12 | def plot_trajectory_3d(self, curve_points: np.ndarray) -> None:
13 | curve_points = np.array(curve_points)
14 |
15 | fig = plt.figure()
16 | ax = fig.add_subplot(111, projection="3d")
17 | ax.plot(curve_points[:, 0], curve_points[:, 1], curve_points[:, 2])
18 | ax.legend()
19 |
20 | plt.title("3D Bézier Curve")
21 | plt.show()
22 |
23 | def plot_trajectory_references(self, tp, fp, vp, ap):
24 | # Convert lists to NumPy arrays for easier plotting
25 | time_points = np.array(tp)
26 | footPosDes_points = np.array(fp)
27 | footVelDes_points = np.array(vp)
28 | footAccDes_points = np.array(ap)
29 |
30 | # Create subplots for position, velocity, and acceleration
31 | fig, axs = plt.subplots(3, 1, figsize=(8, 12))
32 |
33 | # Plot position
34 | for i in range(3):
35 | axs[0].plot(time_points, footPosDes_points[:, i], label=f"Position {i + 1}")
36 | axs[0].set_xlabel('Time')
37 | axs[0].set_ylabel('Position')
38 | axs[0].legend()
39 |
40 | # Plot velocity
41 | for i in range(3):
42 | axs[1].plot(time_points, footVelDes_points[:, i], label=f"Velocity {i + 1}")
43 | axs[1].set_xlabel('Time')
44 | axs[1].set_ylabel('Velocity')
45 | axs[1].legend()
46 |
47 | # Plot acceleration
48 | for i in range(3):
49 | axs[2].plot(time_points, footAccDes_points[:, i], label=f"Acceleration {i + 1}")
50 | axs[2].set_xlabel('Time')
51 | axs[2].set_ylabel('Acceleration')
52 | axs[2].legend()
53 |
54 | plt.tight_layout()
55 | plt.show()
56 |
57 | def compute_control_points(
58 | self, swing_time: float, lift_off: np.ndarray, touch_down: np.ndarray
59 |     ) -> (float, np.ndarray, np.ndarray, np.ndarray, np.ndarray):
60 | cp1, cp2, cp3, cp4 = None, None, None, None
61 | middle_point = 0.5 * (lift_off + touch_down)
62 |
63 | if swing_time <= self.half_swing_period:
64 | cp1 = lift_off
65 | cp2 = lift_off
66 | cp3 = np.array([[lift_off[0], lift_off[1], self.step_height]])
67 | cp4 = np.array([[middle_point[0], middle_point[1], self.step_height]])
68 | else:
69 | cp1 = np.array([[middle_point[0], middle_point[1], self.step_height]])
70 | cp2 = np.array([[touch_down[0], touch_down[1], self.step_height]])
71 | cp3 = np.array([[touch_down[0], touch_down[1], touch_down[2]]])
72 | cp4 = np.array([[touch_down[0], touch_down[1], touch_down[2]]])
73 |
74 | return swing_time % self.half_swing_period, cp1, cp2, cp3, cp4
75 |
76 | def compute_trajectory_references(
77 | self, swing_time: float, lift_off: np.ndarray, touch_down: np.ndarray, early_stance_hitmoment = -1, early_stance_hitpoint = None
78 | ) -> (np.ndarray, np.ndarray, np.ndarray):
79 | bezier_swing_time, cp1, cp2, cp3, cp4 = self.compute_control_points(
80 | swing_time, lift_off.squeeze(), touch_down.squeeze()
81 | )
82 |
83 | desired_foot_position = (
84 | (1 - self.bezier_time_factor * bezier_swing_time) ** 3 * cp1
85 | + 3
86 | * (self.bezier_time_factor * bezier_swing_time)
87 | * (1 - self.bezier_time_factor * bezier_swing_time) ** 2
88 | * cp2
89 | + 3
90 | * (self.bezier_time_factor * bezier_swing_time) ** 2
91 | * (1 - self.bezier_time_factor * bezier_swing_time)
92 | * cp3
93 | + (self.bezier_time_factor * bezier_swing_time) ** 3 * cp4
94 | )
95 |
96 | desired_foot_velocity = (
97 | 3 * (1 - self.bezier_time_factor * bezier_swing_time) ** 2 * (cp2 - cp1)
98 | + 6
99 | * (1 - self.bezier_time_factor * bezier_swing_time)
100 | * (self.bezier_time_factor * bezier_swing_time)
101 | * (cp3 - cp2)
102 | + 3 * (self.bezier_time_factor * bezier_swing_time) ** 2 * (cp4 - cp3)
103 | )
104 |
105 | desired_foot_acceleration = 6 * (1 - self.bezier_time_factor * bezier_swing_time) * (
106 | cp3 - 2 * cp2 + cp1
107 | ) + 6 * (self.bezier_time_factor * bezier_swing_time) * (cp4 - 2 * cp3 + cp2)
108 |
109 | return (
110 | desired_foot_position.reshape((3,)),
111 | desired_foot_velocity.reshape((3,)),
112 | desired_foot_acceleration.reshape((3,)),
113 | )
114 |
115 |
116 | # Example:
117 | if __name__ == "__main__":
118 | step_height = 0.08
119 | swing_period = 0.9
120 | trajectory_generator = SwingTrajectoryGenerator(step_height=step_height, swing_period=swing_period)
121 |
122 | lift_off = np.array([0, 0, 0])
123 | touch_down = np.array([0.1, 0, 0.0])
124 |
125 | # Generate trajectory points
126 | simulation_dt = 0.002
127 | time_points = []
128 | position_points = []
129 | velocity_points = []
130 | acceleration_points = []
131 | i = 0
132 | for foot_swing_time in np.arange(0.000001, swing_period, 0.002):
133 | desired_foot_position, desired_foot_velocity, desired_foot_acceleration = (
134 | trajectory_generator.compute_trajectory_references(swing_time=i, lift_off=lift_off, touch_down=touch_down)
135 | )
136 | i += simulation_dt
137 |
138 | time_points.append(i)
139 | position_points.append(desired_foot_position.squeeze())
140 | velocity_points.append(desired_foot_velocity.squeeze())
141 | acceleration_points.append(desired_foot_acceleration.squeeze())
142 |
143 | # Plot the generated trajectory
144 | trajectory_generator.plot_trajectory_3d(np.array(position_points))
145 | trajectory_generator.plot_trajectory_references(time_points, position_points, velocity_points, acceleration_points)
146 |
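
The generator above builds the swing from two cubic Bézier segments, lift-off to apex and apex to touch-down, each parameterized over half the swing period. A small endpoint check (not from the source file, assuming the package is importable):

    import numpy as np
    from quadruped_pympc.helpers.swing_generators.explicit_swing_trajectory_generator import SwingTrajectoryGenerator

    gen = SwingTrajectoryGenerator(step_height=0.08, swing_period=0.9)
    lift_off = np.array([0.0, 0.0, 0.0])
    touch_down = np.array([0.1, 0.0, 0.0])

    for t in (1e-6, 0.449, 0.899):  # start, just before the apex, just before touch-down
        pos, vel, acc = gen.compute_trajectory_references(t, lift_off, touch_down)
        print(f"t={t:.3f}  pos={np.round(pos, 3)}")
    # Expected: the position starts at lift_off, reaches the mid-point raised to
    # step_height near the half period, and ends at touch_down.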
--------------------------------------------------------------------------------
/quadruped_pympc/helpers/swing_generators/scipy_swing_trajectory_generator.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import numpy as np
3 | from scipy.interpolate import CubicSpline
4 |
5 | from quadruped_pympc import config as cfg
6 |
7 | class SwingTrajectoryGenerator:
8 | def __init__(self, step_height: float, swing_period: float) -> None:
9 | self.step_height = step_height
10 | self.swing_period = swing_period
11 | self.half_swing_period = swing_period / 2
12 | self.bezier_time_factor = 1 / (swing_period / 2)
13 |
14 | # Stored swing-trajectory properties
15 | self.stepHeight = step_height
16 | self.step_height_enhancement = False
17 |
18 | if(cfg.simulation_params['visual_foothold_adaptation'] == 'blind'):
19 | self.z_height_enhancement = True
20 | else:
21 | self.z_height_enhancement = False
22 |
23 | def createCurve(self, x0, xf, early_stance_hitmoment = -1):
24 |
25 | scaling_factor = 1.5
26 |
27 | if early_stance_hitmoment != -1:# and early_stance_hitmoment < self.swing_period*0.9:
28 | reflex_maximum_height = 0.15 # this should be per robot basis
29 |
30 | p1 = x0.copy()
31 | p1[:2] = x0[:2] - 0.01 * (xf[:2]-x0[:2])
32 | p1 += np.array([0., 0., self.stepHeight / scaling_factor])
33 | p2 = 0.5 * (x0 + xf) + np.array([0., 0., reflex_maximum_height])
34 | p3 = xf + np.array([0.0, 0.0, reflex_maximum_height / scaling_factor])
35 |
36 | x = np.array([x0[0], p1[0], p2[0], p3[0], xf[0]])
37 | y = np.array([x0[1], p1[1], p2[1], p3[1], xf[1]])
38 | if(self.z_height_enhancement):
39 | z = np.array([x0[2], p1[2], p2[2], p3[2], xf[2] + reflex_maximum_height / (scaling_factor+0.5)])
40 | else:
41 | z = np.array([x0[2], p1[2], p2[2], p3[2], xf[2]])
42 |
43 | updated_swing_period = self.swing_period - early_stance_hitmoment
44 | t = np.array([early_stance_hitmoment, early_stance_hitmoment+updated_swing_period/4, early_stance_hitmoment+updated_swing_period/2, early_stance_hitmoment+updated_swing_period*3/4, self.swing_period])
45 |
46 | else:
47 | if(self.step_height_enhancement):
48 | temp_step_height = 0.15
49 | else:
50 | temp_step_height = self.stepHeight
51 | p1 = x0 + np.array([0., 0., temp_step_height / scaling_factor])
52 | p2 = 0.5 * (x0 + xf) + np.array([0., 0., temp_step_height])
53 | p3 = xf + np.array([0.0, 0.0, temp_step_height / scaling_factor])
54 |
55 | x = np.array([x0[0], p1[0], p2[0], p3[0], xf[0]])
56 | y = np.array([x0[1], p1[1], p2[1], p3[1], xf[1]])
57 | z = np.array([x0[2], p1[2], p2[2], p3[2], xf[2]])
58 |
59 | t = np.array([0, self.half_swing_period/2, self.half_swing_period, self.half_swing_period*3/2, self.half_swing_period*2])
60 |
61 |
62 |
63 | self._curve_x = CubicSpline(t, x, bc_type=["clamped", "clamped"])
64 | self._curve_y = CubicSpline(t, y, bc_type=["clamped", "clamped"])
65 | self._curve_z = CubicSpline(t, z, bc_type=["clamped", "clamped"])
66 |
67 | # self._curve_x = Akima1DInterpolator(t, x)
68 | # self._curve_y = Akima1DInterpolator(t, y)
69 | # self._curve_z = Akima1DInterpolator(t, z)
70 |
71 | """dxdt = np.array([0, 0, 0])
72 | dydt = np.array([0, 0, 0])
73 | dzdt = np.array([0, 0, 0])
74 | scaling_factor = 1
75 |
76 | p2 = 0.5 * (x0 + xf) + np.array([0., 0., self.stepHeight / scaling_factor])
77 |
78 | x = np.array([x0[0], p2[0], xf[0]])
79 | y = np.array([x0[1], p2[1], xf[1]])
80 | z = np.array([x0[2], p2[2], xf[2]])
81 | t = np.array([0, self.half_swing_period, self.half_swing_period*2])
82 | self._curve_x = CubicHermiteSpline(t, x, dxdt)
83 | self._curve_y = CubicHermiteSpline(t, y, dydt)
84 | self._curve_z = CubicHermiteSpline(t, z, dzdt)"""
85 |
86 | self._curve_x_vel = self._curve_x.derivative()
87 | self._curve_y_vel = self._curve_y.derivative()
88 | self._curve_z_vel = self._curve_z.derivative()
89 |
90 | self._curve_x_acc = self._curve_x_vel.derivative()
91 | self._curve_y_acc = self._curve_y_vel.derivative()
92 | self._curve_z_acc = self._curve_z_vel.derivative()
93 |
94 | def compute_trajectory_references(
95 | self, swing_time: float, lift_off: np.array, touch_down: np.array, early_stance_hitmoment = -1, early_stance_hitpoint = None) -> (np.array, np.array, np.array):
96 | if early_stance_hitpoint is not None:
97 | self.createCurve(early_stance_hitpoint, touch_down, early_stance_hitmoment)
98 | # self.plot_current_curve(hitmoment)
99 | else:
100 | self.createCurve(lift_off, touch_down)
101 |
102 | position_x = self._curve_x(swing_time)
103 | position_y = self._curve_y(swing_time)
104 | position_z = self._curve_z(swing_time)
105 |
106 | position = np.array([position_x, position_y, position_z])
107 |
108 | velocity_x = self._curve_x_vel(swing_time)
109 | velocity_y = self._curve_y_vel(swing_time)
110 | velocity_z = self._curve_z_vel(swing_time)
111 |
112 | velocity = np.array([velocity_x, velocity_y, velocity_z])
113 |
114 | acceleration_x = self._curve_x_acc(swing_time)
115 | acceleration_y = self._curve_y_acc(swing_time)
116 | acceleration_z = self._curve_z_acc(swing_time)
117 |
118 | acceleration = np.array([acceleration_x, acceleration_y, acceleration_z])
119 |
120 | return position, velocity, acceleration
121 |
122 | def plot_trajectory_3d(self, curve_points: np.array) -> None:
123 | curve_points = np.array(curve_points)
124 |
125 | fig = plt.figure()
126 | ax = fig.add_subplot(111, projection="3d")
127 | ax.plot(curve_points[:, 0], curve_points[:, 1], curve_points[:, 2])
128 | ax.legend()
129 |
130 | plt.title("3D Curve")
131 | plt.show()
132 |
133 | def plot_trajectory_references(self, tp, fp, vp, ap):
134 | # Convert lists to NumPy arrays for easier plotting
135 | time_points = np.array(tp)
136 | footPosDes_points = np.array(fp)
137 | footVelDes_points = np.array(vp)
138 | footAccDes_points = np.array(ap)
139 |
140 | # Create subplots for position, velocity, and acceleration
141 | fig, axs = plt.subplots(3, 1, figsize=(8, 12))
142 |
143 | # Plot position
144 | for i in range(3):
145 | axs[0].plot(time_points, footPosDes_points[:, i], label=f"Position {i + 1}")
146 | axs[0].set_xlabel('Time')
147 | axs[0].set_ylabel('Position')
148 | axs[0].legend()
149 |
150 | # Plot velocity
151 | for i in range(3):
152 | axs[1].plot(time_points, footVelDes_points[:, i], label=f"Velocity {i + 1}")
153 | axs[1].set_xlabel('Time')
154 | axs[1].set_ylabel('Velocity')
155 | axs[1].legend()
156 |
157 | # Plot acceleration
158 | for i in range(3):
159 | axs[2].plot(time_points, footAccDes_points[:, i], label=f"Acceleration {i + 1}")
160 | axs[2].set_xlabel('Time')
161 | axs[2].set_ylabel('Acceleration')
162 | axs[2].legend()
163 |
164 | plt.tight_layout()
165 | plt.show()
166 |
167 |
168 | # Example:
169 | if __name__ == "__main__":
170 | step_height = 0.08
171 | swing_period = 0.9
172 | trajectory_generator = SwingTrajectoryGenerator(step_height=step_height, swing_period=swing_period)
173 |
174 | lift_off = np.array([0, 0, 0])
175 | touch_down = np.array([0.1, -0.2, 0.0])
176 |
177 | # Generate trajectory points
178 | simulation_dt = 0.002
179 | time_points = []
180 | position_points = []
181 | velocity_points = []
182 | acceleration_points = []
183 | i = 0
184 | for foot_swing_time in np.arange(0.000001, swing_period, 0.002):
185 | desired_foot_position, desired_foot_velocity, desired_foot_acceleration = (
186 | trajectory_generator.compute_trajectory_references(swing_time=i, lift_off=lift_off, touch_down=touch_down)
187 | )
188 | i += simulation_dt
189 |
190 | time_points.append(i)
191 | position_points.append(desired_foot_position.squeeze())
192 | velocity_points.append(desired_foot_velocity.squeeze())
193 | acceleration_points.append(desired_foot_acceleration.squeeze())
194 |
195 | # Plot the generated trajectory
196 | trajectory_generator.plot_trajectory_3d(np.array(position_points))
197 | trajectory_generator.plot_trajectory_references(time_points, position_points, velocity_points, acceleration_points)
198 |
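
A sketch of the early-stance "reflex" branch above: when a hit point and hit moment are provided, the remaining swing is re-planned from the hit point with a higher apex. Not from the source file; it assumes `quadruped_pympc` and its `config` are importable, and the hit point and moment are made-up values:

    import numpy as np
    from quadruped_pympc.helpers.swing_generators.scipy_swing_trajectory_generator import SwingTrajectoryGenerator

    gen = SwingTrajectoryGenerator(step_height=0.08, swing_period=0.9)
    lift_off = np.array([0.0, 0.0, 0.0])
    touch_down = np.array([0.1, -0.2, 0.0])
    hitpoint = np.array([0.03, -0.05, 0.02])     # where the foot touched an obstacle
    hitmoment = 0.3                              # swing time [s] at which it happened

    pos, vel, acc = gen.compute_trajectory_references(
        swing_time=0.5, lift_off=lift_off, touch_down=touch_down,
        early_stance_hitmoment=hitmoment, early_stance_hitpoint=hitpoint,
    )
    print(pos)  # re-planned swing position, passing well above the hit point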
--------------------------------------------------------------------------------
/quadruped_pympc/helpers/terrain_estimator.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | class TerrainEstimator:
5 | def __init__(self) -> None:
6 | self.terrain_roll = 0
7 | self.terrain_pitch = 0
8 | self.terrain_height = 0
9 |
10 | self.roll_activated = False
11 | self.pitch_activated = True
12 |
13 | def compute_terrain_estimation(
14 | self, base_position: np.ndarray, yaw: float, feet_pos: dict, current_contact: np.ndarray
15 |     ) -> tuple[float, float, float]:
16 | """Compute the estimated roll and pitch of the terrain based on the positions of the robot's feet.
17 |
18 | Parameters
19 | ----------
20 | base_position : (np.ndarray) The position of the robot's base in the world frame.
21 | yaw : (float) The yaw angle of the robot in radians.
22 |         feet_pos : (dict) The positions of the robot's feet in the world frame. Keys are the leg names:
23 |         front left (FL), front right (FR), rear left (RL), and rear right (RR); values are 3D positions.
24 |         current_contact : (np.ndarray) The contact state of the feet. This is a 4-element array
25 |         (1 = stance, 0 = swing).
26 |
27 | Returns
28 | -------
29 |         roll, pitch : (float) The estimated roll and pitch of the terrain in radians.
30 |         terrain_height : (float) The low-pass filtered mean height of the feet, used as the terrain height.
31 |
32 | Notes
33 | -----
34 | This function assumes that the robot is on a planar terrain and that the feet positions are measured in the
35 | world frame.
36 | """
37 | # Compute roll and pitch for each foot position
38 | # Rotation matrix R_yaw
39 | R_W2H = np.array([[np.cos(yaw), np.sin(yaw), 0], [-np.sin(yaw), np.cos(yaw), 0], [0, 0, 1]])
40 |
41 | # Extracting 3-element segments from liftoff_position_z_ and x_op_
42 | seg0 = feet_pos["FL"]
43 | seg3 = feet_pos["FR"]
44 | seg6 = feet_pos["RL"]
45 | seg9 = feet_pos["RR"]
46 |
47 | # Calculating differences
48 | # TODO: Feet position in base frame?
49 | front_difference = R_W2H @ (seg0 - base_position) - R_W2H @ (seg3 - base_position)
50 | back_difference = R_W2H @ (seg6 - base_position) - R_W2H @ (seg9 - base_position)
51 | left_difference = R_W2H @ (seg0 - base_position) - R_W2H @ (seg6 - base_position)
52 | right_difference = R_W2H @ (seg3 - base_position) - R_W2H @ (seg9 - base_position)
53 |
54 | # Calculating pitch and roll
55 | # TODO: Docstring
56 | pitch = (
57 | np.arctan(np.abs(left_difference[2]) / np.abs(left_difference[0] + 0.001))
58 | + np.arctan(np.abs(right_difference[2]) / np.abs(right_difference[0] + 0.001))
59 | ) * 0.5
60 |
61 | roll = (
62 | np.arctan(np.abs(front_difference[2]) / np.abs(front_difference[1] + 0.001))
63 | + np.arctan(np.abs(back_difference[2]) / np.abs(back_difference[1] + 0.001))
64 | ) * 0.5
65 |
66 | # Adjusting signs of pitch and roll TODO: Adjusting what and for what?
67 | if (front_difference[2] * 0.5 + back_difference[2] * 0.5) < 0:
68 | roll = -roll
69 | if (left_difference[2] * 0.5 + right_difference[2] * 0.5) > 0:
70 | pitch = -pitch
71 |
72 |
73 | if self.roll_activated:
74 | self.terrain_roll = self.terrain_roll * 0.99 + roll * 0.01
75 | else:
76 | self.terrain_roll = 0.0
77 |
78 | if self.pitch_activated:
79 | self.terrain_pitch = self.terrain_pitch * 0.99 + pitch * 0.01
80 | else:
81 | self.terrain_pitch = 0.0
82 |
83 | # Update the reference height given the foot in contact
84 | z_foot_FL = feet_pos["FL"][2]
85 | z_foot_FR = feet_pos["FR"][2]
86 | z_foot_RL = feet_pos["RL"][2]
87 | z_foot_RR = feet_pos["RR"][2]
88 | """number_foot_in_contact = current_contact[0] + \
89 | current_contact[1] + \
90 | current_contact[2] + \
91 | current_contact[3]
92 | if (number_foot_in_contact != 0):
93 | z_foot_mean_temp = (z_foot_FL * current_contact[0] + \
94 | z_foot_FR * current_contact[1] + \
95 | z_foot_RL * current_contact[2] + \
96 | z_foot_RR * current_contact[3]) / number_foot_in_contact
97 | self.terrain_height = self.terrain_height * 0.6 + z_foot_mean_temp * 0.4"""
98 |
99 | z_foot_mean_temp = (z_foot_FL + z_foot_FR + z_foot_RL + z_foot_RR) / 4
100 | self.terrain_height = self.terrain_height * 0.9 + z_foot_mean_temp * 0.1
101 |
102 |
103 | return self.terrain_roll, self.terrain_pitch, self.terrain_height
104 |
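
A minimal sketch of the estimator above (not from the source file; the feet positions are arbitrary world-frame values). Note that the returned values are low-pass filtered and that roll estimation is disabled by default (`roll_activated = False`):

    import numpy as np
    from quadruped_pympc.helpers.terrain_estimator import TerrainEstimator

    estimator = TerrainEstimator()
    feet_pos = {
        "FL": np.array([0.25, 0.15, 0.02]), "FR": np.array([0.25, -0.15, 0.02]),
        "RL": np.array([-0.25, 0.15, -0.02]), "RR": np.array([-0.25, -0.15, -0.02]),
    }
    roll, pitch, height = estimator.compute_terrain_estimation(
        base_position=np.array([0.0, 0.0, 0.30]), yaw=0.0,
        feet_pos=feet_pos, current_contact=np.ones(4),
    )
    print(roll, pitch, height)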
--------------------------------------------------------------------------------
/quadruped_pympc/helpers/velocity_modulator.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from quadruped_pympc import config as cfg
4 |
5 |
6 | class VelocityModulator:
7 | def __init__(self):
8 |
9 | self.activated = cfg.simulation_params['velocity_modulator']
10 |
11 | if cfg.robot == "aliengo":
12 | self.max_distance = 0.2
13 | elif cfg.robot == "go1" or cfg.robot == "go2":
14 | self.max_distance = 0.2
15 | else:
16 | self.max_distance = 0.2
17 |
18 | def modulate_velocities(self, ref_base_lin_vel, ref_base_ang_vel, feet_pos, hip_pos):
19 | distance_FL_to_hip_xy = np.sqrt(
20 | np.square(feet_pos.FL[0] - hip_pos.FL[0]) + np.square(feet_pos.FL[1] - hip_pos.FL[1])
21 | )
22 | distance_FR_to_hip_xy = np.sqrt(
23 | np.square(feet_pos.FR[0] - hip_pos.FR[0]) + np.square(feet_pos.FR[1] - hip_pos.FR[1])
24 | )
25 | distance_RL_to_hip_xy = np.sqrt(
26 | np.square(feet_pos.RL[0] - hip_pos.RL[0]) + np.square(feet_pos.RL[1] - hip_pos.RL[1])
27 | )
28 | distance_RR_to_hip_xy = np.sqrt(
29 | np.square(feet_pos.RR[0] - hip_pos.RR[0]) + np.square(feet_pos.RR[1] - hip_pos.RR[1])
30 | )
31 |
32 |         if np.abs(ref_base_lin_vel[0]) < 0.01 and np.abs(ref_base_lin_vel[1]) < 0.01:
33 | # If the robot is not moving, we don't need to modulate the velocities
34 | return ref_base_lin_vel, ref_base_ang_vel
35 |
36 | if (
37 | distance_FL_to_hip_xy > self.max_distance
38 | or distance_FR_to_hip_xy > self.max_distance
39 | or distance_RL_to_hip_xy > self.max_distance
40 | or distance_RR_to_hip_xy > self.max_distance
41 | ):
42 | ref_base_lin_vel = ref_base_lin_vel * 0.0
43 | ref_base_ang_vel = ref_base_ang_vel * 0.0
44 |
45 | return ref_base_lin_vel, ref_base_ang_vel
46 |
--------------------------------------------------------------------------------
/quadruped_pympc/helpers/visual_foothold_adaptation.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from gym_quadruped.utils.quadruped_utils import LegsAttr
3 |
4 | try:
5 | from virall.vfa.vfa import VFA
6 | except ImportError:
7 | print("VFA not installed, not open source yet")
8 |
9 |
10 | class VisualFootholdAdaptation:
11 | def __init__(self, legs_order, adaptation_strategy='height'):
12 | self.footholds_adaptation = LegsAttr(
13 | FL=np.array([0, 0, 0]), FR=np.array([0, 0, 0]), RL=np.array([0, 0, 0]), RR=np.array([0, 0, 0])
14 | )
15 | self.footholds_constraints = LegsAttr(FL=None, FR=None, RL=None, RR=None)
16 | self.initialized = False
17 |
18 | self.adaptation_strategy = adaptation_strategy
19 |
20 | if self.adaptation_strategy == 'vfa':
21 | self.vfa_evaluators = LegsAttr(FL=None, FR=None, RL=None, RR=None)
22 | for leg_id, leg_name in enumerate(legs_order):
23 | self.vfa_evaluators[leg_name] = VFA(leg=leg_name)
24 |
25 | def update_footholds_adaptation(self, update_footholds_adaptation):
26 | self.footholds_adaptation = update_footholds_adaptation
27 | self.initialized = True
28 |
29 | def reset(self):
30 | self.initialized = False
31 |
32 | def get_footholds_adapted(self, reference_footholds):
33 | # If the adaptation is not initialized, return the reference footholds
34 |         if not self.initialized:
35 | self.footholds_adaptation = reference_footholds
36 | return reference_footholds, self.footholds_constraints
37 | else:
38 | return self.footholds_adaptation, self.footholds_constraints
39 |
40 | def get_heightmap_coordinates_foothold_id(self, heightmaps, foothold_id, leg_name):
41 | r = round(foothold_id.item() / heightmaps[leg_name].n)
42 | c = round(foothold_id.item() % heightmaps[leg_name].n)
43 |
44 | if r >= heightmaps[leg_name].n:
45 | r = heightmaps[leg_name].n - 1
46 | if c >= heightmaps[leg_name].n:
47 | c = heightmaps[leg_name].n - 1
48 |
49 | return r, c
50 |
51 | def compute_adaptation(
52 | self,
53 | legs_order,
54 | reference_footholds,
55 | hip_positions,
56 | heightmaps,
57 | forward_vel,
58 | base_orientation,
59 | base_orientation_rate,
60 | ):
61 | for leg_id, leg_name in enumerate(legs_order):
62 | if heightmaps[leg_name].data is None:
63 | return False
64 |
65 | if self.adaptation_strategy == 'height':
66 | for leg_id, leg_name in enumerate(legs_order):
67 | height_adjustment = heightmaps[leg_name].get_height(reference_footholds[leg_name])
68 | if height_adjustment is not None:
69 | reference_footholds[leg_name][2] = height_adjustment
70 |
71 | elif self.adaptation_strategy == 'vfa':
72 | gait_phases = 0.0 # for now
73 | for leg_id, leg_name in enumerate(legs_order):
74 | # Transform the heightmap in hip frame
75 |
76 | heightmap = heightmaps[leg_name].data
77 | hip_position = hip_positions[leg_name]
78 |
79 | heightmap_hip_frame = heightmap[:, :, 0, 2] - hip_position[2]
80 |
81 | convex_data, _, safe_map, info = self.vfa_evaluators[leg_name](
82 | heightmap_data=heightmap_hip_frame,
83 | base_linear_velocity=forward_vel,
84 | base_orientation=base_orientation,
85 | base_orientation_rate=base_orientation_rate,
86 | gait_phase=gait_phases,
87 | return_info=True,
88 | )
89 |
90 | best_foothold_id = convex_data[0][0]
91 | best_convex_area_vertices_id = [convex_data[1][0].x1, convex_data[1][0].y2]
92 |
93 | r, c = self.get_heightmap_coordinates_foothold_id(heightmaps, best_foothold_id, leg_name)
94 |
95 | reference_footholds[leg_name][0:2] = heightmap[r, c, 0, :][0:2]
96 |
97 | height_adjustment = heightmaps[leg_name].get_height(reference_footholds[leg_name])
98 | if height_adjustment is not None:
99 | reference_footholds[leg_name][2] = height_adjustment
100 |
101 | r_vertex1, c_vertex1 = self.get_heightmap_coordinates_foothold_id(
102 | heightmaps, np.array([best_convex_area_vertices_id[0]]), leg_name
103 | )
104 | r_vertex2, c_vertex2 = self.get_heightmap_coordinates_foothold_id(
105 | heightmaps, np.array([best_convex_area_vertices_id[1]]), leg_name
106 | )
107 | vertex1_world_frame = heightmap[r_vertex1, c_vertex1, 0, :]
108 | vertex2_world_frame = heightmap[r_vertex2, c_vertex2, 0, :]
109 |
110 | self.footholds_constraints[leg_name] = [vertex1_world_frame, vertex2_world_frame]
111 |
112 | # print("Safe map: ", safe_map)
113 |
114 | self.update_footholds_adaptation(reference_footholds)
115 |
116 | return True
117 |
--------------------------------------------------------------------------------
/quadruped_pympc/interfaces/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/iit-DLSLab/Quadruped-PyMPC/3b289c089ce103733071455760ccbbaef9a9672c/quadruped_pympc/interfaces/__init__.py
--------------------------------------------------------------------------------
/quadruped_pympc/interfaces/srbd_batched_controller_interface.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from quadruped_pympc import config as cfg
4 | from quadruped_pympc.helpers.periodic_gait_generator import PeriodicGaitGenerator
5 |
6 |
7 |
8 | class SRBDBatchedControllerInterface:
9 | """This is an interface for a batched controller that uses the SRBD method to optimize the gait"""
10 |
11 | def __init__(self):
12 | """Constructor for the SRBD batched controller interface"""
13 |
14 | self.type = cfg.mpc_params['type']
15 | self.mpc_dt = cfg.mpc_params['dt']
16 | self.horizon = cfg.mpc_params['horizon']
17 | self.optimize_step_freq = cfg.mpc_params['optimize_step_freq']
18 | self.step_freq_available = cfg.mpc_params['step_freq_available']
19 |
20 | from quadruped_pympc.controllers.gradient.nominal.centroidal_nmpc_gait_adaptive import Acados_NMPC_GaitAdaptive
21 |
22 | self.batched_controller = Acados_NMPC_GaitAdaptive()
23 |
24 |         # In the case of nonuniform discretization, create a list of dts and horizon lengths, one per discretization level
25 | if cfg.mpc_params['use_nonuniform_discretization']:
26 | self.contact_sequence_dts = [cfg.mpc_params['dt_fine_grained'], self.mpc_dt]
27 | self.contact_sequence_lenghts = [cfg.mpc_params['horizon_fine_grained'], self.horizon]
28 | else:
29 | self.contact_sequence_dts = [self.mpc_dt]
30 | self.contact_sequence_lenghts = [self.horizon]
31 |
32 | def optimize_gait(
33 | self,
34 | state_current: dict,
35 | ref_state: dict,
36 | inertia: np.ndarray,
37 | pgg_phase_signal: np.ndarray,
38 | pgg_step_freq: float,
39 | pgg_duty_factor: float,
40 | pgg_gait_type: int,
41 | optimize_swing: int,
42 | ) -> float:
43 | """Optimize the gait using the SRBD method
44 | TODO: remove the unused arguments, and not pass pgg but rather its values
45 |
46 | Args:
47 | state_current (dict): The current state of the robot
48 | ref_state (dict): The reference state of the robot
49 | inertia (np.ndarray): The inertia of the robot
50 | pgg_phase_signal (np.ndarray): The periodic gait generator phase signal of the legs (from 0 to 1)
51 | pgg_step_freq (float): The step frequency of the periodic gait generator
52 | pgg_duty_factor (float): The duty factor of the periodic gait generator
53 | pgg_gait_type (int): The gait type of the periodic gait generator
54 |             optimize_swing (int): The flag to optimize the swing timing (1 to enable, 0 to disable).
55 |                 Note: the contact sequence dts and lengths are taken from the controller
56 |                 configuration (see __init__) rather than being passed as arguments.
57 |
58 | Returns:
59 | float: The best sample frequency
60 | """
61 |
62 | best_sample_freq = pgg_step_freq
63 | if self.optimize_step_freq and optimize_swing == 1:
64 | contact_sequence_temp = np.zeros((len(self.step_freq_available), 4, self.horizon))
65 | for j in range(len(self.step_freq_available)):
66 | pgg_temp = PeriodicGaitGenerator(
67 | duty_factor=pgg_duty_factor,
68 | step_freq=self.step_freq_available[j],
69 | gait_type=pgg_gait_type,
70 | horizon=self.horizon,
71 | )
72 | pgg_temp.set_phase_signal(pgg_phase_signal)
73 | contact_sequence_temp[j] = pgg_temp.compute_contact_sequence(
74 | contact_sequence_dts=self.contact_sequence_dts,
75 | contact_sequence_lenghts=self.contact_sequence_lenghts,
76 | )
77 |
78 | costs, best_sample_freq = self.batched_controller.compute_batch_control(
79 | state_current, ref_state, contact_sequence_temp
80 | )
81 |
82 | return best_sample_freq
83 |
--------------------------------------------------------------------------------
/quadruped_pympc/interfaces/srbd_controller_interface.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from gym_quadruped.utils.quadruped_utils import LegsAttr
3 |
4 | from quadruped_pympc import config as cfg
5 |
6 |
7 | class SRBDControllerInterface:
8 | """This is an interface for a controller that uses the SRBD method to optimize the gait"""
9 |
10 | def __init__(self):
11 | """Constructor for the SRBD controller interface"""
12 |
13 | self.type = cfg.mpc_params['type']
14 | self.mpc_dt = cfg.mpc_params['dt']
15 | self.horizon = cfg.mpc_params['horizon']
16 | self.optimize_step_freq = cfg.mpc_params['optimize_step_freq']
17 | self.step_freq_available = cfg.mpc_params['step_freq_available']
18 |
19 | self.previous_contact_mpc = np.array([1, 1, 1, 1])
20 |
21 |         # 'nominal' optimizes the GRFs directly
22 |         # 'input_rates' optimizes the delta GRFs
23 |         # 'sampling' is a GPU-based MPC that samples the GRFs
24 |         # 'collaborative' optimizes the GRFs directly and includes a passive arm model
25 |         # 'lyapunov' optimizes the GRFs directly and adds a Lyapunov-based stability constraint
26 |         # 'kinodynamic' is SRBD with joints - experimental
27 | if self.type == "nominal":
28 | from quadruped_pympc.controllers.gradient.nominal.centroidal_nmpc_nominal import Acados_NMPC_Nominal
29 |
30 | self.controller = Acados_NMPC_Nominal()
31 |
32 | if self.optimize_step_freq:
33 | from quadruped_pympc.controllers.gradient.nominal.centroidal_nmpc_gait_adaptive import (
34 | Acados_NMPC_GaitAdaptive,
35 | )
36 |
37 | self.batched_controller = Acados_NMPC_GaitAdaptive()
38 |
39 | elif self.type == 'input_rates':
40 | from quadruped_pympc.controllers.gradient.input_rates.centroidal_nmpc_input_rates import (
41 | Acados_NMPC_InputRates,
42 | )
43 |
44 | self.controller = Acados_NMPC_InputRates()
45 |
46 | if self.optimize_step_freq:
47 | from quadruped_pympc.controllers.gradient.nominal.centroidal_nmpc_gait_adaptive import (
48 | Acados_NMPC_GaitAdaptive,
49 | )
50 |
51 | self.batched_controller = Acados_NMPC_GaitAdaptive()
52 |
53 | elif cfg.mpc_params['type'] == 'lyapunov':
54 | from quadruped_pympc.controllers.gradient.lyapunov.centroidal_nmpc_lyapunov import Acados_NMPC_Lyapunov
55 |
56 | self.controller = Acados_NMPC_Lyapunov()
57 |
58 | if self.optimize_step_freq:
59 | from quadruped_pympc.controllers.gradient.nominal.centroidal_nmpc_gait_adaptive import (
60 | Acados_NMPC_GaitAdaptive,
61 | )
62 |
63 | self.batched_controller = Acados_NMPC_GaitAdaptive()
64 |
65 | elif cfg.mpc_params['type'] == 'kinodynamic':
66 | from quadruped_pympc.controllers.gradient.kinodynamic.kinodynamic_nmpc import Acados_NMPC_KinoDynamic
67 |
68 | self.controller = Acados_NMPC_KinoDynamic()
69 |
70 | if self.optimize_step_freq:
71 | from quadruped_pympc.controllers.gradient.nominal.centroidal_nmpc_gait_adaptive import (
72 | Acados_NMPC_GaitAdaptive,
73 | )
74 |
75 | self.batched_controller = Acados_NMPC_GaitAdaptive()
76 |
77 | elif self.type == "sampling":
78 | if self.optimize_step_freq:
79 | from quadruped_pympc.controllers.sampling.centroidal_nmpc_jax_gait_adaptive import Sampling_MPC
80 | else:
81 | from quadruped_pympc.controllers.sampling.centroidal_nmpc_jax import Sampling_MPC
82 |
83 | self.controller = Sampling_MPC()
84 |
85 | def compute_control(
86 | self,
87 | state_current: dict,
88 | ref_state: dict,
89 | contact_sequence: np.ndarray,
90 | inertia: np.ndarray,
91 | pgg_phase_signal: np.ndarray,
92 | pgg_step_freq: float,
93 | optimize_swing: int,
94 | external_wrenches: np.ndarray = np.zeros((6,)),
95 | ) -> [LegsAttr, LegsAttr, LegsAttr, LegsAttr, LegsAttr, float]:
96 | """Compute the control using the SRBD method
97 |
98 | Args:
99 | state_current (dict): The current state of the robot
100 | ref_state (dict): The reference state of the robot
101 | contact_sequence (np.ndarray): The contact sequence of the robot
102 | inertia (np.ndarray): The inertia of the robot
103 | pgg_phase_signal (np.ndarray): The periodic gait generator phase signal of the legs (from 0 to 1)
104 | pgg_step_freq (float): The step frequency of the periodic gait generator
105 | optimize_swing (int): The flag to optimize the swing
106 | external_wrenches (np.ndarray): The external wrench applied to the robot to compensate
107 |
108 | Returns:
109 |             tuple: The GRFs, the footholds in world frame, the joint references (kinodynamic MPC only),
110 |             the best sample frequency (updated only by the sampling controller), and the predicted state
111 | """
112 |
113 | current_contact = np.array(
114 | [contact_sequence[0][0], contact_sequence[1][0], contact_sequence[2][0], contact_sequence[3][0]]
115 | )
116 |
117 | # If we use sampling
118 | if self.type == 'sampling':
119 | # Convert data to jax and shift previous solution
120 | state_current_jax, reference_state_jax = self.controller.prepare_state_and_reference(
121 | state_current, ref_state, current_contact, self.previous_contact_mpc
122 | )
123 | self.previous_contact_mpc = current_contact
124 |
125 | for iter_sampling in range(self.controller.num_sampling_iterations):
126 | self.controller = self.controller.with_newkey()
127 | if self.controller.sampling_method == 'cem_mppi':
128 | if iter_sampling == 0:
129 | self.controller = self.controller.with_newsigma(cfg.mpc_params['sigma_cem_mppi'])
130 |
131 | (
132 | nmpc_GRFs,
133 | nmpc_footholds,
134 | nmpc_predicted_state,
135 | self.controller.best_control_parameters,
136 | best_cost,
137 | best_sample_freq,
138 | costs,
139 | sigma_cem_mppi,
140 | ) = self.controller.jitted_compute_control(
141 | state_current_jax,
142 | reference_state_jax,
143 | contact_sequence,
144 | self.controller.best_control_parameters,
145 | self.controller.master_key,
146 | self.controller.sigma_cem_mppi,
147 | )
148 | self.controller = self.controller.with_newsigma(sigma_cem_mppi)
149 | else:
150 | nominal_sample_freq = pgg_step_freq
151 | (
152 | nmpc_GRFs,
153 | nmpc_footholds,
154 | nmpc_predicted_state,
155 | self.controller.best_control_parameters,
156 | best_cost,
157 | best_sample_freq,
158 | costs,
159 | ) = self.controller.jitted_compute_control(
160 | state_current_jax,
161 | reference_state_jax,
162 | contact_sequence,
163 | self.controller.best_control_parameters,
164 | self.controller.master_key,
165 | pgg_phase_signal,
166 | nominal_sample_freq,
167 | optimize_swing,
168 | )
169 |
170 | nmpc_footholds = LegsAttr(
171 | FL=ref_state["ref_foot_FL"][0],
172 | FR=ref_state["ref_foot_FR"][0],
173 | RL=ref_state["ref_foot_RL"][0],
174 | RR=ref_state["ref_foot_RR"][0],
175 | )
176 | nmpc_GRFs = np.array(nmpc_GRFs)
177 |
178 | nmpc_joints_pos = None
179 | nmpc_joints_vel = None
180 | nmpc_joints_acc = None
181 |
182 | # If we use Gradient-Based MPC
183 | else:
184 | if self.type == 'kinodynamic':
185 | (
186 | nmpc_GRFs,
187 | nmpc_footholds,
188 | nmpc_joints_pos,
189 | nmpc_joints_vel,
190 | nmpc_joints_acc,
191 | nmpc_predicted_state,
192 | status,
193 | ) = self.controller.compute_control(
194 | state_current, ref_state, contact_sequence, inertia=inertia, external_wrenches=external_wrenches
195 | )
196 |
197 | nmpc_joints_pos = LegsAttr(
198 | FL=nmpc_joints_pos[0:3], FR=nmpc_joints_pos[3:6], RL=nmpc_joints_pos[6:9], RR=nmpc_joints_pos[9:12]
199 | )
200 |
201 | nmpc_joints_vel = LegsAttr(
202 | FL=nmpc_joints_vel[0:3], FR=nmpc_joints_vel[3:6], RL=nmpc_joints_vel[6:9], RR=nmpc_joints_vel[9:12]
203 | )
204 |
205 | nmpc_joints_acc = LegsAttr(
206 | FL=nmpc_joints_acc[0:3], FR=nmpc_joints_acc[3:6], RL=nmpc_joints_acc[6:9], RR=nmpc_joints_acc[9:12]
207 | )
208 |
209 | else:
210 | nmpc_GRFs, nmpc_footholds, nmpc_predicted_state, _ = self.controller.compute_control(
211 | state_current, ref_state, contact_sequence, inertia=inertia, external_wrenches=external_wrenches
212 | )
213 |
214 | nmpc_joints_pos = None
215 | nmpc_joints_vel = None
216 | nmpc_joints_acc = None
217 |
218 | nmpc_footholds = LegsAttr(
219 | FL=nmpc_footholds[0], FR=nmpc_footholds[1], RL=nmpc_footholds[2], RR=nmpc_footholds[3]
220 | )
221 |
222 | best_sample_freq = pgg_step_freq
223 |
224 | # TODO: Indexing should not be hardcoded. Env should provide indexing of leg actuator dimensions.
225 | nmpc_GRFs = LegsAttr(
226 | FL=nmpc_GRFs[0:3] * current_contact[0],
227 | FR=nmpc_GRFs[3:6] * current_contact[1],
228 | RL=nmpc_GRFs[6:9] * current_contact[2],
229 | RR=nmpc_GRFs[9:12] * current_contact[3],
230 | )
231 |
232 | return (
233 | nmpc_GRFs,
234 | nmpc_footholds,
235 | nmpc_joints_pos,
236 | nmpc_joints_vel,
237 | nmpc_joints_acc,
238 | best_sample_freq,
239 | nmpc_predicted_state,
240 | )
241 |
242 | def compute_RTI(self):
243 | self.controller.acados_ocp_solver.options_set("rti_phase", 1)
244 | self.controller.acados_ocp_solver.solve()
245 |         # print("preparation phase time: ", self.controller.acados_ocp_solver.get_stats('time_tot'))
246 |
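
Note on compute_RTI() above: it only runs the acados preparation phase (rti_phase = 1), which the wrapper calls right after the feedback solve performed inside compute_control(). As a hedged illustration, the usual two-phase real-time-iteration split with an already-built AcadosOcpSolver looks roughly like the sketch below; the variable name `solver` and the helper function are illustrative, not part of this repo.

def rti_step(solver, x0):
    # Phase 1: preparation -- linearize around the previous solution; this can run
    # before the new state measurement is available.
    solver.options_set("rti_phase", 1)
    solver.solve()

    # Phase 2: feedback -- clamp the initial state to the fresh measurement and solve the QP.
    solver.set(0, "lbx", x0)
    solver.set(0, "ubx", x0)
    solver.options_set("rti_phase", 2)
    status = solver.solve()

    # Return the first control input of the optimized trajectory.
    return solver.get(0, "u"), status
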
--------------------------------------------------------------------------------
/quadruped_pympc/quadruped_pympc_wrapper.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from gym_quadruped.utils.quadruped_utils import LegsAttr
3 |
4 | from quadruped_pympc import config as cfg
5 | from quadruped_pympc.interfaces.srbd_batched_controller_interface import SRBDBatchedControllerInterface
6 | from quadruped_pympc.interfaces.srbd_controller_interface import SRBDControllerInterface
7 | from quadruped_pympc.interfaces.wb_interface import WBInterface
8 |
9 | _DEFAULT_OBS = ("ref_base_height", "ref_base_angles", "nmpc_GRFs", "nmpc_footholds", "swing_time")
10 |
11 |
12 | class QuadrupedPyMPC_Wrapper:
13 |     """A simple wrapper class around all the MPC submodules (swing trajectory, contact generator, and the MPC itself)."""
14 |
15 | def __init__(
16 | self,
17 | initial_feet_pos: LegsAttr,
18 | legs_order: tuple[str, str, str, str] = ('FL', 'FR', 'RL', 'RR'),
19 | feet_geom_id: LegsAttr = None,
20 | quadrupedpympc_observables_names: tuple[str, ...] = _DEFAULT_OBS,
21 | ):
22 | """Constructor of the QuadrupedPyMPC_Wrapper class.
23 |
24 | Args:
25 | initial_feet_pos (LegsAttr): initial feet positions, otherwise they will be all zero.
26 |             legs_order (tuple[str, str, str, str], optional): order of the legs. Defaults to ('FL', 'FR', 'RL', 'RR').
27 |             quadrupedpympc_observables_names (tuple[str, ...], optional): list of observables to save. Defaults to _DEFAULT_OBS.
28 | """
29 |
30 | self.mpc_frequency = cfg.simulation_params["mpc_frequency"]
31 |
32 | self.srbd_controller_interface = SRBDControllerInterface()
33 |
34 | if cfg.mpc_params['type'] != 'sampling' and cfg.mpc_params['optimize_step_freq']:
35 | self.srbd_batched_controller_interface = SRBDBatchedControllerInterface()
36 |
37 |         self.wb_interface = WBInterface(initial_feet_pos=initial_feet_pos(frame='world'), legs_order=legs_order, feet_geom_id=feet_geom_id)
38 |
39 | self.nmpc_GRFs = LegsAttr(FL=np.zeros(3), FR=np.zeros(3), RL=np.zeros(3), RR=np.zeros(3))
40 | self.nmpc_footholds = LegsAttr(FL=np.zeros(3), FR=np.zeros(3), RL=np.zeros(3), RR=np.zeros(3))
41 | self.nmpc_joints_pos = LegsAttr(FL=np.zeros(3), FR=np.zeros(3), RL=np.zeros(3), RR=np.zeros(3))
42 | self.nmpc_joints_vel = LegsAttr(FL=np.zeros(3), FR=np.zeros(3), RL=np.zeros(3), RR=np.zeros(3))
43 | self.nmpc_joints_acc = LegsAttr(FL=np.zeros(3), FR=np.zeros(3), RL=np.zeros(3), RR=np.zeros(3))
44 | self.nmpc_predicted_state = np.zeros(12)
45 | self.best_sample_freq = self.wb_interface.pgg.step_freq
46 |
47 | self.quadrupedpympc_observables_names = quadrupedpympc_observables_names
48 | self.quadrupedpympc_observables = {}
49 |
50 | def compute_actions(
51 | self,
52 | com_pos: np.ndarray,
53 | base_pos: np.ndarray,
54 | base_lin_vel: np.ndarray,
55 | base_ori_euler_xyz: np.ndarray,
56 | base_ang_vel: np.ndarray,
57 | feet_pos: LegsAttr,
58 | hip_pos: LegsAttr,
59 | joints_pos: LegsAttr,
60 | heightmaps,
61 | legs_order: tuple[str, str, str, str],
62 | simulation_dt: float,
63 | ref_base_lin_vel: np.ndarray,
64 | ref_base_ang_vel: np.ndarray,
65 | step_num: int,
66 | qpos: np.ndarray,
67 | qvel: np.ndarray,
68 | feet_jac: LegsAttr,
69 | feet_jac_dot: LegsAttr,
70 | feet_vel: LegsAttr,
71 | legs_qfrc_passive: LegsAttr,
72 | legs_qfrc_bias: LegsAttr,
73 | legs_mass_matrix: LegsAttr,
74 | legs_qpos_idx: LegsAttr,
75 | legs_qvel_idx: LegsAttr,
76 | tau: LegsAttr,
77 | inertia: np.ndarray,
78 | mujoco_contact: np.ndarray,
79 | ) -> LegsAttr:
80 | """Given the current state of the robot (and the reference),
81 | compute the torques to be applied to the motors.
82 |
83 | Args:
84 |             com_pos (np.ndarray): center of mass position in world frame
85 | base_pos (np.ndarray): base position in world frame
86 | base_lin_vel (np.ndarray): base velocity in world frame
87 | base_ori_euler_xyz (np.ndarray): base orientation in world frame
88 | base_ang_vel (np.ndarray): base angular velocity in base frame
89 | feet_pos (LegsAttr): locations of the feet in world frame
90 | hip_pos (LegsAttr): locations of the hip in world frame
91 | heightmaps (_type_): TODO
92 | legs_order (tuple[str, str, str, str]): order of the legs
93 | simulation_dt (float): simulation time step
94 | ref_base_lin_vel (np.ndarray): reference base linear velocity in world frame
95 | ref_base_ang_vel (np.ndarray): reference base angular velocity in world frame
96 | step_num (int): current step number of the environment
97 | qpos (np.ndarray): joint positions
98 | qvel (np.ndarray): joint velocities
99 | feet_jac (LegsAttr): jacobian of the feet
100 | feet_jac_dot (LegsAttr): derivative of the jacobian of the feet
101 | feet_vel (LegsAttr): velocity of the feet
102 | legs_qfrc_passive (LegsAttr): passive forces acting on the joints
103 | legs_qfrc_bias (LegsAttr): gravity compensation, coriolis and centrifugal forces
104 | legs_mass_matrix (LegsAttr): mass matrix of the legs
105 | legs_qvel_idx (LegsAttr): indices of the joint velocities
106 | tau (LegsAttr): joint torques
107 | inertia (np.ndarray): inertia matrix of the robot (CCRBI)
108 |
109 | Returns:
110 | LegsAttr: torques to be applied to the motors
111 | """
112 |
113 | # Update the state and reference -------------------------
114 | state_current, ref_state, contact_sequence, step_height, optimize_swing = (
115 | self.wb_interface.update_state_and_reference(
116 | com_pos,
117 | base_pos,
118 | base_lin_vel,
119 | base_ori_euler_xyz,
120 | base_ang_vel,
121 | feet_pos,
122 | hip_pos,
123 | joints_pos,
124 | heightmaps,
125 | legs_order,
126 | simulation_dt,
127 | ref_base_lin_vel,
128 | ref_base_ang_vel,
129 | mujoco_contact,
130 | )
131 | )
132 |
133 | # Solve OCP ---------------------------------------------------------------------------------------
134 | if step_num % round(1 / (self.mpc_frequency * simulation_dt)) == 0:
135 | (
136 | self.nmpc_GRFs,
137 | self.nmpc_footholds,
138 | self.nmpc_joints_pos,
139 | self.nmpc_joints_vel,
140 | self.nmpc_joints_acc,
141 | self.best_sample_freq,
142 | self.nmpc_predicted_state,
143 | ) = self.srbd_controller_interface.compute_control(
144 | state_current,
145 | ref_state,
146 | contact_sequence,
147 | inertia,
148 | self.wb_interface.pgg.phase_signal,
149 | self.wb_interface.pgg.step_freq,
150 | optimize_swing,
151 | )
152 |
153 | if cfg.mpc_params['type'] != 'sampling' and cfg.mpc_params['use_RTI']:
154 |                 # If the controller is gradient-based and uses RTI, we run the preparation phase right after the solve.
155 |                 # This helps to minimize the delay between receiving a new state and computing the control on a real robot.
156 | self.srbd_controller_interface.compute_RTI()
157 |
158 | # Update the gait
159 | if cfg.mpc_params['type'] != 'sampling' and cfg.mpc_params['optimize_step_freq']:
160 | self.best_sample_freq = self.srbd_batched_controller_interface.optimize_gait(
161 | state_current,
162 | ref_state,
163 | inertia,
164 | self.wb_interface.pgg.phase_signal,
165 | self.wb_interface.pgg.step_freq,
166 | self.wb_interface.pgg.duty_factor,
167 | self.wb_interface.pgg.gait_type,
168 | optimize_swing,
169 | )
170 |
171 | # Compute Swing and Stance Torque ---------------------------------------------------------------------------
172 | tau, des_joints_pos, des_joints_vel = self.wb_interface.compute_stance_and_swing_torque(
173 | simulation_dt,
174 | qpos,
175 | qvel,
176 | feet_jac,
177 | feet_jac_dot,
178 | feet_pos,
179 | feet_vel,
180 | legs_qfrc_passive,
181 | legs_qfrc_bias,
182 | legs_mass_matrix,
183 | self.nmpc_GRFs,
184 | self.nmpc_footholds,
185 | legs_qpos_idx,
186 | legs_qvel_idx,
187 | tau,
188 | optimize_swing,
189 | self.best_sample_freq,
190 | self.nmpc_joints_pos,
191 | self.nmpc_joints_vel,
192 | self.nmpc_joints_acc,
193 | self.nmpc_predicted_state,
194 | mujoco_contact,
195 | )
196 |
197 | # Do some PD control over the joints (these values are normally passed
198 | # to a low-level motor controller, here we can try to simulate it)
199 | kp_joint_motor = cfg.simulation_params['impedence_joint_position_gain']
200 | kd_joint_motor = cfg.simulation_params['impedence_joint_velocity_gain']
201 | # for leg in legs_order:
202 | # tau[leg] += kp_joint_motor * (des_joints_pos[leg] - qpos[legs_qpos_idx[leg]]) + \
203 | # kd_joint_motor * (des_joints_vel[leg] - qvel[legs_qvel_idx[leg]])
204 |
205 | # Save some observables -------------------------------------------------------------------------------------
206 | self.quadrupedpympc_observables = {}
207 | for obs_name in self.quadrupedpympc_observables_names:
208 | if obs_name == 'ref_base_height':
209 | data = {'ref_base_height': ref_state['ref_position'][2]}
210 | elif obs_name == 'ref_base_angles':
211 | data = {'ref_base_angles': ref_state['ref_orientation']}
212 | elif obs_name == 'ref_feet_pos':
213 | ref_feet_pos = LegsAttr(
214 | FL=ref_state['ref_foot_FL'].reshape(3, 1),
215 | FR=ref_state['ref_foot_FR'].reshape(3, 1),
216 | RL=ref_state['ref_foot_RL'].reshape(3, 1),
217 | RR=ref_state['ref_foot_RR'].reshape(3, 1),
218 | )
219 | data = {'ref_feet_pos': ref_feet_pos}
220 | elif obs_name == 'ref_feet_constraints':
221 | ref_feet_constraints = LegsAttr(
222 | FL=ref_state['ref_foot_FL_constraints'],
223 | FR=ref_state['ref_foot_FR_constraints'],
224 | RL=ref_state['ref_foot_RL_constraints'],
225 | RR=ref_state['ref_foot_RR_constraints'],
226 | )
227 | data = {'ref_feet_constraints': ref_feet_constraints}
228 | elif obs_name == 'nmpc_GRFs':
229 | data = {'nmpc_GRFs': self.nmpc_GRFs}
230 | elif obs_name == 'nmpc_footholds':
231 | data = {'nmpc_footholds': self.nmpc_footholds}
232 | elif obs_name == 'swing_time':
233 | data = {'swing_time': self.wb_interface.stc.swing_time}
234 | elif obs_name == 'phase_signal':
235 | data = {'phase_signal': self.wb_interface.pgg._phase_signal}
236 | elif obs_name == 'lift_off_positions':
237 | data = {'lift_off_positions': self.wb_interface.frg.lift_off_positions}
238 |
239 | else:
240 | data = {}
241 | raise ValueError(f"Unknown observable name: {obs_name}")
242 |
243 | self.quadrupedpympc_observables.update(data)
244 |
245 | return tau
246 |
247 | def get_obs(self) -> dict:
248 |         """Get some user-defined observables from within the control loop.
249 |
250 | Returns:
251 | Dict: dictionary of observables
252 | """
253 | return self.quadrupedpympc_observables
254 |
255 | def reset(self, initial_feet_pos: LegsAttr):
256 | """Reset the controller."""
257 |
258 | self.wb_interface.reset(initial_feet_pos)
259 | self.srbd_controller_interface.controller.reset()
260 |
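
A small, self-contained illustration of the MPC decimation used in compute_actions() above: the OCP is re-solved only when step_num is a multiple of round(1 / (mpc_frequency * simulation_dt)), while torques are recomputed at every physics step. The numbers below are assumptions for illustration, not values taken from config.py.

# Illustrative decimation example (values are assumptions, not from config.py).
simulation_dt = 0.002   # physics step of 2 ms
mpc_frequency = 100.0   # re-solve the OCP at 100 Hz
decimation = round(1 / (mpc_frequency * simulation_dt))  # -> 5 physics steps per MPC solve

for step_num in range(10):
    solve_mpc = step_num % decimation == 0
    print(step_num, "solve OCP" if solve_mpc else "reuse last GRFs/footholds")
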
--------------------------------------------------------------------------------
/ros2/msgs_ws/src/dls2_msgs/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.8)
2 | project(dls2_msgs)
3 |
4 | if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
5 | add_compile_options(-Wall -Wextra -Wpedantic)
6 | endif()
7 |
8 | # find dependencies
9 | find_package(ament_cmake REQUIRED)
10 | # uncomment the following section in order to fill in
11 | # further dependencies manually.
12 | # find_package( REQUIRED)
13 |
14 | if(BUILD_TESTING)
15 | find_package(ament_lint_auto REQUIRED)
16 | # the following line skips the linter which checks for copyrights
17 | # comment the line when a copyright and license is added to all source files
18 | set(ament_cmake_copyright_FOUND TRUE)
19 | # the following line skips cpplint (only works in a git repo)
20 | # comment the line when this package is in a git repo and when
21 | # a copyright and license is added to all source files
22 | set(ament_cmake_cpplint_FOUND TRUE)
23 | ament_lint_auto_find_test_dependencies()
24 | endif()
25 |
26 | find_package(rosidl_default_generators REQUIRED)
27 |
28 | rosidl_generate_interfaces(${PROJECT_NAME}
29 | "msg/BlindStateMsg.msg"
30 | "msg/BaseStateMsg.msg"
31 | "msg/ControlSignalMsg.msg"
32 | "msg/TrajectoryGeneratorMsg.msg"
33 | )
34 |
35 | ament_package()
36 |
--------------------------------------------------------------------------------
/ros2/msgs_ws/src/dls2_msgs/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 | APPENDIX: How to apply the Apache License to your work.
180 |
181 | To apply the Apache License to your work, attach the following
182 | boilerplate notice, with the fields enclosed by brackets "[]"
183 | replaced with your own identifying information. (Don't include
184 | the brackets!) The text should be enclosed in the appropriate
185 | comment syntax for the file format. We also recommend that a
186 | file or class name and description of purpose be included on the
187 | same "printed page" as the copyright notice for easier
188 | identification within third-party archives.
189 |
190 | Copyright [yyyy] [name of copyright owner]
191 |
192 | Licensed under the Apache License, Version 2.0 (the "License");
193 | you may not use this file except in compliance with the License.
194 | You may obtain a copy of the License at
195 |
196 | http://www.apache.org/licenses/LICENSE-2.0
197 |
198 | Unless required by applicable law or agreed to in writing, software
199 | distributed under the License is distributed on an "AS IS" BASIS,
200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 | See the License for the specific language governing permissions and
202 | limitations under the License.
203 |
--------------------------------------------------------------------------------
/ros2/msgs_ws/src/dls2_msgs/msg/BaseStateMsg.msg:
--------------------------------------------------------------------------------
1 | # Header
2 | string frame_id
3 | uint32 sequence_id
4 | float64 timestamp
5 |
6 | string robot_name
7 |
8 | float64[3] position
9 | float64[4] orientation
10 |
11 | # Base velocity
12 | float64[3] linear_velocity
13 | float64[3] angular_velocity
14 |
15 | # Base acceleration
16 | float64[3] linear_acceleration
17 | float64[3] angular_acceleration
18 |
19 | # Stance status
20 | float64[4] stance_status
21 |
--------------------------------------------------------------------------------
/ros2/msgs_ws/src/dls2_msgs/msg/BlindStateMsg.msg:
--------------------------------------------------------------------------------
1 | # Header
2 | string frame_id
3 | uint32 sequence_id
4 | float64 timestamp
5 |
6 | string robot_name
7 |
8 | string[12] joints_name
9 | float64[12] joints_position
10 | float64[12] joints_velocity
11 | float64[12] joints_acceleration
12 | float64[12] joints_effort
13 | float64[12] joints_temperature
14 |
15 | float64[4] feet_contact
16 | float64[12] current_feet_positions
--------------------------------------------------------------------------------
/ros2/msgs_ws/src/dls2_msgs/msg/ControlSignalMsg.msg:
--------------------------------------------------------------------------------
1 | # Header
2 | string frame_id
3 | uint32 sequence_id
4 | float64 timestamp
5 |
6 | float64[12] torques
7 | uint64 signal_reconstruction_method
8 |
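
A hedged sketch of publishing this message from Python once the dls2_msgs package has been built and sourced; only the message fields above are taken from this file, while the node and topic names are assumptions.

# Minimal rclpy publisher sketch for ControlSignalMsg (node/topic names are illustrative).
import rclpy
from rclpy.node import Node
from dls2_msgs.msg import ControlSignalMsg

rclpy.init()
node = Node("control_signal_publisher")
pub = node.create_publisher(ControlSignalMsg, "control_signal", 10)

msg = ControlSignalMsg()
msg.frame_id = "base_link"                                  # assumed frame name
msg.sequence_id = 0
msg.timestamp = node.get_clock().now().nanoseconds * 1e-9   # seconds
msg.torques = [0.0] * 12                                    # one torque per joint
msg.signal_reconstruction_method = 0

pub.publish(msg)
node.destroy_node()
rclpy.shutdown()
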
--------------------------------------------------------------------------------
/ros2/msgs_ws/src/dls2_msgs/msg/TrajectoryGeneratorMsg.msg:
--------------------------------------------------------------------------------
1 | # Header
2 | string frame_id
3 | uint32 sequence_id
4 | float64 timestamp
5 |
6 | float64[3] com_position
7 | float64[4] com_orientation
8 |
9 | float64[3] com_linear_velocity
10 | float64[3] com_angular_velocity
11 |
12 | float64[3] com_linear_acceleration
13 | float64[3] com_angular_acceleration
14 |
15 | float64[12] joints_position
16 | float64[12] joints_velocity
17 | float64[12] joints_acceleration
18 | float64[12] joints_effort
19 |
20 | float64[6] wrench
21 |
22 | bool[4] stance_legs
23 |
24 | float64[12] nominal_touch_down
25 | float64[12] touch_down
26 | float64[4] swing_period
27 |
28 | float64[4] normal_force_max
29 | float64[4] normal_force_min
30 |
31 | float64[12] kp
32 | float64[12] kd
--------------------------------------------------------------------------------
/ros2/msgs_ws/src/dls2_msgs/package.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
3 | <package format="3">
4 |   <name>dls2_msgs</name>
5 |   <version>0.0.0</version>
6 |   <description>Messages for ROS2</description>
7 |   <maintainer>giulioturrisi</maintainer>
8 |   <license>Apache-2.0</license>
9 |
10 |   <buildtool_depend>ament_cmake</buildtool_depend>
11 |
12 |   <test_depend>ament_lint_auto</test_depend>
13 |   <test_depend>ament_lint_common</test_depend>
14 |
15 |   <export>
16 |     <build_type>ament_cmake</build_type>
17 |   </export>
18 |
19 |   <depend>geometry_msgs</depend>
20 |   <build_depend>rosidl_default_generators</build_depend>
21 |   <exec_depend>rosidl_default_runtime</exec_depend>
22 |   <member_of_group>rosidl_interface_packages</member_of_group>
23 |
24 | </package>
25 |
--------------------------------------------------------------------------------
/simulation/__init__.py:
--------------------------------------------------------------------------------
1 | # Created by danfoa at 03/07/24
2 |
--------------------------------------------------------------------------------
/simulation/batched_simulations.py:
--------------------------------------------------------------------------------
1 | # Description: This script is used to simulate multiple MPC runs in parallel,
2 | # mainly to test the performance of the MPC controller over time.
3 |
4 | # Authors:
5 | # Giulio Turrisi
6 |
7 | import multiprocessing
8 | import time
9 | from multiprocessing import Process
10 |
11 | import numpy as np
12 |
13 | import os
14 | import pathlib
15 | import sys
16 | sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
17 |
18 | # Import the simulation module that will run mujoco
19 | import simulation
20 |
21 | # Number of processes to run
22 | NUM_PROCESSES = 4
23 | NUM_EPISODES = 20
24 | NUM_SECONDS_EPISODE = 2
25 | REF_BASE_LIN_VEL = (-1.0, 1.0)
26 | REF_BASE_ANG_VEL = (-0.2, 0.2)
27 | FRICTION_COEFF = (0.5, 1.0)
28 | BASE_VEL_COMMAND_TYPE = "forward+rotate"
29 |
30 |
31 | if __name__ == "__main__":
32 | from quadruped_pympc import config as cfg
33 | qpympc_cfg = cfg
34 | render = False
35 | render_only_first = False
36 | output_simulations_tracking = None
37 | output_simulations_success_rate = None
38 |
39 | # Run the simulation in parallel
40 | processes = []
41 | manager = multiprocessing.Manager()
42 | return_dict = manager.dict()
43 | for i in range(NUM_PROCESSES):
44 | if render_only_first and i == 0:
45 | render = True
46 |
47 | storage_path = pathlib.Path(__file__).parent.parent / ("datasets_" + "proc_" + str(i))
48 |
49 | p = Process(
50 | target=simulation.run_simulation,
51 | args=(qpympc_cfg, i, NUM_EPISODES, NUM_SECONDS_EPISODE, REF_BASE_LIN_VEL, REF_BASE_ANG_VEL, FRICTION_COEFF, BASE_VEL_COMMAND_TYPE, i * NUM_PROCESSES, render, storage_path),
52 | )
53 | p.start()
54 | time.sleep(5)
55 | processes.append(p)
56 |
57 | for p in processes:
58 | p.join()
59 |
60 | """# Concatenate the data from the processes
61 | for proc_j in range(NUM_PROCESSES):
62 | for ep_i in range(NUM_EPISODES):
63 | if output_simulations_tracking is None:
64 | output_simulations_tracking = return_dict[
65 | "process" + str(proc_j) + "_ctrl_state_history_ep" + str(ep_i)
66 | ]
67 | else:
68 | output_simulations_tracking = np.concatenate(
69 | (
70 | output_simulations_tracking,
71 | return_dict["process" + str(proc_j) + "_ctrl_state_history_ep" + str(ep_i)],
72 | )
73 | )
74 |
75 | if output_simulations_success_rate is None:
76 | output_simulations_success_rate = np.array(
77 | [return_dict["process" + str(proc_j) + "_success_rate_ep" + str(ep_i)]]
78 | )
79 | else:
80 | output_simulations_success_rate = np.concatenate(
81 | (
82 | output_simulations_success_rate,
83 | np.array([return_dict["process" + str(proc_j) + "_success_rate_ep" + str(ep_i)]]),
84 | )
85 | )
86 |
87 | print("Tracking data mean: ", np.mean(output_simulations_tracking, axis=0))
88 | print("Tracking data std: ", np.std(output_simulations_tracking, axis=0))
89 | print("Success rate: ", np.sum(output_simulations_success_rate) / len(output_simulations_success_rate), "%")"""
90 |
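
Note that return_dict is created above but never handed to the worker processes, so the commented-out aggregation can only work if simulation.run_simulation is given (or extended to accept) the shared dict. The generic pattern is sketched below with a stand-in worker; it is not the signature of simulation.run_simulation.

# Generic Manager-dict pattern (stand-in worker; not the repo's API).
import multiprocessing
from multiprocessing import Process

def worker(proc_id, shared_results):
    # Each process writes its results under keys the parent knows how to read back.
    shared_results[f"process{proc_id}_success_rate_ep0"] = 1.0

if __name__ == "__main__":
    manager = multiprocessing.Manager()
    results = manager.dict()
    procs = [Process(target=worker, args=(i, results)) for i in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(dict(results))
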
--------------------------------------------------------------------------------
/simulation/generate_dataset.py:
--------------------------------------------------------------------------------
1 | # Authors:
2 | # Giulio Turrisi, Daniel Ordonez
3 | import itertools
4 | import os
5 | import pathlib
6 | import sys
7 | from pprint import pprint
8 | import numpy as np
9 |
10 | from gym_quadruped.quadruped_env import QuadrupedEnv
11 | from gym_quadruped.utils.data.h5py import H5Reader
12 |
13 | # TODO: Remove horrible hack
14 | sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
15 | from simulation import run_simulation
16 |
17 | if __name__ == "__main__":
18 | from quadruped_pympc import config as cfg
19 |
20 | qpympc_cfg = cfg
21 | # Custom changes to the config here:
22 |
23 | storage_path = pathlib.Path(__file__).parent.parent / "datasets"
24 | data_path = run_simulation(
25 | qpympc_cfg=qpympc_cfg,
26 | num_seconds_per_episode=50,
27 | num_episodes=1,
28 | ref_base_lin_vel=(-0.5, 0.5),
29 | ref_base_ang_vel=(-0.3, 0.3),
30 | base_vel_command_type="human",
31 | render=True,
32 | recording_path=storage_path,
33 | )
34 |
35 | dataset = H5Reader(file_path=data_path)
36 |
37 | print(f"Number of trajectories: {dataset.len()}")
38 | print("Parameters used to generate the data")
39 | pprint(dataset.env_hparams)
40 | # We can reconstruct the environment used to reproduce the dataset using
41 | reproduced_env = QuadrupedEnv(**dataset.env_hparams)
42 |
43 | # We can also list the observations in the dataset
44 | obs_names = dataset.env_hparams["state_obs_names"]
45 |
46 | print(f"\n\n{str(list(itertools.chain(['-'] * 100)))}\n\n")
47 | print(
48 | f"Dataset stored at: {data_path} \n"
49 | f"Number of trajectories: {dataset.len()} \n"
50 | f"Dataset observations names: \n{obs_names}"
51 | )
52 |
53 |
54 | # We can also convert the dataset to a npy file
55 | n_trajs = dataset.n_trajectories
56 | desired_fps = 50.0
57 | actual_fps = 1.0 / reproduced_env.mjModel.opt.timestep
58 | skipping_factor = int(actual_fps / desired_fps)
59 | for traj_id in range(n_trajs):
60 |
61 | obs_t = {obs_name: dataset.recordings[obs_name][traj_id][::skipping_factor] for obs_name in reproduced_env.state_obs_names}
62 |
63 | base_pos = obs_t['base_pos']
64 |         base_ori_quat_xyzw = np.roll(obs_t['base_ori_quat_wxyz'], -1, axis=-1)  # wxyz -> xyzw for each quaternion
65 | qpos_js = obs_t['qpos_js']
66 |
67 | data = {
68 | "joints_list": ["FL_hip_joint", "FL_thigh_joint", "FL_calf_joint",
69 | "FR_hip_joint", "FR_thigh_joint", "FR_calf_joint",
70 | "RL_hip_joint", "RL_thigh_joint", "RL_calf_joint",
71 | "RR_hip_joint", "RR_thigh_joint", "RR_calf_joint"],
72 | "joint_positions": [row for row in qpos_js],
73 | 'root_position': [row for row in base_pos],
74 | 'root_quaternion': [row for row in base_ori_quat_xyzw],
75 | "fps": desired_fps,
76 | }
77 | # Save the data to an .npy file
78 | output_file = f"datasets/traj_{traj_id}.npy"
79 | np.save(output_file, data)
80 |
81 |
82 | # And since we use the same names as QuadrupedEnv, we can get the group representations for free
83 | from gym_quadruped.utils.quadruped_utils import configure_observation_space_representations
84 |
85 | obs_reps = configure_observation_space_representations(robot_name=dataset.env_hparams["robot"], obs_names=obs_names)
86 |
87 | for obs_name in obs_names:
88 | print(f"Obs: {obs_name} \n Representation: {obs_reps[obs_name]}")
89 |
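
Since np.save is given a Python dict above, each exported trajectory is stored as a pickled object array; a short sketch of reading one back (the path follows the output_file pattern used in the script):

# Load an exported trajectory back into a dict (requires allow_pickle for object arrays).
import numpy as np

data = np.load("datasets/traj_0.npy", allow_pickle=True).item()
print(data["joints_list"])            # 12 joint names
print(len(data["joint_positions"]))   # number of down-sampled frames
print(data["fps"])                    # 50.0 in this script
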
--------------------------------------------------------------------------------