├── .circleci
│   └── config.yml
├── .gitignore
├── LICENSE
├── Makefile
├── README.md
├── advanced_source
│   └── README.txt
├── beginner_source
│   ├── README.txt
│   ├── lietensor_tutorial.py
│   └── started.py
├── conf.py
├── dynamics_source
│   ├── README.txt
│   ├── cartpole_tutorial.py
│   ├── floquet_tutorial.py
│   └── neuralnet_tutorial.py
├── imu_source
│   ├── README.txt
│   ├── imu_corrector_tutorial.py
│   ├── imu_dataset_tutorial.py
│   └── imu_integrator_tutorial.py
├── index.rst
├── intermediate_source
│   └── README.txt
├── ipynb2py.py
├── make.bat
├── pgo_source
│   ├── README.txt
│   ├── pgo_dataset_tutorial.py
│   └── pgo_tutorial.py
└── requirements.txt

--------------------------------------------------------------------------------
/.circleci/config.yml:
--------------------------------------------------------------------------------
version: 2.1

##############################################################################
# Reusables
##############################################################################
build_binary_params: &build_binary_params
  parameters:
    docker_image:
      type: string
      default: ""
    cuda_version:
      type: string
      default: ""
    cudnn_version:
      type: string
      default: "8"
    python_version:
      type: string
      default: ""
    torch_version:
      type: string
      default: ""
  environment:
    DOCKER_IMAGE: << parameters.docker_image >>
    CUDA_VERSION: << parameters.cuda_version >>
    CUDNN_VERSION: << parameters.cudnn_version >>
    PYTHON_VERSION: << parameters.python_version >>
    TORCH_VERSION: << parameters.torch_version >>

commands:
  git_update_submodules: &git_update_submodules
    description: Update submodules of clone
    steps:
      - run:
          name: Update submodule
          command: git submodule sync && git submodule update --init
  setup_env: &setup_env
    description: Setup the build environment
    steps:
      - run:
          name: Install dependencies
          command: |
            # derived environment variables
            export CUDA_VERSION_SHORT=$(echo ${CUDA_VERSION} | sed -E 's/([0-9]+)\.([0-9]+).*/\1.\2/')
            # apt-get and conda
            apt-get update -y
            DEBIAN_FRONTEND=noninteractive apt-get install -y wget curl git-all ninja-build
            cd /tmp
            wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
            chmod +x Miniconda3-latest-Linux-x86_64.sh && ./Miniconda3-latest-Linux-x86_64.sh -b
            export "PATH=~/miniconda3/bin:$PATH" && conda init bash && . $HOME/miniconda3/etc/profile.d/conda.sh
            # python dependencies
            conda create -n pyenv -y python=${PYTHON_VERSION} setuptools pytorch=${TORCH_VERSION} cudatoolkit=${CUDA_VERSION_SHORT} -c pytorch
  build_package: &build_package
    description: Build PyPose package of specified type
    parameters:
      build_tag:
        type: string
        default: "dev"
      dist_type:
        type: string
        default: "bdist_wheel"
      max_jobs:
        type: integer
        default: 1
    steps:
      - run:
          name: Build PyPose package (<< parameters.dist_type >>)
          no_output_timeout: 30m
          command: |
            echo $PATH
            export "PATH=~/miniconda3/bin:$PATH" && conda init bash && . $HOME/miniconda3/etc/profile.d/conda.sh
            conda activate pyenv
            export MAX_JOBS=<< parameters.max_jobs >>
            cd /tmp
            git clone --recursive https://github.com/pypose/pypose.git
            cd pypose && python setup.py develop

##############################################################################
# Job specs
##############################################################################
jobs:
  # linux_build_wheel:
  #   <<: *build_binary_params
  #   docker:
  #     - image: nvidia/cuda:<< parameters.cuda_version >>-cudnn<< parameters.cudnn_version >>-devel-ubuntu20.04
  #   resource_class: large
  #   steps:
  #     - setup_remote_docker:
  #         docker_layer_caching: true
  #     - setup_env
  #     # - checkout
  #     # - git_update_submodules
  #     - build_package
  #     - store_artifacts:
  #         path: dist
  #         destination: wheels
  #     - persist_to_workspace:
  #         root: /tmp/pypose
  #         paths: [dist]
  build_tutorials:
    <<: *build_binary_params
    docker:
      - image: nvidia/cuda:<< parameters.cuda_version >>-cudnn<< parameters.cudnn_version >>-devel-ubuntu20.04
    resource_class: large
    steps:
      - run:
          name: Test CUDA
          command: nvidia-smi
      - setup_env
      # - attach_workspace:
      #     at: /tmp/pypose
      - build_package
      # - run:
      #     name: Install PyPose
      #     command: |
      #       cd /tmp/pypose
      #       export "PATH=~/miniconda3/bin:$PATH" && conda init bash && . $HOME/miniconda3/etc/profile.d/conda.sh
      #       conda activate pyenv
      #       pip install dist/pypose-*.whl
      #       cd ~/project
      - checkout
      - run:
          name: Build PyPose tutorials
          command: |
            export "PATH=~/miniconda3/bin:$PATH" && conda init bash && . $HOME/miniconda3/etc/profile.d/conda.sh
            conda activate pyenv
            pip install -r requirements.txt
            apt-get install -y python3-sphinx
            make html
      - persist_to_workspace:
          root: _build
          paths: [html]
  deploy_tutorials:
    docker:
      - image: node:8.10.0
    steps:
      - checkout
      - attach_workspace:
          at: _build
      - run:
          name: Disable jekyll builds
          command: touch _build/html/.nojekyll
      - run:
          name: Install and configure dependencies
          command: |
            npm install -g --silent gh-pages@2.0.1
            git config user.email "253750329@qq.com"
            git config user.name "ew123"
      - add_ssh_keys:
          fingerprints:
            - "41:f2:68:c6:0b:09:92:0d:58:88:db:6e:5a:45:3d:a0"
      - run:
          name: Deploy docs to gh-pages branch
          command: gh-pages --dotfiles --message "[skip ci] Updates" --dist _build/html

##############################################################################
# Workflows
##############################################################################
workflows:
  version: 2
  build:
    jobs:
      # - linux_build_wheel:
      #     cuda_version: "11.3.1"
      #     torch_version: "1.12.1"
      #     python_version: "3.8"
      - build_tutorials:
          cuda_version: "11.3.1"
          torch_version: "1.11.0"
          python_version: "3.8"
      - deploy_tutorials:
          requires: [build_tutorials]
          filters:
            branches:
              only: main

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/
_build
beginner
intermediate
advanced
dynamics
imu
pgo

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
#   However, in case of collaboration, if having platform-specific dependencies or dependencies
#   having no cross-platform support, pipenv may install dependencies that don't work, or not
#   install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
BSD 3-Clause License

Copyright (c) 2022, PyPose
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its
   contributors may be used to endorse or promote products derived from
   this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS  ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR   = .
BUILDDIR    = _build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

html-noplot:
	$(SPHINXBUILD) -D plot_gallery=0 -b html $(ALLSPHINXOPTS) $(SOURCEDIR) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

clean:
	rm -rf $(BUILDDIR)/*
	rm -rf beginner/ advanced/ intermediate/ imu/ dynamics/ pgo/

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# PyPose tutorials

# 1. Contributing to Documentation

## 1.1 Build docs locally

1. Sphinx docs come with a Makefile build system. To preview, build PyPose locally, then run:

```bash
pip install -r requirements.txt
make html
```

2. Then open the generated HTML page: `_build/html/index.html`.

3. To remove the generated files before a full rebuild:
```
make clean
```


## 1.2 Writing documentation

We use sphinx-gallery's [notebook styled examples](https://sphinx-gallery.github.io/stable/tutorials/index.html) to create the tutorials. The syntax is very simple. In essence, you write a lightly formatted Python file and it shows up as a documentation page.

Here's how to create a new tutorial or recipe:
1. Create a notebook-styled Python file. If you want it executed while inserted into the documentation, save the file with the suffix `tutorial` so that the file name is `your_tutorial.py`. A minimal skeleton is shown after this list.
2. Put it in one of `beginner_source`, `intermediate_source`, or `advanced_source`, based on the level. If it is a recipe, add it to `recipes_source`.
3. For tutorials (except prototype features), include it in the TOC tree at `index.rst`.
4. Create a pull request.
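
For reference, a minimal notebook-styled tutorial file looks like the sketch below. The title and cell contents are placeholders; the `#####` separator lines follow the convention used by the existing files in `beginner_source`:

```python
"""
My New Tutorial
===============

"""

######################################################################
# A comment block after a separator line becomes a text cell; the code
# below it becomes an executed code cell whose output is rendered.

import pypose as pp

x = pp.randn_SO3(2)
print(x)
```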

In case you prefer to write your tutorial in Jupyter, you can use [this script](https://gist.github.com/chsasank/7218ca16f8d022e02a9c0deb94a310fe) to convert the notebook to a Python file. After conversion and addition to the project, please make sure the section headings etc. are in a logical order.

--------------------------------------------------------------------------------
/advanced_source/README.txt:
--------------------------------------------------------------------------------
This is my gallery
==================

Below is a gallery of examples
--------------------------------------------------------------------------------
/beginner_source/README.txt:
--------------------------------------------------------------------------------
Beginner Tutorial
==================

This is a gallery of beginner tutorials for PyPose.
--------------------------------------------------------------------------------
/beginner_source/lietensor_tutorial.py:
--------------------------------------------------------------------------------
"""
LieTensor Tutorial
==================

"""

######################################################################
# Uncomment this if you're using google colab to run this script
#

# !pip install pypose


######################################################################
# ``LieTensor`` is the cornerstone of the PyPose project. ``LieTensor`` is derived from
# ``torch.Tensor``: it represents a Lie algebra or a Lie group. It supports all
# the ``torch.Tensor`` features, plus features specific to Lie theory.
#
#
# We will see eventually in this tutorial that, with ``LieTensor``,
# one can easily implement operations often used in robotics applications.
#
# In PyPose, we want to utilize the powerful network training API that comes with PyTorch.
# So, we will go a step further and see how we can use ``LieTensor`` in training a simple network.
#

import torch
import pypose as pp


######################################################################
# 1. Initialization
# ---------------------------------------
# The first thing we need to know is how to initialize a LieTensor.
# Use ``pypose.LieTensor`` or an alias like ``pypose.so3``, and specify the ``data`` and ``ltype``.
# See the list of ``ltype``
# `here `_.
#
# Note that the last dimension
# of ``data`` has to align with ``LieTensor.ltype.dimension``,
# because a LieTensor has a different length with respect to each ``ltype``.
# Here we have a ``(2,3)``-shaped tensor, because ``so3_type``
# requires a dimension of 3 for each element.
#
# It is recommended to use an alias to initialize a LieTensor.


data = torch.randn(2, 3, requires_grad=True, device='cuda:0')
a = pp.LieTensor(data, ltype=pp.so3_type)
print('a:', a)
b = pp.so3(data)
print('b:', b)

######################################################################
# Like PyTorch, you can initialize an identity ``LieTensor`` or a random ``LieTensor``.
# Use the function related to each ``ltype``. For example, below we use ``pypose.identity_SE3``
# and ``pypose.randn_se3``. The usage is similar to ``torch.randn``, except the shape we input
# is the ``lshape``.
# The only difference between ``LieTensor.lshape`` and ``tensor.shape`` is that the last
# dimension is hidden, since ``lshape`` takes the last dimension as a single ``ltype`` item.
#
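######################################################################
# For instance (a quick illustration of the convention described above),
# an ``SE3`` item is stored as 7 numbers, so ``shape`` shows one more
# dimension than ``lshape``:

pose = pp.randn_SE3(2)
print('shape: ', pose.shape)   # torch.Size([2, 7])
print('lshape:', pose.lshape)  # torch.Size([2])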

######################################################################
# You might notice the case difference here.
# In PyPose, uppercase refers to a Lie group, and lowercase refers to a Lie algebra.
# It is recommended to use the Lie group, unless the Lie algebra is absolutely necessary.
#

######################################################################
# ``LieTensor.lview`` here is used to change the shape of a ``LieTensor``,
# similar to ``torch.view``.
# The difference is that ``LieTensor.lview`` does not modify the last dimension.
# This is intuitive, since we need each element in ``x`` to stay an ``SE3`` ltype.
#

x = pp.identity_SE3(2,1)
y = pp.randn_se3(2,2)
print('x.shape:', x.shape, '\nx.lshape:', x.lshape)
print(x.lview(2))
print(y)


######################################################################
# 2. All arguments in PyTorch are supported
# ---------------------------------------------
# ``LieTensor`` is derived from ``torch.Tensor``, so it inherits all the
# attributes of a ``tensor``.
# You can specify ``device``, ``dtype``, and ``requires_grad`` during initialization,
# just like in PyTorch.

a = pp.randn_SO3(3, device="cuda:0", dtype=torch.double, requires_grad=True)
b = pp.identity_like(a, device="cpu")
a, b

######################################################################
# And also, easy data type transforms.

t = a.float()
a, t


######################################################################
# Slicing and Shaping
# ``LieTensor`` concatenation is also the same as in PyTorch.

A = pp.randn_SO3(2,2)
B = pp.randn_SO3(2,1)
C = torch.cat([A,B], dim=1)         # Tensor cat
C[0,1] = pp.randn_SO3(1)            # Slicing set
D = C[1,:].Log()                    # Slicing get
E, F = torch.split(C, [1,2], dim=1) # Tensor split
print('A:', A.lshape)
print('B:', B.lshape)
print('C:', C.lshape)
print('D:', D.lshape)
print('E:', E.lshape)
print('F:', F.lshape)


######################################################################
# 3. Exponential, Logarithm and Inversion Functions
# ---------------------------------------------------
# ``LieTensor.Exp`` is the exponential function defined in Lie theory,
# which transforms an input Lie algebra into a Lie group.
# ``LieTensor.Log`` is the logarithm function, which transforms the Lie group back to the Lie algebra.
# See the docs of
# `LieTensor.Exp `_ and
# `LieTensor.Log `_
# for the math.
#
# ``LieTensor.Inv`` gives us the inversion of a ``LieTensor``.
# Assume you have a ``LieTensor`` of ``pypose.SO3_type``
# representing a rotation :math:`{\rm R}`; ``Inv`` will give you :math:`{\rm R^{-1}}`.
# See `LieTensor.Inv `_.
#


(x * y.Exp()).Inv().Log()


######################################################################
# 4. Adjoint Transforms
# ---------------------------------------
# We also have adjoint operations. Assume ``X`` is a Lie group element,
# and ``a`` is a small left increment in the Lie algebra.
# ``AdjT`` takes the left increment ``a`` and outputs the right increment ``b`` that gives the
# same transformation, while ``Adj`` maps a right increment to the equivalent left one.
# See `pypose.Adj `_ for more details.
#

X = pp.randn_Sim3(6, dtype=torch.double)
a = pp.randn_sim3(6, dtype=torch.double)
b = X.AdjT(a)
print((X * b.Exp() - a.Exp() * X).abs().mean() < 1e-7)

X = pp.randn_SE3(8)
a = pp.randn_se3(8)
b = X.Adj(a)
print((b.Exp() * X - X * a.Exp()).abs().mean() < 1e-7)


######################################################################
# 5. Gradients
# ---------------------------------------
# As mentioned at the beginning, we want to utilize the powerful
# network training API that comes with PyTorch.
# We might want to start by calculating gradients,
# which is a core step of any network training.
# First, we need to initialize the ``LieTensor`` whose gradients we want.
# Remember to set ``requires_grad=True``.

x = pp.randn_so3(3, sigma=0.1, requires_grad=True, device="cuda")
assert x.is_leaf

######################################################################
# And, just like in PyTorch, we define a ``loss`` and call ``loss.backward``.
# That's it. Exactly the same as PyTorch.
#


loss = (x.Exp().Log()**2).sin().sum() # Just a test; no physical meaning
loss.backward()
y = x.detach()
loss, x.grad, x, y


######################################################################
# 6. Test a Module
# ---------------------------------------
# Now that we know all the basic operations, we can go ahead and build our first network.
# First of all, we define our ``TestNet`` as follows. Again, it doesn't have any physical meaning.
#


from torch import nn

def count_parameters(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

class TestNet(nn.Module):
    def __init__(self, n):
        super().__init__()
        self.weight = pp.Parameter(pp.randn_so3(n))

    def forward(self, x):
        return self.weight.Exp() * x


######################################################################
# Like in PyTorch, we instantiate our network, optimizer, and scheduler.
# The scheduler here controls the learning rate; see `lr_scheduler.MultiStepLR
# `_
# for more detail.
#
# Then, inside the loop, we run our training. If you are not familiar with the training process,
# we recommend reading one of the PyTorch tutorials, like
# `this `_.
#

n, epoch = 4, 5
net = TestNet(n).cuda()

optimizer = torch.optim.SGD(net.parameters(), lr = 0.2, momentum=0.9)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[2,4], gamma=0.5)

print("Before Optimization:\n", net.weight)
for i in range(epoch):
    optimizer.zero_grad()
    inputs = pp.randn_SO3(n).cuda()
    outputs = net(inputs)
    loss = outputs.abs().sum()
    loss.backward()
    optimizer.step()
    scheduler.step()
    print(loss)

print("Parameter:", count_parameters(net))
print("After Optimization:\n", net.weight)

######################################################################
# And we are finished with our ``LieTensor`` tutorial.
# Hopefully you are more familiar with it by now.
#
# Now feel free to explore the other tutorials
# to see how PyPose can be utilized in real robotics applications.
#
--------------------------------------------------------------------------------
/beginner_source/started.py:
--------------------------------------------------------------------------------
"""
Get Started Tutorial
====================

"""

######################################################################
# Uncomment this if you're using google colab to run this script
#

# !pip install pypose


######################################################################




######################################################################
# Sample Code of LieTensor
# ---------------------------------------
# The following code sample shows how to rotate random
# points and compute the gradient of a batched rotation.
#

import torch
import pypose as pp


######################################################################
# Create a random so(3) LieTensor
#

r = pp.randn_so3(2, requires_grad=True)
print(r)


######################################################################
# Get the Lie group of the Lie algebra
#

R = r.Exp() # Equivalent to: R = pp.Exp(r)
print(R)


######################################################################
# Create a random point and rotate it based on the Lie group rotation tensor
#

p = R @ torch.randn(3) # Rotate random point
print(p)


######################################################################
# Compute the gradient and print it
#

p.sum().backward() # Compute gradient
r.grad # Print gradient


######################################################################
# Sample code of optimizer
# ---------------------------------------------
# We show how to estimate a batched transform inverse with a
# second-order optimizer. Two usage options for a scheduler
# are provided, each of which can work independently.
#

from torch import nn
import torch, pypose as pp
from pypose.optim import LM
from pypose.optim.strategy import Constant
from pypose.optim.scheduler import StopOnPlateau

class InvNet(nn.Module):

    def __init__(self, *dim):
        super().__init__()
        init = pp.randn_SE3(*dim)
        self.pose = pp.Parameter(init)

    def forward(self, input):
        error = (self.pose @ input).Log()
        return error.tensor()

device = torch.device("cuda")
input = pp.randn_SE3(2, 2, device=device)
invnet = InvNet(2, 2).to(device)
strategy = Constant(damping=1e-4)
optimizer = LM(invnet, strategy=strategy)
scheduler = StopOnPlateau(optimizer,
                          steps=10,
                          patience=3,
                          decreasing=1e-3,
                          verbose=True)

# 1st option, full optimization
scheduler.optimize(input=input)

# 2nd option, step optimization
while scheduler.continual():
    loss = optimizer.step(input)
    scheduler.step(loss)



######################################################################
# And then we are finished with the two sample codes mentioned in our paper.
#
# Now feel free to explore the other tutorials
# to see how PyPose can be utilized in real robotics applications.
#
--------------------------------------------------------------------------------
/conf.py:
--------------------------------------------------------------------------------
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import pypose_sphinx_theme

# -- Project information -----------------------------------------------------

project = 'PyPose Tutorials'
copyright = '2022, PyPose contributors'
author = 'PyPose contributors'

# The full version, including alpha/beta/rc tags
import pypose
version = pypose.__version__


# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx_gallery.gen_gallery',
]


sphinx_gallery_conf = {
    'examples_dirs': ['beginner_source', 'dynamics_source', 'imu_source',
                      'pgo_source'],
    'gallery_dirs': ['beginner', 'dynamics', 'imu', 'pgo'],
    'filename_pattern': 'tutorial.py',
}


# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#

html_theme = 'pypose_sphinx_theme'
html_theme_path = [pypose_sphinx_theme.get_html_theme_path()]
html_theme_options = {
    'pytorch_project': 'tutorials',
    'collapse_navigation': False,
    'display_version': True,
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
--------------------------------------------------------------------------------
/dynamics_source/README.txt:
--------------------------------------------------------------------------------
# Defining Dynamical Systems

Examples for defining general discrete-time, time-varying dynamical systems

## Installation

    python -m pip install -U matplotlib

## Run:

+ The classical cart-pole example, a nonlinear time-invariant system

    python cartpole_tutorial.py

+ A Floquet system example, a linear time-varying system

    python floquet_tutorial.py

+ Dynamics defined using a neural network, a nonlinear time-invariant system

    python neuralnet_tutorial.py
--------------------------------------------------------------------------------
/dynamics_source/cartpole_tutorial.py:
--------------------------------------------------------------------------------
"""
Cartpole Tutorial
==================

"""
import torch, pypose as pp
import math, matplotlib.pyplot as plt

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

######################################################################
# Preparation
# -------------
# Create a class for the cart-pole dynamics
#
class CartPole(pp.module.NLS):
    def __init__(self, dt, length, cartmass, polemass, gravity):
        super().__init__()
        self.tau = dt
        self.length = length
        self.cartmass = cartmass
        self.polemass = polemass
        self.gravity = gravity
        self.polemassLength = self.polemass * self.length
        self.totalMass = self.cartmass + self.polemass

    def state_transition(self, state, input, t=None):
        x, xDot, theta, thetaDot = state
        force = input.squeeze()
        costheta = theta.cos()
        sintheta = theta.sin()

        temp = (force + self.polemassLength * thetaDot**2 * sintheta) / self.totalMass

        thetaAcc = (self.gravity * sintheta - costheta * temp) / \
            (self.length * (4.0 / 3.0 - self.polemass * costheta**2 / self.totalMass))

        xAcc = temp - self.polemassLength * thetaAcc * costheta / self.totalMass

        _dstate = torch.stack((xDot, xAcc, thetaDot, thetaAcc))

        return state + _dstate * self.tau

    def observation(self, state, input, t=None):
        return state


def subPlot(ax, x, y, xlabel=None, ylabel=None):
    x = x.detach().cpu().numpy()
    y = y.detach().cpu().numpy()
    ax.plot(x, y)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)



######################################################################
# Create parameters for the cart-pole trajectory
# -----------------------------------------------


dt = 0.01    # Delta t
length = 1.5 # Length of pole (named to avoid shadowing the builtin ``len``)
m_cart = 20  # Mass of cart
m_pole = 10  # Mass of pole
g = 9.81     # Acceleration due to gravity
N = 1000     # Number of time steps


######################################################################
# Time and input

time = torch.arange(0, N, device=device) * dt
input = torch.sin(time)


######################################################################
# Initial state

state = torch.zeros(N, 4, dtype=float, device=device)
state[0] = torch.tensor([0, 0, math.pi, 0], dtype=float, device=device)
######################################################################
# Create the dynamics solver object
model = CartPole(dt, length, m_cart, m_pole, g).to(device)


######################################################################
# Calculate trajectory
for i in range(N - 1):
    state[i + 1], _ = model(state[i], input[i])


######################################################################
# Jacobian computation - Find the Jacobians at the last step

model.set_refpoint(state=state[-1,:], input=input[-1], t=time[-1])
vars = ['A', 'B', 'C', 'D', 'c1', 'c2']
[print(v, getattr(model, v)) for v in vars]

######################################################################
# Create time plots to show the dynamics
f, ax = plt.subplots(nrows=4, sharex=True)
x, xdot, theta, thetadot = state.T
subPlot(ax[0], time, x, ylabel='X')
subPlot(ax[1], time, xdot, ylabel='X dot')
subPlot(ax[2], time, theta, ylabel='Theta')
subPlot(ax[3], time, thetadot, ylabel='Theta dot', xlabel='Time')
plt.show()
--------------------------------------------------------------------------------
/dynamics_source/floquet_tutorial.py:
--------------------------------------------------------------------------------
"""
Floquet Tutorial
==================

"""
import torch, pypose as pp
import math, matplotlib.pyplot as plt

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

######################################################################
# Preparation
# -------------
# We consider a Floquet system, which is periodic and an example of a time-varying system
#


class Floquet(pp.module.NLS):
    '''
    A Floquet system is periodic and time-varying.
    '''
    def __init__(self):
        super().__init__()

    def state_transition(self, state, input, t):
        cc = (2 * math.pi * t / 100).cos()
        ss = (2 * math.pi * t / 100).sin()
        A = torch.tensor([[   1., cc/10],
                          [cc/10,    1.]], device=t.device)
        B = torch.tensor([[ss],
                          [1.]], device=t.device)

        return A @ state + B @ input

    def observation(self, state, input, t):
        return state + t


def subPlot(ax, x, y, xlabel=None, ylabel=None):
    x = x.detach().cpu().numpy()
    y = y.detach().cpu().numpy()
    ax.plot(x, y)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)

######################################################################
# Number of time steps
# ---------------------

N = 100


######################################################################
# Time, input, initial state

time = torch.arange(0, N, device=device)
input = (2 * math.pi * time / 50).sin()

state = torch.zeros(N, 2, device=device)
state[0] = torch.tensor([1., 1.], device=device)
obser = torch.zeros(N, 2, device=device)

######################################################################
# Create the dynamics solver object
model = Floquet().to(device)

######################################################################
# Calculate trajectory
#

for i in range(N - 1):
    state[i + 1], obser[i] = model(state[i], input[i])

######################################################################
# Jacobian computation - Find the Jacobians at the last step
vars = ['A', 'B', 'C', 'D', 'c1', 'c2']
model.set_refpoint()
[print(v, getattr(model, v)) for v in vars]
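######################################################################
# A quick note on these variables (a sketch of the convention; see the
# ``pypose.module.NLS`` documentation for the authoritative definition):
# the Jacobians describe the system linearized at the reference point,
# roughly
#
#     state_{t+1} ≈ A @ state_t + B @ input_t + c1
#     observation ≈ C @ state_t + D @ input_t + c2
#
# Since this Floquet system is already linear in the state and input,
# A and B should match the matrices defined in ``state_transition``
# at the reference time step.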
######################################################################
# Jacobian computation - Find the Jacobians at the 5th step
idx = 5
model.set_refpoint(state=state[idx], input=input[idx], t=time[idx])
[print(v, getattr(model, v)) for v in vars]

######################################################################
# Create time plots to show the dynamics

f, ax = plt.subplots(nrows=4, sharex=True)
subPlot(ax[0], time, state[:, 0], ylabel='State[0]')
subPlot(ax[1], time, state[:, 1], ylabel='State[1]')
subPlot(ax[2], time[:-1], obser[:-1, 0], ylabel='Observe[0]')
subPlot(ax[3], time[:-1], obser[:-1, 1], ylabel='Observe[1]', xlabel='Time')
plt.show()
--------------------------------------------------------------------------------
/dynamics_source/neuralnet_tutorial.py:
--------------------------------------------------------------------------------
"""
NeuralNet Tutorial
==================

"""

from pypose.module.dynamics import System
import torch
import matplotlib.pyplot as plt
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

######################################################################
# Preparation
# -----------

class nnDynamics(System):
    def __init__(self, hiddenSize):
        super().__init__()
        self.net = torch.nn.Sequential(
            torch.nn.Linear(2, hiddenSize[0]),
            torch.nn.ReLU(),
            torch.nn.Linear(hiddenSize[0], hiddenSize[1]),
            torch.nn.ReLU(),
            torch.nn.Linear(hiddenSize[1], 2))

    def state_transition(self, state, input, t=None):
        return self.net(state) + input

    def observation(self, state, input, t=None):
        return state

def createTimePlot(x, y, figname="Un-named plot", title=None, xlabel=None, ylabel=None):
    f = plt.figure(figname)
    plt.plot(x, y)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)
    return f

######################################################################
# Time step

dt = 0.01 # Time step size
N = 1000  # Number of time steps

######################################################################
# Time and input

time = torch.arange(0, N + 1) * dt
input = torch.sin(time)

######################################################################
# Initial state

state = torch.tensor([0., 0.])

######################################################################
# Create solver object

nnSolver = nnDynamics([5, 10])

######################################################################
# Calculate trajectory

state_all = torch.zeros(N + 1, 2)
state_all[0, :] = state
for i in range(N):
    state_all[i + 1], _ = nnSolver(state_all[i], input[i])


######################################################################
# Create plots

x, y = (state_all.T).detach().numpy()
x_fig = createTimePlot(time, x, figname="x Plot", xlabel="Time", ylabel="x", title="x Plot")
y_fig = createTimePlot(time, y, figname="y Plot", xlabel="Time", ylabel="y", title="y Plot")

# torch.save([state_all], 'nn_dynamics_data.pt')

plt.show()
--------------------------------------------------------------------------------
/imu_source/README.txt:
--------------------------------------------------------------------------------
# IMU examples

Examples of IMU [preintegration](./imu_integrator_tutorial.py) and IMU [correction](./imu_corrector_tutorial.py).

## Installation

    python -m pip install -U matplotlib

## Prepare Dataset

* Download the KITTI IMU sample data [2011_09_26.zip](https://github.com/pypose/IMU_preintegration/releases/download/Kitti/2011_09_26.zip).
* Extract the file to any folder `DATAROOT`, so that it looks like:

        DATAROOT
        ├── 2011_09_26
            ├── 2011_09_26_drive_0001_sync
            ├── 2011_09_26_drive_0002_sync
            ├── 2011_09_26_drive_0005_sync
            .
            .
            .
            ├── 2011_09_26_drive_0018_sync
            ├── 2011_09_26_drive_0020_sync
            ├── 2011_09_26_drive_0022_sync
            ├── calib_cam_to_cam.txt
            ├── calib_imu_to_velo.txt
            └── calib_velo_to_cam.txt

# IMU Preintegration:

    python imu_integrator_tutorial.py --dataroot DATAROOT --datadrive 0018 0022

Note: change `DATAROOT` to the folder you selected.

* Data drives to select from:

    0001 0002 0005 0009 0011 0013 0014 0015 0017 0018 0019 0020 0022

* Other supported arguments:

    IMU Preintegration

    optional arguments:
      -h, --help            show this help message and exit
      --device DEVICE       cuda or cpu
      --integrating-step INTEGRATING_STEP
                            number of integrated steps
      --batch-size BATCH_SIZE
                            batch size, only support 1 now
      --save SAVE           location of png files to save
      --dataroot DATAROOT   dataset location downloaded
      --dataname DATANAME   dataset name
      --datadrive DATADRIVE [DATADRIVE ...]
                            data sequences
      --plot3d              plot in 3D space, default: False

# IMU Correction:

    python imu_corrector_tutorial.py --dataroot DATAROOT

Note: change `DATAROOT` to the folder you selected.

* Data drives to select from:

    0001 0002 0005 0009 0011 0013 0014 0015 0017 0018 0019 0020 0022

* Other supported arguments:

    optional arguments:
      -h, --help            show this help message and exit
      --device DEVICE       cuda or cpu
      --batch-size BATCH_SIZE
                            batch size
      --max_epoches MAX_EPOCHES
                            max_epoches
      --dataroot DATAROOT   dataset location downloaded
      --dataname DATANAME   dataset name
      --datadrive DATADRIVE [DATADRIVE ...]
                            data sequences
      --load_ckpt
--------------------------------------------------------------------------------
/imu_source/imu_corrector_tutorial.py:
--------------------------------------------------------------------------------
"""
IMU Corrector Tutorial
======================

"""

######################################################################
# Uncomment this if you're using google colab to run this script
#

# !pip install pypose
# !pip install pykitti

######################################################################
# In this tutorial, we will be implementing a simple IMUCorrector
# using ``torch.nn`` modules and ``pypose.module.IMUPreintegrator``.
# Our ``IMUCorrector`` takes noisy IMU sensor readings as input,
# and outputs the corrected IMU integration result.
# In some sense, ``IMUCorrector`` is an improved ``IMUPreintegrator``.
#
# We will show that ``pypose.module.IMUPreintegrator`` can be combined into network training smoothly.
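#
# At a high level, the data flow looks like this (a sketch of the
# architecture described below):
#
#   raw acc/gyro --> net --> correction --> corrected acc/gyro --> IMUPreintegrator --> pos/rot/vel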
#
# **Skip the first two parts if you have seen them in the IMU integrator tutorial**
#

import torch
import pykitti
import numpy as np
import pypose as pp
from torch import nn
import tqdm, argparse
from datetime import datetime
import torch.utils.data as Data
from torch.optim.lr_scheduler import ReduceLROnPlateau
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from matplotlib.collections import PatchCollection
from torchvision.datasets.utils import download_and_extract_archive

######################################################################
# 1. Dataset Definition
# --------------------------
# First we will define the ``KITTI_IMU`` dataset as a ``data.Dataset`` in torch, for easy usage.
# We're using the ``pykitti`` package,
# which provides a minimal set of tools for working with the KITTI datasets.
# To access a data sequence, use:
# ::
#
#   dataset = pykitti.raw(root, dataname, drive)
#
# Some of the data attributes we use below are:
#
# * ``dataset.timestamps``: Timestamps are parsed into a list of datetime objects
# * ``dataset.oxts``: List of OXTS packets and 6-dof poses as named tuples
#
# For more details about the data format, please refer to their github page
# `here `_.
#
# A sequence is separated into many segments. The number of segments is controlled by ``step_size``.
# Each segment of the sequence returns measurements such as ``dt``, ``acc``, and ``gyro``
# for a few frames, the number of which is defined by ``duration``.
#

class KITTI_IMU(Data.Dataset):
    datalink = 'https://github.com/pypose/pypose/releases/download/v0.2.2/2011_09_26.zip'
    def __init__(self, root, dataname, drive, duration=10, step_size=1, mode='train',
                 download=True):
        super().__init__()
        if download:
            download_and_extract_archive(self.datalink, root)
        self.duration = duration
        self.data = pykitti.raw(root, dataname, drive)
        self.seq_len = len(self.data.timestamps) - 1
        assert mode in ['evaluate', 'train',
                        'test'], "{} mode is not supported.".format(mode)

        self.dt = torch.tensor([datetime.timestamp(self.data.timestamps[i+1]) -
                                datetime.timestamp(self.data.timestamps[i])
                                for i in range(self.seq_len)])
        self.gyro = torch.tensor([[self.data.oxts[i].packet.wx,
                                   self.data.oxts[i].packet.wy,
                                   self.data.oxts[i].packet.wz]
                                  for i in range(self.seq_len)])
        self.acc = torch.tensor([[self.data.oxts[i].packet.ax,
                                  self.data.oxts[i].packet.ay,
                                  self.data.oxts[i].packet.az]
                                 for i in range(self.seq_len)])
        self.gt_rot = pp.euler2SO3(torch.tensor([[self.data.oxts[i].packet.roll,
                                                  self.data.oxts[i].packet.pitch,
                                                  self.data.oxts[i].packet.yaw]
                                                 for i in range(self.seq_len)]))
        self.gt_vel = self.gt_rot @ torch.tensor([[self.data.oxts[i].packet.vf,
                                                   self.data.oxts[i].packet.vl,
                                                   self.data.oxts[i].packet.vu]
                                                  for i in range(self.seq_len)])
        self.gt_pos = torch.tensor(
            np.array([self.data.oxts[i].T_w_imu[0:3, 3] for i in range(self.seq_len)]))

        start_frame = 0
        end_frame = self.seq_len
        if mode == 'train':
            end_frame = np.floor(self.seq_len * 0.5).astype(int)
        elif mode == 'test':
            start_frame = np.floor(self.seq_len * 0.5).astype(int)

        self.index_map = [i for i in range(
            0, end_frame - start_frame - self.duration, step_size)]
    def __len__(self):
        return len(self.index_map)

    def __getitem__(self, i):
        frame_id = self.index_map[i]
        end_frame_id = frame_id + self.duration
        return {
            'dt': self.dt[frame_id: end_frame_id],
            'acc': self.acc[frame_id: end_frame_id],
            'gyro': self.gyro[frame_id: end_frame_id],
            'gt_pos': self.gt_pos[frame_id+1: end_frame_id+1],
            'gt_rot': self.gt_rot[frame_id+1: end_frame_id+1],
            'gt_vel': self.gt_vel[frame_id+1: end_frame_id+1],
            'init_pos': self.gt_pos[frame_id][None, ...],
            # TODO: the init rotation might be used in gravity compensation
            'init_rot': self.gt_rot[frame_id: end_frame_id],
            'init_vel': self.gt_vel[frame_id][None, ...],
        }

    def get_init_value(self):
        return {'pos': self.gt_pos[:1],
                'rot': self.gt_rot[:1],
                'vel': self.gt_vel[:1]}

######################################################################
# 2. Utility Functions
# --------------------------
# These are several utility functions. You can skip to the parameter definitions
# and come back when necessary.

######################################################################
# ``imu_collate``
# ~~~~~~~~~~~~~~~~
# ``imu_collate`` is used in batched operation, to stack data from multiple frames together.
#


def imu_collate(data):
    acc = torch.stack([d['acc'] for d in data])
    gyro = torch.stack([d['gyro'] for d in data])

    gt_pos = torch.stack([d['gt_pos'] for d in data])
    gt_rot = torch.stack([d['gt_rot'] for d in data])
    gt_vel = torch.stack([d['gt_vel'] for d in data])

    init_pos = torch.stack([d['init_pos'] for d in data])
    init_rot = torch.stack([d['init_rot'] for d in data])
    init_vel = torch.stack([d['init_vel'] for d in data])

    dt = torch.stack([d['dt'] for d in data]).unsqueeze(-1)

    return {
        'dt': dt,
        'acc': acc,
        'gyro': gyro,

        'gt_pos': gt_pos,
        'gt_vel': gt_vel,
        'gt_rot': gt_rot,

        'init_pos': init_pos,
        'init_vel': init_vel,
        'init_rot': init_rot,
    }

######################################################################
# ``move_to``
# ~~~~~~~~~~~~~~~~
# ``move_to`` is used to move objects of different types to a CUDA device.
#


def move_to(obj, device):
    if torch.is_tensor(obj):
        return obj.to(device)
    elif isinstance(obj, dict):
        res = {}
        for k, v in obj.items():
            res[k] = move_to(v, device)
        return res
    elif isinstance(obj, list):
        res = []
        for v in obj:
            res.append(move_to(v, device))
        return res
    else:
        raise TypeError("Invalid type for move_to", obj)

######################################################################
# ``plot_gaussian``
# ~~~~~~~~~~~~~~~~~~
# ``plot_gaussian`` is used to plot an ellipse measuring uncertainty:
# a bigger ellipse means bigger uncertainty.
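# The ellipse comes from the eigendecomposition of the 2x2 position
# covariance: the semi-axes are ``sigma * sqrt(eigenvalue)`` along each
# eigenvector, so the default 3-sigma ellipse covers roughly 99% of the
# probability mass under a Gaussian assumption.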
#


def plot_gaussian(ax, means, covs, color=None, sigma=3):
    ''' Set a specific color to show edges, otherwise same as the facecolor.'''
    ellipses = []
    for i in range(len(means)):
        eigvals, eigvecs = np.linalg.eig(covs[i])
        axis = np.sqrt(eigvals) * sigma
        slope = eigvecs[1][0] / eigvecs[1][1]
        angle = 180.0 * np.arctan(slope) / np.pi
        ellipses.append(Ellipse(means[i, 0:2], axis[0], axis[1], angle=angle))
    ax.add_collection(PatchCollection(ellipses, edgecolors=color, linewidth=1))


######################################################################
# 3. Define the IMU Corrector
# ----------------------------
# Here we define the ``IMUCorrector`` module. It has two parts, the ``net`` and the ``imu``:
#
# * ``net`` is a network that resembles an autoencoder.
#   It consists of a sequence of linear and activation layers,
#   and returns a correction to the IMU measurements. Adding this correction to the
#   original IMU sensor data gives us the corrected sensor reading.
# * ``imu`` is a ``pypose.module.IMUPreintegrator``. Using the corrected sensor reading
#   from the previous step as the input to the ``IMUPreintegrator``, we can get a more
#   accurate IMU integration result.
#

class IMUCorrector(nn.Module):
    def __init__(self, size_list= [6, 64, 128, 128, 128, 6]):
        super().__init__()
        layers = []
        self.size_list = size_list
        for i in range(len(size_list) - 2):
            layers.append(nn.Linear(size_list[i], size_list[i+1]))
            layers.append(nn.GELU())
        layers.append(nn.Linear(size_list[-2], size_list[-1]))
        self.net = nn.Sequential(*layers)
        self.imu = pp.module.IMUPreintegrator(reset=True, prop_cov=False)

    def forward(self, data, init_state):
        feature = torch.cat([data["acc"], data["gyro"]], dim = -1)
        B, F = feature.shape[:2]

        output = self.net(feature.reshape(B*F,6)).reshape(B, F, 6)
        corrected_acc = output[...,:3] + data["acc"]
        corrected_gyro = output[...,3:] + data["gyro"]

        return self.imu(init_state = init_state,
                        dt = data['dt'],
                        gyro = corrected_gyro,
                        acc = corrected_acc,
                        rot = data['gt_rot'].contiguous())

######################################################################
# 4. Define the Loss Function
# ----------------------------
# The loss function consists of two parts: a position loss and a rotation loss.
#
# For the position loss, we use ``torch.nn.functional.mse_loss``, which is the mean squared error.
# See the `docs `_
# for more detail.
#
# For the rotation loss, we first compute the pose error between the output rotation and
# the ground-truth rotation, then take the norm of the Lie algebra of that pose error.
#
# Finally, we add the two losses together as our combined loss.
#

def get_loss(inte_state, data):
    pos_loss = torch.nn.functional.mse_loss(inte_state['pos'][:,-1,:], data['gt_pos'][:,-1,:])
    rot_loss = (data['gt_rot'][:,-1,:] * inte_state['rot'][:,-1,:].Inv()).Log().norm()

    loss = pos_loss + rot_loss
    return loss, {'pos_loss': pos_loss, 'rot_loss': rot_loss}
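
######################################################################
# For intuition: the rotation term is a geodesic distance on the rotation
# manifold, so identical rotations give exactly zero loss. A small sanity
# sketch (illustrative only, reusing the LieTensor operations from the
# beginner tutorial):

R = pp.randn_SO3(1)
print((R * R.Inv()).Log().norm())  # ~0: no rotation error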

######################################################################
# 5. Define the Training Process
# ------------------------------
# This is the training process, which has three steps:
#
# #. **Step 1**: Run the forward function to get the current network output
# #. **Step 2**: Collect the loss, for doing the backward pass in **Step 3**
# #. **Step 3**: Get the gradients and optimize

def train(network, train_loader, epoch, optimizer, device="cuda:0"):
    """
    Train the network for one epoch using the specified data loader,
    and return the average running loss.
    """
    network.train()
    running_loss = 0
    t_range = tqdm.tqdm(train_loader)
    for i, data in enumerate(t_range):

        # Step 1: Run the forward function
        data = move_to(data, device)
        init_state = {
            "pos": data['init_pos'],
            "rot": data['init_rot'][:,:1,:],
            "vel": data['init_vel'],}
        state = network(data, init_state)

        # Step 2: Collect the loss
        losses, _ = get_loss(state, data)
        running_loss += losses.item()

        # Step 3: Get the gradients and optimize
        t_range.set_description(f'iteration: {i:04d}, losses: {losses:.06f}')
        t_range.refresh()
        optimizer.zero_grad()  # clear stale gradients from the previous iteration
        losses.backward()
        optimizer.step()

    return running_loss / (i + 1)


######################################################################
# 6. Define the Testing Process
# -----------------------------
# This is the testing process, which has two steps:
#
# #. **Step 1**: Run the forward function to get the current network output
# #. **Step 2**: Collect the loss, to evaluate the network performance


def test(network, loader, device = "cuda:0"):
    network.eval()
    with torch.no_grad():
        running_loss = 0
        for i, data in enumerate(tqdm.tqdm(loader)):

            # Step 1: Run the forward function
            data = move_to(data, device)
            init_state = {
                "pos": data['init_pos'],
                "rot": data['init_rot'][:,:1,:],
                "vel": data['init_vel'],}
            state = network(data, init_state)

            # Step 2: Collect the loss
            losses, _ = get_loss(state, data)
            running_loss += losses.item()

        print("the running loss of the test set: %0.6f" % (running_loss / (i + 1)))

    return running_loss / (i + 1)

######################################################################
# 7. Define Parameters
# -------------------------
# Here we define all the parameters we will use.
# See the help message for the usage of each parameter.

parser = argparse.ArgumentParser()
parser.add_argument("--device",
                    type=str,
                    default='cuda:0',
                    help="cuda or cpu")
parser.add_argument("--batch-size",
                    type=int,
                    default=4,
                    help="batch size")
parser.add_argument("--max_epoches",
                    type=int,
                    default=100,
                    help="max_epoches")
parser.add_argument("--dataroot",
                    type=str,
                    default='../dataset',
                    help="dataset location downloaded")
parser.add_argument("--dataname",
                    type=str,
                    default='2011_09_26',
                    help="dataset name")
parser.add_argument("--datadrive",
                    nargs='+',
                    type=str,
                    default=["0001"],
                    help="data sequences")
parser.add_argument('--load_ckpt',
                    default=False,
                    action="store_true")
args, unknown = parser.parse_known_args(); print(args)
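######################################################################
# When running this tutorial as a script, these defaults can be overridden
# on the command line, for example (a hypothetical invocation):
#
#   python imu_corrector_tutorial.py --device cpu --batch-size 8 --datadrive 0018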
######################################################################
# 8. Define Dataloaders
# -------------------------
#

train_dataset = KITTI_IMU(args.dataroot, args.dataname, args.datadrive[0],
                          duration=10, mode='train')
test_dataset = KITTI_IMU(args.dataroot, args.dataname, args.datadrive[0],
                         duration=10, mode='test')
train_loader = Data.DataLoader(dataset=train_dataset, batch_size=args.batch_size,
                               collate_fn=imu_collate, shuffle=True)
test_loader = Data.DataLoader(dataset=test_dataset, batch_size=args.batch_size,
                              collate_fn=imu_collate, shuffle=False)

######################################################################
# 9. Main Training Loop
# -------------------------
# Here we run our main training loop.
# First, as in a standard PyTorch training script, we define the network,
# optimizer, and scheduler.
#
# If you are not familiar with the process of training a network,
# we recommend reading one of the PyTorch tutorials, like
# `this `_.
#
# For each epoch, we run both training and testing once and collect the
# running loss. We can see from the output below that the running loss
# decreases, which means our IMUCorrector is working.
#

network = IMUCorrector().to(args.device)
optimizer = torch.optim.Adam(network.parameters(), lr=5e-6)
scheduler = ReduceLROnPlateau(optimizer, 'min', factor=0.1, patience=10)

for epoch_i in range(args.max_epochs):
    train_loss = train(network, train_loader, epoch_i, optimizer, device=args.device)
    test_loss = test(network, test_loader, device=args.device)
    scheduler.step(train_loss)
    print("train loss: %f test loss: %f" % (train_loss, test_loss))

######################################################################
# And that's it. We're done with our IMUCorrector tutorial. Thanks for reading.
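######################################################################
# The ``--load_ckpt`` flag is parsed above but never used in the code shown in
# this tutorial. A minimal sketch of how checkpointing could be wired in
# (``imu_corrector.pt`` is an assumed file name, and ``os`` would need to be
# imported):

# ckpt = "imu_corrector.pt"
# if args.load_ckpt and os.path.exists(ckpt):
#     network.load_state_dict(torch.load(ckpt, map_location=args.device))
# ...  # train as above, then persist the weights:
# torch.save(network.state_dict(), ckpt)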
#
--------------------------------------------------------------------------------
/imu_source/imu_dataset_tutorial.py:
--------------------------------------------------------------------------------
"""
IMU Dataset Tutorial
========================

"""

import torch
import pykitti
import numpy as np
import pypose as pp
from datetime import datetime
import torch.utils.data as Data
from torchvision.datasets.utils import download_and_extract_archive

class KITTI_IMU(Data.Dataset):
    datalink = 'https://github.com/pypose/pypose/releases/download/v0.2.2/2011_09_26.zip'
    def __init__(self, root, dataname, drive, duration=10, step_size=1, mode='train',
                 download=True):
        super().__init__()
        if download:
            download_and_extract_archive(self.datalink, root)
        self.duration = duration
        self.data = pykitti.raw(root, dataname, drive)
        self.seq_len = len(self.data.timestamps) - 1
        assert mode in ['evaluate', 'train', 'test'], "{} mode is not supported.".format(mode)

        self.dt = torch.tensor([datetime.timestamp(self.data.timestamps[i+1]) -
                                datetime.timestamp(self.data.timestamps[i])
                                for i in range(self.seq_len)])
        self.gyro = torch.tensor([[self.data.oxts[i].packet.wx,
                                   self.data.oxts[i].packet.wy,
                                   self.data.oxts[i].packet.wz]
                                  for i in range(self.seq_len)])
        self.acc = torch.tensor([[self.data.oxts[i].packet.ax,
                                  self.data.oxts[i].packet.ay,
                                  self.data.oxts[i].packet.az]
                                 for i in range(self.seq_len)])
        self.gt_rot = pp.euler2SO3(torch.tensor([[self.data.oxts[i].packet.roll,
                                                  self.data.oxts[i].packet.pitch,
                                                  self.data.oxts[i].packet.yaw]
                                                 for i in range(self.seq_len)]))
        self.gt_vel = self.gt_rot @ torch.tensor([[self.data.oxts[i].packet.vf,
                                                   self.data.oxts[i].packet.vl,
                                                   self.data.oxts[i].packet.vu]
                                                  for i in range(self.seq_len)])
        self.gt_pos = torch.tensor(np.array([self.data.oxts[i].T_w_imu[0:3, 3]
                                             for i in range(self.seq_len)]))

        start_frame = 0
        end_frame = self.seq_len
        if mode == 'train':
            end_frame = np.floor(self.seq_len * 0.5).astype(int)
        elif mode == 'test':
            start_frame = np.floor(self.seq_len * 0.5).astype(int)

        self.index_map = [i for i in range(0, end_frame - start_frame - self.duration, step_size)]

    def __len__(self):
        return len(self.index_map)

    def __getitem__(self, i):
        frame_id = self.index_map[i]
        end_frame_id = frame_id + self.duration
        return {
            'dt': self.dt[frame_id: end_frame_id],
            'acc': self.acc[frame_id: end_frame_id],
            'gyro': self.gyro[frame_id: end_frame_id],
            'gt_pos': self.gt_pos[frame_id+1: end_frame_id+1],
            'gt_rot': self.gt_rot[frame_id+1: end_frame_id+1],
            'gt_vel': self.gt_vel[frame_id+1: end_frame_id+1],
            'init_pos': self.gt_pos[frame_id][None, ...],
            # TODO: the init rotation might be used in gravity compensation
            'init_rot': self.gt_rot[frame_id: end_frame_id],
            'init_vel': self.gt_vel[frame_id][None, ...],
        }

    def get_init_value(self):
        return {'pos': self.gt_pos[:1],
                'rot': self.gt_rot[:1],
                'vel': self.gt_vel[:1]}
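# A quick, illustrative sanity check (not part of the original script; it
# assumes the KITTI archive is available under ``../dataset``):
#
#   dataset = KITTI_IMU('../dataset', '2011_09_26', '0001', duration=10, mode='train')
#   sample = dataset[0]
#   print(sample['acc'].shape, sample['gyro'].shape)  # torch.Size([10, 3]) each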

def imu_collate(data):
    acc = torch.stack([d['acc'] for d in data])
    gyro = torch.stack([d['gyro'] for d in data])

    gt_pos = torch.stack([d['gt_pos'] for d in data])
    gt_rot = torch.stack([d['gt_rot'] for d in data])
    gt_vel = torch.stack([d['gt_vel'] for d in data])

    init_pos = torch.stack([d['init_pos'] for d in data])
    init_rot = torch.stack([d['init_rot'] for d in data])
    init_vel = torch.stack([d['init_vel'] for d in data])

    dt = torch.stack([d['dt'] for d in data]).unsqueeze(-1)

    return {
        'dt': dt,
        'acc': acc,
        'gyro': gyro,

        'gt_pos': gt_pos,
        'gt_vel': gt_vel,
        'gt_rot': gt_rot,

        'init_pos': init_pos,
        'init_vel': init_vel,
        'init_rot': init_rot,
    }


def move_to(obj, device):
    if torch.is_tensor(obj):
        return obj.to(device)
    elif isinstance(obj, dict):
        res = {}
        for k, v in obj.items():
            res[k] = move_to(v, device)
        return res
    elif isinstance(obj, list):
        res = []
        for v in obj:
            res.append(move_to(v, device))
        return res
    else:
        raise TypeError("Invalid type for move_to", obj)
--------------------------------------------------------------------------------
/imu_source/imu_integrator_tutorial.py:
--------------------------------------------------------------------------------
"""
IMU Integrator Tutorial
========================

"""
######################################################################
# Uncomment this if you're using Google Colab to run this script
#

# !pip install pypose
# !pip install pykitti

######################################################################
# In this tutorial, we will be doing IMU integration using the ``pypose.module.IMUPreintegrator`` module.
#
# 1. What is IMU integration
# --------------------------
# An Inertial Measurement Unit (IMU) is a device that measures acceleration and angular velocity.
#
# An IMU typically consists of:
#
# * Gyroscopes: providing a measure of angular velocity
# * Accelerometers: providing a measure of acceleration
#
# With acceleration and angular velocity, we can recover velocity and position using basic kinematics:
#
# * The first integral of acceleration over time is the change in velocity.
# * The second integral of acceleration over time is the change in position.
#
# This process is called IMU integration (or preintegration, when done over segments),
# and it is often used in robotics applications such as SLAM (Simultaneous Localization and Mapping).
#
# Uncertainty
# ~~~~~~~~~~~~~
# However, IMU measurements contain significant noise. For example, even if we keep an IMU
# sensor in a static position, its measurements still jump around zero. As a result, the
# longer we integrate, the more uncertain we become. This uncertainty can also be quantified
# mathematically. Please refer to the `doc `_ for the math.
#
# In the simple example below, we will see how to get the IMU-integrated position and its
# uncertainty with ``pypose.module.IMUPreintegrator``.
#
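######################################################################
# To make the double integration concrete, here is a tiny, self-contained
# dead-reckoning sketch (purely illustrative; this is not how PyPose integrates
# internally). With a constant 1 m/s^2 acceleration for 5 s, the exact
# displacement is 0.5 * a * t^2 = 12.5 m; forward-Euler overshoots slightly.

import torch  # imported again below, together with the other dependencies

dt_demo = 0.1                                        # 10 Hz samples
acc_demo = torch.full((50,), 1.0)                    # constant acceleration
vel_demo = torch.cumsum(acc_demo * dt_demo, dim=0)   # first integral: velocity
pos_demo = torch.cumsum(vel_demo * dt_demo, dim=0)   # second integral: position
print(pos_demo[-1])                                  # tensor(12.7500)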

import os
import argparse
import torch
import pykitti
import numpy as np
import pypose as pp
from datetime import datetime
import torch.utils.data as Data
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from matplotlib.collections import PatchCollection
from torchvision.datasets.utils import download_and_extract_archive


######################################################################
# 2. Dataset Definition
# --------------------------
# First we define the ``KITTI_IMU`` dataset as a ``torch.utils.data.Dataset``, for easy usage.
# We're using the ``pykitti`` package,
# which provides a minimal set of tools for working with the KITTI datasets.
# To access a data sequence, use:
# ::
#
#   dataset = pykitti.raw(root, dataname, drive)
#
# Some of the data attributes we use below are:
#
# * ``dataset.timestamps``: Timestamps are parsed into a list of datetime objects
# * ``dataset.oxts``: List of OXTS packets and 6-dof poses as named tuples
#
# For more details about the data format, please refer to their github page
# `here <https://github.com/utiasSTARS/pykitti>`_.
#
# A sequence is separated into many segments. The number of segments is controlled by ``step_size``.
# Each segment of the sequence returns measurements like ``dt``, ``acc``, and ``gyro``
# for a few frames, as defined by ``duration``.
#

class KITTI_IMU(Data.Dataset):
    datalink = 'https://github.com/pypose/pypose/releases/download/v0.2.2/2011_09_26.zip'
    def __init__(self, root, dataname, drive, duration=10, step_size=1, mode='train',
                 download=True):
        super().__init__()
        if download:
            download_and_extract_archive(self.datalink, root)
        self.duration = duration
        self.data = pykitti.raw(root, dataname, drive)
        self.seq_len = len(self.data.timestamps) - 1
        assert mode in ['evaluate', 'train',
                        'test'], "{} mode is not supported.".format(mode)

        self.dt = torch.tensor([datetime.timestamp(self.data.timestamps[i+1]) -
                                datetime.timestamp(self.data.timestamps[i])
                                for i in range(self.seq_len)])
        self.gyro = torch.tensor([[self.data.oxts[i].packet.wx,
                                   self.data.oxts[i].packet.wy,
                                   self.data.oxts[i].packet.wz]
                                  for i in range(self.seq_len)])
        self.acc = torch.tensor([[self.data.oxts[i].packet.ax,
                                  self.data.oxts[i].packet.ay,
                                  self.data.oxts[i].packet.az]
                                 for i in range(self.seq_len)])
        self.gt_rot = pp.euler2SO3(torch.tensor([[self.data.oxts[i].packet.roll,
                                                  self.data.oxts[i].packet.pitch,
                                                  self.data.oxts[i].packet.yaw]
                                                 for i in range(self.seq_len)]))
        self.gt_vel = self.gt_rot @ torch.tensor([[self.data.oxts[i].packet.vf,
                                                   self.data.oxts[i].packet.vl,
                                                   self.data.oxts[i].packet.vu]
                                                  for i in range(self.seq_len)])
        self.gt_pos = torch.tensor(
            np.array([self.data.oxts[i].T_w_imu[0:3, 3] for i in range(self.seq_len)]))

        start_frame = 0
        end_frame = self.seq_len
        if mode == 'train':
            end_frame = np.floor(self.seq_len * 0.5).astype(int)
        elif mode == 'test':
            start_frame = np.floor(self.seq_len * 0.5).astype(int)

        self.index_map = [i for i in range(
            0, end_frame - start_frame - self.duration, step_size)]

    def __len__(self):
        return len(self.index_map)

    def __getitem__(self, i):
        frame_id = self.index_map[i]
        end_frame_id = frame_id + self.duration
        return {
            'dt': self.dt[frame_id: end_frame_id],
            'acc': self.acc[frame_id: end_frame_id],
            'gyro': self.gyro[frame_id: end_frame_id],
            'gt_pos': self.gt_pos[frame_id+1: end_frame_id+1],
            'gt_rot': self.gt_rot[frame_id+1: end_frame_id+1],
            'gt_vel': self.gt_vel[frame_id+1: end_frame_id+1],
            'init_pos': self.gt_pos[frame_id][None, ...],
            # TODO: the init rotation might be used in gravity compensation
            'init_rot': self.gt_rot[frame_id: end_frame_id],
            'init_vel': self.gt_vel[frame_id][None, ...],
        }

    def get_init_value(self):
        return {'pos': self.gt_pos[:1],
                'rot': self.gt_rot[:1],
                'vel': self.gt_vel[:1]}
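######################################################################
# The ground-truth rotations above are built with ``pp.euler2SO3``, which
# converts (roll, pitch, yaw) angles into an ``SO3`` LieTensor. A minimal,
# illustrative example (the angles are arbitrary):

euler_demo = torch.tensor([[0.0, 0.0, np.pi / 2]])  # a 90-degree yaw
print(pp.euler2SO3(euler_demo))                     # an SO3 LieTensor backed by a unit quaternion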

######################################################################
# 3. Utility Functions
# --------------------------
# These are several utility functions. You can skip to the parameter definitions
# below and come back when necessary.

######################################################################
# ``imu_collate``
# ~~~~~~~~~~~~~~~~
# ``imu_collate`` is used for batching, to stack the data of multiple frames together.
#


def imu_collate(data):
    acc = torch.stack([d['acc'] for d in data])
    gyro = torch.stack([d['gyro'] for d in data])

    gt_pos = torch.stack([d['gt_pos'] for d in data])
    gt_rot = torch.stack([d['gt_rot'] for d in data])
    gt_vel = torch.stack([d['gt_vel'] for d in data])

    init_pos = torch.stack([d['init_pos'] for d in data])
    init_rot = torch.stack([d['init_rot'] for d in data])
    init_vel = torch.stack([d['init_vel'] for d in data])

    dt = torch.stack([d['dt'] for d in data]).unsqueeze(-1)

    return {
        'dt': dt,
        'acc': acc,
        'gyro': gyro,

        'gt_pos': gt_pos,
        'gt_vel': gt_vel,
        'gt_rot': gt_rot,

        'init_pos': init_pos,
        'init_vel': init_vel,
        'init_rot': init_rot,
    }

######################################################################
# ``move_to``
# ~~~~~~~~~~~~~~~~
# ``move_to`` recursively moves tensors (possibly nested in dicts or lists)
# to the target device, e.g. a CUDA device.
#


def move_to(obj, device):
    if torch.is_tensor(obj):
        return obj.to(device)
    elif isinstance(obj, dict):
        res = {}
        for k, v in obj.items():
            res[k] = move_to(v, device)
        return res
    elif isinstance(obj, list):
        res = []
        for v in obj:
            res.append(move_to(v, device))
        return res
    else:
        raise TypeError("Invalid type for move_to", obj)

######################################################################
# ``plot_gaussian``
# ~~~~~~~~~~~~~~~~~~
# ``plot_gaussian`` plots an ellipse for each covariance to visualize the
# uncertainty: the bigger the ellipse, the larger the uncertainty.
#


def plot_gaussian(ax, means, covs, color=None, sigma=3):
    '''Set a specific color to show edges, otherwise same as the facecolor.'''
    ellipses = []
    for i in range(len(means)):
        eigvals, eigvecs = np.linalg.eig(covs[i])
        axis = np.sqrt(eigvals) * sigma
        slope = eigvecs[1][0] / eigvecs[1][1]
        angle = 180.0 * np.arctan(slope) / np.pi
        ellipses.append(Ellipse(means[i, 0:2], axis[0], axis[1], angle=angle))
    ax.add_collection(PatchCollection(ellipses, edgecolors=color, linewidth=1))
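######################################################################
# For intuition, ``plot_gaussian`` can be exercised on a hand-made covariance
# (an illustrative snippet, not part of the original tutorial): a larger
# variance along x stretches the ellipse along x.

fig_demo, ax_demo = plt.subplots()
demo_means = np.zeros((1, 2))                     # one Gaussian centered at the origin
demo_covs = np.array([[[4.0, 0.0], [0.0, 1.0]]])  # var_x > var_y
plot_gaussian(ax_demo, demo_means, demo_covs, color='g')
ax_demo.set_xlim(-8, 8)
ax_demo.set_ylim(-8, 8)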


######################################################################
# 4. Define Parameters
# ----------------------
# Here we define all the parameters we will use.
# See the help message for the usage of each parameter.


parser = argparse.ArgumentParser(description='IMU Preintegration')
parser.add_argument("--device",
                    type=str,
                    default='cpu',
                    help="cuda or cpu")
parser.add_argument("--batch-size",
                    type=int,
                    default=1,
                    help="batch size; only 1 is supported for now")
parser.add_argument("--step-size",
                    type=int,
                    default=2,
                    help="number of frames integrated in each interval")
parser.add_argument("--save",
                    type=str,
                    default='../dataset/save/',
                    help="location to save the output png files")
parser.add_argument("--dataroot",
                    type=str,
                    default='../dataset/',
                    help="location of the downloaded dataset")
parser.add_argument("--dataname",
                    type=str,
                    default='2011_09_26',
                    help="dataset name")
parser.add_argument("--datadrive",
                    nargs='+',
                    type=str,
                    default=["0001", "0002", "0005", "0009", "0011",
                             "0013", "0014", "0015", "0017", "0018",
                             "0019", "0020", "0022"],
                    help="data sequences")
parser.add_argument('--plot3d',
                    dest='plot3d',
                    action='store_true',
                    help="plot in 3D space, default: False")
parser.set_defaults(plot3d=False)
args, unknown = parser.parse_known_args()
print(args)
os.makedirs(os.path.join(args.save), exist_ok=True)
torch.set_default_tensor_type(torch.DoubleTensor)
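######################################################################
# Note that the script switches the default tensor type to double precision:
# small per-step rounding errors compound over thousands of integration steps.
# On newer PyTorch versions ``torch.set_default_tensor_type`` is deprecated;
# an equivalent call (assuming a recent PyTorch) would be:

# torch.set_default_dtype(torch.float64)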


######################################################################
# 5. Perform Integration
# ----------------------
# With everything set up, we will perform the core operation of IMU integration.
# There are mainly 5 steps in the code below:
#
# #. **Step 1**: Define the dataloader using the ``KITTI_IMU`` class we defined above
# #. **Step 2**: Get the initial position, rotation and velocity from the dataset
# #. **Step 3**: Define the IMUPreintegrator
# #. **Step 4**: Perform integration:
#    After running the forward function of the ``integrator``, the result is stored in ``state``,
#    where ``state['pos']`` is the integrated position, and ``state['cov']`` is the uncertainty measurement.
#
#    Note that ``state['cov']`` is a 9x9 matrix in the order of rotation, velocity, and position.
#    That's why the visualization uses ``covs[:, 6:8, 6:8]``: it is the covariance matrix of the
#    ``x`` and ``y`` positions.
# #. **Step 5**: Visualization

for drive in args.datadrive:

    # Step 1: Define the dataloader using the ``KITTI_IMU`` class we defined above
    dataset = KITTI_IMU(args.dataroot,
                        args.dataname,
                        drive,
                        duration=args.step_size,
                        step_size=args.step_size,
                        mode='evaluate')
    loader = Data.DataLoader(dataset=dataset,
                             batch_size=args.batch_size,
                             collate_fn=imu_collate,
                             shuffle=False)

    # Step 2: Get the initial position, rotation and velocity from the dataset
    init = dataset.get_init_value()

    # Step 3: Define the IMUPreintegrator
    integrator = pp.module.IMUPreintegrator(init['pos'],
                                            init['rot'],
                                            init['vel'],
                                            reset=False).to(args.device)

    # Step 4: Perform integration
    poses, poses_gt = [init['pos']], [init['pos']]
    covs = [torch.zeros(9, 9)]

    for idx, data in enumerate(loader):
        data = move_to(data, args.device)
        state = integrator(dt=data['dt'],
                           gyro=data['gyro'],
                           acc=data['acc'],
                           rot=data['init_rot'])
        poses_gt.append(data['gt_pos'][..., -1, :].cpu())
        poses.append(state['pos'][..., -1, :].cpu())
        covs.append(state['cov'][..., -1, :, :].cpu())

    poses = torch.cat(poses).numpy()
    poses_gt = torch.cat(poses_gt).numpy()
    covs = torch.stack(covs, dim=0).numpy()

    # Step 5: Visualization
    plt.figure(figsize=(5, 5))
    if args.plot3d:
        ax = plt.axes(projection='3d')
        ax.plot3D(poses[:, 0], poses[:, 1], poses[:, 2], 'b')
        ax.plot3D(poses_gt[:, 0], poses_gt[:, 1], poses_gt[:, 2], 'r')
    else:
        ax = plt.axes()
        ax.plot(poses[:, 0], poses[:, 1], 'b')
        ax.plot(poses_gt[:, 0], poses_gt[:, 1], 'r')
        plot_gaussian(ax, poses[:, 0:2], covs[:, 6:8, 6:8])
    plt.title("PyPose IMU Integrator")
    plt.legend(["PyPose", "Ground Truth"])
    figure = os.path.join(args.save, args.dataname+'_'+drive+'.png')
    plt.savefig(figure)
    print("Saved to", figure)


######################################################################
# We can see that, in some of the sequences, the integrated position drifts away
# from the ground truth, and the uncertainty grows quickly.
# This reflects the noisy nature of the IMU sensor.
# In the IMUCorrector tutorial, we will see an example of how we can correct this.
#
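######################################################################
# Since ``state['cov']`` is ordered as (rotation, velocity, position), the
# position uncertainty lives in the lower-right 3x3 block. One illustrative way
# to reduce it to a single number per step (not part of the original script):

# pos_cov = covs[:, 6:9, 6:9]                            # 3x3 position covariance per step
# pos_sigma = np.sqrt(pos_cov.trace(axis1=1, axis2=2))   # total position standard deviation
# print(pos_sigma[-1])                                   # uncertainty at the last step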
--------------------------------------------------------------------------------
/index.rst:
--------------------------------------------------------------------------------
.. PyPose Tutorials documentation master file, created by
   sphinx-quickstart on Sun Sep 11 00:56:21 2022.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

Welcome to PyPose Tutorials
============================================

.. toctree::
   :maxdepth: 2
   :caption: Introduction to PyPose

   beginner/started
   beginner/lietensor_tutorial

.. toctree::
   :maxdepth: 2
   :caption: Dynamics

   dynamics/cartpole_tutorial
   dynamics/floquet_tutorial
   dynamics/neuralnet_tutorial

.. toctree::
   :maxdepth: 2
   :caption: IMU

   imu/imu_integrator_tutorial
   imu/imu_corrector_tutorial

.. toctree::
   :maxdepth: 2
   :caption: Pose Graph Optimization

   pgo/pgo_tutorial



Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
--------------------------------------------------------------------------------
/intermediate_source/README.txt:
--------------------------------------------------------------------------------
This is my gallery
==================

Below is a gallery of examples
--------------------------------------------------------------------------------
/ipynb2py.py:
--------------------------------------------------------------------------------
"""Convert a Jupyter notebook to a sphinx-gallery styled example script.

Usage: python ipynb2py.py <notebook.ipynb>

Dependencies:
    pypandoc: install using `pip install pypandoc`
"""
import pypandoc as pdoc
import json

def convert_ipynb_to_gallery(file_name):
    python_file = ""

    nb_dict = json.load(open(file_name))
    cells = nb_dict['cells']

    for i, cell in enumerate(cells):
        if i == 0:
            assert cell['cell_type'] == 'markdown', \
                'First cell has to be markdown'

            md_source = ''.join(cell['source'])
            rst_source = pdoc.convert_text(md_source, 'rst', 'md')
            python_file = '"""\n' + rst_source + '\n"""'
        else:
            if cell['cell_type'] == 'markdown':
                md_source = ''.join(cell['source'])
                rst_source = pdoc.convert_text(md_source, 'rst', 'md')
                commented_source = '\n'.join(['# ' + x for x in
                                              rst_source.split('\n')])
                python_file = python_file + '\n\n\n' + '#' * 70 + '\n' + \
                    commented_source
            elif cell['cell_type'] == 'code':
                source = ''.join(cell['source'])
                python_file = python_file + '\n' * 2 + source

    python_file = python_file.replace("\n%", "\n# %")
    open(file_name.replace('.ipynb', '.py'), 'w').write(python_file)

if __name__ == '__main__':
    import sys
    convert_ipynb_to_gallery(sys.argv[-1])
--------------------------------------------------------------------------------
/make.bat:
--------------------------------------------------------------------------------
@ECHO OFF

pushd %~dp0

REM Command file for Sphinx documentation

if "%SPHINXBUILD%" == "" (
	set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=.
set BUILDDIR=_build

if "%1" == "" goto help

%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
	echo.
	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
	echo.installed, then set the SPHINXBUILD environment variable to point
	echo.to the full path of the 'sphinx-build' executable. Alternatively you
	echo.may add the Sphinx directory to PATH.
	echo.
	echo.If you don't have Sphinx installed, grab it from
	echo.https://www.sphinx-doc.org/
	exit /b 1
)

%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
goto end

:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%

:end
popd
--------------------------------------------------------------------------------
/pgo_source/README.txt:
--------------------------------------------------------------------------------
# Pose Graph Optimization

An example for Pose Graph Optimization (PGO)

## Installation

    python -m pip install -U matplotlib

## Prepare Dataset

* Download the Parking Garage G2O sample data [parking-garage.g2o](https://www.dropbox.com/s/zu23p8d522qccor/parking-garage.g2o?dl=0).
* Extract the file to any folder `DATAROOT`, so that it looks like:

      DATAROOT
      ├── parking-garage.g2o

## Run

    python pgo_tutorial.py --dataroot DATAROOT --device cuda:0

Note: change `DATAROOT` to the folder you selected.

* Other supported arguments:

      Pose Graph Optimization

      optional arguments:
        -h, --help           show this help message and exit
        --device DEVICE      cuda or cpu
        --damping DAMPING    damping factor
        --radius RADIUS      trust region radius
        --save SAVE          location of png files to save
        --dataroot DATAROOT  dataset location downloaded
        --dataname DATANAME  dataset name
        --no-vectorize       to save memory
        --vectorize          to accelerate computation
--------------------------------------------------------------------------------
/pgo_source/pgo_dataset_tutorial.py:
--------------------------------------------------------------------------------
"""
Pose Graph Optimization Dataset Tutorial
========================================

"""

import os
import torch
import numpy as np
import pypose as pp
import torch.utils.data as Data


class G2OPGO(Data.Dataset):
    def __init__(self, root, dataname, device='cpu'):
        super().__init__()

        def info2mat(info):
            # unpack the upper-triangular information entries into a symmetric 6x6 matrix
            mat = np.zeros((6, 6))
            ix = 0
            for i in range(mat.shape[0]):
                mat[i, i:] = info[ix:ix+(6-i)]
                mat[i:, i] = info[ix:ix+(6-i)]
                ix += (6-i)
            return mat
        self.dtype = torch.get_default_dtype()
        filename = os.path.join(root, dataname)
        ids, nodes, edges, poses, infos = [], [], [], [], []
        with open(filename) as f:
            for line in f:
                line = line.split()
                if line[0] == 'VERTEX_SE3:QUAT':
                    ids.append(torch.tensor(int(line[1]), dtype=torch.int64))
                    nodes.append(pp.SE3(np.array(line[2:], dtype=np.float64)))
                elif line[0] == 'EDGE_SE3:QUAT':
                    edges.append(torch.tensor(np.array(line[1:3], dtype=np.int64)))
                    poses.append(pp.SE3(np.array(line[3:10], dtype=np.float64)))
                    infos.append(torch.tensor(info2mat(np.array(line[10:], dtype=np.float64))))

        self.ids = torch.stack(ids)
        self.nodes = torch.stack(nodes).to(self.dtype).to(device)
        self.edges = torch.stack(edges).to(device)  # have to be LongTensor
        self.poses = torch.stack(poses).to(self.dtype).to(device)
        self.infos = torch.stack(infos).to(self.dtype).to(device)
        assert self.ids.size(0) == self.nodes.size(0) \
            and self.edges.size(0) == self.poses.size(0) == self.infos.size(0)

    def init_value(self):
        return self.nodes.clone()

    def __getitem__(self, i):
        return self.edges[i], self.poses[i], self.infos[i]

    def __len__(self):
        return self.edges.size(0)
--------------------------------------------------------------------------------
/pgo_source/pgo_tutorial.py:
--------------------------------------------------------------------------------
"""
Pose Graph Optimization Tutorial
================================

"""


import os
import torch
import argparse
import pypose as pp
from torch import nn
from pgo_dataset_tutorial import G2OPGO
import matplotlib.pyplot as plt
import pypose.optim.solver as ppos
import pypose.optim.kernel as ppok
import pypose.optim.corrector as ppoc
import pypose.optim.strategy as ppost
from pypose.optim.scheduler import StopOnPlateau


######################################################################
# Define Pose Graph
# ------------------

class PoseGraph(nn.Module):

    def __init__(self, nodes):
        super().__init__()
        self.nodes = pp.Parameter(nodes)

    def forward(self, edges, poses):
        node1 = self.nodes[edges[..., 0]]
        node2 = self.nodes[edges[..., 1]]
        error = poses.Inv() @ node1.Inv() @ node2
        return error.Log().tensor()
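######################################################################
# The residual above is the relative-pose error expressed in the Lie algebra:
# when a measured edge exactly matches the relative transform between its two
# nodes, the error is zero. A small, hypothetical check (random poses, not the
# parking-garage data):

# n1, n2 = pp.randn_SE3(1), pp.randn_SE3(1)
# perfect_edge = n1.Inv() @ n2                              # edge consistent with the nodes
# print((perfect_edge.Inv() @ n1.Inv() @ n2).Log().norm())  # ~ 0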


@torch.no_grad()
def plot_and_save(points, pngname, title='', axlim=None):
    points = points.detach().cpu().numpy()
    plt.figure(figsize=(7, 7))
    ax = plt.axes(projection='3d')
    ax.plot3D(points[:, 0], points[:, 1], points[:, 2], 'b')
    plt.title(title)
    if axlim is not None:
        ax.set_xlim(axlim[0])
        ax.set_ylim(axlim[1])
        ax.set_zlim(axlim[2])
    plt.savefig(pngname)
    print('Saving to', pngname)
    return ax.get_xlim(), ax.get_ylim(), ax.get_zlim()


parser = argparse.ArgumentParser(description='Pose Graph Optimization')
parser.add_argument("--device", type=str, default='cuda:0', help="cuda or cpu")
parser.add_argument("--radius", type=float, default=1e4, help="trust region radius")
parser.add_argument("--save", type=str, default='../dataset/pgo/save/', help="files location to save")
parser.add_argument("--dataroot", type=str, default='../dataset/pgo', help="dataset location")
parser.add_argument("--dataname", type=str, default='parking-garage.g2o', help="dataset name")
parser.add_argument('--no-vectorize', dest='vectorize', action='store_false', help="to save memory")
parser.add_argument('--vectorize', action='store_true', help='to accelerate computation')
parser.set_defaults(vectorize=False)
args = parser.parse_args(); print(args)
os.makedirs(os.path.join(args.save), exist_ok=True)

data = G2OPGO(args.dataroot, args.dataname, device=args.device)
edges, poses, infos = data.edges, data.poses, data.infos

######################################################################
# Define Optimizer
# -------------------------------------------------------------

graph = PoseGraph(data.nodes).to(args.device)
solver = ppos.Cholesky()
strategy = ppost.TrustRegion(radius=args.radius)
optimizer = pp.optim.LM(graph, solver=solver, strategy=strategy, min=1e-6, vectorize=args.vectorize)
scheduler = StopOnPlateau(optimizer, steps=10, patience=3, decreasing=1e-3, verbose=True)

pngname = os.path.join(args.save, args.dataname+'.png')
axlim = plot_and_save(graph.nodes.translation(), pngname, args.dataname)

######################################################################
# The 1st implementation: easy to customize and extend
# -------------------------------------------------------------
# Commented out because it is too time-consuming to run while building the docs.

# while scheduler.continual:
#     loss = optimizer.step(input=(edges, poses), weight=infos)
#     scheduler.step(loss)

#     name = os.path.join(args.save, args.dataname + '_' + str(scheduler.steps))
#     title = 'PyPose PGO at the %d step(s) with loss %7f'%(scheduler.steps, loss.item())
#     plot_and_save(graph.nodes.translation(), name+'.png', title, axlim=axlim)
#     torch.save(graph.state_dict(), name+'.pt')

######################################################################
# The 2nd implementation: equivalent to the 1st one, but more compact
# --------------------------------------------------------------------

# scheduler.optimize(input=(edges, poses), weight=infos)
--------------------------------------------------------------------------------
/requirements.txt:
-------------------------------------------------------------------------------- 1 | sphinx-gallery 2 | matplotlib 3 | pypose_sphinx_theme 4 | --------------------------------------------------------------------------------