├── .circleci
│   └── config.yml
├── .gitattributes
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── BUG_REPORT.md
│   │   ├── FEATURE_REQUEST.md
│   │   └── QUESTIONS_HELP_SUPPORT.md
│   ├── PULL_REQUEST_TEMPLATE.md
│   ├── tmp
│   │   ├── optim_tests.yml
│   │   ├── test_wheel.yml
│   │   └── tests_gpu.yml
│   └── workflows
│       ├── lie_tests.yml
│       ├── other_tests.yml
│       └── precommit.yml
├── .gitignore
├── .pre-commit-config.yaml
├── .readthedocs.yaml
├── CITATION.cff
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── LICENSE
├── MANIFEST.in
├── README.md
├── build_scripts
│   └── build_wheel.sh
├── docs
│   └── source
│       ├── conf.py
│       ├── core.rst
│       ├── embodied.rst
│       ├── generated
│       │   ├── theseus.CostFunction.rst
│       │   ├── theseus.Objective.add.rst
│       │   ├── theseus.Objective.error.rst
│       │   ├── theseus.Objective.retract_vars_sequence.rst
│       │   ├── theseus.Objective.rst
│       │   ├── theseus.Objective.update.rst
│       │   └── theseus.Variable.rst
│       ├── geometry.rst
│       ├── getting-started.rst
│       ├── img
│       │   ├── theseus-color-horizontal.png
│       │   ├── theseus-color-horizontal.svg
│       │   ├── theseus-color-icon.png
│       │   ├── theseus-color-icon.svg
│       │   ├── theseuslayer.gif
│       │   └── theseuslayer.png
│       ├── index.rst
│       ├── optimizer.rst
│       └── utils.rst
├── evaluations
│   ├── README.md
│   ├── autodiff_cost_function_ablation.sh
│   ├── backward_modes_tactile.sh
│   ├── pose_graph_cube.sh
│   ├── pose_graph_synthetic.sh
│   ├── time_local_cost_backward.py
│   └── vectorization_ablation.sh
├── examples
│   ├── README.md
│   ├── __init__.py
│   ├── backward_modes.py
│   ├── bundle_adjustment.py
│   ├── configs
│   │   ├── bundle_adjustment.yaml
│   │   ├── homography_estimation.yaml
│   │   ├── motion_planning_2d.yaml
│   │   ├── pose_graph
│   │   │   ├── __init__.py
│   │   │   ├── pose_graph_benchmark.yaml
│   │   │   ├── pose_graph_cube.yaml
│   │   │   └── pose_graph_synthetic.yaml
│   │   └── tactile_pose_estimation.yaml
│   ├── homography_estimation.py
│   ├── inverse_kinematics.py
│   ├── motion_planning_2d.py
│   ├── pose_graph
│   │   ├── pose_graph_benchmark.py
│   │   ├── pose_graph_cube.py
│   │   └── pose_graph_synthetic.py
│   ├── se2_inverse.py
│   ├── se2_planning.py
│   ├── simple_example.py
│   ├── state_estimation_2d.py
│   ├── tactile_pose_estimation.py
│   └── torchlie_api.py
├── papers.md
├── requirements
│   ├── dev.txt
│   ├── docs.txt
│   └── main.txt
├── setup.cfg
├── setup.py
├── tests
│   ├── __init__.py
│   ├── labs
│   │   └── __init__.py
│   ├── theseus_tests
│   │   ├── __init__.py
│   │   ├── core
│   │   │   ├── __init__.py
│   │   │   ├── common.py
│   │   │   ├── test_cost_function.py
│   │   │   ├── test_cost_weight.py
│   │   │   ├── test_manifold.py
│   │   │   ├── test_objective.py
│   │   │   ├── test_robust_cost.py
│   │   │   ├── test_theseus_function.py
│   │   │   ├── test_variable.py
│   │   │   └── test_vectorizer.py
│   │   ├── decorators.py
│   │   ├── embodied
│   │   │   ├── collision
│   │   │   │   ├── __init__.py
│   │   │   │   ├── sdf_data.csv
│   │   │   │   ├── test_collision_factor.py
│   │   │   │   ├── test_eff_obj_contact.py
│   │   │   │   ├── test_signed_distance_field.py
│   │   │   │   └── utils.py
│   │   │   ├── kinematics
│   │   │   │   ├── data
│   │   │   │   │   ├── panda_fk_dataset.json
│   │   │   │   │   └── panda_no_gripper.urdf
│   │   │   │   ├── test_inverse_kinematics.py
│   │   │   │   └── test_urdf_model.py
│   │   │   ├── measurements
│   │   │   │   ├── between_errors_se2.npy
│   │   │   │   ├── between_errors_so2.npy
│   │   │   │   ├── test_between.py
│   │   │   │   ├── test_moving_frame_between.py
│   │   │   │   └── test_reprojection.py
│   │   │   ├── misc
│   │   │   │   ├── sq_dist_errors_se2.npy
│   │   │   │   ├── sq_dist_errors_so2.npy
│   │   │   │   └── test_variable_difference.py
│   │   │   └── motionmodel
│   │   │       ├── test_double_integrator.py
│   │   │       ├── test_misc.py
│   │   │       └── test_quasi_static_pushing_planar.py
│   │   ├── extlib
│   │   │   ├── __init__.py
│   │   │   ├── test_baspacho.py
│   │   │   ├── test_baspacho_simple.py
│   │   │   ├── test_cusolver_lu_solver.py
│   │   │   └── test_mat_mult.py
│   │   ├── geometry
│   │   │   ├── __init__.py
│   │   │   ├── common.py
│   │   │   ├── point_types_mypy_check.py
│   │   │   ├── test_point_types.py
│   │   │   ├── test_se2.py
│   │   │   ├── test_se3.py
│   │   │   ├── test_so2.py
│   │   │   ├── test_so3.py
│   │   │   └── test_vector.py
│   │   ├── optimizer
│   │   │   ├── autograd
│   │   │   │   ├── __init__.py
│   │   │   │   ├── bad_sparse_matrix.pth
│   │   │   │   ├── common.py
│   │   │   │   ├── test_baspacho_sparse_backward.py
│   │   │   │   ├── test_lu_cuda_sparse_backward.py
│   │   │   │   └── test_sparse_backward.py
│   │   │   ├── linear
│   │   │   │   ├── test_baspacho_sparse_solver.py
│   │   │   │   ├── test_cholmod_sparse_solver.py
│   │   │   │   ├── test_dense_solver.py
│   │   │   │   └── test_lu_cuda_sparse_solver.py
│   │   │   ├── linearization_test_utils.py
│   │   │   ├── nonlinear
│   │   │   │   ├── __init__.py
│   │   │   │   ├── common.py
│   │   │   │   ├── test_backwards.py
│   │   │   │   ├── test_dogleg.py
│   │   │   │   ├── test_gauss_newton.py
│   │   │   │   ├── test_info.py
│   │   │   │   ├── test_levenberg_marquardt.py
│   │   │   │   └── test_trust_region.py
│   │   │   ├── test_dense_linearization.py
│   │   │   ├── test_manifold_gaussian.py
│   │   │   ├── test_sparse_linearization.py
│   │   │   └── test_variable_ordering.py
│   │   ├── test_dlm_perturbation.py
│   │   ├── test_misc.py
│   │   ├── test_pgo_benchmark.py
│   │   ├── test_theseus_layer.py
│   │   └── utils
│   │       └── test_utils.py
│   ├── torchkin_tests
│   │   ├── panda_fk_dataset.json
│   │   ├── panda_no_gripper.urdf
│   │   └── test_forward_kinematics.py
│   └── torchlie_tests
│       ├── __init__.py
│       ├── functional
│       │   ├── __init__.py
│       │   ├── common.py
│       │   ├── test_se3.py
│       │   └── test_so3.py
│       ├── test_lie_tensor.py
│       └── test_misc.py
├── theseus
│   ├── __init__.py
│   ├── _version.py
│   ├── constants.py
│   ├── core
│   │   ├── __init__.py
│   │   ├── cost_function.py
│   │   ├── cost_weight.py
│   │   ├── objective.py
│   │   ├── robust_cost_function.py
│   │   ├── robust_loss.py
│   │   ├── theseus_function.py
│   │   ├── variable.py
│   │   └── vectorizer.py
│   ├── embodied
│   │   ├── __init__.py
│   │   ├── collision
│   │   │   ├── __init__.py
│   │   │   ├── collision.py
│   │   │   ├── eff_obj_contact.py
│   │   │   └── signed_distance_field.py
│   │   ├── kinematics
│   │   │   ├── __init__.py
│   │   │   └── kinematics_model.py
│   │   ├── measurements
│   │   │   ├── __init__.py
│   │   │   ├── between.py
│   │   │   ├── moving_frame_between.py
│   │   │   └── reprojection.py
│   │   ├── misc
│   │   │   ├── __init__.py
│   │   │   └── local_cost_fn.py
│   │   └── motionmodel
│   │       ├── __init__.py
│   │       ├── double_integrator.py
│   │       ├── misc.py
│   │       └── quasi_static_pushing_planar.py
│   ├── extlib
│   │   ├── __init__.py
│   │   ├── baspacho_solver.cpp
│   │   ├── baspacho_solver.h
│   │   ├── baspacho_solver_cuda.cu
│   │   ├── cusolver_lu_solver.cpp
│   │   ├── cusolver_sp_defs.cpp
│   │   ├── cusolver_sp_defs.h
│   │   ├── mat_mult.cu
│   │   └── utils.h
│   ├── geometry
│   │   ├── __init__.py
│   │   ├── lie_group.py
│   │   ├── lie_group_check.py
│   │   ├── manifold.py
│   │   ├── point_types.py
│   │   ├── se2.py
│   │   ├── se3.py
│   │   ├── so2.py
│   │   ├── so3.py
│   │   ├── utils.py
│   │   └── vector.py
│   ├── global_params.py
│   ├── labs
│   │   └── __init__.py
│   ├── optimizer
│   │   ├── __init__.py
│   │   ├── autograd
│   │   │   ├── __init__.py
│   │   │   ├── baspacho_sparse_autograd.py
│   │   │   ├── cholmod_sparse_autograd.py
│   │   │   ├── common.py
│   │   │   └── lu_cuda_sparse_autograd.py
│   │   ├── dense_linearization.py
│   │   ├── linear
│   │   │   ├── __init__.py
│   │   │   ├── baspacho_sparse_solver.py
│   │   │   ├── cholmod_sparse_solver.py
│   │   │   ├── dense_solver.py
│   │   │   ├── linear_optimizer.py
│   │   │   ├── linear_solver.py
│   │   │   ├── lu_cuda_sparse_solver.py
│   │   │   └── utils.py
│   │   ├── linear_system.py
│   │   ├── linearization.py
│   │   ├── manifold_gaussian.py
│   │   ├── nonlinear
│   │   │   ├── __init__.py
│   │   │   ├── dcem.py
│   │   │   ├── dogleg.py
│   │   │   ├── gauss_newton.py
│   │   │   ├── levenberg_marquardt.py
│   │   │   ├── nonlinear_least_squares.py
│   │   │   ├── nonlinear_optimizer.py
│   │   │   └── trust_region.py
│   │   ├── optimizer.py
│   │   ├── sparse_linearization.py
│   │   └── variable_ordering.py
│   ├── py.typed
│   ├── theseus_layer.py
│   ├── third_party
│   │   ├── __init__.py
│   │   ├── easyaug.py
│   │   ├── lml.py
│   │   └── utils.py
│   └── utils
│       ├── __init__.py
│       ├── examples
│       │   ├── __init__.py
│       │   ├── bundle_adjustment
│       │   │   ├── __init__.py
│       │   │   ├── data.py
│       │   │   └── util.py
│       │   ├── motion_planning
│       │   │   ├── __init__.py
│       │   │   ├── misc.py
│       │   │   ├── models.py
│       │   │   └── motion_planner.py
│       │   ├── pose_graph
│       │   │   ├── __init__.py
│       │   │   └── dataset.py
│       │   └── tactile_pose_estimation
│       │       ├── __init__.py
│       │       ├── misc.py
│       │       ├── models.py
│       │       ├── pose_estimator.py
│       │       └── trainer.py
│       ├── sparse_matrix_utils.py
│       └── utils.py
├── torchkin
│   ├── LICENSE
│   ├── MANIFEST.in
│   ├── README.md
│   ├── requirements.txt
│   ├── setup.py
│   └── torchkin
│       ├── __init__.py
│       ├── forward_kinematics.py
│       ├── joint.py
│       ├── py.typed
│       ├── robot.py
│       └── third_party
│           ├── __init__.py
│           └── urdf_parser_py
│               ├── LICENSE
│               ├── __init__.py
│               ├── display_urdf.py
│               ├── sdf.py
│               ├── urdf.py
│               └── xml_reflection
│                   ├── __init__.py
│                   ├── basics.py
│                   └── core.py
├── torchlie
│   ├── LICENSE
│   ├── MANIFEST.in
│   ├── README.md
│   ├── setup.py
│   └── torchlie
│       ├── __init__.py
│       ├── functional
│       │   ├── __init__.py
│       │   ├── check_contexts.py
│       │   ├── constants.py
│       │   ├── lie_group.py
│       │   ├── se3_impl.py
│       │   ├── so3_impl.py
│       │   └── utils.py
│       ├── global_params.py
│       ├── lie_tensor.py
│       ├── py.typed
│       └── types.py
└── tutorials
    ├── 00_introduction.ipynb
    ├── 01_least_squares_optimization.ipynb
    ├── 02_differentiating_theseus_layer.ipynb
    ├── 03_custom_cost_functions.ipynb
    ├── 04_motion_planning.ipynb
    ├── 05_differentiable_motion_planning.ipynb
    ├── README.md
    ├── data
    │   └── motion_planning_2d
    │       ├── im_sdf
    │       │   └── tarpit
    │       │       ├── 0_im.png
    │       │       ├── 0_sdf.npy
    │       │       ├── 1_im.png
    │       │       └── 1_sdf.npy
    │       ├── meta.yaml
    │       └── opt_trajs_gpmp2
    │           └── tarpit
    │               ├── env_0_prob_0.npz
    │               ├── env_1_prob_0.npz
    │               ├── image_env_0_prob_0.png
    │               └── image_env_1_prob_0.png
    └── fig
        └── theseus_objective.png
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.ipynb linguist-documentation
2 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/BUG_REPORT.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: "\U0001F41B Bug Report"
3 | about: Submit a bug report to help us improve Theseus
4 | ---
5 |
6 | ## 🐛 Bug
7 |
8 |
9 |
10 | ## Steps to Reproduce
11 |
12 | Steps to reproduce the behavior:
13 |
14 |
15 |
16 |
17 |
18 | ## Expected behavior
19 |
20 |
21 |
22 | ## Additional context
23 |
24 |
25 |
26 | ## System Info
27 |
28 |
29 | - OS (e.g., Linux):
30 | - Python version:
31 | - GPU models and configuration:
32 | - CUDA version:
33 | - pip/conda dependency package versions:
34 | - Any other relevant information:
35 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/FEATURE_REQUEST.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: "\U0001F680 Feature Request"
3 | about: Submit a proposal or request for a new Theseus feature
4 | ---
5 |
6 | ## 🚀 Feature
7 |
8 |
9 | ## Motivation
10 |
11 |
12 |
13 |
14 |
15 | ## Pitch
16 |
17 |
18 |
19 | ## Alternatives
20 |
21 |
22 |
23 |
24 | ## Additional context
25 |
26 |
27 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/QUESTIONS_HELP_SUPPORT.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: "❓ Questions/Help/Support"
3 | about: Do you need support?
4 | ---
5 |
6 | ## ❓ Questions and Help
7 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | ## Motivation and Context
2 |
3 |
4 |
5 |
6 |
7 | ## How Has This Been Tested
8 |
9 |
10 |
11 | ## Types of changes
12 |
13 |
14 |
15 | - [ ] Docs change / refactoring / dependency upgrade
16 | - [ ] Bug fix (non-breaking change which fixes an issue)
17 | - [ ] New feature (non-breaking change which adds functionality)
18 | - [ ] Breaking change (fix or feature that would cause existing functionality to change)
19 |
20 | ## Checklist
21 |
22 |
23 |
24 | - [ ] My code follows the code style of this project.
25 | - [ ] My change requires a change to the documentation.
26 | - [ ] I have updated the documentation accordingly.
27 | - [ ] I have read the **CONTRIBUTING** document.
28 | - [ ] I have completed my CLA (see **CONTRIBUTING**).
29 | - [ ] I have added tests to cover my changes.
30 | - [ ] All new and existing tests passed.
31 |
--------------------------------------------------------------------------------
/.github/tmp/optim_tests.yml:
--------------------------------------------------------------------------------
1 | # name: test-optimizers
2 | # on:
3 | # pull_request:
4 | # branches:
5 | # - main
6 |
7 | # jobs:
8 | # test_optimizers:
9 | # runs-on: ubuntu-latest
10 | # strategy:
11 | # matrix:
12 | # python-version: [3.8, 3.9, 3.10.15]
13 | # steps:
14 | # - uses: actions/checkout@v4.1.3
15 | # - name: Set up Python ${{ matrix.python-version }}
16 | # uses: actions/setup-python@v5.2.0
17 | # with:
18 | # python-version: ${{ matrix.python-version }}
19 | # - uses: actions/checkout@v4.1.3
20 | # - name: Install suitesparse
21 | # run: |
22 | # sudo apt-get update && sudo apt-get install -y libsuitesparse-dev
23 | # - name: Create Conda env
24 | # run: |
25 | # wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh
26 | # /bin/bash ~/miniconda.sh -b -p ~/conda
27 | # export PATH=~/conda/bin:$PATH
28 | # conda create --name theseus python=${{ matrix.python-version }}
29 | # source activate theseus
30 | # pip install --progress-bar off --upgrade pip
31 | # pip install --progress-bar off --upgrade setuptools
32 | # - name: Install Torch
33 | # run: |
34 | # export PATH=~/conda/bin:$PATH
35 | # source activate theseus
36 | # pip install torch
37 | # - name: Build Baspacho CPU
38 | # run: |
39 | # sudo apt-get install -y libopenblas-pthread-dev
40 | # git clone https://github.com/facebookresearch/baspacho.git
41 | # cd baspacho
42 | # echo $(pwd)
43 | # cmake -S . -B build -DCMAKE_BUILD_TYPE=Release -DBLA_STATIC=ON -DBASPACHO_USE_CUBLAS=0 -DBUILD_SHARED_LIBS=OFF -DBASPACHO_BUILD_TESTS=OFF -DBASPACHO_BUILD_EXAMPLES=OFF
44 | # cmake --build build -- -j16
45 | # - name: Install torchlie and torchkin
46 | # run: |
47 | # export PATH=~/conda/bin:$PATH
48 | # source activate theseus
49 | # cd torchlie
50 | # pip install -e .
51 | # cd ../torchkin
52 | # pip install -e .
53 | # - name: Install theseus
54 | # run: |
55 | # export PATH=~/conda/bin:$PATH
56 | # source activate theseus
57 | # echo $(pwd)
58 | # BASPACHO_ROOT_DIR=./baspacho pip install -e ".[dev]"
59 | # - name: Run Optimizer tests
60 | # run: |
61 | # export PATH=~/conda/bin:$PATH
62 | # source activate theseus
63 | # # the full baspacho test for CPU fails; we only run an abridged version in CI
64 | # python -m pytest tests/theseus_tests/optimizer -m "not cudaext" -k "not test_baspacho_solver_cpu_full"
65 | # python -m pytest tests/theseus_tests/test_theseus_layer.py -m "not cudaext"
--------------------------------------------------------------------------------
/.github/tmp/test_wheel.yml:
--------------------------------------------------------------------------------
1 | # name: test-wheel
2 | # on:
3 | # pull_request:
4 | # branches:
5 | # - main
6 |
7 | # jobs:
8 | # test_wheel:
9 | # runs-on: ubuntu-latest
10 | # strategy:
11 | # matrix:
12 | # python-version: [3.10.15]
13 | # steps:
14 | # - uses: actions/checkout@v4.1.3
15 | # - name: Set up Python ${{ matrix.python-version }}
16 | # uses: actions/setup-python@v5.2.0
17 | # with:
18 | # python-version: ${{ matrix.python-version }}
19 | # - uses: actions/checkout@v4.1.3
20 | # - name: Install suitesparse
21 | # run: |
22 | # sudo apt-get update && sudo apt-get install -y libsuitesparse-dev
23 | # - name: Create Conda env
24 | # run: |
25 | # wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh
26 | # /bin/bash ~/miniconda.sh -b -p ~/conda
27 | # export PATH=~/conda/bin:$PATH
28 | # conda create --name theseus python=${{ matrix.python-version }}
29 | # source activate theseus
30 | # pip install --progress-bar off --upgrade pip
31 | # pip install --progress-bar off --upgrade setuptools
32 | # - name: Install CUDA 11
33 | # run: |
34 | # # download and install nvidia drivers, cuda, etc
35 | # wget --quiet --no-clobber -P ~/nvidia-downloads https://developer.download.nvidia.com/compute/cuda/11.7.1/local_installers/cuda_11.7.1_515.65.01_linux.run
36 | # time sudo /bin/bash ~/nvidia-downloads/cuda_11.7.1_515.65.01_linux.run --no-drm --silent --driver --toolkit
37 | # sudo ldconfig /usr/local/cuda/lib64
38 | # echo "Done installing NVIDIA drivers and CUDA libraries."
39 | # nvidia-smi
40 | # - name: Install Torch CUDA 11
41 | # run: |
42 | # export PATH=~/conda/bin:$PATH
43 | # source activate theseus
44 | # which python && which pip
45 | # pip install --progress-bar off torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118
46 | # python -c 'import torch; print("Torch version:", torch.__version__); assert torch.cuda.is_available()'
47 | # - name: Build CUDA wheel
48 | # run: |
49 | # echo $(pwd)
50 | # echo $(ls)
51 | # export PATH=~/conda/bin:$PATH
52 | # source activate theseus
53 | # which python && which pip
54 | # THESEUS_GIT_COMMIT=$(git --git-dir project/.git log --format="%H" -n 1)
55 | # THESEUS_VERSION=$(grep -Eo "[0-9]\.[0-9]\.[0-9][.0-9a-z]*" project/theseus/_version.py | tail -n 1)
56 | # ./build_scripts/build_wheel.sh . ${THESEUS_GIT_COMMIT} 11.8 ${THESEUS_VERSION}
57 | # pip install $(ls */*.whl)
58 | # pip install -r ./requirements/dev.txt
59 | # - name: Install theseus from wheel and run tests
60 | # run: |
61 | # mv theseus theseus_tmp
62 | # export PATH=~/conda/bin:$PATH
63 | # source activate theseus
64 | # which python && which pip
65 | # python -m pytest tests/theseus_tests/test_theseus_layer.py
66 | # pytest -s tests/theseus_tests/ -m "cudaext"
67 |
--------------------------------------------------------------------------------
/.github/workflows/lie_tests.yml:
--------------------------------------------------------------------------------
1 | name: test-lie-groups
2 | on:
3 | pull_request:
4 | branches:
5 | - main
6 |
7 | jobs:
8 | test_lie_groups:
9 | runs-on: ubuntu-latest
10 | strategy:
11 | matrix:
12 | python-version: [3.8, 3.9, 3.10.15]
13 | steps:
14 | - uses: actions/checkout@v4.1.3
15 | - name: Set up Python ${{ matrix.python-version }}
16 | uses: actions/setup-python@v5.2.0
17 | with:
18 | python-version: ${{ matrix.python-version }}
19 | - uses: actions/checkout@v4.1.3
20 | - name: Install suitesparse
21 | run: |
22 | sudo apt-get update && sudo apt-get install -y libsuitesparse-dev
23 | - name: Create Conda env
24 | run: |
25 | wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh
26 | /bin/bash ~/miniconda.sh -b -p ~/conda
27 | export PATH=~/conda/bin:$PATH
28 | conda create --name theseus python=${{ matrix.python-version }}
29 | source activate theseus
30 | pip install --progress-bar off --upgrade pip
31 | pip install --progress-bar off --upgrade setuptools
32 | - name: Install Torch
33 | run: |
34 | export PATH=~/conda/bin:$PATH
35 | source activate theseus
36 | pip install torch
37 | - name: Install torchlie and torchkin
38 | run: |
39 | export PATH=~/conda/bin:$PATH
40 | source activate theseus
41 | cd torchlie
42 | pip install -e .
43 | cd ../torchkin
44 | pip install -e .
45 | - name: Install theseus without Baspacho
46 | run: |
47 | export PATH=~/conda/bin:$PATH
48 | source activate theseus
49 | pip install -e ".[dev]"
50 | - name: Run Lie groups tests
51 | run: |
52 | export PATH=~/conda/bin:$PATH
53 | source activate theseus
54 | python -m pytest tests/theseus_tests/geometry -m "not cudaext"
55 | python -m pytest tests/torchlie_tests -m "not cudaext"
56 |
--------------------------------------------------------------------------------
/.github/workflows/other_tests.yml:
--------------------------------------------------------------------------------
1 | name: test-others
2 | on:
3 | pull_request:
4 | branches:
5 | - main
6 |
7 | jobs:
8 | test_others:
9 | runs-on: ubuntu-latest
10 | strategy:
11 | matrix:
12 | python-version: [3.8, 3.9, 3.10.15]
13 | steps:
14 | - uses: actions/checkout@v4.1.3
15 | - name: Set up Python ${{ matrix.python-version }}
16 | uses: actions/setup-python@v5.2.0
17 | with:
18 | python-version: ${{ matrix.python-version }}
19 | - uses: actions/checkout@v4.1.3
20 | - name: Install suitesparse
21 | run: |
22 | sudo apt-get update && sudo apt-get install -y libsuitesparse-dev
23 | - name: Create Conda env
24 | run: |
25 | wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh
26 | /bin/bash ~/miniconda.sh -b -p ~/conda
27 | export PATH=~/conda/bin:$PATH
28 | conda create --name theseus python=${{ matrix.python-version }}
29 | source activate theseus
30 | pip install --progress-bar off --upgrade pip
31 | pip install --progress-bar off --upgrade setuptools
32 | - name: Install Torch
33 | run: |
34 | export PATH=~/conda/bin:$PATH
35 | source activate theseus
36 | pip install torch
37 | - name: Install torchlie and torchkin
38 | run: |
39 | export PATH=~/conda/bin:$PATH
40 | source activate theseus
41 | cd torchlie
42 | pip install -e .
43 | cd ../torchkin
44 | pip install -e .
45 | - name: Install theseus without Baspacho
46 | run: |
47 | export PATH=~/conda/bin:$PATH
48 | source activate theseus
49 | pip install -e ".[dev]"
50 | - name: Run other tests
51 | run: |
52 | export PATH=~/conda/bin:$PATH
53 | source activate theseus
54 | python -m pytest tests -m "not cudaext" -s --ignore=tests/theseus_tests/geometry/ --ignore=tests/theseus_tests/optimizer --ignore-glob=tests/theseus_tests/test_theseus_layer.py --ignore=tests/torchlie_tests
--------------------------------------------------------------------------------
/.github/workflows/precommit.yml:
--------------------------------------------------------------------------------
1 | name: pre-commit checks
2 | on:
3 | pull_request:
4 | branches:
5 | - main
6 |
7 | jobs:
8 | precommit:
9 | runs-on: ubuntu-latest
10 | strategy:
11 | matrix:
12 | python-version: [3.8, 3.9, 3.10.15]
13 | steps:
14 | - uses: actions/checkout@v4.1.3
15 | - name: Set up Python ${{ matrix.python-version }}
16 | uses: actions/setup-python@v5.2.0
17 | with:
18 | python-version: ${{ matrix.python-version }}
19 | - name: Running precommit checks
20 | run: |
21 | pip install pre-commit
22 | pre-commit install-hooks
23 | pre-commit run --all-files --show-diff-on-failure
24 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea
2 | .DS_Store
3 | .vscode
4 | *.swp
5 | *.code-workspace
6 | debug/
7 | build/
8 | dist/
9 | datasets/
10 | .mypy_cache/
11 | .pytest_cache/
12 | __pycache__/
13 | *.egg-info/
14 | expts/
15 | *.ipynb_checkpoints
16 | examples/*.ipynb_checkpoints
17 | outputs/
18 | examples/outputs/
19 | theseus/extlib/*.so
20 | datasets/
21 | data/
22 | temp/
23 | scripts/
24 | docs_build/
25 | viz/
26 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | exclude: "setup.py|torchlie/setup.py|torchkin/setup.py|third_party"
2 |
3 | repos:
4 | - repo: https://github.com/psf/black
5 | rev: 23.9.1
6 | hooks:
7 | - id: black
8 | files: "^theseus|^examples|^tests|^torchlie|^torchkin"
9 |
10 | - repo: https://github.com/pycqa/flake8
11 | rev: 6.1.0
12 | hooks:
13 | - id: flake8
14 | files: "^theseus|^torchlie|^torchkin"
15 |
16 | - repo: https://github.com/pre-commit/mirrors-mypy
17 | rev: v1.5.1
18 | hooks:
19 | - id: mypy
20 | additional_dependencies: [torch, tokenize-rt==3.2.0, types-PyYAML, types-mock]
21 | args: [--no-strict-optional, --ignore-missing-imports, --implicit-reexport, --explicit-package-bases]
22 | files: "^theseus|^torchlie|^torchkin"
23 |
24 | - repo: https://github.com/pycqa/isort
25 | rev: 5.12.0
26 | hooks:
27 | - id: isort
28 | files: "^theseus|^examples|^torchlie|^torchkin"
29 |
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | # .readthedocs.yaml
2 | # Read the Docs configuration file
3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
4 |
5 | # Required
6 | version: 2
7 |
8 | build:
9 | os: ubuntu-20.04
10 | tools:
11 | python: "3.9"
12 |
13 | apt_packages:
14 | - libsuitesparse-dev
15 |
16 | sphinx:
17 | configuration: docs/source/conf.py
18 |
19 | formats:
20 | - pdf
21 |
22 | python:
23 | install:
24 | - requirements: requirements/docs.txt
25 | - method: pip
26 | path: .
27 |
--------------------------------------------------------------------------------
/CITATION.cff:
--------------------------------------------------------------------------------
1 | cff-version: 1.2.0
2 | message: "If you use this software, please cite it as below."
3 | authors:
4 | - family-names: "Pineda"
5 | given-names: "Luis"
6 | - family-names: "Fan"
7 | given-names: "Taosha"
8 | - family-names: "Monge"
9 | given-names: "Maurizio"
10 | - family-names: "Venkataraman"
11 | given-names: "Shobha"
12 | - family-names: "Sodhi"
13 | given-names: "Paloma"
14 | - family-names: "Chen"
15 | given-names: "Ricky T. Q."
16 | - family-names: "Ortiz"
17 | given-names: "Joseph"
18 | - family-names: "DeTone"
19 | given-names: "Daniel"
20 | - family-names: "Wang"
21 | given-names: "Austin"
22 | - family-names: "Anderson"
23 | given-names: "Stuart"
24 | - family-names: "Dong"
25 | given-names: "Jing"
26 | - family-names: "Amos"
27 | given-names: "Brandon"
28 | - family-names: "Mukadam"
29 | given-names: "Mustafa"
30 | title: "Theseus: A Library for Differentiable Nonlinear Optimization"
31 | url: "https://github.com/facebookresearch/theseus"
32 | preferred-citation:
33 | type: article
34 | journal: Advances in Neural Information Processing Systems
35 | title: "Theseus: A Library for Differentiable Nonlinear Optimization"
36 | url: "https://arxiv.org/abs/2207.09442"
37 | year: 2022
38 | authors:
39 | - family-names: "Pineda"
40 | given-names: "Luis"
41 | - family-names: "Fan"
42 | given-names: "Taosha"
43 | - family-names: "Monge"
44 | given-names: "Maurizio"
45 | - family-names: "Venkataraman"
46 | given-names: "Shobha"
47 | - family-names: "Sodhi"
48 | given-names: "Paloma"
49 | - family-names: "Chen"
50 | given-names: "Ricky T. Q."
51 | - family-names: "Ortiz"
52 | given-names: "Joseph"
53 | - family-names: "DeTone"
54 | given-names: "Daniel"
55 | - family-names: "Wang"
56 | given-names: "Austin"
57 | - family-names: "Anderson"
58 | given-names: "Stuart"
59 | - family-names: "Dong"
60 | given-names: "Jing"
61 | - family-names: "Amos"
62 | given-names: "Brandon"
63 | - family-names: "Mukadam"
64 | given-names: "Mustafa"
65 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) Meta Platforms, Inc. and affiliates.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include LICENSE README.md
2 | include requirements/*.txt
3 | include py.typed
4 | graft theseus/extlib
5 |
--------------------------------------------------------------------------------
/docs/source/conf.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | # Configuration file for the Sphinx documentation builder.
7 | #
8 | # This file only contains a selection of the most common options. For a full
9 | # list see the documentation:
10 | # https://www.sphinx-doc.org/en/master/usage/configuration.html
11 |
12 | # -- Path setup --------------------------------------------------------------
13 |
14 | # If extensions (or modules to document with autodoc) are in another directory,
15 | # add these directories to sys.path here. If the directory is relative to the
16 | # documentation root, use os.path.abspath to make it absolute, like shown here.
17 |
18 | import os
19 | import sys
20 |
21 | sys.path.insert(0, os.path.abspath(os.path.join("..", "..")))
22 |
23 | # -- Project information -----------------------------------------------------
24 |
25 | project = "Theseus"
26 | copyright = "2022, Meta Platforms, Inc. and affiliates"
27 | author = "Meta AI, FAIR team"
28 |
29 |
30 | # -- General configuration ---------------------------------------------------
31 |
32 | # Add any Sphinx extension module names here, as strings. They can be
33 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
34 | # ones.
35 | extensions = [
36 | "sphinx.ext.autodoc",
37 | "sphinx.ext.autosummary",
38 | "sphinx.ext.coverage",
39 | "sphinx.ext.napoleon",
40 | # "nbsphinx",
41 | ]
42 |
43 | # Add any paths that contain templates here, relative to this directory.
44 | templates_path = ["_templates"]
45 |
46 | # List of patterns, relative to source directory, that match files and
47 | # directories to ignore when looking for source files.
48 | # This pattern also affects html_static_path and html_extra_path.
49 | exclude_patterns = [
50 | "build",
51 | "docs_build",
52 | "Thumbs.db",
53 | ".DS_Store",
54 | "**.ipynb_checkpoints",
55 | "**test*.py",
56 | "experiments",
57 | "examples",
58 | "data",
59 | "tutorials",
60 | ]
61 |
62 |
63 | # -- Options for HTML output -------------------------------------------------
64 |
65 | # The theme to use for HTML and HTML Help pages. See the documentation for
66 | # a list of builtin themes.
67 | #
68 | html_theme = "sphinx_rtd_theme"
69 |
70 | # Add any paths that contain custom static files (such as style sheets) here,
71 | # relative to this directory. They are copied after the builtin static files,
72 | # so a file named "default.css" will overwrite the builtin "default.css".
73 | # html_static_path = ["_static"]
74 |
75 |
76 | def skip_undocumented(app, what, name, obj, skip, options):
77 | exclusions = ["__weakref__", "__module__"]
78 | if not skip and obj.__doc__ is None and name not in exclusions:
79 | return True
80 | return None
81 |
82 |
83 | def setup(app):
84 | app.connect("autodoc-skip-member", skip_undocumented)
85 |
--------------------------------------------------------------------------------
/docs/source/core.rst:
--------------------------------------------------------------------------------
1 | Core Module
2 | ===========
3 |
4 | Objective
5 | ---------
6 | An objective function to optimize (see :class:`theseus.Objective`).
7 |
8 | Cost Function
9 | -------------
10 | A term in the objective function as a function of one or more :class:`Variable` objects.
11 |
12 | Variable
13 | --------
14 | A variable in the optimization problem. :class:`Variable` objects are named wrappers for
15 | ``torch`` tensors.
16 |
17 | Cost Weight
18 | -----------
19 | A weight for cost functions.
20 |
21 | Reference
22 | ---------
23 | .. autosummary::
24 |     :toctree: generated
25 |     :nosignatures:
26 |
27 |     theseus.Objective
28 |     theseus.Objective.add
29 |     theseus.Objective.error
30 |     theseus.Objective.error_metric
31 |     theseus.Objective.update
32 |     theseus.Objective.retract_vars_sequence
33 |     theseus.CostFunction
34 |     theseus.Variable
--------------------------------------------------------------------------------
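The pieces described above compose as follows; a minimal sketch (variable names are illustrative, and the calls mirror the public API used elsewhere in this repo, e.g. in `examples/pose_graph/pose_graph_benchmark.py`):

```python
import torch
import theseus as th

# Variables are named wrappers around batched torch tensors.
x = th.Vector(tensor=torch.zeros(1, 2), name="x")           # optimization variable
target = th.Vector(tensor=torch.ones(1, 2), name="target")  # auxiliary (fixed) variable

# A cost function defines a residual over variables; a cost weight scales it.
cost = th.Difference(var=x, target=target, cost_weight=th.ScaleCostWeight(0.5))

# The objective collects weighted cost functions and evaluates the total error.
objective = th.Objective()
objective.add(cost)
objective.update({"x": torch.zeros(1, 2), "target": torch.ones(1, 2)})
print(objective.error_metric())  # sum of weighted squared residuals per batch entry
```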
/docs/source/embodied.rst:
--------------------------------------------------------------------------------
1 | Embodied Module
2 | ===============
--------------------------------------------------------------------------------
/docs/source/generated/theseus.CostFunction.rst:
--------------------------------------------------------------------------------
1 | theseus.CostFunction
2 | ====================
3 |
4 | .. currentmodule:: theseus
5 |
6 | .. autoclass:: CostFunction
7 |
8 |
9 | .. automethod:: __init__
10 |
11 |
12 |
13 |
14 |
15 |
16 |
--------------------------------------------------------------------------------
/docs/source/generated/theseus.Objective.add.rst:
--------------------------------------------------------------------------------
1 | theseus.Objective.add
2 | =====================
3 |
4 | .. currentmodule:: theseus
5 |
6 | .. automethod:: Objective.add
--------------------------------------------------------------------------------
/docs/source/generated/theseus.Objective.error.rst:
--------------------------------------------------------------------------------
1 | theseus.Objective.error
2 | =======================
3 |
4 | .. currentmodule:: theseus
5 |
6 | .. automethod:: Objective.error
--------------------------------------------------------------------------------
/docs/source/generated/theseus.Objective.retract_vars_sequence.rst:
--------------------------------------------------------------------------------
1 | theseus.Objective.retract\_vars\_sequence
2 | =========================================
3 |
4 | .. currentmodule:: theseus
5 |
6 | .. automethod:: Objective.retract_vars_sequence
--------------------------------------------------------------------------------
/docs/source/generated/theseus.Objective.rst:
--------------------------------------------------------------------------------
1 | theseus.Objective
2 | =================
3 |
4 | .. currentmodule:: theseus
5 |
6 | .. autoclass:: Objective
7 |
8 |
9 | .. automethod:: __init__
10 |
11 |
12 | .. rubric:: Methods
13 |
14 | .. autosummary::
15 |
16 | ~Objective.add
17 | ~Objective.copy
18 | ~Objective.dim
19 | ~Objective.erase
20 | ~Objective.error
21 | ~Objective.error_metric
22 | ~Objective.get_aux_var
23 | ~Objective.get_cost_function
24 | ~Objective.get_functions_connected_to_aux_var
25 | ~Objective.get_functions_connected_to_optim_var
26 | ~Objective.get_optim_var
27 | ~Objective.has_aux_var
28 | ~Objective.has_cost_function
29 | ~Objective.has_optim_var
30 | ~Objective.retract_vars_sequence
31 | ~Objective.size
32 | ~Objective.size_aux_vars
33 | ~Objective.size_cost_functions
34 | ~Objective.size_variables
35 | ~Objective.update
36 |
37 |
38 |
39 |
40 |
41 |
--------------------------------------------------------------------------------
/docs/source/generated/theseus.Objective.update.rst:
--------------------------------------------------------------------------------
1 | theseus.Objective.update
2 | ========================
3 |
4 | .. currentmodule:: theseus
5 |
6 | .. automethod:: Objective.update
--------------------------------------------------------------------------------
/docs/source/generated/theseus.Variable.rst:
--------------------------------------------------------------------------------
1 | theseus.Variable
2 | ================
3 |
4 | .. currentmodule:: theseus
5 |
6 | .. autoclass:: Variable
7 |
8 |
9 | .. automethod:: __init__
10 |
11 |
12 |
13 |
14 |
15 |
16 |
--------------------------------------------------------------------------------
/docs/source/geometry.rst:
--------------------------------------------------------------------------------
1 | Geometry Module
2 | ===============
--------------------------------------------------------------------------------
/docs/source/getting-started.rst:
--------------------------------------------------------------------------------
1 | Getting started
2 | ===============
3 |
4 | Installation
5 | ------------
6 |
7 | Prerequisites
8 | ^^^^^^^^^^^^^
9 | - We *strongly* recommend you install ``theseus`` in a venv or conda environment with Python 3.8-3.10.
10 | - Theseus requires a ``torch`` installation. To install for your particular CPU/CUDA configuration, follow the instructions on the PyTorch `website `_.
11 | - For GPU support, Theseus requires `nvcc `_ to compile custom CUDA operations. Make sure it matches the version used to compile pytorch, which you can check with ``nvcc --version``. If not, install it and ensure its location is on your system's ``$PATH`` variable.
12 | - Theseus also requires `suitesparse `_, which you can install via:
13 | - ``sudo apt-get install libsuitesparse-dev`` (Ubuntu).
14 | - ``conda install -c conda-forge suitesparse`` (Mac).
15 |
16 | Installing
17 | ^^^^^^^^^^
18 |
19 | pypi
20 | """"
21 |
22 | .. code-block:: bash
23 |
24 | pip install theseus-ai
25 |
26 | We currently provide wheels with our CUDA extensions compiled using CUDA 11.6 and Python 3.10.
27 | For other CUDA versions, consider installing from source or using our
28 | `build script `_.
29 |
30 | Note that ``pypi`` installation doesn't include our experimental `Theseus Labs `_.
31 | For this, please install from source.
32 |
33 | From source
34 | """""""""""
35 | The simplest way to install Theseus from source is by running the following (see further below to also include BaSpaCho):
36 |
37 | .. code-block:: bash
38 |
39 | git clone https://github.com/facebookresearch/theseus.git
40 | pip install -e .
41 | python -m pytest tests
42 |
43 | If you are interested in contributing to ``theseus``, instead install using
44 |
45 | .. code-block:: bash
46 |
47 | pip install -e ".[dev]"
48 |
49 | and follow the more detailed instructions in `CONTRIBUTING `_.
50 |
51 | **Installing BaSpaCho extensions from source**
52 | By default, installing from source doesn't include our BaSpaCho sparse
53 | solver extension. For this, follow these steps:
54 |
55 | 1. Compile BaSpaCho from source following instructions `here `_. We recommend using flags `-DBLA_STATIC=ON -DBUILD_SHARED_LIBS=OFF`.
56 | 2. Run
57 |
58 | .. code-block:: bash
59 |
60 | git clone https://github.com/facebookresearch/theseus.git && cd theseus
61 | BASPACHO_ROOT_DIR=<path/to/baspacho> pip install -e .
62 |
63 | where the BaSpaCho root dir must have binaries in the subdirectory `build`.
64 |
65 | Unit tests
66 | """"""""""
67 | With ``dev`` installation, you can run unit tests via
68 |
69 | .. code-block:: bash
70 |
71 | python -m pytest tests
72 |
73 | By default, unit tests include tests for our CUDA extensions. You can add the option `-m "not cudaext"`
74 | to skip them when installing without CUDA support. Additionally, tests for the BaSpaCho sparse solver are
75 | automatically skipped when its extlib is not compiled.
76 |
77 | Tutorials
78 | ---------
79 | See `tutorials `_ and `examples `_ to learn about the API and usage.
80 |
--------------------------------------------------------------------------------
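As a quick smoke test after installing, the following minimal sketch solves a trivial problem with a `TheseusLayer` (names are illustrative; see `examples/simple_example.py` and the tutorials for complete walkthroughs):

```python
import torch
import theseus as th

x = th.Vector(1, name="x")                                  # optimization variable
target = th.Vector(tensor=torch.ones(1, 1), name="target")  # auxiliary variable
objective = th.Objective()
objective.add(th.Difference(var=x, target=target, cost_weight=th.ScaleCostWeight(1.0)))

layer = th.TheseusLayer(th.GaussNewton(objective, max_iterations=5))
values, info = layer.forward({"x": torch.zeros(1, 1), "target": torch.ones(1, 1)})
print(values["x"])  # should be ~[[1.0]], i.e., the target
```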
/docs/source/img/theseus-color-horizontal.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/facebookresearch/theseus/c8583de41824613fb135ee9bad0e930ded6be404/docs/source/img/theseus-color-horizontal.png
--------------------------------------------------------------------------------
/docs/source/img/theseus-color-horizontal.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/source/img/theseus-color-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/facebookresearch/theseus/c8583de41824613fb135ee9bad0e930ded6be404/docs/source/img/theseus-color-icon.png
--------------------------------------------------------------------------------
/docs/source/img/theseus-color-icon.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/source/img/theseuslayer.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/facebookresearch/theseus/c8583de41824613fb135ee9bad0e930ded6be404/docs/source/img/theseuslayer.gif
--------------------------------------------------------------------------------
/docs/source/img/theseuslayer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/facebookresearch/theseus/c8583de41824613fb135ee9bad0e930ded6be404/docs/source/img/theseuslayer.png
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | Documentation for Theseus
2 | ========================================
3 | Theseus is a library for differentiable nonlinear optimization built on PyTorch to
4 | support constructing various problems in robotics and vision as end-to-end
5 | differentiable architectures.
6 |
7 | .. toctree::
8 | :maxdepth: 2
9 | :caption: Getting Started
10 |
11 | getting-started.rst
12 |
13 | .. toctree::
14 | :maxdepth: 1
15 | :caption: API Documentation
16 |
17 | core.rst
18 | embodied.rst
19 | geometry.rst
20 | optimizer.rst
21 | utils.rst
--------------------------------------------------------------------------------
/docs/source/optimizer.rst:
--------------------------------------------------------------------------------
1 | Optimizer Module
2 | ================
--------------------------------------------------------------------------------
/docs/source/utils.rst:
--------------------------------------------------------------------------------
1 | Utilities Module
2 | ================
--------------------------------------------------------------------------------
/evaluations/README.md:
--------------------------------------------------------------------------------
1 | This folder contains scripts for evaluating `theseus`'s components.
2 | These scripts should be run from the root folder of the repository. The following
3 | scripts are available, with references to the corresponding figures in our white paper:
4 |
5 | - `vectorization_ablation.sh`: Runs pose graph optimization with synthetic data with or without cost function vectorization (Fig. 1).
6 | - `pose_graph_synthetic.sh`: Same as above, but can change linear solver and problem size (Fig. 2).
7 | - `pose_graph_cube.sh`: Same as above, but using the cube data for Ceres comparison (Fig. 3).
8 | - `backward_modes_tactile.sh`: Runs tactile state estimation with different backward modes (Fig. 4).
9 | - `autodiff_cost_function_ablation.sh`: Runs homography estimation with different autograd modes.
10 |
11 | Some other relevant files to look at:
12 |
13 | * Pose Graph Optimization:
14 | - `examples/pose_graph/pose_graph_{cube/synthetic}.py`: Puts together optimization layer and implements outer loop.
15 |
16 | * Tactile State Estimation:
17 | - `theseus/utils/examples/tactile_pose_estimation/trainer.py`: Main outer learning loop.
18 | - `theseus/utils/examples/tactile_pose_estimation/pose_estimator.py`: Puts together the optimization layer.
19 |
20 | * Bundle Adjustment:
21 | - `examples/bundle_adjustment.py`: Puts together optimization layer and implements outer loop.
22 |
23 | * Motion Planning:
24 | - `theseus/utils/examples/motion_planning/motion_planner.py`: Puts together optimization layer.
25 | - `examples/motion_planning_2d.py`: Implements outer loop.
26 |
27 | * Homography Estimation:
28 | - `examples/homography_estimation.py`: Puts together optimization layer and implements outer loop.
29 |
--------------------------------------------------------------------------------
/evaluations/autodiff_cost_function_ablation.sh:
--------------------------------------------------------------------------------
1 | python examples/homography_estimation.py autograd_mode=dense outer_optim.batch_size=64 outer_optim.num_epochs=1 inner_optim.max_iters=10
2 | python examples/homography_estimation.py autograd_mode=loop_batch outer_optim.batch_size=64 outer_optim.num_epochs=1 inner_optim.max_iters=10
3 | python examples/homography_estimation.py autograd_mode=vmap outer_optim.batch_size=64 outer_optim.num_epochs=1 inner_optim.max_iters=10
4 |
--------------------------------------------------------------------------------
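For context, the `autograd_mode` values swept above select how `th.AutoDiffCostFunction` computes Jacobians across the batch. A minimal sketch of such a cost function (the error function and variables here are illustrative, not from the ablation itself):

```python
import torch
import theseus as th

x = th.Vector(2, name="x")                    # optimization variable
y = th.Variable(torch.randn(1, 2), name="y")  # auxiliary variable

def error_fn(optim_vars, aux_vars):
    # residual between the optimized vector and the observation
    return optim_vars[0].tensor - aux_vars[0].tensor

cost = th.AutoDiffCostFunction(
    [x], error_fn, 2, aux_vars=[y],
    autograd_mode="vmap",  # one of "dense", "loop_batch", "vmap"
)
```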
/evaluations/backward_modes_tactile.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # This script can be used to generate the results in Figure 4
4 | # Options that can be changed for values used in the paper:
5 | # inner_optim.backward_mode: {unroll, implicit, truncated, dlm}
6 | # inner_optim.max_iters: {2, 5, 10, 20, 30, 40, 50}
7 | #
8 | # When using DLM, set inner_optim.dlm_epsilon=0.01
9 | # When using TRUNCATED, inner_optim.backward_num_iterations can be set to {5, 10}
10 | python examples/tactile_pose_estimation.py \
11 | train.num_epochs=100 \
12 | inner_optim.reg_w=0 \
13 | inner_optim.force_max_iters=true \
14 | inner_optim.force_implicit_by_epoch=10000 \
15 | train.lr=1e-4 \
16 | train.batch_size=8 \
17 | inner_optim.val_iters=50 \
18 | inner_optim.keep_step_size=true \
19 | inner_optim.step_size=0.05 \
20 | train.optimizer=adam \
21 | train.lr_decay=0.98 \
22 | inner_optim.max_iters=2 \
23 | inner_optim.dlm_epsilon=None \
24 | inner_optim.backward_num_iterations=None \
25 | inner_optim.backward_mode=implicit
26 |
--------------------------------------------------------------------------------
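The `inner_optim.backward_mode` option above corresponds to the `backward_mode` optimizer kwarg of a `TheseusLayer` solve. A minimal sketch on a stand-in problem (the real model lives in `examples/tactile_pose_estimation.py`; see also `examples/backward_modes.py`):

```python
import torch
import theseus as th

# Stand-in layer; the actual tactile estimation model is much larger.
x = th.Vector(1, name="x")
t = th.Vector(tensor=torch.ones(1, 1), name="t")
objective = th.Objective()
objective.add(th.Difference(var=x, target=t, cost_weight=th.ScaleCostWeight(1.0)))
layer = th.TheseusLayer(th.GaussNewton(objective, max_iterations=2))

values, info = layer.forward(
    {"x": torch.zeros(1, 1), "t": torch.ones(1, 1)},
    optimizer_kwargs={
        "backward_mode": "implicit",     # or "unroll", "truncated", "dlm"
        # "backward_num_iterations": 5,  # only used by the truncated mode
        # "dlm_epsilon": 0.01,           # only used by the dlm mode
    },
)
```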
/evaluations/pose_graph_cube.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # This script can be used to generate the results in Figure 3
4 | # Options that can be changed for values used in the paper:
5 | # solver_device: {cuda, cpu}
6 | # batch_size: {1, 2, 4, 8, 16, 32, 64, 128, 256}
7 | # num_poses: {128, 256, 512, 1024, 2048, 4096}
8 | python examples/pose_graph/pose_graph_cube.py \
9 | solver_device=cpu \
10 | device=cpu \
11 | dataset_size=256 \
12 | num_poses=256
13 |
--------------------------------------------------------------------------------
/evaluations/pose_graph_synthetic.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # This script can be used to generate the results in Figure 2
4 | # Options that can be changed for values used in the paper:
5 | # inner_optim.solver: {dense, sparse}
6 | # solver_device: {cuda, cpu}
7 | # batch_size: {8, 16, 32, 64, 128, 256}
8 | # num_poses: {128, 256, 512, 1024, 2048, 4096}
9 | #
10 | # When using inner_optim.solver=sparse, this script supports the following options:
11 | # - solver_device=cuda, solver_type can be lucuda|baspacho
12 | # - solver_device=cpu, then solver is always CHOLMOD (solver_type is ignored)
13 | python examples/pose_graph/pose_graph_synthetic.py \
14 | inner_optim.solver=sparse \
15 | loop_closure_ratio=0.2 \
16 | solver_device=cpu \
17 | dataset_size=256 \
18 | batch_size=128 \
19 | num_poses=256 \
20 | solver_type=lucuda
21 |
--------------------------------------------------------------------------------
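The `solver_type` and `solver_device` options above map onto Theseus' linear solver classes. A rough sketch of the selection (the objective here is a tiny stand-in; the real script builds a full synthetic pose graph):

```python
import torch
import theseus as th

# Tiny stand-in objective.
x = th.Vector(3, name="x")
t = th.Vector(tensor=torch.ones(1, 3), name="t")
objective = th.Objective()
objective.add(th.Difference(var=x, target=t, cost_weight=th.ScaleCostWeight(1.0)))

optimizer = th.LevenbergMarquardt(
    objective,
    linearization_cls=th.SparseLinearization,
    linear_solver_cls=th.CholmodSparseSolver,  # solver_device=cpu path (needs SuiteSparse)
    # solver_type=lucuda   -> th.LUCudaSparseSolver   (requires the CUDA extension)
    # solver_type=baspacho -> th.BaspachoSparseSolver (requires a BaSpaCho build)
    max_iterations=10,
)
```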
/evaluations/vectorization_ablation.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # This can be used to get the results in Figure 1
4 | # You can vary:
5 | # num_poses to be one of {32,64,128,256,512,1024,2048}
6 | # batch_size to be one of {16,32,64,128,256}
7 | # inner_optim.vectorize in {true,false}, toggles whether vectorization
8 | # will be used or not.
9 | python examples/pose_graph/pose_graph_synthetic.py \
10 | solver_device=cpu \
11 | outer_optim.num_epochs=200 \
12 | outer_optim.max_num_batches=1 \
13 | inner_optim.max_iters=1 \
14 | batch_size=32 \
15 | num_poses=128 \
16 | inner_optim.vectorize=true
17 |
--------------------------------------------------------------------------------
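The `inner_optim.vectorize` flag above corresponds to the `vectorize` argument of Theseus optimizers (used, e.g., in `examples/pose_graph/pose_graph_benchmark.py`), which fuses structurally identical cost functions into one batched evaluation. A minimal sketch on a stand-in objective:

```python
import torch
import theseus as th

# Three structurally identical costs: candidates for vectorized evaluation.
objective = th.Objective()
for i in range(3):
    x = th.Vector(2, name=f"x{i}")
    t = th.Vector(tensor=torch.randn(1, 2), name=f"t{i}")
    objective.add(th.Difference(var=x, target=t, cost_weight=th.ScaleCostWeight(1.0)))

# vectorize=True computes the errors and Jacobians of the three Difference
# costs in one batched call instead of looping over them in Python.
optimizer = th.GaussNewton(objective, max_iterations=5, vectorize=True)
layer = th.TheseusLayer(optimizer)
```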
/examples/README.md:
--------------------------------------------------------------------------------
1 | This folder contains examples of how to use Theseus for several applications:
2 |
3 | - **simple_example.py:** This is a minimal Theseus example that fits a curve to a dataset of observations.
4 | - **state_estimation_2d.py:** Is an example of how to do 2D pose estimation from simulated
5 | noisy GPS and odometry sensors. In this example the noise is state dependent, and we
6 | learn the cost weight as a function of pose.
7 | - **motion_planning_2d.py:** Is an example of how to set up a differentiable motion planning
8 | problem, inspired by [Bhardwaj et al. 2020](https://arxiv.org/pdf/1907.09591.pdf).
9 | - **se2_planning.py:** Brief example of how to use SE2 variables with the motion planner
10 | utility, to handle poses with angles in 2D maps and nonholonomic constraints.
11 | - **tactile_pose_estimation.py:** Is an example of how to set up learning models for
12 | tactile pose estimation, as described in [Sodhi et al. 2021](https://arxiv.org/abs/1705.10664).
13 | - **backward_modes.py:** Shows how to compute derivatives through Theseus solves and switch between backward modes.
14 | - **se2_inverse.py:** Is an example of numerically computing the inverse of SE2.
15 | - **pose_graph/pose_graph_synthetic:** Is an example of how to set up a differentiable pose graph optimization
16 | problem with loop closure outliers.
17 | - **pose_graph/pose_graph_cube:** Is an example of profiling Theseus' optimizers on the cube datasets.
18 | - **pose_graph/pose_graph_benchmark:** Is an example of using Theseus to solve pose graph optimization problems.
19 | - **homography_estimation.py:** Is an example of using Theseus to learn CNN image features for homography estimation.
20 | The features are trained to minimize the four corner distance between the warped and target image.
21 |
22 | These can be run from your root `theseus` directory by doing
23 |
24 | ```bash
25 | python examples/simple_example.py
26 | python examples/state_estimation_2d.py
27 | python examples/motion_planning_2d.py
28 | python examples/tactile_pose_estimation.py
29 | python examples/backward_modes.py
30 | python examples/se2_inverse.py
31 | python examples/pose_graph/pose_graph_synthetic.py
32 | python examples/pose_graph/pose_graph_cube.py
33 | python examples/pose_graph/pose_graph_benchmark.py
34 | python examples/homography_estimation.py
35 | ```
36 |
37 | The motion planning and tactile estimation examples require `hydra`, which you can install by running:
38 |
39 | ```bash
40 | pip install hydra-core
41 | ```
42 |
43 | The homography example requires `hydra`, `kornia`, and `OpenCV`, which you can install with:
44 |
45 | ```bash
46 | pip install hydra-core
47 | pip install kornia
48 | pip install opencv-python
49 | ```
50 |
51 | Any outputs generated by these scripts will be saved under `examples/outputs`. You can change this directory by passing the CLI option `hydra.run.dir=<new_dir>`.
52 |
--------------------------------------------------------------------------------
/examples/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
--------------------------------------------------------------------------------
/examples/configs/bundle_adjustment.yaml:
--------------------------------------------------------------------------------
1 | seed: 1
2 |
3 | num_cameras: 10
4 | num_points: 200
5 | average_track_length: 8
6 | track_locality: 0.2
7 |
8 | inner_optim:
9 | optimizer_cls: GaussNewton
10 | max_iters: 10
11 | step_size: 0.1
12 | backward_mode: implicit
13 | verbose: true
14 | track_err_history: true
15 | keep_step_size: true
16 | regularize: true
17 | ratio_known_cameras: 0.1
18 | reg_w: 1e-4
19 |
20 | outer_optim:
21 | lr: 0.1
22 | num_epochs: 20
23 |
24 | hydra:
25 | run:
26 | dir: examples/outputs
27 |
--------------------------------------------------------------------------------
/examples/configs/homography_estimation.yaml:
--------------------------------------------------------------------------------
1 | autograd_mode: vmap
2 | benchmarking_costs: true
3 |
4 | outer_optim:
5 | num_epochs: 999
6 | batch_size: 128
7 | lr: 1e-4
8 |
9 | inner_optim:
10 | max_iters: 50
11 | step_size: 0.1
12 |
13 | hydra:
14 | run:
15 | dir: examples/outputs/homography_estimation
16 |
--------------------------------------------------------------------------------
/examples/configs/motion_planning_2d.yaml:
--------------------------------------------------------------------------------
1 | seed: 0
2 | device: cuda:0
3 | verbose: true
4 | plot_trajectories: true
5 | map_type: tarpit
6 | batch_size: 2
7 | img_size: 128
8 | num_time_steps: 100
9 | train_data_size: 2
10 | num_images: 1000
11 |
12 | do_learning: true
13 | num_epochs: 20
14 | shuffle_each_epoch: false
15 | model_type: initial_trajectory_model
16 | model_lr: 0.01
17 | model_wd: 0
18 | amsgrad: false
19 | use_mean_objective_as_loss: true
20 | include_imitation_loss: true
21 | clip_grad_norm: 0
22 | obj_loss_weight: 0.0005
23 | gp_loss_weight: 0
24 | collision_loss_weight: 0
25 |
26 | total_time: 10
27 | robot_radius: 0.4
28 | gp_params:
29 | Qc_inv: [[1.0, 0.0], [0.0, 1.0]]
30 | obs_params:
31 | weight: 100.0
32 | safety_dist: 0.4
33 | optim_params:
34 | init:
35 | - "LevenbergMarquardt"
36 | - max_iterations: 2
37 | step_size: 0.3
38 | kwargs:
39 | damping: 0.1
40 |
41 | hydra:
42 | run:
43 | dir: examples/outputs
44 |
--------------------------------------------------------------------------------
/examples/configs/pose_graph/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
--------------------------------------------------------------------------------
/examples/configs/pose_graph/pose_graph_benchmark.yaml:
--------------------------------------------------------------------------------
1 | dataset: sphere2500
2 | dtype: float64
3 | hydra:
4 | run:
5 | dir: examples/outputs/pgo/benchmark
6 | sweep:
7 | dir: examples/outputs/pgo/benchmark
8 |
--------------------------------------------------------------------------------
/examples/configs/pose_graph/pose_graph_cube.yaml:
--------------------------------------------------------------------------------
1 | seed: 1
2 | solver_device: "cpu"
3 | device: "cuda:0"
4 |
5 | num_poses: 256
6 | dataset_size: 256
7 |
8 | inner_optim:
9 | optimizer_cls: GaussNewton
10 | solver: sparse
11 | max_iters: 10
12 | step_size: 1
13 | verbose: true
14 | reg_w: 1e-3
15 |
16 | hydra:
17 | run:
18 | dir: examples/outputs/pose_graph_cube
19 | sweep:
20 | dir: examples/outputs/pose_graph/pose_graph_cube/${now:%Y.%m.%d}/${now:%H.%M.%S}
21 |
--------------------------------------------------------------------------------
/examples/configs/pose_graph/pose_graph_synthetic.yaml:
--------------------------------------------------------------------------------
1 | seed: 1
2 | device: "cuda:0"
3 | profile: True
4 | savemat: True
5 |
6 | num_poses: 256
7 | translation_noise: 0.05
8 | rotation_noise: 0.02
9 | loop_closure_ratio: 0.2
10 | loop_closure_outlier_ratio: 0.25
11 | dataset_size: 256
12 | batch_size: 128
13 |
14 | inner_optim:
15 | optimizer_cls: LevenbergMarquardt
16 | linear_solver_cls: BaspachoSparseSolver # LUCudaSparseSolver
17 | optimizer_kwargs:
18 | backward_mode: implicit
19 | verbose: true
20 | track_err_history: true
21 | __keep_final_step_size__: true
22 | adaptive_damping: true
23 | max_iters: 10
24 | step_size: 0.75
25 | regularize: true
26 | ratio_known_poses: 0.1
27 | reg_w: 1e-3
28 | vectorize: true
29 | empty_cuda_cache: false
30 |
31 | outer_optim:
32 | lr: 0.1
33 | num_epochs: 20
34 | max_num_batches: 1000
35 |
36 | hydra:
37 | run:
38 | dir: examples/outputs/pose_graph
39 | sweep:
40 | dir: examples/outputs/pose_graph/pose_graph_synthetic/${now:%Y.%m.%d}/${now:%H.%M.%S}
41 |
--------------------------------------------------------------------------------
/examples/configs/tactile_pose_estimation.yaml:
--------------------------------------------------------------------------------
1 | seed: 1234567
2 | save_all: true
3 |
4 | dataset_name: "rectangle-pushing-corners-keypoints"
5 | sdf_name: "rect"
6 |
7 | episode_length: 25
8 | max_steps: 200
9 | max_episodes: 100 # if split = true, actual number might be slightly larger
10 | split_episodes: true
11 |
12 | inner_optim:
13 | max_iters: 5
14 | optimizer: GaussNewton
15 | reg_w: 0
16 | backward_mode: implicit
17 |   backward_num_iterations: null # only needed by TRUNCATED backward mode
18 |   dlm_epsilon: null # only needed by DLM backward mode
19 | force_implicit_by_epoch: 10000
20 | step_size: 0.05
21 | keep_step_size: true
22 | force_max_iters: true
23 | val_iters: 50
24 |
25 | train:
26 | # options: "weights_only" or "weights_and_measurement_nn"
27 | mode: "weights_and_measurement_nn"
28 | batch_size: 8
29 | num_epochs: 100
30 | lr: 1e-4
31 | optimizer: "adam" # "adam", "rmsprop"
32 | lr_decay: 0.98
33 | val_ratio: 0.1
34 | max_num_batches: 100
35 |
36 | # 0: disc, 1: rect-edges, 2: rect-corners, 3: ellip
37 | class_label: 2
38 | num_classes: 4
39 |
40 | shape:
41 | rect_len_x: 0.2363
42 | rect_len_y: 0.1579
43 |
44 | tactile_cost:
45 | # window size (min, max, step) over which to add pairwise costs
46 | min_win_mf: 10
47 | max_win_mf: 40
48 | step_win_mf: 5
49 | init_pretrained_model: True
50 |
51 | options:
52 | vis_traj: True
53 | vis_train: true
54 |
55 | hydra:
56 | run:
57 | dir: examples/outputs/tactile
58 |
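The train block maps onto standard torch training machinery. A minimal sketch with a hypothetical stand-in for the measurement network, reading lr_decay as a per-epoch multiplicative factor (an assumption; per the comment above, "rmsprop" may be selected instead of Adam):

    import torch

    model = torch.nn.Linear(4, 2)  # hypothetical stand-in for the measurement NN
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.98)
    for _ in range(3):  # one optimizer/scheduler step per epoch
        optimizer.zero_grad()
        loss = model(torch.rand(8, 4)).square().mean()
        loss.backward()
        optimizer.step()
        scheduler.step()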
--------------------------------------------------------------------------------
/examples/pose_graph/pose_graph_benchmark.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import logging
7 | import pathlib
8 |
9 | import hydra
10 | import torch
11 | from scipy.io import savemat
12 |
13 | import theseus as th
14 | import theseus.utils.examples as theg
15 |
16 | # To run this example, you will need the pose graph datasets available at
17 | # https://dl.fbaipublicfiles.com/theseus/pose_graph_data.tar.gz
18 | #
19 | # The steps below should let you run the example.
20 | # From the root project folder do:
21 | # mkdir datasets
22 | # cd datasets
23 | # cp your/path/pose_graph_data.tar.gz .
24 | # tar -xzvf pose_graph_data.tar.gz
25 | # cd ..
26 | # python examples/pose_graph/pose_graph_benchmark.py
27 |
28 | # Logger
29 | log = logging.getLogger(__name__)
30 |
31 |
32 | DATASET_DIR = pathlib.Path.cwd() / "datasets" / "pose_graph"
33 |
34 |
35 | @hydra.main(config_path="../configs/pose_graph", config_name="pose_graph_benchmark")
36 | def main(cfg):
37 | dataset_name = cfg.dataset
38 | file_path = f"{DATASET_DIR}/{dataset_name}_init.g2o"
39 |     dtype = getattr(torch, cfg.dtype)
40 |
41 | _, verts, edges = theg.pose_graph.read_3D_g2o_file(file_path, dtype=torch.float64)
42 | d = 3
43 |
44 | objective = th.Objective(torch.float64)
45 |
46 | for edge in edges:
47 | cost_func = th.Between(
48 | verts[edge.i],
49 | verts[edge.j],
50 | edge.relative_pose,
51 | edge.weight,
52 | )
53 | objective.add(cost_func)
54 |
55 | pose_prior = th.Difference(
56 | var=verts[0],
57 | cost_weight=th.ScaleCostWeight(torch.tensor(1e-6, dtype=torch.float64)),
58 | target=verts[0].copy(new_name=verts[0].name + "PRIOR"),
59 | )
60 | objective.add(pose_prior)
61 |
62 | optimizer = th.LevenbergMarquardt(
63 | objective.to(dtype),
64 | max_iterations=10,
65 | step_size=1,
66 | linearization_cls=th.SparseLinearization,
67 | linear_solver_cls=th.CholmodSparseSolver,
68 | vectorize=True,
69 | )
70 |
71 | start_event = torch.cuda.Event(enable_timing=True)
72 | end_event = torch.cuda.Event(enable_timing=True)
73 |
74 | inputs = {var.name: var.tensor for var in verts}
75 | optimizer.objective.update(inputs)
76 |
77 | start_event.record()
78 | torch.cuda.reset_peak_memory_stats()
79 | optimizer.optimize(verbose=True)
80 | end_event.record()
81 |
82 | torch.cuda.synchronize()
83 | forward_time = start_event.elapsed_time(end_event)
84 | forward_mem = torch.cuda.max_memory_allocated() / 1048576
85 | log.info(f"Forward pass took {forward_time} ms.")
86 | log.info(f"Forward pass used {forward_mem} MBs.")
87 |
88 | results = {}
89 | results["objective"] = objective.error_metric().detach().cpu().numpy().sum()
90 | results["R"] = torch.cat(
91 | [pose.tensor[:, :, :d].detach().cpu() for pose in verts]
92 | ).numpy()
93 | results["t"] = torch.cat(
94 | [pose.tensor[:, :, d].detach().cpu() for pose in verts]
95 | ).numpy()
96 |
97 | savemat(dataset_name + ".mat", results)
98 |
99 |
100 | if __name__ == "__main__":
101 | main()
102 |
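The timing in main() relies on CUDA events, which are recorded asynchronously on the GPU stream, so torch.cuda.synchronize() must run before elapsed_time is read. The pattern in isolation (only meaningful on a CUDA machine):

    import torch

    if torch.cuda.is_available():
        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)
        a = torch.rand(512, 512, device="cuda")
        start.record()
        b = a @ a  # any queued GPU work
        end.record()
        torch.cuda.synchronize()  # wait for both events before reading the timer
        print(f"{start.elapsed_time(end):.3f} ms")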
--------------------------------------------------------------------------------
/examples/se2_inverse.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright (c) Meta Platforms, Inc. and affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 | #
7 | # This example uses torch to solve min_x1 || x1^-1 - x2 ||^2,
8 | # where x1 and x2 are SE2 objects
9 |
10 | import torch
11 |
12 | import theseus as th
13 | from tests.theseus_tests.geometry.test_se2 import create_random_se2
14 | from theseus import LieGroupTensor
15 | from theseus.geometry.lie_group import LieGroup
16 |
17 | # Create two random SE2
18 | rng = torch.Generator()
19 | rng.manual_seed(0)
20 | x1 = create_random_se2(1, rng)
21 | x2 = create_random_se2(1, rng)
22 |
23 | # use_lie_tangent: whether to apply the update step on the Lie group tangent space
24 |
25 |
26 | def run(x1: LieGroup, x2: LieGroup, num_iters=10, use_lie_tangent=True):
27 | x1.tensor = LieGroupTensor(x1)
28 | x1.tensor.requires_grad = True
29 |
30 | optim = torch.optim.Adam([x1.tensor], lr=1e-1)
31 | scheduler = torch.optim.lr_scheduler.MultiStepLR(
32 | optim, milestones=[250, 600], gamma=0.01
33 | )
34 | for i in range(num_iters):
35 | optim.zero_grad()
36 | cf = th.Difference(x1.inverse(), x2, th.ScaleCostWeight(1.0))
37 | loss = cf.error().norm()
38 | if i % 100 == 0:
39 | print(
40 | "iter {:0>4d}: loss is {:.10f}, cos(theta)^2 + sin(theta)^2 is {:.10f}".format(
41 | i, loss.item(), x1[0, 2:].norm().item() ** 2
42 | )
43 | )
44 | loss.backward()
45 |
46 |         # Activate the Lie group update
47 | with th.set_lie_tangent_enabled(use_lie_tangent):
48 | optim.step()
49 |
50 | scheduler.step()
51 |
52 | cf = th.Difference(x1.inverse(), x2, th.ScaleCostWeight(1.0))
53 | loss = cf.error().norm()
54 | print(
55 | "iter {}: loss is {:.10f}, cos(theta)^2 + sin(theta)^2 is {:.10f}".format(
56 | num_iters, loss.item(), x1[0, 2:].norm().item() ** 2
57 | )
58 | )
59 |
60 |
61 | print("=========================================================")
62 | print("PyTorch Optimization on the Euclidean Space")
63 | print("---------------------------------------------------------")
64 | run(x1.copy(), x2.copy(), num_iters=1000, use_lie_tangent=False)
65 | print("\n")
66 |
67 | print("=========================================================")
68 | print("PyTorch Optimization on the Lie Group Tangent Space (Ours)")
69 | print("---------------------------------------------------------")
70 | run(x1.copy(), x2.copy(), num_iters=1000, use_lie_tangent=True)
71 | print("\n")
72 |
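With use_lie_tangent=True, LieGroupTensor makes the optimizer step act on the tangent space and map back through the exponential map, so x1 stays on the SE2 manifold; this is what the cos(theta)^2 + sin(theta)^2 printout tracks, and it remains 1 only in the tangent-space run. Ignoring Adam's moment estimates, a plain gradient step of size \eta would read

    x_{k+1} = x_k \cdot \exp\big(-\eta \, \mathrm{Proj}_{x_k}(\nabla L)\big)

whereas the Euclidean run updates the raw (x, y, cos, sin) entries directly and drifts off the manifold.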
--------------------------------------------------------------------------------
/examples/se2_planning.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright (c) Meta Platforms, Inc. and affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 | #
7 | import random
8 |
9 | import matplotlib as mpl
10 | import matplotlib.pyplot as plt
11 | import numpy as np
12 | import torch
13 | import torch.utils.data
14 |
15 | import theseus as th
16 | import theseus.utils.examples as theg
17 |
18 | torch.set_default_dtype(torch.double)
19 |
20 | device = "cuda:0" if torch.cuda.is_available() else "cpu"
21 | torch.random.manual_seed(1)
22 | random.seed(1)
23 | np.random.seed(1)
24 |
25 | mpl.rcParams["figure.facecolor"] = "white"
26 | mpl.rcParams["font.size"] = 16
27 |
28 |
29 | # #### INITIAL SETUP
30 | # Data and config stuff
31 | dataset_dir = "tutorials/data/motion_planning_2d"
32 | num_prob = 2
33 | dataset = theg.TrajectoryDataset(True, num_prob, dataset_dir, "tarpit")
34 | data_loader = torch.utils.data.DataLoader(dataset, num_prob, shuffle=False)
35 |
36 | batch = next(iter(data_loader))
37 | map_size = batch["map_tensor"].shape[1]
38 | trajectory_len = batch["expert_trajectory"].shape[2]
39 | num_time_steps = trajectory_len - 1
40 | map_size = batch["map_tensor"].shape[1]
41 | safety_distance = 1.5
42 | robot_radius = 0.25
43 | total_time = 10.0
44 | dt_val = total_time / num_time_steps
45 | Qc_inv = torch.eye(3)
46 | collision_w = 20.0
47 | boundary_w = 100.0
48 |
49 | # Create the planner
50 | planner = theg.MotionPlanner(
51 | optimizer_config=(
52 | "LevenbergMarquardt",
53 | {"max_optim_iters": 50, "step_size": 0.25},
54 | ),
55 | map_size=map_size,
56 | epsilon_dist=safety_distance + robot_radius,
57 | total_time=total_time,
58 | collision_weight=collision_w,
59 | Qc_inv=Qc_inv,
60 | num_time_steps=num_time_steps,
61 | device=device,
62 | pose_type=th.SE2,
63 | nonholonomic_w=10.0,
64 | positive_vel_w=5.0,
65 | )
66 |
67 | # #### INITIALIZE OPTIMIZER VARIABLES
68 | start = torch.zeros(batch["expert_trajectory"].shape[0], 4)
69 | start[:, :2] = batch["expert_trajectory"][:, :2, 0]
70 | start[:, 3] = -1
71 | goal = batch["expert_trajectory"][:, :2, -1]
72 | planner_inputs = {
73 | "sdf_origin": batch["sdf_origin"].to(device),
74 | "start": start.to(device),
75 | "goal": goal.to(device),
76 | "cell_size": batch["cell_size"].to(device),
77 | "sdf_data": batch["sdf_data"].to(device),
78 | }
79 |
80 | # Initialize from straight line trajectory
81 | initial_traj_dict = planner.get_variable_values_from_straight_line(
82 | planner_inputs["start"], planner_inputs["goal"]
83 | )
84 | planner_inputs.update(initial_traj_dict)
85 |
86 | # #### RUN THE PLANNER
87 | planner.layer.forward(
88 | planner_inputs,
89 | optimizer_kwargs={
90 | "verbose": True,
91 | "damping": 0.1,
92 | },
93 | )
94 |
95 | # #### PLOT SOLUTION
96 | solution = planner.get_trajectory()
97 |
98 | sdf = th.eb.SignedDistanceField2D(
99 | th.Point2(batch["sdf_origin"]),
100 | th.Variable(batch["cell_size"]),
101 | th.Variable(batch["sdf_data"]),
102 | )
103 | figures = theg.generate_trajectory_figs(
104 | batch["map_tensor"][1:].cpu(),
105 | sdf,
106 | [solution[1:].cpu()],
107 | robot_radius,
108 | max_num_figures=1,
109 | fig_idx_robot=0,
110 | labels=["solution"],
111 | )
112 | plt.show()
113 |
--------------------------------------------------------------------------------
/examples/simple_example.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | # A minimal example that uses Theseus to fit a curve to a dataset of observations.
7 |
8 | import torch
9 |
10 | import theseus as th
11 |
12 |
13 | def y_model(x, c):
14 | return c * torch.exp(x)
15 |
16 |
17 | def generate_data(num_points=10, c=0.5):
18 | data_x = torch.linspace(-1, 1, num_points).view(1, -1)
19 | data_y = y_model(data_x, c)
20 | return data_x, data_y
21 |
22 |
23 | def read_data():
24 | num_points = 20
25 | data_x, data_y_clean = generate_data(num_points)
26 | return data_x, data_y_clean, 0.5 * torch.ones(1, 1)
27 |
28 |
29 | x_true, y_true, v_true = read_data() # shapes (1, N), (1, N), (1, 1)
30 | x = th.Variable(torch.randn_like(x_true), name="x")
31 | y = th.Variable(y_true, name="y")
32 | v = th.Vector(1, name="v") # a manifold subclass of Variable for optim_vars
33 |
34 |
35 | def error_fn(optim_vars, aux_vars): # returns y - v * exp(x)
36 | x, y = aux_vars
37 | return y.tensor - optim_vars[0].tensor * torch.exp(x.tensor)
38 |
39 |
40 | objective = th.Objective()
41 | cost_function = th.AutoDiffCostFunction(
42 | [v], error_fn, y_true.shape[1], aux_vars=[x, y], cost_weight=th.ScaleCostWeight(1.0)
43 | )
44 | objective.add(cost_function)
45 | layer = th.TheseusLayer(th.GaussNewton(objective, max_iterations=10))
46 |
47 | phi = torch.nn.Parameter(x_true + 0.1 * torch.ones_like(x_true))
48 | outer_optimizer = torch.optim.Adam([phi], lr=0.001)
49 | for epoch in range(20):
50 | solution, info = layer.forward(
51 | input_tensors={"x": phi.clone(), "v": torch.ones(1, 1)},
52 | optimizer_kwargs={"backward_mode": "implicit"},
53 | )
54 | outer_loss = torch.nn.functional.mse_loss(solution["v"], v_true)
55 |     outer_optimizer.zero_grad()
56 |     outer_loss.backward()
57 |     outer_optimizer.step()
58 |     print("Outer loss: ", outer_loss.item())
59 |
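For reference, each forward pass of the layer solves the inner nonlinear least-squares problem in v, with the aux variable x held at the current phi:

    \min_{v} \sum_{i=1}^{N} \left( y_i - v\, e^{\phi_i} \right)^2

and the outer Adam loop then adjusts phi so that the recovered v matches v_true under the MSE loss.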
--------------------------------------------------------------------------------
/requirements/dev.txt:
--------------------------------------------------------------------------------
1 | black>=20.8b1
2 | flake8>=3.8.4
3 | mypy>=0.981
4 | pre-commit>=2.9.2
5 | isort>=5.6.4
6 | types-PyYAML==5.4.3
7 | mock>=4.0.3
8 | types-mock>=4.0.8
9 | Sphinx==5.0.2
10 | sphinx-rtd-theme==1.0.0
11 | hydra-core
12 | flake8-copyright>=0.2.4
--------------------------------------------------------------------------------
/requirements/docs.txt:
--------------------------------------------------------------------------------
1 | differentiable-robot-model>=0.2.3
2 | numpy>=1.19.2
3 | scipy>=1.5.3
4 | scikit-sparse>=0.4.5
5 | torch>=1.11
6 | pytest>=6.2.1
7 | pybind11>=2.7.1
8 | mock>=4.0.3
9 | Sphinx==5.0.2
10 | sphinx-rtd-theme==1.0.0
11 |
--------------------------------------------------------------------------------
/requirements/main.txt:
--------------------------------------------------------------------------------
1 | numpy>=1.19.2
2 | scipy>=1.5.3
3 | scikit-sparse>=0.4.11
4 | pytest>=6.2.1
5 | pybind11>=2.7.1
6 | semantic-version==2.10.0
7 | torchlie
8 | torchkin
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [flake8]
2 | max-line-length = 100
3 | # E203: whitespace before ":", incompatible with black
4 | # W503: line break before binary operator (black also)
5 | # F401: imported but unused
6 | ignore=E203, W503
7 | per-file-ignores =
8 | *__init__.py:F401
9 | # E261,E241,E121,E131,E201,E202: formatting options, to avoid having to
10 | # re-format explicit sparse matrices used for a small example
11 | tests/theseus_tests/extlib/test_baspacho_simple.py:E261,E241,E121,E131,E201,E202
12 | copyright-check = True
13 | select = E,F,W,C
14 | copyright-regexp=Copyright \(c\) Meta Platforms, Inc. and affiliates.
15 | exclude = third_party
16 |
17 | [mypy]
18 | python_version = 3.10
19 | ignore_missing_imports = True
20 | show_error_codes = True
21 | strict_optional = False
22 | implicit_reexport = True
23 | explicit_package_bases = True
24 |
25 | [mypy-sympy.*]
26 | follow_imports = skip
27 |
28 | [isort]
29 | profile=black
30 | skip=theseus/__init__.py
31 |
32 | [tool:pytest]
33 | markers =
34 | cudaext: marks tests as requiring CUDA support
35 |
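The cudaext marker registered under [tool:pytest] lets CUDA-dependent tests be deselected on machines without a GPU, e.g. with pytest -m "not cudaext" on the command line, or equivalently from Python:

    import pytest

    # Deselect all tests marked with @pytest.mark.cudaext
    pytest.main(["-m", "not cudaext"])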
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
--------------------------------------------------------------------------------
/tests/labs/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
--------------------------------------------------------------------------------
/tests/theseus_tests/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
--------------------------------------------------------------------------------
/tests/theseus_tests/core/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
--------------------------------------------------------------------------------
/tests/theseus_tests/core/test_manifold.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import pytest # noqa: F401
7 | import torch
8 |
9 | import theseus as th
10 |
11 | from .common import MockVar, check_copy_var
12 |
13 |
14 | def test_copy():
15 | for i in range(100):
16 | size = torch.randint(low=1, high=21, size=(1,)).item()
17 | data = torch.rand(size=(1,) + (size,))
18 | var = MockVar(size, tensor=data, name="var")
19 | check_copy_var(var)
20 |
21 |
22 | def test_var_shape():
23 | for sz in range(100):
24 | data = torch.ones(1, sz)
25 | var = MockVar(sz, tensor=data)
26 | assert data.shape == var.shape
27 |
28 |
29 | def test_update_shape_check():
30 | for sz in range(2, 100):
31 | data = torch.ones(sz)
32 | var = MockVar(sz)
33 | with pytest.raises(ValueError):
34 | var.update(data) # no batch dimension
35 | with pytest.raises(ValueError):
36 | var.update(data.view(-1, 1)) # wrong dimension
37 | var.update(data.view(1, -1))
38 | assert torch.isclose(var.tensor.squeeze(), data).all()
39 |
40 |
41 | class MockVarNoArgs(th.Manifold):
42 | def __init__(self, tensor=None, name=None):
43 | super().__init__(tensor=tensor, name=name)
44 |
45 | @staticmethod
46 | def _init_tensor():
47 | return torch.ones(1, 1)
48 |
49 | @staticmethod
50 | def _check_tensor_impl(tensor: torch.Tensor) -> bool:
51 | return True
52 |
53 | @staticmethod
54 | def normalize(tensor: torch.Tensor) -> torch.Tensor:
55 | return tensor
56 |
57 | def dof(self):
58 | return 0
59 |
60 | def numel(self):
61 | return 0
62 |
63 | def _local_impl(self, variable2):
64 | pass
65 |
66 | def _local_jacobian(self, var2):
67 | pass
68 |
69 | def _retract_impl(self, delta):
70 | pass
71 |
72 | def _copy_impl(self):
73 | return MockVarNoArgs()
74 |
75 | def _project_impl(
76 | self, euclidean_grad: torch.Tensor, is_sparse: bool = False
77 | ) -> torch.Tensor:
78 | return euclidean_grad.clone()
79 |
80 |
81 | def test_variable_no_args_init():
82 | var = MockVarNoArgs(name="mock")
83 | assert var.tensor.allclose(torch.ones(1, 1))
84 | assert var.name == "mock"
85 | var = MockVarNoArgs(tensor=torch.ones(2, 1))
86 | assert var.tensor.allclose(torch.ones(2, 1))
87 | var.update(torch.ones(3, 1))
88 | assert var.tensor.allclose(torch.ones(3, 1))
89 |
--------------------------------------------------------------------------------
/tests/theseus_tests/core/test_theseus_function.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | import numpy as np
6 | import pytest # noqa: F401
7 | import torch
8 |
9 | import theseus as th
10 |
11 | from .common import MockCostFunction, MockCostWeight
12 |
13 |
14 | def test_theseus_function_init():
15 | all_ids = []
16 | variables = [th.Variable(torch.ones(1, 1), name="var_1")]
17 | aux_vars = [th.Variable(torch.ones(1, 1), name="aux_1")]
18 | for i in range(100):
19 | cost_weight = MockCostWeight(torch.ones(1, 1), name=f"cost_weight_{i}")
20 | if np.random.random() < 0.5:
21 | name = f"name_{i}"
22 | else:
23 | name = None
24 | cost_function = MockCostFunction(variables, aux_vars, cost_weight, name=name)
25 | all_ids.append(cost_function._id)
26 | all_ids.append(cost_weight._id)
27 | if name is not None:
28 | assert name == cost_function.name
29 |
30 | assert len(set(all_ids)) == len(all_ids)
31 |
32 |
33 | def test_no_copy_vars_check():
34 | variables = [th.Variable(torch.ones(1, 1), name="var_1")]
35 | aux_vars = [th.Variable(torch.ones(1, 1), name="aux_1")]
36 | cost_weight = MockCostWeight(torch.ones(1, 1), name="cost_weight")
37 | cost_function = MockCostFunction(
38 | variables, aux_vars, cost_weight, no_copy_vars=True
39 | )
40 | with pytest.raises(RuntimeError):
41 | cost_function.copy()
42 |
--------------------------------------------------------------------------------
/tests/theseus_tests/core/test_variable.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import numpy as np
7 | import pytest # noqa: F401
8 | import torch
9 |
10 | import theseus as th
11 |
12 | from .common import MockVar
13 |
14 |
15 | def test_variable_init():
16 | all_ids = []
17 | for i in range(100):
18 | if np.random.random() < 0.5:
19 | name = f"name_{i}"
20 | else:
21 | name = None
22 | data = torch.rand(1, 1)
23 | t = th.Variable(data, name=name)
24 | all_ids.append(t._id)
25 | if name is not None:
26 | assert name == t.name
27 | else:
28 | assert t.name == f"Variable__{t._id}"
29 | assert t.tensor.allclose(data)
30 |
31 | assert len(set(all_ids)) == len(all_ids)
32 |
33 |
34 | def test_properties():
35 | for i in range(100):
36 | length = np.random.randint(2, 5)
37 | batch_size = np.random.randint(1, 100)
38 | dtype = torch.float if np.random.random() < 0.5 else torch.long
39 | data = torch.ones(batch_size, length, dtype=dtype)
40 | t = th.Variable(data)
41 | assert t.shape == data.shape
42 | assert t.ndim == data.ndim
43 | assert t.dtype == dtype
44 |
45 |
46 | def test_update():
47 | for _ in range(10):
48 | for length in range(1, 10):
49 | var = MockVar(length)
50 | batch_size = np.random.randint(1, 10)
51 | # check update from torch tensor
52 | new_data_good = torch.rand(batch_size, length)
53 | var.update(new_data_good)
54 | assert var.tensor is new_data_good
55 | # check update from variable
56 | new_data_good_wrapped = torch.rand(batch_size, length)
57 | another_var = MockVar(length, tensor=new_data_good_wrapped)
58 | var.update(another_var)
59 | # check raises error on shape
60 | new_data_bad = torch.rand(batch_size, length + 1)
61 | with pytest.raises(ValueError):
62 | var.update(new_data_bad)
63 | # check raises error on dtype
64 |             new_data_bad = torch.rand(batch_size, length).double()
65 | with pytest.raises(ValueError):
66 | var.update(new_data_bad)
67 | # check batch indices to ignore
68 | how_many = np.random.randint(1, batch_size + 1)
69 | ignore_indices = np.random.choice(batch_size, size=how_many)
70 | ignore_mask = torch.zeros(batch_size).bool()
71 | ignore_mask[ignore_indices] = 1
72 | old_data = var.tensor.clone()
73 | new_data_some_ignored = torch.randn(batch_size, length)
74 | if ignore_indices[0] % 2 == 0: # randomly wrap into a variable to also test
75 | new_data_some_ignored = MockVar(length, new_data_some_ignored)
76 | var.update(new_data_some_ignored, batch_ignore_mask=ignore_mask)
77 | for i in range(batch_size):
78 | if ignore_mask[i] == 1:
79 | assert torch.allclose(var[i], old_data[i])
80 | else:
81 | if isinstance(new_data_some_ignored, th.Variable):
82 | new_data_some_ignored = new_data_some_ignored.tensor
83 | assert torch.allclose(var[i], new_data_some_ignored[i])
84 |
--------------------------------------------------------------------------------
/tests/theseus_tests/decorators.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | _BASPACHO_NOT_INSTALLED_MSG = "Baspacho solver not in theseus extension library."
7 | _LABS_NOT_INSTALLED_MSG = "Theseus Labs is not available."
8 |
9 |
10 | def _run_if(import_fn, msg):
11 | import pytest
12 |
13 | try:
14 | import_fn()
15 |
16 |         is_missing = False
17 |     except ModuleNotFoundError:
18 |         is_missing = True
19 |
20 |     return pytest.mark.skipif(is_missing, reason=msg)
21 |
22 |
23 | def run_if_baspacho():
24 | def _import_fn():
25 | import theseus.extlib.baspacho_solver # noqa: F401
26 |
27 | return _run_if(_import_fn, _BASPACHO_NOT_INSTALLED_MSG)
28 |
29 |
30 | def run_if_labs():
31 | def _import_fn():
32 | import theseus.labs # noqa: F401
33 |
34 | return _run_if(_import_fn, _LABS_NOT_INSTALLED_MSG)
35 |
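Usage sketch for these helpers, mirroring tests/theseus_tests/optimizer/linear/test_baspacho_sparse_solver.py: the returned skipif marker is applied as a decorator, so the body only runs when the extension imports cleanly.

    from tests.theseus_tests.decorators import run_if_baspacho

    @run_if_baspacho()
    def test_needs_baspacho():
        # Safe: the test is skipped when the extension is unavailable
        import theseus.extlib.baspacho_solver  # noqa: F401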
--------------------------------------------------------------------------------
/tests/theseus_tests/embodied/collision/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
--------------------------------------------------------------------------------
/tests/theseus_tests/embodied/collision/utils.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 |
8 | import theseus as th
9 |
10 |
11 | def random_scalar(batch_size):
12 | number = torch.rand(1).item()
13 | # Test with all possible cell_size inputs
14 | if number < 1.0 / 3:
15 | return th.Variable(tensor=torch.randn(batch_size, 1))
16 | elif number < 2.0 / 3:
17 | return torch.randn(batch_size, 1)
18 | else:
19 | return torch.randn(1).item()
20 |
21 |
22 | def random_origin(batch_size):
23 | origin_tensor = torch.randn(batch_size, 2)
24 | if torch.rand(1).item() < 0.5:
25 | return th.Point2(tensor=origin_tensor)
26 | return origin_tensor
27 |
28 |
29 | def random_sdf_data(batch_size, field_width, field_height):
30 | sdf_data_tensor = torch.randn(batch_size, field_width, field_height)
31 | if torch.rand(1).item() < 0.5:
32 | return th.Variable(tensor=sdf_data_tensor)
33 | return sdf_data_tensor
34 |
35 |
36 | def random_sdf(batch_size, field_width, field_height):
37 | return th.eb.SignedDistanceField2D(
38 | random_origin(batch_size),
39 | random_scalar(batch_size),
40 | random_sdf_data(batch_size, field_width, field_height),
41 | )
42 |
--------------------------------------------------------------------------------
/tests/theseus_tests/embodied/kinematics/data/panda_fk_dataset.json:
--------------------------------------------------------------------------------
1 | {
2 | "joint_states": [
3 | [0.2896690950055305, 0.7887120656281918, 0.6098184262843489, -1.3822266904213196, -0.45304768976307175, 2.437887624445399, -0.37036997092492996],
4 | [2.3248593412410172, 1.6994167497883723, -0.6916813389840697, -0.5851990245348841, 0.17146823279768597, 2.1335270160528377, 2.5255755709563092],
5 | [-2.545557823442099, -1.513253690733913, -2.847119985909619, -0.453153780710263, 1.6506377914866026, 3.314099494785764, 2.840216966477668]
6 | ],
7 | "ee_poses": [
8 | [[0.5618914365768433, 0.5131404399871826, 0.34427565336227417], [0.7712864279747009, 0.6026210188865662, 0.19384866952896118, -0.06624171137809753]],
9 | [[-0.3478390574455261, 0.671889066696167, -0.023775160312652588], [0.952156126499176, -0.1261112540960312, 0.22165635228157043, -0.16841386258602142]],
10 | [[0.6701399683952332, 0.41195011138916016, 0.1345270723104477], [0.4187312126159668, 0.7321641445159912, 0.47301992774009705, 0.2546604871749878]]
11 | ],
12 | "ee_name": "panda_virtual_ee_link"
13 | }
14 |
--------------------------------------------------------------------------------
/tests/theseus_tests/embodied/measurements/between_errors_se2.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/facebookresearch/theseus/c8583de41824613fb135ee9bad0e930ded6be404/tests/theseus_tests/embodied/measurements/between_errors_se2.npy
--------------------------------------------------------------------------------
/tests/theseus_tests/embodied/measurements/between_errors_so2.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/facebookresearch/theseus/c8583de41824613fb135ee9bad0e930ded6be404/tests/theseus_tests/embodied/measurements/between_errors_so2.npy
--------------------------------------------------------------------------------
/tests/theseus_tests/embodied/measurements/test_reprojection.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 |
8 | import theseus as th
9 | from theseus.utils.examples.bundle_adjustment.util import random_small_quaternion
10 |
11 |
12 | def test_residual():
13 | # unit test for Cost term
14 | torch.manual_seed(0)
15 | batch_size = 4
16 | cam_rot = torch.cat(
17 | [
18 | random_small_quaternion(max_degrees=20).unsqueeze(0)
19 | for _ in range(batch_size)
20 | ]
21 | )
22 | cam_tr = torch.rand((batch_size, 3), dtype=torch.float64) * 2 + torch.tensor(
23 | [-1, -1, -5.0], dtype=torch.float64
24 | )
25 | cam_pose_data = torch.cat([cam_tr, cam_rot], dim=1)
26 | cam_pose = th.SE3(cam_pose_data, name="cam_pose")
27 |
28 | focal_length = th.Vector(
29 | tensor=torch.tensor([1000], dtype=torch.float64)
30 | .repeat(batch_size)
31 | .unsqueeze(1),
32 | name="focal_length",
33 | )
34 | calib_k1 = th.Vector(
35 | tensor=torch.tensor([-0.1], dtype=torch.float64)
36 | .repeat(batch_size)
37 | .unsqueeze(1),
38 | name="calib_k1",
39 | )
40 | calib_k2 = th.Vector(
41 | tensor=torch.tensor([0.01], dtype=torch.float64)
42 | .repeat(batch_size)
43 | .unsqueeze(1),
44 | name="calib_k2",
45 | )
46 | world_point = th.Vector(
47 | tensor=torch.rand((batch_size, 3), dtype=torch.float64), name="worldPoint"
48 | )
49 | point_cam = cam_pose.transform_from(world_point).tensor
50 | proj = -point_cam[:, :2] / point_cam[:, 2:3]
51 | proj_sqn = (proj * proj).sum(dim=1).unsqueeze(1)
52 | proj_factor = focal_length.tensor * (
53 | 1.0 + proj_sqn * (calib_k1.tensor + proj_sqn * calib_k2.tensor)
54 | )
55 | point_projection = proj * proj_factor
56 | image_feature_point = th.Vector(
57 | tensor=point_projection + (torch.rand((batch_size, 2)) - 0.5) * 50,
58 | name="image_feature_point",
59 | )
60 | r = th.eb.Reprojection(
61 | camera_pose=cam_pose,
62 | world_point=world_point,
63 | focal_length=focal_length,
64 | calib_k1=calib_k1,
65 | calib_k2=calib_k2,
66 | image_feature_point=image_feature_point,
67 | )
68 |
69 | base_err = r.error()
70 | base_camera_pose = r.camera_pose.copy()
71 | base_world_point = r.world_point.copy()
72 |
73 | n_err = base_err.shape[1]
74 | pose_num_jac = torch.zeros((batch_size, n_err, 6), dtype=torch.float64)
75 | epsilon = 1e-8
76 | for i in range(6):
77 | v = torch.zeros((batch_size, 6), dtype=torch.float64)
78 | v[:, i] += epsilon
79 | r.camera_pose = base_camera_pose.retract(v)
80 | pert_err = r.error()
81 | pose_num_jac[:, :, i] = (pert_err - base_err) / epsilon
82 | r.camera_pose = base_camera_pose
83 |
84 | wpt_num_jac = torch.zeros((batch_size, n_err, 3), dtype=torch.float64)
85 | for i in range(3):
86 | v = torch.zeros((batch_size, 3), dtype=torch.float64)
87 | v[:, i] += epsilon
88 | r.world_point = base_world_point.retract(v)
89 | pert_err = r.error()
90 | wpt_num_jac[:, :, i] = (pert_err - base_err) / epsilon
91 |
92 | (pose_jac, wpt_jac), _ = r.jacobians()
93 |
94 | assert torch.norm(pose_num_jac - pose_jac) < 5e-5
95 | assert torch.norm(wpt_num_jac - wpt_jac) < 5e-5
96 |
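theseus also ships a randomized helper that automates this kind of finite-difference check (see tests/theseus_tests/embodied/motionmodel/test_misc.py later in this listing); as a sketch, the same validation for the Reprojection cost built above reduces to:

    from theseus.utils import check_jacobians

    # r is the th.eb.Reprojection cost function constructed in test_residual
    check_jacobians(r, num_checks=10, tol=1e-5)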
--------------------------------------------------------------------------------
/tests/theseus_tests/embodied/misc/sq_dist_errors_se2.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/facebookresearch/theseus/c8583de41824613fb135ee9bad0e930ded6be404/tests/theseus_tests/embodied/misc/sq_dist_errors_se2.npy
--------------------------------------------------------------------------------
/tests/theseus_tests/embodied/misc/sq_dist_errors_so2.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/facebookresearch/theseus/c8583de41824613fb135ee9bad0e930ded6be404/tests/theseus_tests/embodied/misc/sq_dist_errors_so2.npy
--------------------------------------------------------------------------------
/tests/theseus_tests/embodied/motionmodel/test_misc.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import theseus as th
7 | import torch
8 |
9 | from theseus.utils import check_jacobians
10 |
11 |
12 | def test_nonholonomic():
13 | rng = torch.Generator()
14 | rng.manual_seed(0)
15 | # Check SE2 pose version
16 | pose = th.SE2.rand(10, generator=rng)
17 | vel = th.Vector.rand(10, 3, generator=rng)
18 | w = th.ScaleCostWeight(1.0)
19 | cf = th.eb.Nonholonomic(pose, vel, w)
20 | check_jacobians(cf, num_checks=100, tol=1e-5)
21 | # Check Vector3 pose version
22 | pose = th.Vector.rand(10, 3, generator=rng)
23 | cf = th.eb.Nonholonomic(pose, vel, w)
24 | check_jacobians(cf, num_checks=100, tol=1e-5)
25 |
26 |
27 | def test_hinge_cost():
28 | rng = torch.Generator()
29 | rng.manual_seed(0)
30 | batch_size = 10
31 | how_many = 4
32 |
33 | def _rand_chunk():
34 | return torch.rand(batch_size, how_many, generator=rng)
35 |
36 | for limit in [0.0, 1.0]:
37 | threshold = 0.0 if limit == 0.0 else 0.2
38 | vector = torch.zeros(batch_size, 3 * how_many)
39 |
40 | # Vector is created so that [below_th, within_th, above_th]
41 | vector[:, :how_many] = -_rand_chunk() - limit
42 | if limit == 0.0:
43 | vector[:, how_many : 2 * how_many] = 0.0
44 | else:
45 | vector[:, how_many : 2 * how_many] = (
46 | -limit + threshold + 0.1 * _rand_chunk()
47 | )
48 | vector[:, 2 * how_many :] = limit + _rand_chunk()
49 | v = th.Vector(tensor=vector)
50 | cf = th.eb.HingeCost(v, -limit, limit, threshold, th.ScaleCostWeight(1.0))
51 |
52 | jacobians, error = cf.jacobians()
53 | assert jacobians[0].shape == (batch_size, 3 * how_many, 3 * how_many)
54 | assert error.shape == (batch_size, 3 * how_many)
55 | # Middle section error must be == 0
56 | assert (error[:, how_many : 2 * how_many] == 0).all().item()
57 | # Check jacobians
58 |         # All jacobians are equal; for each batch index, the nonzero entries
59 |         # are how_many values equal to -1 and how_many values equal to 1
60 | nn_zero = batch_size * how_many
61 | assert jacobians[0].abs().sum() == 2 * nn_zero
62 | assert jacobians[0][:, :how_many, :how_many].sum() == -nn_zero
63 | assert jacobians[0][:, 2 * how_many :, 2 * how_many :].sum() == nn_zero
64 |
65 | # Throw in some random checks as well, why not
66 | check_jacobians(cf, num_checks=100, tol=1e-5)
67 |
--------------------------------------------------------------------------------
/tests/theseus_tests/extlib/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
--------------------------------------------------------------------------------
/tests/theseus_tests/geometry/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
--------------------------------------------------------------------------------
/tests/theseus_tests/geometry/point_types_mypy_check.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import theseus as th
7 |
8 | x = th.Point2()
9 | y = th.Point2()
10 |
11 | z1: th.Point2 = x + y # noqa: F841
12 | z2: th.Point2 = x - y # noqa: F841
13 | z3: th.Point2 = x * y # noqa: F841
14 | z4: th.Point2 = x / y # noqa: F841
15 | z5: th.Point2 = -x # noqa: F841
16 | z6: th.Point2 = x.cat(y) # noqa: F841
17 | z7: th.Point2 = x.abs() # noqa: F841
18 |
19 |
20 | x1 = th.Point3()
21 | y1 = th.Point3()
22 |
23 | w1: th.Point3 = x1 + y1 # noqa: F841
24 | w2: th.Point3 = x1 - y1 # noqa: F841
25 | w3: th.Point3 = x1 * y1 # noqa: F841
26 | w4: th.Point3 = x1 / y1 # noqa: F841
27 | w5: th.Point3 = -x1 # noqa: F841
28 | w6: th.Point3 = x1.cat(y1) # noqa: F841
29 | w7: th.Point3 = x1.abs() # noqa: F841
30 |
--------------------------------------------------------------------------------
/tests/theseus_tests/optimizer/autograd/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
--------------------------------------------------------------------------------
/tests/theseus_tests/optimizer/autograd/bad_sparse_matrix.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/facebookresearch/theseus/c8583de41824613fb135ee9bad0e930ded6be404/tests/theseus_tests/optimizer/autograd/bad_sparse_matrix.pth
--------------------------------------------------------------------------------
/tests/theseus_tests/optimizer/autograd/common.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 | from torch.autograd import grad, gradcheck
8 |
9 |
10 | def check_grad(solve_func, inputs, eps, atol, rtol):
11 | assert gradcheck(solve_func, inputs, eps=eps, atol=atol)
12 |
13 | A_val, b = inputs[0], inputs[1]
14 | # Check that the gradient works correctly for floating point data
15 | out = solve_func(*inputs).sum()
16 | gA, gb = grad(out, (A_val, b))
17 |
18 | A_float = A_val.float()
19 | b_float = b.float()
20 | inputs2 = (A_float, b_float) + inputs[2:]
21 | out_float = solve_func(*inputs2).sum()
22 | gA_float, gb_float = grad(out_float, (A_float, b_float))
23 |
24 | # This mostly checks that backward() is not accumulating
25 | # additional floating point errors on top of those expected by
26 | # converting the input from double to float. Naive float casting
27 | # in the backward python ops can result in differences in order of magnitude
28 | # even for well-conditioned systems. These checks cover that case.
29 | #
30 | # Do note that, in general, it's possible to construct very
31 | # ill-conditioned systems where the initial loss of precision is enough
32 | # to produce large gradient errors no matter what we do. These checks
33 | # are not meant to capture such scenarios.
34 | torch.testing.assert_close(gA, gA_float.double(), rtol=rtol, atol=atol)
35 | torch.testing.assert_close(gb, gb_float.double(), rtol=rtol, atol=atol)
36 |
--------------------------------------------------------------------------------
/tests/theseus_tests/optimizer/autograd/test_lu_cuda_sparse_backward.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import pytest # noqa: F401
7 | import torch
8 |
9 | import theseus as th
10 | from .common import check_grad
11 |
12 |
13 | def _build_sparse_mat(batch_size):
14 | torch.manual_seed(37)
15 | all_cols = list(range(10))
16 | col_ind = []
17 | row_ptr = [0]
18 | for i in range(12):
19 | start = max(0, i - 2)
20 | end = min(i + 1, 10)
21 | col_ind += all_cols[start:end]
22 | row_ptr.append(len(col_ind))
23 | data = torch.randn(size=(batch_size, len(col_ind)), dtype=torch.double)
24 | return 12, 10, data, col_ind, row_ptr
25 |
26 |
27 | @pytest.mark.cudaext
28 | def test_sparse_backward_step():
29 | if not torch.cuda.is_available():
30 | return
31 | from theseus.optimizer.autograd import LUCudaSolveFunction
32 |
33 | void_objective = th.Objective()
34 | void_ordering = th.VariableOrdering(void_objective, default_order=False)
35 | solver = th.LUCudaSparseSolver(
36 | void_objective, linearization_kwargs={"ordering": void_ordering}, damping=0.01
37 | )
38 | linearization = solver.linearization
39 |
40 | batch_size = 4
41 | void_objective._batch_size = batch_size
42 | num_rows, num_cols, data, col_ind, row_ptr = _build_sparse_mat(batch_size)
43 | linearization.num_rows = num_rows
44 | linearization.num_cols = num_cols
45 | linearization.A_val = data.cuda()
46 | linearization.A_col_ind = col_ind
47 | linearization.A_row_ptr = row_ptr
48 | linearization.b = torch.randn(
49 | size=(batch_size, num_rows), dtype=torch.double
50 | ).cuda()
51 |
52 | linearization.A_val.requires_grad = True
53 | linearization.b.requires_grad = True
54 | # Only need this line for the test since the objective is a mock
55 | solver.reset(batch_size=batch_size)
56 | damping_alpha_beta = (
57 | 0.5 * torch.rand_like(linearization.A_val[:, 0]),
58 | 1.3 * torch.rand_like(linearization.A_val[:, 0]),
59 | )
60 | inputs = (
61 | linearization.A_val,
62 | linearization.b,
63 | linearization.structure(),
64 | solver.A_row_ptr,
65 | solver.A_col_ind,
66 | solver._solver_contexts[solver._last_solver_context],
67 | damping_alpha_beta,
68 | False, # it's the same matrix, so no overwrite problems
69 | )
70 |
71 | check_grad(LUCudaSolveFunction.apply, inputs, eps=3e-4, atol=1e-3, rtol=1e-4)
72 |
--------------------------------------------------------------------------------
/tests/theseus_tests/optimizer/autograd/test_sparse_backward.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import pytest # noqa: F401
7 | import torch
8 | from sksparse.cholmod import analyze_AAt
9 | from torch.autograd import gradcheck
10 |
11 | import theseus as th
12 | from theseus.optimizer.autograd import CholmodSolveFunction
13 |
14 |
15 | def _build_sparse_mat(batch_size):
16 | torch.manual_seed(37)
17 | all_cols = list(range(10))
18 | col_ind = []
19 | row_ptr = [0]
20 | for i in range(12):
21 | start = max(0, i - 2)
22 | end = min(i + 1, 10)
23 | col_ind += all_cols[start:end]
24 | row_ptr.append(len(col_ind))
25 | data = torch.randn(size=(batch_size, len(col_ind)), dtype=torch.double)
26 | return 12, 10, data, col_ind, row_ptr
27 |
28 |
29 | def test_sparse_backward_step():
30 | void_objective = th.Objective()
31 | void_ordering = th.VariableOrdering(void_objective, default_order=False)
32 | solver = th.CholmodSparseSolver(
33 | void_objective, linearization_kwargs={"ordering": void_ordering}, damping=0.01
34 | )
35 | linearization = solver.linearization
36 |
37 | batch_size = 4
38 | void_objective._batch_size = batch_size
39 | num_rows, num_cols, data, col_ind, row_ptr = _build_sparse_mat(batch_size)
40 | linearization.num_rows = num_rows
41 | linearization.num_cols = num_cols
42 | linearization.A_val = data
43 | linearization.A_col_ind = col_ind
44 | linearization.A_row_ptr = row_ptr
45 | linearization.b = torch.randn(size=(batch_size, num_rows), dtype=torch.double)
46 |
47 | linearization.A_val.requires_grad = True
48 | linearization.b.requires_grad = True
49 | # Only need this line for the test since the objective is a mock
50 | solver._symbolic_cholesky_decomposition = analyze_AAt(
51 | linearization.structure().mock_csc_transpose()
52 | )
53 | inputs = (
54 | linearization.A_val,
55 | linearization.b,
56 | linearization.structure(),
57 | solver._symbolic_cholesky_decomposition,
58 | solver._damping,
59 | )
60 |
61 | assert gradcheck(CholmodSolveFunction.apply, inputs, eps=3e-4, atol=1e-3)
62 |
63 |
64 | def test_float64_used():
65 | data = torch.load("tests/theseus_tests/optimizer/autograd/bad_sparse_matrix.pth")
66 | decomp = analyze_AAt(data["struct"].mock_csc_transpose())
67 | delta = CholmodSolveFunction.apply(
68 | data["a"],
69 | data["b"],
70 | data["struct"],
71 | decomp,
72 | 1e-06,
73 | )
74 |
75 | # With this matrix, if CHOLMOD is not cast to 64 bits, this value is > 100.0
76 | assert delta.abs().max() < 1.0
77 |
--------------------------------------------------------------------------------
/tests/theseus_tests/optimizer/linear/test_baspacho_sparse_solver.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import pytest # noqa: F401
7 | import torch
8 |
9 | from tests.theseus_tests.decorators import run_if_baspacho
10 | from tests.theseus_tests.optimizer.autograd.test_baspacho_sparse_backward import (
11 | get_linearization_and_solver_for_random_sparse,
12 | )
13 |
14 |
15 | def check_sparse_solver(
16 | batch_size, rows_to_cols_ratio, num_cols, param_size_range, fill, dev="cpu"
17 | ):
18 | rng = torch.Generator(device=dev)
19 |     # hash a tuple, not a str: tuple hashes of numbers are deterministic,
20 |     # while str hashes are salted per process
21 |     rng.manual_seed(hash((batch_size, rows_to_cols_ratio, num_cols, fill)))
20 |
21 | linearization, solver = get_linearization_and_solver_for_random_sparse(
22 | batch_size, rows_to_cols_ratio, num_cols, param_size_range, fill, dev, rng
23 | )
24 |
25 | # Only need this line for the test since the objective is a mock
26 | # No need to reset manually when running a Theseus optimizer
27 | solver.reset(device=dev)
28 | damping = 1e-4
29 | solved_x = solver.solve(damping=damping, ellipsoidal_damping=False)
30 |
31 | for i in range(batch_size):
32 | csrAi = linearization.structure().csr_straight(linearization.A_val[i, :].cpu())
33 | Ai = torch.tensor(csrAi.todense(), dtype=torch.double)
34 | ata = Ai.T @ Ai
35 | b = linearization.b[i].cpu()
36 | atb = torch.DoubleTensor(csrAi.transpose() @ b)
37 |
38 | # the linear system solved is with matrix AtA
39 | solved_xi_cpu = solved_x[i].cpu()
40 | atb_check = ata @ solved_xi_cpu + damping * solved_xi_cpu
41 | torch.testing.assert_close(atb, atb_check, atol=1e-3, rtol=1e-3)
42 |
43 |
44 | @run_if_baspacho()
45 | @pytest.mark.parametrize("batch_size", [1, 32])
46 | @pytest.mark.parametrize("rows_to_cols_ratio", [1.1, 1.7])
47 | @pytest.mark.parametrize("num_cols", [30, 70])
48 | @pytest.mark.parametrize("param_size_range", ["2:6", "1:13"])
49 | @pytest.mark.parametrize("fill", [0.02, 0.05])
50 | def test_baspacho_solver_cpu_full(
51 | batch_size, rows_to_cols_ratio, num_cols, param_size_range, fill
52 | ):
53 | check_sparse_solver(
54 | batch_size=batch_size,
55 | rows_to_cols_ratio=rows_to_cols_ratio,
56 | num_cols=num_cols,
57 | param_size_range=param_size_range,
58 | fill=fill,
59 | dev="cpu",
60 | )
61 |
62 |
63 | @run_if_baspacho()
64 | def test_baspacho_solver_cpu_abridged():
65 | check_sparse_solver(
66 | batch_size=128,
67 | rows_to_cols_ratio=1.7,
68 | num_cols=70,
69 | param_size_range="1:13",
70 | fill=0.05,
71 | dev="cpu",
72 | )
73 |
74 |
75 | @run_if_baspacho()
76 | @pytest.mark.cudaext
77 | @pytest.mark.parametrize("batch_size", [1, 32])
78 | @pytest.mark.parametrize("rows_to_cols_ratio", [1.1, 1.7])
79 | @pytest.mark.parametrize("num_cols", [30, 70])
80 | @pytest.mark.parametrize("param_size_range", ["2:6", "1:13"])
81 | @pytest.mark.parametrize("fill", [0.02, 0.05])
82 | def test_baspacho_solver_cuda(
83 | batch_size, rows_to_cols_ratio, num_cols, param_size_range, fill
84 | ):
85 | check_sparse_solver(
86 | batch_size=batch_size,
87 | rows_to_cols_ratio=rows_to_cols_ratio,
88 | num_cols=num_cols,
89 | param_size_range=param_size_range,
90 | fill=fill,
91 | dev="cuda",
92 | )
93 |
--------------------------------------------------------------------------------
/tests/theseus_tests/optimizer/linear/test_cholmod_sparse_solver.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import pytest # noqa: F401
7 | import torch
8 | from sksparse.cholmod import analyze_AAt
9 |
10 | import theseus as th
11 |
12 |
13 | def _build_sparse_mat(batch_size):
14 | all_cols = list(range(10))
15 | col_ind = []
16 | row_ptr = [0]
17 | for i in range(12):
18 | start = max(0, i - 2)
19 | end = min(i + 1, 10)
20 | col_ind += all_cols[start:end]
21 | row_ptr.append(len(col_ind))
22 | data = torch.randn((batch_size, len(col_ind)))
23 | return 12, 10, data, col_ind, row_ptr
24 |
25 |
26 | def test_sparse_solver():
27 | void_objective = th.Objective()
28 | void_ordering = th.VariableOrdering(void_objective, default_order=False)
29 |     damping = 0.2 # large value, so its effect is clearly visible in the checks below
30 | solver = th.CholmodSparseSolver(
31 | void_objective,
32 | linearization_kwargs={"ordering": void_ordering},
33 | damping=damping,
34 | )
35 | linearization = solver.linearization
36 |
37 | batch_size = 4
38 | void_objective._batch_size = batch_size
39 | num_rows, num_cols, data, col_ind, row_ptr = _build_sparse_mat(batch_size)
40 | linearization.num_rows = num_rows
41 | linearization.num_cols = num_cols
42 | linearization.A_val = data
43 | linearization.A_col_ind = col_ind
44 | linearization.A_row_ptr = row_ptr
45 | linearization.b = torch.randn((batch_size, num_rows))
46 | # Only need this line for the test since the objective is a mock
47 | solver._symbolic_cholesky_decomposition = analyze_AAt(
48 | linearization.structure().mock_csc_transpose()
49 | )
50 |
51 | solved_x = solver.solve()
52 |
53 | # also check that damping is being overridden via kwargs correctly
54 | other_damping = 0.3
55 | solved_x_other_damping = solver.solve(damping=other_damping)
56 |
57 | for i in range(batch_size):
58 | csrAi = linearization.structure().csr_straight(linearization.A_val[i, :])
59 | Ai = torch.Tensor(csrAi.todense())
60 | ata = Ai.T @ Ai
61 | b = linearization.b[i]
62 | atb = torch.Tensor(csrAi.transpose() @ b)
63 |
64 | def _check_correctness(solved_x_, damping_):
65 | # the linear system solved is with matrix (AtA + damping*I)
66 | atb_check = ata @ solved_x_[i] + damping_ * solved_x_[i]
67 | max_offset = torch.norm(atb - atb_check, p=float("inf"))
68 | assert max_offset < 1e-4
69 |
70 | _check_correctness(solved_x, damping)
71 | _check_correctness(solved_x_other_damping, other_damping)
72 |
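In matrix form, _check_correctness verifies that each batch solution satisfies the damped normal equations, with \lambda the damping value passed to the solver:

    (A_i^\top A_i + \lambda I)\, x_i = A_i^\top b_i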
--------------------------------------------------------------------------------
/tests/theseus_tests/optimizer/nonlinear/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
--------------------------------------------------------------------------------
/tests/theseus_tests/optimizer/nonlinear/test_dogleg.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import pytest # noqa: F401
7 |
8 | import theseus as th
9 |
10 | from theseus.constants import __FROM_THESEUS_LAYER_TOKEN__
11 | from tests.theseus_tests.optimizer.nonlinear.common import (
12 | run_nonlinear_least_squares_check,
13 | )
14 |
15 |
16 | def test_dogleg():
17 | run_nonlinear_least_squares_check(
18 | th.Dogleg,
19 | {__FROM_THESEUS_LAYER_TOKEN__: True},
20 | )
21 |
--------------------------------------------------------------------------------
/tests/theseus_tests/optimizer/nonlinear/test_gauss_newton.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import pytest # noqa: F401
7 |
8 | import theseus as th
9 |
10 | from theseus.constants import __FROM_THESEUS_LAYER_TOKEN__
11 | from .common import run_nonlinear_least_squares_check
12 |
13 |
14 | def test_gauss_newton():
15 | run_nonlinear_least_squares_check(
16 | th.GaussNewton, {__FROM_THESEUS_LAYER_TOKEN__: True}
17 | )
18 |
--------------------------------------------------------------------------------
/tests/theseus_tests/optimizer/nonlinear/test_info.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import pytest # noqa: F401
7 | import torch
8 |
9 | import theseus as th
10 |
11 |
12 | @pytest.mark.parametrize(
13 | "var_type", [(th.Vector, 1), (th.Vector, 2), (th.SE2, None), (th.SE3, None)]
14 | )
15 | @pytest.mark.parametrize("batch_size", [1, 10])
16 | def test_state_history(var_type, batch_size):
17 | cls_, dof = var_type
18 |
19 | rand_args = (batch_size, dof) if cls_ == th.Vector else (batch_size,)
20 | v1 = cls_(tensor=cls_.rand(*rand_args).tensor, name="v1")
21 | v2 = cls_(tensor=cls_.rand(*rand_args).tensor, name="v2")
22 | w = th.ScaleCostWeight(1.0)
23 |
24 | objective = th.Objective()
25 | objective.add(th.Difference(v1, v2, w))
26 |
27 | max_iters = 10
28 | optimizer = th.GaussNewton(objective, max_iterations=max_iters)
29 | layer = th.TheseusLayer(optimizer)
30 |
31 | _, info = layer.forward(optimizer_kwargs={"track_state_history": True})
32 |
33 | for var in objective.optim_vars.values():
34 | assert var.name in info.state_history
35 | assert info.state_history[var.name].shape == (objective.batch_size,) + tuple(
36 | var.shape[1:]
37 | ) + (max_iters + 1,)
38 |
39 |
40 | @pytest.mark.parametrize("batch_size", [1, 10])
41 | def test_track_best_solution_matrix_vars(batch_size):
42 | x = th.SO3(name="x")
43 | y = th.SO3(name="y")
44 | objective = th.Objective()
45 | objective.add(th.Difference(x, y, th.ScaleCostWeight(1.0), name="cf"))
46 | optim = th.LevenbergMarquardt(objective, vectorize=True)
47 | objective.update({"x": torch.randn(batch_size, 3, 3)})
48 | info = optim.optimize(track_best_solution=True, backward_mode="implicit")
49 | assert info.best_solution["x"].shape == (batch_size, 3, 3)
50 | # Call to optimize with track_best_solution=True used to fail
51 | # when tracking matrix vars (e.g., SO3/SE3)
52 |
--------------------------------------------------------------------------------
/tests/theseus_tests/optimizer/nonlinear/test_levenberg_marquardt.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | import pytest # noqa: F401
6 | import torch
7 |
8 | import theseus as th
9 |
10 | from theseus.constants import __FROM_THESEUS_LAYER_TOKEN__
11 | from tests.theseus_tests.optimizer.nonlinear.common import (
12 | run_nonlinear_least_squares_check,
13 | )
14 |
15 |
16 | @pytest.fixture
17 | def mock_objective():
18 | objective = th.Objective()
19 | v1 = th.Vector(1, name="v1")
20 | v2 = th.Vector(1, name="v2")
21 | objective.add(th.Difference(v1, v2, th.ScaleCostWeight(1.0)))
22 | return objective
23 |
24 |
25 | @pytest.mark.parametrize("damping", [0.0, 0.001, 0.01, 0.1])
26 | @pytest.mark.parametrize("ellipsoidal_damping", [True, False])
27 | @pytest.mark.parametrize("adaptive_damping", [True, False])
28 | def test_levenberg_marquardt(damping, ellipsoidal_damping, adaptive_damping):
29 | run_nonlinear_least_squares_check(
30 | th.LevenbergMarquardt,
31 | {
32 | "damping": damping,
33 | "ellipsoidal_damping": ellipsoidal_damping,
34 | "adaptive_damping": adaptive_damping,
35 | "damping_eps": 0.0,
36 | __FROM_THESEUS_LAYER_TOKEN__: True,
37 | },
38 | singular_check=damping < 0.001,
39 | )
40 |
41 |
42 | def test_ellipsoidal_damping_compatibility(mock_objective):
43 | mock_objective.update({"v1": torch.ones(1, 1), "v2": torch.zeros(1, 1)})
44 | for lsc in [th.LUDenseSolver, th.CholeskyDenseSolver]:
45 | optimizer = th.LevenbergMarquardt(mock_objective, lsc)
46 | optimizer.optimize(
47 | **{"ellipsoidal_damping": True, __FROM_THESEUS_LAYER_TOKEN__: True}
48 | )
49 | optimizer.optimize(**{"damping_eps": 0.1, __FROM_THESEUS_LAYER_TOKEN__: True})
50 |
51 | for lsc in [th.CholmodSparseSolver]:
52 | optimizer = th.LevenbergMarquardt(mock_objective, lsc)
53 | with pytest.raises(RuntimeError):
54 | optimizer.optimize(
55 | **{"ellipsoidal_damping": True, __FROM_THESEUS_LAYER_TOKEN__: True}
56 | )
57 | with pytest.raises(RuntimeError):
58 | optimizer.optimize(
59 | **{"damping_eps": 0.1, __FROM_THESEUS_LAYER_TOKEN__: True}
60 | )
61 |
--------------------------------------------------------------------------------
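For reference, a hedged sketch of how the damping options tested above are passed from user code through TheseusLayer (end users supply optimizer_kwargs; the __FROM_THESEUS_LAYER_TOKEN__ used in the tests is internal plumbing):

    import torch
    import theseus as th

    v1 = th.Vector(1, name="v1")
    v2 = th.Vector(1, name="v2")
    objective = th.Objective()
    objective.add(th.Difference(v1, v2, th.ScaleCostWeight(1.0)))
    objective.update({"v1": torch.ones(1, 1), "v2": torch.zeros(1, 1)})
    layer = th.TheseusLayer(th.LevenbergMarquardt(objective, max_iterations=10))
    values, info = layer.forward(
        optimizer_kwargs={
            "damping": 0.01,              # base LM damping
            "ellipsoidal_damping": True,  # scale damping by the Hessian diagonal
            "adaptive_damping": True,     # adjust damping based on step quality
        }
    )
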
/tests/theseus_tests/optimizer/nonlinear/test_trust_region.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | import pytest
6 |
7 | import theseus as th
8 | import torch
9 |
10 |
11 | # Passing different linear_solver_cls to test with both dense/sparse linearization
12 | # Also test sparse [cpu/gpu] because they go through different custom backends
13 | @pytest.mark.parametrize("dof", [1, 8])
14 | @pytest.mark.parametrize(
15 | "linear_solver_cls",
16 | [th.CholeskyDenseSolver, th.CholmodSparseSolver, th.LUCudaSparseSolver],
17 | )
18 | def test_rho(dof, linear_solver_cls):
19 | device = "cuda:0" if torch.cuda.is_available() else "cpu"
20 | rng = torch.Generator(device=device)
21 | rng.manual_seed(0)
22 | if device == "cpu" and linear_solver_cls == th.LUCudaSparseSolver:
23 | return
24 | if device == "cuda:0" and linear_solver_cls == th.CholmodSparseSolver:
25 | return
26 |
27 | def _rand_v():
28 | return torch.randn(1, dof, generator=rng, device=device)
29 |
30 | def _rand_w():
31 | return th.ScaleCostWeight(torch.randn(1, generator=rng, device=device))
32 |
33 |     n_vars = 16
34 |     # This test checks that rho = 1 for a simple linear problem:
35 |     # min sum_i (x[i] - t[i]) ** 2 + sum_{i>0} ((x[i-1] - x[i]) - m[i]) ** 2
36 |     vs = [th.Vector(tensor=_rand_v(), name=f"x{i}") for i in range(n_vars)]
37 |     o = th.Objective()
38 |     for i in range(n_vars):
39 |         t = th.Vector(tensor=_rand_v(), name=f"t{i}")
40 |         o.add(th.Difference(vs[i], t, _rand_w(), name=f"diff{i}"))
41 |         if i > 0:
42 |             m = th.Vector(tensor=_rand_v(), name=f"m{i}")
43 |             o.add(th.Between(vs[i], vs[i - 1], m, _rand_w(), name=f"btw{i}"))
44 |
45 |     o.to(device=device)
46 |     # This is testing the TrustRegion base class rather than Dogleg itself;
47 |     # Dogleg is used because it's the only subclass of TrustRegion atm
48 |     opt = th.Dogleg(o, linear_solver_cls=linear_solver_cls)
49 |     o._resolve_batch_size()
50 |     opt.linear_solver.linearization.linearize()
51 |     previous_err = opt.objective.error_metric()
52 |
53 |     # Check rho = 1. The error predicted by the TrustRegion method should
54 |     # match the actual error after the step for a linear problem
55 |     for _ in range(100):
56 |         delta = torch.randn(1, dof * n_vars, device=device, generator=rng)
57 |         _, new_err = opt._compute_retracted_tensors_and_error(
58 |             delta, torch.zeros_like(delta[:, 0]), False
59 |         )
60 |         rho = opt._compute_rho(delta, previous_err, new_err)
61 |         torch.testing.assert_close(rho, torch.ones_like(rho), atol=1e-3, rtol=1e-3)
62 |
--------------------------------------------------------------------------------
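The quantity checked above is the standard trust-region gain ratio, comparing the actual cost reduction against the reduction predicted by the quadratic model built from the linearization:

    \rho = \frac{F(x) - F(x \oplus \delta)}{m(0) - m(\delta)},
    \qquad m(\delta) = \tfrac{1}{2}\,\lVert A\delta - b\rVert^2

For a linear least-squares problem the quadratic model is exact, so any step delta, even a random one as in the test, yields rho = 1 up to numerical tolerance.
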
/tests/theseus_tests/optimizer/test_dense_linearization.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import pytest # noqa: F401
7 | import torch # noqa: F401
8 |
9 | import theseus as th
10 | from tests.theseus_tests.optimizer.linearization_test_utils import (
11 | build_test_objective_and_linear_system,
12 | )
13 |
14 |
15 | def test_dense_linearization():
16 | objective, ordering, A, b = build_test_objective_and_linear_system()
17 |
18 | linearization = th.DenseLinearization(objective, ordering=ordering)
19 | linearization.linearize()
20 |
21 | assert b.isclose(linearization.b).all()
22 |
23 | assert A.isclose(linearization.A).all()
24 |
25 | batch_size = A.shape[0]
26 |
27 | for i in range(batch_size):
28 | ata = A[i].t() @ A[i]
29 | atb = (A[i].t() @ b[i]).unsqueeze(1)
30 | assert ata.isclose(linearization.AtA[i]).all()
31 | assert atb.isclose(linearization.Atb[i]).all()
32 |
--------------------------------------------------------------------------------
/tests/theseus_tests/optimizer/test_sparse_linearization.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import pytest # noqa: F401
7 | import torch # noqa: F401
8 |
9 | import theseus as th
10 | from tests.theseus_tests.optimizer.linearization_test_utils import (
11 | build_test_objective_and_linear_system,
12 | )
13 |
14 |
15 | def test_sparse_linearization():
16 | objective, ordering, A, b = build_test_objective_and_linear_system()
17 |
18 | linearization = th.SparseLinearization(objective, ordering=ordering)
19 | linearization.linearize()
20 |
21 | assert b.isclose(linearization.b).all()
22 |
23 | batch_size = A.shape[0]
24 |
25 | for i in range(batch_size):
26 | csrAi = linearization.structure().csr_straight(linearization.A_val[i, :])
27 | torch.testing.assert_close(A[i], torch.Tensor(csrAi.todense()))
28 |
29 | for i in range(batch_size):
30 | torch.testing.assert_close(b[i], linearization.b[i])
31 |
32 | # Test Atb result
33 | atb_expected = A.transpose(1, 2).bmm(b.unsqueeze(2))
34 | atb_out = linearization.Atb
35 | torch.testing.assert_close(atb_expected, atb_out)
36 |
37 | # Test Av() with a random v
38 | rng = torch.Generator()
39 | rng.manual_seed(1009)
40 | for _ in range(20):
41 |         v = torch.randn(A.shape[0], A.shape[2], 1, generator=rng)
42 | av_expected = A.bmm(v).squeeze(2)
43 | av = linearization.Av(v.squeeze(2))
44 | torch.testing.assert_close(av_expected, av)
45 |
46 | v = v.squeeze(2)
47 | scaled_v_expected = (A.transpose(1, 2).bmm(A)).diagonal(dim1=1, dim2=2) * v
48 | scaled_v = linearization.diagonal_scaling(v)
49 | torch.testing.assert_close(scaled_v_expected, scaled_v)
50 |
--------------------------------------------------------------------------------
/tests/theseus_tests/optimizer/test_variable_ordering.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import itertools
7 | import random
8 |
9 | import pytest # noqa: F401
10 | import torch
11 |
12 | import theseus as th
13 | from tests.theseus_tests.core.common import MockCostFunction, MockCostWeight, MockVar
14 |
15 |
16 | def test_default_variable_ordering():
17 | # repeat test a few times with different inputs
18 | for _ in range(5):
19 | # vary the number of variables to have in the objective
20 | for num_variables in range(2, 11):
21 | # generate all possible 2-variable cost functions, then shuffle their add order
22 | variables = []
23 | for i in range(num_variables):
24 | variables.append(MockVar(1, tensor=None, name=f"var{i}"))
25 | variable_pairs = [c for c in itertools.combinations(variables, 2)]
26 | random.shuffle(variable_pairs)
27 |
28 | # add the cost function to the objective and store the order of variable addition
29 | expected_variable_order = []
30 | objective = th.Objective()
31 | cost_weight = MockCostWeight(
32 | th.Variable(torch.ones(1), name="cost_weight_aux")
33 | )
34 | for var1, var2 in variable_pairs:
35 | cost_function = MockCostFunction([var1, var2], [], cost_weight)
36 | if var1 not in expected_variable_order:
37 | expected_variable_order.append(var1)
38 | if var2 not in expected_variable_order:
39 | expected_variable_order.append(var2)
40 | objective.add(cost_function)
41 |
42 |             # check that the default variable order matches the expected order
43 | default_order = th.VariableOrdering(objective)
44 | for i, var in enumerate(expected_variable_order):
45 | assert i == default_order.index_of(var.name)
46 |
47 |
48 | def test_variable_ordering_append_and_remove():
49 | variables = [MockVar(1, tensor=None, name=f"var{i}") for i in range(50)]
50 | mock_objective = th.Objective()
51 | mock_objective.optim_vars = dict([(var.name, var) for var in variables])
52 | # repeat a bunch of times with different order
53 | for _ in range(100):
54 | random.shuffle(variables)
55 | order = th.VariableOrdering(mock_objective, default_order=False)
56 | for v in variables:
57 | order.append(v)
58 | for i, v in enumerate(variables):
59 | assert i == order.index_of(v.name)
60 | assert v == order[i]
61 | assert order.complete
62 |
63 | random.shuffle(variables)
64 | for v in variables:
65 | order.remove(v)
66 | assert not order.complete
67 | assert v not in order._var_order
68 | assert v.name not in order._var_name_to_index
69 |
70 |
71 | def test_variable_ordering_iterator():
72 | variables = [MockVar(1, tensor=None, name=f"var{i}") for i in range(50)]
73 | mock_objective = th.Objective()
74 | mock_objective.optim_vars = dict([(var.name, var) for var in variables])
75 | order = th.VariableOrdering(mock_objective, default_order=False)
76 | for v in variables:
77 | order.append(v)
78 |
79 | i = 0
80 | for v in order:
81 | assert v == variables[i]
82 | i += 1
83 |
--------------------------------------------------------------------------------
/tests/theseus_tests/test_misc.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | import pytest
6 |
7 | from theseus._version import lt_version
8 |
9 |
10 | def test_lt_version():
11 | assert not lt_version("2.0.0", "0.4.0")
12 | assert not lt_version("1.13.0abcd", "0.4.0")
13 | assert not lt_version("0.4.1+yzx", "0.4.0")
14 | assert lt_version("1.13.0.1.2.3.4", "2.0.0")
15 | assert lt_version("1.13.0.1.2+abc", "2.0.0")
16 |     with pytest.raises(ValueError):
17 |         lt_version("1.2", "0.4.0")
18 |     with pytest.raises(ValueError):
19 |         lt_version("1", "0.4.0")
20 |     with pytest.raises(ValueError):
21 |         lt_version("1.", "0.4.0")
22 |
--------------------------------------------------------------------------------
/tests/theseus_tests/test_pgo_benchmark.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import pytest
7 | import torch
8 | from omegaconf import OmegaConf
9 |
10 | import examples.pose_graph.pose_graph_synthetic as pgo
11 |
12 | from tests.theseus_tests.decorators import run_if_baspacho
13 |
14 |
15 | @pytest.fixture
16 | def default_cfg():
17 | cfg = OmegaConf.load("examples/configs/pose_graph/pose_graph_synthetic.yaml")
18 | cfg.outer_optim.num_epochs = 1
19 | cfg.outer_optim.max_num_batches = 4
20 | cfg.batch_size = 16
21 | cfg.num_poses = 64
22 | cfg.profile = False
23 | cfg.savemat = False
24 | cfg.inner_optim.optimizer_kwargs.verbose = False
25 | return cfg
26 |
27 |
28 | @pytest.mark.parametrize(
29 | "linear_solver_cls",
30 | ["CholeskyDenseSolver", "LUCudaSparseSolver", "CholmodSparseSolver"],
31 | )
32 | def test_pgo_losses(default_cfg, linear_solver_cls):
33 | # for everything except cholmod (need to turn off adaptive damping for that one)
34 | expected_losses = [
35 | -0.29886279606812166,
36 | -0.3054215856589109,
37 | -0.27485602196709225,
38 | -0.3005231105990632,
39 | ]
40 |
41 | default_cfg.inner_optim.linear_solver_cls = linear_solver_cls
42 | if linear_solver_cls == "LUCudaSparseSolver":
43 | if not torch.cuda.is_available():
44 | return
45 | default_cfg.device = "cuda:0"
46 | else:
47 | if linear_solver_cls == "CholmodSparseSolver":
48 | default_cfg.inner_optim.optimizer_kwargs.adaptive_damping = False
49 | expected_losses = [
50 | -0.2988627961673474,
51 | -0.30542158576120654,
52 | -0.27485602213117594,
53 | -0.3005231108739672,
54 | ]
55 | default_cfg.device = "cpu"
56 | losses = pgo.run(default_cfg)
57 | print(losses)
58 |
59 | for loss, expected_loss in zip(losses[0], expected_losses):
60 | assert loss == pytest.approx(expected_loss, rel=1e-10, abs=1e-10)
61 |
62 |
63 | @run_if_baspacho()
64 | def test_pgo_losses_baspacho(default_cfg):
65 | # for everything except cholmod (need to turn off adaptive damping for that one)
66 | expected_losses = [
67 | -0.2988627960682926,
68 | -0.30542158565900696,
69 | -0.27485602196705955,
70 | -0.3005231105991407,
71 | ]
72 |
73 | default_cfg.inner_optim.linear_solver_cls = "BaspachoSparseSolver"
74 | default_cfg.device = "cuda:0" if torch.cuda.is_available() else "cpu"
75 | losses = pgo.run(default_cfg)
76 | print(losses)
77 |
78 | for loss, expected_loss in zip(losses[0], expected_losses):
79 | assert loss == pytest.approx(expected_loss, rel=1e-10, abs=1e-10)
80 |
--------------------------------------------------------------------------------
/tests/torchkin_tests/panda_fk_dataset.json:
--------------------------------------------------------------------------------
1 | {
2 | "joint_states": [
3 | [0.2896690950055305, 0.7887120656281918, 0.6098184262843489, -1.3822266904213196, -0.45304768976307175, 2.437887624445399, -0.37036997092492996],
4 | [2.3248593412410172, 1.6994167497883723, -0.6916813389840697, -0.5851990245348841, 0.17146823279768597, 2.1335270160528377, 2.5255755709563092],
5 | [-2.545557823442099, -1.513253690733913, -2.847119985909619, -0.453153780710263, 1.6506377914866026, 3.314099494785764, 2.840216966477668]
6 | ],
7 | "ee_poses": [
8 | [[0.5618914365768433, 0.5131404399871826, 0.34427565336227417], [0.7712864279747009, 0.6026210188865662, 0.19384866952896118, -0.06624171137809753]],
9 | [[-0.3478390574455261, 0.671889066696167, -0.023775160312652588], [0.952156126499176, -0.1261112540960312, 0.22165635228157043, -0.16841386258602142]],
10 | [[0.6701399683952332, 0.41195011138916016, 0.1345270723104477], [0.4187312126159668, 0.7321641445159912, 0.47301992774009705, 0.2546604871749878]]
11 | ],
12 | "ee_name": "panda_virtual_ee_link"
13 | }
14 |
--------------------------------------------------------------------------------
/tests/torchlie_tests/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
--------------------------------------------------------------------------------
/tests/torchlie_tests/functional/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
--------------------------------------------------------------------------------
/tests/torchlie_tests/test_misc.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | import pytest
6 | import torch
7 |
8 | from torchlie import reset_global_params, set_global_params
9 | from torchlie.functional import SE3, SO3, enable_checks
10 |
11 |
12 | @pytest.mark.parametrize("dtype", ["float32", "float64"])
13 | def test_global_options(dtype):
14 | rng = torch.Generator()
15 | rng.manual_seed(0)
16 | g = SE3.rand(1, generator=rng, dtype=getattr(torch, dtype))
17 | r1 = SE3.log(g)
18 | set_global_params({f"so3_near_zero_eps_{dtype}": 100.0})
19 | r2 = SE3.log(g)
20 | assert not torch.allclose(r1, r2)
21 | set_global_params({f"so3_near_pi_eps_{dtype}": 100.0})
22 | r3 = SE3.log(g)
23 |
24 | assert not torch.allclose(r2, r3)
25 | with enable_checks():
26 | fake_hat_input = torch.randn(4, 4, dtype=getattr(torch, dtype))
27 | with pytest.raises(ValueError):
28 | SE3.check_hat_tensor(fake_hat_input)
29 | set_global_params({f"so3_hat_eps_{dtype}": 1000.0})
30 | set_global_params({f"se3_hat_eps_{dtype}": 1000.0})
31 | SE3.check_hat_tensor(fake_hat_input)
32 |
33 | fake_so3_matrix = torch.randn(3, 3, dtype=getattr(torch, dtype))
34 | with pytest.raises(ValueError):
35 | SO3.check_group_tensor(fake_so3_matrix)
36 | set_global_params({f"so3_matrix_eps_{dtype}": 1000.0})
37 | SO3.check_group_tensor(fake_so3_matrix)
38 |
39 | with enable_checks():
40 | set_global_params({f"so3_quat_eps_{dtype}": 0.0})
41 |         fake_quat = torch.randn(4, dtype=getattr(torch, dtype))
42 |         with pytest.raises(ValueError):
43 |             SO3.check_unit_quaternion(fake_quat)
44 |         set_global_params({f"so3_quat_eps_{dtype}": 1000.0})
45 |         SO3.check_unit_quaternion(fake_quat)
46 |
47 | reset_global_params()
48 |
--------------------------------------------------------------------------------
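A short illustration of the knobs exercised above (a sketch, not library code): the *_eps_{dtype} parameters control where the implementations switch to their near-zero/near-pi branches, so widening them changes numerical results:

    import torch
    from torchlie import reset_global_params, set_global_params
    from torchlie.functional import SO3

    g = SO3.rand(1, dtype=torch.float64)
    r_before = SO3.log(g)
    set_global_params({"so3_near_zero_eps_float64": 100.0})
    r_after = SO3.log(g)
    print(torch.allclose(r_before, r_after))  # typically False, as the test asserts
    reset_global_params()
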
/theseus/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | from ._version import __version__
6 |
7 | from .constants import DeviceType as DeviceType
8 | from .global_params import set_global_params # usort: skip
9 |
10 | from .core import ( # usort: skip
11 | AutoDiffCostFunction,
12 | AutogradMode,
13 | CostFunction,
14 | CostWeight,
15 | DiagonalCostWeight,
16 | GemanMcClureLoss,
17 | GNCRobustCostFunction,
18 | GNCRobustLoss,
19 | HingeLoss,
20 | HuberLoss,
21 | Objective,
22 | RobustCostFunction,
23 | RobustLoss,
24 | ScaleCostWeight,
25 | Variable,
26 | Vectorize,
27 | WelschLoss,
28 | as_variable,
29 | masked_jacobians,
30 | masked_variables,
31 | )
32 | from .geometry import ( # usort: skip
33 | LieGroup,
34 | LieGroupTensor,
35 | Manifold,
36 | Point2,
37 | Point3,
38 | SE2,
39 | SE3,
40 | SO2,
41 | SO3,
42 | Vector,
43 | adjoint,
44 | between,
45 | compose,
46 | enable_lie_group_check,
47 | enable_lie_tangent,
48 | exp_map,
49 | inverse,
50 | local,
51 | log_map,
52 | no_lie_group_check,
53 | no_lie_tangent,
54 | rand_point2,
55 | rand_point3,
56 | rand_se2,
57 | rand_se3,
58 | rand_so2,
59 | rand_so3,
60 | rand_vector,
61 | randn_point2,
62 | randn_point3,
63 | randn_se2,
64 | randn_se3,
65 | randn_so2,
66 | randn_so3,
67 | randn_vector,
68 | retract,
69 | set_lie_group_check_enabled,
70 | set_lie_tangent_enabled,
71 | )
72 | from .optimizer import ( # usort: skip
73 | DenseLinearization,
74 | Linearization,
75 | ManifoldGaussian,
76 | OptimizerInfo,
77 | SparseLinearization,
78 | VariableOrdering,
79 | local_gaussian,
80 | retract_gaussian,
81 | )
82 | from .optimizer.linear import ( # usort: skip
83 | BaspachoSparseSolver,
84 | CholeskyDenseSolver,
85 | CholmodSparseSolver,
86 | DenseSolver,
87 | LinearOptimizer,
88 | LinearSolver,
89 | LUCudaSparseSolver,
90 | LUDenseSolver,
91 | )
92 | from .optimizer.nonlinear import ( # usort: skip
93 | BackwardMode,
94 | DCEM,
95 | Dogleg,
96 | GaussNewton,
97 | LevenbergMarquardt,
98 | NonlinearLeastSquares,
99 | NonlinearOptimizerInfo,
100 | NonlinearOptimizerParams,
101 | NonlinearOptimizerStatus,
102 | )
103 | from .theseus_layer import TheseusLayer # usort: skip
104 |
105 | import theseus.embodied as eb # usort: skip
106 |
107 | # Aliases for some standard cost functions
108 | Difference = eb.Local
109 | Between = eb.Between
110 | Local = eb.Local
111 |
--------------------------------------------------------------------------------
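The aliases at the bottom make th.Difference and th.Local the same cost function (eb.Local). A minimal sketch using the alias (values are illustrative):

    import theseus as th

    x = th.SE2(tensor=th.SE2.rand(4).tensor, name="x")
    target = th.SE2(tensor=th.SE2.rand(4).tensor, name="target")
    objective = th.Objective()
    objective.add(th.Difference(x, target, th.ScaleCostWeight(1.0)))
    objective.update()  # resolves the batch size from the variable tensors
    print(objective.error_metric())  # one squared-error value per batch element
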
/theseus/_version.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | import re
6 | import warnings
7 | from typing import Tuple
8 |
9 | import torch
10 |
11 |
12 | # Returns True/False if version string v1 is less than version string v2
13 | def lt_version(v1: str, v2: str) -> bool:
14 | def _as_tuple(s: str) -> Tuple[int, int, int]:
15 | pattern = r"^[\d.]+"
16 | match = re.match(pattern, s)
17 | try:
18 | return tuple(int(x) for x in match.group().split(".")[:3]) # type: ignore
19 | except Exception:
20 | raise ValueError(
21 |                 f"String {s} cannot be converted to (major, minor, micro) format."
22 | )
23 |
24 | x1, y1, z1 = _as_tuple(v1)
25 | x2, y2, z2 = _as_tuple(v2)
26 | return x1 < x2 or (x1 == x2 and y1 < y2) or (x1 == x2 and y1 == y2 and z1 < z2)
27 |
28 |
29 | if lt_version(torch.__version__, "2.0.0"):
30 | warnings.warn(
31 | "Using torch < 2.0 for theseus is deprecated and compatibility will be "
32 | "discontinued in future releases.",
33 | FutureWarning,
34 | )
35 |
36 | __version__ = "0.2.3"
37 |
--------------------------------------------------------------------------------
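Two worked checks of lt_version's semantics, following directly from _as_tuple above (which keeps only the leading (major, minor, micro) digits):

    from theseus._version import lt_version

    assert lt_version("1.13.0+cu117", "2.0.0")  # suffix after the digits is ignored
    assert not lt_version("2.0.1", "2.0.0")     # (2, 0, 1) is not less than (2, 0, 0)
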
/theseus/constants.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | import math
6 | from typing import Optional, Union
7 |
8 | import torch
9 |
10 | TEST_EPS = 5e-7
11 | EPS = 1e-10
12 | PI = math.pi
13 |
14 | __FROM_THESEUS_LAYER_TOKEN__ = "__FROM_THESEUS_LAYER_TOKEN__"
15 |
16 |
17 | def _CHECK_DTYPE_SUPPORTED(dtype):
18 | if dtype not in [torch.float32, torch.float64]:
19 | raise ValueError(
20 | f"Unsupported data type {dtype}. "
21 | "Theseus only supports 32- and 64-bit tensors."
22 | )
23 |
24 |
25 | DeviceType = Optional[Union[str, torch.device]]
26 |
--------------------------------------------------------------------------------
/theseus/core/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .cost_function import (
7 | AutoDiffCostFunction,
8 | AutogradMode,
9 | CostFunction,
10 | ErrFnType,
11 | masked_jacobians,
12 | )
13 | from .cost_weight import CostWeight, DiagonalCostWeight, ScaleCostWeight
14 | from .objective import Objective
15 | from .robust_cost_function import GNCRobustCostFunction, RobustCostFunction
16 | from .robust_loss import (
17 | GemanMcClureLoss,
18 | GNCRobustLoss,
19 | HingeLoss,
20 | HuberLoss,
21 | RobustLoss,
22 | WelschLoss,
23 | )
24 | from .variable import Variable, as_variable, masked_variables
25 | from .vectorizer import Vectorize
26 |
--------------------------------------------------------------------------------
/theseus/embodied/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .collision import Collision2D, EffectorObjectContactPlanar, SignedDistanceField2D
7 | from .kinematics import IdentityModel, KinematicsModel, UrdfRobotModel
8 | from .measurements import Between, MovingFrameBetween, Reprojection
9 | from .misc import Local
10 | from .motionmodel import (
11 | DoubleIntegrator,
12 | GPCostWeight,
13 | GPMotionModel,
14 | HingeCost,
15 | Nonholonomic,
16 | QuasiStaticPushingPlanar,
17 | )
18 |
--------------------------------------------------------------------------------
/theseus/embodied/collision/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .collision import Collision2D
7 | from .eff_obj_contact import EffectorObjectContactPlanar
8 | from .signed_distance_field import SignedDistanceField2D
9 |
--------------------------------------------------------------------------------
/theseus/embodied/kinematics/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .kinematics_model import IdentityModel, KinematicsModel, UrdfRobotModel
7 |
--------------------------------------------------------------------------------
/theseus/embodied/measurements/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .between import Between
7 | from .moving_frame_between import MovingFrameBetween
8 | from .reprojection import Reprojection
9 |
--------------------------------------------------------------------------------
/theseus/embodied/measurements/between.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from typing import List, Optional, Tuple
7 |
8 | import torch
9 |
10 | from theseus.core import CostFunction, CostWeight
11 | from theseus.geometry import LieGroup, between
12 |
13 |
14 | class Between(CostFunction):
15 | def __init__(
16 | self,
17 | v0: LieGroup,
18 | v1: LieGroup,
19 | measurement: LieGroup,
20 | cost_weight: CostWeight,
21 | name: Optional[str] = None,
22 | ):
23 |         if not isinstance(v0, v1.__class__) or not isinstance(
24 |             v0, measurement.__class__
25 |         ):
26 |             raise ValueError("Inconsistent types between variables and measurement.")
27 |         super().__init__(cost_weight, name=name)
28 |         self.v0 = v0
29 |         self.v1 = v1
30 |         self.register_optim_vars(["v0", "v1"])
31 |         self.measurement = measurement
32 |         self.register_aux_vars(["measurement"])
33 |
34 | def error(self) -> torch.Tensor:
35 | var_diff = between(self.v0, self.v1)
36 | return self.measurement.local(var_diff)
37 |
38 | def jacobians(self) -> Tuple[List[torch.Tensor], torch.Tensor]:
39 | Jlist: List[torch.Tensor] = []
40 | var_diff = between(self.v0, self.v1)
41 | log_jac: List[torch.Tensor] = []
42 | error = self.measurement.inverse().compose(var_diff).log_map(jacobians=log_jac)
43 | dlog = log_jac[0]
44 | Jlist.extend([-dlog @ var_diff.inverse().adjoint(), dlog])
45 | return Jlist, error
46 |
47 | def dim(self) -> int:
48 | return self.v0.dof()
49 |
50 | def _copy_impl(self, new_name: Optional[str] = None) -> "Between":
51 | return Between(
52 | self.v0.copy(),
53 | self.v1.copy(),
54 | self.measurement.copy(),
55 | self.weight.copy(),
56 | name=new_name,
57 | )
58 |
--------------------------------------------------------------------------------
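A small numeric sanity check of the error definition above (a sketch with random values): when the measurement equals between(v0, v1) = v0^{-1} . v1, the error vanishes:

    import theseus as th

    v0 = th.SE2(tensor=th.SE2.rand(1).tensor, name="v0")
    v1 = th.SE2(tensor=th.SE2.rand(1).tensor, name="v1")
    meas = th.SE2(tensor=th.between(v0, v1).tensor, name="meas")
    cost = th.Between(v0, v1, meas, th.ScaleCostWeight(1.0))
    print(cost.error().abs().max())  # ~0, up to floating-point error
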
/theseus/embodied/measurements/moving_frame_between.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from typing import List, Optional, Tuple
7 |
8 | import torch
9 |
10 | from theseus.core import CostFunction, CostWeight
11 | from theseus.geometry import LieGroup, between
12 |
13 |
14 | class MovingFrameBetween(CostFunction):
15 | def __init__(
16 | self,
17 | frame1: LieGroup,
18 | frame2: LieGroup,
19 | pose1: LieGroup,
20 | pose2: LieGroup,
21 | measurement: LieGroup,
22 | cost_weight: CostWeight,
23 | name: Optional[str] = None,
24 | ):
25 | seen_classes = set(
26 | [x.__class__.__name__ for x in [frame1, frame2, pose1, pose2, measurement]]
27 | )
28 | if len(seen_classes) > 1:
29 | raise ValueError("Inconsistent types between input variables.")
30 |
31 | super().__init__(cost_weight, name=name)
32 | self.frame1 = frame1
33 | self.frame2 = frame2
34 | self.pose1 = pose1
35 | self.pose2 = pose2
36 | self.register_optim_vars(["frame1", "frame2", "pose1", "pose2"])
37 | self.measurement = measurement
38 | self.register_aux_vars(["measurement"])
39 |
40 | def error(self) -> torch.Tensor:
41 | pose1__frame = between(self.frame1, self.pose1)
42 | pose2__frame = between(self.frame2, self.pose2)
43 | var_diff = between(pose1__frame, pose2__frame)
44 | return self.measurement.local(var_diff)
45 |
46 | def jacobians(self) -> Tuple[List[torch.Tensor], torch.Tensor]:
47 | jacobians_b1: List[torch.Tensor] = []
48 | pose1__frame = between(self.frame1, self.pose1, jacobians=jacobians_b1)
49 | jacobians_b2: List[torch.Tensor] = []
50 | pose2__frame = between(self.frame2, self.pose2, jacobians=jacobians_b2)
51 | jacobians_b_out: List[torch.Tensor] = []
52 | var_diff = between(pose1__frame, pose2__frame, jacobians=jacobians_b_out)
53 | error = self.measurement.local(var_diff)
54 |
55 | JB1_f1, JB1_p1 = jacobians_b1
56 | JB2_f2, JB2_p2 = jacobians_b2
57 | J_Bout_B1, J_Bout_B2 = jacobians_b_out
58 | J_out_f1 = torch.matmul(J_Bout_B1, JB1_f1)
59 | J_out_p1 = torch.matmul(J_Bout_B1, JB1_p1)
60 | J_out_f2 = torch.matmul(J_Bout_B2, JB2_f2)
61 | J_out_p2 = torch.matmul(J_Bout_B2, JB2_p2)
62 |
63 | return [J_out_f1, J_out_f2, J_out_p1, J_out_p2], error
64 |
65 | def dim(self) -> int:
66 | return self.frame1.dof()
67 |
68 | def _copy_impl(self, new_name: Optional[str] = None) -> "MovingFrameBetween":
69 | return MovingFrameBetween(
70 | self.frame1.copy(),
71 | self.frame2.copy(),
72 | self.pose1.copy(),
73 | self.pose2.copy(),
74 | self.measurement.copy(),
75 | self.weight.copy(),
76 | name=new_name,
77 | )
78 |
--------------------------------------------------------------------------------
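The jacobian assembly above is the chain rule through the nested between() calls: writing B1 = between(frame1, pose1), B2 = between(frame2, pose2), and B_out = between(B1, B2), each output jacobian is the outer jacobian composed with the inner one, e.g.

    J^{out}_{frame_1} = \frac{\partial B_{out}}{\partial B_1}
                        \cdot \frac{\partial B_1}{\partial frame_1}

which is exactly the torch.matmul composition in the code, and likewise for frame2, pose1, and pose2.
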
/theseus/embodied/misc/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .local_cost_fn import Local
7 |
--------------------------------------------------------------------------------
/theseus/embodied/misc/local_cost_fn.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from typing import List, Optional, Tuple
7 |
8 | import torch
9 |
10 | from theseus import CostFunction, CostWeight
11 | from theseus.geometry import LieGroup
12 | from theseus.global_params import _THESEUS_GLOBAL_PARAMS
13 |
14 |
15 | class Local(CostFunction):
16 | def __init__(
17 | self,
18 | var: LieGroup,
19 | target: LieGroup,
20 | cost_weight: CostWeight,
21 | name: Optional[str] = None,
22 | ):
23 | super().__init__(cost_weight, name=name)
24 | if not isinstance(var, target.__class__):
25 | raise ValueError(
26 | "Variable for the Local inconsistent with the given target."
27 | )
28 | if not var.dof() == target.dof():
29 | raise ValueError(
30 | "Variable and target in the Local must have identical dof."
31 | )
32 | self.var = var
33 | self.target = target
34 | self.register_optim_vars(["var"])
35 | self.register_aux_vars(["target"])
36 |
37 |         self._jac_cache: Optional[torch.Tensor] = None
38 |
39 | def error(self) -> torch.Tensor:
40 | return self.target.local(self.var)
41 |
42 | def jacobians(self) -> Tuple[List[torch.Tensor], torch.Tensor]:
43 | if _THESEUS_GLOBAL_PARAMS.fast_approx_local_jacobians:
44 | if (
45 | self._jac_cache is not None
46 | and self._jac_cache.shape[0] == self.var.shape[0]
47 | ):
48 | jacobian = self._jac_cache
49 | else:
50 | jacobian = torch.eye(
51 | self.dim(), device=self.var.device, dtype=self.var.dtype
52 | ).repeat(self.var.shape[0], 1, 1)
53 | self._jac_cache = jacobian
54 | return (
55 | [jacobian],
56 | self.target.local(self.var),
57 | )
58 | else:
59 | Jlist: List[torch.Tensor] = []
60 | error = self.target.local(self.var, jacobians=Jlist)
61 | return [Jlist[1]], error
62 |
63 | def dim(self) -> int:
64 | return self.var.dof()
65 |
66 | def _copy_impl(self, new_name: Optional[str] = None) -> "Local":
67 | return Local( # type: ignore
68 | self.var.copy(), self.target.copy(), self.weight.copy(), name=new_name
69 | )
70 |
--------------------------------------------------------------------------------
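A sketch of the switch implemented above (illustrative only; the flag lives in theseus/global_params.py later in this dump): with fast_approx_local_jacobians enabled, jacobians() returns a cached batched identity instead of the exact local() jacobian, which is exact for Vector but an approximation on curved manifolds:

    import theseus as th

    th.set_global_params({"fast_approx_local_jacobians": True})
    var = th.SE2(tensor=th.SE2.rand(2).tensor, name="var")
    target = th.SE2(tensor=th.SE2.rand(2).tensor, name="target")
    jacs, err = th.Local(var, target, th.ScaleCostWeight(1.0)).jacobians()
    print(jacs[0].shape)  # (2, 3, 3): a batched identity, reused across calls
    th.set_global_params({"fast_approx_local_jacobians": False})
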
/theseus/embodied/motionmodel/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .double_integrator import DoubleIntegrator, GPCostWeight, GPMotionModel
7 | from .misc import HingeCost, Nonholonomic
8 | from .quasi_static_pushing_planar import QuasiStaticPushingPlanar
9 |
--------------------------------------------------------------------------------
/theseus/extlib/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
--------------------------------------------------------------------------------
/theseus/extlib/cusolver_sp_defs.cpp:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is licensed under the MIT license found in the
4 | // LICENSE file in the root directory of this source tree.
5 |
6 | #include "cusolver_sp_defs.h"
7 | #include <ATen/cuda/CUDAContext.h>
8 | #include <ATen/cuda/detail/DeviceThreadHandles.h>
9 | #include <c10/cuda/CUDAStream.h>
10 |
11 | // functions defined in this header are inline, so it can be included multiple times
12 | // in independently compiled units (such as Torch extensions formed by one .cu/.cpp file)
13 | namespace theseus::cusolver_sp {
14 |
15 | const char* cusolverGetErrorMessage(cusolverStatus_t status) {
16 | switch (status) {
17 |     case CUSOLVER_STATUS_SUCCESS: return "CUSOLVER_STATUS_SUCCESS";
18 | case CUSOLVER_STATUS_NOT_INITIALIZED: return "CUSOLVER_STATUS_NOT_INITIALIZED";
19 | case CUSOLVER_STATUS_ALLOC_FAILED: return "CUSOLVER_STATUS_ALLOC_FAILED";
20 | case CUSOLVER_STATUS_INVALID_VALUE: return "CUSOLVER_STATUS_INVALID_VALUE";
21 | case CUSOLVER_STATUS_ARCH_MISMATCH: return "CUSOLVER_STATUS_ARCH_MISMATCH";
22 | case CUSOLVER_STATUS_EXECUTION_FAILED: return "CUSOLVER_STATUS_EXECUTION_FAILED";
23 | case CUSOLVER_STATUS_INTERNAL_ERROR: return "CUSOLVER_STATUS_INTERNAL_ERROR";
24 | case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
25 | default: return "Unknown cusolver error number";
26 | }
27 | }
28 |
29 | void createCusolverSpHandle(cusolverSpHandle_t *handle) {
30 | CUSOLVER_CHECK(cusolverSpCreate(handle));
31 | }
32 |
33 | // The #ifdef below looks weird, but we are adopting the same policy as for the CusolverDn handle in the Torch source
34 | void destroyCusolverSpHandle(cusolverSpHandle_t handle) {
35 | // this is because of something dumb in the ordering of
36 | // destruction. Sometimes atexit, the cuda context (or something)
37 | // would already be destroyed by the time this gets destroyed. It
38 | // happens in fbcode setting. @colesbury and @soumith decided to not destroy
39 | // the handle as a workaround.
40 | // - Comments of @soumith copied from cuDNN handle pool implementation
41 | #ifdef NO_CUDNN_DESTROY_HANDLE
42 | #else
43 | cusolverSpDestroy(handle);
44 | #endif
45 | }
46 |
47 | using CuSolverSpPoolType = at::cuda::DeviceThreadHandlePool<cusolverSpHandle_t, createCusolverSpHandle, destroyCusolverSpHandle>;
48 |
49 | cusolverSpHandle_t getCurrentCUDASolverSpHandle() {
50 | int device;
51 | AT_CUDA_CHECK(cudaGetDevice(&device));
52 |
53 | // Thread local PoolWindows are lazily-initialized
54 | // to avoid initialization issues that caused hangs on Windows.
55 | // See: https://github.com/pytorch/pytorch/pull/22405
56 |   // This thread-local unique_ptr will be destroyed when the thread terminates,
57 |   // releasing its reserved handles back to the pool.
58 |   static auto pool = std::make_shared<CuSolverSpPoolType>();
59 |   thread_local std::unique_ptr<CuSolverSpPoolType::PoolWindow> myPoolWindow(pool->newPoolWindow());
60 |
61 | auto handle = myPoolWindow->reserve(device);
62 | auto stream = c10::cuda::getCurrentCUDAStream();
63 | CUSOLVER_CHECK(cusolverSpSetStream(handle, stream));
64 | return handle;
65 | }
66 |
67 | } // namespace theseus::cusolver_sp
68 |
--------------------------------------------------------------------------------
/theseus/extlib/cusolver_sp_defs.h:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is licensed under the MIT license found in the
4 | // LICENSE file in the root directory of this source tree.
5 |
6 | #pragma once
7 |
8 | #include <cusolverSp.h>
9 |
10 | #define CUSOLVER_CHECK(EXPR) \
11 | do { \
12 | cusolverStatus_t __err = EXPR; \
13 | TORCH_CHECK(__err == CUSOLVER_STATUS_SUCCESS, \
14 | "cusolver error: ", \
15 | theseus::cusolver_sp::cusolverGetErrorMessage(__err), \
16 | ", when calling `" #EXPR "`"); \
17 | } while (0)
18 |
19 | namespace theseus::cusolver_sp {
20 |
21 | const char* cusolverGetErrorMessage(cusolverStatus_t status);
22 |
23 | cusolverSpHandle_t getCurrentCUDASolverSpHandle();
24 |
25 | } // namespace theseus::cusolver_sp
26 |
--------------------------------------------------------------------------------
/theseus/extlib/utils.h:
--------------------------------------------------------------------------------
1 | // Copyright (c) Meta Platforms, Inc. and affiliates.
2 | //
3 | // This source code is licensed under the MIT license found in the
4 | // LICENSE file in the root directory of this source tree.
5 |
6 | #pragma once
7 |
8 | #include <torch/extension.h>
9 |
10 |
11 | // Various checks for tensor dimensions, dtype and devices
12 | #define THESEUS_BASE_TENSOR_CHECK(tensor, d, d1, dt) \
13 | do { \
14 | TORCH_CHECK(tensor.dim() == d); \
15 | TORCH_CHECK(tensor.size(0) == d1); \
16 | TORCH_CHECK(tensor.dtype() == dt); \
17 | } while (0)
18 |
19 | #define THESEUS_TENSOR_CHECK_CPU(tensor, d, d1, dt) \
20 | do { \
21 | THESEUS_BASE_TENSOR_CHECK(tensor, d, d1, dt); \
22 | TORCH_CHECK(tensor.device().is_cpu()); \
23 | } while (0)
24 |
25 | #define THESEUS_TENSOR_CHECK_CUDA(tensor, d, d1, dt) \
26 | do { \
27 | THESEUS_BASE_TENSOR_CHECK(tensor, d, d1, dt); \
28 | TORCH_CHECK(tensor.device().is_cuda()); \
29 | } while (0)
30 |
31 |
--------------------------------------------------------------------------------
/theseus/geometry/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .lie_group import LieGroup, adjoint, between, compose, exp_map, inverse, log_map
7 | from .lie_group_check import (
8 | enable_lie_group_check,
9 | no_lie_group_check,
10 | set_lie_group_check_enabled,
11 | )
12 | from .manifold import Manifold, OptionalJacobians, local, retract
13 | from .point_types import (
14 | Point2,
15 | Point3,
16 | rand_point2,
17 | rand_point3,
18 | randn_point2,
19 | randn_point3,
20 | )
21 | from .se2 import SE2, rand_se2, randn_se2
22 | from .se3 import SE3, rand_se3, randn_se3
23 | from .so2 import SO2, rand_so2, randn_so2
24 | from .so3 import SO3, rand_so3, randn_so3
25 | from .utils import (
26 | LieGroupTensor,
27 | enable_lie_tangent,
28 | no_lie_tangent,
29 | set_lie_tangent_enabled,
30 | )
31 | from .vector import Vector, rand_vector, randn_vector
32 |
--------------------------------------------------------------------------------
/theseus/geometry/utils.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import threading
7 | from typing import Any
8 |
9 | import torch
10 |
11 | from .lie_group import LieGroup
12 |
13 |
14 | class _LieGroupContext(object):
15 | contexts = threading.local()
16 |
17 | @classmethod
18 | def get_context(cls):
19 | if not hasattr(cls.contexts, "use_lie_tangent"):
20 | cls.contexts.use_lie_tangent = False
21 | return cls.contexts.use_lie_tangent
22 |
23 | @classmethod
24 | def set_context(cls, use_lie_tangent: bool):
25 | cls.contexts.use_lie_tangent = use_lie_tangent
26 |
27 |
28 | class set_lie_tangent_enabled(object):
29 | def __init__(self, mode: bool) -> None:
30 | self.prev = _LieGroupContext.get_context()
31 | _LieGroupContext.set_context(mode)
32 |
33 | def __enter__(self) -> None:
34 | pass
35 |
36 | def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
37 | _LieGroupContext.set_context(self.prev)
38 |
39 |
40 | class enable_lie_tangent(object):
41 | def __enter__(self) -> None:
42 | self.prev = _LieGroupContext.get_context()
43 | _LieGroupContext.set_context(True)
44 |
45 | def __exit__(self, typ, value, traceback) -> None:
46 | _LieGroupContext.set_context(self.prev)
47 |
48 |
49 | class no_lie_tangent(_LieGroupContext):
50 | def __enter__(self):
51 | self.prev = super().get_context()
52 | _LieGroupContext.set_context(False)
53 | return self
54 |
55 | def __exit__(self, typ, value, traceback):
56 | _LieGroupContext.set_context(self.prev)
57 |
58 |
59 | class LieGroupTensor(torch.Tensor):
60 | from torch._C import _disabled_torch_function_impl # type: ignore
61 |
62 | __torch_function__ = _disabled_torch_function_impl
63 |
64 | def __new__(cls, group):
65 | return torch.Tensor._make_subclass(cls, group.tensor)
66 |
67 | def __init__(self, group: LieGroup):
68 | self.group_cls = type(group)
69 |
70 | def add_(self, update, alpha=1):
71 | if _LieGroupContext.get_context():
72 | group = self.group_cls(tensor=self.data)
73 | grad = group.project(update)
74 | self.set_(group.retract(alpha * grad).tensor)
75 | else:
76 |             super().add_(update, alpha=alpha)
77 |
78 | return self
79 |
80 | def addcdiv_(self, tensor1, tensor2, value=1):
81 | self.add_(
82 | value * tensor1 / tensor2
83 | ) if _LieGroupContext.get_context() else super().addcdiv_(
84 | tensor1, tensor2, value=value
85 | )
86 | return self
87 |
88 | def addcmul_(self, tensor1, tensor2, value=1):
89 | self.add_(
90 | value * tensor1 * tensor2
91 | ) if _LieGroupContext.get_context() else super().addcmul_(
92 | tensor1, tensor2, value=value
93 | )
94 | return self
95 |
--------------------------------------------------------------------------------
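A hedged sketch of the intended use of LieGroupTensor (the training loop here is an assumption for illustration, not code from this file): wrapping a group's tensor makes in-place optimizer updates (add_, addcdiv_, addcmul_) go through project + retract whenever the Lie tangent context is enabled, keeping iterates on the manifold:

    import torch
    import theseus as th

    g = th.SO3(tensor=th.SO3.rand(1).tensor)
    g.tensor = th.LieGroupTensor(g)
    g.tensor.requires_grad = True

    adam = torch.optim.Adam([g.tensor], lr=1e-2)
    loss = g.tensor.sum()  # placeholder loss, for illustration only
    loss.backward()
    with th.enable_lie_tangent():
        adam.step()  # Adam's addcdiv_ routes through retract(), staying on SO3
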
/theseus/global_params.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | from dataclasses import dataclass, fields
6 | from typing import Any, Dict
7 |
8 | import torch
9 |
10 | from torchlie.global_params import _TORCHLIE_GLOBAL_PARAMS as _LIE_GP
11 |
12 | _LIE_GP_FIELD_NAMES = set([f.name for f in fields(_LIE_GP)])
13 |
14 |
15 | def _CHECK_DTYPE_SUPPORTED(dtype):
16 | if dtype not in [torch.float32, torch.float64]:
17 | raise ValueError(
18 | f"Unsupported data type {dtype}. "
19 | "Theseus only supports 32- and 64-bit tensors."
20 | )
21 |
22 |
23 | @dataclass
24 | class _TheseusGlobalParams:
25 | so2_norm_eps_float32: float = 0
26 | so2_matrix_eps_float32: float = 0
27 | se2_near_zero_eps_float32: float = 0
28 | so3_to_quaternion_sqrt_eps_float32: float = 0
29 | so2_norm_eps_float64: float = 0
30 | so2_matrix_eps_float64: float = 0
31 | se2_near_zero_eps_float64: float = 0
32 | so3_to_quaternion_sqrt_eps_float64: float = 0
33 | fast_approx_local_jacobians: bool = False
34 |
35 | def __init__(self):
36 | self.reset()
37 |
38 | def get_eps(self, ltype: str, attr: str, dtype: torch.dtype) -> float:
39 | try:
40 | return _LIE_GP.get_eps(ltype, attr, dtype)
41 | except AttributeError:
42 | _CHECK_DTYPE_SUPPORTED(dtype)
43 | attr_name = f"{ltype}_{attr}_eps_{str(dtype)[6:]}"
44 | return getattr(self, attr_name)
45 |
46 | def reset(self) -> None:
47 | self.so2_norm_eps_float32 = 1e-12
48 | self.so2_matrix_eps_float32 = 1e-5
49 | self.se2_near_zero_eps_float32 = 3e-2
50 | self.se2_d_near_zero_eps_float32 = 1e-1
51 | self.so3_to_quaternion_sqrt_eps_float32 = 1e-6
52 |         self.so2_norm_eps_float64 = 1e-12
53 |
54 | self.so2_matrix_eps_float64 = 4e-7
55 | self.se2_near_zero_eps_float64 = 1e-6
56 | self.se2_d_near_zero_eps_float64 = 1e-3
57 | self.so3_to_quaternion_sqrt_eps_float64 = 1e-6
58 |
59 | self.fast_approx_local_jacobians = False
60 |
61 |
62 | _THESEUS_GLOBAL_PARAMS = _TheseusGlobalParams()
63 |
64 |
65 | def set_global_params(options: Dict[str, Any]) -> None:
66 | torchlie_params_found = []
67 | for k in options:
68 | if k in _LIE_GP_FIELD_NAMES:
69 | torchlie_params_found.append(k)
70 | if torchlie_params_found:
71 | raise RuntimeError(
72 | f"Theseus uses torchlie for configuring 3D Lie group tolerances, "
73 | f"but you attempted to use theseus.set_global_params() for the "
74 | f"following ones:\n {torchlie_params_found}.\n"
75 | f"Please use torchlie.set_global_params() to set these tolerances."
76 | )
77 | for k, v in options.items():
78 | if not hasattr(_THESEUS_GLOBAL_PARAMS, k):
79 | raise ValueError(f"{k} is not a valid global option for theseus.")
80 | setattr(_THESEUS_GLOBAL_PARAMS, k, v)
81 |
--------------------------------------------------------------------------------
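A short usage sketch of the dispatch above (values are illustrative): theseus-level names are set here, while 3D Lie group tolerances belong to torchlie and trigger the RuntimeError implemented above:

    import theseus as th

    th.set_global_params({"so2_norm_eps_float32": 1e-10})
    th.set_global_params({"fast_approx_local_jacobians": True})

    try:  # a torchlie-owned name is rejected and redirected to torchlie
        th.set_global_params({"so3_near_zero_eps_float32": 1e-3})
    except RuntimeError as e:
        print(e)
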
/theseus/labs/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
--------------------------------------------------------------------------------
/theseus/optimizer/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .dense_linearization import DenseLinearization
7 | from .linearization import Linearization
8 | from .manifold_gaussian import ManifoldGaussian, local_gaussian, retract_gaussian
9 | from .optimizer import Optimizer, OptimizerInfo
10 | from .sparse_linearization import SparseLinearization
11 | from .variable_ordering import VariableOrdering
12 |
--------------------------------------------------------------------------------
/theseus/optimizer/autograd/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .baspacho_sparse_autograd import BaspachoSolveFunction
7 | from .cholmod_sparse_autograd import CholmodSolveFunction
8 | from .lu_cuda_sparse_autograd import LUCudaSolveFunction
9 |
10 | __all__ = [
11 | "BaspachoSolveFunction",
12 | "CholmodSolveFunction",
13 | "LUCudaSolveFunction",
14 | ]
15 |
--------------------------------------------------------------------------------
/theseus/optimizer/autograd/common.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | from typing import Optional, Tuple
6 |
7 | import numpy as np
8 | import torch
9 |
10 |
11 | def compute_A_grad(
12 | batch_size: int,
13 | A_row_ptr: np.ndarray,
14 | A_col_ind: np.ndarray,
15 | b: torch.Tensor,
16 | x: torch.Tensor,
17 | b_Ax: torch.Tensor,
18 | H: torch.Tensor,
19 | AH: torch.Tensor,
20 | damping_alpha_beta: Optional[Tuple[torch.Tensor, torch.Tensor]],
21 | A_val: Optional[torch.Tensor], # only needed if damping passed
22 | ctx_A_col_ind: Optional[torch.Tensor], # only needed if damping passed
23 | detach_hessian: bool,
24 | ):
25 |     A_grad = torch.empty(
26 |         size=(batch_size, len(A_col_ind)), device=x.device, dtype=x.dtype
27 |     )  # return value, A's grad
28 | for r in range(len(A_row_ptr) - 1):
29 | start, end = A_row_ptr[r], A_row_ptr[r + 1]
30 | columns = A_col_ind[start:end] # col indices, for this row
31 | if detach_hessian:
32 | A_grad[:, start:end] = b[:, r].unsqueeze(1) * H[:, columns]
33 | else:
34 | A_grad[:, start:end] = (
35 | b_Ax[:, r].unsqueeze(1) * H[:, columns]
36 | - AH[:, r].unsqueeze(1) * x[:, columns]
37 | )
38 |
39 | # apply correction if there is a multiplicative damping
40 | if damping_alpha_beta is not None and (damping_alpha_beta[0] > 0.0).any():
41 | assert (
42 | not detach_hessian
43 | ) # this should only be used with a GN-step with no damping
44 | alpha = damping_alpha_beta[0].view(-1, 1)
45 | alpha2Hx = (alpha * 2.0) * H * x # componentwise product
46 | A_grad -= A_val * alpha2Hx[:, ctx_A_col_ind.type(torch.long)]
47 |
48 | return A_grad
49 |
--------------------------------------------------------------------------------
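In matrix form, the row loop above fills the sparsity-restricted gradient (writing r = b - Ax for the residual and H for the backward helper term passed in):

    \frac{\partial L}{\partial A} = r\,H^\top - (A H)\,x^\top

taken elementwise on A's sparsity pattern; with detach_hessian=True only the b\,H^\top term is kept, and the final block adds the correction for multiplicative (ellipsoidal) damping.
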
/theseus/optimizer/dense_linearization.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from typing import Optional
7 |
8 | import torch
9 |
10 | from theseus.core import Objective
11 |
12 | from .linearization import Linearization
13 | from .variable_ordering import VariableOrdering
14 |
15 |
16 | class DenseLinearization(Linearization):
17 | def __init__(
18 | self,
19 | objective: Objective,
20 | ordering: Optional[VariableOrdering] = None,
21 | **kwargs
22 | ):
23 | super().__init__(objective, ordering)
24 | self.A: torch.Tensor = None
25 | self.b: torch.Tensor = None
26 | self._AtA: torch.Tensor = None
27 | self._Atb: torch.Tensor = None
28 |
29 | def _linearize_jacobian_impl(self):
30 | err_row_idx = 0
31 | self.A = torch.zeros(
32 | (self.objective.batch_size, self.num_rows, self.num_cols),
33 | device=self.objective.device,
34 | dtype=self.objective.dtype,
35 | )
36 | self.b = torch.zeros(
37 | (self.objective.batch_size, self.num_rows),
38 | device=self.objective.device,
39 | dtype=self.objective.dtype,
40 | )
41 | for cost_function in self.objective._get_jacobians_iter():
42 | jacobians, error = cost_function.weighted_jacobians_error()
43 | num_rows = cost_function.dim()
44 | for var_idx_in_cost_function, var_jacobian in enumerate(jacobians):
45 | var_idx_in_order = self.ordering.index_of(
46 | cost_function.optim_var_at(var_idx_in_cost_function).name
47 | )
48 | var_start_col = self.var_start_cols[var_idx_in_order]
49 |
50 | num_cols = var_jacobian.shape[2]
51 | row_slice = slice(err_row_idx, err_row_idx + num_rows)
52 | col_slice = slice(var_start_col, var_start_col + num_cols)
53 | self.A[:, row_slice, col_slice] = var_jacobian
54 |
55 | self.b[:, row_slice] = -error
56 | err_row_idx += cost_function.dim()
57 |
58 | def _linearize_hessian_impl(self, _detach_hessian: bool = False):
59 | self._linearize_jacobian_impl()
60 | At = self.A.transpose(1, 2)
61 | self._AtA = At.bmm(self.A).detach() if _detach_hessian else At.bmm(self.A)
62 | self._Atb = At.bmm(self.b.unsqueeze(2))
63 |
64 | def hessian_approx(self):
65 | return self._AtA
66 |
67 | def _ata_impl(self) -> torch.Tensor:
68 | return self._AtA
69 |
70 | def _atb_impl(self) -> torch.Tensor:
71 | return self._Atb
72 |
73 | def Av(self, v: torch.Tensor) -> torch.Tensor:
74 | return self.A.bmm(v.unsqueeze(2)).squeeze(2)
75 |
76 | def diagonal_scaling(self, v: torch.Tensor) -> torch.Tensor:
77 | return v * self._AtA.diagonal(dim1=1, dim2=2)
78 |
--------------------------------------------------------------------------------
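The cached _AtA and _Atb above form the batched Gauss-Newton normal equations: each step solves min_delta ||A delta - b||^2, i.e. A^T A delta = A^T b. A standalone consistency sketch in plain torch (hypothetical shapes):

    import torch

    batch, rows, cols = 2, 5, 3
    A = torch.randn(batch, rows, cols)
    b = torch.randn(batch, rows)
    AtA = A.transpose(1, 2).bmm(A)               # what _linearize_hessian_impl caches
    Atb = A.transpose(1, 2).bmm(b.unsqueeze(2))
    delta = torch.linalg.solve(AtA, Atb)         # the Gauss-Newton step
    # At the solution the normal-equation residual vanishes:
    print(A.transpose(1, 2).bmm(A.bmm(delta) - b.unsqueeze(2)).norm())  # ~0
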
/theseus/optimizer/linear/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch
7 |
8 | from .baspacho_sparse_solver import BaspachoSparseSolver
9 | from .cholmod_sparse_solver import CholmodSparseSolver
10 | from .dense_solver import CholeskyDenseSolver, DenseSolver, LUDenseSolver
11 | from .linear_optimizer import LinearOptimizer
12 | from .linear_solver import LinearSolver
13 | from .lu_cuda_sparse_solver import LUCudaSparseSolver
14 |
--------------------------------------------------------------------------------
/theseus/optimizer/linear/cholmod_sparse_solver.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from typing import Any, Dict, Optional, Type, Union
7 |
8 | import torch
9 |
10 | # rename Cholesky `Factor` to CholeskyDecomposition to
11 | # prevent confusion with the factors of the factor graph,
12 | # when using the probabilistic naming convention
13 | try:
14 | from sksparse.cholmod import Factor as CholeskyDecomposition
15 | from sksparse.cholmod import analyze_AAt
16 | except ModuleNotFoundError:
17 | import warnings
18 |
19 |     warnings.warn("Couldn't import sksparse.cholmod. Cholmod solver won't work.")
20 |
21 |
22 | from theseus.core import Objective
23 | from theseus.optimizer import Linearization, SparseLinearization
24 | from theseus.optimizer.autograd import CholmodSolveFunction
25 |
26 | from .linear_solver import LinearSolver
27 |
28 |
29 | class CholmodSparseSolver(LinearSolver):
30 | def __init__(
31 | self,
32 | objective: Objective,
33 | linearization_cls: Optional[Type[Linearization]] = None,
34 | linearization_kwargs: Optional[Dict[str, Any]] = None,
35 | damping: float = 0.0,
36 | **kwargs,
37 | ):
38 | linearization_cls = linearization_cls or SparseLinearization
39 | if not linearization_cls == SparseLinearization:
40 | raise RuntimeError(
41 | "CholmodSparseSolver only works with theseus.optimizer.SparseLinearization,"
42 |                 + f" got {linearization_cls}"
43 | )
44 |
45 | super().__init__(objective, linearization_cls, linearization_kwargs, **kwargs)
46 | self.linearization: SparseLinearization = self.linearization
47 |
48 | # symbolic decomposition depending on the sparse structure, done with mock data
49 | self._symbolic_cholesky_decomposition: CholeskyDecomposition = analyze_AAt(
50 | self.linearization.structure().mock_csc_transpose()
51 | )
52 |
53 |         # `damping` can (optionally) be used to improve conditioning
54 | self._damping: float = damping
55 |
56 | def solve(
57 | self, damping: Optional[Union[float, torch.Tensor]] = None, **kwargs
58 | ) -> torch.Tensor:
59 | if damping is not None and not isinstance(damping, float):
60 | raise ValueError("CholmodSparseSolver only supports scalar damping.")
61 |         damping = self._damping if damping is None else damping
62 | if not isinstance(self.linearization, SparseLinearization):
63 | raise RuntimeError(
64 | "CholmodSparseSolver only works with theseus.optimizer.SparseLinearization."
65 | )
66 |
67 | return CholmodSolveFunction.apply(
68 | self.linearization.A_val,
69 | self.linearization.b,
70 | self.linearization.structure(),
71 | self._symbolic_cholesky_decomposition,
72 | damping,
73 | self.linearization.detached_hessian,
74 | )
75 |
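76 | # Example (an illustrative sketch, not part of the original file): the
77 | # solver is typically wired through an optimizer rather than called
78 | # directly; `objective` below stands for a previously built theseus
79 | # Objective, and scikit-sparse must be installed (CPU only):
80 | #
81 | #   from theseus.optimizer.linear import LinearOptimizer
82 | #   optimizer = LinearOptimizer(objective, CholmodSparseSolver)
83 | #   info = optimizer.optimize()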
--------------------------------------------------------------------------------
/theseus/optimizer/linear/linear_optimizer.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import warnings
7 | from enum import Enum
8 | from typing import Any, Dict, Optional, Type
9 |
10 | import numpy as np
11 | import torch
12 |
13 | from theseus.core import Objective
14 | from theseus.optimizer import Linearization, Optimizer, OptimizerInfo
15 |
16 | from .linear_solver import LinearSolver
17 |
18 |
19 | class LinearOptimizerStatus(Enum):
20 | START = 0
21 | CONVERGED = 1
22 | FAIL = -1
23 |
24 |
25 | class LinearOptimizer(Optimizer):
26 | def __init__(
27 | self,
28 | objective: Objective,
29 | linear_solver_cls: Type[LinearSolver],
30 | *args,
31 | vectorize: bool = False,
32 | linearization_cls: Optional[Type[Linearization]] = None,
33 | linearization_kwargs: Optional[Dict[str, Any]] = None,
34 | linear_solver_kwargs: Optional[Dict[str, Any]] = None,
35 | **kwargs,
36 | ):
37 | super().__init__(objective, vectorize=vectorize, **kwargs)
38 | linearization_kwargs = linearization_kwargs or {}
39 | linear_solver_kwargs = linear_solver_kwargs or {}
40 | self.linear_solver = linear_solver_cls(
41 | objective,
42 | linearization_cls=linearization_cls,
43 | linearization_kwargs=linearization_kwargs,
44 | **linear_solver_kwargs,
45 | )
46 |
47 | def _optimize_impl(
48 | self,
49 | **kwargs,
50 | ) -> OptimizerInfo:
51 | info = OptimizerInfo(
52 | best_solution={},
53 | status=np.array([LinearOptimizerStatus.START] * self.objective.batch_size),
54 | )
55 | try:
56 | self.linear_solver.linearization.linearize()
57 | delta = self.linear_solver.solve()
58 | except RuntimeError as run_err:
59 | msg = (
60 | f"There was an error while running the linear optimizer. "
61 | f"Original error message: {run_err}."
62 | )
63 | if torch.is_grad_enabled():
64 | raise RuntimeError(
65 | msg + " Backward pass will not work. To obtain "
66 | "the best solution seen before the error, run with torch.no_grad()"
67 | )
68 | else:
69 | warnings.warn(msg, RuntimeWarning)
70 | info.status[:] = LinearOptimizerStatus.FAIL
71 | return info
72 | self.objective.retract_vars_sequence(
73 | delta, self.linear_solver.linearization.ordering
74 | )
75 | info.status[:] = LinearOptimizerStatus.CONVERGED
76 | for var in self.linear_solver.linearization.ordering:
77 | info.best_solution[var.name] = var.tensor.clone().cpu()
78 | return info
79 |
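80 | # Example (an illustrative sketch, not part of the original file): solving
81 | # a tiny linear least-squares problem end to end. The names below are made
82 | # up for illustration and assume the top-level theseus package:
83 | #
84 | #   import theseus as th
85 | #
86 | #   x = th.Vector(tensor=torch.zeros(1, 2), name="x")
87 | #
88 | #   def err_fn(optim_vars, aux_vars):
89 | #       return optim_vars[0].tensor - 1.0  # residual x - 1, optimum x = [1, 1]
90 | #
91 | #   objective = th.Objective()
92 | #   objective.add(th.AutoDiffCostFunction([x], err_fn, 2, name="err"))
93 | #   optimizer = LinearOptimizer(objective, th.CholeskyDenseSolver)
94 | #   info = optimizer.optimize()
95 | #   # info.best_solution["x"] should now be close to [[1.0, 1.0]]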
--------------------------------------------------------------------------------
/theseus/optimizer/linear/linear_solver.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import abc
7 | from typing import Any, Dict, Optional, Type, Union
8 |
9 | import torch
10 |
11 | from theseus.core import Objective
12 | from theseus.optimizer import Linearization
13 |
14 |
15 | class LinearSolver(abc.ABC):
16 | # linearization_cls is optional, since every linear solver will have a default
17 | def __init__(
18 | self,
19 | objective: Objective,
20 | linearization_cls: Optional[Type[Linearization]] = None,
21 | linearization_kwargs: Optional[Dict[str, Any]] = None,
22 | **kwargs
23 | ):
24 | linearization_kwargs = linearization_kwargs or {}
25 | self.linearization: Linearization = linearization_cls(
26 | objective, **linearization_kwargs
27 | )
28 |
29 | # Deliberately not abstract since some solvers don't need this
30 | def reset(self, **kwargs):
31 | pass
32 |
33 | @abc.abstractmethod
34 | def solve(
35 | self, damping: Optional[Union[float, torch.Tensor]] = None, **kwargs
36 | ) -> torch.Tensor:
37 | pass
38 |
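39 | # Example (an illustrative sketch, not part of the original file): the
40 | # smallest conceivable concrete solver, using the dense linearization's
41 | # AtA / Atb properties and a batched dense solve:
42 | #
43 | #   from theseus.optimizer import DenseLinearization
44 | #
45 | #   class ToyDenseSolver(LinearSolver):
46 | #       def __init__(self, objective, linearization_cls=None,
47 | #                    linearization_kwargs=None, **kwargs):
48 | #           linearization_cls = linearization_cls or DenseLinearization
49 | #           super().__init__(
50 | #               objective, linearization_cls, linearization_kwargs, **kwargs
51 | #           )
52 | #
53 | #       def solve(self, damping=None, **kwargs):
54 | #           AtA = self.linearization.AtA  # (batch, n, n)
55 | #           Atb = self.linearization.Atb  # (batch, n, 1)
56 | #           return torch.linalg.solve(AtA, Atb).squeeze(2)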
--------------------------------------------------------------------------------
/theseus/optimizer/linear/utils.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | from typing import Tuple, Union
6 |
7 | import torch
8 |
9 | from theseus.constants import DeviceType
10 |
11 |
12 | # See Nocedal and Wright, Numerical Optimization, pp. 260 and 261
13 | # https://www.csie.ntu.edu.tw/~r97002/temp/num_optimization.pdf
14 | def convert_to_alpha_beta_damping_tensors(
15 | damping: Union[float, torch.Tensor],
16 | damping_eps: float,
17 | ellipsoidal_damping: bool,
18 | batch_size: int,
19 | device: DeviceType,
20 | dtype: torch.dtype,
21 | ) -> Tuple[torch.Tensor, torch.Tensor]:
22 | damping = torch.as_tensor(damping).to(device=device, dtype=dtype)
23 | if damping.ndim > 1:
24 | raise ValueError("Damping must be a float or a 1-D tensor.")
25 |     if damping.ndim == 0 or (damping.shape[0] == 1 and batch_size != 1):
26 | # Our damp kernel does not like expand, since it may try
27 | # to access indices beyond what's actually stored in this tensor
28 | damping = damping.repeat(batch_size)
29 | return (
30 | (damping, damping_eps * torch.ones_like(damping))
31 | if ellipsoidal_damping
32 | else (torch.zeros_like(damping), damping)
33 | )
34 |
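35 |
36 | if __name__ == "__main__":
37 |     # Example (an illustrative sketch, not part of the original file):
38 |     # a scalar damping value is expanded to per-batch (alpha, beta)
39 |     # tensors; alpha is the multiplicative (diagonal-scaling) part of
40 |     # the damping and beta the additive part.
41 |     alpha, beta = convert_to_alpha_beta_damping_tensors(
42 |         damping=0.1,
43 |         damping_eps=1e-8,
44 |         ellipsoidal_damping=True,
45 |         batch_size=3,
46 |         device="cpu",
47 |         dtype=torch.float32,
48 |     )
49 |     print(alpha)  # tensor([0.1000, 0.1000, 0.1000])
50 |     print(beta)   # tensor([1.0000e-08, 1.0000e-08, 1.0000e-08])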
--------------------------------------------------------------------------------
/theseus/optimizer/linear_system.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import abc
7 |
8 | import numpy as np
9 | import torch
10 | from scipy.sparse import csc_matrix, csr_matrix
11 |
12 |
13 | class SparseStructure(abc.ABC):
14 | def __init__(
15 | self,
16 | col_ind: np.ndarray,
17 | row_ptr: np.ndarray,
18 | num_rows: int,
19 | num_cols: int,
20 | dtype: np.dtype = np.float64, # type: ignore
21 | ):
22 | self.col_ind = col_ind
23 | self.row_ptr = row_ptr
24 | self.num_rows = num_rows
25 | self.num_cols = num_cols
26 | self.dtype = dtype
27 |
28 | def csr_straight(self, val: torch.Tensor) -> csr_matrix:
29 | return csr_matrix(
30 | (val, self.col_ind, self.row_ptr),
31 | (self.num_rows, self.num_cols),
32 | dtype=self.dtype,
33 | )
34 |
35 | def csc_transpose(self, val: torch.Tensor) -> csc_matrix:
36 | return csc_matrix(
37 | (val, self.col_ind, self.row_ptr),
38 | (self.num_cols, self.num_rows),
39 | dtype=self.dtype,
40 | )
41 |
42 | def mock_csc_transpose(self) -> csc_matrix:
43 | return csc_matrix(
44 | (np.ones(len(self.col_ind), dtype=self.dtype), self.col_ind, self.row_ptr),
45 | (self.num_cols, self.num_rows),
46 | dtype=self.dtype,
47 | )
48 |
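49 |
50 | if __name__ == "__main__":
51 |     # Example (an illustrative sketch, not part of the original file):
52 |     # the CSR structure of the 2x3 matrix
53 |     #     [[1, 0, 2],
54 |     #      [0, 3, 0]]
55 |     # stores the column of each nonzero in row order, [0, 2, 1], and the
56 |     # offset where each row starts, [0, 2, 3].
57 |     structure = SparseStructure(
58 |         col_ind=np.array([0, 2, 1]),
59 |         row_ptr=np.array([0, 2, 3]),
60 |         num_rows=2,
61 |         num_cols=3,
62 |     )
63 |     val = torch.tensor([1.0, 2.0, 3.0], dtype=torch.float64)
64 |     print(structure.csr_straight(val).toarray())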
--------------------------------------------------------------------------------
/theseus/optimizer/linearization.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import abc
7 | from typing import List, Optional
8 |
9 | import torch
10 |
11 | from theseus.core import Objective
12 |
13 | from .variable_ordering import VariableOrdering
14 |
15 |
16 | class Linearization(abc.ABC):
17 | # if ordering is None, this will generate a default ordering
18 | def __init__(
19 | self,
20 | objective: Objective,
21 | ordering: Optional[VariableOrdering] = None,
22 | **kwargs,
23 | ):
24 | self.objective = objective
25 | if ordering is None:
26 | ordering = VariableOrdering(objective, default_order=True)
27 | self.ordering = ordering
28 | if not self.ordering.complete:
29 | raise ValueError("Given variable ordering is not complete.")
30 |
31 | self.var_dims: List[int] = []
32 | self.var_start_cols: List[int] = []
33 | col_counter = 0
34 | for var in ordering:
35 | v_dim = var.dof()
36 | self.var_start_cols.append(col_counter)
37 | self.var_dims.append(v_dim)
38 | col_counter += v_dim
39 |
40 | self.num_cols = col_counter
41 | self.num_rows = self.objective.dim()
42 |
43 | @abc.abstractmethod
44 | def _linearize_jacobian_impl(self):
45 | pass
46 |
47 | @abc.abstractmethod
48 | def _linearize_hessian_impl(self, _detach_hessian: bool = False):
49 | pass
50 |
51 | def linearize(self, _detach_hessian: bool = False):
52 | if not self.ordering.complete:
53 | raise RuntimeError(
54 | "Attempted to linearize an objective with an incomplete variable order."
55 | )
56 | self._linearize_hessian_impl(_detach_hessian=_detach_hessian)
57 |
58 | def hessian_approx(self):
59 | raise NotImplementedError(
60 | f"hessian_approx is not implemented for {self.__class__.__name__}"
61 | )
62 |
63 | @abc.abstractmethod
64 | def _ata_impl(self) -> torch.Tensor:
65 | pass
66 |
67 | @abc.abstractmethod
68 | def _atb_impl(self) -> torch.Tensor:
69 | pass
70 |
71 | @property
72 | def AtA(self) -> torch.Tensor:
73 | return self._ata_impl()
74 |
75 | @property
76 | def Atb(self) -> torch.Tensor:
77 | return self._atb_impl()
78 |
79 | # Returns self.A @ v
80 | @abc.abstractmethod
81 | def Av(self, v: torch.Tensor) -> torch.Tensor:
82 | pass
83 |
84 | # Returns diag(self.A^T @ self.A) * v
85 | @abc.abstractmethod
86 | def diagonal_scaling(self, v: torch.Tensor) -> torch.Tensor:
87 | pass
88 |
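89 | # Note (an illustrative example, not part of the original file): with an
90 | # ordering of two variables of dof 3 and 2, the bookkeeping above yields
91 | #     var_start_cols = [0, 3], var_dims = [3, 2], num_cols = 5,
92 | # i.e., each variable owns a contiguous block of columns of A.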
--------------------------------------------------------------------------------
/theseus/optimizer/nonlinear/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .dcem import DCEM
7 | from .dogleg import Dogleg
8 | from .gauss_newton import GaussNewton
9 | from .levenberg_marquardt import LevenbergMarquardt
10 | from .nonlinear_least_squares import NonlinearLeastSquares
11 | from .nonlinear_optimizer import (
12 | BackwardMode,
13 | NonlinearOptimizer,
14 | NonlinearOptimizerInfo,
15 | NonlinearOptimizerParams,
16 | NonlinearOptimizerStatus,
17 | )
18 | from .trust_region import TrustRegion
19 |
--------------------------------------------------------------------------------
/theseus/optimizer/nonlinear/gauss_newton.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from typing import Any, Dict, Optional, Type
7 |
8 | import torch
9 |
10 | from theseus.core import Objective
11 | from theseus.optimizer import Linearization
12 | from theseus.optimizer.linear import LinearSolver
13 |
14 | from .nonlinear_least_squares import NonlinearLeastSquares
15 |
16 |
17 | class GaussNewton(NonlinearLeastSquares):
18 | def __init__(
19 | self,
20 | objective: Objective,
21 | linear_solver_cls: Optional[Type[LinearSolver]] = None,
22 | vectorize: bool = False,
23 | linearization_cls: Optional[Type[Linearization]] = None,
24 | linearization_kwargs: Optional[Dict[str, Any]] = None,
25 | linear_solver_kwargs: Optional[Dict[str, Any]] = None,
26 | abs_err_tolerance: float = 1e-10,
27 | rel_err_tolerance: float = 1e-8,
28 | max_iterations: int = 20,
29 | step_size: float = 1.0,
30 | **kwargs,
31 | ):
32 | super().__init__(
33 | objective,
34 | linear_solver_cls=linear_solver_cls,
35 | vectorize=vectorize,
36 | linearization_cls=linearization_cls,
37 | linearization_kwargs=linearization_kwargs,
38 | linear_solver_kwargs=linear_solver_kwargs,
39 | abs_err_tolerance=abs_err_tolerance,
40 | rel_err_tolerance=rel_err_tolerance,
41 | max_iterations=max_iterations,
42 | step_size=step_size,
43 | **kwargs,
44 | )
45 |
46 | def compute_delta(self, **kwargs) -> torch.Tensor:
47 | return self.linear_solver.solve()
48 |
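49 | # Example (an illustrative sketch, not part of the original file): typical
50 | # usage through TheseusLayer; `objective` and `input_tensors` below stand
51 | # for a previously built theseus Objective and its input dictionary:
52 | #
53 | #   import theseus as th
54 | #
55 | #   optimizer = GaussNewton(objective, max_iterations=10, step_size=0.5)
56 | #   layer = th.TheseusLayer(optimizer)
57 | #   values, info = layer.forward(input_tensors)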
--------------------------------------------------------------------------------
/theseus/optimizer/optimizer.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import abc
7 | import warnings
8 | from dataclasses import dataclass
9 | from typing import Dict, Optional
10 |
11 | import numpy as np
12 | import torch
13 |
14 | from theseus.constants import __FROM_THESEUS_LAYER_TOKEN__
15 | from theseus.core import Objective, Vectorize
16 | from theseus.geometry.lie_group_check import silence_internal_warnings
17 |
18 |
19 | # All information in OptimizerInfo is batched
20 | @dataclass
21 | class OptimizerInfo:
22 | best_solution: Optional[Dict[str, torch.Tensor]]
23 | # status for each element
24 | status: np.ndarray
25 |
26 |
27 | class Optimizer(abc.ABC):
28 | def __init__(self, objective: Objective, *args, vectorize: bool = False, **kwargs):
29 | self.objective = objective
30 | if vectorize:
31 | Vectorize(
32 | self.objective, empty_cuda_cache=kwargs.get("empty_cuda_cache", False)
33 | )
34 | self._objectives_version = objective.current_version
35 |
36 | @abc.abstractmethod
37 | def _optimize_impl(self, **kwargs) -> OptimizerInfo:
38 | pass
39 |
40 | def optimize(self, **kwargs) -> OptimizerInfo:
41 | from_theseus_layer = kwargs.get(__FROM_THESEUS_LAYER_TOKEN__, False)
42 | if not from_theseus_layer and not self.objective.vectorized:
43 | warnings.warn(
44 | "Vectorization is off by default when not running from TheseusLayer. "
45 | "Using TheseusLayer is the recommended way to run our optimizers."
46 | )
47 | if self._objectives_version != self.objective.current_version:
48 | raise RuntimeError(
49 | "The objective was modified after optimizer construction, which is "
50 | "currently not supported."
51 | )
52 | with silence_internal_warnings():
53 | return self._optimize_impl(**kwargs)
54 |
--------------------------------------------------------------------------------
/theseus/optimizer/variable_ordering.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from typing import Dict, List, Sequence
7 |
8 | from theseus.core import Objective, Variable
9 |
10 |
11 | class VariableOrdering:
12 | def __init__(self, objective: Objective, default_order: bool = True):
13 | self.objective = objective
14 | self._var_order: List[Variable] = []
15 | self._var_name_to_index: Dict[str, int] = {}
16 | if default_order:
17 | self._compute_default_order(objective)
18 |
19 | def _compute_default_order(self, objective: Objective):
20 | assert not self._var_order and not self._var_name_to_index
21 | cur_idx = 0
22 | for variable_name, variable in objective.optim_vars.items():
23 | if variable_name in self._var_name_to_index:
24 | continue
25 | self._var_order.append(variable)
26 | self._var_name_to_index[variable_name] = cur_idx
27 | cur_idx += 1
28 |
29 | def index_of(self, key: str) -> int:
30 | return self._var_name_to_index[key]
31 |
32 | def __getitem__(self, index) -> Variable:
33 | return self._var_order[index]
34 |
35 | def __iter__(self):
36 | return iter(self._var_order)
37 |
38 | def append(self, var: Variable):
39 | if var.name in self._var_name_to_index:
40 | raise ValueError(
41 | f"Variable {var.name} has already been added to the order."
42 | )
43 | if var.name not in self.objective.optim_vars:
44 | raise ValueError(
45 | f"Variable {var.name} is not an optimization variable for the objective."
46 | )
47 | self._var_order.append(var)
48 | self._var_name_to_index[var.name] = len(self._var_order) - 1
49 |
50 | def remove(self, var: Variable):
51 | self._var_order.remove(var)
52 | del self._var_name_to_index[var.name]
53 |
54 | def extend(self, variables: Sequence[Variable]):
55 | for var in variables:
56 | self.append(var)
57 |
58 | @property
59 | def complete(self):
60 | return len(self._var_order) == self.objective.size_variables()
61 |
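62 | # Example (an illustrative sketch, not part of the original file): building
63 | # a custom ordering, assuming `objective` is a theseus Objective with
64 | # optimization variables named "x" and "y":
65 | #
66 | #   ordering = VariableOrdering(objective, default_order=False)
67 | #   ordering.extend([objective.optim_vars["y"], objective.optim_vars["x"]])
68 | #   ordering.index_of("y")  # -> 0
69 | #   ordering.complete       # -> True once every optim var has been added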
--------------------------------------------------------------------------------
/theseus/py.typed:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/facebookresearch/theseus/c8583de41824613fb135ee9bad0e930ded6be404/theseus/py.typed
--------------------------------------------------------------------------------
/theseus/third_party/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/facebookresearch/theseus/c8583de41824613fb135ee9bad0e930ded6be404/theseus/third_party/__init__.py
--------------------------------------------------------------------------------
/theseus/third_party/utils.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 |
4 | # Obtained from https://github.com/pytorch/pytorch/issues/34704#issuecomment-878940122
5 | def grid_sample(image, optical):
6 | """
7 | Custom implementation for torch.nn.functional.grid_sample() to avoid this warning:
8 | > "RuntimeError: derivative for aten::grid_sampler_2d_backward is not implemented"
9 | """
10 | N, C, IH, IW = image.shape
11 | _, H, W, _ = optical.shape
12 | ix = optical[..., 0]
13 | iy = optical[..., 1]
14 | ix = ((ix + 1) / 2) * (IW - 1)
15 | iy = ((iy + 1) / 2) * (IH - 1)
16 | with torch.no_grad():
17 | ix_nw = torch.floor(ix)
18 | iy_nw = torch.floor(iy)
19 | ix_ne = ix_nw + 1
20 | iy_ne = iy_nw
21 | ix_sw = ix_nw
22 | iy_sw = iy_nw + 1
23 | ix_se = ix_nw + 1
24 | iy_se = iy_nw + 1
25 | nw = (ix_se - ix) * (iy_se - iy)
26 | ne = (ix - ix_sw) * (iy_sw - iy)
27 | sw = (ix_ne - ix) * (iy - iy_ne)
28 | se = (ix - ix_nw) * (iy - iy_nw)
29 | with torch.no_grad():
30 | ix_nw = torch.clamp(ix_nw, 0, IW - 1)
31 | iy_nw = torch.clamp(iy_nw, 0, IH - 1)
32 | ix_ne = torch.clamp(ix_ne, 0, IW - 1)
33 | iy_ne = torch.clamp(iy_ne, 0, IH - 1)
34 | ix_sw = torch.clamp(ix_sw, 0, IW - 1)
35 | iy_sw = torch.clamp(iy_sw, 0, IH - 1)
36 | ix_se = torch.clamp(ix_se, 0, IW - 1)
37 | iy_se = torch.clamp(iy_se, 0, IH - 1)
38 | image = image.view(N, C, IH * IW)
39 | nw_val = torch.gather(
40 | image, 2, (iy_nw * IW + ix_nw).long().view(N, 1, H * W).repeat(1, C, 1)
41 | )
42 | ne_val = torch.gather(
43 | image, 2, (iy_ne * IW + ix_ne).long().view(N, 1, H * W).repeat(1, C, 1)
44 | )
45 | sw_val = torch.gather(
46 | image, 2, (iy_sw * IW + ix_sw).long().view(N, 1, H * W).repeat(1, C, 1)
47 | )
48 | se_val = torch.gather(
49 | image, 2, (iy_se * IW + ix_se).long().view(N, 1, H * W).repeat(1, C, 1)
50 | )
51 | out_val = (
52 | nw_val.view(N, C, H, W) * nw.view(N, 1, H, W)
53 | + ne_val.view(N, C, H, W) * ne.view(N, 1, H, W)
54 | + sw_val.view(N, C, H, W) * sw.view(N, 1, H, W)
55 | + se_val.view(N, C, H, W) * se.view(N, 1, H, W)
56 | )
57 | return out_val
58 |
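59 |
60 | if __name__ == "__main__":
61 |     # Sanity check (an illustrative sketch, not part of the original file):
62 |     # this implementation is expected to match bilinear grid_sample with
63 |     # align_corners=True and border padding, while staying twice
64 |     # differentiable.
65 |     import torch.nn.functional as F
66 |
67 |     img = torch.randn(2, 3, 8, 8, dtype=torch.float64)
68 |     grid = torch.rand(2, 5, 5, 2, dtype=torch.float64) * 2 - 1
69 |     expected = F.grid_sample(
70 |         img, grid, mode="bilinear", padding_mode="border", align_corners=True
71 |     )
72 |     assert torch.allclose(grid_sample(img, grid), expected)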
--------------------------------------------------------------------------------
/theseus/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .sparse_matrix_utils import (
7 | mat_vec,
8 | random_sparse_binary_matrix,
9 | random_sparse_matrix,
10 | sparse_mtv,
11 | sparse_mv,
12 | split_into_param_sizes,
13 | tmat_vec,
14 | )
15 | from .utils import (
16 | Profiler,
17 | Timer,
18 | build_mlp,
19 | check_jacobians,
20 | gather_from_rows_cols,
21 | numeric_grad,
22 | numeric_jacobian,
23 | )
24 |
--------------------------------------------------------------------------------
/theseus/utils/examples/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | import warnings
6 |
7 | from .bundle_adjustment import BundleAdjustmentDataset, Camera, ba_histogram
8 |
9 | try:
10 | from .motion_planning import (
11 | InitialTrajectoryModel,
12 | MotionPlanner,
13 | MotionPlannerObjective,
14 | ScalarCollisionWeightAndCostEpstModel,
15 | ScalarCollisionWeightModel,
16 | TrajectoryDataset,
17 | generate_trajectory_figs,
18 | )
19 | except ModuleNotFoundError:
20 | warnings.warn(
21 | "Unable to import Motion Planning utilities. "
22 | "Please make sure you have matplotlib installed."
23 | )
24 |
25 | try:
26 | from .tactile_pose_estimation import (
27 | TactileMeasModel,
28 | TactilePoseEstimator,
29 | TactilePushingDataset,
30 | TactilePushingTrainer,
31 | TactileWeightModel,
32 | create_tactile_models,
33 | get_tactile_cost_weight_inputs,
34 | get_tactile_initial_optim_vars,
35 | get_tactile_motion_capture_inputs,
36 | get_tactile_nn_measurements_inputs,
37 | get_tactile_poses_from_values,
38 | init_tactile_model_from_file,
39 | update_tactile_pushing_inputs,
40 | visualize_tactile_push2d,
41 | )
42 | except ModuleNotFoundError:
43 | warnings.warn(
44 | "Unable to import Tactile Pose Estimation utilities. "
45 | "Please make sure you have matplotlib and omegaconf installed."
46 | )
47 |
48 |
49 | from .pose_graph import PoseGraphDataset, PoseGraphEdge, pg_histogram
50 |
--------------------------------------------------------------------------------
/theseus/utils/examples/bundle_adjustment/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .data import BundleAdjustmentDataset, Camera, ba_histogram
7 |
--------------------------------------------------------------------------------
/theseus/utils/examples/bundle_adjustment/util.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import math
7 | from typing import Tuple
8 |
9 | import torch
10 |
11 |
12 | # ------------------------------------------------------------ #
13 | # --------------------------- LOSSES ------------------------- #
14 | # ------------------------------------------------------------ #
15 | def soft_loss_cauchy(
16 | x: torch.Tensor, radius: torch.Tensor
17 | ) -> Tuple[torch.Tensor, torch.Tensor]:
18 | ratio = (x + radius) / radius
19 | val = torch.log(ratio) * radius
20 | der = 1.0 / ratio
21 | return val, der
22 |
23 |
24 | def soft_loss_huber_like(
25 | x: torch.Tensor, radius: torch.Tensor
26 | ) -> Tuple[torch.Tensor, torch.Tensor]:
27 | ratio = (x + radius) / radius
28 | sq = torch.sqrt(ratio)
29 | val = (sq - 1) * radius
30 | der = 0.5 / sq
31 | return val, der
32 |
33 |
34 | # ------------------------------------------------------------ #
35 | # ----------------------------- RNG -------------------------- #
36 | # ------------------------------------------------------------ #
37 | # returns a uniformly random point on the 2-sphere
38 | def random_s2(dtype: torch.dtype = torch.float64) -> torch.Tensor:
39 | theta = torch.rand(()) * math.tau
40 | z = torch.rand(()) * 2 - 1
41 | r = torch.sqrt(1 - z**2)
42 | return torch.tensor([r * torch.cos(theta), r * torch.sin(theta), z]).to(dtype=dtype)
43 |
44 |
45 | # returns a uniformly random point on the 3-sphere
46 | def random_s3(dtype: torch.dtype = torch.float64) -> torch.Tensor:
47 | u, v, w = torch.rand(3)
48 | return torch.tensor(
49 | [
50 | torch.sqrt(1 - u) * torch.sin(math.tau * v),
51 | torch.sqrt(1 - u) * torch.cos(math.tau * v),
52 | torch.sqrt(u) * torch.sin(math.tau * w),
53 | torch.sqrt(u) * torch.cos(math.tau * w),
54 | ]
55 | ).to(dtype=dtype)
56 |
57 |
58 | def random_small_quaternion(
59 |     max_degrees: float, min_degrees: float = 0.0, dtype: torch.dtype = torch.float64
60 | ) -> torch.Tensor:
61 | x, y, z = random_s2(dtype=dtype)
62 | theta = (
63 | (min_degrees + (max_degrees - min_degrees) * torch.rand((), dtype=dtype))
64 | * math.tau
65 | / 360.0
66 | )
67 | c, s = torch.cos(theta), torch.sin(theta)
68 | return torch.tensor([c, s * x, s * y, s * z])
69 |
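70 |
71 | if __name__ == "__main__":
72 |     # Example (an illustrative sketch, not part of the original file): the
73 |     # second value returned by the soft losses is d(val)/dx, which can be
74 |     # verified with a finite difference.
75 |     x = torch.tensor([1.0, 4.0], dtype=torch.float64)
76 |     radius = torch.tensor(2.0, dtype=torch.float64)
77 |     eps = 1e-7
78 |     for loss in (soft_loss_cauchy, soft_loss_huber_like):
79 |         val, der = loss(x, radius)
80 |         num_der = (loss(x + eps, radius)[0] - val) / eps
81 |         assert torch.allclose(der, num_der, atol=1e-5)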
--------------------------------------------------------------------------------
/theseus/utils/examples/motion_planning/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .misc import TrajectoryDataset, generate_trajectory_figs
7 | from .models import (
8 | InitialTrajectoryModel,
9 | ScalarCollisionWeightAndCostEpstModel,
10 | ScalarCollisionWeightModel,
11 | )
12 | from .motion_planner import MotionPlanner, MotionPlannerObjective
13 |
--------------------------------------------------------------------------------
/theseus/utils/examples/pose_graph/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .dataset import (
7 | PoseGraphDataset,
8 | PoseGraphEdge,
9 | pg_histogram,
10 | read_2D_g2o_file,
11 | read_3D_g2o_file,
12 | )
13 |
--------------------------------------------------------------------------------
/theseus/utils/examples/tactile_pose_estimation/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | from .misc import TactilePushingDataset, visualize_tactile_push2d
6 | from .models import (
7 | TactileMeasModel,
8 | TactileWeightModel,
9 | create_tactile_models,
10 | get_tactile_cost_weight_inputs,
11 | get_tactile_initial_optim_vars,
12 | get_tactile_motion_capture_inputs,
13 | get_tactile_nn_measurements_inputs,
14 | get_tactile_poses_from_values,
15 | init_tactile_model_from_file,
16 | update_tactile_pushing_inputs,
17 | )
18 | from .pose_estimator import TactilePoseEstimator
19 | from .trainer import TactilePushingTrainer
20 |
--------------------------------------------------------------------------------
/torchkin/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) Meta Platforms, Inc. and affiliates.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/torchkin/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include LICENSE README.md
2 | include py.typed
3 | include requirements.txt
4 |
--------------------------------------------------------------------------------
/torchkin/requirements.txt:
--------------------------------------------------------------------------------
1 | urdf_parser_py>=0.0.4
2 | torchlie
3 |
--------------------------------------------------------------------------------
/torchkin/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # Copyright (c) Meta Platforms, Inc. and affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 | import setuptools
7 | from pathlib import Path
8 |
9 | torchkin_path = Path("torchkin")
10 | with open(torchkin_path / "__init__.py", "r") as f:
11 | for line in f:
12 | if "__version__" in line:
13 | version = line.split("__version__ = ")[1].rstrip().strip('"')
14 |
15 | with open("README.md", "r") as fh:
16 | long_description = fh.read()
17 |
18 |
19 | def parse_requirements_file(path):
20 | with open(path) as f:
21 | reqs = [line.strip() for line in f]
22 | return reqs
23 |
24 |
25 | reqs_main = parse_requirements_file("requirements.txt")
26 | setuptools.setup(
27 | name="torchkin",
28 | version=version,
29 | author="Meta Research",
30 | description="Torch extension for differentiable kinematics.",
31 | long_description=long_description,
32 | long_description_content_type="text/markdown",
33 | url="https://github.com/facebookresearch/theseus/lie",
34 | keywords="lie groups, differentiable optimization",
35 | packages=["torchkin"],
36 | classifiers=[
37 | "Programming Language :: Python :: 3",
38 | "License :: OSI Approved :: MIT License",
39 | "Intended Audience :: Science/Research",
40 | "Topic :: Scientific/Engineering :: Artificial Intelligence",
41 | ],
42 | install_requires=reqs_main,
43 | python_requires=">=3.8",
44 | )
45 |
--------------------------------------------------------------------------------
/torchkin/torchkin/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | __version__ = "0.1.1"
6 |
7 | from .forward_kinematics import get_forward_kinematics_fns
8 | from .robot import Robot
9 |
--------------------------------------------------------------------------------
/torchkin/torchkin/py.typed:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/facebookresearch/theseus/c8583de41824613fb135ee9bad0e930ded6be404/torchkin/torchkin/py.typed
--------------------------------------------------------------------------------
/torchkin/torchkin/third_party/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/facebookresearch/theseus/c8583de41824613fb135ee9bad0e930ded6be404/torchkin/torchkin/third_party/__init__.py
--------------------------------------------------------------------------------
/torchkin/torchkin/third_party/urdf_parser_py/LICENSE:
--------------------------------------------------------------------------------
1 | Redistribution and use in source and binary forms, with or without
2 | modification, are permitted provided that the following conditions
3 | are met:
4 |
5 | * Redistributions of source code must retain the above copyright
6 | notice, this list of conditions and the following disclaimer.
7 | * Redistributions in binary form must reproduce the above
8 | copyright notice, this list of conditions and the following
9 | disclaimer in the documentation and/or other materials provided
10 | with the distribution.
11 | * Neither the name of copyright holder nor the names of its
12 | contributors may be used to endorse or promote products derived
13 | from this software without specific prior written permission.
14 |
15 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 | FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 | COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 | INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 | BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 | LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
25 | ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 | POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------
/torchkin/torchkin/third_party/urdf_parser_py/__init__.py:
--------------------------------------------------------------------------------
1 | # This is a partial copy of https://github.com/ros/urdf_parser_py/tree/1.2.1/src/urdf_parser_py
2 | # with only import paths changed.
3 |
--------------------------------------------------------------------------------
/torchkin/torchkin/third_party/urdf_parser_py/display_urdf.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import argparse
3 |
4 | from torchkin.third_party.urdf_parser_py.urdf import URDF
5 |
6 |
7 | def main():
8 | parser = argparse.ArgumentParser(usage='Load an URDF file')
9 | parser.add_argument('file', type=argparse.FileType('r'),
10 | help='File to load. Use - for stdin')
11 | parser.add_argument('-o', '--output', type=argparse.FileType('w'),
12 | default=None, help='Dump file to XML')
13 | args = parser.parse_args()
14 |
15 | robot = URDF.from_xml_string(args.file.read())
16 |
17 | print(robot)
18 |
19 | if args.output is not None:
20 | args.output.write(robot.to_xml_string())
--------------------------------------------------------------------------------
/torchkin/torchkin/third_party/urdf_parser_py/xml_reflection/__init__.py:
--------------------------------------------------------------------------------
1 | from torchkin.third_party.urdf_parser_py.xml_reflection.core import *
2 |
--------------------------------------------------------------------------------
/torchkin/torchkin/third_party/urdf_parser_py/xml_reflection/basics.py:
--------------------------------------------------------------------------------
1 | import string
2 | import yaml
3 | import collections.abc
4 | from lxml import etree
5 |
6 | def xml_string(rootXml, addHeader=True):
7 | # Meh
8 | xmlString = etree.tostring(rootXml, pretty_print=True, encoding='unicode')
9 | if addHeader:
10 | xmlString = '\n' + xmlString
11 | return xmlString
12 |
13 |
14 | def dict_sub(obj, keys):
15 | return dict((key, obj[key]) for key in keys)
16 |
17 |
18 | def node_add(doc, sub):
19 | if sub is None:
20 | return None
21 | if type(sub) == str:
22 | return etree.SubElement(doc, sub)
23 | elif isinstance(sub, etree._Element):
24 | doc.append(sub) # This screws up the rest of the tree for prettyprint
25 | return sub
26 | else:
27 | raise Exception('Invalid sub value')
28 |
29 |
30 | def pfloat(x):
31 | return str(x).rstrip('.')
32 |
33 |
34 | def xml_children(node):
35 | children = node.getchildren()
36 |
37 | def predicate(node):
38 | return not isinstance(node, etree._Comment)
39 | return list(filter(predicate, children))
40 |
41 |
42 | def isstring(obj):
43 | try:
44 | return isinstance(obj, basestring)
45 | except NameError:
46 | return isinstance(obj, str)
47 |
48 |
49 | def to_yaml(obj):
50 | """ Simplify yaml representation for pretty printing """
51 | # Is there a better way to do this by adding a representation with
52 | # yaml.Dumper?
53 | # Ordered dict: http://pyyaml.org/ticket/29#comment:11
54 | if obj is None or isstring(obj):
55 | out = str(obj)
56 | elif type(obj) in [int, float, bool]:
57 | return obj
58 | elif hasattr(obj, 'to_yaml'):
59 | out = obj.to_yaml()
60 | elif isinstance(obj, etree._Element):
61 | out = etree.tostring(obj, pretty_print=True)
62 | elif type(obj) == dict:
63 | out = {}
64 | for (var, value) in obj.items():
65 | out[str(var)] = to_yaml(value)
66 | elif hasattr(obj, 'tolist'):
67 | # For numpy objects
68 | out = to_yaml(obj.tolist())
69 | elif isinstance(obj, collections.abc.Iterable):
70 | out = [to_yaml(item) for item in obj]
71 | else:
72 | out = str(obj)
73 | return out
74 |
75 |
76 | class SelectiveReflection(object):
77 | def get_refl_vars(self):
78 | return list(vars(self).keys())
79 |
80 |
81 | class YamlReflection(SelectiveReflection):
82 | def to_yaml(self):
83 | raw = dict((var, getattr(self, var)) for var in self.get_refl_vars())
84 | return to_yaml(raw)
85 |
86 | def __str__(self):
87 | # Good idea? Will it remove other important things?
88 | return yaml.dump(self.to_yaml()).rstrip()
--------------------------------------------------------------------------------
/torchlie/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) Meta Platforms, Inc. and affiliates.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/torchlie/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include LICENSE README.md
2 | include py.typed
3 |
--------------------------------------------------------------------------------
/torchlie/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # Copyright (c) Meta Platforms, Inc. and affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 | import setuptools
7 | from pathlib import Path
8 |
9 | lie_path = Path("torchlie")
10 | with open(lie_path / "__init__.py", "r") as f:
11 | for line in f:
12 | if "__version__" in line:
13 | version = line.split("__version__ = ")[1].rstrip().strip('"')
14 |
15 | with open("README.md", "r") as fh:
16 | long_description = fh.read()
17 |
18 |
19 | setuptools.setup(
20 | name="torchlie",
21 | version=version,
22 | author="Meta Research",
23 | description="Torch extension for differentiable Lie groups.",
24 | long_description=long_description,
25 | long_description_content_type="text/markdown",
26 | url="https://github.com/facebookresearch/theseus/lie",
27 | keywords="lie groups, differentiable optimization",
28 | packages=["torchlie", "torchlie.functional"],
29 | classifiers=[
30 | "Programming Language :: Python :: 3",
31 | "License :: OSI Approved :: MIT License",
32 | "Intended Audience :: Science/Research",
33 | "Topic :: Scientific/Engineering :: Artificial Intelligence",
34 | ],
35 | python_requires=">=3.8",
36 | )
37 |
--------------------------------------------------------------------------------
/torchlie/torchlie/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | __version__ = "0.1.1.dev0"
6 |
7 | from .global_params import reset_global_params, set_global_params
8 | from .lie_tensor import ( # usort: skip
9 | LieTensor,
10 | adj,
11 | as_euclidean,
12 | as_lietensor,
13 | cast,
14 | compose,
15 | from_tensor,
16 | inv,
17 | jcompose,
18 | jinv,
19 | jlog,
20 | jtransform,
21 | juntransform,
22 | left_act,
23 | left_project,
24 | local,
25 | log,
26 | transform,
27 | untransform,
28 | )
29 | from .types import SE3, SO3, ltype
30 |
--------------------------------------------------------------------------------
/torchlie/torchlie/functional/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | from .check_contexts import enable_checks
6 | from .se3_impl import _fns as SE3
7 | from .so3_impl import _fns as SO3
8 |
--------------------------------------------------------------------------------
/torchlie/torchlie/functional/check_contexts.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import threading
7 | from typing import Callable
8 |
9 | import torch
10 |
11 |
12 | class _LieGroupCheckContext:
13 | contexts = threading.local()
14 |
15 | @classmethod
16 | def get_context(cls):
17 | if not hasattr(cls.contexts, "check_lie_group"):
18 | cls.contexts.check_lie_group = False
19 | return cls.contexts.check_lie_group
20 |
21 | @classmethod
22 | def set_context(cls, check_lie_group: bool):
23 | cls.contexts.check_lie_group = check_lie_group
24 |
25 |
26 | class enable_checks(_LieGroupCheckContext):
27 | def __init__(self) -> None:
28 | pass
29 |
30 | def __enter__(self) -> None:
31 | self.prev = _LieGroupCheckContext.get_context()
32 | _LieGroupCheckContext.set_context(True)
33 |
34 | def __exit__(self, typ, value, traceback) -> None:
35 | _LieGroupCheckContext.set_context(self.prev)
36 |
37 |
38 | @torch.no_grad()
39 | def checks_base(tensor: torch.Tensor, check_impl: Callable[[torch.Tensor], None]):
40 | if not _LieGroupCheckContext.get_context():
41 | return
42 | if torch._C._functorch.is_batchedtensor(tensor):
43 | raise RuntimeError("Lie group checks must be turned off to run with vmap.")
44 | check_impl(tensor)
45 |
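46 |
47 | if __name__ == "__main__":
48 |     # Example (an illustrative sketch, not part of the original file):
49 |     # checks are disabled by default, so `checks_base` is a no-op until
50 |     # an `enable_checks` context is entered.
51 |     def _check_positive(t: torch.Tensor) -> None:
52 |         if (t <= 0).any():
53 |             raise ValueError("expected positive entries")
54 |
55 |     t = torch.tensor([-1.0])
56 |     checks_base(t, _check_positive)  # silently skipped
57 |     with enable_checks():
58 |         try:
59 |             checks_base(t, _check_positive)  # now runs and raises
60 |         except ValueError as err:
61 |             print(f"caught: {err}")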
--------------------------------------------------------------------------------
/torchlie/torchlie/functional/constants.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | import math
6 | from typing import Optional, Union
7 |
8 | import torch
9 |
10 | TEST_EPS = 5e-7
11 | EPS = 1e-10
12 | PI = math.pi
13 |
14 | __FROM_THESEUS_LAYER_TOKEN__ = "__FROM_THESEUS_LAYER_TOKEN__"
15 |
16 |
17 | _NON_ZERO = 1.0
18 |
19 | _INF = torch.inf
20 |
21 | _NEAR_ZERO_D_ONE_MINUS_COSINE_BY_THETA2 = -1 / 12.0
22 |
23 | _NEAR_ZERO_D_THETA_MINUS_SINE_BY_THETA3 = -1 / 60.0
24 |
25 | DeviceType = Optional[Union[str, torch.device]]
26 |
--------------------------------------------------------------------------------
/torchlie/torchlie/functional/utils.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from typing import List
7 |
8 | import torch
9 |
10 |
11 | def check_jacobians_list(jacobians: List[torch.Tensor]):
12 | if len(jacobians) != 0:
13 | raise ValueError("jacobians list to be populated must be empty.")
14 |
15 |
16 | def get_module(module_name):
17 | return __import__(module_name, fromlist=[""])
18 |
19 |
20 | def get_cls_module(cls):
21 | return get_module(cls.__module__)
22 |
23 |
24 | def shape_err_msg(data_type: str, expected_shape: str, tensor_shape: torch.Size) -> str:
25 | return f"{data_type} must have shape {expected_shape} but got shape {tensor_shape}."
26 |
27 |
28 | def fill_dims(tensor: torch.Tensor, dim: int) -> torch.Tensor:
29 | return tensor.view((1,) * (dim - tensor.ndim) + tensor.shape)
30 |
31 |
32 | def permute_op_dim(dim: int, op_dim: int, group_dim: int):
33 | return (
34 | [*range(dim - op_dim - group_dim, dim - group_dim)]
35 | + [*range(0, dim - op_dim - group_dim)]
36 | + [*range(dim - group_dim, dim)]
37 | )
38 |
39 |
40 | def unpermute_op_dim(dim: int, op_dim: int, group_dim: int):
41 | return (
42 | [*range(op_dim, dim - group_dim)]
43 | + [*range(0, op_dim)]
44 | + [*range(dim - group_dim, dim)]
45 | )
46 |
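47 |
48 | if __name__ == "__main__":
49 |     # Example (an illustrative sketch, not part of the original file):
50 |     # `unpermute_op_dim` is the inverse permutation of `permute_op_dim`.
51 |     x = torch.randn(5, 4, 3, 3)  # two batch dims, two group dims
52 |     p = permute_op_dim(x.ndim, op_dim=1, group_dim=2)  # -> [1, 0, 2, 3]
53 |     u = unpermute_op_dim(x.ndim, op_dim=1, group_dim=2)
54 |     assert torch.equal(x.permute(p).permute(u), x)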
--------------------------------------------------------------------------------
/torchlie/torchlie/global_params.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | from dataclasses import dataclass
6 | from typing import Any, Dict
7 |
8 | import torch
9 |
10 |
11 | def _CHECK_DTYPE_SUPPORTED(dtype):
12 | if dtype not in [torch.float32, torch.float64]:
13 | raise ValueError(
14 | f"Unsupported data type {dtype}. "
15 | "Theseus only supports 32- and 64-bit tensors."
16 | )
17 |
18 |
19 | @dataclass
20 | class _TorchLieGlobalParams:
21 | so3_near_pi_eps_float32: float = 0
22 | so3_near_zero_eps_float32: float = 0
23 | so3_matrix_eps_float32: float = 0
24 | so3_quat_eps_float32: float = 0
25 | so3_hat_eps_float32: float = 0
26 | se3_hat_eps_float32: float = 0
27 | so3_near_pi_eps_float64: float = 0
28 | so3_near_zero_eps_float64: float = 0
29 | so3_matrix_eps_float64: float = 0
30 | so3_quat_eps_float64: float = 0
31 | so3_hat_eps_float64: float = 0
32 | se3_hat_eps_float64: float = 0
33 | _allow_passthrough_ops: bool = False
34 | _faster_log_maps: bool = False
35 |
36 | def __init__(self):
37 | self.reset()
38 |
39 | def get_eps(self, ltype: str, attr: str, dtype: torch.dtype) -> float:
40 | _CHECK_DTYPE_SUPPORTED(dtype)
41 | attr_name = f"{ltype}_{attr}_eps_{str(dtype)[6:]}"
42 | return getattr(self, attr_name)
43 |
44 | def reset(self) -> None:
45 | self.so3_near_pi_eps_float32 = 1e-2
46 | self.so3_near_zero_eps_float32 = 1e-2
47 | self.so3_d_near_zero_eps_float32 = 2e-1
48 | self.so3_matrix_eps_float32 = 4e-4
49 | self.so3_quat_eps_float32 = 2e-4
50 | self.so3_hat_eps_float32 = 5e-6
51 | self.se3_hat_eps_float32 = 5e-6
52 | self.so3_near_pi_eps_float64 = 1e-7
53 | self.so3_near_zero_eps_float64 = 5e-3
54 | self.so3_d_near_zero_eps_float64 = 1e-2
55 | self.so3_matrix_eps_float64 = 1e-6
56 | self.so3_quat_eps_float64 = 5e-7
57 | self.so3_hat_eps_float64 = 5e-7
58 | self.se3_hat_eps_float64 = 5e-7
59 |
60 |
61 | _TORCHLIE_GLOBAL_PARAMS = _TorchLieGlobalParams()
62 |
63 |
64 | def set_global_params(options: Dict[str, Any]) -> None:
65 | for k, v in options.items():
66 | if not hasattr(_TORCHLIE_GLOBAL_PARAMS, k):
67 | raise ValueError(f"{k} is not a valid global option for torchlie.")
68 | setattr(_TORCHLIE_GLOBAL_PARAMS, k, v)
69 |
70 |
71 | def reset_global_params() -> None:
72 | _TORCHLIE_GLOBAL_PARAMS.reset()
73 |
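74 |
75 | if __name__ == "__main__":
76 |     # Example (an illustrative sketch, not part of the original file):
77 |     # overriding one of the epsilon values above, then restoring defaults.
78 |     set_global_params({"so3_near_zero_eps_float32": 2e-2})
79 |     print(_TORCHLIE_GLOBAL_PARAMS.get_eps("so3", "near_zero", torch.float32))
80 |     reset_global_params()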
--------------------------------------------------------------------------------
/torchlie/torchlie/py.typed:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/facebookresearch/theseus/c8583de41824613fb135ee9bad0e930ded6be404/torchlie/torchlie/py.typed
--------------------------------------------------------------------------------
/torchlie/torchlie/types.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 | from enum import Enum
6 | from typing import TYPE_CHECKING, Any, Callable, List, Optional, Protocol, Tuple, Union
7 |
8 | import torch
9 |
10 | from .functional import SE3 as _se3_impl
11 | from .functional import SO3 as _so3_impl
12 | from .functional.constants import DeviceType
13 | from .functional.lie_group import LieGroupFns
14 |
15 | if TYPE_CHECKING:
16 | from .lie_tensor import LieTensor, _LieTensorBase
17 |
18 |
19 | # The next two are similar to the ones in functional, except they return a LieTensor.
20 | class _RandFnType(Protocol):
21 | def __call__(
22 | self,
23 | *size: Any,
24 | generator: Optional[torch.Generator] = None,
25 | dtype: Optional[torch.dtype] = None,
26 | device: DeviceType = None,
27 | requires_grad: bool = False,
28 | ) -> "LieTensor":
29 | pass
30 |
31 |
32 | class _IdentityFnType(Protocol):
33 | def __call__(
34 | self,
35 | *size: int,
36 | dtype: Optional[torch.dtype] = None,
37 | device: DeviceType = None,
38 | requires_grad: bool = False,
39 | ) -> "LieTensor":
40 | pass
41 |
42 |
43 | Device = Union[torch.device, str, None]
44 | TensorType = Union[torch.Tensor, "_LieTensorBase"]
45 | _JFnReturnType = Tuple[List[torch.Tensor], TensorType]
46 |
47 |
48 | class _ltype(Enum):
49 | SE3 = 0
50 | SO3 = 1
51 |
52 |
53 | def _get_fn_lib(ltype: _ltype):
54 | return {
55 | _ltype.SE3: _se3_impl,
56 | _ltype.SO3: _so3_impl,
57 | }[ltype]
58 |
59 |
60 | def _eval_op(
61 | fn_lib: LieGroupFns,
62 | op_name: str,
63 | input0: torch.Tensor,
64 | jacobians: Optional[List[torch.Tensor]] = None,
65 | ) -> torch.Tensor:
66 | return getattr(fn_lib, op_name)(input0, jacobians=jacobians)
67 |
68 |
69 | class ltype:
70 | def __init__(self, lt: _ltype):
71 | self._lt = lt
72 | self._fn_lib = _get_fn_lib(lt)
73 |
74 | rand: _RandFnType
75 | randn: _RandFnType
76 | identity: _IdentityFnType
77 | _call_impl: Callable[[torch.Tensor], "LieTensor"]
78 |
79 | def __call__(self, tensor: torch.Tensor) -> "LieTensor":
80 | return self._call_impl(tensor)
81 |
82 | _create_lie_tensor: Callable[[torch.Tensor, "ltype"], "LieTensor"]
83 |
84 | def exp(self, tangent_vector: torch.Tensor) -> "LieTensor":
85 | return self._create_lie_tensor(
86 | _eval_op(self._fn_lib, "exp", tangent_vector), self
87 | )
88 |
89 | def jexp(self, tangent_vector: torch.Tensor) -> _JFnReturnType:
90 | jacs: List[torch.Tensor] = []
91 | op_res: TensorType = self._fn_lib.exp(tangent_vector, jacobians=jacs)
92 | return jacs, self._create_lie_tensor(op_res, self)
93 |
94 | def hat(self, tangent_vector: torch.Tensor) -> torch.Tensor:
95 | return _eval_op(self._fn_lib, "hat", tangent_vector)
96 |
97 | def vee(self, matrix: torch.Tensor) -> torch.Tensor:
98 | return _eval_op(self._fn_lib, "vee", matrix)
99 |
100 | def lift(self, matrix: torch.Tensor) -> torch.Tensor:
101 | return _eval_op(self._fn_lib, "lift", matrix)
102 |
103 | def project(self, matrix: torch.Tensor) -> torch.Tensor:
104 | return _eval_op(self._fn_lib, "project", matrix)
105 |
106 | def __str__(self) -> str:
107 | return self._lt.name
108 |
109 |
110 | SE3 = ltype(_ltype.SE3)
111 | SO3 = ltype(_ltype.SO3)
112 |
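113 | # Example (an illustrative sketch, not part of the original file): the two
114 | # singletons above act as factories for LieTensor objects, e.g.
115 | #
116 | #   import torch
117 | #   from torchlie import SO3
118 | #
119 | #   g = SO3.exp(torch.zeros(2, 3))         # batch of identity rotations
120 | #   jacs, h = SO3.jexp(torch.randn(2, 3))  # exp together with its jacobian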
--------------------------------------------------------------------------------
/tutorials/README.md:
--------------------------------------------------------------------------------
1 | Theseus includes a number of tutorials to help users get started:
2 | - [Tutorial 0](00_introduction.ipynb) introduces Theseus and its fundamental concepts, and shows how to use its different basic building blocks.
3 | - [Tutorial 1](01_least_squares_optimization.ipynb) describes how to model and solve a simple least-squares optimization problem.
4 | - [Tutorial 2](02_differentiating_theseus_layer.ipynb) describes how to model and solve a collection of least-squares optimization problems with shared parameters.
5 | - [Tutorial 3](03_custom_cost_functions.ipynb) describes how to write custom cost functions for use in Theseus optimization problems.
6 | - [Tutorial 4](04_motion_planning.ipynb) shows how to implement the GPMP2 motion-planning algorithm [(Mukadam et al., 2018)](https://arxiv.org/abs/1707.07383).
7 | - [Tutorial 5](05_differentiable_motion_planning.ipynb) shows how to implement a differentiable motion planner, similar to dGPMP2 [(Bhardwaj et al., 2020)](https://arxiv.org/pdf/1907.09591.pdf).
8 |
--------------------------------------------------------------------------------
/tutorials/data/motion_planning_2d/im_sdf/tarpit/0_im.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/facebookresearch/theseus/c8583de41824613fb135ee9bad0e930ded6be404/tutorials/data/motion_planning_2d/im_sdf/tarpit/0_im.png
--------------------------------------------------------------------------------
/tutorials/data/motion_planning_2d/im_sdf/tarpit/0_sdf.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/facebookresearch/theseus/c8583de41824613fb135ee9bad0e930ded6be404/tutorials/data/motion_planning_2d/im_sdf/tarpit/0_sdf.npy
--------------------------------------------------------------------------------
/tutorials/data/motion_planning_2d/im_sdf/tarpit/1_im.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/facebookresearch/theseus/c8583de41824613fb135ee9bad0e930ded6be404/tutorials/data/motion_planning_2d/im_sdf/tarpit/1_im.png
--------------------------------------------------------------------------------
/tutorials/data/motion_planning_2d/im_sdf/tarpit/1_sdf.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/facebookresearch/theseus/c8583de41824613fb135ee9bad0e930ded6be404/tutorials/data/motion_planning_2d/im_sdf/tarpit/1_sdf.npy
--------------------------------------------------------------------------------
/tutorials/data/motion_planning_2d/meta.yaml:
--------------------------------------------------------------------------------
1 | env_params:
2 | padlen: 11
3 | x_lims:
4 | - -5.0
5 | - 5.0
6 | y_lims:
7 | - -5.0
8 | - 5.0
9 | im_size: 128
10 | num_envs: 2
11 | probs_per_env: 1
12 | map_types: [tarpit]
13 |
--------------------------------------------------------------------------------
/tutorials/data/motion_planning_2d/opt_trajs_gpmp2/tarpit/env_0_prob_0.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/facebookresearch/theseus/c8583de41824613fb135ee9bad0e930ded6be404/tutorials/data/motion_planning_2d/opt_trajs_gpmp2/tarpit/env_0_prob_0.npz
--------------------------------------------------------------------------------
/tutorials/data/motion_planning_2d/opt_trajs_gpmp2/tarpit/env_1_prob_0.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/facebookresearch/theseus/c8583de41824613fb135ee9bad0e930ded6be404/tutorials/data/motion_planning_2d/opt_trajs_gpmp2/tarpit/env_1_prob_0.npz
--------------------------------------------------------------------------------
/tutorials/data/motion_planning_2d/opt_trajs_gpmp2/tarpit/image_env_0_prob_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/facebookresearch/theseus/c8583de41824613fb135ee9bad0e930ded6be404/tutorials/data/motion_planning_2d/opt_trajs_gpmp2/tarpit/image_env_0_prob_0.png
--------------------------------------------------------------------------------
/tutorials/data/motion_planning_2d/opt_trajs_gpmp2/tarpit/image_env_1_prob_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/facebookresearch/theseus/c8583de41824613fb135ee9bad0e930ded6be404/tutorials/data/motion_planning_2d/opt_trajs_gpmp2/tarpit/image_env_1_prob_0.png
--------------------------------------------------------------------------------
/tutorials/fig/theseus_objective.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/facebookresearch/theseus/c8583de41824613fb135ee9bad0e930ded6be404/tutorials/fig/theseus_objective.png
--------------------------------------------------------------------------------