├── factrs-bench ├── .gitignore ├── src │ ├── lib.rs │ └── sophus_se3.rs ├── Cargo.toml ├── cpp │ ├── CMakeLists.txt │ └── bench.cpp ├── benches │ └── g2o.rs └── plot.py ├── .gitmodules ├── examples ├── data │ └── README.md ├── readme.rs ├── graph_viz.rs ├── serde.rs ├── g2o.rs └── gps.rs ├── rustfmt.toml ├── .gitignore ├── factrs-proc ├── src │ ├── robust.rs │ ├── lib.rs │ ├── noise.rs │ ├── residual.rs │ ├── variable.rs │ └── fac.rs ├── Cargo.toml └── Cargo.lock ├── src ├── residuals │ ├── imu_preint │ │ ├── mod.rs │ │ └── newtypes.rs │ ├── between.rs │ ├── mod.rs │ ├── prior.rs │ └── traits.rs ├── linear │ ├── mod.rs │ ├── factor.rs │ ├── values.rs │ ├── graph.rs │ └── solvers.rs ├── containers │ ├── mod.rs │ ├── order.rs │ ├── symbol.rs │ └── graph.rs ├── linalg │ ├── dual.rs │ ├── forward_prop.rs │ ├── nalgebra_wrap.rs │ ├── mod.rs │ └── numerical_diff.rs ├── noise │ ├── unit.rs │ └── mod.rs ├── optimizers │ ├── macros.rs │ ├── gauss_newton.rs │ ├── mod.rs │ ├── levenberg_marquardt.rs │ └── traits.rs └── variables │ ├── mod.rs │ ├── imu_bias.rs │ ├── so2.rs │ ├── macros.rs │ ├── vector.rs │ └── se2.rs ├── LICENSE-MIT.txt ├── tests ├── serde.rs ├── custom_robust.rs ├── custom_noise.rs ├── custom_residual.rs ├── custom_key.rs └── custom_variable.rs ├── assets ├── references.bib └── katex-header.html ├── .github └── workflows │ └── ci.yml ├── justfile ├── Cargo.toml └── README.md /factrs-bench/.gitignore: -------------------------------------------------------------------------------- 1 | build -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "factrs-typetag"] 2 | path = factrs-typetag 3 | url = git@github.com:contagon/factrs-typetag.git 4 | -------------------------------------------------------------------------------- /examples/data/README.md: 
-------------------------------------------------------------------------------- 1 | Outlier datasets were created via running (with uv installed), 2 | ```bash 3 | ./add_outliers -n 400 -i sphere2500.g2o 4 | ``` -------------------------------------------------------------------------------- /factrs-bench/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod sophus_loader; 2 | mod sophus_se3; 3 | 4 | pub mod sophus { 5 | pub use crate::{sophus_loader::*, sophus_se3::*}; 6 | } 7 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | imports_granularity = "Crate" 2 | group_imports = "StdExternalCrate" 3 | # imports_layout = "HorizontalVertical" 4 | 5 | format_code_in_doc_comments = true 6 | wrap_comments = true -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /build 3 | .vscode 4 | *.g2o 5 | *.svg 6 | *.png 7 | *.sh 8 | perf* 9 | .cache 10 | 11 | factrs-proc/target 12 | 13 | # benchmark results 14 | factrs-bench/benchmarks* 15 | factrs-bench/*.json -------------------------------------------------------------------------------- /factrs-proc/src/robust.rs: -------------------------------------------------------------------------------- 1 | use quote::quote; 2 | use syn::ItemImpl; 3 | 4 | pub fn mark(item: ItemImpl) -> proc_macro2::TokenStream { 5 | if !cfg!(feature = "serde") { 6 | return quote! { #item }; 7 | } 8 | 9 | quote! 
{ 10 | #[typetag::serde] 11 | #item 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /src/residuals/imu_preint/mod.rs: -------------------------------------------------------------------------------- 1 | #![doc = include_str!("README.md")] 2 | 3 | mod newtypes; 4 | pub(crate) use newtypes::ImuState; 5 | pub use newtypes::{Accel, AccelUnbiased, Gravity, Gyro, GyroUnbiased}; 6 | 7 | mod delta; 8 | 9 | mod residual; 10 | pub use residual::{ImuCovariance, ImuPreintegrator}; 11 | -------------------------------------------------------------------------------- /src/linear/mod.rs: -------------------------------------------------------------------------------- 1 | //! Structs & traits for solving linear factor graphs 2 | 3 | mod factor; 4 | pub use factor::LinearFactor; 5 | 6 | mod graph; 7 | pub use graph::LinearGraph; 8 | 9 | mod values; 10 | pub use values::LinearValues; 11 | 12 | mod solvers; 13 | pub use solvers::{CholeskySolver, LUSolver, LinearSolver, QRSolver}; 14 | -------------------------------------------------------------------------------- /src/containers/mod.rs: -------------------------------------------------------------------------------- 1 | //! Various containers for storing variables, residuals, factors, etc. 
2 | 3 | mod symbol; 4 | pub use symbol::{DefaultSymbolHandler, Key, KeyFormatter, Symbol, TypedSymbol}; 5 | 6 | mod values; 7 | pub use values::{Values, ValuesFormatter}; 8 | 9 | mod order; 10 | pub use order::{Idx, ValuesOrder}; 11 | 12 | mod graph; 13 | pub use graph::{Graph, GraphFormatter, GraphOrder}; 14 | 15 | mod factor; 16 | pub use factor::{Factor, FactorBuilder, FactorFormatter}; 17 | -------------------------------------------------------------------------------- /factrs-proc/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "factrs-proc" 3 | version = "0.2.0" 4 | edition = "2021" 5 | license = "MIT" 6 | description = "Proc-macros for factrs" 7 | authors = ["Easton Potokar", "Taylor Pool"] 8 | repository = "https://github.com/rpl-cmu/factrs" 9 | keywords = ["nonlinear", "optimization", "robotics", "estimation", "SLAM"] 10 | categories = ["science::robotics", "mathematics"] 11 | rust-version = "1.84" 12 | 13 | [lib] 14 | name = "factrs_proc" 15 | path = "src/lib.rs" 16 | proc-macro = true 17 | 18 | [dependencies] 19 | proc-macro2 = "1.0.93" 20 | quote = "1.0.38" 21 | syn = { version = "2.0.96", features = ["full"] } 22 | 23 | [features] 24 | serde = [] 25 | -------------------------------------------------------------------------------- /src/linalg/dual.rs: -------------------------------------------------------------------------------- 1 | use nalgebra::{allocator::Allocator, Const}; 2 | 3 | use super::{Dim, RealField, SupersetOf}; 4 | use crate::dtype; 5 | 6 | /// Wrapper for all properties needed for dual numbers 7 | pub trait Numeric: RealField + num_dual::DualNum + SupersetOf + Copy {} 8 | impl + SupersetOf + Copy> Numeric for G {} 9 | 10 | pub type DualVector = num_dual::DualVec; 11 | pub type DualScalar = num_dual::Dual; 12 | 13 | /// Make allocator binds easier for dual numbers 14 | pub trait DualAllocator: 15 | Allocator + Allocator, N> + Allocator> + Allocator 16 | { 17 | } 18 | 
19 | impl< 20 | N: Dim, 21 | T: Allocator + Allocator, N> + Allocator> + Allocator, 22 | > DualAllocator for T 23 | { 24 | } 25 | -------------------------------------------------------------------------------- /src/noise/unit.rs: -------------------------------------------------------------------------------- 1 | use core::fmt; 2 | 3 | use super::NoiseModel; 4 | use crate::linalg::{Const, MatrixX, VectorX}; 5 | 6 | /// A unit noise model. 7 | /// 8 | /// Represents a noise model that does not modify the input, or equal weighting 9 | /// in a [factor](crate::containers::Factor). 10 | #[derive(Clone, Debug)] 11 | #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] 12 | pub struct UnitNoise; 13 | 14 | #[factrs::mark] 15 | impl NoiseModel for UnitNoise { 16 | type Dim = Const; 17 | 18 | fn whiten_vec(&self, v: VectorX) -> VectorX { 19 | v 20 | } 21 | 22 | fn whiten_mat(&self, m: MatrixX) -> MatrixX { 23 | m 24 | } 25 | } 26 | 27 | impl fmt::Display for UnitNoise { 28 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 29 | write!(f, "{:?}", self) 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /examples/readme.rs: -------------------------------------------------------------------------------- 1 | use factrs::{ 2 | assign_symbols, 3 | core::{BetweenResidual, GaussNewton, Graph, Huber, PriorResidual, Values, SO2}, 4 | fac, 5 | traits::*, 6 | }; 7 | 8 | // Assign symbols to variable types 9 | assign_symbols!(X: SO2); 10 | 11 | fn main() { 12 | // Make all the values 13 | let mut values = Values::new(); 14 | 15 | let x = SO2::from_theta(1.0); 16 | let y = SO2::from_theta(2.0); 17 | values.insert(X(0), SO2::identity()); 18 | values.insert(X(1), SO2::identity()); 19 | 20 | // Make the factors & insert into graph 21 | let mut graph = Graph::new(); 22 | let res = PriorResidual::new(x.clone()); 23 | let factor = fac![res, X(0)]; 24 | graph.add_factor(factor); 25 | 26 | let res = 
BetweenResidual::new(y.minus(&x)); 27 | let factor = fac![res, (X(0), X(1)), 0.1 as std, Huber::default()]; 28 | graph.add_factor(factor); 29 | 30 | // Optimize! 31 | let mut opt: GaussNewton = GaussNewton::new_default(graph); 32 | let result = opt.optimize(values).unwrap(); 33 | println!("Results {:#}", result); 34 | } 35 | -------------------------------------------------------------------------------- /src/noise/mod.rs: -------------------------------------------------------------------------------- 1 | //! Noise model representations 2 | //! 3 | //! Represent Gaussian noise models in a factor graph, specifically used when 4 | //! constructing a [factor](crate::containers::Factor). 5 | 6 | use std::fmt::Debug; 7 | 8 | use dyn_clone::DynClone; 9 | 10 | use crate::linalg::{DimName, MatrixX, VectorX}; 11 | 12 | /// The trait for a noise model. 13 | #[cfg_attr(feature = "serde", typetag::serde(tag = "tag"))] 14 | pub trait NoiseModel: Debug + DynClone { 15 | /// The dimension of the noise model 16 | type Dim: DimName 17 | where 18 | Self: Sized; 19 | 20 | fn dim(&self) -> usize 21 | where 22 | Self: Sized, 23 | { 24 | Self::Dim::USIZE 25 | } 26 | 27 | /// Whiten a vector 28 | fn whiten_vec(&self, v: VectorX) -> VectorX; 29 | 30 | /// Whiten a matrix 31 | fn whiten_mat(&self, m: MatrixX) -> MatrixX; 32 | } 33 | 34 | dyn_clone::clone_trait_object!(NoiseModel); 35 | 36 | #[cfg(feature = "serde")] 37 | pub use register_noisemodel as tag_noise; 38 | 39 | mod gaussian; 40 | pub use gaussian::GaussianNoise; 41 | 42 | mod unit; 43 | pub use unit::UnitNoise; 44 | -------------------------------------------------------------------------------- /factrs-bench/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "factrs-bench" 3 | version = "0.2.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | faer.workspace = true 8 | nalgebra.workspace = true 9 | factrs = { version = "0.2.0", path = ".." 
} 10 | pretty_env_logger = "0.5.0" 11 | rayon = "1.10.0" 12 | 13 | # other dependencies 14 | tiny-solver = { version = "0.17" } 15 | # use a custom version of sophus to give 16 | # 1) Add cloning to graph/value structure for use in benchmarks 17 | # 2) Add stopping criteria to LM, so it takes same # of steps. 18 | sophus_opt = { git = "https://github.com/contagon/sophus-rs.git", branch = "clone-cost-fn", features = [ 19 | "simd", 20 | ] } 21 | sophus_lie = { git = "https://github.com/contagon/sophus-rs.git", branch = "clone-cost-fn" } 22 | sophus_autodiff = { git = "https://github.com/contagon/sophus-rs.git", branch = "clone-cost-fn" } 23 | # use an older version of faer to disable multithreaded in sophus 24 | sophus_faer = { version = "=0.20.1", package = "faer" } 25 | 26 | [dev-dependencies] 27 | diol = { version = "0.13.0", default-features = false } 28 | 29 | [[bench]] 30 | name = "g2o" 31 | harness = false 32 | -------------------------------------------------------------------------------- /LICENSE-MIT.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2021 Andreas Longva, Jan Bender and Fenris contributors 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 
12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. -------------------------------------------------------------------------------- /tests/serde.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "serde")] 2 | mod ser_de { 3 | use factrs::{ 4 | containers::Values, residuals::PriorResidual, symbols::X, traits::Residual, 5 | variables::VectorVar1, 6 | }; 7 | 8 | #[test] 9 | fn test_vector_serialize() { 10 | let trait_object = &PriorResidual::new(VectorVar1::new(2.3)) as &dyn Residual; 11 | let json = serde_json::to_string(trait_object).unwrap(); 12 | let expected = r#"{"tag":"PriorResidual>","prior":[2.3]}"#; 13 | println!("json: {}", json); 14 | assert_eq!(json, expected); 15 | } 16 | 17 | #[test] 18 | fn test_vector() { 19 | let json = r#"{"tag":"PriorResidual>","prior":[1.2]}"#; 20 | let trait_object: Box = serde_json::from_str(json).unwrap(); 21 | 22 | let mut values = Values::new(); 23 | values.insert_unchecked(X(0), VectorVar1::new(1.2)); 24 | let error = trait_object.residual(&values, &[X(0).into()])[0]; 25 | 26 | assert_eq!(trait_object.dim_in(), 1); 27 | assert_eq!(trait_object.dim_out(), 1); 28 | assert_eq!(error, 0.0); 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /tests/custom_robust.rs: -------------------------------------------------------------------------------- 1 | use factrs::{dtype, robust::RobustCost}; 2 | 3 | #[derive(Clone, Debug, Default)] 4 | #[cfg_attr(feature = "serde", 
derive(serde::Serialize, serde::Deserialize))] 5 | pub struct DoubleL2; 6 | 7 | #[factrs::mark] 8 | impl RobustCost for DoubleL2 { 9 | fn loss(&self, d2: dtype) -> dtype { 10 | d2 11 | } 12 | 13 | fn weight(&self, _d: dtype) -> dtype { 14 | 2.0 15 | } 16 | } 17 | 18 | factrs::test_robust!(DoubleL2); 19 | 20 | #[cfg(feature = "serde")] 21 | mod ser_de { 22 | use super::*; 23 | 24 | // Make sure it serializes properly 25 | #[test] 26 | fn test_json_serialize() { 27 | let trait_object = &DoubleL2 as &dyn RobustCost; 28 | let json = serde_json::to_string(trait_object).unwrap(); 29 | let expected = r#"{"tag":"DoubleL2"}"#; 30 | println!("json: {}", json); 31 | assert_eq!(json, expected); 32 | } 33 | 34 | #[test] 35 | fn test_json_deserialize() { 36 | let json = r#"{"tag":"DoubleL2"}"#; 37 | let object = DoubleL2; 38 | let trait_object: Box = serde_json::from_str(json).unwrap(); 39 | assert_eq!(trait_object.loss(1.0), object.loss(1.0)); 40 | assert_eq!(trait_object.weight(1.0), object.weight(1.0)); 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /factrs-proc/Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 
3 | version = 3 4 | 5 | [[package]] 6 | name = "factrs-proc" 7 | version = "0.1.0" 8 | dependencies = [ 9 | "proc-macro2", 10 | "quote", 11 | "syn", 12 | ] 13 | 14 | [[package]] 15 | name = "proc-macro2" 16 | version = "1.0.89" 17 | source = "registry+https://github.com/rust-lang/crates.io-index" 18 | checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e" 19 | dependencies = [ 20 | "unicode-ident", 21 | ] 22 | 23 | [[package]] 24 | name = "quote" 25 | version = "1.0.37" 26 | source = "registry+https://github.com/rust-lang/crates.io-index" 27 | checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" 28 | dependencies = [ 29 | "proc-macro2", 30 | ] 31 | 32 | [[package]] 33 | name = "syn" 34 | version = "2.0.87" 35 | source = "registry+https://github.com/rust-lang/crates.io-index" 36 | checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d" 37 | dependencies = [ 38 | "proc-macro2", 39 | "quote", 40 | "unicode-ident", 41 | ] 42 | 43 | [[package]] 44 | name = "unicode-ident" 45 | version = "1.0.13" 46 | source = "registry+https://github.com/rust-lang/crates.io-index" 47 | checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" 48 | -------------------------------------------------------------------------------- /assets/references.bib: -------------------------------------------------------------------------------- 1 | @article{solaMicroLieTheory2021, 2 | title = {A Micro {{Lie}} Theory for State Estimation in Robotics}, 3 | author = {Sol{\`a}, Joan and Deray, Jeremie and Atchuthan, Dinesh}, 4 | year = {2021}, 5 | month = dec, 6 | journal = {arXiv:1812.01537}, 7 | eprint = {1812.01537}, 8 | primaryclass = {cs}, 9 | urldate = {2022-05-11}, 10 | archiveprefix = {arXiv} 11 | } 12 | 13 | @article{yangGraduatedNonConvexityRobust2020, 14 | title = {Graduated {{Non-Convexity}} for {{Robust Spatial Perception}}: {{From Non-Minimal Solvers}} to {{Global Outlier Rejection}}}, 15 | author = 
{Yang, Heng and Antonante, Pasquale and Tzoumas, Vasileios and Carlone, Luca}, 16 | year = {2020}, 17 | month = apr, 18 | journal = {IEEE Robotics and Automation Letters}, 19 | volume = {5}, 20 | number = {2}, 21 | pages = {1127--1134}, 22 | issn = {2377-3766} 23 | } 24 | 25 | @article{zhangParameterEstimationTechniques1997, 26 | title = {Parameter Estimation Techniques: {{A}} Tutorial with Application to Conic Fitting}, 27 | shorttitle = {Parameter Estimation Techniques}, 28 | author = {Zhang, Zhengyou}, 29 | year = {1997}, 30 | journal = {Image and vision Computing}, 31 | volume = {15}, 32 | number = {1}, 33 | pages = {59--76}, 34 | publisher = {Elsevier}, 35 | urldate = {2025-04-01} 36 | } 37 | -------------------------------------------------------------------------------- /examples/graph_viz.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "rerun")] 2 | use std::net::{SocketAddr, SocketAddrV4}; 3 | 4 | use factrs::{ 5 | core::{assign_symbols, fac, BetweenResidual, Graph, PriorResidual, SE2}, 6 | traits::Variable, 7 | }; 8 | 9 | assign_symbols!(X: SE2); 10 | 11 | #[cfg(not(feature = "rerun"))] 12 | fn rerun_viz(_graph: &Graph) {} 13 | 14 | #[cfg(feature = "rerun")] 15 | fn rerun_viz(graph: &Graph) { 16 | // Setup the rerun 17 | let socket = SocketAddrV4::new("0.0.0.0".parse().unwrap(), 9876); 18 | let rec = rerun::RecordingStreamBuilder::new("factrs-graph-viz") 19 | .connect_tcp_opts(SocketAddr::V4(socket), rerun::default_flush_timeout()) 20 | .unwrap(); 21 | 22 | // Send the graph 23 | let (nodes, edges) = graph.into(); 24 | rec.log_static("graph", &[&nodes as &dyn rerun::AsComponents, &edges]) 25 | .expect("log failed"); 26 | } 27 | 28 | fn main() { 29 | let mut graph = Graph::new(); 30 | let id = SE2::identity(); 31 | 32 | graph.add_factor(fac![PriorResidual::new(id.clone()), X(1)]); 33 | graph.add_factor(fac![BetweenResidual::new(id.clone()), (X(1), X(2))]); 34 | 
graph.add_factor(fac![BetweenResidual::new(id.clone()), (X(2), X(3))]); 35 | 36 | graph.add_factor(fac![BetweenResidual::new(id.clone()), (X(1), X(4))]); 37 | graph.add_factor(fac![BetweenResidual::new(id.clone()), (X(2), X(4))]); 38 | graph.add_factor(fac![BetweenResidual::new(id.clone()), (X(3), X(5))]); 39 | 40 | rerun_viz(&graph); 41 | } 42 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: ci 2 | 3 | on: 4 | push: 5 | branches: [ "master", "dev" ] 6 | pull_request: 7 | branches: [ "master", "dev" ] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | 12 | # Loosely based on 13 | # https://github.com/nushell/nushell/blob/8771872d861eeb94eec63051cb4938f6306d1dcd/.github/workflows/ci.yml 14 | # https://www.reillywood.com/blog/rust-faster-ci/ 15 | jobs: 16 | fmt-clippy: 17 | runs-on: ubuntu-latest 18 | steps: 19 | - uses: actions/checkout@v4 20 | with: 21 | submodules: recursive 22 | 23 | - name: Setup Rust toolchain and cache 24 | uses: actions-rust-lang/setup-rust-toolchain@v1 25 | with: 26 | toolchain: 1.84 27 | components: rustfmt,clippy 28 | 29 | - name: cargo fmt 30 | run: cargo fmt --all -- --check 31 | 32 | - name: clippy 33 | run: cargo clippy 34 | - name: clippy of tests 35 | run: cargo clippy --tests 36 | 37 | tests: 38 | runs-on: ubuntu-latest 39 | steps: 40 | - uses: actions/checkout@v4 41 | with: 42 | submodules: recursive 43 | 44 | - name: Setup Rust toolchain and cache 45 | uses: actions-rust-lang/setup-rust-toolchain@v1 46 | with: 47 | toolchain: 1.84 48 | 49 | - name: default 50 | run: cargo test 51 | - name: serde 52 | run: cargo test --features serde 53 | - name: f32 54 | run: cargo test --features f32 55 | - name: left 56 | run: cargo test --features left 57 | - name: fake_exp 58 | run: cargo test --features fake_exp -------------------------------------------------------------------------------- 
/examples/serde.rs: -------------------------------------------------------------------------------- 1 | use factrs::core::{ 2 | assign_symbols, fac, BetweenResidual, GemanMcClure, Graph, PriorResidual, Values, SE2, SO2, 3 | }; 4 | 5 | assign_symbols!(X: SO2; Y: SE2); 6 | 7 | fn main() { 8 | // ------------------------- Serialize values ------------------------- // 9 | let x = SO2::from_theta(0.6); 10 | let y = SE2::new(1.0, 2.0, 0.3); 11 | let mut values = Values::new(); 12 | values.insert(X(0), x.clone()); 13 | values.insert(Y(1), y.clone()); 14 | 15 | println!("------ Serializing Values ------"); 16 | 17 | let serialized = serde_json::to_string_pretty(&values).unwrap(); 18 | println!("serialized = {}", serialized); 19 | 20 | // Convert the JSON string back to a Point. 21 | let deserialized: Values = serde_json::from_str(&serialized).unwrap(); 22 | println!("deserialized = {:#}", deserialized); 23 | 24 | // ------------------------- Serialize graph ------------------------- // 25 | let prior = PriorResidual::new(x); 26 | let bet = BetweenResidual::new(y); 27 | 28 | let prior = fac![prior, X(0), 0.1 as cov, GemanMcClure::default()]; 29 | let bet = fac![bet, (Y(0), Y(1)), 10.0 as cov]; 30 | 31 | let mut graph = Graph::new(); 32 | graph.add_factor(prior); 33 | graph.add_factor(bet); 34 | 35 | println!("\n\n------ Serializing Graph ------"); 36 | 37 | let serialized = serde_json::to_string_pretty(&graph).unwrap(); 38 | println!("serialized = {}", serialized); 39 | 40 | let deserialized: Graph = serde_json::from_str(&serialized).unwrap(); 41 | println!("deserialized = {:#?}", deserialized); 42 | } 43 | -------------------------------------------------------------------------------- /src/residuals/between.rs: -------------------------------------------------------------------------------- 1 | use nalgebra::{DimNameAdd, DimNameSum}; 2 | 3 | use crate::{ 4 | linalg::{ 5 | AllocatorBuffer, DefaultAllocator, DualAllocator, DualVector, ForwardProp, Numeric, VectorX, 6 | }, 
7 | residuals::Residual2, 8 | variables::{Variable, VariableDtype}, 9 | }; 10 | 11 | /// Binary factor between variables. 12 | /// 13 | /// This residual is used to enforce a constraint between two variables. 14 | /// Specifically it computes 15 | /// 16 | /// $$ 17 | /// r = (v_1 z) \ominus v_2 18 | /// $$ 19 | /// 20 | /// where $z$ is the measured value. 21 | #[derive(Clone, Debug)] 22 | #[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))] 23 | pub struct BetweenResidual { 24 | delta: P, 25 | } 26 | 27 | impl BetweenResidual

{ 28 | pub fn new(delta: P) -> Self { 29 | Self { delta } 30 | } 31 | } 32 | 33 | #[factrs::mark] 34 | impl Residual2 for BetweenResidual

35 | where 36 | AllocatorBuffer>: Sync + Send, 37 | DefaultAllocator: DualAllocator>, 38 | DualVector>: Copy, 39 | P::Dim: DimNameAdd, 40 | { 41 | type Differ = ForwardProp>; 42 | type V1 = P; 43 | type V2 = P; 44 | type DimOut = P::Dim; 45 | type DimIn = DimNameSum; 46 | 47 | fn residual2(&self, v1: P::Alias, v2: P::Alias) -> VectorX { 48 | let delta = self.delta.cast::(); 49 | v1.compose(&delta).ominus(&v2) 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /src/linear/factor.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | containers::Key, 3 | dtype, 4 | linalg::{MatrixBlock, VectorX}, 5 | linear::LinearValues, 6 | }; 7 | 8 | /// Represents a linear (aka Gaussian) factor. 9 | /// 10 | /// This is the linear equivalent of [Factor](crate::containers::Factor). It 11 | /// consists of the relevant keys, a [MatrixBlock] A, and a [VectorX] b. Again, 12 | /// this *shouldn't* ever need to be used by hand. 
13 | pub struct LinearFactor { 14 | pub keys: Vec, 15 | pub a: MatrixBlock, 16 | pub b: VectorX, 17 | } 18 | impl LinearFactor { 19 | pub fn new(keys: Vec, a: MatrixBlock, b: VectorX) -> Self { 20 | assert!( 21 | keys.len() == a.idx().len(), 22 | "Mismatch between keys and matrix blocks in LinearFactor::new" 23 | ); 24 | assert!( 25 | a.mat().nrows() == b.len(), 26 | "Mismatch between matrix block and vector in LinearFactor::new" 27 | ); 28 | Self { keys, a, b } 29 | } 30 | 31 | pub fn dim_out(&self) -> usize { 32 | self.b.len() 33 | } 34 | 35 | pub fn error(&self, vector: &LinearValues) -> dtype { 36 | let ax: VectorX = self 37 | .keys 38 | .iter() 39 | .enumerate() 40 | .map(|(idx, key)| { 41 | self.a.mul( 42 | idx, 43 | vector 44 | .get(*key) 45 | .expect("Missing key in LinearValues::error"), 46 | ) 47 | }) 48 | .sum(); 49 | (ax - &self.b).norm_squared() / 2.0 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /tests/custom_noise.rs: -------------------------------------------------------------------------------- 1 | use core::fmt; 2 | 3 | use factrs::{ 4 | linalg::{MatrixX, VectorX}, 5 | noise::NoiseModel, 6 | }; 7 | use nalgebra::Const; 8 | 9 | #[derive(Clone, Debug, Default)] 10 | #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] 11 | pub struct DoubleCovariance; 12 | 13 | #[factrs::mark] 14 | impl NoiseModel for DoubleCovariance { 15 | type Dim = Const; 16 | 17 | fn whiten_vec(&self, v: VectorX) -> VectorX { 18 | 2.0 * v 19 | } 20 | 21 | fn whiten_mat(&self, m: MatrixX) -> MatrixX { 22 | 2.0 * m 23 | } 24 | } 25 | 26 | impl fmt::Display for DoubleCovariance { 27 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 28 | write!(f, "DoubleCovariance{}", self.dim()) 29 | } 30 | } 31 | 32 | #[cfg(feature = "serde")] 33 | mod ser_de { 34 | use factrs::linalg::vectorx; 35 | 36 | use super::*; 37 | 38 | // Make sure it serializes properly 39 | #[test] 40 | fn test_json_serialize() { 41 
| let trait_object = &DoubleCovariance::<2> as &dyn NoiseModel; 42 | let json = serde_json::to_string(trait_object).unwrap(); 43 | let expected = r#"{"tag":"DoubleCovariance<2>"}"#; 44 | println!("json: {}", json); 45 | assert_eq!(json, expected); 46 | } 47 | 48 | #[test] 49 | fn test_json_deserialize() { 50 | let json = r#"{"tag":"DoubleCovariance<2>"}"#; 51 | let trait_object: Box = serde_json::from_str(json).unwrap(); 52 | let vec = vectorx![1.0, 2.0]; 53 | assert_eq!(trait_object.whiten_vec(vec.clone()), 2.0 * vec); 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /src/optimizers/macros.rs: -------------------------------------------------------------------------------- 1 | /// Test optimizers 2 | /// 3 | /// This macro generates a handful of sanity tests for an optimizer. It tests 4 | /// - Prior optimization for VectorVar3, SO3, and SE3 5 | /// - Between optimization for VectorVar3, SO3, and SE3 6 | #[macro_export] 7 | macro_rules! 
test_optimizer { 8 | ($o:ty) => { 9 | #[test] 10 | fn priorvector3() { 11 | let f = |graph| <$o>::new_default(graph); 12 | $crate::optimizers::test::optimize_prior::<$o, 3, $crate::variables::VectorVar3>(&f) 13 | } 14 | 15 | #[test] 16 | fn priorso3() { 17 | let f = |graph| <$o>::new_default(graph); 18 | $crate::optimizers::test::optimize_prior::<$o, 3, $crate::variables::SO3>(&f); 19 | } 20 | 21 | #[test] 22 | fn priorse3() { 23 | let f = |graph| <$o>::new_default(graph); 24 | $crate::optimizers::test::optimize_prior::<$o, 6, $crate::variables::SE3>(&f); 25 | } 26 | 27 | #[test] 28 | fn betweenvector3() { 29 | let f = |graph| <$o>::new_default(graph); 30 | $crate::optimizers::test::optimize_between::<$o, 3, 6, $crate::variables::VectorVar3>( 31 | &f, 32 | ); 33 | } 34 | 35 | #[test] 36 | fn betweenso3() { 37 | let f = |graph| <$o>::new_default(graph); 38 | $crate::optimizers::test::optimize_between::<$o, 3, 6, $crate::variables::SO3>(&f); 39 | } 40 | 41 | #[test] 42 | fn betweense3() { 43 | let f = |graph| <$o>::new_default(graph); 44 | $crate::optimizers::test::optimize_between::<$o, 6, 12, $crate::variables::SE3>(&f); 45 | } 46 | }; 47 | } 48 | -------------------------------------------------------------------------------- /justfile: -------------------------------------------------------------------------------- 1 | # ---------------------- General helpers that should work for everyone ---------------------- # 2 | # build and run rust benchmarks 3 | bench-rust: 4 | cargo bench -p factrs-bench 5 | 6 | # build and run cpp benchmarks 7 | bench-cpp: 8 | cmake -B build factrs-bench/cpp 9 | cmake --build build 10 | ./build/bench 11 | 12 | # profile the g2o example using flamegraph 13 | profile: 14 | cargo flamegraph --profile profile --example g2o -- ./examples/data/parking-garage.g2o 15 | 16 | # build docs with latex support 17 | doc: 18 | RUSTDOCFLAGS="--cfg docsrs --html-in-header $PWD/assets/katex-header.html" cargo doc --features="serde rerun" 
-Zunstable-options -Zrustdoc-scrape-examples 19 | 20 | bacon-doc: 21 | RUSTDOCFLAGS="--cfg docsrs --html-in-header $PWD/assets/katex-header.html" bacon doc --features="serde rerun" -- -Zunstable-options -Zrustdoc-scrape-examples 22 | 23 | # ---------------------- Easton specific helpers that work on my system ---------------------- # 24 | # tune the system for benchmarking using pyperf 25 | # requires pyperf installed using uv 26 | perf-tune: 27 | sudo ~/.local/share/uv/tools/pyperf/bin/pyperf system tune 28 | 29 | # reset the system after benchmarking using pyperf 30 | # requires pyperf installed using uv 31 | perf-reset: 32 | sudo ~/.local/share/uv/tools/pyperf/bin/pyperf system reset 33 | 34 | # make the benchmark plots 35 | # requires uv and pyperf installed using uv 36 | bench-plot: perf-tune 37 | # rust ones 38 | cargo bench -p factrs-bench --bench g2o -- --sample-count 100 --max-time 200 --output rust.json 39 | # cpp ones 40 | cmake -B build factrs-bench/cpp 41 | cmake --build build 42 | ./build/bench 100 43 | # make the plots 44 | uv run factrs-bench/plot.py -------------------------------------------------------------------------------- /factrs-proc/src/lib.rs: -------------------------------------------------------------------------------- 1 | use syn::{parse_macro_input, ItemImpl}; 2 | 3 | mod fac; 4 | mod noise; 5 | mod residual; 6 | mod robust; 7 | mod variable; 8 | 9 | enum BoxedTypes { 10 | Residual, 11 | Variable, 12 | Noise, 13 | Robust, 14 | } 15 | 16 | fn check_type(input: &ItemImpl) -> syn::Result { 17 | let err = syn::Error::new_spanned(input, "Missing trait"); 18 | let result = &input 19 | .trait_ 20 | .as_ref() 21 | .ok_or(err.clone())? 22 | .1 23 | .segments 24 | .last() 25 | .ok_or(err)? 
26 | .ident 27 | .to_string(); 28 | 29 | if result.contains("Residual") { 30 | Ok(BoxedTypes::Residual) 31 | } else if result.contains("Variable") { 32 | Ok(BoxedTypes::Variable) 33 | } else if result.contains("Noise") { 34 | Ok(BoxedTypes::Noise) 35 | } else if result.contains("Robust") { 36 | Ok(BoxedTypes::Robust) 37 | } else { 38 | Err(syn::Error::new_spanned(input, "Not a valid trait")) 39 | } 40 | } 41 | 42 | #[proc_macro_attribute] 43 | pub fn mark( 44 | _args: proc_macro::TokenStream, 45 | input: proc_macro::TokenStream, 46 | ) -> proc_macro::TokenStream { 47 | let input = syn::parse_macro_input!(input as ItemImpl); 48 | 49 | let trait_type = match check_type(&input) { 50 | Ok(syntax_tree) => syntax_tree, 51 | Err(err) => return err.to_compile_error().into(), 52 | }; 53 | 54 | match trait_type { 55 | BoxedTypes::Residual => residual::mark(input), 56 | BoxedTypes::Variable => variable::mark(input), 57 | BoxedTypes::Noise => noise::mark(input), 58 | BoxedTypes::Robust => robust::mark(input), 59 | } 60 | .into() 61 | } 62 | 63 | #[proc_macro] 64 | pub fn fac(input: proc_macro::TokenStream) -> proc_macro::TokenStream { 65 | let factor = parse_macro_input!(input as fac::Factor); 66 | 67 | fac::fac(factor).into() 68 | } 69 | -------------------------------------------------------------------------------- /src/residuals/mod.rs: -------------------------------------------------------------------------------- 1 | //! Struct & traits for implementing residuals 2 | //! 3 | //! Residuals are the main building block of a 4 | //! [factor](crate::containers::Factor). 5 | //! 6 | //! # Examples 7 | //! Here we make a custom residual for a z-position measurement. 8 | //! The `DimIn` and `DimOut` are the dimensions in and out, respectively, 9 | //! while `V1` represents the type of the variable used (and more higher 10 | //! numbered residuals have `V2`, `V3`, etc). 11 | //! 12 | //! `Differ` is the object that computes our auto-differentation. Out of the box 13 | //! 
factrs comes with [ForwardProp](factrs::linalg::ForwardProp) and 14 | //! [NumericalDiff](factrs::linalg::NumericalDiff). We recommend 15 | //! [ForwardProp](factrs::linalg::ForwardProp) as it should be faster and more 16 | //! accurate. 17 | //! 18 | //! Finally, the residual is defined through a single function that is generic 19 | //! over the datatype. That's it! factrs handles the rest for you. 20 | //! 21 | //! ``` 22 | //! use std::fmt; 23 | //! 24 | //! use factrs::{ 25 | //! dtype, 26 | //! linalg::{Const, ForwardProp, Numeric, VectorX}, 27 | //! residuals, 28 | //! variables::SE3, 29 | //! }; 30 | //! 31 | //! #[derive(Debug, Clone)] 32 | //! # #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] 33 | //! struct ZResidual { 34 | //! value: dtype, 35 | //! } 36 | //! 37 | //! #[factrs::mark] 38 | //! impl residuals::Residual1 for ZResidual { 39 | //! type DimIn = Const<6>; 40 | //! type DimOut = Const<1>; 41 | //! type V1 = SE3; 42 | //! type Differ = ForwardProp>; 43 | //! 44 | //! fn residual1(&self, x1: SE3) -> VectorX { 45 | //! VectorX::from_element(1, T::from(self.value) - x1.xyz().z) 46 | //! } 47 | //! } 48 | //! 
``` 49 | mod traits; 50 | #[cfg(feature = "serde")] 51 | pub use traits::tag_residual; 52 | pub use traits::{Residual, Residual1, Residual2, Residual3, Residual4, Residual5, Residual6}; 53 | 54 | mod prior; 55 | pub use prior::PriorResidual; 56 | 57 | mod between; 58 | pub use between::BetweenResidual; 59 | 60 | pub mod imu_preint; 61 | pub use imu_preint::{Accel, Gravity, Gyro, ImuCovariance, ImuPreintegrator}; 62 | -------------------------------------------------------------------------------- /tests/custom_residual.rs: -------------------------------------------------------------------------------- 1 | use core::fmt; 2 | 3 | use factrs::{ 4 | dtype, 5 | linalg::{vectorx, ForwardProp, Numeric, VectorX}, 6 | residuals::Residual1, 7 | traits::Variable, 8 | variables::SE2, 9 | }; 10 | use nalgebra::Const; 11 | 12 | #[derive(Clone, Debug, Default)] 13 | #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] 14 | pub struct XPrior { 15 | x: dtype, 16 | } 17 | 18 | impl XPrior { 19 | pub fn new(x: dtype) -> XPrior { 20 | XPrior { x } 21 | } 22 | } 23 | 24 | #[factrs::mark] 25 | impl Residual1 for XPrior { 26 | type Differ = ForwardProp<::DimIn>; 27 | type V1 = SE2; 28 | type DimIn = ::Dim; 29 | type DimOut = Const<1>; 30 | 31 | fn residual1(&self, v: SE2) -> VectorX { 32 | let z_meas = T::from(self.x); 33 | vectorx![z_meas - v.xy().x] 34 | } 35 | } 36 | 37 | impl fmt::Display for XPrior { 38 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 39 | write!(f, "XPrior({})", self.x) 40 | } 41 | } 42 | 43 | // TODO: Some tests to make sure it optimizes 44 | 45 | #[cfg(feature = "serde")] 46 | mod ser_de { 47 | use factrs::{containers::Values, symbols::X, traits::Residual}; 48 | 49 | use super::*; 50 | 51 | // Make sure it serializes properly 52 | #[test] 53 | fn test_json_serialize() { 54 | let trait_object = &XPrior::new(1.2) as &dyn Residual; 55 | let json = serde_json::to_string(trait_object).unwrap(); 56 | let expected = 
r#"{"tag":"XPrior","x":1.2}"#; 57 | println!("json: {}", json); 58 | assert_eq!(json, expected); 59 | } 60 | 61 | #[test] 62 | fn test_json_deserialize() { 63 | let json = r#"{"tag":"XPrior","x":1.2}"#; 64 | let trait_object: Box = serde_json::from_str(json).unwrap(); 65 | 66 | let mut values = Values::new(); 67 | values.insert_unchecked(X(0), SE2::new(0.0, 1.2, 0.0)); 68 | let error = trait_object.residual(&values, &[X(0).into()])[0]; 69 | 70 | assert_eq!(trait_object.dim_in(), 3); 71 | assert_eq!(trait_object.dim_out(), 1); 72 | assert_eq!(error, 0.0); 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /factrs-proc/src/noise.rs: -------------------------------------------------------------------------------- 1 | use proc_macro2::{Ident, TokenStream as TokenStream2}; 2 | use quote::quote; 3 | use syn::{GenericParam, ItemImpl, Type, TypePath}; 4 | 5 | fn type_name(mut ty: &Type) -> Option { 6 | loop { 7 | match ty { 8 | Type::Path(TypePath { qself: None, path }) => { 9 | return Some(path.segments.last().unwrap().ident.clone()); 10 | } 11 | Type::Group(group) => { 12 | ty = &group.elem; 13 | } 14 | _ => return None, 15 | } 16 | } 17 | } 18 | 19 | pub fn mark(item: ItemImpl) -> TokenStream2 { 20 | if !cfg!(feature = "serde") { 21 | return quote! { #item }; 22 | } 23 | 24 | let mut expanded = quote! { 25 | #[typetag::serde] 26 | #item 27 | }; 28 | 29 | let name: Ident = type_name(&item.self_ty).unwrap(); 30 | 31 | // Have to tag for serialization 32 | match item.generics.params.len() { 33 | // If no generics, just tag 34 | 0 => { 35 | expanded.extend(quote!( 36 | factrs::noise::tag_noise!(#name); 37 | )); 38 | } 39 | // If one generic and it's const, do first 20 40 | 1 => { 41 | if let GenericParam::Const(_) = item.generics.params.first().unwrap() { 42 | for i in 1usize..=32 { 43 | let name_str = format!("{}<{}>", name, i); 44 | expanded.extend(quote!( 45 | typetag::__private::inventory::submit! 
{ 46 | ::typetag_register( 47 | #name_str, 48 | (|deserializer| typetag::__private::Result::Ok( 49 | typetag::__private::Box::new( 50 | typetag::__private::erased_serde::deserialize::<#name<#i>>(deserializer)? 51 | ), 52 | )) as typetag::__private::DeserializeFn<::Object>, 53 | ) 54 | } 55 | )); 56 | } 57 | } 58 | } 59 | // Anymore and it's up to the user 60 | _ => {} 61 | } 62 | 63 | expanded 64 | } 65 | -------------------------------------------------------------------------------- /assets/katex-header.html: -------------------------------------------------------------------------------- 1 | 2 | 4 | 7 | 10 | 39 | -------------------------------------------------------------------------------- /factrs-bench/src/sophus_se3.rs: -------------------------------------------------------------------------------- 1 | use sophus_lie::{Isometry3, Isometry3F64}; 2 | use sophus_opt::{nlls::EvaluatedCostTerm, prelude::*, robust_kernel, variables::VarKind}; 3 | 4 | /// Pose graph term 5 | #[derive(Debug, Clone)] 6 | pub struct PoseGraph3CostTerm { 7 | /// 2d relative pose constraint 8 | pub pose_m_from_pose_n: Isometry3F64, 9 | /// ids of the two poses 10 | pub entity_indices: [usize; 2], 11 | } 12 | 13 | impl PoseGraph3CostTerm { 14 | /// Compute the residual of the pose graph term 15 | /// 16 | /// `g(ʷTₘ, ʷTₙ) = log[ (ʷTₘ)⁻¹ ∙ ʷTₙ ∙ (ᵐTₙ)⁻¹ ]` 17 | /// 18 | /// Note that `ʷTₘ:= world_from_pose_m`, `ʷTₙ:= world_from_pose_n` and 19 | /// `ᵐTₙ:= pose_m_from_pose_n` are of type `Isometry2F64`. 
20 | pub fn residual, const DM: usize, const DN: usize>( 21 | world_from_pose_m: Isometry3, 22 | world_from_pose_n: Isometry3, 23 | pose_m_from_pose_n: Isometry3, 24 | ) -> S::Vector<6> { 25 | (world_from_pose_m.inverse() * world_from_pose_n * pose_m_from_pose_n.inverse()).log() 26 | } 27 | } 28 | 29 | impl HasResidualFn<12, 2, (), (Isometry3F64, Isometry3F64)> for PoseGraph3CostTerm { 30 | fn idx_ref(&self) -> &[usize; 2] { 31 | &self.entity_indices 32 | } 33 | 34 | fn eval( 35 | &self, 36 | _global_constants: &(), 37 | idx: [usize; 2], 38 | world_from_pose_x: (Isometry3F64, Isometry3F64), 39 | var_kinds: [VarKind; 2], 40 | robust_kernel: Option, 41 | ) -> EvaluatedCostTerm<12, 2> { 42 | let world_from_pose_m = world_from_pose_x.0; 43 | let world_from_pose_n = world_from_pose_x.1; 44 | 45 | let residual = Self::residual( 46 | world_from_pose_m, 47 | world_from_pose_n, 48 | self.pose_m_from_pose_n, 49 | ); 50 | 51 | ( 52 | || { 53 | -Isometry3::dx_log_a_exp_x_b_at_0( 54 | world_from_pose_m.inverse(), 55 | world_from_pose_n * self.pose_m_from_pose_n.inverse(), 56 | ) 57 | }, 58 | || { 59 | Isometry3::dx_log_a_exp_x_b_at_0( 60 | world_from_pose_m.inverse(), 61 | world_from_pose_n * self.pose_m_from_pose_n.inverse(), 62 | ) 63 | }, 64 | ) 65 | .make(idx, var_kinds, residual, robust_kernel, None) 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /factrs-bench/cpp/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.16) 2 | 3 | project(factrs-bench-cpp 4 | VERSION 0.1.0 5 | LANGUAGES CXX 6 | ) 7 | include(FetchContent) 8 | 9 | set(CMAKE_BUILD_TYPE RELEASE) 10 | set(CMAKE_EXPORT_COMPILE_COMMANDS ON) 11 | 12 | # ------------------------- Include Benchmarking library ------------------------- # 13 | FetchContent_Declare( 14 | nanobench 15 | GIT_REPOSITORY https://github.com/martinus/nanobench.git 16 | GIT_TAG v4.3.11 17 | GIT_SHALLOW TRUE 18 | 
GIT_PROGRESS TRUE 19 | USES_TERMINAL_DOWNLOAD TRUE 20 | ) 21 | FetchContent_MakeAvailable(nanobench) 22 | 23 | # ------------------------- Include Ceres ------------------------- # 24 | set(BUILD_TESTING OFF CACHE BOOL "") 25 | set(BUILD_BENCHMARKS OFF CACHE BOOL "") 26 | set(PROVIDE_UNINSTALL_TARGET OFF CACHE BOOL "") 27 | set(BUILD_EXAMPLES OFF CACHE BOOL "") # I may need this... 28 | 29 | FetchContent_Declare( 30 | Ceres 31 | GIT_REPOSITORY https://github.com/ceres-solver/ceres-solver.git 32 | GIT_TAG 2.2.0 33 | GIT_SHALLOW TRUE 34 | GIT_PROGRESS TRUE 35 | USES_TERMINAL_DOWNLOAD TRUE 36 | ) 37 | FetchContent_MakeAvailable(Ceres) 38 | 39 | # ------------------------- Include GTSAM ------------------------- # 40 | # Enable features to make closer to factrs default 41 | set(GTSAM_USE_QUATERNIONS ON CACHE BOOL "") 42 | set(GTSAM_SLOW_BUT_CORRECT_BETWEENFACTOR ON CACHE BOOL "") 43 | set(GTSAM_SLOW_BUT_CORRECT_EXPMAP ON CACHE BOOL "") 44 | # Turn off extra building 45 | set(GTSAM_BUILD_TESTS OFF CACHE BOOL "") 46 | set(GTSAM_BUILD_DOCS OFF CACHE BOOL "") 47 | set(GTSAM_BUILD_PYTHON OFF CACHE BOOL "") 48 | set(GTSAM_INSTALL_MATLAB_TOOLBOX OFF CACHE BOOL "") 49 | set(GTSAM_BUILD_UNSTABLE OFF CACHE BOOL "") 50 | set(GTSAM_BUILD_EXAMPLES OFF CACHE BOOL "") 51 | set(GTSAM_BUILD_EXAMPLES_ALWAYS OFF CACHE BOOL "") 52 | 53 | # use single threaded comparisons 54 | set(GTSAM_WITH_TBB OFF CACHE BOOL "") 55 | 56 | # disable some gtsam warnings we don't care about 57 | add_compile_options(-Wno-deprecated-declarations) 58 | add_compile_options(-Wno-unused-but-set-variable) 59 | add_compile_options(-Wno-cpp) 60 | 61 | FetchContent_Declare( 62 | GTSAM 63 | GIT_REPOSITORY https://github.com/borglab/gtsam.git 64 | GIT_TAG 4.2.0 65 | GIT_SHALLOW TRUE 66 | GIT_PROGRESS TRUE 67 | USES_TERMINAL_DOWNLOAD TRUE 68 | ) 69 | FetchContent_MakeAvailable(GTSAM) 70 | 71 | # ------------------------- Our executable ------------------------- # 72 | 73 | add_executable(bench bench.cpp) 74 | 
target_link_libraries(bench PRIVATE nanobench gtsam Ceres::ceres) 75 | # Manually include some Ceres g2o helpers 76 | target_include_directories(bench PRIVATE ${Ceres_SOURCE_DIR}/examples/slam) 77 | set_property(TARGET bench PROPERTY CMAKE_INTERPROCEDURAL_OPTIMIZATION TRUE) -------------------------------------------------------------------------------- /factrs-bench/benches/g2o.rs: -------------------------------------------------------------------------------- 1 | use diol::prelude::*; 2 | 3 | const DATA_DIR: &str = "../examples/data/"; 4 | 5 | // ------------------------- factrs ------------------------- // 6 | use factrs::{core::GaussNewton, traits::Optimizer, utils::load_g20}; 7 | fn factrs(bencher: Bencher, file: &str) { 8 | let (graph, init) = load_g20(&format!("{}{}", DATA_DIR, file)); 9 | bencher.bench(|| { 10 | let mut opt: GaussNewton = GaussNewton::new_default(graph.clone()); 11 | let mut results = opt.optimize(init.clone()); 12 | black_box(&mut results); 13 | }); 14 | } 15 | 16 | // ------------------------- tiny-solver ------------------------- // 17 | use tiny_solver::{ 18 | gauss_newton_optimizer, helper::read_g2o as load_tiny_g2o, optimizer::Optimizer as TSOptimizer, 19 | }; 20 | 21 | fn tinysolver(bencher: Bencher, file: &str) { 22 | let (graph, init) = load_tiny_g2o(&format!("{}{}", DATA_DIR, file)); 23 | bencher.bench(|| { 24 | let gn = gauss_newton_optimizer::GaussNewtonOptimizer::new(); 25 | let mut results = gn.optimize(&graph, &init, None); 26 | black_box(&mut results); 27 | }); 28 | } 29 | 30 | // ------------------------- sophus ------------------------- // 31 | fn sophus(bench: Bencher, file: &str) { 32 | let (graph, init) = if file.contains("M3500") { 33 | factrs_bench::sophus::load_g2o_2d(&format!("{}{}", DATA_DIR, file)) 34 | } else { 35 | factrs_bench::sophus::load_g2o_3d(&format!("{}{}", DATA_DIR, file)) 36 | }; 37 | 38 | let params = sophus_opt::nlls::OptParams { 39 | num_iterations: 40, 40 | initial_lm_damping: 0.0, // force to be 
gauss-newton 41 | parallelize: false, 42 | solver: sophus_opt::nlls::LinearSolverType::SparseLdlt(Default::default()), 43 | error_tol_relative: 1e-6, 44 | error_tol_absolute: 1e-6, 45 | error_tol: 0.0, 46 | }; 47 | bench.bench(|| { 48 | let mut results = sophus_opt::nlls::optimize_nlls(init.clone(), graph.clone(), params); 49 | black_box(&mut results); 50 | }) 51 | } 52 | 53 | // NOTE: tiny-solver is still using rayon under the hood for jacobian 54 | // computation, Setting the number of rayon threads to 1 DRASTICALLY degrades 55 | // it's performance. 56 | fn main() -> eyre::Result<()> { 57 | // set everything to single-threaded 58 | faer::set_global_parallelism(faer::Par::Seq); 59 | sophus_faer::set_global_parallelism(sophus_faer::Parallelism::None); 60 | rayon::ThreadPoolBuilder::new() 61 | .num_threads(1) 62 | .build_global() 63 | .unwrap(); 64 | 65 | let to_run = list![factrs, tinysolver, sophus]; 66 | 67 | let bench = Bench::from_args()?; 68 | bench.register_many("3d", to_run, ["sphere2500.g2o", "parking-garage.g2o"]); 69 | bench.register_many("2d", to_run, ["M3500.g2o"]); 70 | bench.run()?; 71 | 72 | Ok(()) 73 | } 74 | -------------------------------------------------------------------------------- /src/residuals/imu_preint/newtypes.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | dtype, 3 | linalg::{Numeric, Vector3}, 4 | variables::{ImuBias, SO3}, 5 | }; 6 | 7 | /// Raw gyro measurement 8 | /// 9 | /// This is a newtype for the gyro measurement ensure that the accel and gyro 10 | /// aren't mixed up. 
11 | #[derive(Clone, Debug)] 12 | #[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))] 13 | pub struct Gyro(pub Vector3); 14 | 15 | impl Gyro { 16 | /// Remove the bias from the gyro measurement 17 | pub fn remove_bias(&self, bias: &ImuBias) -> GyroUnbiased { 18 | GyroUnbiased(self.0 - bias.gyro()) 19 | } 20 | 21 | pub fn zeros() -> Self { 22 | Gyro(Vector3::zeros()) 23 | } 24 | 25 | pub fn new(x: T, y: T, z: T) -> Self { 26 | Gyro(Vector3::new(x, y, z)) 27 | } 28 | } 29 | 30 | /// Gyro measurement with bias removed 31 | pub struct GyroUnbiased(pub Vector3); 32 | 33 | /// Raw accel measurement newtype 34 | /// 35 | /// This is a newtype for the accel measurement ensure that the accel and gyro 36 | /// aren't mixed up. 37 | #[derive(Clone, Debug)] 38 | #[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))] 39 | pub struct Accel(pub Vector3); 40 | 41 | impl Accel { 42 | /// Remove the bias from the accel measurement 43 | pub fn remove_bias(&self, bias: &ImuBias) -> AccelUnbiased { 44 | AccelUnbiased(self.0 - bias.accel()) 45 | } 46 | 47 | pub fn zeros() -> Self { 48 | Accel(Vector3::zeros()) 49 | } 50 | 51 | pub fn new(x: T, y: T, z: T) -> Self { 52 | Accel(Vector3::new(x, y, z)) 53 | } 54 | } 55 | 56 | /// Accel measurement with bias removed 57 | pub struct AccelUnbiased(pub Vector3); 58 | 59 | /// Gravity vector 60 | /// 61 | /// This is a newtype for the gravity vector to ensure that it is not mixed up 62 | /// with other vectors and to provide some convenience methods. 63 | #[derive(Clone, Debug)] 64 | #[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))] 65 | pub struct Gravity(pub Vector3); 66 | 67 | impl Gravity { 68 | /// Helper to get the gravity vector pointing up, i.e. [0, 0, 9.81] 69 | pub fn up() -> Self { 70 | Gravity(Vector3::new(T::from(0.0), T::from(0.0), T::from(9.81))) 71 | } 72 | 73 | /// Helper to get the gravity vector pointing down, i.e. 
[0, 0, -9.81] 74 | pub fn down() -> Self { 75 | Gravity(Vector3::new(T::from(0.0), T::from(0.0), T::from(-9.81))) 76 | } 77 | } 78 | 79 | /// Struct to hold an Imu state 80 | /// 81 | /// Specifically holds an Imu state to which an ImuDelta can be applied 82 | #[derive(Clone, Debug)] 83 | pub struct ImuState { 84 | pub r: SO3, 85 | pub v: Vector3, 86 | pub p: Vector3, 87 | pub bias: ImuBias, 88 | } 89 | -------------------------------------------------------------------------------- /factrs-proc/src/residual.rs: -------------------------------------------------------------------------------- 1 | use proc_macro2::TokenStream as TokenStream2; 2 | use quote::{format_ident, quote}; 3 | use syn::{parse_quote, ItemImpl, Path}; 4 | 5 | fn parse_residual_trait(item: &ItemImpl) -> syn::Result<(Path, u32)> { 6 | let err = syn::Error::new_spanned(item, "unable to parse residual number"); 7 | let residual_trait = item.trait_.clone().ok_or(err.clone())?.1; 8 | 9 | let num = residual_trait 10 | .segments 11 | .last() 12 | .ok_or(err.clone())? 
13 | .ident 14 | .to_string() 15 | .replace("Residual", "") 16 | .parse::(); 17 | 18 | match num { 19 | Result::Err(_) => Err(err), 20 | Result::Ok(n) => Ok((residual_trait, n)), 21 | } 22 | } 23 | 24 | pub fn mark(mut item: ItemImpl) -> TokenStream2 { 25 | // Parse what residual number we're using 26 | let (residual_trait, num) = match parse_residual_trait(&item) { 27 | Result::Err(e) => return e.to_compile_error(), 28 | Result::Ok(n) => n, 29 | }; 30 | 31 | // Build all the things we need from it 32 | let residual_values = format_ident!("residual{}_values", num); 33 | let residual_jacobian = format_ident!("residual{}_jacobian", num); 34 | 35 | // If we should add typetag::Tagged to the generic bounds 36 | let typetag = if cfg!(feature = "serde") { 37 | // Add where clauses to all impl 38 | let all_type_params: Vec<_> = item.generics.type_params().cloned().collect(); 39 | for type_param in all_type_params { 40 | let ident = &type_param.ident; 41 | item.generics.make_where_clause(); 42 | item.generics 43 | .where_clause 44 | .as_mut() 45 | .unwrap() 46 | .predicates 47 | .push(parse_quote!(#ident: typetag::Tagged)); 48 | } 49 | 50 | quote!( #[typetag::serde] ) 51 | } else { 52 | TokenStream2::new() 53 | }; 54 | 55 | // Build the output 56 | let generics = &item.generics; 57 | let self_ty = &item.self_ty; 58 | let where_clause = &generics.where_clause; 59 | 60 | quote! 
{ 61 | #item 62 | 63 | #typetag 64 | impl #generics factrs::residuals::Residual for #self_ty #where_clause { 65 | fn dim_in(&self) -> usize { 66 | <::DimIn as factrs::linalg::DimName>::USIZE 67 | } 68 | 69 | fn dim_out(&self) -> usize { 70 | <::DimOut as factrs::linalg::DimName>::USIZE 71 | } 72 | 73 | fn residual(&self, values: &factrs::containers::Values, keys: &[factrs::containers::Key]) -> factrs::linalg::VectorX { 74 | #residual_trait::#residual_values(self, values, keys) 75 | } 76 | 77 | fn residual_jacobian(&self, values: &factrs::containers::Values, keys: &[factrs::containers::Key]) -> factrs::linalg::DiffResult { 78 | #residual_trait::#residual_jacobian(self, values, keys) 79 | } 80 | } 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /tests/custom_key.rs: -------------------------------------------------------------------------------- 1 | /* 2 | This is mostly just used as a proof of concept to make sure this works 3 | so that users can do this if it's ever desired. 
4 | 5 | For example, this could be used for a multi-robot factor graph 6 | */ 7 | 8 | use std::fmt::{self, Write}; 9 | 10 | use factrs::{ 11 | containers::{Key, KeyFormatter, Symbol, Values, ValuesFormatter}, 12 | variables::VectorVar1, 13 | }; 14 | 15 | // Char is stored in the high CHR_SIZE 16 | // Idx is stored in the low IDX_SIZE 17 | const TOTAL_SIZE: usize = u64::BITS as usize; 18 | const CHR_SIZE: usize = 2 * 8; 19 | const IDX_SIZE: usize = TOTAL_SIZE - 2 * CHR_SIZE; 20 | 21 | const CHR1_MASK: u64 = (char::MAX as u64) << IDX_SIZE << CHR_SIZE; 22 | const CHR2_MASK: u64 = (char::MAX as u64) << IDX_SIZE; 23 | const IDX_MASK: u64 = !(CHR1_MASK | CHR2_MASK); 24 | 25 | // Make custom formatter for our stuff 26 | #[derive(Clone, Debug)] 27 | #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] 28 | pub struct DoubleCharHandler; 29 | 30 | impl DoubleCharHandler { 31 | pub fn sym_to_key(chr1: char, chr2: char, idx: u32) -> Key { 32 | debug_assert!(chr1.is_ascii()); 33 | debug_assert!(chr2.is_ascii()); 34 | 35 | Key(((chr1 as u64) << IDX_SIZE << CHR_SIZE) 36 | | ((chr2 as u64) << IDX_SIZE) 37 | | (idx as u64) & IDX_MASK) 38 | } 39 | 40 | pub fn key_to_sym(k: Key) -> (char, char, u32) { 41 | let chr1 = ((k.0 & CHR1_MASK) >> IDX_SIZE >> CHR_SIZE) as u8 as char; 42 | let chr2 = ((k.0 & CHR2_MASK) >> IDX_SIZE) as u8 as char; 43 | let idx = (k.0 & IDX_MASK) as u32; 44 | (chr1, chr2, idx) 45 | } 46 | 47 | pub fn format(f: &mut dyn Write, chr1: char, chr2: char, idx: u32) -> fmt::Result { 48 | write!(f, "{}{}{}", chr1, chr2, idx) 49 | } 50 | } 51 | 52 | impl KeyFormatter for DoubleCharHandler { 53 | fn fmt(f: &mut dyn Write, key: Key) -> fmt::Result { 54 | let (chr1, chr2, idx) = Self::key_to_sym(key); 55 | Self::format(f, chr1, chr2, idx) 56 | } 57 | } 58 | 59 | // Make a practice symbol to use 60 | pub struct XY(pub u32); 61 | 62 | impl From for Key { 63 | fn from(xy: XY) -> Key { 64 | DoubleCharHandler::sym_to_key('X', 'Y', xy.0) 65 | } 66 | } 67 | 
68 | impl fmt::Debug for XY { 69 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 70 | DoubleCharHandler::format(f, 'X', 'Y', self.0) 71 | } 72 | } 73 | 74 | impl Symbol for XY {} 75 | 76 | #[test] 77 | fn test_round_trip() { 78 | let key = DoubleCharHandler::sym_to_key('X', 'Y', 101); 79 | let (chr1, chr2, idx) = DoubleCharHandler::key_to_sym(key); 80 | assert_eq!('X', chr1, "chr1 is off"); 81 | assert_eq!('Y', chr2, "chr2 is off"); 82 | assert_eq!(101, idx, "idx is off"); 83 | } 84 | 85 | #[test] 86 | fn test_values() { 87 | let mut values = Values::new(); 88 | values.insert_unchecked(XY(1), VectorVar1::new(1.0)); 89 | let output = format!("{}", ValuesFormatter::::new(&values)); 90 | assert_eq!( 91 | output, "Values { XY1: VectorVar1(1.000), }", 92 | "Values formatted wrong" 93 | ); 94 | } 95 | -------------------------------------------------------------------------------- /src/containers/order.rs: -------------------------------------------------------------------------------- 1 | use std::collections::hash_map::Iter as HashMapIter; 2 | 3 | use foldhash::HashMap; 4 | 5 | use super::{Key, Symbol, Values}; 6 | 7 | /// Location of a variable in a list 8 | /// 9 | /// Since the map isn't ordered, we need to track both idx and len of each 10 | /// variable 11 | #[derive(Debug, Clone)] 12 | pub struct Idx { 13 | pub idx: usize, 14 | pub dim: usize, 15 | } 16 | 17 | /// Tracks the location of each variable in the graph via an [Idx]. 18 | /// 19 | /// Likely won't need to ever interface with this unless a custom optimizer is 20 | /// being implemented. 
Since the map isn't ordered, we need to track both idx 21 | /// and len of each variable 22 | #[derive(Debug, Clone)] 23 | pub struct ValuesOrder { 24 | map: HashMap, 25 | dim: usize, 26 | } 27 | 28 | impl ValuesOrder { 29 | pub fn new(map: HashMap) -> Self { 30 | let dim = map.values().map(|idx| idx.dim).sum(); 31 | Self { map, dim } 32 | } 33 | pub fn from_values(values: &Values) -> Self { 34 | let map = values 35 | .iter() 36 | .scan(0, |idx, (key, val)| { 37 | let order = *idx; 38 | *idx += val.dim(); 39 | Some(( 40 | *key, 41 | Idx { 42 | idx: order, 43 | dim: val.dim(), 44 | }, 45 | )) 46 | }) 47 | .collect::>(); 48 | 49 | let dim = map.values().map(|idx| idx.dim).sum(); 50 | 51 | Self { map, dim } 52 | } 53 | 54 | pub fn get(&self, symbol: impl Symbol) -> Option<&Idx> { 55 | self.map.get(&symbol.into()) 56 | } 57 | 58 | pub fn dim(&self) -> usize { 59 | self.dim 60 | } 61 | 62 | pub fn len(&self) -> usize { 63 | self.map.len() 64 | } 65 | 66 | pub fn is_empty(&self) -> bool { 67 | self.map.is_empty() 68 | } 69 | 70 | pub fn iter(&self) -> HashMapIter { 71 | self.map.iter() 72 | } 73 | } 74 | 75 | #[cfg(test)] 76 | mod test { 77 | use super::*; 78 | use crate::{ 79 | containers::Values, 80 | symbols::X, 81 | variables::{Variable, VectorVar2, VectorVar3, VectorVar6}, 82 | }; 83 | 84 | #[test] 85 | fn from_values() { 86 | // Create some form of values 87 | let mut v = Values::new(); 88 | v.insert_unchecked(X(0), VectorVar2::identity()); 89 | v.insert_unchecked(X(1), VectorVar6::identity()); 90 | v.insert_unchecked(X(2), VectorVar3::identity()); 91 | 92 | // Create an order 93 | let order = ValuesOrder::from_values(&v); 94 | 95 | // Verify the order 96 | assert_eq!(order.len(), 3); 97 | assert_eq!(order.dim(), 11); 98 | assert_eq!(order.get(X(0)).expect("Missing key").dim, 2); 99 | assert_eq!(order.get(X(1)).expect("Missing key").dim, 6); 100 | assert_eq!(order.get(X(2)).expect("Missing key").dim, 3); 101 | } 102 | } 103 | 
-------------------------------------------------------------------------------- /src/variables/mod.rs: -------------------------------------------------------------------------------- 1 | //! Variables to optimize 2 | //! 3 | //! This module contains the definition of the variables that can be optimized 4 | //! using the optimization algorithms. We model each variable as a Lie group 5 | //! $\mathcal{G}$ [^@solaMicroLieTheory2021], even if it is trivially so. 6 | //! Because of this, each type $X \in \mathcal{G}$ and it's group action $\cdot$ 7 | //! must satisfy the following properties, 8 | //! 9 | //! | Property | Description | 10 | //! | :---: | :---: | 11 | //! | Identity | $\exists I \ni X \cdot I = X = I \cdot X$ | 12 | //! | Inverse | $\exists X^{-1} \ni X \cdot X^{-1} = I = X^{-1} \cdot X$ | 13 | //! | Closed | $\forall X_1, X_2 \ni X_1 \cdot X_2 = X_3$ | 14 | //! | Associativity | $\forall X_1, X_2, X_3 \ni (X_1 \cdot X_2) \cdot X_3 = X_1 \cdot (X_2 \cdot X_3)$ | 15 | //! | Exponential maps | $\xi \in \mathfrak{g} \implies \exp(\xi) \in \mathcal{G}$ | 16 | //! | Logarithm maps | $X \in \mathcal{G} \implies \log(X) \in \mathfrak{g}$ | 17 | //! 18 | //! Additionally, for optimization purposes, we adopt $\oplus$ and $\ominus$ 19 | //! operators [^@solaMicroLieTheory2021], 20 | //! 21 | //! | Property | Right | Left | 22 | //! | :---: | :---: | :---: | 23 | //! | $\oplus$ | $x \oplus \xi = x \cdot \exp(\xi)$ | $x \oplus \xi = \exp(\xi) \cdot x$ | 24 | //! | $\ominus$ | $x \ominus y = \log(y^{-1} \cdot x)$ | $x \ominus y = \log(x \cdot y^{-1})$ | 25 | //! 26 | //! fact.rs defaults to using the right formulation, but the left formulation 27 | //! can be enabled using the `left` feature. (Note this does have significant 28 | //! consequences, including changing covariance interpretations) 29 | //! 30 | //! All these properties are encapsulated in the [Variable] trait. Additionally, 31 | //! 
we parametrized each variable over its datatype to allow for dual numbers to 32 | //! be propagated through residuals for automatic jacobian computation. 33 | //! 34 | //! If you want to implement a custom variable, you'll need to implement 35 | //! [Variable] and [mark](factrs::mark) if using serde. We also recommend using 36 | //! the [test_variable](crate::test_variable) macro to ensure the above 37 | //! properties are satisfied. 38 | //! 39 | //! As an implementation detail, [Variable] is the main trait with all the 40 | //! important corresponding methods. [VariableSafe] is a dyn-compatible version 41 | //! that is implemented via a blanket. [VariableDtype] has both [Variable] and 42 | //! [VariableSafe] as supertraits with the added constraint that the datatype 43 | //! is [dtype](factrs::dtype). 44 | //! 45 | //! [^@solaMicroLieTheory2021]: Solà, Joan, et al. “A Micro Lie Theory for State Estimation in Robotics.” Arxiv:1812.01537, Dec. 2021 46 | mod traits; 47 | #[cfg(feature = "serde")] 48 | pub use traits::tag_variable; 49 | pub use traits::{MatrixLieGroup, Variable, VariableDtype, VariableSafe}; 50 | 51 | mod so2; 52 | pub use so2::SO2; 53 | 54 | mod se2; 55 | pub use se2::SE2; 56 | 57 | mod so3; 58 | pub use so3::SO3; 59 | 60 | mod se3; 61 | pub use se3::SE3; 62 | 63 | mod vector; 64 | pub use vector::{ 65 | VectorVar, VectorVar1, VectorVar2, VectorVar3, VectorVar4, VectorVar5, VectorVar6, 66 | }; 67 | 68 | mod imu_bias; 69 | pub use imu_bias::ImuBias; 70 | 71 | mod macros; 72 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "factrs" 3 | version = "0.2.0" 4 | edition = "2021" 5 | license = "MIT" 6 | description = "Factor graph optimization for robotics" 7 | readme = "README.md" 8 | authors = ["Easton Potokar", "Taylor Pool"] 9 | repository = "https://github.com/rpl-cmu/factrs" 10 | keywords = 
["nonlinear", "optimization", "robotics", "estimation", "SLAM"] 11 | categories = ["science::robotics", "mathematics"] 12 | rust-version = "1.84" 13 | exclude = ["examples/data/*"] 14 | 15 | [workspace] 16 | members = ["factrs-bench", "factrs-proc"] 17 | exclude = ["factrs-typetag"] 18 | 19 | [package.metadata.docs.rs] 20 | features = ["serde", "rerun"] 21 | rustdoc-args = [ 22 | "--cfg", 23 | "docsrs", 24 | "--html-in-header", 25 | "assets/katex-header.html", 26 | ] 27 | cargo-args = ["-Zunstable-options", "-Zrustdoc-scrape-examples"] 28 | 29 | [workspace.dependencies] 30 | # TODO: Open issue to make disabling npy feature possible 31 | faer = { version = "0.21.9", default-features = false, features = [ 32 | "std", 33 | "sparse-linalg", 34 | "npy", 35 | ] } 36 | faer-ext = { version = "0.5.0", features = ["nalgebra"] } 37 | nalgebra = { version = "0.33.2", features = ["compare"] } 38 | 39 | [dependencies] 40 | foldhash = "0.1.4" 41 | paste = "1.0.15" 42 | downcast-rs = "2.0.1" 43 | log = "0.4.22" 44 | pad-adapter = "0.1.1" 45 | dyn-clone = "1.0.17" 46 | factrs-proc = { version = "0.2.0", path = "./factrs-proc" } 47 | 48 | # numerical 49 | faer.workspace = true 50 | nalgebra.workspace = true 51 | faer-ext.workspace = true 52 | simba = { version = "0.9.0", default-features = false } 53 | num-dual = "0.11.0" 54 | matrixcompare = { version = "0.3.0" } 55 | statrs = { version = "0.18.0", default-features = false } 56 | 57 | # serialization 58 | serde = { version = "1.0.217", optional = true } 59 | factrs-typetag = { version = "0.2.0", optional = true, path = "./factrs-typetag" } 60 | 61 | # rerun support 62 | rerun = { version = "0.22", optional = true, default-features = false, features = [ 63 | "sdk", 64 | ] } 65 | indexmap = { version = "2.9.0", default-features = false } 66 | 67 | 68 | [features] 69 | # Run everything with f32 instead of the default f64 70 | f32 = [] 71 | 72 | # Use left instead of right for lie group updates 73 | left = [] 74 | 75 | # use SO(n) x R 
instead of SE(n) for exponential map 76 | fake_exp = [] 77 | 78 | # Add multithreaded support (may run slower on smaller problems) 79 | rayon = ["faer/rayon"] 80 | 81 | # Add support for serialization 82 | serde = [ 83 | "dep:serde", 84 | "dep:factrs-typetag", 85 | "factrs-proc/serde", 86 | "nalgebra/serde-serialize", 87 | "indexmap/serde", 88 | ] 89 | 90 | # Support for conversion to rerun variable types 91 | rerun = ["dep:rerun"] 92 | 93 | [dev-dependencies] 94 | matrixcompare = "0.3.0" 95 | pretty_env_logger = "0.5.0" 96 | nalgebra = { version = "0.33.2", features = ["compare"] } 97 | serde_json = { version = "1.0.135" } 98 | 99 | [profile.bench] 100 | lto = true 101 | 102 | [profile.profile] 103 | inherits = "bench" 104 | debug = true 105 | 106 | [profile.dev.package.faer] 107 | opt-level = 3 108 | 109 | [[example]] 110 | name = "serde" 111 | required-features = ["serde"] 112 | doc-scrape-examples = true 113 | -------------------------------------------------------------------------------- /src/linalg/forward_prop.rs: -------------------------------------------------------------------------------- 1 | use paste::paste; 2 | 3 | use super::{ 4 | dual::{DualAllocator, DualVector}, 5 | AllocatorBuffer, Diff, MatrixDim, 6 | }; 7 | use crate::{ 8 | linalg::{Const, DefaultAllocator, DiffResult, DimName, Dyn, MatrixX, VectorDim, VectorX}, 9 | variables::{Variable, VariableDtype}, 10 | }; 11 | 12 | /// Forward mode differentiator 13 | /// 14 | /// It requires a function that takes in variables with a dtype of [DualVector] 15 | /// and outputs a vector of the same dtype. The generic parameter `N` is used to 16 | /// specify the dimension of the DualVector. 17 | /// 18 | /// This struct is used to compute the Jacobian of a function using forward mode 19 | /// differentiation via dual-numbers. It can operate on functions with up to 6 20 | /// inputs and with vector-valued outputs. 
21 | /// 22 | /// ``` 23 | /// use factrs::{ 24 | /// linalg::{vectorx, Const, DiffResult, ForwardProp, Numeric, VectorX}, 25 | /// traits::*, 26 | /// variables::SO2, 27 | /// }; 28 | /// 29 | /// fn f(x: SO2, y: SO2) -> VectorX { 30 | /// x.ominus(&y) 31 | /// } 32 | /// 33 | /// let x = SO2::from_theta(2.0); 34 | /// let y = SO2::from_theta(1.0); 35 | /// 36 | /// // 2 as the generic since we have 2 dimensions going in 37 | /// let DiffResult { value, diff } = ForwardProp::>::jacobian_2(f, &x, &y); 38 | /// assert_eq!(value, vectorx![1.0]); 39 | /// ``` 40 | pub struct ForwardProp { 41 | _phantom: std::marker::PhantomData, 42 | } 43 | 44 | macro_rules! forward_maker { 45 | ($num:expr, $( ($name:ident: $var:ident) ),*) => { 46 | paste! { 47 | #[allow(unused_assignments)] 48 | fn []<$( $var: VariableDtype, )* F: Fn($($var::Alias,)*) -> VectorX> 49 | (f: F, $($name: &$var,)*) -> DiffResult{ 50 | // Prepare variables 51 | let mut curr_dim = 0; 52 | $( 53 | let $name: $var::Alias = $name.dual(curr_dim); 54 | curr_dim += $name.dim(); 55 | )* 56 | 57 | // Compute residual 58 | let res = f($($name,)*); 59 | 60 | // Compute Jacobian 61 | let n = VectorDim::::zeros().shape_generic().0; 62 | let eps1 = MatrixDim::::from_rows( 63 | res.map(|r| r.eps.unwrap_generic(n, Const::<1>).transpose()) 64 | .as_slice(), 65 | ); 66 | 67 | let mut eps = MatrixX::zeros(res.len(), N::USIZE); 68 | eps.copy_from(&eps1); 69 | 70 | DiffResult { 71 | value: res.map(|r| r.re), 72 | diff: eps, 73 | } 74 | } 75 | } 76 | }; 77 | } 78 | 79 | impl Diff for ForwardProp 80 | where 81 | AllocatorBuffer: Sync + Send, 82 | DefaultAllocator: DualAllocator, 83 | DualVector: Copy, 84 | { 85 | type T = DualVector; 86 | 87 | forward_maker!(1, (v1: V1)); 88 | forward_maker!(2, (v1: V1), (v2: V2)); 89 | forward_maker!(3, (v1: V1), (v2: V2), (v3: V3)); 90 | forward_maker!(4, (v1: V1), (v2: V2), (v3: V3), (v4: V4)); 91 | forward_maker!(5, (v1: V1), (v2: V2), (v3: V3), (v4: V4), (v5: V5)); 92 | forward_maker!(6, 
(v1: V1), (v2: V2), (v3: V3), (v4: V4), (v5: V5), (v6: V6)); 93 | } 94 | -------------------------------------------------------------------------------- /src/residuals/prior.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | linalg::{ 3 | AllocatorBuffer, DefaultAllocator, DualAllocator, DualVector, ForwardProp, Numeric, VectorX, 4 | }, 5 | residuals::Residual1, 6 | variables::{Variable, VariableDtype}, 7 | }; 8 | 9 | /// Unary factor for a prior on a variable. 10 | /// 11 | /// This residual is used to enforce a prior on a variable. Specifically it 12 | /// computes $$ 13 | /// z \ominus v 14 | /// $$ 15 | /// where $z$ is the prior value and $v$ is the variable being estimated. 16 | #[derive(Clone, Debug)] 17 | #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] 18 | pub struct PriorResidual
<P>
{ 19 | prior: P, 20 | } 21 | 22 | impl PriorResidual

{ 23 | pub fn new(prior: P) -> Self { 24 | Self { prior } 25 | } 26 | } 27 | 28 | #[factrs::mark] 29 | impl
<P>
Residual1 for PriorResidual
<P>
30 | where 31 | P: VariableDtype + 'static, 32 | AllocatorBuffer: Sync + Send, 33 | DefaultAllocator: DualAllocator, 34 | DualVector: Copy, 35 | { 36 | type Differ = ForwardProp; 37 | type V1 = P; 38 | type DimIn = P::Dim; 39 | type DimOut = P::Dim; 40 | 41 | fn residual1(&self, v: ::Alias) -> VectorX { 42 | self.prior.cast::().ominus(&v) 43 | } 44 | } 45 | 46 | #[cfg(test)] 47 | mod test { 48 | 49 | use matrixcompare::assert_matrix_eq; 50 | 51 | use super::*; 52 | use crate::{ 53 | containers::Values, 54 | linalg::{vectorx, DefaultAllocator, Diff, DualAllocator, NumericalDiff}, 55 | symbols::X, 56 | variables::{VectorVar3, SE3, SO3}, 57 | }; 58 | 59 | #[cfg(not(feature = "f32"))] 60 | const PWR: i32 = 6; 61 | #[cfg(not(feature = "f32"))] 62 | const TOL: f64 = 1e-6; 63 | 64 | #[cfg(feature = "f32")] 65 | const PWR: i32 = 4; 66 | #[cfg(feature = "f32")] 67 | const TOL: f32 = 1e-2; 68 | 69 | fn test_prior_jacobian< 70 | #[cfg(feature = "serde")] P: VariableDtype + 'static + typetag::Tagged, 71 | #[cfg(not(feature = "serde"))] P: VariableDtype + 'static, 72 | >( 73 | prior: P, 74 | ) where 75 | AllocatorBuffer: Sync + Send, 76 | DefaultAllocator: DualAllocator, 77 | DualVector: Copy, 78 | { 79 | let prior_residual = PriorResidual::new(prior); 80 | 81 | let x1 = P::identity(); 82 | let mut values = Values::new(); 83 | values.insert_unchecked(X(0), x1.clone()); 84 | let jac = prior_residual 85 | .residual1_jacobian(&values, &[X(0).into()]) 86 | .diff; 87 | 88 | let f = |v: P| { 89 | let mut vals = Values::new(); 90 | vals.insert_unchecked(X(0), v.clone()); 91 | Residual1::residual1_values(&prior_residual, &vals, &[X(0).into()]) 92 | }; 93 | let jac_n = NumericalDiff::::jacobian_1(f, &x1).diff; 94 | 95 | eprintln!("jac: {:.3}", jac); 96 | eprintln!("jac_n: {:.3}", jac_n); 97 | 98 | assert_matrix_eq!(jac, jac_n, comp = abs, tol = TOL); 99 | } 100 | 101 | #[test] 102 | fn prior_linear() { 103 | test_prior_jacobian(VectorVar3::new(1.0, 2.0, 3.0)); 104 | } 105 | 106 | 
#[test] 107 | fn prior_so3() { 108 | let prior = SO3::exp(vectorx![0.1, 0.2, 0.3].as_view()); 109 | test_prior_jacobian(prior); 110 | } 111 | 112 | #[test] 113 | fn prior_se3() { 114 | let prior = SE3::exp(vectorx![0.1, 0.2, 0.3, 1.0, 2.0, 3.0].as_view()); 115 | test_prior_jacobian(prior); 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /src/optimizers/gauss_newton.rs: -------------------------------------------------------------------------------- 1 | use faer_ext::IntoNalgebra; 2 | 3 | use super::{BaseOptParams, OptObserverVec, OptResult, Optimizer}; 4 | use crate::{ 5 | containers::{Graph, GraphOrder, Values, ValuesOrder}, 6 | dtype, 7 | linalg::DiffResult, 8 | linear::{LinearSolver, LinearValues}, 9 | }; 10 | 11 | /// The Gauss-Newton optimizer 12 | /// 13 | /// Solves $A \Delta \Theta = b$ directly for each optimizer steps. It defaults 14 | /// to using [CholeskySolver](crate::linear::CholeskySolver) under the hood, but 15 | /// this can be changed using [set_solver](GaussNewton::set_solver). See 16 | /// the [linear](crate::linear) module for more linear solver options. 17 | pub struct GaussNewton { 18 | graph: Graph, 19 | solver: Box, 20 | /// Basic parameters for the optimizer 21 | params: BaseOptParams, 22 | /// Observers for the optimizer 23 | observers: OptObserverVec, 24 | // For caching computation between steps 25 | graph_order: Option, 26 | } 27 | 28 | impl GaussNewton { 29 | /// Sets the linear solver to use for the optimizer. 
30 | pub fn set_solver(&mut self, solver: impl LinearSolver + 'static) { 31 | self.solver = Box::new(solver); 32 | } 33 | } 34 | 35 | impl Optimizer for GaussNewton { 36 | type Params = BaseOptParams; 37 | 38 | fn new(params: Self::Params, graph: Graph) -> Self { 39 | Self { 40 | graph, 41 | solver: Default::default(), 42 | observers: OptObserverVec::default(), 43 | params, 44 | graph_order: None, 45 | } 46 | } 47 | 48 | fn observers(&self) -> &OptObserverVec { 49 | &self.observers 50 | } 51 | 52 | fn observers_mut(&mut self) -> &mut OptObserverVec { 53 | &mut self.observers 54 | } 55 | 56 | fn graph(&self) -> &Graph { 57 | &self.graph 58 | } 59 | 60 | fn graph_mut(&mut self) -> &mut Graph { 61 | &mut self.graph 62 | } 63 | 64 | fn error(&self, values: &Values) -> dtype { 65 | self.graph.error(values) 66 | } 67 | 68 | fn params(&self) -> &BaseOptParams { 69 | &self.params 70 | } 71 | 72 | fn init(&mut self, _values: &Values) -> Vec<&'static str> { 73 | // TODO: Some way to manual specify how to compute ValuesOrder 74 | // Precompute the sparsity pattern 75 | self.graph_order = Some( 76 | self.graph 77 | .sparsity_pattern(ValuesOrder::from_values(_values)), 78 | ); 79 | 80 | Vec::new() 81 | } 82 | 83 | fn step(&mut self, mut values: Values, _idx: usize) -> OptResult<(Values, String)> { 84 | // Solve the linear system 85 | let linear_graph = self.graph.linearize(&values); 86 | let DiffResult { value: r, diff: j } = 87 | linear_graph.residual_jacobian(self.graph_order.as_ref().expect("Missing graph order")); 88 | 89 | // Solve Ax = b 90 | let delta = self 91 | .solver 92 | .solve_lst_sq(j.as_ref(), r.as_ref()) 93 | .as_ref() 94 | .into_nalgebra() 95 | .column(0) 96 | .clone_owned(); 97 | 98 | // Update the values 99 | let dx = LinearValues::from_order_and_vector( 100 | self.graph_order 101 | .as_ref() 102 | .expect("Missing graph order") 103 | .order 104 | .clone(), 105 | delta, 106 | ); 107 | values.oplus_mut(&dx); 108 | 109 | Ok((values, String::new())) 110 | } 111 
| } 112 | 113 | #[cfg(test)] 114 | mod test { 115 | use super::*; 116 | use crate::test_optimizer; 117 | 118 | test_optimizer!(GaussNewton); 119 | } 120 | -------------------------------------------------------------------------------- /tests/custom_variable.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | 3 | use factrs::{ 4 | dtype, 5 | linalg::{Numeric, SupersetOf, Vector1}, 6 | traits::Variable, 7 | }; 8 | 9 | #[derive(Clone)] 10 | #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] 11 | pub struct MyVar { 12 | val: Vector1, 13 | } 14 | 15 | impl MyVar { 16 | pub fn new(v: T) -> MyVar { 17 | MyVar { 18 | val: Vector1::new(v), 19 | } 20 | } 21 | } 22 | 23 | #[factrs::mark] 24 | impl Variable for MyVar { 25 | type T = T; 26 | type Dim = factrs::linalg::Const<1>; 27 | type Alias = MyVar; 28 | 29 | fn identity() -> Self { 30 | MyVar { 31 | val: Vector1::zeros(), 32 | } 33 | } 34 | 35 | fn inverse(&self) -> Self { 36 | MyVar { val: -self.val } 37 | } 38 | 39 | fn compose(&self, other: &Self) -> Self { 40 | MyVar { 41 | val: self.val + other.val, 42 | } 43 | } 44 | 45 | fn exp(delta: factrs::linalg::VectorViewX) -> Self { 46 | let val = Vector1::new(delta[0]); 47 | MyVar { val } 48 | } 49 | 50 | fn log(&self) -> factrs::linalg::VectorX { 51 | factrs::linalg::vectorx![self.val.x] 52 | } 53 | 54 | fn cast>(&self) -> Self::Alias { 55 | MyVar { 56 | val: self.val.cast(), 57 | } 58 | } 59 | } 60 | 61 | impl fmt::Display for MyVar { 62 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 63 | write!(f, "MyVar(g: {:.3})", self.val.x) 64 | } 65 | } 66 | 67 | impl fmt::Debug for MyVar { 68 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 69 | fmt::Display::fmt(self, f) 70 | } 71 | } 72 | 73 | factrs::test_variable!(MyVar); 74 | 75 | #[cfg(feature = "serde")] 76 | mod ser_de { 77 | use factrs::{ 78 | assert_variable_eq, containers::Values, residuals::PriorResidual, 
symbols::X, 79 | traits::Residual, variables::VariableSafe, 80 | }; 81 | 82 | use super::*; 83 | 84 | // Make sure it serializes properly 85 | #[test] 86 | fn test_json_serialize() { 87 | let trait_object = &MyVar::new(5.5) as &dyn VariableSafe; 88 | let json = serde_json::to_string(trait_object).unwrap(); 89 | let expected = r#"{"tag":"MyVar","val":[5.5]}"#; 90 | println!("json: {}", json); 91 | assert_eq!(json, expected); 92 | } 93 | 94 | #[test] 95 | fn test_json_deserialize() { 96 | let json = r#"{"tag":"MyVar","val":[4.5]}"#; 97 | let trait_object: Box = serde_json::from_str(json).unwrap(); 98 | let var: &MyVar = trait_object.downcast_ref::().unwrap(); 99 | assert_variable_eq!(var, MyVar::new(4.5)); 100 | } 101 | 102 | // Make sure the prior can as well 103 | #[test] 104 | fn test_prior_serialize() { 105 | let trait_object = &PriorResidual::new(MyVar::new(2.3)) as &dyn Residual; 106 | let json = serde_json::to_string(trait_object).unwrap(); 107 | let expected = r#"{"tag":"PriorResidual","prior":{"val":[2.3]}}"#; 108 | println!("json: {}", json); 109 | assert_eq!(json, expected); 110 | } 111 | 112 | #[test] 113 | fn test_prior_deserialize() { 114 | let json = r#"{"tag":"PriorResidual","prior":{"val":[1.2]}}"#; 115 | let trait_object: Box = serde_json::from_str(json).unwrap(); 116 | 117 | let mut values = Values::new(); 118 | values.insert_unchecked(X(0), MyVar::new(1.2)); 119 | let error = trait_object.residual(&values, &[X(0).into()])[0]; 120 | 121 | assert_eq!(trait_object.dim_in(), 1); 122 | assert_eq!(trait_object.dim_out(), 1); 123 | assert_eq!(error, 0.0); 124 | } 125 | } 126 | -------------------------------------------------------------------------------- /factrs-proc/src/variable.rs: -------------------------------------------------------------------------------- 1 | use proc_macro2::{Ident, TokenStream as TokenStream2}; 2 | use quote::{quote, ToTokens}; 3 | use syn::{Error, ItemImpl, Type, TypePath}; 4 | 5 | fn type_name(mut ty: &Type) -> Option { 6 | 
loop { 7 | match ty { 8 | Type::Path(TypePath { qself: None, path }) => { 9 | return Some(path.segments.last().unwrap().ident.clone()); 10 | } 11 | Type::Group(group) => { 12 | ty = &group.elem; 13 | } 14 | _ => return None, 15 | } 16 | } 17 | } 18 | 19 | pub fn mark(item: ItemImpl) -> TokenStream2 { 20 | if !cfg!(feature = "serde") { 21 | return quote! { #item }; 22 | } 23 | 24 | let mut expanded = quote::quote! { 25 | #item 26 | }; 27 | 28 | // Find name 29 | let name = type_name(&item.self_ty).unwrap().to_token_stream(); 30 | let name_str = name.to_string(); 31 | let name_quotes = quote!(#name_str); 32 | 33 | // Have to do three things here to get typetag to work 34 | // - Impl typetag::Tagged 35 | // - Register our variable in typetag 36 | // - Tag PriorResidual, BetweenResidual with this type 37 | match item.generics.params.len() { 38 | 0 => { 39 | let msg = "variable should have dtype generic"; 40 | return Error::new_spanned(&item.generics, msg).to_compile_error(); 41 | } 42 | // Simple variable, just use as is 43 | 1 => { 44 | let name_str = name.to_string(); 45 | expanded.extend(tag_all(&name, &name_str)); 46 | expanded.extend(quote!( 47 | impl typetag::Tagged for #name { 48 | fn tag() -> String { 49 | String::from(#name_quotes) 50 | } 51 | } 52 | )); 53 | } 54 | // Anymore and it's up to the user 55 | _ => {} 56 | } 57 | 58 | expanded 59 | } 60 | 61 | fn tag_all(kind: &TokenStream2, name: &str) -> TokenStream2 { 62 | let name_prior = format!("PriorResidual<{}>", name); 63 | let name_between = format!("BetweenResidual<{}>", name); 64 | quote! { 65 | // Self 66 | typetag::__private::inventory::submit! { 67 | ::typetag_register( 68 | #name, 69 | (|deserializer| typetag::__private::Result::Ok( 70 | typetag::__private::Box::new( 71 | typetag::__private::erased_serde::deserialize::<#kind>(deserializer)? 72 | ), 73 | )) as typetag::__private::DeserializeFn<::Object>, 74 | ) 75 | } 76 | 77 | // Prior 78 | typetag::__private::inventory::submit! 
{ 79 | ::typetag_register( 80 | #name_prior, 81 | (|deserializer| typetag::__private::Result::Ok( 82 | typetag::__private::Box::new( 83 | typetag::__private::erased_serde::deserialize::>(deserializer)? 84 | ), 85 | )) as typetag::__private::DeserializeFn<::Object>, 86 | ) 87 | } 88 | 89 | // Between 90 | typetag::__private::inventory::submit! { 91 | ::typetag_register( 92 | #name_between, 93 | (|deserializer| typetag::__private::Result::Ok( 94 | typetag::__private::Box::new( 95 | typetag::__private::erased_serde::deserialize::>(deserializer)? 96 | ), 97 | )) as typetag::__private::DeserializeFn<::Object>, 98 | ) 99 | } 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /factrs-bench/plot.py: -------------------------------------------------------------------------------- 1 | # /// script 2 | # requires-python = ">=3.12" 3 | # dependencies = [ 4 | # "numpy", 5 | # "polars", 6 | # "pyqt6", 7 | # "seaborn", 8 | # ] 9 | # /// 10 | import json 11 | from pathlib import Path 12 | import polars as pl # type: ignore 13 | import seaborn as sns # type: ignore 14 | import matplotlib.pyplot as plt 15 | import matplotlib 16 | 17 | DIR = Path("factrs-bench") 18 | 19 | 20 | def setup_plot(): 21 | matplotlib.rc("pdf", fonttype=42) 22 | sns.set_context("paper") 23 | sns.set_style("whitegrid") 24 | sns.set_palette("colorblind") 25 | sns.color_palette("colorblind") 26 | 27 | # Make sure you install times & clear matplotlib cache 28 | # https://stackoverflow.com/a/49884009 29 | plt.rcParams["font.family"] = "Times New Roman" 30 | plt.rcParams["mathtext.fontset"] = "stix" 31 | 32 | 33 | # Load data from rust.json 34 | with open(DIR / "rust.json", "r") as rust_file: 35 | rust_data = json.load(rust_file) 36 | with open(DIR / "cpp_3d.json", "r") as cpp_file: 37 | cpp_data = json.load(cpp_file) 38 | with open(DIR / "cpp_2d.json", "r") as cpp_file: 39 | cpp_2d_data = json.load(cpp_file) 40 | 
cpp_data["results"].extend(cpp_2d_data["results"]) 41 | 42 | # Extract benchmark results 43 | rust_benchmarks = [ 44 | { 45 | "method": func["name"], 46 | "filename": filename.replace('"', ""), 47 | "time": t / 1e9, # Convert from us to ms 48 | } 49 | for group in rust_data["groups"].values() 50 | for func in group["function"] 51 | for timings, filename in zip(func["timings"], group["args"]["Named"]) 52 | for t in timings 53 | ] 54 | cpp_benchmarks = [ 55 | { 56 | "method": result["name"].split("_")[0], 57 | "filename": result["name"].split("_")[1], 58 | "time": mm["elapsed"] * 1e3, # Convert from s to ms 59 | } 60 | for result in cpp_data["results"] 61 | for mm in result["measurements"] 62 | ] 63 | 64 | # Create a Polars DataFrame 65 | df = pl.DataFrame(rust_benchmarks + cpp_benchmarks) 66 | 67 | # Pretty up the names 68 | df = df.with_columns( 69 | pl.col("filename") 70 | .replace( 71 | { 72 | "M3500.g2o": "M3500", 73 | "parking-garage.g2o": "Parking Garage", 74 | "sphere2500.g2o": "Sphere2500", 75 | } 76 | ) 77 | .alias("Dataset"), 78 | pl.col("method") 79 | .replace( 80 | { 81 | "sophus": "sophus-rs", 82 | "factrs": "fact-rs", 83 | "tinysolver": "tiny-solver-rs", 84 | } 85 | ) 86 | .alias("Library"), 87 | ) 88 | 89 | # Plot them using seaborn 90 | setup_plot() 91 | # paper sized 92 | # figsize = (252.0 / 72.27 + 0.5, 2.25) 93 | # legend_opts = { 94 | # "ncol": 3, 95 | # "columnspacing": 3.65, 96 | # "bbox_to_anchor": (0.125, 1.17), 97 | # } 98 | # readme sized 99 | figsize = (5, 1.5) 100 | legend_opts = { 101 | "ncol": 4, 102 | "columnspacing": 3.4, 103 | "bbox_to_anchor": (0.10, 1.3), 104 | } 105 | 106 | fig, ax = plt.subplots(1, 1, figsize=figsize, layout="constrained") 107 | sns.barplot( 108 | data=df, 109 | x="Dataset", 110 | y="time", 111 | hue="Library", 112 | ax=ax, 113 | estimator="median", 114 | errorbar=lambda x: (x.min(), x.max()), # type: ignore 115 | order=["M3500", "Parking Garage", "Sphere2500"], 116 | hue_order=["fact-rs", "sophus-rs", 
"tiny-solver-rs", "gtsam", "ceres"], 117 | ) 118 | 119 | ax.tick_params(axis="x", pad=-2) 120 | ax.tick_params(axis="y", pad=-1) 121 | ax.set_ylabel("Time (ms)") 122 | 123 | 124 | ax.set_yscale("symlog", linthresh=600, linscale=1.5) 125 | ax.set_yticks([0, 200, 400, 600, 1000, 1600], [0, 200, 400, 600, 1000, 1600]) 126 | 127 | ax.legend().set_visible(False) 128 | leg = fig.legend( 129 | borderpad=0.2, 130 | labelspacing=0.15, 131 | loc="outside upper left", 132 | **legend_opts, 133 | ).get_frame() 134 | leg.set_boxstyle("square") # type: ignore 135 | leg.set_linewidth(1.0) 136 | 137 | plt.savefig(DIR / "benchmarks.png", dpi=300, bbox_inches="tight") 138 | plt.savefig(DIR / "benchmarks.pdf", dpi=300, bbox_inches="tight") 139 | plt.savefig(DIR / "benchmarks.svg", dpi=300, bbox_inches="tight") 140 | # plt.show() 141 | -------------------------------------------------------------------------------- /factrs-bench/cpp/bench.cpp: -------------------------------------------------------------------------------- 1 | #include "bench_ceres.h" 2 | #include "ceres/types.h" 3 | #include "gtsam/slam/dataset.h" 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | #include 11 | #include 12 | 13 | using namespace ankerl; 14 | 15 | std::string directory = "examples/data/"; 16 | std::vector files_3d{"sphere2500.g2o", "parking-garage.g2o"}; 17 | std::vector files_2d{"M3500.g2o"}; 18 | 19 | // ------------------------- Ceres ------------------------- // 20 | template 21 | void run_ceres(nanobench::Bench *bench, std::string file) { 22 | std::map og_poses; 23 | std::vector og_constraints; 24 | std::tie(og_poses, og_constraints) = load_ceres(directory + file); 25 | 26 | bench->context("benchmark", "ceres"); 27 | bench->run("ceres_" + file, [&]() { 28 | // Copy the poses and constraints to avoid modifying the original data 29 | auto poses(og_poses); 30 | auto constraints(og_constraints); 31 | 32 | ceres::Problem problem; 33 | DIM::Build(constraints, &poses, &problem); 34 | 35 | 
ceres::Solver::Options options; 36 | options.trust_region_strategy_type = ceres::LEVENBERG_MARQUARDT; 37 | // shrink as much as possible to basically do a Gauss-Newton step 38 | options.initial_trust_region_radius = 1e16; 39 | options.sparse_linear_algebra_library_type = ceres::SUITE_SPARSE; 40 | options.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY; 41 | options.minimizer_progress_to_stdout = false; 42 | options.num_threads = 1; 43 | 44 | ceres::Solver::Summary summary; 45 | ceres::Solve(options, &problem, &summary); 46 | 47 | nanobench::doNotOptimizeAway(summary); 48 | }); 49 | } 50 | 51 | // ------------------------- gtsam ------------------------- // 52 | gtsam::GraphAndValues load_gtsam(std::string file, bool is3D) { 53 | auto read = gtsam::readG2o(file, is3D); 54 | 55 | if (is3D) { 56 | auto priorModel = gtsam::noiseModel::Diagonal::Variances( 57 | (gtsam::Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished()); 58 | read.first->addPrior(0, gtsam::Pose3::Identity(), priorModel); 59 | } else { 60 | auto priorModel = gtsam::noiseModel::Diagonal::Variances( 61 | gtsam::Vector3(1e-6, 1e-6, 1e-8)); 62 | auto prior = gtsam::PriorFactor(0, gtsam::Pose2()); 63 | read.first->addPrior(0, gtsam::Pose2::Identity(), priorModel); 64 | } 65 | 66 | return read; 67 | } 68 | 69 | void run_gtsam(nanobench::Bench *bench, std::string file, bool is3D) { 70 | auto gv = load_gtsam(directory + file, is3D); 71 | 72 | bench->context("benchmark", "gtsam"); 73 | bench->run("gtsam_" + file, [&]() { 74 | gtsam::NonlinearFactorGraph graph(*gv.first); 75 | gtsam::Values values(*gv.second); 76 | 77 | gtsam::GaussNewtonOptimizer optimizer(graph, values); 78 | gtsam::Values result = optimizer.optimize(); 79 | 80 | nanobench::doNotOptimizeAway(result); 81 | }); 82 | } 83 | 84 | char const *markdown() { 85 | return R"DELIM(| benchmark | args | fastest | median | mean | 86 | {{#result}}| {{context(benchmark)}} | {{name}} | {{minimum(elapsed)}} | {{median(elapsed)}} | {{average(elapsed)}} 
| 87 | {{/result}})DELIM"; 88 | } 89 | 90 | // ------------------------- Run benchmarks ------------------------- // 91 | int main(int argc, char *argv[]) { 92 | nanobench::Bench b; 93 | b.timeUnit(std::chrono::milliseconds(1), "ms"); 94 | b.maxEpochTime(std::chrono::seconds(2)); 95 | 96 | // read in optional number of epochs from command line 97 | if (argc > 1) { 98 | int epochs = std::atoi(argv[1]); 99 | if (epochs > 0) { 100 | std::cout << "Running " << epochs << " epochs\n"; 101 | b.epochs(epochs); 102 | } 103 | } 104 | 105 | // 3d benchmarks 106 | b.title("3d benchmarks"); 107 | for (auto &file : files_3d) { 108 | run_gtsam(&b, file, true); 109 | run_ceres(&b, file); 110 | } 111 | std::cout << "\nIn Markdown format:\n"; 112 | b.render(markdown(), std::cout); 113 | std::ofstream outFile("factrs-bench/cpp_3d.json"); 114 | b.render(nanobench::templates::json(), outFile); 115 | outFile.close(); 116 | 117 | // 2d benchmarks 118 | b.title("2d benchmarks"); 119 | for (auto &file : files_2d) { 120 | run_gtsam(&b, file, false); 121 | run_ceres(&b, file); 122 | } 123 | std::cout << "\nIn Markdown format:\n"; 124 | b.render(markdown(), std::cout); 125 | std::ofstream outFile2("factrs-bench/cpp_2d.json"); 126 | b.render(nanobench::templates::json(), outFile2); 127 | outFile2.close(); 128 | } -------------------------------------------------------------------------------- /src/residuals/traits.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Debug; 2 | 3 | use dyn_clone::DynClone; 4 | 5 | use crate::{ 6 | containers::{Key, Values}, 7 | linalg::{Diff, DiffResult, DimName, MatrixX, Numeric, VectorX}, 8 | variables::{Variable, VariableDtype}, 9 | }; 10 | 11 | type Alias = ::Alias; 12 | 13 | // ------------------ Base Residual Trait & Helpers ------------------ // 14 | /// Base trait for residuals 15 | /// 16 | /// This trait is used to implement custom residuals. 
It is recommended to use 17 | /// implement one of the `ResidualN` traits, and then [mark](factrs::mark) it to 18 | /// implement this. 19 | #[cfg_attr(feature = "serde", typetag::serde(tag = "tag"))] 20 | pub trait Residual: Debug + DynClone { 21 | fn dim_in(&self) -> usize; 22 | 23 | fn dim_out(&self) -> usize; 24 | 25 | fn residual(&self, values: &Values, keys: &[Key]) -> VectorX; 26 | 27 | fn residual_jacobian(&self, values: &Values, keys: &[Key]) -> DiffResult; 28 | } 29 | 30 | dyn_clone::clone_trait_object!(Residual); 31 | 32 | // -------------- Use Macro to create residuals with set sizes -------------- // 33 | use paste::paste; 34 | #[cfg(feature = "serde")] 35 | pub use register_residual as tag_residual; 36 | macro_rules! residual_maker { 37 | ($num:expr, $( ($idx:expr, $name:ident, $var:ident) ),*) => { 38 | paste! { 39 | #[doc=concat!("Residual trait for ", $num, " variables")] 40 | pub trait []: Residual 41 | { 42 | $( 43 | #[doc=concat!("Type of variable ", $idx)] 44 | type $var: VariableDtype; 45 | )* 46 | /// The total input dimension 47 | type DimIn: DimName; 48 | /// The output dimension of the residual 49 | type DimOut: DimName; 50 | /// Differentiator type (see [Diff](crate::linalg::Diff)) 51 | type Differ: Diff; 52 | 53 | /// Main residual computation 54 | /// 55 | /// If implementing your own residual, this is the only method you need to implement. 56 | /// It is generic over the dtype to allow for differentiable types. 
57 | fn [](&self, $($name: Alias,)*) -> VectorX; 58 | 59 | #[doc="Wrapper that unpacks and calls [" [] "](Self::" [] ")."] 60 | fn [](&self, values: &Values, keys: &[Key]) -> VectorX 61 | where 62 | $( 63 | Self::$var: 'static, 64 | )* 65 | { 66 | // Unwrap everything 67 | $( 68 | let $name: &Self::$var = values.get_unchecked(keys[$idx]).unwrap_or_else(|| { 69 | panic!("Key not found in values: {:?} with type {}", keys[$idx], std::any::type_name::()) 70 | }); 71 | )* 72 | self.[]($($name.clone(),)*) 73 | } 74 | 75 | 76 | #[doc="Wrapper that unpacks variables and computes jacobians using [" [] "](Self::" [] ")."] 77 | fn [](&self, values: &Values, keys: &[Key]) -> DiffResult 78 | where 79 | $( 80 | Self::$var: 'static, 81 | )* 82 | { 83 | // Unwrap everything 84 | $( 85 | let $name: &Self::$var = values.get_unchecked(keys[$idx]).unwrap_or_else(|| { 86 | panic!("Key not found in values: {:?} with type {}", keys[$idx], std::any::type_name::()) 87 | }); 88 | )* 89 | Self::Differ::[](|$($name,)*| self.[]($($name,)*), $($name,)*) 90 | } 91 | } 92 | } 93 | }; 94 | } 95 | 96 | residual_maker!(1, (0, v1, V1)); 97 | residual_maker!(2, (0, v1, V1), (1, v2, V2)); 98 | residual_maker!(3, (0, v1, V1), (1, v2, V2), (2, v3, V3)); 99 | residual_maker!(4, (0, v1, V1), (1, v2, V2), (2, v3, V3), (3, v4, V4)); 100 | residual_maker!( 101 | 5, 102 | (0, v1, V1), 103 | (1, v2, V2), 104 | (2, v3, V3), 105 | (3, v4, V4), 106 | (4, v5, V5) 107 | ); 108 | residual_maker!( 109 | 6, 110 | (0, v1, V1), 111 | (1, v2, V2), 112 | (2, v3, V3), 113 | (3, v4, V4), 114 | (4, v5, V5), 115 | (5, v6, V6) 116 | ); 117 | -------------------------------------------------------------------------------- /src/containers/symbol.rs: -------------------------------------------------------------------------------- 1 | // Similar to gtsam: https://github.com/borglab/gtsam/blob/develop/gtsam/inference/Symbol.cpp 2 | use std::{ 3 | fmt::{self, Write}, 4 | mem::size_of, 5 | }; 6 | 7 | use 
crate::variables::VariableDtype; 8 | 9 | // ------------------------- Symbol Basics ------------------------- // 10 | 11 | /// Newtype wrap around u64 12 | /// 13 | /// In it's final form, a Key is what is used for indexing inside of 14 | /// Values and Factors. Generally it is created from a [Symbol] 15 | #[derive(Clone, Copy, Eq, Hash, PartialEq, Debug)] 16 | #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] 17 | pub struct Key(pub u64); 18 | 19 | impl Symbol for Key {} 20 | 21 | /// Human-readable symbol that will be turned into a [Key] 22 | /// 23 | /// This is a requirement to be inserted into Values. Examples are 24 | /// X(100), A(101), etc. 25 | pub trait Symbol: fmt::Debug + Into {} 26 | 27 | /// Adds type information to a [Symbol] 28 | /// 29 | /// Will almost always be generated by [assign_symbols](factrs::assign_symbols) 30 | pub trait TypedSymbol: Symbol {} 31 | 32 | /// Custom formatting for keys in [Values](factrs::containers::Values) or 33 | /// [Graph](factrs::containers::Graph) 34 | /// 35 | /// The average user will likely never have to implement this as 36 | /// [DefaultSymbolHandler] will almost always be sufficient, unless a custom key 37 | /// is used. 
38 | pub trait KeyFormatter { 39 | fn fmt(f: &mut dyn Write, key: Key) -> fmt::Result; 40 | } 41 | 42 | // --------------------- Basic single char symbol --------------------- // 43 | 44 | // Char is stored in the high CHR_BITS 45 | // Idx is stored in the low IDX_BITS 46 | const TOTAL_SIZE: usize = u64::BITS as usize; 47 | const CHR_SIZE: usize = size_of::() * 8; 48 | const IDX_SIZE: usize = TOTAL_SIZE - CHR_SIZE; 49 | const CHR_MASK: u64 = (char::MAX as u64) << IDX_SIZE; 50 | const IDX_MASK: u64 = !CHR_MASK; 51 | 52 | /// Default symbol handler for [Symbols](Symbol) 53 | /// 54 | /// Specifically, this converts a char and an index into a [Key] and back 55 | #[derive(Clone)] 56 | #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] 57 | pub struct DefaultSymbolHandler; 58 | 59 | impl DefaultSymbolHandler { 60 | pub fn sym_to_key(chr: char, idx: u32) -> Key { 61 | Key(((chr as u64) << IDX_SIZE) | (idx as u64 & IDX_MASK)) 62 | } 63 | 64 | pub fn key_to_sym(k: Key) -> (char, u32) { 65 | let chr = ((k.0 & CHR_MASK) >> IDX_SIZE) as u8 as char; 66 | let idx = (k.0 & IDX_MASK) as u32; 67 | (chr, idx) 68 | } 69 | 70 | pub fn format(f: &mut dyn Write, chr: char, idx: u32) -> fmt::Result { 71 | write!(f, "{}{}", chr, idx) 72 | } 73 | } 74 | 75 | impl KeyFormatter for DefaultSymbolHandler { 76 | fn fmt(f: &mut dyn Write, key: Key) -> fmt::Result { 77 | let (chr, idx) = Self::key_to_sym(key); 78 | Self::format(f, chr, idx) 79 | } 80 | } 81 | 82 | /// Creates and assigns [Symbols](Symbol) to 83 | /// [Variables](factrs::variables::Variable) 84 | /// 85 | /// To reduce runtime errors, factrs symbols are tagged 86 | /// with the type they will be used with. This macro will create a new symbol 87 | /// and implement all the necessary traits for it to be used as a symbol. 88 | /// ``` 89 | /// use factrs::{assign_symbols, variables::{SO2, SE2}}; 90 | /// assign_symbols!(X: SO2; Y: SE2); 91 | /// ``` 92 | #[macro_export] 93 | macro_rules! 
assign_symbols { 94 | ($($name:ident : $($var:ident),+);* $(;)?) => {$( 95 | assign_symbols!($name); 96 | 97 | $( 98 | impl $crate::containers::TypedSymbol<$var> for $name {} 99 | )* 100 | )*}; 101 | 102 | ($($name:ident),*) => { 103 | $( 104 | #[derive(Clone, Copy)] 105 | pub struct $name(pub u32); 106 | 107 | impl From<$name> for $crate::containers::Key { 108 | fn from(key: $name) -> $crate::containers::Key { 109 | // TODO: Could we compute this char -> int info at compile time? 110 | let chr = stringify!($name).chars().next().unwrap(); 111 | let idx = key.0; 112 | $crate::containers::DefaultSymbolHandler::sym_to_key(chr, idx) 113 | } 114 | } 115 | 116 | impl std::fmt::Debug for $name { 117 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 118 | let chr = stringify!($name).chars().next().unwrap(); 119 | let idx = self.0; 120 | $crate::containers::DefaultSymbolHandler::format(f, chr, idx) 121 | } 122 | } 123 | 124 | impl $crate::containers::Symbol for $name {} 125 | )* 126 | }; 127 | } 128 | -------------------------------------------------------------------------------- /src/linear/values.rs: -------------------------------------------------------------------------------- 1 | use std::collections::hash_map::Iter as HashMapIter; 2 | 3 | use crate::{ 4 | containers::{Idx, Key, Symbol, Values, ValuesOrder}, 5 | linalg::{VectorViewX, VectorX}, 6 | }; 7 | 8 | /// Structure to store linear (aka all vector) values 9 | /// 10 | /// This structure is the linear equivalent of [Values]. It stores all values in 11 | /// a single vector, along with indices and dimensions of each variable. Ideally 12 | /// *shouldn't* ever be needed in practice. 
13 | pub struct LinearValues { 14 | values: VectorX, 15 | order: ValuesOrder, 16 | } 17 | 18 | impl LinearValues { 19 | /// Create a zero/identity LinearValues from a [ValuesOrder] 20 | pub fn zero_from_order(order: ValuesOrder) -> Self { 21 | let values = VectorX::zeros(order.dim()); 22 | Self { values, order } 23 | } 24 | 25 | /// Create a zero/identity LinearValues from a [Values] 26 | /// 27 | /// The order is inferred from the values 28 | pub fn zero_from_values(values: &Values) -> Self { 29 | let order = ValuesOrder::from_values(values); 30 | let values = VectorX::zeros(order.dim()); 31 | Self { values, order } 32 | } 33 | 34 | /// Create a LinearValues from a [ValuesOrder] and a vector 35 | pub fn from_order_and_vector(order: ValuesOrder, values: VectorX) -> Self { 36 | assert!( 37 | values.len() == order.dim(), 38 | "Vector and order must have the same dimension when creating LinearValues" 39 | ); 40 | Self { values, order } 41 | } 42 | 43 | /// Create a LinearValues from a [Values] and a vector 44 | /// 45 | /// The order is inferred from the values 46 | pub fn from_values_and_vector(values: &Values, vector: VectorX) -> Self { 47 | let order = ValuesOrder::from_values(values); 48 | assert!( 49 | vector.len() == order.dim(), 50 | "Vector and values must have the same dimension when creating LinearValues" 51 | ); 52 | Self::from_order_and_vector(order, vector) 53 | } 54 | 55 | pub fn len(&self) -> usize { 56 | self.order.len() 57 | } 58 | 59 | pub fn is_empty(&self) -> bool { 60 | self.order.is_empty() 61 | } 62 | 63 | pub fn dim(&self) -> usize { 64 | self.values.len() 65 | } 66 | 67 | fn get_idx(&self, idx: &Idx) -> VectorViewX<'_> { 68 | self.values.rows(idx.idx, idx.dim) 69 | } 70 | 71 | /// Retrieve a vector from the LinearValues 72 | pub fn get(&self, key: impl Symbol) -> Option> { 73 | let idx = self.order.get(key)?; 74 | self.get_idx(idx).into() 75 | } 76 | 77 | pub fn iter(&self) -> Iter<'_> { 78 | Iter { 79 | values: self, 80 | idx: 
self.order.iter(), 81 | } 82 | } 83 | } 84 | 85 | pub struct Iter<'a> { 86 | values: &'a LinearValues, 87 | idx: HashMapIter<'a, Key, Idx>, 88 | } 89 | 90 | impl<'a> Iterator for Iter<'a> { 91 | type Item = (&'a Key, VectorViewX<'a>); 92 | 93 | fn next(&mut self) -> Option { 94 | let n = self.idx.next()?; 95 | Some((n.0, self.values.get_idx(n.1))) 96 | } 97 | } 98 | 99 | #[cfg(test)] 100 | mod test { 101 | use super::*; 102 | use crate::{ 103 | dtype, 104 | symbols::X, 105 | variables::{Variable, VectorVar2, VectorVar3, VectorVar6}, 106 | }; 107 | 108 | fn make_order_vector() -> (ValuesOrder, VectorX) { 109 | // Create some form of values 110 | let mut v = Values::new(); 111 | v.insert_unchecked(X(0), VectorVar2::identity()); 112 | v.insert_unchecked(X(1), VectorVar6::identity()); 113 | v.insert_unchecked(X(2), VectorVar3::identity()); 114 | 115 | // Create an order 116 | let order = ValuesOrder::from_values(&v); 117 | let vector = VectorX::from_fn(order.dim(), |i, _| i as dtype); 118 | (order, vector) 119 | } 120 | 121 | #[test] 122 | fn from_order_and_vector() { 123 | let (order, vector) = make_order_vector(); 124 | 125 | // Create LinearValues 126 | let linear_values = LinearValues::from_order_and_vector(order, vector); 127 | assert!(linear_values.len() == 3); 128 | assert!(linear_values.dim() == 11); 129 | assert!(linear_values.get(X(0)).expect("Key was missing").len() == 2); 130 | assert!(linear_values.get(X(1)).expect("Key was missing").len() == 6); 131 | assert!(linear_values.get(X(2)).expect("Key was missing").len() == 3); 132 | assert!(linear_values.get(X(3)).is_none()); 133 | } 134 | 135 | #[test] 136 | #[should_panic] 137 | fn mismatched_size() { 138 | let (order, vector) = make_order_vector(); 139 | let vector = vector.push(0.0); 140 | LinearValues::from_order_and_vector(order, vector); 141 | } 142 | } 143 | -------------------------------------------------------------------------------- /examples/g2o.rs: 
-------------------------------------------------------------------------------- 1 | #[cfg(feature = "rerun")] 2 | use std::net::{SocketAddr, SocketAddrV4}; 3 | use std::{env, time::Instant}; 4 | 5 | #[cfg(feature = "rerun")] 6 | use factrs::rerun::RerunObserver; 7 | use factrs::{ 8 | core::{GaussNewton, LevenMarquardt, SE2, SE3}, 9 | optimizers::{GncGemanMcClure, GncParams, GraduatedNonConvexity}, 10 | traits::Optimizer, 11 | utils::load_g20, 12 | }; 13 | #[cfg(feature = "rerun")] 14 | use rerun::{Arrows2D, Arrows3D, Points2D, Points3D}; 15 | 16 | // Setups rerun and a callback for iteratively sending to rerun 17 | // Must run with --features rerun for it to work 18 | #[cfg(feature = "rerun")] 19 | fn rerun_init(opt: &mut impl Optimizer, dim: &str, obj: &str) { 20 | // Setup the rerun & the callback 21 | let socket = SocketAddrV4::new("0.0.0.0".parse().unwrap(), 9876); 22 | let rec = rerun::RecordingStreamBuilder::new("factrs-g2o-example") 23 | .connect_tcp_opts(SocketAddr::V4(socket), rerun::default_flush_timeout()) 24 | .unwrap(); 25 | 26 | // Log the graph 27 | let (nodes, edges) = opt.graph().into(); 28 | rec.log_static("graph", &[&nodes as &dyn rerun::AsComponents, &edges]) 29 | .expect("log failed"); 30 | 31 | let topic = "base/solution"; 32 | 33 | match (dim, obj) { 34 | ("se2", "points") => { 35 | let callback = RerunObserver::::new(rec, topic); 36 | opt.observers_mut().add(callback) 37 | } 38 | ("se2", "arrows") => { 39 | let callback = RerunObserver::::new(rec, topic); 40 | opt.observers_mut().add(callback) 41 | } 42 | ("se3", "points") => { 43 | let callback = RerunObserver::::new(rec, topic); 44 | opt.observers_mut().add(callback) 45 | } 46 | ("se3", "arrows") => { 47 | let callback = RerunObserver::::new(rec, topic); 48 | opt.observers_mut().add(callback) 49 | } 50 | _ => panic!("Invalid arguments"), 51 | }; 52 | } 53 | 54 | #[cfg(not(feature = "rerun"))] 55 | fn rerun_init(_opt: &mut impl Optimizer, _dim: &str, _obj: &str) {} 56 | 57 | fn main() -> 
Result<(), Box> { 58 | // ---------------------- Parse Arguments & Load data ---------------------- // 59 | let mut args: Vec = env::args().collect(); 60 | match args.len() { 61 | 2 => { 62 | args.push(String::from("gauss")); 63 | args.push(String::from("arrows")); 64 | } 65 | 3 => { 66 | args.push(String::from("arrows")); 67 | } 68 | 4 => {} 69 | _ => { 70 | println!("Usage: {} ", args[0]); 71 | return Ok(()); 72 | } 73 | } 74 | 75 | pretty_env_logger::init(); 76 | 77 | // Load the graph from the g2o file 78 | let filename = &args[1]; 79 | let (graph, init) = load_g20(filename); 80 | println!("File loaded, {} factors", graph.len()); 81 | 82 | let obj = &args[3]; 83 | let dim = if init.filter::().count() != 0 { 84 | "se2" 85 | } else if init.filter::().count() != 0 { 86 | "se3" 87 | } else { 88 | panic!("Graph doesn't have SE2 or SE3 variables"); 89 | }; 90 | 91 | // Make optimizer 92 | let mut optimizer: Box = match args[2].as_str() { 93 | "gauss" => { 94 | let mut opt = GaussNewton::new_default(graph); 95 | rerun_init(&mut opt, dim, obj); 96 | Box::new(opt) 97 | } 98 | "leven" => { 99 | let mut opt = LevenMarquardt::new_default(graph); 100 | rerun_init(&mut opt, dim, obj); 101 | Box::new(opt) 102 | } 103 | #[allow(clippy::field_reassign_with_default)] 104 | "gnc" => { 105 | let mut params: GncParams = GncParams::default(); 106 | params.mu_step_size = 1.4; 107 | params.base.max_iterations = 200; 108 | params.inner.base.max_iterations = 5; 109 | params.inner.base.error_tol_absolute = 1e-4; 110 | params.inner.base.error_tol_relative = 1e-4; 111 | let mut opt: GraduatedNonConvexity = 112 | GraduatedNonConvexity::new(params, graph); 113 | rerun_init(&mut opt, dim, obj); 114 | Box::new(opt) 115 | } 116 | _ => { 117 | println!("Optimizer not recognized"); 118 | return Ok(()); 119 | } 120 | }; 121 | 122 | // ------------------------- Optimize ------------------------- // 123 | let start = Instant::now(); 124 | let result = optimizer.optimize(init); 125 | let duration = 
start.elapsed(); 126 | 127 | match result { 128 | Ok(_) => println!("Optimization converged!"), 129 | Err(_) => println!("Optimization failed!"), 130 | } 131 | println!("Optimization took: {:?}", duration); 132 | Ok(()) 133 | } 134 | -------------------------------------------------------------------------------- /src/linalg/nalgebra_wrap.rs: -------------------------------------------------------------------------------- 1 | // Re-export all nalgebra types to put default dtype on everything 2 | // Misc imports 3 | use nalgebra::{self as na, OVector}; 4 | pub use nalgebra::{ 5 | allocator::Allocator, dmatrix as matrixx, dvector as vectorx, ComplexField, Const, 6 | DefaultAllocator, Dim, DimName, Dyn, RealField, 7 | }; 8 | pub use simba::scalar::SupersetOf; 9 | 10 | use crate::dtype; 11 | 12 | // Make it easier to bind the buffer type 13 | pub type AllocatorBuffer = >::Buffer; 14 | 15 | // ------------------------- Vector/Matrix Aliases ------------------------- // 16 | // Vectors 17 | pub type Vector = na::SVector; 18 | pub type VectorX = na::DVector; 19 | pub type Vector1 = na::SVector; 20 | pub type Vector2 = na::SVector; 21 | pub type Vector3 = na::SVector; 22 | pub type Vector4 = na::SVector; 23 | pub type Vector5 = na::SVector; 24 | pub type Vector6 = na::SVector; 25 | 26 | // Matrices 27 | // square 28 | pub type MatrixX = na::DMatrix; 29 | pub type Matrix1 = na::Matrix1; 30 | pub type Matrix2 = na::Matrix2; 31 | pub type Matrix3 = na::Matrix3; 32 | pub type Matrix4 = na::Matrix4; 33 | pub type Matrix5 = na::Matrix5; 34 | pub type Matrix6 = na::Matrix6; 35 | 36 | // row 37 | pub type Matrix1xX = na::Matrix1xX; 38 | pub type Matrix1x2 = na::Matrix1x2; 39 | pub type Matrix1x3 = na::Matrix1x3; 40 | pub type Matrix1x4 = na::Matrix1x4; 41 | pub type Matrix1x5 = na::Matrix1x5; 42 | pub type Matrix1x6 = na::Matrix1x6; 43 | 44 | // two rows 45 | pub type Matrix2xX = na::Matrix2xX; 46 | pub type Matrix2x3 = na::Matrix2x3; 47 | pub type Matrix2x4 = na::Matrix2x4; 48 | 
pub type Matrix2x5 = na::Matrix2x5; 49 | pub type Matrix2x6 = na::Matrix2x6; 50 | 51 | // three rows 52 | pub type Matrix3xX = na::Matrix3xX; 53 | pub type Matrix3x2 = na::Matrix3x2; 54 | pub type Matrix3x4 = na::Matrix3x4; 55 | pub type Matrix3x5 = na::Matrix3x5; 56 | pub type Matrix3x6 = na::Matrix3x6; 57 | 58 | // four rows 59 | pub type Matrix4xX = na::Matrix4xX; 60 | pub type Matrix4x2 = na::Matrix4x2; 61 | pub type Matrix4x3 = na::Matrix4x3; 62 | pub type Matrix4x5 = na::Matrix4x5; 63 | pub type Matrix4x6 = na::Matrix4x6; 64 | 65 | // five rows 66 | pub type Matrix5xX = na::Matrix5xX; 67 | pub type Matrix5x2 = na::Matrix5x2; 68 | pub type Matrix5x3 = na::Matrix5x3; 69 | pub type Matrix5x4 = na::Matrix5x4; 70 | pub type Matrix5x6 = na::Matrix5x6; 71 | 72 | // six rows 73 | pub type Matrix6xX = na::Matrix6xX; 74 | pub type Matrix6x2 = na::Matrix6x2; 75 | pub type Matrix6x3 = na::Matrix6x3; 76 | pub type Matrix6x4 = na::Matrix6x4; 77 | pub type Matrix6x5 = na::Matrix6x5; 78 | 79 | // dynamic rows 80 | pub type MatrixXx2 = na::MatrixXx2; 81 | pub type MatrixXx3 = na::MatrixXx3; 82 | pub type MatrixXx4 = na::MatrixXx4; 83 | pub type MatrixXx5 = na::MatrixXx5; 84 | pub type MatrixXx6 = na::MatrixXx6; 85 | pub type MatrixXxN = 86 | na::Matrix, na::VecStorage>>; 87 | 88 | // Views - aka references of matrices 89 | pub type MatrixViewX<'a, T = dtype> = na::MatrixView<'a, T, Dyn, Dyn>; 90 | 91 | pub type Matrix = na::Matrix< 92 | T, 93 | Const, 94 | Const, 95 | , Const>>::Buffer, 96 | >; 97 | pub type MatrixView<'a, const R: usize, const C: usize = 1, T = dtype> = 98 | na::MatrixView<'a, T, Const, Const>; 99 | 100 | pub type VectorView<'a, const N: usize, T = dtype> = na::VectorView<'a, T, Const>; 101 | pub type VectorViewX<'a, T = dtype> = na::VectorView<'a, T, Dyn>; 102 | pub type VectorView1<'a, T = dtype> = na::VectorView<'a, T, Const<1>>; 103 | pub type VectorView2<'a, T = dtype> = na::VectorView<'a, T, Const<2>>; 104 | pub type VectorView3<'a, T = dtype> = 
na::VectorView<'a, T, Const<3>>; 105 | pub type VectorView4<'a, T = dtype> = na::VectorView<'a, T, Const<4>>; 106 | pub type VectorView5<'a, T = dtype> = na::VectorView<'a, T, Const<5>>; 107 | pub type VectorView6<'a, T = dtype> = na::VectorView<'a, T, Const<6>>; 108 | 109 | // Generic, taking in sizes with Const 110 | pub type VectorDim = OVector; 111 | pub type MatrixDim, T = dtype> = 112 | na::Matrix>::Buffer>; 113 | pub type MatrixViewDim<'a, R, C = Const<1>, T = dtype> = na::MatrixView<'a, T, R, C>; 114 | -------------------------------------------------------------------------------- /examples/gps.rs: -------------------------------------------------------------------------------- 1 | /* 2 | This is a port of the "LocalizationExample.cpp" found in the gtsam repository 3 | https://github.com/borglab/gtsam/blob/11142b08fc842f1fb79ccf3017946d70f5173335/examples/LocalizationExample.cpp 4 | 5 | A simple 2D pose slam example with "GPS" measurements 6 | - The robot moves forward 2 meter each iteration 7 | - The robot initially faces along the X axis (horizontal, to the right in 2D) 8 | - We have full odometry between pose 9 | - We have "GPS-like" measurements implemented with a custom factor 10 | */ 11 | 12 | #![allow(unused_imports)] 13 | // Our state will be represented by SE2 -> theta, x, y 14 | // VectorVar2 is a newtype around Vector2 for optimization purposes 15 | use factrs::{ 16 | assign_symbols, 17 | core::{BetweenResidual, GaussNewton, Graph, Values}, 18 | dtype, fac, 19 | linalg::{Const, ForwardProp, Numeric, NumericalDiff, VectorX}, 20 | residuals::Residual1, 21 | traits::*, 22 | variables::{VectorVar2, SE2}, 23 | }; 24 | 25 | #[derive(Clone, Debug)] 26 | // Enable serialization if it's desired 27 | #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] 28 | pub struct GpsResidual { 29 | meas: VectorVar2, 30 | } 31 | 32 | impl GpsResidual { 33 | pub fn new(x: dtype, y: dtype) -> Self { 34 | Self { 35 | meas: VectorVar2::new(x, y), 
36 | } 37 | } 38 | } 39 | 40 | // The `mark` macro handles serialization stuff and some custom impl as well 41 | #[factrs::mark] 42 | impl Residual1 for GpsResidual { 43 | // Use forward propagation for differentiation 44 | type Differ = ForwardProp<::DimIn>; 45 | // Alternatively, could use numerical differentiation (6 => 10^-6 as 46 | // denominator) 47 | // type Differ = NumericalDiff<6>; 48 | 49 | // The input variable type, input dimension of variable(s), and output dimension 50 | // of residual 51 | type V1 = SE2; 52 | type DimIn = Const<3>; 53 | type DimOut = Const<2>; 54 | 55 | // D is a custom numeric type that can be leveraged for autodiff 56 | fn residual1(&self, v: SE2) -> VectorX { 57 | // Convert measurement from dtype to T 58 | let p_meas = self.meas.cast(); 59 | // Convert p to VectorVar2 as well 60 | let p = VectorVar2::from(v.xy().into_owned()); 61 | 62 | p.ominus(&p_meas) 63 | } 64 | 65 | // You can also hand-code the jacobian by hand if extra efficiency is desired. 66 | // fn residual1_jacobian(&self, values: &Values, keys: &[Key]) -> 67 | // DiffResult { let p: &SE2 = values 68 | // .get_unchecked(keys[0]) 69 | // .expect("got wrong variable type"); 70 | // let s = p.theta().sin(); 71 | // let c = p.theta().cos(); 72 | // let diff = MatrixX::from_row_slice(2, 3, &[0.0, c, -s, 0.0, s, c]); 73 | // DiffResult { 74 | // value: self.residual1(p.clone()), 75 | // diff, 76 | // } 77 | // } 78 | // As a note - the above jacobian is only valid if running with the "left" 79 | // feature disabled 80 | // Enabling the left feature will change the jacobian 81 | } 82 | 83 | // Here we assign X to always represent SE2 variables 84 | // We'll get a compile-time error if we try anything else 85 | assign_symbols!(X: SE2); 86 | 87 | fn main() { 88 | let mut graph = Graph::new(); 89 | 90 | // Add odometry factors 91 | let res = BetweenResidual::new(SE2::new(0.0, 2.0, 0.0)); 92 | let odometry_01 = fac![res.clone(), (X(0), X(1)), (0.1, 0.2) as cov]; 93 | let 
odometry_12 = fac![res, (X(1), X(2)), (0.1, 0.2) as cov]; 94 | graph.add_factor(odometry_01); 95 | graph.add_factor(odometry_12); 96 | 97 | // Add gps factors 98 | let g0 = fac![GpsResidual::new(0.0, 0.0), X(0), 1.0 as std]; 99 | let g1 = fac![GpsResidual::new(2.0, 0.0), X(1), 1.0 as std]; 100 | let g2 = fac![GpsResidual::new(4.0, 0.0), X(2), 1.0 as std]; 101 | graph.add_factor(g0); 102 | graph.add_factor(g1); 103 | graph.add_factor(g2); 104 | 105 | // Make values 106 | let mut values = Values::new(); 107 | values.insert(X(0), SE2::new(1.0, 2.0, 3.0)); 108 | values.insert(X(1), SE2::identity()); 109 | values.insert(X(2), SE2::identity()); 110 | 111 | // // These will all compile-time error 112 | // // mismatched symbol-variable types 113 | // values.insert(X(5), VectorVar2::identity()); 114 | // // wrong number of keys 115 | // let f = fac![GpsResidual::new(0.0, 0.0), (X(0), X(1))]; 116 | // // wrong noise-model dimension 117 | // let n = factrs::noise::GaussianNoise::<5>::from_scalar_sigma(0.1); 118 | // let f = fac![GpsResidual::new(0.0, 0.0), X(0), n]; 119 | // // mismatched symbol-variable types 120 | // assign_symbols!(Y : VectorVar2); 121 | // let f = fac![GpsResidual::new(0.0, 0.0), Y(0), 0.1 as std]; 122 | 123 | // optimize 124 | let mut opt: GaussNewton = GaussNewton::new_default(graph); 125 | let result = opt.optimize(values).expect("Optimization failed"); 126 | 127 | println!("Final Result: {:#?}", result); 128 | } 129 | -------------------------------------------------------------------------------- /factrs-proc/src/fac.rs: -------------------------------------------------------------------------------- 1 | use proc_macro2::{Span, TokenStream as TokenStream2}; 2 | use quote::{format_ident, quote, ToTokens}; 3 | use syn::{ 4 | parse::Parse, parse_quote, punctuated::Punctuated, spanned::Spanned, Expr, ExprCast, Ident, 5 | Token, 6 | }; 7 | 8 | pub struct Factor { 9 | residual: Expr, 10 | keys: Punctuated, 11 | noise: Option, 12 | robust: Option, 13 | } 14 
| 15 | impl Factor { 16 | fn noise_call(&self) -> TokenStream2 { 17 | match &self.noise { 18 | Some(n) => quote! { .noise(#n) }, 19 | None => TokenStream2::new(), 20 | } 21 | } 22 | 23 | fn robust_call(&self) -> TokenStream2 { 24 | match &self.robust { 25 | Some(r) => quote! {.robust(#r) }, 26 | None => TokenStream2::new(), 27 | } 28 | } 29 | 30 | fn new_call(&self) -> TokenStream2 { 31 | let func = Ident::new(&format!("new{}", self.keys.len()), Span::call_site()); 32 | let res = &self.residual; 33 | let keys = &self.keys; 34 | quote! { #func(#res, #keys) } 35 | } 36 | } 37 | 38 | impl Parse for Factor { 39 | fn parse(input: syn::parse::ParseStream) -> syn::Result { 40 | let input = Punctuated::::parse_terminated(input)?; 41 | 42 | // Make sure we go the right number of arguments 43 | if input.len() < 2 { 44 | return Err(syn::Error::new_spanned( 45 | &input[0], 46 | "Expected at least two items", 47 | )); 48 | } else if input.len() > 4 { 49 | return Err(syn::Error::new_spanned( 50 | input.last(), 51 | "Expected at most four items", 52 | )); 53 | } 54 | 55 | // Residual is first 56 | let residual = input[0].clone(); 57 | 58 | // Then the keys 59 | let keys = match &input[1] { 60 | // in brackets 61 | Expr::Array(a) => a.elems.clone(), 62 | // in parentheses 63 | Expr::Tuple(t) => t.elems.clone(), 64 | // a single key for unary factors 65 | Expr::Path(_) | Expr::Call(_) => { 66 | let mut p = Punctuated::::new(); 67 | p.push(input[1].clone()); 68 | p 69 | } 70 | _ => { 71 | return Err(syn::Error::new_spanned( 72 | &input[1], 73 | "Expected keys in brackets or parentheses", 74 | )); 75 | } 76 | }; 77 | 78 | // Then the noise 79 | let noise = if input.len() >= 3 { 80 | let m = quote!(factrs::noise); 81 | match &input[2] { 82 | Expr::Cast(ExprCast { expr, ty, .. 
}) => { 83 | // Make sure it's a cov or std cast 84 | let ty = match ty.to_token_stream().to_string().as_str() { 85 | "cov" => Ident::new("cov", ty.span()), 86 | "std" | "sigma" | "sig" => Ident::new("sigma", ty.span()), 87 | _ => return Err(syn::Error::new_spanned(ty, "Unknown cast for noise")), 88 | }; 89 | 90 | // Check if it's a tuple or a single variable 91 | match expr.as_ref() { 92 | Expr::Tuple(t) => { 93 | if t.elems.len() != 2 { 94 | return Err(syn::Error::new_spanned( 95 | t, 96 | "Expected tuple with two elements for split std/cov", 97 | )); 98 | } 99 | let (a, b) = (&t.elems[0], &t.elems[1]); 100 | let func = format_ident!("from_split_{}", ty); 101 | Some(parse_quote!(#m::GaussianNoise::#func(#a, #b))) 102 | } 103 | _ => { 104 | let func = format_ident!("from_scalar_{}", ty); 105 | Some(parse_quote!(#m::GaussianNoise::#func(#expr))) 106 | } 107 | } 108 | } 109 | Expr::Infer(_) => Some(parse_quote!(#m::UnitNoise)), 110 | _ => Some(input[2].clone()), 111 | } 112 | } else { 113 | None 114 | }; 115 | 116 | // Finally robust cost function 117 | let robust = if input.len() == 4 { 118 | Some(input[3].clone()) 119 | } else { 120 | None 121 | }; 122 | 123 | Ok(Factor { 124 | residual, 125 | keys, 126 | noise, 127 | robust, 128 | }) 129 | } 130 | } 131 | 132 | pub fn fac(factor: Factor) -> TokenStream2 { 133 | let call = factor.new_call(); 134 | let noise = factor.robust_call(); 135 | let robust = factor.noise_call(); 136 | 137 | let out = quote! 
{ 138 | factrs::containers::FactorBuilder:: #call #noise #robust.build() 139 | }; 140 | 141 | out 142 | } 143 | -------------------------------------------------------------------------------- /src/variables/imu_bias.rs: -------------------------------------------------------------------------------- 1 | use std::{fmt, ops}; 2 | 3 | use super::Variable; 4 | use crate::{ 5 | dtype, 6 | linalg::{Const, DualVector, Numeric, SupersetOf, Vector3, VectorDim}, 7 | residuals::{Accel, Gyro}, 8 | }; 9 | 10 | // TODO: Use newtypes internally as well? 11 | /// IMU bias 12 | /// 13 | /// The IMU bias is a 6D vector containing the gyro and accel biases. It is 14 | /// treated as a 6D vector for optimization purposes. 15 | #[derive(Clone)] 16 | #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] 17 | pub struct ImuBias { 18 | gyro: Vector3, 19 | accel: Vector3, 20 | } 21 | 22 | impl ImuBias { 23 | /// Create a new IMU bias 24 | pub fn new(gyro: Gyro, accel: Accel) -> Self { 25 | ImuBias { 26 | gyro: gyro.0, 27 | accel: accel.0, 28 | } 29 | } 30 | 31 | /// Create an IMU bias of zeros (same as identity) 32 | pub fn zeros() -> Self { 33 | ImuBias { 34 | gyro: Vector3::zeros(), 35 | accel: Vector3::zeros(), 36 | } 37 | } 38 | 39 | /// Get the gyro bias 40 | pub fn gyro(&self) -> &Vector3 { 41 | &self.gyro 42 | } 43 | 44 | /// Get the accel bias 45 | pub fn accel(&self) -> &Vector3 { 46 | &self.accel 47 | } 48 | } 49 | 50 | #[factrs::mark] 51 | impl Variable for ImuBias { 52 | type T = T; 53 | type Dim = crate::linalg::Const<6>; 54 | type Alias = ImuBias; 55 | 56 | fn identity() -> Self { 57 | ImuBias { 58 | gyro: Vector3::zeros(), 59 | accel: Vector3::zeros(), 60 | } 61 | } 62 | 63 | fn inverse(&self) -> Self { 64 | ImuBias { 65 | gyro: -self.gyro, 66 | accel: -self.accel, 67 | } 68 | } 69 | 70 | fn compose(&self, other: &Self) -> Self { 71 | ImuBias { 72 | gyro: self.gyro + other.gyro, 73 | accel: self.accel + other.accel, 74 | } 75 | } 76 | 77 | fn 
exp(delta: crate::linalg::VectorViewX) -> Self { 78 | let gyro = Vector3::new(delta[0], delta[1], delta[2]); 79 | let accel = Vector3::new(delta[3], delta[4], delta[5]); 80 | ImuBias { gyro, accel } 81 | } 82 | 83 | fn log(&self) -> crate::linalg::VectorX { 84 | crate::linalg::vectorx![ 85 | self.gyro.x, 86 | self.gyro.y, 87 | self.gyro.z, 88 | self.accel.x, 89 | self.accel.y, 90 | self.accel.z 91 | ] 92 | } 93 | 94 | fn cast>(&self) -> Self::Alias { 95 | ImuBias { 96 | gyro: self.gyro.cast(), 97 | accel: self.accel.cast(), 98 | } 99 | } 100 | 101 | fn dual_exp(idx: usize) -> Self::Alias> 102 | where 103 | crate::linalg::AllocatorBuffer: Sync + Send, 104 | nalgebra::DefaultAllocator: crate::linalg::DualAllocator, 105 | crate::linalg::DualVector: Copy, 106 | { 107 | let n = VectorDim::::zeros().shape_generic().0; 108 | 109 | let mut gyro = Vector3::>::zeros(); 110 | for (i, gi) in gyro.iter_mut().enumerate() { 111 | gi.eps = num_dual::Derivative::derivative_generic(n, Const::<1>, idx + i); 112 | } 113 | 114 | let mut accel = Vector3::>::zeros(); 115 | for (i, ai) in accel.iter_mut().enumerate() { 116 | ai.eps = num_dual::Derivative::derivative_generic(n, Const::<1>, idx + i + 3); 117 | } 118 | 119 | ImuBias { gyro, accel } 120 | } 121 | } 122 | 123 | impl fmt::Display for ImuBias { 124 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 125 | let precision = f.precision().unwrap_or(3); 126 | write!( 127 | f, 128 | "ImuBias(g: ({:.p$}, {:.p$}, {:.p$}), a: ({:.p$}, {:.p$}, {:.p$}))", 129 | self.gyro.x, 130 | self.gyro.y, 131 | self.gyro.z, 132 | self.accel.x, 133 | self.accel.y, 134 | self.accel.z, 135 | p = precision 136 | ) 137 | } 138 | } 139 | 140 | impl fmt::Debug for ImuBias { 141 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 142 | fmt::Display::fmt(self, f) 143 | } 144 | } 145 | 146 | impl ops::Sub for ImuBias { 147 | type Output = Self; 148 | 149 | fn sub(self, rhs: Self) -> Self { 150 | ImuBias { 151 | gyro: self.gyro - 
rhs.gyro, 152 | accel: self.accel - rhs.accel, 153 | } 154 | } 155 | } 156 | 157 | impl ops::Sub for &ImuBias { 158 | type Output = ImuBias; 159 | 160 | fn sub(self, rhs: Self) -> ImuBias { 161 | ImuBias { 162 | gyro: self.gyro - rhs.gyro, 163 | accel: self.accel - rhs.accel, 164 | } 165 | } 166 | } 167 | 168 | #[cfg(test)] 169 | mod tests { 170 | use super::*; 171 | use crate::test_variable; 172 | 173 | test_variable!(ImuBias); 174 | } 175 | -------------------------------------------------------------------------------- /src/variables/so2.rs: -------------------------------------------------------------------------------- 1 | use std::{fmt, ops}; 2 | 3 | use crate::{ 4 | dtype, 5 | linalg::{ 6 | vectorx, AllocatorBuffer, Const, DefaultAllocator, Derivative, DimName, DualAllocator, 7 | DualVector, Matrix1, Matrix2, MatrixView, Numeric, SupersetOf, Vector1, Vector2, VectorDim, 8 | VectorView1, VectorView2, VectorViewX, VectorX, 9 | }, 10 | variables::{MatrixLieGroup, Variable}, 11 | }; 12 | 13 | /// Special Orthogonal Group in 2D 14 | /// 15 | /// Implementation of SO(2) for 2D rotations. Specifically, we use complex 16 | /// numbers to represent rotations. 
17 | #[derive(Clone)] 18 | #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] 19 | pub struct SO2 { 20 | a: T, 21 | b: T, 22 | } 23 | 24 | impl SO2 { 25 | /// Create a new SO2 from an angle in radians 26 | #[allow(clippy::needless_borrow)] 27 | pub fn from_theta(theta: T) -> Self { 28 | SO2 { 29 | a: (&theta).cos(), 30 | b: (&theta).sin(), 31 | } 32 | } 33 | 34 | /// Convert SO2 to an angle in radians 35 | pub fn to_theta(&self) -> T { 36 | self.b.atan2(self.a) 37 | } 38 | } 39 | 40 | #[factrs::mark] 41 | impl Variable for SO2 { 42 | type T = T; 43 | type Dim = Const<1>; 44 | type Alias = SO2; 45 | 46 | fn identity() -> Self { 47 | SO2 { 48 | a: T::from(1.0), 49 | b: T::from(0.0), 50 | } 51 | } 52 | 53 | fn inverse(&self) -> Self { 54 | SO2 { 55 | a: self.a, 56 | b: -self.b, 57 | } 58 | } 59 | 60 | fn compose(&self, other: &Self) -> Self { 61 | SO2 { 62 | a: self.a * other.a - self.b * other.b, 63 | b: self.a * other.b + self.b * other.a, 64 | } 65 | } 66 | 67 | fn exp(xi: VectorViewX) -> Self { 68 | let theta = xi[0]; 69 | SO2::from_theta(theta) 70 | } 71 | 72 | fn log(&self) -> VectorX { 73 | vectorx![self.b.atan2(self.a)] 74 | } 75 | 76 | fn cast>(&self) -> Self::Alias { 77 | SO2 { 78 | a: TT::from_subset(&self.a), 79 | b: TT::from_subset(&self.b), 80 | } 81 | } 82 | 83 | fn dual_exp(idx: usize) -> Self::Alias> 84 | where 85 | AllocatorBuffer: Sync + Send, 86 | DefaultAllocator: DualAllocator, 87 | DualVector: Copy, 88 | { 89 | let mut a = DualVector::::from_re(1.0); 90 | a.eps = Derivative::new(Some(VectorDim::::zeros())); 91 | 92 | let mut b = DualVector::::from_re(0.0); 93 | let mut eps = VectorDim::::zeros(); 94 | eps[idx] = 1.0; 95 | b.eps = Derivative::new(Some(eps)); 96 | 97 | SO2 { a, b } 98 | } 99 | } 100 | 101 | impl MatrixLieGroup for SO2 { 102 | type TangentDim = Const<1>; 103 | type MatrixDim = Const<2>; 104 | type VectorDim = Const<2>; 105 | 106 | fn adjoint(&self) -> Matrix1 { 107 | Matrix1::identity() 108 | } 109 | 110 | 
fn hat(xi: VectorView1) -> Matrix2 { 111 | Matrix2::new(T::from(0.0), -xi[0], xi[0], T::from(0.0)) 112 | } 113 | 114 | fn vee(xi: MatrixView<2, 2, T>) -> Vector1 { 115 | Vector1::new(xi[(1, 0)]) 116 | } 117 | 118 | fn hat_swap(xi: VectorView2) -> Vector2 { 119 | Vector2::new(-xi[1], xi[0]) 120 | } 121 | 122 | fn apply(&self, v: VectorView2) -> Vector2 { 123 | Vector2::new(v[0] * self.a - v[1] * self.b, v[0] * self.b + v[1] * self.a) 124 | } 125 | 126 | fn from_matrix(mat: MatrixView<2, 2, T>) -> Self { 127 | SO2 { 128 | a: mat[(0, 0)], 129 | b: mat[(1, 0)], 130 | } 131 | } 132 | 133 | fn to_matrix(&self) -> Matrix2 { 134 | Matrix2::new(self.a, -self.b, self.b, self.a) 135 | } 136 | } 137 | 138 | impl ops::Mul for SO2 { 139 | type Output = SO2; 140 | 141 | #[inline] 142 | fn mul(self, other: Self) -> Self::Output { 143 | self.compose(&other) 144 | } 145 | } 146 | 147 | impl ops::Mul for &SO2 { 148 | type Output = SO2; 149 | 150 | #[inline] 151 | fn mul(self, other: Self) -> Self::Output { 152 | self.compose(other) 153 | } 154 | } 155 | 156 | impl fmt::Display for SO2 { 157 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 158 | let precision: usize = f.precision().unwrap_or(3); 159 | write!(f, "SO2(theta: {:.p$})", self.log()[0], p = precision) 160 | } 161 | } 162 | 163 | impl fmt::Debug for SO2 { 164 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 165 | let precision: usize = f.precision().unwrap_or(3); 166 | write!( 167 | f, 168 | "SO2 {{ a: {:.p$}, b: {:.p$} }}", 169 | self.a, 170 | self.b, 171 | p = precision 172 | ) 173 | } 174 | } 175 | 176 | #[cfg(test)] 177 | mod tests { 178 | use super::*; 179 | use crate::{test_lie, test_variable}; 180 | 181 | test_variable!(SO2); 182 | 183 | test_lie!(SO2); 184 | } 185 | -------------------------------------------------------------------------------- /src/linalg/mod.rs: -------------------------------------------------------------------------------- 1 | //! Various helpers for linear algebra structures. 
2 | //! 3 | //! Specifically this module contains the following, 4 | //! - re-alias all nalgebra types to use our dtype by default. 5 | //! - re-alias num-dual types for identical reasons 6 | //! - a [MatrixBlock] struct to help with block matrix operations after 7 | //! linearization 8 | //! - a [Diff] trait to help with numerical and forward-mode differentiation 9 | //! - Forward mode differentiator [ForwardProp] 10 | //! - Numerical differentiator [NumericalDiff] 11 | use crate::dtype; 12 | 13 | mod dual; 14 | pub use dual::{DualAllocator, DualScalar, DualVector, Numeric}; 15 | // Dual numbers 16 | pub use num_dual::Derivative; 17 | 18 | mod nalgebra_wrap; 19 | pub use nalgebra_wrap::*; 20 | 21 | // ------------------------- MatrixBlocks ------------------------- // 22 | /// A struct to help with block matrix operations after linearization 23 | /// 24 | /// This struct is used to store a matrix and a set of indices that 25 | /// represent the start of each block in the matrix. This is useful 26 | /// when linearizing a factor graph, where the Jacobian is a block 27 | /// matrix with each block corresponding to a different variable. 
28 | #[derive(Debug, Clone)] 29 | pub struct MatrixBlock { 30 | mat: MatrixX, 31 | idx: Vec, 32 | } 33 | 34 | impl MatrixBlock { 35 | pub fn new(mat: MatrixX, idx: Vec) -> Self { 36 | Self { mat, idx } 37 | } 38 | 39 | pub fn get_block(&self, idx: usize) -> MatrixViewX<'_> { 40 | let idx_start = self.idx[idx]; 41 | let idx_end = if idx + 1 < self.idx.len() { 42 | self.idx[idx + 1] 43 | } else { 44 | self.mat.ncols() 45 | }; 46 | self.mat.columns(idx_start, idx_end - idx_start) 47 | } 48 | 49 | pub fn mul(&self, idx: usize, x: VectorViewX<'_>) -> VectorX { 50 | self.get_block(idx) * x 51 | } 52 | 53 | pub fn mat(&self) -> MatrixViewX<'_> { 54 | self.mat.as_view() 55 | } 56 | 57 | pub fn idx(&self) -> &[usize] { 58 | &self.idx 59 | } 60 | } 61 | 62 | // ------------------------- Derivatives ------------------------- // 63 | use paste::paste; 64 | 65 | use crate::variables::VariableDtype; 66 | 67 | /// A struct to hold the result of a differentiation operation 68 | #[derive(Debug, Clone)] 69 | pub struct DiffResult { 70 | pub value: V, 71 | pub diff: G, 72 | } 73 | 74 | macro_rules! fn_maker { 75 | (grad, $num:expr, $( ($name:ident: $var:ident) ),*) => { 76 | paste! { 77 | fn []<$( $var: VariableDtype, )* F: Fn($($var::Alias,)*) -> Self::T> 78 | (f: F, $($name: &$var,)*) -> DiffResult{ 79 | let f_wrapped = |$($name: $var::Alias,)*| vectorx![f($($name.clone(),)*)]; 80 | let DiffResult { value, diff } = Self::[](f_wrapped, $($name,)*); 81 | let diff = VectorX::from_iterator(diff.len(), diff.iter().cloned()); 82 | DiffResult { value: value[0], diff } 83 | } 84 | } 85 | }; 86 | 87 | (jac, $num:expr, $( ($name:ident: $var:ident) ),*) => { 88 | paste! 
{ 89 | fn []<$( $var: VariableDtype, )* F: Fn($($var::Alias,)*) -> VectorX> 90 | (f: F, $($name: &$var,)*) -> DiffResult; 91 | } 92 | }; 93 | } 94 | 95 | /// A trait to abstract over different differentiation methods 96 | /// 97 | /// Specifically, this trait works for multi-input functions (where each input 98 | /// is a variable) with scalar output (gradient) or vector output (Jacobian). 99 | /// 100 | /// This trait is implemented for both numerical and forward-mode in 101 | /// [NumericalDiff] and [ForwardProp], respectively. Where possible, we 102 | /// recommend [ForwardProp] which functions using dual numbers. 103 | pub trait Diff { 104 | /// The dtype of the variables 105 | type T: Numeric; 106 | 107 | fn_maker!(grad, 1, (v1: V1)); 108 | fn_maker!(grad, 2, (v1: V1), (v2: V2)); 109 | fn_maker!(grad, 3, (v1: V1), (v2: V2), (v3: V3)); 110 | fn_maker!(grad, 4, (v1: V1), (v2: V2), (v3: V3), (v4: V4)); 111 | fn_maker!(grad, 5, (v1: V1), (v2: V2), (v3: V3), (v4: V4), (v5: V5)); 112 | fn_maker!(grad, 6, (v1: V1), (v2: V2), (v3: V3), (v4: V4), (v5: V5), (v6: V6)); 113 | 114 | fn_maker!(jac, 1, (v1: V1)); 115 | fn_maker!(jac, 2, (v1: V1), (v2: V2)); 116 | fn_maker!(jac, 3, (v1: V1), (v2: V2), (v3: V3)); 117 | fn_maker!(jac, 4, (v1: V1), (v2: V2), (v3: V3), (v4: V4)); 118 | fn_maker!(jac, 5, (v1: V1), (v2: V2), (v3: V3), (v4: V4), (v5: V5)); 119 | fn_maker!(jac, 6, (v1: V1), (v2: V2), (v3: V3), (v4: V4), (v5: V5), (v6: V6)); 120 | } 121 | 122 | /// Compute the derivative of a scalar function using numerical derivatives. 123 | pub fn numerical_derivative dtype>( 124 | f: F, 125 | x: dtype, 126 | eps: dtype, 127 | ) -> DiffResult { 128 | let r = f(x); 129 | let d = (f(x + eps) - f(x - eps)) / (2.0 * eps); 130 | 131 | DiffResult { value: r, diff: d } 132 | } 133 | 134 | /// Compute the derivative of a scalar function using forward derivatives. 
135 | pub fn forward_prop_derivative DualScalar>( 136 | f: F, 137 | x: dtype, 138 | ) -> DiffResult { 139 | let xd = x.into(); 140 | let r = f(xd); 141 | DiffResult { 142 | value: r.re, 143 | diff: r.eps, 144 | } 145 | } 146 | 147 | mod numerical_diff; 148 | pub use numerical_diff::NumericalDiff; 149 | 150 | mod forward_prop; 151 | pub use forward_prop::ForwardProp; 152 | -------------------------------------------------------------------------------- /src/optimizers/mod.rs: -------------------------------------------------------------------------------- 1 | //! Optimizers for solving non-linear least squares problems. 2 | //! 3 | //! Specifically, given a nonlinear least squares problem of the form, 4 | //! 5 | //! $$ 6 | //! \Theta^* = \argmin_{\Theta} \sum_{i} \rho_i(||r_i(\Theta)||_{\Sigma_i} ) 7 | //! $$ 8 | //! 9 | //! These optimizers function by first, linearizing to a linear least squares 10 | //! problem, 11 | //! $$ 12 | //! \Delta \Theta = \argmin_{\Delta \Theta} \sum_{i} ||A_i (\Delta \Theta)_i - 13 | //! b_i ||^2 14 | //! $$ 15 | //! 16 | //! This can be rearranged to a large, sparse linear system given by, 17 | //! 18 | //! $$ 19 | //! A \Delta \Theta = b 20 | //! $$ 21 | //! 22 | //! which can then be solved. For example, Gauss-Newton solves this directly, 23 | //! while Levenberg-Marquardt adds a damping term to the diagonal of $A^\top A$ 24 | //! to ensure positive definiteness. 25 | //! 26 | //! This module provides a set of optimizers that can be used to solve 27 | //! non-linear least squares problems. Each optimizer implements the [Optimizer] 28 | //! trait to give similar structure and usage. 29 | //! 30 | //! Additionally observers can be added to the optimizer to monitor the progress 31 | //! of the optimization. A prebuilt [rerun 32 | //! observer](crate::rerun::RerunObserver) can be enabled via the `rerun` 33 | //! feature. 34 | //! 35 | //! If you desire to implement your own optimizer, we additionally recommend 36 | //! 
using the [test_optimizer](crate::test_optimizer) macro to run a handful of 37 | //! simple tests over a few different variable types to ensure correctness. 38 | mod traits; 39 | pub use traits::{ 40 | BaseOptParams, OptError, OptObserver, OptObserverVec, OptParams, OptResult, Optimizer, 41 | }; 42 | 43 | mod macros; 44 | 45 | mod gauss_newton; 46 | pub use gauss_newton::GaussNewton; 47 | 48 | mod levenberg_marquardt; 49 | pub use levenberg_marquardt::{LevenMarquardt, LevenParams}; 50 | 51 | mod gnc; 52 | pub use gnc::{ConvexableKernel, GncGemanMcClure, GncParams, GraduatedNonConvexity}; 53 | 54 | // These aren't tests themselves, but are helpers to test optimizers 55 | #[cfg(test)] 56 | pub mod test { 57 | use matrixcompare::assert_matrix_eq; 58 | use nalgebra::{DefaultAllocator, DimNameAdd, DimNameSum, ToTypenum}; 59 | 60 | use super::*; 61 | use crate::{ 62 | containers::{FactorBuilder, Graph, Values}, 63 | dtype, 64 | linalg::{AllocatorBuffer, Const, DualAllocator, DualVector, VectorX}, 65 | residuals::{BetweenResidual, PriorResidual, Residual}, 66 | symbols::X, 67 | variables::VariableDtype, 68 | }; 69 | 70 | pub fn optimize_prior< 71 | O, 72 | const DIM: usize, 73 | #[cfg(feature = "serde")] T: VariableDtype> + 'static + typetag::Tagged, 74 | #[cfg(not(feature = "serde"))] T: VariableDtype> + 'static, 75 | >( 76 | new: &dyn Fn(Graph) -> O, 77 | ) where 78 | PriorResidual: Residual, 79 | O: Optimizer, 80 | { 81 | let t = VectorX::from_fn(T::DIM, |_, i| ((i + 1) as dtype) / 10.0); 82 | let p = T::exp(t.as_view()); 83 | 84 | let mut values = Values::new(); 85 | values.insert_unchecked(X(0), T::identity()); 86 | 87 | let mut graph = Graph::new(); 88 | let res = PriorResidual::new(p.clone()); 89 | let factor = FactorBuilder::new1_unchecked(res, X(0)).build(); 90 | graph.add_factor(factor); 91 | 92 | let mut opt = new(graph); 93 | values = opt.optimize(values).expect("Optimization failed"); 94 | 95 | let out: &T = values.get_unchecked(X(0)).expect("Missing X(0)"); 
96 | assert_matrix_eq!( 97 | out.ominus(&p), 98 | VectorX::zeros(T::DIM), 99 | comp = abs, 100 | tol = 1e-6 101 | ); 102 | } 103 | 104 | pub fn optimize_between< 105 | O, 106 | const DIM: usize, 107 | const DIM_DOUBLE: usize, 108 | #[cfg(feature = "serde")] T: VariableDtype> + 'static + typetag::Tagged, 109 | #[cfg(not(feature = "serde"))] T: VariableDtype> + 'static, 110 | >( 111 | new: &dyn Fn(Graph) -> O, 112 | ) where 113 | PriorResidual: Residual, 114 | BetweenResidual: Residual, 115 | O: Optimizer, 116 | Const: ToTypenum, 117 | AllocatorBuffer, Const>>: Sync + Send, 118 | DefaultAllocator: DualAllocator, Const>>, 119 | DualVector, Const>>: Copy, 120 | Const: DimNameAdd>, 121 | { 122 | let t = VectorX::from_fn(T::DIM, |_, i| ((i as dtype) - (T::DIM as dtype)) / 10.0); 123 | let p1 = T::exp(t.as_view()); 124 | 125 | let t = VectorX::from_fn(T::DIM, |_, i| ((i + 1) as dtype) / 10.0); 126 | let p2 = T::exp(t.as_view()); 127 | 128 | let mut values = Values::new(); 129 | values.insert_unchecked(X(0), T::identity()); 130 | values.insert_unchecked(X(1), T::identity()); 131 | 132 | let mut graph = Graph::new(); 133 | let res = PriorResidual::new(p1.clone()); 134 | let factor = FactorBuilder::new1_unchecked(res, X(0)).build(); 135 | graph.add_factor(factor); 136 | 137 | let diff = p2.minus(&p1); 138 | let res = BetweenResidual::new(diff); 139 | let factor = FactorBuilder::new2_unchecked(res, X(0), X(1)).build(); 140 | graph.add_factor(factor); 141 | 142 | let mut opt = new(graph); 143 | values = opt.optimize(values).expect("Optimization failed"); 144 | 145 | let out1: &T = values.get_unchecked(X(0)).expect("Missing X(0)"); 146 | assert_matrix_eq!( 147 | out1.ominus(&p1), 148 | VectorX::zeros(T::DIM), 149 | comp = abs, 150 | tol = 1e-6 151 | ); 152 | 153 | let out2: &T = values.get_unchecked(X(1)).expect("Missing X(1)"); 154 | assert_matrix_eq!( 155 | out2.ominus(&p2), 156 | VectorX::zeros(T::DIM), 157 | comp = abs, 158 | tol = 1e-6 159 | ); 160 | } 161 | } 162 | 
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # fact.rs 2 | 3 | [![minimum rustc 1.84](https://img.shields.io/badge/rustc-1.84+-red.svg)](https://rust-lang.github.io/rfcs/2495-min-rust-version.html) 4 | [![crate](https://img.shields.io/crates/v/factrs.svg)](https://crates.io/crates/factrs) 5 | [![doc](https://docs.rs/factrs/badge.svg)](https://docs.rs/factrs) 6 | [![ci](https://github.com/rpl-cmu/factrs/actions/workflows/ci.yml/badge.svg)](https://github.com/rpl-cmu/factrs/actions/workflows/ci.yml) 7 | 8 | fact.rs (pronounced factors) is a nonlinear least squares optimization library over factor graphs written in Rust. 9 | 10 | It is specifically geared toward sensor fusion in robotics. It aims to be fast, easy to use, and safe. The fact.rs API takes heavy inspiration from the [gtsam library](https://gtsam.org/). 11 | 12 | Currently, it supports the following features 13 | - Gauss-Newton & Levenberg-Marquardt Optimizers 14 | - Common Lie Groups supported (SO2, SO3, SE2, SE3) with optimization in Lie 15 | Algebras 16 | - Automatic differentiation via dual numbers 17 | - Serialization of graphs & variables via optional serde support 18 | - Easy conversion to rerun types for straightforward visualization 19 | 20 | We recommend you check out the [docs](https://docs.rs/factrs/latest/factrs/) for more info. For usage, simply add factrs to your `Cargo.toml` and start using it! 21 | 22 | # Examples 23 | There's a number of examples found in the [examples](/examples/) folder, including loading g2o files, serialization, and custom factors. 24 | 25 | To run a simple pose graph optimization, simply clone this repository and run, 26 | ```bash 27 | cargo run --release --example g2o ./examples/data/M3500.g2o 28 | ``` 29 | to visualize the optimization steps with [rerun](https://rerun.io) simply add `--features rerun` to the above command.
30 | 31 | Running the other examples can be done similarly, 32 | ```bash 33 | cargo run --release --example gps 34 | cargo run --release --example serde --features serde 35 | ``` 36 | 37 | Additionally, we recommend checking out the [tests](/tests/) folder for more examples of how to make custom noise models, residuals, robust kernels, and variables. 38 | 39 |

40 | Full Example 41 | 42 | ```rust 43 | use factrs::{ 44 | assign_symbols, 45 | core::{BetweenResidual, GaussNewton, Graph, Huber, PriorResidual, Values, SO2}, 46 | fac, 47 | traits::*, 48 | }; 49 | 50 | // Assign symbols to variable types 51 | assign_symbols!(X: SO2); 52 | 53 | fn main() { 54 | // Make all the values 55 | let mut values = Values::new(); 56 | 57 | let x = SO2::from_theta(1.0); 58 | let y = SO2::from_theta(2.0); 59 | values.insert(X(0), SO2::identity()); 60 | values.insert(X(1), SO2::identity()); 61 | 62 | // Make the factors & insert into graph 63 | let mut graph = Graph::new(); 64 | let res = PriorResidual::new(x.clone()); 65 | let factor = fac![res, X(0)]; 66 | graph.add_factor(factor); 67 | 68 | let res = BetweenResidual::new(y.minus(&x)); 69 | let factor = fac![res, (X(0), X(1)), 0.1 as std, Huber::default()]; 70 | graph.add_factor(factor); 71 | 72 | // Optimize! 73 | let mut opt: GaussNewton = GaussNewton::new_default(graph); 74 | let result = opt.optimize(values).unwrap(); 75 | println!("Results {:#}", result); 76 | } 77 | ``` 78 |
79 |
80 | 81 | # Compile-time Errors 82 | 83 | fact.rs leans into the Rust way of doing things, and attempts to compile-time error as much as possible. This includes the following, 84 | - Symbols are assigned to variables at compile-time, ensuring that symbols cannot be mismatched 85 | - Enforcing the correct number of keys for a factor 86 | - Ensuring that noise model dimensions match the residual dimensions 87 | 88 | A few examples, 89 | ```rust 90 | use factrs::core::{assign_symbols, fac, PriorResidual, Values, VectorVar2, SO2}; 91 | 92 | // Assign symbols to variable types 93 | assign_symbols!(X: SO2, Y: SO2); 94 | let mut values = Values::new(); 95 | 96 | // Proper usage 97 | let id = SO2::identity(); 98 | values.insert(X(0), id); 99 | let prior = PriorResidual::new(id); 100 | let f = fac![prior, X(0), (0.1, 0.2) as std]; 101 | 102 | // These will all compile-time error 103 | // mismatched symbol-variable types 104 | values.insert(X(5), VectorVar2::identity()); 105 | // wrong number of keys 106 | let f = fac![PriorResidual::new(id), (X(0), X(1))]; 107 | // wrong noise-model dimension 108 | let n = GaussianNoise::<5>::from_scalar_sigma(0.1); 109 | let f = fac![PriorResidual::new(id), X(0), n]; 110 | // mismatched symbol-variable types 111 | let f = fac![PriorResidual::new(id), Y(0), 0.1 as std]; 112 | ``` 113 | 114 | # Benchmarks 115 | Performance-wise, factrs is the fastest Rust library, and competitive with other C++ libraries. Benchmarks were run on a 12th Gen Intel i9 and are all single-threaded (for now). Current benchmarks include [sophus-rs](https://github.com/sophus-vision/sophus-rs), [tiny-solver-rs](https://github.com/powei-lin/tiny-solver-rs), [gtsam](https://github.com/borglab/gtsam/), and [ceres](http://ceres-solver.org/). Data can be found in the [examples/data](/examples/data) folder. 116 | 117 |

118 | 119 |

120 | 121 | *Note, gtsam and Ceres are faster for the parking garage due to leveraging the sparsity of the pose graph better using the Baye's tree, something that is planned for factrs.* 122 | 123 | To run the rust benchmarks after cloning, simply run, 124 | ```bash 125 | cargo bench -p factrs-bench 126 | ``` 127 | and the C++ benchmarks can be run with, 128 | ```bash 129 | cmake -B build factrs-bench/cpp 130 | cmake --build build 131 | ./build/bench 132 | ``` 133 | 134 | both of which have alias commands in the root justfile (which also includes a plotting alias). 135 | 136 | # Installation 137 | Simply add via cargo as you do any rust dependency, 138 | ```bash 139 | cargo add factrs 140 | ``` 141 | 142 | 143 | 144 | # Contributions 145 | 146 | Contributions are more than welcome! Feel free to open an issue or a pull request with any ideas, bugs, features, etc you might have or want. 147 | 148 | We feel rust and robotics are a good match and want to see rust robotics libraries catch-up to their C++ counterparts. 149 | -------------------------------------------------------------------------------- /src/containers/graph.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | fmt::{Debug, Write}, 3 | marker::PhantomData, 4 | }; 5 | 6 | use faer::sparse::{Pair, SymbolicSparseColMat}; 7 | use pad_adapter::PadAdapter; 8 | 9 | use super::{DefaultSymbolHandler, Idx, KeyFormatter, Values, ValuesOrder}; 10 | // Once "debug_closure_helpers" is stabilized, we won't need this anymore 11 | // Need custom debug to handle pretty key printing at the moment 12 | // Pad adapter helps with the pretty printing 13 | use crate::containers::factor::FactorFormatter; 14 | use crate::{containers::Factor, dtype, linear::LinearGraph}; 15 | 16 | /// Structure to represent a nonlinear factor graph 17 | /// 18 | /// Main usage will be via `add_factor` to add new [factors](Factor) to the 19 | /// graph. 
Also of note is the `linearize` function that returns a [linear (aka 20 | /// Gaussian) factor graph](LinearGraph). 21 | /// 22 | /// Since the graph represents a nonlinear least-squares problem, during 23 | /// optimization it will be iteratively linearized about a set of variables and 24 | /// solved iteratively. 25 | /// 26 | /// ``` 27 | /// # use factrs::{ 28 | /// assign_symbols, 29 | /// containers::{Graph, FactorBuilder}, 30 | /// residuals::PriorResidual, 31 | /// robust::GemanMcClure, 32 | /// traits::*, 33 | /// variables::SO2, 34 | /// }; 35 | /// # assign_symbols!(X: SO2); 36 | /// # let factor = FactorBuilder::new1(PriorResidual::new(SO2::identity()), X(0)).build(); 37 | /// let mut graph = Graph::new(); 38 | /// graph.add_factor(factor); 39 | /// ``` 40 | #[derive(Default, Clone)] 41 | #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] 42 | pub struct Graph { 43 | factors: Vec, 44 | } 45 | 46 | impl Graph { 47 | pub fn new() -> Self { 48 | Self::default() 49 | } 50 | 51 | pub fn with_capacity(capacity: usize) -> Self { 52 | Self { 53 | factors: Vec::with_capacity(capacity), 54 | } 55 | } 56 | 57 | pub fn add_factor(&mut self, factor: Factor) { 58 | self.factors.push(factor); 59 | } 60 | 61 | pub fn len(&self) -> usize { 62 | self.factors.len() 63 | } 64 | 65 | pub fn is_empty(&self) -> bool { 66 | self.factors.is_empty() 67 | } 68 | 69 | pub fn error(&self, values: &Values) -> dtype { 70 | self.factors.iter().map(|f| f.error(values)).sum() 71 | } 72 | 73 | pub fn linearize(&self, values: &Values) -> LinearGraph { 74 | let factors = self.factors.iter().map(|f| f.linearize(values)).collect(); 75 | LinearGraph::from_vec(factors) 76 | } 77 | 78 | pub fn sparsity_pattern(&self, order: ValuesOrder) -> GraphOrder { 79 | let total_rows = self.factors.iter().map(|f| f.dim_out()).sum(); 80 | let total_columns = order.dim(); 81 | 82 | let mut indices = Vec::>::new(); 83 | 84 | let _ = self.factors.iter().fold(0, |row, f| { 85 | 
f.keys().iter().for_each(|key| { 86 | (0..f.dim_out()).for_each(|i| { 87 | let Idx { 88 | idx: col, 89 | dim: col_dim, 90 | } = order.get(*key).expect("Key missing in values"); 91 | (0..*col_dim).for_each(|j| { 92 | indices.push(Pair::new(row + i, col + j)); 93 | }); 94 | }); 95 | }); 96 | row + f.dim_out() 97 | }); 98 | 99 | let (sparsity_pattern, sparsity_order) = 100 | SymbolicSparseColMat::try_new_from_indices(total_rows, total_columns, &indices) 101 | .expect("Failed to make sparse matrix"); 102 | GraphOrder { 103 | order, 104 | sparsity_pattern, 105 | sparsity_order, 106 | } 107 | } 108 | 109 | pub fn iter(&self) -> std::slice::Iter { 110 | self.factors.iter() 111 | } 112 | 113 | pub fn iter_mut(&mut self) -> std::slice::IterMut { 114 | self.factors.iter_mut() 115 | } 116 | } 117 | 118 | impl Debug for Graph { 119 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 120 | GraphFormatter::::new(self).fmt(f) 121 | } 122 | } 123 | 124 | impl IntoIterator for Graph { 125 | type Item = Factor; 126 | type IntoIter = std::vec::IntoIter; 127 | 128 | fn into_iter(self) -> Self::IntoIter { 129 | self.factors.into_iter() 130 | } 131 | } 132 | 133 | /// Formatter for a graph 134 | /// 135 | /// Specifically, this can be used if custom symbols are desired. See 136 | /// [tests/custom_key](https://github.com/rpl-cmu/factrs/blob/dev/tests/custom_key.rs) for examples. 
137 | pub struct GraphFormatter<'g, KF> { 138 | pub graph: &'g Graph, 139 | kf: PhantomData, 140 | } 141 | 142 | impl<'g, KF> GraphFormatter<'g, KF> { 143 | pub fn new(graph: &'g Graph) -> Self { 144 | Self { 145 | graph, 146 | kf: Default::default(), 147 | } 148 | } 149 | } 150 | 151 | impl Debug for GraphFormatter<'_, KF> { 152 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 153 | if f.alternate() { 154 | f.write_str("Graph [\n")?; 155 | let mut pad = PadAdapter::new(f); 156 | for factor in self.graph.factors.iter() { 157 | writeln!(pad, "{:#?},", FactorFormatter::::new(factor))?; 158 | } 159 | f.write_str("]") 160 | } else { 161 | f.write_str("Graph [ ")?; 162 | for factor in self.graph.factors.iter() { 163 | write!(f, "{:?}, ", FactorFormatter::::new(factor))?; 164 | } 165 | f.write_str("]") 166 | } 167 | } 168 | } 169 | 170 | /// Simple structure to hold the order of the graph 171 | /// 172 | /// Specifically this is used to cache linearization results such as the order 173 | /// of the graph and the sparsity pattern of the Jacobian (allows use to avoid 174 | /// resorting indices). 
175 | pub struct GraphOrder { 176 | // Contains the order of the variables 177 | pub order: ValuesOrder, 178 | // Contains the sparsity pattern of the jacobian 179 | pub sparsity_pattern: SymbolicSparseColMat, 180 | // Contains the order of values to put into the sparsity pattern 181 | pub sparsity_order: faer::sparse::Argsort, 182 | } 183 | -------------------------------------------------------------------------------- /src/linear/graph.rs: -------------------------------------------------------------------------------- 1 | use faer::sparse::{Pair, SparseColMat, SymbolicSparseColMat}; 2 | use faer_ext::IntoFaer; 3 | 4 | use super::LinearValues; 5 | use crate::{ 6 | containers::{GraphOrder, Idx, ValuesOrder}, 7 | dtype, 8 | linalg::DiffResult, 9 | linear::LinearFactor, 10 | }; 11 | 12 | /// A graph of linear factors 13 | /// 14 | /// This is the linear equivalent of [Graph](crate::containers::Graph). Rather 15 | /// than store nonlinear factors, it stores [LinearFactors](LinearFactor). 16 | #[derive(Default)] 17 | pub struct LinearGraph { 18 | factors: Vec, 19 | } 20 | 21 | impl LinearGraph { 22 | pub fn new() -> Self { 23 | Self::default() 24 | } 25 | 26 | pub fn from_vec(factors: Vec) -> Self { 27 | Self { factors } 28 | } 29 | 30 | pub fn add_factor(&mut self, factor: LinearFactor) { 31 | self.factors.push(factor); 32 | } 33 | 34 | pub fn error(&self, values: &LinearValues) -> dtype { 35 | self.factors.iter().map(|f| f.error(values)).sum() 36 | } 37 | 38 | // TODO: This is identical for nonlinear case, is there a way we can reduce code 39 | // reuse? 
40 | pub fn sparsity_pattern(&self, order: ValuesOrder) -> GraphOrder { 41 | let total_rows = self.factors.iter().map(|f| f.dim_out()).sum(); 42 | let total_columns = order.dim(); 43 | 44 | let mut indices = Vec::>::new(); 45 | 46 | let _ = self.factors.iter().fold(0, |row, f| { 47 | f.keys.iter().for_each(|key| { 48 | (0..f.dim_out()).for_each(|i| { 49 | let Idx { 50 | idx: col, 51 | dim: col_dim, 52 | } = order.get(*key).expect("Key missing in values"); 53 | (0..*col_dim).for_each(|j| { 54 | indices.push(Pair::new(row + i, col + j)); 55 | }); 56 | }); 57 | }); 58 | row + f.dim_out() 59 | }); 60 | 61 | let (sparsity_pattern, sparsity_order) = 62 | SymbolicSparseColMat::try_new_from_indices(total_rows, total_columns, &indices) 63 | .expect("Failed to create sparse matrix"); 64 | 65 | GraphOrder { 66 | order, 67 | sparsity_pattern, 68 | sparsity_order, 69 | } 70 | } 71 | 72 | /// Computes J and r for use in solver 73 | pub fn residual_jacobian( 74 | &self, 75 | graph_order: &GraphOrder, 76 | ) -> DiffResult, SparseColMat> { 77 | // Create the residual vector 78 | let total_rows = self.factors.iter().map(|f| f.dim_out()).sum(); 79 | let mut r = faer::Mat::zeros(total_rows, 1); 80 | let _ = self.factors.iter().fold(0, |row, f| { 81 | r.subrows_mut(row, f.dim_out()) 82 | .copy_from(&f.b.view_range(.., ..).into_faer()); 83 | row + f.dim_out() 84 | }); 85 | 86 | // Create the jacobian matrix 87 | let mut values: Vec = Vec::new(); 88 | // Iterate over all factors 89 | let _ = self.factors.iter().fold(0, |row, f| { 90 | // Iterate over keys 91 | (0..f.keys.len()).for_each(|idx| { 92 | // Iterate over rows, then column elements 93 | f.a.get_block(idx).row_iter().for_each(|r| { 94 | r.iter().for_each(|val| { 95 | values.push(*val); 96 | }); 97 | }); 98 | }); 99 | row + f.dim_out() 100 | }); 101 | 102 | let jac = SparseColMat::new_from_argsort( 103 | graph_order.sparsity_pattern.clone(), 104 | &graph_order.sparsity_order, 105 | values.as_slice(), 106 | ) 107 | .expect("Failed 
to form sparse matrix from previous sparsity pattern"); 108 | 109 | DiffResult { 110 | value: r, 111 | diff: jac, 112 | } 113 | } 114 | } 115 | 116 | #[cfg(test)] 117 | mod test { 118 | use faer_ext::IntoNalgebra; 119 | use foldhash::HashMap; 120 | use matrixcompare::assert_matrix_eq; 121 | 122 | use super::*; 123 | use crate::{ 124 | containers::Idx, 125 | linalg::{MatrixBlock, MatrixX, VectorX}, 126 | symbols::X, 127 | }; 128 | 129 | #[test] 130 | fn residual_jacobian() { 131 | // Make a linear graph 132 | let mut graph = LinearGraph::new(); 133 | 134 | // Make a handful of factors 135 | let a1 = MatrixX::from_fn(2, 2, |i, j| (i + j) as dtype); 136 | let block1 = MatrixBlock::new(a1, vec![0]); 137 | let b1 = VectorX::from_fn(2, |i, j| (i + j) as dtype); 138 | graph.add_factor(LinearFactor::new( 139 | vec![X(1).into()], 140 | block1.clone(), 141 | b1.clone(), 142 | )); 143 | 144 | let a2 = MatrixX::from_fn(3, 5, |i, j| (i + j) as dtype); 145 | let block2 = MatrixBlock::new(a2, vec![0, 2]); 146 | let b2 = VectorX::from_fn(3, |_, _| 5.0); 147 | graph.add_factor(LinearFactor::new( 148 | vec![X(0).into(), X(2).into()], 149 | block2.clone(), 150 | b2.clone(), 151 | )); 152 | 153 | // Make fake ordering 154 | let mut map = HashMap::default(); 155 | map.insert(X(0).into(), Idx { idx: 0, dim: 2 }); 156 | map.insert(X(1).into(), Idx { idx: 2, dim: 2 }); 157 | map.insert(X(2).into(), Idx { idx: 4, dim: 3 }); 158 | let order = ValuesOrder::new(map); 159 | 160 | // Compute the residual and jacobian 161 | let graph_order = graph.sparsity_pattern(order); 162 | let DiffResult { value, diff } = graph.residual_jacobian(&graph_order); 163 | let value = value.as_ref().into_nalgebra().clone_owned(); 164 | let diff = diff.to_dense().as_ref().into_nalgebra().clone_owned(); 165 | 166 | println!("Value: {}", value); 167 | println!("Diff: {}", diff); 168 | 169 | // Check the residual 170 | assert_matrix_eq!(b1, value.rows(0, 2), comp = float); 171 | assert_matrix_eq!(b2, value.rows(2, 3), 
comp = float); 172 | 173 | // Check the jacobian 174 | assert_matrix_eq!(block1.get_block(0), diff.view((0, 2), (2, 2)), comp = float); 175 | assert_matrix_eq!(block2.get_block(0), diff.view((2, 0), (3, 2)), comp = float); 176 | assert_matrix_eq!(block2.get_block(1), diff.view((2, 4), (3, 3)), comp = float); 177 | } 178 | } 179 | -------------------------------------------------------------------------------- /src/linear/solvers.rs: -------------------------------------------------------------------------------- 1 | use std::ops::Mul; 2 | 3 | use faer::{ 4 | linalg::solvers::Solve, 5 | sparse::{linalg::solvers, SparseColMatRef}, 6 | Mat, MatRef, 7 | }; 8 | 9 | use crate::dtype; 10 | 11 | /// Trait to solve sparse linear systems 12 | pub trait LinearSolver { 13 | /// Solve a symmetric linear system 14 | /// 15 | /// This will be used by Cholesky to solve A^T A and by Levenberg-Marquardt 16 | /// to solve J^T J 17 | fn solve_symmetric(&mut self, a: SparseColMatRef, b: MatRef) 18 | -> Mat; 19 | 20 | /// Solve a least squares problem 21 | /// 22 | /// Used by QR to solve Ax = b, where the number of rows in A is greater 23 | /// than the number of columns 24 | fn solve_lst_sq(&mut self, a: SparseColMatRef, b: MatRef) -> Mat; 25 | } 26 | 27 | impl Default for Box { 28 | fn default() -> Self { 29 | Box::new(CholeskySolver::default()) 30 | } 31 | } 32 | 33 | // ------------------------- Cholesky Linear Solver ------------------------- // 34 | 35 | /// Cholesky linear solver 36 | #[derive(Default)] 37 | pub struct CholeskySolver { 38 | sparsity_pattern: Option>, 39 | } 40 | 41 | impl LinearSolver for CholeskySolver { 42 | fn solve_symmetric( 43 | &mut self, 44 | a: SparseColMatRef, 45 | b: MatRef, 46 | ) -> Mat { 47 | if self.sparsity_pattern.is_none() { 48 | self.sparsity_pattern = Some( 49 | solvers::SymbolicLlt::try_new(a.symbolic(), faer::Side::Lower) 50 | .expect("Symbolic cholesky failed"), 51 | ); 52 | } 53 | 54 | solvers::Llt::try_new_with_symbolic( 55 | 
self.sparsity_pattern 56 | .clone() 57 | .expect("Missing symbol cholesky"), 58 | a, 59 | faer::Side::Lower, 60 | ) 61 | .expect("Cholesky decomp failed") 62 | .solve(&b) 63 | } 64 | 65 | fn solve_lst_sq(&mut self, a: SparseColMatRef, b: MatRef) -> Mat { 66 | let ata = a 67 | .transpose() 68 | .to_col_major() 69 | .expect("Failed to transpose A matrix") 70 | .mul(a); 71 | 72 | let atb = a.transpose().mul(b); 73 | 74 | self.solve_symmetric(ata.as_ref(), atb.as_ref()) 75 | } 76 | } 77 | 78 | // ------------------------- QR Linear Solver ------------------------- // 79 | 80 | /// QR linear solver 81 | #[derive(Default)] 82 | pub struct QRSolver { 83 | sparsity_pattern: Option>, 84 | } 85 | 86 | impl LinearSolver for QRSolver { 87 | fn solve_symmetric( 88 | &mut self, 89 | a: SparseColMatRef, 90 | b: MatRef, 91 | ) -> Mat { 92 | self.solve_lst_sq(a, b) 93 | } 94 | 95 | fn solve_lst_sq(&mut self, a: SparseColMatRef, b: MatRef) -> Mat { 96 | if self.sparsity_pattern.is_none() { 97 | self.sparsity_pattern = 98 | Some(solvers::SymbolicQr::try_new(a.symbolic()).expect("Symbolic QR failed")); 99 | } 100 | 101 | // TODO: I think we're doing an extra copy here from solution -> slice solution 102 | solvers::Qr::try_new_with_symbolic( 103 | self.sparsity_pattern.clone().expect("Missing symbolic QR"), 104 | a, 105 | ) 106 | .expect("QR failed") 107 | .solve(&b) 108 | .as_ref() 109 | .subrows(0, a.ncols()) 110 | .to_owned() 111 | } 112 | } 113 | 114 | // ------------------------- LU Linear Solver ------------------------- // 115 | 116 | /// LU linear solver 117 | #[derive(Default)] 118 | pub struct LUSolver { 119 | sparsity_pattern: Option>, 120 | } 121 | 122 | impl LinearSolver for LUSolver { 123 | fn solve_symmetric( 124 | &mut self, 125 | a: SparseColMatRef, 126 | b: MatRef, 127 | ) -> Mat { 128 | if self.sparsity_pattern.is_none() { 129 | self.sparsity_pattern = 130 | Some(solvers::SymbolicLu::try_new(a.symbolic()).expect("Symbolic LU failed")); 131 | } 132 | 133 | 
solvers::Lu::try_new_with_symbolic( 134 | self.sparsity_pattern.clone().expect("Symbolic LU missing"), 135 | a.as_ref(), 136 | ) 137 | .expect("LU decomp failed") 138 | .solve(&b) 139 | } 140 | 141 | fn solve_lst_sq(&mut self, a: SparseColMatRef, b: MatRef) -> Mat { 142 | let ata = a 143 | .transpose() 144 | .to_col_major() 145 | .expect("Failed to transpose A matrix") 146 | .mul(a); 147 | let atb = a.transpose().mul(b); 148 | 149 | self.solve_symmetric(ata.as_ref(), atb.as_ref()) 150 | } 151 | } 152 | 153 | #[cfg(test)] 154 | mod test { 155 | use faer::{ 156 | mat, 157 | sparse::{SparseColMat, Triplet}, 158 | }; 159 | 160 | use super::*; 161 | 162 | fn solve(solver: &mut T) { 163 | let a = SparseColMat::::try_new_from_triplets( 164 | 3, 165 | 2, 166 | &[ 167 | Triplet::new(0, 0, 10.0), 168 | Triplet::new(1, 0, 2.0), 169 | Triplet::new(2, 0, 3.0), 170 | Triplet::new(0, 1, 4.0), 171 | Triplet::new(1, 1, 20.0), 172 | Triplet::new(2, 1, -45.0), 173 | ], 174 | ) 175 | .expect("Failed to make symbolic matrix"); 176 | let b = mat![[15.0], [-3.0], [33.0]]; 177 | 178 | let x_exp = mat![[1.874901], [-0.566112]]; 179 | let x = solver.solve_lst_sq(a.as_ref(), b.as_ref()); 180 | println!("{:?}", x); 181 | 182 | let relative_err = |a: dtype, b: dtype| (a - b).abs() / dtype::max(a.abs(), b.abs()); 183 | assert!(relative_err(x[(0, 0)], x_exp[(0, 0)]) < 1e-6); 184 | assert!(relative_err(x[(1, 0)], x_exp[(1, 0)]) < 1e-6); 185 | } 186 | 187 | #[test] 188 | fn test_cholesky_solver() { 189 | let mut solver = CholeskySolver::default(); 190 | solve(&mut solver); 191 | } 192 | 193 | #[test] 194 | fn test_qr_solver() { 195 | let mut solver = QRSolver::default(); 196 | solve(&mut solver); 197 | } 198 | 199 | #[test] 200 | fn test_lu_solver() { 201 | let mut solver = LUSolver::default(); 202 | solve(&mut solver); 203 | } 204 | } 205 | -------------------------------------------------------------------------------- /src/variables/macros.rs: 
-------------------------------------------------------------------------------- 1 | /// Variable wrapper around [assert_matrix_eq](matrixcompare::assert_matrix_eq) 2 | /// 3 | /// This compares two variables using 4 | /// [ominus](crate::variables::Variable::ominus) 5 | #[macro_export] 6 | macro_rules! assert_variable_eq { 7 | ($x:expr, $y:expr) => { 8 | matrixcompare::assert_matrix_eq!($crate::variables::Variable::ominus($x, &$y), $crate::linalg::VectorX::zeros($crate::variables::Variable::dim($x))); 9 | }; 10 | ($x:expr, $y:expr, comp = exact) => { 11 | matrixcompare::assert_matrix_eq!($x.ominus(&$y), $crate::linalg::VectorX::zeros($x.dim()), comp = exact); 12 | }; 13 | ($x:expr, $y:expr, comp = abs, tol = $tol:expr) => { 14 | matrixcompare::assert_matrix_eq!( 15 | $x.ominus(&$y), 16 | $crate::linalg::VectorX::zeros($crate::variables::Variable::dim(&$x)), 17 | comp = abs, 18 | tol = $tol 19 | ); 20 | }; 21 | ($x:expr, $y:expr, comp = ulp, tol = $tol:expr) => { 22 | matrixcompare::assert_matrix_eq!( 23 | $x.ominus(&$y), 24 | $crate::linalg::VectorX::zeros($x.dim()), 25 | comp = ulp, 26 | tol = $tol 27 | ); 28 | }; 29 | ($x:expr, $y:expr, comp = float) => { 30 | matrixcompare::assert_matrix_eq!($x.ominus(&$y), $crate::linalg::VectorX::zeros($x.dim()), comp = float); 31 | }; 32 | ($x:expr, $y:expr, comp = float, $($key:ident = $val:expr),+) => { 33 | matrixcompare::assert_matrix_eq!( 34 | $x.ominus(&$y), 35 | $crate::linalg::VectorX::zeros($x.dim()), 36 | comp = float, 37 | $($key = $val),+ 38 | ); 39 | }; 40 | } 41 | 42 | /// Test (most of) the lie group rules 43 | /// 44 | /// Specifically this tests: 45 | /// - identity 46 | /// - inverse 47 | /// - associativity 48 | /// - exp/log are invertible near the origin 49 | #[macro_export] 50 | macro_rules!
test_variable { 51 | ($var:ident) => { 52 | // Return a misc element for our tests 53 | fn element(scale: $crate::dtype) -> T { 54 | let xi = $crate::linalg::VectorX::from_fn(T::DIM, |_, i| { 55 | scale * ((i + 1) as $crate::dtype) / 10.0 56 | }); 57 | T::exp(xi.as_view()) 58 | } 59 | 60 | #[test] 61 | #[allow(non_snake_case)] 62 | fn identity() { 63 | let var: $var = element(1.0); 64 | let id = <$var as Variable>::identity(); 65 | $crate::assert_variable_eq!(var, var.compose(&id), comp = abs, tol = 1e-6); 66 | $crate::assert_variable_eq!(var, id.compose(&var), comp = abs, tol = 1e-6); 67 | } 68 | 69 | #[test] 70 | #[allow(non_snake_case)] 71 | fn inverse() { 72 | let var: $var = element(1.0); 73 | let inv = Variable::inverse(&var); 74 | let id = <$var as Variable>::identity(); 75 | println!("{:?}", var); 76 | println!("{:?}", inv); 77 | $crate::assert_variable_eq!(var.compose(&inv), id, comp = abs, tol = 1e-6); 78 | $crate::assert_variable_eq!(inv.compose(&var), id, comp = abs, tol = 1e-6); 79 | } 80 | 81 | #[test] 82 | #[allow(non_snake_case)] 83 | fn associativity() { 84 | let var1: $var = element(1.0); 85 | let var2: $var = element(2.0); 86 | let var3: $var = element(3.0); 87 | $crate::assert_variable_eq!( 88 | var1.compose(&var2).compose(&var3), 89 | var1.compose(&var2.compose(&var3)), 90 | comp = abs, 91 | tol = 1e-6 92 | ); 93 | } 94 | 95 | #[test] 96 | #[allow(non_snake_case)] 97 | fn exp_log() { 98 | let var: $var = element(1.0); 99 | let out = <$var as Variable>::exp(var.log().as_view()); 100 | $crate::assert_variable_eq!(var, out, comp = abs, tol = 1e-6); 101 | } 102 | }; 103 | } 104 | 105 | /// Test (most of) the matrix lie group rules 106 | /// 107 | /// Specifically test 108 | /// - to/from matrix are invertible 109 | /// - hat/vee are invertible 110 | /// - jacobian of rotation function with hat_swap 111 | #[macro_export] 112 | macro_rules! 
test_lie { 113 | ($var:ident) => { 114 | fn tangent(scale: dtype) -> $crate::linalg::VectorX { 115 | $crate::linalg::VectorX::from_fn(T::DIM, |_, i| scale * ((i + 1) as dtype) / 10.0) 116 | } 117 | 118 | #[test] 119 | fn matrix() { 120 | let tan = tangent::<$var>(1.0); 121 | let var = $var::exp(tan.as_view()); 122 | let mat = var.to_matrix(); 123 | let var_after = $var::from_matrix(mat.as_view()); 124 | $crate::assert_variable_eq!(var, var_after, comp = abs, tol = 1e-6); 125 | } 126 | 127 | #[test] 128 | fn hat_vee() { 129 | let tan = tangent::<$var>(1.0); 130 | let mat = $var::hat(tan.as_view()); 131 | let tan_after = $var::vee(mat.as_view()); 132 | matrixcompare::assert_matrix_eq!(tan, tan_after); 133 | } 134 | 135 | // TODO: Someway to test rotate & adjoint functions? 136 | 137 | #[cfg(not(feature = "left"))] 138 | use $crate::linalg::{Diff, Dim}; 139 | 140 | #[cfg(not(feature = "left"))] 141 | #[test] 142 | fn jacobian() { 143 | let vec_len = 144 | <$var as $crate::variables::MatrixLieGroup>::VectorDim::try_to_usize().unwrap(); 145 | let v = VectorX::from_fn(vec_len, |i, _| (i + 1) as $crate::dtype); 146 | 147 | // Function that simply rotates a vector 148 | fn rotate(r: $var) -> $crate::linalg::VectorX { 149 | let vec_len = 150 | <$var as $crate::variables::MatrixLieGroup>::VectorDim::try_to_usize().unwrap(); 151 | let v = VectorX::from_fn(vec_len, |i, _| T::from((i + 1) as $crate::dtype)); 152 | let rotated = r.apply(v.as_view()); 153 | VectorX::from_fn(vec_len, |i, _| rotated[i].clone()) 154 | } 155 | 156 | let t = $var::exp(tangent::<$var>(1.0).as_view()); 157 | let $crate::linalg::DiffResult { 158 | value: _x, 159 | diff: dx, 160 | } = $crate::linalg::ForwardProp::<<$var as Variable>::Dim>::jacobian_1(rotate, &t); 161 | 162 | let size = 163 | <$var as $crate::variables::MatrixLieGroup>::VectorDim::try_to_usize().unwrap(); 164 | let dx_exp = t.to_matrix().view((0, 0), (size, size)) * $var::hat_swap(v.as_view()); 165 | 166 | println!("Expected: {}", dx_exp); 
167 | println!("Actual: {}", dx); 168 | 169 | matrixcompare::assert_matrix_eq!(dx, dx_exp, comp = abs, tol = 1e-6); 170 | } 171 | }; 172 | } 173 | -------------------------------------------------------------------------------- /src/optimizers/levenberg_marquardt.rs: -------------------------------------------------------------------------------- 1 | use std::ops::Mul; 2 | 3 | use faer::sparse::{SparseColMat, Triplet}; 4 | use faer_ext::IntoNalgebra; 5 | 6 | use super::{BaseOptParams, OptError, OptObserverVec, OptParams, OptResult, Optimizer}; 7 | use crate::{ 8 | containers::{Graph, GraphOrder, Values, ValuesOrder}, 9 | dtype, 10 | linalg::DiffResult, 11 | linear::{LinearSolver, LinearValues}, 12 | }; 13 | 14 | /// Levenberg-Marquardt specific parameters 15 | #[derive(Clone, Debug)] 16 | pub struct LevenParams { 17 | pub lambda_min: dtype, 18 | pub lambda_max: dtype, 19 | pub lambda_factor: dtype, 20 | pub diagonal_damping: bool, 21 | pub base: BaseOptParams, 22 | } 23 | 24 | impl Default for LevenParams { 25 | fn default() -> Self { 26 | Self { 27 | lambda_min: 1e-10, 28 | lambda_max: 1e5, 29 | lambda_factor: 10.0, 30 | diagonal_damping: true, 31 | base: Default::default(), 32 | } 33 | } 34 | } 35 | 36 | impl OptParams for LevenParams { 37 | fn base_params(&self) -> &BaseOptParams { 38 | &self.base 39 | } 40 | } 41 | 42 | /// The Levenberg-Marquadt optimizer 43 | /// 44 | /// Solves a damped version of the normal equations, 45 | /// $$A^\top A \Delta \Theta + \lambda diag(A) = A^\top b$$ 46 | /// each optimizer steps. It defaults 47 | /// to using [CholeskySolver](crate::linear::CholeskySolver) under the hood, but 48 | /// this can be changed using [set_solver](LevenMarquardt::set_solver). See the 49 | /// [linear](crate::linear) module for more linear solver options. 
50 | pub struct LevenMarquardt { 51 | graph: Graph, 52 | solver: Box, 53 | /// Levenberg-Marquardt specific parameters 54 | params: LevenParams, 55 | /// Observers for the optimizer 56 | observers: OptObserverVec, 57 | lambda: dtype, 58 | // For caching computation between steps 59 | graph_order: Option, 60 | } 61 | 62 | impl LevenMarquardt { 63 | /// Set the linear solver to use for solving the linear system 64 | pub fn set_solver(&mut self, solver: impl LinearSolver + 'static) { 65 | self.solver = Box::new(solver); 66 | } 67 | } 68 | 69 | impl Optimizer for LevenMarquardt { 70 | type Params = LevenParams; 71 | 72 | fn new(params: Self::Params, graph: Graph) -> Self { 73 | Self { 74 | graph, 75 | solver: Default::default(), 76 | observers: OptObserverVec::default(), 77 | params, 78 | lambda: 1e-5, 79 | graph_order: None, 80 | } 81 | } 82 | 83 | fn observers(&self) -> &OptObserverVec { 84 | &self.observers 85 | } 86 | 87 | fn observers_mut(&mut self) -> &mut OptObserverVec { 88 | &mut self.observers 89 | } 90 | 91 | fn graph(&self) -> &Graph { 92 | &self.graph 93 | } 94 | 95 | fn graph_mut(&mut self) -> &mut Graph { 96 | &mut self.graph 97 | } 98 | 99 | fn params(&self) -> &BaseOptParams { 100 | &self.params.base 101 | } 102 | 103 | fn error(&self, values: &Values) -> crate::dtype { 104 | self.graph.error(values) 105 | } 106 | 107 | fn init(&mut self, values: &Values) -> Vec<&'static str> { 108 | // TODO: Some way to manual specify how to computer ValuesOrder 109 | // Precompute the sparsity pattern 110 | self.graph_order = Some( 111 | self.graph 112 | .sparsity_pattern(ValuesOrder::from_values(values)), 113 | ); 114 | 115 | vec![" Lambda "] 116 | } 117 | 118 | // TODO: More sophisticated stopping criteria based on magnitude of the gradient 119 | fn step(&mut self, mut values: Values, _idx: usize) -> OptResult<(Values, String)> { 120 | // Make an ordering 121 | let order = ValuesOrder::from_values(&values); 122 | 123 | // Form the linear system 124 | let 
linear_graph = self.graph.linearize(&values); 125 | let DiffResult { value: r, diff: j } = 126 | linear_graph.residual_jacobian(self.graph_order.as_ref().expect("Missing graph order")); 127 | 128 | // Form A 129 | let jtj = j 130 | .as_ref() 131 | .transpose() 132 | .to_col_major() 133 | .expect("J failed to transpose") 134 | .mul(j.as_ref()); 135 | 136 | // Form I 137 | let triplets_i = if self.params.diagonal_damping { 138 | (0..jtj.ncols()) 139 | .map(|i| Triplet::new(i as isize, i as isize, jtj[(i, i)])) 140 | .collect::>() 141 | } else { 142 | (0..jtj.ncols()) 143 | .map(|i| Triplet::new(i as isize, i as isize, 1.0)) 144 | .collect::>() 145 | }; 146 | let i = SparseColMat::::try_new_from_nonnegative_triplets( 147 | jtj.ncols(), 148 | jtj.ncols(), 149 | &triplets_i, 150 | ) 151 | .expect("Failed to make damping terms"); 152 | 153 | // Form b 154 | let b = j.as_ref().transpose().mul(&r); 155 | 156 | let mut dx = LinearValues::zero_from_order(order.clone()); 157 | let old_error = linear_graph.error(&dx); 158 | 159 | loop { 160 | // Make Ax = b 161 | // Have to cast due to missing impl in faer for f32 162 | #[allow(clippy::unnecessary_cast)] 163 | let a = &jtj + (&i * self.lambda as f64); 164 | 165 | // Solve Ax = b 166 | let delta = self 167 | .solver 168 | .solve_symmetric(a.as_ref(), b.as_ref()) 169 | .as_ref() 170 | .into_nalgebra() 171 | .column(0) 172 | .clone_owned(); 173 | dx = LinearValues::from_order_and_vector( 174 | self.graph_order 175 | .as_ref() 176 | .expect("Missing graph order") 177 | .order 178 | .clone(), 179 | delta, 180 | ); 181 | 182 | // Update our cost 183 | let curr_error = linear_graph.error(&dx); 184 | 185 | if curr_error < old_error { 186 | break; 187 | } 188 | 189 | self.lambda *= self.params.lambda_factor; 190 | if self.lambda > self.params.lambda_max { 191 | return Err(OptError::FailedToStep); 192 | } 193 | } 194 | 195 | // Update the values 196 | values.oplus_mut(&dx); 197 | self.lambda /= self.params.lambda_factor; 198 | if 
self.lambda < self.params.lambda_min { 199 | self.lambda = self.params.lambda_min; 200 | } 201 | 202 | Ok((values, format!("{:^12.4e} |", self.lambda))) 203 | } 204 | } 205 | 206 | #[cfg(test)] 207 | mod test { 208 | use super::*; 209 | use crate::test_optimizer; 210 | 211 | test_optimizer!(LevenMarquardt); 212 | } 213 | -------------------------------------------------------------------------------- /src/linalg/numerical_diff.rs: -------------------------------------------------------------------------------- 1 | use paste::paste; 2 | 3 | use crate::{ 4 | dtype, 5 | linalg::{Diff, DiffResult, MatrixX, VectorX}, 6 | variables::{Variable, VariableDtype}, 7 | }; 8 | 9 | /// Forward mode differentiator 10 | /// 11 | /// It operates on functions with regular dtype inputs and outputs, no dual 12 | /// numbers required. The generic parameter `PWR` is used to specify the power 13 | /// of the step size, it PWR=6 uses 1e-6 as a step size. 14 | /// 15 | /// This struct is used to compute the Jacobian of a function using forward mode 16 | /// differentiation via dual-numbers. It can operate on functions with up to 6 17 | /// inputs and with vector-valued outputs. 18 | /// 19 | /// ``` 20 | /// use factrs::{ 21 | /// linalg::{vectorx, DiffResult, NumericalDiff, VectorX}, 22 | /// traits::*, 23 | /// variables::SO2, 24 | /// }; 25 | /// 26 | /// // We can also be generic over Numeric as in [ForwardProp] as well if desired 27 | /// fn f(x: SO2, y: SO2) -> VectorX { 28 | /// x.ominus(&y) 29 | /// } 30 | /// 31 | /// let x = SO2::from_theta(2.0); 32 | /// let y = SO2::from_theta(1.0); 33 | /// 34 | /// // 2 as the generic since we have 2 dimensions going in 35 | /// let DiffResult { value, diff } = NumericalDiff::<6>::jacobian_2(f, &x, &y); 36 | /// assert_eq!(value, vectorx![1.0]); 37 | /// ``` 38 | pub struct NumericalDiff; 39 | 40 | macro_rules! numerical_maker { 41 | ($num:expr, $( ($idx:expr, $name:ident, $var:ident) ),*) => { 42 | paste! 
{ 43 | #[allow(unused_assignments)] 44 | fn []<$( $var: VariableDtype, )* F: Fn($($var,)*) -> VectorX> 45 | (f: F, $($name: &$var,)*) -> DiffResult { 46 | let eps = dtype::powi(10.0, -PWR); 47 | 48 | // Get Dimension 49 | let mut dim = 0; 50 | $(dim += Variable::dim($name);)* 51 | 52 | let res = f($( $name.clone(), )*); 53 | 54 | // Compute gradient 55 | let mut jac: MatrixX = MatrixX::zeros(res.len(), dim); 56 | let mut tvs = [$( VectorX::zeros(Variable::dim($name)), )*]; 57 | 58 | for i in 0..$num { 59 | let mut curr_dim = 0; 60 | for j in 0..tvs[i].len() { 61 | tvs[i][j] = eps; 62 | // TODO: It'd be more efficient to not have to add tangent vectors to each variable 63 | // However, I couldn't find a way to do this for a single vector without having to 64 | // do a nested iteration of $name which isn't allowed 65 | $(let [<$name _og>] = $name.oplus(tvs[$idx].as_view());)* 66 | let plus = f($( [<$name _og>], )*); 67 | 68 | tvs[i][j] = -eps; 69 | $(let [<$name _og>] = $name.oplus(tvs[$idx].as_view());)* 70 | let minus = f($( [<$name _og>], )*); 71 | 72 | let delta = (plus - minus) / (2.0 * eps); 73 | jac.columns_mut(curr_dim + j, 1).copy_from(&delta); 74 | 75 | tvs[i][j] = 0.0; 76 | } 77 | curr_dim += tvs[i].len(); 78 | } 79 | 80 | DiffResult { value: res, diff: jac } 81 | } 82 | } 83 | }; 84 | } 85 | 86 | impl Diff for NumericalDiff { 87 | type T = dtype; 88 | 89 | numerical_maker!(1, (0, v1, V1)); 90 | numerical_maker!(2, (0, v1, V1), (1, v2, V2)); 91 | numerical_maker!(3, (0, v1, V1), (1, v2, V2), (2, v3, V3)); 92 | numerical_maker!(4, (0, v1, V1), (1, v2, V2), (2, v3, V3), (3, v4, V4)); 93 | numerical_maker!( 94 | 5, 95 | (0, v1, V1), 96 | (1, v2, V2), 97 | (2, v3, V3), 98 | (3, v4, V4), 99 | (4, v5, V5) 100 | ); 101 | numerical_maker!( 102 | 6, 103 | (0, v1, V1), 104 | (1, v2, V2), 105 | (2, v3, V3), 106 | (3, v4, V4), 107 | (4, v5, V5), 108 | (5, v6, V6) 109 | ); 110 | } 111 | 112 | macro_rules! 
numerical_variable_maker { 113 | ($num:expr, $( ($idx:expr, $name:ident, $var:ident) ),*) => { 114 | paste! { 115 | #[allow(unused_assignments)] 116 | pub fn []<$( $var: VariableDtype, )* VOut: VariableDtype, F: Fn($($var,)*) -> VOut> 117 | (f: F, $($name: &$var,)*) -> DiffResult { 118 | let eps = dtype::powi(10.0, -PWR); 119 | 120 | // Get Dimension 121 | let mut dim = 0; 122 | $(dim += Variable::dim($name);)* 123 | 124 | let res = f($( $name.clone(), )*); 125 | 126 | // Compute gradient 127 | let mut jac: MatrixX = MatrixX::zeros(VOut::DIM, dim); 128 | let mut tvs = [$( VectorX::zeros(Variable::dim($name)), )*]; 129 | 130 | for i in 0..$num { 131 | let mut curr_dim = 0; 132 | for j in 0..tvs[i].len() { 133 | tvs[i][j] = eps; 134 | // TODO: It'd be more efficient to not have to add tangent vectors to each variable 135 | // However, I couldn't find a way to do this for a single vector without having to 136 | // do a nested iteration of $name which isn't allowed 137 | $(let [<$name _og>] = $name.oplus(tvs[$idx].as_view());)* 138 | let plus = f($( [<$name _og>], )*); 139 | 140 | tvs[i][j] = -eps; 141 | $(let [<$name _og>] = $name.oplus(tvs[$idx].as_view());)* 142 | let minus = f($( [<$name _og>], )*); 143 | 144 | let delta = plus.ominus(&minus) / (2.0 * eps); 145 | jac.columns_mut(curr_dim + j, 1).copy_from(&delta); 146 | 147 | tvs[i][j] = 0.0; 148 | } 149 | curr_dim += tvs[i].len(); 150 | } 151 | 152 | DiffResult { value: res, diff: jac } 153 | } 154 | } 155 | }; 156 | } 157 | 158 | impl NumericalDiff { 159 | numerical_variable_maker!(1, (0, v1, V1)); 160 | numerical_variable_maker!(2, (0, v1, V1), (1, v2, V2)); 161 | numerical_variable_maker!(3, (0, v1, V1), (1, v2, V2), (2, v3, V3)); 162 | numerical_variable_maker!(4, (0, v1, V1), (1, v2, V2), (2, v3, V3), (3, v4, V4)); 163 | numerical_variable_maker!( 164 | 5, 165 | (0, v1, V1), 166 | (1, v2, V2), 167 | (2, v3, V3), 168 | (3, v4, V4), 169 | (4, v5, V5) 170 | ); 171 | numerical_variable_maker!( 172 | 6, 173 | (0, 
v1, V1), 174 | (1, v2, V2), 175 | (2, v3, V3), 176 | (3, v4, V4), 177 | (4, v5, V5), 178 | (5, v6, V6) 179 | ); 180 | } 181 | -------------------------------------------------------------------------------- /src/variables/vector.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | fmt::{Debug, Display}, 3 | ops::Index, 4 | }; 5 | 6 | use crate::{ 7 | dtype, 8 | linalg::{ 9 | AllocatorBuffer, Const, DefaultAllocator, DimName, DualAllocator, DualVector, Numeric, 10 | SupersetOf, Vector, VectorDim, VectorViewX, VectorX, 11 | }, 12 | variables::Variable, 13 | }; 14 | 15 | // ------------------------- Our needs ------------------------- // 16 | /// Newtype wrapper around nalgebra::Vector 17 | /// 18 | /// We create a newtype specifically for vectors we're estimating over due to, 19 | /// 1 - So we can manually implement Debug/Display 20 | /// 2 - Overcome identity issues with the underlying Vector type 21 | /// 3 - Impl Into\ 22 | #[derive(Clone)] 23 | #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] 24 | pub struct VectorVar(pub Vector); 25 | 26 | #[factrs::mark] 27 | impl Variable for VectorVar { 28 | type T = T; 29 | type Dim = Const; 30 | type Alias = VectorVar; 31 | 32 | fn identity() -> Self { 33 | VectorVar(Vector::zeros()) 34 | } 35 | 36 | fn inverse(&self) -> Self { 37 | VectorVar(-self.0) 38 | } 39 | 40 | fn compose(&self, other: &Self) -> Self { 41 | VectorVar(self.0 + other.0) 42 | } 43 | 44 | fn exp(delta: VectorViewX) -> Self { 45 | VectorVar(Vector::from_iterator(delta.iter().cloned())) 46 | } 47 | 48 | fn log(&self) -> VectorX { 49 | VectorX::from_iterator(Self::DIM, self.0.iter().cloned()) 50 | } 51 | 52 | fn cast>(&self) -> Self::Alias { 53 | VectorVar(self.0.cast()) 54 | } 55 | 56 | // Mostly unnecessary, but avoids having to convert VectorX to static vector 57 | fn dual_exp(idx: usize) -> Self::Alias> 58 | where 59 | AllocatorBuffer: Sync + Send, 60 | DefaultAllocator: 
DualAllocator, 61 | DualVector: Copy, 62 | { 63 | let n = VectorDim::::zeros().shape_generic().0; 64 | let mut tv = Vector::>::zeros(); 65 | for (i, tvi) in tv.iter_mut().enumerate() { 66 | tvi.eps = num_dual::Derivative::derivative_generic(n, Const::<1>, idx + i); 67 | } 68 | VectorVar(tv) 69 | } 70 | } 71 | 72 | // Since it has an extra generic, we have to tag things by hand 73 | #[cfg(feature = "serde")] 74 | const _: () = { 75 | use factrs::{ 76 | core::{BetweenResidual, PriorResidual}, 77 | residuals::Residual, 78 | variables::VariableSafe, 79 | }; 80 | 81 | impl typetag::Tagged for VectorVar { 82 | fn tag() -> String { 83 | format!("VectorVar<{}>", N) 84 | } 85 | } 86 | 87 | factrs::serde::tag_variable! { 88 | VectorVar<1>, 89 | VectorVar<2>, 90 | VectorVar<3>, 91 | VectorVar<4>, 92 | VectorVar<5>, 93 | VectorVar<6>, 94 | VectorVar<7>, 95 | VectorVar<8>, 96 | VectorVar<9>, 97 | VectorVar<10>, 98 | VectorVar<11>, 99 | VectorVar<12>, 100 | VectorVar<13>, 101 | VectorVar<14>, 102 | VectorVar<15>, 103 | VectorVar<16>, 104 | } 105 | 106 | factrs::serde::tag_residual! { 107 | PriorResidual>, 108 | PriorResidual>, 109 | PriorResidual>, 110 | PriorResidual>, 111 | PriorResidual>, 112 | PriorResidual>, 113 | PriorResidual>, 114 | PriorResidual>, 115 | PriorResidual>, 116 | PriorResidual>, 117 | PriorResidual>, 118 | PriorResidual>, 119 | PriorResidual>, 120 | PriorResidual>, 121 | PriorResidual>, 122 | PriorResidual>, 123 | BetweenResidual>, 124 | BetweenResidual>, 125 | BetweenResidual>, 126 | BetweenResidual>, 127 | BetweenResidual>, 128 | BetweenResidual>, 129 | BetweenResidual>, 130 | BetweenResidual>, 131 | BetweenResidual>, 132 | BetweenResidual>, 133 | BetweenResidual>, 134 | BetweenResidual>, 135 | BetweenResidual>, 136 | BetweenResidual>, 137 | BetweenResidual>, 138 | BetweenResidual>, 139 | } 140 | }; 141 | 142 | macro_rules! impl_vector_new { 143 | ($($num:literal, [$($args:ident),*]);* $(;)?) 
=> {$( 144 | impl VectorVar<$num, T> { 145 | pub fn new($($args: T),*) -> Self { 146 | VectorVar(Vector::<$num, T>::new($($args),*)) 147 | } 148 | } 149 | )*}; 150 | } 151 | 152 | impl_vector_new!( 153 | 1, [x]; 154 | 2, [x, y]; 155 | 3, [x, y, z]; 156 | 4, [x, y, z, w]; 157 | 5, [x, y, z, w, a]; 158 | 6, [x, y, z, w, a, b]; 159 | ); 160 | 161 | impl From> for VectorVar { 162 | fn from(v: Vector) -> Self { 163 | VectorVar(v) 164 | } 165 | } 166 | 167 | impl From> for Vector { 168 | fn from(v: VectorVar) -> Self { 169 | v.0 170 | } 171 | } 172 | 173 | impl Display for VectorVar { 174 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 175 | let precision = f.precision().unwrap_or(3); 176 | write!(f, "VectorVar{}(", N)?; 177 | for (i, x) in self.0.iter().enumerate() { 178 | if i > 0 { 179 | write!(f, ", ")?; 180 | } 181 | write!(f, "{:.p$}", x, p = precision)?; 182 | } 183 | write!(f, ")") 184 | } 185 | } 186 | 187 | impl Debug for VectorVar { 188 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 189 | Display::fmt(self, f) 190 | } 191 | } 192 | 193 | impl Index for VectorVar { 194 | type Output = T; 195 | 196 | fn index(&self, index: usize) -> &Self::Output { 197 | &self.0[index] 198 | } 199 | } 200 | 201 | /// 1D Vector Variable 202 | pub type VectorVar1 = VectorVar<1, T>; 203 | /// 2D Vector Variable 204 | pub type VectorVar2 = VectorVar<2, T>; 205 | /// 3D Vector Variable 206 | pub type VectorVar3 = VectorVar<3, T>; 207 | /// 4D Vector Variable 208 | pub type VectorVar4 = VectorVar<4, T>; 209 | /// 5D Vector Variable 210 | pub type VectorVar5 = VectorVar<5, T>; 211 | /// 6D Vector Variable 212 | pub type VectorVar6 = VectorVar<6, T>; 213 | 214 | #[cfg(test)] 215 | mod tests { 216 | use super::*; 217 | use crate::test_variable; 218 | 219 | // Be lazy and only test Vector6 - others should work the same 220 | test_variable!(VectorVar6); 221 | } 222 | 
-------------------------------------------------------------------------------- /src/variables/se2.rs: -------------------------------------------------------------------------------- 1 | use std::{fmt, ops}; 2 | 3 | use super::VectorVar2; 4 | use crate::{ 5 | dtype, 6 | linalg::{ 7 | vectorx, AllocatorBuffer, Const, DefaultAllocator, DimName, DualAllocator, DualVector, 8 | Matrix2, Matrix2x3, Matrix3, MatrixView, Numeric, SupersetOf, Vector2, Vector3, 9 | VectorView2, VectorView3, VectorViewX, VectorX, 10 | }, 11 | variables::{MatrixLieGroup, Variable, SO2}, 12 | }; 13 | 14 | /// Special Euclidean Group in 2D 15 | /// 16 | /// Implementation of SE(2) for 2D transformations. 17 | #[derive(Clone)] 18 | #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] 19 | pub struct SE2 { 20 | rot: SO2, 21 | xy: Vector2, 22 | } 23 | 24 | impl SE2 { 25 | /// Create a new SE2 from an angle, x, and y coordinates 26 | pub fn new(theta: T, x: T, y: T) -> Self { 27 | SE2 { 28 | rot: SO2::from_theta(theta), 29 | xy: Vector2::new(x, y), 30 | } 31 | } 32 | 33 | pub fn xy(&self) -> VectorView2 { 34 | self.xy.as_view() 35 | } 36 | 37 | pub fn x(&self) -> T { 38 | self.xy[0] 39 | } 40 | 41 | pub fn y(&self) -> T { 42 | self.xy[1] 43 | } 44 | 45 | pub fn rot(&self) -> &SO2 { 46 | &self.rot 47 | } 48 | 49 | pub fn theta(&self) -> T { 50 | self.rot.log()[0] 51 | } 52 | } 53 | 54 | #[factrs::mark] 55 | impl Variable for SE2 { 56 | type T = T; 57 | type Dim = Const<3>; 58 | type Alias = SE2; 59 | 60 | fn identity() -> Self { 61 | SE2 { 62 | rot: Variable::identity(), 63 | xy: Vector2::zeros(), 64 | } 65 | } 66 | 67 | fn compose(&self, other: &Self) -> Self { 68 | SE2 { 69 | rot: &self.rot * &other.rot, 70 | xy: self.rot.apply(other.xy.as_view()) + self.xy, 71 | } 72 | } 73 | 74 | fn inverse(&self) -> Self { 75 | let inv = self.rot.inverse(); 76 | SE2 { 77 | xy: -&inv.apply(self.xy.as_view()), 78 | rot: inv, 79 | } 80 | } 81 | 82 | #[allow(non_snake_case)] 83 | 
#[allow(clippy::needless_borrow)] 84 | fn exp(xi: VectorViewX) -> Self { 85 | let theta = xi[0]; 86 | let xy = Vector2::new(xi[1], xi[2]); 87 | 88 | let rot = SO2::::exp(xi.rows(0, 1)); 89 | 90 | let xy = if cfg!(feature = "fake_exp") { 91 | xy 92 | } else { 93 | let A; 94 | let B; 95 | if theta < T::from(1e-5) { 96 | A = T::from(1.0); 97 | B = T::from(0.0); 98 | } else { 99 | A = theta.sin() / theta; 100 | B = (T::from(1.0) - theta.cos()) / theta; 101 | }; 102 | let V = Matrix2::new(A, -B, B, A); 103 | V * xy 104 | }; 105 | 106 | SE2 { rot, xy } 107 | } 108 | 109 | #[allow(non_snake_case)] 110 | #[allow(clippy::needless_borrow)] 111 | fn log(&self) -> VectorX { 112 | let theta = self.rot.log()[0]; 113 | 114 | let xy = if cfg!(feature = "fake_exp") { 115 | &self.xy 116 | } else { 117 | let A; 118 | let B; 119 | if theta < T::from(1e-5) { 120 | A = T::from(1.0); 121 | B = T::from(0.0); 122 | } else { 123 | A = theta.sin() / theta; 124 | B = (T::from(1.0) - theta.cos()) / theta; 125 | }; 126 | let V = Matrix2::new(A, -B, B, A); 127 | 128 | let Vinv = V.try_inverse().expect("V is not invertible"); 129 | &(Vinv * self.xy) 130 | }; 131 | 132 | vectorx![theta, xy[0], xy[1]] 133 | } 134 | 135 | fn cast>(&self) -> Self::Alias { 136 | SE2 { 137 | rot: self.rot.cast(), 138 | xy: self.xy.cast(), 139 | } 140 | } 141 | 142 | fn dual_exp(idx: usize) -> Self::Alias> 143 | where 144 | AllocatorBuffer: Sync + Send, 145 | DefaultAllocator: DualAllocator, 146 | DualVector: Copy, 147 | { 148 | SE2 { 149 | rot: SO2::::dual_exp(idx), 150 | xy: VectorVar2::::dual_exp(idx + 1).into(), 151 | } 152 | } 153 | } 154 | 155 | impl MatrixLieGroup for SE2 { 156 | type TangentDim = Const<3>; 157 | type MatrixDim = Const<3>; 158 | type VectorDim = Const<2>; 159 | 160 | // Adjoint of SE(2): [[R, (y, -x)^T], [0, 1]]. The last column is built from
// the translation; xy is a Vector2, so only indices 0 and 1 are valid.
fn adjoint(&self) -> Matrix3 { 161 | let mut mat = Matrix3::::zeros(); 162 | 163 | let r_mat = self.rot.to_matrix(); 164 | 165 | mat.fixed_view_mut::<2, 2>(0, 0).copy_from(&r_mat); 166 | mat[(0, 2)] = self.xy[1]; 167 | mat[(1, 2)] =
-self.xy[0]; 168 | 169 | mat 170 | } 171 | 172 | fn hat(xi: VectorView3) -> Matrix3 { 173 | let mut mat = Matrix3::::zeros(); 174 | mat[(0, 1)] = -xi[0]; 175 | mat[(1, 0)] = xi[0]; 176 | 177 | mat[(0, 2)] = xi[1]; 178 | mat[(1, 2)] = xi[2]; 179 | 180 | mat 181 | } 182 | 183 | // Inverse of hat: recover (theta, x, y) from the entries hat placed at
// (1,0), (0,2), and (1,2) respectively.
fn vee(xi: MatrixView<3, 3, T>) -> Vector3 { 184 | Vector3::new(xi[(1, 0)], xi[(0, 2)], xi[(1, 2)]) 185 | } 186 | 187 | fn apply(&self, v: VectorView2) -> Vector2 { 188 | self.rot.apply(v) + self.xy 189 | } 190 | 191 | fn hat_swap(xi: VectorView2) -> Matrix2x3 { 192 | let mut mat = Matrix2x3::::zeros(); 193 | mat.fixed_view_mut::<2, 1>(0, 0) 194 | .copy_from(&SO2::hat_swap(xi.as_view())); 195 | 196 | mat.fixed_view_mut::<2, 2>(0, 1) 197 | .copy_from(&Matrix2::identity()); 198 | mat 199 | } 200 | 201 | fn to_matrix(&self) -> Matrix3 { 202 | let mut mat = Matrix3::::identity(); 203 | mat.fixed_view_mut::<2, 2>(0, 0) 204 | .copy_from(&self.rot.to_matrix()); 205 | mat.fixed_view_mut::<2, 1>(0, 2).copy_from(&self.xy); 206 | mat 207 | } 208 | 209 | fn from_matrix(mat: MatrixView<3, 3, T>) -> Self { 210 | let rot = mat.fixed_view::<2, 2>(0, 0).clone_owned(); 211 | let rot = SO2::from_matrix(rot.as_view()); 212 | 213 | let xy = mat.fixed_view::<2, 1>(0, 2).into(); 214 | 215 | SE2 { rot, xy } 216 | } 217 | } 218 | 219 | impl ops::Mul for SE2 { 220 | type Output = SE2; 221 | 222 | #[inline] 223 | fn mul(self, other: Self) -> Self::Output { 224 | self.compose(&other) 225 | } 226 | } 227 | 228 | impl ops::Mul for &SE2 { 229 | type Output = SE2; 230 | 231 | #[inline] 232 | fn mul(self, other: Self) -> Self::Output { 233 | self.compose(other) 234 | } 235 | } 236 | 237 | impl fmt::Display for SE2 { 238 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 239 | let precision: usize = f.precision().unwrap_or(3); 240 | write!( 241 | f, 242 | "SE2(theta: {:.p$}, x: {:.p$}, y: {:.p$})", 243 | self.rot.log()[0], 244 | self.xy[0], 245 | self.xy[1], 246 | p = precision 247 | ) 248 | } 249 | } 250 | 251 | impl
fmt::Debug for SE2 { 252 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 253 | let precision: usize = f.precision().unwrap_or(3); 254 | write!( 255 | f, 256 | "SE2 {{ rot: {:?}, x: {:.p$}, y: {:.p$} }}", 257 | self.rot, 258 | self.xy[0], 259 | self.xy[1], 260 | p = precision 261 | ) 262 | } 263 | } 264 | 265 | #[cfg(test)] 266 | mod tests { 267 | use super::*; 268 | use crate::{test_lie, test_variable}; 269 | 270 | test_variable!(SO2); 271 | 272 | test_lie!(SO2); 273 | } 274 | -------------------------------------------------------------------------------- /src/optimizers/traits.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | core::{Graph, Values}, 3 | dtype, 4 | }; 5 | 6 | /// Error types for optimizers 7 | #[derive(Debug)] 8 | pub enum OptError { 9 | MaxIterations(Values), 10 | InvalidSystem, 11 | FailedToStep, 12 | } 13 | 14 | /// Result type for optimizers 15 | pub type OptResult = Result; 16 | 17 | // ------------------------- Optimizer Params ------------------------- // 18 | /// Parameters for the optimizer 19 | /// 20 | /// Simple trait to return base parameters for the optimizer. 
21 | pub trait OptParams: Default + Clone { 22 | fn base_params(&self) -> &BaseOptParams; 23 | } 24 | 25 | /// Parameters for the optimizer 26 | #[derive(Debug, Clone)] 27 | pub struct BaseOptParams { 28 | pub max_iterations: usize, 29 | pub error_tol_relative: dtype, 30 | pub error_tol_absolute: dtype, 31 | pub error_tol: dtype, 32 | } 33 | 34 | impl Default for BaseOptParams { 35 | fn default() -> Self { 36 | Self { 37 | max_iterations: 100, 38 | error_tol_relative: 1e-6, 39 | error_tol_absolute: 1e-6, 40 | error_tol: 0.0, 41 | } 42 | } 43 | } 44 | 45 | impl OptParams for BaseOptParams { 46 | fn base_params(&self) -> &BaseOptParams { 47 | self 48 | } 49 | } 50 | 51 | // ------------------------- Optimizer Observers ------------------------- // 52 | /// Observer trait for optimization 53 | /// 54 | /// This trait is used to observe the optimization process. It is called at each 55 | /// step of the optimization process. 56 | pub trait OptObserver { 57 | fn on_step(&self, values: &Values, time: f64); 58 | } 59 | 60 | /// Observer collection for optimization 61 | /// 62 | /// This struct holds a collection of observers for optimization. It is used to 63 | /// notify all observers at each step of the optimization process. 64 | #[derive(Default)] 65 | pub struct OptObserverVec { 66 | observers: Vec>, 67 | } 68 | 69 | impl OptObserverVec { 70 | pub fn add(&mut self, callback: impl OptObserver + 'static) { 71 | let boxed = Box::new(callback); 72 | self.observers.push(boxed); 73 | } 74 | 75 | pub fn notify(&self, values: &Values, idx: usize) { 76 | for callback in &self.observers { 77 | callback.on_step(values, idx as f64); 78 | } 79 | } 80 | } 81 | 82 | // ------------------------- Actual Trait Impl ------------------------- // 83 | /// Trait for optimization algorithms 84 | /// 85 | /// This trait is used to define the core optimization functions for an 86 | /// optimizer, specifically a handful of stopping criteria and the main loop. 
87 | pub trait Optimizer { 88 | type Params: OptParams 89 | where 90 | Self: Sized; 91 | 92 | // ------------------------- Required ------------------------- // 93 | /// Create a new optimizer 94 | fn new(params: Self::Params, graph: Graph) -> Self 95 | where 96 | Self: Sized; 97 | 98 | /// Observers 99 | fn observers(&self) -> &OptObserverVec; 100 | 101 | /// Observers 102 | fn observers_mut(&mut self) -> &mut OptObserverVec; 103 | 104 | /// The graph we are optimizing 105 | fn graph(&self) -> &Graph; 106 | 107 | /// The graph we are optimizing 108 | /// 109 | /// This is mutable to allow for modifying the graph during optimization. 110 | /// BE CAREFUL! In most optimizer, the overall structure of the graph should 111 | /// remain the same between optimization steps. 112 | fn graph_mut(&mut self) -> &mut Graph; 113 | 114 | /// Parameters for the optimizer 115 | fn params(&self) -> &BaseOptParams; 116 | 117 | /// Perform a single step of optimization 118 | /// 119 | /// Returns the new values and a string with information about the step 120 | fn step(&mut self, values: Values, idx: usize) -> OptResult<(Values, String)>; 121 | 122 | /// Compute the error of the current values 123 | fn error(&self, values: &Values) -> dtype; 124 | 125 | /// Initialize the optimizer, optional 126 | /// 127 | /// Returns a vector of strings to append to a column when logging 128 | fn init(&mut self, _values: &Values) -> Vec<&'static str> { 129 | Vec::new() 130 | } 131 | 132 | // ------------------------- Derived from the above ------------------------- // 133 | /// Main optimization call function 134 | fn optimize(&mut self, mut values: Values) -> OptResult { 135 | // Setup up everything from our values 136 | let append = self.init(&values); 137 | 138 | // Check if we need to optimize at all 139 | let mut error_old = self.error(&values); 140 | if error_old <= self.params().error_tol { 141 | log::info!("Error is already below tolerance, skipping optimization"); 142 | return Ok(values); 
143 | } 144 | 145 | let extra = if append.is_empty() { "" } else { " |" }; 146 | 147 | log::info!( 148 | "{:^5} | {:^12} | {:^12} | {:^12} | {}", 149 | "Iter", 150 | "Error", 151 | "ErrorAbs", 152 | "ErrorRel", 153 | append.join(" | ") + extra, 154 | ); 155 | log::info!( 156 | "{:^5} | {:^12} | {:^12} | {:^12} | {}", 157 | "-----", 158 | "------------", 159 | "------------", 160 | "------------", 161 | append 162 | .iter() 163 | .map(|s| "-".repeat(s.len())) 164 | .collect::>() 165 | .join(" | ") 166 | + extra 167 | ); 168 | log::info!( 169 | "{:^5} | {:^12.4e} | {:^12} | {:^12} | {}", 170 | 0, 171 | error_old, 172 | "-", 173 | "-", 174 | append 175 | .iter() 176 | .map(|s| format!("{:^width$}", "-", width = s.len())) 177 | .collect::>() 178 | .join(" | ") 179 | + extra 180 | ); 181 | 182 | // Begin iterations 183 | let mut error_new = error_old; 184 | for i in 1..self.params().max_iterations + 1 { 185 | error_old = error_new; 186 | let (temp, info) = self.step(values, i)?; 187 | values = temp; 188 | self.observers().notify(&values, i); 189 | 190 | // Evaluate error again to see how we did 191 | error_new = self.error(&values); 192 | 193 | let error_decrease_abs = error_old - error_new; 194 | let error_decrease_rel = error_decrease_abs / error_old; 195 | 196 | log::info!( 197 | "{:^5} | {:^12.4e} | {:^12.4e} | {:^12.4e} | {}", 198 | i, 199 | error_new, 200 | error_decrease_abs, 201 | error_decrease_rel, 202 | info 203 | ); 204 | 205 | // Check if we need to stop 206 | if error_new <= self.params().error_tol { 207 | log::info!("Error is below tolerance, stopping optimization"); 208 | return Ok(values); 209 | } 210 | if error_decrease_abs <= self.params().error_tol_absolute { 211 | log::info!("Error decrease is below absolute tolerance, stopping optimization"); 212 | return Ok(values); 213 | } 214 | if error_decrease_rel <= self.params().error_tol_relative { 215 | log::info!("Error decrease is below relative tolerance, stopping optimization"); 216 | return 
Ok(values); 217 | } 218 | } 219 | 220 | Err(OptError::MaxIterations(values)) 221 | } 222 | 223 | fn add_observer(&mut self, observer: impl OptObserver + 'static) 224 | where 225 | Self: Sized, 226 | { 227 | self.observers_mut().add(observer); 228 | } 229 | 230 | /// Create a new optimizer with default params 231 | fn new_default(graph: Graph) -> Self 232 | where 233 | Self: Sized, 234 | { 235 | Self::new(Self::Params::default(), graph) 236 | } 237 | } 238 | --------------------------------------------------------------------------------