├── .gitattributes
├── examples
│   └── quadratic
│       ├── cost_functions
│       │   ├── __init__.py
│       │   ├── wrappers.pyx
│       │   └── cost_functions.h
│       ├── Makefile
│       ├── quadratic.py
│       └── setup.py
├── README.md
├── cyres
│   ├── __init__.py
│   └── src
│       ├── cyres.pxd
│       ├── cyres.pyx
│       └── ceres.pxd
├── scripts
│   └── cyresc
├── .gitignore
├── LICENSE
└── setup.py

/.gitattributes:
--------------------------------------------------------------------------------
1 | cyres/src/cyres.cpp binary
2 | 
--------------------------------------------------------------------------------
/examples/quadratic/cost_functions/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | cyres
2 | =====
3 | 
4 | Python bindings for ceres-solver (via Cython)
--------------------------------------------------------------------------------
/cyres/__init__.py:
--------------------------------------------------------------------------------
1 | from cyres import *
2 | 
3 | def get_cython_include():
4 |     import os
5 |     include_path = os.path.dirname(__file__)
6 |     include_path = os.path.join(include_path, "src")
7 |     return include_path
8 | 
--------------------------------------------------------------------------------
/examples/quadratic/Makefile:
--------------------------------------------------------------------------------
1 | all: wrappers.so
2 | 
3 | clean:
4 | 	rm -f cost_functions/wrappers.so
5 | 	rm -f cost_functions/wrappers.cpp
6 | 	rm -rf build
7 | 
8 | wrappers.so:
9 | 	python setup.py build_ext --inplace --force
--------------------------------------------------------------------------------
/cyres/src/cyres.pxd:
--------------------------------------------------------------------------------
1 | cimport ceres
2 | 
3 | cdef class CostFunction:
4 |     cdef ceres.CostFunction* _cost_function
5 | 
6 |     cpdef parameter_block_sizes(self)
7 |     cpdef num_residuals(self)
8 | 
9 | cdef class LossFunction:
10 |     cdef ceres.LossFunction* _loss_function
11 | 
--------------------------------------------------------------------------------
/scripts/cyresc:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | import sys
4 | import os
5 | 
6 | import cyres
7 | 
8 | include_path = os.path.dirname(cyres.__file__)
9 | include_path = os.path.join(include_path, "src")
10 | 
11 | command = "cython -I {0} {1}".format(include_path, ' '.join(sys.argv[1:]))
12 | os.system(command)
13 | 
--------------------------------------------------------------------------------
/examples/quadratic/cost_functions/wrappers.pyx:
--------------------------------------------------------------------------------
1 | from cyres cimport CostFunction, LossFunction
2 | cimport ceres
3 | cimport numpy as np
4 | import numpy as np
5 | 
6 | cdef extern from "cost_functions.h":
7 |     ceres.CostFunction* createCostFunction()
8 | 
9 | cdef class SimpleCostFunction(CostFunction):
10 | 
11 |     def __cinit__(self):
12 |         self._cost_function = createCostFunction()
13 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.py[cod]
2 | 
3 | # C extensions
4 | *.so
5 | *.o
6 | 
7 | # Packages
8 | *.egg
9 | *.egg-info
10 | dist
11 | build
12 | eggs
13 | parts
14 | bin
15 | var
16 | sdist
17 | develop-eggs
18 | .installed.cfg
19 | lib
20 | lib64
21 | __pycache__
22 | 
23 | # Installer logs
24 | pip-log.txt
25 | 
26 | # Unit test / coverage reports
27 | .coverage
28 | .tox
29 | nosetests.xml
30 | 
31 | # Translations
32 | *.mo
33 | 
34 | # Mr Developer
35 | .mr.developer.cfg
36 | .project
37 | .pydevproject
38 | 
39 | # Editors
40 | ~*
41 | *.swp
42 | 
43 | # Example wrapper .cpp files
44 | examples/*/cost_functions/wrappers.cpp
--------------------------------------------------------------------------------
/examples/quadratic/quadratic.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | 
3 | from cyres import *
4 | from cost_functions.wrappers import SimpleCostFunction
5 | 
6 | x = np.array([5.])
7 | problem = Problem()
8 | problem.add_residual_block(SimpleCostFunction(), SquaredLoss(), [x])
9 | 
10 | options = SolverOptions()
11 | options.max_num_iterations = 50
12 | options.linear_solver_type = LinearSolverType.DENSE_QR
13 | options.trust_region_strategy_type = TrustRegionStrategyType.DOGLEG
14 | options.dogleg_type = DoglegType.SUBSPACE_DOGLEG
15 | options.minimizer_progress_to_stdout = False
16 | 
17 | summary = Summary()
18 | 
19 | solve(options, problem, summary)
20 | print summary.briefReport()
21 | print summary.fullReport()
22 | print x
23 | 
--------------------------------------------------------------------------------
/examples/quadratic/setup.py:
--------------------------------------------------------------------------------
1 | from distutils.core import setup
2 | from Cython.Distutils import build_ext
3 | from Cython.Distutils.extension import Extension
4 | import os
5 | import numpy
6 | import cyres
7 | 
8 | ceres_include = "/usr/local/include/ceres/"
9 | eigen_choices = ["/usr/local/include/eigen3", "/usr/include/eigen3"]
10 | eigen_include = [x for x in eigen_choices if os.path.exists(x)][0]
11 | 
12 | ext_modules = [
13 |     Extension(
14 |         "wrappers",
15 |         ["cost_functions/wrappers.pyx"],
16 |         language="c++",
17 |         include_dirs=[ceres_include, numpy.get_include(), eigen_include],
18 |         cython_include_dirs=[cyres.get_cython_include()],
19 |     )
20 | ]
21 | 
22 | setup(
23 |     name = 'cost_functions',
24 |     version='0.0.1',
25 |     cmdclass = {'build_ext': build_ext},
26 |     ext_package = 'cost_functions',
27 |     ext_modules = ext_modules,
28 | )
29 | 
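Note: once the extension above has been built in place (`make` in examples/quadratic), the wrapped cost function can also be evaluated directly from Python via `CostFunction.evaluate` (defined in cyres/src/cyres.pyx), which is a convenient sanity check before handing a wrapper to the solver. A minimal sketch; the expected numbers follow from f(x) = 10 - x in cost_functions.h below:

import numpy as np
from cost_functions.wrappers import SimpleCostFunction

f = SimpleCostFunction()

# evaluate() takes one array per parameter block; sizes are checked
# against CostFunction::parameter_block_sizes().
residuals, jacobians = f.evaluate(np.array([5.0]), include_jacobians=True)

print residuals     # [ 5.]   since f(5) = 10 - 5
print jacobians[0]  # [[-1.]] since df/dx = -1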
-------------------------------------------------------------------------------- /examples/quadratic/cost_functions/cost_functions.h: -------------------------------------------------------------------------------- 1 | #include "ceres/ceres.h" 2 | 3 | using ceres::CostFunction; 4 | using ceres::SizedCostFunction; 5 | 6 | class SimpleCostFunction 7 | : public SizedCostFunction<1 /* number of residuals */, 8 | 1 /* size of first parameter */> { 9 | public: 10 | virtual ~SimpleCostFunction() {} 11 | virtual bool Evaluate(double const* const* parameters, 12 | double* residuals, 13 | double** jacobians) const { 14 | double x = parameters[0][0]; 15 | 16 | // f(x) = 10 - x. 17 | residuals[0] = 10 - x; 18 | 19 | // f'(x) = -1. Since there's only 1 parameter and that parameter 20 | // has 1 dimension, there is only 1 element to fill in the 21 | // jacobians. 22 | if (jacobians != NULL && jacobians[0] != NULL) { 23 | jacobians[0][0] = -1; 24 | } 25 | return true; 26 | } 27 | }; 28 | 29 | CostFunction* createCostFunction(){ 30 | return new SimpleCostFunction(); 31 | } 32 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2013, Berkeley RLL 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without modification, 5 | are permitted provided that the following conditions are met: 6 | 7 | Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | Redistributions in binary form must reproduce the above copyright notice, this 11 | list of conditions and the following disclaimer in the documentation and/or 12 | other materials provided with the distribution. 13 | 14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 15 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 16 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 17 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR 18 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 19 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 20 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 21 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 23 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
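Aside: the residual in cost_functions.h above is affine in x, so a trust region method lands on the minimizer x = 10 in a single step, and the DOGLEG run in quadratic.py is expected to converge immediately. A plain-NumPy sketch of the underlying Gauss-Newton step, independent of cyres and ceres:

import numpy as np

# Solve (J'J) dx = -J'r for r(x) = 10 - x, J = dr/dx = -1, at x = 5.
x = np.array([5.0])
r = np.array([10.0 - x[0]])
J = np.array([[-1.0]])

dx = np.linalg.solve(J.T.dot(J), -J.T.dot(r))
print x + dx  # [ 10.] -- the exact minimizer, reached in one step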
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from distutils.core import setup
2 | from Cython.Distutils import Extension
3 | from Cython.Distutils import build_ext
4 | import numpy
5 | 
6 | import os, tempfile, subprocess, shutil
7 | 
8 | # see http://openmp.org/wp/openmp-compilers/
9 | omp_test = r"""#include <omp.h>
10 | #include <stdio.h>
11 | int main() {
12 | #pragma omp parallel
13 | printf("Hello from thread %d, nthreads %d\n", omp_get_thread_num(), omp_get_num_threads());
14 | }
15 | """
16 | 
17 | def has_openmp():
18 |     tmpdir = tempfile.mkdtemp()
19 |     curdir = os.getcwd()
20 |     os.chdir(tmpdir)
21 | 
22 |     filename = r'test.c'
23 |     file = open(filename, 'w', 0)
24 |     file.write(omp_test)
25 |     with open(os.devnull, 'w') as fnull:
26 |         result = subprocess.call(['cc', '-fopenmp', filename], stdout=fnull,
27 |                                  stderr=fnull)
28 | 
29 |     file.close()
30 |     os.chdir(curdir)
31 |     # clean up
32 |     shutil.rmtree(tmpdir)
33 | 
34 |     return result == 0
35 | 
36 | ceres_include = "/usr/local/include/ceres/"
37 | 
38 | ceres_lib = "/usr/local/lib/"
39 | gflags_lib = "/usr/local/lib/"
40 | glog_lib = "/usr/local/lib/"
41 | cholmod_lib = amd_lib = camd_lib = colamd_lib = "/usr/local/lib/"
42 | cxsparse_lib = "/usr/local/lib/"
43 | 
44 | extra_compile_args = ['-O3']
45 | extra_link_args = []
46 | 
47 | if has_openmp():
48 |     extra_compile_args.append('-fopenmp')
49 |     extra_link_args.append('-lgomp')
50 | 
51 | ext_modules = [
52 |     Extension(
53 |         "cyres",
54 |         ["cyres/src/cyres.pyx", "cyres/src/cyres.pxd", "cyres/src/ceres.pxd"],
55 |         language="c++",
56 |         include_dirs=[ceres_include, numpy.get_include()],
57 |         libraries=['ceres', 'gflags', 'glog', "cholmod", "camd", "amd", "colamd", "cxsparse"],
58 |         library_dirs=[ceres_lib, gflags_lib, glog_lib, cholmod_lib, amd_lib, camd_lib, colamd_lib, cxsparse_lib],
59 |         extra_compile_args=extra_compile_args,
60 |         extra_link_args=extra_link_args,
61 |     )
62 | ]
63 | 
64 | setup(
65 |     name = 'cyres',
66 |     version='0.0.1',
67 |     cmdclass = {'build_ext': build_ext},
68 |     ext_package = 'cyres',
69 |     ext_modules = ext_modules,
70 |     packages= ['cyres'],
71 |     package_data={'cyres': ['src/*.pxd']},
72 |     scripts=['scripts/cyresc']
73 | )
74 | 
--------------------------------------------------------------------------------
/cyres/src/cyres.pyx:
--------------------------------------------------------------------------------
1 | #cython: boundscheck=False, wraparound=False
2 | 
3 | import cython
4 | from libcpp.vector cimport vector
5 | from libc.stdlib cimport malloc, free
6 | from cython.operator cimport dereference as drf
7 | 
8 | cimport numpy as np
9 | 
10 | import numpy as np
11 | 
12 | cimport ceres
13 | from cyres cimport *
14 | 
15 | def enum(*sequential, **named):
16 |     enums = dict(zip(sequential, range(len(sequential))), **named)
17 |     reverse = dict((value, key) for key, value in enums.iteritems())
18 |     enums['reverse_mapping'] = reverse
19 |     return type('Enum', (), enums)
20 | 
21 | Ownership = enum("DO_NOT_TAKE_OWNERSHIP", "TAKE_OWNERSHIP")
22 | MinimizerType = enum("LINE_SEARCH", "TRUST_REGION")
23 | LinearSolverType = enum("DENSE_NORMAL_CHOLESKY", "DENSE_QR",
24 |                         "SPARSE_NORMAL_CHOLESKY", "DENSE_SCHUR", "SPARSE_SCHUR",
25 |                         "ITERATIVE_SCHUR", "CGNR")
26 | PreconditionerType = enum("IDENTITY", "JACOBI", "SCHUR_JACOBI",
27 |                           "CLUSTER_JACOBI", "CLUSTER_TRIDIAGONAL")
28 | SparseLinearAlgebraLibraryType = enum("SUITE_SPARSE", "CX_SPARSE")
29 | LinearSolverTerminationType = enum("TOLERANCE", "MAX_ITERATIONS", "STAGNATION",
30 |                                    "FAILURE")
31 | LoggingType = enum("SILENT", "PER_MINIMIZER_ITERATION")
32 | LineSearchDirectionType = enum("STEEPEST_DESCENT",
33 |                                "NONLINEAR_CONJUGATE_GRADIENT",
34 |                                "LBFGS")
35 | NonlinearConjugateGradientType = enum("FLETCHER_REEVES", "POLAK_RIBIRERE",
36 |                                       "HESTENES_STIEFEL")
37 | LineSearchType = enum("ARMIJO")
38 | TrustRegionStrategyType = enum("LEVENBERG_MARQUARDT", "DOGLEG")
39 | DoglegType = enum("TRADITIONAL_DOGLEG", "SUBSPACE_DOGLEG")
40 | SolverTerminationType = enum("DID_NOT_RUN", "NO_CONVERGENCE", "FUNCTION_TOLERANCE", "GRADIENT_TOLERANCE", "PARAMETER_TOLERANCE", "NUMERICAL_FAILURE", "USER_ABORT", "USER_SUCCESS")
41 | CallbackReturnType = enum("SOLVER_CONTINUE", "SOLVER_ABORT", "SOLVER_TERMINATE_SUCCESSFULLY")
42 | DumpFormatType = enum("CONSOLE", "PROTOBUF", "TEXTFILE")
43 | DimensionType = enum(DYNAMIC=-1)
44 | NumericDiffMethod = enum("CENTRAL", "FORWARD")
45 | 
46 | cdef class CostFunction:
47 | 
48 |     cpdef parameter_block_sizes(self):
49 |         block_sizes = []
50 |         cdef vector[ceres.int16] _parameter_block_sizes = self._cost_function.parameter_block_sizes()
51 |         for i in range(_parameter_block_sizes.size()):
52 |             block_sizes.append(_parameter_block_sizes[i])
53 |         return block_sizes
54 | 
55 |     cpdef num_residuals(self):
56 |         return self._cost_function.num_residuals()
57 | 
58 |     def evaluate(self, *param_blocks, **kwargs):
59 | 
60 |         include_jacobians = kwargs.get("include_jacobians", False)
61 | 
62 |         cdef double** _params_ptr = NULL
63 |         cdef double* _residuals_ptr = NULL
64 |         cdef double** _jacobians_ptr = NULL
65 | 
66 |         block_sizes = self.parameter_block_sizes()
67 | 
68 |         _params_ptr = <double**>malloc(sizeof(double*)*len(block_sizes))
69 | 
70 |         cdef np.ndarray[np.double_t, ndim=1] _param_block
71 | 
72 |         for i, param_block in enumerate(param_blocks):
73 |             if block_sizes[i] != len(param_block):
74 |                 raise Exception("Expected param block of size %d, got %d" % (block_sizes[i], len(param_block)))
75 |             _param_block = param_block
76 |             _params_ptr[i] = <double*>_param_block.data
77 | 
78 |         cdef np.ndarray[np.double_t, ndim=1] residuals
79 | 
80 |         residuals = np.empty((self.num_residuals()), dtype=np.double)
81 |         _residuals_ptr = <double*>residuals.data
82 | 
83 |         cdef np.ndarray[np.double_t, ndim=2] _jacobian
84 |         if include_jacobians:
85 |             # jacobians is an array of size CostFunction::parameter_block_sizes_
86 |             # containing pointers to storage for Jacobian matrices corresponding
87 |             # to each parameter block. The Jacobian matrices are in the same
88 |             # order as CostFunction::parameter_block_sizes_. jacobians[i] is an
89 |             # array that contains CostFunction::num_residuals_ x
90 |             # CostFunction::parameter_block_sizes_[i] elements. Each Jacobian
91 |             # matrix is stored in row-major order, i.e., jacobians[i][r *
92 |             # parameter_block_size_[i] + c]
93 |             jacobians = []
94 |             _jacobians_ptr = <double**>malloc(sizeof(double*)*len(block_sizes))
95 |             for i, block_size in enumerate(block_sizes):
96 |                 jacobian = np.empty((self.num_residuals(), block_size), dtype=np.double)
97 |                 jacobians.append(jacobian)
98 |                 _jacobian = jacobian
99 |                 _jacobians_ptr[i] = <double*>_jacobian.data
100 | 
101 |         self._cost_function.Evaluate(_params_ptr, _residuals_ptr, _jacobians_ptr)
102 | 
103 |         free(_params_ptr)
104 | 
105 |         if include_jacobians:
106 |             free(_jacobians_ptr)
107 |             return residuals, jacobians
108 |         else:
109 |             return residuals
110 | 
111 | cdef class SquaredLoss(LossFunction):
112 |     def __cinit__(self):
113 |         self._loss_function = NULL
114 | 
115 | cdef class HuberLoss(LossFunction):
116 |     def __init__(self, double _a):
117 |         self._loss_function = new ceres.HuberLoss(_a)
118 | 
119 | cdef class SoftLOneLoss(LossFunction):
120 |     def __init__(self, double _a):
121 |         self._loss_function = new ceres.SoftLOneLoss(_a)
122 | 
123 | cdef class CauchyLoss(LossFunction):
124 |     def __init__(self, double _a):
125 |         self._loss_function = new ceres.CauchyLoss(_a)
126 | 
127 | cdef class ArctanLoss(LossFunction):
128 |     def __init__(self, double _a):
129 |         """
130 |         Loss that is capped beyond a certain level using the arc-tangent
131 |         function. The scaling parameter 'a' determines the level where falloff
132 |         occurs. For costs much smaller than 'a', the loss function is linear
133 |         and behaves like TrivialLoss, and for values much larger than 'a' the
134 |         value asymptotically approaches the constant value of a * PI / 2.
135 | 
136 |         rho(s) = a atan(s / a).
137 | 
138 |         At s = 0: rho = [0, 1, 0].
139 |         """
140 |         self._loss_function = new ceres.ArctanLoss(_a)
141 | 
142 | cdef class TolerantLoss(LossFunction):
143 |     """
144 |     Loss function that maps to approximately zero cost in a range around the
145 |     origin, and reverts to linear in error (quadratic in cost) beyond this
146 |     range. The tolerance parameter 'a' sets the nominal point at which the
147 |     transition occurs, and the transition size parameter 'b' sets the nominal
148 |     distance over which most of the transition occurs. Both a and b must be
149 |     greater than zero, and typically b will be set to a fraction of a. The
150 |     slope rho'[s] varies smoothly from about 0 at s <= a - b to about 1 at s >=
151 |     a + b.
152 | 
153 |     The term is computed as:
154 | 
155 |         rho(s) = b log(1 + exp((s - a) / b)) - c0
156 | 
157 |     where c0 is chosen so that rho(0) == 0:
158 | 
159 |         c0 = b log(1 + exp(-a / b))
160 | 
161 |     This has the following useful properties:
162 | 
163 |         rho(s)  == 0  for s = 0
164 |         rho'(s) ~= 0  for s << a - b
165 |         rho'(s) ~= 1  for s >> a + b
166 |         rho''(s) > 0  for all s
167 | 
168 |     In addition, all derivatives are continuous, and the curvature is
169 |     concentrated in the range a - b to a + b.
170 | 
171 |     At s = 0: rho = [0, ~0, ~0].
172 | """ 173 | def __init__(self, double _a, double _b): 174 | self._loss_function = new ceres.TolerantLoss(_a, _b) 175 | 176 | cdef class ComposedLoss(LossFunction): 177 | 178 | def __init__(self, LossFunction f, LossFunction g): 179 | self._loss_function = new ceres.ComposedLoss(f._loss_function, 180 | ceres.DO_NOT_TAKE_OWNERSHIP, 181 | g._loss_function, 182 | ceres.DO_NOT_TAKE_OWNERSHIP) 183 | 184 | cdef class ScaledLoss(LossFunction): 185 | 186 | def __init__(self, LossFunction loss_function, double _a): 187 | self._loss_function = new ceres.ScaledLoss(loss_function._loss_function, 188 | _a, 189 | ceres.DO_NOT_TAKE_OWNERSHIP) 190 | 191 | cdef class Summary: 192 | cdef ceres.Summary _summary 193 | 194 | def briefReport(self): 195 | return self._summary.BriefReport() 196 | 197 | def fullReport(self): 198 | return self._summary.FullReport() 199 | 200 | cdef class EvaluateOptions: 201 | cdef ceres.EvaluateOptions _options 202 | 203 | def __cinit__(self): 204 | pass 205 | 206 | def __init__(self): 207 | self._options = ceres.EvaluateOptions() 208 | 209 | property residual_blocks: 210 | def __get__(self): 211 | blocks = [] 212 | cdef int i 213 | for i in range(self._options.residual_blocks.size()): 214 | block = ResidualBlockId() 215 | block._block_id = self._options.residual_blocks[i] 216 | blocks.append(block) 217 | return blocks 218 | def __set__(self, blocks): 219 | self._options.residual_blocks.clear() 220 | cdef ResidualBlockId block 221 | for block in blocks: 222 | self._options.residual_blocks.push_back(block._block_id) 223 | 224 | property apply_loss_function: 225 | def __get__(self): 226 | return self._options.apply_loss_function 227 | def __set__(self, value): 228 | self._options.apply_loss_function = value 229 | 230 | cdef class SolverOptions: 231 | cdef ceres.SolverOptions* _options 232 | 233 | def __cinit__(self): 234 | pass 235 | 236 | def __init__(self): 237 | self._options = new ceres.SolverOptions() 238 | 239 | property max_num_iterations: 240 | def __get__(self): 241 | return self._options.max_num_iterations 242 | 243 | def __set__(self, value): 244 | self._options.max_num_iterations = value 245 | 246 | property minimizer_progress_to_stdout: 247 | def __get__(self): 248 | return self._options.minimizer_progress_to_stdout 249 | 250 | def __set__(self, value): 251 | self._options.minimizer_progress_to_stdout = value 252 | 253 | property linear_solver_type: 254 | def __get__(self): 255 | return self._options.linear_solver_type 256 | 257 | def __set__(self, value): 258 | self._options.linear_solver_type = value 259 | 260 | property trust_region_strategy_type: 261 | def __get__(self): 262 | return self._options.trust_region_strategy_type 263 | 264 | def __set__(self, value): 265 | self._options.trust_region_strategy_type = value 266 | 267 | property dogleg_type: 268 | def __get__(self): 269 | return self._options.dogleg_type 270 | 271 | def __set__(self, value): 272 | self._options.dogleg_type = value 273 | 274 | property preconditioner_type: 275 | def __get__(self): 276 | return self._options.preconditioner_type 277 | 278 | def __set__(self, value): 279 | self._options.preconditioner_type = value 280 | 281 | property num_threads: 282 | def __get__(self): 283 | return self._options.num_threads 284 | 285 | def __set__(self, value): 286 | self._options.num_threads = value 287 | 288 | property num_linear_solver_threads: 289 | def __get__(self): 290 | return self._options.num_linear_solver_threads 291 | 292 | def __set__(self, value): 293 | self._options.num_linear_solver_threads 
= value
294 | 
295 |     property use_nonmonotonic_steps:
296 |         def __get__(self):
297 |             return self._options.use_nonmonotonic_steps
298 | 
299 |         def __set__(self, value):
300 |             self._options.use_nonmonotonic_steps = value
301 | 
302 | cdef class Problem:
303 |     cdef ceres.Problem _problem
304 | 
305 |     def __cinit__(self):
306 |         pass
307 | 
308 |     # loss_function=NULL yields squared loss
309 |     cpdef add_residual_block(self,
310 |                              CostFunction cost_function,
311 |                              LossFunction loss_function,
312 |                              parameter_blocks=[]):
313 | 
314 |         cdef np.ndarray _tmp_array
315 |         cdef vector[double*] _parameter_blocks
316 |         cdef double f
317 | 
318 |         cdef ceres.ResidualBlockId _block_id
319 | 
320 |         for parameter_block in parameter_blocks:
321 |             _tmp_array = np.ascontiguousarray(parameter_block, dtype=np.double)
322 |             _parameter_blocks.push_back(<double*>_tmp_array.data)
323 |         _block_id = self._problem.AddResidualBlock(cost_function._cost_function,
324 |                                                    loss_function._loss_function,
325 |                                                    _parameter_blocks)
326 |         block_id = ResidualBlockId()
327 |         block_id._block_id = _block_id
328 |         return block_id
329 | 
330 |     cpdef evaluate(self, residual_blocks, apply_loss_function=True):
331 | 
332 |         cdef double cost
333 | 
334 |         options = EvaluateOptions()
335 |         options.apply_loss_function = apply_loss_function
336 |         options.residual_blocks = residual_blocks
337 | 
338 |         self._problem.Evaluate(options._options, &cost, NULL, NULL, NULL)
339 |         return cost
340 | 
341 |     cpdef set_parameter_block_constant(self, block):
342 |         cdef np.ndarray _tmp_array = np.ascontiguousarray(block, dtype=np.double)
343 |         cdef double* _values = <double*>_tmp_array.data
344 |         self._problem.SetParameterBlockConstant(_values)
345 | 
346 |     cpdef set_parameter_block_variable(self, block):
347 |         cdef np.ndarray _tmp_array = np.ascontiguousarray(block, dtype=np.double)
348 |         cdef double* _values = <double*>_tmp_array.data
349 |         self._problem.SetParameterBlockVariable(_values)
350 | 
351 | cdef class ResidualBlockId:
352 |     cdef ceres.ResidualBlockId _block_id
353 | 
354 | def solve(SolverOptions options, Problem problem, Summary summary):
355 |     ceres.Solve(drf(options._options), &problem._problem, &summary._summary)
356 | 
--------------------------------------------------------------------------------
/cyres/src/ceres.pxd:
--------------------------------------------------------------------------------
1 | from libcpp cimport bool
2 | from libcpp.vector cimport vector
3 | from libcpp.string cimport string
4 | from libc.stdint cimport uint8_t, uint16_t, uint32_t, uint64_t
5 | 
6 | cdef extern from "types.h" namespace "ceres":
7 |     ctypedef short int16
8 |     ctypedef int int32
9 |     ctypedef enum Ownership:
10 |         DO_NOT_TAKE_OWNERSHIP
11 |         TAKE_OWNERSHIP
12 | 
13 |     ctypedef enum MinimizerType:
14 |         LINE_SEARCH
15 |         TRUST_REGION
16 | 
17 |     # TODO(keir): Considerably expand the explanations of each solver type.
18 |     ctypedef enum LinearSolverType:
19 |         # These solvers are for general rectangular systems formed from the
20 |         # normal equations A'A x = A'b. They are direct solvers and do not
21 |         # assume any special problem structure.
22 | 
23 |         # Solve the normal equations using a dense Cholesky solver; based
24 |         # on Eigen.
25 |         DENSE_NORMAL_CHOLESKY
26 | 
27 |         # Solve the normal equations using a dense QR solver; based on
28 |         # Eigen.
29 |         DENSE_QR
30 | 
31 |         # Solve the normal equations using a sparse cholesky solver; requires
32 |         # SuiteSparse or CXSparse.
33 |         SPARSE_NORMAL_CHOLESKY
34 | 
35 |         # Specialized solvers specific to problems with a generalized
36 |         # bipartite structure.
37 | 
38 |         # Solves the reduced linear system using a dense Cholesky solver;
39 |         # based on Eigen.
40 |         DENSE_SCHUR
41 | 
42 |         # Solves the reduced linear system using a sparse Cholesky solver;
43 |         # based on CHOLMOD.
44 |         SPARSE_SCHUR
45 | 
46 |         # Solves the reduced linear system using Conjugate Gradients based
47 |         # on a new Ceres implementation. Suitable for large scale
48 |         # problems.
49 |         ITERATIVE_SCHUR
50 | 
51 |         # Conjugate gradients on the normal equations.
52 |         CGNR
53 | 
54 |     ctypedef enum PreconditionerType:
55 |         # Trivial preconditioner - the identity matrix.
56 |         IDENTITY
57 | 
58 |         # Block diagonal of the Gauss-Newton Hessian.
59 |         JACOBI
60 | 
61 |         # Block diagonal of the Schur complement. This preconditioner may
62 |         # only be used with the ITERATIVE_SCHUR solver.
63 |         SCHUR_JACOBI
64 | 
65 |         # Visibility clustering based preconditioners.
66 |         #
67 |         # These preconditioners are well suited for Structure from Motion
68 |         # problems, particularly problems arising from community photo
69 |         # collections. These preconditioners use the visibility structure
70 |         # of the scene to determine the sparsity structure of the
71 |         # preconditioner. Requires SuiteSparse/CHOLMOD.
72 |         CLUSTER_JACOBI
73 |         CLUSTER_TRIDIAGONAL
74 | 
75 |     ctypedef enum SparseLinearAlgebraLibraryType:
76 |         # High performance sparse Cholesky factorization and approximate
77 |         # minimum degree ordering.
78 |         SUITE_SPARSE
79 | 
80 |         # A lightweight replacement for SuiteSparse.
81 |         CX_SPARSE
82 | 
83 |     ctypedef enum LinearSolverTerminationType:
84 |         # Termination criterion was met. For factorization based solvers
85 |         # the tolerance is assumed to be zero. Any user provided values are
86 |         # ignored.
87 |         TOLERANCE
88 | 
89 |         # Solver ran for max_num_iterations and terminated before the
90 |         # termination tolerance could be satisfied.
91 |         MAX_ITERATIONS
92 | 
93 |         # Solver is stuck and further iterations will not result in any
94 |         # measurable progress.
95 |         STAGNATION
96 | 
97 |         # Solver failed. Solver was terminated due to numerical errors. The
98 |         # exact cause of failure depends on the particular solver being
99 |         # used.
100 |         FAILURE
101 | 
102 |     # Logging options
103 |     # The options get progressively noisier.
104 |     ctypedef enum LoggingType:
105 |         SILENT
106 |         PER_MINIMIZER_ITERATION
107 | 
108 |     ctypedef enum LineSearchDirectionType:
109 |         STEEPEST_DESCENT
110 | 
111 |         # A generalization of the Conjugate Gradient method to non-linear
112 |         # functions. The generalization can be performed in a number of
113 |         # different ways, resulting in a variety of search directions. The
114 |         # precise choice of the non-linear conjugate gradient algorithm
115 |         # used is determined by NonlinearConjugateGradientType.
116 |         NONLINEAR_CONJUGATE_GRADIENT
117 | 
118 |         # A limited memory approximation to the inverse Hessian is
119 |         # maintained and used to compute a quasi-Newton step.
120 |         #
121 |         # For more details see
122 |         #
123 |         # Nocedal, J. (1980). "Updating Quasi-Newton Matrices with Limited
124 |         # Storage". Mathematics of Computation 35 (151): 773–782.
125 |         #
126 |         # Byrd, R. H.; Nocedal, J.; Schnabel, R. B. (1994).
127 |         # "Representations of Quasi-Newton Matrices and their use in
128 |         # Limited Memory Methods". Mathematical Programming 63 (4):
129 |         # 129–156.
130 |         LBFGS
131 | 
132 |     # Nonlinear conjugate gradient methods are a generalization of the
133 |     # method of Conjugate Gradients for linear systems. The
134 |     # generalization can be carried out in a number of different ways
135 |     # leading to a number of different rules for computing the search
136 |     # direction. Ceres provides a number of different variants. For more
137 |     # details see Numerical Optimization by Nocedal & Wright.
138 |     ctypedef enum NonlinearConjugateGradientType:
139 |         FLETCHER_REEVES
140 |         POLAK_RIBIRERE
141 |         HESTENES_STIEFEL
142 | 
143 |     ctypedef enum LineSearchType:
144 |         # Backtracking line search with polynomial interpolation or
145 |         # bisection.
146 |         ARMIJO
147 | 
148 |     # Ceres supports different strategies for computing the trust region
149 |     # step.
150 |     ctypedef enum TrustRegionStrategyType:
151 |         # The default trust region strategy is to use the step computation
152 |         # used in the Levenberg-Marquardt algorithm. For more details see
153 |         # levenberg_marquardt_strategy.h
154 |         LEVENBERG_MARQUARDT
155 | 
156 |         # Powell's dogleg algorithm interpolates between the Cauchy point
157 |         # and the Gauss-Newton step. It is particularly useful if the
158 |         # LEVENBERG_MARQUARDT algorithm is making a large number of
159 |         # unsuccessful steps. For more details see dogleg_strategy.h.
160 |         #
161 |         # NOTES:
162 |         #
163 |         # 1. This strategy has not been experimented with or tested as
164 |         # extensively as LEVENBERG_MARQUARDT and therefore it should be
165 |         # considered EXPERIMENTAL for now.
166 |         #
167 |         # 2. For now this strategy should only be used with exact
168 |         # factorization based linear solvers, i.e., SPARSE_SCHUR,
169 |         # DENSE_SCHUR, DENSE_QR and SPARSE_NORMAL_CHOLESKY.
170 |         DOGLEG
171 | 
172 |     # Ceres supports two different dogleg strategies.
173 |     # The "traditional" dogleg method by Powell and the
174 |     # "subspace" method described in
175 |     # R. H. Byrd, R. B. Schnabel, and G. A. Shultz,
176 |     # "Approximate solution of the trust region problem by minimization
177 |     # over two-dimensional subspaces", Mathematical Programming,
178 |     # 40 (1988), pp. 247--263
179 |     ctypedef enum DoglegType:
180 |         # The traditional approach constructs a dogleg path
181 |         # consisting of two line segments and finds the furthest
182 |         # point on that path that is still inside the trust region.
183 |         TRADITIONAL_DOGLEG
184 | 
185 |         # The subspace approach finds the exact minimum of the model
186 |         # constrained to the subspace spanned by the dogleg path.
187 |         SUBSPACE_DOGLEG
188 | 
189 |     ctypedef enum SolverTerminationType:
190 |         # The minimizer did not run at all; usually due to errors in the user's
191 |         # Problem or the solver options.
192 |         DID_NOT_RUN
193 | 
194 |         # The solver ran for the maximum number of iterations specified by the
195 |         # user, but none of the convergence criteria specified by the user
196 |         # were met.
197 |         NO_CONVERGENCE
198 | 
199 |         # Minimizer terminated because
200 |         # (new_cost - old_cost) < function_tolerance * old_cost;
201 |         FUNCTION_TOLERANCE
202 | 
203 |         # Minimizer terminated because
204 |         # max_i |gradient_i| < gradient_tolerance * max_i|initial_gradient_i|
205 |         GRADIENT_TOLERANCE
206 | 
207 |         # Minimizer terminated because
208 |         # |step|_2 <= parameter_tolerance * ( |x|_2 + parameter_tolerance)
209 |         PARAMETER_TOLERANCE
210 | 
211 |         # The minimizer terminated because it encountered a numerical error
212 |         # that it could not recover from.
213 |         NUMERICAL_FAILURE
214 | 
215 |         # Using an IterationCallback object, user code can control the
216 |         # minimizer. The following enums indicate that the user code was
217 |         # responsible for termination.
218 | 
219 |         # User's IterationCallback returned SOLVER_ABORT.
220 |         USER_ABORT
221 | 
222 |         # User's IterationCallback returned SOLVER_TERMINATE_SUCCESSFULLY
223 |         USER_SUCCESS
224 | 
225 |     # Enums used by the IterationCallback instances to indicate to the
226 |     # solver whether it should continue solving, the user detected an
227 |     # error, or the solution is good enough and the solver should
228 |     # terminate.
229 |     ctypedef enum CallbackReturnType:
230 |         # Continue solving to the next iteration.
231 |         SOLVER_CONTINUE
232 | 
233 |         # Terminate solver and do not update the parameter blocks upon
234 |         # return. Unless the user has set
235 |         # Solver::Options::update_state_every_iteration, in which case the
236 |         # state would have been updated every iteration
237 |         # anyways. Solver::Summary::termination_type is set to USER_ABORT.
238 |         SOLVER_ABORT
239 | 
240 |         # Terminate solver, update state and
241 |         # return. Solver::Summary::termination_type is set to USER_SUCCESS.
242 |         SOLVER_TERMINATE_SUCCESSFULLY
243 | 
244 |     # The format in which linear least squares problems should be logged
245 |     # when Solver::Options::lsqp_iterations_to_dump is non-empty.
246 |     ctypedef enum DumpFormatType:
247 |         # Print the linear least squares problem in a human readable format
248 |         # to stderr. The Jacobian is printed as a dense matrix. The vectors
249 |         # D, x and f are printed as dense vectors. This should only be used
250 |         # for small problems.
251 |         CONSOLE
252 | 
253 |         # Write out the linear least squares problem to the directory
254 |         # pointed to by Solver::Options::lsqp_dump_directory as a protocol
255 |         # buffer. linear_least_squares_problems.h/cc contains routines for
256 |         # loading these problems. For details on the on disk format used
257 |         # see matrix.proto. The files are named lm_iteration_???.lsqp.
258 |         PROTOBUF
259 | 
260 |         # Write out the linear least squares problem to the directory
261 |         # pointed to by Solver::Options::lsqp_dump_directory as text files
262 |         # which can be read into MATLAB/Octave. The Jacobian is dumped as a
263 |         # text file containing (i, j, s) triplets, and the vectors D, x and f
264 |         # are dumped as text files containing a list of their values.
265 |         #
266 |         # A MATLAB/Octave script called lm_iteration_???.m is also output
267 |         # which can be used to parse and load the problem into memory.
268 |         TEXTFILE
269 | 
270 |     # For SizedCostFunction and AutoDiffCostFunction, DYNAMIC can be specified for
271 |     # the number of residuals. If specified, then the number of residuals for that
272 |     # cost function can vary at runtime.
273 |     ctypedef enum DimensionType:
274 |         DYNAMIC = -1
275 | 
276 |     ctypedef enum NumericDiffMethod:
277 |         CENTRAL
278 |         FORWARD
279 | 
280 | 
281 | cdef extern from "ordered_groups.h" namespace "ceres":
282 |     cdef cppclass OrderedGroups[T]:
283 |         pass
284 |     ctypedef OrderedGroups[double*] ParameterBlockOrdering
285 | 
286 | cdef extern from "iteration_callback.h" namespace "ceres":
287 |     cdef struct IterationSummary:
288 |         IterationSummary()
289 | 
290 |         # Current iteration number.
291 |         int32 iteration
292 | 
293 |         # Step was numerically valid, i.e., all values are finite and the
294 |         # step reduces the value of the linearized model.
295 |         #
296 |         # Note: step_is_valid is false when iteration = 0.
297 |         bool step_is_valid
298 | 
299 |         # Step did not reduce the value of the objective function
300 |         # sufficiently, but it was accepted because of the relaxed
301 |         # acceptance criterion used by the non-monotonic trust region
302 |         # algorithm.
303 |         #
304 |         # Note: step_is_nonmonotonic is false when iteration = 0.
305 |         bool step_is_nonmonotonic
306 | 
307 |         # Whether or not the minimizer accepted this step. If the
308 |         # ordinary trust region algorithm is used, this means that the
309 |         # relative reduction in the objective function value was greater
310 |         # than Solver::Options::min_relative_decrease. However, if the
311 |         # non-monotonic trust region algorithm is used
312 |         # (Solver::Options::use_nonmonotonic_steps = true), then even if the
313 |         # relative decrease is not sufficient, the algorithm may accept the
314 |         # step and the step is declared successful.
315 |         #
316 |         # Note: step_is_successful is false when iteration = 0.
317 |         bool step_is_successful
318 | 
319 |         # Value of the objective function.
320 |         double cost
321 | 
322 |         # Change in the value of the objective function in this
323 |         # iteration. This can be positive or negative.
324 |         double cost_change
325 | 
326 |         # Infinity norm of the gradient vector.
327 |         double gradient_max_norm
328 | 
329 |         # 2-norm of the size of the step computed by the optimization
330 |         # algorithm.
331 |         double step_norm
332 | 
333 |         # For trust region algorithms, the ratio of the actual change in
334 |         # cost and the change in the cost of the linearized approximation.
335 |         double relative_decrease
336 | 
337 |         # Size of the trust region at the end of the current iteration. For
338 |         # the Levenberg-Marquardt algorithm, the regularization parameter
339 |         # mu = 1.0 / trust_region_radius.
340 |         double trust_region_radius
341 | 
342 |         # For the inexact step Levenberg-Marquardt algorithm, this is the
343 |         # relative accuracy with which the Newton(LM) step is solved. This
344 |         # number affects only the iterative solvers capable of solving
345 |         # linear systems inexactly. Factorization-based exact solvers
346 |         # ignore it.
347 |         double eta
348 | 
349 |         # Step size computed by the line search algorithm.
350 |         double step_size
351 | 
352 |         # Number of function evaluations used by the line search algorithm.
353 |         int line_search_function_evaluations
354 | 
355 |         # Number of iterations taken by the linear solver to solve for the
356 |         # Newton step.
357 |         int linear_solver_iterations
358 | 
359 |         # Time (in seconds) spent inside the minimizer loop in the current
360 |         # iteration.
361 |         double iteration_time_in_seconds
362 | 
363 |         # Time (in seconds) spent inside the trust region step solver.
364 |         double step_solver_time_in_seconds
365 | 
366 |         # Time (in seconds) since the user called Solve().
367 |         double cumulative_time_in_seconds
368 | 
369 | 
370 |     cdef cppclass IterationCallback:
371 |         CallbackReturnType operator()(const IterationSummary& summary)
372 | 
373 | cdef extern from "crs_matrix.h" namespace "ceres":
374 |     ctypedef struct CRSMatrix:
375 |         CRSMatrix()
376 | 
377 |         int num_rows
378 |         int num_cols
379 | 
380 |         vector[int] cols
381 |         vector[int] rows
382 |         vector[double] values
383 | 
384 | cdef extern from "local_parameterization.h" namespace "ceres":
385 |     cdef cppclass LocalParameterization:
386 |         bool Plus(const double* x,
387 |                   const double* delta,
388 |                   double* x_plus_delta) const
389 | 
390 |         bool ComputeJacobian(const double* x, double* jacobian) const
391 |         int GlobalSize() const
392 | 
393 |         int LocalSize() const
394 | 
395 | cdef extern from "problem.h" namespace "ceres::internal":
396 | 
397 |     cdef cppclass Preprocessor:
398 |         pass
399 |     cdef cppclass ProblemImpl:
400 |         pass
401 |     cdef cppclass ParameterBlock:
402 |         pass
403 |     cdef cppclass ResidualBlock:
404 |         pass
405 | 
406 | cdef extern from "loss_function.h" namespace "ceres":
407 | 
408 |     cdef cppclass LossFunction:
409 |         void Evaluate(double sq_norm, double out[3]) const
410 | 
411 |     cdef cppclass HuberLoss(LossFunction):
412 |         HuberLoss(double _a)
413 | 
414 |     cdef cppclass SoftLOneLoss(LossFunction):
415 |         SoftLOneLoss(double _a)
416 | 
417 |     cdef cppclass CauchyLoss(LossFunction):
418 |         CauchyLoss(double _a)
419 | 
420 |     cdef cppclass ArctanLoss(LossFunction):
421 |         ArctanLoss(double _a)
422 | 
423 |     cdef cppclass TolerantLoss(LossFunction):
424 |         TolerantLoss(double _a, double _b)
425 | 
426 |     cdef cppclass ComposedLoss(LossFunction):
427 |         ComposedLoss(const LossFunction* f, Ownership ownership_f,
428 |                      const LossFunction* g, Ownership ownership_g)
429 | 
430 |     cdef cppclass ScaledLoss(LossFunction):
431 |         ScaledLoss(const LossFunction* rho, double a, Ownership ownership)
432 | 
433 | 
434 | cdef extern from "cost_function.h" namespace "ceres":
435 |     cdef cppclass CostFunction:
436 |         bool Evaluate(double** parameters,
437 |                       double* residuals,
438 |                       double** jacobians) const
439 | 
440 |         const vector[int16]& parameter_block_sizes() const
441 | 
442 |         int num_residuals() const
443 | 
444 | cdef extern from "solver.h" namespace "ceres::Solver":
445 |     cdef cppclass SolverOptions "ceres::Solver::Options":
446 |         MinimizerType minimizer_type
447 | 
448 |         LineSearchDirectionType line_search_direction_type
449 |         LineSearchType line_search_type
450 |         NonlinearConjugateGradientType nonlinear_conjugate_gradient_type
451 | 
452 |         # The LBFGS hessian approximation is a low rank approximation to
453 |         # the inverse of the Hessian matrix. The rank of the
454 |         # approximation determines (linearly) the space and time
455 |         # complexity of using the approximation. The higher the rank, the
456 |         # better the quality of the approximation. The increase in
457 |         # quality is, however, bounded for a number of reasons.
458 |         #
459 |         # 1. The method only uses secant information and not actual
460 |         # derivatives.
461 |         #
462 |         # 2. The Hessian approximation is constrained to be positive
463 |         # definite.
464 |         #
465 |         # So increasing this rank to a large number will cost time and
466 |         # space complexity without the corresponding increase in solution
467 |         # quality. There are no hard and fast rules for choosing the
468 |         # maximum rank. The best choice usually requires some problem
469 |         # specific experimentation.
470 |         #
471 |         # For more theoretical and implementation details of the LBFGS
472 |         # method, please see:
473 |         #
474 |         # Nocedal, J. (1980). "Updating Quasi-Newton Matrices with
475 |         # Limited Storage". Mathematics of Computation 35 (151): 773–782.
476 |         int max_lbfgs_rank
477 | 
478 |         TrustRegionStrategyType trust_region_strategy_type
479 | 
480 |         # Type of dogleg strategy to use.
481 |         DoglegType dogleg_type
482 | 
483 |         # The classical trust region methods are descent methods, in that
484 |         # they only accept a point if it strictly reduces the value of
485 |         # the objective function.
486 |         #
487 |         # Relaxing this requirement allows the algorithm to be more
488 |         # efficient in the long term at the cost of some local increase
489 |         # in the value of the objective function.
490 |         #
491 |         # This is because allowing for non-decreasing objective function
492 |         # values in a principled manner allows the algorithm to "jump over
493 |         # boulders" as the method is not restricted to move into narrow
494 |         # valleys while preserving its convergence properties.
495 |         #
496 |         # Setting use_nonmonotonic_steps to true enables the
497 |         # non-monotonic trust region algorithm as described by Conn,
498 |         # Gould & Toint in "Trust Region Methods", Section 10.1.
499 |         #
500 |         # The parameter max_consecutive_nonmonotonic_steps controls the
501 |         # window size used by the step selection algorithm to accept
502 |         # non-monotonic steps.
503 |         #
504 |         # Even though the value of the objective function may be larger
505 |         # than the minimum value encountered over the course of the
506 |         # optimization, the final parameters returned to the user are the
507 |         # ones corresponding to the minimum cost over all iterations.
508 |         bool use_nonmonotonic_steps
509 |         int max_consecutive_nonmonotonic_steps
510 | 
511 |         # Maximum number of iterations for the minimizer to run for.
512 |         int max_num_iterations
513 | 
514 |         # Maximum time for which the minimizer should run.
515 |         double max_solver_time_in_seconds
516 | 
517 |         # Number of threads used by Ceres for evaluating the cost and
518 |         # jacobians.
519 |         int num_threads
520 | 
521 |         # Trust region minimizer settings.
522 |         double initial_trust_region_radius
523 |         double max_trust_region_radius
524 | 
525 |         # Minimizer terminates when the trust region radius becomes
526 |         # smaller than this value.
527 |         double min_trust_region_radius
528 | 
529 |         # Lower bound for the relative decrease before a step is
530 |         # accepted.
531 |         double min_relative_decrease
532 | 
533 |         # For the Levenberg-Marquardt algorithm, the scaled diagonal of
534 |         # the normal equations J'J is used to control the size of the
535 |         # trust region. Extremely small and large values along the
536 |         # diagonal can make this regularization scheme
537 |         # fail. lm_max_diagonal and lm_min_diagonal clamp the values of
538 |         # diag(J'J) from above and below. In the normal course of
539 |         # operation, the user should not have to modify these parameters.
540 |         double lm_min_diagonal
541 |         double lm_max_diagonal
542 | 
543 |         # Sometimes due to numerical conditioning problems or linear
544 |         # solver flakiness, the trust region strategy may return a
545 |         # numerically invalid step that can be fixed by reducing the
546 |         # trust region size. So the TrustRegionMinimizer allows for a few
547 |         # successive invalid steps before it declares NUMERICAL_FAILURE.
548 |         int max_num_consecutive_invalid_steps
549 | 
550 |         # Minimizer terminates when
551 |         #
552 |         # (new_cost - old_cost) < function_tolerance * old_cost
553 |         #
554 |         double function_tolerance
555 | 
556 |         # Minimizer terminates when
557 |         #
558 |         # max_i |gradient_i| < gradient_tolerance * max_i|initial_gradient_i|
559 |         #
560 |         # This value should typically be 1e-4 * function_tolerance.
561 |         double gradient_tolerance
562 | 
563 |         # Minimizer terminates when
564 |         #
565 |         # |step|_2 <= parameter_tolerance * ( |x|_2 + parameter_tolerance)
566 |         #
567 |         double parameter_tolerance
568 | 
569 |         # Linear least squares solver options -------------------------------------
570 | 
571 |         LinearSolverType linear_solver_type
572 | 
573 |         # Type of preconditioner to use with the iterative linear solvers.
574 |         PreconditionerType preconditioner_type
575 | 
576 |         # Ceres supports using multiple sparse linear algebra libraries
577 |         # for sparse matrix ordering and factorizations. Currently,
578 |         # SUITE_SPARSE and CX_SPARSE are the valid choices, depending on
579 |         # whether they are linked into Ceres at build time.
580 |         SparseLinearAlgebraLibraryType sparse_linear_algebra_library
581 | 
582 |         # Number of threads used by Ceres to solve the Newton
583 |         # step. Currently only the SPARSE_SCHUR solver is capable of
584 |         # using this setting.
585 |         int num_linear_solver_threads
586 | 
587 |         # The order in which variables are eliminated in a linear solver
588 |         # can have a significant impact on the efficiency and accuracy
589 |         # of the method. e.g., when doing sparse Cholesky factorization,
590 |         # there are matrices for which a good ordering will give a
591 |         # Cholesky factor with O(n) storage, whereas a bad ordering will
592 |         # result in a completely dense factor.
593 |         #
594 |         # Ceres allows the user to provide varying amounts of hints to
595 |         # the solver about the variable elimination ordering to use. This
596 |         # can range from no hints, where the solver is free to decide the
597 |         # best possible ordering based on the user's choices like the
598 |         # linear solver being used, to an exact order in which the
599 |         # variables should be eliminated, and a variety of possibilities
600 |         # in between.
601 |         #
602 |         # Instances of the ParameterBlockOrdering class are used to
603 |         # communicate this information to Ceres.
604 |         #
605 |         # Formally an ordering is an ordered partitioning of the
606 |         # parameter blocks, i.e., each parameter block belongs to exactly
607 |         # one group, and each group has a unique non-negative integer
608 |         # associated with it, that determines its order in the set of
609 |         # groups.
610 |         #
611 |         # Given such an ordering, Ceres ensures that the parameter blocks in
612 |         # the lowest numbered group are eliminated first, and then the
613 |         # parameter blocks in the next lowest numbered group and so on. Within
614 |         # each group, Ceres is free to order the parameter blocks as it
615 |         # chooses.
616 |         #
617 |         # If NULL, then all parameter blocks are assumed to be in the
618 |         # same group and the solver is free to decide the best
619 |         # ordering.
620 |         #
621 |         # e.g. Consider the linear system
622 |         #
623 |         # x + y = 3
624 |         # 2x + 3y = 7
625 |         #
626 |         # There are two ways in which it can be solved. First eliminating x
627 |         # from the two equations, solving for y and then back substituting
628 |         # for x, or first eliminating y, solving for x and back substituting
629 |         # for y. The user can construct three orderings here.
630 |         #
631 |         # {0: x}, {1: y} - eliminate x first.
632 |         # {0: y}, {1: x} - eliminate y first.
633 |         # {0: x, y} - Solver gets to decide the elimination order.
634 |         #
635 |         # Thus, to have Ceres determine the ordering automatically using
636 |         # heuristics, put all the variables in group 0 and to control the
637 |         # ordering for every variable, create groups 0..N-1, one per
638 |         # variable, in the desired order.
639 |         #
640 |         # Bundle Adjustment
641 |         # -----------------
642 |         #
643 |         # A particular case of interest is bundle adjustment, where the user
644 |         # has two options. The default is to not specify an ordering at all;
645 |         # the solver will see that the user wants to use a Schur type solver
646 |         # and figure out the right elimination ordering.
647 |         #
648 |         # But if the user already knows what parameter blocks are points and
649 |         # what are cameras, they can save preprocessing time by partitioning
650 |         # the parameter blocks into two groups, one for the points and one
651 |         # for the cameras, where the group containing the points has an id
652 |         # smaller than the group containing cameras.
653 |         #
654 |         # Once assigned, Solver::Options owns this pointer and will
655 |         # deallocate the memory when destroyed.
656 |         ParameterBlockOrdering* linear_solver_ordering
657 | 
658 |         # Sparse Cholesky factorization algorithms use a fill-reducing
659 |         # ordering to permute the columns of the Jacobian matrix. There
660 |         # are two ways of doing this.
661 | 
662 |         # 1. Compute the Jacobian matrix in some order and then have the
663 |         # factorization algorithm permute the columns of the Jacobian.
664 | 
665 |         # 2. Compute the Jacobian with its columns already permuted.
666 | 
667 |         # The first option incurs a significant memory penalty. The
668 |         # factorization algorithm has to make a copy of the permuted
669 |         # Jacobian matrix, thus Ceres pre-permutes the columns of the
670 |         # Jacobian matrix and generally speaking, there is no performance
671 |         # penalty for doing so.
672 | 
673 |         # In some rare cases, it is worth using a more complicated
674 |         # reordering algorithm which has slightly better runtime
675 |         # performance at the expense of an extra copy of the Jacobian
676 |         # matrix. Setting use_postordering to true enables this tradeoff.
677 |         bool use_postordering
678 | 
679 |         # Some non-linear least squares problems have additional
680 |         # structure in the way the parameter blocks interact that it is
681 |         # beneficial to modify the way the trust region step is computed.
682 |         #
683 |         # e.g., consider the following regression problem
684 |         #
685 |         # y = a_1 exp(b_1 x) + a_2 exp(b_2 x^2 + c_1)
686 |         #
687 |         # Given a set of pairs {(x_i, y_i)}, the user wishes to estimate
688 |         # a_1, a_2, b_1, b_2, and c_1.
689 |         #
690 |         # Notice here that the expression on the left is linear in a_1
691 |         # and a_2, and given any value for b_1, b_2 and c_1, it is
692 |         # possible to use linear regression to estimate the optimal
693 |         # values of a_1 and a_2. Indeed, it's possible to analytically
694 |         # eliminate the variables a_1 and a_2 from the problem
695 |         # altogether. Problems like these are known as separable least
696 |         # squares problems and the most famous algorithm for solving them
697 |         # is the Variable Projection algorithm invented by Golub &
698 |         # Pereyra.
699 |         #
700 |         # Similar structure can be found in the matrix factorization with
701 |         # missing data problem. There the corresponding algorithm is
702 |         # known as Wiberg's algorithm.
703 |         #
704 |         # Ruhe & Wedin (Algorithms for Separable Nonlinear Least Squares
705 |         # Problems, SIAM Reviews, 22(3), 1980) present an analysis of
706 |         # various algorithms for solving separable non-linear least
707 |         # squares problems and refer to "Variable Projection" as
708 |         # Algorithm I in their paper.
709 |         #
710 |         # Implementing Variable Projection is tedious and expensive, and
711 |         # they present a simpler algorithm, which they refer to as
712 |         # Algorithm II, where once the Newton/Trust Region step has been
713 |         # computed for the whole problem (a_1, a_2, b_1, b_2, c_1) an
714 |         # additional optimization step is performed to estimate a_1 and
715 |         # a_2 exactly.
716 |         #
717 |         # This idea can be generalized to cases where the residual is not
718 |         # linear in a_1 and a_2, i.e., solve for the trust region step
719 |         # for the full problem, and then use it as the starting point to
720 |         # further optimize just a_1 and a_2. For the linear case, this
721 |         # amounts to doing a single linear least squares solve. For
722 |         # non-linear problems, any method for solving the a_1 and a_2
723 |         # optimization problems will do. The only constraint on a_1 and
724 |         # a_2 is that they do not co-occur in any residual block.
725 |         #
726 |         # This idea can be further generalized, by not just optimizing
727 |         # (a_1, a_2), but decomposing the graph corresponding to the
728 |         # Hessian matrix's sparsity structure into a collection of
729 |         # non-overlapping independent sets and optimizing each of them.
730 |         #
731 |         # Setting "use_inner_iterations" to true enables the use of this
732 |         # non-linear generalization of Ruhe & Wedin's Algorithm II. This
733 |         # version of Ceres has a higher iteration complexity, but also
734 |         # displays better convergence behaviour per iteration. Setting
735 |         # Solver::Options::num_threads to the maximum number possible is
736 |         # highly recommended.
737 |         bool use_inner_iterations
738 | 
739 |         # If inner_iterations is true, then the user has two choices.
740 |         #
741 |         # 1. Let the solver heuristically decide which parameter blocks
742 |         # to optimize in each inner iteration. To do this leave
743 |         # Solver::Options::inner_iteration_ordering untouched.
744 |         #
745 |         # 2. Specify a collection of ordered independent sets, where
746 |         # the lower numbered groups are optimized before the higher
747 |         # numbered groups. Each group must be an independent set.
748 |         ParameterBlockOrdering* inner_iteration_ordering
749 | 
750 |         # Minimum number of iterations for which the linear solver should
751 |         # run, even if the convergence criterion is satisfied.
752 |         int linear_solver_min_num_iterations
753 | 
754 |         # Maximum number of iterations for which the linear solver should
755 |         # run. If the solver does not converge in less than
756 |         # linear_solver_max_num_iterations, then it returns
757 |         # MAX_ITERATIONS as its termination type.
758 |         int linear_solver_max_num_iterations
759 | 
760 |         # Forcing sequence parameter. The truncated Newton solver uses
761 |         # this number to control the relative accuracy with which the
762 |         # Newton step is computed.
763 |         #
764 |         # This constant is passed to ConjugateGradientsSolver which uses
765 |         # it to terminate the iterations when
766 |         #
767 |         # (Q_i - Q_{i-1})/Q_i < eta/i
768 |         double eta
769 | 
770 |         # Normalize the jacobian using Jacobi scaling before calling
771 |         # the linear least squares solver.
772 | bool jacobi_scaling 773 | 774 | # Logging options --------------------------------------------------------- 775 | 776 | LoggingType logging_type 777 | 778 | # By default the Minimizer progress is logged to VLOG(1), which 779 | # is sent to STDERR depending on the vlog level. If this flag is 780 | # set to true, and logging_type is not SILENT, the logging output 781 | # is sent to STDOUT. 782 | bool minimizer_progress_to_stdout 783 | 784 | # List of iterations at which the optimizer should dump the 785 | # linear least squares problem to disk. Useful for testing and 786 | # benchmarking. If empty (default), no problems are dumped. 787 | # 788 | # This is ignored if protocol buffers are disabled. 789 | vector[int] lsqp_iterations_to_dump 790 | string lsqp_dump_directory 791 | DumpFormatType lsqp_dump_format_type 792 | 793 | # Finite differences options ---------------------------------------------- 794 | 795 | # Check all jacobians computed by each residual block with finite 796 | # differences. This is expensive since it involves computing the 797 | # derivative by normal means (e.g. user specified, autodiff, 798 | # etc), then also computing it using finite differences. The 799 | # results are compared, and if they differ substantially, details 800 | # are printed to the log. 801 | bool check_gradients 802 | 803 | # Relative precision to check for in the gradient checker. If the 804 | # relative difference between an element in a jacobian exceeds 805 | # this number, then the jacobian for that cost term is dumped. 806 | double gradient_check_relative_precision 807 | 808 | # Relative shift used for taking numeric derivatives. For finite 809 | # differencing, each dimension is evaluated at slightly shifted 810 | # values for the case of central difference, this is what gets 811 | # evaluated: 812 | # 813 | # delta = numeric_derivative_relative_step_size 814 | # f_initial = f(x) 815 | # f_forward = f((1 + delta) * x) 816 | # f_backward = f((1 - delta) * x) 817 | # 818 | # The finite differencing is done along each dimension. The 819 | # reason to use a relative (rather than absolute) step size is 820 | # that this way, numeric differentation works for functions where 821 | # the arguments are typically large (e.g. 1e9) and when the 822 | # values are small (e.g. 1e-5). It is possible to construct 823 | # "torture cases" which break this finite difference heuristic, 824 | # but they do not come up often in practice. 825 | # 826 | # TODO(keir): Pick a smarter number than the default above! In 827 | # theory a good choice is sqrt(eps) * x, which for doubles means 828 | # about 1e-8 * x. However, I have found this number too 829 | # optimistic. This number should be exposed for users to change. 830 | double numeric_derivative_relative_step_size 831 | 832 | # If true, the user's parameter blocks are updated at the end of 833 | # every Minimizer iteration, otherwise they are updated when the 834 | # Minimizer terminates. This is useful if, for example, the user 835 | # wishes to visualize the state of the optimization every 836 | # iteration. 837 | bool update_state_every_iteration 838 | 839 | # Callbacks that are executed at the end of each iteration of the 840 | # Minimizer. An iteration may terminate midway, either due to 841 | # numerical failures or because one of the convergence tests has 842 | # been satisfied. In this case none of the callbacks are 843 | # executed. 844 | 845 | # Callbacks are executed in the order that they are specified in 846 | # this vector. 
        # Callbacks that are executed at the end of each iteration of the
        # Minimizer. An iteration may terminate midway, either due to
        # numerical failures or because one of the convergence tests has
        # been satisfied. In this case none of the callbacks are
        # executed.

        # Callbacks are executed in the order that they are specified in
        # this vector. By default, parameter blocks are updated only at
        # the end of the optimization, i.e. when the Minimizer
        # terminates. This behaviour is controlled by
        # update_state_every_iteration. If the user wishes to have access
        # to the updated parameter blocks when his/her callbacks are
        # executed, then set update_state_every_iteration to true.
        #
        # The solver does NOT take ownership of these pointers.
        vector[IterationCallback*] callbacks

        # If non-empty, a summary of the execution of the solver is
        # recorded to this file.
        string solver_log

    cdef cppclass Summary:
        Summary()

        # A brief one line description of the state of the solver after
        # termination.
        string BriefReport() const

        # A full multiline description of the state of the solver after
        # termination.
        string FullReport() const

        # Minimizer summary -------------------------------------------------
        MinimizerType minimizer_type

        SolverTerminationType termination_type

        # If the solver did not run, or there was a failure, a
        # description of the error.
        string error

        # Cost of the problem before and after the optimization. See
        # problem.h for definition of the cost of a problem.
        double initial_cost
        double final_cost

        # The part of the total cost that comes from residual blocks that
        # were held fixed by the preprocessor because all the parameter
        # blocks that they depend on were fixed.
        double fixed_cost

        vector[IterationSummary] iterations

        int num_successful_steps
        int num_unsuccessful_steps

        # When the user calls Solve, before the actual optimization
        # occurs, Ceres performs a number of preprocessing steps. These
        # include error checks, memory allocations, and reorderings. This
        # time is accounted for as preprocessing time.
        double preprocessor_time_in_seconds

        # Time spent in the TrustRegionMinimizer.
        double minimizer_time_in_seconds

        # After the Minimizer is finished, some time is spent in
        # re-evaluating residuals etc. This time is accounted for in the
        # postprocessor time.
        double postprocessor_time_in_seconds

        # Sum total of all time spent inside Ceres when Solve is called.
        double total_time_in_seconds

        double linear_solver_time_in_seconds
        double residual_evaluation_time_in_seconds
        double jacobian_evaluation_time_in_seconds
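        # From Python, the quadratic example reads the two reports via
        # briefReport() and fullReport(); attribute access to the cost and
        # timing fields is an assumption about what cyres wraps:
        #
        #   summary = Summary()
        #   solve(options, problem, summary)
        #   print summary.briefReport()
        #   print summary.fullReport()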
        # Preprocessor summary.
        int num_parameter_blocks
        int num_parameters
        int num_effective_parameters
        int num_residual_blocks
        int num_residuals

        int num_parameter_blocks_reduced
        int num_parameters_reduced
        int num_effective_parameters_reduced
        int num_residual_blocks_reduced
        int num_residuals_reduced

        int num_eliminate_blocks_given
        int num_eliminate_blocks_used

        int num_threads_given
        int num_threads_used

        int num_linear_solver_threads_given
        int num_linear_solver_threads_used

        LinearSolverType linear_solver_type_given
        LinearSolverType linear_solver_type_used

        vector[int] linear_solver_ordering_given
        vector[int] linear_solver_ordering_used

        PreconditionerType preconditioner_type

        TrustRegionStrategyType trust_region_strategy_type
        DoglegType dogleg_type
        bool inner_iterations

        SparseLinearAlgebraLibraryType sparse_linear_algebra_library

        LineSearchDirectionType line_search_direction_type
        LineSearchType line_search_type
        int max_lbfgs_rank

        vector[int] inner_iteration_ordering_given
        vector[int] inner_iteration_ordering_used

cdef extern from "solver.h" namespace "ceres":

    void Solve(const SolverOptions& options,
               Problem* problem,
               Summary* summary)

cdef extern from "problem.h" namespace "ceres::Problem":

    ctypedef ResidualBlock* ResidualBlockId

    ctypedef struct ProblemOptions "ceres::Problem::Options":
        ProblemOptions()
        Ownership cost_function_ownership
        Ownership loss_function_ownership
        Ownership local_parameterization_ownership

        bool enable_fast_parameter_block_removal
        bool disable_all_safety_checks

    ctypedef struct EvaluateOptions:
        EvaluateOptions()
        vector[double*] parameter_blocks
        vector[ResidualBlockId] residual_blocks
        bool apply_loss_function
        int num_threads

cdef extern from "problem.h" namespace "ceres":

    ctypedef ResidualBlock* ResidualBlockId

    cdef cppclass Problem:

        Problem()
        Problem(const ProblemOptions& options)

        ResidualBlockId AddResidualBlock(CostFunction* cost_function,
                                         LossFunction* loss_function,
                                         const vector[double*]& parameter_blocks)
        ResidualBlockId AddResidualBlock(CostFunction* cost_function,
                                         LossFunction* loss_function,
                                         double* x0)
        ResidualBlockId AddResidualBlock(CostFunction* cost_function,
                                         LossFunction* loss_function,
                                         double* x0, double* x1)
        ResidualBlockId AddResidualBlock(CostFunction* cost_function,
                                         LossFunction* loss_function,
                                         double* x0, double* x1, double* x2)
        ResidualBlockId AddResidualBlock(CostFunction* cost_function,
                                         LossFunction* loss_function,
                                         double* x0, double* x1, double* x2,
                                         double* x3)
        ResidualBlockId AddResidualBlock(CostFunction* cost_function,
                                         LossFunction* loss_function,
                                         double* x0, double* x1, double* x2,
                                         double* x3, double* x4)
        ResidualBlockId AddResidualBlock(CostFunction* cost_function,
                                         LossFunction* loss_function,
                                         double* x0, double* x1, double* x2,
                                         double* x3, double* x4, double* x5)
        ResidualBlockId AddResidualBlock(CostFunction* cost_function,
                                         LossFunction* loss_function,
                                         double* x0, double* x1, double* x2,
                                         double* x3, double* x4, double* x5,
                                         double* x6)
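        # cyres funnels these overloads through a single Python-level call
        # that takes a list of NumPy arrays as the parameter blocks, as the
        # quadratic example does:
        #
        #   import numpy as np
        #   x = np.array([5.])
        #   problem = Problem()
        #   problem.add_residual_block(SimpleCostFunction(), SquaredLoss(), [x])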
        ResidualBlockId AddResidualBlock(CostFunction* cost_function,
                                         LossFunction* loss_function,
                                         double* x0, double* x1, double* x2,
                                         double* x3, double* x4, double* x5,
                                         double* x6, double* x7)
        ResidualBlockId AddResidualBlock(CostFunction* cost_function,
                                         LossFunction* loss_function,
                                         double* x0, double* x1, double* x2,
                                         double* x3, double* x4, double* x5,
                                         double* x6, double* x7, double* x8)
        ResidualBlockId AddResidualBlock(CostFunction* cost_function,
                                         LossFunction* loss_function,
                                         double* x0, double* x1, double* x2,
                                         double* x3, double* x4, double* x5,
                                         double* x6, double* x7, double* x8,
                                         double* x9)

        void AddParameterBlock(double* values, int size)

        void AddParameterBlock(double* values,
                               int size,
                               LocalParameterization* local_parameterization)

        void RemoveParameterBlock(double* values)

        void RemoveResidualBlock(ResidualBlockId residual_block)

        void SetParameterBlockConstant(double* values)

        void SetParameterBlockVariable(double* values)

        void SetParameterization(double* values,
                                 LocalParameterization* local_parameterization)

        int NumParameterBlocks() const

        int NumParameters() const

        int NumResidualBlocks() const

        int NumResiduals() const

        int ParameterBlockSize(double* values) const

        int ParameterBlockLocalSize(double* values) const

        void GetParameterBlocks(vector[double*]* parameter_blocks) const

        bool Evaluate(const EvaluateOptions& options,
                      double* cost,
                      vector[double]* residuals,
                      vector[double]* gradient,
                      CRSMatrix* jacobian)
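        # A Python-side evaluate wrapper is not shown in the examples; if
        # cyres exposed one (hypothetical name and signature), it would
        # compute the cost and residuals at the current parameter values
        # without running the solver:
        #
        #   cost, residuals = problem.evaluate(apply_loss_function=True)
--------------------------------------------------------------------------------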