├── src ├── CMakeLists.txt ├── BAProblem.cpp ├── BAProblem.hpp ├── SnavelyReprojectionError.hpp └── main.cpp ├── scripts ├── CMakeLists.txt ├── extract_ba.py ├── intrinsic.py ├── extrinsic.py ├── check_3d.py └── build_ba.py ├── CMakeLists.txt ├── cmake-external ├── glogConfig.cmake └── gflagsConfig.cmake ├── README.md └── COPYING /src/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | FILE(GLOB SOURCES "*.cpp") 2 | 3 | ADD_EXECUTABLE(${TARGET_EXEC} ${SOURCES}) 4 | SET_TARGET_PROPERTIES(${TARGET_EXEC} PROPERTIES COMPILE_FLAGS ${BUILD_FLAGS}) 5 | SET_TARGET_PROPERTIES(${TARGET_EXEC} PROPERTIES LINK_FLAGS ${BUILD_FLAGS}) 6 | TARGET_LINK_LIBRARIES(${TARGET_EXEC} ${CERES_LIBRARIES} 7 | ${GLOG_LIBRARIES} ${GFLAGS_LIBRARIES}) 8 | MESSAGE("build_flags : ${BUILD_FLAGS}") 9 | INSTALL(TARGETS ${TARGET_EXEC} DESTINATION bin) 10 | -------------------------------------------------------------------------------- /scripts/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | FILE (GLOB SCRIPTS "*.py") 2 | 3 | MACRO(ADD_SCRIPTS TARGET FILES) 4 | ADD_CUSTOM_TARGET(${TARGET} ALL) 5 | SET(_FILES ${FILES} ${ARGN}) 6 | FOREACH(FILENAME ${_FILES}) 7 | SET(SRC "${FILENAME}") 8 | GET_FILENAME_COMPONENT(DSTFILE ${FILENAME} NAME_WE) 9 | SET(DST "${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${DSTFILE}") 10 | ADD_CUSTOM_COMMAND( 11 | TARGET ${TARGET} 12 | COMMAND ${CMAKE_COMMAND} -E copy ${SRC} ${DST} 13 | COMMENT "Copying ${SRC}") 14 | SET(MAKE_CLEAN_FILES ${MAKE_CLEAN_FILES} ${DST}) 15 | SET(INSTALL_BINS ${INSTALL_BINS} ${DST}) 16 | ENDFOREACH(FILENAME) 17 | SET_PROPERTY(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES "${MAKE_CLEAN_FILES}") 18 | INSTALL(PROGRAMS ${INSTALL_BINS} DESTINATION bin) 19 | ENDMACRO(ADD_SCRIPTS) 20 | 21 | ADD_SCRIPTS(${TARGET_SCRIPTS} ${SCRIPTS}) 22 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | PROJECT(bundle_adjustment) 2 | CMAKE_MINIMUM_REQUIRED(VERSION 2.8) 3 | SET(CMAKE_EXPORT_COMPILE_COMMANDS ON) 4 | 5 | SET(TARGET_EXEC "bundle_adjuster") 6 | SET(TARGET_SCRIPTS "camera_calibration") 7 | 8 | SET(BA_PATH "${CMAKE_CURRENT_SOURCE_DIR}/src") 9 | SET(SCRIPTS_PATH "${CMAKE_CURRENT_SOURCE_DIR}/scripts") 10 | 11 | SET(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) 12 | SET(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) 13 | SET(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) 14 | 15 | FIND_PACKAGE(Ceres REQUIRED) 16 | MESSAGE(STATUS "Found ceres-solver library version ${CERES_VERSION}") 17 | INCLUDE_DIRECTORIES(${CERES_INCLUDE_DIRS}) 18 | 19 | SET(glog_DIR "${CMAKE_CURRENT_SOURCE_DIR}/cmake-external/") 20 | FIND_PACKAGE(glog REQUIRED PATHS cmake_external) 21 | INCLUDE_DIRECTORIES(${GLOG_INCLUDE_DIRS}) 22 | 23 | SET(gflags_DIR "${CMAKE_CURRENT_SOURCE_DIR}/cmake-external/") 24 | FIND_PACKAGE(gflags REQUIRED PATHS cmake_external) 25 | INCLUDE_DIRECTORIES(${GFLAGS_INCLUDE_DIRS}) 26 | 27 | SET(SETTINGS "-std=c++14 -fopenmp -fexceptions -fpic") 28 | SET(OPTIMIZATIONS "-D_FORTIFY_SOURCE=2 -march=native -mtune=native -ffast-math") 29 | SET(DIAGNOSTICS "-Wall -Wextra -pedantic -Werror") 30 | 31 | 32 | SET(BUILD_FLAGS "${SETTINGS} ${OPTIMIZATIONS} ${DIAGNOSTICS}") 33 | 34 | if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang") 35 | #dirty hacky workaround 36 | ADD_DEFINITIONS( -D__extern_always_inline=inline ) 37 | if(UNIX) 38 | if(CMAKE_GENERATOR STREQUAL 
"Ninja") 39 | LIST(APPEND CMAKE_C_FLAGS "-fcolor-diagnostics") 40 | LIST(APPEND CMAKE_CXX_FLAGS "-fcolor-diagnostics") 41 | endif(CMAKE_GENERATOR STREQUAL "Ninja") 42 | endif(UNIX) 43 | endif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang") 44 | 45 | ADD_SUBDIRECTORY(${BA_PATH}) 46 | ADD_SUBDIRECTORY(${SCRIPTS_PATH}) 47 | -------------------------------------------------------------------------------- /cmake-external/glogConfig.cmake: -------------------------------------------------------------------------------- 1 | # - Find GLog library 2 | # This module finds GLog library and its header files. 3 | # This code sets the following variables: 4 | # 5 | # GLOG_FOUND - has GLog library been found? 6 | # GLOG_LIBRARIES - names of the GLog libraries 7 | # GLOG_INCLUDE_PATH - path to the GLog includes 8 | # 9 | # and provides the following defines: 10 | # 11 | # __GLOG_FOUND__ 12 | # 13 | 14 | if (NOT GLOG_FOUND) 15 | 16 | set(GLOG_FOUND TRUE) 17 | 18 | set(GLOG_LIBRARYNAME "glog") 19 | 20 | # find GLog library 21 | find_library(GLOG_LIBRARY NAMES ${GLOG_LIBRARYNAME}) 22 | 23 | if (NOT GLOG_LIBRARY) 24 | set(GLOG_FOUND FALSE) 25 | set(GLOG_ERROR_REASON "${GLOG_ERROR_REASON} Libraries not found.") 26 | else() 27 | get_filename_component(GLOG_LIBRARY_PATH ${GLOG_LIBRARY} PATH CACHE) 28 | set(GLOG_LIBRARIES ${GLOG_LIBRARYNAME}) 29 | message(STATUS "GLog libraries: ${GLOG_LIBRARIES}") 30 | message(STATUS "GLog library path: ${GLOG_LIBRARY_PATH}") 31 | endif() 32 | 33 | # find GLog headers 34 | find_path(GLOG_INCLUDE_PATH NAMES gflags/gflags.h) 35 | 36 | if (NOT GLOG_INCLUDE_PATH) 37 | set(GLOG_FOUND FALSE) 38 | set(GLOG_ERROR_REASON "${GLOG_ERROR_REASON} Includes not found.") 39 | else() 40 | message(STATUS "GLog includes: ${GLOG_INCLUDE_PATH}") 41 | endif() 42 | 43 | if (NOT GLOG_FOUND) 44 | if(glog_FIND_REQUIRED) 45 | message(SEND_ERROR "Unable to find required GLog.\n${GLOG_ERROR_REASON}") 46 | else() 47 | message(STATUS "Unable to find GLog: ${GLOG_ERROR_REASON}") 48 | endif() 49 | else() 50 | ADD_DEFINITIONS(-D__GLOG_FOUND__) 51 | include_directories(${GLOG_INCLUDE_PATH}) 52 | link_directories (${GLOG_LIBRARY_PATH}) 53 | endif() 54 | 55 | endif() 56 | -------------------------------------------------------------------------------- /cmake-external/gflagsConfig.cmake: -------------------------------------------------------------------------------- 1 | # - Find GFlags library 2 | # This module finds GFlags library and its header files. 3 | # This code sets the following variables: 4 | # 5 | # GFLAGS_FOUND - has GFlags library been found? 
6 | # GFLAGS_LIBRARIES - names of the GFlags libraries 7 | # GFLAGS_INCLUDE_PATH - path to the GFlags includes 8 | # 9 | # and provides the following defines: 10 | # 11 | # __GFLAGS_FOUND__ 12 | # 13 | 14 | if (NOT GFLAGS_FOUND) 15 | 16 | set(GFLAGS_FOUND TRUE) 17 | 18 | set(GFLAGS_LIBRARYNAME "gflags") 19 | 20 | # find GFlags library 21 | find_library(GFLAGS_LIBRARY NAMES ${GFLAGS_LIBRARYNAME}) 22 | 23 | if (NOT GFLAGS_LIBRARY) 24 | set(GFLAGS_FOUND FALSE) 25 | set(GFLAGS_ERROR_REASON "${GFLAGS_ERROR_REASON} Libraries not found.") 26 | else() 27 | get_filename_component(GFLAGS_LIBRARY_PATH ${GFLAGS_LIBRARY} PATH CACHE) 28 | set(GFLAGS_LIBRARIES ${GFLAGS_LIBRARYNAME}) 29 | message(STATUS "GFlags libraries: ${GFLAGS_LIBRARIES}") 30 | message(STATUS "GFlags library path: ${GFLAGS_LIBRARY_PATH}") 31 | endif() 32 | 33 | # find GFlags headers 34 | find_path(GFLAGS_INCLUDE_PATH NAMES gflags/gflags.h) 35 | 36 | if (NOT GFLAGS_INCLUDE_PATH) 37 | set(GFLAGS_FOUND FALSE) 38 | set(GFLAGS_ERROR_REASON "${GFLAGS_ERROR_REASON} Includes not found.") 39 | else() 40 | message(STATUS "GFlags includes: ${GFLAGS_INCLUDE_PATH}") 41 | endif() 42 | 43 | if (NOT GFLAGS_FOUND) 44 | if(gflags_FIND_REQUIRED) 45 | message(SEND_ERROR "Unable to find required GFlags.\n${GFLAGS_ERROR_REASON}") 46 | else() 47 | message(STATUS "Unable to find GFlags: ${GFLAGS_ERROR_REASON}") 48 | endif() 49 | else() 50 | ADD_DEFINITIONS(-D__GFLAGS_FOUND__) 51 | include_directories(${GFLAGS_INCLUDE_PATH}) 52 | link_directories (${GFLAGS_LIBRARY_PATH}) 53 | endif() 54 | 55 | endif() 56 | -------------------------------------------------------------------------------- /scripts/extract_ba.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | #################################################################################################### 5 | # Multi-Camera Calibration Suite was built to help with intrinsic and extrinsic multi-camera 6 | # calibration. It also specifically contains a bundle adjustment module to help with the joint 7 | # calibration of the cameras. 8 | # 9 | # Copyright (c) 2017 Idiap Research Institute, http://www.idiap.ch/ 10 | # Written by Salim Kayal , 11 | # 12 | # This file is part of Multi-Camera Calibration Suite. 13 | # 14 | # Multi-Camera Calibration Suite is free software: you can redistribute it and/or modify 15 | # it under the terms of the GNU General Public License version 3 as 16 | # published by the Free Software Foundation. 17 | # 18 | # Multi-Camera Calibration Suite is distributed in the hope that it will be useful, 19 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 20 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 21 | # GNU General Public License for more details. 22 | # 23 | # You should have received a copy of the GNU General Public License 24 | # along with Multi-Camera Calibration Suite. If not, see . 
25 | #################################################################################################### 26 | 27 | __author__ = "Salim Kayal" 28 | __copyright__ = "Copyright 2016, Idiap Research Institute" 29 | __version__ = "1.0" 30 | __maintainer__ = "Salim Kayal" 31 | __email__ = "salim.kayal@idiap.ch" 32 | __status__ = "Production" 33 | __license__ = "GPLv3" 34 | 35 | from argparse import ArgumentParser 36 | from os import path 37 | import numpy as np 38 | import json 39 | 40 | def parse_args(): 41 | parser = ArgumentParser("options") 42 | parser.add_argument("input", help="input file") 43 | parser.add_argument("intrinsic", help="intrinsic calibration pattern") 44 | parser.add_argument("extrinsic", help="extrinsic calibration pattern") 45 | parser.add_argument("cameras", help="list of cameras") 46 | return parser.parse_args() 47 | 48 | def write_ba_solution(input_path, cameras, intrinsic, extrinsic): 49 | begincam = 0 50 | endcam = 0 51 | icam = 0 52 | with open(input_path, 'r') as solution: 53 | rvec = np.ndarray((3), dtype=np.float32) 54 | tvec = np.ndarray((3), dtype=np.float32) 55 | cmat = np.zeros((3,3), dtype=np.float32) 56 | dist = np.ndarray((5), dtype=np.float32) 57 | for i, line in enumerate(solution): 58 | if i == 0: 59 | fields = line.split(' ') 60 | num_cameras = int(fields[0]) 61 | num_obs = int(fields[3]) 62 | num_fixed_obs = int(fields[4]) 63 | begincam = (1 + num_obs + num_fixed_obs) 64 | endcam = 15 * num_cameras + begincam 65 | continue 66 | #ugly but so what... 67 | if i >= begincam and i < endcam: 68 | cnt = (i - begincam) % 15 69 | val = float(line) 70 | if cnt < 3: 71 | rvec[cnt] = val 72 | elif cnt < 6: 73 | tvec[cnt - 3] = val 74 | elif cnt == 6: 75 | cmat[0,0] = val 76 | elif cnt == 7: 77 | cmat[1,1] = val 78 | elif cnt == 8: 79 | cmat[0,2] = val 80 | elif cnt == 9: 81 | cmat[1,2] = val 82 | elif cnt < 12: 83 | dist[cnt - 10] = val 84 | elif cnt == 12: 85 | dist[4] = val 86 | elif cnt < 15: 87 | dist[cnt - 11] = val 88 | if cnt == 14: 89 | camera = cameras[icam] 90 | cmat[2,2] = 1 91 | icam += 1 92 | ipath = intrinsic[0] + camera + intrinsic[1] 93 | print ipath 94 | epath = extrinsic[0] + camera + extrinsic[1] 95 | print epath 96 | with open(ipath, 'w') as ifile: 97 | json.dump({'intrinsic':cmat.tolist(), 98 | 'distortion_coefficients':dist.tolist()}, 99 | ifile) 100 | with open(epath, 'w') as efile: 101 | json.dump({'rvec':rvec.tolist(), 'tvec':tvec.tolist()}, efile) 102 | rvec = np.ndarray((3), dtype=np.float32) 103 | tvec = np.ndarray((3), dtype=np.float32) 104 | cmat = np.zeros((3,3), dtype=np.float32) 105 | dist = np.ndarray((5), dtype=np.float32) 106 | elif i >= endcam: 107 | break 108 | 109 | def main(): 110 | opts = parse_args() 111 | cameras = opts.cameras.split(',') 112 | intrinsic = opts.intrinsic.split('{}') 113 | extrinsic = opts.extrinsic.split('{}') 114 | write_ba_solution(opts.input, cameras, intrinsic, extrinsic) 115 | 116 | if __name__ == "__main__": 117 | main() 118 | -------------------------------------------------------------------------------- /src/BAProblem.cpp: -------------------------------------------------------------------------------- 1 | /*************************************************************************************************** 2 | * Multi-Camera Calibration Suite was built to help with intrinsic and extrinsic multi-camera 3 | * calibration. It also specifically contains a bundle adjustment module to help with the joint 4 | * calibration of the cameras. 
5 | * 6 | * Copyright (c) 2017 Idiap Research Institute, http://www.idiap.ch/ 7 | * Written by Salim Kayal , 8 | * 9 | * This file is part of Multi-Camera Calibration Suite. 10 | * 11 | * Multi-Camera Calibration Suite is free software: you can redistribute it and/or modify 12 | * it under the terms of the GNU General Public License version 3 as 13 | * published by the Free Software Foundation. 14 | * 15 | * Multi-Camera Calibration Suite is distributed in the hope that it will be useful, 16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 | * GNU General Public License for more details. 19 | * 20 | * You should have received a copy of the GNU General Public License 21 | * along with Multi-Camera Calibration Suite. If not, see . 22 | ***************************************************************************************************/ 23 | 24 | // Inspired from : ceres solver examples/bal_problem.cc 25 | // 26 | // Ceres Solver - A fast non-linear least squares minimizer 27 | // Copyright 2015 Google Inc. All rights reserved. 28 | // http://ceres-solver.org/ 29 | // 30 | // Redistribution and use in source and binary forms, with or without 31 | // modification, are permitted provided that the following conditions are met: 32 | // 33 | // * Redistributions of source code must retain the above copyright notice, 34 | // this list of conditions and the following disclaimer. 35 | // * Redistributions in binary form must reproduce the above copyright notice, 36 | // this list of conditions and the following disclaimer in the documentation 37 | // and/or other materials provided with the distribution. 38 | // * Neither the name of Google Inc. nor the names of its contributors may be 39 | // used to endorse or promote products derived from this software without 40 | // specific prior written permission. 41 | // 42 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 43 | // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 44 | // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 45 | // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 46 | // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 47 | // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 48 | // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 49 | // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 50 | // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 51 | // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 52 | // POSSIBILITY OF SUCH DAMAGE. 
53 | // 54 | // Author: sameeragarwal@google.com (Sameer Agarwal) 55 | 56 | #include "BAProblem.hpp" 57 | 58 | BAProblem::~BAProblem() { 59 | delete[] point_index_; 60 | delete[] camera_index_; 61 | delete[] observations_; 62 | delete[] parameters_; 63 | } 64 | 65 | BAProblem::BAProblem(const std::string filename) { 66 | bool ret = LoadFile(filename); 67 | if (!ret) LOG(FATAL) << "Invalid input file"; 68 | } 69 | 70 | bool BAProblem::LoadFile(const std::string filename) { 71 | FILE* fptr = fopen(filename.c_str(), "r"); 72 | if (fptr == NULL) { 73 | return false; 74 | }; 75 | 76 | FscanfOrDie(fptr, "%d", &num_cameras_); 77 | FscanfOrDie(fptr, "%d", &num_points_); 78 | FscanfOrDie(fptr, "%d", &num_fixed_points_); 79 | FscanfOrDie(fptr, "%d", &num_observations_); 80 | FscanfOrDie(fptr, "%d", &num_fixed_observations_); 81 | 82 | point_index_ = new int[num_observations_ + num_fixed_observations_]; 83 | fixed_point_index_ = point_index_ + num_observations_; 84 | camera_index_ = new int[num_observations_ + num_fixed_observations_]; 85 | observations_ = new double[2 * num_observations_ + 2 * num_fixed_observations_]; 86 | 87 | num_parameters_ = NUM_CAMERA_PARAMETERS * num_cameras_ + 88 | NUM_COORDINATES * num_points_ + 89 | NUM_COORDINATES * num_fixed_points_; 90 | parameters_ = new double[num_parameters_]; 91 | 92 | for (int i = 0; i < num_observations_ + num_fixed_observations_; ++i) { 93 | FscanfOrDie(fptr, "%d", camera_index_ + i); 94 | FscanfOrDie(fptr, "%d", point_index_ + i); 95 | for (int j = 0; j < 2; ++j) { 96 | FscanfOrDie(fptr, "%lf", observations_ + 2*i + j); 97 | } 98 | } 99 | 100 | for (int i = 0; i < num_parameters_; ++i) { 101 | FscanfOrDie(fptr, "%lf", parameters_ + i); 102 | } 103 | return true; 104 | } 105 | 106 | bool BAProblem::WriteFile(const std::string filename) { 107 | FILE* fptr = fopen(filename.c_str(), "w"); 108 | if (fptr == NULL) { 109 | return false; 110 | }; 111 | 112 | fprintf(fptr, "%d ", num_cameras_); 113 | fprintf(fptr, "%d ", num_points_); 114 | fprintf(fptr, "%d ", num_fixed_points_); 115 | fprintf(fptr, "%d ", num_observations_); 116 | fprintf(fptr, "%d \n", num_fixed_observations_); 117 | 118 | for (int i = 0; i < num_observations_ + num_fixed_observations_; ++i) { 119 | fprintf(fptr, "%d ", *(camera_index_ + i)); 120 | fprintf(fptr, "%d ", *(point_index_ + i)); 121 | for (int j = 0; j < 2; ++j) { 122 | fprintf(fptr, "%lf", *(observations_ + 2*i + j)); 123 | if (j == 0) fprintf(fptr, " "); 124 | else fprintf(fptr, "\n"); 125 | } 126 | } 127 | 128 | for (int i = 0; i < num_parameters_; ++i) { 129 | fprintf(fptr, "%lf\n", *(parameters_ + i)); 130 | } 131 | return true; 132 | } 133 | -------------------------------------------------------------------------------- /src/BAProblem.hpp: -------------------------------------------------------------------------------- 1 | /*************************************************************************************************** 2 | * Multi-Camera Calibration Suite was built to help with intrinsic and extrinsic multi-camera 3 | * calibration. It also specifically contains a bundle adjustment module to help with the joint 4 | * calibration of the cameras. 5 | * 6 | * Copyright (c) 2017 Idiap Research Institute, http://www.idiap.ch/ 7 | * Written by Salim Kayal , 8 | * 9 | * This file is part of Multi-Camera Calibration Suite. 
10 | * 11 | * Multi-Camera Calibration Suite is free software: you can redistribute it and/or modify 12 | * it under the terms of the GNU General Public License version 3 as 13 | * published by the Free Software Foundation. 14 | * 15 | * Multi-Camera Calibration Suite is distributed in the hope that it will be useful, 16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 | * GNU General Public License for more details. 19 | * 20 | * You should have received a copy of the GNU General Public License 21 | * along with Multi-Camera Calibration Suite. If not, see . 22 | ***************************************************************************************************/ 23 | 24 | // Inspired from : ceres solver examples/bal_problem.h 25 | // 26 | // Ceres Solver - A fast non-linear least squares minimizer 27 | // Copyright 2015 Google Inc. All rights reserved. 28 | // http://ceres-solver.org/ 29 | // 30 | // Redistribution and use in source and binary forms, with or without 31 | // modification, are permitted provided that the following conditions are met: 32 | // 33 | // * Redistributions of source code must retain the above copyright notice, 34 | // this list of conditions and the following disclaimer. 35 | // * Redistributions in binary form must reproduce the above copyright notice, 36 | // this list of conditions and the following disclaimer in the documentation 37 | // and/or other materials provided with the distribution. 38 | // * Neither the name of Google Inc. nor the names of its contributors may be 39 | // used to endorse or promote products derived from this software without 40 | // specific prior written permission. 41 | // 42 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 43 | // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 44 | // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 45 | // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 46 | // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 47 | // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 48 | // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 49 | // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 50 | // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 51 | // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 52 | // POSSIBILITY OF SUCH DAMAGE. 53 | // 54 | // Author: sameeragarwal@google.com (Sameer Agarwal) 55 | // 56 | // Class for loading and holding in memory bundle adjustment problems 57 | // from the BAL (Bundle Adjustment in the Large) dataset from the 58 | // University of Washington. 
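//
// Note (inferred from LoadFile()/WriteFile() in BAProblem.cpp, not part of the original BAL
// description): the file read here extends the plain BAL layout with fixed points and fixed
// observations. The expected content is:
//   <num_cameras> <num_points> <num_fixed_points> <num_observations> <num_fixed_observations>
//   <camera_index> <point_index> <x> <y>   (one entry per observation; regular observations
//                                           first, then the fixed ones)
//   <parameter value>                      (15 values per camera, then 3 per point,
//                                           then 3 per fixed point)
// Values are read as whitespace-separated tokens; WriteFile() emits one parameter per line.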
59 | // 60 | // For more details see http://grail.cs.washington.edu/projects/bal/ 61 | 62 | #ifndef _BA_PROBLEM_HPP_ 63 | #define _BA_PROBLEM_HPP_ 64 | 65 | #include 66 | #include 67 | #include "ceres/ceres.h" 68 | 69 | class BAProblem { 70 | public: 71 | BAProblem(const std::string filename); 72 | ~BAProblem(); 73 | int num_observations() const {return num_observations_;}; 74 | const double* observations() const {return observations_;}; 75 | int num_fixed_observations() const {return num_fixed_observations_;}; 76 | const double* fixed_observations() const {return observations_ + 2*num_observations_;}; 77 | int num_cameras() const {return num_cameras_;}; 78 | int num_points() const {return num_points_;}; 79 | int num_fixed_points() const {return num_fixed_points_;}; 80 | int point_block_size() const {return NUM_COORDINATES;}; 81 | int camera_block_size() const {return NUM_CAMERA_PARAMETERS;}; 82 | double* mutable_cameras() {return parameters_;}; 83 | double* mutable_points(){return mutable_cameras() + NUM_CAMERA_PARAMETERS * num_cameras_;}; 84 | double* fixed_points(){return mutable_points() + NUM_COORDINATES * num_points_;}; 85 | double* mutable_camera_for_observation(int i){return mutable_cameras() + 86 | camera_index_[i] * NUM_CAMERA_PARAMETERS;}; 87 | double* mutable_camera_for_fixed_observation(int i){return mutable_cameras() + 88 | camera_index_[i + num_observations_] * 89 | NUM_CAMERA_PARAMETERS;}; 90 | double* mutable_point_for_observation(int i){return mutable_points() + 91 | point_index_[i] * NUM_COORDINATES;}; 92 | double* fixed_point_for_observation(int i){return fixed_points() + 93 | point_index_[i + num_observations_] * NUM_COORDINATES;}; 94 | bool LoadFile(const std::string filename); 95 | bool WriteFile(const std::string filename); 96 | 97 | private: 98 | 99 | static const int NUM_CAMERA_PARAMETERS = 15; 100 | static const int NUM_COORDINATES = 3; 101 | 102 | template 103 | void FscanfOrDie(FILE *fptr, const char *format, T *value) { 104 | int num_scanned = fscanf(fptr, format, value); 105 | if (num_scanned != 1) { 106 | LOG(FATAL) << "Invalid UW data file."; 107 | } 108 | } 109 | 110 | int num_cameras_; 111 | int num_points_; 112 | int num_fixed_points_; 113 | int num_observations_; 114 | int num_fixed_observations_; 115 | int num_parameters_; 116 | 117 | int* point_index_; 118 | int* fixed_point_index_; 119 | int* camera_index_; 120 | double* observations_; 121 | double* parameters_; 122 | }; 123 | 124 | #endif /* ifndef _BA_PROBLEM_HPP_ */ 125 | -------------------------------------------------------------------------------- /src/SnavelyReprojectionError.hpp: -------------------------------------------------------------------------------- 1 | /*************************************************************************************************** 2 | * Multi-Camera Calibration Suite was built to help with intrinsic and extrinsic multi-camera 3 | * calibration. It also specifically contains a bundle adjustment module to help with the joint 4 | * calibration of the cameras. 5 | * 6 | * Copyright (c) 2017 Idiap Research Institute, http://www.idiap.ch/ 7 | * Written by Salim Kayal , 8 | * 9 | * This file is part of Multi-Camera Calibration Suite. 10 | * 11 | * Multi-Camera Calibration Suite is free software: you can redistribute it and/or modify 12 | * it under the terms of the GNU General Public License version 3 as 13 | * published by the Free Software Foundation. 
14 | * 15 | * Multi-Camera Calibration Suite is distributed in the hope that it will be useful, 16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 | * GNU General Public License for more details. 19 | * 20 | * You should have received a copy of the GNU General Public License 21 | * along with Multi-Camera Calibration Suite. If not, see . 22 | ***************************************************************************************************/ 23 | 24 | // Inspired from : ceres solver examples/SnavelyReprojectionError.h 25 | // 26 | // Ceres Solver - A fast non-linear least squares minimizer 27 | // Copyright 2015 Google Inc. All rights reserved. 28 | // http://ceres-solver.org/ 29 | // 30 | // Redistribution and use in source and binary forms, with or without 31 | // modification, are permitted provided that the following conditions are met: 32 | // 33 | // * Redistributions of source code must retain the above copyright notice, 34 | // this list of conditions and the following disclaimer. 35 | // * Redistributions in binary form must reproduce the above copyright notice, 36 | // this list of conditions and the following disclaimer in the documentation 37 | // and/or other materials provided with the distribution. 38 | // * Neither the name of Google Inc. nor the names of its contributors may be 39 | // used to endorse or promote products derived from this software without 40 | // specific prior written permission. 41 | // 42 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 43 | // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 44 | // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 45 | // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 46 | // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 47 | // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 48 | // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 49 | // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 50 | // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 51 | // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 52 | // POSSIBILITY OF SUCH DAMAGE. 53 | // 54 | // Author: sameeragarwal@google.com (Sameer Agarwal) 55 | // 56 | // Templated struct implementing the camera model and residual 57 | // computation for bundle adjustment used by Noah Snavely's Bundler 58 | // SfM system. This is also the camera model/residual for the bundle 59 | // adjustment problems in the BAL dataset. It is templated so that we 60 | // can use Ceres's automatic differentiation to compute analytic 61 | // jacobians. 62 | // 63 | // For details see: http://phototour.cs.washington.edu/bundler/ 64 | // and http://grail.cs.washington.edu/projects/bal/ 65 | 66 | #ifndef _SNAVELY_REPROJECTION_ERROR_HPP_ 67 | #define _SNAVELY_REPROJECTION_ERROR_HPP_ 68 | 69 | #include "ceres/rotation.h" 70 | #include "ceres/ceres.h" 71 | 72 | 73 | // Templated pinhole camera model. The camera is parameterized using 15 parameters: 74 | // 3 for rotation, 3 for translation, 2 for focal length (x and y), 2 for the principal point, 75 | // 3 for radial distortion and 2 for tangential distortion. 
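//
// For reference, and as used by BAProblem and scripts/extract_ba.py, the 15-double camera block
// is split into two parameter blocks when the problem is built:
//   extrinsics[0..2] : angle-axis rotation (rvec)
//   extrinsics[3..5] : translation (tvec)
//   intrinsics[0..3] : fx, fy, cx, cy
//   intrinsics[4..6] : radial distortion k1, k2, k3
//   intrinsics[7..8] : tangential distortion p1, p2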
76 | class SnavelyReprojectionError { 77 | public: 78 | SnavelyReprojectionError(double observed_x, double observed_y) 79 | :observed_x(observed_x), observed_y(observed_y) {} 80 | 81 | template <typename T> 82 | bool operator()(const T* const extrinsics, 83 | const T* const intrinsics, 84 | const T* const point, 85 | T* residuals) const { 86 | // camera[0,1,2] are the angle-axis rotation. 87 | T p[3]; 88 | ceres::AngleAxisRotatePoint(extrinsics, point, p); 89 | // camera[3,4,5] are the translation. 90 | p[0] += extrinsics[3]; 91 | p[1] += extrinsics[4]; 92 | p[2] += extrinsics[5]; 93 | T xp = p[0] / p[2]; 94 | T yp = p[1] / p[2]; 95 | const T& focalx = intrinsics[0]; 96 | const T& focaly = intrinsics[1]; 97 | const T& centerx = intrinsics[2]; 98 | const T& centery = intrinsics[3]; 99 | // Apply second and fourth order radial distortion. 100 | const T& l1 = intrinsics[4]; 101 | const T& l2 = intrinsics[5]; 102 | const T& l3 = intrinsics[6]; 103 | T r2 = xp * xp + yp * yp; 104 | T r4 = r2 * r2; 105 | T r6 = r4 * r2; 106 | T radial_distortion = T(1.0) + r2 * l1 + r4 * l2 + r6 * l3; 107 | // Apply tangential distortion 108 | const T& t1 = intrinsics[7]; 109 | const T& t2 = intrinsics[8]; 110 | T a1 = T(2.0) * xp * yp; 111 | T a2 = r2 + T(2.0) * xp * xp; 112 | T a3 = r2 + T(2.0) * yp * yp; 113 | T xpp = xp * radial_distortion + t1 * a1 + t2 * a2; 114 | T ypp = yp * radial_distortion + t1 * a3 + t2 * a1; 115 | // Compute final projected point position. 116 | T predicted_x = focalx * xpp + centerx; 117 | T predicted_y = focaly * ypp + centery; 118 | // The error is the difference between the predicted and observed position. 119 | residuals[0] = predicted_x - T(observed_x); 120 | residuals[1] = predicted_y - T(observed_y); 121 | return true; 122 | } 123 | 124 | // Factory to hide the construction of the CostFunction object from the client code. 125 | static ceres::CostFunction* Create(const double observed_x, const double observed_y) { 126 | return (new ceres::AutoDiffCostFunction<SnavelyReprojectionError, 2, 6, 9, 3>( 127 | new SnavelyReprojectionError(observed_x, observed_y))); 128 | } 129 | 130 | private: 131 | double observed_x; 132 | double observed_y; 133 | }; 134 | 135 | #endif /* ifndef _SNAVELY_REPROJECTION_ERROR_HPP_ */ 136 | -------------------------------------------------------------------------------- /scripts/intrinsic.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | #################################################################################################### 5 | # Multi-Camera Calibration Suite was built to help with intrinsic and extrinsic multi-camera 6 | # calibration. It also specifically contains a bundle adjustment module to help with the joint 7 | # calibration of the cameras. 8 | # 9 | # Copyright (c) 2017 Idiap Research Institute, http://www.idiap.ch/ 10 | # Written by Salim Kayal , 11 | # 12 | # This file is part of Multi-Camera Calibration Suite. 13 | # 14 | # Multi-Camera Calibration Suite is free software: you can redistribute it and/or modify 15 | # it under the terms of the GNU General Public License version 3 as 16 | # published by the Free Software Foundation. 17 | # 18 | # Multi-Camera Calibration Suite is distributed in the hope that it will be useful, 19 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 20 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 21 | # GNU General Public License for more details.
22 | # 23 | # You should have received a copy of the GNU General Public License 24 | # along with Multi-Camera Calibration Suite. If not, see . 25 | #################################################################################################### 26 | 27 | __author__ = "Salim Kayal" 28 | __copyright__ = "Copyright 2016, Idiap Research Institute" 29 | __version__ = "1.0" 30 | __maintainer__ = "Salim Kayal" 31 | __email__ = "salim.kayal@idiap.ch" 32 | __status__ = "Production" 33 | __license__ = "GPLv3" 34 | 35 | from os import path 36 | from shutil import copy 37 | from argparse import ArgumentParser 38 | from glob import glob 39 | import numpy as np 40 | import cv2 41 | import json 42 | 43 | def parse(): 44 | """Parse command line 45 | :returns: options 46 | """ 47 | parser = ArgumentParser() 48 | parser.add_argument('-n', '--num', default=20, type=int, help='number to subsample') 49 | parser.add_argument('-W', '--width', default=4, help='board width') 50 | parser.add_argument('-H', '--height', default=11, help='board height') 51 | parser.add_argument('-p', '--pattern', choices=['chessboard', 'circles', 'asymmetric_circles'], 52 | default='asymmetric_circles', help='pattern to discover') 53 | parser.add_argument('-o', '--full_output', help='output folder for all images') 54 | parser.add_argument('-s', '--select_output', help='output folder for selected images') 55 | parser.add_argument('input', help='input folder with images') 56 | parser.add_argument('output', help='output folder for calibration') 57 | return parser.parse_args() 58 | 59 | def filter_images(input_pattern, pattern, width, height): 60 | # termination criteria 61 | criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001) 62 | # Arrays to store object points and image points from all the images. 63 | imgpoints = [] # 2d points in image plane. 
64 | images = glob(input_pattern) 65 | to_del = [] 66 | for i, fname in enumerate(images): 67 | #print fname 68 | img = cv2.imread(fname) 69 | gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) 70 | # Find the chess board corners 71 | if pattern=='chessboard': 72 | ret, interest_points = cv2.findChessboardCorners(gray, (width, height), None) 73 | if ret == True: 74 | cv2.cornerSubPix(gray, interest_points, (11, 11), (-1, -1), criteria) 75 | else: 76 | flag = cv2.CALIB_CB_SYMMETRIC_GRID if pattern=='circles' else \ 77 | cv2.CALIB_CB_ASYMMETRIC_GRID 78 | ret, interest_points = cv2.findCirclesGrid(gray, (width, height), None, flag) 79 | # If found, add object points, image points (after refining them) 80 | if ret == True: 81 | cv2.cornerSubPix(gray, interest_points, (11, 11), (-1, -1), criteria) 82 | imgpoints.append(interest_points) 83 | else: 84 | to_del.append(i) 85 | while len(to_del) > 0: 86 | last_item = to_del[-1] 87 | del images[last_item] 88 | del to_del[-1] 89 | return images, imgpoints 90 | 91 | def mean_dist(pts1, pts2): 92 | tot_dist = 0 93 | for pt1, pt2 in zip(pts1, pts2): 94 | tot_dist += ((pt1[0][0] - pt2[0][0])**2 + (pt1[0][1] - pt2[0][1])**2)**0.5 95 | return tot_dist/len(pts1) 96 | 97 | def select_images(image_points, number_to_select): 98 | imlist = [0] 99 | imranks = [] 100 | for _ in range(number_to_select - 1): 101 | imgpoint = image_points[imlist[-1]] 102 | imdist = [] 103 | for i in range(len(image_points)): 104 | imdist.append([i, mean_dist(image_points[i], imgpoint)]) 105 | imdist.sort(key=lambda x: x[1]) 106 | imrank = [(idx, rank) for rank, (idx, _) in enumerate(imdist)] 107 | imrank.sort(key=lambda x: x[0]) 108 | imrank = [rank for _, rank in imrank] 109 | imranks.append(imrank) 110 | minranks = {i:len(image_points) for i in range(len(image_points)) if i not in imlist} 111 | for imrank in imranks: 112 | for idx, rank in minranks.iteritems(): 113 | if imrank[idx] < rank: 114 | minranks[idx] = imrank[idx] 115 | max_min_idx = max(minranks, key=minranks.get) 116 | imlist.append(max_min_idx) 117 | return imlist 118 | 119 | def main(): 120 | opts = parse() 121 | images, image_points = filter_images(opts.input, opts.pattern, opts.width, opts.height) 122 | if opts.full_output is not None: 123 | for image in images: 124 | copy(image, path.join(opts.full_output, path.basename(image))) 125 | selected_indices = select_images(image_points, opts.num) 126 | if opts.select_output is not None: 127 | for index in selected_indices: 128 | image = images[index] 129 | copy(image, path.join(opts.select_output, path.basename(image))) 130 | #compute calibration 131 | object_points = np.zeros((opts.width*opts.height, 3), np.float32) 132 | if opts.pattern == 'asymmetric_circles': 133 | grid = np.mgrid[0:opts.width * 2:2, 0:opts.height] 134 | grid[0, :, 1::2] += 1 135 | object_points[:, :2] = grid.T.reshape(-1, 2) 136 | else: 137 | object_points[:, :2] = np.mgrid[0:opts.width, 0:opts.height].T.reshape(-1, 2) 138 | opoints = [object_points for _ in range(len(selected_indices))] 139 | ipoints = [image_points[idx] for idx in selected_indices] 140 | image_resolution = cv2.imread(images[0]).shape[:-1][::-1] 141 | print image_resolution 142 | _, cam_matrix, distortion, rotation_vectors, translation_vectors = \ 143 | cv2.calibrateCamera(opoints, ipoints, image_resolution) 144 | #save calibration 145 | with open(opts.output, 'w') as out_file: 146 | json.dump({'intrinsic':cam_matrix.tolist(), 147 | 'distortion_coefficients':distortion.squeeze().tolist()}, 148 | out_file) 149 | # compute retroprojection 
error 150 | tot_error = 0 151 | for i in xrange(len(opoints)): 152 | ipoints2, _ = cv2.projectPoints(opoints[i], rotation_vectors[i], translation_vectors[i], 153 | cam_matrix, distortion) 154 | error = cv2.norm(ipoints[i],ipoints2, cv2.NORM_L2)/len(ipoints2) 155 | tot_error += error 156 | print "total error :", tot_error, "mean error :", tot_error/len(opoints) 157 | #show results 158 | pos = 0 159 | key = None 160 | while key != ord('q'): 161 | key = None 162 | image = cv2.imread(images[pos]) 163 | undistorted = cv2.undistort(image, cam_matrix, distortion, None) 164 | cv2.imshow('img', undistorted) 165 | print 'next (n), previous (p) or quit (q)' 166 | while key not in [ord('q'), ord('n'), ord('p')]: 167 | key = cv2.waitKey(10) 168 | pos = ((pos + 1) % len(selected_indices)) \ 169 | if key == ord('n') else \ 170 | (len(selected_indices) - 1 if pos == 0 else pos - 1) 171 | 172 | if __name__ == "__main__": 173 | main() 174 | -------------------------------------------------------------------------------- /src/main.cpp: -------------------------------------------------------------------------------- 1 | /*************************************************************************************************** 2 | * Multi-Camera Calibration Suite was built to help with intrinsic and extrinsic multi-camera 3 | * calibration. It also specifically contains a bundle adjustment module to help with the joint 4 | * calibration of the cameras. 5 | * 6 | * Copyright (c) 2017 Idiap Research Institute, http://www.idiap.ch/ 7 | * Written by Salim Kayal , 8 | * 9 | * This file is part of Multi-Camera Calibration Suite. 10 | * 11 | * Multi-Camera Calibration Suite is free software: you can redistribute it and/or modify 12 | * it under the terms of the GNU General Public License version 3 as 13 | * published by the Free Software Foundation. 14 | * 15 | * Multi-Camera Calibration Suite is distributed in the hope that it will be useful, 16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 | * GNU General Public License for more details. 19 | * 20 | * You should have received a copy of the GNU General Public License 21 | * along with Multi-Camera Calibration Suite. If not, see . 
22 | ***************************************************************************************************/ 23 | 24 | #include "BAProblem.hpp" 25 | #include "SnavelyReprojectionError.hpp" 26 | 27 | #include 28 | #include 29 | #include "ceres/ceres.h" 30 | #include "gflags/gflags.h" 31 | #include "glog/logging.h" 32 | 33 | #include 34 | 35 | DEFINE_string(input, "", "input file name"); 36 | DEFINE_string(output, "", "output file name"); 37 | DEFINE_string(intrinsic_adjustment, "fixed", "Options are: fixed, unconstrained, two_pass"); 38 | 39 | bool setOrdering(BAProblem &problem, ceres::Solver::Options &options, bool fix_intrinsics = false){ 40 | const int num_points = problem.num_points(); 41 | const int point_block_size = problem.point_block_size(); 42 | double* points = problem.mutable_points(); 43 | //const int num_fixed_points = problem.num_fixed_points(); 44 | //double* fixed_points = problem.fixed_points(); 45 | const int num_cameras = problem.num_cameras(); 46 | const int camera_block_size = problem.camera_block_size(); 47 | double* cameras = problem.mutable_cameras(); 48 | ceres::ParameterBlockOrdering* ordering = new ceres::ParameterBlockOrdering; 49 | ceres::ParameterBlockOrdering* inner_ordering = new ceres::ParameterBlockOrdering(); 50 | // The points come before the cameras. 51 | for (int i = 0; i < num_points; ++i) { 52 | ordering->AddElementToGroup(points + point_block_size * i, 0); 53 | inner_ordering->AddElementToGroup(points + point_block_size * i, 0); 54 | } 55 | for (int i = 0; i < num_cameras; ++i) { 56 | ordering->AddElementToGroup(cameras + camera_block_size * i, 1); 57 | inner_ordering->AddElementToGroup(cameras + camera_block_size * i, 1); 58 | if (fix_intrinsics == false){ 59 | ordering->AddElementToGroup(cameras + 6 + camera_block_size * i, 2); 60 | inner_ordering->AddElementToGroup(cameras + 6 + camera_block_size * i, 2); 61 | } 62 | } 63 | options.linear_solver_ordering.reset(ordering); 64 | if (options.use_inner_iterations){ 65 | options.inner_iteration_ordering.reset(inner_ordering); 66 | } 67 | else{ 68 | delete inner_ordering; 69 | } 70 | return true; 71 | } 72 | 73 | bool buildCeresProblem(BAProblem &ba_problem, ceres::Problem &problem, bool fix_intrinsics = false){ 74 | // Create residuals for each observation in the bundle adjustment problem. The 75 | // parameters for cameras and points are added automatically. 76 | const double* observations = ba_problem.observations(); 77 | const int num_observations = ba_problem.num_observations(); 78 | const double* fixed_observations = ba_problem.fixed_observations(); 79 | const int num_fixed_observations = ba_problem.num_fixed_observations(); 80 | for (int i = 0; i < ba_problem.num_observations(); ++i) { 81 | // Each Residual block takes a point and a camera as input and outputs a 2 82 | // dimensional residual. Internally, the cost function stores the observed 83 | // image location and compares the reprojection against the observation. 
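// Note that each camera's 15 parameters are exposed to the solver as two separate parameter
// blocks: the first 6 doubles (angle-axis rotation and translation) and the remaining 9
// intrinsics. This matches SnavelyReprojectionError::Create() and is why both mutable_camera
// and mutable_camera + 6 are passed to AddResidualBlock() below.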
84 | ceres::CostFunction* cost_function = 85 | SnavelyReprojectionError::Create(observations[2 * i + 0], 86 | observations[2 * i + 1]); 87 | ceres::LossFunction* loss_function = new ceres::HuberLoss(1); 88 | double* mutable_camera = ba_problem.mutable_camera_for_observation(i); 89 | double* mutable_point = ba_problem.mutable_point_for_observation(i); 90 | problem.AddResidualBlock(cost_function, 91 | loss_function, 92 | mutable_camera, 93 | mutable_camera + 6, 94 | mutable_point); 95 | } 96 | for (int i = 0; i < ba_problem.num_fixed_observations(); ++i){ 97 | ceres::CostFunction* cost_function = 98 | SnavelyReprojectionError::Create(fixed_observations[2 * i + 0], 99 | fixed_observations[2 * i + 1]); 100 | ceres::LossFunction* loss_function = new ceres::ScaledLoss( 101 | new ceres::HuberLoss(1), 102 | (double) num_observations / num_fixed_observations, 103 | ceres::TAKE_OWNERSHIP); 104 | double* mutable_camera = ba_problem.mutable_camera_for_fixed_observation(i); 105 | double* fixed_point = ba_problem.fixed_point_for_observation(i); 106 | problem.AddResidualBlock(cost_function, 107 | loss_function, 108 | mutable_camera, 109 | mutable_camera + 6, 110 | fixed_point); 111 | problem.SetParameterBlockConstant(fixed_point); 112 | } 113 | const int num_cameras = ba_problem.num_cameras(); 114 | const int camera_block_size = ba_problem.camera_block_size(); 115 | double* cameras = ba_problem.mutable_cameras(); 116 | for (int i = 0; i < num_cameras; ++i) { 117 | if (fix_intrinsics){ 118 | problem.SetParameterBlockConstant(cameras + camera_block_size * i + 6); 119 | } 120 | else{ 121 | problem.SetParameterBlockVariable(cameras + camera_block_size * i + 6); 122 | } 123 | } 124 | return true; 125 | } 126 | 127 | bool buildCeresOptions(BAProblem &ba_problem, ceres::Solver::Options &options, 128 | bool fix_intrinsics = false){ 129 | options.linear_solver_type = ceres::ITERATIVE_SCHUR; 130 | options.preconditioner_type= ceres::SCHUR_JACOBI; 131 | options.use_inner_iterations = true; 132 | options.use_nonmonotonic_steps = true; 133 | options.max_num_iterations = 200000; 134 | options.num_threads = omp_get_max_threads(); 135 | options.minimizer_progress_to_stdout = true; 136 | options.gradient_tolerance = 1e-16; 137 | options.function_tolerance = 1e-16; 138 | options.parameter_tolerance = 1e-16; 139 | options.max_num_consecutive_invalid_steps = 20; 140 | setOrdering(ba_problem, options, fix_intrinsics); 141 | return true; 142 | } 143 | 144 | int main(int argc, char** argv) { 145 | google::ParseCommandLineFlags(&argc, &argv, true); 146 | google::InitGoogleLogging(argv[0]); 147 | google::SetUsageMessage("bundle_adjuster [options] --input=input --output=output --intrinsic_adjustment=[fixed, unconstrained, two_pass]"); 148 | if (FLAGS_input.empty() || FLAGS_output.empty()){ 149 | std::cout << "Usage: " << google::ProgramUsage() << std::endl; 150 | return 1; 151 | } 152 | std::cout << "open file " << FLAGS_input << "\n"; 153 | BAProblem ba_problem(FLAGS_input); 154 | ceres::Problem* problem = new ceres::Problem; 155 | bool constrain = FLAGS_intrinsic_adjustment.compare("unconstrained") != 0; 156 | bool two_pass = FLAGS_intrinsic_adjustment.compare("two_pass") == 0; 157 | buildCeresProblem(ba_problem, *problem, constrain); 158 | ceres::Solver::Options options; 159 | buildCeresOptions(ba_problem, options, constrain); 160 | ceres::Solver::Summary summary; 161 | ceres::Solve(options, problem, &summary); 162 | std::cout << summary.FullReport() << "\n"; 163 | if (two_pass){ 164 | delete problem; 165 | problem = new 
ceres::Problem; 166 | buildCeresProblem(ba_problem, *problem); 167 | buildCeresOptions(ba_problem, options); 168 | ceres::Solve(options, problem, &summary); 169 | std::cout << summary.FullReport() << "\n"; 170 | } 171 | delete problem; 172 | if (!ba_problem.WriteFile(FLAGS_output)) { 173 | std::cerr << "ERROR: unable to open file " << argv[2] << "\n"; 174 | return 1; 175 | } 176 | return 0; 177 | } 178 | -------------------------------------------------------------------------------- /scripts/extrinsic.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | #################################################################################################### 5 | # Multi-Camera Calibration Suite was built to help with intrinsic and extrinsic multi-camera 6 | # calibration. It also specifically contains a bundle adjustment module to help with the joint 7 | # calibration of the cameras. 8 | # 9 | # Copyright (c) 2017 Idiap Research Institute, http://www.idiap.ch/ 10 | # Written by Salim Kayal , 11 | # 12 | # This file is part of Multi-Camera Calibration Suite. 13 | # 14 | # Multi-Camera Calibration Suite is free software: you can redistribute it and/or modify 15 | # it under the terms of the GNU General Public License version 3 as 16 | # published by the Free Software Foundation. 17 | # 18 | # Multi-Camera Calibration Suite is distributed in the hope that it will be useful, 19 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 20 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 21 | # GNU General Public License for more details. 22 | # 23 | # You should have received a copy of the GNU General Public License 24 | # along with Multi-Camera Calibration Suite. If not, see . 
25 | #################################################################################################### 26 | 27 | __author__ = "Salim Kayal" 28 | __copyright__ = "Copyright 2016, Idiap Research Institute" 29 | __version__ = "1.0" 30 | __maintainer__ = "Salim Kayal" 31 | __email__ = "salim.kayal@idiap.ch" 32 | __status__ = "Production" 33 | __license__ = "GPLv3" 34 | 35 | from os import path 36 | from argparse import ArgumentParser 37 | import numpy as np 38 | import json 39 | import cv2 40 | 41 | class CalibrationInterface(object): 42 | GET_POINT = 0 43 | GET_VALUES = 1 44 | 45 | @classmethod 46 | def __init__(self, camera_matrix, distortion_coefficients, image, object_points=None, 47 | image_points=None): 48 | if None in [object_points, image_points]: 49 | assert all(points is None for points in [object_points, image_points]) 50 | else: 51 | assert len(object_points) == len(image_points) 52 | self.wname = 'calibration interface' 53 | self.camera_matrix = camera_matrix 54 | self.distortion_coefficients = distortion_coefficients 55 | self.subtext = '' 56 | self.entry = '' 57 | self.entry_error = False 58 | self.state = self.GET_POINT 59 | self.image = image 60 | self.image_points = [] if image_points is None else image_points 61 | self.object_points = [] if object_points is None else object_points 62 | cv2.namedWindow(self.wname) 63 | cv2.setMouseCallback(self.wname, self.get_points) 64 | self.refresh() 65 | 66 | @classmethod 67 | def solve(self): 68 | assert len(self.object_points) == len(self.image_points) 69 | object_points = np.array(self.object_points)\ 70 | .reshape((len(self.object_points), 3, 1)).astype(np.float32) 71 | image_points = np.array(self.image_points)\ 72 | .reshape((len(self.object_points), 2, 1)).astype(np.float32) 73 | retval, rvec, tvec = cv2.solvePnP(object_points, image_points, self.camera_matrix, 74 | self.distortion_coefficients) 75 | if retval: 76 | return rvec, tvec 77 | 78 | @classmethod 79 | def get_points(self, event, x, y, flags, param): 80 | if self.state == self.GET_POINT: 81 | if event == cv2.EVENT_LBUTTONUP: 82 | self.image_points.append((x, y)) 83 | self.state = self.GET_VALUES 84 | elif event == cv2.EVENT_RBUTTONUP: 85 | self.image_points.pop() 86 | self.object_points.pop() 87 | self.refresh() 88 | 89 | @classmethod 90 | def run(self): 91 | opoint = [] 92 | def manage_key(message, key, opoint): 93 | self.subtext = 'PLEASE ENTER A NUMBER. ' + message if self.entry_error else message 94 | if key == chr(8): 95 | self.entry = self.entry[:-1] 96 | elif key == chr(10): 97 | try: 98 | opoint.append(float(self.entry)) 99 | self.entry_error = False 100 | self.subtext = '' 101 | self.entry = '' 102 | except ValueError: 103 | self.entry_error = True 104 | self.entry = '' 105 | elif key == chr(27): 106 | opoint = [] 107 | self.entry = '' 108 | self.subtext = '' 109 | self.state = self.GET_POINT 110 | self.image_points.pop() 111 | elif key == chr(255): 112 | pass 113 | else: 114 | self.entry += key 115 | return opoint 116 | while True: 117 | key = chr(cv2.waitKey(30) & 0xFF) 118 | if self.state == self.GET_VALUES: 119 | if len(opoint) == 0: 120 | message = 'please enter the real world x in cm' 121 | opoint = manage_key(message, key, opoint) 122 | elif len(opoint) == 1: 123 | message = "x value is %f. please enter the real world y in cm" % opoint[0] 124 | opoint = manage_key(message, key, opoint) 125 | else: 126 | opoint.append(0.) 
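# opoint is now (x, y, 0): annotated world points are assumed to lie in the z = 0 plane
# (typically the floor), so only x and y are asked for and the z coordinate is fixed to 0 cm.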
127 | self.object_points.append(tuple(opoint)) 128 | opoint = [] 129 | self.state = self.GET_POINT 130 | if self.state == self.GET_POINT and key in ['s', 'q']: 131 | break 132 | self.refresh() 133 | return None if key == 'q' else self.solve() 134 | 135 | @classmethod 136 | def refresh(self): 137 | frame = np.copy(self.image) 138 | cv2.putText(frame, self.subtext, (10, 25), cv2.FONT_HERSHEY_DUPLEX, 1, 139 | (0,0,255)) 140 | cv2.putText(frame, self.entry, (10, 50), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, 141 | (0,0,255)) 142 | for num, (ipoint, opoint) in enumerate(zip(self.image_points, self.object_points)): 143 | cv2.circle(frame, ipoint, 3, (255,0,0), 2) 144 | text = '{}: {}, {}'.format(num, opoint[0], opoint[1]) 145 | cv2.putText(frame, text, (ipoint[0] + 5, ipoint[1] + 5), cv2.FONT_HERSHEY_COMPLEX_SMALL, 146 | 1, (255,0,0)) 147 | cv2.imshow(self.wname, frame) 148 | 149 | def parse(): 150 | """Parse command line 151 | :returns: options 152 | """ 153 | parser = ArgumentParser() 154 | parser.add_argument('-p', '--points', help='input points for calibration') 155 | parser.add_argument('-o', '--output_points', help='output points for future calibration') 156 | parser.add_argument('intrinsic', help='intrinsic calibration file') 157 | parser.add_argument('input', help='input image file') 158 | parser.add_argument('output', help='output extrinsic calibration file') 159 | return parser.parse_args() 160 | 161 | def main(): 162 | """main function""" 163 | opts = parse() 164 | image = cv2.imread(opts.input) 165 | with open(opts.intrinsic, 'r') as cfile: 166 | calibration = json.load(cfile) 167 | cam_matrix = np.array(calibration['intrinsic']) 168 | distortion_coefficients = np.array(calibration['distortion_coefficients'])\ 169 | .reshape((len(calibration['distortion_coefficients']), 1)) 170 | if opts.points is not None: 171 | with open(opts.points, 'r') as pfile: 172 | points = json.load(pfile) 173 | object_points = [tuple(point) for point in points['object_points']] 174 | image_points = [tuple(point) for point in points['image_points']] 175 | interface = CalibrationInterface(cam_matrix, distortion_coefficients, image, 176 | object_points, image_points) 177 | else: 178 | interface = CalibrationInterface(cam_matrix, distortion_coefficients, image) 179 | results = interface.run() 180 | if results is not None: 181 | with open(opts.output, 'w') as ofile: 182 | json.dump(dict(rvec=results[0].tolist(), tvec=results[1].tolist()), ofile) 183 | if opts.output_points is not None: 184 | with open(opts.output_points, 'w') as opfile: 185 | json.dump(dict(image_points=interface.image_points, 186 | object_points=interface.object_points), opfile) 187 | 188 | if __name__ == "__main__": 189 | main() 190 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Multi Camera Calibration Suite 2 | ============================== 3 | 4 | This toolset provides the basics for calibrating a multi-camera scene. it contains six utilities for 5 | different purposes. In this README I will walk the user through the calibration of a multi camera 6 | scene using this toolset. 
7 | 8 | Dependencies 9 | ------------ 10 | 11 | The use of this suite requires: 12 | 13 | - [ceres-solver](http://ceres-solver.org/) 14 | - [google logging suite](https://code.google.com/archive/p/google-glog/) 15 | - [google flags](https://gflags.github.io/gflags/) 16 | - [OpenCV](http://opencv.org/) 17 | - [numpy](http://www.numpy.org/) 18 | 19 | Getting the source 20 | ------------------ 21 | 22 | Clone the repository using: 23 | 24 | ```git clone git@github.com:idiap/multicamera-calibration.git``` 25 | 26 | Building the source 27 | ------------------- 28 | 29 | Go to the source directory. 30 | 31 | Then run: 32 | 33 | ``` 34 | mkdir build 35 | cd build 36 | cmake .. -DCMAKE_BUILD_TYPE=Release 37 | make 38 | ``` 39 | 40 | Compute intrinsic camera parameters 41 | ----------------------------------- 42 | 43 | The intrinsic camera parameters are usually computed by finding a known grid, in multiple poses, on 44 | different images. To do so we propose the following method. 45 | 46 | Capture a video by waving the camera over a grid such as 47 | [this one](http://docs.opencv.org/2.4/_downloads/acircles_pattern.png) or 48 | [that one](http://docs.opencv.org/2.4/_downloads/pattern.png). Then extract the frames as images into 49 | a folder and run the program ```bin/intrinsic```: 50 | 51 | ```bash 52 | intrinsic [-h] [-n NUM] [-W WIDTH] [-H HEIGHT] 53 | [-p {chessboard,circles,asymmetric_circles}] [-o FULL_OUTPUT] 54 | [-s SELECT_OUTPUT] 55 | input output 56 | 57 | -h: show help 58 | -n: number of frames to use 59 | -W: width of grid 60 | -H: height of grid 61 | -p: type of grid 62 | -o: output folder for all frames with a visible grid (optional) 63 | -s: output folder for the n frames (option -n) selected for the calibration computation (optional) 64 | input: pattern for the images 65 | output: calibration file (json format) 66 | ``` 67 | 68 | For example, let's imagine we have the frames in the folder /home/user1/camera1/frames with the asymmetric 69 | circles grid provided earlier. 70 | 71 | You would use the script this way: 72 | ```bash 73 | bin/intrinsic -o /home/user1/frames_with_grid -s /home/user1/camera1/selected_frames_with_grid /home/user1/camera1/frames/\*.bmp /home/user1/camera1/intrinsic.json 74 | ``` 75 | 76 | Once the computation is done, the undistorted frames are shown: 77 | - 'n' goes to next frame 78 | - 'p' goes to previous frame 79 | - 'q' quits 80 | 81 | This produces the folders with the usable frames and the selected frames, so that reproducing the 82 | calibration takes less time. 83 | 84 | Compute extrinsic camera parameters 85 | ----------------------------------- 86 | 87 | The extrinsic parameters are computed by clicking on points with known coordinates (in cm) in an 88 | image. The syntax is the following: 89 | ```bash 90 | extrinsic [-h] [-p POINTS] [-o OUTPUT_POINTS] intrinsic input output 91 | 92 | -h: show help 93 | -p: saved points file, used to resume or correct the calibration (optional) 94 | -o: output points file; this file can later be used with the -p option (optional) 95 | intrinsic: intrinsic camera parameters 96 | input: image to annotate 97 | output: calibration output 98 | ``` 99 | 100 | Now, using the intrinsic.json file we computed in the previous step, run: 101 | ```bash 102 | bin/extrinsic /home/user1/camera1/intrinsic.json /home/user1/camera1/video_frames/000000.bmp /home/user1/camera1/extrinsic.json 103 | ``` 104 | 105 | It shows the frame 000000.bmp. Left click on a point to add it; you will be asked for its x and y in 106 | cm. Right click removes the last point.
110 |
111 | Verify the 3D projections in the scene
112 | --------------------------------------
113 |
114 | Now that both the intrinsic and extrinsic calibrations have been computed for cameras 1 to 4,
115 | you can check the correspondence between the cameras with the following utility:
116 |
117 | ```bash
118 | check3d [-h] intrinsic extrinsic images cameras
119 | -h: help
120 | intrinsic: pattern to intrinsic calibration files
121 | extrinsic: pattern to extrinsic calibration files
122 | images: pattern to camera images (same frame for each camera)
123 | cameras: comma-separated camera list
124 | ```
125 |
126 | With all our cameras calibrated, the call looks something like this:
127 |
128 | ```bash
129 | bin/check3d /home/user1/{}/intrinsic.json /home/user1/{}/extrinsic.json /home/user1/{}/video_frames/000000.bmp camera1,camera2,camera3,camera4
130 | ```
131 |
132 | Once the tool is launched, you get an image from the frame list.
133 | You can click on a point in some views; it will appear as a blue dot.
134 | Once you have clicked in at least two views, you get a red dot representing the
135 | projection in the other views.
136 | Right-click removes the blue dot.
137 | If you clicked wrong, another left-click moves the blue dot to the new location.
138 |
139 | - pressing 'n' goes to the next image
140 | - pressing 'p' goes to the previous image
141 | - pressing 'r' resets the points
142 | - pressing 'q' quits the tool
143 |
144 | Perform the bundle adjustment
145 | -----------------------------
146 |
147 | To perform the bundle adjustment, you need to provide annotations in the .pos format.
148 | The pos format has one file per frame; for example, /home/user1/camera1/annotations/000000.pos is the
149 | annotation for the /home/user1/camera1/video_frames/000000.bmp image.
150 |
151 | The pos format looks like this:
152 | ```
153 | identity_name_1
154 | 0 x_val y_val
155 | 1 x_val y_val
156 |
157 | identity_name_2
158 | 0 x_val y_val
159 | 1 x_val y_val
160 | ```
161 |
162 | Point number 0 corresponds to the feet, 1 to the head.
163 | Point numbers can be sparse and identities don't have to exist on every frame.
164 |
165 | Once some frames are annotated, run:
166 |
167 | ```bash
168 | build_ba [-h] [-f FIXED_POINTS]
169 | intrinsic extrinsic observations cameras output
170 |
171 | -h: help
172 | -f: pattern to known points that must remain in place,
173 | i.e.
the points output by the -o option of the extrinsic calibration (optional)
174 | intrinsic: pattern to intrinsic calibration files
175 | extrinsic: pattern to extrinsic calibration files
176 | observations: pattern to pos files
177 | cameras: comma-separated camera list
178 | output: bundle adjustment problem file
179 | ```
180 |
181 | ```bash
182 | bin/build_ba -f /home/user1/{}/points.json /home/user1/{}/intrinsic.json /home/user1/{}/extrinsic.json /home/user1/{}/annotations/\*.pos camera1,camera2,camera3,camera4 /home/user1/ba_problem.txt
183 | ```
184 |
185 | Then run the bundle adjustment:
186 | ```bash
187 | bundle_adjuster --input=input --output=output --intrinsic_adjustment=[fixed, unconstrained, two_pass]
188 | --input: bundle adjustment problem file
189 | --output: bundle adjustment problem file, after processing
190 | --intrinsic_adjustment: type of adjustment to perform
191 | ```
192 |
193 | There are three types of adjustment:
194 | - fixed: the intrinsics are fixed, only the extrinsics are adjusted
195 | - unconstrained: all parameters are adjusted
196 | - two_pass: run once fixed and then unconstrained
197 |
198 | Using our example, run:
199 | ```bash
200 | bin/bundle_adjuster --input=/home/user1/ba_problem.txt --output=/home/user1/processed_ba.txt
201 | ```
202 |
203 | Then convert the bundle adjustment problem file back to camera calibrations:
204 |
205 | ```bash
206 | extract_ba [-h] input intrinsic extrinsic cameras
207 | -h: help
208 | input: bundle adjustment problem file
209 | intrinsic: output pattern for the intrinsic calibration files
210 | extrinsic: output pattern for the extrinsic calibration files
211 | cameras: comma-separated camera list
212 | ```
213 |
214 | In our example it translates to:
215 | ```bash
216 | bin/extract_ba /home/user1/processed_ba.txt /home/user1/{}/intrinsic_ba.json /home/user1/{}/extrinsic_ba.json camera1,camera2,camera3,camera4
217 | ```
218 |
219 | Then you may check the 3D projections again, as shown previously:
220 | ```bash
221 | bin/check3d /home/user1/{}/intrinsic_ba.json /home/user1/{}/extrinsic_ba.json /home/user1/{}/video_frames/000000.bmp camera1,camera2,camera3,camera4
222 | ```
223 |
-------------------------------------------------------------------------------- /scripts/check_3d.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | ####################################################################################################
5 | # Multi-Camera Calibration Suite was built to help with intrinsic and extrinsic multi-camera
6 | # calibration. It also specifically contains a bundle adjustment module to help with the joint
7 | # calibration of the cameras.
8 | #
9 | # Copyright (c) 2017 Idiap Research Institute, http://www.idiap.ch/
10 | # Written by Salim Kayal <salim.kayal@idiap.ch>,
11 | #
12 | # This file is part of Multi-Camera Calibration Suite.
13 | #
14 | # Multi-Camera Calibration Suite is free software: you can redistribute it and/or modify
15 | # it under the terms of the GNU General Public License version 3 as
16 | # published by the Free Software Foundation.
17 | #
18 | # Multi-Camera Calibration Suite is distributed in the hope that it will be useful,
19 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
20 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 | # GNU General Public License for more details.
22 | #
23 | # You should have received a copy of the GNU General Public License
24 | # along with Multi-Camera Calibration Suite. If not, see <http://www.gnu.org/licenses/>.
25 | #################################################################################################### 26 | 27 | __author__ = "Salim Kayal" 28 | __copyright__ = "Copyright 2016, Idiap Research Institute" 29 | __version__ = "1.0" 30 | __maintainer__ = "Salim Kayal" 31 | __email__ = "salim.kayal@idiap.ch" 32 | __status__ = "Production" 33 | __license__ = "GPLv3" 34 | 35 | from argparse import ArgumentParser 36 | from glob import iglob 37 | import numpy as np 38 | import cv2 39 | import json 40 | 41 | class CalibrationInterface(object): 42 | 43 | @classmethod 44 | def __init__(self, images, calibrations): 45 | self.wname = '3D projection' 46 | self.calibrations = calibrations 47 | self.camera_matrices = {camera: value['cam_matrix'] 48 | for camera, value in calibrations.iteritems()} 49 | self.distortion_coefficients = {camera: value['distortion_coefficients'] 50 | for camera, value in calibrations.iteritems()} 51 | self.projection_matrices = {camera: self.projection_matrix(value['rvec'], value['tvec']) 52 | for camera, value in calibrations.iteritems()} 53 | self.images = images 54 | self.cameras = images.keys() 55 | self.camera = 0 56 | self.image_points = {} 57 | self.undistorted_points = {} 58 | self.point3d = None 59 | cv2.namedWindow(self.wname) 60 | cv2.setMouseCallback(self.wname, self.get_points) 61 | 62 | @staticmethod 63 | def projection_matrix(rvec, tvec): 64 | rotation_matrix, _ = cv2.Rodrigues(rvec) 65 | projection = np.hstack((rotation_matrix, tvec)) 66 | return projection 67 | 68 | @classmethod 69 | def get_points(self, event, x, y, flags, param): 70 | camera = self.cameras[self.camera] 71 | camera_matrix = self.camera_matrices[camera] 72 | distortion_coefficients = self.distortion_coefficients[camera] 73 | if event == cv2.EVENT_LBUTTONUP: 74 | pt = (x, y) 75 | self.image_points[camera] = pt 76 | pts = np.asarray(((pt,),)).astype(np.float32) 77 | upt = cv2.undistortPoints(pts, camera_matrix, distortion_coefficients, 78 | None, None, camera_matrix)[0, 0] 79 | self.undistorted_points[camera] = upt 80 | elif event == cv2.EVENT_RBUTTONUP: 81 | try: 82 | del self.image_points[camera] 83 | del self.undistorted_points[camera] 84 | except KeyError: 85 | pass 86 | self.refresh() 87 | 88 | @staticmethod 89 | def solve_projections(projections, points): 90 | matrA = np.zeros((3*len(projections), 4)) 91 | for view, proj in enumerate(projections): 92 | x = points[view][0] 93 | y = points[view][1] 94 | for val in range(4): 95 | matrA[view*3+0, val] = x *proj[2,val] - proj[0, val] 96 | matrA[view*3+1, val] = y *proj[2,val] - proj[1, val] 97 | matrA[view*3+2, val] = x *proj[1,val] - y * proj[0, val] 98 | matrW = np.zeros((3*len(projections), 4)) 99 | matrV = np.zeros((4, 4)) 100 | _, _, v = cv2.SVDecomp(matrA) 101 | point3d = v[3, :] 102 | point3d = point3d/point3d[3] 103 | return (point3d[0], point3d[1], point3d[2]) 104 | 105 | @staticmethod 106 | def make_projections(camera_matrices, projection_matrices): 107 | projections = [] 108 | for camera_matrix, projection_matrix in zip(camera_matrices, projection_matrices): 109 | projections.append(np.matrix(np.matrix(camera_matrix)*np.matrix(projection_matrix))) 110 | return projections 111 | 112 | @classmethod 113 | def build_3d_points(self): 114 | cam_matrices = [] 115 | projections = [] 116 | obspoint = [] 117 | if len(self.undistorted_points) >= 2: 118 | for camera, point in self.undistorted_points.iteritems(): 119 | obspoint.append(point) 120 | cam_matrices.append(self.camera_matrices[camera]) 121 | 
projections.append(self.projection_matrices[camera]) 122 | projs = self.make_projections(cam_matrices, projections) 123 | self.point3d = self.solve_projections(projs, obspoint) 124 | else: 125 | self.point3d = None 126 | 127 | @classmethod 128 | def run(self): 129 | while True: 130 | key = chr(cv2.waitKey(30) & 0xFF) 131 | self.build_3d_points() 132 | if key == 'r': 133 | self.image_points = {} 134 | self.undistorted_points = {} 135 | if key == 'n': 136 | self.camera = (self.camera + 1) % len(self.cameras) 137 | if key == 'p': 138 | self.camera = len(self.cameras) - 1 if self.camera == 0 else self.camera - 1 139 | if key == 'q': 140 | break 141 | self.refresh() 142 | 143 | @classmethod 144 | def refresh(self): 145 | camera = self.cameras[self.camera] 146 | frame = np.copy(self.images[camera]) 147 | impoint = self.image_points.get(camera) 148 | cal = self.calibrations[camera] 149 | if impoint is not None: 150 | cv2.circle(frame, impoint, 3, (255, 0, 0), 2) 151 | elif self.point3d is not None: 152 | opoint = np.array((self.point3d,)) 153 | impoint, _ = cv2.projectPoints(opoint, cal['rvec'], cal['tvec'], cal['cam_matrix'], 154 | cal['distortion_coefficients']) 155 | impoint = tuple(impoint[0, 0].astype(np.int16)) 156 | cv2.circle(frame, impoint, 3, (0, 0, 255), 2) 157 | cv2.imshow(self.wname, frame) 158 | 159 | def parse_args(): 160 | parser = ArgumentParser("options") 161 | parser.add_argument("intrinsic", help="intrinsic calibration pattern") 162 | parser.add_argument("extrinsic", help="extrinsic calibration pattern") 163 | parser.add_argument("images", help="images pattern") 164 | parser.add_argument("cameras", help="list of cameras") 165 | return parser.parse_args() 166 | 167 | def build_calibrations(intrinsic, extrinsic): 168 | calibration = {} 169 | with open(intrinsic, 'r') as icf: 170 | data = json.load(icf) 171 | calibration['cam_matrix'] = np.array(data['intrinsic']) 172 | calibration['distortion_coefficients'] = \ 173 | np.array(data['distortion_coefficients'])\ 174 | .reshape((len(data['distortion_coefficients']), 1)) 175 | with open(extrinsic, 'r') as ecf: 176 | data = json.load(ecf) 177 | calibration['tvec'] = np.array(data['tvec'])\ 178 | .reshape((len(data['tvec']), 1)) 179 | calibration['rvec'] = np.array(data['rvec'])\ 180 | .reshape((len(data['rvec']), 1)) 181 | return calibration 182 | 183 | def read_calibration(cameras, intrinsic, extrinsic): 184 | calibrations = {} 185 | for camera in cameras: 186 | intrinsic_path = intrinsic[0] + camera + intrinsic[1] 187 | extrinsic_path = extrinsic[0] + camera + extrinsic[1] 188 | calibrations[camera] = build_calibrations(intrinsic_path, extrinsic_path) 189 | return calibrations 190 | 191 | def read_images(cameras, images): 192 | return {camera: cv2.imread(images[0]+camera+images[1]) for camera in cameras} 193 | 194 | def read_data(cameras, intrinsic_path, extrinsic_path, images_path): 195 | calibrations = read_calibration(cameras, intrinsic_path, extrinsic_path) 196 | images = read_images(cameras, images_path) 197 | return images, calibrations 198 | 199 | def main(): 200 | opts = parse_args() 201 | cameras = opts.cameras.split(',') 202 | intrinsic_path = opts.intrinsic.split('{}') 203 | extrinsic_path = opts.extrinsic.split('{}') 204 | images_path = opts.images.split('{}') 205 | images, calibrations = read_data(cameras, intrinsic_path, extrinsic_path, images_path) 206 | interface = CalibrationInterface(images, calibrations) 207 | interface.run() 208 | 209 | if __name__ == "__main__": 210 | main() 211 | 
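The red dot drawn by check3d comes from `solve_projections()` above, which is a standard DLT triangulation: each undistorted click contributes the linear constraints x*P[2] - P[0] = 0 and y*P[2] - P[1] = 0 on the homogeneous 3D point (the original also stacks a redundant third row), and the right singular vector associated with the smallest singular value of the stacked system is the solution. Below is a minimal, self-contained sketch of the same idea; the camera matrices and the 3D point are made up for illustration and are not part of the toolset.

```python
# Standalone illustration of the DLT triangulation used by solve_projections().
import numpy as np

def triangulate(projections, points):
    # Stack x*P[2] - P[0] and y*P[2] - P[1] for every view.
    rows = []
    for P, (x, y) in zip(projections, points):
        rows.append(x * P[2] - P[0])
        rows.append(y * P[2] - P[1])
    _, _, vt = np.linalg.svd(np.vstack(rows))
    X = vt[-1]                    # null-space direction = homogeneous 3D point
    return X[:3] / X[3]

# Two synthetic cameras, K * [R | t]: one at the origin, one shifted along x.
K = np.array([[800., 0., 320.], [0., 800., 240.], [0., 0., 1.]])
P1 = K.dot(np.hstack((np.eye(3), np.zeros((3, 1)))))
P2 = K.dot(np.hstack((np.eye(3), np.array([[-10.], [0.], [0.]]))))
X_true = np.array([1., 2., 50., 1.])                      # ground-truth point
observed = [P.dot(X_true)[:2] / P.dot(X_true)[2] for P in (P1, P2)]
print(triangulate([P1, P2], observed))                    # ~ [ 1.  2. 50.]
```

build_ba.py reuses the same routine to initialise the 3D points of the bundle adjustment problem; when a point is visible in a single camera only, it falls back to the ground-plane (z = 0) solution of `get_3d_foot()`.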
-------------------------------------------------------------------------------- /scripts/build_ba.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | #################################################################################################### 5 | # Multi-Camera Calibration Suite was built to help with intrinsic and extrinsic multi-camera 6 | # calibration. It also specifically contains a bundle adjustment module to help with the joint 7 | # calibration of the cameras. 8 | # 9 | # Copyright (c) 2017 Idiap Research Institute, http://www.idiap.ch/ 10 | # Written by Salim Kayal , 11 | # 12 | # This file is part of Multi-Camera Calibration Suite. 13 | # 14 | # Multi-Camera Calibration Suite is free software: you can redistribute it and/or modify 15 | # it under the terms of the GNU General Public License version 3 as 16 | # published by the Free Software Foundation. 17 | # 18 | # Multi-Camera Calibration Suite is distributed in the hope that it will be useful, 19 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 20 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 21 | # GNU General Public License for more details. 22 | # 23 | # You should have received a copy of the GNU General Public License 24 | # along with Multi-Camera Calibration Suite. If not, see . 25 | #################################################################################################### 26 | 27 | __author__ = "Salim Kayal" 28 | __copyright__ = "Copyright 2016, Idiap Research Institute" 29 | __version__ = "1.0" 30 | __maintainer__ = "Salim Kayal" 31 | __email__ = "salim.kayal@idiap.ch" 32 | __status__ = "Production" 33 | __license__ = "GPLv3" 34 | 35 | from argparse import ArgumentParser 36 | from os import path 37 | from glob import iglob 38 | from collections import defaultdict 39 | import numpy as np 40 | import json 41 | import cv2 42 | 43 | def parse_args(): 44 | parser = ArgumentParser("options") 45 | parser.add_argument("-f", "--fixed-points", help="fixed point file pattern") 46 | parser.add_argument("intrinsic", help="intrinsic calibration pattern") 47 | parser.add_argument("extrinsic", help="extrinsic calibration pattern") 48 | parser.add_argument("observations", help="observation files pattern") 49 | parser.add_argument("cameras", help="list of cameras") 50 | parser.add_argument("output", help="output file") 51 | return parser.parse_args() 52 | 53 | def read_annotation(annotation_path): 54 | annotation_data = {} 55 | with open(annotation_path, 'r') as pf: 56 | field = None 57 | head = None 58 | feet = None 59 | offset = 0 60 | for i, line in enumerate(pf): 61 | if i == offset: 62 | if line.split(" ")[0] == "\n": 63 | offset += 1 64 | continue 65 | if (i-offset)%4 == 0: 66 | field = line 67 | elif (i-offset+1)%4 == 0: 68 | annotation_data[field] = (feet, head) 69 | else: 70 | values = line.split(' ') 71 | if values[0] == '1': 72 | feet = (int(values[1]), int(values[2])) 73 | elif values[0] == '2': 74 | head = (int(values[1]), int(values[2])) 75 | return annotation_data 76 | 77 | def build_calibrations(intrinsic, extrinsic): 78 | calibration = {} 79 | with open(intrinsic, 'r') as icf: 80 | data = json.load(icf) 81 | calibration['cam_matrix'] = np.array(data['intrinsic']) 82 | calibration['distortion_coefficients'] = \ 83 | np.array(data['distortion_coefficients'])\ 84 | .reshape((len(data['distortion_coefficients']), 1)) 85 | with open(extrinsic, 'r') as ecf: 86 | data = 
json.load(ecf) 87 | calibration['tvec'] = np.array(data['tvec'])\ 88 | .reshape((len(data['tvec']), 1)) 89 | calibration['rvec'] = np.array(data['rvec'])\ 90 | .reshape((len(data['rvec']), 1)) 91 | return calibration 92 | 93 | def build_observations(annotations, camera_matrix, distortion_coefficients): 94 | observations = {} 95 | undistorted_observations = {} 96 | tmp_obs_id = [] 97 | tmp_obs_value = [] 98 | for annotation_path in iglob(annotations): 99 | frame = path.basename(annotation_path).split('.')[0] 100 | annotation_data = read_annotation(annotation_path) 101 | for subject_id, values in annotation_data.iteritems(): 102 | pid = frame+subject_id 103 | tmp_obs_id.append(pid) 104 | tmp_obs_value.append(values[0]) 105 | tmp_obs_value.append(values[1]) 106 | tmp_obs_value = np.array([tmp_obs_value]).astype(np.float32) 107 | tmp_undistorted_values = cv2.undistortPoints(tmp_obs_value, camera_matrix, 108 | distortion_coefficients, None, None, 109 | camera_matrix) \ 110 | if len(tmp_obs_value.shape) == 3 else \ 111 | np.array([]) 112 | for i, pid in enumerate(tmp_obs_id): 113 | observations[pid] = (tmp_obs_value[0, 2*i], tmp_obs_value[0, 2*i+1]) 114 | undistorted_observations[pid] = (tmp_undistorted_values[0, 2*i], 115 | tmp_undistorted_values[0, 2*i+1]) 116 | return observations, undistorted_observations 117 | 118 | def projection_matrix(rvec, tvec): 119 | rotation_matrix, _ = cv2.Rodrigues(rvec) 120 | projection = np.hstack((rotation_matrix, tvec)) 121 | return projection 122 | 123 | def solve_projections(projections, points): 124 | matrA = np.zeros((3*len(projections), 4)) 125 | for view, proj in enumerate(projections): 126 | x = points[view][0] 127 | y = points[view][1] 128 | for val in range(4): 129 | matrA[view*3+0, val] = x *proj[2,val] - proj[0, val] 130 | matrA[view*3+1, val] = y *proj[2,val] - proj[1, val] 131 | matrA[view*3+2, val] = x *proj[1,val] - y * proj[0, val] 132 | matrW = np.zeros((3*len(projections), 4)) 133 | matrV = np.zeros((4, 4)) 134 | _, _, v = cv2.SVDecomp(matrA) 135 | point3d = v[3, :] 136 | point3d = point3d/point3d[3] 137 | return (point3d[0], point3d[1], point3d[2]) 138 | 139 | def make_linear_system(camera_matrices, projection_matrices, points): 140 | projection = None 141 | u_v = None 142 | for point, camera_matrix, projection_matrix \ 143 | in zip(points, camera_matrices, projection_matrices): 144 | if projection is None: 145 | projection = np.matrix(np.matrix(camera_matrix)*np.matrix(projection_matrix)) 146 | u_v = np.matrix([point[0], point[1], 1]).T 147 | else: 148 | projection = np.matrix(np.vstack((projection, 149 | np.matrix(np.matrix(camera_matrix)*np.matrix(projection_matrix))))) 150 | u_v = np.matrix(np.vstack((u_v, np.matrix([point[0], point[1], 1]).T))) 151 | return projection, u_v 152 | 153 | def make_projections(camera_matrices, projection_matrices): 154 | projections = [] 155 | for camera_matrix, projection_matrix in zip(camera_matrices, projection_matrices): 156 | projections.append(np.matrix(np.matrix(camera_matrix)*np.matrix(projection_matrix))) 157 | return projections 158 | 159 | def get_3d_foot(projection, u_v): 160 | p = np.matrix(np.hstack((projection[:, 0:2], projection[:, 3]))) 161 | x_y = np.linalg.solve(p, u_v) 162 | x_y = np.squeeze(np.asarray(x_y)) 163 | return (x_y[0]/x_y[2], x_y[1]/x_y[2], 0) 164 | 165 | def get_3d_head_from_foot(projection, u_v, foot3d): 166 | b = u_v - foot3d[0] * projection[:, 0] - foot3d[1] * projection[:, 1] - projection[:, 3] 167 | z, _, _, _ = np.linalg.lstsq(np.matrix(projection[:, 2]), b) 168 | z = 
np.squeeze(np.asarray(z)) 169 | return (foot3d[0], foot3d[1], z[()]) 170 | 171 | def build_3d_points(observations, camera_matrices, projection_matrices, outliers=None): 172 | points = {} 173 | for pid, obs in observations.iteritems(): 174 | feet = [] 175 | heads = [] 176 | cam_matrices_f = [] 177 | cam_matrices_h = [] 178 | projections_f = [] 179 | projections_h = [] 180 | obspoint = [feet, heads] 181 | cam_matrix = [cam_matrices_f, cam_matrices_h] 182 | projections = [projections_f, projections_h] 183 | for camera, pts in obs.iteritems(): 184 | outlier = None if outliers is None else outliers.get(pid) 185 | filterlist = None if outlier is None else outlier.get(camera) 186 | for i, pt in enumerate(pts): 187 | if pt is not None and (filterlist is None or i not in filterlist): 188 | obspoint[i].append(pts[i]) 189 | cam_matrix[i].append(camera_matrices[camera]) 190 | projections[i].append(projection_matrices[camera]) 191 | projection_f, u_v_f = make_linear_system(cam_matrices_f, projections_f, feet) 192 | projs_f = make_projections(cam_matrices_f, projections_f) 193 | projection_h, u_v_h = make_linear_system(cam_matrices_h, projections_h, heads) 194 | projs_h = make_projections(cam_matrices_h, projections_h) 195 | head3d = None 196 | foot3d = None 197 | if len(feet) == 1: 198 | print "point " + pid[:-1] + " only in one camera : " + camera 199 | foot3d = get_3d_foot(projection_f, u_v_f) 200 | if len(heads) == 1: 201 | head3d = get_3d_head_from_foot(projection_h, u_v_h, foot3d) 202 | elif len(feet) > 1: 203 | foot3d = solve_projections(projs_f, feet) 204 | if len(heads) > 1: 205 | head3d = solve_projections(projs_h, heads) 206 | points[pid] = (foot3d, head3d) 207 | return points 208 | 209 | def check_reprojection_error(cameras, calibrations, observations, points): 210 | total_diffs = None 211 | fltr = defaultdict(lambda:defaultdict(list)) 212 | for camera in cameras: 213 | calibration = calibrations[camera] 214 | outliers = [] 215 | impoints = [] 216 | objpoints = [] 217 | pids = [] 218 | for pid, point in points.iteritems(): 219 | impoint = observations[pid].get(camera) 220 | if impoint is not None: 221 | for i, (ipt, opt) in enumerate(zip(impoint, point)): 222 | if None not in [ipt, opt]: 223 | pids.append((pid, i)) 224 | impoints.append(ipt) 225 | objpoints.append(opt) 226 | if len(objpoints) > 0: 227 | impoints = np.array(impoints) 228 | objpoints = np.array(objpoints) 229 | rep_impoints, _ = cv2.projectPoints(objpoints, calibration['rvec'], calibration['tvec'], 230 | calibration['cam_matrix'], 231 | calibration['distortion_coefficients']) 232 | rep_impoints = np.squeeze(rep_impoints) 233 | diff = impoints - rep_impoints 234 | euclidean_dist = np.sqrt(diff[:, 0]*diff[:,0] + diff[:,1]*diff[:,1]) 235 | total_diffs = euclidean_dist if total_diffs is None else np.hstack((total_diffs, euclidean_dist)) 236 | mean_reprojection_error = np.mean(euclidean_dist) 237 | indices = np.arange(euclidean_dist.shape[0])[euclidean_dist > 5*np.mean(euclidean_dist)] 238 | for index in indices: 239 | outliers.append(pids[index]) 240 | fltr[pids[index][0]][camera].append(pids[index][1]) 241 | print camera + " mean reprojection error : ", mean_reprojection_error, 242 | print "median :", np.median(euclidean_dist), 243 | mask = np.ones(euclidean_dist.shape).astype(np.bool) 244 | mask[indices] = False 245 | print "filtered mean :", np.mean(euclidean_dist[mask]), 246 | print "filtered median :", np.median(euclidean_dist[mask]) 247 | else: 248 | print camera, ' empty' 249 | print "overall mean reprojection error :", 
np.mean(total_diffs), "median :", np.median(total_diffs) 250 | return fltr 251 | 252 | def filter_data(observations, outliers): 253 | new_obs = defaultdict(dict) 254 | for pid, obs in observations.iteritems(): 255 | for camera, pts in obs.iteritems(): 256 | outlier = None if outliers is None else outliers.get(pid) 257 | filterlist = None if outlier is None else outlier.get(camera) 258 | foot = None 259 | head = None 260 | if filterlist is None or 0 not in filterlist: 261 | foot = pts[0] 262 | if filterlist is None or 1 not in filterlist: 263 | head = pts[1] 264 | new_obs[pid][camera] = (foot, head) 265 | return new_obs 266 | 267 | def read_fixed_points(cameras, fixed_points_pattern): 268 | fixed_points = defaultdict(dict) 269 | if fixed_points_pattern is not None: 270 | for camera in cameras: 271 | with open(fixed_points_pattern[0] + camera + fixed_points_pattern[1], 'r') as fpf: 272 | data = json.load(fpf) 273 | for object_point, image_point in zip(data['object_points'], data['image_points']): 274 | fixed_points[tuple(object_point)][camera] = tuple(image_point) 275 | return fixed_points 276 | 277 | def read_calibration(cameras, intrinsic, extrinsic): 278 | calibrations = {} 279 | for camera in cameras: 280 | intrinsic_path = intrinsic[0] + camera + intrinsic[1] 281 | extrinsic_path = extrinsic[0] + camera + extrinsic[1] 282 | calibrations[camera] = build_calibrations(intrinsic_path, extrinsic_path) 283 | return calibrations 284 | 285 | def read_observations(cameras, calibrations, annotations): 286 | observations = defaultdict(dict) 287 | undistorted_observations = defaultdict(dict) 288 | for camera in cameras: 289 | annotations_path = annotations[0] + camera + annotations[1] 290 | tmp_observations, tmp_undistorted_observations = \ 291 | build_observations(annotations_path, calibrations[camera]['cam_matrix'], 292 | calibrations[camera]['distortion_coefficients']) 293 | for pid, value in tmp_observations.iteritems(): 294 | observations[pid][camera] = value 295 | undistorted_observations[pid][camera] = tmp_undistorted_observations[pid] 296 | return observations, undistorted_observations 297 | 298 | def read_data(cameras, intrinsic, extrinsic, annotations, fixed_points_pattern): 299 | fixed_points = read_fixed_points(cameras, fixed_points_pattern) 300 | calibrations = read_calibration(cameras, intrinsic, extrinsic) 301 | observations, undistorted_observations = read_observations(cameras, calibrations, annotations) 302 | camera_matrices = {camera: value['cam_matrix'] for camera, value in calibrations.iteritems()} 303 | projection_matrices = {camera: projection_matrix(value['rvec'], value['tvec']) 304 | for camera, value in calibrations.iteritems()} 305 | points = build_3d_points(undistorted_observations, camera_matrices, projection_matrices) 306 | outliers = check_reprojection_error(cameras, calibrations, observations, points) 307 | observations = filter_data(observations, outliers) 308 | undistorted_observations = filter_data(undistorted_observations, outliers) 309 | points = build_3d_points(undistorted_observations, camera_matrices, projection_matrices, outliers) 310 | check_reprojection_error(cameras, calibrations, observations, points) 311 | return calibrations, observations, points, fixed_points 312 | 313 | def write_ba_problem(cameras, observations, calibrations, points, fixed_points, output_path): 314 | pids = [(key, i) for key, value in points.iteritems() 315 | for i, val in enumerate(value) 316 | if val is not None] 317 | fpids = fixed_points.keys() 318 | n_obs = sum(1 for key, idx in 
pids 319 | for value in observations[key].itervalues() 320 | if value[idx] is not None) 321 | n_fobs = sum(1 for projs in fixed_points.itervalues() for proj in projs.itervalues()) 322 | with open(output_path, 'w') as output: 323 | output.write("{} {} {} {} {}\n".format(len(cameras), len(pids), len(fpids), n_obs, n_fobs)) 324 | for i_cam, camera in enumerate(cameras): 325 | for i_pid, pid in enumerate(pids): 326 | cam_obs = observations[pid[0]].get(camera) 327 | if cam_obs is not None: 328 | obs = cam_obs[pid[1]] 329 | if obs is not None: 330 | output.write("{} {} {} {}\n"\ 331 | .format(i_cam, i_pid, obs[0], obs[1])) 332 | for i_cam, camera in enumerate(cameras): 333 | for i_pid, fpid in enumerate(fpids): 334 | cam_obs = fixed_points[fpid].get(camera) 335 | if cam_obs is not None: 336 | if cam_obs is not None: 337 | output.write("{} {} {} {}\n"\ 338 | .format(i_cam, i_pid, cam_obs[0], cam_obs[1])) 339 | for camera in cameras: 340 | cal = calibrations[camera] 341 | for val in cal['rvec'].squeeze(): 342 | output.write("{}\n".format(val)) 343 | for val in cal['tvec'].squeeze(): 344 | output.write("{}\n".format(val)) 345 | cam_matrix = cal['cam_matrix'].squeeze() 346 | distortion_coefficients = cal['distortion_coefficients'].squeeze() 347 | output.write("{}\n".format(cam_matrix[0, 0])) 348 | output.write("{}\n".format(cam_matrix[1, 1])) 349 | output.write("{}\n".format(cam_matrix[0, 2])) 350 | output.write("{}\n".format(cam_matrix[1, 2])) 351 | output.write("{}\n".format(distortion_coefficients[0])) 352 | output.write("{}\n".format(distortion_coefficients[1])) 353 | output.write("{}\n".format(distortion_coefficients[4])) 354 | output.write("{}\n".format(distortion_coefficients[2])) 355 | output.write("{}\n".format(distortion_coefficients[3])) 356 | for pid in pids: 357 | point = points[pid[0]][pid[1]] 358 | for val in point: 359 | output.write("{}\n".format(val)) 360 | for fpid in fpids: 361 | for val in fpid: 362 | output.write("{}\n".format(val)) 363 | 364 | def main(): 365 | opts = parse_args() 366 | cameras = opts.cameras.split(',') 367 | fixed_points_pattern = None if opts.fixed_points is None else opts.fixed_points.split('{}') 368 | intrinsic = opts.intrinsic.split('{}') 369 | extrinsic = opts.extrinsic.split('{}') 370 | annotations = opts.observations.split('{}') 371 | calibrations, observations, points, fixed_points = read_data(cameras, intrinsic, extrinsic, 372 | annotations, fixed_points_pattern) 373 | write_ba_problem(cameras, observations, calibrations, points, fixed_points, opts.output) 374 | 375 | if __name__ == "__main__": 376 | main() 377 | -------------------------------------------------------------------------------- /COPYING: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. 
We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". 
"Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. 
For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 
204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 
268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. 
But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 
387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. 
You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. 
"Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 
564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 
628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 633 | 634 | <one line to give the program's name and a brief idea of what it does.> 635 | Copyright (C) <year>  <name of author> 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program. If not, see <https://www.gnu.org/licenses/>. 649 | 650 | Also add information on how to contact you by electronic and paper mail. 651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | <program>  Copyright (C) <year>  <name of author> 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License. Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | <https://www.gnu.org/licenses/>. 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs. If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library. If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License. But first, please read 674 | <https://www.gnu.org/licenses/why-not-lgpl.html>. 675 | --------------------------------------------------------------------------------