├── .github └── workflows │ └── ci.yml ├── .gitignore ├── .gitlab-ci.yml ├── CMakeLists.txt ├── LICENSE ├── README.md ├── img ├── deformation_thumbnail.png ├── manifold_thumbnail.png ├── parametrization_thumbnail.png ├── polycurl_reduction_thumbnail.png ├── quad_planarization_thumbnail.png ├── tinyad_logo.png ├── tinyad_logo.svg ├── tinyad_logo_640.png ├── tinyad_square.png ├── tinyad_square.svg ├── tinyad_square_250.png └── tinyad_square_256.png ├── include └── TinyAD │ ├── Detail │ ├── EigenVectorTypedefs.hh │ ├── Element.hh │ ├── EvalSettings.hh │ ├── Parallel.hh │ ├── ScalarFunctionImpl.hh │ ├── ScalarObjectiveTerm.hh │ ├── VectorFunctionImpl.hh │ └── VectorObjectiveTerm.hh │ ├── Operations │ └── SVD.hh │ ├── Scalar.hh │ ├── ScalarFunction.hh │ ├── Support │ ├── Common.hh │ ├── GeometryCentral.hh │ ├── OpenMesh.hh │ ├── PMP.hh │ └── Polymesh.hh │ ├── Utils │ ├── GaussNewtonDirection.hh │ ├── Helpers.hh │ ├── HessianProjection.hh │ ├── LineSearch.hh │ ├── LinearSolver.hh │ ├── NewtonDecrement.hh │ ├── NewtonDirection.hh │ ├── Out.hh │ ├── Timer.hh │ └── ToPassive.hh │ └── VectorFunction.hh ├── scripts └── compare_build_time.bat └── tests ├── ComplexTest.cc ├── CustomDerivativesTest.cc ├── DeferredLambdaTest.cc ├── DynamicElementsTest.cc ├── EigenTest.cc ├── ElementTest.cc ├── ExceptionTest.cc ├── GaussNewtonTest.cc ├── HandleTypeTest.cc ├── Meshes.hh ├── NewtonTest.cc ├── OpenMPTest.cc ├── PerformanceTest.cc ├── SVDTest.cc ├── ScalarFunctionTest.cc ├── ScalarTestBinaryOperators.cc ├── ScalarTestComparison.cc ├── ScalarTestConstructors.cc ├── ScalarTestHessianBlock.cc ├── ScalarTestMisc.cc ├── ScalarTestUnaryOperators.cc ├── SwitchSolverTest.cc └── VectorFunctionTest.cc /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | on: [push] 3 | jobs: 4 | Ubuntu: 5 | strategy: 6 | fail-fast: false 7 | matrix: 8 | build_type: [Release, Debug] 9 | runs-on: ubuntu-latest 10 | steps: 11 | - name: Install Dependencies 12 | run: | 13 | sudo apt-get update 14 | sudo apt-get install libeigen3-dev 15 | - name: Check out TinyAD 16 | uses: actions/checkout@v3 17 | - name: Configure 18 | run: cmake -B ${{ github.workspace }}/build -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} -DTINYAD_UNIT_TESTS=ON . 19 | - name: Build 20 | run: cmake --build ${{ github.workspace }}/build --config ${{ matrix.build_type }} --parallel $(nproc) 21 | - name: Run Tests 22 | run: ${{ github.workspace }}/build/TinyAD-Tests --gtest_output="xml:test-report.xml" 23 | - name: Create Test Report 24 | if: always() 25 | uses: test-summary/action@v2 26 | with: 27 | paths: test-report.xml 28 | 29 | macOS: 30 | strategy: 31 | fail-fast: false 32 | matrix: 33 | build_type: [Release, Debug] 34 | runs-on: macos-latest 35 | steps: 36 | - name: Install Dependencies 37 | run: | 38 | brew update 39 | brew install eigen libomp 40 | echo "LDFLAGS=-L/opt/homebrew/opt/libomp/lib" >> $GITHUB_ENV 41 | echo "CPPFLAGS=-I/opt/homebrew/opt/libomp/include" >> $GITHUB_ENV 42 | - name: Check out TinyAD 43 | uses: actions/checkout@v3 44 | - name: Configure 45 | run: cmake -B ${{ github.workspace }}/build -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} -DTINYAD_UNIT_TESTS=ON . 
46 | - name: Build 47 | run: cmake --build ${{ github.workspace }}/build --config ${{ matrix.build_type }} --parallel $(sysctl -n hw.ncpu) 48 | - name: Run Tests 49 | run: ${{ github.workspace }}/build/TinyAD-Tests --gtest_output="xml:test-report.xml" 50 | - name: Create Test Report 51 | if: always() 52 | uses: test-summary/action@v2 53 | with: 54 | paths: test-report.xml 55 | 56 | Windows: 57 | strategy: 58 | fail-fast: false 59 | matrix: 60 | vs_version: [VS19, VS22] 61 | build_type: [Release, Debug] 62 | include: 63 | - vs_version: VS19 64 | os: windows-2019 65 | - vs_version: VS22 66 | os: windows-2022 67 | runs-on: ${{ matrix.os }} 68 | env: 69 | BUILD_TYPE: ${{ matrix.build_type }} 70 | steps: 71 | - name: Install Dependencies 72 | run: | 73 | C:\vcpkg\bootstrap-vcpkg.bat 74 | vcpkg install eigen3:x64-windows 75 | - name: Check out TinyAD 76 | uses: actions/checkout@v3 77 | - name: Configure 78 | run: cmake -B ${{ github.workspace }}/build -DCMAKE_TOOLCHAIN_FILE=C:/vcpkg/scripts/buildsystems/vcpkg.cmake -DCMAKE_BUILD_TYPE=${{ env.BUILD_TYPE }} -DTINYAD_UNIT_TESTS=ON . 79 | - name: Build 80 | run: cmake --build ${{ github.workspace }}/build --config ${{ matrix.build_type }} --parallel 81 | - name: Run Tests 82 | run: ${{ github.workspace }}\build\${{ env.BUILD_TYPE }}\TinyAD-Tests.exe --gtest_output="xml:test-report.xml" 83 | - name: Create Test Report 84 | if: always() 85 | uses: test-summary/action@v2 86 | with: 87 | paths: test-report.xml 88 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | .vs 3 | .vscode 4 | build 5 | build-*/ 6 | build_times.csv 7 | CMakeSettings.json 8 | CMakeLists.txt.user 9 | -------------------------------------------------------------------------------- /.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | variables: 2 | LINUX_GTEST_ROOT: "~/sw/gtest-1.10.0" 3 | APPLE_CMAKE: "/opt/local/bin/cmake" 4 | APPLE_GTEST_ROOT: "~/sw/gtest-1.7.0/" 5 | WINDOWS_EIGEN3_DIR: "e:\\libs\\general\\Eigen3.3.9\\share\\eigen3\\cmake" 6 | VS2017_GTEST_ROOT: "e:\\libs\\VS2017\\x64\\gtest-1.7.0" 7 | VS2017_GENERATOR: "Visual Studio 15 Win64" 8 | VS2019_GTEST_ROOT: "e:\\libs\\VS2019\\x64\\gtest-1.10.0" 9 | VS2019_GENERATOR: "Visual Studio 16 2019" 10 | 11 | stages: 12 | - build 13 | - test 14 | 15 | # ##################################### 16 | # Linux Debug 17 | # ##################################### 18 | 19 | build-linux-debug: 20 | stage: build 21 | tags: 22 | - Linux 23 | script: 24 | - echo "Hello, $GITLAB_USER_LOGIN!" 25 | - echo "On branch $CI_COMMIT_BRANCH!" 26 | - mkdir build 27 | - cd build 28 | - cmake -DGTEST_ROOT=$LINUX_GTEST_ROOT -DTINYAD_UNIT_TESTS=ON -DCMAKE_BUILD_TYPE=Debug -DCMAKE_CXX_FLAGS="-D_GLIBCXX_DEBUG -D_GLIBCXX_DEBUG_PEDANTIC" .. 29 | - make -j $(nproc) 30 | artifacts: 31 | paths: 32 | - build 33 | expire_in: 1 hour 34 | 35 | test-linux-debug: 36 | stage: test 37 | tags: 38 | - Linux 39 | dependencies: [build-linux-debug] 40 | needs: [build-linux-debug] 41 | script: 42 | - cd build 43 | - ./TinyAD-Tests --gtest_output="xml:report.xml" 44 | artifacts: 45 | when: always 46 | reports: 47 | junit: build/report.xml 48 | 49 | # ##################################### 50 | # Linux Release 51 | # ##################################### 52 | 53 | build-linux-release: 54 | stage: build 55 | tags: 56 | - Linux 57 | script: 58 | - echo "Hello, $GITLAB_USER_LOGIN!" 
59 | - echo "On branch $CI_COMMIT_BRANCH!" 60 | - mkdir build 61 | - cd build 62 | - cmake -DGTEST_ROOT=$LINUX_GTEST_ROOT -DTINYAD_UNIT_TESTS=ON -DCMAKE_BUILD_TYPE=Release .. 63 | - make -j $(nproc) 64 | artifacts: 65 | paths: 66 | - build 67 | expire_in: 1 hour 68 | 69 | test-linux-release: 70 | stage: test 71 | tags: 72 | - Linux 73 | dependencies: [build-linux-release] 74 | needs: [build-linux-release] 75 | script: 76 | - cd build 77 | - ./TinyAD-Tests --gtest_output="xml:report.xml" 78 | artifacts: 79 | when: always 80 | reports: 81 | junit: build/report.xml 82 | 83 | # ##################################### 84 | # Apple Debug 85 | # ##################################### 86 | 87 | build-apple-debug: 88 | stage: build 89 | tags: 90 | - Apple 91 | script: 92 | - echo "Hello, $GITLAB_USER_LOGIN!" 93 | - echo "On branch $CI_COMMIT_BRANCH!" 94 | - mkdir build 95 | - cd build 96 | - $APPLE_CMAKE -DGTEST_ROOT=$APPLE_GTEST_ROOT -DTINYAD_UNIT_TESTS=ON -DCMAKE_BUILD_TYPE=Debug .. 97 | - make -j4 98 | artifacts: 99 | paths: 100 | - build 101 | expire_in: 1 hour 102 | 103 | test-apple-debug: 104 | stage: test 105 | tags: 106 | - Apple 107 | dependencies: [build-apple-debug] 108 | needs: [build-apple-debug] 109 | script: 110 | - cd build 111 | - ./TinyAD-Tests --gtest_output="xml:report.xml" 112 | artifacts: 113 | when: always 114 | reports: 115 | junit: build/report.xml 116 | 117 | # ##################################### 118 | # Apple Release 119 | # ##################################### 120 | 121 | build-apple-release: 122 | stage: build 123 | tags: 124 | - Apple 125 | script: 126 | - echo "Hello, $GITLAB_USER_LOGIN!" 127 | - echo "On branch $CI_COMMIT_BRANCH!" 128 | - mkdir build 129 | - cd build 130 | - $APPLE_CMAKE -DGTEST_ROOT=$APPLE_GTEST_ROOT -DTINYAD_UNIT_TESTS=ON -DCMAKE_BUILD_TYPE=Release .. 131 | - make -j4 132 | artifacts: 133 | paths: 134 | - build 135 | expire_in: 1 hour 136 | 137 | test-apple-release: 138 | stage: test 139 | tags: 140 | - Apple 141 | dependencies: [build-apple-release] 142 | needs: [build-apple-release] 143 | script: 144 | - cd build 145 | - ./TinyAD-Tests --gtest_output="xml:report.xml" 146 | artifacts: 147 | when: always 148 | reports: 149 | junit: build/report.xml 150 | 151 | # ##################################### 152 | # Apple M1 Debug 153 | # ##################################### 154 | 155 | build-apple-m1-debug: 156 | stage: build 157 | tags: 158 | - AppleM1 159 | script: 160 | - echo "Hello, $GITLAB_USER_LOGIN!" 161 | - echo "On branch $CI_COMMIT_BRANCH!" 162 | - mkdir build 163 | - cd build 164 | - cmake -DTINYAD_UNIT_TESTS=ON -DCMAKE_BUILD_TYPE=Debug .. 165 | - make -j4 166 | artifacts: 167 | paths: 168 | - build 169 | expire_in: 1 hour 170 | 171 | test-apple-m1-debug: 172 | stage: test 173 | tags: 174 | - AppleM1 175 | dependencies: [build-apple-m1-debug] 176 | needs: [build-apple-m1-debug] 177 | script: 178 | - cd build 179 | - ./TinyAD-Tests --gtest_output="xml:report.xml" 180 | artifacts: 181 | when: always 182 | reports: 183 | junit: build/report.xml 184 | 185 | # ##################################### 186 | # Apple M1 Release 187 | # ##################################### 188 | 189 | build-apple-m1-release: 190 | stage: build 191 | tags: 192 | - AppleM1 193 | script: 194 | - echo "Hello, $GITLAB_USER_LOGIN!" 195 | - echo "On branch $CI_COMMIT_BRANCH!" 196 | - mkdir build 197 | - cd build 198 | - cmake -DTINYAD_UNIT_TESTS=ON -DCMAKE_BUILD_TYPE=Release .. 
199 | - make -j4 200 | artifacts: 201 | paths: 202 | - build 203 | expire_in: 1 hour 204 | 205 | test-apple-m1-release: 206 | stage: test 207 | tags: 208 | - AppleM1 209 | dependencies: [build-apple-m1-release] 210 | needs: [build-apple-m1-release] 211 | script: 212 | - cd build 213 | - ./TinyAD-Tests --gtest_output="xml:report.xml" 214 | artifacts: 215 | when: always 216 | reports: 217 | junit: build/report.xml 218 | 219 | # ##################################### 220 | # VS2017 Debug 221 | # ##################################### 222 | 223 | build-VS2017-debug: 224 | stage: build 225 | tags: 226 | - VS2017 227 | script: 228 | - echo "Hello, $GITLAB_USER_LOGIN!" 229 | - echo "On branch $CI_COMMIT_BRANCH!" 230 | - mkdir build 231 | - cd build 232 | - C:\"Program Files"\CMake\bin\cmake.exe -G "$VS2017_GENERATOR" -DEigen3_DIR="$WINDOWS_EIGEN3_DIR" -DGTEST_ROOT="$VS2017_GTEST_ROOT" -DTINYAD_UNIT_TESTS=ON .. 233 | - C:\"Program Files (x86)"\"Microsoft Visual Studio"\2017\Professional\Common7\IDE\devenv.com /Build "Debug" TinyAD.sln /Project "ALL_BUILD" 234 | artifacts: 235 | paths: 236 | - build 237 | expire_in: 1 hour 238 | 239 | test-VS2017-debug: 240 | stage: test 241 | tags: 242 | - VS2017 243 | dependencies: [build-VS2017-debug] 244 | needs: [build-VS2017-debug] 245 | script: 246 | - cd "build\Debug" 247 | - .\TinyAD-Tests.exe --gtest_output="xml:report.xml" 248 | artifacts: 249 | when: always 250 | reports: 251 | junit: build/Debug/report.xml 252 | 253 | # ##################################### 254 | # VS2017 Release 255 | # ##################################### 256 | 257 | build-VS2017-release: 258 | stage: build 259 | tags: 260 | - VS2017 261 | script: 262 | - echo "Hello, $GITLAB_USER_LOGIN!" 263 | - echo "On branch $CI_COMMIT_BRANCH!" 264 | - mkdir build 265 | - cd build 266 | - C:\"Program Files"\CMake\bin\cmake.exe -G "$VS2017_GENERATOR" -DEigen3_DIR="$WINDOWS_EIGEN3_DIR" -DGTEST_ROOT="$VS2017_GTEST_ROOT" -DTINYAD_UNIT_TESTS=ON .. 267 | - C:\"Program Files (x86)"\"Microsoft Visual Studio"\2017\Professional\Common7\IDE\devenv.com /Build "Release" TinyAD.sln /Project "ALL_BUILD" 268 | artifacts: 269 | paths: 270 | - build 271 | expire_in: 1 hour 272 | 273 | test-VS2017-release: 274 | stage: test 275 | tags: 276 | - VS2017 277 | dependencies: [build-VS2017-release] 278 | needs: [build-VS2017-release] 279 | script: 280 | - cd "build\Release" 281 | - .\TinyAD-Tests.exe --gtest_output="xml:report.xml" 282 | artifacts: 283 | when: always 284 | reports: 285 | junit: build/Release/report.xml 286 | 287 | # ##################################### 288 | # VS2019 Debug 289 | # ##################################### 290 | 291 | build-VS2019-debug: 292 | stage: build 293 | tags: 294 | - VS2019 295 | - Qt5141 # Workaround to avoid runner "bajer" which doesn't have gtest in the expected folder. 296 | script: 297 | - echo "Hello, $GITLAB_USER_LOGIN!" 298 | - echo "On branch $CI_COMMIT_BRANCH!" 299 | - mkdir build 300 | - cd build 301 | - C:\"Program Files"\CMake\bin\cmake.exe -G "$VS2019_GENERATOR" -DCMAKE_GENERATOR_PLATFORM=x64 -DEigen3_DIR="$WINDOWS_EIGEN3_DIR" -DGTEST_ROOT="$VS2019_GTEST_ROOT" -DTINYAD_UNIT_TESTS=ON .. 
302 | - C:\"Program Files (x86)"\"Microsoft Visual Studio"\2019\Professional\Common7\IDE\devenv.com /Build "Debug" TinyAD.sln /Project "ALL_BUILD" 303 | artifacts: 304 | paths: 305 | - build 306 | expire_in: 1 hour 307 | 308 | test-VS2019-debug: 309 | stage: test 310 | tags: 311 | - VS2019 312 | dependencies: [build-VS2019-debug] 313 | needs: [build-VS2019-debug] 314 | script: 315 | - cd "build\Debug" 316 | - .\TinyAD-Tests.exe --gtest_output="xml:report.xml" 317 | artifacts: 318 | when: always 319 | reports: 320 | junit: build/Debug/report.xml 321 | 322 | # ##################################### 323 | # VS2019 Release 324 | # ##################################### 325 | 326 | build-VS2019-release: 327 | stage: build 328 | tags: 329 | - VS2019 330 | - Qt5141 # Workaround to avoid runner "bajer" which doesn't have gtest in the expected folder. 331 | script: 332 | - echo "Hello, $GITLAB_USER_LOGIN!" 333 | - echo "On branch $CI_COMMIT_BRANCH!" 334 | - mkdir build 335 | - cd build 336 | - C:\"Program Files"\CMake\bin\cmake.exe -G "$VS2019_GENERATOR" -DCMAKE_GENERATOR_PLATFORM=x64 -DEigen3_DIR="$WINDOWS_EIGEN3_DIR" -DGTEST_ROOT="$VS2019_GTEST_ROOT" -DTINYAD_UNIT_TESTS=ON .. 337 | - C:\"Program Files (x86)"\"Microsoft Visual Studio"\2019\Professional\Common7\IDE\devenv.com /Build "Release" TinyAD.sln /Project "ALL_BUILD" 338 | artifacts: 339 | paths: 340 | - build 341 | expire_in: 1 hour 342 | 343 | test-VS2019-release: 344 | stage: test 345 | tags: 346 | - VS2019 347 | dependencies: [build-VS2019-release] 348 | needs: [build-VS2019-release] 349 | script: 350 | - cd "build\Release" 351 | - .\TinyAD-Tests.exe --gtest_output="xml:report.xml" 352 | artifacts: 353 | when: always 354 | reports: 355 | junit: build/Release/report.xml 356 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # This file is part of TinyAD and released under the MIT license. 
2 | # Author: Patrick Schmidt 3 | 4 | cmake_minimum_required(VERSION 3.10) 5 | 6 | project(TinyAD) 7 | 8 | # Set c++ version 9 | set(CMAKE_CXX_STANDARD 17) 10 | set(CMAKE_CXX_STANDARD_REQUIRED ON) 11 | 12 | # Find Eigen if not already present 13 | if (NOT TARGET Eigen3::Eigen) 14 | find_package(Eigen3 REQUIRED) 15 | endif() 16 | 17 | # Find OpenMP (optional) 18 | if (NOT TARGET OpenMP::OpenMP_CXX) 19 | if(APPLE) 20 | # Homebrew installs libomp here (keg-only) 21 | set(OpenMP_C_FLAGS "-Xpreprocessor -fopenmp -I/opt/homebrew/opt/libomp/include") 22 | set(OpenMP_C_LIB_NAMES "omp") 23 | set(OpenMP_CXX_FLAGS "-Xpreprocessor -fopenmp -I/opt/homebrew/opt/libomp/include") 24 | set(OpenMP_CXX_LIB_NAMES "omp") 25 | set(OpenMP_omp_LIBRARY /opt/homebrew/opt/libomp/lib/libomp.dylib) 26 | endif() 27 | 28 | find_package(OpenMP) 29 | endif() 30 | 31 | if (NOT TARGET OpenMP::OpenMP_CXX) 32 | message("WARNING: Building TinyAD without OpenMP!") 33 | endif() 34 | 35 | # Output build type 36 | message("Building TinyAD in " ${CMAKE_BUILD_TYPE} " mode") 37 | 38 | # Create library target 39 | file(GLOB_RECURSE TINYAD_HEADERS include/*.hh) 40 | add_library(TinyAD INTERFACE) 41 | #target_sources(TinyAD INTERFACE ${TINYAD_HEADERS}) # IDE shows TinyAD headers for each target 42 | add_custom_target(TinyAD-Headers SOURCES ${TINYAD_HEADERS}) # IDE shows TinyAD headers in one place only 43 | target_include_directories(TinyAD INTERFACE include) 44 | target_compile_options(TinyAD INTERFACE $<$:/bigobj>) 45 | target_link_libraries(TinyAD INTERFACE Eigen3::Eigen) 46 | if (TARGET OpenMP::OpenMP_CXX) 47 | target_link_libraries(TinyAD INTERFACE OpenMP::OpenMP_CXX) 48 | endif() 49 | 50 | # Create unit tests target 51 | option(TINYAD_UNIT_TESTS "" OFF) 52 | if (${TINYAD_UNIT_TESTS}) 53 | enable_testing() 54 | 55 | message("Building TinyAD with unit tests.") 56 | 57 | if (TARGET GTest::gtest_main) 58 | message("Found existing gtest target.") 59 | else() 60 | message("Downloading googletest.") 61 | 62 | # Avoid DOWNLOAD_EXTRACT_TIMESTAMP warning 63 | if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.24.0") 64 | cmake_policy(SET CMP0135 NEW) 65 | endif() 66 | 67 | # From https://google.github.io/googletest/quickstart-cmake.html 68 | include(FetchContent) 69 | FetchContent_Declare( 70 | googletest 71 | URL https://github.com/google/googletest/archive/6910c9d9165801d8827d628cb72eb7ea9dd538c5.zip 72 | ) 73 | # For Windows: Prevent overriding the parent project's compiler/linker settings 74 | set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) 75 | FetchContent_MakeAvailable(googletest) 76 | endif() 77 | 78 | file(GLOB_RECURSE TINYAD_TEST_HEADERS tests/*.hh) 79 | file(GLOB_RECURSE TINYAD_TEST_SOURCES tests/*.cc) 80 | add_executable(TinyAD-Tests ${TINYAD_TEST_HEADERS} ${TINYAD_TEST_SOURCES}) 81 | add_dependencies(TinyAD-Tests TinyAD) 82 | target_include_directories(TinyAD-Tests PUBLIC "include") 83 | target_link_libraries(TinyAD-Tests PRIVATE 84 | GTest::gtest_main 85 | TinyAD 86 | ) 87 | add_test(TinyAD-Tests TinyAD-Tests) 88 | endif() 89 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Patrick Schmidt 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, 
publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![](img/tinyad_logo.png) 2 | 3 | # TinyAD 4 | 5 | TinyAD is a **C++ header-only** library for **second-order automatic differentiation**. Small dense problems are differentiated in forward mode, which allows unrestricted looping and branching. An interface for per-element functions allows convenient differentiation of large sparse problems, which are typical in geometry processing on meshes. For more details see our [paper](https://graphics.rwth-aachen.de/media/papers/341/TinyAD.pdf) or watch our [talk](https://youtu.be/FGG07HoVFEk). 6 | 7 | # Integration 8 | TinyAD has been tested on Linux, Mac, and Windows (VS >= 2017). It only requires: 9 | 10 | * A C++17 compiler 11 | * Eigen (e.g. `sudo apt-get install libeigen3-dev`) 12 | 13 | To use TinyAD in your existing project, include either `TinyAD/Scalar.hh`, `TinyAD/ScalarFunction.hh`, or `TinyAD/VectorFunction.hh`. 14 | 15 | A minimal example project using TinyAD with [libigl](https://github.com/libigl/libigl/) is available [here](https://github.com/alecjacobson/libigl-tinyad-example). 16 | 17 | # Basic Usage 18 | We provide the scalar type `TinyAD::Double` as a drop-in replacement for `double`. For small problems, simply choose the number of variables `k` and generate a vector of active variables. Then, perform computations as usual (e.g. using Eigen) and query the gradient and Hessian of any intermediate variable: 19 | ```c++ 20 | #include 21 | 22 | // Choose autodiff scalar type for 3 variables 23 | using ADouble = TinyAD::Double<3>; 24 | 25 | // Init a 3D vector of active variables and a 3D vector of passive variables 26 | Eigen::Vector3 x = ADouble::make_active({0.0, -1.0, 1.0}); 27 | Eigen::Vector3 y(2.0, 3.0, 5.0); 28 | 29 | // Compute angle using Eigen functions and retrieve gradient and Hessian w.r.t. x 30 | ADouble angle = acos(x.dot(y) / (x.norm() * y.norm())); 31 | Eigen::Vector3d g = angle.grad; 32 | Eigen::Matrix3d H = angle.Hess; 33 | ``` 34 | All derivative computations are inlined and thus available for compiler optimization. As no taping is needed in forward mode, any kind of run time branching is possible. 35 | 36 | # Sparse Interface 37 | Sparse problems on meshes can be implemented using our `ScalarFunction` or `VectorFunction` interfaces. Just pass a set of _variable handles_, a set of _element handles_, and a lambda function to be evaluated for each element. 
For example, in a planar parametrization problem, the variables are 2D positions per vertex, and the summands of the objective function are defined per face, each accessing 3 vertices: 38 | ```c++ 39 | #include 40 | 41 | // Set up a function with 2D vertex positions as variables 42 | auto func = TinyAD::scalar_function<2>(mesh.vertices()); 43 | 44 | // Add an objective term per triangle. Each connecting 3 vertices 45 | func.add_elements<3>(mesh.faces(), [&] (auto& element) 46 | { 47 | // Element is evaluated with either double or TinyAD::Double<6> 48 | using T = TINYAD_SCALAR_TYPE(element); 49 | 50 | // Get variable 2D vertex positions of triangle t 51 | OpenMesh::SmartFaceHandle t = element.handle; 52 | Eigen::Vector2 a = element.variables(t.halfedge().to()); 53 | Eigen::Vector2 b = element.variables(t.halfedge().next().to()); 54 | Eigen::Vector2 c = element.variables(t.halfedge().from()); 55 | 56 | return ... 57 | }); 58 | 59 | // Evaluate the funcion using any of these methods: 60 | double f = func.eval(x); 61 | auto [f, g] = func.eval_with_gradient(x); 62 | auto [f, g, H] = func.eval_with_derivatives(x); 63 | auto [f, g, H_proj] = func.eval_with_hessian_proj(x); 64 | ... 65 | ``` 66 | 67 | Handle types from multiple mesh data structures are supported, e.g., OpenMesh, polymesh, geometry-central, or libigl-style matrices. Support for new types can be added by overloading a single function (see [`TinyAD/Support/Common.hh`](include/TinyAD/Support/Common.hh)). 68 | 69 | # Examples 70 | To get started, take a look at one of our [TinyAD-Examples](https://github.com/patr-schm/TinyAD-Examples). 71 | We implement objective functions and basic solvers for typical geometry processing tasks using various mesh libraries. 72 | 73 | ## Surface Mesh Parametrization 74 | ![](img/parametrization_thumbnail.png) 75 | 76 | We compute a piecewise linear map from a disk-topology triangle mesh to the plane and optimize the symmetric Dirichlet energy via a Projected-Newton solver. 77 | This can be the basis to experiment with more specialized algorithms or more complex objective functions. 78 | We provide examples using different mesh representations: 79 | 80 | [`parametrization_openmesh.cc`](https://github.com/patr-schm/TinyAD-Examples/blob/main/apps/parametrization_openmesh.cc) 81 | [`parametrization_polymesh.cc`](https://github.com/patr-schm/TinyAD-Examples/blob/main/apps/parametrization_polymesh.cc) 82 | [`parametrization_geometrycentral.cc`](https://github.com/patr-schm/TinyAD-Examples/blob/main/apps/parametrization_geometrycentral.cc) 83 | [`parametrization_libigl.cc`](https://github.com/patr-schm/TinyAD-Examples/blob/main/apps/parametrization_libigl.cc) 84 | 85 | ## Volume Mesh Deformation 86 | ![](img/deformation_thumbnail.png) 87 | 88 | In this example, we compute a 3D deformation of a tetrahedral mesh by optimizing different distortion energies subject to position constraints: 89 | 90 | [`deformation.cc`](https://github.com/patr-schm/TinyAD-Examples/blob/main/apps/deformation.cc) 91 | 92 | ## Frame Field Optimization 93 | ![](img/polycurl_reduction_thumbnail.png) 94 | 95 | Here, we show how to re-implement the non-linear frame field optimization algorithm presented in [Integrable PolyVector Fields [Diamanti et al. 2015]](https://igl.ethz.ch/projects/integrable/), using very little code. 
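Returning to the parametrization example above: the individual pieces can be combined into a small Projected-Newton loop. The following is a hedged sketch only. It assumes the `func` from the Sparse Interface snippet, a hypothetical `initial_uv(v)` accessor for start values, and uses the solver utilities shipped in `TinyAD/Utils/` (`LinearSolver`, `newton_direction`, `newton_decrement`, `line_search`) with the call pattern from the TinyAD-Examples apps, which may differ in detail from your TinyAD version:

```c++
#include <TinyAD/Utils/LinearSolver.hh>
#include <TinyAD/Utils/NewtonDirection.hh>
#include <TinyAD/Utils/NewtonDecrement.hh>
#include <TinyAD/Utils/LineSearch.hh>

// Assemble initial variable vector x from per-vertex UVs (initial_uv is a placeholder)
Eigen::VectorXd x = func.x_from_data([&] (auto v) { return initial_uv(v); });

// Projected-Newton iterations on the positive-definite projected Hessian
TinyAD::LinearSolver solver;
for (int i = 0; i < 100; ++i)
{
    auto [f, g, H_proj] = func.eval_with_hessian_proj(x);
    Eigen::VectorXd d = TinyAD::newton_direction(g, H_proj, solver);
    if (TinyAD::newton_decrement(d, g) < 1e-8)
        break; // Converged
    x = TinyAD::line_search(x, d, f, g, [&] (const Eigen::VectorXd& x_new) { return func.eval(x_new); });
}

// Write the optimized UVs back to user data
func.x_to_data(x, [&] (auto v, const Eigen::Vector2d& uv) { /* store uv at vertex v */ });
```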
96 | Given an input frame field (two tangent vectors per triangle), the algorithm optimizes an objective based on complex polynomials via a Gauss-Newton method: 97 | 98 | [`polycurl_reduction.cc`](https://github.com/patr-schm/TinyAD-Examples/blob/main/apps/polycurl_reduction.cc) 99 | 100 | ## Manifold Optimization 101 | ![](img/manifold_thumbnail.png) 102 | 103 | We optimize a map from a genus 0 surface to the sphere using a technique from manifold optimization. Vertex trajectories on the sphere are parametrized via tangent vectors and a retraction operator: 104 | 105 | [`manifold_optimization.cc`](https://github.com/patr-schm/TinyAD-Examples/blob/main/apps/manifold_optimization.cc) 106 | 107 | ## Quad Mesh Planarization 108 | ![](img/quad_planarization_thumbnail.png) 109 | 110 | In this example, we optimize the 3D vertex positions of a quad mesh for face planarity. 111 | We implement one of the objective terms from [Geometric Modeling with Conical Meshes and Developable Surfaces [Liu 2006]](http://www.geometrie.tugraz.at/wallner/quad06.pdf): 112 | 113 | [`quad_planarization.cc`](https://github.com/patr-schm/TinyAD-Examples/blob/main/apps/quad_planarization.cc) 114 | 115 | 116 | # Advanced Usage and Common Pitfalls 117 | * Internal floating point types other than `double` can be used via `TinyAD::Scalar`. 118 | * A gradient-only mode is availabe via `TinyAD::Scalar`. 119 | * Use `to_passive(...)` to explicitly cast an active variable back to its scalar type without derivatives. E.g. to implement assertions or branching which should not be differentiated. 120 | * [Avoid using the `auto` keyword](https://eigen.tuxfamily.org/dox/TopicPitfalls.html#TopicPitfalls_auto_keyword) when working with Eigen expressions. This is a limitation of Eigen and can produce unexpected results due to deleted temporary objects. 121 | * Use e.g. `cos(...)` instead of `std::cos(...)`. 122 | * A common source for errors in the implementation of objective functions (per-element lambdas passed to `func.add_elements(...)`) are multiple return statements of different types. This may lead to a compiler error, but can be prevented by explicitly stating the correct return type via: 123 | `func.add_elements<...>(..., [&] (auto& element) -> TINYAD_SCALAR_TYPE(element) { return ... });` 124 | * Note that calls to math functions involving TinyAD types are only legal if the derivatives exist and are finite for the given function argument. E.g. it is illegal to call `acos(x)` with `x==1.0` since the derivative of acos is unbounded at 1.0. 125 | 126 | # Unit Tests 127 | When contributing to TinyAD, please run and extend the unit tests located in [`TinyAD/tests`](https://github.com/patr-schm/TinyAD/tree/main/tests). 128 | 129 | You can build and run the tests via: 130 | ``` 131 | mkdir build 132 | cd build 133 | cmake -DTINYAD_UNIT_TESTS=ON .. 134 | make -j4 135 | ./TinyAD-Tests 136 | ``` 137 | 138 | Alternatively, you can use the [TinyAD-Examples](https://github.com/patr-schm/TinyAD-Examples) project which builds the unit tests by default. 139 | 140 | # Authors 141 | 142 | * [Patrick Schmidt](https://www.graphics.rwth-aachen.de/person/232/) 143 | * [Janis Born](https://www.graphics.rwth-aachen.de/person/97/) 144 | * [David Bommes](http://cgg.unibe.ch/person/1/) 145 | * [Marcel Campen](http://graphics.cs.uos.de/) 146 | * [Leif Kobbelt](https://www.graphics.rwth-aachen.de/person/3/) 147 | 148 | We thank all test users and contributors. 
In particular: Alexandra Heuschling, Anton Florey, Dörte Pieper, Joe Jakobi, Philipp Domagalski, and David Jourdan. 149 | 150 | # Cite TinyAD 151 | If you use TinyAD in your academic work, please cite our paper: 152 | ``` 153 | @article{schmidt2022tinyad, 154 | title={{TinyAD}: Automatic Differentiation in Geometry Processing Made Simple}, 155 | author={Schmidt, Patrick and Born, Janis and Bommes, David and Campen, Marcel and Kobbelt, Leif}, 156 | year={2022}, 157 | journal={Computer Graphics Forum}, 158 | volume={41}, 159 | number={5}, 160 | } 161 | ``` 162 | 163 | 164 | # License 165 | TinyAD is released under the MIT license. 166 | -------------------------------------------------------------------------------- /img/deformation_thumbnail.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/patr-schm/TinyAD/4b48d1a1a588874556a692a3abbdecd0db4c23e1/img/deformation_thumbnail.png -------------------------------------------------------------------------------- /img/manifold_thumbnail.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/patr-schm/TinyAD/4b48d1a1a588874556a692a3abbdecd0db4c23e1/img/manifold_thumbnail.png -------------------------------------------------------------------------------- /img/parametrization_thumbnail.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/patr-schm/TinyAD/4b48d1a1a588874556a692a3abbdecd0db4c23e1/img/parametrization_thumbnail.png -------------------------------------------------------------------------------- /img/polycurl_reduction_thumbnail.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/patr-schm/TinyAD/4b48d1a1a588874556a692a3abbdecd0db4c23e1/img/polycurl_reduction_thumbnail.png -------------------------------------------------------------------------------- /img/quad_planarization_thumbnail.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/patr-schm/TinyAD/4b48d1a1a588874556a692a3abbdecd0db4c23e1/img/quad_planarization_thumbnail.png -------------------------------------------------------------------------------- /img/tinyad_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/patr-schm/TinyAD/4b48d1a1a588874556a692a3abbdecd0db4c23e1/img/tinyad_logo.png -------------------------------------------------------------------------------- /img/tinyad_logo_640.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/patr-schm/TinyAD/4b48d1a1a588874556a692a3abbdecd0db4c23e1/img/tinyad_logo_640.png -------------------------------------------------------------------------------- /img/tinyad_square.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/patr-schm/TinyAD/4b48d1a1a588874556a692a3abbdecd0db4c23e1/img/tinyad_square.png -------------------------------------------------------------------------------- /img/tinyad_square_250.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/patr-schm/TinyAD/4b48d1a1a588874556a692a3abbdecd0db4c23e1/img/tinyad_square_250.png -------------------------------------------------------------------------------- 
/img/tinyad_square_256.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/patr-schm/TinyAD/4b48d1a1a588874556a692a3abbdecd0db4c23e1/img/tinyad_square_256.png -------------------------------------------------------------------------------- /include/TinyAD/Detail/EigenVectorTypedefs.hh: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | // Supply vector typedefs that do not exist in old Eigen versions. 6 | // Allows writing e.g. Eigen::Vector or Eigen::Vector3. 7 | // Based on https://gitlab.com/libeigen/eigen/-/blob/master/Eigen/src/Core/Matrix.h 8 | 9 | namespace Eigen 10 | { 11 | 12 | #define EIGEN_MAKE_TYPEDEFS(Size, SizeSuffix) \ 13 | /** \ingroup matrixtypedefs */ \ 14 | /** \brief \cpp11 */ \ 15 | template \ 16 | using Matrix##SizeSuffix = Matrix; \ 17 | /** \ingroup matrixtypedefs */ \ 18 | /** \brief \cpp11 */ \ 19 | template \ 20 | using Vector##SizeSuffix = Matrix; \ 21 | /** \ingroup matrixtypedefs */ \ 22 | /** \brief \cpp11 */ \ 23 | template \ 24 | using RowVector##SizeSuffix = Matrix; 25 | 26 | #define EIGEN_MAKE_FIXED_TYPEDEFS(Size) \ 27 | /** \ingroup matrixtypedefs */ \ 28 | /** \brief \cpp11 */ \ 29 | template \ 30 | using Matrix##Size##X = Matrix; \ 31 | /** \ingroup matrixtypedefs */ \ 32 | /** \brief \cpp11 */ \ 33 | template \ 34 | using Matrix##X##Size = Matrix; 35 | 36 | EIGEN_MAKE_TYPEDEFS(2, 2) 37 | EIGEN_MAKE_TYPEDEFS(3, 3) 38 | EIGEN_MAKE_TYPEDEFS(4, 4) 39 | EIGEN_MAKE_TYPEDEFS(Dynamic, X) 40 | EIGEN_MAKE_FIXED_TYPEDEFS(2) 41 | EIGEN_MAKE_FIXED_TYPEDEFS(3) 42 | EIGEN_MAKE_FIXED_TYPEDEFS(4) 43 | 44 | /** \ingroup matrixtypedefs 45 | * \brief \cpp11 */ 46 | template 47 | using Vector = Matrix; 48 | 49 | /** \ingroup matrixtypedefs 50 | * \brief \cpp11 */ 51 | template 52 | using RowVector = Matrix; 53 | 54 | #undef EIGEN_MAKE_TYPEDEFS 55 | #undef EIGEN_MAKE_FIXED_TYPEDEFS 56 | 57 | } 58 | 59 | -------------------------------------------------------------------------------- /include/TinyAD/Detail/EvalSettings.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 3 | * Author: Patrick Schmidt 4 | */ 5 | #pragma once 6 | 7 | #ifdef _OPENMP 8 | #include 9 | #endif 10 | 11 | namespace TinyAD 12 | { 13 | 14 | struct EvalSettings 15 | { 16 | /** 17 | * Number of OpenMP threads. 18 | * Positive number or -1 for max available threads. 19 | */ 20 | int n_threads = -1; 21 | }; 22 | 23 | } 24 | -------------------------------------------------------------------------------- /include/TinyAD/Detail/Parallel.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 3 | * Author: Patrick Schmidt 4 | */ 5 | #pragma once 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | #ifdef _OPENMP 14 | #include 15 | #endif 16 | 17 | namespace TinyAD 18 | { 19 | 20 | inline int get_n_threads( 21 | const EvalSettings& _settings) 22 | { 23 | #ifdef _OPENMP 24 | if (_settings.n_threads >= 1) 25 | return _settings.n_threads; 26 | else 27 | return std::max(1, omp_get_max_threads() - 1); // Don't use all available threads to prevent random hangs. 28 | #else 29 | return 1; 30 | #endif 31 | } 32 | 33 | /** 34 | * Runs the specified lambda function in an omp for loop. 
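 * Usage sketch (added example, not part of the original comment; `settings` is an
 * EvalSettings instance and `results` an assumed buffer with n entries):
 *   TinyAD::parallel_for(n, settings, [&] (Eigen::Index i) { results[i] = compute(i); });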
35 | * On throw: Catches first exception and re-throws it in the main thread. 36 | */ 37 | inline void parallel_for( 38 | const Eigen::Index n, 39 | const EvalSettings& settings, 40 | std::function function_body) 41 | { 42 | std::mutex exception_mutex; 43 | std::optional exception_ptr; 44 | std::atomic cancel_requested(false); 45 | 46 | #pragma omp parallel for schedule(static) num_threads(get_n_threads(settings)) 47 | for (Eigen::Index i = 0; i < n; ++i) 48 | { 49 | if (cancel_requested) 50 | continue; 51 | 52 | try 53 | { 54 | function_body(i); 55 | } 56 | catch (const std::exception& ex) 57 | { 58 | std::lock_guard lock(exception_mutex); 59 | if (!exception_ptr.has_value()) 60 | exception_ptr = std::make_exception_ptr(ex); 61 | 62 | cancel_requested = true; 63 | } 64 | } 65 | 66 | // Re-throw, if we stored an exception 67 | if (exception_ptr.has_value()) 68 | std::rethrow_exception(exception_ptr.value()); 69 | } 70 | 71 | } 72 | -------------------------------------------------------------------------------- /include/TinyAD/Detail/ScalarObjectiveTerm.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 3 | * Author: Patrick Schmidt 4 | */ 5 | #pragma once 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | namespace TinyAD 15 | { 16 | 17 | /** 18 | * Abstract base type for objective terms. 19 | * We need this to be able to store multiple different 20 | * objective term instantiations in a vector in ScalarFunction. 21 | */ 22 | template 23 | struct ScalarObjectiveTermBase 24 | { 25 | virtual ~ScalarObjectiveTermBase() = default; 26 | 27 | virtual Eigen::Index n_elements() const = 0; 28 | 29 | virtual PassiveT eval( 30 | const Eigen::VectorX& _x) const = 0; 31 | 32 | virtual void eval_with_gradient_add( 33 | const Eigen::VectorX& _x, 34 | PassiveT& _f, 35 | Eigen::VectorX& _g) const = 0; 36 | 37 | virtual void eval_with_derivatives_add( 38 | const Eigen::VectorX& _x, 39 | PassiveT& _f, 40 | Eigen::VectorX& _g, 41 | std::vector>& _H_proj_triplets, 42 | const bool _project_hessian, 43 | const PassiveT& _projection_eps) const = 0; 44 | }; 45 | 46 | /** 47 | * Objective term stored in ScalarFunction. 48 | */ 49 | template 50 | struct ScalarObjectiveTerm : ScalarObjectiveTermBase 51 | { 52 | static constexpr int n_element = variable_dimension * element_valence; 53 | 54 | // Scalar types. Either passive (e.g. double), or active (TinyAD::Scalar). 55 | // Declare separate types for first-order-only and for second-order use cases. 56 | using PassiveScalarType = PassiveT; 57 | using ActiveFirstOrderScalarType = TinyAD::Scalar; 58 | using ActiveSecondOrderScalarType = TinyAD::Scalar; 59 | 60 | // Possible element types. These are passed as argument to the user-provided lambda function. 61 | using PassiveElementType = Element; 62 | using ActiveFirstOrderElementType = Element; 63 | using ActiveSecondOrderElementType = Element; 64 | 65 | // Possible return types of the user-provided lambda function. 66 | using PassiveEvalElementReturnType = PassiveScalarType; 67 | using ActiveFirstOrderEvalElementReturnType = ActiveFirstOrderScalarType; 68 | using ActiveSecondOrderEvalElementReturnType = ActiveSecondOrderScalarType; 69 | 70 | // Possible types of the user-provided lambda function. 
71 | using PassiveEvalElementFunction = std::function; 72 | using ActiveFirstOrderEvalElementFunction = std::function; 73 | using ActiveSecondOrderEvalElementFunction = std::function; 74 | 75 | // Base class for storing the type-erased user-provided lambda as a member of ScalarObjectiveTerm. 76 | // We use this pattern to only compile versions of the lambda that are actually called 77 | // by the user via eval_... functions. 78 | struct LambdaBase 79 | { 80 | virtual ~LambdaBase() = default; 81 | virtual PassiveEvalElementFunction get_passive() = 0; 82 | virtual ActiveFirstOrderEvalElementFunction get_active_first_order() = 0; 83 | virtual ActiveSecondOrderEvalElementFunction get_active_second_order() = 0; 84 | }; 85 | 86 | // Subclass where F is the type-erased lambda function. 87 | // Calling the get_...() functions actually compiles/instantiates the user-provided lambda. 88 | template 89 | struct LambdaImpl : LambdaBase 90 | { 91 | LambdaImpl(F&& f) : func(std::forward(f)) {} 92 | 93 | PassiveEvalElementFunction get_passive() override 94 | { 95 | return [this](PassiveElementType& element) -> PassiveEvalElementReturnType { 96 | return func(element); 97 | }; 98 | } 99 | 100 | ActiveFirstOrderEvalElementFunction get_active_first_order() override 101 | { 102 | return [this](ActiveFirstOrderElementType& element) -> ActiveFirstOrderEvalElementReturnType { 103 | return func(element); 104 | }; 105 | } 106 | 107 | ActiveSecondOrderEvalElementFunction get_active_second_order() override 108 | { 109 | return [this](ActiveSecondOrderElementType& element) -> ActiveSecondOrderEvalElementReturnType { 110 | return func(element); 111 | }; 112 | } 113 | 114 | F func; 115 | }; 116 | 117 | template 118 | ScalarObjectiveTerm( 119 | const std::vector& _element_handles, 120 | EvalElementFunction _eval_element, 121 | const Eigen::Index _n_global, 122 | const EvalSettings& _settings) 123 | : n_vars_global(_n_global), 124 | element_handles(_element_handles), 125 | settings(_settings) 126 | { 127 | static_assert (std::is_same_v< 128 | std::decay_t())))>, 129 | PassiveEvalElementReturnType>, 130 | "Please make sure that the user-provided lambda function has the signature (const auto& element) -> TINYAD_SCALAR_TYPE(element)"); 131 | 132 | // Store the user-provided lambda for deferred instantiation 133 | type_erased_lambda = std::make_unique>(std::forward(_eval_element)); 134 | } 135 | 136 | // Move constructor 137 | ScalarObjectiveTerm(ScalarObjectiveTerm&& other) noexcept 138 | : n_vars_global(other.n_vars_global), 139 | element_handles(std::move(other.element_handles)), 140 | settings(other.settings), 141 | type_erased_lambda(std::move(other.type_erased_lambda)) 142 | { 143 | } 144 | 145 | // Move assignment 146 | ScalarObjectiveTerm& operator=(ScalarObjectiveTerm&& other) noexcept 147 | { 148 | if (this != &other) 149 | { 150 | // n_vars_global and settings are const, so we can't move them 151 | element_handles = std::move(other.element_handles); 152 | type_erased_lambda = std::move(other.type_erased_lambda); 153 | } 154 | return *this; 155 | } 156 | 157 | Eigen::Index n_elements() const override 158 | { 159 | return (Eigen::Index)element_handles.size(); 160 | } 161 | 162 | PassiveT eval( 163 | const Eigen::VectorX& _x) const override 164 | { 165 | TINYAD_ASSERT_EQ(_x.size(), n_vars_global); 166 | 167 | // Instantiate the passive evaluation function 168 | auto eval_element_passive = type_erased_lambda->get_passive(); 169 | 170 | // Eval elements using plain double type 171 | std::vector 
element_results(element_handles.size()); 172 | 173 | parallel_for(element_handles.size(), settings, [&] (Eigen::Index i_element) 174 | { 175 | // Call user code 176 | PassiveElementType element(element_handles[i_element], _x); 177 | element_results[i_element] = eval_element_passive(element); 178 | }); 179 | 180 | // Sum up results 181 | PassiveT f = 0.0; 182 | for (Eigen::Index i_element = 0; i_element < (Eigen::Index)element_results.size(); ++i_element) 183 | f += element_results[i_element]; 184 | 185 | return f; 186 | } 187 | 188 | void eval_with_gradient_add( 189 | const Eigen::VectorX& _x, 190 | PassiveT& _f, 191 | Eigen::VectorX& _g) const override 192 | { 193 | TINYAD_ASSERT_EQ(_x.size(), n_vars_global); 194 | TINYAD_ASSERT_EQ(_g.size(), n_vars_global); 195 | 196 | // Instantiate the first-order evaluation function 197 | auto eval_element_active_first_order = type_erased_lambda->get_active_first_order(); 198 | 199 | // Eval elements using active scalar type 200 | std::vector elements(element_handles.size()); 201 | std::vector element_results(element_handles.size()); 202 | 203 | parallel_for(element_handles.size(), settings, [&] (Eigen::Index i_element) 204 | { 205 | // Call user code, which initializes active variables via element.variables(...) and performs computations. 206 | elements[i_element] = ActiveFirstOrderElementType(element_handles[i_element], _x); 207 | element_results[i_element] = eval_element_active_first_order(elements[i_element]); 208 | 209 | // Assert that derivatives are finite 210 | TINYAD_ASSERT_FINITE_MAT(element_results[i_element].grad); 211 | }); 212 | 213 | // Add to global f and g 214 | for (Eigen::Index i_element = 0; i_element < (Eigen::Index)element_handles.size(); ++i_element) 215 | { 216 | _f += element_results[i_element].val; 217 | 218 | // Add to global gradient 219 | for (Eigen::Index i = 0; i < (Eigen::Index)elements[i_element].idx_local_to_global.size(); ++i) 220 | _g[elements[i_element].idx_local_to_global[i]] += element_results[i_element].grad[i]; 221 | } 222 | } 223 | 224 | void eval_with_derivatives_add( 225 | const Eigen::VectorX& _x, 226 | PassiveT& _f, 227 | Eigen::VectorX& _g, 228 | std::vector>& _H_triplets, 229 | const bool _project_hessian, 230 | const PassiveT& _projection_eps) const override 231 | { 232 | TINYAD_ASSERT_EQ(_x.size(), n_vars_global); 233 | TINYAD_ASSERT_EQ(_g.size(), n_vars_global); 234 | 235 | // Instantiate the second-order evaluation function 236 | auto eval_element_active_second_order = type_erased_lambda->get_active_second_order(); 237 | 238 | // Eval elements using active scalar type 239 | std::vector elements(element_handles.size()); 240 | std::vector element_results(element_handles.size()); 241 | 242 | parallel_for(element_handles.size(), settings, [&] (Eigen::Index i_element) 243 | { 244 | // Call user code, which initializes active variables via element.variables(...) and performs computations. 
245 | elements[i_element] = ActiveSecondOrderElementType(element_handles[i_element], _x); 246 | element_results[i_element] = eval_element_active_second_order(elements[i_element]); 247 | 248 | if (_project_hessian) 249 | project_positive_definite(element_results[i_element].Hess, _projection_eps); 250 | 251 | // Assert that derivatives are finite 252 | TINYAD_ASSERT_FINITE_MAT(element_results[i_element].grad); 253 | TINYAD_ASSERT_FINITE_MAT(element_results[i_element].Hess); 254 | }); 255 | 256 | // Add to global f, g and H 257 | for (Eigen::Index i_element = 0; i_element < (Eigen::Index)element_handles.size(); ++i_element) 258 | { 259 | _f += element_results[i_element].val; 260 | 261 | // Add to global gradient 262 | for (Eigen::Index i = 0; i < (Eigen::Index)elements[i_element].idx_local_to_global.size(); ++i) 263 | _g[elements[i_element].idx_local_to_global[i]] += element_results[i_element].grad[i]; 264 | 265 | // Add to global Hessian 266 | using SparseIndex = typename Eigen::SparseMatrix::StorageIndex; 267 | for (Eigen::Index i = 0; i < (Eigen::Index)elements[i_element].idx_local_to_global.size(); ++i) 268 | { 269 | for (Eigen::Index j = 0; j < (Eigen::Index)elements[i_element].idx_local_to_global.size(); ++j) 270 | { 271 | _H_triplets.push_back(Eigen::Triplet( 272 | (SparseIndex)elements[i_element].idx_local_to_global[i], 273 | (SparseIndex)elements[i_element].idx_local_to_global[j], 274 | element_results[i_element].Hess(i, j))); 275 | } 276 | } 277 | } 278 | } 279 | 280 | private: 281 | const Eigen::Index n_vars_global; 282 | 283 | const std::vector element_handles; 284 | const EvalSettings& settings; 285 | 286 | // Store the user-provided lambda function 287 | // without instantiating it with a specific scalar type yet. 288 | std::unique_ptr type_erased_lambda; 289 | }; 290 | 291 | } 292 | -------------------------------------------------------------------------------- /include/TinyAD/Detail/VectorFunctionImpl.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 
3 | * Author: Patrick Schmidt 4 | */ 5 | #pragma once 6 | 7 | #ifndef TINYAD_VectorFunction_DEFINED 8 | #include 9 | #endif 10 | 11 | #include 12 | #include 13 | 14 | namespace TinyAD 15 | { 16 | 17 | /** 18 | * VectorFunction implementation: 19 | */ 20 | 21 | template 22 | VectorFunction:: 23 | VectorFunction(VectorFunction&& _other) 24 | : settings(std::move(_other.settings)), 25 | n_vars(_other.n_vars), 26 | n_elements(_other.n_elements), 27 | n_outputs(_other.n_outputs), 28 | variable_handles(std::move(_other.variable_handles)), 29 | objective_terms(std::move(_other.objective_terms)) 30 | { 31 | 32 | } 33 | 34 | template 35 | VectorFunction& 36 | VectorFunction:: 37 | operator=(VectorFunction&& _other) 38 | { 39 | settings = std::move(_other.settings); 40 | n_vars = _other.n_vars; 41 | n_elements = _other.n_elements; 42 | n_outputs = _other.n_outputs; 43 | variable_handles = std::move(_other.variable_handles); 44 | objective_terms = std::move(_other.objective_terms); 45 | return *this; 46 | } 47 | 48 | template 49 | VectorFunction:: 50 | VectorFunction( 51 | std::vector _variable_handles, 52 | const EvalSettings& _settings) 53 | : settings(_settings), 54 | n_vars(variable_dimension * _variable_handles.size()), 55 | variable_handles(std::move(_variable_handles)) 56 | { 57 | static_assert (variable_dimension >= 1, "Variable dimension needs to be at least 1."); 58 | 59 | TINYAD_ASSERT(!variable_handles.empty()); 60 | TINYAD_ASSERT(variable_indices_compact(variable_handles)); 61 | TINYAD_ASSERT_G(n_vars, 0); 62 | } 63 | 64 | template 65 | template 66 | void 67 | VectorFunction:: 68 | add_elements( 69 | const ElementHandleRangeT& _element_range, 70 | EvalElementFunction _eval_element) 71 | { 72 | static_assert (element_valence >= 0, "Element valence needs to be non-negative."); 73 | 74 | // If this line does not compile: Make sure to pass a range with .begin() and .end() methods. 75 | // E.g. you could create a range of integers via TinyAD::range(n). 
76 | using ElementHandle = typename std::decay_t; 77 | 78 | // Assertion fails with Polymesh because polymesh::end_iterator has no operator* 79 | // static_assert (std::is_same_v>, "Please supply a valid range (with begin() and end()) as _element_range"); 80 | 81 | // Copy handles into vector 82 | std::vector element_handles; 83 | element_handles.reserve(count(_element_range)); 84 | for (auto eh : _element_range) 85 | element_handles.push_back(eh); 86 | 87 | // Store objective term 88 | using ObjectiveType = VectorObjectiveTerm< 89 | variable_dimension, 90 | element_valence, 91 | outputs_per_element, 92 | PassiveT, 93 | VariableHandleT, 94 | ElementHandle>; 95 | 96 | objective_terms.push_back(std::make_unique( 97 | element_handles, _eval_element, n_vars, settings)); 98 | 99 | n_elements += element_handles.size(); 100 | n_outputs += outputs_per_element * element_handles.size(); 101 | } 102 | 103 | template 104 | Eigen::VectorX 105 | VectorFunction:: 106 | x_from_data( 107 | std::function _read_user_data) const 108 | { 109 | Eigen::VectorX x = Eigen::VectorX::Constant(n_vars, (PassiveT)NAN); 110 | for (auto v : variable_handles) 111 | { 112 | const Eigen::Vector user_vec = _read_user_data(v); 113 | for (Eigen::Index idx_local = 0; idx_local < variable_dimension; ++idx_local) 114 | { 115 | const Eigen::Index idx_global = global_idx(v, idx_local, n_vars); 116 | x[idx_global] = user_vec[idx_local]; 117 | } 118 | } 119 | TINYAD_ASSERT_FINITE_MAT(x); 120 | 121 | return x; 122 | } 123 | 124 | template 125 | void 126 | VectorFunction:: 127 | x_to_data( 128 | const Eigen::VectorX& _x, 129 | std::function _write_user_data) const 130 | { 131 | TINYAD_ASSERT_EQ(_x.size(), n_vars); 132 | 133 | for (auto v : variable_handles) 134 | { 135 | PassiveVariableVectorType vec; 136 | for (Eigen::Index i = 0; i < variable_dimension; ++i) 137 | vec[i] = _x[global_idx(v, i, n_vars)]; 138 | 139 | _write_user_data(v, vec); 140 | } 141 | } 142 | 143 | template 144 | Eigen::VectorX 145 | VectorFunction:: 146 | eval(const Eigen::VectorX& _x) const 147 | { 148 | TINYAD_ASSERT_EQ(_x.size(), n_vars); 149 | 150 | Eigen::VectorX result(n_outputs); 151 | Eigen::Index start_idx = 0; 152 | for (auto& objective : objective_terms) 153 | { 154 | result.segment(start_idx, objective->n_outputs()) = objective->eval(_x); 155 | start_idx += objective->n_outputs(); 156 | } 157 | 158 | return result; 159 | } 160 | 161 | template 162 | Eigen::VectorX 163 | VectorFunction:: 164 | operator()( 165 | const Eigen::VectorX& _x) const 166 | { 167 | return eval(_x); 168 | } 169 | 170 | template 171 | void 172 | VectorFunction:: 173 | eval_with_jacobian( 174 | const Eigen::VectorX& _x, 175 | Eigen::VectorX& _r, 176 | Eigen::SparseMatrix& _J) const 177 | { 178 | TINYAD_ASSERT_EQ(_x.size(), this->n_vars); 179 | 180 | _r = Eigen::VectorX(); 181 | std::vector> J_triplets; 182 | 183 | for (auto& objective : this->objective_terms) 184 | objective->eval_with_jacobian_add(_x, _r, J_triplets); 185 | 186 | _J = Eigen::SparseMatrix(_r.size(), this->n_vars); 187 | _J.setFromTriplets(J_triplets.begin(), J_triplets.end()); 188 | } 189 | 190 | template 191 | std::tuple, Eigen::SparseMatrix> 192 | VectorFunction:: 193 | eval_with_jacobian( 194 | const Eigen::VectorX& _x) const 195 | { 196 | TINYAD_ASSERT_EQ(_x.size(), this->n_vars); 197 | 198 | Eigen::VectorX r; 199 | Eigen::SparseMatrix J; 200 | eval_with_jacobian(_x, r, J); 201 | 202 | return std::tuple, Eigen::SparseMatrix>( 203 | std::move(r), std::move(J)); 204 | } 205 | 206 | template 207 | void 208 | 
VectorFunction:: 209 | eval_with_derivatives( 210 | const Eigen::VectorX& _x, 211 | Eigen::VectorX& _r, 212 | Eigen::SparseMatrix& _J, 213 | std::vector>& _H) const 214 | { 215 | TINYAD_ASSERT_EQ(_x.size(), this->n_vars); 216 | 217 | _r = Eigen::VectorX(); 218 | std::vector> J_triplets; 219 | std::vector>> H_triplets; 220 | 221 | for (auto& objective : this->objective_terms) 222 | objective->eval_with_derivatives_add(_x, _r, J_triplets, H_triplets); 223 | 224 | _J = Eigen::SparseMatrix(_r.size(), this->n_vars); 225 | _J.setFromTriplets(J_triplets.begin(), J_triplets.end()); 226 | 227 | TINYAD_ASSERT_EQ(_J.rows(), n_outputs); 228 | TINYAD_ASSERT_EQ(H_triplets.size(), n_outputs); 229 | _H.resize(n_outputs); 230 | for (Eigen::Index i_output = 0; i_output < n_outputs; ++i_output) 231 | { 232 | _H[i_output] = Eigen::SparseMatrix(this->n_vars, this->n_vars); 233 | _H[i_output].setFromTriplets(H_triplets[i_output].begin(), H_triplets[i_output].end()); 234 | } 235 | } 236 | 237 | template 238 | std::tuple, Eigen::SparseMatrix, std::vector>> 239 | VectorFunction:: 240 | eval_with_derivatives( 241 | const Eigen::VectorX& _x) const 242 | { 243 | TINYAD_ASSERT_EQ(_x.size(), this->n_vars); 244 | 245 | Eigen::VectorX r; 246 | Eigen::SparseMatrix J; 247 | std::vector> H; 248 | eval_with_derivatives(_x, r, J, H); 249 | 250 | return std::tuple, Eigen::SparseMatrix, std::vector>>( 251 | std::move(r), std::move(J), std::move(H)); 252 | } 253 | 254 | template 255 | PassiveT 256 | VectorFunction:: 257 | eval_sum_of_squares(const Eigen::VectorX& _x) const 258 | { 259 | TINYAD_ASSERT_EQ(_x.size(), n_vars); 260 | 261 | PassiveT result = 0.0; 262 | for (auto& objective : objective_terms) 263 | result += objective->eval_sum_of_squares(_x); 264 | 265 | return result; 266 | } 267 | 268 | template 269 | void 270 | VectorFunction:: 271 | eval_sum_of_squares_with_derivatives( 272 | const Eigen::VectorX& _x, 273 | PassiveT& _f, 274 | Eigen::VectorX& _g, 275 | Eigen::VectorX& _r, 276 | Eigen::SparseMatrix& _J) const 277 | { 278 | TINYAD_ASSERT_EQ(_x.size(), this->n_vars); 279 | 280 | eval_with_jacobian(_x, _r, _J); 281 | _f = _r.dot(_r); 282 | _g = 2.0 * _J.transpose() * _r; 283 | } 284 | 285 | template 286 | std::tuple, Eigen::VectorX, Eigen::SparseMatrix> 287 | VectorFunction:: 288 | eval_sum_of_squares_with_derivatives( 289 | const Eigen::VectorX& _x) const 290 | { 291 | TINYAD_ASSERT_EQ(_x.size(), this->n_vars); 292 | 293 | PassiveT f = 0.0; 294 | Eigen::VectorX g; 295 | Eigen::VectorX r; 296 | Eigen::SparseMatrix J; 297 | eval_sum_of_squares_with_derivatives(_x, f, g, r, J); 298 | 299 | return std::tuple, Eigen::VectorX, Eigen::SparseMatrix>( 300 | f, std::move(g), std::move(r), std::move(J)); 301 | } 302 | 303 | template 304 | auto vector_function( 305 | const VariableRangeT& _variable_range, 306 | const EvalSettings& _settings) 307 | { 308 | // If this line does not compile: Make sure to pass a range with .begin() and .end() methods. 309 | // E.g. you could create a range of integers via TinyAD::range(n). 
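// Added usage sketch (illustrative comment, not part of the original file).
// It shows one possible Gauss-Newton step built on eval_sum_of_squares_with_derivatives();
// the variable dimension <2>, the explicit `settings` argument, and the Eigen solver
// choice are assumptions of this sketch, not TinyAD API guarantees:
//   auto func = TinyAD::vector_function<2>(TinyAD::range(n_vertices), settings);
//   ... func.add_elements<...>(...); ...
//   auto [f, g, r, J] = func.eval_sum_of_squares_with_derivatives(x);
//   // Gauss-Newton: solve (J^T J) d = -J^T r. Since g = 2 J^T r, the right-hand side is -0.5 * g.
//   Eigen::SparseMatrix<double> JtJ = J.transpose() * J;
//   Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>> ldlt(JtJ);
//   Eigen::VectorXd d = ldlt.solve(-0.5 * g);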
310 | using VariableHandle = typename std::decay_t; 311 | 312 | // Assertion fails with Polymesh because polymesh::end_iterator has no operator* 313 | // static_assert (std::is_same_v>, "Please supply a valid range (with begin() and end()) as _variable_range"); 314 | 315 | // Copy handles into vectors 316 | std::vector variable_handles; 317 | variable_handles.reserve(count(_variable_range)); 318 | for (auto vh : _variable_range) 319 | variable_handles.push_back(vh); 320 | 321 | return VectorFunction( 322 | std::move(variable_handles), _settings); 323 | } 324 | 325 | } 326 | -------------------------------------------------------------------------------- /include/TinyAD/Operations/SVD.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 3 | * Author: Patrick Schmidt 4 | */ 5 | #pragma once 6 | 7 | #include 8 | 9 | namespace TinyAD 10 | { 11 | 12 | template 13 | int sign(const T& _x) 14 | { 15 | if (_x < T(0.0)) 16 | return -1; 17 | else if (_x > T(0.0)) 18 | return 1; 19 | else 20 | return 0; 21 | } 22 | 23 | /** 24 | * 2x2 closed-form SVD. 25 | */ 26 | template 27 | void svd( 28 | const Eigen::Matrix2& _A, 29 | Eigen::Matrix2& _U, 30 | Eigen::Vector2& _S, 31 | Eigen::Matrix2& _V) 32 | { 33 | // From https://lucidar.me/en/mathematics/singular-value-decomposition-of-a-2x2-matrix/ 34 | // [U,SIG,V] = svd2x2(A) finds the SVD of 2x2 matrix A 35 | // where U and V are orthogonal, SIG is diagonal, 36 | // and A=U*SIG*V’ 37 | // Find U such that U*A*A’*U’=diag 38 | Eigen::Matrix2 Su = _A * _A.transpose(); 39 | T phi = 0.5 * atan2(Su(0, 1) + Su(1, 0), Su(0, 0) - Su(1, 1)); 40 | T Cphi = cos(phi); 41 | T Sphi = sin(phi); 42 | _U << Cphi, -Sphi, 43 | Sphi, Cphi; 44 | 45 | // Find W such that W’*A’*A*W=diag 46 | Eigen::Matrix2 Sw = _A.transpose() * _A; 47 | T theta = 0.5 * atan2(Sw(0, 1) + Sw(1, 0), Sw(0, 0) - Sw(1, 1)); 48 | T Ctheta = cos(theta); 49 | T Stheta = sin(theta); 50 | Eigen::Matrix2 W; 51 | W << Ctheta, -Stheta, 52 | Stheta, Ctheta; 53 | 54 | // Find the singular values from U 55 | T SUsum = Su(0, 0) + Su(1, 1); 56 | T SUdif = sqrt(sqr(Su(0, 0) - Su(1, 1)) + 4.0 * Su(0, 1) * Su(1, 0)); 57 | _S << sqrt((SUsum + SUdif) / 2.0), sqrt((SUsum - SUdif) / 2.0); 58 | 59 | // Find the correction matrix for the right side 60 | Eigen::Matrix2 S = _U.transpose() * _A * W; 61 | Eigen::Vector2 C(sign(S(0, 0)), sign(S(1, 1))); 62 | _V = W * C.asDiagonal(); 63 | } 64 | 65 | /** 66 | * Compute closest orthogonal 2x2 matrix via SVD. 67 | * Returns U * V^T for SVD A = U * S * V^T. 
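 * Added usage sketch (not part of the original header; J is an assumed 2x2 Jacobian
 * built from an active TinyAD scalar type T, e.g. in an ARAP-style energy):
 *   Eigen::Matrix2<T> R = TinyAD::closest_orthogonal(J);
 *   T arap_energy = (J - R).squaredNorm();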
68 | */ 69 | template 70 | Eigen::Matrix2 closest_orthogonal( 71 | const Eigen::Matrix2& _A) 72 | { 73 | // Based on https://lucidar.me/en/mathematics/singular-value-decomposition-of-a-2x2-matrix/ 74 | 75 | // Find U such that U*A*A’*U’=diag 76 | Eigen::Matrix2 Su = _A * _A.transpose(); 77 | T phi = 0.5 * atan2(Su(0, 1) + Su(1, 0), Su(0, 0) - Su(1, 1)); 78 | T Cphi = cos(phi); 79 | T Sphi = sin(phi); 80 | Eigen::Matrix2 U; 81 | U << Cphi, -Sphi, 82 | Sphi, Cphi; 83 | 84 | // Find W such that W’*A’*A*W=diag 85 | Eigen::Matrix2 Sw = _A.transpose() * _A; 86 | T theta = 0.5 * atan2(Sw(0, 1) + Sw(1, 0), Sw(0, 0) - Sw(1, 1)); 87 | T Ctheta = cos(theta); 88 | T Stheta = sin(theta); 89 | Eigen::Matrix2 W; 90 | W << Ctheta, -Stheta, 91 | Stheta, Ctheta; 92 | 93 | // Find the correction matrix for the right side 94 | Eigen::Matrix2 S = U.transpose() * _A * W; 95 | Eigen::Vector2 C(sign(S(0, 0)), sign(S(1, 1))); 96 | Eigen::Matrix2 V = W * C.asDiagonal(); 97 | 98 | return U * V.transpose(); 99 | } 100 | 101 | } 102 | -------------------------------------------------------------------------------- /include/TinyAD/ScalarFunction.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 3 | * Author: Patrick Schmidt 4 | */ 5 | #pragma once 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | namespace TinyAD 14 | { 15 | 16 | /** 17 | * Class implementing a differentiable scalar function f: R^n -> R, 18 | * defined as a sum of element functions. 19 | * 20 | * Example (2D mesh parametrization). 21 | * 2 variables per vertex. Objective per triangle, using 3 vertices each: 22 | * 23 | * auto func = scalar_function<2>(TinyAD::range(V.rows())); 24 | * func.add_elements<3>(TinyAD::range(F.rows()), [&] (auto& element) -> TINYAD_SCALAR_TYPE(element) 25 | * { 26 | * using T = TINYAD_SCALAR_TYPE(element); 27 | * int f_idx = element.handle; 28 | * Eigen::Vector2 a = element.variables(F(f_idx, 0)); 29 | * Eigen::Vector2 b = element.variables(F(f_idx, 1)); 30 | * Eigen::Vector2 c = element.variables(F(f_idx, 2)); 31 | * return ...; 32 | * }); 33 | * Eigen::VectorXd x = func.x_from_data([&] (int v_idx) { return ...; }); 34 | * auto [f, g, H_proj] = func.eval_with_hessian_proj(x); 35 | */ 36 | template < 37 | int variable_dimension, // Number of scalar variables per handle. E.g. 2 in 2D mesh parametrization. 38 | typename PassiveT, // Internal scalar type. E.g. float or double. 39 | typename VariableHandleT> // Type of variable handles. E.g. int or OpenMesh::VertexHandle, ... 40 | struct ScalarFunction 41 | { 42 | using PassiveScalarType = PassiveT; 43 | using PassiveVariableVectorType = Eigen::Vector; 44 | static constexpr bool is_vector_function = false; 45 | 46 | // Scalar function is not copyable but movable 47 | ScalarFunction() = default; 48 | ScalarFunction(const ScalarFunction&) = delete; 49 | ScalarFunction(ScalarFunction&& _other); 50 | ScalarFunction& operator=(const ScalarFunction&) = delete; 51 | ScalarFunction& operator=(ScalarFunction&& _other); 52 | 53 | /** 54 | * Instead of this contructor, use scalar_function<..>(..), 55 | * which helps with deducing template arguments. 56 | */ 57 | ScalarFunction( 58 | std::vector _variable_handles, 59 | const EvalSettings& _settings); 60 | 61 | /** 62 | * Add a set of elements (summands in the objective) 63 | * and a lambda function that evaluates each element. 64 | * Can be called multiple times to add different terms. 
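 *
 * Example (illustrative sketch): a second, per-vertex term added to the same function as in the class example above:
 *   func.add_elements<1>(TinyAD::range(V.rows()), [&] (auto& element) -> TINYAD_SCALAR_TYPE(element)
 *   {
 *       using T = TINYAD_SCALAR_TYPE(element);
 *       Eigen::Vector2<T> p = element.variables(element.handle);
 *       return w_reg * p.squaredNorm(); // w_reg: hypothetical regularization weight
 *   });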
65 | * 66 | * This is the **static** version, where every element accesses exactly 67 | * the same number of variable handles (element_valence). For elements 68 | * that need to access different numbers of variables at runtime, use 69 | * add_elements_dynamic<...>() instead, which internally maps each element 70 | * to the appropriate static implementation for efficiency. 71 | */ 72 | template < 73 | int element_valence, // Number of variable handles accessed per element. 74 | typename ElementHandleRangeT, // Type of element handles. E.g. int or OpenMesh::Face handle or other. Deduced automatically. 75 | typename EvalElementFunction> // Type of per-element eval function. Deduced automatically. 76 | void add_elements( 77 | const ElementHandleRangeT& _element_range, 78 | EvalElementFunction _eval_element); 79 | 80 | /** 81 | * Add a set of elements (summands in the objective) 82 | * and a lambda function that evaluates each element. 83 | * Can be called multiple times to add different terms. 84 | * 85 | * This is the **dynamic** version, where each element can access a different number of variable handles at runtime. 86 | * The function works by: 87 | * 1. Analyzing each element to determine how many variable handles it accesses 88 | * 2. Grouping elements by their valence (number of accessed variables) 89 | * 3. Mapping each group to the appropriate static implementation 90 | * 91 | * To maintain compile-time optimizations, a list of supported valences has to be provided as template arguments 92 | * (e.g., add_elements_dynamic<5, 6, 7, 16>(...)), representing the valences you expect to encounter. 93 | * Each element will be mapped to the exact or next higher static valence in this list. 94 | * A runtime valence may never exceed the maximum provided static valence! 95 | * 96 | * Example: With template arguments <6, 8, 10>, elements accessing 5 variables will use the valence-6 97 | * implementation, elements accessing 7 variables will use valence-8, etc. 98 | 99 | * Warning: This mapping is built once, when add_elements_dynamic() is called. 100 | * The number of requested variables per element may not depend on run time branching! 101 | */ 102 | template < 103 | int... ElementValences, // List of common element valences. E.g. 5, 6, 7, 16 for meshes with max valence 16. 104 | typename ElementHandleRangeT, // Type of element handles. E.g. int or OpenMesh::Face handle or other. Deduced automatically. 105 | typename EvalElementFunction> // Type of per-element eval function. Deduced automatically. 106 | void add_elements_dynamic( 107 | const ElementHandleRangeT& _element_range, 108 | EvalElementFunction _eval_element); 109 | 110 | /** 111 | * Assemble variable vector x from user data. 112 | * Uses internal index map from variable handles to entries of x. 113 | * 114 | * Pass a lambda function that takes a variable handle and returns its associated scalar values. 115 | * 116 | * Example: 117 | * Eigen::VectorXd x = func.x_from_data([&] (int v_idx) { return param.row(v_idx); }); 118 | */ 119 | Eigen::VectorX x_from_data( 120 | std::function _read_user_data) const; 121 | 122 | /** 123 | * Write variable vector x to user data. 124 | * Uses internal index map from variable handles to entries of x. 125 | * 126 | * Pass a lambda function that takes a variable handle its associated scalar values 127 | * and writes these values to the user data structure. 
128 | * 129 | * Example: 130 | * func.x_to_data(x, [&] (int v_idx, const Eigen::Vector2d& p) { param.row(v_idx) = p; }); 131 | */ 132 | void x_to_data( 133 | const Eigen::VectorX& _x, 134 | std::function _write_user_data) const; 135 | 136 | /** 137 | * Evaluate function without computing derivatives. 138 | */ 139 | PassiveT eval( 140 | const Eigen::VectorX& _x) const; 141 | 142 | /** 143 | * Evaluate function without computing derivatives. 144 | */ 145 | PassiveT operator()( 146 | const Eigen::VectorX& _x) const; 147 | 148 | /** 149 | * Evaluate function with gradient. 150 | */ 151 | void eval_with_gradient( 152 | const Eigen::VectorX& _x, 153 | PassiveT& _f, 154 | Eigen::VectorX& _g) const; 155 | 156 | /** 157 | * Evaluate function with gradient. 158 | */ 159 | std::tuple> 160 | eval_with_gradient( 161 | const Eigen::VectorX& _x) const; 162 | 163 | /** 164 | * Evaluate function with gradient and Hessian. 165 | */ 166 | void eval_with_derivatives( 167 | const Eigen::VectorX& _x, 168 | PassiveT& _f, 169 | Eigen::VectorX& _g, 170 | Eigen::SparseMatrix& _H) const; 171 | 172 | /** 173 | * Evaluate function with gradient and Hessian. 174 | */ 175 | std::tuple, Eigen::SparseMatrix> 176 | eval_with_derivatives( 177 | const Eigen::VectorX& _x) const; 178 | 179 | /** 180 | * Evaluate Hessian matrix only. 181 | */ 182 | Eigen::SparseMatrix eval_hessian( 183 | const Eigen::VectorX& _x) const; 184 | 185 | /** 186 | * Evaluate Hessian assuming function is quadratic, i.e. Hessian is independent of x. 187 | * Warning: It is not checked if the function is actually quadratic. 188 | */ 189 | Eigen::SparseMatrix eval_hessian_of_quadratic() const; 190 | 191 | /** 192 | * Evaluate function with gradient and Hessian. 193 | * The returned Hessian matrix is positive-definite (via per-element projection). 194 | * If _projection_eps is nonnegative: Eigenvalues are clamped to this value. 195 | * If _projection_eps is negative: Negative eigenvalues are replaced by their absolute value. 196 | */ 197 | void eval_with_hessian_proj( 198 | const Eigen::VectorX& _x, 199 | PassiveT& _f, 200 | Eigen::VectorX& _g, 201 | Eigen::SparseMatrix& _H_proj, 202 | const PassiveT& _projection_eps = default_hessian_projection_eps) const; 203 | 204 | /** 205 | * Evaluate function with gradient and Hessian. 206 | * The returned Hessian matrix is positive-definite (via per-element projection). 207 | * If _projection_eps is nonnegative: Eigenvalues are clamped to this value. 208 | * If _projection_eps is negative: Negative eigenvalues are replaced by their absolute value. 209 | */ 210 | std::tuple, Eigen::SparseMatrix> 211 | eval_with_hessian_proj( 212 | const Eigen::VectorX& _x, 213 | const PassiveT& _projection_eps = default_hessian_projection_eps) const; 214 | 215 | /** 216 | * Change settings before calling eval(..). 217 | * 218 | * Example: 219 | * func.settings.n_threads = 4; 220 | */ 221 | EvalSettings settings; 222 | 223 | /** 224 | * Number of scalar variables (size of variable vector x). 225 | * This is variable_dimension * #variable_handles. 226 | */ 227 | Eigen::Index n_vars = 0; 228 | 229 | /** 230 | * Current number of elements 231 | */ 232 | Eigen::Index n_elements = 0; 233 | 234 | std::vector variable_handles; 235 | std::vector>> objective_terms; 236 | }; 237 | 238 | /** 239 | * Use this to construct scalar functions. 240 | * Automatically deduces variable handle type. 241 | */ 242 | template < 243 | int variable_dimension, // Number of scalar variables per handle. 244 | typename PassiveT = double, // Internal scalar type. 
E.g. float or double. 245 | typename VariableRangeT> // Range type of variable handles. E.g. std::vector. Deduced automatically. 246 | auto scalar_function( 247 | const VariableRangeT& _variable_range, 248 | const EvalSettings& _settings = EvalSettings()); 249 | 250 | } 251 | 252 | #define TINYAD_ScalarFunction_DEFINED 253 | #include 254 | -------------------------------------------------------------------------------- /include/TinyAD/Support/Common.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 3 | * Author: Patrick Schmidt 4 | */ 5 | #pragma once 6 | 7 | #include 8 | #include 9 | 10 | namespace TinyAD 11 | { 12 | 13 | /* 14 | * TinyAD::ScalarFunction and TinyAD::VectorFunction operate on (vertex, edge, face, ...) handle types 15 | * of different mesh libraries. Internally, TinyAD needs to convert a set of handles to a contiguous 16 | * list of integers. To add support for a new mesh library, add an overload of idx_from_handle() 17 | * that extracts an index from a handle. 18 | */ 19 | 20 | /** 21 | * Enable support for integer handle types by overloading idx_from_handle(...). 22 | * 23 | * Allows usage: 24 | * std::vector variable_handles { 0, 1, 2, 3 }; 25 | * auto func = TinyAD::scalar_function<2>(variable_handles); 26 | * 27 | * Or alternatively: 28 | * auto func = TinyAD::scalar_function<2>(TinyAD::range(4)); // Initializes handles 0, 1, 2, 3 29 | */ 30 | inline Eigen::Index idx_from_handle(Eigen::Index _idx) 31 | { 32 | return _idx; 33 | } 34 | 35 | /** 36 | * Fallback, if no specialized overload exists. 37 | */ 38 | inline Eigen::Index idx_from_handle(...) // Variadic argument has lowest priorty in overload resolution 39 | { 40 | TINYAD_ERROR_throw( 41 | "Handle type not supported. Please overload idx_from_handle() for your handle type or include one of the provided header files, e.g. TinyAD/Support/OpenMesh.hh."); 42 | } 43 | 44 | } 45 | -------------------------------------------------------------------------------- /include/TinyAD/Support/GeometryCentral.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 3 | * Author: Patrick Schmidt 4 | */ 5 | #pragma once 6 | 7 | #include 8 | #include 9 | 10 | #ifdef TINYAD_ScalarFunction_DEFINED 11 | #error Please include this file before ScalarFunction.hh 12 | #endif 13 | #ifdef TINYAD_VectorFunction_DEFINED 14 | #error Please include this file before VectorFunction.hh 15 | #endif 16 | 17 | namespace TinyAD 18 | { 19 | 20 | /** 21 | * Enable support for GeometryCentral handle types by overloading idx_from_handle(...). 22 | * 23 | * Allows usage: 24 | * std::unique_ptr mesh; 25 | * auto func = TinyAD::scalar_function<2>(mesh->vertices()); 26 | */ 27 | template 28 | Eigen::Index idx_from_handle(const geometrycentral::Element& _h) 29 | { 30 | return _h.getIndex(); 31 | } 32 | 33 | } 34 | -------------------------------------------------------------------------------- /include/TinyAD/Support/OpenMesh.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 
3 | * Author: Patrick Schmidt 4 | */ 5 | #pragma once 6 | 7 | #include 8 | #include 9 | 10 | #ifdef TINYAD_ScalarFunction_DEFINED 11 | #error Please include this file before ScalarFunction.hh 12 | #endif 13 | #ifdef TINYAD_VectorFunction_DEFINED 14 | #error Please include this file before VectorFunction.hh 15 | #endif 16 | 17 | namespace TinyAD 18 | { 19 | 20 | /** 21 | * Enable support for OpenMesh handle types by overloading idx_from_handle(...). 22 | * 23 | * Allows usage: 24 | * OpenMesh::TriMesh mesh; 25 | * auto func = TinyAD::scalar_function<2>(mesh.vertices()); 26 | */ 27 | inline Eigen::Index idx_from_handle(const OpenMesh::BaseHandle& _h) 28 | { 29 | return _h.idx(); 30 | } 31 | 32 | } 33 | -------------------------------------------------------------------------------- /include/TinyAD/Support/PMP.hh: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | #ifdef TINYAD_ScalarFunction_DEFINED 7 | #error Please include this file before ScalarFunction.hh 8 | #endif 9 | #ifdef TINYAD_VectorFunction_DEFINED 10 | #error Please include this file before VectorFunction.hh 11 | #endif 12 | 13 | namespace TinyAD 14 | { 15 | 16 | /** 17 | * Enable support for PMP handle types by overloading idx_from_handle(...). 18 | * 19 | * Allows usage: 20 | * pmp::Mesh mesh; 21 | * auto func = TinyAD::scalar_function<2>(mesh.vertices()); 22 | */ 23 | inline Eigen::Index idx_from_handle(const pmp::Handle& _h) 24 | { 25 | return _h.idx(); 26 | } 27 | 28 | } 29 | -------------------------------------------------------------------------------- /include/TinyAD/Support/Polymesh.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 3 | * Author: Patrick Schmidt 4 | */ 5 | #pragma once 6 | 7 | #include 8 | #include 9 | 10 | #ifdef TINYAD_ScalarFunction_DEFINED 11 | #error Please include this file before ScalarFunction.hh 12 | #endif 13 | #ifdef TINYAD_VectorFunction_DEFINED 14 | #error Please include this file before VectorFunction.hh 15 | #endif 16 | 17 | namespace TinyAD 18 | { 19 | 20 | /** 21 | * Enable support for Polymesh handle types by overloading idx_from_handle(...). 22 | * 23 | * Allows usage: 24 | * OpenMesh::TriMesh mesh; 25 | * auto func = TinyAD::scalar_function<2>(mesh.vertices()); 26 | */ 27 | template 28 | inline Eigen::Index idx_from_handle(const pm::primitive_handle& _h) 29 | { 30 | return _h.idx.value; 31 | } 32 | 33 | } 34 | -------------------------------------------------------------------------------- /include/TinyAD/Utils/GaussNewtonDirection.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 3 | * Author: Patrick Schmidt 4 | */ 5 | #pragma once 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | namespace TinyAD 13 | { 14 | 15 | /** 16 | * Compute update vector d such that x + d performs a Gauss-Newton step 17 | * on a sum-of-squares function f(x) = sum_i r_i(x)^2. 18 | * Input: 19 | * _r: vector of residuals 20 | * _J: Jacobian of residuals. Size n_outputs-by-n_vars 21 | * _solver: A solver that can be used over multiple iterations 22 | * in case the sparsity pattern of J^T * J is constant. 
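 *
 * Example (illustrative sketch of one Gauss-Newton iteration; func is a TinyAD::VectorFunction, x the current variable vector):
 *   TinyAD::LinearSolver<double> solver;
 *   auto [f, g, r, J] = func.eval_sum_of_squares_with_derivatives(x);
 *   Eigen::VectorXd d = TinyAD::gauss_newton_direction(r, J, solver);
 *   x = TinyAD::line_search(x, d, f, g, [&] (const Eigen::VectorXd& x_ls) { return func.eval_sum_of_squares(x_ls); });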
23 | */ 24 | template 25 | Eigen::VectorX gauss_newton_direction( 26 | const Eigen::VectorX& _r, 27 | const Eigen::SparseMatrix& _J, 28 | LinearSolver& _solver, 29 | const PassiveT& _w_identity = 0.0) 30 | { 31 | const Eigen::SparseMatrix JtJ_reg = _w_identity * identity(_J.cols()) + _J.transpose() * _J; 32 | 33 | if (_solver.sparsity_pattern_dirty) 34 | { 35 | _solver.solver.analyzePattern(JtJ_reg); 36 | _solver.sparsity_pattern_dirty = false; 37 | } 38 | 39 | _solver.solver.factorize(JtJ_reg); 40 | const Eigen::VectorX d = _solver.solver.solve(-_J.transpose() * _r); 41 | 42 | if (_solver.solver.info() != Eigen::Success) 43 | TINYAD_ERROR_throw("Linear solve failed."); 44 | 45 | TINYAD_ASSERT_FINITE_MAT(d); 46 | return d; 47 | } 48 | 49 | /** 50 | * Compute update vector d such that x + d performs a Gauss-Newton step 51 | * on a sum-of-squares function f(x) = sum_i r_i(x)^2. 52 | * Input: 53 | * _r: vector of residuals 54 | * _J: Jacobian of residuals. Size n_elements-by-n_vars 55 | */ 56 | template 57 | Eigen::VectorX gauss_newton_direction( 58 | const Eigen::VectorX& _r, 59 | const Eigen::SparseMatrix& _J, 60 | const PassiveT& _w_identity = 0.0) 61 | { 62 | LinearSolver solver; 63 | return gauss_newton_direction(_r, _J, solver, _w_identity); 64 | } 65 | 66 | } 67 | -------------------------------------------------------------------------------- /include/TinyAD/Utils/Helpers.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 3 | * Author: Patrick Schmidt 4 | */ 5 | #pragma once 6 | 7 | #include 8 | #include 9 | #include 10 | 11 | namespace TinyAD 12 | { 13 | 14 | /** 15 | * Create vector of indices from 0 to n-1. 16 | */ 17 | inline std::vector range( 18 | const Eigen::Index _n) 19 | { 20 | TINYAD_ASSERT_GEQ(_n, 0); 21 | 22 | std::vector r(_n); 23 | for (Eigen::Index i = 0; i < _n; ++i) 24 | r[i] = i; 25 | 26 | return r; 27 | } 28 | 29 | /** 30 | * Count elements in range. 31 | * (This exists because std::distance cannot handle 32 | * different iterator types for begin and end.) 33 | */ 34 | template 35 | Eigen::Index count( 36 | const RangeT& _range) 37 | { 38 | Eigen::Index n = 0; 39 | for (const auto& r : _range) 40 | ++n; 41 | 42 | return n; 43 | } 44 | 45 | /** 46 | * Assemble matrix from column vectors. 47 | */ 48 | template 49 | auto col_mat( 50 | const Eigen::MatrixBase& _v0, 51 | const Eigen::MatrixBase& _v1) 52 | { 53 | using T = typename Derived::Scalar; 54 | Eigen::Matrix M; 55 | 56 | M << _v0, _v1; 57 | 58 | return M; 59 | } 60 | 61 | /** 62 | * Assemble matrix from column vectors. 63 | */ 64 | template 65 | auto col_mat( 66 | const Eigen::MatrixBase& _v0, 67 | const Eigen::MatrixBase& _v1, 68 | const Eigen::MatrixBase& _v2) 69 | { 70 | using T = typename Derived::Scalar; 71 | Eigen::Matrix M; 72 | 73 | M << _v0, _v1, _v2; 74 | 75 | return M; 76 | } 77 | 78 | /** 79 | * Sparse identity matrix. 80 | */ 81 | template 82 | Eigen::SparseMatrix identity( 83 | const Eigen::Index _n) 84 | { 85 | Eigen::SparseMatrix Id(_n, _n); 86 | Id.setIdentity(); 87 | 88 | return Id; 89 | } 90 | 91 | } 92 | -------------------------------------------------------------------------------- /include/TinyAD/Utils/HessianProjection.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 
3 | * Author: Patrick Schmidt 4 | */ 5 | #pragma once 6 | 7 | #include 8 | #include 9 | #include 10 | 11 | namespace TinyAD 12 | { 13 | 14 | // Eigenvalues are clamped to be larger or equal to this value. 15 | // If negative: Negative eigenvalues are replaced by their absolute value. 16 | constexpr double default_hessian_projection_eps = 1e-9; 17 | 18 | /** 19 | * Check if matrix is diagonally dominant and has positive diagonal entries. 20 | * This is a sufficient condition for positive-definiteness 21 | * and can be used as an early out to avoid eigen decomposition. 22 | */ 23 | template 24 | bool positive_diagonally_dominant( 25 | Eigen::Matrix& _H, 26 | const PassiveT& _eps) 27 | { 28 | for (Eigen::Index i = 0; i < _H.rows(); ++i) 29 | { 30 | PassiveT off_diag_abs_sum = 0.0; 31 | for(Eigen::Index j = 0; j < _H.cols(); ++j) 32 | { 33 | if (i != j) 34 | off_diag_abs_sum += std::abs(_H(i, j)); 35 | } 36 | 37 | if (_H(i, i) < off_diag_abs_sum + _eps) 38 | return false; 39 | } 40 | 41 | return true; 42 | } 43 | 44 | /** 45 | * Project symmetric matrix to positive-definite matrix 46 | * via eigen decomposition. 47 | */ 48 | template 49 | void project_positive_definite( 50 | Eigen::Matrix& _H, 51 | const PassiveT& _eigenvalue_eps) 52 | { 53 | if constexpr (k == 0) 54 | { 55 | return; 56 | } 57 | else 58 | { 59 | using MatT = Eigen::Matrix; 60 | 61 | // Early out if sufficient condition is fulfilled 62 | if (positive_diagonally_dominant(_H, _eigenvalue_eps)) 63 | return; 64 | 65 | // Compute eigen-decomposition (of symmetric matrix) 66 | Eigen::SelfAdjointEigenSolver eig(_H); 67 | MatT D = eig.eigenvalues().asDiagonal(); 68 | 69 | // Clamp all eigenvalues to eps 70 | bool all_positive = true; 71 | for (Eigen::Index i = 0; i < _H.rows(); ++i) 72 | { 73 | if (_eigenvalue_eps < 0) 74 | { 75 | // Use absolute eigenvalue strategy (https://arxiv.org/html/2406.05928v3) 76 | if (D(i, i) < 0) 77 | { 78 | D(i, i) = -D(i, i); 79 | all_positive = false; 80 | } 81 | } 82 | else 83 | { 84 | // Use clamping strategy 85 | if (D(i, i) < _eigenvalue_eps) 86 | { 87 | D(i, i) = _eigenvalue_eps; 88 | all_positive = false; 89 | } 90 | } 91 | } 92 | 93 | // Do nothing if all eigenvalues were already at least eps 94 | if (all_positive) 95 | return; 96 | 97 | // Re-assemble matrix using clamped eigenvalues 98 | _H = eig.eigenvectors() * D * eig.eigenvectors().transpose(); 99 | TINYAD_ASSERT_FINITE_MAT(_H); 100 | } 101 | } 102 | 103 | } 104 | -------------------------------------------------------------------------------- /include/TinyAD/Utils/LineSearch.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 
3 | * Author: Patrick Schmidt 4 | */ 5 | #pragma once 6 | 7 | #include 8 | #include 9 | #include 10 | 11 | namespace TinyAD 12 | { 13 | 14 | template 15 | bool armijo_condition( 16 | const PassiveT _f_curr, 17 | const PassiveT _f_new, 18 | const PassiveT _s, 19 | const Eigen::Vector& _d, 20 | const Eigen::Vector& _g, 21 | const PassiveT _armijo_const) 22 | { 23 | return _f_new <= _f_curr + _armijo_const * _s * _d.dot(_g); 24 | } 25 | 26 | template 27 | Eigen::Vector line_search( 28 | const Eigen::Vector& _x0, 29 | const Eigen::Vector& _d, 30 | const PassiveT _f, 31 | const Eigen::Vector& _g, 32 | const EvalFunctionT& _eval, // Callable of type T(const Eigen::Vector&) 33 | const PassiveT _s_max = 1.0, // Initial step size 34 | const PassiveT _shrink = 0.8, 35 | const int _max_iters = 64, 36 | const PassiveT _armijo_const = 1e-4) 37 | { 38 | // Check input 39 | TINYAD_ASSERT_EQ(_x0.size(), _g.size()); 40 | if (_s_max <= 0.0) 41 | TINYAD_ERROR_throw("Max step size not positive."); 42 | 43 | // Also try a step size of 1.0 (if valid) 44 | const bool try_one = _s_max > 1.0; 45 | 46 | Eigen::Vector x_new = _x0; 47 | PassiveT s = _s_max; 48 | for (int i = 0; i < _max_iters; ++i) 49 | { 50 | x_new = _x0 + s * _d; 51 | const PassiveT f_new = _eval(x_new); 52 | TINYAD_ASSERT_EQ(f_new, f_new); 53 | if (armijo_condition(_f, f_new, s, _d, _g, _armijo_const)) 54 | return x_new; 55 | 56 | if (try_one && s > 1.0 && s * _shrink < 1.0) 57 | s = 1.0; 58 | else 59 | s *= _shrink; 60 | } 61 | 62 | TINYAD_WARNING("Line search couldn't find improvement. Gradient max norm is " << _g.cwiseAbs().maxCoeff()); 63 | 64 | return _x0; 65 | } 66 | 67 | } 68 | -------------------------------------------------------------------------------- /include/TinyAD/Utils/LinearSolver.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 3 | * Author: Patrick Schmidt 4 | */ 5 | #pragma once 6 | 7 | #include 8 | 9 | namespace TinyAD 10 | { 11 | 12 | template < 13 | typename PassiveT = double, 14 | typename SolverT = Eigen::SimplicialLDLT>> 15 | struct LinearSolver 16 | { 17 | SolverT solver; 18 | bool sparsity_pattern_dirty = true; 19 | }; 20 | 21 | } 22 | -------------------------------------------------------------------------------- /include/TinyAD/Utils/NewtonDecrement.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 3 | * Author: Patrick Schmidt 4 | */ 5 | #pragma once 6 | 7 | #include 8 | #include 9 | 10 | namespace TinyAD 11 | { 12 | 13 | /** 14 | * Computes (one half of the squared) Newton-Decrement. 15 | * The returned value is the difference between the current objective 16 | * value f(x) and the minimum of the quadratic approximation f(x + d). 17 | * It can be used as stopping criterion (newton_decrement(d, g) < eps) 18 | * and is affinely invariant (i.e. the same for f(x) and f(Ax + b)). 19 | */ 20 | template 21 | double newton_decrement( 22 | const Eigen::Vector& _d, 23 | const Eigen::Vector& _g) 24 | { 25 | return -0.5 * _d.dot(_g); 26 | } 27 | 28 | } 29 | -------------------------------------------------------------------------------- /include/TinyAD/Utils/NewtonDirection.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 
3 |  * Author: Patrick Schmidt
4 |  */
5 | #pragma once
6 | 
7 | #include 
8 | #include 
9 | #include 
10 | #include 
11 | #include 
12 | 
13 | namespace TinyAD
14 | {
15 | 
16 | /**
17 |  * Compute update vector d such that x + d performs a Newton step
18 |  * (i.e. minimizes the quadratic approximation at x).
19 |  * Input:
20 |  *   _g: gradient
21 |  *   _H_proj: symmetric positive-definite Hessian approximation
22 |  *   _solver: A solver that can be used over multiple iterations
23 |  *            in case the sparsity pattern of _H_proj is constant.
24 |  */
25 | template <typename PassiveT, typename SolverT>
26 | Eigen::VectorX<PassiveT> newton_direction(
27 |         const Eigen::VectorX<PassiveT>& _g,
28 |         const Eigen::SparseMatrix<PassiveT>& _H_proj,
29 |         LinearSolver<PassiveT, SolverT>& _solver,
30 |         const PassiveT& _w_identity = 0.0)
31 | {
32 |     const Eigen::SparseMatrix<PassiveT> H_reg = _w_identity * identity<PassiveT>(_g.size()) + _H_proj;
33 | 
34 |     if (_solver.sparsity_pattern_dirty)
35 |     {
36 |         _solver.solver.analyzePattern(H_reg);
37 |         _solver.sparsity_pattern_dirty = false;
38 |     }
39 | 
40 |     _solver.solver.factorize(H_reg);
41 |     const Eigen::VectorX<PassiveT> d = _solver.solver.solve(-_g);
42 | 
43 |     if (_solver.solver.info() != Eigen::Success)
44 |         TINYAD_ERROR_throw("Linear solve failed.");
45 | 
46 |     TINYAD_ASSERT_FINITE_MAT(d);
47 |     return d;
48 | }
49 | 
50 | /**
51 |  * Compute update vector d such that x + d performs a Newton step
52 |  * (i.e. minimizes the quadratic approximation at x).
53 |  * Input:
54 |  *   _g: gradient
55 |  *   _H_proj: symmetric positive definite Hessian approximation
56 |  */
57 | template <typename PassiveT>
58 | Eigen::VectorX<PassiveT> newton_direction(
59 |         const Eigen::VectorX<PassiveT>& _g,
60 |         const Eigen::SparseMatrix<PassiveT>& _H_proj,
61 |         const PassiveT& _w_identity = 0.0)
62 | {
63 |     LinearSolver<PassiveT> solver;
64 |     return newton_direction(_g, _H_proj, solver, _w_identity);
65 | }
66 | 
67 | /**
68 |  * Compute update vector d such that x + d performs a Newton step
69 |  * (i.e. minimizes the quadratic approximation at x),
70 |  * constrained to a linear subspace with known basis.
71 |  * The n-by-m matrix B maps from the subspace to the solution space,
72 |  * (i.e. d = B * d_reduced).
73 |  * For problems with constant sparsity pattern, cache _solver
74 |  * to benefit from pre-factorization.
75 |  */
76 | template <typename PassiveT, typename SolverT>
77 | Eigen::VectorX<PassiveT> newton_direction_reduced_basis(
78 |         const Eigen::VectorX<PassiveT>& _g,
79 |         const Eigen::SparseMatrix<PassiveT>& _H_proj,
80 |         const Eigen::SparseMatrix<PassiveT>& _B,
81 |         LinearSolver<PassiveT, SolverT>& _solver,
82 |         const PassiveT& _w_identity = 0.0)
83 | {
84 |     const Eigen::Index n = _B.rows();
85 |     const Eigen::Index m = _B.cols();
86 |     TINYAD_ASSERT_EQ(_g.rows(), n);
87 |     TINYAD_ASSERT_EQ(_H_proj.rows(), n);
88 |     TINYAD_ASSERT_EQ(_H_proj.cols(), n);
89 | 
90 |     const Eigen::SparseMatrix<PassiveT> H_reduced = _B.transpose() * _H_proj * _B + _w_identity * identity<PassiveT>(m);
91 |     if (_solver.sparsity_pattern_dirty)
92 |     {
93 |         _solver.solver.analyzePattern(H_reduced);
94 |         _solver.sparsity_pattern_dirty = false;
95 |     }
96 | 
97 |     _solver.solver.factorize(H_reduced);
98 |     const Eigen::VectorX<PassiveT> d_reduced = _solver.solver.solve(-_B.transpose() * _g);
99 | 
100 |     if (_solver.solver.info() != Eigen::Success)
101 |         TINYAD_ERROR_throw("Linear solve failed.");
102 | 
103 |     TINYAD_ASSERT_FINITE_MAT(d_reduced);
104 |     return _B * d_reduced;
105 | }
106 | 
107 | /**
108 |  * Compute update vector d such that x + d performs a Newton step
109 |  * (i.e. minimizes the quadratic approximation at x),
110 |  * constrained to a linear subspace with known basis.
111 |  * The n-by-m matrix B maps from the subspace to the solution space,
112 |  * (i.e. d = B * d_reduced).
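 *
 * Example (illustrative sketch of one projected-Newton iteration; func is a TinyAD::ScalarFunction,
 * x the current variable vector, B a user-provided n-by-m basis matrix):
 *   auto [f, g, H_proj] = func.eval_with_hessian_proj(x);
 *   Eigen::VectorXd d = TinyAD::newton_direction_reduced_basis(g, H_proj, B);
 *   x = TinyAD::line_search(x, d, f, g, func);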
113 | */ 114 | template 115 | Eigen::VectorX newton_direction_reduced_basis( 116 | const Eigen::VectorX& _g, 117 | const Eigen::SparseMatrix& _H_proj, 118 | const Eigen::SparseMatrix& _B, 119 | const PassiveT& _w_identity = 0.0) 120 | { 121 | LinearSolver solver; 122 | return newton_direction_reduced_basis(_g, _H_proj, _B, solver, _w_identity); 123 | } 124 | 125 | } 126 | -------------------------------------------------------------------------------- /include/TinyAD/Utils/Out.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 3 | * Author: Patrick Schmidt 4 | */ 5 | #pragma once 6 | 7 | #include 8 | #include 9 | 10 | namespace TinyAD 11 | { 12 | 13 | // /////////////////////////////////////////////////////////////////////////// 14 | // Debugging options 15 | // /////////////////////////////////////////////////////////////////////////// 16 | 17 | #define TINYAD_ENABLE_OPERATOR_LOGGING 0 18 | #define TINYAD_ENABLE_FINITE_CHECKS 0 19 | 20 | // /////////////////////////////////////////////////////////////////////////// 21 | // Assertions and debug macros 22 | // /////////////////////////////////////////////////////////////////////////// 23 | 24 | #define TINYAD_ANSI_FG_MAGENTA "\x1b[35m" 25 | #define TINYAD_ANSI_FG_YELLOW "\x1b[33m" 26 | #define TINYAD_ANSI_FG_GREEN "\x1b[32m" 27 | #define TINYAD_ANSI_FG_WHITE "\x1b[37m" 28 | #define TINYAD_ANSI_FG_RED "\x1b[31m" 29 | #define TINYAD_ANSI_RESET "\x1b[0m" 30 | 31 | 32 | #define TINYAD_INFO(str) \ 33 | { \ 34 | std::cout << TINYAD_ANSI_FG_GREEN << str << TINYAD_ANSI_RESET << std::endl; \ 35 | std::cout.flush(); \ 36 | } 37 | 38 | #define TINYAD_DEBUG_OUT(str) \ 39 | { \ 40 | std::cout << TINYAD_ANSI_FG_MAGENTA \ 41 | << "[DEBUG] " \ 42 | << str \ 43 | << TINYAD_ANSI_RESET << std::endl; \ 44 | std::cout.flush(); \ 45 | } 46 | 47 | #define TINYAD_DEBUG_VAR(var) \ 48 | { \ 49 | TINYAD_DEBUG_OUT(#var << " = " << var) \ 50 | } 51 | 52 | #define TINYAD_WARNING(str) \ 53 | { \ 54 | std::cout << TINYAD_ANSI_FG_YELLOW \ 55 | << "[WARNING] " \ 56 | << str \ 57 | << TINYAD_ANSI_RESET \ 58 | << " (in function " << __FUNCTION__ << ":" << __LINE__ \ 59 | << " in file " << __FILE__ << ")" \ 60 | << std::endl; \ 61 | std::cout.flush(); \ 62 | } 63 | 64 | #define TINYAD_ERROR(str) \ 65 | std::cout << TINYAD_ANSI_FG_RED \ 66 | << "[ERROR] " \ 67 | << str \ 68 | << TINYAD_ANSI_RESET \ 69 | << " (in function " << __FUNCTION__ << ":" << __LINE__ \ 70 | << " in file " << __FILE__ << ")" \ 71 | << std::endl 72 | 73 | #define TINYAD_ERROR_throw(st) \ 74 | { \ 75 | TINYAD_ERROR(st); \ 76 | std::stringstream str_strm; \ 77 | str_strm << "[ERROR] " << st; \ 78 | throw std::runtime_error(str_strm.str()); \ 79 | } 80 | 81 | #define TINYAD_ASSERT(exp) \ 82 | { \ 83 | if(!(exp)) TINYAD_ERROR_throw("Assertion failed: " << (#exp)); \ 84 | } 85 | 86 | #define TINYAD_ASSERT_EQ(a, b) \ 87 | { \ 88 | if((a) != (b)) TINYAD_ERROR_throw("Assertion failed: " << (a) << " == " << (b)); \ 89 | } 90 | 91 | #define TINYAD_ASSERT_NEQ(a, b) \ 92 | { \ 93 | if((a) == (b)) TINYAD_ERROR_throw("Assertion failed: " << (a) << " != " << (b)); \ 94 | } 95 | 96 | #define TINYAD_ASSERT_G(a, b) \ 97 | { \ 98 | if((a) <= (b)) TINYAD_ERROR_throw("Assertion failed: " << (a) << " > " << (b)); \ 99 | } 100 | 101 | #define TINYAD_ASSERT_GEQ(a, b) \ 102 | { \ 103 | if((a) < (b)) TINYAD_ERROR_throw("Assertion failed: " << (a) << " >= " << (b)); \ 104 | } 105 | 106 | #define TINYAD_ASSERT_L(a, b) \ 107 
| { \ 108 | if((a) >= (b)) TINYAD_ERROR_throw("Assertion failed: " << (a) << " < " << (b)); \ 109 | } 110 | 111 | #define TINYAD_ASSERT_LEQ(a, b) \ 112 | { \ 113 | if((a) > (b)) TINYAD_ERROR_throw("Assertion failed: " << (a) << " <= " << (b)); \ 114 | } 115 | 116 | #define TINYAD_ASSERT_EPS(a, b, eps) \ 117 | { \ 118 | if(std::abs((a) - (b)) >= eps) TINYAD_ERROR_throw("Assertion failed: |" << (a) << " - " << (b) << "| < " << eps); \ 119 | } 120 | 121 | #define TINYAD_ASSERT_EPS_MAT(A, B, eps) \ 122 | { \ 123 | const auto& A_ref = A; \ 124 | const auto& B_ref = B; \ 125 | TINYAD_ASSERT_EQ(A_ref.rows(), B_ref.rows()); \ 126 | TINYAD_ASSERT_EQ(A_ref.cols(), B_ref.cols()); \ 127 | for (Eigen::Index i = 0; i < A_ref.rows(); ++i) \ 128 | { \ 129 | for (Eigen::Index j = 0; j < A_ref.cols(); ++j) \ 130 | TINYAD_ASSERT_EPS(A_ref(i, j), B_ref(i, j), eps); \ 131 | } \ 132 | } 133 | 134 | #define TINYAD_ASSERT_FINITE(a) \ 135 | { \ 136 | TINYAD_ASSERT(std::isfinite(a)); \ 137 | } 138 | 139 | #define TINYAD_ASSERT_FINITE_MAT(A) \ 140 | { \ 141 | const auto& A_ref = A; \ 142 | for (Eigen::Index i = 0; i < A_ref.rows(); ++i) \ 143 | { \ 144 | for (Eigen::Index j = 0; j < A_ref.cols(); ++j) \ 145 | { \ 146 | if (!std::isfinite(A_ref(i, j))) \ 147 | TINYAD_ERROR_throw("Assertion failed: Not finite " << A_ref); \ 148 | } \ 149 | } \ 150 | } 151 | 152 | #define TINYAD_ASSERT_SYMMETRIC(A, eps) \ 153 | { \ 154 | const auto& A_ref = A; \ 155 | if (((A_ref) - (A_ref).transpose()).array().abs().maxCoeff() > eps) \ 156 | TINYAD_ERROR_throw("Matrix not symmetric"); \ 157 | } 158 | 159 | /// NAN-check for double type 160 | #if (TINYAD_ENABLE_FINITE_CHECKS) 161 | #define TINYAD_CHECK_FINITE_IF_ENABLED_d(exp) TINYAD_ASSERT_FINITE(exp); 162 | #else 163 | #define TINYAD_CHECK_FINITE_IF_ENABLED_d(exp) { } 164 | #endif 165 | 166 | /// NAN-check for TinyAD::Scalar type 167 | #if (TINYAD_ENABLE_FINITE_CHECKS) 168 | #define TINYAD_CHECK_FINITE_IF_ENABLED_AD(exp) \ 169 | { \ 170 | const auto& exp_ref = exp; \ 171 | TINYAD_ASSERT_FINITE(exp_ref.val); \ 172 | TINYAD_ASSERT_FINITE_MAT(exp_ref.grad); \ 173 | TINYAD_ASSERT_FINITE_MAT(exp_ref.Hess); \ 174 | } 175 | #else 176 | #define TINYAD_CHECK_FINITE_IF_ENABLED_AD(exp) { } 177 | #endif 178 | 179 | } 180 | -------------------------------------------------------------------------------- /include/TinyAD/Utils/Timer.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 
3 | * Author: Patrick Schmidt 4 | */ 5 | #pragma once 6 | 7 | #include 8 | #include 9 | #include 10 | 11 | namespace TinyAD 12 | { 13 | 14 | struct Timer 15 | { 16 | using Clock = std::chrono::high_resolution_clock; 17 | 18 | Timer(const std::string& _name, const bool _silent = false) : 19 | name(_name), 20 | silent(_silent), 21 | running(true), 22 | start(Clock::now()), 23 | duration(0.0) 24 | { } 25 | 26 | ~Timer() 27 | { 28 | if (!silent) 29 | { 30 | TINYAD_INFO(TINYAD_ANSI_FG_WHITE 31 | << "[TIMER] " << name << " took " 32 | << seconds() 33 | << "s."); 34 | } 35 | } 36 | 37 | void stop() 38 | { 39 | if (running) 40 | update_duration(); 41 | 42 | running = false; 43 | } 44 | 45 | double seconds() 46 | { 47 | if (running) 48 | update_duration(); 49 | 50 | return duration.count(); 51 | } 52 | 53 | private: 54 | 55 | void update_duration() 56 | { 57 | std::atomic_thread_fence(std::memory_order_relaxed); 58 | duration = Clock::now() - start; 59 | std::atomic_thread_fence(std::memory_order_relaxed); 60 | } 61 | 62 | const std::string name; 63 | const bool silent; 64 | bool running; 65 | const typename Clock::time_point start; 66 | std::chrono::duration duration; 67 | }; 68 | 69 | } 70 | -------------------------------------------------------------------------------- /include/TinyAD/Utils/ToPassive.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 3 | * Author: Patrick Schmidt 4 | */ 5 | #pragma once 6 | 7 | #include 8 | 9 | namespace TinyAD 10 | { 11 | 12 | // Include this file for a fallback no-op version of to_passive(...) 13 | // without needing to include Scalar.hh 14 | 15 | template 16 | const PassiveT& to_passive(const PassiveT& a) 17 | { 18 | return a; 19 | } 20 | 21 | } 22 | 23 | // Additional passive-type functions for which Scalar.hh 24 | // offers active overloads: 25 | 26 | template 27 | const PassiveT sqr(const PassiveT& a) 28 | { 29 | return a * a; 30 | } 31 | -------------------------------------------------------------------------------- /include/TinyAD/VectorFunction.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 3 | * Author: Patrick Schmidt 4 | */ 5 | #pragma once 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | namespace TinyAD 13 | { 14 | 15 | /** 16 | * Class implementing a differentiable vector function f: R^n -> R^m, 17 | * defined via a set of elements. Each element produces a segment of the output vector. 18 | */ 19 | template < 20 | int variable_dimension, // Number of scalar variables per handle. E.g. 2 in 2D mesh parametrization. 21 | typename PassiveT, // Internal scalar type. E.g. float or double. 22 | typename VariableHandleT> // Type of variable handles. E.g. int or OpenMesh::VertexHandle, ... 23 | struct VectorFunction 24 | { 25 | using PassiveScalarType = PassiveT; 26 | using PassiveVariableVectorType = Eigen::Vector; 27 | static constexpr bool is_vector_function = true; 28 | 29 | // Vector function is not copyable but movable 30 | VectorFunction() = default; 31 | VectorFunction(const VectorFunction&) = delete; 32 | VectorFunction(VectorFunction&& _other); 33 | VectorFunction& operator=(const VectorFunction&) = delete; 34 | VectorFunction& operator=(VectorFunction&& _other); 35 | 36 | /** 37 | * Instead of this constructor, use vector_function<..>(..), 38 | * which helps with deducing template arguments. 
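 *
 * Example (illustrative):
 *   auto func = TinyAD::vector_function<2>(TinyAD::range(n_vertices));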
39 | */ 40 | VectorFunction( 41 | std::vector _variable_handles, 42 | const EvalSettings& _settings); 43 | 44 | /** 45 | * Add a set of elements and a lambda function evaluating each element. 46 | * Each element produces a segment of the output vector. 47 | * Can be called multiple times to add different terms. 48 | */ 49 | template < 50 | int element_valence, // Number of variable handles accessed per element. 51 | int outputs_per_element, // Number of entries in the output vector produced by one element. 52 | typename ElementHandleRangeT, // Type of element handles. E.g. int or OpenMesh::Face handle, ... Deduced automatically. 53 | typename EvalElementFunction> // Type of per-element eval function. Deduced automatically. 54 | void add_elements( 55 | const ElementHandleRangeT& _element_range, 56 | EvalElementFunction _eval_element); 57 | 58 | /** 59 | * Assemble variable vector x from user data. 60 | * Uses internal index map from variable handles to entries of x. 61 | * 62 | * Pass a lambda function that takes a variable handle and returns its associated scalar values. 63 | * 64 | * Example: 65 | * Eigen::VectorXd x = func.x_from_data([&] (int v_idx) { return param.row(v_idx); }); 66 | */ 67 | Eigen::VectorX x_from_data( 68 | std::function _read_user_data) const; 69 | 70 | /** 71 | * Write variable vector x to user data. 72 | * Uses internal index map from variable handles to entries of x. 73 | * 74 | * Pass a lambda function that takes a variable handle its associated scalar values 75 | * and writes these values to the user data structure. 76 | * 77 | * Example: 78 | * func.x_to_data(x, [&] (int v_idx, const Eigen::Vector2d& p) { param.row(v_idx) = p; }); 79 | */ 80 | void x_to_data( 81 | const Eigen::VectorX& _x, 82 | std::function _write_user_data) const; 83 | 84 | /** 85 | * Evaluate function without computing derivatives. 86 | */ 87 | Eigen::VectorX eval( 88 | const Eigen::VectorX& _x) const; 89 | 90 | /** 91 | * Evaluate function without computing derivatives. 92 | */ 93 | Eigen::VectorX operator()( 94 | const Eigen::VectorX& _x) const; 95 | 96 | /** 97 | * Evaluate result vector and Jacobian matrix. 98 | * J has dimension n_outputs-by-n_variables. 99 | */ 100 | void eval_with_jacobian( 101 | const Eigen::VectorX& _x, 102 | Eigen::VectorX& _r, 103 | Eigen::SparseMatrix& _J) const; 104 | 105 | /** 106 | * Evaluate result vector and Jacobian matrix. 107 | * J has dimension n_outputs-by-n_variables. 108 | */ 109 | std::tuple, Eigen::SparseMatrix> 110 | eval_with_jacobian( 111 | const Eigen::VectorX& _x) const; 112 | 113 | /** 114 | * Evaluate result vector, Jacobian matrix, and Hessian tensor. 115 | * J has dimension n_outputs-by-n_variables. 116 | * H has dimension n_output-by-n_variables-by-n_variables. 117 | */ 118 | void eval_with_derivatives( 119 | const Eigen::VectorX& _x, 120 | Eigen::VectorX& _r, 121 | Eigen::SparseMatrix& _J, 122 | std::vector>& _H) const; 123 | 124 | /** 125 | * Evaluate result vector, Jacobian matrix, and Hessian tensor. 126 | * J has dimension n_outputs-by-n_variables. 127 | * H has dimension n_output-by-n_variables-by-n_variables. 128 | */ 129 | std::tuple, Eigen::SparseMatrix, std::vector>> 130 | eval_with_derivatives( 131 | const Eigen::VectorX& _x) const; 132 | 133 | /** 134 | * Evaluate f(x) = sum_i (r_i(x))^2 without derivatives. 135 | */ 136 | PassiveT eval_sum_of_squares( 137 | const Eigen::VectorX& _x) const; 138 | 139 | /** 140 | * Evaluate f(x) = sum_i (r_i(x))^2. 141 | * Returns the gradient of f. 
142 | * Returns the vector of residuals r, of size n_elements. 143 | * Returns Jacobian matrix J with gradients as rows, 144 | * i.e. J_ij = d r_i / d x_j. 145 | * J has dimension n_outputs-by-n_variables. 146 | */ 147 | void eval_sum_of_squares_with_derivatives( 148 | const Eigen::VectorX& _x, 149 | PassiveT& _f, 150 | Eigen::VectorX& _g, 151 | Eigen::VectorX& _r, 152 | Eigen::SparseMatrix& _J) const; 153 | 154 | /** 155 | * Evaluate f(x) = sum_i (r_i(x))^2. 156 | * Returns the gradient of f. 157 | * Returns the vector of residuals r, of size n_elements. 158 | * Returns Jacobian matrix J with gradients as rows, 159 | * i.e. J_ij = d r_i / d x_j. 160 | * J has dimension n_outputs-by-n_variables. 161 | */ 162 | std::tuple, Eigen::VectorX, Eigen::SparseMatrix> 163 | eval_sum_of_squares_with_derivatives( 164 | const Eigen::VectorX& _x) const; 165 | 166 | /** 167 | * Change settings before calling eval(..). 168 | * 169 | * Example: 170 | * func.settings.n_threads = 4; 171 | */ 172 | EvalSettings settings; 173 | 174 | /** 175 | * Number of scalar variables (size of variable vector x). 176 | * This is variable_dimension * #variable_handles. 177 | */ 178 | Eigen::Index n_vars = 0; 179 | 180 | /** 181 | * Current number of elements 182 | */ 183 | Eigen::Index n_elements = 0; 184 | 185 | /** 186 | * Current number of outputs 187 | */ 188 | Eigen::Index n_outputs = 0; 189 | 190 | std::vector variable_handles; 191 | std::vector>> objective_terms; 192 | }; 193 | 194 | /** 195 | * Use this to construct vector functions. 196 | * Automatically deduces variable handle type. 197 | */ 198 | template < 199 | int variable_dimension, // Number of scalar variables per handle. 200 | typename PassiveT = double, // Internal scalar type. E.g. float or double. 201 | typename VariableRangeT> // Range type of variable handles. E.g. std::vector. Deduced automatically. 202 | auto vector_function( 203 | const VariableRangeT& _variable_range, 204 | const EvalSettings& _settings = EvalSettings()); 205 | 206 | } 207 | 208 | #define TINYAD_VectorFunction_DEFINED 209 | #include 210 | -------------------------------------------------------------------------------- /scripts/compare_build_time.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | setlocal enabledelayedexpansion 3 | 4 | :: Define variables 5 | set REPO_DIR=%~dp0.. 
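:: Note: %~dp0 expands to the drive and directory of this script (scripts\), so REPO_DIR points to the repository root.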
6 | set BUILD_DIR=build-timing 7 | set CSV_FILE=build_times.csv 8 | set BUILD_TYPE=Release 9 | set NUM_CORES=%NUMBER_OF_PROCESSORS% 10 | 11 | :: Visual Studio environment setup 12 | :: Detect Visual Studio installation (adjust paths as needed for your system) 13 | if exist "C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Auxiliary\Build\vcvarsall.bat" ( 14 | set VS_ENV="C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Auxiliary\Build\vcvarsall.bat" 15 | set VS_VERSION=2022 16 | ) else if exist "C:\Program Files\Microsoft Visual Studio\2022\Professional\VC\Auxiliary\Build\vcvarsall.bat" ( 17 | set VS_ENV="C:\Program Files\Microsoft Visual Studio\2022\Professional\VC\Auxiliary\Build\vcvarsall.bat" 18 | set VS_VERSION=2022 19 | ) else if exist "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" ( 20 | set VS_ENV="C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" 21 | set VS_VERSION=2022 22 | ) else if exist "C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvarsall.bat" ( 23 | set VS_ENV="C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvarsall.bat" 24 | set VS_VERSION=2019 25 | ) else if exist "C:\Program Files (x86)\Microsoft Visual Studio\2019\Professional\VC\Auxiliary\Build\vcvarsall.bat" ( 26 | set VS_ENV="C:\Program Files (x86)\Microsoft Visual Studio\2019\Professional\VC\Auxiliary\Build\vcvarsall.bat" 27 | set VS_VERSION=2019 28 | ) else if exist "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" ( 29 | set VS_ENV="C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" 30 | set VS_VERSION=2019 31 | ) else if exist "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Auxiliary\Build\vcvarsall.bat" ( 32 | set VS_ENV="C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Auxiliary\Build\vcvarsall.bat" 33 | set VS_VERSION=2017 34 | ) else ( 35 | echo Error: Could not find Visual Studio vcvarsall.bat 36 | echo Please update the script with the correct path to your Visual Studio installation 37 | exit /b 1 38 | ) 39 | 40 | :: Get branch names from command line or use defaults 41 | if "%~1"=="" ( 42 | set BRANCH1=main 43 | ) else ( 44 | set BRANCH1=%~1 45 | ) 46 | 47 | if "%~2"=="" ( 48 | set BRANCH2=deferred-lambda-instantiation 49 | ) else ( 50 | set BRANCH2=%~2 51 | ) 52 | 53 | :: Special test mode 54 | if /i "%BRANCH1%"=="test" ( 55 | call :RunTestMode 56 | goto :eof 57 | ) 58 | 59 | :: Initialize CSV file 60 | echo Branch,Build Time (seconds) > %CSV_FILE% 61 | echo Starting build time comparison between %BRANCH1% and %BRANCH2%... 62 | echo Build configuration: %BUILD_TYPE% mode using Ninja with parallel builds (%NUM_CORES% cores) 63 | echo Using Visual Studio %VS_VERSION% toolchain 64 | 65 | :: Build first branch 66 | call :BuildBranch %BRANCH1% 67 | if errorlevel 1 goto :error 68 | 69 | :: Build second branch 70 | call :BuildBranch %BRANCH2% 71 | if errorlevel 1 goto :error 72 | 73 | :: Show results 74 | echo. 75 | echo =================================== 76 | echo Results: 77 | echo =================================== 78 | echo. 79 | type %CSV_FILE% 80 | echo. 81 | echo Build times written to %CSV_FILE% 82 | 83 | goto :eof 84 | 85 | :error 86 | echo. 
87 | echo Error occurred during build process 88 | exit /b 1 89 | 90 | :RunTestMode 91 | echo Running in test mode - simulating builds 92 | echo Branch,Build Time (seconds) > %CSV_FILE% 93 | echo main,12.45 >> %CSV_FILE% 94 | echo feature,9.87 >> %CSV_FILE% 95 | echo. 96 | echo Test results: 97 | type %CSV_FILE% 98 | exit /b 0 99 | 100 | :BuildBranch 101 | set BRANCH=%~1 102 | echo. 103 | echo =================================== 104 | echo Building branch: %BRANCH% 105 | echo =================================== 106 | echo. 107 | 108 | :: Checkout branch 109 | echo Checking out branch %BRANCH%... 110 | git -C "%REPO_DIR%" checkout %BRANCH% 111 | if errorlevel 1 ( 112 | echo Error: Failed to checkout branch %BRANCH% 113 | exit /b 1 114 | ) 115 | 116 | :: Clean and create build directory 117 | echo Cleaning previous build artifacts... 118 | if exist "%REPO_DIR%\%BUILD_DIR%" rmdir /s /q "%REPO_DIR%\%BUILD_DIR%" 119 | mkdir "%REPO_DIR%\%BUILD_DIR%" 120 | 121 | :: Set up Visual Studio environment (x64 architecture) 122 | echo Setting up Visual Studio %VS_VERSION% environment for x64 architecture... 123 | call %VS_ENV% x64 124 | if errorlevel 1 ( 125 | echo Error: Failed to set up Visual Studio environment 126 | exit /b 1 127 | ) 128 | 129 | :: Configure with CMake using Ninja and Release mode 130 | echo Running CMake with Ninja generator in %BUILD_TYPE% mode... 131 | cmake -S "%REPO_DIR%" -B "%REPO_DIR%\%BUILD_DIR%" -G "Ninja" -DCMAKE_BUILD_TYPE=%BUILD_TYPE% -DTINYAD_UNIT_TESTS=On 132 | if errorlevel 1 ( 133 | echo Error: CMake configuration failed 134 | exit /b 1 135 | ) 136 | 137 | :: Build and measure time 138 | echo Building with Ninja using %NUM_CORES% parallel jobs... 139 | set START_TIME=!time! 140 | 141 | cmake --build "%REPO_DIR%\%BUILD_DIR%" --config %BUILD_TYPE% -- -j %NUM_CORES% 142 | if errorlevel 1 ( 143 | echo Error: Build failed 144 | exit /b 1 145 | ) 146 | 147 | set END_TIME=!time! 148 | 149 | :: Calculate elapsed time 150 | call :CalculateElapsedTime "!START_TIME!" "!END_TIME!" 151 | echo %BRANCH%,!ELAPSED_TIME! >> %CSV_FILE% 152 | echo Build of %BRANCH% completed in !ELAPSED_TIME! seconds 153 | exit /b 0 154 | 155 | :CalculateElapsedTime 156 | :: Get start and end times 157 | set START=%~1 158 | set END=%~2 159 | 160 | :: Parse hours, minutes, seconds, centiseconds 161 | for /f "tokens=1-4 delims=:,. " %%a in ("%START%") do ( 162 | set /a START_H=%%a 163 | set /a START_M=%%b 164 | set /a START_S=%%c 165 | set /a START_CS=%%d 166 | ) 167 | 168 | for /f "tokens=1-4 delims=:,. 
" %%a in ("%END%") do ( 169 | set /a END_H=%%a 170 | set /a END_M=%%b 171 | set /a END_S=%%c 172 | set /a END_CS=%%d 173 | ) 174 | 175 | :: Convert to centiseconds 176 | set /a START_TOTAL=(START_H*360000)+(START_M*6000)+(START_S*100)+START_CS 177 | set /a END_TOTAL=(END_H*360000)+(END_M*6000)+(END_S*100)+END_CS 178 | 179 | :: Handle midnight crossing 180 | if %END_TOTAL% LSS %START_TOTAL% set /a END_TOTAL+=8640000 181 | 182 | :: Calculate difference in seconds with 2 decimal places 183 | set /a DIFF=%END_TOTAL%-%START_TOTAL% 184 | set /a DIFF_SEC=%DIFF%/100 185 | set /a DIFF_CS=%DIFF%%%100 186 | 187 | if %DIFF_CS% LSS 10 ( 188 | set ELAPSED_TIME=%DIFF_SEC%.0%DIFF_CS% 189 | ) else ( 190 | set ELAPSED_TIME=%DIFF_SEC%.%DIFF_CS% 191 | ) 192 | 193 | exit /b -------------------------------------------------------------------------------- /tests/ComplexTest.cc: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 3 | * Author: Patrick Schmidt 4 | */ 5 | #define _SILENCE_NONFLOATING_COMPLEX_DEPRECATION_WARNING 6 | #include 7 | #include 8 | #include 9 | 10 | template 11 | void test_complex(const PassiveT _eps) 12 | { 13 | using ScalarT = TinyAD::Scalar<4, PassiveT, with_hessian>; 14 | 15 | std::complex a(ScalarT(1, 0), ScalarT(2, 1)); 16 | std::complex b(ScalarT(3, 2), ScalarT(4, 3)); 17 | 18 | { // Addition 19 | std::complex c = a + b; 20 | 21 | TINYAD_ASSERT_EPS(c.real().val, 4.0, _eps); 22 | TINYAD_ASSERT_EPS(c.imag().val, 6.0, _eps); 23 | } 24 | 25 | { // Subtraction 26 | std::complex c = a - b; 27 | 28 | TINYAD_ASSERT_EPS(c.real().val, -2.0, _eps); 29 | TINYAD_ASSERT_EPS(c.imag().val, -2.0, _eps); 30 | } 31 | 32 | { // Multiplication 33 | std::complex c = a * b; 34 | 35 | TINYAD_ASSERT_EPS(c.real().val, -5.0, _eps); 36 | TINYAD_ASSERT_EPS(c.imag().val, 10.0, _eps); 37 | 38 | TINYAD_ASSERT_EPS(c.real().grad(0), 3.0, _eps); 39 | TINYAD_ASSERT_EPS(c.imag().grad(0), 4.0, _eps); 40 | TINYAD_ASSERT_EPS(c.real().grad(1), -4.0, _eps); 41 | TINYAD_ASSERT_EPS(c.imag().grad(1), 3.0, _eps); 42 | TINYAD_ASSERT_EPS(c.real().grad(2), 1.0, _eps); 43 | TINYAD_ASSERT_EPS(c.imag().grad(2), 2.0, _eps); 44 | TINYAD_ASSERT_EPS(c.real().grad(3), -2.0, _eps); 45 | TINYAD_ASSERT_EPS(c.imag().grad(3), 1.0, _eps); 46 | 47 | if constexpr (with_hessian) 48 | { 49 | Eigen::Matrix4 H_real; 50 | H_real << 0.0, 0.0, 1.0, 0.0, 51 | 0.0, 0.0, 0.0, -1.0, 52 | 1.0, 0.0, 0.0, 0.0, 53 | 0.0, -1.0, 0.0, 0.0; 54 | Eigen::Matrix4 H_imag; 55 | H_imag << 0.0, 0.0, 0.0, 1.0, 56 | 0.0, 0.0, 1.0, 0.0, 57 | 0.0, 1.0, 0.0, 0.0, 58 | 1.0, 0.0, 0.0, 0.0; 59 | 60 | TINYAD_ASSERT_L((c.real().Hess - H_real).cwiseAbs().maxCoeff(), _eps); 61 | } 62 | } 63 | 64 | { // Division 65 | std::complex c = a / b; 66 | 67 | TINYAD_ASSERT_EPS(c.real().val, 11.0 / 25.0, _eps); 68 | TINYAD_ASSERT_EPS(c.imag().val, 2.0 / 25.0, _eps); 69 | 70 | TINYAD_ASSERT_EPS(c.real().grad(0), 3.0 / 25.0, _eps); 71 | TINYAD_ASSERT_EPS(c.imag().grad(0), -4.0 / 25.0, _eps); 72 | TINYAD_ASSERT_EPS(c.real().grad(1), 4.0 / 25.0, _eps); 73 | TINYAD_ASSERT_EPS(c.imag().grad(1), 3.0 / 25.0, _eps); 74 | TINYAD_ASSERT_EPS(c.real().grad(2), -41.0 / 625.0, _eps); 75 | TINYAD_ASSERT_EPS(c.imag().grad(2), 38.0 / 625.0, _eps); 76 | TINYAD_ASSERT_EPS(c.real().grad(3), -38.0 / 625.0, _eps); 77 | TINYAD_ASSERT_EPS(c.imag().grad(3), -41.0 / 625.0, _eps); 78 | 79 | // TODO: Check Hessian 80 | } 81 | 82 | { // Conjugate 83 | std::complex c = conj(a); 84 | 85 | TINYAD_ASSERT_EQ(c.real().val, 
a.real().val); 86 | TINYAD_ASSERT_EQ(c.imag().val, -a.imag().val); 87 | 88 | TINYAD_ASSERT_EQ(c.real().grad, a.real().grad); 89 | TINYAD_ASSERT_EQ(c.imag().grad, -a.imag().grad); 90 | 91 | if constexpr (with_hessian) 92 | { 93 | TINYAD_ASSERT_EQ(c.real().Hess, a.real().Hess); 94 | TINYAD_ASSERT_EQ(c.imag().Hess, -a.imag().Hess); 95 | } 96 | } 97 | 98 | { // Abs 99 | ScalarT length = abs(a); 100 | ScalarT length_ref = hypot(a.real(), a.imag()); 101 | 102 | TINYAD_ASSERT_EPS(length.val, length_ref.val, _eps); 103 | TINYAD_ASSERT_L((length.grad - length_ref.grad).cwiseAbs().maxCoeff(), _eps); 104 | 105 | if constexpr (with_hessian) 106 | TINYAD_ASSERT_L((length.Hess - length_ref.Hess).cwiseAbs().maxCoeff(), _eps); 107 | } 108 | 109 | { // Arg 110 | ScalarT angle = arg(a); 111 | ScalarT angle_ref = atan2(a.imag(), a.real()); 112 | 113 | TINYAD_ASSERT_EPS(angle.val, angle_ref.val, _eps); 114 | TINYAD_ASSERT_L((angle.grad - angle_ref.grad).cwiseAbs().maxCoeff(), _eps); 115 | 116 | if constexpr (with_hessian) 117 | TINYAD_ASSERT_L((angle.Hess - angle_ref.Hess).cwiseAbs().maxCoeff(), _eps); 118 | } 119 | } 120 | 121 | TEST(ComplexTest, ComplexFloatFirstOrder) { test_complex(1e-6f); } 122 | TEST(ComplexTest, ComplexDoubleFirstOrder) { test_complex(1e-12); } 123 | TEST(ComplexTest, ComplexLongDoubleFirstOrder) { test_complex(1e-15); } 124 | TEST(ComplexTest, ComplexFloatSecondOrder) { test_complex(1e-6f); } 125 | TEST(ComplexTest, ComplexDoubleSecondOrder) { test_complex(1e-12); } 126 | TEST(ComplexTest, ComplexLongDoubleSecondOrder) { test_complex(1e-15); } 127 | -------------------------------------------------------------------------------- /tests/CustomDerivativesTest.cc: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 3 | * Author: Patrick Schmidt 4 | */ 5 | #include 6 | #include 7 | 8 | using ADouble = TinyAD::Double<1>; 9 | 10 | double plus(const double a, const double b) 11 | { 12 | return a + b; 13 | } 14 | 15 | ADouble plus(const ADouble& a, const ADouble& b) 16 | { 17 | return ADouble::known_derivatives( 18 | a.val + b.val, 19 | a.grad + b.grad, 20 | a.Hess + b.Hess); 21 | } 22 | 23 | TEST(CustomDerivativesTest, Plus) 24 | { 25 | // a(x) = x^2 + x + 2 at x=1 26 | // b(x) = x^3 - x^2 at x=1 27 | ADouble a = ADouble::known_derivatives(4.0, 3.0, 2.0); 28 | ADouble b = ADouble::known_derivatives(0.0, 1.0, 4.0); 29 | 30 | const ADouble f = plus(a, b); 31 | ASSERT_NEAR(f.val, 4.0, 1e-12); 32 | ASSERT_NEAR(f.grad(0), 4.0, 1e-12); 33 | ASSERT_NEAR(f.Hess(0, 0), 6.0, 1e-12); 34 | } 35 | -------------------------------------------------------------------------------- /tests/DeferredLambdaTest.cc: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 3 | * Author: Patrick Schmidt 4 | */ 5 | #include "Meshes.hh" 6 | #include 7 | #include 8 | #include 9 | 10 | template 11 | void test_compilation_time() 12 | { 13 | // Load test mesh 14 | Eigen::MatrixX V_rest; 15 | Eigen::MatrixX V; 16 | Eigen::MatrixXi F; 17 | std::vector b; 18 | std::vector> bc; 19 | planar_test_mesh(V_rest, V, F, b, bc); 20 | 21 | // 2D variables 22 | auto func = TinyAD::scalar_function<2, PassiveT>(TinyAD::range(V.rows())); 23 | 24 | // Add symmetric Dirichlet energy term. 25 | // 4 elements using 3 variable handles each. 
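// The lambda below maps each rest-pose triangle (ar, br, cr) to its deformed triangle (a, b, c)
// and returns the symmetric Dirichlet energy ||M * Mr^-1||^2 + ||Mr * M^-1||^2, averaged over the
// number of faces, with infinity for degenerate or inverted (non-positively oriented) elements.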
26 | func.template add_elements<3>(TinyAD::range(F.rows()), [&] (auto& element) -> TINYAD_SCALAR_TYPE(element) 27 | { 28 | using T = TINYAD_SCALAR_TYPE(element); 29 | Eigen::Vector ar = V_rest.row(F(element.handle, 0)); 30 | Eigen::Vector br = V_rest.row(F(element.handle, 1)); 31 | Eigen::Vector cr = V_rest.row(F(element.handle, 2)); 32 | Eigen::Matrix Mr = TinyAD::col_mat(br - ar, cr - ar); 33 | Eigen::Vector a = element.variables(F(element.handle, 0)); 34 | Eigen::Vector b = element.variables(F(element.handle, 1)); 35 | Eigen::Vector c = element.variables(F(element.handle, 2)); 36 | Eigen::Matrix M = TinyAD::col_mat(b - a, c - a); 37 | 38 | if (M.determinant() <= 0.0) 39 | return INFINITY; 40 | 41 | return ((M * Mr.inverse()).squaredNorm() + (Mr * M.inverse()).squaredNorm()) / (PassiveT)F.rows(); 42 | }); 43 | 44 | // Don't call any of the eval_* functions. 45 | // After introducing deferred lambda instantiation, this should compile very fast. 46 | } 47 | 48 | TEST(DeferredLambdaTest, 2DDeformationFloat) { test_compilation_time(); } 49 | TEST(DeferredLambdaTest, 2DDeformationDouble) { test_compilation_time(); } 50 | TEST(DeferredLambdaTest, 2DDeformationLongDouble) { test_compilation_time(); } -------------------------------------------------------------------------------- /tests/DynamicElementsTest.cc: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 3 | * Author: Patrick Schmidt 4 | */ 5 | #include 6 | #include 7 | #include "Meshes.hh" 8 | 9 | TEST(DynamicElementsTest, DynamicElementsTest) 10 | { 11 | auto func = TinyAD::scalar_function<2>(TinyAD::range(4)); 12 | 13 | // Instantiate two element groups with valences 3 and 1 14 | func.add_elements_dynamic<3, 1>(TinyAD::range(4), [&] (auto& element) -> TINYAD_SCALAR_TYPE(element) 15 | { 16 | using T = TINYAD_SCALAR_TYPE(element); 17 | int e = (int)element.handle; 18 | 19 | // Element e accesses e many variables 20 | Eigen::Vector2 sum = Eigen::Vector2::Zero(); 21 | for (int v = 0; v < e; ++v) 22 | sum += element.variables(v); 23 | 24 | return sum.squaredNorm(); 25 | }); 26 | 27 | ASSERT_EQ(func.objective_terms.size(), 2); 28 | ASSERT_EQ(func.objective_terms[0]->n_elements(), 2); 29 | ASSERT_EQ(func.objective_terms[1]->n_elements(), 2); 30 | 31 | Eigen::VectorXd x = Eigen::VectorXd::Ones(4 * 2); 32 | auto [f, g, H_proj] = func.eval_with_hessian_proj(x); 33 | } 34 | 35 | namespace 36 | { 37 | 38 | Eigen::SparseMatrix reference_laplace( 39 | const Eigen::MatrixXd& V, 40 | const Eigen::MatrixXi& F) 41 | { 42 | Eigen::SparseMatrix L(V.rows(), V.rows()); 43 | for (int f = 0; f < F.rows(); ++f) 44 | { 45 | for (int i = 0; i < 3; ++i) 46 | { 47 | int v1 = F(f, i); 48 | int v2 = F(f, (i + 1) % 3); 49 | L.coeffRef(v1, v1) += 1.0; 50 | L.coeffRef(v1, v2) -= 1.0; 51 | } 52 | } 53 | 54 | L.makeCompressed(); 55 | return L; 56 | } 57 | 58 | } 59 | 60 | TEST(DynamicElementsTest, LaplaceTest) 61 | { 62 | // Load bunny mesh (closed surface) 63 | Eigen::MatrixXd V; 64 | Eigen::MatrixXi F; 65 | bunny_closed_mesh(V, F); 66 | 67 | // Compute reference Laplacian 68 | Eigen::SparseMatrix L_ref = reference_laplace(V, F); 69 | L_ref.makeCompressed(); 70 | 71 | // Compute Laplacian as Hessian of Dirichlet energy using static triangle elements 72 | { 73 | auto func = TinyAD::scalar_function<1>(TinyAD::range(V.rows())); 74 | func.add_elements<3>(TinyAD::range(F.rows()), [&] (auto& element) 75 | { 76 | using T = TINYAD_SCALAR_TYPE(element); 77 | int f = 
(int)element.handle; 78 | T a = element.variable(F(f, 0)); 79 | T b = element.variable(F(f, 1)); 80 | T c = element.variable(F(f, 2)); 81 | return 0.25 * (sqr(b - a) + sqr(c - b) + sqr(a - c)); 82 | }); 83 | 84 | Eigen::SparseMatrix L_tri = func.eval_hessian_of_quadratic(); 85 | L_tri.makeCompressed(); 86 | 87 | ASSERT_EQ(L_tri.rows(), L_ref.rows()); 88 | ASSERT_EQ(L_tri.cols(), L_ref.cols()); 89 | ASSERT_EQ(L_tri.nonZeros(), L_ref.nonZeros()); 90 | ASSERT_NEAR((L_tri - L_ref).norm(), 0.0, 1e-12) << "Laplacian mismatch!"; 91 | } 92 | 93 | // Compute Laplacian as Hessian of Dirichlet energy using dynamic vertex elements. 94 | { 95 | // Compute vertex to neighbor mapping 96 | std::vector> vertex_to_neighbors(V.rows()); 97 | for (int f = 0; f < F.rows(); ++f) 98 | { 99 | for (int i = 0; i < 3; ++i) 100 | { 101 | int v1 = F(f, i); 102 | int v2 = F(f, (i + 1) % 3); 103 | vertex_to_neighbors[v1].push_back(v2); 104 | } 105 | } 106 | 107 | // Output distribution of vertex valences 108 | std::map valence_histogram; 109 | for (const auto& neighbors : vertex_to_neighbors) 110 | ++valence_histogram[(int)neighbors.size()]; 111 | for (const auto& [valence, count] : valence_histogram) 112 | TINYAD_INFO("Valence " << valence << ": " << count << " vertices"); 113 | 114 | // We compile code for one-ring elements of sizes 6, 7, 8, and 10. 115 | // The maxiumum vertex valence in this mesh is 9. 116 | // Including the vertex itself, this gives a maximum element valence of 10. 117 | auto func = TinyAD::scalar_function<1>(TinyAD::range(V.rows())); 118 | func.add_elements_dynamic<6, 7, 8, 10>(TinyAD::range(V.rows()), [&] (auto& element) 119 | { 120 | using T = TINYAD_SCALAR_TYPE(element); 121 | int v = (int)element.handle; 122 | 123 | T v_val = element.variable(v); 124 | std::vector neigh_vals(vertex_to_neighbors[v].size()); 125 | for (int i = 0; i < vertex_to_neighbors[v].size(); ++i) 126 | neigh_vals[i] = element.variable(vertex_to_neighbors[v][i]); 127 | 128 | T dirichlet = 0.0; 129 | for (int i = 0; i < neigh_vals.size(); ++i) 130 | dirichlet += 0.25 * sqr(v_val - neigh_vals[i]); 131 | 132 | return dirichlet; 133 | }); 134 | 135 | Eigen::SparseMatrix L_vert = func.eval_hessian_of_quadratic(); 136 | L_vert.makeCompressed(); 137 | 138 | ASSERT_EQ(L_vert.rows(), L_ref.rows()); 139 | ASSERT_EQ(L_vert.cols(), L_ref.cols()); 140 | // ASSERT_EQ(L_vert.nonZeros(), L_ref.nonZeros()); // NNZ are different, we add a bunch of extra "zeros"! Consider pruning the resulting Hessian. 141 | ASSERT_NEAR((L_vert - L_ref).norm(), 0.0, 1e-12) << "Laplacian mismatch!"; 142 | } 143 | } -------------------------------------------------------------------------------- /tests/EigenTest.cc: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 3 | * Author: Patrick Schmidt 4 | */ 5 | #include 6 | #include 7 | 8 | TEST(EigenTest, Auto) 9 | { 10 | auto func = TinyAD::scalar_function<1>(TinyAD::range(1)); 11 | func.add_elements<1>(TinyAD::range(1), [] (auto& element) -> TINYAD_SCALAR_TYPE(element) 12 | { 13 | // auto v = element.variables(0); 14 | // auto nv = -v.normalized(); 15 | // return nv[0]; 16 | 17 | // The above line fails because an expression template keeps a reference to temporary type. 18 | // Never use auto on the left-hand side of Eigen expressions. 19 | // See https://eigen.tuxfamily.org/dox/TopicPitfalls.html#TopicPitfalls_auto_keyword. 
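// (Concretely: v.normalized() returns a temporary vector, and the unary minus wraps
// it in an expression template that merely references that temporary. Binding the
// expression to auto keeps it alive past the temporary's lifetime, so reading nv[0]
// would access destroyed data.)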
20 | // 21 | // Instead, write: 22 | 23 | using T = TINYAD_SCALAR_TYPE(element); 24 | Eigen::Vector v = element.variables(0); 25 | Eigen::Vector nv = -v.normalized(); 26 | return nv[0]; 27 | }); 28 | 29 | const double f = func.eval(Eigen::Vector(1.0)); 30 | const double eps = 1e-9; 31 | ASSERT_NEAR(f, -1.0, eps); 32 | } 33 | -------------------------------------------------------------------------------- /tests/ElementTest.cc: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 3 | * Author: Patrick Schmidt 4 | */ 5 | #include 6 | #include 7 | #include 8 | 9 | TEST(ElementTest, ElementTest) 10 | { 11 | auto sf = TinyAD::scalar_function<1>(TinyAD::range(1)); 12 | auto vf = TinyAD::vector_function<1>(TinyAD::range(1)); 13 | 14 | // auto reference. Should compile 15 | sf.add_elements<1>(TinyAD::range(1), [] (auto& element) { return 0.0; }); 16 | 17 | // auto value. Should NOT compile 18 | // sf.add_elements<1>(TinyAD::range(1), [] (auto element) { return 0.0; }); 19 | 20 | // auto reference. Should compile 21 | vf.add_elements<1, 2>(TinyAD::range(1), [] (auto& element) { return Eigen::Vector2d(0.0, 0.0); }); 22 | 23 | // auto value. Should NOT compile 24 | // vf.add_elements<1, 2>(TinyAD::range(1), [] (auto element) { return Eigen::Vector2d(0.0, 0.0); }); 25 | } 26 | 27 | TEST(ElementTest, VariablesPassive) 28 | { 29 | auto sf = TinyAD::scalar_function<1>(TinyAD::range(2)); 30 | sf.add_elements<1>(TinyAD::range(1), [] (auto& element) 31 | { 32 | using T = TINYAD_SCALAR_TYPE(element); 33 | const Eigen::Vector a = element.variables_passive(0); 34 | const Eigen::Vector b = element.variables_passive(1); 35 | const double c = element.variable_passive(0); 36 | const double d = element.variable_passive(1); 37 | return element.variable(0) + a.norm() + b.norm() + c + d; 38 | }); 39 | sf.eval(Eigen::Vector2d::Zero()); 40 | sf.eval_with_derivatives(Eigen::Vector2d::Zero()); 41 | 42 | auto vf = TinyAD::vector_function<1>(TinyAD::range(2)); 43 | vf.add_elements<1, 1>(TinyAD::range(1), [] (auto& element) 44 | { 45 | using T = TINYAD_SCALAR_TYPE(element); 46 | const Eigen::Vector a = element.variables_passive(0); 47 | const Eigen::Vector b = element.variables_passive(1); 48 | const double c = element.variable_passive(0); 49 | const double d = element.variable_passive(1); 50 | return Eigen::Vector (element.variable(0) + a.norm() + b.norm() + c + d); 51 | }); 52 | vf.eval(Eigen::Vector2d::Zero()); 53 | vf.eval_with_jacobian(Eigen::Vector2d::Zero()); 54 | } 55 | -------------------------------------------------------------------------------- /tests/ExceptionTest.cc: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 
3 | * Author: Patrick Schmidt 4 | */ 5 | #include 6 | #include 7 | 8 | TEST(ExceptionTest, ExceptionTest) 9 | { 10 | const int n_elements = 10; 11 | bool throw_exception = false; 12 | 13 | using Vector1 = Eigen::Matrix; 14 | auto func = TinyAD::scalar_function<1>(TinyAD::range(1)); 15 | func.template add_elements<1>(TinyAD::range(n_elements), [&] (auto& element) -> TINYAD_SCALAR_TYPE(element) 16 | { 17 | element.variables(0); 18 | 19 | if (throw_exception) 20 | throw std::runtime_error("Exception in parallel section"); 21 | else 22 | return 1.0; 23 | }); 24 | 25 | throw_exception = true; 26 | bool caught_first_exception = false; 27 | bool caught_second_exception = false; 28 | 29 | try 30 | { 31 | auto [f, g, H] = func.eval_with_hessian_proj(Vector1(0.0)); 32 | } 33 | catch (const std::exception&) 34 | { 35 | caught_first_exception = true; 36 | } 37 | 38 | try 39 | { 40 | auto [f, g, H] = func.eval_with_hessian_proj(Vector1(0.0)); 41 | } 42 | catch (const std::exception&) 43 | { 44 | caught_second_exception = true; 45 | } 46 | 47 | TINYAD_ASSERT(caught_first_exception); 48 | TINYAD_ASSERT(caught_second_exception); 49 | 50 | // Make sure we can still proceed normally 51 | throw_exception = false; 52 | auto [f, g, H] = func.eval_with_hessian_proj(Vector1(0.0)); 53 | TINYAD_ASSERT_EPS(f, n_elements, 1e-12); 54 | } 55 | -------------------------------------------------------------------------------- /tests/GaussNewtonTest.cc: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 3 | * Author: Patrick Schmidt 4 | */ 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include "Meshes.hh" 12 | 13 | template 14 | Eigen::VectorX test_2d_deformation_gauss_newton( 15 | const PassiveT& _eps) 16 | { 17 | Eigen::MatrixX V_rest; 18 | Eigen::MatrixX V; 19 | Eigen::MatrixXi F; 20 | std::vector b; 21 | std::vector> bc; 22 | planar_test_mesh(V_rest, V, F, b, bc); 23 | 24 | // 6 2D variables 25 | // Distortion: 26 | // 4 triangle elements. 27 | // Each triangle element uses 3 vertices. 28 | // Each triangle element creates 8 residuals (one per Jacobian (or inverse Jacobian) entry). 29 | // Position penalty: 30 | // 2 vertex elements. 31 | // Each vertex element uses 1 vertex. 
32 | // Each vertex element creates 2 residuals (one per coordinate) 33 | auto func_sos = TinyAD::vector_function<2, PassiveT>(TinyAD::range(V.rows())); 34 | func_sos.template add_elements<3, 8>(TinyAD::range(F.rows()), [&] (auto& element) -> TINYAD_VECTOR_TYPE(element) 35 | { 36 | using T = TINYAD_SCALAR_TYPE(element); 37 | Eigen::Index f_idx = element.handle; 38 | Eigen::Vector ar = V_rest.row(F(f_idx, 0)); 39 | Eigen::Vector br = V_rest.row(F(f_idx, 1)); 40 | Eigen::Vector cr = V_rest.row(F(f_idx, 2)); 41 | Eigen::Matrix Mr = TinyAD::col_mat(br - ar, cr - ar); 42 | Eigen::Vector a = element.variables(F(f_idx, 0)); 43 | Eigen::Vector b = element.variables(F(f_idx, 1)); 44 | Eigen::Vector c = element.variables(F(f_idx, 2)); 45 | Eigen::Matrix M = TinyAD::col_mat(b - a, c - a); 46 | 47 | if (M.determinant() <= 0.0) 48 | return Eigen::Vector::Constant(INFINITY); 49 | 50 | Eigen::Matrix2 J = M * Mr.inverse(); 51 | Eigen::Matrix2 J_inv = Mr * M.inverse(); 52 | 53 | Eigen::Vector E; 54 | E(0) = J(0, 0); 55 | E(1) = J(0, 1); 56 | E(2) = J(1, 0); 57 | E(3) = J(1, 1); 58 | E(4) = J_inv(0, 0); 59 | E(5) = J_inv(0, 1); 60 | E(6) = J_inv(1, 0); 61 | E(7) = J_inv(1, 1); 62 | 63 | return 1.0 / sqrt(F.rows()) * E; 64 | }); 65 | func_sos.template add_elements<1, 2>(TinyAD::range(b.size()), [&] (auto& element) -> TINYAD_VECTOR_TYPE(element) 66 | { 67 | using T = TINYAD_SCALAR_TYPE(element); 68 | Eigen::Vector2 p_target = bc[element.handle]; 69 | Eigen::Vector2 p = element.variables(b[element.handle]); 70 | 71 | return p_target - p; 72 | }); 73 | 74 | // For comparison: Standard formulation 75 | // 6 2D variables. 76 | // Element per triangle. 77 | // Positional penalty terms: 2 elements using 1 variable handle each. 78 | auto func_ref = TinyAD::scalar_function<2, PassiveT>(TinyAD::range(V.rows())); 79 | func_ref.template add_elements<3>(TinyAD::range(F.rows()), [&] (auto& element) -> TINYAD_SCALAR_TYPE(element) 80 | { 81 | using T = TINYAD_SCALAR_TYPE(element); 82 | Eigen::Index f_idx = element.handle; 83 | Eigen::Vector ar = V_rest.row(F(f_idx, 0)); 84 | Eigen::Vector br = V_rest.row(F(f_idx, 1)); 85 | Eigen::Vector cr = V_rest.row(F(f_idx, 2)); 86 | Eigen::Matrix Mr = TinyAD::col_mat(br - ar, cr - ar); 87 | Eigen::Vector a = element.variables(F(f_idx, 0)); 88 | Eigen::Vector b = element.variables(F(f_idx, 1)); 89 | Eigen::Vector c = element.variables(F(f_idx, 2)); 90 | Eigen::Matrix M = TinyAD::col_mat(b - a, c - a); 91 | 92 | if (M.determinant() <= 0.0) 93 | return INFINITY; 94 | 95 | return ((M * Mr.inverse()).squaredNorm() + (Mr * M.inverse()).squaredNorm()) / (PassiveT)F.rows(); 96 | }); 97 | func_ref.template add_elements<1>(TinyAD::range(b.size()), [&] (auto& element) -> TINYAD_SCALAR_TYPE(element) 98 | { 99 | using T = TINYAD_SCALAR_TYPE(element); 100 | Eigen::Vector2 p_target = bc[element.handle]; 101 | Eigen::Vector2 p = element.variables(b[element.handle]); 102 | 103 | return (p_target - p).squaredNorm(); 104 | }); 105 | 106 | // Assemble initial x vector 107 | Eigen::VectorX x = func_sos.x_from_data([&] (Eigen::Index v_idx) { 108 | return V.row(v_idx); 109 | }); 110 | 111 | // Optimize 112 | TinyAD::LinearSolver solver; 113 | for (int i = 0; i < 20; ++i) 114 | { 115 | // Compare different ways of evaluation to reference 116 | auto [f_ref, g_ref, H_proj_ref] = func_ref.eval_with_hessian_proj(x); 117 | 118 | { // Reference function, eval() 119 | PassiveT f = func_ref.eval(x); 120 | TINYAD_ASSERT_EPS(f, f_ref, _eps); 121 | } 122 | 123 | { // Reference function, eval_with_gradient() 124 | auto 
[f, g] = func_ref.eval_with_gradient(x); 125 | TINYAD_ASSERT_EPS(f, f_ref, _eps); 126 | TINYAD_ASSERT_L((g - g_ref).cwiseAbs().maxCoeff(), _eps); 127 | } 128 | 129 | { // Sum-of-squares function, eval_sum_of_squares() 130 | PassiveT f = func_sos.eval_sum_of_squares(x); 131 | TINYAD_ASSERT_EPS(f, f_ref, _eps); 132 | } 133 | 134 | { // Sum-of-squares function, eval_sum_of_squares_with_derivatives() 135 | auto [f, g, r, J] = func_sos.eval_sum_of_squares_with_derivatives(x); 136 | TINYAD_ASSERT_EPS(f, f_ref, _eps); 137 | TINYAD_ASSERT_L((g - g_ref).cwiseAbs().maxCoeff(), _eps); 138 | } 139 | 140 | // Compute Gauss-Newton step 141 | auto [f, g, r, J] = func_sos.eval_sum_of_squares_with_derivatives(x); 142 | const Eigen::VectorX d = TinyAD::gauss_newton_direction(r, J, solver, (PassiveT)1e-12); 143 | x = TinyAD::line_search(x, d, f, g, [&] (const Eigen::VectorX& _x) 144 | { 145 | return func_sos.eval_sum_of_squares(_x); 146 | }); 147 | } 148 | 149 | // Write final x vector to V 150 | func_sos.x_to_data(x, [&] (Eigen::Index v_idx, const Eigen::Vector2& p) { 151 | V.row(v_idx) = p; 152 | }); 153 | 154 | // Assert distortion minimum has been reached and gradient is zero 155 | { 156 | auto [f, g, r, J] = func_sos.eval_sum_of_squares_with_derivatives(x); 157 | TINYAD_ASSERT_EPS(f, 4.0, 0.1); 158 | TINYAD_ASSERT_EPS(g.cwiseAbs().maxCoeff(), 0.0, 0.1); 159 | } 160 | 161 | return x; 162 | } 163 | 164 | TEST(GaussNewtonTest, 2DDeformationGaussNewtonFloat) { test_2d_deformation_gauss_newton(1e-5f); } 165 | TEST(GaussNewtonTest, 2DDeformationGaussNewtonDouble) { test_2d_deformation_gauss_newton(1e-12); } 166 | TEST(GaussNewtonTest, 2DDeformationGaussNewtonLongDouble) { test_2d_deformation_gauss_newton(1e-14); } 167 | -------------------------------------------------------------------------------- /tests/HandleTypeTest.cc: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 
3 | * Author: Patrick Schmidt 4 | */ 5 | #include 6 | #include 7 | 8 | struct CustomVariableHandle 9 | { 10 | CustomVariableHandle() = default; 11 | CustomVariableHandle(const int _idx) 12 | : idx(_idx) { } 13 | 14 | bool operator==(const CustomVariableHandle& other) const { return idx == other.idx; } 15 | 16 | int idx = -1; 17 | }; 18 | 19 | struct CustomElementHandle 20 | { 21 | CustomElementHandle() = default; 22 | CustomElementHandle(const int _idx) 23 | : idx(_idx) { } 24 | 25 | bool operator==(const CustomElementHandle& other) const { return idx == other.idx; } 26 | 27 | int idx = -1; 28 | }; 29 | 30 | namespace TinyAD 31 | { 32 | 33 | Eigen::Index idx_from_handle(const CustomVariableHandle& _vh) 34 | { 35 | return _vh.idx; 36 | } 37 | 38 | Eigen::Index idx_from_handle(const CustomElementHandle& _eh) 39 | { 40 | return _eh.idx; 41 | } 42 | 43 | } 44 | 45 | // Include finite element function after custom idx_from_handle() implementation 46 | #include 47 | 48 | template 49 | void test_handle_types() 50 | { 51 | std::vector variable_handles; 52 | for (int i = 0; i < 5; ++i) 53 | variable_handles.push_back({ i }); 54 | 55 | std::vector element_handles; 56 | for (int i = 0; i < 10; ++i) 57 | element_handles.push_back({ i }); 58 | 59 | auto func = TinyAD::scalar_function<1>(variable_handles); 60 | func.template add_elements<1>(element_handles, [] (auto& element) 61 | { 62 | VariableHandleT vh((int)TinyAD::idx_from_handle(element.handle) / 2); 63 | return element.variables(vh)[0]; 64 | }); 65 | 66 | func.eval_with_hessian_proj(Eigen::VectorXd::Zero(func.n_vars)); 67 | } 68 | 69 | TEST(HandleTypeTest, HandleTypesInt) { test_handle_types(); } 70 | TEST(HandleTypeTest, HandleTypesLongInt) { test_handle_types(); } 71 | TEST(HandleTypeTest, HandleTypesCustom) { test_handle_types(); } 72 | -------------------------------------------------------------------------------- /tests/Meshes.hh: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 3 | * Author: Patrick Schmidt 4 | */ 5 | #include 6 | 7 | /** 8 | * Triangle mesh in the plane with 6 vertices and 4 faces. 9 | * Init stretched version. 10 | * Position constraints at two vertices, that require 90° ccw rotation. 11 | */ 12 | template 13 | void planar_test_mesh( 14 | Eigen::MatrixX& _V_rest, 15 | Eigen::MatrixX& _V_init, 16 | Eigen::MatrixXi& _F, 17 | std::vector& _b, 18 | std::vector>& _bc) 19 | { 20 | _V_rest = Eigen::MatrixX (6, 2); 21 | _V_rest << 0.0, 0.0, 22 | 1.0, 0.0, 23 | 0.0, 1.0, 24 | 1.0, 1.0, 25 | 0.0, 2.0, 26 | 1.0, 2.0; 27 | 28 | // Init stretched version 29 | _V_init = _V_rest; 30 | _V_init.col(0) *= 0.5; 31 | _V_init.col(1) *= 0.25; 32 | 33 | _F = Eigen::MatrixXi(4, 3); 34 | _F << 0, 1, 2, 35 | 1, 3, 2, 36 | 2, 3, 5, 37 | 2, 5, 4; 38 | 39 | // Add position constraints at two vertices, that require 90° ccw rotation. 
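// Vertex 0 is pinned to its rest position (0, 0); vertex 4, at rest position (0, 2),
// is pinned to (-2, 0), i.e. its rest position rotated 90 degrees ccw about the
// origin via (x, y) -> (-y, x).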
40 | _b = { 0, 4 }; 41 | _bc = { { 0.0, 0.0 }, { -2.0, 0.0 } }; 42 | } 43 | 44 | template 45 | void bunny_disk_mesh( 46 | Eigen::MatrixX& _V, 47 | Eigen::MatrixXi& _F) 48 | { 49 | _V = Eigen::MatrixX(46, 3); 50 | _V << 0.014693, 0.034859, -0.017492, 51 | -0.036904, 0.127238, -0.005124, 52 | -0.074639, 0.176419, -0.049740, 53 | 0.051019, 0.069193, 0.022474, 54 | -0.004906, 0.048737, 0.054009, 55 | 0.032741, 0.049282, 0.035893, 56 | 0.015155, 0.085233, 0.056121, 57 | -0.076828, 0.104784, 0.037908, 58 | -0.065012, 0.154134, 0.003927, 59 | 0.006688, 0.121734, 0.037576, 60 | -0.021675, 0.089240, 0.053983, 61 | -0.067227, 0.077131, 0.042467, 62 | 0.009475, 0.134593, 0.015024, 63 | -0.079380, 0.136588, 0.051328, 64 | 0.038698, 0.078901, -0.015798, 65 | -0.063620, 0.162594, -0.056235, 66 | 0.027464, 0.116221, -0.009778, 67 | -0.034912, 0.172602, -0.001215, 68 | -0.021075, 0.162288, -0.009884, 69 | -0.021729, 0.041594, -0.027408, 70 | 0.028948, 0.033317, 0.010711, 71 | -0.037600, 0.094847, -0.024914, 72 | 0.016440, 0.054445, -0.029721, 73 | -0.091845, 0.116681, 0.044212, 74 | 0.062414, 0.048888, 0.006749, 75 | -0.060028, 0.136585, -0.004227, 76 | -0.059451, 0.154713, 0.030062, 77 | -0.088030, 0.152777, 0.021302, 78 | -0.032821, 0.052454, 0.038265, 79 | -0.091845, 0.091069, 0.012848, 80 | -0.074725, 0.083133, -0.016780, 81 | -0.091826, 0.116716, 0.004118, 82 | 0.047464, 0.082514, 0.009797, 83 | -0.049998, 0.141248, 0.012185, 84 | -0.041535, 0.152417, -0.006075, 85 | -0.016272, 0.087044, -0.041215, 86 | -0.035222, 0.055073, -0.011049, 87 | -0.058118, 0.051838, 0.014853, 88 | 0.002913, 0.118006, -0.015443, 89 | -0.077730, 0.150676, -0.003536, 90 | -0.061400, 0.120111, -0.011313, 91 | 0.038585, 0.105182, 0.030786, 92 | -0.036696, 0.101966, 0.042601, 93 | -0.063304, 0.115138, 0.047098, 94 | 0.012157, 0.036657, 0.040296, 95 | -0.013331, 0.182631, -0.026955; 96 | 97 | _F = Eigen::MatrixXi(82, 3); 98 | _F << 11, 28, 42, 99 | 11, 37, 28, 100 | 45, 8, 17, 101 | 14, 24, 20, 102 | 14, 20, 0, 103 | 19, 22, 0, 104 | 34, 18, 33, 105 | 10, 28, 4, 106 | 10, 42, 28, 107 | 1, 12, 38, 108 | 33, 1, 25, 109 | 33, 43, 1, 110 | 43, 42, 1, 111 | 1, 42, 9, 112 | 1, 9, 12, 113 | 11, 29, 37, 114 | 11, 7, 29, 115 | 26, 43, 33, 116 | 2, 8, 15, 117 | 8, 2, 39, 118 | 26, 13, 43, 119 | 3, 24, 32, 120 | 3, 5, 24, 121 | 19, 36, 21, 122 | 19, 21, 35, 123 | 1, 38, 21, 124 | 4, 44, 5, 125 | 4, 5, 6, 126 | 5, 3, 6, 127 | 6, 3, 41, 128 | 41, 9, 6, 129 | 43, 7, 42, 130 | 42, 7, 11, 131 | 43, 23, 7, 132 | 43, 13, 23, 133 | 14, 0, 22, 134 | 22, 35, 14, 135 | 14, 35, 38, 136 | 14, 38, 16, 137 | 8, 25, 15, 138 | 7, 31, 29, 139 | 31, 7, 23, 140 | 31, 39, 40, 141 | 23, 27, 31, 142 | 31, 27, 39, 143 | 8, 45, 34, 144 | 8, 34, 25, 145 | 25, 40, 39, 146 | 12, 9, 41, 147 | 9, 10, 6, 148 | 9, 42, 10, 149 | 6, 10, 4, 150 | 33, 17, 26, 151 | 13, 26, 27, 152 | 30, 40, 21, 153 | 32, 41, 3, 154 | 41, 16, 12, 155 | 24, 14, 32, 156 | 14, 16, 32, 157 | 32, 16, 41, 158 | 16, 38, 12, 159 | 27, 8, 39, 160 | 18, 45, 17, 161 | 17, 33, 18, 162 | 35, 21, 38, 163 | 19, 35, 22, 164 | 17, 8, 26, 165 | 33, 25, 34, 166 | 2, 15, 39, 167 | 15, 25, 39, 168 | 20, 24, 5, 169 | 30, 37, 29, 170 | 30, 29, 31, 171 | 30, 31, 40, 172 | 30, 36, 37, 173 | 36, 30, 21, 174 | 26, 8, 27, 175 | 1, 21, 40, 176 | 1, 40, 25, 177 | 23, 13, 27, 178 | 44, 20, 5, 179 | 45, 18, 34; 180 | } 181 | 182 | template 183 | void bunny_closed_mesh( 184 | Eigen::MatrixX& _V, 185 | Eigen::MatrixXi& _F) 186 | { 187 | _V = Eigen::MatrixX(52, 3); 188 | _V << 0.014693, 0.034859, -0.017492, 189 | -0.036904, 
0.127238, -0.005124, 190 | -0.074639, 0.176419, -0.049740, 191 | -0.042872, 0.034860, -0.029119, 192 | 0.051019, 0.069193, 0.022474, 193 | -0.004906, 0.048737, 0.054009, 194 | 0.032741, 0.049282, 0.035893, 195 | 0.015155, 0.085233, 0.056121, 196 | -0.076828, 0.104784, 0.037908, 197 | -0.065012, 0.154134, 0.003927, 198 | 0.006688, 0.121734, 0.037576, 199 | -0.021675, 0.089240, 0.053983, 200 | -0.067227, 0.077131, 0.042467, 201 | 0.009475, 0.134593, 0.015024, 202 | -0.079380, 0.136588, 0.051328, 203 | 0.038698, 0.078901, -0.015798, 204 | -0.063620, 0.162594, -0.056235, 205 | 0.027464, 0.116221, -0.009778, 206 | -0.034912, 0.172602, -0.001215, 207 | -0.021075, 0.162288, -0.009884, 208 | -0.021729, 0.041594, -0.027408, 209 | 0.028948, 0.033317, 0.010711, 210 | -0.037600, 0.094847, -0.024914, 211 | 0.016440, 0.054445, -0.029721, 212 | -0.091845, 0.116681, 0.044212, 213 | 0.062414, 0.048888, 0.006749, 214 | -0.060028, 0.136585, -0.004227, 215 | -0.059451, 0.154713, 0.030062, 216 | -0.088030, 0.152777, 0.021302, 217 | -0.032821, 0.052454, 0.038265, 218 | -0.091845, 0.091069, 0.012848, 219 | -0.074725, 0.083133, -0.016780, 220 | -0.091826, 0.116716, 0.004118, 221 | 0.047464, 0.082514, 0.009797, 222 | -0.049998, 0.141248, 0.012185, 223 | -0.041535, 0.152417, -0.006075, 224 | -0.016272, 0.087044, -0.041215, 225 | -0.035222, 0.055073, -0.011049, 226 | -0.058118, 0.051838, 0.014853, 227 | 0.002913, 0.118006, -0.015443, 228 | -0.041425, 0.038204, -0.016265, 229 | -0.077730, 0.150676, -0.003536, 230 | -0.061400, 0.120111, -0.011313, 231 | 0.038585, 0.105182, 0.030786, 232 | -0.036696, 0.101966, 0.042601, 233 | -0.063921, 0.040003, 0.036430, 234 | -0.020979, 0.037298, 0.029990, 235 | -0.028962, 0.037050, 0.052674, 236 | -0.063304, 0.115138, 0.047098, 237 | 0.012157, 0.036657, 0.040296, 238 | -0.069366, 0.033253, -0.005216, 239 | -0.013331, 0.182631, -0.026955; 240 | 241 | _F = Eigen::MatrixX(100, 3); 242 | _F << 12, 29, 44, 243 | 12, 38, 29, 244 | 51, 9, 18, 245 | 15, 25, 21, 246 | 15, 21, 0, 247 | 20, 23, 0, 248 | 0, 21, 46, 249 | 0, 46, 40, 250 | 0, 40, 20, 251 | 20, 40, 3, 252 | 35, 19, 34, 253 | 11, 29, 5, 254 | 11, 44, 29, 255 | 49, 47, 46, 256 | 1, 13, 39, 257 | 34, 1, 26, 258 | 34, 48, 1, 259 | 48, 44, 1, 260 | 1, 44, 10, 261 | 1, 10, 13, 262 | 12, 30, 38, 263 | 12, 8, 30, 264 | 27, 48, 34, 265 | 2, 9, 16, 266 | 9, 2, 41, 267 | 27, 14, 48, 268 | 5, 29, 47, 269 | 3, 40, 37, 270 | 4, 25, 33, 271 | 4, 6, 25, 272 | 46, 29, 45, 273 | 20, 37, 22, 274 | 20, 22, 36, 275 | 3, 37, 20, 276 | 1, 39, 22, 277 | 5, 49, 6, 278 | 5, 6, 7, 279 | 6, 4, 7, 280 | 7, 4, 43, 281 | 43, 10, 7, 282 | 48, 8, 44, 283 | 44, 8, 12, 284 | 48, 24, 8, 285 | 48, 14, 24, 286 | 15, 0, 23, 287 | 23, 36, 15, 288 | 15, 36, 39, 289 | 15, 39, 17, 290 | 9, 26, 16, 291 | 46, 47, 29, 292 | 46, 50, 40, 293 | 8, 32, 30, 294 | 32, 8, 24, 295 | 32, 41, 42, 296 | 24, 28, 32, 297 | 32, 28, 41, 298 | 9, 51, 35, 299 | 9, 35, 26, 300 | 26, 42, 41, 301 | 13, 10, 43, 302 | 10, 11, 7, 303 | 10, 44, 11, 304 | 7, 11, 5, 305 | 34, 18, 27, 306 | 14, 27, 28, 307 | 38, 45, 29, 308 | 31, 42, 22, 309 | 33, 43, 4, 310 | 43, 17, 13, 311 | 25, 15, 33, 312 | 15, 17, 33, 313 | 33, 17, 43, 314 | 17, 39, 13, 315 | 38, 50, 45, 316 | 28, 9, 41, 317 | 19, 51, 18, 318 | 18, 34, 19, 319 | 36, 22, 39, 320 | 20, 36, 23, 321 | 18, 9, 27, 322 | 34, 26, 35, 323 | 2, 16, 41, 324 | 16, 26, 41, 325 | 21, 25, 6, 326 | 31, 38, 30, 327 | 31, 30, 32, 328 | 31, 32, 42, 329 | 31, 37, 38, 330 | 37, 31, 22, 331 | 27, 9, 28, 332 | 40, 38, 37, 333 | 1, 22, 42, 334 | 1, 42, 26, 335 | 24, 
14, 28, 336 | 49, 21, 6, 337 | 49, 5, 47, 338 | 40, 50, 38, 339 | 51, 19, 35, 340 | 21, 49, 46, 341 | 46, 45, 50; 342 | } -------------------------------------------------------------------------------- /tests/NewtonTest.cc: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 3 | * Author: Patrick Schmidt 4 | */ 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include "Meshes.hh" 11 | 12 | template 13 | Eigen::VectorX test_2d_deformation_newton( 14 | const PassiveT& _eps) 15 | { 16 | Eigen::MatrixX V_rest; 17 | Eigen::MatrixX V; 18 | Eigen::MatrixXi F; 19 | std::vector b; 20 | std::vector> bc; 21 | planar_test_mesh(V_rest, V, F, b, bc); 22 | 23 | // 6 2D variables 24 | auto func = TinyAD::scalar_function<2, PassiveT>(TinyAD::range(V.rows())); 25 | 26 | // Add symmetric Dirichlet energy term. 27 | // 4 elements using 3 variable handles each. 28 | func.template add_elements<3>(TinyAD::range(F.rows()), [&] (auto& element) -> TINYAD_SCALAR_TYPE(element) 29 | { 30 | using T = TINYAD_SCALAR_TYPE(element); 31 | Eigen::Vector ar = V_rest.row(F(element.handle, 0)); 32 | Eigen::Vector br = V_rest.row(F(element.handle, 1)); 33 | Eigen::Vector cr = V_rest.row(F(element.handle, 2)); 34 | Eigen::Matrix Mr = TinyAD::col_mat(br - ar, cr - ar); 35 | Eigen::Vector a = element.variables(F(element.handle, 0)); 36 | Eigen::Vector b = element.variables(F(element.handle, 1)); 37 | Eigen::Vector c = element.variables(F(element.handle, 2)); 38 | Eigen::Matrix M = TinyAD::col_mat(b - a, c - a); 39 | 40 | if (M.determinant() <= 0.0) 41 | return INFINITY; 42 | 43 | return ((M * Mr.inverse()).squaredNorm() + (Mr * M.inverse()).squaredNorm()) / (PassiveT)F.rows(); 44 | }); 45 | 46 | // Add positional penalty terms. 47 | // 2 elements using 1 variable handle each. 
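// At the optimum both constrained vertices reach their targets, so these penalty
// terms vanish and the objective reduces to the distortion term alone. The expected
// optimum is the rigid 90-degree rotation of the rest mesh, for which each triangle's
// Jacobian J is a rotation matrix, so ||J||^2 + ||J^-1||^2 = 2 + 2 = 4; dividing by
// the 4 faces and summing over them yields the value 4.0 asserted near the end of
// this test.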
48 | func.template add_elements<1>(TinyAD::range(b.size()), [&] (auto& element) 49 | { 50 | using T = TINYAD_SCALAR_TYPE(element); 51 | Eigen::Vector2 p_target = bc[element.handle]; 52 | Eigen::Vector2 p = element.variables(b[element.handle]); 53 | 54 | return (p_target - p).squaredNorm(); 55 | }); 56 | 57 | // Assemble initial x vector 58 | Eigen::VectorX x = func.x_from_data([&] (Eigen::Index v_idx) { 59 | return V.row(v_idx); 60 | }); 61 | 62 | // Assert number of non-zeros in Hessian 63 | { 64 | auto [f, g, H_proj] = func.eval_with_hessian_proj(x); 65 | TINYAD_ASSERT_EQ(H_proj.nonZeros(), 4 * V.rows() + 8 * (V.rows() + F.rows() - 1)); 66 | } 67 | 68 | // Optimize 69 | TinyAD::LinearSolver solver; 70 | for (int i = 0; i < 10; ++i) 71 | { 72 | auto [f, g, H_proj] = func.eval_with_hessian_proj(x); 73 | Eigen::VectorX d = newton_direction(g, H_proj, solver); 74 | x = line_search(x, d, f, g, func); 75 | } 76 | 77 | // Write final x vector to V 78 | func.x_to_data(x, [&] (Eigen::Index v_idx, const Eigen::Vector2& p) { 79 | V.row(v_idx) = p; 80 | }); 81 | 82 | // Assert distortion minimum has been reached and gradient is zero 83 | { 84 | auto [f, g, H_proj] = func.eval_with_hessian_proj(x); 85 | TINYAD_ASSERT_EPS(func.eval(x), f, _eps); 86 | TINYAD_ASSERT_EPS(f, 4.0, _eps); 87 | TINYAD_ASSERT_EPS(g.cwiseAbs().maxCoeff(), 0.0, _eps); 88 | } 89 | 90 | return x; 91 | } 92 | 93 | TEST(NewtonTest, 2DDeformationFloat) { test_2d_deformation_newton(1e-6f); } 94 | TEST(NewtonTest, 2DDeformationDouble) { test_2d_deformation_newton(1e-15); } 95 | TEST(NewtonTest, 2DDeformationLongDouble) { test_2d_deformation_newton(1e-15); } 96 | 97 | TEST(NewtonTest, Deterministic) 98 | { 99 | std::vector xs(4); 100 | 101 | #pragma omp parallel for 102 | for (int i = 0; i < (int)xs.size(); ++i) 103 | { 104 | xs[i] = test_2d_deformation_newton(1e-15); 105 | } 106 | 107 | for (int i = 0; i < (int)xs.size(); ++i) 108 | { 109 | TINYAD_ASSERT_EQ(xs[i], xs[0]); 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /tests/OpenMPTest.cc: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 3 | * Author: Patrick Schmidt 4 | */ 5 | #include 6 | #include 7 | 8 | TEST(OpenMPTest, OpenMPEnabled) 9 | { 10 | // Check if OpenMP is enabled 11 | #ifdef _OPENMP 12 | SUCCEED() << "OpenMP is enabled."; 13 | #else 14 | FAIL() << "OpenMP is not enabled."; 15 | #endif 16 | } 17 | 18 | TEST(OpenMPTest, ScalarFunctionParallel) 19 | { 20 | auto func = TinyAD::scalar_function<1>(TinyAD::range(1)); 21 | func.add_elements<1>(TinyAD::range(20), [] (auto& element) 22 | { 23 | // Assert parallel 24 | TINYAD_ASSERT_GEQ(omp_get_max_threads(), 2); 25 | 26 | return element.variables(0)[0]; 27 | }); 28 | 29 | func.eval_with_hessian_proj(Eigen::VectorXd::Zero(1)); 30 | } 31 | -------------------------------------------------------------------------------- /tests/PerformanceTest.cc: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 3 | * Author: Patrick Schmidt 4 | */ 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | namespace 11 | { 12 | 13 | /// Triangle mesh in the plane with 6 vertices and 4 faces. 
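/// Same vertex and face data as planar_test_mesh() in Meshes.hh; this local variant
/// only outputs V and F (the stretched initialization is applied in the test body instead).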
14 | template 15 | void planar_test_mesh( 16 | Eigen::MatrixX& _V, Eigen::MatrixXi& _F) 17 | { 18 | _V = Eigen::MatrixX (6, 2); 19 | _V << 0.0, 0.0, 20 | 1.0, 0.0, 21 | 0.0, 1.0, 22 | 1.0, 1.0, 23 | 0.0, 2.0, 24 | 1.0, 2.0; 25 | 26 | _F = Eigen::MatrixXi(4, 3); 27 | _F << 0, 1, 2, 28 | 1, 3, 2, 29 | 2, 3, 5, 30 | 2, 5, 4; 31 | } 32 | 33 | } 34 | 35 | template 36 | void test_2d_deformation_performance( 37 | const std::string& _floating_type_str) 38 | { 39 | Eigen::MatrixX V_rest; 40 | Eigen::MatrixXi F_orig; 41 | planar_test_mesh(V_rest, F_orig); 42 | 43 | // Init stretched version 44 | Eigen::MatrixX V = V_rest; 45 | V.col(0) *= 0.5; 46 | V.col(1) *= 0.25; 47 | 48 | // Duplicate faces 49 | Eigen::MatrixXi F = F_orig.replicate(1000 / F_orig.rows(), 1); 50 | 51 | // Optimize symmetric Dirichlet energy 52 | // 6 2D variables, 4 elements using 3 variables each. 53 | auto func = TinyAD::scalar_function<2, PassiveT>(TinyAD::range(V.rows())); 54 | 55 | func.template add_elements<3>(TinyAD::range(F.rows()), [&] (auto& element) -> TINYAD_SCALAR_TYPE(element) 56 | { 57 | using T = TINYAD_SCALAR_TYPE(element); 58 | Eigen::Vector ar = V_rest.row(F(element.handle, 0)); 59 | Eigen::Vector br = V_rest.row(F(element.handle, 1)); 60 | Eigen::Vector cr = V_rest.row(F(element.handle, 2)); 61 | Eigen::Matrix Mr = TinyAD::col_mat(br - ar, cr - ar); 62 | Eigen::Vector a = element.variables(F(element.handle, 0)); 63 | Eigen::Vector b = element.variables(F(element.handle, 1)); 64 | Eigen::Vector c = element.variables(F(element.handle, 2)); 65 | Eigen::Matrix M = TinyAD::col_mat(b - a, c - a); 66 | 67 | if (M.determinant() <= 0.0) 68 | return INFINITY; 69 | 70 | return ((M * Mr.inverse()).squaredNorm() + (Mr * M.inverse()).squaredNorm()) / (PassiveT)F.rows(); 71 | }); 72 | 73 | // Assemble initial x vector 74 | Eigen::VectorX x = func.x_from_data([&] (Eigen::Index v_idx) { return V.row(v_idx); }); 75 | 76 | // Eval derivatives 77 | { 78 | TinyAD::Timer timer(std::string(__FUNCTION__) + " evaluating " + std::to_string(F.rows()) + " elements (" + _floating_type_str + ")"); 79 | auto [f, g, H_proj] = func.eval_with_hessian_proj(x); 80 | } 81 | } 82 | 83 | TEST(PerformanceTest, 2DDeformationPerformanceFloat) { test_2d_deformation_performance("float"); } 84 | TEST(PerformanceTest, 2DDeformationPerformanceDouble) { test_2d_deformation_performance("double"); } 85 | TEST(PerformanceTest, 2DDeformationPerformanceLongDouble) { test_2d_deformation_performance("long double"); } 86 | -------------------------------------------------------------------------------- /tests/SVDTest.cc: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 
3 | * Author: Patrick Schmidt 4 | */ 5 | #include 6 | #include 7 | #include 8 | 9 | template 10 | void test_svd( 11 | const PassiveT _eps0, 12 | const PassiveT _eps1, 13 | const PassiveT _eps2) 14 | { 15 | // Choose autodiff scalar type for 4 variables 16 | using ADouble = TinyAD::Scalar<4, PassiveT, with_hessian>; 17 | 18 | Eigen::Matrix2 A_passive = Eigen::Matrix2::Random(2, 2); 19 | 20 | Eigen::Matrix2 A_active; 21 | A_active << ADouble(A_passive(0, 0), 0), ADouble(A_passive(0, 1), 1), 22 | ADouble(A_passive(1, 0), 2), ADouble(A_passive(1, 1), 3); 23 | 24 | // Eigen Jacobi SVD 25 | Eigen::JacobiSVD> svd(A_active, Eigen::ComputeFullU | Eigen::ComputeFullV); 26 | Eigen::Matrix2 U_eigen = svd.matrixU(); 27 | Eigen::Vector2 S_eigen = svd.singularValues(); 28 | Eigen::Matrix2 V_eigen = svd.matrixV(); 29 | Eigen::Matrix2 A_eigen = U_eigen * S_eigen.asDiagonal() * V_eigen.transpose(); 30 | TINYAD_ASSERT_L((TinyAD::to_passive(A_eigen) - A_passive).array().abs().maxCoeff(), _eps0); 31 | 32 | // TinyAD closed-form SVD 33 | Eigen::Matrix2 U_closed, V_closed; 34 | Eigen::Vector2 S_closed; 35 | TinyAD::svd(A_active, U_closed, S_closed, V_closed); 36 | Eigen::Matrix2 A_closed = U_closed * S_closed.asDiagonal() * V_closed.transpose(); 37 | TINYAD_ASSERT_L((TinyAD::to_passive(A_closed) - A_passive).array().abs().maxCoeff(), _eps0); 38 | 39 | for (int i = 0; i < 2; ++i) 40 | { 41 | for (int j = 0; j < 2; ++j) 42 | { 43 | TINYAD_ASSERT_L(std::fabs(A_closed(i, j).val - A_eigen(i, j).val), _eps0); 44 | TINYAD_ASSERT_L((A_closed(i, j).grad - A_eigen(i, j).grad).array().abs().maxCoeff(), _eps1); 45 | if constexpr (with_hessian) 46 | TINYAD_ASSERT_L((A_closed(i, j).Hess - A_eigen(i, j).Hess).array().abs().maxCoeff(), _eps2); 47 | } 48 | } 49 | } 50 | 51 | TEST(SVDTest, SVDFloatFirstOrder) { test_svd(1e-6f, 1e-6f, 1e-4f); } 52 | TEST(SVDTest, SVDDoubleFirstOrder) { test_svd(1e-12, 1e-8, 1e-4); } 53 | TEST(SVDTest, SVDLongDoubleFirstOrder) { test_svd(1e-12, 1e-8, 1e-4); } 54 | TEST(SVDTest, SVDFloatSecondOrder) { test_svd(1e-6f, 1e-6f, 1e-3f); } 55 | TEST(SVDTest, SVDDoubleSecondOrder) { test_svd(1e-12, 1e-8, 1e-4); } 56 | TEST(SVDTest, SVDLongDoubleSecondOrder) { test_svd(1e-12, 1e-8, 1e-4); } 57 | 58 | template 59 | void test_closest_orthogonal( 60 | const PassiveT _eps0, 61 | const PassiveT _eps1, 62 | const PassiveT _eps2) 63 | { 64 | // Choose autodiff scalar type for 4 variables 65 | using ADouble = TinyAD::Scalar<4, PassiveT, with_hessian>; 66 | 67 | Eigen::Matrix2 A_passive = Eigen::Matrix2::Random(2, 2); 68 | 69 | Eigen::Matrix2 A_active; 70 | A_active << ADouble(A_passive(0, 0), 0), ADouble(A_passive(0, 1), 1), 71 | ADouble(A_passive(1, 0), 2), ADouble(A_passive(1, 1), 3); 72 | 73 | // Eigen Jacobi SVD 74 | Eigen::JacobiSVD> svd(A_active, Eigen::ComputeFullU | Eigen::ComputeFullV); 75 | Eigen::Matrix2 R_eigen = svd.matrixU() * svd.matrixV().transpose(); 76 | 77 | // TinyAD closed-form SVD 78 | Eigen::Matrix2 R_closed = closest_orthogonal(A_active); 79 | 80 | for (int i = 0; i < 2; ++i) 81 | { 82 | for (int j = 0; j < 2; ++j) 83 | { 84 | TINYAD_ASSERT_L(std::fabs(R_closed(i, j).val - R_eigen(i, j).val), _eps0); 85 | TINYAD_ASSERT_L((R_closed(i, j).grad - R_eigen(i, j).grad).array().abs().maxCoeff(), _eps1); 86 | if constexpr (with_hessian) 87 | TINYAD_ASSERT_L((R_closed(i, j).Hess - R_eigen(i, j).Hess).array().abs().maxCoeff(), _eps2); 88 | } 89 | } 90 | } 91 | 92 | TEST(SVDTest, ClosestOrthogonalFloatFirstOrder) { test_closest_orthogonal(1e-6f, 1e-6f, 1e-4f); } 93 | TEST(SVDTest, 
ClosestOrthogonalDoubleFirstOrder) { test_closest_orthogonal(1e-12, 1e-8, 1e-4); } 94 | TEST(SVDTest, ClosestOrthogonalLongDoubleFirstOrder) { test_closest_orthogonal(1e-12, 1e-8, 1e-4); } 95 | TEST(SVDTest, ClosestOrthogonalFloatSecondOrder) { test_closest_orthogonal(1e-6f, 1e-6f, 1e-3f); } 96 | TEST(SVDTest, ClosestOrthogonalDoubleSecondOrder) { test_closest_orthogonal(1e-12, 1e-8, 1e-4); } 97 | TEST(SVDTest, ClosestOrthogonalLongDoubleSecondOrder) { test_closest_orthogonal(1e-12, 1e-8, 1e-4); } 98 | -------------------------------------------------------------------------------- /tests/ScalarFunctionTest.cc: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 3 | * Author: Patrick Schmidt 4 | */ 5 | #include 6 | #include 7 | #include 8 | 9 | // Simple test that verifies the scalar function works correctly 10 | TEST(ScalarFunctionTest, Basic) 11 | { 12 | // Create a scalar function 13 | auto func = TinyAD::scalar_function<2>(TinyAD::range(1)); 14 | 15 | // Add a simple quadratic function 16 | func.template add_elements<1>(TinyAD::range(1), [](auto& element) 17 | { 18 | auto x = element.variables(0); 19 | return x[0] * x[0] + x[1] * x[1]; 20 | }); 21 | 22 | // Create test point 23 | Eigen::Vector2d x(1.0, 2.0); 24 | 25 | // Evaluate function 26 | double f = func.eval(x); 27 | EXPECT_EQ(f, 5.0); 28 | 29 | // Evaluate function with gradient 30 | auto [f_g, g] = func.eval_with_gradient(x); 31 | EXPECT_EQ(f_g, 5.0); 32 | EXPECT_EQ(g[0], 2.0); 33 | EXPECT_EQ(g[1], 4.0); 34 | 35 | // Evaluate function with Hessian 36 | auto [f_h, g_h, H] = func.eval_with_derivatives(x); 37 | EXPECT_EQ(f_h, 5.0); 38 | EXPECT_EQ(g_h[0], 2.0); 39 | EXPECT_EQ(g_h[1], 4.0); 40 | EXPECT_EQ(H.coeff(0, 0), 2.0); 41 | EXPECT_EQ(H.coeff(1, 1), 2.0); 42 | } 43 | 44 | template 45 | void test_1d() 46 | { 47 | // 1 1D variable, 1 element 48 | // Convex quadratic function 49 | auto func = TinyAD::scalar_function<1, PassiveT>(TinyAD::range(1)); 50 | func.template add_elements<1>(TinyAD::range(1), [] (auto& element) -> TINYAD_SCALAR_TYPE(element) 51 | { 52 | using T = TINYAD_SCALAR_TYPE(element); 53 | Eigen::Vector v = element.variables(0); 54 | 55 | T x = v[0]; 56 | return (PassiveT)2.0 * sqr(x) + x + (PassiveT)1.0; 57 | }); 58 | 59 | using Vector1 = Eigen::Matrix; 60 | auto [f, g, H] = func.eval_with_hessian_proj(Vector1(1.0)); 61 | 62 | const PassiveT eps = (PassiveT)1e-16; 63 | ASSERT_NEAR(f, 4.0, eps); 64 | ASSERT_NEAR(g[0], 5.0, eps); 65 | ASSERT_NEAR(H.coeff(0, 0), 4.0, eps); 66 | } 67 | 68 | TEST(ScalarFunctionTest, 1DFloat) { test_1d(); } 69 | TEST(ScalarFunctionTest, 1DDouble) { test_1d(); } 70 | TEST(ScalarFunctionTest, 1DLongDouble) { test_1d(); } 71 | 72 | template 73 | void test_2d() 74 | { 75 | // 1 2D variable, 1 element 76 | // Convex quadratic function 77 | auto func = TinyAD::scalar_function<2, PassiveT>(TinyAD::range(1)); 78 | func.template add_elements<1>(TinyAD::range(1), [] (auto& element) -> TINYAD_SCALAR_TYPE(element) 79 | { 80 | using T = TINYAD_SCALAR_TYPE(element); 81 | Eigen::Vector2 x = element.variables(0); 82 | 83 | return (PassiveT)2.0 * sqr(x[0]) + (PassiveT)2.0 * x[0] * x[1] + sqr(x[1]) + x[0] + (PassiveT)1.0; 84 | }); 85 | 86 | const Eigen::Vector2 x(1.0, 2.0); 87 | 88 | auto [f, g, H] = func.eval_with_derivatives(x); 89 | auto [f2, g2, H_proj] = func.eval_with_hessian_proj(x); 90 | 91 | const PassiveT eps = (PassiveT)1e-16; 92 | ASSERT_NEAR(f, 12, eps); 93 | ASSERT_NEAR(f2, 12, eps); 94 
| ASSERT_NEAR(g[0], 9.0, eps); 95 | ASSERT_NEAR(g[1], 6.0, eps); 96 | ASSERT_NEAR(g2[0], 9.0, eps); 97 | ASSERT_NEAR(g2[1], 6.0, eps); 98 | ASSERT_NEAR(H.coeff(0, 0), 4.0, eps); 99 | ASSERT_NEAR(H.coeff(0, 1), 2.0, eps); 100 | ASSERT_NEAR(H.coeff(1, 0), 2.0, eps); 101 | ASSERT_NEAR(H.coeff(1, 1), 2.0, eps); 102 | ASSERT_NEAR(H_proj.coeff(0, 0), 4.0, eps); 103 | ASSERT_NEAR(H_proj.coeff(0, 1), 2.0, eps); 104 | ASSERT_NEAR(H_proj.coeff(1, 0), 2.0, eps); 105 | ASSERT_NEAR(H_proj.coeff(1, 1), 2.0, eps); 106 | } 107 | 108 | TEST(ScalarFunctionTest, 2DFloat) { test_2d(); } 109 | TEST(ScalarFunctionTest, 2DDouble) { test_2d(); } 110 | TEST(ScalarFunctionTest, 2DLongDouble) { test_2d(); } 111 | 112 | template 113 | void test_2d_non_convex() 114 | { 115 | // 1 2D variable, 1 element 116 | // Non-convex quadratic function 117 | auto func = TinyAD::scalar_function<2, PassiveT>(TinyAD::range(1)); 118 | func.template add_elements<1>(TinyAD::range(1), [] (auto& element) -> TINYAD_SCALAR_TYPE(element) 119 | { 120 | using T = TINYAD_SCALAR_TYPE(element); 121 | Eigen::Vector2 x = element.variables(0); 122 | 123 | return -((PassiveT)2.0 * sqr(x[0]) + (PassiveT)2.0 * x[0] * x[1] + sqr(x[1]) + x[0] + (PassiveT)1.0); 124 | }); 125 | 126 | const Eigen::Vector2 x(1.0, 2.0); 127 | 128 | auto [f, g, H] = func.eval_with_derivatives(x); 129 | auto [f2, g2, H_proj] = func.eval_with_hessian_proj(x); 130 | 131 | const PassiveT eps = (PassiveT)1e-16; 132 | ASSERT_NEAR(f, -12, eps); 133 | ASSERT_NEAR(f2, -12, eps); 134 | ASSERT_NEAR(g[0], -9.0, eps); 135 | ASSERT_NEAR(g[1], -6.0, eps); 136 | ASSERT_NEAR(g2[0], -9.0, eps); 137 | ASSERT_NEAR(g2[1], -6.0, eps); 138 | ASSERT_NEAR(H.coeff(0, 0), -4.0, eps); 139 | ASSERT_NEAR(H.coeff(0, 1), -2.0, eps); 140 | ASSERT_NEAR(H.coeff(1, 0), -2.0, eps); 141 | ASSERT_NEAR(H.coeff(1, 1), -2.0, eps); 142 | 143 | // Assert positive-definite 144 | Eigen::SelfAdjointEigenSolver> eig(H_proj.toDense()); 145 | ASSERT_GT(eig.eigenvalues()[0], 0.0); 146 | ASSERT_GT(eig.eigenvalues()[1], 0.0); 147 | } 148 | 149 | TEST(ScalarFunctionTest, 2DNonConvexFloat) { test_2d_non_convex(); }; 150 | TEST(ScalarFunctionTest, 2DNonConvexDouble) { test_2d_non_convex(); }; 151 | TEST(ScalarFunctionTest, 2DNonConvexLongDouble) { test_2d_non_convex(); }; 152 | 153 | TEST(ScalarFunctionTest, ElementVariables) 154 | { 155 | // 2 2D variables, 1 element 156 | auto func = TinyAD::scalar_function<2>(TinyAD::range(2)); 157 | func.template add_elements<1>(TinyAD::range(1), [] (auto& element) -> TINYAD_SCALAR_TYPE(element) 158 | { 159 | using T = TINYAD_SCALAR_TYPE(element); 160 | Eigen::Vector v = element.variables(0); 161 | Eigen::Vector v2 = element.variables(0); 162 | // Eigen::Vector v3 = element.variables(1); // [ERROR] Too many variables requested. 
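// The function was set up via add_elements<1>, i.e. each element declares access to a
// single variable handle. Requesting variables(0) twice is fine (it refers to the same
// handle), but requesting handle 1 would exceed that declared count, hence the error
// noted above.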
163 | 164 | TINYAD_ASSERT_EQ(v, v2); 165 | if constexpr (TINYAD_ACTIVE_MODE(element)) 166 | { 167 | TINYAD_ASSERT_EQ(v[0].grad, v2[0].grad); 168 | TINYAD_ASSERT_EQ(v[1].grad, v2[1].grad); 169 | TINYAD_ASSERT_EQ(v[0].Hess, v2[0].Hess); 170 | TINYAD_ASSERT_EQ(v[1].Hess, v2[1].Hess); 171 | } 172 | 173 | return 0; 174 | }); 175 | 176 | const Eigen::Vector4d x = Eigen::Vector4d::Zero(); 177 | func.eval_with_hessian_proj(x); 178 | func.eval(x); 179 | } 180 | 181 | TEST(ScalarFunctionTest, Move1) 182 | { 183 | // Test default constructor 184 | { 185 | TinyAD::ScalarFunction<2, double, Eigen::Index> empty; 186 | Eigen::Vector x; 187 | TINYAD_ASSERT_EQ(empty.eval(x), 0.0); 188 | empty.eval_with_hessian_proj(x); 189 | } 190 | 191 | // 1 2D variable, 1 element 192 | auto func1 = TinyAD::scalar_function<2>(TinyAD::range(1)); 193 | func1.template add_elements<1>(TinyAD::range(1), [] (auto& element) -> TINYAD_SCALAR_TYPE(element) 194 | { 195 | using T = TINYAD_SCALAR_TYPE(element); 196 | Eigen::Vector2 v = element.variables(0); 197 | return v.sum(); 198 | }); 199 | const Eigen::Vector2d x = Eigen::Vector2d::Constant(1.0); 200 | TINYAD_ASSERT_EQ(func1.eval(x), 2.0); 201 | 202 | // Test move constructor 203 | auto func2 = std::move(func1); 204 | TINYAD_ASSERT_EQ(func2.eval(x), 2.0); 205 | 206 | // Test move assignment 207 | TinyAD::ScalarFunction<2, double, Eigen::Index> func3; 208 | func3 = std::move(func2); 209 | TINYAD_ASSERT_EQ(func3.eval(x), 2.0); 210 | } 211 | 212 | // Test that verifies move semantics work correctly for scalar functions 213 | TEST(ScalarFunctionTest, Move2) 214 | { 215 | // Create a scalar function 216 | auto func1 = TinyAD::scalar_function<2>(TinyAD::range(1)); 217 | 218 | // Add a simple quadratic function 219 | func1.template add_elements<1>(TinyAD::range(1), [](auto& element) 220 | { 221 | auto x = element.variables(0); 222 | return x[0] * x[0] + x[1] * x[1]; 223 | }); 224 | 225 | // Create test point 226 | Eigen::Vector2d x(1.0, 2.0); 227 | 228 | // Evaluate function 229 | double f1 = func1.eval(x); 230 | EXPECT_EQ(f1, 5.0); 231 | 232 | // Move the function 233 | auto func2 = std::move(func1); 234 | 235 | // Evaluate with gradient 236 | auto [f_g, g] = func2.eval_with_gradient(x); 237 | EXPECT_EQ(f_g, 5.0); 238 | EXPECT_EQ(g[0], 2.0); 239 | EXPECT_EQ(g[1], 4.0); 240 | 241 | // Move again 242 | TinyAD::ScalarFunction<2, double, Eigen::Index> func3; 243 | func3 = std::move(func2); 244 | 245 | // Evaluate with Hessian 246 | auto [f_h, g_h, H] = func3.eval_with_derivatives(x); 247 | EXPECT_EQ(f_h, 5.0); 248 | EXPECT_EQ(g_h[0], 2.0); 249 | EXPECT_EQ(g_h[1], 4.0); 250 | EXPECT_EQ(H.coeff(0, 0), 2.0); 251 | EXPECT_EQ(H.coeff(1, 1), 2.0); 252 | } 253 | 254 | // Test that verifies thread safety for scalar functions 255 | TEST(ScalarFunctionTest, ThreadSafety) 256 | { 257 | // Create a scalar function 258 | auto func = TinyAD::scalar_function<2>(TinyAD::range(1)); 259 | 260 | // Add a simple quadratic function 261 | func.template add_elements<1>(TinyAD::range(1), [](auto& element) 262 | { 263 | auto x = element.variables(0); 264 | return x[0] * x[0] + x[1] * x[1]; 265 | }); 266 | 267 | // Create test point 268 | Eigen::Vector2d x(1.0, 2.0); 269 | 270 | // Simulate concurrent access by calling eval methods multiple times 271 | #pragma omp parallel sections 272 | { 273 | #pragma omp section 274 | { 275 | func.eval(x); 276 | } 277 | 278 | #pragma omp section 279 | { 280 | func.eval_with_gradient(x); 281 | } 282 | 283 | #pragma omp section 284 | { 285 | func.eval_with_derivatives(x); 286 | 
} 287 | } 288 | 289 | // If we got here without crashes, the test passes 290 | SUCCEED(); 291 | } -------------------------------------------------------------------------------- /tests/ScalarTestComparison.cc: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 3 | * Author: Patrick Schmidt 4 | */ 5 | #define _USE_MATH_DEFINES // Required for M_PI on Windows 6 | 7 | #include 8 | #include 9 | #include 10 | 11 | template 12 | void test_isnan_isinf() 13 | { 14 | constexpr int dim = dynamic ? Eigen::Dynamic : 1; 15 | using ADouble = TinyAD::Scalar; 16 | const ADouble a = ADouble::make_passive(0.0, 1); 17 | const ADouble b = ADouble::make_passive(INFINITY, 1); 18 | const ADouble c = ADouble::make_passive(-INFINITY, 1); 19 | const ADouble d = ADouble::make_passive(NAN, 1); 20 | ASSERT_EQ(isnan(a), false); 21 | ASSERT_EQ(isnan(b), false); 22 | ASSERT_EQ(isnan(c), false); 23 | ASSERT_EQ(isnan(d), true); 24 | ASSERT_EQ(isinf(a), false); 25 | ASSERT_EQ(isinf(b), true); 26 | ASSERT_EQ(isinf(c), true); 27 | ASSERT_EQ(isinf(d), false); 28 | ASSERT_EQ(isfinite(a), true); 29 | ASSERT_EQ(isfinite(b), false); 30 | ASSERT_EQ(isfinite(c), false); 31 | ASSERT_EQ(isfinite(d), false); 32 | } 33 | 34 | TEST(ScalarTestComparison, IsnanIsinfFloat) { test_isnan_isinf(); } 35 | TEST(ScalarTestComparison, IsnanIsinfDouble) { test_isnan_isinf(); } 36 | TEST(ScalarTestComparison, IsnanIsinfLongDouble) { test_isnan_isinf(); } 37 | 38 | TEST(ScalarTestComparison, IsnanIsinfDoubleDynamic) { test_isnan_isinf(); } 39 | 40 | template 41 | void test_comparison() 42 | { 43 | constexpr int dim = dynamic ? Eigen::Dynamic : 1; 44 | using ADouble = TinyAD::Scalar; 45 | const ADouble a = ADouble::known_derivatives(1.0, 1.0, 4.0); 46 | const ADouble b = ADouble::known_derivatives(1.0, 2.0, 8.0); 47 | const ADouble c = ADouble::known_derivatives(2.0, 2.0, 8.0); 48 | 49 | ASSERT_TRUE(a == b); 50 | ASSERT_TRUE(b == a); 51 | ASSERT_TRUE(a != c); 52 | ASSERT_TRUE(c != a); 53 | ASSERT_TRUE(b != c); 54 | ASSERT_TRUE(c != b); 55 | 56 | ASSERT_FALSE(a < b); 57 | ASSERT_FALSE(b < a); 58 | ASSERT_TRUE(a < c); 59 | ASSERT_FALSE(c < a); 60 | ASSERT_TRUE(b < c); 61 | ASSERT_FALSE(c < b); 62 | 63 | ASSERT_TRUE(a <= b); 64 | ASSERT_TRUE(b <= a); 65 | ASSERT_TRUE(a <= c); 66 | ASSERT_FALSE(c <= a); 67 | ASSERT_TRUE(b <= c); 68 | ASSERT_FALSE(c <= b); 69 | 70 | ASSERT_FALSE(a > b); 71 | ASSERT_FALSE(b > a); 72 | ASSERT_FALSE(a > c); 73 | ASSERT_TRUE(c > a); 74 | ASSERT_FALSE(b > c); 75 | ASSERT_TRUE(c > b); 76 | 77 | ASSERT_TRUE(a >= b); 78 | ASSERT_TRUE(b >= a); 79 | ASSERT_FALSE(a >= c); 80 | ASSERT_TRUE(c >= a); 81 | ASSERT_FALSE(b >= c); 82 | ASSERT_TRUE(c >= b); 83 | 84 | // Test double overloads 85 | ASSERT_TRUE(a == 1.0); 86 | ASSERT_FALSE(a == 2.0); 87 | ASSERT_FALSE(a != 1.0); 88 | ASSERT_TRUE(a != 2.0); 89 | ASSERT_FALSE(a < 1.0); 90 | ASSERT_TRUE(a < 2.0); 91 | ASSERT_TRUE(a <= 1.0); 92 | ASSERT_TRUE(a <= 2.0); 93 | ASSERT_FALSE(a > 1.0); 94 | ASSERT_FALSE(a > 2.0); 95 | ASSERT_TRUE(a >= 1.0); 96 | ASSERT_FALSE(a >= 2.0); 97 | } 98 | 99 | TEST(ScalarTestComparison, ComparisonFloatFirstOrder) { test_comparison(); } 100 | TEST(ScalarTestComparison, ComparisonDoubleFirstOrder) { test_comparison(); } 101 | TEST(ScalarTestComparison, ComparisonLongDoubleFirstOrder) { test_comparison(); } 102 | TEST(ScalarTestComparison, ComparisonFloatSecondOrder) { test_comparison(); } 103 | TEST(ScalarTestComparison, ComparisonDoubleSecondOrder) { 
test_comparison(); } 104 | TEST(ScalarTestComparison, ComparisonLongDoubleSecondOrder) { test_comparison(); } 105 | 106 | TEST(ScalarTestComparison, ComparisonDoubleFirstOrderDynamic) { test_comparison(); } 107 | TEST(ScalarTestComparison, ComparisonDoubleSecondOrderDynamic) { test_comparison(); } 108 | 109 | template 110 | void test_min_max() 111 | { 112 | constexpr int dim = dynamic ? Eigen::Dynamic : 1; 113 | using ADouble = TinyAD::Scalar; 114 | const ADouble a = ADouble::known_derivatives(1.0, 2.0, 3.0); 115 | const ADouble b = ADouble::known_derivatives(2.0, 3.0, 4.0); 116 | 117 | ASSERT_EQ(min(a, b), a); 118 | ASSERT_EQ(min(a, b).grad, a.grad); 119 | ASSERT_EQ(min(a, b).Hess, a.Hess); 120 | 121 | ASSERT_EQ(fmin(a, b), a); 122 | ASSERT_EQ(fmin(a, b).grad, a.grad); 123 | ASSERT_EQ(fmin(a, b).Hess, a.Hess); 124 | 125 | ASSERT_EQ(max(a, b), b); 126 | ASSERT_EQ(max(a, b).grad, b.grad); 127 | ASSERT_EQ(max(a, b).Hess, b.Hess); 128 | 129 | ASSERT_EQ(fmax(a, b), b); 130 | ASSERT_EQ(fmax(a, b).grad, b.grad); 131 | ASSERT_EQ(fmax(a, b).Hess, b.Hess); 132 | } 133 | 134 | TEST(ScalarTestComparison, MinMaxFloat) { test_min_max(); } 135 | TEST(ScalarTestComparison, MinMaxDouble) { test_min_max(); } 136 | TEST(ScalarTestComparison, MinMaxLongDouble) { test_min_max(); } 137 | 138 | TEST(ScalarTestComparison, MinMaxDoubleDynamic) { test_min_max(); } 139 | 140 | template 141 | void test_clamp() 142 | { 143 | constexpr int dim = dynamic ? Eigen::Dynamic : 1; 144 | using ADouble = TinyAD::Scalar; 145 | const ADouble x = ADouble::known_derivatives(4.0, 3.0, 2.0); 146 | 147 | ASSERT_EQ(clamp(x, 0.0, 5.0), x); 148 | ASSERT_EQ(clamp(x, 0.0, 5.0).grad, x.grad); 149 | ASSERT_EQ(clamp(x, 0.0, 5.0).Hess, x.Hess); 150 | 151 | ASSERT_EQ(clamp(x, -5.0, 0.0), 0.0); 152 | ASSERT_EQ(clamp(x, -5.0, 0.0).grad(0), 0.0); 153 | ASSERT_EQ(clamp(x, -5.0, 0.0).Hess(0, 0), 0.0); 154 | 155 | ASSERT_EQ(clamp(x, 5.0, 10.0), 5.0); 156 | ASSERT_EQ(clamp(x, 5.0, 10.0).grad(0), 0.0); 157 | ASSERT_EQ(clamp(x, 5.0, 10.0).Hess(0, 0), 0.0); 158 | } 159 | 160 | TEST(ScalarTestComparison, ClampFloat) { test_clamp(); } 161 | TEST(ScalarTestComparison, ClampDouble) { test_clamp(); } 162 | TEST(ScalarTestComparison, ClampLongDouble) { test_clamp(); } 163 | 164 | //TEST(ScalarTestComparison, ClampDoubleDynamic) { test_clamp(); } // clamp(TinyAD::Scalar, double) not implemented in for dynamic mode 165 | -------------------------------------------------------------------------------- /tests/ScalarTestConstructors.cc: -------------------------------------------------------------------------------- 1 | /* 2 | * This file is part of TinyAD and released under the MIT license. 
3 | * Author: Patrick Schmidt 4 | */ 5 | #define _USE_MATH_DEFINES // Required for M_PI on Windows 6 | 7 | #include 8 | #include 9 | #include 10 | 11 | template 12 | void test_constructors_static() 13 | { 14 | static_assert(std::is_copy_constructible>::value, ""); 15 | static_assert(std::is_move_constructible>::value, ""); 16 | static_assert(std::is_copy_assignable>::value, ""); 17 | static_assert(std::is_move_assignable>::value, ""); 18 | 19 | { 20 | // Active variable 21 | TinyAD::Scalar<2, PassiveT, with_hessian> a(4.0, 0); 22 | ASSERT_EQ(a.val, 4.0); 23 | ASSERT_EQ(a.grad[0], 1.0); 24 | ASSERT_EQ(a.grad[1], 0.0); 25 | ASSERT_TRUE(a.Hess.isZero()); 26 | 27 | // Passive variable 28 | TinyAD::Scalar<2, PassiveT, with_hessian> b(5.0); 29 | ASSERT_EQ(b.val, 5.0); 30 | ASSERT_TRUE(b.grad.isZero()); 31 | ASSERT_TRUE(b.Hess.isZero()); 32 | 33 | // Copy constructor 34 | const auto a2(a); 35 | ASSERT_EQ(a.val, a2.val); 36 | ASSERT_EQ(a.grad, a2.grad); 37 | ASSERT_EQ(a.Hess, a2.Hess); 38 | 39 | // Assignment operator 40 | const auto b2 = b; 41 | ASSERT_EQ(b.val, b2.val); 42 | ASSERT_EQ(b.grad, b2.grad); 43 | ASSERT_EQ(b.Hess, b2.Hess); 44 | } 45 | } 46 | 47 | TEST(ScalarTestConstructors, ConstructorsFloatFirstOrder) { test_constructors_static(); } 48 | TEST(ScalarTestConstructors, ConstructorsDoubleFirstOrder) { test_constructors_static(); } 49 | TEST(ScalarTestConstructors, ConstructorsLongDoubleFirstOrder) { test_constructors_static(); } 50 | TEST(ScalarTestConstructors, ConstructorsFloatSecondOrder) { test_constructors_static(); } 51 | TEST(ScalarTestConstructors, ConstructorsDoubleSecondOrder) { test_constructors_static(); } 52 | TEST(ScalarTestConstructors, ConstructorsLongDoubleSecondOrder) { test_constructors_static(); } 53 | 54 | template 55 | void test_constructors_dynamic() 56 | { 57 | static_assert(std::is_copy_constructible>::value, ""); 58 | static_assert(std::is_move_constructible>::value, ""); 59 | static_assert(std::is_copy_assignable>::value, ""); 60 | static_assert(std::is_move_assignable>::value, ""); 61 | 62 | const Eigen::Index k = 2; 63 | 64 | { 65 | // Active variable 66 | using ADouble = TinyAD::Scalar; 67 | ADouble a = ADouble::make_active(4.0, 0, k); 68 | ASSERT_EQ(a.val, 4.0); 69 | ASSERT_EQ(a.grad[0], 1.0); 70 | ASSERT_EQ(a.grad[1], 0.0); 71 | ASSERT_TRUE(a.Hess.isZero()); 72 | 73 | // Passive variable 74 | ADouble b = ADouble::make_passive(5.0, k); 75 | ASSERT_EQ(b.val, 5.0); 76 | ASSERT_TRUE(b.grad.isZero()); 77 | ASSERT_TRUE(b.Hess.isZero()); 78 | 79 | // Copy constructor 80 | const auto a2(a); 81 | ASSERT_EQ(a.val, a2.val); 82 | ASSERT_EQ(a.grad, a2.grad); 83 | ASSERT_EQ(a.Hess, a2.Hess); 84 | 85 | // Assignment operator 86 | const auto b2 = b; 87 | ASSERT_EQ(b.val, b2.val); 88 | ASSERT_EQ(b.grad, b2.grad); 89 | ASSERT_EQ(b.Hess, b2.Hess); 90 | } 91 | } 92 | 93 | TEST(ScalarTestConstructors, ConstructorsFloatFirstOrderDynamic) { test_constructors_dynamic(); } 94 | TEST(ScalarTestConstructors, ConstructorsDoubleFirstOrderDynamic) { test_constructors_dynamic(); } 95 | TEST(ScalarTestConstructors, ConstructorsLongDoubleFirstOrderDynamic) { test_constructors_dynamic(); } 96 | TEST(ScalarTestConstructors, ConstructorsFloatSecondOrderDynamic) { test_constructors_dynamic(); } 97 | TEST(ScalarTestConstructors, ConstructorsDoubleSecondOrderDynamic) { test_constructors_dynamic(); } 98 | TEST(ScalarTestConstructors, ConstructorsLongDoubleSecondOrderDynamic) { test_constructors_dynamic(); } 99 | 100 | template 101 | void test_to_passive() 102 | { 103 | { 104 | // make_active() 
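// make_active({2.0, 4.0}) turns the passive values into a vector of active scalars:
// each entry keeps its value, its gradient is the corresponding canonical unit vector,
// and its Hessian starts at zero, as the assertions below verify.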
105 |         constexpr int dim = dynamic ? Eigen::Dynamic : 2;
106 |         const auto v = TinyAD::Scalar<dim, PassiveT, with_hessian>::make_active({ 2.0, 4.0 });
107 |         ASSERT_EQ(v[0].val, 2.0);
108 |         ASSERT_EQ(v[1].val, 4.0);
109 |         ASSERT_EQ(v[0].grad[0], 1.0);
110 |         ASSERT_EQ(v[0].grad[1], 0.0);
111 |         ASSERT_EQ(v[1].grad[0], 0.0);
112 |         ASSERT_EQ(v[1].grad[1], 1.0);
113 |         ASSERT_TRUE(v[0].Hess.isZero());
114 |         ASSERT_TRUE(v[1].Hess.isZero());
115 | 
116 |         // to_passive() vector
117 |         const Eigen::Matrix<PassiveT, dim, 1> v_passive = TinyAD::to_passive(v);
118 |         const Eigen::Matrix<PassiveT, dim, 1> v_passive2 = TinyAD::to_passive(v_passive);
119 |         TINYAD_ASSERT_EQ(v_passive[0], 2.0);
120 |         TINYAD_ASSERT_EQ(v_passive[1], 4.0);
121 |         TINYAD_ASSERT_EQ(v_passive2[0], 2.0);
122 |         TINYAD_ASSERT_EQ(v_passive2[1], 4.0);
123 |     }
124 | 
125 |     {
126 |         // to_passive() matrix
127 |         constexpr int dim = dynamic ? Eigen::Dynamic : 4;
128 |         using ADouble = TinyAD::Scalar<dim, PassiveT, with_hessian>;
129 |         const Eigen::Vector<ADouble, dim> v = ADouble::make_active({ 1.0, 2.0, 3.0, 4.0 });
130 |         Eigen::Matrix<ADouble, 2, 2> M;
131 |         M << v[0], v[1], v[2], v[3];
132 |         const Eigen::Matrix2<PassiveT> M_passive = TinyAD::to_passive(M);
133 |         const Eigen::Matrix2<PassiveT> M_passive2 = TinyAD::to_passive(M_passive);
134 |         ASSERT_EQ(M(0, 0).val, M_passive(0, 0));
135 |         ASSERT_EQ(M(0, 1).val, M_passive(0, 1));
136 |         ASSERT_EQ(M(1, 0).val, M_passive(1, 0));
137 |         ASSERT_EQ(M(1, 1).val, M_passive(1, 1));
138 |         ASSERT_EQ(M_passive2(0, 0), M_passive(0, 0));
139 |         ASSERT_EQ(M_passive2(0, 1), M_passive(0, 1));
140 |         ASSERT_EQ(M_passive2(1, 0), M_passive(1, 0));
141 |         ASSERT_EQ(M_passive2(1, 1), M_passive(1, 1));
142 |     }
143 | }
144 | 
145 | TEST(ScalarTestConstructors, ToPassiveFloatFirstOrder) { test_to_passive<float, false>(); }
146 | TEST(ScalarTestConstructors, ToPassiveDoubleFirstOrder) { test_to_passive<double, false>(); }
147 | TEST(ScalarTestConstructors, ToPassiveLongDoubleFirstOrder) { test_to_passive<long double, false>(); }
148 | TEST(ScalarTestConstructors, ToPassiveFloatSecondOrder) { test_to_passive<float, true>(); }
149 | TEST(ScalarTestConstructors, ToPassiveDoubleSecondOrder) { test_to_passive<double, true>(); }
150 | TEST(ScalarTestConstructors, ToPassiveLongDoubleSecondOrder) { test_to_passive<long double, true>(); }
151 | 
152 | TEST(ScalarTestConstructors, ToPassiveFirstOrderDynamic) { test_to_passive<double, false, true>(); }
153 | TEST(ScalarTestConstructors, ToPassiveSecondOrderDynamic) { test_to_passive<double, true, true>(); }
154 | 
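A minimal standalone sketch (not part of the repository) of the make_active()/to_passive() round trip exercised by the tests above; the return types and the ToPassive.hh include path are assumptions taken from the usage shown in this file:

    #include <TinyAD/Scalar.hh>
    #include <TinyAD/Utils/ToPassive.hh>

    int main()
    {
        // Two variables, double-valued, with Hessian tracking
        using ADouble = TinyAD::Scalar<2, double>;

        // Active vector: gradients initialized to the identity
        Eigen::Vector2<ADouble> x = ADouble::make_active({ 2.0, 4.0 });

        // Strip derivatives again: to_passive() returns plain doubles
        Eigen::Vector2d x_passive = TinyAD::to_passive(x);
        return 0;
    }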
--------------------------------------------------------------------------------
/tests/ScalarTestHessianBlock.cc:
--------------------------------------------------------------------------------
1 | /*
2 |  * This file is part of TinyAD and released under the MIT license.
3 |  * Author: Patrick Schmidt
4 |  */
5 | #define _USE_MATH_DEFINES // Required for M_PI on Windows
6 | 
7 | #include <TinyAD/Scalar.hh>
8 | #include <TinyAD/Utils/Helpers.hh>
9 | #include <TinyAD/Utils/Out.hh>
10 | #include <gtest/gtest.h>
11 | 
12 | template <typename T>
13 | T f(
14 |         const T& x1, const T& x2,
15 |         const T& y1, const T& y2, const T& y3)
16 | {
17 |     // From https://github.com/patr-schm/TinyAD/issues/13
18 |     const auto r1 = x1-y1;
19 |     const auto r2 = x2-y2;
20 |     const auto d = r1*r1 + r2*r2;
21 |     return y3*d;
22 | }
23 | 
24 | template <typename PassiveT>
25 | void test_hess_block_1()
26 | {
27 |     PassiveT x1 = 1;
28 |     PassiveT x2 = 2;
29 |     PassiveT y1 = 3;
30 |     PassiveT y2 = 4;
31 |     PassiveT y3 = 5;
32 | 
33 |     // Compute full Hessian
34 |     using ADFull = TinyAD::Scalar<5, PassiveT>;
35 |     Eigen::Matrix<ADFull, 5, 1> x_full = ADFull::make_active({x1, x2, y1, y2, y3});
36 |     ADFull f_ad_full = f(x_full[0], x_full[1], x_full[2], x_full[3], x_full[4]);
37 | 
38 |     // Compute truncated Hessian (2-by-3 block ddf/dxdy)
39 |     using ADTrunc = TinyAD::Scalar<5, PassiveT, true, 0, 2, 2, 3>;
40 |     Eigen::Matrix<ADTrunc, 5, 1> x_trunc = ADTrunc::make_active({x1, x2, y1, y2, y3});
41 |     ADTrunc f_ad_trunc = f(x_trunc[0], x_trunc[1], x_trunc[2], x_trunc[3], x_trunc[4]);
42 | 
43 |     TINYAD_ASSERT_EPS_MAT(f_ad_trunc.Hess, f_ad_full.Hess.block(0, 2, 2, 3), 1e-16);
44 | }
45 | 
46 | TEST(ScalarTestHessianBlock, HessianBlock1Float) { test_hess_block_1<float>(); }
47 | TEST(ScalarTestHessianBlock, HessianBlock1Double) { test_hess_block_1<double>(); }
48 | TEST(ScalarTestHessianBlock, HessianBlock1LongDouble) { test_hess_block_1<long double>(); }
49 | 
50 | template <typename T, typename PassiveT>
51 | T symm_dirich(const Eigen::Matrix<T, 6, 1>& _x, const Eigen::Matrix<PassiveT, 6, 1>& _x_rest)
52 | {
53 |     const Eigen::Matrix<PassiveT, 2, 1> ar(_x_rest[0], _x_rest[1]);
54 |     const Eigen::Matrix<PassiveT, 2, 1> br(_x_rest[2], _x_rest[3]);
55 |     const Eigen::Matrix<PassiveT, 2, 1> cr(_x_rest[4], _x_rest[5]);
56 |     const Eigen::Matrix<PassiveT, 2, 2> Mr = TinyAD::col_mat(br - ar, cr - ar);
57 | 
58 |     const Eigen::Matrix<T, 2, 1> a(_x[0], _x[1]);
59 |     const Eigen::Matrix<T, 2, 1> b(_x[2], _x[3]);
60 |     const Eigen::Matrix<T, 2, 1> c(_x[4], _x[5]);
61 |     const Eigen::Matrix<T, 2, 2> M = TinyAD::col_mat(b - a, c - a);
62 | 
63 |     const Eigen::Matrix<T, 2, 2> J = M * Mr.inverse();
64 |     const PassiveT area = 1.0;
65 | 
66 |     return area * (J.squaredNorm() + J.inverse().squaredNorm());
67 | }
68 | 
69 | template <typename PassiveT, int hess_row_start, int hess_col_start, int hess_rows, int hess_cols>
70 | void test_hessian_block_symmetric_dirichlet()
71 | {
72 |     // Passive rest-state triangle ar, br, cr
73 |     const Eigen::Matrix<PassiveT, 2, 1> ar(1.0, 1.0);
74 |     const Eigen::Matrix<PassiveT, 2, 1> br(2.0, 1.0);
75 |     const Eigen::Matrix<PassiveT, 2, 1> cr(1.0, 2.0);
76 |     const Eigen::Matrix<PassiveT, 2, 2> Mr = TinyAD::col_mat(br - ar, cr - ar);
77 | 
78 |     const Eigen::Matrix<PassiveT, 6, 1> x_rest = { 1.0, 1.0, 2.0, 1.0, 1.0, 2.0 };
79 |     const Eigen::Matrix<PassiveT, 6, 1> x_init = { 10.0, 1.0, 15.0, 3.0, 2.0, 2.0 };
80 | 
81 |     // Compute full 6-by-6 Hessian
82 |     using ADFull = TinyAD::Scalar<6, PassiveT>;
83 |     const Eigen::Matrix<PassiveT, 6, 6> H_full = symm_dirich(ADFull::make_active(x_init), x_rest).Hess;
84 | 
85 |     // Compute a Hessian block only and compare
86 |     using ADBlock = TinyAD::Scalar<6, PassiveT, true, hess_row_start, hess_col_start, hess_rows, hess_cols>;
87 |     const Eigen::Matrix<PassiveT, hess_rows, hess_cols> H_block = symm_dirich(ADBlock::make_active(x_init), x_rest).Hess;
88 | 
89 |     TINYAD_ASSERT_EPS_MAT(H_block, H_full.block(hess_row_start, hess_col_start, hess_rows, hess_cols), 1e-16);
90 | }
91 | 
92 | TEST(ScalarTestHessianBlock, HessianBlockSymmetricDirichletDouble)
93 | {
94 |     test_hessian_block_symmetric_dirichlet<double, 0, 0, 6, 6>();
95 |     test_hessian_block_symmetric_dirichlet<double, 0, 0, 2, 2>();
96 |     test_hessian_block_symmetric_dirichlet<double, 2, 2, 4, 4>();
97 |     test_hessian_block_symmetric_dirichlet<double, 0, 2, 2, 4>();
98 |     test_hessian_block_symmetric_dirichlet<double, 2, 0, 4, 2>();
99 |     test_hessian_block_symmetric_dirichlet<double, 1, 3, 3, 2>();
100 | }
101 | 
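A condensed usage sketch (not part of the repository) of the Hessian-block mode tested above. The template-parameter order <k, PassiveT, with_hessian, hess_row_start, hess_col_start, hess_rows, hess_cols> and the block shape of .Hess are assumptions read off the tests:

    #include <TinyAD/Scalar.hh>

    int main()
    {
        // Track only the 2-by-3 mixed block d2f/(dx dy) of the full 5-by-5 Hessian
        using ADBlock = TinyAD::Scalar<5, double, true, 0, 2, 2, 3>;
        Eigen::Vector<ADBlock, 5> v = ADBlock::make_active({ 1.0, 2.0, 3.0, 4.0, 5.0 });

        // f(x1, x2, y1, y2, y3) = y3 * ((x1 - y1)^2 + (x2 - y2)^2), as in the test above
        ADBlock f = v[4] * (sqr(v[0] - v[2]) + sqr(v[1] - v[3]));

        // Assumed to hold exactly the requested block (cf. the comparison against Hess.block(0, 2, 2, 3))
        Eigen::Matrix<double, 2, 3> dxdy = f.Hess;
        return 0;
    }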
--------------------------------------------------------------------------------
/tests/ScalarTestMisc.cc:
--------------------------------------------------------------------------------
1 | /*
2 |  * This file is part of TinyAD and released under the MIT license.
3 |  * Author: Patrick Schmidt
4 |  */
5 | #define _USE_MATH_DEFINES // Required for M_PI on Windows
6 | 
7 | #include <TinyAD/Scalar.hh>
8 | #include <TinyAD/Utils/Helpers.hh>
9 | #include <gtest/gtest.h>
10 | 
11 | template <typename PassiveT, bool with_hessian, bool dynamic = false>
12 | void test_quadratic()
13 | {
14 |     // f(a) = a^2 + a + 2 at a=1
15 |     constexpr int dim = dynamic ? Eigen::Dynamic : 1;
16 |     using ADouble = TinyAD::Scalar<dim, PassiveT, with_hessian>;
17 |     const ADouble a = ADouble::make_active(1.0, 0, 1);
18 |     const ADouble f = sqr(a) + a + 2.0;
19 |     ASSERT_EQ(f.val, 4.0);
20 |     ASSERT_EQ(f.grad(0), 3.0);
21 |     if constexpr (with_hessian)
22 |     {
23 |         ASSERT_EQ(f.Hess(0, 0), 2.0);
24 |         TINYAD_ASSERT_SYMMETRIC(f.Hess, 1e-12);
25 |     }
26 | }
27 | 
28 | TEST(ScalarTestMisc, QuadraticFloatFirstOrder) { test_quadratic<float, false>();}
29 | TEST(ScalarTestMisc, QuadraticDoubleFirstOrder) { test_quadratic<double, false>();}
30 | TEST(ScalarTestMisc, QuadraticLongDoubleFirstOrder) { test_quadratic<long double, false>();}
31 | TEST(ScalarTestMisc, QuadraticFloatSecondOrder) { test_quadratic<float, true>();}
32 | TEST(ScalarTestMisc, QuadraticDoubleSecondOrder) { test_quadratic<double, true>();}
33 | TEST(ScalarTestMisc, QuadraticLongDoubleSecondOrder) { test_quadratic<long double, true>();}
34 | 
35 | TEST(ScalarTestMisc, QuadraticDoubleFirstOrderDynamic) { test_quadratic<double, false, true>(); }
36 | TEST(ScalarTestMisc, QuadraticDoubleSecondOrderDynamic) { test_quadratic<double, true, true>(); }
37 | 
38 | template <typename PassiveT, bool with_hessian, bool dynamic = false>
39 | void test_sphere()
40 | {
41 |     // f: R^2 -> R^3
42 |     // f(phi, psi) = (sin(phi) * cos(psi), sin(phi) * sin(psi), cos(phi))
43 |     constexpr int dim = dynamic ? Eigen::Dynamic : 2;
44 |     using ADouble = TinyAD::Scalar<dim, PassiveT, with_hessian>;
45 |     ADouble alpha = ADouble::make_active((PassiveT)M_PI / 8.0, 0, 2);
46 |     ADouble beta = ADouble::make_active((PassiveT)M_PI / 8.0, 1, 2);
47 |     const auto f = Eigen::Matrix<ADouble, 3, 1>(
48 |             sin(alpha) * cos(beta),
49 |             sin(alpha) * sin(beta),
50 |             cos(alpha));
51 | 
52 |     // Test function value
53 |     ASSERT_NEAR(f[0].val, std::sin(alpha.val) * std::cos(alpha.val), 1e-12);
54 |     ASSERT_NEAR(f[1].val, std::sin(alpha.val) * std::sin(alpha.val), 1e-12);
55 |     ASSERT_NEAR(f[2].val, std::cos(alpha.val), 1e-12);
56 | 
57 |     // Test gradient (Jacobian)
58 |     ASSERT_NEAR(f[0].grad(0), std::cos(alpha.val) * std::cos(beta.val), 1e-12);
59 |     ASSERT_NEAR(f[0].grad(1), -std::sin(alpha.val) * std::sin(beta.val), 1e-12);
60 |     ASSERT_NEAR(f[1].grad(0), std::cos(alpha.val) * std::sin(beta.val), 1e-12);
61 |     ASSERT_NEAR(f[1].grad(1), std::cos(beta.val) * std::sin(alpha.val), 1e-12);
62 |     ASSERT_NEAR(f[2].grad(0), -std::sin(alpha.val), 1e-12);
63 |     ASSERT_NEAR(f[2].grad(1), 0.0, 1e-12);
64 | 
65 |     if constexpr (with_hessian)
66 |     {
67 |         // Test Hessian
68 |         ASSERT_NEAR(f[0].Hess(0, 0), -std::sin(alpha.val) * std::cos(beta.val), 1e-12);
69 |         ASSERT_NEAR(f[0].Hess(0, 1), -std::cos(alpha.val) * std::sin(beta.val), 1e-12);
70 |         ASSERT_NEAR(f[0].Hess(1, 0), -std::cos(alpha.val) * std::sin(beta.val), 1e-12);
71 |         ASSERT_NEAR(f[0].Hess(1, 1), -std::sin(alpha.val) * std::cos(beta.val), 1e-12);
72 |         ASSERT_NEAR(f[1].Hess(0, 0), -std::sin(alpha.val) * std::sin(beta.val), 1e-12);
73 |         ASSERT_NEAR(f[1].Hess(0, 1), std::cos(alpha.val) * std::cos(beta.val), 1e-12);
74 |         ASSERT_NEAR(f[1].Hess(1, 0), std::cos(alpha.val) * std::cos(beta.val), 1e-12);
75 |         ASSERT_NEAR(f[1].Hess(1, 1), -std::sin(alpha.val) * std::sin(beta.val), 1e-12);
76 |         ASSERT_NEAR(f[2].Hess(0, 0), -std::cos(alpha.val), 1e-12);
77 |         ASSERT_NEAR(f[2].Hess(0, 1), 0.0, 1e-12);
78 |         ASSERT_NEAR(f[2].Hess(1, 0), 0.0, 1e-12);
79 |         ASSERT_NEAR(f[2].Hess(1, 1), 0.0, 1e-12);
80 |         TINYAD_ASSERT_SYMMETRIC(f[0].Hess, 1e-12);
81 |         TINYAD_ASSERT_SYMMETRIC(f[1].Hess, 1e-12);
82 |         TINYAD_ASSERT_SYMMETRIC(f[2].Hess, 1e-12);
83 |     }
84 | }
85 | 
86 | TEST(ScalarTestMisc, SphereFloatFirstOrder) { test_sphere<float, false>(); }
87 | TEST(ScalarTestMisc, SphereDoubleFirstOrder) { test_sphere<double, false>(); }
88 | TEST(ScalarTestMisc, SphereLongDoubleFirstOrder) { test_sphere<long double, false>(); }
89 | TEST(ScalarTestMisc, SphereFloatSecondOrder) { test_sphere<float, true>(); }
90 | TEST(ScalarTestMisc, SphereDoubleSecondOrder) { test_sphere<double, true>(); }
91 | TEST(ScalarTestMisc, SphereLongDoubleSecondOrder) { test_sphere<long double, true>(); }
92 | 
93 | TEST(ScalarTestMisc, SphereDoubleFirstOrderDynamic) { test_sphere<double, false, true>(); }
94 | TEST(ScalarTestMisc, SphereDoubleSecondOrderDynamic) { test_sphere<double, true, true>(); }
95 | 
96 | template <typename PassiveT, bool dynamic = false>
97 | void test_min_quadric()
98 | {
99 |     // Variable vector in R^3
100 |     constexpr int dim = dynamic ? Eigen::Dynamic : 3;
101 |     using ADouble = TinyAD::Scalar<dim, PassiveT>;
102 |     const Eigen::Vector<ADouble, dim> x = ADouble::make_active({ 0.0, 0.0, 0.0 });
103 | 
104 |     // Quadratic function
105 |     const ADouble f = sqr(x[0]) + 2.0 * sqr(x[1]) + 6.0 * sqr(x[2]) + x[0] - 2.0 * x[1] + 6.0 * x[2] + 10;
106 | 
107 |     // Solve for minimum
108 |     const Eigen::Vector<PassiveT, dim> x_min = -f.Hess.inverse() * f.grad;
109 | 
110 |     ASSERT_NEAR(x_min.x(), -0.5, 1e-12);
111 |     ASSERT_NEAR(x_min.y(), 0.5, 1e-12);
112 |     ASSERT_NEAR(x_min.z(), -0.5, 1e-12);
113 | }
114 | 
115 | TEST(ScalarTestMisc, MinQuadraticFloat) { test_min_quadric<float>(); }
116 | TEST(ScalarTestMisc, MinQuadraticDouble) { test_min_quadric<double>(); }
117 | TEST(ScalarTestMisc, MinQuadraticLongDouble) { test_min_quadric<long double>(); }
118 | 
119 | TEST(ScalarTestMisc, MinQuadraticDoubleDynamic) { test_min_quadric<double, true>(); }
120 | 
121 | template <typename PassiveT, bool dynamic = false>
122 | void test_triangle_distortion()
123 | {
124 |     constexpr int dim = dynamic ? Eigen::Dynamic : 6;
125 |     using ADouble = TinyAD::Scalar<dim, PassiveT>;
126 | 
127 |     // Passive rest-state triangle ar, br, cr
128 |     const Eigen::Matrix<PassiveT, 2, 1> ar(1.0, 1.0);
129 |     const Eigen::Matrix<PassiveT, 2, 1> br(2.0, 1.0);
130 |     const Eigen::Matrix<PassiveT, 2, 1> cr(1.0, 2.0);
131 |     const Eigen::Matrix<PassiveT, 2, 2> Mr = TinyAD::col_mat(br - ar, cr - ar);
132 | 
133 |     // Active variable vector for vertex positions a, b, c
134 |     const Eigen::Vector<ADouble, dim> x = ADouble::make_active({
135 |             10.0, 1.0,
136 |             15.0, 3.0,
137 |             2.0, 2.0,
138 |     });
139 |     const Eigen::Matrix<ADouble, 2, 1> a(x[0], x[1]);
140 |     const Eigen::Matrix<ADouble, 2, 1> b(x[2], x[3]);
141 |     const Eigen::Matrix<ADouble, 2, 1> c(x[4], x[5]);
142 |     const Eigen::Matrix<ADouble, 2, 2> M = TinyAD::col_mat(b - a, c - a);
143 | 
144 |     const Eigen::Matrix<ADouble, 2, 2> J = M * Mr.inverse();
145 |     const ADouble E = J.squaredNorm() + J.inverse().squaredNorm();
146 |     TINYAD_ASSERT_FINITE(E.val);
147 |     TINYAD_ASSERT_FINITE_MAT(E.grad);
148 |     TINYAD_ASSERT_FINITE_MAT(E.Hess);
149 | }
150 | 
151 | TEST(ScalarTestMisc, TriangleDistortionFloat) { test_triangle_distortion<float>(); }
152 | TEST(ScalarTestMisc, TriangleDistortionDouble) { test_triangle_distortion<double>(); }
153 | TEST(ScalarTestMisc, TriangleDistortionLongDouble) { test_triangle_distortion<long double>(); }
154 | 
155 | //TEST(ScalarTestMisc, TriangleDistortionDoubleDynamic) { test_triangle_distortion<double, true>(); } // Not available, b/c squaredNorm() needs default constructor "Scalar(0)" to be implemented, which is currently not the case in dynamic mode.
156 | 
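The pattern in test_min_quadric() above as a standalone sketch (not part of the repository): for a quadratic energy, a single Newton step from the expansion point, d = -Hess^-1 * grad, lands exactly on the minimizer:

    #include <TinyAD/Scalar.hh>

    int main()
    {
        using ADouble = TinyAD::Scalar<3, double>; // 3 variables, second order

        // E(x) = x0^2 + 2 x1^2 + 6 x2^2 + x0 - 2 x1 + 6 x2 + 10, expanded at x = 0
        Eigen::Vector3<ADouble> x = ADouble::make_active({ 0.0, 0.0, 0.0 });
        ADouble E = sqr(x[0]) + 2.0 * sqr(x[1]) + 6.0 * sqr(x[2]) + x[0] - 2.0 * x[1] + 6.0 * x[2] + 10.0;

        // E.grad is 3x1, E.Hess is 3x3; the minimizer is (-0.5, 0.5, -0.5)
        Eigen::Vector3d x_min = -E.Hess.inverse() * E.grad;
        return 0;
    }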
--------------------------------------------------------------------------------
/tests/SwitchSolverTest.cc:
--------------------------------------------------------------------------------
1 | /*
2 |  * This file is part of TinyAD and released under the MIT license.
3 |  * Author: Patrick Schmidt
4 |  */
5 | #include <TinyAD/Utils/Helpers.hh>
6 | #include <TinyAD/Utils/NewtonDirection.hh>
7 | #include <TinyAD/Utils/GaussNewtonDirection.hh>
8 | 
9 | #include <Eigen/SparseLU>
10 | #include <Eigen/SparseQR>
11 | #include <gtest/gtest.h>
12 | 
13 | namespace
14 | {
15 | 
16 | template <typename PassiveT, typename SolverT>
17 | void test_solver(
18 |         const PassiveT& _eps)
19 | {
20 |     // x^2 + y^2 at x = (1, 1)
21 |     // Test Newton step
22 |     Eigen::VectorX<PassiveT> g(2);
23 |     g.setConstant(2.0);
24 |     Eigen::SparseMatrix<PassiveT> H = TinyAD::identity<PassiveT>(2);
25 |     H *= 2.0;
26 | 
27 |     Eigen::SparseMatrix<PassiveT> B = TinyAD::identity<PassiveT>(2);
28 | 
29 |     SolverT solver;
30 |     Eigen::VectorX<PassiveT> d = newton_direction(g, H, solver);
31 |     ASSERT_NEAR(d(0), -1.0, _eps);
32 |     ASSERT_NEAR(d(1), -1.0, _eps);
33 | 
34 |     d = newton_direction_reduced_basis(g, H, B, solver);
35 |     ASSERT_NEAR(d(0), -1.0, _eps);
36 |     ASSERT_NEAR(d(1), -1.0, _eps);
37 | 
38 |     // r = (x^2, y^2) at x = (1, 1)
39 |     // Test Gauss-Newton step
40 |     Eigen::VectorX<PassiveT> r(2);
41 |     r.setConstant(1.0);
42 |     Eigen::SparseMatrix<PassiveT> J = H;
43 | 
44 |     d = gauss_newton_direction(r, J, solver);
45 |     ASSERT_NEAR(d(0), -0.5, _eps);
46 |     ASSERT_NEAR(d(1), -0.5, _eps);
47 | }
48 | 
49 | }
50 | 
51 | template <typename PassiveT>
52 | void test_switch_solver(
53 |         const PassiveT& _eps)
54 | {
55 |     test_solver<PassiveT, TinyAD::LinearSolver<PassiveT>>(_eps);
56 |     test_solver<PassiveT, TinyAD::LinearSolver<PassiveT, Eigen::SimplicialLDLT<Eigen::SparseMatrix<PassiveT>>>>(_eps);
57 |     test_solver<PassiveT, TinyAD::LinearSolver<PassiveT, Eigen::SimplicialLLT<Eigen::SparseMatrix<PassiveT>>>>(_eps);
58 |     test_solver<PassiveT, TinyAD::LinearSolver<PassiveT, Eigen::SparseLU<Eigen::SparseMatrix<PassiveT>>>>(_eps);
59 |     test_solver<PassiveT, TinyAD::LinearSolver<PassiveT, Eigen::SparseQR<Eigen::SparseMatrix<PassiveT>, Eigen::COLAMDOrdering<int>>>>(_eps);
60 | }
61 | 
62 | TEST(SwitchSolverTest, 2DDeformationFloat) { test_switch_solver<float>(1e-6f); }
63 | TEST(SwitchSolverTest, 2DDeformationDouble) { test_switch_solver<double>(1e-15); }
64 | TEST(SwitchSolverTest, 2DDeformationLongDouble) { test_switch_solver<long double>(1e-15); }
65 | 
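A minimal standalone sketch (not part of the repository) of the Newton-direction helper exercised above, using the default TinyAD::LinearSolver; the header paths and the namespace-qualified call follow TinyAD's documented usage and are assumptions here:

    #include <TinyAD/Utils/Helpers.hh>
    #include <TinyAD/Utils/LinearSolver.hh>
    #include <TinyAD/Utils/NewtonDirection.hh>

    int main()
    {
        // Gradient and Hessian of f(x, y) = x^2 + y^2 at (1, 1)
        Eigen::VectorXd g(2);
        g.setConstant(2.0);
        Eigen::SparseMatrix<double> H = TinyAD::identity<double>(2);
        H *= 2.0;

        // The solver is an ordinary object; the test above swaps in different Eigen back-ends here
        TinyAD::LinearSolver<double> solver;
        Eigen::VectorXd d = TinyAD::newton_direction(g, H, solver); // expected: (-1, -1)
        return 0;
    }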
--------------------------------------------------------------------------------
/tests/VectorFunctionTest.cc:
--------------------------------------------------------------------------------
1 | /*
2 |  * This file is part of TinyAD and released under the MIT license.
3 |  * Author: Patrick Schmidt
4 |  */
5 | #include <TinyAD/VectorFunction.hh>
6 | #include <TinyAD/Utils/Helpers.hh>
7 | #include <gtest/gtest.h>
8 | 
9 | // Simple test that verifies the vector function works correctly
10 | TEST(VectorFunctionTest, Basic)
11 | {
12 |     // R^2 -> R^3
13 |     // 2D input space, 2D+1D output (split across two element types)
14 |     auto func = TinyAD::vector_function<1, double>(TinyAD::range(2));
15 | 
16 |     // First element: R^1 -> R^2
17 |     func.template add_elements<1, 2>(TinyAD::range(1), [](auto& element) -> TINYAD_VECTOR_TYPE(element)
18 |     {
19 |         using T = TINYAD_SCALAR_TYPE(element);
20 |         T x0 = element.variables(0)[0];
21 |         return Eigen::Vector2<T>(2.0 * x0, sqr(x0));
22 |     });
23 | 
24 |     // Second element: R^1 -> R^1
25 |     func.template add_elements<1, 1>(TinyAD::range(1), [](auto& element) -> TINYAD_VECTOR_TYPE(element)
26 |     {
27 |         using T = TINYAD_SCALAR_TYPE(element);
28 |         T x1 = element.variables(1)[0];
29 |         return Eigen::Vector<T, 1>(sqr(x1));
30 |     });
31 | 
32 |     // Test at x = (3,4)
33 |     Eigen::Vector2d x(3.0, 4.0);
34 | 
35 |     // Test function value
36 |     Eigen::VectorXd r = func.eval(x);
37 |     EXPECT_EQ(r.size(), 3);
38 |     EXPECT_EQ(r[0], 6.0); // 2*x0
39 |     EXPECT_EQ(r[1], 9.0); // x0^2
40 |     EXPECT_EQ(r[2], 16.0); // x1^2
41 | 
42 |     // Test sum of squares
43 |     double f = func.eval_sum_of_squares(x);
44 |     EXPECT_EQ(f, 373.0); // 6^2 + 9^2 + 16^2
45 | 
46 |     // Test sum of squares with derivatives
47 |     auto [f_s, g_s, r_s, J_s] = func.eval_sum_of_squares_with_derivatives(x);
48 |     EXPECT_EQ(f_s, 373.0);
49 |     EXPECT_EQ(r_s.size(), 3);
50 |     EXPECT_EQ(r_s[0], 6.0);
51 |     EXPECT_EQ(r_s[1], 9.0);
52 |     EXPECT_EQ(r_s[2], 16.0);
53 |     EXPECT_EQ(g_s[0], 8.0 * x[0] + 4.0 * x[0] * x[0] * x[0]); // d/dx0(36 + 81 + 256)
54 |     EXPECT_EQ(g_s[1], 4.0 * x[1] * x[1] * x[1]); // d/dx1(36 + 81 + 256)
55 | 
56 |     // Test Jacobian
57 |     auto [r_j, J] = func.eval_with_jacobian(x);
58 |     EXPECT_EQ(r_j.size(), 3);
59 |     EXPECT_EQ(J.rows(), 3);
60 |     EXPECT_EQ(J.cols(), 2);
61 |     EXPECT_EQ(J.coeff(0, 0), 2.0); // d(2*x0)/dx0
62 |     EXPECT_EQ(J.coeff(1, 0), 6.0); // d(x0^2)/dx0
63 |     EXPECT_EQ(J.coeff(2, 1), 8.0); // d(x1^2)/dx1
64 | 
65 |     // Test Hessians
66 |     auto [r_h, J_h, H] = func.eval_with_derivatives(x);
67 |     EXPECT_EQ(r_h.size(), 3);
68 |     EXPECT_EQ(H.size(), 3);
69 |     EXPECT_EQ(H[1].coeff(0, 0), 2.0); // d^2(x0^2)/dx0^2
70 |     EXPECT_EQ(H[2].coeff(1, 1), 2.0); // d^2(x1^2)/dx1^2
71 | }
72 | 
73 | template <typename PassiveT>
74 | void test_eval()
75 | {
76 |     // R^2 -> R^3
77 |     // 2 1D variables, 1 2D element, 1 1D element
78 |     auto func = TinyAD::vector_function<1, PassiveT>(TinyAD::range(2));
79 |     func.template add_elements<1, 2>(TinyAD::range(1), [] (auto& element) -> TINYAD_VECTOR_TYPE(element)
80 |     {
81 |         using T = TINYAD_SCALAR_TYPE(element);
82 |         T x0 = element.variables(0)[0];
83 | 
84 |         return Eigen::Vector2<T>(2.0 * x0, sqr(x0));
85 |     });
86 |     func.template add_elements<1, 1>(TinyAD::range(1), [] (auto& element) -> TINYAD_VECTOR_TYPE(element)
87 |     {
88 |         using T = TINYAD_SCALAR_TYPE(element);
89 |         T x1 = element.variables(1)[0];
90 | 
91 |         return Eigen::Vector<T, 1>(sqr(x1));
92 |     });
93 | 
94 |     Eigen::Vector2<PassiveT> x(3.0, 4.0);
95 | 
96 |     // (2.0 * x0, x0^2, x1^2)
97 |     PassiveT f_expected = 373.0;
98 |     Eigen::Vector2<PassiveT> g_expected((PassiveT)8.0 * x[0] + (PassiveT)4.0 * x[0] * x[0] * x[0], (PassiveT)4.0 * x[1] * x[1] * x[1]);
99 |     Eigen::Vector3<PassiveT> r_expected((PassiveT)2.0 * x[0], sqr(x[0]), sqr(x[1]));
100 |     Eigen::SparseMatrix<PassiveT> J_expected(3, 2);
101 |     J_expected.coeffRef(0, 0) = (PassiveT)2.0;
102 |     J_expected.coeffRef(1, 0) = (PassiveT)2.0 * x[0];
103 |     J_expected.coeffRef(2, 1) = (PassiveT)2.0 * x[1];
104 |     std::vector<Eigen::SparseMatrix<PassiveT>> H_expected(3, Eigen::SparseMatrix<PassiveT>(2, 2));
105 |     H_expected[1].coeffRef(0, 0) = (PassiveT)2.0;
106 |     H_expected[2].coeffRef(1, 1) = (PassiveT)2.0;
107 | 
108 |     { // Test eval()
109 |         Eigen::VectorX<PassiveT> r = func.eval(x);
110 |         TINYAD_ASSERT_EPS_MAT(r, r_expected, 1e-16);
111 |     }
112 | 
113 |     { // Test eval_sum_of_squares()
114 |         PassiveT f = func.eval_sum_of_squares(x);
115 |         TINYAD_ASSERT_EPS(f, f_expected, 1e-16);
116 |     }
117 | 
118 |     { // Test eval_with_jacobian()
119 |         Eigen::VectorX<PassiveT> r;
120 |         Eigen::SparseMatrix<PassiveT> J;
121 |         func.eval_with_jacobian(x, r, J);
122 |         TINYAD_ASSERT_EPS_MAT(r, r_expected, 1e-16);
123 |         TINYAD_ASSERT_EPS_MAT(J.toDense(), J_expected.toDense(), 1e-16);
124 |     }
125 | 
126 |     { // Test eval_with_jacobian()
127 |         auto [r, J] = func.eval_with_jacobian(x);
128 |         TINYAD_ASSERT_EPS_MAT(r, r_expected, 1e-16);
129 |         TINYAD_ASSERT_EPS_MAT(J.toDense(), J_expected.toDense(), 1e-16);
130 |     }
131 | 
132 |     { // Test eval_with_derivatives()
133 |         Eigen::VectorX<PassiveT> r;
134 |         Eigen::SparseMatrix<PassiveT> J;
135 |         std::vector<Eigen::SparseMatrix<PassiveT>> H;
136 |         func.eval_with_derivatives(x, r, J, H);
137 |         TINYAD_ASSERT_EPS_MAT(r, r_expected, 1e-16);
138 |         TINYAD_ASSERT_EPS_MAT(J.toDense(), J_expected.toDense(), 1e-16);
139 |         TINYAD_ASSERT_EQ((int)H.size(), r.size());
140 |         for (int i = 0; i < r.size(); ++i)
141 |             TINYAD_ASSERT_EPS_MAT(H[i].toDense(), H_expected[i].toDense(), 1e-16);
142 |     }
143 | 
144 |     { // Test eval_with_derivatives()
145 |         auto [r, J, H] = func.eval_with_derivatives(x);
146 |         TINYAD_ASSERT_EPS_MAT(r, r_expected, 1e-16);
147 |         TINYAD_ASSERT_EPS_MAT(J.toDense(), J_expected.toDense(), 1e-16);
148 |         TINYAD_ASSERT_EQ((int)H.size(), r.size());
149 |         for (int i = 0; i < r.size(); ++i)
150 |             TINYAD_ASSERT_EPS_MAT(H[i].toDense(), H_expected[i].toDense(), 1e-16);
151 |     }
152 | 
153 |     { // Test eval_sum_of_squares_with_derivatives()
154 |         PassiveT f;
155 |         Eigen::VectorX<PassiveT> g;
156 |         Eigen::VectorX<PassiveT> r;
157 |         Eigen::SparseMatrix<PassiveT> J;
158 |         func.eval_sum_of_squares_with_derivatives(x, f, g, r, J);
159 |         TINYAD_ASSERT_EPS(f, f_expected, 1e-16);
160 |         TINYAD_ASSERT_EPS_MAT(g, g_expected, 1e-16);
161 |         TINYAD_ASSERT_EPS_MAT(r, r_expected, 1e-16);
162 |         TINYAD_ASSERT_EPS_MAT(J.toDense(), J_expected.toDense(), 1e-16);
163 |     }
164 | 
165 |     { // Test eval_sum_of_squares_with_derivatives()
166 |         auto [f, g, r, J] = func.eval_sum_of_squares_with_derivatives(x);
167 |         TINYAD_ASSERT_EPS(f, f_expected, 1e-16);
168 |         TINYAD_ASSERT_EPS_MAT(g, g_expected, 1e-16);
169 |         TINYAD_ASSERT_EPS_MAT(r, r_expected, 1e-16);
170 |         TINYAD_ASSERT_EPS_MAT(J.toDense(), J_expected.toDense(), 1e-16);
171 |     }
172 | }
173 | 
174 | TEST(VectorFunctionTest, 1DFloat) { test_eval<float>(); }
175 | TEST(VectorFunctionTest, 1DDouble) { test_eval<double>(); }
176 | TEST(VectorFunctionTest, 1DLongDouble) { test_eval<long double>(); }
177 | 
178 | // Test that verifies move semantics work correctly for vector functions
179 | TEST(VectorFunctionTest, Move)
180 | {
181 |     // R^2 -> R^3 (same as above)
182 |     auto func1 = TinyAD::vector_function<1, double>(TinyAD::range(2));
183 | 
184 |     func1.template add_elements<1, 2>(TinyAD::range(1), [](auto& element) -> TINYAD_VECTOR_TYPE(element)
185 |     {
186 |         using T = TINYAD_SCALAR_TYPE(element);
187 |         T x0 = element.variables(0)[0];
188 |         return Eigen::Vector2<T>(2.0 * x0, sqr(x0));
189 |     });
190 | 
191 |     func1.template add_elements<1, 1>(TinyAD::range(1), [](auto& element) -> TINYAD_VECTOR_TYPE(element)
192 |     {
193 |         using T = TINYAD_SCALAR_TYPE(element);
194 |         T x1 = element.variables(1)[0];
195 |         return Eigen::Vector<T, 1>(sqr(x1));
196 |     });
197 | 
198 |     // Create test point
199 |     Eigen::Vector2d x(3.0, 4.0);
200 | 
201 |     // Evaluate function
202 |     Eigen::VectorXd r1 = func1.eval(x);
203 |     EXPECT_EQ(r1.size(), 3);
204 |     EXPECT_EQ(r1[0], 6.0); // 2*x0
205 |     EXPECT_EQ(r1[1], 9.0); // x0^2
206 |     EXPECT_EQ(r1[2], 16.0); // x1^2
207 | 
208 |     // Move the function
209 |     auto func2 = std::move(func1);
210 | 
211 |     // Evaluate with Jacobian
212 |     auto [r_j, J] = func2.eval_with_jacobian(x);
213 |     EXPECT_EQ(r_j.size(), 3);
214 |     EXPECT_EQ(J.coeff(0, 0), 2.0); // d(2*x0)/dx0
215 |     EXPECT_EQ(J.coeff(1, 0), 6.0); // d(x0^2)/dx0
216 |     EXPECT_EQ(J.coeff(2, 1), 8.0); // d(x1^2)/dx1
217 | 
218 |     // Move again
219 |     TinyAD::VectorFunction<1, double, Eigen::Index> func3;
220 |     func3 = std::move(func2);
221 | 
222 |     // Evaluate with Hessians
223 |     auto [r_h, J_h, H] = func3.eval_with_derivatives(x);
224 |     EXPECT_EQ(r_h.size(), 3);
225 |     EXPECT_EQ(J_h.coeff(0, 0), 2.0); // d(2*x0)/dx0
226 |     EXPECT_EQ(J_h.coeff(1, 0), 6.0); // d(x0^2)/dx0
227 |     EXPECT_EQ(J_h.coeff(2, 1), 8.0); // d(x1^2)/dx1
228 |     EXPECT_EQ(H[1].coeff(0, 0), 2.0); // d^2(x0^2)/dx0^2
229 |     EXPECT_EQ(H[2].coeff(1, 1), 2.0); // d^2(x1^2)/dx1^2
230 | }
231 | 
232 | // Test that verifies thread safety for vector functions
233 | TEST(VectorFunctionTest, ThreadSafety)
234 | {
235 |     // Create a vector function
236 |     auto func = TinyAD::vector_function<2, double>(TinyAD::range(1));
237 | 
238 |     // Add elements
239 |     func.template add_elements<1, 2>(TinyAD::range(1), [](auto& element) -> TINYAD_VECTOR_TYPE(element)
240 |     {
241 |         using T = TINYAD_SCALAR_TYPE(element);
242 |         auto x = element.variables(0);
243 |         return Eigen::Vector2<T>(x[0] * x[0], x[1] * x[1]);
244 |     });
245 | 
246 |     // Create test point
247 |     Eigen::Vector2d x(1.0, 2.0);
248 | 
249 |     // Simulate concurrent access by calling eval methods multiple times
250 |     #pragma omp parallel sections
251 |     {
252 |         #pragma omp section
253 |         {
254 |             func.eval(x);
255 |         }
256 | 
257 |         #pragma omp section
258 |         {
259 |             func.eval_with_jacobian(x);
260 |         }
261 | 
262 |         #pragma omp section
263 |         {
264 |             func.eval_with_derivatives(x);
265 |         }
266 |     }
267 | 
268 |     // If we got here without crashes, the test passes
269 |     SUCCEED();
270 | }
271 | 
--------------------------------------------------------------------------------
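To close, a condensed usage sketch (not part of the repository) of the vector-function API exercised in tests/VectorFunctionTest.cc above; the element interface, macros, and evaluation calls mirror the tests, while the particular residuals are made up for illustration:

    #include <TinyAD/VectorFunction.hh>
    #include <TinyAD/Utils/Helpers.hh>

    int main()
    {
        // One 2D variable, one element mapping it to two residuals
        auto func = TinyAD::vector_function<2, double>(TinyAD::range(1));
        func.template add_elements<1, 2>(TinyAD::range(1), [] (auto& element) -> TINYAD_VECTOR_TYPE(element)
        {
            using T = TINYAD_SCALAR_TYPE(element);
            Eigen::Vector2<T> x = element.variables(0);
            return Eigen::Vector2<T>(x[0] * x[1], sqr(x[0]) - x[1]); // illustrative residuals
        });

        // Residual vector and sparse Jacobian at a test point
        Eigen::Vector2d x(1.0, 2.0);
        auto [r, J] = func.eval_with_jacobian(x);
        return 0;
    }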