├── src ├── utreexo.cpp ├── bench │ ├── nanobench.cpp │ ├── README.md │ ├── util │ │ ├── leaves.h │ │ ├── args.h │ │ └── args.cpp │ ├── bench.h │ ├── bench.cpp │ ├── pollard.cpp │ ├── bench_utreexo.cpp │ └── ram_forest.cpp ├── test │ ├── tests.cpp │ ├── state_tests.cpp │ └── accumulator_tests.cpp ├── fuzz │ ├── batchproof.cpp │ ├── forest_state.cpp │ ├── fuzz.cpp │ ├── fuzz.h │ └── pollard.cpp ├── util │ └── macros.h ├── check.h ├── compat │ ├── cpuid.h │ ├── byteswap.h │ └── endian.h ├── crypto │ ├── sha512.h │ ├── common.h │ └── sha512.cpp ├── node.h ├── state.h ├── batchproof.cpp ├── accumulator.cpp ├── ram_forest.cpp ├── state.cpp └── pollard.cpp ├── include ├── utreexo.h ├── ram_forest.h ├── pollard.h ├── batchproof.h └── accumulator.h ├── README.md ├── .github └── workflows │ └── c-cpp.yml ├── autogen.sh ├── .gitignore ├── LICENSE ├── .clang-format ├── Makefile.am ├── sources.mk └── configure.ac /src/utreexo.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | void placeholder() {} 3 | -------------------------------------------------------------------------------- /src/bench/nanobench.cpp: -------------------------------------------------------------------------------- 1 | #define ANKERL_NANOBENCH_IMPLEMENT 2 | #include "nanobench.h" -------------------------------------------------------------------------------- /src/test/tests.cpp: -------------------------------------------------------------------------------- 1 | #define BOOST_TEST_MODULE "Utreexo unit tests" 2 | #include 3 | -------------------------------------------------------------------------------- /include/utreexo.h: -------------------------------------------------------------------------------- 1 | #ifndef UTREEXO_H 2 | #define UTREEXO_H 3 | 4 | #include "accumulator.h" 5 | #include "batchproof.h" 6 | #include "pollard.h" 7 | #include "ram_forest.h" 8 | 9 | #endif 10 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # THIS IS STILL A WIP, DO NOT USE. 2 | 3 | ## libutreexo 4 | A C++ transliteration of the Go reference implementation. 5 | 6 | ## Contributing 7 | Contributions are welcome, see [issues](https://github.com/mit-dci/libutreexo/issues) and [projects](https://github.com/mit-dci/libutreexo/projects). 8 | Please use the [Go repo](https://github.com/mit-dci/utreexo) or the #utreexo IRC channel for general utreexo discussion and questions.
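## Example

The sketch below illustrates how the types exported through `include/utreexo.h` fit together: a full `RamForest` that can prove leaves and a pruned `Pollard` that verifies those proofs. It simply mirrors the calls made by the bench and fuzz harnesses in `src/bench/` and `src/fuzz/`; treat it as an untested, unstable example while the library is a WIP.

```cpp
#include "utreexo.h"

#include <cassert>
#include <vector>

using namespace utreexo;

int main()
{
    // A full forest (keeps every hash) and a pruned accumulator, both starting empty.
    RamForest full(0);
    Pollard pruned(0);

    // Create a few dummy 32-byte leaf hashes; the bool marks leaves the pollard should remember.
    std::vector<Leaf> leaves;
    for (uint8_t i = 1; i <= 8; ++i) {
        Hash hash{};
        hash[0] = i;
        leaves.emplace_back(hash, /*remember=*/true);
    }

    // Add the leaves to both accumulators.
    UndoBatch undo;
    full.Modify(undo, leaves, {});
    pruned.Modify(leaves, {});

    // Let the full forest prove two of the leaves ...
    std::vector<Hash> targets{leaves[0].first, leaves[1].first};
    BatchProof proof;
    full.Prove(proof, targets);

    // ... and verify that proof against the pruned accumulator.
    assert(pruned.Verify(proof, targets));
}
```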
9 | -------------------------------------------------------------------------------- /.github/workflows/c-cpp.yml: -------------------------------------------------------------------------------- 1 | name: C/C++ CI 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | build: 7 | 8 | runs-on: ubuntu-latest 9 | 10 | steps: 11 | - uses: actions/checkout@v2 12 | - name: install boost-test 13 | run: sudo apt install -y libboost-test-dev 14 | - name: autogen 15 | run: ./autogen.sh 16 | - name: configure 17 | run: ./configure 18 | - name: make 19 | run: make 20 | - name: make check 21 | run: make check 22 | -------------------------------------------------------------------------------- /src/fuzz/batchproof.cpp: -------------------------------------------------------------------------------- 1 | #include "../include/batchproof.h" 2 | #include "fuzz.h" 3 | #include 4 | 5 | using namespace utreexo; 6 | 7 | FUZZ(batchproof) 8 | { 9 | FUZZ_CONSUME(uint8_t, num_targets) 10 | FUZZ_CONSUME(uint8_t, num_hashes) 11 | FUZZ_CONSUME_VEC(uint8_t, proof_bytes, 8 + num_targets * 4 + num_hashes * 32) 12 | 13 | BatchProof proof; 14 | if (proof.Unserialize(proof_bytes)) { 15 | std::vector bytes; 16 | proof.Serialize(bytes); 17 | assert(proof_bytes == bytes); 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /autogen.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Copyright (c) 2013-2016 The Bitcoin Core developers 3 | # Distributed under the MIT software license, see the accompanying 4 | # file COPYING or http://www.opensource.org/licenses/mit-license.php. 5 | 6 | set -e 7 | srcdir="$(dirname $0)" 8 | cd "$srcdir" 9 | if [ -z ${LIBTOOLIZE} ] && GLIBTOOLIZE="`which glibtoolize 2>/dev/null`"; then 10 | LIBTOOLIZE="${GLIBTOOLIZE}" 11 | export LIBTOOLIZE 12 | fi 13 | which autoreconf >/dev/null || \ 14 | (echo "configuration failed, please install autoconf first" && exit 1) 15 | autoreconf --install --force --warnings=all 16 | -------------------------------------------------------------------------------- /src/util/macros.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2019-2021 The Bitcoin Core developers 2 | // Distributed under the MIT software license, see the accompanying 3 | // file COPYING or http://www.opensource.org/licenses/mit-license.php. 4 | 5 | #ifndef UTREEXO_UTIL_MACROS_H 6 | #define UTREEXO_UTIL_MACROS_H 7 | 8 | #define PASTE(x, y) x ## y 9 | #define PASTE2(x, y) PASTE(x, y) 10 | 11 | #define UNIQUE_NAME(name) PASTE2(name, __COUNTER__) 12 | 13 | /** 14 | * Converts the parameter X to a string after macro replacement on X has been performed. 15 | * Don't merge these into one macro! 
16 | */ 17 | #define STRINGIZE(X) DO_STRINGIZE(X) 18 | #define DO_STRINGIZE(X) #X 19 | 20 | #endif // UTREEXO_UTIL_MACROS_H -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .gitignore 2 | *.a 3 | *.la 4 | *.lo 5 | *.dylib 6 | *.lai 7 | *.o 8 | .deps 9 | *.swp 10 | test_utreexo 11 | .vscode 12 | .DS_Store 13 | compile_commands.json 14 | .cache 15 | .dirstamp 16 | make 17 | test-verify 18 | bench_utreexo 19 | 20 | # autoreconf 21 | Makefile 22 | Makefile.in 23 | aclocal.m4 24 | autom4te.cache/ 25 | build-aux/config.guess 26 | build-aux/config.sub 27 | build-aux/depcomp 28 | build-aux/install-sh 29 | build-aux/ltmain.sh 30 | build-aux/m4/libtool.m4 31 | build-aux/m4/lt~obsolete.m4 32 | build-aux/m4/ltoptions.m4 33 | build-aux/m4/ltsugar.m4 34 | build-aux/m4/ltversion.m4 35 | build-aux/missing 36 | build-aux/compile 37 | build-aux/test-driver 38 | config.cache 39 | config.log 40 | config.status 41 | configure 42 | libtool 43 | 44 | -------------------------------------------------------------------------------- /src/fuzz/forest_state.cpp: -------------------------------------------------------------------------------- 1 | #include "fuzz.h" 2 | #include "state.h" 3 | #include 4 | 5 | using namespace utreexo; 6 | 7 | FUZZ(forest_state) 8 | { 9 | FUZZ_CONSUME(uint64_t, num_leaves) 10 | ForestState state(num_leaves); 11 | 12 | state.NumRoots(); 13 | state.NumRows(); 14 | 15 | state.RootPositions(); 16 | 17 | FUZZ_CONSUME(uint16_t, num_targets) 18 | FUZZ_CONSUME_VEC(uint64_t, targets, num_targets); 19 | 20 | std::sort(targets.begin(), targets.end()); 21 | state.ProofPositions(targets); 22 | state.Transform(targets); 23 | state.CheckTargetsSanity(targets); 24 | state.UndoTransform(targets); 25 | 26 | // TODO: figure out why this makes the target so much slower 27 | /*for (uint64_t target : targets) { 28 | state.Path(target); 29 | }*/ 30 | } 31 | -------------------------------------------------------------------------------- /src/fuzz/fuzz.cpp: -------------------------------------------------------------------------------- 1 | #include "fuzz.h" 2 | #include 3 | #include 4 | #include 5 | 6 | void RegisterFuzzTarget(std::string_view name, FuzzFunc func) 7 | { 8 | const auto it = g_targets.try_emplace(name, func); 9 | assert(it.second); 10 | } 11 | 12 | static FuzzFunc* g_fuzz_func{nullptr}; 13 | 14 | extern "C" int LLVMFuzzerTestOneInput(unsigned char* data, size_t size) 15 | { 16 | assert(g_fuzz_func); 17 | (*g_fuzz_func)(data, size); 18 | return 0; 19 | } 20 | 21 | extern "C" int LLVMFuzzerInitialize(int* argc, char*** argv) 22 | { 23 | // Get target name from "FUZZ" env variable and initialize it. 24 | std::string_view target{std::getenv("FUZZ")}; 25 | 26 | const auto it = g_targets.find(target); 27 | assert(it != g_targets.end()); 28 | 29 | g_fuzz_func = &it->second; 30 | return 0; 31 | } 32 | -------------------------------------------------------------------------------- /src/check.h: -------------------------------------------------------------------------------- 1 | // Taken and slightly modified from minisketch/util.h 2 | 3 | #ifdef UTREEXO_VERIFY 4 | #include 5 | #endif 6 | 7 | /* Assertion macros */ 8 | 9 | /** 10 | * Unconditional failure on condition failure. 11 | * Primarily used in testing harnesses. 
12 | */ 13 | #define CHECK(cond) do { \ 14 | if (!(cond)) { \ 15 | fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__, "Check condition failed: " #cond); \ 16 | abort(); \ 17 | } \ 18 | } while(0) 19 | 20 | /** 21 | * Check macro that does nothing in normal non-verify builds but crashes in verify builds. 22 | * This is used to test conditions at runtime that should always be true, but are either 23 | * expensive to test or in locations where returning on failure would be messy. 24 | */ 25 | #ifdef UTREEXO_VERIFY 26 | #define CHECK_SAFE(cond) CHECK(cond) 27 | #else 28 | #define CHECK_SAFE(cond) 29 | #endif 30 | -------------------------------------------------------------------------------- /src/compat/cpuid.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2017-2019 The Bitcoin Core developers 2 | // Distributed under the MIT software license, see the accompanying 3 | // file COPYING or http://www.opensource.org/licenses/mit-license.php. 4 | 5 | #ifndef BITCOIN_COMPAT_CPUID_H 6 | #define BITCOIN_COMPAT_CPUID_H 7 | 8 | #if defined(__x86_64__) || defined(__amd64__) || defined(__i386__) 9 | #define HAVE_GETCPUID 10 | 11 | #include 12 | 13 | // We can't use cpuid.h's __get_cpuid as it does not support subleafs. 14 | void static inline GetCPUID(uint32_t leaf, uint32_t subleaf, uint32_t& a, uint32_t& b, uint32_t& c, uint32_t& d) 15 | { 16 | #ifdef __GNUC__ 17 | __cpuid_count(leaf, subleaf, a, b, c, d); 18 | #else 19 | __asm__ ("cpuid" : "=a"(a), "=b"(b), "=c"(c), "=d"(d) : "0"(leaf), "2"(subleaf)); 20 | #endif 21 | } 22 | 23 | #endif // defined(__x86_64__) || defined(__amd64__) || defined(__i386__) 24 | #endif // BITCOIN_COMPAT_CPUID_H 25 | -------------------------------------------------------------------------------- /src/crypto/sha512.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2014-2019 The Bitcoin Core developers 2 | // Distributed under the MIT software license, see the accompanying 3 | // file COPYING or http://www.opensource.org/licenses/mit-license.php. 4 | 5 | #ifndef UTREEXO_CRYPTO_SHA512_H 6 | #define UTREEXO_CRYPTO_SHA512_H 7 | 8 | #include 9 | #include 10 | 11 | namespace utreexo { 12 | 13 | /** A hasher class for SHA-512. 
*/ 14 | class CSHA512 15 | { 16 | private: 17 | uint64_t s[8]; 18 | unsigned char buf[128]; 19 | uint64_t bytes; 20 | 21 | public: 22 | static constexpr size_t OUTPUT_SIZE = 64; 23 | static constexpr size_t OUTPUT_SIZE_256 = 32; 24 | 25 | CSHA512(); 26 | CSHA512(int output_size); 27 | CSHA512& Write(const unsigned char* data, size_t len); 28 | void Finalize(unsigned char hash[OUTPUT_SIZE]); 29 | void Finalize256(unsigned char hash[OUTPUT_SIZE_256]); 30 | 31 | CSHA512& Reset(); 32 | uint64_t Size() const { return bytes; } 33 | }; 34 | 35 | }; // namespace utreexo 36 | #endif // UTREEXO_CRYPTO_SHA512_H 37 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 The MIT Digital Currency Initiative @ Media Lab 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /src/node.h: -------------------------------------------------------------------------------- 1 | #ifndef UTREEXO_NODE_H 2 | #define UTREEXO_NODE_H 3 | 4 | #include "../include/accumulator.h" 5 | #include 6 | 7 | namespace utreexo { 8 | 9 | using Hash = std::array; 10 | 11 | class Accumulator::Node 12 | { 13 | public: 14 | // The number of leaves at the time this node was created. 15 | uint64_t m_num_leaves; 16 | 17 | // A pointer to the parent node. 18 | // This is useful if you want to rehash a path from the bottom up. 19 | NodePtr m_parent; 20 | 21 | // The position of the node in the forest. 22 | uint64_t m_position; 23 | 24 | virtual ~Node() 25 | { 26 | m_parent = nullptr; 27 | } 28 | 29 | /* 30 | * Return the hash of the node. 31 | * This does not compute the hash only returns a previously computed hash. 32 | */ 33 | virtual const Hash& GetHash() const = 0; 34 | 35 | /* Recompute the hash from children nodes and return it. */ 36 | virtual void ReHash() = 0; 37 | 38 | /* 39 | * Return the parent of the node. 40 | * A return value of nullptr does *not* always indicate that a tree top was reached. 
41 | */ 42 | virtual NodePtr Parent() const { return m_parent; } 43 | }; 44 | 45 | }; // namespace utreexo 46 | 47 | #endif // UTREEXO_NODE_H 48 | -------------------------------------------------------------------------------- /src/bench/README.md: -------------------------------------------------------------------------------- 1 | Benchmarking 2 | ============ 3 | 4 | Utreexo has an internal benchmarking framework that uses the [nanobench library](https://github.com/martinus/nanobench) and follows the Bitcoin Core benchmarking implementation. 5 | 6 | Benchmarks are compiled by default and can be disabled with 7 | 8 | ``` 9 | ./configure --disable-bench 10 | ``` 11 | 12 | 13 | Running 14 | --------------------- 15 | After compiling, the benchmarks can be run with 16 | 17 | ``` 18 | ./bench_utreexo 19 | ``` 20 | 21 | #### Arguments 22 | 23 | The `-min_time=` argument allows running a benchmark for much longer than the default. When results are unreliable, choosing a larger runtime here usually yields repeatable results. 24 | For more details see https://github.com/bitcoin/bitcoin/pull/23025 25 | 26 | The `-filter=` argument is a regular expression filter to select benchmarks by name. For example, to run 27 | 28 | - only forest-related benchmarks: `./bench_utreexo -filter=.*Forest` 29 | - only a single benchmark: `./bench_utreexo -filter=` 30 | 31 | The `-asymptote=` argument takes a comma-separated list of values for the benchmark's complexity parameter N and then calculates the asymptotic complexity (Big O) from multiple runs of the benchmark with those values. [Read more about nanobench's asymptotic complexity](https://nanobench.ankerl.com/tutorial.html#asymptotic-complexity). 32 | 33 | 34 | -------------------------------------------------------------------------------- /src/bench/util/leaves.h: -------------------------------------------------------------------------------- 1 | #ifndef UTREEXO_BENCH_UTIL_LEAVES_H 2 | #define UTREEXO_BENCH_UTIL_LEAVES_H 3 | 4 | #include "include/accumulator.h" 5 | 6 | #include 7 | 8 | using namespace utreexo; 9 | 10 | // Copied from src/test/accumulator_tests.cpp 11 | static void SetHash(Hash& hash, int num) 12 | { 13 | hash[0] = num; 14 | hash[1] = num >> 8; 15 | hash[2] = num >> 16; 16 | hash[3] = num >> 24; 17 | hash[4] = 0xFF; 18 | } 19 | // Copied from src/test/accumulator_tests.cpp 20 | static void CreateTestLeaves(std::vector& leaves, int count, int offset) 21 | { 22 | for (int i = 0; i < count; i++) { 23 | Hash hash = {}; // initialize all elements to 0 24 | SetHash(hash, offset + i); 25 | leaves.emplace_back(std::move(hash), false); 26 | } 27 | } 28 | // Copied from src/test/accumulator_tests.cpp 29 | static void CreateTestLeaves(std::vector& leaves, int count) 30 | { 31 | CreateTestLeaves(leaves, count, 0); 32 | } 33 | 34 | // Fisher-Yates shuffle, https://stackoverflow.com/a/9345144/5800072 35 | template 36 | bidiiter random_unique(bidiiter begin, bidiiter end, size_t num_random) 37 | { 38 | size_t left = std::distance(begin, end); 39 | while (num_random--) { 40 | bidiiter r = begin; 41 | std::advance(r, rand() % left); 42 | std::swap(*begin, *r); 43 | ++begin; 44 | --left; 45 | } 46 | return begin; 47 | } 48 | 49 | #endif // UTREEXO_BENCH_UTIL_LEAVES_H 50 | -------------------------------------------------------------------------------- /src/compat/byteswap.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2014-2018 The Bitcoin Core developers 2 | // Distributed under the MIT software license, see the accompanying 3 | // file
COPYING or http://www.opensource.org/licenses/mit-license.php. 4 | 5 | #ifndef BITCOIN_COMPAT_BYTESWAP_H 6 | #define BITCOIN_COMPAT_BYTESWAP_H 7 | 8 | #if defined(HAVE_CONFIG_H) 9 | #include 10 | #endif 11 | 12 | #include 13 | 14 | #if defined(HAVE_BYTESWAP_H) 15 | #include 16 | #endif 17 | 18 | #if defined(MAC_OSX) 19 | 20 | #include 21 | #define bswap_16(x) OSSwapInt16(x) 22 | #define bswap_32(x) OSSwapInt32(x) 23 | #define bswap_64(x) OSSwapInt64(x) 24 | 25 | #else 26 | // Non-MacOS / non-Darwin 27 | 28 | #if HAVE_DECL_BSWAP_16 == 0 29 | inline uint16_t bswap_16(uint16_t x) 30 | { 31 | return (x >> 8) | (x << 8); 32 | } 33 | #endif // HAVE_DECL_BSWAP16 == 0 34 | 35 | #if HAVE_DECL_BSWAP_32 == 0 36 | inline uint32_t bswap_32(uint32_t x) 37 | { 38 | return (((x & 0xff000000U) >> 24) | ((x & 0x00ff0000U) >> 8) | 39 | ((x & 0x0000ff00U) << 8) | ((x & 0x000000ffU) << 24)); 40 | } 41 | #endif // HAVE_DECL_BSWAP32 == 0 42 | 43 | #if HAVE_DECL_BSWAP_64 == 0 44 | inline uint64_t bswap_64(uint64_t x) 45 | { 46 | return (((x & 0xff00000000000000ull) >> 56) 47 | | ((x & 0x00ff000000000000ull) >> 40) 48 | | ((x & 0x0000ff0000000000ull) >> 24) 49 | | ((x & 0x000000ff00000000ull) >> 8) 50 | | ((x & 0x00000000ff000000ull) << 8) 51 | | ((x & 0x0000000000ff0000ull) << 24) 52 | | ((x & 0x000000000000ff00ull) << 40) 53 | | ((x & 0x00000000000000ffull) << 56)); 54 | } 55 | #endif // HAVE_DECL_BSWAP64 == 0 56 | 57 | #endif // defined(MAC_OSX) 58 | 59 | #endif // BITCOIN_COMPAT_BYTESWAP_H 60 | -------------------------------------------------------------------------------- /src/bench/bench.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2021 The Bitcoin Core developers 2 | // Distributed under the MIT software license, see the accompanying 3 | // file COPYING or http://www.opensource.org/licenses/mit-license.php. 4 | 5 | #ifndef UTREEXO_BENCH_BENCH_H 6 | #define UTREEXO_BENCH_BENCH_H 7 | 8 | #include "nanobench.h" 9 | #include "util/args.h" 10 | #include "util/macros.h" 11 | 12 | #include 13 | #include 14 | #include 15 | #include 16 | 17 | /* 18 | * Usage: 19 | 20 | static void NameOfYourBenchmarkFunction(benchmark::Bench& bench) 21 | { 22 | ...do any setup needed... 23 | tip: use bench.complexityN() to refer to the asymptote value that was 24 | set through the BenchRunner 25 | 26 | bench.run([&] { 27 | ...do stuff you want to time; refer to src/bench/nanobench.h 28 | for more information and the options that can be passed here... 29 | }); 30 | 31 | ...do any cleanup needed... 
32 | } 33 | 34 | BENCHMARK(NameOfYourBenchmarkFunction); 35 | 36 | */ 37 | 38 | namespace benchmark { 39 | 40 | using ankerl::nanobench::Bench; 41 | 42 | typedef std::function BenchFunction; 43 | 44 | struct Args { 45 | std::chrono::milliseconds min_time; 46 | std::vector asymptote; 47 | std::string regex_filter; 48 | }; 49 | 50 | class BenchRunner 51 | { 52 | typedef std::map BenchmarkMap; 53 | static BenchmarkMap& benchmarks(); 54 | 55 | public: 56 | BenchRunner(std::string name, BenchFunction func); 57 | 58 | static void RunAll(const Args& args); 59 | }; 60 | } // namespace benchmark 61 | 62 | // BENCHMARK(foo) expands to: benchmark::BenchRunner bench_11foo("foo", foo); 63 | #define BENCHMARK(n) \ 64 | benchmark::BenchRunner PASTE2(bench_, PASTE2(__LINE__, n))(STRINGIZE(n), n); 65 | 66 | #endif // UTREEXO_BENCH_BENCH_H -------------------------------------------------------------------------------- /.clang-format: -------------------------------------------------------------------------------- 1 | Language: Cpp 2 | AccessModifierOffset: -4 3 | AlignAfterOpenBracket: true 4 | AlignEscapedNewlinesLeft: true 5 | AlignTrailingComments: true 6 | AllowAllParametersOfDeclarationOnNextLine: true 7 | AllowShortBlocksOnASingleLine: false 8 | AllowShortCaseLabelsOnASingleLine: true 9 | AllowShortFunctionsOnASingleLine: All 10 | AllowShortIfStatementsOnASingleLine: true 11 | AllowShortLoopsOnASingleLine: false 12 | AlwaysBreakBeforeMultilineStrings: false 13 | AlwaysBreakTemplateDeclarations: true 14 | BinPackParameters: false 15 | BreakBeforeBinaryOperators: false 16 | BreakBeforeBraces: Custom 17 | BraceWrapping: 18 | AfterClass: true 19 | AfterFunction: true 20 | BreakBeforeTernaryOperators: false 21 | BreakConstructorInitializersBeforeComma: false 22 | ColumnLimit: 0 23 | CommentPragmas: "^ IWYU pragma:" 24 | ConstructorInitializerAllOnOneLineOrOnePerLine: false 25 | ConstructorInitializerIndentWidth: 4 26 | ContinuationIndentWidth: 4 27 | Cpp11BracedListStyle: true 28 | DerivePointerAlignment: false 29 | DisableFormat: false 30 | IndentCaseLabels: false 31 | IndentFunctionDeclarationAfterType: false 32 | IndentWidth: 4 33 | KeepEmptyLinesAtTheStartOfBlocks: false 34 | MaxEmptyLinesToKeep: 2 35 | NamespaceIndentation: None 36 | ObjCSpaceAfterProperty: false 37 | ObjCSpaceBeforeProtocolList: false 38 | PenaltyBreakBeforeFirstCallParameter: 1 39 | PenaltyBreakComment: 300 40 | PenaltyBreakFirstLessLess: 120 41 | PenaltyBreakString: 1000 42 | PenaltyExcessCharacter: 1000000 43 | PenaltyReturnTypeOnItsOwnLine: 200 44 | PointerAlignment: Left 45 | SpaceBeforeAssignmentOperators: true 46 | SpaceBeforeParens: ControlStatements 47 | SpaceInEmptyParentheses: false 48 | SpacesBeforeTrailingComments: 1 49 | SpacesInAngles: false 50 | SpacesInContainerLiterals: true 51 | SpacesInCStyleCastParentheses: false 52 | SpacesInParentheses: false 53 | Standard: Cpp11 54 | TabWidth: 8 55 | UseTab: Never 56 | -------------------------------------------------------------------------------- /src/bench/bench.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2021 The Bitcoin Core developers 2 | // Distributed under the MIT software license, see the accompanying 3 | // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
4 | 5 | #include "bench.h" 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | using namespace std::chrono_literals; 14 | 15 | benchmark::BenchRunner::BenchmarkMap& benchmark::BenchRunner::benchmarks() 16 | { 17 | static std::map benchmarks_map; 18 | return benchmarks_map; 19 | } 20 | 21 | benchmark::BenchRunner::BenchRunner(std::string name, benchmark::BenchFunction func) 22 | { 23 | benchmarks().insert(std::make_pair(name, func)); 24 | } 25 | 26 | void benchmark::BenchRunner::RunAll(const Args& args) 27 | { 28 | std::regex reFilter(args.regex_filter); 29 | std::smatch baseMatch; 30 | 31 | std::vector benchmarkResults; 32 | for (const auto& p : benchmarks()) { 33 | if (!std::regex_match(p.first, baseMatch, reFilter)) { 34 | continue; 35 | } 36 | 37 | Bench bench; 38 | bench.name(p.first); 39 | if (args.min_time > 0ms) { 40 | // convert to nanos before dividing to reduce rounding errors 41 | std::chrono::nanoseconds min_time_ns = args.min_time; 42 | bench.minEpochTime(min_time_ns / bench.epochs()); 43 | } 44 | 45 | if (args.asymptote.empty()) { 46 | p.second(bench); 47 | } else { 48 | for (auto n : args.asymptote) { 49 | bench.complexityN(n); 50 | p.second(bench); 51 | } 52 | std::cout << bench.complexityBigO() << std::endl; 53 | } 54 | 55 | if (!bench.results().empty()) { 56 | benchmarkResults.push_back(bench.results().back()); 57 | } 58 | } 59 | } -------------------------------------------------------------------------------- /src/fuzz/fuzz.h: -------------------------------------------------------------------------------- 1 | #ifndef LIBUTREEXO_FUZZ 2 | #define LIBUTREEXO_FUZZ 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | using FuzzFunc = std::function; 10 | 11 | static std::map g_targets; 12 | void RegisterFuzzTarget(std::string_view name, FuzzFunc func); 13 | 14 | #define FUZZ(name) \ 15 | void name##_fuzz_target(const uint8_t*, size_t); \ 16 | struct name##_register { \ 17 | name##_register() \ 18 | { \ 19 | RegisterFuzzTarget(#name, name##_fuzz_target); \ 20 | } \ 21 | } const static g_##name##_register; \ 22 | void name##_fuzz_target(const uint8_t* data, size_t size) 23 | 24 | #define FUZZ_CONSUME_UNCHECKED(type, name) \ 25 | type name; \ 26 | std::memcpy(&name, data, sizeof(type)); \ 27 | size -= sizeof(type); \ 28 | data += sizeof(type); 29 | 30 | #define FUZZ_CONSUME(type, name) \ 31 | if (sizeof(type) > size) { \ 32 | return; \ 33 | } \ 34 | FUZZ_CONSUME_UNCHECKED(type, name) 35 | 36 | #define FUZZ_CONSUME_VEC(type, name, len) \ 37 | if ((size_t)(sizeof(type) * len) > size) { \ 38 | return; \ 39 | } \ 40 | std::vector name; \ 41 | for (int i = 0; i < len; i++) { \ 42 | FUZZ_CONSUME_UNCHECKED(type, j); \ 43 | name.push_back(j); \ 44 | } 45 | 46 | #define FUZZ_CONSUME_BOOL(name) \ 47 | FUZZ_CONSUME(uint8_t, name_uint8) \ 48 | bool name = 1 & name_uint8; 49 | 50 | #endif 51 | 52 | -------------------------------------------------------------------------------- /Makefile.am: -------------------------------------------------------------------------------- 1 | ACLOCAL_AMFLAGS = -I build-aux/m4 2 | AM_CXXFLAGS = $(WARN_CXXFLAGS) $(NOWARN_CXXFLAGS) $(DEBUG_CXXFLAGS) $(SANITIZER_CXXFLAGS) 3 | AM_CPPFLAGS = $(DEBUG_CPPFLAGS) 4 | AM_LDFLAGS = $(SANITIZER_LDFLAGS) 5 | 6 | include sources.mk 7 | 8 | #include_HEADERS = $(UTREEXO_DIST_HEADER_INT) 9 | #include_HEADERS = $(UTREEXO_DIST_HEADER_INT) $(UTREEXO_LIB_HEADERS_INT) 10 | 11 | LIBUTREEXO = libutreexo.la 12 | if USE_TESTS 13 | LIBUTREEXO_VERIFY = libutreexo_verify.la 14 | endif 15 | 16 | 
lib_LTLIBRARIES = 17 | lib_LTLIBRARIES += $(LIBUTREEXO) 18 | 19 | noinst_LTLIBRARIES = 20 | noinst_LTLIBRARIES += $(LIBUTREEXO_VERIFY) 21 | 22 | libutreexo_la_SOURCES = $(UTREEXO_LIB_SOURCES_INT) 23 | libutreexo_la_CPPFLAGS = -I$(srcdir)/src $(AM_CPPFLAGS) $(RELEASE_DEFINES) 24 | libutreexo_la_CXXFLAGS = $(AM_CXXFLAGS) 25 | libutreexo_la_LDFLAGS = $(AM_LDFLAGS) 26 | 27 | libutreexo_verify_la_SOURCES = $(UTREEXO_LIB_SOURCES_INT) 28 | libutreexo_verify_la_CPPFLAGS = -I$(srcdir)/src $(AM_CPPFLAGS) $(VERIFY_DEFINES) 29 | libutreexo_verify_la_CXXFLAGS = $(AM_CXXFLAGS) 30 | libutreexo_verify_la_LDFLAGS = $(AM_LDFLAGS) 31 | 32 | if USE_TESTS 33 | noinst_PROGRAMS = test-verify 34 | TESTS = test-verify 35 | endif 36 | 37 | test_verify_SOURCES = $(UTREEXO_TEST_SOURCES_INT) 38 | test_verify_CPPFLAGS = -I$(srcdir)/src $(AM_CPPFLAGS) $(VERIFY_DEFINES) 39 | test_verify_LDADD = $(LIBUTREEXO_VERIFY) 40 | test_verify_LDFLAGS = $(AM_LDFLAGS) 41 | 42 | if ENABLE_FUZZ 43 | noinst_PROGRAMS = fuzz 44 | TESTS = fuzz 45 | endif 46 | 47 | fuzz_SOURCES = $(UTREEXO_FUZZ_SOURCES_INT) 48 | fuzz_CPPFLAGS = -I$(srcdir)/src -I$(srcdir)/fuzz $(AM_CPPFLAGS) $(RELEASE_DEFINES) 49 | fuzz_CXXFLAGS = $(AM_CXXFLAGS) 50 | fuzz_LDADD = $(LIBUTREEXO) 51 | fuzz_LDFLAGS = $(AM_LDFLAGS) 52 | 53 | if ENABLE_BENCH 54 | bin_PROGRAMS = bench_utreexo 55 | bench_utreexo_SOURCES = ${UTREEXO_BENCH_SOURCES_INT} 56 | bench_utreexo_CPPFLAGS = -I$(srcdir)/src $(AM_CPPFLAGS) 57 | bench_utreexo_CXXFLAGS = $(AM_CXXFLAGS) 58 | bench_utreexo_LDADD = $(LIBUTREEXO) 59 | bench_utreexo_LDFLAGS = $(AM_LDFLAGS) 60 | endif -------------------------------------------------------------------------------- /src/bench/util/args.h: -------------------------------------------------------------------------------- 1 | #ifndef UTREEXO_BENCH_UTIL_ARGS_H 2 | #define UTREEXO_BENCH_UTIL_ARGS_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | 10 | namespace benchmark { 11 | 12 | class ArgsManager 13 | { 14 | protected: 15 | struct Arg { 16 | std::string m_help_param; 17 | std::string m_help_text; 18 | }; 19 | std::map m_available_args; 20 | //! Map of setting name to list of command line values. 21 | std::map m_command_line_options; 22 | 23 | public: 24 | void ParseParameters(int argc, const char* const argv[]); 25 | 26 | /** 27 | * Get the help string 28 | */ 29 | std::string GetHelpMessage(); 30 | 31 | /** 32 | * Format a string to be used as option description in help messages 33 | * 34 | * @param option Option message (e.g. "-rpcuser=") 35 | * @param message Option description (e.g. "Username for JSON-RPC connections") 36 | * @return the formatted string 37 | */ 38 | std::string HelpMessageOpt(const std::string& option, const std::string& message); 39 | 40 | /** 41 | * Add argument 42 | */ 43 | void AddArg(const std::string& name, const std::string& help); 44 | 45 | /** 46 | * Return string argument or default value 47 | * 48 | * @param strArg Argument to get (e.g. "-foo") 49 | * @param strDefault (e.g. "1") 50 | * @return command-line argument or default value 51 | */ 52 | std::string GetArg(const std::string& strArg, const std::string& strDefault) const; 53 | 54 | /** 55 | * Return integer argument or default value 56 | * 57 | * @param strArg Argument to get (e.g. "-foo") 58 | * @param nDefault (e.g. 
1) 59 | * @return command-line argument (0 if invalid number) or default value 60 | */ 61 | int64_t GetIntArg(const std::string& strArg, int64_t nDefault) const; 62 | 63 | /** 64 | * Return true if the given argument has been manually set 65 | * 66 | * @param strArg Argument to get (e.g. "-foo") 67 | * @return true if the argument has been set 68 | */ 69 | bool IsArgSet(const std::string& strArg) const; 70 | }; 71 | 72 | } // namespace benchmark 73 | #endif // UTREEXO_BENCH_UTIL_ARGS_H -------------------------------------------------------------------------------- /src/fuzz/pollard.cpp: -------------------------------------------------------------------------------- 1 | #include "../include/pollard.h" 2 | #include "../include/batchproof.h" 3 | #include "../include/ram_forest.h" 4 | #include "fuzz.h" 5 | #include 6 | 7 | using namespace utreexo; 8 | 9 | void SetHash(Hash& hash, int num) 10 | { 11 | for (uint8_t& byte : hash) { 12 | byte = 0; 13 | } 14 | 15 | hash[0] = num; 16 | hash[1] = num >> 8; 17 | hash[2] = num >> 16; 18 | hash[3] = num >> 24; 19 | hash[4] = 0xFF; 20 | } 21 | 22 | FUZZ(pollard) 23 | { 24 | RamForest forest(0); 25 | Pollard pollard(0); 26 | 27 | FUZZ_CONSUME(uint8_t, num_leaves); 28 | num_leaves = num_leaves & 127; 29 | 30 | std::vector leaves; 31 | for (int i = 0; i < static_cast(num_leaves); ++i) { 32 | Hash leaf_hash; 33 | SetHash(leaf_hash, i); 34 | FUZZ_CONSUME_BOOL(remember); 35 | leaves.emplace_back(leaf_hash, remember); 36 | } 37 | 38 | { 39 | UndoBatch undo; 40 | assert(forest.Modify(undo, leaves, {})); 41 | assert(pollard.Modify(leaves, {})); 42 | } 43 | 44 | std::vector targets; 45 | for (int i = 0; i < static_cast(num_leaves); ++i) { 46 | FUZZ_CONSUME_BOOL(remove); 47 | if (remove) { 48 | targets.push_back(forest.GetLeaf(i)); 49 | } 50 | } 51 | 52 | // Let the forest prove the target leaves. 53 | BatchProof proof; 54 | assert(forest.Prove(proof, targets)); 55 | 56 | // Verify the proof. 57 | FUZZ_CONSUME_BOOL(proof_ok); 58 | if (proof.GetHashes().size() > 0 && !proof_ok) { 59 | // Invalidate the proof by changing one hash 60 | std::vector hashes = proof.GetHashes(); 61 | hashes[0].fill(0); 62 | assert(!pollard.Verify(BatchProof{proof.GetTargets(), hashes}, targets)); 63 | return; 64 | } else { 65 | assert(pollard.Verify(proof, targets)); 66 | } 67 | 68 | // Remove the targets from both the forest and the pollard. 
69 | { 70 | UndoBatch undo; 71 | assert(forest.Modify(undo, {}, proof.GetSortedTargets())); 72 | assert(pollard.Modify({}, proof.GetSortedTargets())); 73 | std::vector forest_roots, pollard_roots; 74 | forest.Roots(forest_roots); 75 | pollard.Roots(pollard_roots); 76 | assert(forest_roots == pollard_roots); 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /sources.mk: -------------------------------------------------------------------------------- 1 | UTREEXO_DIST_HEADERS_INT = 2 | UTREEXO_DIST_HEADERS_INT += %reldir%/include/utreexo.h 3 | 4 | UTREEXO_LIB_HEADERS_INT = 5 | UTREEXO_LIB_HEADERS_INT += %reldir%/src/accumulator.h 6 | UTREEXO_LIB_HEADERS_INT += %reldir%/src/pollard.h 7 | UTREEXO_LIB_HEADERS_INT += %reldir%/src/ram_forest.h 8 | UTREEXO_LIB_HEADERS_INT += %reldir%/src/attributes.h 9 | UTREEXO_LIB_HEADERS_INT += %reldir%/src/check.h 10 | UTREEXO_LIB_HEADERS_INT += %reldir%/src/batchproof.h 11 | UTREEXO_LIB_HEADERS_INT += %reldir%/src/state.h 12 | UTREEXO_LIB_HEADERS_INT += %reldir%/src/crypto/common.h 13 | UTREEXO_LIB_HEADERS_INT += %reldir%/src/crypto/sha512.h 14 | UTREEXO_LIB_HEADERS_INT += %reldir%/src/compat/byteswap.h 15 | UTREEXO_LIB_HEADERS_INT += %reldir%/src/compat/endian.h 16 | UTREEXO_LIB_HEADERS_INT += %reldir%/src/compat/cpuid.h 17 | 18 | UTREEXO_LIB_SOURCES_INT = 19 | UTREEXO_LIB_SOURCES_INT += %reldir%/src/accumulator.cpp 20 | UTREEXO_LIB_SOURCES_INT += %reldir%/src/pollard.cpp 21 | UTREEXO_LIB_SOURCES_INT += %reldir%/src/ram_forest.cpp 22 | UTREEXO_LIB_SOURCES_INT += %reldir%/src/batchproof.cpp 23 | UTREEXO_LIB_SOURCES_INT += %reldir%/src/state.cpp 24 | UTREEXO_LIB_SOURCES_INT += %reldir%/src/crypto/sha512.cpp 25 | 26 | UTREEXO_TEST_SOURCES_INT = 27 | UTREEXO_TEST_SOURCES_INT += %reldir%/src/test/tests.cpp 28 | UTREEXO_TEST_SOURCES_INT += %reldir%/src/test/accumulator_tests.cpp 29 | UTREEXO_TEST_SOURCES_INT += %reldir%/src/test/state_tests.cpp 30 | 31 | UTREEXO_FUZZ_SOURCES_INT = 32 | UTREEXO_FUZZ_SOURCES_INT += %reldir%/src/fuzz/fuzz.cpp 33 | UTREEXO_FUZZ_SOURCES_INT += %reldir%/src/fuzz/forest_state.cpp 34 | UTREEXO_FUZZ_SOURCES_INT += %reldir%/src/fuzz/batchproof.cpp 35 | UTREEXO_FUZZ_SOURCES_INT += %reldir%/src/fuzz/pollard.cpp 36 | 37 | UTREEXO_BENCH_SOURCES_INT = 38 | UTREEXO_BENCH_SOURCES_INT += %reldir%/src/bench/pollard.cpp 39 | UTREEXO_BENCH_SOURCES_INT += %reldir%/src/bench/ram_forest.cpp 40 | UTREEXO_BENCH_SOURCES_INT += %reldir%/src/bench/bench_utreexo.cpp 41 | UTREEXO_BENCH_SOURCES_INT += %reldir%/src/bench/bench.cpp 42 | UTREEXO_BENCH_SOURCES_INT += %reldir%/src/bench/bench.h 43 | UTREEXO_BENCH_SOURCES_INT += %reldir%/src/bench/nanobench.cpp 44 | UTREEXO_BENCH_SOURCES_INT += %reldir%/src/bench/nanobench.h 45 | UTREEXO_BENCH_SOURCES_INT += %reldir%/src/bench/util/args.h 46 | UTREEXO_BENCH_SOURCES_INT += %reldir%/src/bench/util/args.cpp 47 | UTREEXO_BENCH_SOURCES_INT += %reldir%/src/bench/util/leaves.h 48 | -------------------------------------------------------------------------------- /include/ram_forest.h: -------------------------------------------------------------------------------- 1 | #ifndef UTREEXO_RAMFOREST_H 2 | #define UTREEXO_RAMFOREST_H 3 | 4 | #include 5 | #include 6 | 7 | #include "accumulator.h" 8 | 9 | namespace utreexo { 10 | 11 | class BatchProof; 12 | class UndoBatch; 13 | class ForestState; 14 | 15 | class RamForest : public Accumulator 16 | { 17 | private: 18 | // A vector of hashes for each row. 
19 | std::vector> m_data; 20 | 21 | // RamForests implementation of Accumulator::Node. 22 | class Node; 23 | 24 | // Path to the file in which the forest is stored. 25 | std::string m_file_path; 26 | std::fstream m_file; 27 | 28 | bool Restore(); 29 | 30 | std::optional Read(ForestState state, uint64_t pos) const; 31 | std::optional Read(uint64_t pos) const override; 32 | std::vector ReadLeafRange(uint64_t pos, uint64_t range) const override; 33 | 34 | /* Swap the hashes of ranges (from, from+range) and (to, to+range). */ 35 | void SwapRange(uint64_t from, uint64_t to, uint64_t range); 36 | 37 | NodePtr SwapSubTrees(uint64_t from, uint64_t to) override; 38 | NodePtr MergeRoot(uint64_t parent_pos, Hash parent_hash) override; 39 | NodePtr NewLeaf(const Leaf& leaf) override; 40 | void FinalizeRemove(uint64_t next_num_leaves) override; 41 | 42 | void RestoreRoots(); 43 | 44 | /** 45 | * Build the UndoBatch that can be used to roll back a modification. 46 | * This should only be called in Modify after the deletion and before the addition of new leaves. 47 | */ 48 | bool BuildUndoBatch(UndoBatch& undo, uint64_t num_adds, const std::vector& targets) const; 49 | 50 | public: 51 | RamForest(uint64_t num_leaves); 52 | RamForest(const std::string& file); 53 | ~RamForest(); 54 | 55 | bool Verify(const BatchProof& proof, const std::vector& target_hashes) override; 56 | bool Add(const std::vector& leaves) override; 57 | 58 | bool Modify(UndoBatch& undo, 59 | const std::vector& new_leaves, 60 | const std::vector& targets); 61 | 62 | bool Undo(const UndoBatch& undo); 63 | 64 | /** Save the forest to file. */ 65 | bool Commit(); 66 | 67 | Hash GetLeaf(uint64_t pos) const; 68 | 69 | bool operator==(const RamForest& other); 70 | }; 71 | 72 | }; // namespace utreexo 73 | #endif // UTREEXO_RAMFOREST_H 74 | -------------------------------------------------------------------------------- /src/bench/pollard.cpp: -------------------------------------------------------------------------------- 1 | #include "bench.h" 2 | #include "include/utreexo.h" 3 | #include "util/leaves.h" 4 | 5 | #include 6 | 7 | using namespace utreexo; 8 | 9 | // Benchmarks the creation of leaves 10 | static void AddElementsPollard(benchmark::Bench& bench) 11 | { 12 | const int num_leaves = bench.complexityN() > 1 ? static_cast(bench.complexityN()) : 64; 13 | 14 | std::vector leaves; 15 | CreateTestLeaves(leaves, num_leaves); 16 | 17 | // Benchmark 18 | bench.run([&]() { 19 | Pollard pruned(0); 20 | pruned.Modify(leaves, {}); 21 | }); 22 | } 23 | 24 | // Benchmark the proof verification of half the number of created leaves 25 | // proof is first calculated from the forest 26 | static void VerifyElementsPollard(benchmark::Bench& bench) 27 | { 28 | const int num_leaves_to_verify = bench.complexityN() > 1 ? 
static_cast(bench.complexityN()) : 32; 29 | int num_leaves = num_leaves_to_verify * 2; 30 | 31 | std::vector leaves; 32 | CreateTestLeaves(leaves, num_leaves); 33 | 34 | UndoBatch unused_undo; 35 | BatchProof proof; 36 | Pollard pruned(0); 37 | RamForest full(0); 38 | 39 | full.Modify(unused_undo, leaves, {}); // add leaves to forest 40 | pruned.Modify(leaves, {}); // add leaves to pollard 41 | // select leaves to proof/verify 42 | std::vector leaf_hashes; 43 | random_unique(leaves.begin(), leaves.end(), num_leaves_to_verify); 44 | for (int i = 0; i < num_leaves_to_verify; ++i) { 45 | leaf_hashes.push_back(leaves[i].first); 46 | } 47 | full.Prove(proof, leaf_hashes); // prove elements in the forest 48 | 49 | // Benchmark 50 | bench.unit("verification").run([&] { 51 | pruned.Verify(proof, leaf_hashes); 52 | }); 53 | } 54 | 55 | // Benchmarks the restoration from roots 56 | static void RestorePollard(benchmark::Bench& bench) 57 | { 58 | const int num_leaves = bench.complexityN() > 1 ? static_cast(bench.complexityN()) : 64; 59 | 60 | std::vector leaves; 61 | CreateTestLeaves(leaves, num_leaves); 62 | 63 | // create pollard 64 | Pollard pruned(0); 65 | pruned.Modify(leaves, {}); 66 | 67 | // get roots 68 | std::vector roots; 69 | pruned.Roots(roots); 70 | 71 | // Benchmark 72 | bench.run([&]() { 73 | // restore pollard 74 | Pollard restored(roots, num_leaves); 75 | }); 76 | } 77 | 78 | BENCHMARK(AddElementsPollard); 79 | BENCHMARK(VerifyElementsPollard); 80 | BENCHMARK(RestorePollard); -------------------------------------------------------------------------------- /include/pollard.h: -------------------------------------------------------------------------------- 1 | #ifndef UTREEXO_POLLARD_H 2 | #define UTREEXO_POLLARD_H 3 | 4 | #include 5 | 6 | #include "accumulator.h" 7 | 8 | namespace utreexo { 9 | 10 | class Pollard : public Accumulator 11 | { 12 | private: 13 | class InternalNode; 14 | using InternalSiblings = std::tuple, NodePtr>; 15 | 16 | /* Pollards implementation of Accumulator::Node */ 17 | class Node; 18 | 19 | NodePtr m_remember; 20 | 21 | /* 22 | * Return the node and its sibling. Point path to the parent of the node. 23 | * The path to the node can be traversed in reverse order using the 24 | * Accumulator::Node::Parent function. 25 | */ 26 | InternalSiblings ReadSiblings(uint64_t pos, NodePtr& path, bool record_path) const; 27 | InternalSiblings ReadSiblings(uint64_t pos) const; 28 | 29 | std::optional Read(uint64_t pos) const override; 30 | std::vector ReadLeafRange(uint64_t pos, uint64_t range) const override; 31 | NodePtr SwapSubTrees(uint64_t from, uint64_t to) override; 32 | NodePtr MergeRoot(uint64_t parent_pos, Hash parent_hash) override; 33 | NodePtr NewLeaf(const Leaf& hash) override; 34 | void FinalizeRemove(uint64_t next_num_leaves) override; 35 | 36 | void InitChildrenOfComputed(NodePtr& node, 37 | NodePtr& left_child, 38 | NodePtr& right_child, 39 | bool& recover_left, 40 | bool& recover_right); 41 | 42 | bool CreateProofTree(std::vector>& proof_tree, 43 | std::vector, int>>& recovery, 44 | const BatchProof& proof); 45 | 46 | bool VerifyProofTree(std::vector> proof_tree, 47 | const std::vector& target_hashes, 48 | const std::vector& proof_hashes); 49 | 50 | public: 51 | Pollard(const std::vector& roots, uint64_t num_leaves); 52 | Pollard(uint64_t num_leaves); 53 | ~Pollard(); 54 | 55 | bool Verify(const BatchProof& proof, const std::vector& target_hashes) override; 56 | 57 | /** Prune everything except the roots. 
*/ 58 | void Prune(); 59 | 60 | uint64_t NumCachedLeaves() const { return m_remember ? m_remember.use_count() - 1 : 0; } 61 | uint64_t CountNodes(const NodePtr& node) const; 62 | uint64_t CountNodes() const; 63 | }; 64 | 65 | }; // namespace utreexo 66 | #endif // UTREEXO_POLLARD_H 67 | -------------------------------------------------------------------------------- /src/crypto/common.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2014-2017 The Bitcoin Core developers 2 | // Distributed under the MIT software license, see the accompanying 3 | // file COPYING or http://www.opensource.org/licenses/mit-license.php. 4 | 5 | #ifndef BITCOIN_CRYPTO_COMMON_H 6 | #define BITCOIN_CRYPTO_COMMON_H 7 | 8 | #if defined(HAVE_CONFIG_H) 9 | #include 10 | #endif 11 | 12 | #include 13 | #include 14 | 15 | #include "compat/endian.h" 16 | 17 | uint16_t static inline ReadLE16(const unsigned char* ptr) 18 | { 19 | uint16_t x; 20 | memcpy((char*)&x, ptr, 2); 21 | return le16toh(x); 22 | } 23 | 24 | uint32_t static inline ReadLE32(const unsigned char* ptr) 25 | { 26 | uint32_t x; 27 | memcpy((char*)&x, ptr, 4); 28 | return le32toh(x); 29 | } 30 | 31 | uint64_t static inline ReadLE64(const unsigned char* ptr) 32 | { 33 | uint64_t x; 34 | memcpy((char*)&x, ptr, 8); 35 | return le64toh(x); 36 | } 37 | 38 | void static inline WriteLE16(unsigned char* ptr, uint16_t x) 39 | { 40 | uint16_t v = htole16(x); 41 | memcpy(ptr, (char*)&v, 2); 42 | } 43 | 44 | void static inline WriteLE32(unsigned char* ptr, uint32_t x) 45 | { 46 | uint32_t v = htole32(x); 47 | memcpy(ptr, (char*)&v, 4); 48 | } 49 | 50 | void static inline WriteLE64(unsigned char* ptr, uint64_t x) 51 | { 52 | uint64_t v = htole64(x); 53 | memcpy(ptr, (char*)&v, 8); 54 | } 55 | 56 | uint32_t static inline ReadBE32(const unsigned char* ptr) 57 | { 58 | uint32_t x; 59 | memcpy((char*)&x, ptr, 4); 60 | return be32toh(x); 61 | } 62 | 63 | uint64_t static inline ReadBE64(const unsigned char* ptr) 64 | { 65 | uint64_t x; 66 | memcpy((char*)&x, ptr, 8); 67 | return be64toh(x); 68 | } 69 | 70 | void static inline WriteBE32(unsigned char* ptr, uint32_t x) 71 | { 72 | uint32_t v = htobe32(x); 73 | memcpy(ptr, (char*)&v, 4); 74 | } 75 | 76 | void static inline WriteBE64(unsigned char* ptr, uint64_t x) 77 | { 78 | uint64_t v = htobe64(x); 79 | memcpy(ptr, (char*)&v, 8); 80 | } 81 | 82 | /** Return the smallest number n such that (x >> n) == 0 (or 64 if the highest bit in x is set. */ 83 | uint64_t static inline CountBits(uint64_t x) 84 | { 85 | #if HAVE_DECL___BUILTIN_CLZL 86 | if (sizeof(unsigned long) >= sizeof(uint64_t)) { 87 | return x ? 8 * sizeof(unsigned long) - __builtin_clzl(x) : 0; 88 | } 89 | #endif 90 | #if HAVE_DECL___BUILTIN_CLZLL 91 | if (sizeof(unsigned long long) >= sizeof(uint64_t)) { 92 | return x ? 8 * sizeof(unsigned long long) - __builtin_clzll(x) : 0; 93 | } 94 | #endif 95 | int ret = 0; 96 | while (x) { 97 | x >>= 1; 98 | ++ret; 99 | } 100 | return ret; 101 | } 102 | 103 | #endif // BITCOIN_CRYPTO_COMMON_H 104 | -------------------------------------------------------------------------------- /include/batchproof.h: -------------------------------------------------------------------------------- 1 | #ifndef UTREEXO_BATCHPROOF_H 2 | #define UTREEXO_BATCHPROOF_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | namespace utreexo { 10 | 11 | /** BatchProof represents a proof for multiple leaves. 
*/ 12 | class BatchProof 13 | { 14 | private: 15 | // The unsorted/sorted lists of leaf positions that are being proven. 16 | std::vector m_targets, m_sorted_targets; 17 | 18 | // The proof hashes for the targets. 19 | std::vector> m_proof; 20 | 21 | public: 22 | BatchProof(const std::vector& targets, std::vector> proof) 23 | : m_targets(targets), m_sorted_targets(targets), m_proof(proof) 24 | { 25 | std::sort(m_sorted_targets.begin(), m_sorted_targets.end()); 26 | } 27 | 28 | BatchProof() {} 29 | 30 | void SetNull() 31 | { 32 | m_targets = std::vector(); 33 | m_sorted_targets = std::vector(); 34 | m_proof = std::vector>(); 35 | } 36 | 37 | const std::vector& GetTargets() const; 38 | const std::vector& GetSortedTargets() const; 39 | const std::vector>& GetHashes() const; 40 | 41 | void Serialize(std::vector& bytes) const; 42 | bool Unserialize(const std::vector& bytes); 43 | 44 | /** 45 | * Perform some simple sanity checks on a proof. 46 | * - Check that the targets are sorted in ascending order with no duplicates. 47 | * - Check that the number of proof hashes is not larger than the number expected hashes. 48 | */ 49 | bool CheckSanity(uint64_t num_leaves) const; 50 | 51 | bool operator==(const BatchProof& other); 52 | 53 | void Print(); 54 | }; 55 | 56 | /** UndoBatch represents the data needed to undo a batch modification in the accumulator. */ 57 | class UndoBatch 58 | { 59 | private: 60 | uint64_t m_num_additions; 61 | std::vector m_deleted_positions; 62 | std::vector> m_deleted_hashes; 63 | 64 | public: 65 | UndoBatch(uint64_t num_adds, 66 | const std::vector& deleted_positions, 67 | const std::vector>& deleted_hashes) 68 | : m_num_additions(num_adds), 69 | m_deleted_positions(deleted_positions), 70 | m_deleted_hashes(deleted_hashes) {} 71 | UndoBatch() {} 72 | 73 | void Serialize(std::vector& bytes) const; 74 | bool Unserialize(const std::vector& bytes); 75 | 76 | uint64_t GetNumAdds() const; 77 | const std::vector& GetDeletedPositions() const; 78 | const std::vector>& GetDeletedHashes() const; 79 | 80 | bool operator==(const UndoBatch& other); 81 | 82 | void Print(); 83 | }; 84 | 85 | }; // namespace utreexo 86 | #endif // UTREEXO_BATCHPROOF_H 87 | -------------------------------------------------------------------------------- /src/bench/util/args.cpp: -------------------------------------------------------------------------------- 1 | #include "args.h" 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | // trimmed down version of https://github.com/bitcoin/bitcoin/blob/23.x/src/util/system.cpp#L306 8 | void benchmark::ArgsManager::ParseParameters(int argc, const char* const argv[]) 9 | { 10 | m_command_line_options.clear(); 11 | 12 | for (int i = 1; i < argc; i++) { 13 | std::string key(argv[i]); 14 | std::string value; 15 | size_t is_index = key.find('='); 16 | 17 | if (is_index != std::string::npos) { 18 | value = key.substr(is_index + 1); 19 | key.erase(is_index); 20 | } 21 | 22 | // key is -foo 23 | m_command_line_options[key] = value; 24 | } 25 | } 26 | 27 | // trimmed down version of https://github.com/bitcoin/bitcoin/blob/23.x/src/util/system.cpp#L677 28 | std::string benchmark::ArgsManager::GetHelpMessage() 29 | { 30 | std::string usage = ""; 31 | usage += std::string("Options") + std::string("\n\n"); 32 | for (const auto& arg : m_available_args) { 33 | std::string name; 34 | if (arg.second.m_help_param.empty()) { 35 | name = arg.first; 36 | } else { 37 | name = arg.first + arg.second.m_help_param; 38 | } 39 | usage += HelpMessageOpt(name, arg.second.m_help_text); 40 | } 
41 | return usage; 42 | } 43 | 44 | static const int optIndent = 2; 45 | static const int msgIndent = 7; 46 | 47 | // trimmed down version of https://github.com/bitcoin/bitcoin/blob/23.x/src/util/system.cpp#L765 48 | std::string benchmark::ArgsManager::HelpMessageOpt(const std::string& option, const std::string& message) 49 | { 50 | return std::string(optIndent, ' ') + std::string(option) + 51 | std::string("\n") + std::string(msgIndent, ' ') + std::string(message) + 52 | std::string("\n\n"); 53 | } 54 | 55 | // trimmed down version of https://github.com/bitcoin/bitcoin/blob/23.x/src/util/system.cpp#L649 56 | void benchmark::ArgsManager::AddArg(const std::string& name, const std::string& help) 57 | { 58 | // Split arg name from its help param 59 | size_t eq_index = name.find('='); 60 | if (eq_index == std::string::npos) { 61 | eq_index = name.size(); 62 | } 63 | std::string arg_name = name.substr(0, eq_index); 64 | 65 | std::map& arg_map = m_available_args; 66 | auto ret = arg_map.emplace(arg_name, Arg{name.substr(eq_index, name.size() - eq_index), help}); 67 | assert(ret.second); // Make sure an insertion actually happened 68 | } 69 | 70 | std::string benchmark::ArgsManager::GetArg(const std::string& strArg, const std::string& strDefault) const 71 | { 72 | try { 73 | const std::string& value = m_command_line_options.at(strArg); 74 | return value; 75 | } catch (const std::out_of_range&) { 76 | return strDefault; 77 | } 78 | } 79 | 80 | int64_t benchmark::ArgsManager::GetIntArg(const std::string& strArg, int64_t nDefault) const 81 | { 82 | try { 83 | const std::string& value = m_command_line_options.at(strArg); 84 | return stoi(value); 85 | } catch (const std::out_of_range&) { 86 | return nDefault; 87 | }; 88 | } 89 | 90 | bool benchmark::ArgsManager::IsArgSet(const std::string& strArg) const 91 | { 92 | try { 93 | m_command_line_options.at(strArg); 94 | return true; 95 | } catch (const std::out_of_range&) { 96 | return false; 97 | } 98 | } -------------------------------------------------------------------------------- /src/test/state_tests.cpp: -------------------------------------------------------------------------------- 1 | #include "../state.h" 2 | #include 3 | 4 | using namespace utreexo; 5 | 6 | BOOST_AUTO_TEST_SUITE(state_tests) 7 | 8 | BOOST_AUTO_TEST_CASE(constructor) 9 | { 10 | ForestState state; 11 | BOOST_CHECK(state.m_num_leaves == 0); 12 | ForestState state1(100); 13 | BOOST_CHECK(state1.m_num_leaves == 100); 14 | } 15 | 16 | BOOST_AUTO_TEST_CASE(positions) 17 | { 18 | /* 19 | * 28 20 | * |---------------\ 21 | * 24 25 26 22 | * |-------\ |-------\ |-------\ 23 | * 16 17 18 19 20 21 22 24 | * |---\ |---\ |---\ |---\ |---\ |---\ |---\ 25 | * 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14 26 | */ 27 | 28 | /* 29 | * 11100 30 | * |-----------------------\ 31 | * 11000 11001 11010 32 | * |-----------\ |-----------\ |-----------\ 33 | * 10000 10001 10010 10011 10100 10101 10110 34 | * |-----\ |-----\ |-----\ |-----\ |-----\ |-----\ |-----\ 35 | * 00000 00001 00010 00011 00100 00101 00110 00111 01000 01001 01010 01011 01100 01101 01110 36 | */ 37 | // the bits visualization helps to better understand the algorithms 38 | // https://github.com/mit-dci/utreexo/blob/master/accumulator/printout.txt 39 | 40 | ForestState state(15); 41 | 42 | BOOST_CHECK(state.LeftChild(28) == 24); 43 | BOOST_CHECK(state.Sibling(state.LeftChild(28)) == 25); 44 | BOOST_CHECK(state.RightSibling(state.LeftChild(28)) == 25); 45 | BOOST_CHECK(state.RightSibling(25) == 25); 46 | 
BOOST_CHECK(state.Parent(state.LeftChild(28)) == 28); 47 | 48 | for (uint64_t pos = 0; pos < 8; pos++) { 49 | BOOST_CHECK(state.Ancestor(pos, 3) == 28); 50 | BOOST_CHECK(state.LeftDescendant(state.Ancestor(pos, 3), 3) == 0); 51 | } 52 | 53 | BOOST_CHECK(state.LeftDescendant(26, 2) == 8); 54 | BOOST_CHECK(state.LeftDescendant(25, 2) == 4); 55 | BOOST_CHECK(state.Cousin(4) == 6); 56 | BOOST_CHECK(state.Cousin(5) == 7); 57 | } 58 | 59 | BOOST_AUTO_TEST_CASE(proof) 60 | { 61 | /* 62 | * 28 63 | * |---------------\ 64 | * 24 25 26 65 | * |-------\ |-------\ |-------\ 66 | * 16 17 18 19 20 21 22 67 | * |---\ |---\ |---\ |---\ |---\ |---\ |---\ 68 | * 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14 69 | */ 70 | 71 | ForestState state(15); 72 | 73 | std::vector targets = {0}; 74 | std::vector expected_proof = {1, 17, 25}; 75 | std::vector expected_computed = {0, 16, 24, 28}; 76 | std::pair, std::vector> output = state.ProofPositions(targets); 77 | BOOST_CHECK_EQUAL_COLLECTIONS(expected_proof.begin(), expected_proof.end(), 78 | output.first.begin(), output.first.end()); 79 | BOOST_CHECK_EQUAL_COLLECTIONS(expected_computed.begin(), expected_computed.end(), 80 | output.second.begin(), output.second.end()); 81 | 82 | targets = {0, 2, 3, 6, 8, 10, 11}; 83 | expected_proof = {1, 7, 9, 18}; 84 | expected_computed = {0, 2, 3, 6, 8, 10, 11, 16, 17, 19, 20, 21, 24, 25, 26, 28}; 85 | output = state.ProofPositions(targets); 86 | BOOST_CHECK_EQUAL_COLLECTIONS(expected_proof.begin(), expected_proof.end(), 87 | output.first.begin(), output.first.end()); 88 | BOOST_CHECK_EQUAL_COLLECTIONS(expected_computed.begin(), expected_computed.end(), 89 | output.second.begin(), output.second.end()); 90 | 91 | // TODO: add tests with random numbers 92 | } 93 | 94 | BOOST_AUTO_TEST_SUITE_END() 95 | -------------------------------------------------------------------------------- /src/bench/bench_utreexo.cpp: -------------------------------------------------------------------------------- 1 | #include "bench.h" 2 | #include "util/args.h" 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | static const char* DEFAULT_BENCH_FILTER = ".*"; 10 | static constexpr int64_t DEFAULT_MIN_TIME_MS{10}; 11 | 12 | using namespace benchmark; 13 | 14 | static void SetupBenchArgs(ArgsManager& argsman) 15 | { 16 | argsman.AddArg("-asymptote=", "Test asymptotic growth of the runtime of an algorithm, if supported by the benchmark"); 17 | argsman.AddArg("-filter=", "Regular expression filter to select benchmark by name (default: " + std::string(DEFAULT_BENCH_FILTER) + ")"); 18 | argsman.AddArg("-min_time=", "Minimum runtime per benchmark, in milliseconds (default: " + std::to_string(DEFAULT_MIN_TIME_MS) + ")"); 19 | // help options 20 | argsman.AddArg("-?", "Print this help message and exit"); 21 | } 22 | 23 | // parses a comma separated list like "10,20,30,50" 24 | static std::vector parseAsymptote(const std::string& str) 25 | { 26 | std::stringstream ss(str); 27 | std::vector numbers; 28 | double d; 29 | char c; 30 | while (ss >> d) { 31 | numbers.push_back(d); 32 | ss >> c; 33 | } 34 | return numbers; 35 | } 36 | 37 | int main(int argc, char** argv) 38 | { 39 | ArgsManager argsman; 40 | SetupBenchArgs(argsman); 41 | argsman.ParseParameters(argc, argv); 42 | 43 | // Help text 44 | if (argsman.IsArgSet("-?") || argsman.IsArgSet("-h")) { 45 | std::cout << "Usage: bench_utreexo [options]\n" 46 | "\n" 47 | << argsman.GetHelpMessage() 48 | << "Description:\n" 49 | "\n" 50 | " bench_utreexo executes microbenchmarks. 
The quality of the benchmark results\n" 51 | " highly depends on the stability of the machine. It can sometimes be difficult\n" 52 | " to get stable, repeatable results, so here are a few tips:\n" 53 | "\n" 54 | " * Use pyperf [1] to disable frequency scaling, turbo boost etc. For best\n" 55 | " results, use CPU pinning and CPU isolation (see [2]).\n" 56 | "\n" 57 | " * Each call of run() should do exactly the same work. E.g. inserting into\n" 58 | " a std::vector doesn't do that as it will reallocate on certain calls. Make\n" 59 | " sure each run has exactly the same preconditions.\n" 60 | "\n" 61 | " * If results are still not reliable, increase runtime with e.g.\n" 62 | " -min_time=5000 to let a benchmark run for at least 5 seconds.\n" 63 | "\n" 64 | " * bench_utreexo uses nanobench [3] for which there is extensive\n" 65 | " documentation available online.\n" 66 | "\n" 67 | "Environment Variables:\n" 68 | "\n" 69 | " To attach a profiler you can run a benchmark in endless mode. This can be\n" 70 | " done with the environment variable NANOBENCH_ENDLESS. E.g. like so:\n" 71 | "\n" 72 | " NANOBENCH_ENDLESS=AddElementsPollard ./bench_utreexo -filter=AddElementsPollard\n" 73 | "\n" 74 | " In rare cases it can be useful to suppress stability warnings. This can be\n" 75 | " done with the environment variable NANOBENCH_SUPPRESS_WARNINGS, e.g.:\n" 76 | "\n" 77 | " NANOBENCH_SUPPRESS_WARNINGS=1 ./bench_utreexo\n" 78 | "\n" 79 | "Notes:\n" 80 | "\n" 81 | " 1. pyperf\n" 82 | " https://github.com/psf/pyperf\n" 83 | "\n" 84 | " 2. CPU pinning & isolation\n" 85 | " https://pyperf.readthedocs.io/en/latest/system.html\n" 86 | "\n" 87 | " 3. nanobench\n" 88 | " https://github.com/martinus/nanobench\n" 89 | "\n"; 90 | 91 | return EXIT_SUCCESS; 92 | } 93 | 94 | // initialize arguments 95 | Args args; 96 | args.asymptote = parseAsymptote(argsman.GetArg("-asymptote", "")); 97 | args.min_time = std::chrono::milliseconds(argsman.GetIntArg("-min_time", DEFAULT_MIN_TIME_MS)); 98 | args.regex_filter = argsman.GetArg("-filter", DEFAULT_BENCH_FILTER); 99 | 100 | BenchRunner::RunAll(args); 101 | 102 | return EXIT_SUCCESS; 103 | } -------------------------------------------------------------------------------- /src/bench/ram_forest.cpp: -------------------------------------------------------------------------------- 1 | #include "bench.h" 2 | #include "include/utreexo.h" 3 | #include "util/leaves.h" 4 | 5 | #include 6 | 7 | using namespace utreexo; 8 | 9 | static void AddElements(benchmark::Bench& bench, bool with_modify = false) 10 | { 11 | UndoBatch unused_undo; 12 | const int num_leaves = bench.complexityN() > 1 ?
static_cast(bench.complexityN()) : 64; 13 | 14 | std::vector leaves; 15 | CreateTestLeaves(leaves, num_leaves); 16 | 17 | // Benchmark 18 | if (with_modify) { 19 | bench.run([&]() { 20 | RamForest full(0); // initialize 21 | full.Modify(unused_undo, leaves, {}); // add leaves 22 | }); 23 | } else { 24 | bench.run([&]() { 25 | RamForest full(0); // initialize 26 | full.Add(leaves); // add leaves 27 | }); 28 | } 29 | } 30 | 31 | // Benchmarks the creation of leaves 32 | static void AddElementsForest(benchmark::Bench& bench) 33 | { 34 | AddElements(bench); 35 | } 36 | 37 | // Benchmarks the creation of leaves using Modify 38 | static void AddElementsWithModifyForest(benchmark::Bench& bench) 39 | { 40 | AddElements(bench, true); 41 | } 42 | 43 | // Benchmarks the restoration from disk 44 | static void RestoreFromDiskForest(benchmark::Bench& bench) 45 | { 46 | UndoBatch unused_undo; 47 | const int num_leaves = bench.complexityN() > 1 ? static_cast(bench.complexityN()) : 64; 48 | 49 | std::remove("./bench_forest"); // in case file already exists from prev run 50 | std::vector leaves; 51 | CreateTestLeaves(leaves, num_leaves); 52 | { 53 | RamForest full("./bench_forest"); // initialize 54 | full.Modify(unused_undo, leaves, {}); // add leaves 55 | } 56 | 57 | bench.run([&]() { 58 | RamForest full("./bench_forest"); 59 | assert(full.NumLeaves() == num_leaves); 60 | }); 61 | } 62 | 63 | // Benchmarks the proof of half the number of created leaves 64 | static void ProveElementsForest(benchmark::Bench& bench) 65 | { 66 | const int num_leaves_to_proof = bench.complexityN() > 1 ? static_cast(bench.complexityN()) : 32; 67 | const int num_leaves = num_leaves_to_proof * 2; 68 | std::vector leaves; 69 | CreateTestLeaves(leaves, num_leaves); 70 | 71 | UndoBatch unused_undo; 72 | BatchProof proof; 73 | RamForest full(0); // initialize 74 | 75 | full.Modify(unused_undo, leaves, {}); // add leaves 76 | // select leaves to proof 77 | std::vector leaf_hashes; 78 | random_unique(leaves.begin(), leaves.end(), num_leaves_to_proof); 79 | for (int i = 0; i < num_leaves_to_proof; ++i) { 80 | leaf_hashes.push_back(leaves[i].first); 81 | } 82 | 83 | // Benchmark 84 | bench.unit("proof").run([&] { 85 | full.Prove(proof, leaf_hashes); 86 | }); 87 | } 88 | 89 | // Benchmarks the proof verification of half the number of created leaves 90 | static void VerifyElementsForest(benchmark::Bench& bench) 91 | { 92 | const int num_leaves_to_verify = bench.complexityN() > 1 ? 
static_cast(bench.complexityN()) : 32; 93 | int num_leaves = num_leaves_to_verify * 2; 94 | std::vector leaves; 95 | CreateTestLeaves(leaves, num_leaves); 96 | UndoBatch unused_undo; 97 | BatchProof proof; 98 | RamForest full(0); 99 | 100 | full.Modify(unused_undo, leaves, {}); // add leaves to forest 101 | // select leaves to proof/verify 102 | std::vector leaf_hashes; 103 | random_unique(leaves.begin(), leaves.end(), num_leaves_to_verify); 104 | for (int i = 0; i < num_leaves_to_verify; ++i) { 105 | leaf_hashes.push_back(leaves[i].first); 106 | } 107 | full.Prove(proof, leaf_hashes); // prove elements in the forest 108 | 109 | // Benchmark 110 | bench.unit("verification").run([&] { 111 | full.Verify(proof, leaf_hashes); 112 | }); 113 | } 114 | 115 | // Benchmarks the removal of half the number of created leaves 116 | // this benchmark unavoidably includes the creation of the leaves 117 | static void RemoveElementsForest(benchmark::Bench& bench) 118 | { 119 | UndoBatch unused_undo; 120 | BatchProof proof; 121 | const int num_leaves_to_remove = bench.complexityN() > 1 ? static_cast(bench.complexityN()) : 32; 122 | const int num_leaves = num_leaves_to_remove * 2; 123 | 124 | std::vector leaves; 125 | CreateTestLeaves(leaves, num_leaves); 126 | 127 | // select leaves to remove 128 | std::vector leaf_hashes; 129 | std::vector leaves_to_shuffle(leaves); // copy leaves 130 | random_unique(leaves_to_shuffle.begin(), leaves_to_shuffle.end(), num_leaves_to_remove); 131 | for (int i = 0; i < num_leaves_to_remove; ++i) { 132 | leaf_hashes.push_back(leaves_to_shuffle[i].first); 133 | } 134 | // create the forest once outside the benchmark to calculate the proof 135 | // without including the proof operation in the benchmark 136 | { 137 | RamForest full(0); 138 | full.Add(leaves); 139 | full.Prove(proof, leaf_hashes); 140 | } 141 | 142 | // Benchmark 143 | bench.run([&]() { 144 | // add leaves 145 | RamForest full(0); 146 | full.Add(leaves); 147 | 148 | // remove leaves 149 | full.Modify(unused_undo, {}, proof.GetSortedTargets()); 150 | }); 151 | } 152 | 153 | BENCHMARK(AddElementsForest); 154 | BENCHMARK(AddElementsWithModifyForest); 155 | BENCHMARK(RestoreFromDiskForest); 156 | BENCHMARK(ProveElementsForest); 157 | BENCHMARK(VerifyElementsForest); 158 | BENCHMARK(RemoveElementsForest); -------------------------------------------------------------------------------- /include/accumulator.h: -------------------------------------------------------------------------------- 1 | #ifndef UTREEXO_ACCUMULATOR_H 2 | #define UTREEXO_ACCUMULATOR_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | namespace utreexo { 15 | class ForestState; 16 | 17 | using Hash = std::array; 18 | using Leaf = std::pair; 19 | template 20 | using NodePtr = std::shared_ptr; 21 | 22 | class BatchProof; 23 | 24 | /** Provides an interface for a hash based dynamic accumulator. */ 25 | class Accumulator 26 | { 27 | public: 28 | Accumulator(uint64_t num_leaves); 29 | virtual ~Accumulator(); 30 | 31 | /** 32 | * Verify a batch proof. 33 | * Return whether or not the proof proved the target hashes. 34 | * The internal state of the accumulator might be mutated but the roots will not. 35 | */ 36 | virtual bool Verify(const BatchProof& proof, const std::vector& target_hashes) = 0; 37 | 38 | /** Modify the accumulator by adding leaves and removing targets. 
*/ 39 | bool Modify(const std::vector& new_leaves, const std::vector& targets); 40 | 41 | /** 42 | * Create a batch proof for a set of target hashes. (A target hash is the hash of a leaf in the forest.) 43 | * The target hashes are not required to be sorted by leaf position in the forest and 44 | * the targets of the batch proof will have the same order as the hashes. 45 | * 46 | * Example: 47 | * target_hashes = [hash of leaf 50, hash of leaf 10, hash of leaf 20] 48 | * proof.targets = [50, 10, 20] 49 | * 50 | * Return true on success and false on failure. Proving can fail if a target hash does not 51 | * exist in the accumulator. For forests, this failure will only happen if a target hash 52 | * doesn't exist at all. For Pollards, a target hash could exist but the failure may still 53 | * happen as Pollard doesn't cache every hash. 54 | */ 55 | bool Prove(BatchProof& proof, const std::vector& target_hashes) const; 56 | 57 | /** Return the root hashes (roots of taller trees first). */ 58 | void Roots(std::vector& roots) const; 59 | 60 | bool ComparePositionMap(Accumulator& other) const; 61 | void PrintPositionMap() const; 62 | void PrintRoots() const; 63 | 64 | uint64_t NumLeaves() const; 65 | 66 | protected: 67 | struct LeafHasher { 68 | size_t operator()(const Hash& hash) const; 69 | }; 70 | 71 | /* 72 | * Node represents a node in the accumulator forest. 73 | * This is used to create an abstraction on top of an accumulator implementation, 74 | * because it might not use a pointer-based tree data structure, but the verification and modification 75 | * algorithms are quite nicely expressed using one. 76 | */ 77 | class Node; 78 | 79 | // The number of leaves in the forest. 80 | uint64_t m_num_leaves; 81 | 82 | // The roots of the accumulator. 83 | std::vector> m_roots; 84 | 85 | // A map from leaf hashes to their positions. 86 | // This is needed for proving that leaves are included in the accumulator. 87 | // The forest will always have all the positions of all the leaves. The pollard 88 | // has the option of pruning leaves and thus will not always have all the positions 89 | // of all the leaves. 90 | std::unordered_map m_posmap; 91 | 92 | void UpdatePositionMapForRange(uint64_t from, uint64_t to, uint64_t range); 93 | void UpdatePositionMapForSubtreeSwap(uint64_t from, uint64_t to); 94 | 95 | /* Return the hash at a position */ 96 | virtual std::optional Read(uint64_t pos) const = 0; 97 | /* Return all hashes that are available in the interval [pos, pos+range[ */ 98 | virtual std::vector ReadLeafRange(uint64_t pos, uint64_t range) const = 0; 99 | 100 | /* 101 | * Swap two subtrees in the forest. 102 | * Return the nodes that need to be rehashed. 103 | */ 104 | virtual NodePtr SwapSubTrees(uint64_t from, uint64_t to) = 0; 105 | 106 | // MergeRoot and NewLeaf only have the desired effect if called correctly. 107 | // NewLeaf should be called to allocate a new leaf. 108 | // After calling NewLeaf, MergeRoot should be called for every consecutive least significant bit that is set to 1. 109 | 110 | /* Return the result of the latest merge. */ 111 | virtual NodePtr MergeRoot(uint64_t parent_pos, Hash parent_hash) = 0; 112 | /* Allocate a new leaf and assign it the given hash */ 113 | virtual NodePtr NewLeaf(const Leaf& leaf) = 0; 114 | 115 | /* Free memory or select new roots. */ 116 | virtual void FinalizeRemove(uint64_t next_num_leaves) = 0; 117 | 118 | /* Add new leaves to the accumulator. */ 119 | virtual bool Add(const std::vector& leaves); 120 | /* Remove target leaves from the accumulator. 
*/ 121 | bool Remove(const std::vector& targets); 122 | 123 | /* Compute the parent hash from two children. */ 124 | static void ParentHash(Hash& parent, const Hash& left, const Hash& right); 125 | 126 | template 127 | static NodePtr MakeNodePtr(const Args&... args) 128 | { 129 | NodePtr node = std::make_shared(args...); 130 | if (!node) { 131 | throw std::runtime_error("Accumulator::MakeNodePtr failed to allocate node."); 132 | } 133 | 134 | return node; 135 | } 136 | }; 137 | 138 | }; // namespace utreexo 139 | #endif // UTREEXO_ACCUMULATOR_H 140 | -------------------------------------------------------------------------------- /src/compat/endian.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2014-2018 The Bitcoin Core developers 2 | // Distributed under the MIT software license, see the accompanying 3 | // file COPYING or http://www.opensource.org/licenses/mit-license.php. 4 | 5 | #ifndef BITCOIN_COMPAT_ENDIAN_H 6 | #define BITCOIN_COMPAT_ENDIAN_H 7 | 8 | #if defined(HAVE_CONFIG_H) 9 | #include 10 | #endif 11 | 12 | #include 13 | 14 | #include 15 | 16 | #if defined(HAVE_ENDIAN_H) 17 | #include 18 | #elif defined(HAVE_SYS_ENDIAN_H) 19 | #include 20 | #endif 21 | 22 | #ifndef HAVE_CONFIG_H 23 | // While not technically a supported configuration, defaulting to defining these 24 | // DECLs when we were compiled without autotools makes it easier for other build 25 | // systems to build things like libbitcoinconsensus for strange targets. 26 | #ifdef htobe16 27 | #define HAVE_DECL_HTOBE16 1 28 | #endif 29 | #ifdef htole16 30 | #define HAVE_DECL_HTOLE16 1 31 | #endif 32 | #ifdef be16toh 33 | #define HAVE_DECL_BE16TOH 1 34 | #endif 35 | #ifdef le16toh 36 | #define HAVE_DECL_LE16TOH 1 37 | #endif 38 | 39 | #ifdef htobe32 40 | #define HAVE_DECL_HTOBE32 1 41 | #endif 42 | #ifdef htole32 43 | #define HAVE_DECL_HTOLE32 1 44 | #endif 45 | #ifdef be32toh 46 | #define HAVE_DECL_BE32TOH 1 47 | #endif 48 | #ifdef le32toh 49 | #define HAVE_DECL_LE32TOH 1 50 | #endif 51 | 52 | #ifdef htobe64 53 | #define HAVE_DECL_HTOBE64 1 54 | #endif 55 | #ifdef htole64 56 | #define HAVE_DECL_HTOLE64 1 57 | #endif 58 | #ifdef be64toh 59 | #define HAVE_DECL_BE64TOH 1 60 | #endif 61 | #ifdef le64toh 62 | #define HAVE_DECL_LE64TOH 1 63 | #endif 64 | 65 | #endif // HAVE_CONFIG_H 66 | 67 | #if defined(WORDS_BIGENDIAN) 68 | 69 | #if HAVE_DECL_HTOBE16 == 0 70 | inline uint16_t htobe16(uint16_t host_16bits) 71 | { 72 | return host_16bits; 73 | } 74 | #endif // HAVE_DECL_HTOBE16 75 | 76 | #if HAVE_DECL_HTOLE16 == 0 77 | inline uint16_t htole16(uint16_t host_16bits) 78 | { 79 | return bswap_16(host_16bits); 80 | } 81 | #endif // HAVE_DECL_HTOLE16 82 | 83 | #if HAVE_DECL_BE16TOH == 0 84 | inline uint16_t be16toh(uint16_t big_endian_16bits) 85 | { 86 | return big_endian_16bits; 87 | } 88 | #endif // HAVE_DECL_BE16TOH 89 | 90 | #if HAVE_DECL_LE16TOH == 0 91 | inline uint16_t le16toh(uint16_t little_endian_16bits) 92 | { 93 | return bswap_16(little_endian_16bits); 94 | } 95 | #endif // HAVE_DECL_LE16TOH 96 | 97 | #if HAVE_DECL_HTOBE32 == 0 98 | inline uint32_t htobe32(uint32_t host_32bits) 99 | { 100 | return host_32bits; 101 | } 102 | #endif // HAVE_DECL_HTOBE32 103 | 104 | #if HAVE_DECL_HTOLE32 == 0 105 | inline uint32_t htole32(uint32_t host_32bits) 106 | { 107 | return bswap_32(host_32bits); 108 | } 109 | #endif // HAVE_DECL_HTOLE32 110 | 111 | #if HAVE_DECL_BE32TOH == 0 112 | inline uint32_t be32toh(uint32_t big_endian_32bits) 113 | { 114 | return big_endian_32bits; 115 | 
} 116 | #endif // HAVE_DECL_BE32TOH 117 | 118 | #if HAVE_DECL_LE32TOH == 0 119 | inline uint32_t le32toh(uint32_t little_endian_32bits) 120 | { 121 | return bswap_32(little_endian_32bits); 122 | } 123 | #endif // HAVE_DECL_LE32TOH 124 | 125 | #if HAVE_DECL_HTOBE64 == 0 126 | inline uint64_t htobe64(uint64_t host_64bits) 127 | { 128 | return host_64bits; 129 | } 130 | #endif // HAVE_DECL_HTOBE64 131 | 132 | #if HAVE_DECL_HTOLE64 == 0 133 | inline uint64_t htole64(uint64_t host_64bits) 134 | { 135 | return bswap_64(host_64bits); 136 | } 137 | #endif // HAVE_DECL_HTOLE64 138 | 139 | #if HAVE_DECL_BE64TOH == 0 140 | inline uint64_t be64toh(uint64_t big_endian_64bits) 141 | { 142 | return big_endian_64bits; 143 | } 144 | #endif // HAVE_DECL_BE64TOH 145 | 146 | #if HAVE_DECL_LE64TOH == 0 147 | inline uint64_t le64toh(uint64_t little_endian_64bits) 148 | { 149 | return bswap_64(little_endian_64bits); 150 | } 151 | #endif // HAVE_DECL_LE64TOH 152 | 153 | #else // WORDS_BIGENDIAN 154 | 155 | #if HAVE_DECL_HTOBE16 == 0 156 | inline uint16_t htobe16(uint16_t host_16bits) 157 | { 158 | return bswap_16(host_16bits); 159 | } 160 | #endif // HAVE_DECL_HTOBE16 161 | 162 | #if HAVE_DECL_HTOLE16 == 0 163 | inline uint16_t htole16(uint16_t host_16bits) 164 | { 165 | return host_16bits; 166 | } 167 | #endif // HAVE_DECL_HTOLE16 168 | 169 | #if HAVE_DECL_BE16TOH == 0 170 | inline uint16_t be16toh(uint16_t big_endian_16bits) 171 | { 172 | return bswap_16(big_endian_16bits); 173 | } 174 | #endif // HAVE_DECL_BE16TOH 175 | 176 | #if HAVE_DECL_LE16TOH == 0 177 | inline uint16_t le16toh(uint16_t little_endian_16bits) 178 | { 179 | return little_endian_16bits; 180 | } 181 | #endif // HAVE_DECL_LE16TOH 182 | 183 | #if HAVE_DECL_HTOBE32 == 0 184 | inline uint32_t htobe32(uint32_t host_32bits) 185 | { 186 | return bswap_32(host_32bits); 187 | } 188 | #endif // HAVE_DECL_HTOBE32 189 | 190 | #if HAVE_DECL_HTOLE32 == 0 191 | inline uint32_t htole32(uint32_t host_32bits) 192 | { 193 | return host_32bits; 194 | } 195 | #endif // HAVE_DECL_HTOLE32 196 | 197 | #if HAVE_DECL_BE32TOH == 0 198 | inline uint32_t be32toh(uint32_t big_endian_32bits) 199 | { 200 | return bswap_32(big_endian_32bits); 201 | } 202 | #endif // HAVE_DECL_BE32TOH 203 | 204 | #if HAVE_DECL_LE32TOH == 0 205 | inline uint32_t le32toh(uint32_t little_endian_32bits) 206 | { 207 | return little_endian_32bits; 208 | } 209 | #endif // HAVE_DECL_LE32TOH 210 | 211 | #if HAVE_DECL_HTOBE64 == 0 212 | inline uint64_t htobe64(uint64_t host_64bits) 213 | { 214 | return bswap_64(host_64bits); 215 | } 216 | #endif // HAVE_DECL_HTOBE64 217 | 218 | #if HAVE_DECL_HTOLE64 == 0 219 | inline uint64_t htole64(uint64_t host_64bits) 220 | { 221 | return host_64bits; 222 | } 223 | #endif // HAVE_DECL_HTOLE64 224 | 225 | #if HAVE_DECL_BE64TOH == 0 226 | inline uint64_t be64toh(uint64_t big_endian_64bits) 227 | { 228 | return bswap_64(big_endian_64bits); 229 | } 230 | #endif // HAVE_DECL_BE64TOH 231 | 232 | #if HAVE_DECL_LE64TOH == 0 233 | inline uint64_t le64toh(uint64_t little_endian_64bits) 234 | { 235 | return little_endian_64bits; 236 | } 237 | #endif // HAVE_DECL_LE64TOH 238 | 239 | #endif // WORDS_BIGENDIAN 240 | 241 | #endif // BITCOIN_COMPAT_ENDIAN_H 242 | -------------------------------------------------------------------------------- /src/state.h: -------------------------------------------------------------------------------- 1 | 2 | #ifndef UTREEXO_STATE_H 3 | #define UTREEXO_STATE_H 4 | 5 | #include 6 | #include 7 | 8 | namespace utreexo { 9 | 10 | /** 11 | * A wrapper 
around the number of leaves the accumulator forest 12 | * that provides utility functions to compute positions and swaps, 13 | * check for roots, etc. 14 | */ 15 | class ForestState 16 | { 17 | public: 18 | /** 19 | * Swap represents a swap between two nodes in the forest. 20 | */ 21 | class Swap 22 | { 23 | public: 24 | // Swap from <-> to 25 | uint64_t m_from, m_to; 26 | // Does this swap resemble a collapse. 27 | // See `makeCollapse` for collapse definition. 28 | bool m_collapse; 29 | 30 | bool m_is_range_swap; 31 | uint64_t m_range; 32 | 33 | explicit Swap(uint64_t from, uint64_t to) 34 | : m_from(from), m_to(to), m_collapse(false), m_is_range_swap(false), m_range(0) {} 35 | explicit Swap(uint64_t from, uint64_t to, bool collapse) 36 | : m_from(from), m_to(to), m_collapse(collapse), m_is_range_swap(false), m_range(0) {} 37 | explicit Swap(uint64_t from, uint64_t to, uint64_t range) 38 | : m_from(from), m_to(to), m_collapse(false), m_is_range_swap(true), m_range(range) {} 39 | 40 | Swap ToLeaves(ForestState state) const; 41 | }; 42 | 43 | // The number of leaves in the forest. 44 | // TODO: make this private. 45 | uint64_t m_num_leaves; 46 | 47 | ForestState() : m_num_leaves(0) {} 48 | ForestState(uint64_t n) : m_num_leaves(n) {} 49 | 50 | // Functions to compute positions: 51 | 52 | // Return the parent positon. 53 | // Same as ancestor(pos, 1) 54 | uint64_t Parent(uint64_t pos) const; 55 | uint64_t Ancestor(uint64_t pos, uint8_t rise) const; 56 | // Return the position of the left child. 57 | // Same as leftDescendant(pos, 1). 58 | uint64_t LeftChild(uint64_t pos) const; 59 | uint64_t Child(uint64_t pos, uint64_t placement) const; 60 | uint64_t LeftDescendant(uint64_t pos, uint8_t drop) const; 61 | // Return the position of the cousin. 62 | // Placement (left,right) remains. 63 | uint64_t Cousin(uint64_t pos) const; 64 | // Return the position of the right sibling. 65 | // A right position is its own right sibling. 66 | uint64_t RightSibling(uint64_t pos) const; 67 | // Return the position of the sibling. 68 | uint64_t Sibling(uint64_t pos) const; 69 | 70 | /** 71 | * Compute the path to the position. 72 | * Return the index of the tree the position is in, the distance from the node 73 | * to its root and the bitfield indicating the path. 74 | */ 75 | std::tuple Path(uint64_t pos) const; 76 | 77 | /** 78 | * Compute the proof positions needed to proof the existence of some targets. 79 | * Return the proof positions and the positions of the nodes that are computed 80 | * when verifying a proof. 81 | */ 82 | std::pair, std::vector> 83 | ProofPositions(const std::vector& targets) const; 84 | 85 | // Functions for root stuff: 86 | 87 | // Return the number of roots. 88 | uint8_t NumRoots() const; 89 | // Check if there is a root on a row. 90 | bool HasRoot(uint8_t row) const; 91 | // Return the root position on a row. 92 | uint64_t RootPosition(uint8_t row) const; 93 | uint64_t RootPosition(uint8_t row, uint64_t num_leaves) const; 94 | // Return the positions of the roots in the forest 95 | std::vector RootPositions() const; 96 | std::vector RootPositions(uint64_t num_leaves) const; 97 | 98 | uint8_t RootIndex(uint64_t pos) const; 99 | 100 | // Functions for rows: 101 | 102 | // Return the number of rows. 103 | uint8_t NumRows() const; 104 | // Return the row of the position. 105 | uint8_t DetectRow(uint64_t pos) const; 106 | // Return the position of the first node in the row. 
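
Editorial aside: the position helpers declared above (Parent, Ancestor, LeftChild, LeftDescendant, Cousin, ...) operate on the row-based node numbering drawn in the state_tests diagram. A minimal sketch of that arithmetic, assuming the standard utreexo formulas rather than this library's exact internals; the values below match the 15-leaf, 4-row forest used in the unit tests.

#include <cassert>
#include <cstdint>

// Illustrative helpers only, not part of the library.
static uint64_t ParentPos(uint64_t pos, uint8_t rows) { return (pos >> 1) | (1ULL << rows); }
static uint64_t LeftChildPos(uint64_t pos, uint8_t rows) { return (pos << 1) & ((1ULL << (rows + 1)) - 1); }
static uint64_t CousinPos(uint64_t pos) { return pos ^ 2; }

int main()
{
    const uint8_t rows = 4; // a 15-leaf forest has 4 rows
    assert(ParentPos(0, rows) == 16);
    assert(ParentPos(16, rows) == 24);
    assert(ParentPos(24, rows) == 28);
    assert(LeftChildPos(28, rows) == 24);
    assert(ParentPos(LeftChildPos(28, rows), rows) == 28); // mirrors the Parent/LeftChild check in the tests
    assert(CousinPos(4) == 6);
    return 0;
}
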
107 | uint64_t RowOffset(uint64_t pos) const; 108 | uint64_t RowOffset(uint8_t row) const; 109 | 110 | /** 111 | * Compute the remove transformation swaps. 112 | * Return a vector of swaps for every row in the forest (from bottom to top). 113 | */ 114 | std::vector> 115 | Transform(const std::vector& targets) const; 116 | 117 | std::vector UndoTransform(const std::vector& targets) const; 118 | 119 | // Misc: 120 | 121 | // Return the maximum number of nodes in the forest. 122 | uint64_t MaxNodes() const; 123 | 124 | bool CheckTargetsSanity(const std::vector& targets) const; 125 | 126 | private: 127 | /* 128 | * Return all targets of this row and all targets of the next row. 129 | * (targets are the nodes that will be deleted) 130 | */ 131 | std::pair, std::vector> 132 | ComputeNextRowTargets(const std::vector& targets, 133 | bool deletion_remains, 134 | bool root_present) const; 135 | 136 | /* 137 | * 138 | */ 139 | std::vector MakeSwaps(const std::vector& targets, 140 | bool deletion_remains, 141 | bool root_present, 142 | uint64_t rootPos) const; 143 | 144 | /* 145 | * 146 | */ 147 | ForestState::Swap MakeCollapse(const std::vector& targets, 148 | bool deletion_remains, 149 | bool root_present, 150 | uint8_t row, 151 | uint64_t next_num_leaves) const; 152 | 153 | /* 154 | * 155 | */ 156 | void ConvertCollapses(std::vector>& swaps, 157 | std::vector& collapses) const; 158 | 159 | void SwapInRow(ForestState::Swap swap, 160 | std::vector& collapses, 161 | uint8_t swapRow) const; 162 | 163 | void SwapIfDescendant(ForestState::Swap swap, 164 | ForestState::Swap& collapse, 165 | uint8_t swapRow, 166 | uint8_t collapse_row) const; 167 | }; 168 | 169 | // TODO: remove these 170 | void print_vector(const std::vector& vec); 171 | void print_swaps(const std::vector& vec); 172 | 173 | }; // namespace utreexo 174 | #endif // UTREEXO_STATE_H 175 | -------------------------------------------------------------------------------- /configure.ac: -------------------------------------------------------------------------------- 1 | AC_INIT([utreexo], [0.0.1], [http://github.com/dergoegge/libutreexo]) 2 | 3 | m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])]) 4 | 5 | AC_PREREQ(2.60) 6 | AC_CONFIG_SRCDIR([src/accumulator.cpp]) 7 | AC_CONFIG_AUX_DIR([build-aux]) 8 | AC_CONFIG_MACRO_DIR([build-aux/m4]) 9 | AM_INIT_AUTOMAKE([subdir-objects foreign]) 10 | 11 | LT_INIT 12 | LT_LANG([C++]) 13 | AC_LANG([C++]) 14 | 15 | AC_PATH_PROG(CCACHE,ccache) 16 | 17 | AC_ARG_ENABLE([ccache], 18 | [AS_HELP_STRING([--disable-ccache], 19 | [do not use ccache for building (default is to use if found)])], 20 | [use_ccache=$enableval], 21 | [use_ccache=auto]) 22 | 23 | AC_ARG_ENABLE(tests, 24 | AS_HELP_STRING([--enable-tests],[compile tests (default is yes)]), 25 | [use_tests=$enableval], 26 | [use_tests=yes]) 27 | 28 | AC_ARG_ENABLE([debug], 29 | [AS_HELP_STRING([--enable-debug], 30 | [use compiler flags and macros suited for debugging (default is no)])], 31 | [enable_debug=$enableval], 32 | [enable_debug=no]) 33 | 34 | AC_ARG_ENABLE(bench, 35 | AS_HELP_STRING([--disable-bench],[do not compile benchmarks (default is to compile)]), 36 | [use_bench=$enableval], 37 | [use_bench=yes]) 38 | 39 | AC_ARG_ENABLE([fuzz], 40 | AS_HELP_STRING([--enable-fuzz], 41 | [build for fuzzing (default no). 
enabling this will disable all other targets.]), 42 | [enable_fuzz=$enableval], 43 | [enable_fuzz=no]) 44 | 45 | AC_ARG_ENABLE([fuzz-cov], 46 | AS_HELP_STRING([--enable-fuzz-cov], 47 | [enable fuzz coverage reporting.]), 48 | [enable_fuzz_cov=$enableval], 49 | [enable_fuzz_cov=no]) 50 | 51 | AX_CHECK_COMPILE_FLAG([-Werror],[CXXFLAG_WERROR="-Werror"],[CXXFLAG_WERROR=""]) 52 | 53 | AX_CXX_COMPILE_STDCXX([17], [noext], [mandatory], [nodefault]) 54 | 55 | AX_CHECK_COMPILE_FLAG([-Wall],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wall"],,[[$CXXFLAG_WERROR]]) 56 | ## Some compilers (gcc) ignore unknown -Wno-* options, but warn about all 57 | ## unknown options if any other warning is produced. Test the -Wfoo case, and 58 | ## set the -Wno-foo case if it works. 59 | AX_CHECK_COMPILE_FLAG([-Wshift-count-overflow],[NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Wno-shift-count-overflow"],,[[$CXXFLAG_WERROR]]) 60 | 61 | dnl SUPPRESSED_CPPFLAGS=SUPPRESS_WARNINGS([$SOME_CPPFLAGS]) 62 | dnl Replace -I with -isystem in $SOME_CPPFLAGS to suppress warnings from 63 | dnl headers from its include directories and return the result. 64 | dnl See -isystem documentation: 65 | dnl https://gcc.gnu.org/onlinedocs/gcc/Directory-Options.html 66 | dnl https://clang.llvm.org/docs/ClangCommandLineReference.html#cmdoption-clang-isystem-directory 67 | dnl Do not change "-I/usr/include" to "-isystem /usr/include" because that 68 | dnl is not necessary (/usr/include is already a system directory) and because 69 | dnl it would break GCC's #include_next. 70 | AC_DEFUN([SUPPRESS_WARNINGS], 71 | [$(echo $1 |${SED} -E -e 's/(^| )-I/\1-isystem /g' -e 's;-isystem /usr/include([/ ]|$);-I/usr/include\1;g')]) 72 | 73 | if test x$enable_fuzz = xyes; then 74 | use_tests=no 75 | use_bench=no 76 | ## TODO make these configurable 77 | SANITIZER_LDFLAGS="-fsanitize=fuzzer,address,undefined" 78 | SANITIZER_CXXFLAGS="-fsanitize=fuzzer,address,undefined" 79 | fi 80 | 81 | if test x$enable_fuzz_cov = xyes; then 82 | use_tests=no 83 | use_bench=no 84 | CXXFLAGS="$CXXFLAGS -fprofile-instr-generate -fcoverage-mapping" 85 | fi 86 | 87 | if test "x$enable_debug" = xyes; then 88 | dnl Clear default -g -O2 flags 89 | ## TODO: dont reset CXXFLAGS when they are already set. 90 | CXXFLAGS="" 91 | 92 | dnl Disable all optimizations 93 | AX_CHECK_COMPILE_FLAG([-O0], [[DEBUG_CXXFLAGS="$DEBUG_CXXFLAGS -O0"]],,[[$CXXFLAG_WERROR]]) 94 | 95 | dnl Prefer -g3, fall back to -g if that is unavailable. 96 | AX_CHECK_COMPILE_FLAG( 97 | [-g3], 98 | [[DEBUG_CXXFLAGS="$DEBUG_CXXFLAGS -g3"]], 99 | [AX_CHECK_COMPILE_FLAG([-g],[[DEBUG_CXXFLAGS="$DEBUG_CXXFLAGS -g"]],,[[$CXXFLAG_WERROR]])], 100 | [[$CXXFLAG_WERROR]]) 101 | 102 | AX_CHECK_PREPROC_FLAG([-DDEBUG],[[DEBUG_CPPFLAGS="$DEBUG_CPPFLAGS -DDEBUG"]],,[[$CXXFLAG_WERROR]]) 103 | AX_CHECK_PREPROC_FLAG([-DDEBUG_LOCKORDER],[[DEBUG_CPPFLAGS="$DEBUG_CPPFLAGS -DDEBUG_LOCKORDER"]],,[[$CXXFLAG_WERROR]]) 104 | AX_CHECK_PREPROC_FLAG([-DABORT_ON_FAILED_ASSUME],[[DEBUG_CPPFLAGS="$DEBUG_CPPFLAGS -DABORT_ON_FAILED_ASSUME"]],,[[$CXXFLAG_WERROR]]) 105 | AX_CHECK_COMPILE_FLAG([-ftrapv],[DEBUG_CXXFLAGS="$DEBUG_CXXFLAGS -ftrapv"],,[[$CXXFLAG_WERROR]]) 106 | fi 107 | 108 | ## Check for boost test framework if test are enabled. 
109 | if test x$use_tests = xyes; then 110 | 111 | dnl Minimum required Boost version 112 | define(MINIMUM_REQUIRED_BOOST, 1.58.0) 113 | 114 | dnl Check for Boost libs 115 | AX_BOOST_BASE([MINIMUM_REQUIRED_BOOST]) 116 | 117 | if test x$suppress_external_warnings != xno; then 118 | BOOST_CPPFLAGS=SUPPRESS_WARNINGS($BOOST_CPPFLAGS) 119 | fi 120 | 121 | AX_BOOST_UNIT_TEST_FRAMEWORK 122 | 123 | dnl Determine if -DBOOST_TEST_DYN_LINK is needed 124 | AC_MSG_CHECKING([for dynamic linked boost test]) 125 | TEMP_LIBS="$LIBS" 126 | LIBS="$LIBS $BOOST_LDFLAGS $BOOST_UNIT_TEST_FRAMEWORK_LIB" 127 | TEMP_CPPFLAGS="$CPPFLAGS" 128 | CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" 129 | AC_LINK_IFELSE([AC_LANG_SOURCE([ 130 | #define BOOST_TEST_DYN_LINK 131 | #define BOOST_TEST_MAIN 132 | #include 133 | ])], 134 | [AC_MSG_RESULT(yes)] 135 | [TEST_DEFINES="$TEST_DEFINES -DBOOST_TEST_DYN_LINK"], 136 | [AC_MSG_RESULT(no)]) 137 | LIBS="$TEMP_LIBS" 138 | CPPFLAGS="$TEMP_CPPFLAGS" 139 | 140 | fi 141 | 142 | if test "x$use_ccache" != "xno"; then 143 | AC_MSG_CHECKING(if ccache should be used) 144 | if test x$CCACHE = x; then 145 | if test "x$use_ccache" = "xyes"; then 146 | AC_MSG_ERROR([ccache not found.]); 147 | else 148 | use_ccache=no 149 | fi 150 | else 151 | use_ccache=yes 152 | CC="$ac_cv_path_CCACHE $CC" 153 | CXX="$ac_cv_path_CCACHE $CXX" 154 | fi 155 | AC_MSG_RESULT($use_ccache) 156 | fi 157 | if test "x$use_ccache" = "xyes"; then 158 | AX_CHECK_COMPILE_FLAG([-Qunused-arguments],[NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Qunused-arguments"],,[[$CXXFLAG_WERROR]]) 159 | fi 160 | 161 | VERIFY_DEFINES=-DUTREEXO_VERIFY 162 | RELEASE_DEFINES= 163 | 164 | AC_CONFIG_FILES([ 165 | Makefile 166 | ]) 167 | 168 | AC_SUBST(DEBUG_CPPFLAGS) 169 | AC_SUBST(DEBUG_CXXFLAGS) 170 | AC_SUBST(WARN_CXXFLAGS) 171 | AC_SUBST(NOWARN_CXXFLAGS) 172 | AC_SUBST(VERIFY_DEFINES) 173 | AC_SUBST(RELEASE_DEFINES) 174 | AC_SUBST(SANITIZER_LDFLAGS) 175 | AC_SUBST(SANITIZER_CXXFLAGS) 176 | AM_CONDITIONAL([USE_TESTS], [test x"$use_tests" != x"no"]) 177 | AM_CONDITIONAL([ENABLE_BENCH], [test "$use_bench" = "yes"]) 178 | AM_CONDITIONAL([ENABLE_FUZZ], [test x"$enable_fuzz" != x"no"]) 179 | AC_OUTPUT 180 | 181 | -------------------------------------------------------------------------------- /src/batchproof.cpp: -------------------------------------------------------------------------------- 1 | #include "../include/batchproof.h" 2 | #include "../include/accumulator.h" 3 | #include "crypto/common.h" 4 | #include "state.h" 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | namespace utreexo { 11 | 12 | // https://github.com/bitcoin/bitcoin/blob/7f653c3b22f0a5267822eec017aea6a16752c597/src/util/strencodings.cpp#L580 13 | template 14 | std::string HexStr(const T s) 15 | { 16 | std::string rv; 17 | static constexpr char hexmap[16] = {'0', '1', '2', '3', '4', '5', '6', '7', 18 | '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; 19 | rv.reserve(s.size() * 2); 20 | for (uint8_t v : s) { 21 | rv.push_back(hexmap[v >> 4]); 22 | rv.push_back(hexmap[v & 15]); 23 | } 24 | return rv; 25 | } 26 | 27 | void BatchProof::Serialize(std::vector& bytes) const 28 | { 29 | // Number of targets: 4 bytes 30 | // Number of proof hashes: 4 bytes 31 | // Targets: 4 bytes each 32 | // Proof hashes: 32 bytes each 33 | bytes.resize(4 + 4 + m_targets.size() * 4 + m_proof.size() * 32); 34 | 35 | int data_offset = 0; 36 | WriteBE32(bytes.data(), uint32_t(m_targets.size())); 37 | data_offset += 4; 38 | WriteBE32(bytes.data() + data_offset, uint32_t(m_proof.size())); 39 | data_offset += 4; 40 | 41 
| for (const uint64_t target : m_targets) { 42 | WriteBE32(bytes.data() + data_offset, uint32_t(target)); 43 | data_offset += 4; 44 | } 45 | 46 | for (const Hash& hash : m_proof) { 47 | std::memcpy(bytes.data() + data_offset, hash.data(), 32); 48 | data_offset += 32; 49 | } 50 | } 51 | 52 | bool BatchProof::Unserialize(const std::vector& bytes) 53 | { 54 | if (bytes.size() < 8) { 55 | // 8 byte minimum for the number of targets and proof hashses 56 | return false; 57 | } 58 | 59 | int data_offset = 0; 60 | uint32_t num_targets = ReadBE32(bytes.data()); 61 | data_offset += 4; 62 | uint32_t num_hashes = ReadBE32(bytes.data() + data_offset); 63 | data_offset += 4; 64 | 65 | if (bytes.size() != 8ULL + num_targets * 4ULL + num_hashes * 32ULL) { 66 | return false; 67 | } 68 | 69 | m_targets.clear(); 70 | m_proof.clear(); 71 | m_targets.reserve(num_targets); 72 | m_sorted_targets.reserve(num_targets); 73 | m_proof.reserve(num_hashes); 74 | 75 | for (uint32_t i = 0; i < num_targets; ++i) { 76 | m_targets.push_back(uint64_t(ReadBE32(bytes.data() + data_offset))); 77 | m_sorted_targets.push_back(uint64_t(ReadBE32(bytes.data() + data_offset))); 78 | data_offset += 4; 79 | } 80 | 81 | std::sort(m_sorted_targets.begin(), m_sorted_targets.end()); 82 | 83 | for (uint32_t i = 0; i < num_hashes; ++i) { 84 | Hash hash; 85 | std::memcpy(hash.data(), bytes.data() + data_offset, 32); 86 | data_offset += 32; 87 | m_proof.push_back(hash); 88 | } 89 | 90 | assert(data_offset == bytes.size()); 91 | 92 | return true; 93 | } 94 | 95 | bool BatchProof::CheckSanity(uint64_t num_leaves) const 96 | { 97 | ForestState state(num_leaves); 98 | 99 | if (!state.CheckTargetsSanity(m_sorted_targets)) { 100 | return false; 101 | } 102 | 103 | std::vector proof_positions, tmp; 104 | std::tie(proof_positions, tmp) = state.ProofPositions(m_sorted_targets); 105 | return proof_positions.size() >= m_proof.size(); 106 | } 107 | 108 | bool BatchProof::operator==(const BatchProof& other) 109 | { 110 | return m_targets.size() == other.m_targets.size() && m_proof.size() == other.m_proof.size() && 111 | m_targets == other.m_targets && m_proof == other.m_proof; 112 | } 113 | 114 | void BatchProof::Print() 115 | { 116 | std::cout << "targets: "; 117 | print_vector(m_targets); 118 | 119 | std::cout << "proof: "; 120 | for (const Hash& hash : m_proof) { 121 | std::cout << HexStr(hash) << ", "; 122 | } 123 | 124 | std::cout << std::endl; 125 | } 126 | 127 | const std::vector& BatchProof::GetTargets() const { return m_targets; } 128 | 129 | const std::vector& BatchProof::GetSortedTargets() const { return m_sorted_targets; } 130 | 131 | const std::vector>& BatchProof::GetHashes() const { return m_proof; } 132 | 133 | void UndoBatch::Serialize(std::vector& bytes) const 134 | { 135 | // num adds: 4 bytes 136 | // numm dels: 4 bytes 137 | // del positions: 4bytes * num dels 138 | // del hashes: 32bytes * num dels 139 | bytes.resize(4 + 4 + 4 * m_deleted_positions.size() + 32 * m_deleted_hashes.size()); 140 | 141 | int data_offset = 0; 142 | WriteBE32(bytes.data(), uint32_t(m_num_additions)); 143 | data_offset += 4; 144 | WriteBE32(bytes.data() + data_offset, uint32_t(m_deleted_positions.size())); 145 | data_offset += 4; 146 | 147 | for (const uint64_t& target : m_deleted_positions) { 148 | WriteBE32(bytes.data() + data_offset, uint32_t(target)); 149 | data_offset += 4; 150 | } 151 | 152 | for (const Hash& hash : m_deleted_hashes) { 153 | std::memcpy(bytes.data() + data_offset, hash.data(), 32); 154 | data_offset += 32; 155 | } 156 | } 157 | 158 | 
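
Editorial aside: the Serialize/Unserialize pair above fixes a simple wire layout for BatchProof: a 4-byte big-endian target count, a 4-byte big-endian hash count, then 4 bytes per target and 32 bytes per proof hash. A small sketch of the implied size (illustrative helper, not part of the library):

#include <cassert>
#include <cstdint>

// Size in bytes of a BatchProof as written by Serialize() above.
static uint64_t BatchProofSize(uint32_t num_targets, uint32_t num_hashes)
{
    return 4 + 4 + 4ULL * num_targets + 32ULL * num_hashes;
}

int main()
{
    assert(BatchProofSize(0, 0) == 8); // header only
    // A proof for leaf 0 of the 15-leaf forest in the unit tests carries the
    // three hashes at positions 1, 17 and 25.
    assert(BatchProofSize(1, 3) == 108);
    return 0;
}
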
bool UndoBatch::Unserialize(const std::vector& bytes) 159 | { 160 | if (bytes.size() < 8) { 161 | // 8 byte minmum for the number of additions and number of targets 162 | return false; 163 | } 164 | 165 | int data_offset = 0; 166 | m_num_additions = static_cast(ReadBE32(bytes.data())); 167 | data_offset += 4; 168 | uint32_t num_targets = ReadBE32(bytes.data() + data_offset); 169 | data_offset += 4; 170 | 171 | if (bytes.size() != 4 + 4 + 4 * num_targets + 32 * num_targets) { 172 | return false; 173 | } 174 | 175 | m_deleted_positions.clear(); 176 | m_deleted_hashes.clear(); 177 | m_deleted_positions.reserve(num_targets); 178 | m_deleted_hashes.reserve(num_targets); 179 | 180 | for (uint32_t i = 0; i < num_targets; ++i) { 181 | m_deleted_positions.push_back(static_cast(ReadBE32(bytes.data() + data_offset))); 182 | data_offset += 4; 183 | } 184 | 185 | for (uint32_t i = 0; i < num_targets; ++i) { 186 | Hash hash; 187 | std::memcpy(hash.data(), bytes.data() + data_offset, 32); 188 | data_offset += 32; 189 | m_deleted_hashes.push_back(hash); 190 | } 191 | 192 | assert(data_offset == bytes.size()); 193 | 194 | return true; 195 | } 196 | 197 | uint64_t UndoBatch::GetNumAdds() const { return m_num_additions; } 198 | 199 | const std::vector& UndoBatch::GetDeletedPositions() const { return m_deleted_positions; } 200 | 201 | const std::vector>& UndoBatch::GetDeletedHashes() const { return m_deleted_hashes; } 202 | 203 | bool UndoBatch::operator==(const UndoBatch& other) 204 | { 205 | return m_num_additions == other.m_num_additions && 206 | m_deleted_positions.size() == other.m_deleted_positions.size() && 207 | m_deleted_hashes.size() == other.m_deleted_hashes.size() && 208 | m_deleted_positions == other.m_deleted_positions && 209 | m_deleted_hashes == other.m_deleted_hashes; 210 | } 211 | 212 | void UndoBatch::Print() 213 | { 214 | std::cout << "prev num adds: " << m_num_additions << std::endl; 215 | std::cout << "deleted positions: "; 216 | print_vector(m_deleted_positions); 217 | 218 | std::cout << "deleted hashes: "; 219 | for (const Hash& hash : m_deleted_hashes) { 220 | std::cout << HexStr(hash) << ", "; 221 | } 222 | 223 | std::cout << std::endl; 224 | } 225 | 226 | }; // namespace utreexo 227 | -------------------------------------------------------------------------------- /src/accumulator.cpp: -------------------------------------------------------------------------------- 1 | #include "include/accumulator.h" 2 | #include "check.h" 3 | #include "crypto/common.h" 4 | #include "crypto/sha512.h" 5 | #include "include/batchproof.h" 6 | #include "node.h" 7 | #include "state.h" 8 | 9 | #include 10 | #include 11 | #include 12 | 13 | 14 | namespace utreexo { 15 | 16 | Accumulator::Accumulator(uint64_t num_leaves) 17 | { 18 | m_num_leaves = num_leaves; 19 | m_roots.reserve(64); 20 | } 21 | 22 | Accumulator::~Accumulator() {} 23 | 24 | bool Accumulator::Modify(const std::vector& leaves, const std::vector& targets) 25 | { 26 | if (!Remove(targets)) return false; 27 | if (!Add(leaves)) return false; 28 | 29 | return true; 30 | } 31 | 32 | void Accumulator::Roots(std::vector& roots) const 33 | { 34 | roots.clear(); 35 | roots.reserve(m_roots.size()); 36 | 37 | for (auto root : m_roots) { 38 | roots.push_back(root->GetHash()); 39 | } 40 | } 41 | 42 | uint64_t Accumulator::NumLeaves() const 43 | { 44 | return m_num_leaves; 45 | } 46 | 47 | // https://github.com/bitcoin/bitcoin/blob/7f653c3b22f0a5267822eec017aea6a16752c597/src/util/strencodings.cpp#L580 48 | template 49 | std::string HexStr(const T s) 50 | 
{ 51 | std::string rv; 52 | static constexpr char hexmap[16] = {'0', '1', '2', '3', '4', '5', '6', '7', 53 | '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; 54 | rv.reserve(s.size() * 2); 55 | for (uint8_t v : s) { 56 | rv.push_back(hexmap[v >> 4]); 57 | rv.push_back(hexmap[v & 15]); 58 | } 59 | return rv; 60 | } 61 | 62 | void Accumulator::ParentHash(Hash& parent, const Hash& left, const Hash& right) 63 | { 64 | CSHA512 hasher(CSHA512::OUTPUT_SIZE_256); 65 | hasher.Write(left.data(), 32); 66 | hasher.Write(right.data(), 32); 67 | hasher.Finalize256(parent.data()); 68 | } 69 | 70 | bool Accumulator::ComparePositionMap(Accumulator& other) const 71 | { 72 | for (auto [hash, pos] : m_posmap) { 73 | auto it_other = other.m_posmap.find(hash); 74 | if (it_other->second != pos) { 75 | return false; 76 | } 77 | } 78 | 79 | return true; 80 | } 81 | 82 | void Accumulator::PrintPositionMap() const 83 | { 84 | std::cout << "pos map:" << std::endl; 85 | for (auto [hash, pos] : m_posmap) { 86 | std::cout << HexStr(hash) << " -> " << pos << std::endl; 87 | } 88 | } 89 | 90 | void Accumulator::PrintRoots() const 91 | { 92 | for (auto root : m_roots) { 93 | std::cout << "root: " << root->m_position << ":" << HexStr(root->GetHash()) << std::endl; 94 | } 95 | } 96 | 97 | void Accumulator::UpdatePositionMapForRange(uint64_t from, uint64_t to, uint64_t range) 98 | { 99 | if (m_posmap.size() == 0) { 100 | // Nothing to update. 101 | return; 102 | } 103 | 104 | std::vector from_range = ReadLeafRange(from, range); 105 | std::vector to_range = ReadLeafRange(to, range); 106 | 107 | int64_t offset = static_cast(to) - static_cast(from); 108 | for (const Hash& hash : from_range) { 109 | auto pos_it = m_posmap.find(hash); 110 | if (m_posmap.find(hash) != m_posmap.end()) { 111 | m_posmap[hash] = static_cast(pos_it->second + offset); 112 | } 113 | } 114 | 115 | for (const Hash& hash : to_range) { 116 | auto pos_it = m_posmap.find(hash); 117 | if (m_posmap.find(hash) != m_posmap.end()) { 118 | m_posmap[hash] = static_cast(pos_it->second - offset); 119 | } 120 | } 121 | } 122 | 123 | void Accumulator::UpdatePositionMapForSubtreeSwap(uint64_t from, uint64_t to) 124 | { 125 | ForestState current_state = ForestState(m_num_leaves); 126 | uint8_t row = current_state.DetectRow(from); 127 | 128 | uint64_t start_from = current_state.LeftDescendant(from, row); 129 | uint64_t start_to = current_state.LeftDescendant(to, row); 130 | uint64_t range = 1ULL << row; 131 | 132 | UpdatePositionMapForRange(start_from, start_to, range); 133 | } 134 | 135 | bool Accumulator::Add(const std::vector& leaves) 136 | { 137 | CHECK_SAFE([](const std::unordered_map& posmap, 138 | const std::vector& leaves) { 139 | // Each leaf should be unique, that means we can't add a leaf that 140 | // already exits in the position map. 141 | for (const Leaf& leaf : leaves) { 142 | if (posmap.find(leaf.first) != posmap.end()) return false; 143 | } 144 | return true; 145 | }(m_posmap, leaves)); 146 | 147 | ForestState current_state(m_num_leaves); 148 | // TODO Adding leaves can be batched. Do implement this later. 149 | for (auto leaf = leaves.begin(); leaf < leaves.end(); ++leaf) { 150 | int root = m_roots.size() - 1; 151 | // Create a new leaf and append it to the end of roots. 152 | NodePtr new_root = this->NewLeaf(*leaf); 153 | 154 | // Merge the last two roots into one for every consecutive root from row 0 upwards. 
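
Editorial aside: the loop right below this comment merges the freshly appended leaf with one existing root for every consecutive root starting at row 0, i.e. once per trailing set bit of the current leaf count. A hedged sketch of that counting rule (illustrative only, not library code):

#include <cassert>
#include <cstdint>

// Number of MergeRoot calls triggered by adding one leaf to a forest that
// currently holds num_leaves leaves.
static int MergesForAdd(uint64_t num_leaves)
{
    int merges = 0;
    while (num_leaves & 1) {
        ++merges;
        num_leaves >>= 1;
    }
    return merges;
}

int main()
{
    assert(MergesForAdd(0) == 0);  // first leaf simply becomes a root
    assert(MergesForAdd(8) == 0);  // 0b1000: the new leaf is appended as its own root
    assert(MergesForAdd(15) == 4); // 0b1111: four merges produce a perfect 16-leaf tree
    return 0;
}
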
155 | for (uint8_t row = 0; current_state.HasRoot(row); ++row) { 156 | Hash parent_hash; 157 | Accumulator::ParentHash(parent_hash, m_roots[root]->GetHash(), new_root->GetHash()); 158 | new_root = MergeRoot(current_state.Parent(new_root->m_position), parent_hash); 159 | // Decreasing because we are going in reverse order. 160 | --root; 161 | } 162 | 163 | uint8_t prev_rows = current_state.NumRows(); 164 | 165 | ++m_num_leaves; 166 | current_state = ForestState(m_num_leaves); 167 | 168 | // Update the root positions. 169 | // This only needs to happen if the number of rows in the forest changes. 170 | // In this case there will always be exactly two roots, one on row 0 and one 171 | // on the next-to-last row. 172 | 173 | if (prev_rows == 0 || prev_rows == current_state.NumRows()) { 174 | continue; 175 | } 176 | 177 | assert(m_roots.size() >= 2); 178 | m_roots[1]->m_position = current_state.RootPosition(0); 179 | m_roots[0]->m_position = current_state.RootPosition(current_state.NumRows() - 1); 180 | } 181 | 182 | return true; 183 | } 184 | 185 | bool Accumulator::Remove(const std::vector& targets) 186 | { 187 | if (targets.size() == 0) { 188 | return true; 189 | } 190 | 191 | ForestState current_state(m_num_leaves); 192 | 193 | // Perform sanity checks on targets. (e.g.: sorted no duplicates) 194 | if (!current_state.CheckTargetsSanity(targets)) { 195 | return false; 196 | } 197 | 198 | std::vector> swaps = current_state.Transform(targets); 199 | // Store the nodes that have to be rehashed because their children changed. 200 | // These nodes are "dirty". 201 | std::vector> dirty_nodes; 202 | 203 | for (uint8_t row = 0; row < current_state.NumRows(); ++row) { 204 | std::vector> next_dirty_nodes; 205 | 206 | if (row < swaps.size()) { 207 | // Execute all the swaps in this row. 208 | for (const ForestState::Swap swap : swaps.at(row)) { 209 | UpdatePositionMapForSubtreeSwap(swap.m_from, swap.m_to); 210 | NodePtr swap_dirt = SwapSubTrees(swap.m_from, swap.m_to); 211 | if (!swap.m_collapse) dirty_nodes.push_back(swap_dirt); 212 | } 213 | } 214 | 215 | // Rehash all the dirt after swapping. 216 | for (NodePtr dirt : dirty_nodes) { 217 | dirt->ReHash(); 218 | if (next_dirty_nodes.size() == 0 || next_dirty_nodes.back()->m_position != current_state.Parent(dirt->m_position)) { 219 | NodePtr parent = dirt->Parent(); 220 | if (parent) next_dirty_nodes.push_back(parent); 221 | } 222 | } 223 | 224 | dirty_nodes = next_dirty_nodes; 225 | } 226 | 227 | assert(dirty_nodes.size() == 0); 228 | 229 | uint64_t next_num_leaves = m_num_leaves - targets.size(); 230 | FinalizeRemove(next_num_leaves); 231 | m_num_leaves = next_num_leaves; 232 | 233 | return true; 234 | } 235 | 236 | bool Accumulator::Prove(BatchProof& proof, const std::vector& target_hashes) const 237 | { 238 | // Figure out the positions of the target hashes via the position map. 239 | std::vector targets; 240 | targets.reserve(target_hashes.size()); 241 | for (const Hash& hash : target_hashes) { 242 | auto posmap_it = m_posmap.find(hash); 243 | if (posmap_it == m_posmap.end()) { 244 | // TODO: error 245 | return false; 246 | } 247 | targets.push_back(posmap_it->second); 248 | } 249 | 250 | // We need the sorted targets to compute the proof positions. 
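
Editorial aside: as the comment above says, Prove keeps the caller's target order for the resulting proof while a sorted copy drives the sanity check and the proof-position computation. A self-contained illustration of that split, mirroring the example given in accumulator.h:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

int main()
{
    std::vector<uint64_t> targets{50, 10, 20}; // order as requested by the caller
    std::vector<uint64_t> sorted_targets(targets);
    std::sort(sorted_targets.begin(), sorted_targets.end());

    // proof.targets would keep {50, 10, 20}; positions are derived from the sorted copy.
    assert((targets == std::vector<uint64_t>{50, 10, 20}));
    assert((sorted_targets == std::vector<uint64_t>{10, 20, 50}));
    return 0;
}
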
251 | std::vector sorted_targets(targets); 252 | std::sort(sorted_targets.begin(), sorted_targets.end()); 253 | 254 | if (!ForestState(m_num_leaves).CheckTargetsSanity(sorted_targets)) { 255 | return false; 256 | } 257 | 258 | // Read proof hashes from the forest using the proof positions 259 | auto proof_positions = ForestState(m_num_leaves).ProofPositions(sorted_targets); 260 | std::vector proof_hashes(proof_positions.first.size()); 261 | for (int i = 0; i < proof_hashes.size(); i++) { 262 | std::optional hash = Read(proof_positions.first[i]); 263 | if (!hash) return false; 264 | 265 | proof_hashes[i] = hash.value(); 266 | } 267 | 268 | // Create the batch proof from the *unsorted* targets and the proof hashes. 269 | proof = BatchProof(targets, proof_hashes); 270 | return true; 271 | } 272 | 273 | }; // namespace utreexo 274 | -------------------------------------------------------------------------------- /src/crypto/sha512.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2014-2019 The Bitcoin Core developers 2 | // Distributed under the MIT software license, see the accompanying 3 | // file COPYING or http://www.opensource.org/licenses/mit-license.php. 4 | 5 | #include 6 | 7 | #include 8 | 9 | #include 10 | 11 | // Internal implementation code. 12 | namespace { 13 | /// Internal SHA-512 implementation. 14 | namespace sha512 { 15 | uint64_t inline Ch(uint64_t x, uint64_t y, uint64_t z) { return z ^ (x & (y ^ z)); } 16 | uint64_t inline Maj(uint64_t x, uint64_t y, uint64_t z) { return (x & y) | (z & (x | y)); } 17 | uint64_t inline Sigma0(uint64_t x) { return (x >> 28 | x << 36) ^ (x >> 34 | x << 30) ^ (x >> 39 | x << 25); } 18 | uint64_t inline Sigma1(uint64_t x) { return (x >> 14 | x << 50) ^ (x >> 18 | x << 46) ^ (x >> 41 | x << 23); } 19 | uint64_t inline sigma0(uint64_t x) { return (x >> 1 | x << 63) ^ (x >> 8 | x << 56) ^ (x >> 7); } 20 | uint64_t inline sigma1(uint64_t x) { return (x >> 19 | x << 45) ^ (x >> 61 | x << 3) ^ (x >> 6); } 21 | 22 | /** One round of SHA-512. */ 23 | void inline Round(uint64_t a, uint64_t b, uint64_t c, uint64_t& d, uint64_t e, uint64_t f, uint64_t g, uint64_t& h, uint64_t k, uint64_t w) 24 | { 25 | uint64_t t1 = h + Sigma1(e) + Ch(e, f, g) + k + w; 26 | uint64_t t2 = Sigma0(a) + Maj(a, b, c); 27 | d += t1; 28 | h = t1 + t2; 29 | } 30 | 31 | /** Initialize SHA-512 state. */ 32 | void inline Initialize(uint64_t* s) 33 | { 34 | s[0] = 0x6a09e667f3bcc908ull; 35 | s[1] = 0xbb67ae8584caa73bull; 36 | s[2] = 0x3c6ef372fe94f82bull; 37 | s[3] = 0xa54ff53a5f1d36f1ull; 38 | s[4] = 0x510e527fade682d1ull; 39 | s[5] = 0x9b05688c2b3e6c1full; 40 | s[6] = 0x1f83d9abfb41bd6bull; 41 | s[7] = 0x5be0cd19137e2179ull; 42 | } 43 | 44 | /** Initialize SHA-512/256 state. */ 45 | void inline Initialize256(uint64_t* s) 46 | { 47 | s[0] = 0x22312194fc2bf72cull; 48 | s[1] = 0x9f555fa3c84c64c2ull; 49 | s[2] = 0x2393b86b6f53b151ull; 50 | s[3] = 0x963877195940eabdull; 51 | s[4] = 0x96283ee2a88effe3ull; 52 | s[5] = 0xbe5e1e2553863992ull; 53 | s[6] = 0x2b0199fc2c85b8aaull; 54 | s[7] = 0x0eb72ddc81c52ca2ull; 55 | } 56 | 57 | /** Perform one SHA-512 transformation, processing a 128-byte chunk. 
*/ 58 | void Transform(uint64_t* s, const unsigned char* chunk) 59 | { 60 | uint64_t a = s[0], b = s[1], c = s[2], d = s[3], e = s[4], f = s[5], g = s[6], h = s[7]; 61 | uint64_t w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15; 62 | 63 | Round(a, b, c, d, e, f, g, h, 0x428a2f98d728ae22ull, w0 = ReadBE64(chunk + 0)); 64 | Round(h, a, b, c, d, e, f, g, 0x7137449123ef65cdull, w1 = ReadBE64(chunk + 8)); 65 | Round(g, h, a, b, c, d, e, f, 0xb5c0fbcfec4d3b2full, w2 = ReadBE64(chunk + 16)); 66 | Round(f, g, h, a, b, c, d, e, 0xe9b5dba58189dbbcull, w3 = ReadBE64(chunk + 24)); 67 | Round(e, f, g, h, a, b, c, d, 0x3956c25bf348b538ull, w4 = ReadBE64(chunk + 32)); 68 | Round(d, e, f, g, h, a, b, c, 0x59f111f1b605d019ull, w5 = ReadBE64(chunk + 40)); 69 | Round(c, d, e, f, g, h, a, b, 0x923f82a4af194f9bull, w6 = ReadBE64(chunk + 48)); 70 | Round(b, c, d, e, f, g, h, a, 0xab1c5ed5da6d8118ull, w7 = ReadBE64(chunk + 56)); 71 | Round(a, b, c, d, e, f, g, h, 0xd807aa98a3030242ull, w8 = ReadBE64(chunk + 64)); 72 | Round(h, a, b, c, d, e, f, g, 0x12835b0145706fbeull, w9 = ReadBE64(chunk + 72)); 73 | Round(g, h, a, b, c, d, e, f, 0x243185be4ee4b28cull, w10 = ReadBE64(chunk + 80)); 74 | Round(f, g, h, a, b, c, d, e, 0x550c7dc3d5ffb4e2ull, w11 = ReadBE64(chunk + 88)); 75 | Round(e, f, g, h, a, b, c, d, 0x72be5d74f27b896full, w12 = ReadBE64(chunk + 96)); 76 | Round(d, e, f, g, h, a, b, c, 0x80deb1fe3b1696b1ull, w13 = ReadBE64(chunk + 104)); 77 | Round(c, d, e, f, g, h, a, b, 0x9bdc06a725c71235ull, w14 = ReadBE64(chunk + 112)); 78 | Round(b, c, d, e, f, g, h, a, 0xc19bf174cf692694ull, w15 = ReadBE64(chunk + 120)); 79 | 80 | Round(a, b, c, d, e, f, g, h, 0xe49b69c19ef14ad2ull, w0 += sigma1(w14) + w9 + sigma0(w1)); 81 | Round(h, a, b, c, d, e, f, g, 0xefbe4786384f25e3ull, w1 += sigma1(w15) + w10 + sigma0(w2)); 82 | Round(g, h, a, b, c, d, e, f, 0x0fc19dc68b8cd5b5ull, w2 += sigma1(w0) + w11 + sigma0(w3)); 83 | Round(f, g, h, a, b, c, d, e, 0x240ca1cc77ac9c65ull, w3 += sigma1(w1) + w12 + sigma0(w4)); 84 | Round(e, f, g, h, a, b, c, d, 0x2de92c6f592b0275ull, w4 += sigma1(w2) + w13 + sigma0(w5)); 85 | Round(d, e, f, g, h, a, b, c, 0x4a7484aa6ea6e483ull, w5 += sigma1(w3) + w14 + sigma0(w6)); 86 | Round(c, d, e, f, g, h, a, b, 0x5cb0a9dcbd41fbd4ull, w6 += sigma1(w4) + w15 + sigma0(w7)); 87 | Round(b, c, d, e, f, g, h, a, 0x76f988da831153b5ull, w7 += sigma1(w5) + w0 + sigma0(w8)); 88 | Round(a, b, c, d, e, f, g, h, 0x983e5152ee66dfabull, w8 += sigma1(w6) + w1 + sigma0(w9)); 89 | Round(h, a, b, c, d, e, f, g, 0xa831c66d2db43210ull, w9 += sigma1(w7) + w2 + sigma0(w10)); 90 | Round(g, h, a, b, c, d, e, f, 0xb00327c898fb213full, w10 += sigma1(w8) + w3 + sigma0(w11)); 91 | Round(f, g, h, a, b, c, d, e, 0xbf597fc7beef0ee4ull, w11 += sigma1(w9) + w4 + sigma0(w12)); 92 | Round(e, f, g, h, a, b, c, d, 0xc6e00bf33da88fc2ull, w12 += sigma1(w10) + w5 + sigma0(w13)); 93 | Round(d, e, f, g, h, a, b, c, 0xd5a79147930aa725ull, w13 += sigma1(w11) + w6 + sigma0(w14)); 94 | Round(c, d, e, f, g, h, a, b, 0x06ca6351e003826full, w14 += sigma1(w12) + w7 + sigma0(w15)); 95 | Round(b, c, d, e, f, g, h, a, 0x142929670a0e6e70ull, w15 += sigma1(w13) + w8 + sigma0(w0)); 96 | 97 | Round(a, b, c, d, e, f, g, h, 0x27b70a8546d22ffcull, w0 += sigma1(w14) + w9 + sigma0(w1)); 98 | Round(h, a, b, c, d, e, f, g, 0x2e1b21385c26c926ull, w1 += sigma1(w15) + w10 + sigma0(w2)); 99 | Round(g, h, a, b, c, d, e, f, 0x4d2c6dfc5ac42aedull, w2 += sigma1(w0) + w11 + sigma0(w3)); 100 | Round(f, g, h, a, b, c, d, e, 0x53380d139d95b3dfull, w3 += 
sigma1(w1) + w12 + sigma0(w4)); 101 | Round(e, f, g, h, a, b, c, d, 0x650a73548baf63deull, w4 += sigma1(w2) + w13 + sigma0(w5)); 102 | Round(d, e, f, g, h, a, b, c, 0x766a0abb3c77b2a8ull, w5 += sigma1(w3) + w14 + sigma0(w6)); 103 | Round(c, d, e, f, g, h, a, b, 0x81c2c92e47edaee6ull, w6 += sigma1(w4) + w15 + sigma0(w7)); 104 | Round(b, c, d, e, f, g, h, a, 0x92722c851482353bull, w7 += sigma1(w5) + w0 + sigma0(w8)); 105 | Round(a, b, c, d, e, f, g, h, 0xa2bfe8a14cf10364ull, w8 += sigma1(w6) + w1 + sigma0(w9)); 106 | Round(h, a, b, c, d, e, f, g, 0xa81a664bbc423001ull, w9 += sigma1(w7) + w2 + sigma0(w10)); 107 | Round(g, h, a, b, c, d, e, f, 0xc24b8b70d0f89791ull, w10 += sigma1(w8) + w3 + sigma0(w11)); 108 | Round(f, g, h, a, b, c, d, e, 0xc76c51a30654be30ull, w11 += sigma1(w9) + w4 + sigma0(w12)); 109 | Round(e, f, g, h, a, b, c, d, 0xd192e819d6ef5218ull, w12 += sigma1(w10) + w5 + sigma0(w13)); 110 | Round(d, e, f, g, h, a, b, c, 0xd69906245565a910ull, w13 += sigma1(w11) + w6 + sigma0(w14)); 111 | Round(c, d, e, f, g, h, a, b, 0xf40e35855771202aull, w14 += sigma1(w12) + w7 + sigma0(w15)); 112 | Round(b, c, d, e, f, g, h, a, 0x106aa07032bbd1b8ull, w15 += sigma1(w13) + w8 + sigma0(w0)); 113 | 114 | Round(a, b, c, d, e, f, g, h, 0x19a4c116b8d2d0c8ull, w0 += sigma1(w14) + w9 + sigma0(w1)); 115 | Round(h, a, b, c, d, e, f, g, 0x1e376c085141ab53ull, w1 += sigma1(w15) + w10 + sigma0(w2)); 116 | Round(g, h, a, b, c, d, e, f, 0x2748774cdf8eeb99ull, w2 += sigma1(w0) + w11 + sigma0(w3)); 117 | Round(f, g, h, a, b, c, d, e, 0x34b0bcb5e19b48a8ull, w3 += sigma1(w1) + w12 + sigma0(w4)); 118 | Round(e, f, g, h, a, b, c, d, 0x391c0cb3c5c95a63ull, w4 += sigma1(w2) + w13 + sigma0(w5)); 119 | Round(d, e, f, g, h, a, b, c, 0x4ed8aa4ae3418acbull, w5 += sigma1(w3) + w14 + sigma0(w6)); 120 | Round(c, d, e, f, g, h, a, b, 0x5b9cca4f7763e373ull, w6 += sigma1(w4) + w15 + sigma0(w7)); 121 | Round(b, c, d, e, f, g, h, a, 0x682e6ff3d6b2b8a3ull, w7 += sigma1(w5) + w0 + sigma0(w8)); 122 | Round(a, b, c, d, e, f, g, h, 0x748f82ee5defb2fcull, w8 += sigma1(w6) + w1 + sigma0(w9)); 123 | Round(h, a, b, c, d, e, f, g, 0x78a5636f43172f60ull, w9 += sigma1(w7) + w2 + sigma0(w10)); 124 | Round(g, h, a, b, c, d, e, f, 0x84c87814a1f0ab72ull, w10 += sigma1(w8) + w3 + sigma0(w11)); 125 | Round(f, g, h, a, b, c, d, e, 0x8cc702081a6439ecull, w11 += sigma1(w9) + w4 + sigma0(w12)); 126 | Round(e, f, g, h, a, b, c, d, 0x90befffa23631e28ull, w12 += sigma1(w10) + w5 + sigma0(w13)); 127 | Round(d, e, f, g, h, a, b, c, 0xa4506cebde82bde9ull, w13 += sigma1(w11) + w6 + sigma0(w14)); 128 | Round(c, d, e, f, g, h, a, b, 0xbef9a3f7b2c67915ull, w14 += sigma1(w12) + w7 + sigma0(w15)); 129 | Round(b, c, d, e, f, g, h, a, 0xc67178f2e372532bull, w15 += sigma1(w13) + w8 + sigma0(w0)); 130 | 131 | Round(a, b, c, d, e, f, g, h, 0xca273eceea26619cull, w0 += sigma1(w14) + w9 + sigma0(w1)); 132 | Round(h, a, b, c, d, e, f, g, 0xd186b8c721c0c207ull, w1 += sigma1(w15) + w10 + sigma0(w2)); 133 | Round(g, h, a, b, c, d, e, f, 0xeada7dd6cde0eb1eull, w2 += sigma1(w0) + w11 + sigma0(w3)); 134 | Round(f, g, h, a, b, c, d, e, 0xf57d4f7fee6ed178ull, w3 += sigma1(w1) + w12 + sigma0(w4)); 135 | Round(e, f, g, h, a, b, c, d, 0x06f067aa72176fbaull, w4 += sigma1(w2) + w13 + sigma0(w5)); 136 | Round(d, e, f, g, h, a, b, c, 0x0a637dc5a2c898a6ull, w5 += sigma1(w3) + w14 + sigma0(w6)); 137 | Round(c, d, e, f, g, h, a, b, 0x113f9804bef90daeull, w6 += sigma1(w4) + w15 + sigma0(w7)); 138 | Round(b, c, d, e, f, g, h, a, 0x1b710b35131c471bull, w7 += sigma1(w5) + w0 + sigma0(w8)); 139 
| Round(a, b, c, d, e, f, g, h, 0x28db77f523047d84ull, w8 += sigma1(w6) + w1 + sigma0(w9)); 140 | Round(h, a, b, c, d, e, f, g, 0x32caab7b40c72493ull, w9 += sigma1(w7) + w2 + sigma0(w10)); 141 | Round(g, h, a, b, c, d, e, f, 0x3c9ebe0a15c9bebcull, w10 += sigma1(w8) + w3 + sigma0(w11)); 142 | Round(f, g, h, a, b, c, d, e, 0x431d67c49c100d4cull, w11 += sigma1(w9) + w4 + sigma0(w12)); 143 | Round(e, f, g, h, a, b, c, d, 0x4cc5d4becb3e42b6ull, w12 += sigma1(w10) + w5 + sigma0(w13)); 144 | Round(d, e, f, g, h, a, b, c, 0x597f299cfc657e2aull, w13 += sigma1(w11) + w6 + sigma0(w14)); 145 | Round(c, d, e, f, g, h, a, b, 0x5fcb6fab3ad6faecull, w14 + sigma1(w12) + w7 + sigma0(w15)); 146 | Round(b, c, d, e, f, g, h, a, 0x6c44198c4a475817ull, w15 + sigma1(w13) + w8 + sigma0(w0)); 147 | 148 | s[0] += a; 149 | s[1] += b; 150 | s[2] += c; 151 | s[3] += d; 152 | s[4] += e; 153 | s[5] += f; 154 | s[6] += g; 155 | s[7] += h; 156 | } 157 | 158 | } // namespace sha512 159 | 160 | } // namespace 161 | 162 | namespace utreexo { 163 | 164 | ////// SHA-512 165 | 166 | CSHA512::CSHA512() : bytes(0) 167 | { 168 | sha512::Initialize(s); 169 | } 170 | 171 | CSHA512::CSHA512(int output_size) : bytes(0) 172 | { 173 | switch (output_size) { 174 | case OUTPUT_SIZE_256: 175 | sha512::Initialize256(s); 176 | break; 177 | default: 178 | sha512::Initialize(s); 179 | } 180 | } 181 | 182 | CSHA512& CSHA512::Write(const unsigned char* data, size_t len) 183 | { 184 | const unsigned char* end = data + len; 185 | size_t bufsize = bytes % 128; 186 | if (bufsize && bufsize + len >= 128) { 187 | // Fill the buffer, and process it. 188 | memcpy(buf + bufsize, data, 128 - bufsize); 189 | bytes += 128 - bufsize; 190 | data += 128 - bufsize; 191 | sha512::Transform(s, buf); 192 | bufsize = 0; 193 | } 194 | while (end - data >= 128) { 195 | // Process full chunks directly from the source. 196 | sha512::Transform(s, data); 197 | data += 128; 198 | bytes += 128; 199 | } 200 | if (end > data) { 201 | // Fill the buffer with what remains. 
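
Editorial aside: CSHA512 doubles as the library's SHA-512/256 implementation; constructing it with OUTPUT_SIZE_256 selects the truncated IV and Finalize256 emits a 32-byte digest, which is how Accumulator::ParentHash hashes the 64-byte concatenation of two child hashes. A usage sketch (the include path and local Hash alias are assumptions):

#include "crypto/sha512.h" // assumed include path within this source tree

#include <array>
#include <cstdint>

using Hash = std::array<uint8_t, 32>; // mirrors utreexo::Hash

// Hash two 32-byte children into their parent, as ParentHash() above does.
static Hash HashParent(const Hash& left, const Hash& right)
{
    Hash parent;
    utreexo::CSHA512 hasher(utreexo::CSHA512::OUTPUT_SIZE_256);
    hasher.Write(left.data(), 32);
    hasher.Write(right.data(), 32);
    hasher.Finalize256(parent.data());
    return parent;
}
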
202 | memcpy(buf + bufsize, data, end - data); 203 | bytes += end - data; 204 | } 205 | return *this; 206 | } 207 | 208 | void CSHA512::Finalize(unsigned char hash[OUTPUT_SIZE]) 209 | { 210 | static const unsigned char pad[128] = {0x80}; 211 | unsigned char sizedesc[16] = {0x00}; 212 | WriteBE64(sizedesc + 8, bytes << 3); 213 | Write(pad, 1 + ((239 - (bytes % 128)) % 128)); 214 | Write(sizedesc, 16); 215 | WriteBE64(hash, s[0]); 216 | WriteBE64(hash + 8, s[1]); 217 | WriteBE64(hash + 16, s[2]); 218 | WriteBE64(hash + 24, s[3]); 219 | WriteBE64(hash + 32, s[4]); 220 | WriteBE64(hash + 40, s[5]); 221 | WriteBE64(hash + 48, s[6]); 222 | WriteBE64(hash + 56, s[7]); 223 | } 224 | 225 | void CSHA512::Finalize256(unsigned char hash[OUTPUT_SIZE]) 226 | { 227 | static const unsigned char pad[128] = {0x80}; 228 | unsigned char sizedesc[16] = {0x00}; 229 | WriteBE64(sizedesc + 8, bytes << 3); 230 | Write(pad, 1 + ((239 - (bytes % 128)) % 128)); 231 | Write(sizedesc, 16); 232 | WriteBE64(hash, s[0]); 233 | WriteBE64(hash + 8, s[1]); 234 | WriteBE64(hash + 16, s[2]); 235 | WriteBE64(hash + 24, s[3]); 236 | } 237 | 238 | CSHA512& CSHA512::Reset() 239 | { 240 | bytes = 0; 241 | sha512::Initialize(s); 242 | return *this; 243 | } 244 | 245 | }; // namespace utreexo 246 | -------------------------------------------------------------------------------- /src/ram_forest.cpp: -------------------------------------------------------------------------------- 1 | #include "include/ram_forest.h" 2 | #include "include/batchproof.h" 3 | 4 | #include "check.h" 5 | #include "crypto/common.h" 6 | #include "node.h" 7 | #include "state.h" 8 | 9 | #include 10 | #include 11 | #include 12 | 13 | namespace utreexo { 14 | 15 | size_t RamForest::LeafHasher::operator()(const Hash& hash) const { return ReadLE64(hash.data()); } 16 | 17 | class RamForest::Node : public Accumulator::Node 18 | { 19 | public: 20 | Hash m_hash; 21 | // TODO: yikes. 22 | RamForest* m_forest; 23 | 24 | Node() {} 25 | Node(RamForest* forest, 26 | uint64_t num_leaves, 27 | uint64_t pos) 28 | : m_forest(forest) 29 | { 30 | m_num_leaves = num_leaves; 31 | m_position = pos; 32 | } 33 | Node(RamForest* forest, 34 | const Hash& hash, 35 | uint64_t num_leaves, 36 | uint64_t pos) 37 | : RamForest::Node(forest, num_leaves, pos) 38 | { 39 | m_hash = hash; 40 | } 41 | 42 | const Hash& GetHash() const override; 43 | void ReHash() override; 44 | NodePtr Parent() const override; 45 | }; 46 | 47 | // RamForest::Node 48 | const Hash& RamForest::Node::GetHash() const 49 | { 50 | return this->m_hash; 51 | } 52 | 53 | void RamForest::Node::ReHash() 54 | { 55 | ForestState state(m_num_leaves); 56 | // get the children hashes 57 | uint64_t left_child_pos = state.Child(this->m_position, 0), 58 | right_child_pos = state.Child(this->m_position, 1); 59 | std::optional left_child_hash = m_forest->Read(left_child_pos); 60 | std::optional right_child_hash = m_forest->Read(right_child_pos); 61 | 62 | // compute the hash 63 | Accumulator::ParentHash(m_hash, left_child_hash.value(), right_child_hash.value()); 64 | 65 | // write hash back 66 | uint8_t row = state.DetectRow(m_position); 67 | uint64_t offset = state.RowOffset(m_position); 68 | std::vector& rowData = m_forest->m_data.at(row); 69 | rowData[m_position - offset] = m_hash; 70 | } 71 | 72 | NodePtr RamForest::Node::Parent() const 73 | { 74 | ForestState state(m_num_leaves); 75 | uint64_t parent_pos = state.Parent(this->m_position); 76 | 77 | // Check if this node is a root. 
78 | // If so return nullptr becauce roots do not have parents. 79 | uint8_t row = state.DetectRow(this->m_position); 80 | bool row_has_root = state.HasRoot(row); 81 | bool is_root = state.RootPosition(row) == this->m_position; 82 | if (row_has_root && is_root) { 83 | return nullptr; 84 | } 85 | 86 | // Return the parent of this node. 87 | return Accumulator::MakeNodePtr(m_forest, m_num_leaves, parent_pos); 88 | } 89 | 90 | // RamForest 91 | 92 | RamForest::RamForest(uint64_t num_leaves) : Accumulator(num_leaves) 93 | { 94 | this->m_data = std::vector>(); 95 | this->m_data.push_back(std::vector()); 96 | } 97 | 98 | RamForest::RamForest(const std::string& file) : Accumulator(0) 99 | { 100 | this->m_data = std::vector>(); 101 | this->m_data.push_back(std::vector()); 102 | m_file_path = file; 103 | 104 | if (static_cast(std::fstream(file))) { 105 | // We can restore the forest from an existing file. 106 | m_file = std::fstream(file, 107 | std::fstream::in | std::fstream::out | std::fstream::binary); 108 | Restore(); 109 | } else { 110 | m_num_leaves = 0; 111 | m_file = std::fstream(file, 112 | std::fstream::in | std::fstream::out | std::fstream::binary | std::fstream::trunc); 113 | Commit(); 114 | } 115 | } 116 | 117 | RamForest::~RamForest() 118 | { 119 | if (m_file.good()) { 120 | Commit(); 121 | m_file.flush(); 122 | m_file.close(); 123 | } 124 | } 125 | 126 | bool RamForest::Restore() 127 | { 128 | char uint64_buf[8]; 129 | m_file.seekg(0); 130 | 131 | // restore number of leaves 132 | m_file.read(reinterpret_cast(uint64_buf), 8); 133 | m_num_leaves = ReadBE64(reinterpret_cast(uint64_buf)); 134 | 135 | ForestState state(m_num_leaves); 136 | // restore forest hashes 137 | uint64_t num_hashes = m_num_leaves; 138 | uint8_t row = 0; 139 | uint64_t pos = 0; 140 | while (num_hashes > 0) { 141 | pos = state.RowOffset(row); 142 | for (uint64_t i = 0; i < num_hashes; ++i) { 143 | Hash hash; 144 | m_file.read(reinterpret_cast(hash.data()), 32); 145 | m_data[row].push_back(hash); 146 | 147 | if (num_hashes == m_num_leaves) { 148 | // populate position map 149 | m_posmap[hash] = pos; 150 | } 151 | ++pos; 152 | } 153 | 154 | m_data.push_back({}); 155 | row++; 156 | num_hashes >>= 1; 157 | } 158 | 159 | RestoreRoots(); 160 | 161 | return true; 162 | } 163 | 164 | bool RamForest::Commit() 165 | { 166 | char uint64_buf[8]; 167 | m_file.seekg(0); 168 | 169 | // commit number of leaves 170 | WriteBE64(reinterpret_cast(uint64_buf), m_num_leaves); 171 | m_file.write(reinterpret_cast(uint64_buf), 8); 172 | 173 | // commit forest hashes 174 | ForestState state(m_num_leaves); 175 | uint64_t num_hashes = m_num_leaves; 176 | for (uint8_t i = 0; i <= state.NumRows(); ++i) { 177 | assert(num_hashes <= m_data[i].size()); 178 | for (int j = 0; j < num_hashes; ++j) { 179 | m_file.write(reinterpret_cast(m_data[i][j].data()), 32); 180 | } 181 | num_hashes >>= 1; 182 | } 183 | 184 | return true; 185 | } 186 | 187 | std::optional RamForest::Read(ForestState state, uint64_t pos) const 188 | { 189 | uint8_t row = state.DetectRow(pos); 190 | uint64_t offset = state.RowOffset(pos); 191 | 192 | assert(row < m_data.size()); 193 | const std::vector& row_data = m_data.at(row); 194 | 195 | assert((pos - offset) < row_data.size()); 196 | return std::optional{row_data.at(pos - offset)}; 197 | } 198 | 199 | std::optional RamForest::Read(uint64_t pos) const 200 | { 201 | ForestState state(m_num_leaves); 202 | return Read(state, pos); 203 | } 204 | 205 | std::vector RamForest::ReadLeafRange(uint64_t pos, uint64_t range) const 206 | { 
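// Read `range` consecutive hashes starting at position `pos` by issuing one
// Read() per position; the .value() calls below assume every position in the
// requested range exists in the forest.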
207 | std::vector hashes; 208 | for (uint64_t i = pos; i < pos + range; ++i) { 209 | hashes.push_back(Read(i).value()); 210 | } 211 | return hashes; 212 | } 213 | 214 | void RamForest::SwapRange(uint64_t from, uint64_t to, uint64_t range) 215 | { 216 | ForestState current_state = ForestState(m_num_leaves); 217 | uint8_t row = current_state.DetectRow(from); 218 | uint64_t offset_from = current_state.RowOffset(from); 219 | uint64_t offset_to = current_state.RowOffset(to); 220 | std::vector& rowData = m_data.at(row); 221 | 222 | for (uint64_t i = 0; i < range; ++i) { 223 | std::swap(rowData[(from - offset_from) + i], rowData[(to - offset_to) + i]); 224 | } 225 | } 226 | 227 | NodePtr RamForest::SwapSubTrees(uint64_t from, uint64_t to) 228 | { 229 | ForestState current_state(m_num_leaves); 230 | // posA and posB are on the same row 231 | uint8_t row = current_state.DetectRow(from); 232 | assert(row == current_state.DetectRow(to)); 233 | 234 | from = current_state.LeftDescendant(from, row); 235 | to = current_state.LeftDescendant(to, row); 236 | 237 | for (uint64_t range = 1 << row; range != 0; range >>= 1) { 238 | this->SwapRange(from, to, range); 239 | from = current_state.Parent(from); 240 | to = current_state.Parent(to); 241 | } 242 | 243 | return Accumulator::MakeNodePtr(this, m_num_leaves, to); 244 | } 245 | 246 | NodePtr RamForest::MergeRoot(uint64_t parent_pos, Hash parent_hash) 247 | { 248 | assert(m_roots.size() >= 2); 249 | 250 | m_roots.pop_back(); 251 | m_roots.pop_back(); 252 | // compute row 253 | ForestState state(m_num_leaves); 254 | uint8_t row = state.DetectRow(parent_pos); 255 | assert(m_data.size() > row); 256 | 257 | // add hash to forest 258 | m_data.at(row).push_back(parent_hash); 259 | uint64_t offset = state.RowOffset(parent_pos); 260 | m_data[row][parent_pos - offset] = parent_hash; 261 | 262 | NodePtr node = Accumulator::MakeNodePtr(this, m_data.at(row).back(), m_num_leaves, parent_pos); 263 | m_roots.push_back(node); 264 | 265 | return m_roots.back(); 266 | } 267 | 268 | NodePtr RamForest::NewLeaf(const Leaf& leaf) 269 | { 270 | // append new hash on row 0 (as a leaf) 271 | this->m_data[0][m_num_leaves] = leaf.first; 272 | 273 | NodePtr new_root = Accumulator::MakeNodePtr(this, leaf.first, m_num_leaves, m_num_leaves); 274 | m_roots.push_back(new_root); 275 | 276 | m_posmap[leaf.first] = new_root->m_position; 277 | return this->m_roots.back(); 278 | } 279 | 280 | void RamForest::FinalizeRemove(uint64_t next_num_leaves) 281 | { 282 | ForestState current_state(m_num_leaves), next_state(next_num_leaves); 283 | 284 | assert(next_state.m_num_leaves <= current_state.m_num_leaves); 285 | 286 | // Remove deleted leaf hashes from the position map. 287 | for (uint64_t pos = next_state.m_num_leaves; pos < current_state.m_num_leaves; ++pos) { 288 | m_posmap.erase(Read(pos).value()); 289 | } 290 | 291 | assert(m_posmap.size() == next_num_leaves); 292 | 293 | // Compute the positions of the new roots in the current state. 294 | std::vector new_positions = current_state.RootPositions(next_state.m_num_leaves); 295 | 296 | // Select the new roots. 
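// By this point the removal swaps have (presumably) already moved the
// surviving subtrees into place, so each new root can be read directly from
// the current rows at the positions computed above and wrapped in a node
// carrying the next leaf count.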
297 | std::vector> new_roots; 298 | new_roots.reserve(new_positions.size()); 299 | 300 | for (uint64_t new_pos : new_positions) { 301 | NodePtr new_root = Accumulator::MakeNodePtr(this, next_num_leaves, new_pos); 302 | new_root->m_hash = Read(new_pos).value(); 303 | new_roots.push_back(new_root); 304 | } 305 | 306 | this->m_roots = new_roots; 307 | } 308 | 309 | 310 | bool RamForest::Verify(const BatchProof& proof, const std::vector& target_hashes) 311 | { 312 | // TODO: verify the actual proof. a bitcoin bridge node would like to validate proofs to ensure 313 | // that he does not give invalid proofs to anyone. 314 | // For now just check that the target hashes exist. 315 | for (const Hash& hash : target_hashes) { 316 | auto it = m_posmap.find(hash); 317 | if (it == m_posmap.end()) return false; 318 | } 319 | 320 | return true; 321 | } 322 | 323 | bool RamForest::Add(const std::vector& leaves) 324 | { 325 | // Preallocate data with the required size. 326 | ForestState next_state(m_num_leaves + leaves.size()); 327 | for (uint8_t row = 0; row <= next_state.NumRows(); ++row) { 328 | if (row >= this->m_data.size()) { 329 | m_data.push_back(std::vector()); 330 | } 331 | 332 | m_data.at(row).resize(next_state.m_num_leaves >> row); 333 | } 334 | assert(m_data.size() > next_state.NumRows()); 335 | 336 | bool ok = Accumulator::Add(leaves); 337 | assert(next_state.m_num_leaves == m_num_leaves); 338 | assert(m_posmap.size() == m_num_leaves); 339 | 340 | return ok; 341 | } 342 | 343 | bool RamForest::Modify(UndoBatch& undo, 344 | const std::vector& leaves, 345 | const std::vector& targets) 346 | { 347 | if (!RamForest::Remove(targets)) return false; 348 | if (!BuildUndoBatch(undo, leaves.size(), targets)) return false; 349 | if (!RamForest::Add(leaves)) return false; 350 | 351 | return true; 352 | } 353 | 354 | void RamForest::RestoreRoots() 355 | { 356 | m_roots.clear(); 357 | std::vector root_positions = ForestState(m_num_leaves).RootPositions(); 358 | for (const uint64_t& pos : root_positions) { 359 | m_roots.push_back(Accumulator::MakeNodePtr(this, Read(pos).value(), m_num_leaves, pos)); 360 | } 361 | } 362 | 363 | bool RamForest::BuildUndoBatch(UndoBatch& undo, uint64_t num_adds, const std::vector& targets) const 364 | { 365 | ForestState prev_state(m_num_leaves + targets.size()); 366 | 367 | std::vector deleted_hashes; 368 | for (int i = 0; i < targets.size(); ++i) { 369 | uint64_t pos = m_num_leaves + static_cast(i); 370 | if (m_data.size() == 0 || pos >= m_data[0].size()) return false; 371 | deleted_hashes.push_back(Read(prev_state, pos).value()); 372 | } 373 | 374 | undo = UndoBatch(num_adds, targets, deleted_hashes); 375 | return true; 376 | } 377 | 378 | bool RamForest::Undo(const UndoBatch& undo) 379 | { 380 | if (m_data.size() == 0) return true; 381 | 382 | ForestState prev_state(m_num_leaves + undo.GetDeletedPositions().size() - undo.GetNumAdds()); 383 | 384 | auto undo_swaps = prev_state.UndoTransform(undo.GetDeletedPositions()); 385 | 386 | // Erase the added leaves from the position map. 387 | for (uint64_t i = m_num_leaves - undo.GetNumAdds(); i < m_num_leaves; ++i) { 388 | const Hash hash = Read(i).value(); 389 | if (m_posmap.find(hash) == m_posmap.end()) return false; 390 | m_posmap.erase(hash); 391 | } 392 | 393 | m_num_leaves -= undo.GetNumAdds(); 394 | 395 | // Place all deleted hashes at the end of the bottom row. 396 | // After this the forest is in the same state as right after the deletion 397 | // in the previous modification. 
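// Once the deleted hashes are back on the bottom row, the undo replays the
// deletion's swaps in reverse (computed by UndoTransform above), rehashes
// every position touched by a swap ("dirt"), and finally restores the roots
// for the previous leaf count.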
398 | int i = 0; 399 | std::unordered_set dirt_set; 400 | m_data[0].resize(prev_state.m_num_leaves); 401 | for (const Hash& hash : undo.GetDeletedHashes()) { 402 | if ((m_num_leaves + i) >= m_data[0].size()) return false; 403 | m_data[0][m_num_leaves + i] = hash; 404 | 405 | // Check that the hash is not already in the forest. 406 | if (m_posmap.find(hash) != m_posmap.end()) return false; 407 | m_posmap[hash] = m_num_leaves + i; 408 | dirt_set.insert(m_num_leaves + i); 409 | ++i; 410 | } 411 | 412 | m_num_leaves = prev_state.m_num_leaves; 413 | 414 | // Swap the delted hashes into their positions pre-deletion. 415 | for (auto swap_it = undo_swaps.crbegin(); swap_it != undo_swaps.crend(); ++swap_it) { 416 | auto swap = *swap_it; 417 | 418 | uint64_t range = swap.m_range; 419 | if (!swap.m_is_range_swap) { 420 | range = 1; 421 | } 422 | 423 | for (uint64_t i = 0; i < range; ++i) { 424 | dirt_set.insert(swap.m_from + i); 425 | dirt_set.insert(swap.m_to + i); 426 | } 427 | 428 | UpdatePositionMapForRange(swap.m_from, swap.m_to, range); 429 | SwapRange(swap.m_from, swap.m_to, range); 430 | } 431 | 432 | 433 | // Rehash all the "dirty" parts of the forest. 434 | std::vector dirt_list(dirt_set.begin(), dirt_set.end()); 435 | std::sort(dirt_list.begin(), dirt_list.end()); 436 | 437 | // Construct the first row of dirt. 438 | std::vector> dirt; 439 | for (const uint64_t& pos : dirt_list) { 440 | uint64_t parent_pos = prev_state.Parent(pos); 441 | // Skip positions that are past the bottom row root. 442 | // The parents of those positions do not exist in the new forest. 443 | if (prev_state.HasRoot(0) && prev_state.RootPosition(0) <= pos) continue; 444 | 445 | // Dont add the same parent to the next row dirt. 446 | if (dirt.size() != 0 && dirt.back()->m_position == prev_state.Parent(pos)) continue; 447 | 448 | dirt.push_back(Accumulator::MakeNodePtr(this, m_num_leaves, parent_pos)); 449 | } 450 | 451 | for (uint8_t r = 1; r <= prev_state.NumRows(); ++r) { 452 | m_data[r].resize(m_num_leaves >> r); 453 | std::vector> next_dirt; 454 | 455 | for (NodePtr dirt_node : dirt) { 456 | dirt_node->ReHash(); 457 | auto parent = dirt_node->Parent(); 458 | if (parent && (next_dirt.size() == 0 || next_dirt.back()->m_position != parent->m_position)) { 459 | next_dirt.push_back(std::dynamic_pointer_cast(parent)); 460 | } 461 | } 462 | dirt = next_dirt; 463 | } 464 | 465 | RestoreRoots(); 466 | 467 | CHECK_SAFE(m_data[0].size() == m_posmap.size()); 468 | CHECK_SAFE([](const std::unordered_map& posmap, 469 | const std::vector>& data) { 470 | int pos = 0; 471 | for (const Hash& hash : data[0]) { 472 | auto it = posmap.find(hash); 473 | if (it == posmap.end()) return false; 474 | if (it->second != pos) return false; 475 | ++pos; 476 | } 477 | 478 | return true; 479 | }(m_posmap, m_data)); 480 | 481 | return true; 482 | } 483 | 484 | Hash RamForest::GetLeaf(uint64_t pos) const 485 | { 486 | assert(pos < m_num_leaves); 487 | return Read(pos).value(); 488 | } 489 | 490 | bool RamForest::operator==(const RamForest& other) 491 | { 492 | std::vector roots, other_roots; 493 | Roots(roots); 494 | other.Roots(other_roots); 495 | return m_num_leaves == other.m_num_leaves && 496 | roots == other_roots && 497 | m_posmap == other.m_posmap; 498 | } 499 | 500 | }; // namespace utreexo 501 | -------------------------------------------------------------------------------- /src/state.cpp: -------------------------------------------------------------------------------- 1 | #include "crypto/common.h" 2 | 3 | #include 4 | #include 5 | 
#include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | namespace utreexo { 12 | 13 | void print_vector(const std::vector& vec) 14 | { 15 | for (auto i : vec) 16 | std::cout << i << ' '; 17 | std::cout << std::endl; 18 | } 19 | 20 | void print_swaps(const std::vector& vec) 21 | { 22 | for (auto i : vec) 23 | std::cout << (i.m_is_range_swap ? 'r' : '_') << (i.m_collapse ? 'c' : '_') << " (" << i.m_from << ", " << i.m_to << ") " << i.m_range << " "; 24 | std::cout << std::endl; 25 | } 26 | 27 | // Return the number of trailing one bits in n. 28 | uint8_t trailingOnes(uint64_t n) 29 | { 30 | uint64_t b = ~n & (n + 1); 31 | --b; 32 | b = (b & 0x5555555555555555) + 33 | ((b >> 1) & 0x5555555555555555); 34 | b = (b & 0x3333333333333333) + 35 | ((b >> 2) & 0x3333333333333333); 36 | b = (b & 0x0f0f0f0f0f0f0f0f) + 37 | ((b >> 4) & 0x0f0f0f0f0f0f0f0f); 38 | b = (b & 0x00ff00ff00ff00ff) + 39 | ((b >> 8) & 0x00ff00ff00ff00ff); 40 | b = (b & 0x0000ffff0000ffff) + 41 | ((b >> 16) & 0x0000ffff0000ffff); 42 | b = (b & 0x00000000ffffffff) + 43 | ((b >> 32) & 0x00000000ffffffff); 44 | 45 | return b; 46 | } 47 | 48 | uint8_t trailingZeros(uint64_t n) 49 | { 50 | return trailingOnes(~n); 51 | } 52 | 53 | // TODO: Maybe export these? 54 | uint64_t _rootPosition(uint8_t row, uint64_t num_leaves, uint8_t rows); 55 | uint8_t _numRows(uint64_t num_leaves); 56 | uint8_t _hasRoot(uint64_t num_leaves); 57 | uint64_t _maxNodes(uint64_t num_leaves); 58 | 59 | // positions 60 | 61 | uint64_t ForestState::Parent(uint64_t pos) const 62 | { 63 | return (pos >> 1ULL) | (1ULL << this->NumRows()); 64 | } 65 | 66 | uint64_t ForestState::Ancestor(uint64_t pos, uint8_t rise) const 67 | { 68 | if (rise == 0) { 69 | return pos; 70 | } 71 | 72 | uint8_t rows = this->NumRows(); 73 | uint64_t mask = this->MaxNodes(); 74 | return (pos >> rise | (mask << (rows - (rise - 1)))) & mask; 75 | } 76 | 77 | uint64_t ForestState::LeftChild(uint64_t pos) const 78 | { 79 | CHECK_SAFE(pos >= this->m_num_leaves); 80 | return (pos << 1) & (this->MaxNodes()); 81 | } 82 | 83 | uint64_t ForestState::Child(uint64_t pos, uint64_t placement) const 84 | { 85 | return this->LeftChild(pos) | placement; 86 | } 87 | 88 | uint64_t ForestState::LeftDescendant(uint64_t pos, uint8_t drop) const 89 | { 90 | CHECK_SAFE(drop <= this->DetectRow(pos)); 91 | 92 | if (drop == 0) { 93 | return pos; 94 | } 95 | 96 | uint64_t mask = this->MaxNodes(); 97 | return (pos << drop) & mask; 98 | } 99 | 100 | uint64_t ForestState::Cousin(uint64_t pos) const { return pos ^ 2; } 101 | 102 | uint64_t ForestState::RightSibling(uint64_t pos) const { return pos | 1; } 103 | 104 | uint64_t ForestState::Sibling(uint64_t pos) const { return pos ^ 1; } 105 | 106 | std::tuple ForestState::Path(uint64_t pos) const 107 | { 108 | uint8_t rows = this->NumRows(); 109 | uint8_t row = this->DetectRow(pos); 110 | 111 | // This is a bit of an ugly predicate. The goal is to detect if we've 112 | // gone past the node we're looking for by inspecting progressively shorter 113 | // trees; once we have, the loop is over. 114 | 115 | // The predicate breaks down into 3 main terms: 116 | // A: pos << nh 117 | // B: mask 118 | // C: 1<= C) 120 | // A is position up-shifted by the row of the node we're targeting. 121 | // B is the "mask" we use in other functions; a bunch of 0s at the MSB side 122 | // and then a bunch of 1s on the LSB side, such that we can use bitwise AND 123 | // to discard high bits. 
Together, A&B is shifting position up by nh bits, 124 | // and then discarding (zeroing out) the high bits. This is the same as in 125 | // childMany. C checks for whether a tree exists at the current tree 126 | // rows. If there is no tree at th, C is 0. If there is a tree, it will 127 | // return a power of 2: the base size of that tree. 128 | // The C term actually is used 3 times here, which is ugly; it's redefined 129 | // right on the next line. 130 | // In total, what this loop does is to take a node position, and 131 | // see if it's in the next largest tree. If not, then subtract everything 132 | // covered by that tree from the position, and proceed to the next tree, 133 | // skipping trees that don't exist. 134 | 135 | uint8_t biggerTrees = 0; 136 | for (; ((pos << row) & ((2ULL << rows) - 1)) >= ((1ULL << rows) & this->m_num_leaves); 137 | --rows) { 138 | uint64_t treeSize = (1ULL << rows) & this->m_num_leaves; 139 | if (treeSize != 0) { 140 | pos -= treeSize; 141 | ++biggerTrees; 142 | } 143 | } 144 | 145 | return std::make_tuple(biggerTrees, rows - row, ~pos); 146 | } 147 | 148 | std::pair, std::vector> 149 | ForestState::ProofPositions(const std::vector& targets) const 150 | { 151 | uint64_t rows = this->NumRows(); 152 | 153 | // store for the proof and computed positions 154 | // proof positions are needed to verify, 155 | // computed positions are the positions of the targets as well as 156 | // positions that are computed while verifying. 157 | std::vector proof, computed; 158 | 159 | std::vector::const_iterator start = targets.cbegin(), 160 | end = targets.cend(); 161 | 162 | // saves the reference to nextTargets in the loop from being destroyed 163 | std::vector savior; 164 | std::vector nextTargets; 165 | 166 | for (uint8_t row = 0; row <= rows; ++row) { 167 | computed.insert(computed.end(), start, end); 168 | 169 | if (this->HasRoot(row) && start < end && 170 | *(end - 1) == this->RootPosition(row)) { 171 | // remove roots from targets 172 | --end; 173 | } 174 | 175 | while (start < end) { 176 | int size = end - start; 177 | 178 | // look at the first 4 targets 179 | if (size > 3 && this->Cousin(this->RightSibling(start[0])) == 180 | this->RightSibling(start[3])) { 181 | // the first and fourth target are cousins 182 | // => target 2 and 3 are also targets, both parents are targets of next 183 | // row 184 | nextTargets.insert(nextTargets.end(), 185 | {this->Parent(start[0]), this->Parent(start[3])}); 186 | start += 4; 187 | continue; 188 | } 189 | 190 | // look at the first 3 targets 191 | if (size > 2 && this->Cousin(this->RightSibling(start[0])) == 192 | this->RightSibling(start[2])) { 193 | // the first and third target are cousins 194 | // => the second target is either the sibling of the first 195 | // OR the sibiling of the third 196 | // => only the sibling that is not a target is appended to the proof positions 197 | if (this->RightSibling(start[1]) == this->RightSibling(start[0])) { 198 | proof.push_back(this->Sibling(start[2])); 199 | } else { 200 | proof.push_back(this->Sibling(start[0])); 201 | } 202 | 203 | nextTargets.insert(nextTargets.end(), 204 | {this->Parent(start[0]), this->Parent(start[2])}); 205 | start += 3; 206 | continue; 207 | } 208 | 209 | // look at the first 2 targets 210 | if (size > 1) { 211 | if (this->RightSibling(start[0]) == start[1]) { 212 | // the first and the second target are siblings 213 | // => parent is a target for the next. 
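// (Both sibling hashes are already known to the verifier in this case, so no
// proof hash is needed; only their parent continues to the next row.)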
214 | nextTargets.push_back(this->Parent(start[0])); 215 | start += 2; 216 | continue; 217 | } 218 | 219 | if (this->Cousin(this->RightSibling(start[0])) == 220 | this->RightSibling(start[1])) { 221 | // the first and the second target are cousins 222 | // => both siblings are part of the proof 223 | // => both parents are targets for the next row 224 | proof.insert(proof.end(), 225 | {this->Sibling(start[0]), this->Sibling(start[1])}); 226 | nextTargets.insert(nextTargets.end(), 227 | {this->Parent(start[0]), this->Parent(start[1])}); 228 | start += 2; 229 | continue; 230 | } 231 | } 232 | 233 | // look at the first target 234 | proof.push_back(this->Sibling(start[0])); 235 | nextTargets.push_back(this->Parent(start[0])); 236 | ++start; 237 | } 238 | 239 | savior = nextTargets; 240 | start = savior.cbegin(); 241 | end = savior.cend(); 242 | nextTargets.clear(); 243 | } 244 | 245 | return std::make_pair(proof, computed); 246 | } 247 | 248 | // roots 249 | 250 | uint8_t ForestState::NumRoots() const 251 | { 252 | std::bitset<64> bits(this->m_num_leaves); 253 | return bits.count(); 254 | } 255 | 256 | bool _hasRoot(uint64_t num_leaves, uint8_t row) 257 | { 258 | return (num_leaves >> row) & 1; 259 | } 260 | 261 | bool ForestState::HasRoot(uint8_t row) const 262 | { 263 | return _hasRoot(this->m_num_leaves, row); 264 | } 265 | 266 | uint64_t _rootPosition(uint8_t row, uint64_t num_leaves, uint8_t rows) 267 | { 268 | uint64_t mask = (2ULL << rows) - 1; 269 | uint64_t before = num_leaves & (mask << (row + 1)); 270 | uint64_t shifted = (before >> row) | (mask << (rows + 1 - row)); 271 | return shifted & mask; 272 | } 273 | 274 | uint64_t ForestState::RootPosition(uint8_t row) const 275 | { 276 | return _rootPosition(row, this->m_num_leaves, this->NumRows()); 277 | } 278 | 279 | std::vector ForestState::RootPositions() const 280 | { 281 | std::vector roots; 282 | for (uint8_t row = this->NumRows(); row >= 0 && row < 64; --row) { 283 | if (this->HasRoot(row)) { 284 | roots.push_back(this->RootPosition(row)); 285 | } 286 | } 287 | return roots; 288 | } 289 | 290 | std::vector ForestState::RootPositions(uint64_t num_leaves) const 291 | { 292 | std::vector roots; 293 | for (uint8_t row = this->NumRows(); row >= 0 && row < 64; --row) { 294 | if (_hasRoot(num_leaves, row)) { 295 | roots.push_back(_rootPosition(row, num_leaves, this->NumRows())); 296 | } 297 | } 298 | return roots; 299 | } 300 | 301 | uint8_t ForestState::RootIndex(uint64_t pos) const 302 | { 303 | uint8_t root_index{0}; 304 | std::tie(root_index, std::ignore, std::ignore) = Path(pos); 305 | return root_index; 306 | } 307 | 308 | 309 | // rows 310 | 311 | uint8_t _numRows(uint64_t num_leaves) 312 | { 313 | if (num_leaves == 0) return 0; 314 | return CountBits(num_leaves - 1); 315 | } 316 | 317 | 318 | uint8_t ForestState::NumRows() const 319 | { 320 | return _numRows(this->m_num_leaves); 321 | } 322 | 323 | uint8_t ForestState::DetectRow(uint64_t pos) const 324 | { 325 | uint64_t marker = 1ULL << this->NumRows(); 326 | uint8_t row = 0; 327 | 328 | for (; (pos & marker) != 0; ++row) { 329 | marker >>= 1; 330 | } 331 | 332 | return row; 333 | } 334 | 335 | uint64_t ForestState::RowOffset(uint64_t pos) const 336 | { 337 | return RowOffset(DetectRow(pos)); 338 | } 339 | 340 | uint64_t ForestState::RowOffset(uint8_t row) const 341 | { 342 | uint64_t marker = MaxNodes(); 343 | return (0xFFFFFFFFFFFFFFFF << (NumRows() + 1 - row)) & marker; 344 | } 345 | 346 | // transform 347 | 348 | std::vector> 349 | ForestState::Transform(const std::vector& 
targets) const 350 | { 351 | uint8_t rows = this->NumRows(); 352 | uint64_t next_num_leaves = this->m_num_leaves - targets.size(); 353 | 354 | std::vector> swaps; 355 | std::vector collapses; 356 | swaps.reserve(rows); 357 | collapses.reserve(rows); 358 | 359 | std::vector current_row_targets(targets); 360 | 361 | for (uint8_t row = 0; row < rows && current_row_targets.size() > 0; ++row) { 362 | bool root_present = this->HasRoot(row); 363 | uint64_t root_pos = this->RootPosition(row); 364 | 365 | if (root_present && *(current_row_targets.end() - 1) == root_pos) { 366 | current_row_targets.pop_back(); 367 | root_present = false; 368 | } 369 | 370 | bool deletion_remains = current_row_targets.size() % 2 != 0; 371 | 372 | //extract_pair.first are the parents of the siblings, extract_pair.second is the input with out siblings. 373 | std::pair, std::vector> extract_pair = 374 | ComputeNextRowTargets(current_row_targets, deletion_remains, root_present); 375 | 376 | // TODO: avoid sorting (the go version does this differently) 377 | std::sort(extract_pair.first.begin(), extract_pair.first.end()); 378 | 379 | swaps.push_back(this->MakeSwaps(extract_pair.second, deletion_remains, root_present, root_pos)); 380 | collapses.push_back(this->MakeCollapse(extract_pair.second, deletion_remains, root_present, row, next_num_leaves)); 381 | 382 | current_row_targets = extract_pair.first; 383 | } 384 | 385 | // Convert collapses to swaps and append them to the swaps list. 386 | this->ConvertCollapses(swaps, collapses); 387 | 388 | return swaps; 389 | } 390 | 391 | std::vector ForestState::UndoTransform(const std::vector& targets) const 392 | { 393 | std::vector undo_swaps; 394 | auto prev_swaps = Transform(targets); 395 | 396 | for (int r = 0; r < prev_swaps.size(); ++r) { 397 | auto row = prev_swaps[r]; 398 | for (const ForestState::Swap& swap : row) { 399 | if (swap.m_from == swap.m_to) continue; 400 | undo_swaps.push_back(swap.ToLeaves(*this)); 401 | } 402 | } 403 | 404 | return undo_swaps; 405 | } 406 | 407 | // misc 408 | 409 | uint64_t _maxNodes(uint64_t num_leaves) { return (2ULL << _numRows(num_leaves)) - 1; } 410 | uint64_t ForestState::MaxNodes() const { return _maxNodes(this->m_num_leaves); } 411 | 412 | 413 | // Check that the targets are sorted in ascending order and dont have any duplicates. 414 | bool IsSortedNoDupes(const std::vector& targets) 415 | { 416 | for (uint64_t i = 0; i < targets.size() - 1; ++i) { 417 | if (targets[i] >= targets[i + 1]) { 418 | return false; 419 | } 420 | } 421 | 422 | return true; 423 | } 424 | 425 | bool ForestState::CheckTargetsSanity(const std::vector& targets) const 426 | { 427 | if (targets.size() == 0) { 428 | // An empty target list is OK. 429 | return true; 430 | } 431 | 432 | if (m_num_leaves < targets.size()) { 433 | // It is not possible to delete more targets than leaves in the forest. 434 | return false; 435 | } 436 | 437 | if (!IsSortedNoDupes(targets)) { 438 | // Targets have to be sorted in ascending order and cant have any duplicates. 439 | return false; 440 | } 441 | 442 | if (targets.back() >= m_num_leaves) { 443 | // All targets must be leaves. 444 | // Checking that the last target is a leaf is enough since 445 | // the targets are sorted in ascending order. 
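// (Leaf positions are 0 .. m_num_leaves - 1; anything at or above
// m_num_leaves refers to an internal node or lies outside the forest.)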
446 | return false; 447 | } 448 | 449 | return true; 450 | } 451 | 452 | // private 453 | 454 | std::pair, std::vector> 455 | ForestState::ComputeNextRowTargets(const std::vector& targets, 456 | bool deletion_remains, 457 | bool root_present) const 458 | { 459 | std::vector targets_without_siblings, parents; 460 | 461 | std::vector::const_iterator start = targets.begin(); 462 | while (start < targets.end()) { 463 | if (start < targets.end() - 1 && this->RightSibling(start[0]) == start[1]) { 464 | // These two targets are siblings. In the context of computing swaps there is no need to swap them, 465 | // but we might want to swap their parent in the next row. 466 | // => store the parent. 467 | parents.push_back(this->Parent(start[0])); 468 | start += 2; 469 | continue; 470 | } 471 | 472 | // This target has no sibling. 473 | targets_without_siblings.push_back(start[0]); 474 | if (targets_without_siblings.size() % 2 == 0) { 475 | parents.push_back(this->Parent(start[0])); 476 | } 477 | ++start; 478 | } 479 | 480 | if (deletion_remains && !root_present) { 481 | parents.push_back(this->Parent(targets_without_siblings.back())); 482 | } 483 | 484 | return std::make_pair(parents, targets_without_siblings); 485 | } 486 | 487 | std::vector ForestState::MakeSwaps(const std::vector& targets, 488 | bool deletion_remains, 489 | bool root_present, 490 | uint64_t root_pos) const 491 | { 492 | // +1 for deletion_remains && root_present == true 493 | uint32_t num_swaps = (targets.size() >> 1) + 1; 494 | std::vector swaps; 495 | swaps.reserve(num_swaps); 496 | 497 | std::vector::const_iterator start = targets.begin(); 498 | while (targets.end() - start > 1) { 499 | // Look at 2 targets at a time and create a swap that turns both deletions into siblings. 500 | // This is possible because all nodes in `targets` are not siblings (thanks to `computeNextRowTargets`). 501 | swaps.push_back(ForestState::Swap(this->Sibling(start[1]), start[0])); 502 | start += 2; 503 | } 504 | 505 | if (deletion_remains && root_present) { 506 | // there is a remaining deletion and a root on this row 507 | // => swap target with the root. 508 | swaps.push_back(ForestState::Swap(root_pos, start[0])); 509 | } 510 | 511 | return swaps; 512 | } 513 | 514 | ForestState::Swap ForestState::MakeCollapse(const std::vector& targets, 515 | bool deletion_remains, 516 | bool root_present, 517 | uint8_t row, 518 | uint64_t next_num_leaves) const 519 | { 520 | // The position of the root on this row after the deletion. 521 | uint64_t root_dest = _rootPosition(row, next_num_leaves, this->NumRows()); 522 | 523 | if (!deletion_remains && root_present) { 524 | // No deletion remaining but there is a root. 525 | // => Collapse the root to its position after the deletion. 526 | return ForestState::Swap(this->RootPosition(row), root_dest, true); 527 | } 528 | 529 | if (deletion_remains && !root_present) { 530 | // There is no root but there is a remaining deletion. 531 | // => The sibling of the remaining deletion becomes a root. 532 | assert(targets.size() > 0); 533 | return ForestState::Swap(this->Sibling(targets.back()), root_dest, true); 534 | } 535 | 536 | // No collapse on this row. 537 | // This will be ignored in `ConvertCollapses` because collapse=false. 
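// (The two-argument Swap constructor presumably leaves m_collapse as false,
// which is exactly what ConvertCollapses checks before applying a collapse.)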
538 | return ForestState::Swap(0, 0); 539 | } 540 | 541 | void ForestState::ConvertCollapses(std::vector>& swaps, 542 | std::vector& collapses) const 543 | { 544 | if (collapses.size() == 0) { 545 | // If there is nothing to collapse, we're done 546 | return; 547 | } 548 | 549 | 550 | for (uint8_t row = collapses.size() - 1; row != 0; --row) { 551 | for (ForestState::Swap swap : swaps.at(row)) { 552 | // For every swap in the row, convert the collapses below the swap. 553 | this->SwapInRow(swap, collapses, row); 554 | } 555 | 556 | if (!collapses.at(row).m_collapse) { 557 | // There is no collapse in this row. 558 | continue; 559 | } 560 | 561 | // For the collapse on this row, convert the other collapses located below. 562 | this->SwapInRow(collapses.at(row), collapses, row); 563 | } 564 | 565 | // Append collapses to swaps. 566 | uint8_t row = 0; 567 | for (ForestState::Swap collapse : collapses) { 568 | if (collapse.m_from != collapse.m_to && collapse.m_collapse) { 569 | swaps.at(row).push_back(collapse); 570 | } 571 | ++row; 572 | } 573 | } 574 | 575 | void ForestState::SwapInRow(ForestState::Swap swap, 576 | std::vector& collapses, 577 | uint8_t swap_row) const 578 | { 579 | for (uint8_t collapse_row = 0; collapse_row < swap_row; ++collapse_row) { 580 | if (!collapses.at(collapse_row).m_collapse) { 581 | continue; 582 | } 583 | 584 | this->SwapIfDescendant(swap, collapses.at(collapse_row), swap_row, collapse_row); 585 | } 586 | } 587 | 588 | void ForestState::SwapIfDescendant(ForestState::Swap swap, 589 | ForestState::Swap& collapse, 590 | uint8_t swap_row, 591 | uint8_t collapse_row) const 592 | { 593 | uint8_t row_diff = swap_row - collapse_row; 594 | uint64_t ancestor = this->Ancestor(collapse.m_to, row_diff); 595 | if ((ancestor == swap.m_from) != (ancestor == swap.m_to)) { 596 | collapse.m_to ^= (swap.m_from ^ swap.m_to) << row_diff; 597 | } 598 | } 599 | 600 | 601 | ForestState::Swap ForestState::Swap::ToLeaves(ForestState state) const 602 | { 603 | uint8_t row = state.DetectRow(m_from); 604 | if (row == 0) { 605 | return *this; 606 | } 607 | 608 | return ForestState::Swap( 609 | state.LeftDescendant(m_from, row), 610 | state.LeftDescendant(m_to, row), 611 | static_cast(1ULL << static_cast(row))); 612 | } 613 | }; // namespace utreexo 614 | -------------------------------------------------------------------------------- /src/test/accumulator_tests.cpp: -------------------------------------------------------------------------------- 1 | #include "../../include/utreexo.h" 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #include "state.h" 9 | 10 | BOOST_AUTO_TEST_SUITE(accumulator_tests) 11 | 12 | using namespace utreexo; 13 | 14 | void SetHash(Hash& hash, int num) 15 | { 16 | hash[0] = num; 17 | hash[1] = num >> 8; 18 | hash[2] = num >> 16; 19 | hash[3] = num >> 24; 20 | hash[4] = 0xFF; 21 | } 22 | 23 | void CreateTestLeaves(std::vector& leaves, int count, int offset) 24 | { 25 | for (int i = 0; i < count; i++) { 26 | Hash hash = {}; // initialize all elements to 0 27 | SetHash(hash, offset + i); 28 | leaves.emplace_back(std::move(hash), false); 29 | } 30 | } 31 | 32 | void CreateTestLeaves(std::vector& leaves, int count) 33 | { 34 | CreateTestLeaves(leaves, count, 0); 35 | } 36 | 37 | Hash HashFromStr(const std::string& hex) 38 | { 39 | const signed char p_util_hexdigit[256] = 40 | {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 41 | -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 42 | -1, -1, -1, -1, -1, -1, -1, -1, -1, 
-1, -1, -1, -1, -1, -1, -1, 43 | 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1, 44 | -1, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, -1, -1, -1, -1, -1, -1, -1, -1, -1, 45 | -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 46 | -1, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, -1, -1, -1, -1, -1, -1, -1, -1, -1, 47 | -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 48 | -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 49 | -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 50 | -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 51 | -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 52 | -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 53 | -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 54 | -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 55 | -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}; 56 | 57 | Hash h; 58 | assert(hex.size() == 64); 59 | int digits = 64; 60 | 61 | for (int i = 31; i >= 0;) { 62 | h[i] = p_util_hexdigit[hex[--digits]]; 63 | if (digits > 0) { 64 | h[i] |= p_util_hexdigit[hex[--digits]] << 4; 65 | i--; 66 | } 67 | } 68 | 69 | return h; 70 | } 71 | 72 | // https://github.com/bitcoin/bitcoin/blob/7f653c3b22f0a5267822eec017aea6a16752c597/src/util/strencodings.cpp#L580 73 | template 74 | std::string HexStr(const T s) 75 | { 76 | std::string rv; 77 | static constexpr char hexmap[16] = {'0', '1', '2', '3', '4', '5', '6', '7', 78 | '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; 79 | rv.reserve(s.size() * 2); 80 | for (uint8_t v : s) { 81 | rv.push_back(hexmap[v >> 4]); 82 | rv.push_back(hexmap[v & 15]); 83 | } 84 | return rv; 85 | } 86 | 87 | utreexo::UndoBatch unused_undo; 88 | 89 | // Simple test that adds and deletes a bunch of elements from both a forest and pollard. 90 | BOOST_AUTO_TEST_CASE(simple) 91 | { 92 | Pollard pruned(0); 93 | RamForest full(0); 94 | 95 | // 1. Add 64 elements 96 | std::vector leaves; 97 | CreateTestLeaves(leaves, 64); 98 | 99 | BOOST_CHECK(pruned.Modify(leaves, {})); 100 | BOOST_CHECK(full.Modify(unused_undo, leaves, {})); 101 | 102 | // 2. Check that roots of the forest and pollard match 103 | std::vector pruned_roots, full_roots; 104 | pruned.Roots(pruned_roots); 105 | full.Roots(full_roots); 106 | 107 | BOOST_CHECK(pruned_roots.size() == 1); 108 | BOOST_CHECK(full_roots.size() == 1); 109 | 110 | BOOST_CHECK(pruned_roots[0] == HashFromStr("6692c043bfd19c717a07a931833b1748cff69aa4349110948907ab125f744c25")); 111 | BOOST_CHECK(pruned_roots == full_roots); 112 | 113 | // 3. Prove elements 0, 2, 3 and 9 in the forest 114 | BatchProof proof; 115 | std::vector leaf_hashes = {leaves[0].first, leaves[2].first, leaves[3].first, leaves[9].first}; 116 | BOOST_CHECK(full.Prove(proof, leaf_hashes)); 117 | 118 | // 4. Let the pollard verify the proof 119 | BOOST_CHECK(pruned.Verify(proof, leaf_hashes)); 120 | 121 | // 5. Delete the elements from both the forest and pollard 122 | utreexo::UndoBatch undo; 123 | BOOST_CHECK(full.Modify(undo, {}, proof.GetSortedTargets())); 124 | BOOST_CHECK(pruned.Modify({}, proof.GetSortedTargets())); 125 | 126 | // 6. 
Check that the roots match after the deletion 127 | pruned.Roots(pruned_roots); 128 | full.Roots(full_roots); 129 | 130 | BOOST_CHECK(pruned_roots.size() == 4); 131 | BOOST_CHECK(full_roots.size() == 4); 132 | 133 | BOOST_CHECK(pruned_roots[0] == HashFromStr("b868b67e97610dc20fd4052d08c390cf8fc95eef3c7ee717aebc85a02e81cd68")); 134 | BOOST_CHECK(pruned_roots[1] == HashFromStr("4a812f9dc3a1691f4b6de9ec07ecb2014a3c8839182592549a2d8508631cd908")); 135 | BOOST_CHECK(pruned_roots[2] == HashFromStr("891899fa84a5c8659b007ce655b7cc5cf4b92493477db1095518cfc732024ef2")); 136 | BOOST_CHECK(pruned_roots[3] == HashFromStr("aee875faf7276a9817d0db6195414118b1348697d2e2abd4b3fcee46c579833b")); 137 | BOOST_CHECK(pruned_roots == full_roots); 138 | 139 | std::vector undo_bytes; 140 | undo.Serialize(undo_bytes); 141 | { 142 | UndoBatch copy; 143 | BOOST_CHECK(copy.Unserialize(undo_bytes)); 144 | BOOST_CHECK(copy == undo); 145 | } 146 | // Undo last modification 147 | BOOST_CHECK(full.Undo(undo)); 148 | 149 | full.Roots(full_roots); 150 | BOOST_CHECK(full_roots.size() == 1); 151 | BOOST_CHECK(full_roots[0] == HashFromStr("6692c043bfd19c717a07a931833b1748cff69aa4349110948907ab125f744c25")); 152 | } 153 | 154 | BOOST_AUTO_TEST_CASE(ramforest_disk) 155 | { 156 | std::remove("./test_forest"); 157 | BatchProof proof; 158 | std::vector leaves; 159 | { 160 | RamForest full("./test_forest"); 161 | Pollard pollard(0); 162 | 163 | CreateTestLeaves(leaves, 32); 164 | 165 | BOOST_CHECK(full.Modify(unused_undo, leaves, {})); 166 | BOOST_CHECK(pollard.Modify(leaves, {})); 167 | BOOST_CHECK(full.Prove(proof, {leaves[0].first})); 168 | BOOST_CHECK(pollard.Verify(proof, {leaves[0].first})); 169 | } 170 | 171 | RamForest full("./test_forest"); 172 | BatchProof copy; 173 | BOOST_CHECK(full.Prove(copy, {leaves[0].first})); 174 | BOOST_CHECK(copy == proof); 175 | } 176 | 177 | 178 | BOOST_AUTO_TEST_CASE(batchproof_serialization) 179 | { 180 | RamForest full(0); 181 | 182 | std::vector leaves; 183 | CreateTestLeaves(leaves, 32); 184 | 185 | full.Modify(unused_undo, leaves, {}); 186 | 187 | std::vector proof_bytes; 188 | BatchProof proof1; 189 | BOOST_CHECK(full.Prove(proof1, {leaves[0].first, leaves[1].first})); 190 | proof1.Serialize(proof_bytes); 191 | 192 | BatchProof proof2; 193 | BOOST_CHECK(proof2.Unserialize(proof_bytes)); 194 | BOOST_CHECK(proof1 == proof2); 195 | } 196 | 197 | BOOST_AUTO_TEST_CASE(singular_leaf_prove) 198 | { 199 | Pollard pruned(0); 200 | RamForest full(0); 201 | 202 | std::vector leaves; 203 | CreateTestLeaves(leaves, 8); 204 | 205 | // Add test leaves, dont delete any. 206 | full.Modify(unused_undo, leaves, {}); 207 | pruned.Modify(leaves, {}); 208 | //full.PrintRoots(); 209 | 210 | for (Leaf& leaf : leaves) { 211 | BatchProof proof; 212 | full.Prove(proof, {leaf.first}); 213 | BOOST_CHECK(pruned.Verify(proof, {leaf.first})); 214 | 215 | // Delete all cached leaves. 216 | pruned.Prune(); 217 | } 218 | } 219 | 220 | BOOST_AUTO_TEST_CASE(simple_modified_proof) 221 | { 222 | Pollard pruned(0); 223 | RamForest full(0); 224 | 225 | std::vector leaves; 226 | CreateTestLeaves(leaves, 8); 227 | 228 | // Add test leaves, dont delete any. 229 | full.Modify(unused_undo, leaves, {}); 230 | pruned.Modify(leaves, {}); 231 | // full.PrintRoots(); 232 | 233 | BatchProof proof; 234 | full.Prove(proof, {leaves[0].first}); 235 | std::vector modified_hashes = proof.GetHashes(); 236 | // Fill the last hash with zeros. 237 | // This should cause verification to fail. 
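// (The proof hashes are the sibling hashes used to recompute the roots, so
// corrupting any one of them changes a computed root and Verify must fail.)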
238 | modified_hashes.back().fill(0); 239 | BatchProof invalid(proof.GetSortedTargets(), modified_hashes); 240 | // Assert that verification fails. 241 | BOOST_CHECK(pruned.Verify(invalid, {leaves[0].first}) == false); 242 | } 243 | 244 | BOOST_AUTO_TEST_CASE(simple_cached_proof) 245 | { 246 | Pollard pruned(0); 247 | RamForest full(0); 248 | 249 | std::vector leaves; 250 | CreateTestLeaves(leaves, 8); 251 | 252 | // Remember leaf 0 in the pollard. 253 | leaves[0].second = true; 254 | // Add test leaves, dont delete any. 255 | full.Modify(unused_undo, leaves, {}); 256 | pruned.Modify(leaves, {}); 257 | //full.PrintRoots(); 258 | 259 | BatchProof proof; 260 | full.Prove(proof, {leaves[0].first}); 261 | 262 | // Since the proof for leaf 0 is cached, 263 | // the proof can be any subset of the full proof. 264 | BOOST_CHECK(pruned.Verify(BatchProof(proof.GetSortedTargets(), {proof.GetHashes()[0]}), {leaves[0].first})); 265 | BOOST_CHECK(pruned.Verify(BatchProof(proof.GetSortedTargets(), {proof.GetHashes()[1]}), {leaves[0].first})); 266 | BOOST_CHECK(pruned.Verify(BatchProof(proof.GetSortedTargets(), {proof.GetHashes()[2]}), {leaves[0].first})); 267 | 268 | BOOST_CHECK(pruned.Verify(BatchProof(proof.GetSortedTargets(), {proof.GetHashes()[0], proof.GetHashes()[1]}), {leaves[0].first})); 269 | BOOST_CHECK(pruned.Verify(BatchProof(proof.GetSortedTargets(), {proof.GetHashes()[0], proof.GetHashes()[2]}), {leaves[0].first})); 270 | BOOST_CHECK(pruned.Verify(BatchProof(proof.GetSortedTargets(), {proof.GetHashes()[1], proof.GetHashes()[2]}), {leaves[0].first})); 271 | BOOST_CHECK(pruned.Verify(proof, {leaves[0].first})); 272 | // Empty proof should work since the pollard now holds the computed nodes as well. 273 | BOOST_CHECK(pruned.Verify(BatchProof(proof.GetSortedTargets(), {}), {leaves[0].first})); 274 | } 275 | 276 | BOOST_AUTO_TEST_CASE(simple_batch_proof) 277 | { 278 | Pollard pruned(0); 279 | RamForest full(0); 280 | 281 | std::vector leaves; 282 | CreateTestLeaves(leaves, 15); 283 | 284 | // Add test leaves, dont delete any. 285 | full.Modify(unused_undo, leaves, {}); 286 | pruned.Modify(leaves, {}); 287 | //full.PrintRoots(); 288 | 289 | BatchProof proof; 290 | full.Prove(proof, {leaves[0].first, leaves[7].first, leaves[8].first, leaves[14].first}); 291 | 292 | BOOST_CHECK(pruned.Verify(proof, {leaves[0].first, leaves[7].first, leaves[8].first, leaves[14].first})); 293 | } 294 | 295 | BOOST_AUTO_TEST_CASE(simple_batchproof_verify_and_delete) 296 | { 297 | RamForest full(0); 298 | Pollard pruned(0); 299 | 300 | std::vector leaves; 301 | CreateTestLeaves(leaves, 15); 302 | 303 | full.Modify(unused_undo, leaves, {}); 304 | pruned.Modify(leaves, {}); 305 | 306 | // Check that the roots of the full forest match the pollard roots. 307 | std::vector prev_full_roots, prev_pruned_roots; 308 | full.Roots(prev_full_roots); 309 | pruned.Roots(prev_pruned_roots); 310 | BOOST_CHECK(prev_full_roots == prev_pruned_roots); 311 | 312 | // Prove and verify some leaves. 313 | // This should populate the pollard with the required proof for deletion. 
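// (A pollard keeps only a subset of the forest's nodes; verifying a proof
// caches the supplied proof hashes and the nodes computed from them, which
// is what the subsequent Modify needs in order to carry out the deletion.)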
314 | BatchProof proof; 315 | // The order of hashes should be irrelevant for Prove 316 | BOOST_CHECK(full.Prove(proof, {leaves[7].first, leaves[8].first, leaves[14].first, leaves[0].first})); 317 | // The order of hashes should be relevant for Verify 318 | BOOST_CHECK(!pruned.Verify(proof, {leaves[7].first, leaves[8].first, leaves[14].first, leaves[0].first})); 319 | BOOST_CHECK(pruned.Verify(proof, {leaves[0].first, leaves[7].first, leaves[8].first, leaves[14].first})); 320 | 321 | // Deleting with out of order targets should cause modification to fail 322 | BOOST_CHECK(!full.Modify(unused_undo, {}, proof.GetTargets())); 323 | BOOST_CHECK(!pruned.Modify({}, proof.GetTargets())); 324 | 325 | // Delete the targets. 326 | BOOST_CHECK(full.Modify(unused_undo, {}, proof.GetSortedTargets())); 327 | BOOST_CHECK(pruned.Modify({}, proof.GetSortedTargets())); 328 | 329 | // Check that the roots of the full forest match the pollard roots. 330 | std::vector full_roots, pruned_roots; 331 | full.Roots(full_roots); 332 | pruned.Roots(pruned_roots); 333 | BOOST_CHECK(full_roots == pruned_roots); 334 | 335 | // The new roots should be different than the previous ones. 336 | BOOST_CHECK(pruned_roots != prev_pruned_roots); 337 | } 338 | 339 | BOOST_AUTO_TEST_CASE(hash_to_known_invalid_proof) 340 | { 341 | RamForest full(0); 342 | Pollard pruned(0); 343 | 344 | std::vector leaves; 345 | CreateTestLeaves(leaves, 15); 346 | 347 | // Remember leaf 0 348 | leaves[0].second = true; 349 | 350 | full.Modify(unused_undo, leaves, {}); 351 | pruned.Modify(leaves, {}); 352 | BOOST_CHECK(pruned.NumCachedLeaves() == 1); // cached: 0 353 | 354 | BatchProof proof; 355 | std::vector leaf_hashes = {leaves[4].first, leaves[5].first, leaves[6].first, leaves[7].first}; 356 | BOOST_CHECK(full.Prove(proof, leaf_hashes)); 357 | 358 | Hash invalid_hash; 359 | invalid_hash.fill(0xff); 360 | 361 | // Verification with an invalid proof hash should not pass. 
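// (Leaf 0 is cached, so the pollard presumably already knows, or can
// recompute, the node this proof hash claims to be; a conflicting hash must
// be rejected rather than overwriting the known value.)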
362 | BOOST_CHECK(!pruned.Verify(BatchProof(proof.GetSortedTargets(), {invalid_hash}), leaf_hashes)); 363 | BOOST_CHECK(pruned.Verify(proof, leaf_hashes)); 364 | BOOST_CHECK(pruned.NumCachedLeaves() == 5); // cached: 0, 4, 5, 6, 7 365 | 366 | leaf_hashes = {leaves[1].first}; 367 | BOOST_CHECK(full.Prove(proof, leaf_hashes)); 368 | 369 | // Verification should fail if the number of targets specified in the proof 370 | // does not match the number of provided target hashes 371 | BOOST_CHECK(!pruned.Verify(BatchProof(proof.GetSortedTargets(), {invalid_hash}), leaf_hashes)); 372 | BOOST_CHECK(pruned.Verify(proof, leaf_hashes)); 373 | BOOST_CHECK(pruned.NumCachedLeaves() == 6); // cached: 0, 1, 4, 5, 6, 7 374 | } 375 | 376 | BOOST_AUTO_TEST_CASE(simple_blockchain) 377 | { 378 | RamForest full(0); 379 | Pollard pruned(0); 380 | 381 | // Simulate the addition of 1000 blocks with a uniformly distributed 382 | // number of additions and deletions in each block 383 | int num_blocks = 1000; 384 | int num_max_adds = 128; 385 | int num_max_dels = 128; 386 | int unique_hash = 0; 387 | 388 | std::default_random_engine generator; 389 | std::uniform_int_distribution add_distribution(1, num_max_adds); 390 | 391 | // Add genesis leaves to have something to delete on first iteration 392 | { 393 | std::vector adds; 394 | CreateTestLeaves(adds, 8, unique_hash); 395 | unique_hash += adds.size(); 396 | full.Modify(unused_undo, adds, {}); 397 | pruned.Modify(adds, {}); 398 | } 399 | 400 | // Process blocks while keeping the UndoBatch on every step in order to 401 | // rollback all modifications in the end 402 | std::vector>> undos; 403 | for (int i = 0; i < num_blocks; i++) { 404 | // Create leaves to-add 405 | int num_adds = add_distribution(generator); 406 | std::vector adds; 407 | CreateTestLeaves(adds, num_adds, unique_hash); 408 | unique_hash += adds.size(); 409 | 410 | // Select leaves for deletion 411 | std::vector leaf_hashes; 412 | int min = 0, max = full.NumLeaves() - 1; 413 | while (max > 0 && min != max) { 414 | std::uniform_int_distribution del_distribution(min, max); 415 | int del_index = del_distribution(generator); 416 | leaf_hashes.push_back(full.GetLeaf(del_index)); 417 | min = del_index + 1 > max ? max : del_index + 1; 418 | } 419 | 420 | std::vector roots; 421 | full.Roots(roots); // roots before the modification 422 | 423 | BatchProof proof; 424 | UndoBatch undo; 425 | BOOST_CHECK(full.Prove(proof, leaf_hashes)); 426 | BOOST_CHECK(full.Modify(undo, adds, proof.GetSortedTargets())); 427 | 428 | // Keep the UndoBatch with the roots it will rollback to 429 | undos.emplace_back(undo, roots); 430 | 431 | // Undo and redo last modification to test a rollback 432 | BOOST_CHECK(full.Undo(undo)); 433 | full.Roots(roots); // roots after the rollback 434 | // roots after the rollback should match those kept with the UndoBatch 435 | BOOST_CHECK(roots == undos.back().second); 436 | 437 | BOOST_CHECK(full.Modify(unused_undo, adds, proof.GetSortedTargets())); 438 | 439 | // Verify the proof with pollard and modify pollard to new state 440 | BOOST_TEST(pruned.Verify(proof, leaf_hashes)); 441 | BOOST_CHECK(pruned.NumCachedLeaves() == leaf_hashes.size()); 442 | 443 | // The pollard should be able to produce a prove for any of the cached leaves. 
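// (The block below restores a second pollard `foo` from just the current
// roots and uses it as an independent verifier for each proof produced from
// the cache.)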
444 | { 445 | std::vector roots; 446 | pruned.Roots(roots); 447 | Pollard foo(roots, pruned.NumLeaves()); 448 | 449 | for (const Hash& hash : leaf_hashes) { 450 | BatchProof leaf_proof; 451 | BOOST_CHECK(pruned.Prove(leaf_proof, {hash})); 452 | BOOST_CHECK(foo.Verify(leaf_proof, {hash})); 453 | foo.Prune(); 454 | } 455 | } 456 | 457 | BOOST_CHECK(pruned.Modify(adds, proof.GetSortedTargets())); 458 | BOOST_CHECK(pruned.NumCachedLeaves() == 0); 459 | 460 | if (proof.GetTargets().size() > 0) BOOST_CHECK(!pruned.Verify(proof, leaf_hashes)); 461 | 462 | // check that roots match after modification 463 | std::vector pruned_roots, full_roots; 464 | pruned.Roots(pruned_roots); 465 | full.Roots(full_roots); 466 | BOOST_CHECK(full_roots == pruned_roots); 467 | } 468 | 469 | 470 | std::vector roots; 471 | // Rollback all modifications and check that we arrive at the initial state. 472 | int height = num_blocks - 1; 473 | for (auto it = undos.crbegin(); it != undos.crend(); ++it) { 474 | const UndoBatch& undo = it->first; 475 | BOOST_CHECK(full.Undo(undo)); 476 | roots.clear(); 477 | 478 | full.Roots(roots); 479 | BOOST_CHECK(roots == it->second); 480 | --height; 481 | } 482 | BOOST_CHECK(height == -1); 483 | } 484 | 485 | BOOST_AUTO_TEST_CASE(pollard_restore) 486 | { 487 | RamForest full(0); 488 | Pollard pruned(0); 489 | 490 | std::vector leaves; 491 | CreateTestLeaves(leaves, 15); 492 | 493 | full.Modify(unused_undo, leaves, {}); 494 | pruned.Modify(leaves, {}); 495 | 496 | BatchProof proof; 497 | std::vector leaf_hashes = {leaves[4].first, leaves[5].first, leaves[6].first, leaves[7].first}; 498 | BOOST_CHECK(full.Prove(proof, leaf_hashes)); 499 | BOOST_CHECK(pruned.Verify(proof, leaf_hashes)); 500 | 501 | std::vector roots; 502 | pruned.Roots(roots); 503 | 504 | Pollard restored(roots, 15); 505 | BOOST_CHECK(restored.Verify(proof, leaf_hashes)); 506 | } 507 | 508 | BOOST_AUTO_TEST_CASE(pollard_remember) 509 | { 510 | RamForest full(0); 511 | Pollard pruned(0); 512 | 513 | std::vector leaves; 514 | CreateTestLeaves(leaves, 8); 515 | 516 | leaves[1].second = true; 517 | leaves[2].second = true; 518 | leaves[3].second = true; 519 | 520 | BOOST_CHECK(full.Modify(unused_undo, leaves, {})); 521 | BOOST_CHECK(pruned.Modify(leaves, {})); 522 | 523 | BatchProof proof02; 524 | BOOST_CHECK(full.Prove(proof02, {leaves[0].first, leaves[2].first})); 525 | 526 | BOOST_CHECK(pruned.Verify(BatchProof({0, 2}, {leaves[1].first}), {leaves[0].first, leaves[2].first})); 527 | BOOST_CHECK(pruned.Verify(proof02, {leaves[0].first, leaves[2].first})); 528 | 529 | BOOST_CHECK(pruned.Modify({}, proof02.GetSortedTargets())); 530 | BOOST_CHECK(full.Modify(unused_undo, {}, proof02.GetSortedTargets())); 531 | 532 | // After the removal of 0 and 2, 3 should still be cached. 533 | BatchProof proof3; 534 | BOOST_CHECK(full.Prove(proof3, {leaves[3].first})); 535 | BOOST_CHECK(pruned.Modify({}, proof3.GetSortedTargets())); 536 | BOOST_CHECK(full.Modify(unused_undo, {}, proof3.GetSortedTargets())); 537 | 538 | // Check that the roots of the forest and the pollard are the same. 539 | std::vector full_roots, pruned_roots; 540 | full.Roots(full_roots); 541 | pruned.Roots(pruned_roots); 542 | BOOST_CHECK(full_roots == pruned_roots); 543 | } 544 | 545 | BOOST_AUTO_TEST_CASE(simple_pollard_prove) 546 | { 547 | RamForest full(0); 548 | Pollard pruned(0); 549 | 550 | std::vector leaves; 551 | CreateTestLeaves(leaves, 8); 552 | 553 | // Set the deletion to 0. 
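// Leaf 0 is marked as remembered below so the pollard keeps its proof
// branch; the test then checks that Pollard::Prove produces the same
// BatchProof as the full forest.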
554 | uint64_t target = 0; 555 | leaves[target].second = true; 556 | 557 | BOOST_CHECK(full.Modify(unused_undo, leaves, {})); 558 | BOOST_CHECK(pruned.Modify(leaves, {})); 559 | 560 | // Create the proofs. 561 | BatchProof full_proof; 562 | BatchProof pruned_proof; 563 | BOOST_CHECK(full.Prove(full_proof, {leaves[target].first})); 564 | BOOST_CHECK(pruned.Prove(pruned_proof, {leaves[target].first})); 565 | 566 | BOOST_CHECK(full_proof == pruned_proof); 567 | 568 | // Set the remember to false so that the proof isn't cached. 569 | leaves[target].second = false; 570 | 571 | // Finally check that the batchproof actually verifies. 572 | Pollard verify_pollard(0); 573 | verify_pollard.Modify(leaves, {}); 574 | BOOST_CHECK(verify_pollard.Verify(pruned_proof, {leaves[target].first})); 575 | } 576 | 577 | BOOST_AUTO_TEST_CASE(ramforest_undo) 578 | { 579 | // Two forests with one being on previous state to check against 580 | RamForest full(0), full_prev(0); 581 | int unique_hash = 0; 582 | 583 | // Create leaves to-add 584 | std::vector leaves, additional_leaves; 585 | CreateTestLeaves(leaves, 8, unique_hash); 586 | unique_hash += leaves.size(); 587 | CreateTestLeaves(additional_leaves, 8, unique_hash); 588 | 589 | UndoBatch undo; 590 | BatchProof proof; 591 | std::vector leaf_hashes; 592 | // ADD to state 593 | BOOST_CHECK(full.Modify(undo, leaves, {})); 594 | 595 | // Undo addition and then rollback 596 | BOOST_CHECK(full.Undo(undo)); 597 | BOOST_CHECK(full == full_prev); 598 | BOOST_CHECK(full.Modify(unused_undo, leaves, {})); 599 | BOOST_CHECK(full_prev.Modify(unused_undo, leaves, {})); 600 | 601 | // REMOVE from state 602 | leaf_hashes = {leaves[5].first, leaves[6].first, leaves[7].first}; 603 | BOOST_CHECK(full.Prove(proof, leaf_hashes)); 604 | 605 | BOOST_CHECK(full.Modify(undo, {}, proof.GetSortedTargets())); 606 | BOOST_CHECK(!(full == full_prev)); 607 | // Undo deletion and then rollback 608 | BOOST_CHECK(full.Undo(undo)); 609 | BOOST_CHECK(full == full_prev); 610 | BOOST_CHECK(full.Modify(unused_undo, {}, proof.GetSortedTargets())); 611 | BOOST_CHECK(full_prev.Modify(unused_undo, {}, proof.GetSortedTargets())); 612 | 613 | // ADD & REMOVE to state 614 | leaf_hashes = {leaves[2].first, leaves[4].first}; 615 | BOOST_CHECK(full.Prove(proof, leaf_hashes)); 616 | BOOST_CHECK(full.Modify(undo, additional_leaves, proof.GetSortedTargets())); 617 | BOOST_CHECK(!(full == full_prev)); 618 | // Undo modification 619 | BOOST_CHECK(full.Undo(undo)); 620 | BOOST_CHECK(full == full_prev); 621 | } 622 | 623 | BOOST_AUTO_TEST_CASE(simple_posmap_updates) 624 | { 625 | RamForest full(0); 626 | Pollard pruned(0); 627 | 628 | std::vector leaves; 629 | CreateTestLeaves(leaves, 16); 630 | 631 | leaves[0].second = true; 632 | leaves[7].second = true; 633 | 634 | BOOST_CHECK(full.Modify(unused_undo, leaves, {})); 635 | BOOST_CHECK(pruned.Modify(leaves, {})); 636 | 637 | BOOST_CHECK(pruned.CountNodes() == 10); 638 | BOOST_CHECK(pruned.ComparePositionMap(full)); 639 | BOOST_CHECK(pruned.NumCachedLeaves() == 2); 640 | 641 | BatchProof proof; 642 | BOOST_CHECK(pruned.Prove(proof, {leaves[0].first})); 643 | BOOST_CHECK(pruned.Verify(proof, {leaves[0].first})); 644 | 645 | BOOST_CHECK(pruned.Modify({}, {0})); 646 | BOOST_CHECK(full.Modify(unused_undo, {}, {0})); 647 | BOOST_CHECK(pruned.ComparePositionMap(full)); 648 | } 649 | 650 | BOOST_AUTO_TEST_CASE(add_memorable_and_remove) 651 | { 652 | RamForest full(0); 653 | Pollard pruned(0); 654 | 655 | std::vector leaves; 656 | CreateTestLeaves(leaves, 8); 657 | 658 | 
leaves[0].second = true; 659 | 660 | BOOST_CHECK(full.Modify(unused_undo, leaves, {})); 661 | BOOST_CHECK(pruned.Modify(leaves, {})); 662 | 663 | BatchProof proof; 664 | BOOST_CHECK(pruned.Prove(proof, {leaves[0].first})); 665 | BOOST_CHECK(pruned.NumCachedLeaves() == 1); 666 | 667 | BOOST_CHECK(pruned.Modify({}, {0})); 668 | } 669 | 670 | BOOST_AUTO_TEST_SUITE_END() 671 | -------------------------------------------------------------------------------- /src/pollard.cpp: -------------------------------------------------------------------------------- 1 | #include "../include/pollard.h" 2 | #include "../include/batchproof.h" 3 | #include "check.h" 4 | #include "node.h" 5 | #include "state.h" 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | // Get the internal node from a NodePtr. 13 | #define INTERNAL_NODE(acc_node) (((Pollard::Node*)acc_node.get())->m_node) 14 | 15 | namespace utreexo { 16 | 17 | // Recovery flags constants: 18 | static const int RECOVERY_CHOP_LEFT = 0; 19 | static const int RECOVERY_CHOP_RIGHT = 1; 20 | static const int RECOVERY_CHOP_BOTH = 2; 21 | 22 | class Pollard::InternalNode 23 | { 24 | public: 25 | Hash m_hash; 26 | NodePtr m_nieces[2]; 27 | 28 | InternalNode() : InternalNode(nullptr, nullptr) {} 29 | InternalNode(NodePtr left, NodePtr right) 30 | { 31 | m_nieces[0] = left; 32 | m_nieces[1] = right; 33 | m_hash.fill(0); 34 | } 35 | InternalNode(NodePtr left, NodePtr right, const Hash& hash) 36 | { 37 | m_nieces[0] = left; 38 | m_nieces[1] = right; 39 | m_hash = hash; 40 | } 41 | 42 | ~InternalNode() 43 | { 44 | m_nieces[0] = nullptr; 45 | m_nieces[1] = nullptr; 46 | } 47 | 48 | /* Chop of nieces */ 49 | void Chop(); 50 | void ChopLeft(); 51 | void ChopRight(); 52 | 53 | /* Chop of deadend nieces. */ 54 | void Prune(); 55 | 56 | /* 57 | * Return wether or not this node is a deadend. 58 | * A node is a deadend if both nieces do not point to another node. 59 | */ 60 | bool DeadEnd() const; 61 | }; 62 | 63 | class Pollard::Node : public Accumulator::Node 64 | { 65 | private: 66 | bool ReHashAndVerify() const; 67 | 68 | void ReHashNoPrune(); 69 | 70 | public: 71 | friend class Pollard; 72 | 73 | // Verification flags: 74 | // A valid node has the correct hash. 75 | static const int VALID = 1; 76 | // This marks a node as target or ancestor of a target. 77 | static const int TARGET = 1 << 1; 78 | // This marks a node as cached. 79 | static const int CACHED = 1 << 2; 80 | // The sibling of a node is cached. 81 | // Roots have both the CACHED and SIBLING_CACHED bit set. 82 | static const int SIBLING_CACHED = 1 << 3; 83 | 84 | NodePtr m_node; 85 | 86 | // Store the sibling for reHash. 87 | // The siblings nieces are the nodes children. 
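// (In a pollard an internal node points at its nieces, i.e. its sibling's
// children, rather than its own children. A node's children are therefore
// only reachable through its sibling, which is why the sibling pointer is
// kept alongside the node itself for rehashing.)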
88 | NodePtr m_sibling; 89 | 90 | uint8_t m_verification_flag{0}; 91 | 92 | Node() {} 93 | Node(NodePtr node, 94 | NodePtr sibling, 95 | NodePtr parent, 96 | uint64_t num_leaves, 97 | uint64_t pos) 98 | : m_node(node), m_sibling(sibling), m_verification_flag(0) 99 | { 100 | m_parent = parent; 101 | m_num_leaves = num_leaves; 102 | m_position = pos; 103 | } 104 | 105 | ~Node() 106 | { 107 | m_verification_flag = 0; 108 | m_node = nullptr; 109 | m_sibling = nullptr; 110 | } 111 | 112 | const Hash& GetHash() const override; 113 | void ReHash() override; 114 | 115 | bool IsTargetOrAncestor() const { return m_verification_flag & TARGET; } 116 | bool IsValid() const { return m_verification_flag & VALID; } 117 | bool IsCached() const { return m_verification_flag & CACHED; } 118 | bool IsSiblingCached() const { return m_verification_flag & SIBLING_CACHED; } 119 | void MarkAsValid() { m_verification_flag |= VALID; } 120 | }; 121 | 122 | // Pollard 123 | Pollard::Pollard(uint64_t num_leaves) : Accumulator(num_leaves) 124 | { 125 | m_remember = std::make_shared(); 126 | } 127 | 128 | Pollard::Pollard(const std::vector& roots, uint64_t num_leaves) 129 | : Pollard(num_leaves) 130 | { 131 | ForestState state(m_num_leaves); 132 | 133 | assert(roots.size() == state.NumRoots()); 134 | auto root_positions = state.RootPositions(); 135 | assert(root_positions.size() == roots.size()); 136 | 137 | // Restore roots 138 | for (int i = 0; i < roots.size(); ++i) { 139 | auto int_node = MakeNodePtr(nullptr, nullptr, roots.at(i)); 140 | m_roots.push_back(MakeNodePtr(int_node, int_node, nullptr, 141 | m_num_leaves, root_positions.at(i))); 142 | } 143 | } 144 | 145 | Pollard::~Pollard() 146 | { 147 | m_roots.clear(); 148 | } 149 | 150 | std::optional Pollard::Read(uint64_t pos) const 151 | { 152 | auto [node, sibling] = ReadSiblings(pos); 153 | if (!node) { 154 | return std::nullopt; 155 | } 156 | 157 | return std::optional{node->m_hash}; 158 | } 159 | 160 | std::vector Pollard::ReadLeafRange(uint64_t pos, uint64_t range) const 161 | { 162 | // TODO: implement efficient way of reading these hashes from the range, 163 | // without trying to read every position. 164 | std::vector hashes; 165 | for (uint64_t i = pos; i < pos + range; ++i) { 166 | std::optional hash = Read(i); 167 | if (hash.has_value()) { 168 | hashes.push_back(Read(i).value()); 169 | } 170 | } 171 | return hashes; 172 | } 173 | 174 | Pollard::InternalSiblings Pollard::ReadSiblings(uint64_t pos, NodePtr& rehash_path, bool record_path) const 175 | { 176 | const ForestState current_state(m_num_leaves); 177 | 178 | // Get the path to the position. 179 | const auto [tree, path_length, path_bits] = current_state.Path(pos); 180 | 181 | // There is no node above a root. 182 | rehash_path = nullptr; 183 | 184 | uint64_t node_pos = current_state.RootPositions()[tree]; 185 | NodePtr node = INTERNAL_NODE(m_roots[tree]); 186 | NodePtr sibling = node; 187 | 188 | if (path_length == 0) { 189 | // Roots act as their own sibling. 190 | return {node, sibling}; 191 | } 192 | 193 | // Traverse the pollard until the desired position is reached. 
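    // Each iteration consumes one bit of path_bits, most significant of the
    // path_length bits first. Because of the niece layout the pointers to the
    // next level hang off the current sibling, so both the node and its sibling
    // are looked up through sibling->m_nieces. When record_path is set, the
    // nodes visited on the way down are chained into rehash_path so the
    // affected path can be rehashed afterwards.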
194 | for (uint8_t i = 0; i < path_length; ++i) { 195 | uint8_t lr = (path_bits >> (path_length - 1 - i)) & 1; 196 | uint8_t lr_sib = current_state.Sibling(lr); 197 | 198 | if (record_path) { 199 | rehash_path = Accumulator::MakeNodePtr(node, sibling, rehash_path, 200 | m_num_leaves, node_pos); 201 | } 202 | 203 | if (!sibling) { 204 | return {nullptr, nullptr}; 205 | } 206 | 207 | node = sibling->m_nieces[lr_sib]; 208 | sibling = sibling->m_nieces[lr]; 209 | 210 | node_pos = current_state.Child(node_pos, lr_sib); 211 | } 212 | 213 | return {node, sibling}; 214 | } 215 | 216 | Pollard::InternalSiblings Pollard::ReadSiblings(uint64_t pos) const 217 | { 218 | NodePtr unused; 219 | return ReadSiblings(pos, unused, false); 220 | } 221 | 222 | NodePtr Pollard::SwapSubTrees(uint64_t from, uint64_t to) 223 | { 224 | ForestState state(m_num_leaves); 225 | 226 | NodePtr rehash_path; 227 | 228 | auto hook_in_sibling = [&rehash_path, state](NodePtr& sibling, uint64_t pos) { 229 | if (!sibling) { 230 | uint8_t lr = pos & 1; 231 | uint8_t lr_sib = state.Sibling(lr); 232 | sibling = Accumulator::MakeNodePtr(); 233 | std::dynamic_pointer_cast(rehash_path)->m_sibling->m_nieces[lr_sib] = sibling; 234 | } 235 | }; 236 | 237 | auto [node_from, sibling_from] = ReadSiblings(from, rehash_path, true); 238 | CHECK_SAFE(node_from); 239 | hook_in_sibling(sibling_from, from); 240 | 241 | NodePtr node_to{nullptr}, sibling_to{nullptr}; 242 | if (state.Sibling(from) == to) { 243 | node_to = sibling_from; 244 | sibling_to = node_from; 245 | } else { 246 | std::tie(node_to, sibling_to) = ReadSiblings(to, rehash_path, true); 247 | CHECK_SAFE(node_to); 248 | hook_in_sibling(sibling_to, to); 249 | } 250 | 251 | std::swap(node_to->m_hash, node_from->m_hash); 252 | std::swap(sibling_to->m_nieces, sibling_from->m_nieces); 253 | 254 | return rehash_path; 255 | } 256 | 257 | NodePtr Pollard::NewLeaf(const Leaf& leaf) 258 | { 259 | assert(m_remember); 260 | NodePtr int_node = Accumulator::MakeNodePtr( 261 | leaf.second ? m_remember : nullptr, nullptr, leaf.first); 262 | 263 | NodePtr node = Accumulator::MakeNodePtr( 264 | /*node*/ int_node, /*sibling*/ int_node, /*parent*/ nullptr, 265 | m_num_leaves, m_num_leaves); 266 | m_roots.push_back(node); 267 | 268 | // Only keep the hash in the map if the leaf is marked to be 269 | // remembered. 270 | if (leaf.second) { 271 | m_posmap[leaf.first] = node->m_position; 272 | } 273 | 274 | return m_roots.back(); 275 | } 276 | 277 | NodePtr Pollard::MergeRoot(uint64_t parent_pos, Hash parent_hash) 278 | { 279 | auto right = m_roots.back(); 280 | NodePtr int_right = INTERNAL_NODE(m_roots.back()); 281 | m_roots.pop_back(); 282 | NodePtr int_left = INTERNAL_NODE(m_roots.back()); 283 | m_roots.pop_back(); 284 | 285 | // swap nieces 286 | std::swap(int_left->m_nieces, int_right->m_nieces); 287 | 288 | // create internal node 289 | NodePtr int_node = Accumulator::MakeNodePtr(int_left, int_right, parent_hash); 290 | 291 | if (int_left->m_nieces[0] != m_remember && 292 | int_right->m_nieces[0] != m_remember) { 293 | int_node->Prune(); 294 | } 295 | 296 | NodePtr node = Accumulator::MakeNodePtr( 297 | /*node*/ int_node, /*sibling*/ int_node, /*parent*/ nullptr, 298 | m_num_leaves, parent_pos); 299 | m_roots.push_back(node); 300 | 301 | return m_roots.back(); 302 | } 303 | 304 | void Pollard::FinalizeRemove(uint64_t next_num_leaves) 305 | { 306 | ForestState current_state(m_num_leaves), next_state(next_num_leaves); 307 | 308 | // Remove deleted leaf hashes from the position map. 
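    // Leaf positions in [next_state.m_num_leaves, current_state.m_num_leaves)
    // do not exist in the shrunken forest, so any hashes still readable there
    // are erased from m_posmap before the new roots are selected below.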
309 | for (uint64_t pos = next_state.m_num_leaves; pos < current_state.m_num_leaves; ++pos) { 310 | if (std::optional<Hash> read_hash = Read(pos)) { 311 | m_posmap.erase(read_hash.value()); 312 | } 313 | } 314 | 315 | // Compute the positions of the new roots in the current state. 316 | std::vector<uint64_t> new_positions = current_state.RootPositions(next_state.m_num_leaves); 317 | 318 | // Select the new roots. 319 | std::vector> new_roots(new_positions.size()); 320 | int new_root_index = new_roots.size() - 1; 321 | 322 | while (new_root_index >= 0) { 323 | uint64_t new_pos = new_positions.at(new_root_index); 324 | 325 | auto [int_node, int_sibling] = ReadSiblings(new_pos); 326 | CHECK_SAFE(int_node); 327 | 328 | // TODO: the forest state of these root nodes should reflect the new state 329 | // since they survive the remove op. 330 | NodePtr node = Accumulator::MakeNodePtr( 331 | int_node, int_node, nullptr, 332 | current_state.m_num_leaves, new_pos); 333 | 334 | // When turning a node into a root, its nieces are really its children. 335 | if (int_sibling) { 336 | node->m_node->m_nieces[0] = int_sibling->m_nieces[0]; 337 | node->m_node->m_nieces[1] = int_sibling->m_nieces[1]; 338 | } else { 339 | node->m_node->Chop(); 340 | } 341 | 342 | new_roots[new_root_index] = node; 343 | --new_root_index; 344 | } 345 | 346 | m_roots.clear(); 347 | m_roots = new_roots; 348 | } 349 | 350 | void Pollard::Prune() 351 | { 352 | for (NodePtr& root : m_roots) { 353 | INTERNAL_NODE(root)->Chop(); 354 | } 355 | assert(m_remember.use_count() == 1); 356 | } 357 | 358 | uint64_t Pollard::CountNodes(const NodePtr& node) const 359 | { 360 | if (!node || node == m_remember) return 0; 361 | return 1 + CountNodes(node->m_nieces[0]) + CountNodes(node->m_nieces[1]); 362 | } 363 | 364 | uint64_t Pollard::CountNodes() const 365 | { 366 | uint64_t res = 0; 367 | for (auto root : m_roots) { 368 | res += CountNodes(INTERNAL_NODE(root)); 369 | } 370 | return res; 371 | } 372 | 373 | void Pollard::InitChildrenOfComputed(NodePtr& node, 374 | NodePtr& left_child, 375 | NodePtr& right_child, 376 | bool& recover_left, 377 | bool& recover_right) 378 | { 379 | recover_left = false; 380 | recover_right = false; 381 | 382 | if (!node->m_sibling->m_nieces[0]) { 383 | // The left child does not exist in the pollard. We need to hook it in. 384 | node->m_sibling->m_nieces[0] = Accumulator::MakeNodePtr(); 385 | recover_left = true; 386 | } 387 | 388 | if (!node->m_sibling->m_nieces[1]) { 389 | // The right child does not exist in the pollard. We need to hook it in.
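        // As with the left child above, an empty placeholder node is hooked in
        // and recover_right is set, so that Verify() can chop this branch off
        // again should the proof turn out to be invalid.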
390 | node->m_sibling->m_nieces[1] = Accumulator::MakeNodePtr(); 391 | recover_right = true; 392 | } 393 | 394 | left_child = Accumulator::MakeNodePtr( 395 | /*node*/ node->m_sibling->m_nieces[0], /*sibling*/ node->m_sibling->m_nieces[1], /*parent*/ node, 396 | m_num_leaves, ForestState(m_num_leaves).LeftChild(node->m_position)); 397 | if (!recover_left) left_child->m_verification_flag |= Pollard::Node::CACHED; 398 | if (!recover_right) left_child->m_verification_flag |= Pollard::Node::SIBLING_CACHED; 399 | 400 | right_child = Accumulator::MakeNodePtr( 401 | /*node*/ node->m_sibling->m_nieces[1], /*sibling*/ node->m_sibling->m_nieces[0], /*parent*/ node, 402 | m_num_leaves, ForestState(m_num_leaves).Child(node->m_position, 1)); 403 | if (!recover_left) right_child->m_verification_flag |= Pollard::Node::SIBLING_CACHED; 404 | if (!recover_right) right_child->m_verification_flag |= Pollard::Node::CACHED; 405 | } 406 | 407 | bool Pollard::CreateProofTree(std::vector>& proof_tree_out, 408 | std::vector, int>>& recovery, 409 | const BatchProof& proof) 410 | { 411 | ForestState state(m_num_leaves); 412 | std::vector proof_positions, computed_positions; 413 | std::tie(proof_positions, computed_positions) = state.ProofPositions(proof.GetSortedTargets()); 414 | 415 | auto proof_hash = proof.GetHashes().crbegin(); 416 | auto proof_pos = proof_positions.crbegin(); 417 | auto computed_pos = computed_positions.crbegin(); 418 | 419 | // We use a std::deque here because we need to be able to append and prepend efficiently 420 | // and a vector does not offer that. 421 | // TODO: use std::deque in all of the verification logic? 422 | std::deque> proof_tree; 423 | 424 | int row = static_cast(state.NumRows()); 425 | 426 | // For each row in the forest, we populate the proof tree. 427 | while (row >= 0) { 428 | // For storing the next row of the proof tree. 429 | std::deque> next_row; 430 | 431 | // Roots are the entry points to the forest from the top down. 432 | // We attach the root to the current row if there is one on this row. 433 | if (computed_pos < computed_positions.crend() && 434 | state.HasRoot(row) && *computed_pos == state.RootPosition(row)) { 435 | NodePtr root = m_roots.at(state.RootIndex(*computed_pos)); 436 | // TODO: make the roots have the correct positions before this. 437 | root->m_position = state.RootPosition(row); 438 | proof_tree.push_back(std::dynamic_pointer_cast(root)); 439 | proof_tree.back()->m_verification_flag = 440 | Pollard::Node::CACHED | Pollard::Node::SIBLING_CACHED; 441 | } 442 | 443 | // Iterate over the proof tree in reverse and for each node: 444 | // - check that the node is a proof or a computed node. 445 | // - if it is a computed node we prepend its children to the next row. 446 | // - if it is a proof node we populate it with the provided proof hash. 447 | // (Because we go in reverse we are able to adjust for missing proof hashes based on what is cached) 448 | for (auto node_it = proof_tree.rbegin(); node_it != proof_tree.rend(); ++node_it) { 449 | NodePtr& node = *node_it; 450 | 451 | // Populate next row 452 | if (node->m_position < m_num_leaves) { 453 | // This is a leaf. 454 | next_row.push_front(node); 455 | } 456 | 457 | bool is_computed = computed_pos < computed_positions.crend() && 458 | *computed_pos == node->m_position; 459 | bool is_proof = proof_pos < proof_positions.crend() && 460 | *proof_pos == node->m_position; 461 | 462 | // Ensure that this node is either part of the proof or will be computed. 
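            // Every position in a proof tree row is exactly one of the two: a
            // proof node (its hash is supplied by the proof or already cached)
            // or a computed node (its hash is derived from its two children).
            // The asserts below encode that these sets are disjoint and
            // together cover the row.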
463 | assert(!(is_proof && is_computed)); 464 | assert(is_proof || is_computed); 465 | 466 | if (is_computed) { 467 | ++computed_pos; 468 | node->m_verification_flag |= Pollard::Node::TARGET; 469 | 470 | if (node->m_position < m_num_leaves) continue; 471 | 472 | // Since this is a computed node, it must have two children 473 | // in the proof tree. 474 | NodePtr left_child = nullptr; 475 | NodePtr right_child = nullptr; 476 | bool recover_left = false, recover_right = false; 477 | 478 | // Initialise the children of this computed node. 479 | // If the children don't exist in the pollard, we need to create and 480 | // insert them. 481 | InitChildrenOfComputed(node, left_child, right_child, 482 | recover_left, recover_right); 483 | 484 | // Remember which nodes were inserted for recovery purposes. 485 | if (recover_left && recover_right) { 486 | // Both children were newly inserted into the pollard. 487 | recovery.emplace_back(node, RECOVERY_CHOP_BOTH); 488 | } else if (recover_left) { 489 | // Only the left child was inserted. 490 | recovery.emplace_back(node, RECOVERY_CHOP_LEFT); 491 | } else if (recover_right) { 492 | // Only the right child was inserted. 493 | recovery.emplace_back(node, RECOVERY_CHOP_RIGHT); 494 | } 495 | 496 | // Prepend the children to the next row of the proof tree. 497 | next_row.push_front(right_child); 498 | next_row.push_front(left_child); 499 | } else if (is_proof) { 500 | // This node is a proof node which we populate with the provided hash. 501 | // We might not consume a hash from the proof if the node is cached. 502 | // Proof nodes don't have children in the proof tree, so we don't add any nodes to 503 | // the next row here. 504 | 505 | node->m_verification_flag &= ~(Pollard::Node::TARGET); 506 | ++proof_pos; 507 | 508 | // Populate the proof hashes. 509 | bool consume = true; 510 | if (node->IsCached()) { 511 | Hash null_hash; 512 | null_hash.fill(0); 513 | const Hash& hash = proof_hash < proof.GetHashes().crend() ? *proof_hash : null_hash; 514 | // If the provided proof hash matches the cached hash, we consume it. 515 | consume = node->m_node->m_hash == hash; 516 | } else { 517 | // This proof node was not cached. 518 | // We populate it with the provided proof hash. 519 | // If the proof is invalid then this will lead to verification 520 | // failure during the rehashing of the parent node. 521 | 522 | if (proof_hash >= proof.GetHashes().crend()) { 523 | // The needed proof hash was not supplied. 524 | return false; 525 | } 526 | 527 | node->m_node->m_hash = *proof_hash; 528 | } 529 | 530 | if (consume) ++proof_hash; 531 | } 532 | } 533 | 534 | proof_tree = next_row; 535 | --row; 536 | } 537 | 538 | std::copy(proof_tree.begin(), proof_tree.end(), std::inserter(proof_tree_out, proof_tree_out.begin())); 539 | 540 | return proof_hash == proof.GetHashes().crend(); 541 | } 542 | 543 | 544 | bool Pollard::VerifyProofTree(std::vector> proof_tree, 545 | const std::vector<Hash>& target_hashes, 546 | const std::vector<Hash>& proof_hashes) 547 | { 548 | bool verification_success = true; 549 | auto target_hash = target_hashes.begin(); 550 | 551 | while (proof_tree.size() > 0 && verification_success) { 552 | std::vector> next_proof_tree; 553 | 554 | // Iterate over the current proof tree row. 555 | for (NodePtr& proof_node : proof_tree) { 556 | // TODO: get rid of leaf proof nodes 557 | if (!proof_node->IsTargetOrAncestor()) continue; 558 | 559 | // ================================================== 560 | // This node is a target or the ancestor of a target.
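            // Validity propagates upwards: once a node's hash is known to be
            // correct, its parent can be marked valid too, which lets the
            // cached parts of this branch skip rehashing further up.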
561 | 562 | NodePtr parent = 563 | std::dynamic_pointer_cast<Pollard::Node>(proof_node->Parent()); 564 | // If this node is valid, so is its parent. 565 | if (parent && proof_node->IsValid()) parent->MarkAsValid(); 566 | 567 | // Append the parent to the next proof tree row, if it exists. 568 | // (A root does not have a parent.) 569 | NodePtr last_parent = next_proof_tree.size() > 0 ? 570 | next_proof_tree.back() : 571 | nullptr; 572 | if (parent && 573 | (!last_parent || last_parent != parent)) { 574 | next_proof_tree.push_back(parent); 575 | } 576 | 577 | bool is_leaf = proof_node->m_position < m_num_leaves; 578 | if (is_leaf) { 579 | if (target_hash == target_hashes.end()) { 580 | return false; 581 | } 582 | 583 | CHECK_SAFE(proof_node->IsTargetOrAncestor()); 584 | 585 | // ====================== 586 | // This node is a target. 587 | // Either populate it with the target hash or verify that the hash matches if cached. 588 | if (proof_node->IsCached()) { 589 | verification_success = proof_node->m_node->m_hash == *target_hash; 590 | if (!verification_success) break; 591 | 592 | CHECK_SAFE(proof_node->m_position == ForestState(m_num_leaves).RootPosition(0) || 593 | proof_node->m_node->m_nieces[0] == m_remember || 594 | m_posmap.find(*target_hash) != m_posmap.end()); 595 | 596 | // Mark the parent as valid if this is not a leaf root. 597 | if (verification_success && parent && proof_node->IsSiblingCached()) parent->MarkAsValid(); 598 | } else { 599 | proof_node->m_node->m_hash = *target_hash; 600 | } 601 | 602 | proof_node->m_sibling->m_nieces[0] = m_remember; 603 | 604 | ++target_hash; 605 | continue; 606 | } 607 | 608 | // ================================== 609 | // This node is an ancestor of a target. 610 | 611 | if (!proof_node->IsValid() && proof_node->IsCached()) { 612 | // This node is cached but not marked as valid (e.g. a root) => we have to verify that the computed hash 613 | // matches the cached one. 614 | // Higher nodes on this branch are all valid. 615 | verification_success = proof_node->ReHashAndVerify(); 616 | if (!verification_success) break; 617 | 618 | // Mark the parent as valid if it exists. 619 | // We do this to avoid hashing higher up on this branch. 620 | if (parent && proof_node->IsSiblingCached()) parent->MarkAsValid(); 621 | } else if (proof_node->IsValid() && proof_node->IsCached()) { 622 | } else { 623 | // This node was not cached => we compute its hash from its children. 624 | proof_node->ReHashNoPrune(); 625 | // This node has to have a parent because it can't be a root. 626 | assert(parent); 627 | } 628 | } 629 | 630 | proof_tree = next_proof_tree; 631 | } 632 | 633 | // Make sure all target hashes were consumed. 634 | verification_success = verification_success && 635 | target_hash >= target_hashes.end(); 636 | 637 | return verification_success; 638 | } 639 | 640 | bool Pollard::Verify(const BatchProof& proof, const std::vector<Hash>& target_hashes) 641 | { 642 | // The number of targets specified in the proof must match the number of provided target hashes. 643 | if (target_hashes.size() != proof.GetTargets().size()) return false; 644 | 645 | // If the proof fails the sanity check it fails to verify 646 | // (e.g. the targets are not sorted). 647 | if (!proof.CheckSanity(m_num_leaves)) return false; 648 | 649 | // If there are no targets to verify we are done. 650 | if (proof.GetTargets().size() == 0) return true; 651 | 652 | // The proof tree holds the leaves of the partial tree involved in verifying the proof.
653 | // The leaves know their parents, so the tree can be traversed from the bottom up. 654 | std::vector> proof_tree; 655 | 656 | // The recovery tree is needed in case the proof is invalid. 657 | // It holds the intersection nodes where new nodes have been populated. 658 | // In case of verification failure we chop the tree down at these nodes 659 | // to prevent mutating the pollard. 660 | using IntersectionNode = std::pair, int>; 661 | std::vector recovery_tree; 662 | 663 | // Populate the proof tree from top to bottom. 664 | // This adds new empty nodes to the pollard that will either hold 665 | // proof hashes or hashes that were computed during verification. 666 | bool create_ok = CreateProofTree(proof_tree, recovery_tree, proof); 667 | 668 | // Verify the proof tree from bottom to top. 669 | bool verify_ok = create_ok && VerifyProofTree(proof_tree, target_hashes, proof.GetHashes()); 670 | if (!verify_ok) { 671 | // The proof was invalid and we have to revert the changes that were made to the pollard. 672 | // This is where we use the recovery tree to chop off the newly populated branches. 673 | // TODO: the recovery tree currently holds every node of a new branch. In theory we only 674 | // need the topmost node to chop off the entire branch. 675 | for (IntersectionNode& intersection : recovery_tree) { 676 | assert(intersection.first); 677 | assert(intersection.first->m_sibling); 678 | 679 | // Chop off the newly created branches. 680 | // Since this is the pollard, the intersection's sibling holds 681 | // its children. 682 | switch (intersection.second) { 683 | case RECOVERY_CHOP_LEFT: 684 | intersection.first->m_sibling->ChopLeft(); 685 | break; 686 | case RECOVERY_CHOP_RIGHT: 687 | intersection.first->m_sibling->ChopRight(); 688 | break; 689 | case RECOVERY_CHOP_BOTH: 690 | intersection.first->m_sibling->Chop(); 691 | break; 692 | } 693 | } 694 | 695 | // Clear the recovery and proof tree, so the references to the internal nodes 696 | // are removed. 697 | recovery_tree.clear(); 698 | proof_tree.clear(); 699 | 700 | return false; 701 | } 702 | 703 | // All targets are now remembered. 704 | for (int i = 0; i < target_hashes.size(); i++) { 705 | m_posmap[target_hashes[i]] = proof.GetSortedTargets()[i]; 706 | } 707 | 708 | // TODO: in theory the proof tree could be used during deletion as well. 709 | // It has references to all nodes that get swapped around. Using the proof 710 | // tree could make deletion more efficient since we would not have to traverse 711 | // the pollard to find the nodes that need to be swapped. 712 | proof_tree.clear(); 713 | 714 | // Proof verification passed. 715 | return true; 716 | } 717 | 718 | // Pollard::Node 719 | 720 | const Hash& Pollard::Node::GetHash() const 721 | { 722 | return m_node.get()->m_hash; 723 | } 724 | 725 | void Pollard::Node::ReHash() 726 | { 727 | if (!m_sibling->m_nieces[0] || !m_sibling->m_nieces[1]) { 728 | // TODO: error: could not rehash, one of the children is not known. 729 | // This will happen if there are duplicates in the dirtyNodes in Accumulator::Remove. 730 | return; 731 | } 732 | 733 | Accumulator::ParentHash(m_node->m_hash, m_sibling->m_nieces[0]->m_hash, m_sibling->m_nieces[1]->m_hash); 734 | m_sibling->Prune(); 735 | } 736 | 737 | void Pollard::Node::ReHashNoPrune() 738 | { 739 | if (!m_sibling->m_nieces[0] || !m_sibling->m_nieces[1]) { 740 | // TODO: error: could not rehash, one of the children is not known. 741 | // This will happen if there are duplicates in the dirtyNodes in Accumulator::Remove.
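        // Bail out here and leave the hash stored in m_node untouched.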
742 | return; 743 | } 744 | 745 | Accumulator::ParentHash(m_node->m_hash, m_sibling->m_nieces[0]->m_hash, m_sibling->m_nieces[1]->m_hash); 746 | } 747 | 748 | bool Pollard::Node::ReHashAndVerify() const 749 | { 750 | if (!m_sibling->m_nieces[0] || !m_sibling->m_nieces[1]) { 751 | // Leaves can't rehash and verify. 752 | return false; 753 | } 754 | 755 | if (IsValid()) { 756 | return true; 757 | } 758 | 759 | Hash computed_hash; 760 | Accumulator::ParentHash(computed_hash, m_sibling->m_nieces[0]->m_hash, m_sibling->m_nieces[1]->m_hash); 761 | return computed_hash == m_node->m_hash; 762 | } 763 | 764 | // Pollard::InternalNode 765 | 766 | void Pollard::InternalNode::Chop() 767 | { 768 | m_nieces[0] = nullptr; 769 | m_nieces[1] = nullptr; 770 | } 771 | 772 | void Pollard::InternalNode::ChopLeft() 773 | { 774 | m_nieces[0] = nullptr; 775 | } 776 | 777 | void Pollard::InternalNode::ChopRight() 778 | { 779 | m_nieces[1] = nullptr; 780 | } 781 | 782 | void Pollard::InternalNode::Prune() 783 | { 784 | if (!m_nieces[0] || m_nieces[0]->DeadEnd()) { 785 | m_nieces[0] = nullptr; 786 | } 787 | 788 | if (!m_nieces[1] || m_nieces[1]->DeadEnd()) { 789 | m_nieces[1] = nullptr; 790 | } 791 | } 792 | 793 | bool Pollard::InternalNode::DeadEnd() const 794 | { 795 | return !m_nieces[0] && !m_nieces[1]; 796 | } 797 | 798 | }; // namespace utreexo 799 | --------------------------------------------------------------------------------
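For orientation, a minimal usage sketch of the add/prove/verify flow exercised by accumulator_tests.cpp above. The include path, the dummy hashes, and the assumption that Leaf is a (Hash, remember) pair with Hash being a 32-byte std::array are illustrative guesses, not taken verbatim from the library:

#include "utreexo.h"
#include <cstdint>
#include <vector>

int main()
{
    using namespace utreexo;

    RamForest full(0); // full forest: stores all leaves and can prove any of them
    Pollard pruned(0); // pruned accumulator: only caches leaves marked "remember"

    // Create a few distinct dummy leaves; a real caller would use real 32-byte hashes.
    std::vector<Leaf> leaves;
    for (uint8_t i = 0; i < 8; ++i) {
        Hash hash;
        hash.fill(static_cast<uint8_t>(i + 1));
        leaves.emplace_back(hash, /*remember=*/i == 0);
    }

    // Add the same leaves to both accumulators.
    UndoBatch undo;
    full.Modify(undo, leaves, {});
    pruned.Modify(leaves, {});

    // Prove leaf 0 from the full forest and verify the proof against the pollard.
    BatchProof proof;
    full.Prove(proof, {leaves[0].first});
    return pruned.Verify(proof, {leaves[0].first}) ? 0 : 1;
}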